source: vbox/trunk/src/VBox/VMM/VMMAll/NEMAllNativeTemplate-win.cpp.h @ 74054

Last change on this file since 74054 was 73376, checked in by vboxsync, 6 years ago:
PGM/NEM: Catch make-writable changes during memory exits and avoid the emulation when a page was allocated. bugref:9044

/* $Id: NEMAllNativeTemplate-win.cpp.h 73376 2018-07-27 08:00:39Z vboxsync $ */
/** @file
 * NEM - Native execution manager, Windows code template ring-0/3.
 */

/*
 * Copyright (C) 2018 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** Copy back a segment from Hyper-V. */
#define NEM_WIN_COPY_BACK_SEG(a_Dst, a_Src) \
    do { \
        (a_Dst).u64Base  = (a_Src).Base; \
        (a_Dst).u32Limit = (a_Src).Limit; \
        (a_Dst).ValidSel = (a_Dst).Sel = (a_Src).Selector; \
        (a_Dst).Attr.u   = (a_Src).Attributes; \
        (a_Dst).fFlags   = CPUMSELREG_FLAGS_VALID; \
    } while (0)
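
/* Illustrative only: a minimal usage sketch for the copy-back macro, assuming
   a WHV_REGISTER_VALUE freshly fetched for a segment register; the variable
   names here are hypothetical and not part of the template.
   @code
        WHV_REGISTER_VALUE Value; // filled by WHvGetVirtualProcessorRegisters
        NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.es, Value.Segment);
   @endcode */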

/** @def NEMWIN_ASSERT_MSG_REG_VAL
 * Asserts the correctness of a register value in a message/context.
 */
#if 0
# define NEMWIN_NEED_GET_REGISTER
# if defined(IN_RING0) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
#  define NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_pGVCpu, a_enmReg, a_Expr, a_Msg) \
        do { \
            HV_REGISTER_VALUE TmpVal; \
            nemHCWinGetRegister(a_pVCpu, a_pGVCpu, a_enmReg, &TmpVal); \
            AssertMsg(a_Expr, a_Msg); \
        } while (0)
# else
#  define NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_pGVCpu, a_enmReg, a_Expr, a_Msg) \
        do { \
            WHV_REGISTER_VALUE TmpVal; \
            nemR3WinGetRegister(a_pVCpu, a_enmReg, &TmpVal); \
            AssertMsg(a_Expr, a_Msg); \
        } while (0)
# endif
#else
# define NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_pGVCpu, a_enmReg, a_Expr, a_Msg) do { } while (0)
#endif

/** @def NEMWIN_ASSERT_MSG_REG_VAL64
 * Asserts the correctness of a 64-bit register value in a message/context.
 */
#define NEMWIN_ASSERT_MSG_REG_VAL64(a_pVCpu, a_pGVCpu, a_enmReg, a_u64Val) \
    NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_pGVCpu, a_enmReg, (a_u64Val) == TmpVal.Reg64, \
                              (#a_u64Val "=%#RX64, expected %#RX64\n", (a_u64Val), TmpVal.Reg64))
/** @def NEMWIN_ASSERT_MSG_REG_SEG
 * Asserts the correctness of a segment register value in a message/context.
 */
#define NEMWIN_ASSERT_MSG_REG_SEG(a_pVCpu, a_pGVCpu, a_enmReg, a_SReg) \
    NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_pGVCpu, a_enmReg, \
                                  (a_SReg).Base       == TmpVal.Segment.Base \
                               && (a_SReg).Limit      == TmpVal.Segment.Limit \
                               && (a_SReg).Selector   == TmpVal.Segment.Selector \
                               && (a_SReg).Attributes == TmpVal.Segment.Attributes, \
                              ( #a_SReg "=%#RX16 {%#RX64 LB %#RX32,%#RX16} expected %#RX16 {%#RX64 LB %#RX32,%#RX16}\n", \
                                (a_SReg).Selector, (a_SReg).Base, (a_SReg).Limit, (a_SReg).Attributes, \
                                TmpVal.Segment.Selector, TmpVal.Segment.Base, TmpVal.Segment.Limit, TmpVal.Segment.Attributes))


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** NEM_WIN_PAGE_STATE_XXX names. */
NEM_TMPL_STATIC const char * const g_apszPageStates[4] = { "not-set", "unmapped", "readable", "writable" };

/** HV_INTERCEPT_ACCESS_TYPE names. */
static const char * const g_apszHvInterceptAccessTypes[4] = { "read", "write", "exec", "!undefined!" };


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
NEM_TMPL_STATIC int nemHCNativeSetPhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
                                           uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged);


#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES

/**
 * Wrapper around VMMR0_DO_NEM_MAP_PAGES for a single page.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the caller.
 * @param   GCPhysSrc   The source page.  Does not need to be page aligned.
 * @param   GCPhysDst   The destination page.  Same as @a GCPhysSrc except for
 *                      when A20 is disabled.
 * @param   fFlags      HV_MAP_GPA_XXX.
 */
DECLINLINE(int) nemHCWinHypercallMapPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst, uint32_t fFlags)
{
#ifdef IN_RING0
    /** @todo optimize further, caller generally has the physical address. */
    PGVM pGVM = GVMMR0FastGetGVMByVM(pVM);
    AssertReturn(pGVM, VERR_INVALID_VM_HANDLE);
    return nemR0WinMapPages(pGVM, pVM, &pGVM->aCpus[pVCpu->idCpu],
                            GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
                            GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
                            1, fFlags);
#else
    pVCpu->nem.s.Hypercall.MapPages.GCPhysSrc = GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
    pVCpu->nem.s.Hypercall.MapPages.GCPhysDst = GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
    pVCpu->nem.s.Hypercall.MapPages.cPages    = 1;
    pVCpu->nem.s.Hypercall.MapPages.fFlags    = fFlags;
    return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_MAP_PAGES, 0, NULL);
#endif
}
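
/* Illustrative only: a hypothetical caller making a guest page accessible via
   this wrapper; HV_MAP_GPA_READABLE/HV_MAP_GPA_WRITABLE are the flag names
   used with it elsewhere in the NEM code.  nemHCWinHypercallUnmapPage() below
   is the symmetric teardown call.
   @code
        int rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhys, GCPhys,
                                          HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE);
   @endcode */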


/**
 * Wrapper around VMMR0_DO_NEM_UNMAP_PAGES for a single page.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the caller.
 * @param   GCPhys      The page to unmap.  Does not need to be page aligned.
 */
DECLINLINE(int) nemHCWinHypercallUnmapPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
{
# ifdef IN_RING0
    PGVM pGVM = GVMMR0FastGetGVMByVM(pVM);
    AssertReturn(pGVM, VERR_INVALID_VM_HANDLE);
    return nemR0WinUnmapPages(pGVM, &pGVM->aCpus[pVCpu->idCpu], GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, 1);
# else
    pVCpu->nem.s.Hypercall.UnmapPages.GCPhys = GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
    pVCpu->nem.s.Hypercall.UnmapPages.cPages = 1;
    return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_UNMAP_PAGES, 0, NULL);
# endif
}

#endif /* NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
#ifndef IN_RING0

NEM_TMPL_STATIC int nemHCWinCopyStateToHyperV(PVM pVM, PVMCPU pVCpu)
{
# if defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) || defined(NEM_WIN_WITH_RING0_RUNLOOP)
#  if !defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) && defined(NEM_WIN_WITH_RING0_RUNLOOP)
    if (pVM->nem.s.fUseRing0Runloop)
#  endif
    {
        int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_EXPORT_STATE, 0, NULL);
        AssertLogRelRCReturn(rc, rc);
        return rc;
    }
# endif
# ifndef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS

    /*
     * The following is very similar to what nemR0WinExportState() does.
     */
    WHV_REGISTER_NAME  aenmNames[128];
    WHV_REGISTER_VALUE aValues[128];

    uint64_t const fWhat = ~pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK);
    if (   !fWhat
        && pVCpu->nem.s.fCurrentInterruptWindows == pVCpu->nem.s.fDesiredInterruptWindows)
        return VINF_SUCCESS;
    uintptr_t iReg = 0;

#  define ADD_REG64(a_enmName, a_uValue) do { \
            aenmNames[iReg]             = (a_enmName); \
            aValues[iReg].Reg128.High64 = 0; \
            aValues[iReg].Reg64         = (a_uValue); \
            iReg++; \
        } while (0)
#  define ADD_REG128(a_enmName, a_uValueLo, a_uValueHi) do { \
            aenmNames[iReg]             = (a_enmName); \
            aValues[iReg].Reg128.Low64  = (a_uValueLo); \
            aValues[iReg].Reg128.High64 = (a_uValueHi); \
            iReg++; \
        } while (0)
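
    /* Illustrative only: each ADD_* helper appends one entry to the parallel
       aenmNames/aValues arrays handed to WHvSetVirtualProcessorRegisters at
       the bottom of this function, e.g. (hypothetical):
       @code
            ADD_REG64(WHvX64RegisterRax, pVCpu->cpum.GstCtx.rax); // fills slot iReg, then iReg++
       @endcode */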

    /* GPRs */
    if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_RAX)
            ADD_REG64(WHvX64RegisterRax, pVCpu->cpum.GstCtx.rax);
        if (fWhat & CPUMCTX_EXTRN_RCX)
            ADD_REG64(WHvX64RegisterRcx, pVCpu->cpum.GstCtx.rcx);
        if (fWhat & CPUMCTX_EXTRN_RDX)
            ADD_REG64(WHvX64RegisterRdx, pVCpu->cpum.GstCtx.rdx);
        if (fWhat & CPUMCTX_EXTRN_RBX)
            ADD_REG64(WHvX64RegisterRbx, pVCpu->cpum.GstCtx.rbx);
        if (fWhat & CPUMCTX_EXTRN_RSP)
            ADD_REG64(WHvX64RegisterRsp, pVCpu->cpum.GstCtx.rsp);
        if (fWhat & CPUMCTX_EXTRN_RBP)
            ADD_REG64(WHvX64RegisterRbp, pVCpu->cpum.GstCtx.rbp);
        if (fWhat & CPUMCTX_EXTRN_RSI)
            ADD_REG64(WHvX64RegisterRsi, pVCpu->cpum.GstCtx.rsi);
        if (fWhat & CPUMCTX_EXTRN_RDI)
            ADD_REG64(WHvX64RegisterRdi, pVCpu->cpum.GstCtx.rdi);
        if (fWhat & CPUMCTX_EXTRN_R8_R15)
        {
            ADD_REG64(WHvX64RegisterR8,  pVCpu->cpum.GstCtx.r8);
            ADD_REG64(WHvX64RegisterR9,  pVCpu->cpum.GstCtx.r9);
            ADD_REG64(WHvX64RegisterR10, pVCpu->cpum.GstCtx.r10);
            ADD_REG64(WHvX64RegisterR11, pVCpu->cpum.GstCtx.r11);
            ADD_REG64(WHvX64RegisterR12, pVCpu->cpum.GstCtx.r12);
            ADD_REG64(WHvX64RegisterR13, pVCpu->cpum.GstCtx.r13);
            ADD_REG64(WHvX64RegisterR14, pVCpu->cpum.GstCtx.r14);
            ADD_REG64(WHvX64RegisterR15, pVCpu->cpum.GstCtx.r15);
        }
    }

    /* RIP & Flags */
    if (fWhat & CPUMCTX_EXTRN_RIP)
        ADD_REG64(WHvX64RegisterRip, pVCpu->cpum.GstCtx.rip);
    if (fWhat & CPUMCTX_EXTRN_RFLAGS)
        ADD_REG64(WHvX64RegisterRflags, pVCpu->cpum.GstCtx.rflags.u);

    /* Segments */
#  define ADD_SEG(a_enmName, a_SReg) \
        do { \
            aenmNames[iReg]                  = a_enmName; \
            aValues[iReg].Segment.Base       = (a_SReg).u64Base; \
            aValues[iReg].Segment.Limit      = (a_SReg).u32Limit; \
            aValues[iReg].Segment.Selector   = (a_SReg).Sel; \
            aValues[iReg].Segment.Attributes = (a_SReg).Attr.u; \
            iReg++; \
        } while (0)
    if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_ES)
            ADD_SEG(WHvX64RegisterEs, pVCpu->cpum.GstCtx.es);
        if (fWhat & CPUMCTX_EXTRN_CS)
            ADD_SEG(WHvX64RegisterCs, pVCpu->cpum.GstCtx.cs);
        if (fWhat & CPUMCTX_EXTRN_SS)
            ADD_SEG(WHvX64RegisterSs, pVCpu->cpum.GstCtx.ss);
        if (fWhat & CPUMCTX_EXTRN_DS)
            ADD_SEG(WHvX64RegisterDs, pVCpu->cpum.GstCtx.ds);
        if (fWhat & CPUMCTX_EXTRN_FS)
            ADD_SEG(WHvX64RegisterFs, pVCpu->cpum.GstCtx.fs);
        if (fWhat & CPUMCTX_EXTRN_GS)
            ADD_SEG(WHvX64RegisterGs, pVCpu->cpum.GstCtx.gs);
    }

    /* Descriptor tables & task segment. */
    if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_LDTR)
            ADD_SEG(WHvX64RegisterLdtr, pVCpu->cpum.GstCtx.ldtr);
        if (fWhat & CPUMCTX_EXTRN_TR)
            ADD_SEG(WHvX64RegisterTr, pVCpu->cpum.GstCtx.tr);
        if (fWhat & CPUMCTX_EXTRN_IDTR)
        {
            aenmNames[iReg] = WHvX64RegisterIdtr;
            aValues[iReg].Table.Limit = pVCpu->cpum.GstCtx.idtr.cbIdt;
            aValues[iReg].Table.Base  = pVCpu->cpum.GstCtx.idtr.pIdt;
            iReg++;
        }
        if (fWhat & CPUMCTX_EXTRN_GDTR)
        {
            aenmNames[iReg] = WHvX64RegisterGdtr;
            aValues[iReg].Table.Limit = pVCpu->cpum.GstCtx.gdtr.cbGdt;
            aValues[iReg].Table.Base  = pVCpu->cpum.GstCtx.gdtr.pGdt;
            iReg++;
        }
    }

    /* Control registers. */
    if (fWhat & CPUMCTX_EXTRN_CR_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_CR0)
            ADD_REG64(WHvX64RegisterCr0, pVCpu->cpum.GstCtx.cr0);
        if (fWhat & CPUMCTX_EXTRN_CR2)
            ADD_REG64(WHvX64RegisterCr2, pVCpu->cpum.GstCtx.cr2);
        if (fWhat & CPUMCTX_EXTRN_CR3)
            ADD_REG64(WHvX64RegisterCr3, pVCpu->cpum.GstCtx.cr3);
        if (fWhat & CPUMCTX_EXTRN_CR4)
            ADD_REG64(WHvX64RegisterCr4, pVCpu->cpum.GstCtx.cr4);
    }
    if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
        ADD_REG64(WHvX64RegisterCr8, CPUMGetGuestCR8(pVCpu));

    /* Debug registers. */
/** @todo fixme. Figure out what the hyper-v version of KVM_SET_GUEST_DEBUG would be. */
    if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
    {
        ADD_REG64(WHvX64RegisterDr0, pVCpu->cpum.GstCtx.dr[0]); // CPUMGetHyperDR0(pVCpu));
        ADD_REG64(WHvX64RegisterDr1, pVCpu->cpum.GstCtx.dr[1]); // CPUMGetHyperDR1(pVCpu));
        ADD_REG64(WHvX64RegisterDr2, pVCpu->cpum.GstCtx.dr[2]); // CPUMGetHyperDR2(pVCpu));
        ADD_REG64(WHvX64RegisterDr3, pVCpu->cpum.GstCtx.dr[3]); // CPUMGetHyperDR3(pVCpu));
    }
    if (fWhat & CPUMCTX_EXTRN_DR6)
        ADD_REG64(WHvX64RegisterDr6, pVCpu->cpum.GstCtx.dr[6]); // CPUMGetHyperDR6(pVCpu));
    if (fWhat & CPUMCTX_EXTRN_DR7)
        ADD_REG64(WHvX64RegisterDr7, pVCpu->cpum.GstCtx.dr[7]); // CPUMGetHyperDR7(pVCpu));

    /* Floating point state. */
    if (fWhat & CPUMCTX_EXTRN_X87)
    {
        ADD_REG128(WHvX64RegisterFpMmx0, pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[0].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[0].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx1, pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[1].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[1].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx2, pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[2].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[2].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx3, pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[3].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[3].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx4, pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[4].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[4].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx5, pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[5].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[5].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx6, pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[6].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[6].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx7, pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[7].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[7].au64[1]);

        aenmNames[iReg] = WHvX64RegisterFpControlStatus;
        aValues[iReg].FpControlStatus.FpControl = pVCpu->cpum.GstCtx.pXStateR3->x87.FCW;
        aValues[iReg].FpControlStatus.FpStatus  = pVCpu->cpum.GstCtx.pXStateR3->x87.FSW;
        aValues[iReg].FpControlStatus.FpTag     = pVCpu->cpum.GstCtx.pXStateR3->x87.FTW;
        aValues[iReg].FpControlStatus.Reserved  = pVCpu->cpum.GstCtx.pXStateR3->x87.FTW >> 8;
        aValues[iReg].FpControlStatus.LastFpOp  = pVCpu->cpum.GstCtx.pXStateR3->x87.FOP;
        aValues[iReg].FpControlStatus.LastFpRip = (pVCpu->cpum.GstCtx.pXStateR3->x87.FPUIP)
                                                | ((uint64_t)pVCpu->cpum.GstCtx.pXStateR3->x87.CS << 32)
                                                | ((uint64_t)pVCpu->cpum.GstCtx.pXStateR3->x87.Rsrvd1 << 48);
        iReg++;

        aenmNames[iReg] = WHvX64RegisterXmmControlStatus;
        aValues[iReg].XmmControlStatus.LastFpRdp            = (pVCpu->cpum.GstCtx.pXStateR3->x87.FPUDP)
                                                            | ((uint64_t)pVCpu->cpum.GstCtx.pXStateR3->x87.DS << 32)
                                                            | ((uint64_t)pVCpu->cpum.GstCtx.pXStateR3->x87.Rsrvd2 << 48);
        aValues[iReg].XmmControlStatus.XmmStatusControl     = pVCpu->cpum.GstCtx.pXStateR3->x87.MXCSR;
        aValues[iReg].XmmControlStatus.XmmStatusControlMask = pVCpu->cpum.GstCtx.pXStateR3->x87.MXCSR_MASK; /** @todo ??? (Isn't this an output field?) */
        iReg++;
    }

    /* Vector state. */
    if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
    {
        ADD_REG128(WHvX64RegisterXmm0,  pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 0].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 0].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm1,  pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 1].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 1].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm2,  pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 2].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 2].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm3,  pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 3].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 3].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm4,  pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 4].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 4].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm5,  pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 5].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 5].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm6,  pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 6].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 6].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm7,  pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 7].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 7].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm8,  pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 8].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 8].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm9,  pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 9].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 9].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm10, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[10].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[10].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm11, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[11].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[11].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm12, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[12].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[12].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm13, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[13].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[13].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm14, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[14].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[14].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm15, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[15].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[15].uXmm.s.Hi);
    }

    /* MSRs */
    // WHvX64RegisterTsc - don't touch
    if (fWhat & CPUMCTX_EXTRN_EFER)
        ADD_REG64(WHvX64RegisterEfer, pVCpu->cpum.GstCtx.msrEFER);
    if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
        ADD_REG64(WHvX64RegisterKernelGsBase, pVCpu->cpum.GstCtx.msrKERNELGSBASE);
    if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
    {
        ADD_REG64(WHvX64RegisterSysenterCs,  pVCpu->cpum.GstCtx.SysEnter.cs);
        ADD_REG64(WHvX64RegisterSysenterEip, pVCpu->cpum.GstCtx.SysEnter.eip);
        ADD_REG64(WHvX64RegisterSysenterEsp, pVCpu->cpum.GstCtx.SysEnter.esp);
    }
    if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
    {
        ADD_REG64(WHvX64RegisterStar,   pVCpu->cpum.GstCtx.msrSTAR);
        ADD_REG64(WHvX64RegisterLstar,  pVCpu->cpum.GstCtx.msrLSTAR);
        ADD_REG64(WHvX64RegisterCstar,  pVCpu->cpum.GstCtx.msrCSTAR);
        ADD_REG64(WHvX64RegisterSfmask, pVCpu->cpum.GstCtx.msrSFMASK);
    }
    if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
    {
        ADD_REG64(WHvX64RegisterApicBase, APICGetBaseMsrNoCheck(pVCpu));
        ADD_REG64(WHvX64RegisterPat, pVCpu->cpum.GstCtx.msrPAT);
#if 0 /** @todo check if WHvX64RegisterMsrMtrrCap works here... */
        ADD_REG64(WHvX64RegisterMsrMtrrCap, CPUMGetGuestIa32MtrrCap(pVCpu));
#endif
        PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
        ADD_REG64(WHvX64RegisterMsrMtrrDefType,     pCtxMsrs->msr.MtrrDefType);
        ADD_REG64(WHvX64RegisterMsrMtrrFix64k00000, pCtxMsrs->msr.MtrrFix64K_00000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix16k80000, pCtxMsrs->msr.MtrrFix16K_80000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix16kA0000, pCtxMsrs->msr.MtrrFix16K_A0000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kC0000,  pCtxMsrs->msr.MtrrFix4K_C0000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kC8000,  pCtxMsrs->msr.MtrrFix4K_C8000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kD0000,  pCtxMsrs->msr.MtrrFix4K_D0000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kD8000,  pCtxMsrs->msr.MtrrFix4K_D8000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kE0000,  pCtxMsrs->msr.MtrrFix4K_E0000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kE8000,  pCtxMsrs->msr.MtrrFix4K_E8000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kF0000,  pCtxMsrs->msr.MtrrFix4K_F0000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kF8000,  pCtxMsrs->msr.MtrrFix4K_F8000);
        ADD_REG64(WHvX64RegisterTscAux,             pCtxMsrs->msr.TscAux);
#if 0 /** @todo these registers aren't available? Might explain something... */
        const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM->pVM);
        if (enmCpuVendor != CPUMCPUVENDOR_AMD)
        {
            ADD_REG64(HvX64RegisterIa32MiscEnable, pCtxMsrs->msr.MiscEnable);
            ADD_REG64(HvX64RegisterIa32FeatureControl, CPUMGetGuestIa32FeatureControl(pVCpu));
        }
#endif
    }

    /* Event injection (clear it). */
    if (fWhat & CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)
        ADD_REG64(WHvRegisterPendingInterruption, 0);

    /* Interruptibility state.  This can get a little complicated since we get
       half of the state via HV_X64_VP_EXECUTION_STATE. */
    if (   (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
        ==          (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI) )
    {
        ADD_REG64(WHvRegisterInterruptState, 0);
        if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
            && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip)
            aValues[iReg - 1].InterruptState.InterruptShadow = 1;
        if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
            aValues[iReg - 1].InterruptState.NmiMasked = 1;
    }
    else if (fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT)
    {
        if (   pVCpu->nem.s.fLastInterruptShadow
            || (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
                && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip))
        {
            ADD_REG64(WHvRegisterInterruptState, 0);
            if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
                && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip)
                aValues[iReg - 1].InterruptState.InterruptShadow = 1;
            /** @todo Retrieve NMI state, currently assuming it's zero. (yes this may happen on I/O) */
            //if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
            //    aValues[iReg - 1].InterruptState.NmiMasked = 1;
        }
    }
    else
        Assert(!(fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI));

    /* Interrupt windows.  Always set if active as Hyper-V seems to be forgetful. */
    uint8_t const fDesiredIntWin = pVCpu->nem.s.fDesiredInterruptWindows;
    if (   fDesiredIntWin
        || pVCpu->nem.s.fCurrentInterruptWindows != fDesiredIntWin)
    {
        pVCpu->nem.s.fCurrentInterruptWindows = pVCpu->nem.s.fDesiredInterruptWindows;
        ADD_REG64(WHvX64RegisterDeliverabilityNotifications, fDesiredIntWin);
        Assert(aValues[iReg - 1].DeliverabilityNotifications.NmiNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_NMI));
        Assert(aValues[iReg - 1].DeliverabilityNotifications.InterruptNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_REGULAR));
        Assert(aValues[iReg - 1].DeliverabilityNotifications.InterruptPriority == (fDesiredIntWin & NEM_WIN_INTW_F_PRIO_MASK) >> NEM_WIN_INTW_F_PRIO_SHIFT);
    }
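
    /* Illustrative only: fDesiredIntWin packs the NEM_WIN_INTW_F_XXX bits that
       the asserts above check against the DeliverabilityNotifications bitfield,
       e.g. (hypothetical value):
       @code
            uint8_t const fIntWin = NEM_WIN_INTW_F_REGULAR | NEM_WIN_INTW_F_NMI; // request both windows
       @endcode */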

    /// @todo WHvRegisterPendingEvent0
    /// @todo WHvRegisterPendingEvent1

    /*
     * Set the registers.
     */
    Assert(iReg < RT_ELEMENTS(aValues));
    Assert(iReg < RT_ELEMENTS(aenmNames));
#  ifdef NEM_WIN_INTERCEPT_NT_IO_CTLS
    Log12(("Calling WHvSetVirtualProcessorRegisters(%p, %u, %p, %u, %p)\n",
           pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues));
#  endif
    HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues);
    if (SUCCEEDED(hrc))
    {
        pVCpu->cpum.GstCtx.fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM;
        return VINF_SUCCESS;
    }
    AssertLogRelMsgFailed(("WHvSetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
                           pVM->nem.s.hPartition, pVCpu->idCpu, iReg,
                           hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
    return VERR_INTERNAL_ERROR;

#  undef ADD_REG64
#  undef ADD_REG128
#  undef ADD_SEG

# endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
}


NEM_TMPL_STATIC int nemHCWinCopyStateFromHyperV(PVM pVM, PVMCPU pVCpu, uint64_t fWhat)
{
# if defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) || defined(NEM_WIN_WITH_RING0_RUNLOOP)
#  if !defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) && defined(NEM_WIN_WITH_RING0_RUNLOOP)
    if (pVM->nem.s.fUseRing0Runloop)
#  endif
    {
        /* See NEMR0ImportState */
        int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_IMPORT_STATE, fWhat, NULL);
        if (RT_SUCCESS(rc))
            return rc;
        if (rc == VERR_NEM_FLUSH_TLB)
            return PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /*fGlobal*/);
        AssertLogRelRCReturn(rc, rc);
        return rc;
    }
# endif
# ifndef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS
    WHV_REGISTER_NAME aenmNames[128];

    fWhat &= pVCpu->cpum.GstCtx.fExtrn;
    uintptr_t iReg = 0;

    /* GPRs */
    if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_RAX)
            aenmNames[iReg++] = WHvX64RegisterRax;
        if (fWhat & CPUMCTX_EXTRN_RCX)
            aenmNames[iReg++] = WHvX64RegisterRcx;
        if (fWhat & CPUMCTX_EXTRN_RDX)
            aenmNames[iReg++] = WHvX64RegisterRdx;
        if (fWhat & CPUMCTX_EXTRN_RBX)
            aenmNames[iReg++] = WHvX64RegisterRbx;
        if (fWhat & CPUMCTX_EXTRN_RSP)
            aenmNames[iReg++] = WHvX64RegisterRsp;
        if (fWhat & CPUMCTX_EXTRN_RBP)
            aenmNames[iReg++] = WHvX64RegisterRbp;
        if (fWhat & CPUMCTX_EXTRN_RSI)
            aenmNames[iReg++] = WHvX64RegisterRsi;
        if (fWhat & CPUMCTX_EXTRN_RDI)
            aenmNames[iReg++] = WHvX64RegisterRdi;
        if (fWhat & CPUMCTX_EXTRN_R8_R15)
        {
            aenmNames[iReg++] = WHvX64RegisterR8;
            aenmNames[iReg++] = WHvX64RegisterR9;
            aenmNames[iReg++] = WHvX64RegisterR10;
            aenmNames[iReg++] = WHvX64RegisterR11;
            aenmNames[iReg++] = WHvX64RegisterR12;
            aenmNames[iReg++] = WHvX64RegisterR13;
            aenmNames[iReg++] = WHvX64RegisterR14;
            aenmNames[iReg++] = WHvX64RegisterR15;
        }
    }

    /* RIP & Flags */
    if (fWhat & CPUMCTX_EXTRN_RIP)
        aenmNames[iReg++] = WHvX64RegisterRip;
    if (fWhat & CPUMCTX_EXTRN_RFLAGS)
        aenmNames[iReg++] = WHvX64RegisterRflags;

    /* Segments */
    if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_ES)
            aenmNames[iReg++] = WHvX64RegisterEs;
        if (fWhat & CPUMCTX_EXTRN_CS)
            aenmNames[iReg++] = WHvX64RegisterCs;
        if (fWhat & CPUMCTX_EXTRN_SS)
            aenmNames[iReg++] = WHvX64RegisterSs;
        if (fWhat & CPUMCTX_EXTRN_DS)
            aenmNames[iReg++] = WHvX64RegisterDs;
        if (fWhat & CPUMCTX_EXTRN_FS)
            aenmNames[iReg++] = WHvX64RegisterFs;
        if (fWhat & CPUMCTX_EXTRN_GS)
            aenmNames[iReg++] = WHvX64RegisterGs;
    }

    /* Descriptor tables. */
    if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_LDTR)
            aenmNames[iReg++] = WHvX64RegisterLdtr;
        if (fWhat & CPUMCTX_EXTRN_TR)
            aenmNames[iReg++] = WHvX64RegisterTr;
        if (fWhat & CPUMCTX_EXTRN_IDTR)
            aenmNames[iReg++] = WHvX64RegisterIdtr;
        if (fWhat & CPUMCTX_EXTRN_GDTR)
            aenmNames[iReg++] = WHvX64RegisterGdtr;
    }

    /* Control registers. */
    if (fWhat & CPUMCTX_EXTRN_CR_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_CR0)
            aenmNames[iReg++] = WHvX64RegisterCr0;
        if (fWhat & CPUMCTX_EXTRN_CR2)
            aenmNames[iReg++] = WHvX64RegisterCr2;
        if (fWhat & CPUMCTX_EXTRN_CR3)
            aenmNames[iReg++] = WHvX64RegisterCr3;
        if (fWhat & CPUMCTX_EXTRN_CR4)
            aenmNames[iReg++] = WHvX64RegisterCr4;
    }
    if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
        aenmNames[iReg++] = WHvX64RegisterCr8;

    /* Debug registers. */
    if (fWhat & CPUMCTX_EXTRN_DR7)
        aenmNames[iReg++] = WHvX64RegisterDr7;
    if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
    {
        if (!(fWhat & CPUMCTX_EXTRN_DR7) && (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_DR7))
        {
            fWhat |= CPUMCTX_EXTRN_DR7;
            aenmNames[iReg++] = WHvX64RegisterDr7;
        }
        aenmNames[iReg++] = WHvX64RegisterDr0;
        aenmNames[iReg++] = WHvX64RegisterDr1;
        aenmNames[iReg++] = WHvX64RegisterDr2;
        aenmNames[iReg++] = WHvX64RegisterDr3;
    }
    if (fWhat & CPUMCTX_EXTRN_DR6)
        aenmNames[iReg++] = WHvX64RegisterDr6;

    /* Floating point state. */
    if (fWhat & CPUMCTX_EXTRN_X87)
    {
        aenmNames[iReg++] = WHvX64RegisterFpMmx0;
        aenmNames[iReg++] = WHvX64RegisterFpMmx1;
        aenmNames[iReg++] = WHvX64RegisterFpMmx2;
        aenmNames[iReg++] = WHvX64RegisterFpMmx3;
        aenmNames[iReg++] = WHvX64RegisterFpMmx4;
        aenmNames[iReg++] = WHvX64RegisterFpMmx5;
        aenmNames[iReg++] = WHvX64RegisterFpMmx6;
        aenmNames[iReg++] = WHvX64RegisterFpMmx7;
        aenmNames[iReg++] = WHvX64RegisterFpControlStatus;
    }
    if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
        aenmNames[iReg++] = WHvX64RegisterXmmControlStatus;

    /* Vector state. */
    if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
    {
        aenmNames[iReg++] = WHvX64RegisterXmm0;
        aenmNames[iReg++] = WHvX64RegisterXmm1;
        aenmNames[iReg++] = WHvX64RegisterXmm2;
        aenmNames[iReg++] = WHvX64RegisterXmm3;
        aenmNames[iReg++] = WHvX64RegisterXmm4;
        aenmNames[iReg++] = WHvX64RegisterXmm5;
        aenmNames[iReg++] = WHvX64RegisterXmm6;
        aenmNames[iReg++] = WHvX64RegisterXmm7;
        aenmNames[iReg++] = WHvX64RegisterXmm8;
        aenmNames[iReg++] = WHvX64RegisterXmm9;
        aenmNames[iReg++] = WHvX64RegisterXmm10;
        aenmNames[iReg++] = WHvX64RegisterXmm11;
        aenmNames[iReg++] = WHvX64RegisterXmm12;
        aenmNames[iReg++] = WHvX64RegisterXmm13;
        aenmNames[iReg++] = WHvX64RegisterXmm14;
        aenmNames[iReg++] = WHvX64RegisterXmm15;
    }

    /* MSRs */
    // WHvX64RegisterTsc - don't touch
    if (fWhat & CPUMCTX_EXTRN_EFER)
        aenmNames[iReg++] = WHvX64RegisterEfer;
    if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
        aenmNames[iReg++] = WHvX64RegisterKernelGsBase;
    if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
    {
        aenmNames[iReg++] = WHvX64RegisterSysenterCs;
        aenmNames[iReg++] = WHvX64RegisterSysenterEip;
        aenmNames[iReg++] = WHvX64RegisterSysenterEsp;
    }
    if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
    {
        aenmNames[iReg++] = WHvX64RegisterStar;
        aenmNames[iReg++] = WHvX64RegisterLstar;
        aenmNames[iReg++] = WHvX64RegisterCstar;
        aenmNames[iReg++] = WHvX64RegisterSfmask;
    }

//#ifdef LOG_ENABLED
//    const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM->pVM);
//#endif
    if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
    {
        aenmNames[iReg++] = WHvX64RegisterApicBase; /// @todo APIC BASE
        aenmNames[iReg++] = WHvX64RegisterPat;
#if 0 /*def LOG_ENABLED*/ /** @todo Check if WHvX64RegisterMsrMtrrCap works... */
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrCap;
#endif
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrDefType;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix64k00000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix16k80000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix16kA0000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kC0000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kC8000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kD0000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kD8000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kE0000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kE8000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kF0000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kF8000;
        aenmNames[iReg++] = WHvX64RegisterTscAux;
        /** @todo look for HvX64RegisterIa32MiscEnable and HvX64RegisterIa32FeatureControl? */
//#ifdef LOG_ENABLED
//        if (enmCpuVendor != CPUMCPUVENDOR_AMD)
//            aenmNames[iReg++] = HvX64RegisterIa32FeatureControl;
//#endif
    }

    /* Interruptibility. */
    if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
    {
        aenmNames[iReg++] = WHvRegisterInterruptState;
        aenmNames[iReg++] = WHvX64RegisterRip;
    }

    /* Event injection. */
    aenmNames[iReg++] = WHvRegisterPendingInterruption;
    aenmNames[iReg++] = WHvRegisterPendingEvent0;
    aenmNames[iReg++] = WHvRegisterPendingEvent1;

    size_t const cRegs = iReg;
    Assert(cRegs < RT_ELEMENTS(aenmNames));

    /*
     * Get the registers.
     */
    WHV_REGISTER_VALUE aValues[128];
    RT_ZERO(aValues);
    Assert(RT_ELEMENTS(aValues) >= cRegs);
    Assert(RT_ELEMENTS(aenmNames) >= cRegs);
#  ifdef NEM_WIN_INTERCEPT_NT_IO_CTLS
    Log12(("Calling WHvGetVirtualProcessorRegisters(%p, %u, %p, %u, %p)\n",
           pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, cRegs, aValues));
#  endif
    HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, (uint32_t)cRegs, aValues);
    AssertLogRelMsgReturn(SUCCEEDED(hrc),
                          ("WHvGetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
                           pVM->nem.s.hPartition, pVCpu->idCpu, cRegs, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
                          , VERR_NEM_GET_REGISTERS_FAILED);

    iReg = 0;
#  define GET_REG64(a_DstVar, a_enmName) do { \
            Assert(aenmNames[iReg] == (a_enmName)); \
            (a_DstVar) = aValues[iReg].Reg64; \
            iReg++; \
        } while (0)
#  define GET_REG64_LOG7(a_DstVar, a_enmName, a_szLogName) do { \
            Assert(aenmNames[iReg] == (a_enmName)); \
            if ((a_DstVar) != aValues[iReg].Reg64) \
                Log7(("NEM/%u: " a_szLogName " changed %RX64 -> %RX64\n", pVCpu->idCpu, (a_DstVar), aValues[iReg].Reg64)); \
            (a_DstVar) = aValues[iReg].Reg64; \
            iReg++; \
        } while (0)
#  define GET_REG128(a_DstVarLo, a_DstVarHi, a_enmName) do { \
            Assert(aenmNames[iReg] == a_enmName); \
            (a_DstVarLo) = aValues[iReg].Reg128.Low64; \
            (a_DstVarHi) = aValues[iReg].Reg128.High64; \
            iReg++; \
        } while (0)
#  define GET_SEG(a_SReg, a_enmName) do { \
            Assert(aenmNames[iReg] == (a_enmName)); \
            NEM_WIN_COPY_BACK_SEG(a_SReg, aValues[iReg].Segment); \
            iReg++; \
        } while (0)
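
    /* Illustrative only: the GET_* helpers must consume aValues in exactly the
       order the names were pushed onto aenmNames above; the Assert on
       aenmNames[iReg] is what catches any ordering drift.  E.g. (hypothetical):
       @code
            GET_REG64(pVCpu->cpum.GstCtx.rax, WHvX64RegisterRax); // asserts slot iReg really is RAX
       @endcode */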

    /* GPRs */
    if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_RAX)
            GET_REG64(pVCpu->cpum.GstCtx.rax, WHvX64RegisterRax);
        if (fWhat & CPUMCTX_EXTRN_RCX)
            GET_REG64(pVCpu->cpum.GstCtx.rcx, WHvX64RegisterRcx);
        if (fWhat & CPUMCTX_EXTRN_RDX)
            GET_REG64(pVCpu->cpum.GstCtx.rdx, WHvX64RegisterRdx);
        if (fWhat & CPUMCTX_EXTRN_RBX)
            GET_REG64(pVCpu->cpum.GstCtx.rbx, WHvX64RegisterRbx);
        if (fWhat & CPUMCTX_EXTRN_RSP)
            GET_REG64(pVCpu->cpum.GstCtx.rsp, WHvX64RegisterRsp);
        if (fWhat & CPUMCTX_EXTRN_RBP)
            GET_REG64(pVCpu->cpum.GstCtx.rbp, WHvX64RegisterRbp);
        if (fWhat & CPUMCTX_EXTRN_RSI)
            GET_REG64(pVCpu->cpum.GstCtx.rsi, WHvX64RegisterRsi);
        if (fWhat & CPUMCTX_EXTRN_RDI)
            GET_REG64(pVCpu->cpum.GstCtx.rdi, WHvX64RegisterRdi);
        if (fWhat & CPUMCTX_EXTRN_R8_R15)
        {
            GET_REG64(pVCpu->cpum.GstCtx.r8,  WHvX64RegisterR8);
            GET_REG64(pVCpu->cpum.GstCtx.r9,  WHvX64RegisterR9);
            GET_REG64(pVCpu->cpum.GstCtx.r10, WHvX64RegisterR10);
            GET_REG64(pVCpu->cpum.GstCtx.r11, WHvX64RegisterR11);
            GET_REG64(pVCpu->cpum.GstCtx.r12, WHvX64RegisterR12);
            GET_REG64(pVCpu->cpum.GstCtx.r13, WHvX64RegisterR13);
            GET_REG64(pVCpu->cpum.GstCtx.r14, WHvX64RegisterR14);
            GET_REG64(pVCpu->cpum.GstCtx.r15, WHvX64RegisterR15);
        }
    }

    /* RIP & Flags */
    if (fWhat & CPUMCTX_EXTRN_RIP)
        GET_REG64(pVCpu->cpum.GstCtx.rip, WHvX64RegisterRip);
    if (fWhat & CPUMCTX_EXTRN_RFLAGS)
        GET_REG64(pVCpu->cpum.GstCtx.rflags.u, WHvX64RegisterRflags);

    /* Segments */
    if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_ES)
            GET_SEG(pVCpu->cpum.GstCtx.es, WHvX64RegisterEs);
        if (fWhat & CPUMCTX_EXTRN_CS)
            GET_SEG(pVCpu->cpum.GstCtx.cs, WHvX64RegisterCs);
        if (fWhat & CPUMCTX_EXTRN_SS)
            GET_SEG(pVCpu->cpum.GstCtx.ss, WHvX64RegisterSs);
        if (fWhat & CPUMCTX_EXTRN_DS)
            GET_SEG(pVCpu->cpum.GstCtx.ds, WHvX64RegisterDs);
        if (fWhat & CPUMCTX_EXTRN_FS)
            GET_SEG(pVCpu->cpum.GstCtx.fs, WHvX64RegisterFs);
        if (fWhat & CPUMCTX_EXTRN_GS)
            GET_SEG(pVCpu->cpum.GstCtx.gs, WHvX64RegisterGs);
    }

    /* Descriptor tables and the task segment. */
    if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_LDTR)
            GET_SEG(pVCpu->cpum.GstCtx.ldtr, WHvX64RegisterLdtr);

        if (fWhat & CPUMCTX_EXTRN_TR)
        {
            /* AMD-V likes loading TR in the AVAIL state, whereas Intel insists
               on BUSY.  So, to avoid triggering sanity assertions elsewhere in
               the code, always fix this up. */
            GET_SEG(pVCpu->cpum.GstCtx.tr, WHvX64RegisterTr);
            switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
            {
                case X86_SEL_TYPE_SYS_386_TSS_BUSY:
                case X86_SEL_TYPE_SYS_286_TSS_BUSY:
                    break;
                case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
                    pVCpu->cpum.GstCtx.tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
                    break;
                case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
                    pVCpu->cpum.GstCtx.tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;
                    break;
            }
        }
        if (fWhat & CPUMCTX_EXTRN_IDTR)
        {
            Assert(aenmNames[iReg] == WHvX64RegisterIdtr);
            pVCpu->cpum.GstCtx.idtr.cbIdt = aValues[iReg].Table.Limit;
            pVCpu->cpum.GstCtx.idtr.pIdt  = aValues[iReg].Table.Base;
            iReg++;
        }
        if (fWhat & CPUMCTX_EXTRN_GDTR)
        {
            Assert(aenmNames[iReg] == WHvX64RegisterGdtr);
            pVCpu->cpum.GstCtx.gdtr.cbGdt = aValues[iReg].Table.Limit;
            pVCpu->cpum.GstCtx.gdtr.pGdt  = aValues[iReg].Table.Base;
            iReg++;
        }
    }

    /* Control registers. */
    bool fMaybeChangedMode = false;
    bool fUpdateCr3        = false;
    if (fWhat & CPUMCTX_EXTRN_CR_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_CR0)
        {
            Assert(aenmNames[iReg] == WHvX64RegisterCr0);
            if (pVCpu->cpum.GstCtx.cr0 != aValues[iReg].Reg64)
            {
                CPUMSetGuestCR0(pVCpu, aValues[iReg].Reg64);
                fMaybeChangedMode = true;
            }
            iReg++;
        }
        if (fWhat & CPUMCTX_EXTRN_CR2)
            GET_REG64(pVCpu->cpum.GstCtx.cr2, WHvX64RegisterCr2);
        if (fWhat & CPUMCTX_EXTRN_CR3)
        {
            if (pVCpu->cpum.GstCtx.cr3 != aValues[iReg].Reg64)
            {
                CPUMSetGuestCR3(pVCpu, aValues[iReg].Reg64);
                fUpdateCr3 = true;
            }
            iReg++;
        }
        if (fWhat & CPUMCTX_EXTRN_CR4)
        {
            if (pVCpu->cpum.GstCtx.cr4 != aValues[iReg].Reg64)
            {
                CPUMSetGuestCR4(pVCpu, aValues[iReg].Reg64);
                fMaybeChangedMode = true;
            }
            iReg++;
        }
    }
    if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
    {
        Assert(aenmNames[iReg] == WHvX64RegisterCr8);
        APICSetTpr(pVCpu, (uint8_t)aValues[iReg].Reg64 << 4);
        iReg++;
    }

    /* Debug registers. */
    if (fWhat & CPUMCTX_EXTRN_DR7)
    {
        Assert(aenmNames[iReg] == WHvX64RegisterDr7);
        if (pVCpu->cpum.GstCtx.dr[7] != aValues[iReg].Reg64)
            CPUMSetGuestDR7(pVCpu, aValues[iReg].Reg64);
        pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_DR7; /* Hack alert! Avoids asserting when processing CPUMCTX_EXTRN_DR0_DR3. */
        iReg++;
    }
    if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
    {
        Assert(aenmNames[iReg]     == WHvX64RegisterDr0);
        Assert(aenmNames[iReg + 3] == WHvX64RegisterDr3);
        if (pVCpu->cpum.GstCtx.dr[0] != aValues[iReg].Reg64)
            CPUMSetGuestDR0(pVCpu, aValues[iReg].Reg64);
        iReg++;
        if (pVCpu->cpum.GstCtx.dr[1] != aValues[iReg].Reg64)
            CPUMSetGuestDR1(pVCpu, aValues[iReg].Reg64);
        iReg++;
        if (pVCpu->cpum.GstCtx.dr[2] != aValues[iReg].Reg64)
            CPUMSetGuestDR2(pVCpu, aValues[iReg].Reg64);
        iReg++;
        if (pVCpu->cpum.GstCtx.dr[3] != aValues[iReg].Reg64)
            CPUMSetGuestDR3(pVCpu, aValues[iReg].Reg64);
        iReg++;
    }
    if (fWhat & CPUMCTX_EXTRN_DR6)
    {
        Assert(aenmNames[iReg] == WHvX64RegisterDr6);
        if (pVCpu->cpum.GstCtx.dr[6] != aValues[iReg].Reg64)
            CPUMSetGuestDR6(pVCpu, aValues[iReg].Reg64);
        iReg++;
    }

    /* Floating point state. */
    if (fWhat & CPUMCTX_EXTRN_X87)
    {
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[0].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[0].au64[1], WHvX64RegisterFpMmx0);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[1].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[1].au64[1], WHvX64RegisterFpMmx1);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[2].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[2].au64[1], WHvX64RegisterFpMmx2);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[3].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[3].au64[1], WHvX64RegisterFpMmx3);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[4].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[4].au64[1], WHvX64RegisterFpMmx4);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[5].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[5].au64[1], WHvX64RegisterFpMmx5);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[6].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[6].au64[1], WHvX64RegisterFpMmx6);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[7].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[7].au64[1], WHvX64RegisterFpMmx7);

        Assert(aenmNames[iReg] == WHvX64RegisterFpControlStatus);
        pVCpu->cpum.GstCtx.pXStateR3->x87.FCW    = aValues[iReg].FpControlStatus.FpControl;
        pVCpu->cpum.GstCtx.pXStateR3->x87.FSW    = aValues[iReg].FpControlStatus.FpStatus;
        pVCpu->cpum.GstCtx.pXStateR3->x87.FTW    = aValues[iReg].FpControlStatus.FpTag
                                        /*| (aValues[iReg].FpControlStatus.Reserved << 8)*/;
        pVCpu->cpum.GstCtx.pXStateR3->x87.FOP    = aValues[iReg].FpControlStatus.LastFpOp;
        pVCpu->cpum.GstCtx.pXStateR3->x87.FPUIP  = (uint32_t)aValues[iReg].FpControlStatus.LastFpRip;
        pVCpu->cpum.GstCtx.pXStateR3->x87.CS     = (uint16_t)(aValues[iReg].FpControlStatus.LastFpRip >> 32);
        pVCpu->cpum.GstCtx.pXStateR3->x87.Rsrvd1 = (uint16_t)(aValues[iReg].FpControlStatus.LastFpRip >> 48);
        iReg++;
    }

    if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
    {
        Assert(aenmNames[iReg] == WHvX64RegisterXmmControlStatus);
        if (fWhat & CPUMCTX_EXTRN_X87)
        {
            pVCpu->cpum.GstCtx.pXStateR3->x87.FPUDP  = (uint32_t)aValues[iReg].XmmControlStatus.LastFpRdp;
            pVCpu->cpum.GstCtx.pXStateR3->x87.DS     = (uint16_t)(aValues[iReg].XmmControlStatus.LastFpRdp >> 32);
            pVCpu->cpum.GstCtx.pXStateR3->x87.Rsrvd2 = (uint16_t)(aValues[iReg].XmmControlStatus.LastFpRdp >> 48);
        }
        pVCpu->cpum.GstCtx.pXStateR3->x87.MXCSR      = aValues[iReg].XmmControlStatus.XmmStatusControl;
        pVCpu->cpum.GstCtx.pXStateR3->x87.MXCSR_MASK = aValues[iReg].XmmControlStatus.XmmStatusControlMask; /** @todo ??? (Isn't this an output field?) */
        iReg++;
    }

    /* Vector state. */
    if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
    {
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 0].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 0].uXmm.s.Hi, WHvX64RegisterXmm0);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 1].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 1].uXmm.s.Hi, WHvX64RegisterXmm1);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 2].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 2].uXmm.s.Hi, WHvX64RegisterXmm2);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 3].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 3].uXmm.s.Hi, WHvX64RegisterXmm3);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 4].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 4].uXmm.s.Hi, WHvX64RegisterXmm4);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 5].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 5].uXmm.s.Hi, WHvX64RegisterXmm5);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 6].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 6].uXmm.s.Hi, WHvX64RegisterXmm6);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 7].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 7].uXmm.s.Hi, WHvX64RegisterXmm7);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 8].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 8].uXmm.s.Hi, WHvX64RegisterXmm8);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 9].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 9].uXmm.s.Hi, WHvX64RegisterXmm9);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[10].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[10].uXmm.s.Hi, WHvX64RegisterXmm10);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[11].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[11].uXmm.s.Hi, WHvX64RegisterXmm11);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[12].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[12].uXmm.s.Hi, WHvX64RegisterXmm12);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[13].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[13].uXmm.s.Hi, WHvX64RegisterXmm13);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[14].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[14].uXmm.s.Hi, WHvX64RegisterXmm14);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[15].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[15].uXmm.s.Hi, WHvX64RegisterXmm15);
    }

    /* MSRs */
    // WHvX64RegisterTsc - don't touch
    if (fWhat & CPUMCTX_EXTRN_EFER)
    {
        Assert(aenmNames[iReg] == WHvX64RegisterEfer);
        if (aValues[iReg].Reg64 != pVCpu->cpum.GstCtx.msrEFER)
        {
            Log7(("NEM/%u: MSR EFER changed %RX64 -> %RX64\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.msrEFER, aValues[iReg].Reg64));
            if ((aValues[iReg].Reg64 ^ pVCpu->cpum.GstCtx.msrEFER) & MSR_K6_EFER_NXE)
                PGMNotifyNxeChanged(pVCpu, RT_BOOL(aValues[iReg].Reg64 & MSR_K6_EFER_NXE));
            pVCpu->cpum.GstCtx.msrEFER = aValues[iReg].Reg64;
            fMaybeChangedMode = true;
        }
        iReg++;
    }
    if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrKERNELGSBASE, WHvX64RegisterKernelGsBase, "MSR KERNEL_GS_BASE");
    if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
    {
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.SysEnter.cs,  WHvX64RegisterSysenterCs,  "MSR SYSENTER.CS");
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.SysEnter.eip, WHvX64RegisterSysenterEip, "MSR SYSENTER.EIP");
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.SysEnter.esp, WHvX64RegisterSysenterEsp, "MSR SYSENTER.ESP");
    }
    if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
    {
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrSTAR,   WHvX64RegisterStar,   "MSR STAR");
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrLSTAR,  WHvX64RegisterLstar,  "MSR LSTAR");
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrCSTAR,  WHvX64RegisterCstar,  "MSR CSTAR");
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrSFMASK, WHvX64RegisterSfmask, "MSR SFMASK");
    }
    if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
    {
        Assert(aenmNames[iReg] == WHvX64RegisterApicBase);
        const uint64_t uOldBase = APICGetBaseMsrNoCheck(pVCpu);
        if (aValues[iReg].Reg64 != uOldBase)
        {
            Log7(("NEM/%u: MSR APICBase changed %RX64 -> %RX64 (%RX64)\n",
                  pVCpu->idCpu, uOldBase, aValues[iReg].Reg64, aValues[iReg].Reg64 ^ uOldBase));
            int rc2 = APICSetBaseMsr(pVCpu, aValues[iReg].Reg64);
            AssertLogRelMsg(rc2 == VINF_SUCCESS, ("%Rrc %RX64\n", rc2, aValues[iReg].Reg64));
        }
        iReg++;

        GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrPAT, WHvX64RegisterPat, "MSR PAT");
#if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrPAT, WHvX64RegisterMsrMtrrCap, "MSR MTRR_CAP");
#endif
        PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrDefType,      WHvX64RegisterMsrMtrrDefType,     "MSR MTRR_DEF_TYPE");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix64K_00000, WHvX64RegisterMsrMtrrFix64k00000, "MSR MTRR_FIX_64K_00000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix16K_80000, WHvX64RegisterMsrMtrrFix16k80000, "MSR MTRR_FIX_16K_80000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix16K_A0000, WHvX64RegisterMsrMtrrFix16kA0000, "MSR MTRR_FIX_16K_A0000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_C0000,  WHvX64RegisterMsrMtrrFix4kC0000,  "MSR MTRR_FIX_4K_C0000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_C8000,  WHvX64RegisterMsrMtrrFix4kC8000,  "MSR MTRR_FIX_4K_C8000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_D0000,  WHvX64RegisterMsrMtrrFix4kD0000,  "MSR MTRR_FIX_4K_D0000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_D8000,  WHvX64RegisterMsrMtrrFix4kD8000,  "MSR MTRR_FIX_4K_D8000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_E0000,  WHvX64RegisterMsrMtrrFix4kE0000,  "MSR MTRR_FIX_4K_E0000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_E8000,  WHvX64RegisterMsrMtrrFix4kE8000,  "MSR MTRR_FIX_4K_E8000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_F0000,  WHvX64RegisterMsrMtrrFix4kF0000,  "MSR MTRR_FIX_4K_F0000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_F8000,  WHvX64RegisterMsrMtrrFix4kF8000,  "MSR MTRR_FIX_4K_F8000");
        GET_REG64_LOG7(pCtxMsrs->msr.TscAux,           WHvX64RegisterTscAux,             "MSR TSC_AUX");
        /** @todo look for HvX64RegisterIa32MiscEnable and HvX64RegisterIa32FeatureControl? */
    }

    /* Interruptibility. */
    if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
    {
        Assert(aenmNames[iReg]     == WHvRegisterInterruptState);
        Assert(aenmNames[iReg + 1] == WHvX64RegisterRip);

        if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
        {
            pVCpu->nem.s.fLastInterruptShadow = aValues[iReg].InterruptState.InterruptShadow;
            if (aValues[iReg].InterruptState.InterruptShadow)
                EMSetInhibitInterruptsPC(pVCpu, aValues[iReg + 1].Reg64);
            else
                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
        }

        if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
        {
            if (aValues[iReg].InterruptState.NmiMasked)
                VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
            else
                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
        }

        fWhat |= CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI;
        iReg += 2;
    }

    /* Event injection. */
    /// @todo WHvRegisterPendingInterruption
    Assert(aenmNames[iReg] == WHvRegisterPendingInterruption);
    if (aValues[iReg].PendingInterruption.InterruptionPending)
    {
        Log7(("PendingInterruption: type=%u vector=%#x errcd=%RTbool/%#x instr-len=%u nested=%u\n",
              aValues[iReg].PendingInterruption.InterruptionType, aValues[iReg].PendingInterruption.InterruptionVector,
              aValues[iReg].PendingInterruption.DeliverErrorCode, aValues[iReg].PendingInterruption.ErrorCode,
              aValues[iReg].PendingInterruption.InstructionLength, aValues[iReg].PendingInterruption.NestedEvent));
        AssertMsg((aValues[iReg].PendingInterruption.AsUINT64 & UINT64_C(0xfc00)) == 0,
                  ("%#RX64\n", aValues[iReg].PendingInterruption.AsUINT64));
    }

    /// @todo WHvRegisterPendingEvent0
    /// @todo WHvRegisterPendingEvent1

    /* Almost done, just update extrn flags and maybe change PGM mode. */
    pVCpu->cpum.GstCtx.fExtrn &= ~fWhat;
    if (!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT))))
        pVCpu->cpum.GstCtx.fExtrn = 0;

    /* Typical. */
    if (!fMaybeChangedMode && !fUpdateCr3)
        return VINF_SUCCESS;

    /*
     * Slow.
     */
    if (fMaybeChangedMode)
    {
        int rc = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER);
        AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_1);
    }

    if (fUpdateCr3)
    {
        int rc = PGMUpdateCR3(pVCpu, pVCpu->cpum.GstCtx.cr3);
        AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_2);
    }

    return VINF_SUCCESS;
# endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
}

#endif /* !IN_RING0 */


/**
 * Interface for importing state on demand (used by IEM).
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context CPU structure.
 * @param   fWhat       What to import, CPUMCTX_EXTRN_XXX.
 */
VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPU pVCpu, uint64_t fWhat)
{
    STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);

#ifdef IN_RING0
# ifdef NEM_WIN_WITH_RING0_RUNLOOP
    /** @todo improve and secure this translation */
    PGVM pGVM = GVMMR0ByHandle(pVCpu->pVMR0->hSelf);
    AssertReturn(pGVM, VERR_INVALID_VMCPU_HANDLE);
    VMCPUID idCpu = pVCpu->idCpu;
    ASMCompilerBarrier();
    AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_VMCPU_HANDLE);

    return nemR0WinImportState(pGVM, &pGVM->aCpus[idCpu], &pVCpu->cpum.GstCtx, fWhat, true /*fCanUpdateCr3*/);
# else
    RT_NOREF(pVCpu, fWhat);
    return VERR_NOT_IMPLEMENTED;
# endif
#else
    return nemHCWinCopyStateFromHyperV(pVCpu->pVMR3, pVCpu, fWhat);
#endif
}
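
/* Illustrative only: a hypothetical caller making sure RIP and RFLAGS are
   current before inspecting them, following the CPUMCTX_EXTRN convention used
   throughout this file (a set bit means the field is still held by Hyper-V):
   @code
        if (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS))
        {
            int rc = NEMImportStateOnDemand(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
            AssertRCReturn(rc, rc);
        }
   @endcode */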


/**
 * Query the CPU tick counter and optionally the TSC_AUX MSR value.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context CPU structure.
 * @param   pcTicks     Where to return the CPU tick count.
 * @param   puAux       Where to return the TSC_AUX register value.
 */
VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPU pVCpu, uint64_t *pcTicks, uint32_t *puAux)
{
    STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick);

#ifdef IN_RING3
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
    AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);

# if defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) || defined(NEM_WIN_WITH_RING0_RUNLOOP)
#  if !defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) && defined(NEM_WIN_WITH_RING0_RUNLOOP)
    if (pVM->nem.s.fUseRing0Runloop)
#  endif
    {
        /* Call ring-0 and get the values. */
        int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_QUERY_CPU_TICK, 0, NULL);
        AssertLogRelRCReturn(rc, rc);
        *pcTicks = pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks;
        if (puAux)
            *puAux = pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_TSC_AUX
                   ? pVCpu->nem.s.Hypercall.QueryCpuTick.uAux : CPUMGetGuestTscAux(pVCpu);
        return VINF_SUCCESS;
    }
# endif
# ifndef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS
    /* Call the official API. */
    WHV_REGISTER_NAME  aenmNames[2] = { WHvX64RegisterTsc, WHvX64RegisterTscAux };
    WHV_REGISTER_VALUE aValues[2]   = { {0, 0}, {0, 0} };
    Assert(RT_ELEMENTS(aenmNames) == RT_ELEMENTS(aValues));
    HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, 2, aValues);
    AssertLogRelMsgReturn(SUCCEEDED(hrc),
                          ("WHvGetVirtualProcessorRegisters(%p, %u,{tsc,tsc_aux},2,) -> %Rhrc (Last=%#x/%u)\n",
                           pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
                          , VERR_NEM_GET_REGISTERS_FAILED);
    *pcTicks = aValues[0].Reg64;
    if (puAux)
        *puAux = pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_TSC_AUX ? aValues[1].Reg64 : CPUMGetGuestTscAux(pVCpu);
    return VINF_SUCCESS;
# endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
#else /* IN_RING0 */
# ifdef NEM_WIN_WITH_RING0_RUNLOOP
    /** @todo improve and secure this translation */
    PGVM pGVM = GVMMR0ByHandle(pVCpu->pVMR0->hSelf);
    AssertReturn(pGVM, VERR_INVALID_VMCPU_HANDLE);
    VMCPUID idCpu = pVCpu->idCpu;
    ASMCompilerBarrier();
    AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_VMCPU_HANDLE);

    int rc = nemR0WinQueryCpuTick(pGVM, &pGVM->aCpus[idCpu], pcTicks, puAux);
    if (RT_SUCCESS(rc) && puAux && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_TSC_AUX))
        *puAux = CPUMGetGuestTscAux(pVCpu);
    return rc;
# else
    RT_NOREF(pVCpu, pcTicks, puAux);
    return VERR_NOT_IMPLEMENTED;
# endif
#endif /* IN_RING0 */
}
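
/* Illustrative only (hypothetical caller):
   @code
        uint64_t cTicks = 0;
        uint32_t uAux   = 0;
        int rc = NEMHCQueryCpuTick(pVCpu, &cTicks, &uAux); // puAux may also be NULL
   @endcode */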
1220
1221
1222/**
1223 * Resumes CPU clock (TSC) on all virtual CPUs.
1224 *
1225 * This is called by TM when the VM is started, restored, resumed or similar.
1226 *
1227 * @returns VBox status code.
1228 * @param pVM The cross context VM structure.
1229 * @param pVCpu The cross context CPU structure of the calling EMT.
1230 * @param uPausedTscValue The TSC value at the time of pausing.
1231 */
1232VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVM pVM, PVMCPU pVCpu, uint64_t uPausedTscValue)
1233{
1234#ifdef IN_RING0
1235# ifdef NEM_WIN_WITH_RING0_RUNLOOP
1236 /** @todo improve and secure this translation */
1237 PGVM pGVM = GVMMR0ByHandle(pVM->hSelf);
1238 AssertReturn(pGVM, VERR_INVALID_VMCPU_HANDLE);
1239 VMCPUID idCpu = pVCpu->idCpu;
1240 ASMCompilerBarrier();
1241 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_VMCPU_HANDLE);
1242
1243 return nemR0WinResumeCpuTickOnAll(pGVM, &pGVM->aCpus[idCpu], uPausedTscValue);
1244# else
1245 RT_NOREF(pVM, pVCpu, uPausedTscValue);
1246 return VERR_NOT_IMPLEMENTED;
1247# endif
1248#else /* IN_RING3 */
1249 VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
1250 AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);
1251
1252# if defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) || defined(NEM_WIN_WITH_RING0_RUNLOOP)
1253# if !defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) && defined(NEM_WIN_WITH_RING0_RUNLOOP)
1254 if (pVM->nem.s.fUseRing0Runloop)
1255# endif
1256 {
1257 /* Call ring-0 and do it all there. */
1258 return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL, uPausedTscValue, NULL);
1259 }
1260# endif
1261# ifndef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS
1262 /*
1263 * Call the official API to do the job.
1264 */
1265 if (pVM->cCpus > 1)
1266 RTThreadYield(); /* Try to decrease the chance that we get rescheduled in the middle. */
1267
1268 /* Start with the first CPU. */
1269 WHV_REGISTER_NAME enmName = WHvX64RegisterTsc;
1270 WHV_REGISTER_VALUE Value = {0, 0};
1271 Value.Reg64 = uPausedTscValue;
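/* Take a host TSC reference point so we can compensate below for the time it takes to set each vCPU. */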
1272 uint64_t const uFirstTsc = ASMReadTSC();
1273 HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, 0 /*iCpu*/, &enmName, 1, &Value);
1274 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1275 ("WHvSetVirtualProcessorRegisters(%p, 0,{tsc},2,%#RX64) -> %Rhrc (Last=%#x/%u)\n",
1276 pVM->nem.s.hPartition, uPausedTscValue, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1277 , VERR_NEM_SET_TSC);
1278
1279 /* Do the other CPUs, adjusting for elapsed TSC and keeping our fingers crossed
1280 that we don't introduce too much drift here. */
1281 for (VMCPUID iCpu = 1; iCpu < pVM->cCpus; iCpu++)
1282 {
1283 Assert(enmName == WHvX64RegisterTsc);
1284 const uint64_t offDelta = (ASMReadTSC() - uFirstTsc);
1285 Value.Reg64 = uPausedTscValue + offDelta;
1286 hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, iCpu, &enmName, 1, &Value);
1287 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1288 ("WHvSetVirtualProcessorRegisters(%p, 0,{tsc},2,%#RX64 + %#RX64) -> %Rhrc (Last=%#x/%u)\n",
1289 pVM->nem.s.hPartition, iCpu, uPausedTscValue, offDelta, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1290 , VERR_NEM_SET_TSC);
1291 }
1292
1293 return VINF_SUCCESS;
1294# endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
1295#endif /* IN_RING3 */
1296}
1297
1298#ifdef NEMWIN_NEED_GET_REGISTER
1299# if defined(IN_RING0) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
1300/** Worker for assertion macro. */
1301NEM_TMPL_STATIC int nemHCWinGetRegister(PVMCPU pVCpu, PGVMCPU pGVCpu, uint32_t enmReg, HV_REGISTER_VALUE *pRetValue)
1302{
1303 RT_ZERO(*pRetValue);
1304# ifdef IN_RING3
1305 RT_NOREF(pVCpu, pGVCpu, enmReg);
1306 return VERR_NOT_IMPLEMENTED;
1307# else
1308 NOREF(pVCpu);
1309
1310 /*
1311 * Hypercall parameters.
1312 */
1313 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
1314 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
1315 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
1316
1317 pInput->PartitionId = pGVCpu->pGVM->nem.s.idHvPartition;
1318 pInput->VpIndex = pGVCpu->idCpu;
1319 pInput->fFlags = 0;
1320 pInput->Names[0] = (HV_REGISTER_NAME)enmReg;
1321
1322 size_t const cbInput = RT_ALIGN_Z(RT_UOFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[1]), 32);
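/* The output value area follows the 32-byte aligned input block in the same hypercall page. */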
1323 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
1324 RT_BZERO(paValues, sizeof(paValues[0]) * 1);
1325
1326 /*
1327 * Make the hypercall and copy out the value.
1328 */
1329 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, 1),
1330 pGVCpu->nem.s.HypercallData.HCPhysPage,
1331 pGVCpu->nem.s.HypercallData.HCPhysPage + cbInput);
1332 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(1), ("uResult=%RX64 cRegs=%#x\n", uResult, 1),
1333 VERR_NEM_GET_REGISTERS_FAILED);
1334
1335 *pRetValue = paValues[0];
1336 return VINF_SUCCESS;
1337# endif
1338}
1339# else
1340/** Worker for assertion macro. */
1341 NEM_TMPL_STATIC int nemR3WinGetRegister(PVMCPU pVCpu, uint32_t enmReg, WHV_REGISTER_VALUE *pRetValue)
1342 {
1343 RT_ZERO(*pRetValue);
1344 RT_NOREF(pVCpu, enmReg);
1345 return VERR_NOT_IMPLEMENTED;
1346}
1347# endif
1348#endif
1349
1350
1351#ifdef LOG_ENABLED
1352/**
1353 * Get the virtual processor running status.
1354 */
1355DECLINLINE(VID_PROCESSOR_STATUS) nemHCWinCpuGetRunningStatus(PVMCPU pVCpu)
1356{
1357# ifdef IN_RING0
1358 NOREF(pVCpu);
1359 return VidProcessorStatusUndefined;
1360# else
1361 RTERRVARS Saved;
1362 RTErrVarsSave(&Saved);
1363
1364 /*
1365 * This API is disabled in release builds, it seems. On build 17101 it requires
1366 * the following patch to be enabled (windbg): eb vid+12180 0f 84 98 00 00 00
1367 */
1368 VID_PROCESSOR_STATUS enmCpuStatus = VidProcessorStatusUndefined;
1369 NTSTATUS rcNt = g_pfnVidGetVirtualProcessorRunningStatus(pVCpu->pVMR3->nem.s.hPartitionDevice, pVCpu->idCpu, &enmCpuStatus);
1370 AssertRC(rcNt);
1371
1372 RTErrVarsRestore(&Saved);
1373 return enmCpuStatus;
1374# endif
1375}
1376#endif /* LOG_ENABLED */
1377
1378
1379#if defined(NEM_WIN_USE_OUR_OWN_RUN_API) || defined(NEM_WIN_WITH_RING0_RUNLOOP)
1380# ifdef IN_RING3 /* hopefully not needed in ring-0, as we'd need KTHREADs and KeAlertThread. */
1381/**
1382 * Our own WHvCancelRunVirtualProcessor that can later be moved to ring-0.
1383 *
1384 * This is an experiment only.
1385 *
1386 * @returns VBox status code.
1387 * @param pVM The cross context VM structure.
1388 * @param pVCpu The cross context virtual CPU structure of the
1389 * calling EMT.
1390 */
1391NEM_TMPL_STATIC int nemHCWinCancelRunVirtualProcessor(PVM pVM, PVMCPU pVCpu)
1392{
1393 /*
1394 * Work the state.
1395 *
1396 * From the looks of things, we should let the EMT call VidStopVirtualProcessor.
1397 * So, we just need to modify the state and kick the EMT if it's waiting on
1398 * messages. For the latter we use QueueUserAPC / KeAlertThread.
1399 */
1400 for (;;)
1401 {
1402 VMCPUSTATE enmState = VMCPU_GET_STATE(pVCpu);
1403 switch (enmState)
1404 {
1405 case VMCPUSTATE_STARTED_EXEC_NEM:
1406 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED, VMCPUSTATE_STARTED_EXEC_NEM))
1407 {
1408 DBGFTRACE_CUSTOM(pVM, "VMCPUSTATE_STARTED_EXEC_NEM -> CANCELED");
1409 Log8(("nemHCWinCancelRunVirtualProcessor: Switched %u to canceled state\n", pVCpu->idCpu));
1410 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatCancelChangedState);
1411 return VINF_SUCCESS;
1412 }
1413 break;
1414
1415 case VMCPUSTATE_STARTED_EXEC_NEM_WAIT:
1416 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED, VMCPUSTATE_STARTED_EXEC_NEM_WAIT))
1417 {
1418 DBGFTRACE_CUSTOM(pVM, "VMCPUSTATE_STARTED_EXEC_NEM_WAIT -> CANCELED");
1419# ifdef IN_RING0
1420 NTSTATUS rcNt = KeAlertThread(??);
1421 DBGFTRACE_CUSTOM(pVM, "KeAlertThread -> %#x", rcNt);
1422# else
1423 NTSTATUS rcNt = NtAlertThread(pVCpu->nem.s.hNativeThreadHandle);
1424 DBGFTRACE_CUSTOM(pVM, "NtAlertThread -> %#x", rcNt);
1425# endif
1426 Log8(("nemHCWinCancelRunVirtualProcessor: Alerted %u: %#x\n", pVCpu->idCpu, rcNt));
1427 Assert(rcNt == STATUS_SUCCESS);
1428 if (NT_SUCCESS(rcNt))
1429 {
1430 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatCancelAlertedThread);
1431 return VINF_SUCCESS;
1432 }
1433 AssertLogRelMsgFailedReturn(("NtAlertThread failed: %#x\n", rcNt), RTErrConvertFromNtStatus(rcNt));
1434 }
1435 break;
1436
1437 default:
1438 return VINF_SUCCESS;
1439 }
1440
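/* The CPU state changed while we examined it; pause briefly and retry. */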
1441 ASMNopPause();
1442 RT_NOREF(pVM);
1443 }
1444}
1445# endif /* IN_RING3 */
1446#endif /* NEM_WIN_USE_OUR_OWN_RUN_API || NEM_WIN_WITH_RING0_RUNLOOP */
1447
1448
1449#ifdef LOG_ENABLED
1450/**
1451 * Logs the current CPU state.
1452 */
1453NEM_TMPL_STATIC void nemHCWinLogState(PVM pVM, PVMCPU pVCpu)
1454{
1455 if (LogIs3Enabled())
1456 {
1457# if 0 // def IN_RING3 - causes lazy state import assertions all over CPUM.
1458 char szRegs[4096];
1459 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
1460 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
1461 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
1462 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
1463 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
1464 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
1465 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
1466 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
1467 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
1468 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
1469 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
1470 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
1471 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
1472 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
1473 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
1474 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
1475 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
1476 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
1477 " efer=%016VR{efer}\n"
1478 " pat=%016VR{pat}\n"
1479 " sf_mask=%016VR{sf_mask}\n"
1480 "krnl_gs_base=%016VR{krnl_gs_base}\n"
1481 " lstar=%016VR{lstar}\n"
1482 " star=%016VR{star} cstar=%016VR{cstar}\n"
1483 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
1484 );
1485
1486 char szInstr[256];
1487 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
1488 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
1489 szInstr, sizeof(szInstr), NULL);
1490 Log3(("%s%s\n", szRegs, szInstr));
1491# else
1492 /** @todo stat logging in ring-0 */
1493 RT_NOREF(pVM, pVCpu);
1494# endif
1495 }
1496}
1497#endif /* LOG_ENABLED */
1498
1499
1500/** Macro used by nemHCWinExecStateToLogStr and nemR3WinExecStateToLogStr. */
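/* Input bits: 0 = interruption pending (Pnd), 1 = debug active (Dbg), 2 = interrupt shadow (Shw). */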
1501#define SWITCH_IT(a_szPrefix) \
1502 do \
1503 switch (u)\
1504 { \
1505 case 0x00: return a_szPrefix ""; \
1506 case 0x01: return a_szPrefix ",Pnd"; \
1507 case 0x02: return a_szPrefix ",Dbg"; \
1508 case 0x03: return a_szPrefix ",Pnd,Dbg"; \
1509 case 0x04: return a_szPrefix ",Shw"; \
1510 case 0x05: return a_szPrefix ",Pnd,Shw"; \
1511 case 0x06: return a_szPrefix ",Shw,Dbg"; \
1512 case 0x07: return a_szPrefix ",Pnd,Shw,Dbg"; \
1513 default: AssertFailedReturn("WTF?"); \
1514 } \
1515 while (0)
1516
1517#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
1518/**
1519 * Translates the execution state bitfield into a short log string, VID version.
1520 *
1521 * @returns Read-only log string.
1522 * @param pMsgHdr The header whose state to summarize.
1523 */
1524static const char *nemHCWinExecStateToLogStr(HV_X64_INTERCEPT_MESSAGE_HEADER const *pMsgHdr)
1525{
1526 unsigned u = (unsigned)pMsgHdr->ExecutionState.InterruptionPending
1527 | ((unsigned)pMsgHdr->ExecutionState.DebugActive << 1)
1528 | ((unsigned)pMsgHdr->ExecutionState.InterruptShadow << 2);
1529 if (pMsgHdr->ExecutionState.EferLma)
1530 SWITCH_IT("LM");
1531 else if (pMsgHdr->ExecutionState.Cr0Pe)
1532 SWITCH_IT("PM");
1533 else
1534 SWITCH_IT("RM");
1535}
1536#elif defined(IN_RING3)
1537/**
1538 * Translates the execution state bitfield into a short log string, WinHv version.
1539 *
1540 * @returns Read-only log string.
1541 * @param pExitCtx The exit context whose state to summarize.
1542 */
1543static const char *nemR3WinExecStateToLogStr(WHV_VP_EXIT_CONTEXT const *pExitCtx)
1544{
1545 unsigned u = (unsigned)pExitCtx->ExecutionState.InterruptionPending
1546 | ((unsigned)pExitCtx->ExecutionState.DebugActive << 1)
1547 | ((unsigned)pExitCtx->ExecutionState.InterruptShadow << 2);
1548 if (pExitCtx->ExecutionState.EferLma)
1549 SWITCH_IT("LM");
1550 else if (pExitCtx->ExecutionState.Cr0Pe)
1551 SWITCH_IT("PM");
1552 else
1553 SWITCH_IT("RM");
1554}
1555#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
1556#undef SWITCH_IT
1557
1558
1559#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
1560/**
1561 * Advances the guest RIP and clears EFLAGS.RF, VID version.
1562 *
1563 * This may clear VMCPU_FF_INHIBIT_INTERRUPTS.
1564 *
1565 * @param pVCpu The cross context virtual CPU structure.
1566 * @param pMsgHdr The X64 intercept message header.
1567 * @param cbMinInstr The minimum instruction length, or 1 if unknown.
1568 */
1569DECLINLINE(void)
1570nemHCWinAdvanceGuestRipAndClearRF(PVMCPU pVCpu, HV_X64_INTERCEPT_MESSAGE_HEADER const *pMsgHdr, uint8_t cbMinInstr)
1571{
1572 Assert(!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));
1573
1574 /* Advance the RIP. */
1575 Assert(pMsgHdr->InstructionLength >= cbMinInstr); RT_NOREF_PV(cbMinInstr);
1576 pVCpu->cpum.GstCtx.rip += pMsgHdr->InstructionLength;
1577 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
1578
1579 /* Update interrupt inhibition. */
1580 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1581 { /* likely */ }
1582 else if (pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
1583 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1584}
1585#elif defined(IN_RING3)
1586/**
1587 * Advances the guest RIP and clears EFLAGS.RF, WinHv version.
1588 *
1589 * This may clear VMCPU_FF_INHIBIT_INTERRUPTS.
1590 *
1591 * @param pVCpu The cross context virtual CPU structure.
1592 * @param pExitCtx The exit context.
1593 * @param cbMinInstr The minimum instruction length, or 1 if unknown.
1594 */
1595DECLINLINE(void) nemR3WinAdvanceGuestRipAndClearRF(PVMCPU pVCpu, WHV_VP_EXIT_CONTEXT const *pExitCtx, uint8_t cbMinInstr)
1596{
1597 Assert(!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));
1598
1599 /* Advance the RIP. */
1600 Assert(pExitCtx->InstructionLength >= cbMinInstr); RT_NOREF_PV(cbMinInstr);
1601 pVCpu->cpum.GstCtx.rip += pExitCtx->InstructionLength;
1602 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
1603
1604 /* Update interrupt inhibition. */
1605 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1606 { /* likely */ }
1607 else if (pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
1608 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1609}
1610#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
1611
1612
1613
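/**
 * Worker for PGMPhysNemEnumPagesByState that unmaps a single guest page.
 *
 * Used by nemHCWinHandleMemoryAccessPageCheckerCallback as a fallback when a
 * single-page unmap fails and we resort to unmapping everything.
 */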
1614NEM_TMPL_STATIC DECLCALLBACK(int)
1615nemHCWinUnmapOnePageCallback(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint8_t *pu2NemState, void *pvUser)
1616{
1617 RT_NOREF_PV(pvUser);
1618#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1619 int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys);
1620 AssertRC(rc);
1621 if (RT_SUCCESS(rc))
1622#else
1623 RT_NOREF_PV(pVCpu);
1624 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
1625 if (SUCCEEDED(hrc))
1626#endif
1627 {
1628 Log5(("NEM GPA unmap all: %RGp (cMappedPages=%u)\n", GCPhys, pVM->nem.s.cMappedPages - 1));
1629 *pu2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
1630 }
1631 else
1632 {
1633#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1634 LogRel(("nemR3WinUnmapOnePageCallback: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));
1635#else
1636 LogRel(("nemR3WinUnmapOnePageCallback: GCPhys=%RGp %s hrc=%Rhrc (%#x) Last=%#x/%u (cMappedPages=%u)\n",
1637 GCPhys, g_apszPageStates[*pu2NemState], hrc, hrc, RTNtLastStatusValue(),
1638 RTNtLastErrorValue(), pVM->nem.s.cMappedPages));
1639#endif
1640 *pu2NemState = NEM_WIN_PAGE_STATE_NOT_SET;
1641 }
1642 if (pVM->nem.s.cMappedPages > 0)
1643 ASMAtomicDecU32(&pVM->nem.s.cMappedPages);
1644 return VINF_SUCCESS;
1645}
1646
1647
1648/**
1649 * State to pass between nemHCWinHandleMemoryAccess / nemR3WinWHvHandleMemoryAccess
1650 * and nemHCWinHandleMemoryAccessPageCheckerCallback.
1651 */
1652typedef struct NEMHCWINHMACPCCSTATE
1653{
1654 /** Input: Write access. */
1655 bool fWriteAccess;
1656 /** Output: Set if we did something. */
1657 bool fDidSomething;
1658 /** Output: Set if we should resume. */
1659 bool fCanResume;
1660} NEMHCWINHMACPCCSTATE;
1661
1662/**
1663 * @callback_method_impl{FNPGMPHYSNEMCHECKPAGE,
1664 * Worker for nemR3WinHandleMemoryAccess; pvUser points to a
1665 * NEMHCWINHMACPCCSTATE structure. }
1666 */
1667NEM_TMPL_STATIC DECLCALLBACK(int)
1668nemHCWinHandleMemoryAccessPageCheckerCallback(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
1669{
1670 NEMHCWINHMACPCCSTATE *pState = (NEMHCWINHMACPCCSTATE *)pvUser;
1671 pState->fDidSomething = false;
1672 pState->fCanResume = false;
1673
1674 /* If A20 is disabled, we may need to make another query on the masked
1675 page to get the correct protection information. */
1676 uint8_t u2State = pInfo->u2NemState;
1677 RTGCPHYS GCPhysSrc;
1678 if ( pVM->nem.s.fA20Enabled
1679 || !NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
1680 GCPhysSrc = GCPhys;
1681 else
1682 {
1683 GCPhysSrc = GCPhys & ~(RTGCPHYS)RT_BIT_32(20);
1684 PGMPHYSNEMPAGEINFO Info2;
1685 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhysSrc, pState->fWriteAccess, &Info2, NULL, NULL);
1686 AssertRCReturn(rc, rc);
1687
1688 *pInfo = Info2;
1689 pInfo->u2NemState = u2State;
1690 }
1691
1692 /*
1693 * Consolidate current page state with actual page protection and access type.
1694 * We don't really consider downgrades here, as they shouldn't happen.
1695 */
1696#ifndef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1697 /** @todo Someone at Microsoft please explain:
1698 * I'm not sure WTF was going on, but I ended up in a loop if I remapped a
1699 * readonly page as writable (unmap, then map again). Specifically, this was an
1700 * issue with the big VRAM mapping at 0xe0000000 when booting DSL 4.4.1. So, in
1701 * a hope to work around that we no longer pre-map anything, just unmap stuff
1702 * and do it lazily here. And here we will first unmap, restart, and then remap
1703 * with new protection or backing.
1704 */
1705#endif
1706 int rc;
1707 switch (u2State)
1708 {
1709 case NEM_WIN_PAGE_STATE_UNMAPPED:
1710 case NEM_WIN_PAGE_STATE_NOT_SET:
1711 if (pInfo->fNemProt == NEM_PAGE_PROT_NONE)
1712 {
1713 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #1\n", GCPhys));
1714 return VINF_SUCCESS;
1715 }
1716
1717 /* Don't bother remapping it if it's a write request to a non-writable page. */
1718 if ( pState->fWriteAccess
1719 && !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE))
1720 {
1721 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #1w\n", GCPhys));
1722 return VINF_SUCCESS;
1723 }
1724
1725 /* Map the page. */
1726 rc = nemHCNativeSetPhysPage(pVM,
1727 pVCpu,
1728 GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
1729 GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
1730 pInfo->fNemProt,
1731 &u2State,
1732 true /*fBackingState*/);
1733 pInfo->u2NemState = u2State;
1734 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - synced => %s + %Rrc\n",
1735 GCPhys, g_apszPageStates[u2State], rc));
1736 pState->fDidSomething = true;
1737 pState->fCanResume = true;
1738 return rc;
1739
1740 case NEM_WIN_PAGE_STATE_READABLE:
1741 if ( !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1742 && (pInfo->fNemProt & (NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE)))
1743 {
1744 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #2\n", GCPhys));
1745 return VINF_SUCCESS;
1746 }
1747
1748#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1749 /* Upgrade page to writable. */
1750/** @todo test this */
1751 if ( (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1752 && pState->fWriteAccess)
1753 {
1754 rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhys,
1755 HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE
1756 | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
1757 AssertRC(rc);
1758 if (RT_SUCCESS(rc))
1759 {
1760 pInfo->u2NemState = NEM_WIN_PAGE_STATE_WRITABLE;
1761 pState->fDidSomething = true;
1762 pState->fCanResume = true;
1763 Log5(("NEM GPA write-upgrade/exit: %RGp (was %s, cMappedPages=%u)\n",
1764 GCPhys, g_apszPageStates[u2State], pVM->nem.s.cMappedPages));
1765 }
1766 }
1767 else
1768 {
1769 /* Need to emulate the access. */
1770 AssertBreak(pInfo->fNemProt != NEM_PAGE_PROT_NONE); /* There should be no downgrades. */
1771 rc = VINF_SUCCESS;
1772 }
1773 return rc;
1774#else
1775 break;
1776#endif
1777
1778 case NEM_WIN_PAGE_STATE_WRITABLE:
1779 if (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1780 {
1781 if (pInfo->u2OldNemState == NEM_WIN_PAGE_STATE_WRITABLE)
1782 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #3a\n", GCPhys));
1783 else
1784 {
1785 pState->fCanResume = true;
1786 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #3b (%s -> %s)\n",
1787 GCPhys, g_apszPageStates[pInfo->u2OldNemState], g_apszPageStates[u2State]));
1788 }
1789 return VINF_SUCCESS;
1790 }
1791#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1792 AssertFailed(); /* There should be no downgrades. */
1793#endif
1794 break;
1795
1796 default:
1797 AssertLogRelMsgFailedReturn(("u2State=%#x\n", u2State), VERR_NEM_IPE_4);
1798 }
1799
1800 /*
1801 * Unmap and restart the instruction.
1802 * If this fails, which it does every so often, just unmap everything for now.
1803 */
1804#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1805 rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys);
1806 AssertRC(rc);
1807 if (RT_SUCCESS(rc))
1808#else
1809 /** @todo figure out whether we mess up the state or if it's WHv. */
1810 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
1811 if (SUCCEEDED(hrc))
1812#endif
1813 {
1814 pState->fDidSomething = true;
1815 pState->fCanResume = true;
1816 pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
1817 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
1818 Log5(("NEM GPA unmapped/exit: %RGp (was %s, cMappedPages=%u)\n", GCPhys, g_apszPageStates[u2State], cMappedPages));
1819 return VINF_SUCCESS;
1820 }
1821#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1822 LogRel(("nemHCWinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhys, rc));
1823 return rc;
1824#else
1825 LogRel(("nemHCWinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp %s hrc=%Rhrc (%#x) Last=%#x/%u (cMappedPages=%u)\n",
1826 GCPhys, g_apszPageStates[u2State], hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue(),
1827 pVM->nem.s.cMappedPages));
1828
1829 PGMPhysNemEnumPagesByState(pVM, pVCpu, NEM_WIN_PAGE_STATE_READABLE, nemR3WinUnmapOnePageCallback, NULL);
1830 Log(("nemHCWinHandleMemoryAccessPageCheckerCallback: Unmapped all (cMappedPages=%u)\n", pVM->nem.s.cMappedPages));
1831
1832 pState->fDidSomething = true;
1833 pState->fCanResume = true;
1834 pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
1835 return VINF_SUCCESS;
1836#endif
1837}
1838
1839
1840
1841#if defined(IN_RING0) && defined(NEM_WIN_TEMPLATE_MODE_OWN_RUN_API)
1842/**
1843 * Wrapper around nemR0WinImportState that converts VERR_NEM_FLUSH_TLB
1844 * into informational status codes and logs+asserts statuses.
1845 *
1846 * @returns VBox strict status code.
1847 * @param pGVM The global (ring-0) VM structure.
1848 * @param pGVCpu The global (ring-0) per CPU structure.
1849 * @param pVCpu The cross context per CPU structure.
1850 * @param fWhat What to import.
1851 * @param pszCaller Who is doing the importing.
1852 */
1853DECLINLINE(VBOXSTRICTRC) nemR0WinImportStateStrict(PGVM pGVM, PGVMCPU pGVCpu, PVMCPU pVCpu, uint64_t fWhat, const char *pszCaller)
1854{
1855 int rc = nemR0WinImportState(pGVM, pGVCpu, &pVCpu->cpum.GstCtx, fWhat, true /*fCanUpdateCr3*/);
1856 if (RT_SUCCESS(rc))
1857 {
1858 Assert(rc == VINF_SUCCESS);
1859 return VINF_SUCCESS;
1860 }
1861
1862 if (rc == VERR_NEM_FLUSH_TLB)
1863 {
1864 Log4(("%s/%u: nemR0WinImportState -> %Rrc\n", pszCaller, pGVCpu->idCpu, -rc));
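/* Convert to the corresponding informational (positive) status by negating. */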
1865 return -rc;
1866 }
1867 RT_NOREF(pszCaller);
1868 AssertMsgFailedReturn(("%s/%u: nemR0WinImportState failed: %Rrc\n", pszCaller, pGVCpu->idCpu, rc), rc);
1869}
1870#endif /* IN_RING0 && NEM_WIN_TEMPLATE_MODE_OWN_RUN_API*/
1871
1872#if defined(NEM_WIN_TEMPLATE_MODE_OWN_RUN_API) || defined(IN_RING3)
1873/**
1874 * Wrapper around nemR0WinImportStateStrict and nemHCWinCopyStateFromHyperV.
1875 *
1876 * Unlike the wrapped APIs, this checks whether it's necessary.
1877 *
1878 * @returns VBox strict status code.
1879 * @param pVCpu The cross context per CPU structure.
1880 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
1881 * @param fWhat What to import.
1882 * @param pszCaller Who is doing the importing.
1883 */
1884DECLINLINE(VBOXSTRICTRC) nemHCWinImportStateIfNeededStrict(PVMCPU pVCpu, PGVMCPU pGVCpu, uint64_t fWhat, const char *pszCaller)
1885{
1886 if (pVCpu->cpum.GstCtx.fExtrn & fWhat)
1887 {
1888# ifdef IN_RING0
1889 return nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pVCpu, fWhat, pszCaller);
1890# else
1891 RT_NOREF(pGVCpu, pszCaller);
1892 int rc = nemHCWinCopyStateFromHyperV(pVCpu->pVMR3, pVCpu, fWhat);
1893 AssertRCReturn(rc, rc);
1894# endif
1895 }
1896 return VINF_SUCCESS;
1897}
1898#endif /* NEM_WIN_TEMPLATE_MODE_OWN_RUN_API || IN_RING3 */
1899
1900#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
1901/**
1902 * Copies register state from the X64 intercept message header.
1903 *
1904 * ASSUMES no state copied yet.
1905 *
1906 * @param pVCpu The cross context per CPU structure.
1907 * @param pHdr The X64 intercept message header.
1908 * @sa nemR3WinCopyStateFromX64Header
1909 */
1910DECLINLINE(void) nemHCWinCopyStateFromX64Header(PVMCPU pVCpu, HV_X64_INTERCEPT_MESSAGE_HEADER const *pHdr)
1911{
1912 Assert( (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
1913 == (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT));
1914 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.cs, pHdr->CsSegment);
1915 pVCpu->cpum.GstCtx.rip = pHdr->Rip;
1916 pVCpu->cpum.GstCtx.rflags.u = pHdr->Rflags;
1917
1918 pVCpu->nem.s.fLastInterruptShadow = pHdr->ExecutionState.InterruptShadow;
1919 if (!pHdr->ExecutionState.InterruptShadow)
1920 {
1921 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1922 { /* likely */ }
1923 else
1924 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1925 }
1926 else
1927 EMSetInhibitInterruptsPC(pVCpu, pHdr->Rip);
1928
1929 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT);
1930}
1931#elif defined(IN_RING3)
1932/**
1933 * Copies register state from the (common) exit context.
1934 *
1935 * ASSUMES no state copied yet.
1936 *
1937 * @param pVCpu The cross context per CPU structure.
1938 * @param pExitCtx The common exit context.
1939 * @sa nemHCWinCopyStateFromX64Header
1940 */
1941DECLINLINE(void) nemR3WinCopyStateFromX64Header(PVMCPU pVCpu, WHV_VP_EXIT_CONTEXT const *pExitCtx)
1942{
1943 Assert( (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
1944 == (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT));
1945 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.cs, pExitCtx->Cs);
1946 pVCpu->cpum.GstCtx.rip = pExitCtx->Rip;
1947 pVCpu->cpum.GstCtx.rflags.u = pExitCtx->Rflags;
1948
1949 pVCpu->nem.s.fLastInterruptShadow = pExitCtx->ExecutionState.InterruptShadow;
1950 if (!pExitCtx->ExecutionState.InterruptShadow)
1951 {
1952 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1953 { /* likely */ }
1954 else
1955 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1956 }
1957 else
1958 EMSetInhibitInterruptsPC(pVCpu, pExitCtx->Rip);
1959
1960 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT);
1961}
1962#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
1963
1964
1965#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
1966/**
1967 * Deals with memory intercept message.
1968 *
1969 * @returns Strict VBox status code.
1970 * @param pVM The cross context VM structure.
1971 * @param pVCpu The cross context per CPU structure.
1972 * @param pMsg The message.
1973 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
1974 * @sa nemR3WinHandleExitMemory
1975 */
1976NEM_TMPL_STATIC VBOXSTRICTRC
1977nemHCWinHandleMessageMemory(PVM pVM, PVMCPU pVCpu, HV_X64_MEMORY_INTERCEPT_MESSAGE const *pMsg, PGVMCPU pGVCpu)
1978{
1979 uint64_t const uHostTsc = ASMReadTSC();
1980 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
1981 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
1982 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_EXECUTE);
1983
1984 /*
1985 * Whatever we do, we must clear pending event injection upon resume.
1986 */
1987 if (pMsg->Header.ExecutionState.InterruptionPending)
1988 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
1989
1990# if 0 /* Experiment: 20K -> 34K exit/s. */
1991 if ( pMsg->Header.ExecutionState.EferLma
1992 && pMsg->Header.CsSegment.Long
1993 && pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
1994 {
1995 if ( pMsg->Header.Rip - (uint64_t)0xf65a < (uint64_t)(0xf662 - 0xf65a)
1996 && pMsg->InstructionBytes[0] == 0x89
1997 && pMsg->InstructionBytes[1] == 0x03)
1998 {
1999 pVCpu->cpum.GstCtx.rip = pMsg->Header.Rip + 2;
2000 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
2001 AssertMsg(pMsg->Header.InstructionLength == 2, ("%#x\n", pMsg->Header.InstructionLength));
2002 //Log(("%RX64 msg:\n%.80Rhxd\n", pVCpu->cpum.GstCtx.rip, pMsg));
2003 return VINF_SUCCESS;
2004 }
2005 }
2006# endif
2007
2008 /*
2009 * Ask PGM for information about the given GCPhys. We need to check if we're
2010 * out of sync first.
2011 */
2012 NEMHCWINHMACPCCSTATE State = { pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE, false, false };
2013 PGMPHYSNEMPAGEINFO Info;
2014 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pMsg->GuestPhysicalAddress, State.fWriteAccess, &Info,
2015 nemHCWinHandleMemoryAccessPageCheckerCallback, &State);
2016 if (RT_SUCCESS(rc))
2017 {
2018 if (Info.fNemProt & ( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
2019 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
2020 {
2021 if (State.fCanResume)
2022 {
2023 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting (%s)\n",
2024 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2025 pMsg->GuestPhysicalAddress, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
2026 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
2027 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pMsg->Header.InterceptAccessType]));
2028 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
2029 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, uHostTsc);
2030 return VINF_SUCCESS;
2031 }
2032 }
2033 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating (%s)\n",
2034 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2035 pMsg->GuestPhysicalAddress, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
2036 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
2037 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pMsg->Header.InterceptAccessType]));
2038 }
2039 else
2040 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp rc=%Rrc%s; emulating (%s)\n",
2041 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2042 pMsg->GuestPhysicalAddress, rc, State.fDidSomething ? " modified-backing" : "",
2043 g_apszHvInterceptAccessTypes[pMsg->Header.InterceptAccessType]));
2044
2045 /*
2046 * Emulate the memory access, either access handler or special memory.
2047 */
2048 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2049 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
2050 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
2051 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
2052 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, uHostTsc);
2053 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
2054 VBOXSTRICTRC rcStrict;
2055# ifdef IN_RING0
2056 rcStrict = nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pVCpu,
2057 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES, "MemExit");
2058 if (rcStrict != VINF_SUCCESS)
2059 return rcStrict;
2060# else
2061 rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2062 AssertRCReturn(rc, rc);
2063 NOREF(pGVCpu);
2064# endif
2065
2066 if (pMsg->Reserved1)
2067 Log(("MemExit/Reserved1=%#x\n", pMsg->Reserved1));
2068 if (pMsg->Header.ExecutionState.Reserved0 || pMsg->Header.ExecutionState.Reserved1)
2069 Log(("MemExit/Hdr/State: Reserved0=%#x Reserved1=%#x\n", pMsg->Header.ExecutionState.Reserved0, pMsg->Header.ExecutionState.Reserved1));
2070
2071 if (!pExitRec)
2072 {
2073 //if (pMsg->InstructionByteCount > 0)
2074 // Log4(("InstructionByteCount=%#x %.16Rhxs\n", pMsg->InstructionByteCount, pMsg->InstructionBytes));
2075 if (pMsg->InstructionByteCount > 0)
2076 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pMsg->Header.Rip,
2077 pMsg->InstructionBytes, pMsg->InstructionByteCount);
2078 else
2079 rcStrict = IEMExecOne(pVCpu);
2080 /** @todo do we need to do anything wrt debugging here? */
2081 }
2082 else
2083 {
2084 /* Frequent access or probing. */
2085 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2086 Log4(("MemExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2087 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2088 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2089 }
2090 return rcStrict;
2091}
2092#elif defined(IN_RING3)
2093/**
2094 * Deals with memory access exits (WHvRunVpExitReasonMemoryAccess).
2095 *
2096 * @returns Strict VBox status code.
2097 * @param pVM The cross context VM structure.
2098 * @param pVCpu The cross context per CPU structure.
2099 * @param pExit The VM exit information to handle.
2100 * @sa nemHCWinHandleMessageMemory
2101 */
2102NEM_TMPL_STATIC VBOXSTRICTRC
2103nemR3WinHandleExitMemory(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2104{
2105 uint64_t const uHostTsc = ASMReadTSC();
2106 Assert(pExit->MemoryAccess.AccessInfo.AccessType != 3);
2107
2108 /*
2109 * Whatever we do, we must clear pending event injection upon resume.
2110 */
2111 if (pExit->VpContext.ExecutionState.InterruptionPending)
2112 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
2113
2114 /*
2115 * Ask PGM for information about the given GCPhys. We need to check if we're
2116 * out of sync first.
2117 */
2118 NEMHCWINHMACPCCSTATE State = { pExit->MemoryAccess.AccessInfo.AccessType == WHvMemoryAccessWrite, false, false };
2119 PGMPHYSNEMPAGEINFO Info;
2120 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pExit->MemoryAccess.Gpa, State.fWriteAccess, &Info,
2121 nemHCWinHandleMemoryAccessPageCheckerCallback, &State);
2122 if (RT_SUCCESS(rc))
2123 {
2124 if (Info.fNemProt & ( pExit->MemoryAccess.AccessInfo.AccessType == WHvMemoryAccessWrite
2125 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
2126 {
2127 if (State.fCanResume)
2128 {
2129 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting (%s)\n",
2130 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2131 pExit->MemoryAccess.Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
2132 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
2133 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pExit->MemoryAccess.AccessInfo.AccessType]));
2134 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
2135 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, uHostTsc);
2136 return VINF_SUCCESS;
2137 }
2138 }
2139 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating (%s)\n",
2140 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2141 pExit->MemoryAccess.Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
2142 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
2143 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pExit->MemoryAccess.AccessInfo.AccessType]));
2144 }
2145 else
2146 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp rc=%Rrc%s; emulating (%s)\n",
2147 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2148 pExit->MemoryAccess.Gpa, rc, State.fDidSomething ? " modified-backing" : "",
2149 g_apszHvInterceptAccessTypes[pExit->MemoryAccess.AccessInfo.AccessType]));
2150
2151 /*
2152 * Emulate the memory access, either access handler or special memory.
2153 */
2154 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2155 pExit->MemoryAccess.AccessInfo.AccessType == WHvMemoryAccessWrite
2156 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
2157 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
2158 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, uHostTsc);
2159 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2160 rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2161 AssertRCReturn(rc, rc);
2162 if (pExit->VpContext.ExecutionState.Reserved0 || pExit->VpContext.ExecutionState.Reserved1)
2163 Log(("MemExit/Hdr/State: Reserved0=%#x Reserved1=%#x\n", pExit->VpContext.ExecutionState.Reserved0, pExit->VpContext.ExecutionState.Reserved1));
2164
2165 VBOXSTRICTRC rcStrict;
2166 if (!pExitRec)
2167 {
2168 //if (pExit->MemoryAccess.InstructionByteCount > 0)
2169 //    Log4(("InstructionByteCount=%#x %.16Rhxs\n", pExit->MemoryAccess.InstructionByteCount, pExit->MemoryAccess.InstructionBytes));
2170 if (pExit->MemoryAccess.InstructionByteCount > 0)
2171 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pExit->VpContext.Rip,
2172 pExit->MemoryAccess.InstructionBytes, pExit->MemoryAccess.InstructionByteCount);
2173 else
2174 rcStrict = IEMExecOne(pVCpu);
2175 /** @todo do we need to do anything wrt debugging here? */
2176 }
2177 else
2178 {
2179 /* Frequent access or probing. */
2180 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2181 Log4(("MemExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2182 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2183 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2184 }
2185 return rcStrict;
2186}
2187#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
2188
2189
2190#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
2191/**
2192 * Deals with I/O port intercept message.
2193 *
2194 * @returns Strict VBox status code.
2195 * @param pVM The cross context VM structure.
2196 * @param pVCpu The cross context per CPU structure.
2197 * @param pMsg The message.
2198 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
2199 */
2200NEM_TMPL_STATIC VBOXSTRICTRC
2201nemHCWinHandleMessageIoPort(PVM pVM, PVMCPU pVCpu, HV_X64_IO_PORT_INTERCEPT_MESSAGE const *pMsg, PGVMCPU pGVCpu)
2202{
2203 /*
2204 * Assert message sanity.
2205 */
2206 Assert( pMsg->AccessInfo.AccessSize == 1
2207 || pMsg->AccessInfo.AccessSize == 2
2208 || pMsg->AccessInfo.AccessSize == 4);
2209 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
2210 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE);
2211 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, pGVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
2212 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRip, pMsg->Header.Rip);
2213 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
2214 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
2215 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRax, pMsg->Rax);
2216 if (pMsg->AccessInfo.StringOp)
2217 {
2218 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, pGVCpu, HvX64RegisterDs, pMsg->DsSegment);
2219 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, pGVCpu, HvX64RegisterEs, pMsg->EsSegment);
2220 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRcx, pMsg->Rcx);
2221 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRsi, pMsg->Rsi);
2222 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRdi, pMsg->Rdi);
2223 }
2224
2225 /*
2226 * Whatever we do, we must clear pending event injection upon resume.
2227 */
2228 if (pMsg->Header.ExecutionState.InterruptionPending)
2229 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
2230
2231 /*
2232 * Add history first to avoid two paths doing EMHistoryExec calls.
2233 */
2234 VBOXSTRICTRC rcStrict;
2235 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2236 !pMsg->AccessInfo.StringOp
2237 ? ( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
2238 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_WRITE)
2239 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_READ))
2240 : ( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
2241 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_WRITE)
2242 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_READ)),
2243 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
2244 if (!pExitRec)
2245 {
2246 if (!pMsg->AccessInfo.StringOp)
2247 {
2248 /*
2249 * Simple port I/O.
2250 */
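/* Value mask table indexed by AccessSize (1, 2 or 4 bytes); the other slots are safe UINT32_MAX fallbacks. */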
2251 static uint32_t const s_fAndMask[8] =
2252 { UINT32_MAX, UINT32_C(0xff), UINT32_C(0xffff), UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX };
2253 uint32_t const fAndMask = s_fAndMask[pMsg->AccessInfo.AccessSize];
2254
2255 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
2256 if (pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
2257 {
2258 rcStrict = IOMIOPortWrite(pVM, pVCpu, pMsg->PortNumber, (uint32_t)pMsg->Rax & fAndMask, pMsg->AccessInfo.AccessSize);
2259 Log4(("IOExit/%u: %04x:%08RX64/%s: OUT %#x, %#x LB %u rcStrict=%Rrc\n",
2260 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2261 pMsg->PortNumber, (uint32_t)pMsg->Rax & fAndMask, pMsg->AccessInfo.AccessSize, VBOXSTRICTRC_VAL(rcStrict) ));
2262 if (IOM_SUCCESS(rcStrict))
2263 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, &pMsg->Header, 1);
2264# ifdef IN_RING0
2265 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
2266 && !pVCpu->cpum.GstCtx.rflags.Bits.u1TF
2267 /** @todo check for debug breakpoints */ )
2268 return EMRZSetPendingIoPortWrite(pVCpu, pMsg->PortNumber, pMsg->Header.InstructionLength,
2269 pMsg->AccessInfo.AccessSize, (uint32_t)pMsg->Rax & fAndMask);
2270# endif
2271 else
2272 {
2273 pVCpu->cpum.GstCtx.rax = pMsg->Rax;
2274 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RAX;
2275 }
2276 }
2277 else
2278 {
2279 uint32_t uValue = 0;
2280 rcStrict = IOMIOPortRead(pVM, pVCpu, pMsg->PortNumber, &uValue, pMsg->AccessInfo.AccessSize);
2281 Log4(("IOExit/%u: %04x:%08RX64/%s: IN %#x LB %u -> %#x, rcStrict=%Rrc\n",
2282 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2283 pMsg->PortNumber, pMsg->AccessInfo.AccessSize, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2284 if (IOM_SUCCESS(rcStrict))
2285 {
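/* 8/16-bit IN merges into the low bits of RAX; 32-bit IN replaces RAX (upper half zeroed, like the real instruction). */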
2286 if (pMsg->AccessInfo.AccessSize != 4)
2287 pVCpu->cpum.GstCtx.rax = (pMsg->Rax & ~(uint64_t)fAndMask) | (uValue & fAndMask);
2288 else
2289 pVCpu->cpum.GstCtx.rax = uValue;
2290 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RAX;
2291 Log4(("IOExit/%u: RAX %#RX64 -> %#RX64\n", pVCpu->idCpu, pMsg->Rax, pVCpu->cpum.GstCtx.rax));
2292 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, &pMsg->Header, 1);
2293 }
2294 else
2295 {
2296 pVCpu->cpum.GstCtx.rax = pMsg->Rax;
2297 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RAX;
2298# ifdef IN_RING0
2299 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
2300 && !pVCpu->cpum.GstCtx.rflags.Bits.u1TF
2301 /** @todo check for debug breakpoints */ )
2302 return EMRZSetPendingIoPortRead(pVCpu, pMsg->PortNumber, pMsg->Header.InstructionLength,
2303 pMsg->AccessInfo.AccessSize);
2304# endif
2305 }
2306 }
2307 }
2308 else
2309 {
2310 /*
2311 * String port I/O.
2312 */
2313 /** @todo Someone at Microsoft please explain how we can get the address mode
2314 * from the IoPortAccess.VpContext. CS.Attributes is only sufficient for
2315 * getting the default mode; it can always be overridden by a prefix. This
2316 * forces us to interpret the instruction from opcodes, which is suboptimal.
2317 * Both AMD-V and VT-x include the address size in the exit info, at least on
2318 * CPUs that are reasonably new.
2319 *
2320 * Of course, it's possible this is undocumented and we just need to do some
2321 * experiments to figure out how it's communicated. Alternatively, we can scan
2322 * the opcode bytes for possible evil prefixes.
2323 */
2324 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
2325 pVCpu->cpum.GstCtx.fExtrn &= ~( CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDI | CPUMCTX_EXTRN_RSI
2326 | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2327 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.ds, pMsg->DsSegment);
2328 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.es, pMsg->EsSegment);
2329 pVCpu->cpum.GstCtx.rax = pMsg->Rax;
2330 pVCpu->cpum.GstCtx.rcx = pMsg->Rcx;
2331 pVCpu->cpum.GstCtx.rdi = pMsg->Rdi;
2332 pVCpu->cpum.GstCtx.rsi = pMsg->Rsi;
2333# ifdef IN_RING0
2334 rcStrict = nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "IOExit");
2335 if (rcStrict != VINF_SUCCESS)
2336 return rcStrict;
2337# else
2338 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2339 AssertRCReturn(rc, rc);
2340 RT_NOREF(pGVCpu);
2341# endif
2342
2343 Log4(("IOExit/%u: %04x:%08RX64/%s: %s%s %#x LB %u (emulating)\n",
2344 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2345 pMsg->AccessInfo.RepPrefix ? "REP " : "",
2346 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? "OUTS" : "INS",
2347 pMsg->PortNumber, pMsg->AccessInfo.AccessSize ));
2348 rcStrict = IEMExecOne(pVCpu);
2349 }
2350 if (IOM_SUCCESS(rcStrict))
2351 {
2352 /*
2353 * Do debug checks.
2354 */
2355 if ( pMsg->Header.ExecutionState.DebugActive /** @todo Microsoft: Does DebugActive only reflect DR7? */
2356 || (pMsg->Header.Rflags & X86_EFL_TF)
2357 || DBGFBpIsHwIoArmed(pVM) )
2358 {
2359 /** @todo Debugging. */
2360 }
2361 }
2362 return rcStrict;
2363 }
2364
2365 /*
2366 * Frequent exit or something needing probing.
2367 * Get state and call EMHistoryExec.
2368 */
2369 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
2370 if (!pMsg->AccessInfo.StringOp)
2371 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RAX;
2372 else
2373 {
2374 pVCpu->cpum.GstCtx.fExtrn &= ~( CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDI | CPUMCTX_EXTRN_RSI
2375 | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2376 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.ds, pMsg->DsSegment);
2377 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.es, pMsg->EsSegment);
2378 pVCpu->cpum.GstCtx.rcx = pMsg->Rcx;
2379 pVCpu->cpum.GstCtx.rdi = pMsg->Rdi;
2380 pVCpu->cpum.GstCtx.rsi = pMsg->Rsi;
2381 }
2382 pVCpu->cpum.GstCtx.rax = pMsg->Rax;
2383
2384# ifdef IN_RING0
2385 rcStrict = nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "IOExit");
2386 if (rcStrict != VINF_SUCCESS)
2387 return rcStrict;
2388# else
2389 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2390 AssertRCReturn(rc, rc);
2391 RT_NOREF(pGVCpu);
2392# endif
2393
2394 Log4(("IOExit/%u: %04x:%08RX64/%s: %s%s%s %#x LB %u -> EMHistoryExec\n",
2395 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2396 pMsg->AccessInfo.RepPrefix ? "REP " : "",
2397 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? "OUT" : "IN",
2398 pMsg->AccessInfo.StringOp ? "S" : "",
2399 pMsg->PortNumber, pMsg->AccessInfo.AccessSize));
2400 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2401 Log4(("IOExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2402 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2403 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2404 return rcStrict;
2405}
2406#elif defined(IN_RING3)
2407/**
2408 * Deals with I/O port access exits (WHvRunVpExitReasonX64IoPortAccess).
2409 *
2410 * @returns Strict VBox status code.
2411 * @param pVM The cross context VM structure.
2412 * @param pVCpu The cross context per CPU structure.
2413 * @param pExit The VM exit information to handle.
2414 * @sa nemHCWinHandleMessageIoPort
2415 */
2416NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExitIoPort(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2417{
2418 Assert( pExit->IoPortAccess.AccessInfo.AccessSize == 1
2419 || pExit->IoPortAccess.AccessInfo.AccessSize == 2
2420 || pExit->IoPortAccess.AccessInfo.AccessSize == 4);
2421
2422 /*
2423 * Whatever we do, we must clear pending event injection upon resume.
2424 */
2425 if (pExit->VpContext.ExecutionState.InterruptionPending)
2426 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
2427
2428 /*
2429 * Add history first to avoid two paths doing EMHistoryExec calls.
2430 */
2431 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2432 !pExit->IoPortAccess.AccessInfo.StringOp
2433 ? ( pExit->IoPortAccess.AccessInfo.IsWrite
2434 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_WRITE)
2435 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_READ))
2436 : ( pExit->IoPortAccess.AccessInfo.IsWrite
2437 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_WRITE)
2438 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_READ)),
2439 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
2440 if (!pExitRec)
2441 {
2442 VBOXSTRICTRC rcStrict;
2443 if (!pExit->IoPortAccess.AccessInfo.StringOp)
2444 {
2445 /*
2446 * Simple port I/O.
2447 */
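/* Value mask table indexed by AccessSize (1, 2 or 4 bytes); the other slots are safe UINT32_MAX fallbacks. */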
2448 static uint32_t const s_fAndMask[8] =
2449 { UINT32_MAX, UINT32_C(0xff), UINT32_C(0xffff), UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX };
2450 uint32_t const fAndMask = s_fAndMask[pExit->IoPortAccess.AccessInfo.AccessSize];
2451 if (pExit->IoPortAccess.AccessInfo.IsWrite)
2452 {
2453 rcStrict = IOMIOPortWrite(pVM, pVCpu, pExit->IoPortAccess.PortNumber,
2454 (uint32_t)pExit->IoPortAccess.Rax & fAndMask,
2455 pExit->IoPortAccess.AccessInfo.AccessSize);
2456 Log4(("IOExit/%u: %04x:%08RX64/%s: OUT %#x, %#x LB %u rcStrict=%Rrc\n",
2457 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2458 pExit->IoPortAccess.PortNumber, (uint32_t)pExit->IoPortAccess.Rax & fAndMask,
2459 pExit->IoPortAccess.AccessInfo.AccessSize, VBOXSTRICTRC_VAL(rcStrict) ));
2460 if (IOM_SUCCESS(rcStrict))
2461 {
2462 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2463 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, &pExit->VpContext, 1);
2464 }
2465 }
2466 else
2467 {
2468 uint32_t uValue = 0;
2469 rcStrict = IOMIOPortRead(pVM, pVCpu, pExit->IoPortAccess.PortNumber, &uValue,
2470 pExit->IoPortAccess.AccessInfo.AccessSize);
2471 Log4(("IOExit/%u: %04x:%08RX64/%s: IN %#x LB %u -> %#x, rcStrict=%Rrc\n",
2472 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2473 pExit->IoPortAccess.PortNumber, pExit->IoPortAccess.AccessInfo.AccessSize, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2474 if (IOM_SUCCESS(rcStrict))
2475 {
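/* 8/16-bit IN merges into the low bits of RAX; 32-bit IN replaces RAX (upper half zeroed, like the real instruction). */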
2476 if (pExit->IoPortAccess.AccessInfo.AccessSize != 4)
2477 pVCpu->cpum.GstCtx.rax = (pExit->IoPortAccess.Rax & ~(uint64_t)fAndMask) | (uValue & fAndMask);
2478 else
2479 pVCpu->cpum.GstCtx.rax = uValue;
2480 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RAX;
2481 Log4(("IOExit/%u: RAX %#RX64 -> %#RX64\n", pVCpu->idCpu, pExit->IoPortAccess.Rax, pVCpu->cpum.GstCtx.rax));
2482 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2483 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, &pExit->VpContext, 1);
2484 }
2485 }
2486 }
2487 else
2488 {
2489 /*
2490 * String port I/O.
2491 */
2492 /** @todo Someone at Microsoft please explain how we can get the address mode
2493 * from the IoPortAccess.VpContext. CS.Attributes is only sufficient for
2494 * getting the default mode; it can always be overridden by a prefix. This
2495 * forces us to interpret the instruction from opcodes, which is suboptimal.
2496 * Both AMD-V and VT-x include the address size in the exit info, at least on
2497 * CPUs that are reasonably new.
2498 *
2499 * Of course, it's possible this is undocumented and we just need to do some
2500 * experiments to figure out how it's communicated. Alternatively, we can scan
2501 * the opcode bytes for possible evil prefixes.
2502 */
2503 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2504 pVCpu->cpum.GstCtx.fExtrn &= ~( CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDI | CPUMCTX_EXTRN_RSI
2505 | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2506 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.ds, pExit->IoPortAccess.Ds);
2507 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.es, pExit->IoPortAccess.Es);
2508 pVCpu->cpum.GstCtx.rax = pExit->IoPortAccess.Rax;
2509 pVCpu->cpum.GstCtx.rcx = pExit->IoPortAccess.Rcx;
2510 pVCpu->cpum.GstCtx.rdi = pExit->IoPortAccess.Rdi;
2511 pVCpu->cpum.GstCtx.rsi = pExit->IoPortAccess.Rsi;
2512 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2513 AssertRCReturn(rc, rc);
2514
2515 Log4(("IOExit/%u: %04x:%08RX64/%s: %s%s %#x LB %u (emulating)\n",
2516 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2517 pExit->IoPortAccess.AccessInfo.RepPrefix ? "REP " : "",
2518 pExit->IoPortAccess.AccessInfo.IsWrite ? "OUTS" : "INS",
2519 pExit->IoPortAccess.PortNumber, pExit->IoPortAccess.AccessInfo.AccessSize ));
2520 rcStrict = IEMExecOne(pVCpu);
2521 }
2522 if (IOM_SUCCESS(rcStrict))
2523 {
2524 /*
2525 * Do debug checks.
2526 */
2527 if ( pExit->VpContext.ExecutionState.DebugActive /** @todo Microsoft: Does DebugActive only reflect DR7? */
2528 || (pExit->VpContext.Rflags & X86_EFL_TF)
2529 || DBGFBpIsHwIoArmed(pVM) )
2530 {
2531 /** @todo Debugging. */
2532 }
2533 }
2534 return rcStrict;
2535 }
2536
2537 /*
2538 * Frequent exit or something needing probing.
2539 * Get state and call EMHistoryExec.
2540 */
2541 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2542 if (!pExit->IoPortAccess.AccessInfo.StringOp)
2543 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RAX;
2544 else
2545 {
2546 pVCpu->cpum.GstCtx.fExtrn &= ~( CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDI | CPUMCTX_EXTRN_RSI
2547 | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2548 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.ds, pExit->IoPortAccess.Ds);
2549 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.es, pExit->IoPortAccess.Es);
2550 pVCpu->cpum.GstCtx.rcx = pExit->IoPortAccess.Rcx;
2551 pVCpu->cpum.GstCtx.rdi = pExit->IoPortAccess.Rdi;
2552 pVCpu->cpum.GstCtx.rsi = pExit->IoPortAccess.Rsi;
2553 }
2554 pVCpu->cpum.GstCtx.rax = pExit->IoPortAccess.Rax;
2555 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2556 AssertRCReturn(rc, rc);
2557 Log4(("IOExit/%u: %04x:%08RX64/%s: %s%s%s %#x LB %u -> EMHistoryExec\n",
2558 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2559 pExit->IoPortAccess.AccessInfo.RepPrefix ? "REP " : "",
2560 pExit->IoPortAccess.AccessInfo.IsWrite ? "OUT" : "IN",
2561 pExit->IoPortAccess.AccessInfo.StringOp ? "S" : "",
2562 pExit->IoPortAccess.PortNumber, pExit->IoPortAccess.AccessInfo.AccessSize));
2563 VBOXSTRICTRC rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2564 Log4(("IOExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2565 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2566 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2567 return rcStrict;
2568}
2569#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
2570
2571
2572#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
2573/**
2574 * Deals with interrupt window message.
2575 *
2576 * @returns Strict VBox status code.
2577 * @param pVM The cross context VM structure.
2578 * @param pVCpu The cross context per CPU structure.
2579 * @param pMsg The message.
2580 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
2581 * @sa nemR3WinHandleExitInterruptWindow
2582 */
2583NEM_TMPL_STATIC VBOXSTRICTRC
2584nemHCWinHandleMessageInterruptWindow(PVM pVM, PVMCPU pVCpu, HV_X64_INTERRUPT_WINDOW_MESSAGE const *pMsg, PGVMCPU pGVCpu)
2585{
2586 /*
2587 * Assert message sanity.
2588 */
2589 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_EXECUTE
2590 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ // READ & WRITE are probably not used here
2591 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE);
2592 AssertMsg(pMsg->Type == HvX64PendingInterrupt || pMsg->Type == HvX64PendingNmi, ("%#x\n", pMsg->Type));
2593
2594 /*
2595 * Just copy the state we've got and handle it in the loop for now.
2596 */
2597 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_INTTERRUPT_WINDOW),
2598 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
2599
2600 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
2601 Log4(("IntWinExit/%u: %04x:%08RX64/%s: %u IF=%d InterruptShadow=%d\n",
2602 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2603 pMsg->Type, RT_BOOL(pMsg->Header.Rflags & X86_EFL_IF), pMsg->Header.ExecutionState.InterruptShadow));
2604
2605 /** @todo call nemHCWinHandleInterruptFF */
2606 RT_NOREF(pVM, pGVCpu);
2607 return VINF_SUCCESS;
2608}
2609#elif defined(IN_RING3)
2610/**
2611 * Deals with interrupt window exits (WHvRunVpExitReasonX64InterruptWindow).
2612 *
2613 * @returns Strict VBox status code.
2614 * @param pVM The cross context VM structure.
2615 * @param pVCpu The cross context per CPU structure.
2616 * @param pExit The VM exit information to handle.
2617 * @sa nemHCWinHandleMessageInterruptWindow
2618 */
2619NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExitInterruptWindow(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2620{
2621 /*
2622 * Assert message sanity.
2623 */
2624 AssertMsg( pExit->InterruptWindow.DeliverableType == WHvX64PendingInterrupt
2625 || pExit->InterruptWindow.DeliverableType == WHvX64PendingNmi,
2626 ("%#x\n", pExit->InterruptWindow.DeliverableType));
2627
2628 /*
2629 * Just copy the state we've got and handle it in the loop for now.
2630 */
2631 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_INTTERRUPT_WINDOW),
2632 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
2633
2634 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2635 Log4(("IntWinExit/%u: %04x:%08RX64/%s: %u IF=%d InterruptShadow=%d\n",
2636 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2637 pExit->InterruptWindow.DeliverableType, RT_BOOL(pExit->VpContext.Rflags & X86_EFL_IF),
2638 pExit->VpContext.ExecutionState.InterruptShadow));
2639
2640 /** @todo call nemHCWinHandleInterruptFF */
2641 RT_NOREF(pVM);
2642 return VINF_SUCCESS;
2643}
2644#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
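/*
 * Note: An interrupt window exit simply means the guest has become able to
 * take the event the VMM asked to be notified about; for external interrupts
 * that is IF=1 and no interrupt shadow.  A minimal sketch of that kind of
 * deliverability check (plain C with a toy flag constant; the real loop works
 * on the CPUMCTX state imported above):
 */
#if 0 /* illustrative sketch only */
# include <stdbool.h>
# include <stdint.h>

# define TOY_EFL_IF  (UINT64_C(1) << 9)  /* x86 RFLAGS.IF bit position */

static bool toyCanDeliverInterrupt(uint64_t fRFlags, bool fInterruptShadow)
{
    return (fRFlags & TOY_EFL_IF) != 0 && !fInterruptShadow;
}
#endif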
2645
2646
2647#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
2648/**
2649 * Deals with CPUID intercept message.
2650 *
2651 * @returns Strict VBox status code.
2652 * @param pVM The cross context VM structure.
2653 * @param pVCpu The cross context per CPU structure.
2654 * @param pMsg The message.
2655 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
2656 * @sa nemR3WinHandleExitCpuId
2657 */
2658NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageCpuId(PVM pVM, PVMCPU pVCpu, HV_X64_CPUID_INTERCEPT_MESSAGE const *pMsg,
2659 PGVMCPU pGVCpu)
2660{
2661 /* Check message register value sanity. */
2662 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, pGVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
2663 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRip, pMsg->Header.Rip);
2664 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
2665 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
2666 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRax, pMsg->Rax);
2667 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRcx, pMsg->Rcx);
2668 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRdx, pMsg->Rdx);
2669 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRbx, pMsg->Rbx);
2670
2671 /* Do exit history. */
2672 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_CPUID),
2673 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
2674 if (!pExitRec)
2675 {
2676 /*
2677 * Soak up state and execute the instruction.
2678 *
2679 * Note! If this grows slightly more complicated, combine into an IEMExecDecodedCpuId
2680 * function and make everyone use it.
2681 */
2682 /** @todo Combine implementations into IEMExecDecodedCpuId as this will
2683 * only get weirder with nested VT-x and AMD-V support. */
2684 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
2685
2686 /* Copy in the low register values (top is always cleared). */
2687 pVCpu->cpum.GstCtx.rax = (uint32_t)pMsg->Rax;
2688 pVCpu->cpum.GstCtx.rcx = (uint32_t)pMsg->Rcx;
2689 pVCpu->cpum.GstCtx.rdx = (uint32_t)pMsg->Rdx;
2690 pVCpu->cpum.GstCtx.rbx = (uint32_t)pMsg->Rbx;
2691 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);
2692
2693 /* Get the correct values. */
2694 CPUMGetGuestCpuId(pVCpu, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx,
2695 &pVCpu->cpum.GstCtx.eax, &pVCpu->cpum.GstCtx.ebx, &pVCpu->cpum.GstCtx.ecx, &pVCpu->cpum.GstCtx.edx);
2696
2697 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: rax=%08RX64 / rcx=%08RX64 / rdx=%08RX64 / rbx=%08RX64 -> %08RX32 / %08RX32 / %08RX32 / %08RX32 (hv: %08RX64 / %08RX64 / %08RX64 / %08RX64)\n",
2698 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2699 pMsg->Rax, pMsg->Rcx, pMsg->Rdx, pMsg->Rbx,
2700 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.ebx,
2701 pMsg->DefaultResultRax, pMsg->DefaultResultRcx, pMsg->DefaultResultRdx, pMsg->DefaultResultRbx));
2702
2703 /* Move RIP and we're done. */
2704 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, &pMsg->Header, 2);
2705
2706 return VINF_SUCCESS;
2707 }
2708
2709 /*
2710 * Frequent exit or something needing probing.
2711 * Get state and call EMHistoryExec.
2712 */
2713 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
2714 pVCpu->cpum.GstCtx.rax = pMsg->Rax;
2715 pVCpu->cpum.GstCtx.rcx = pMsg->Rcx;
2716 pVCpu->cpum.GstCtx.rdx = pMsg->Rdx;
2717 pVCpu->cpum.GstCtx.rbx = pMsg->Rbx;
2718 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);
2719 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: rax=%08RX64 / rcx=%08RX64 / rdx=%08RX64 / rbx=%08RX64 (hv: %08RX64 / %08RX64 / %08RX64 / %08RX64) ==> EMHistoryExec\n",
2720 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2721 pMsg->Rax, pMsg->Rcx, pMsg->Rdx, pMsg->Rbx,
2722 pMsg->DefaultResultRax, pMsg->DefaultResultRcx, pMsg->DefaultResultRdx, pMsg->DefaultResultRbx));
2723# ifdef IN_RING0
2724 VBOXSTRICTRC rcStrict = nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "CpuIdExit");
2725 if (rcStrict != VINF_SUCCESS)
2726 return rcStrict;
2727 RT_NOREF(pVM);
2728# else
2729 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2730 AssertRCReturn(rc, rc);
2731 RT_NOREF(pGVCpu);
2732# endif
2733 VBOXSTRICTRC rcStrictExec = EMHistoryExec(pVCpu, pExitRec, 0);
2734 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2735 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2736 VBOXSTRICTRC_VAL(rcStrictExec), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2737 return rcStrictExec;
2738}
2739#elif defined(IN_RING3)
2740/**
2741 * Deals with CPUID exits (WHvRunVpExitReasonX64Cpuid).
2742 *
2743 * @returns Strict VBox status code.
2744 * @param pVM The cross context VM structure.
2745 * @param pVCpu The cross context per CPU structure.
2746 * @param pExit The VM exit information to handle.
2747 * @sa nemHCWinHandleMessageCpuId
2748 */
2749NEM_TMPL_STATIC VBOXSTRICTRC
2750nemR3WinHandleExitCpuId(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2751{
2752 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_CPUID),
2753 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
2754 if (!pExitRec)
2755 {
2756 /*
2757 * Soak up state and execute the instruction.
2758 *
2759 * Note! If this grows slightly more complicated, combine into an IEMExecDecodedCpuId
2760 * function and make everyone use it.
2761 */
2762 /** @todo Combine implementations into IEMExecDecodedCpuId as this will
2763 * only get weirder with nested VT-x and AMD-V support. */
2764 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2765
2766 /* Copy in the low register values (top is always cleared). */
2767 pVCpu->cpum.GstCtx.rax = (uint32_t)pExit->CpuidAccess.Rax;
2768 pVCpu->cpum.GstCtx.rcx = (uint32_t)pExit->CpuidAccess.Rcx;
2769 pVCpu->cpum.GstCtx.rdx = (uint32_t)pExit->CpuidAccess.Rdx;
2770 pVCpu->cpum.GstCtx.rbx = (uint32_t)pExit->CpuidAccess.Rbx;
2771 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);
2772
2773 /* Get the correct values. */
2774 CPUMGetGuestCpuId(pVCpu, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx,
2775 &pVCpu->cpum.GstCtx.eax, &pVCpu->cpum.GstCtx.ebx, &pVCpu->cpum.GstCtx.ecx, &pVCpu->cpum.GstCtx.edx);
2776
2777 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: rax=%08RX64 / rcx=%08RX64 / rdx=%08RX64 / rbx=%08RX64 -> %08RX32 / %08RX32 / %08RX32 / %08RX32 (hv: %08RX64 / %08RX64 / %08RX64 / %08RX64)\n",
2778 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2779 pExit->CpuidAccess.Rax, pExit->CpuidAccess.Rcx, pExit->CpuidAccess.Rdx, pExit->CpuidAccess.Rbx,
2780 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.ebx,
2781 pExit->CpuidAccess.DefaultResultRax, pExit->CpuidAccess.DefaultResultRcx, pExit->CpuidAccess.DefaultResultRdx, pExit->CpuidAccess.DefaultResultRbx));
2782
2783 /* Move RIP and we're done. */
2784 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, &pExit->VpContext, 2);
2785
2786 RT_NOREF_PV(pVM);
2787 return VINF_SUCCESS;
2788 }
2789
2790 /*
2791 * Frequent exit or something needing probing.
2792 * Get state and call EMHistoryExec.
2793 */
2794 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2795 pVCpu->cpum.GstCtx.rax = pExit->CpuidAccess.Rax;
2796 pVCpu->cpum.GstCtx.rcx = pExit->CpuidAccess.Rcx;
2797 pVCpu->cpum.GstCtx.rdx = pExit->CpuidAccess.Rdx;
2798 pVCpu->cpum.GstCtx.rbx = pExit->CpuidAccess.Rbx;
2799 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);
2800 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: rax=%08RX64 / rcx=%08RX64 / rdx=%08RX64 / rbx=%08RX64 (hv: %08RX64 / %08RX64 / %08RX64 / %08RX64) ==> EMHistoryExec\n",
2801 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2802 pExit->CpuidAccess.Rax, pExit->CpuidAccess.Rcx, pExit->CpuidAccess.Rdx, pExit->CpuidAccess.Rbx,
2803 pExit->CpuidAccess.DefaultResultRax, pExit->CpuidAccess.DefaultResultRcx, pExit->CpuidAccess.DefaultResultRdx, pExit->CpuidAccess.DefaultResultRbx));
2804 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2805 AssertRCReturn(rc, rc);
2806 VBOXSTRICTRC rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2807 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2808 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2809 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2810 return rcStrict;
2811}
2812#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
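/*
 * Note: The (uint32_t) casts in both CPUID handlers above encode the
 * architectural contract: CPUID consumes only EAX/ECX and clears the upper
 * halves of RAX/RBX/RCX/RDX, so the handlers can work on the low 32 bits
 * throughout.  A self-contained sketch of that contract with a toy leaf
 * table (toy leaf data, not a real CPUID database):
 */
#if 0 /* illustrative sketch only */
# include <stdint.h>
# include <stddef.h>

typedef struct TOYCPUIDLEAF { uint32_t uLeaf, uEax, uEbx, uEcx, uEdx; } TOYCPUIDLEAF;

static const TOYCPUIDLEAF g_aToyLeaves[] =
{
    { 0x00000000, 0x0000000d, 0x756e6547, 0x6c65746e, 0x49656e69 }, /* toy leaf 0 */
};

static void toyGetCpuId(uint64_t uRax, uint64_t *pRax, uint64_t *pRbx, uint64_t *pRcx, uint64_t *pRdx)
{
    uint32_t const uLeaf = (uint32_t)uRax;      /* only EAX is consumed here */
    uint32_t uEax = 0, uEbx = 0, uEcx = 0, uEdx = 0;
    for (size_t i = 0; i < sizeof(g_aToyLeaves) / sizeof(g_aToyLeaves[0]); i++)
        if (g_aToyLeaves[i].uLeaf == uLeaf)
        {
            uEax = g_aToyLeaves[i].uEax;
            uEbx = g_aToyLeaves[i].uEbx;
            uEcx = g_aToyLeaves[i].uEcx;
            uEdx = g_aToyLeaves[i].uEdx;
        }
    *pRax = uEax;   /* assigning a uint32_t zero-extends: top halves cleared */
    *pRbx = uEbx;
    *pRcx = uEcx;
    *pRdx = uEdx;
}
#endif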
2813
2814
2815#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
2816/**
2817 * Deals with MSR intercept message.
2818 *
2819 * @returns Strict VBox status code.
2820 * @param pVCpu The cross context per CPU structure.
2821 * @param pMsg The message.
2822 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
2823 * @sa nemR3WinHandleExitMsr
2824 */
2825NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageMsr(PVMCPU pVCpu, HV_X64_MSR_INTERCEPT_MESSAGE const *pMsg, PGVMCPU pGVCpu)
2826{
2827 /*
2828 * A wee bit of sanity first.
2829 */
2830 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
2831 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE);
2832 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, pGVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
2833 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRip, pMsg->Header.Rip);
2834 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
2835 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
2836 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRax, pMsg->Rax);
2837 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRdx, pMsg->Rdx);
2838
2839 /*
2840 * Check CPL as that's common to both RDMSR and WRMSR.
2841 */
2842 VBOXSTRICTRC rcStrict;
2843 if (pMsg->Header.ExecutionState.Cpl == 0)
2844 {
2845 /*
2846 * Get all the MSR state. Since we're getting EFER, we also need to
2847 * get CR0, CR4 and CR3.
2848 */
2849 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2850 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
2851 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE)
2852 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ),
2853 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
2854
2855 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
2856 rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu,
2857 (!pExitRec ? 0 : IEM_CPUMCTX_EXTRN_MUST_MASK)
2858 | CPUMCTX_EXTRN_ALL_MSRS | CPUMCTX_EXTRN_CR0
2859 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4,
2860 "MSRs");
2861 if (rcStrict == VINF_SUCCESS)
2862 {
2863 if (!pExitRec)
2864 {
2865 /*
2866 * Handle writes.
2867 */
2868 if (pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
2869 {
2870 rcStrict = CPUMSetGuestMsr(pVCpu, pMsg->MsrNumber, RT_MAKE_U64((uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx));
2871 Log4(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc\n",
2872 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2873 pMsg->MsrNumber, (uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx, VBOXSTRICTRC_VAL(rcStrict) ));
2874 if (rcStrict == VINF_SUCCESS)
2875 {
2876 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, &pMsg->Header, 2);
2877 return VINF_SUCCESS;
2878 }
2879# ifndef IN_RING3
2880 /* move to ring-3 and handle the trap/whatever there, as we want to LogRel this. */
2881 if (rcStrict == VERR_CPUM_RAISE_GP_0)
2882 rcStrict = VINF_CPUM_R3_MSR_WRITE;
2883 return rcStrict;
2884# else
2885 LogRel(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc!\n",
2886 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2887 pMsg->MsrNumber, (uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx, VBOXSTRICTRC_VAL(rcStrict) ));
2888# endif
2889 }
2890 /*
2891 * Handle reads.
2892 */
2893 else
2894 {
2895 uint64_t uValue = 0;
2896 rcStrict = CPUMQueryGuestMsr(pVCpu, pMsg->MsrNumber, &uValue);
2897 Log4(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n",
2898 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2899 pMsg->MsrNumber, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2900 if (rcStrict == VINF_SUCCESS)
2901 {
2902 pVCpu->cpum.GstCtx.rax = (uint32_t)uValue;
2903 pVCpu->cpum.GstCtx.rdx = uValue >> 32;
2904 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX);
2905 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, &pMsg->Header, 2);
2906 return VINF_SUCCESS;
2907 }
2908# ifndef IN_RING3
2909 /* move to ring-3 and handle the trap/whatever there, as we want to LogRel this. */
2910 if (rcStrict == VERR_CPUM_RAISE_GP_0)
2911 rcStrict = VINF_CPUM_R3_MSR_READ;
2912 return rcStrict;
2913# else
2914 LogRel(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n",
2915 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2916 pMsg->MsrNumber, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2917# endif
2918 }
2919 }
2920 else
2921 {
2922 /*
2923 * Handle frequent exit or something needing probing.
2924 */
2925 Log4(("MsrExit/%u: %04x:%08RX64/%s: %sMSR %#08x\n",
2926 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2927 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? "WR" : "RD", pMsg->MsrNumber));
2928 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2929 Log4(("MsrExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2930 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2931 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2932 return rcStrict;
2933 }
2934 }
2935 else
2936 {
2937 LogRel(("MsrExit/%u: %04x:%08RX64/%s: %sMSR %08x -> %Rrc - msr state import\n",
2938 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2939 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? "WR" : "RD",
2940 pMsg->MsrNumber, VBOXSTRICTRC_VAL(rcStrict) ));
2941 return rcStrict;
2942 }
2943 }
2944 else if (pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
2945 Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); WRMSR %08x, %08x:%08x\n",
2946 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2947 pMsg->Header.ExecutionState.Cpl, pMsg->MsrNumber, (uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx ));
2948 else
2949 Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); RDMSR %08x\n",
2950 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2951 pMsg->Header.ExecutionState.Cpl, pMsg->MsrNumber));
2952
2953 /*
2954 * If we get down here, we're supposed to #GP(0).
2955 */
2956 rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL_MSRS, "MSR");
2957 if (rcStrict == VINF_SUCCESS)
2958 {
2959 rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_GP, TRPM_TRAP, 0, 0, 0);
2960 if (rcStrict == VINF_IEM_RAISED_XCPT)
2961 rcStrict = VINF_SUCCESS;
2962 else if (rcStrict != VINF_SUCCESS)
2963 Log4(("MsrExit/%u: Injecting #GP(0) failed: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict) ));
2964 }
2965 return rcStrict;
2966}
2967#elif defined(IN_RING3)
2968/**
2969 * Deals with MSR access exits (WHvRunVpExitReasonX64MsrAccess).
2970 *
2971 * @returns Strict VBox status code.
2972 * @param pVM The cross context VM structure.
2973 * @param pVCpu The cross context per CPU structure.
2974 * @param pExit The VM exit information to handle.
2975 * @sa nemHCWinHandleMessageMsr
2976 */
2977NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExitMsr(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2978{
2979 /*
2980 * Check CPL as that's common to both RDMSR and WRMSR.
2981 */
2982 VBOXSTRICTRC rcStrict;
2983 if (pExit->VpContext.ExecutionState.Cpl == 0)
2984 {
2985 /*
2986 * Get all the MSR state. Since we're getting EFER, we also need to
2987 * get CR0, CR4 and CR3.
2988 */
2989 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2990 pExit->MsrAccess.AccessInfo.IsWrite
2991 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE)
2992 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ),
2993 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
2994 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2995 rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL,
2996 (!pExitRec ? 0 : IEM_CPUMCTX_EXTRN_MUST_MASK)
2997 | CPUMCTX_EXTRN_ALL_MSRS | CPUMCTX_EXTRN_CR0
2998 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4,
2999 "MSRs");
3000 if (rcStrict == VINF_SUCCESS)
3001 {
3002 if (!pExitRec)
3003 {
3004 /*
3005 * Handle writes.
3006 */
3007 if (pExit->MsrAccess.AccessInfo.IsWrite)
3008 {
3009 rcStrict = CPUMSetGuestMsr(pVCpu, pExit->MsrAccess.MsrNumber,
3010 RT_MAKE_U64((uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx));
3011 Log4(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3012 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->MsrAccess.MsrNumber,
3013 (uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx, VBOXSTRICTRC_VAL(rcStrict) ));
3014 if (rcStrict == VINF_SUCCESS)
3015 {
3016 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, &pExit->VpContext, 2);
3017 return VINF_SUCCESS;
3018 }
3019 LogRel(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc!\n", pVCpu->idCpu,
3020 pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
3021 pExit->MsrAccess.MsrNumber, (uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx,
3022 VBOXSTRICTRC_VAL(rcStrict) ));
3023 }
3024 /*
3025 * Handle reads.
3026 */
3027 else
3028 {
3029 uint64_t uValue = 0;
3030 rcStrict = CPUMQueryGuestMsr(pVCpu, pExit->MsrAccess.MsrNumber, &uValue);
3031 Log4(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n", pVCpu->idCpu,
3032 pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
3033 pExit->MsrAccess.MsrNumber, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
3034 if (rcStrict == VINF_SUCCESS)
3035 {
3036 pVCpu->cpum.GstCtx.rax = (uint32_t)uValue;
3037 pVCpu->cpum.GstCtx.rdx = uValue >> 32;
3038 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX);
3039 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, &pExit->VpContext, 2);
3040 return VINF_SUCCESS;
3041 }
3042 LogRel(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3043 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->MsrAccess.MsrNumber,
3044 uValue, VBOXSTRICTRC_VAL(rcStrict) ));
3045 }
3046 }
3047 else
3048 {
3049 /*
3050 * Handle frequent exit or something needing probing.
3051 */
3052 Log4(("MsrExit/%u: %04x:%08RX64/%s: %sMSR %#08x\n",
3053 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
3054 pExit->MsrAccess.AccessInfo.IsWrite ? "WR" : "RD", pExit->MsrAccess.MsrNumber));
3055 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
3056 Log4(("MsrExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
3057 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
3058 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
3059 return rcStrict;
3060 }
3061 }
3062 else
3063 {
3064 LogRel(("MsrExit/%u: %04x:%08RX64/%s: %sMSR %08x -> %Rrc - msr state import\n",
3065 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
3066 pExit->MsrAccess.AccessInfo.IsWrite ? "WR" : "RD", pExit->MsrAccess.MsrNumber, VBOXSTRICTRC_VAL(rcStrict) ));
3067 return rcStrict;
3068 }
3069 }
3070 else if (pExit->MsrAccess.AccessInfo.IsWrite)
3071 Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); WRMSR %08x, %08x:%08x\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3072 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.ExecutionState.Cpl,
3073 pExit->MsrAccess.MsrNumber, (uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx ));
3074 else
3075 Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); RDMSR %08x\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3076 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.ExecutionState.Cpl,
3077 pExit->MsrAccess.MsrNumber));
3078
3079 /*
3080 * If we get down here, we're supposed to #GP(0).
3081 */
3082 rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL,
3083 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL_MSRS, "MSR");
3084 if (rcStrict == VINF_SUCCESS)
3085 {
3086 rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_GP, TRPM_TRAP, 0, 0, 0);
3087 if (rcStrict == VINF_IEM_RAISED_XCPT)
3088 rcStrict = VINF_SUCCESS;
3089 else if (rcStrict != VINF_SUCCESS)
3090 Log4(("MsrExit/%u: Injecting #GP(0) failed: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict) ));
3091 }
3092
3093 RT_NOREF_PV(pVM);
3094 return rcStrict;
3095}
3096#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
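/*
 * Note: Both MSR handlers above rely on the RDMSR/WRMSR register convention:
 * the 64-bit MSR value travels split across EDX:EAX, with the upper halves
 * of RAX/RDX ignored on write and zeroed on read.  A self-contained sketch
 * of that packing/unpacking (plain C, no VBox types):
 */
#if 0 /* illustrative sketch only */
# include <stdint.h>
# include <assert.h>

static uint64_t toyMsrValueFromEdxEax(uint64_t uRax, uint64_t uRdx)
{
    /* WRMSR input: only the low halves matter. */
    return ((uint64_t)(uint32_t)uRdx << 32) | (uint32_t)uRax;
}

static void toyMsrValueToEdxEax(uint64_t uValue, uint64_t *puRax, uint64_t *puRdx)
{
    *puRax = (uint32_t)uValue;          /* RDMSR output: low half, top cleared */
    *puRdx = (uint32_t)(uValue >> 32);  /* high half, top cleared */
}

int main(void)
{
    uint64_t uRax, uRdx;
    toyMsrValueToEdxEax(UINT64_C(0xfee00d00deadbeef), &uRax, &uRdx);
    assert(toyMsrValueFromEdxEax(uRax, uRdx) == UINT64_C(0xfee00d00deadbeef));
    return 0;
}
#endif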
3097
3098
3099/**
3100 * Worker for nemHCWinHandleMessageException & nemR3WinHandleExitException that
3101 * checks if the given opcodes are of interest at all.
3102 *
3103 * @returns true if interesting, false if not.
3104 * @param cbOpcodes Number of opcode bytes available.
3105 * @param pbOpcodes The opcode bytes.
3106 * @param f64BitMode Whether we're in 64-bit mode.
3107 */
3108DECLINLINE(bool) nemHcWinIsInterestingUndefinedOpcode(uint8_t cbOpcodes, uint8_t const *pbOpcodes, bool f64BitMode)
3109{
3110 /*
3111 * Currently only interested in VMCALL and VMMCALL.
3112 */
3113 while (cbOpcodes >= 3)
3114 {
3115 switch (pbOpcodes[0])
3116 {
3117 case 0x0f:
3118 switch (pbOpcodes[1])
3119 {
3120 case 0x01:
3121 switch (pbOpcodes[2])
3122 {
3123 case 0xc1: /* 0f 01 c1 VMCALL */
3124 return true;
3125 case 0xd9: /* 0f 01 d9 VMMCALL */
3126 return true;
3127 default:
3128 break;
3129 }
3130 break;
3131 }
3132 break;
3133
3134 default:
3135 return false;
3136
3137 /* prefixes */
3138 case 0x40: case 0x41: case 0x42: case 0x43: case 0x44: case 0x45: case 0x46: case 0x47:
3139 case 0x48: case 0x49: case 0x4a: case 0x4b: case 0x4c: case 0x4d: case 0x4e: case 0x4f:
3140 if (!f64BitMode)
3141 return false;
3142 RT_FALL_THRU();
3143 case X86_OP_PRF_CS:
3144 case X86_OP_PRF_SS:
3145 case X86_OP_PRF_DS:
3146 case X86_OP_PRF_ES:
3147 case X86_OP_PRF_FS:
3148 case X86_OP_PRF_GS:
3149 case X86_OP_PRF_SIZE_OP:
3150 case X86_OP_PRF_SIZE_ADDR:
3151 case X86_OP_PRF_LOCK:
3152 case X86_OP_PRF_REPZ:
3153 case X86_OP_PRF_REPNZ:
3154 cbOpcodes--;
3155 pbOpcodes++;
3156 continue;
3157 }
3158 break;
3159 }
3160 return false;
3161}
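/*
 * Example inputs for the scanner above, assuming it behaves as written:
 *
 *      0f 01 c1        VMCALL          -> true
 *      0f 01 d9        VMMCALL         -> true
 *      f3 0f 01 d9     REPZ VMMCALL    -> true  (prefix skipped)
 *      48 0f 01 c1     REX.W VMCALL    -> true  in 64-bit mode, false otherwise
 *      90              NOP             -> false
 */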
3162
3163
3164#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
3165/**
3166 * Copies state included in an exception intercept message.
3167 *
3168 * @param pVCpu The cross context per CPU structure.
3169 * @param pMsg The message.
3170 * @param fClearXcpt Clear pending exception.
3171 */
3172DECLINLINE(void)
3173nemHCWinCopyStateFromExceptionMessage(PVMCPU pVCpu, HV_X64_EXCEPTION_INTERCEPT_MESSAGE const *pMsg, bool fClearXcpt)
3174{
3175 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
3176 pVCpu->cpum.GstCtx.fExtrn &= ~( CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_DS
3177 | (fClearXcpt ? CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT : 0) );
3178 pVCpu->cpum.GstCtx.rax = pMsg->Rax;
3179 pVCpu->cpum.GstCtx.rcx = pMsg->Rcx;
3180 pVCpu->cpum.GstCtx.rdx = pMsg->Rdx;
3181 pVCpu->cpum.GstCtx.rbx = pMsg->Rbx;
3182 pVCpu->cpum.GstCtx.rsp = pMsg->Rsp;
3183 pVCpu->cpum.GstCtx.rbp = pMsg->Rbp;
3184 pVCpu->cpum.GstCtx.rsi = pMsg->Rsi;
3185 pVCpu->cpum.GstCtx.rdi = pMsg->Rdi;
3186 pVCpu->cpum.GstCtx.r8 = pMsg->R8;
3187 pVCpu->cpum.GstCtx.r9 = pMsg->R9;
3188 pVCpu->cpum.GstCtx.r10 = pMsg->R10;
3189 pVCpu->cpum.GstCtx.r11 = pMsg->R11;
3190 pVCpu->cpum.GstCtx.r12 = pMsg->R12;
3191 pVCpu->cpum.GstCtx.r13 = pMsg->R13;
3192 pVCpu->cpum.GstCtx.r14 = pMsg->R14;
3193 pVCpu->cpum.GstCtx.r15 = pMsg->R15;
3194 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.ds, pMsg->DsSegment);
3195 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.ss, pMsg->SsSegment);
3196}
3197#elif defined(IN_RING3)
3198/**
3199 * Copies state included in an exception intercept exit.
3200 *
3201 * @param pVCpu The cross context per CPU structure.
3202 * @param pExit The VM exit information.
3203 * @param fClearXcpt Clear pending exception.
3204 */
3205DECLINLINE(void) nemR3WinCopyStateFromExceptionMessage(PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, bool fClearXcpt)
3206{
3207 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
3208 if (fClearXcpt)
3209 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
3210}
3211#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
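/*
 * Note: The fExtrn updates in the helpers above follow one rule: a set bit
 * means the field still lives in Hyper-V and must be imported before use;
 * clearing the bit marks the value just copied in as authoritative.  A
 * self-contained sketch of that bookkeeping (toy flag values, not the real
 * CPUMCTX_EXTRN_XXX constants):
 */
#if 0 /* illustrative sketch only */
# include <stdint.h>
# include <assert.h>

# define TOY_EXTRN_RAX  UINT64_C(0x0001)
# define TOY_EXTRN_RCX  UINT64_C(0x0002)

int main(void)
{
    uint64_t fExtrn = TOY_EXTRN_RAX | TOY_EXTRN_RCX;    /* all state external */
    /* ... copy RAX in from the intercept message ... */
    fExtrn &= ~TOY_EXTRN_RAX;                           /* RAX now valid locally */
    assert(!(fExtrn & TOY_EXTRN_RAX));
    assert(  fExtrn & TOY_EXTRN_RCX);                   /* RCX still needs import */
    return 0;
}
#endif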
3212
3213
3214#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
3215/**
3216 * Deals with exception intercept message (HvMessageTypeX64ExceptionIntercept).
3217 *
3218 * @returns Strict VBox status code.
3219 * @param pVCpu The cross context per CPU structure.
3220 * @param pMsg The message.
3221 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
3222 * @sa nemR3WinHandleExitException
3223 */
3224NEM_TMPL_STATIC VBOXSTRICTRC
3225nemHCWinHandleMessageException(PVMCPU pVCpu, HV_X64_EXCEPTION_INTERCEPT_MESSAGE const *pMsg, PGVMCPU pGVCpu)
3226{
3227 /*
3228 * Assert sanity.
3229 */
3230 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
3231 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
3232 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_EXECUTE);
3233 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, pGVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
3234 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRip, pMsg->Header.Rip);
3235 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
3236 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
3237 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, pGVCpu, HvX64RegisterDs, pMsg->DsSegment);
3238 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, pGVCpu, HvX64RegisterSs, pMsg->SsSegment);
3239 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRax, pMsg->Rax);
3240 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRcx, pMsg->Rcx);
3241 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRdx, pMsg->Rdx);
3242 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRbx, pMsg->Rbx);
3243 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRsp, pMsg->Rsp);
3244 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRbp, pMsg->Rbp);
3245 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRsi, pMsg->Rsi);
3246 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRdi, pMsg->Rdi);
3247 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterR8, pMsg->R8);
3248 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterR9, pMsg->R9);
3249 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterR10, pMsg->R10);
3250 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterR11, pMsg->R11);
3251 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterR12, pMsg->R12);
3252 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterR13, pMsg->R13);
3253 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterR14, pMsg->R14);
3254 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterR15, pMsg->R15);
3255
3256 /*
3257 * Get most of the register state since we'll end up making IEM inject the
3258 * event. The exception isn't normally flagged as a pending event, so duh.
3259 *
3260 * Note! We can optimize this later with event injection.
3261 */
3262 Log4(("XcptExit/%u: %04x:%08RX64/%s: %x errcd=%#x parm=%RX64\n",
3263 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
3264 pMsg->ExceptionVector, pMsg->ErrorCode, pMsg->ExceptionParameter));
3265 nemHCWinCopyStateFromExceptionMessage(pVCpu, pMsg, true /*fClearXcpt*/);
3266 uint64_t fWhat = NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM;
3267 if (pMsg->ExceptionVector == X86_XCPT_DB)
3268 fWhat |= CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_DR6;
3269 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, fWhat, "Xcpt");
3270 if (rcStrict != VINF_SUCCESS)
3271 return rcStrict;
3272
3273 /*
3274 * Handle the intercept.
3275 */
3276 TRPMEVENT enmEvtType = TRPM_TRAP;
3277 switch (pMsg->ExceptionVector)
3278 {
3279 /*
3280 * We get undefined opcodes on VMMCALL(AMD) & VMCALL(Intel) instructions
3281 * and need to turn them over to GIM.
3282 *
3283 * Note! We do not check fGIMTrapXcptUD here ASSUMING that GIM only wants
3284 * #UD for handling non-native hypercall instructions. (IEM will
3285 * decode both and let the GIM provider decide whether to accept it.)
3286 */
3287 case X86_XCPT_UD:
3288 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUd);
3289 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_UD),
3290 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
3291
3292 if (nemHcWinIsInterestingUndefinedOpcode(pMsg->InstructionByteCount, pMsg->InstructionBytes,
3293 pMsg->Header.ExecutionState.EferLma && pMsg->Header.CsSegment.Long ))
3294 {
3295 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pMsg->Header.Rip,
3296 pMsg->InstructionBytes, pMsg->InstructionByteCount);
3297 Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD -> emulated -> %Rrc\n",
3298 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip,
3299 nemHCWinExecStateToLogStr(&pMsg->Header), VBOXSTRICTRC_VAL(rcStrict) ));
3300 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUdHandled);
3301 return rcStrict;
3302 }
3303 Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD [%.*Rhxs] -> re-injected\n", pVCpu->idCpu, pMsg->Header.CsSegment.Selector,
3304 pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), pMsg->InstructionByteCount, pMsg->InstructionBytes ));
3305 break;
3306
3307 /*
3308 * Filter debug exceptions.
3309 */
3310 case X86_XCPT_DB:
3311 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionDb);
3312 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_DB),
3313 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
3314 Log4(("XcptExit/%u: %04x:%08RX64/%s: #DB - TODO\n",
3315 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header) ));
3316 break;
3317
3318 case X86_XCPT_BP:
3319 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionBp);
3320 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_BP),
3321 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
3322 Log4(("XcptExit/%u: %04x:%08RX64/%s: #BP - TODO - %u\n", pVCpu->idCpu, pMsg->Header.CsSegment.Selector,
3323 pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), pMsg->Header.InstructionLength));
3324 enmEvtType = TRPM_SOFTWARE_INT; /* We're at the INT3 instruction, not after it. */
3325 break;
3326
3327 /* This shouldn't happen. */
3328 default:
3329 AssertLogRelMsgFailedReturn(("ExceptionVector=%#x\n", pMsg->ExceptionVector), VERR_IEM_IPE_6);
3330 }
3331
3332 /*
3333 * Inject it.
3334 */
3335 rcStrict = IEMInjectTrap(pVCpu, pMsg->ExceptionVector, enmEvtType, pMsg->ErrorCode,
3336 pMsg->ExceptionParameter /*??*/, pMsg->Header.InstructionLength);
3337 Log4(("XcptExit/%u: %04x:%08RX64/%s: %#u -> injected -> %Rrc\n",
3338 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip,
3339 nemHCWinExecStateToLogStr(&pMsg->Header), pMsg->ExceptionVector, VBOXSTRICTRC_VAL(rcStrict) ));
3340 return rcStrict;
3341}
3342#elif defined(IN_RING3)
3343/**
3344 * Deals with exception exits (WHvRunVpExitReasonException).
3345 *
3346 * @returns Strict VBox status code.
3347 * @param pVM The cross context VM structure.
3348 * @param pVCpu The cross context per CPU structure.
3349 * @param pExit The VM exit information to handle.
3350 * @sa nemHCWinHandleMessageException
3351 */
3352NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExitException(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
3353{
3354 /*
3355 * Get most of the register state since we'll end up making IEM inject the
3356 * event. The exception isn't normally flagged as a pending event, so duh.
3357 *
3358 * Note! We can optimize this later with event injection.
3359 */
3360 Log4(("XcptExit/%u: %04x:%08RX64/%s: %x errcd=%#x parm=%RX64\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3361 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpException.ExceptionType,
3362 pExit->VpException.ErrorCode, pExit->VpException.ExceptionParameter ));
3363 nemR3WinCopyStateFromExceptionMessage(pVCpu, pExit, true /*fClearXcpt*/);
3364 uint64_t fWhat = NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM;
3365 if (pExit->VpException.ExceptionType == X86_XCPT_DB)
3366 fWhat |= CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_DR6;
3367 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL, fWhat, "Xcpt");
3368 if (rcStrict != VINF_SUCCESS)
3369 return rcStrict;
3370
3371 /*
3372 * Handle the intercept.
3373 */
3374 TRPMEVENT enmEvtType = TRPM_TRAP;
3375 switch (pExit->VpException.ExceptionType)
3376 {
3377 /*
3378 * We get undefined opcodes on VMMCALL(AMD) & VMCALL(Intel) instructions
3379 * and need to turn them over to GIM.
3380 *
3381 * Note! We do not check fGIMTrapXcptUD here ASSUMING that GIM only wants
3382 * #UD for handling non-native hypercall instructions. (IEM will
3383 * decode both and let the GIM provider decide whether to accept it.)
3384 */
3385 case X86_XCPT_UD:
3386 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUd);
3387 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_UD),
3388 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
3389 if (nemHcWinIsInterestingUndefinedOpcode(pExit->VpException.InstructionByteCount, pExit->VpException.InstructionBytes,
3390 pExit->VpContext.ExecutionState.EferLma && pExit->VpContext.Cs.Long ))
3391 {
3392 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pExit->VpContext.Rip,
3393 pExit->VpException.InstructionBytes,
3394 pExit->VpException.InstructionByteCount);
3395 Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD -> emulated -> %Rrc\n",
3396 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip,
3397 nemR3WinExecStateToLogStr(&pExit->VpContext), VBOXSTRICTRC_VAL(rcStrict) ));
3398 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUdHandled);
3399 return rcStrict;
3400 }
3401
3402 Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD [%.*Rhxs] -> re-injected\n", pVCpu->idCpu,
3403 pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
3404 pExit->VpException.InstructionByteCount, pExit->VpException.InstructionBytes ));
3405 break;
3406
3407 /*
3408 * Filter debug exceptions.
3409 */
3410 case X86_XCPT_DB:
3411 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionDb);
3412 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_DB),
3413 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
3414 Log4(("XcptExit/%u: %04x:%08RX64/%s: #DB - TODO\n",
3415 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext) ));
3416 break;
3417
3418 case X86_XCPT_BP:
3419 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionBp);
3420 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_BP),
3421 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
3422 Log4(("XcptExit/%u: %04x:%08RX64/%s: #BP - TODO - %u\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3423 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.InstructionLength));
3424 enmEvtType = TRPM_SOFTWARE_INT; /* We're at the INT3 instruction, not after it. */
3425 break;
3426
3427 /* This shouldn't happen. */
3428 default:
3429 AssertLogRelMsgFailedReturn(("ExceptionType=%#x\n", pExit->VpException.ExceptionType), VERR_IEM_IPE_6);
3430 }
3431
3432 /*
3433 * Inject it.
3434 */
3435 rcStrict = IEMInjectTrap(pVCpu, pExit->VpException.ExceptionType, enmEvtType, pExit->VpException.ErrorCode,
3436 pExit->VpException.ExceptionParameter /*??*/, pExit->VpContext.InstructionLength);
3437 Log4(("XcptExit/%u: %04x:%08RX64/%s: %#u -> injected -> %Rrc\n",
3438 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip,
3439 nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpException.ExceptionType, VBOXSTRICTRC_VAL(rcStrict) ));
3440
3441 RT_NOREF_PV(pVM);
3442 return rcStrict;
3443}
3444#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
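/*
 * Note: Both exception handlers above make the same injection decision: #BP
 * is reported with RIP still at the INT3 instruction, so it must go in as a
 * software interrupt, while the other intercepted vectors are injected as
 * traps.  A minimal sketch of that mapping (plain C, simplified):
 */
#if 0 /* illustrative sketch only */
# include <stdint.h>

typedef enum TOYEVTTYPE { TOYEVT_TRAP, TOYEVT_SOFTWARE_INT } TOYEVTTYPE;

static TOYEVTTYPE toyXcptToEventType(uint8_t bVector)
{
    /* Vector 3 is #BP; RIP points at the INT3, not after it. */
    return bVector == 3 ? TOYEVT_SOFTWARE_INT : TOYEVT_TRAP;
}
#endif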
3445
3446
3447#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
3448/**
3449 * Deals with unrecoverable exception (triple fault).
3450 *
3451 * Seen WRMSR 0x201 (IA32_MTRR_PHYSMASK0) writes from grub / debian9 ending up
3452 * here too. So we'll leave it to IEM to decide.
3453 *
3454 * @returns Strict VBox status code.
3455 * @param pVCpu The cross context per CPU structure.
3456 * @param pMsgHdr The message header.
3457 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
3458 * @sa nemR3WinHandleExitUnrecoverableException
3459 */
3460NEM_TMPL_STATIC VBOXSTRICTRC
3461nemHCWinHandleMessageUnrecoverableException(PVMCPU pVCpu, HV_X64_INTERCEPT_MESSAGE_HEADER const *pMsgHdr, PGVMCPU pGVCpu)
3462{
3463 /* Check message register value sanity. */
3464 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, pGVCpu, HvX64RegisterCs, pMsgHdr->CsSegment);
3465 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRip, pMsgHdr->Rip);
3466 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRflags, pMsgHdr->Rflags);
3467 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterCr8, (uint64_t)pMsgHdr->Cr8);
3468
3469# if 0
3470 /*
3471 * Just copy the state we've got and handle it in the loop for now.
3472 */
3473 nemHCWinCopyStateFromX64Header(pVCpu, pMsgHdr);
3474 Log(("TripleExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT\n",
3475 pVCpu->idCpu, pMsgHdr->CsSegment.Selector, pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags));
3476 return VINF_EM_TRIPLE_FAULT;
3477# else
3478 /*
3479 * Let IEM decide whether this is really it.
3480 */
3481 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_UNRECOVERABLE_EXCEPTION),
3482 pMsgHdr->Rip + pMsgHdr->CsSegment.Base, ASMReadTSC());
3483 nemHCWinCopyStateFromX64Header(pVCpu, pMsgHdr);
3484 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu,
3485 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL, "TripleExit");
3486 if (rcStrict == VINF_SUCCESS)
3487 {
3488 rcStrict = IEMExecOne(pVCpu);
3489 if (rcStrict == VINF_SUCCESS)
3490 {
3491 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_SUCCESS\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
3492 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags ));
3493 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT; /* Make sure to reset pending #DB(0). */
3494 return VINF_SUCCESS;
3495 }
3496 if (rcStrict == VINF_EM_TRIPLE_FAULT)
3497 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT!\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
3498 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags ));
3499 else
3500 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (IEMExecOne)\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
3501 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
3502 }
3503 else
3504 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (state import)\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
3505 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
3506 return rcStrict;
3507# endif
3508}
3509#elif defined(IN_RING3)
3510/**
3511 * Deals with unrecoverable exception exits (WHvRunVpExitReasonUnrecoverableException).
3512 *
3513 * @returns Strict VBox status code.
3514 * @param pVM The cross context VM structure.
3515 * @param pVCpu The cross context per CPU structure.
3516 * @param pExit The VM exit information to handle.
3517 * @sa nemHCWinHandleMessageUnrecoverableException
3518 */
3519NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExitUnrecoverableException(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
3520{
3521# if 0
3522 /*
3523 * Just copy the state we've got and handle it in the loop for now.
3524 */
3525 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
3526 Log(("TripleExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3527 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags));
3528 RT_NOREF_PV(pVM);
3529 return VINF_EM_TRIPLE_FAULT;
3530# else
3531 /*
3532 * Let IEM decide whether this is really it.
3533 */
3534 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_UNRECOVERABLE_EXCEPTION),
3535 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
3536 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
3537 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL,
3538 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL, "TripleExit");
3539 if (rcStrict == VINF_SUCCESS)
3540 {
3541 rcStrict = IEMExecOne(pVCpu);
3542 if (rcStrict == VINF_SUCCESS)
3543 {
3544 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_SUCCESS\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3545 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags));
3546 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT; /* Make sure to reset pending #DB(0). */
3547 return VINF_SUCCESS;
3548 }
3549 if (rcStrict == VINF_EM_TRIPLE_FAULT)
3550 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT!\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3551 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags ));
3552 else
3553 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (IEMExecOne)\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3554 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
3555 }
3556 else
3557 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (state import)\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3558 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
3559 RT_NOREF_PV(pVM);
3560 return rcStrict;
3561# endif
3562
3563}
3564#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
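/*
 * Note: The emulate-first fallback in both handlers above is deliberate:
 * exits can land here on instructions the hypervisor merely refuses to run
 * (see the MTRR note), so one instruction is emulated and only a triple
 * fault diagnosed by the emulator itself is treated as fatal.  Sketch of
 * that shape with toy status codes and a stand-in emulator:
 */
#if 0 /* illustrative sketch only */
# define TOY_SUCCESS        0
# define TOY_TRIPLE_FAULT  (-1)

static int toyEmulateOne(void) { return TOY_SUCCESS; }  /* IEMExecOne stand-in */

static int toyHandleUnrecoverable(void)
{
    int rc = toyEmulateOne();
    if (rc == TOY_SUCCESS)
        return TOY_SUCCESS;     /* spurious exit: the guest simply continues */
    return rc;                  /* trust the emulator's verdict, incl. triple fault */
}
#endif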
3565
3566
3567#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
3568/**
3569 * Handles messages (VM exits).
3570 *
3571 * @returns Strict VBox status code.
3572 * @param pVM The cross context VM structure.
3573 * @param pVCpu The cross context per CPU structure.
3574 * @param pMappingHeader The message slot mapping.
3575 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
3576 * @sa nemR3WinHandleExit
3577 */
3578NEM_TMPL_STATIC VBOXSTRICTRC
3579nemHCWinHandleMessage(PVM pVM, PVMCPU pVCpu, VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader, PGVMCPU pGVCpu)
3580{
3581 if (pMappingHeader->enmVidMsgType == VidMessageHypervisorMessage)
3582 {
3583 AssertMsg(pMappingHeader->cbMessage == HV_MESSAGE_SIZE, ("%#x\n", pMappingHeader->cbMessage));
3584 HV_MESSAGE const *pMsg = (HV_MESSAGE const *)(pMappingHeader + 1);
3585 switch (pMsg->Header.MessageType)
3586 {
3587 case HvMessageTypeUnmappedGpa:
3588 Assert(pMsg->Header.PayloadSize == RT_UOFFSETOF(HV_X64_MEMORY_INTERCEPT_MESSAGE, DsSegment));
3589 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemUnmapped);
3590 return nemHCWinHandleMessageMemory(pVM, pVCpu, &pMsg->X64MemoryIntercept, pGVCpu);
3591
3592 case HvMessageTypeGpaIntercept:
3593 Assert(pMsg->Header.PayloadSize == RT_UOFFSETOF(HV_X64_MEMORY_INTERCEPT_MESSAGE, DsSegment));
3594 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemIntercept);
3595 return nemHCWinHandleMessageMemory(pVM, pVCpu, &pMsg->X64MemoryIntercept, pGVCpu);
3596
3597 case HvMessageTypeX64IoPortIntercept:
3598 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64IoPortIntercept));
3599 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitPortIo);
3600 return nemHCWinHandleMessageIoPort(pVM, pVCpu, &pMsg->X64IoPortIntercept, pGVCpu);
3601
3602 case HvMessageTypeX64Halt:
3603 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitHalt);
3604 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_HALT),
3605 pMsg->X64InterceptHeader.Rip + pMsg->X64InterceptHeader.CsSegment.Base, ASMReadTSC());
3606 Log4(("HaltExit\n"));
3607 return VINF_EM_HALT;
3608
3609 case HvMessageTypeX64InterruptWindow:
3610 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64InterruptWindow));
3611 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitInterruptWindow);
3612 return nemHCWinHandleMessageInterruptWindow(pVM, pVCpu, &pMsg->X64InterruptWindow, pGVCpu);
3613
3614 case HvMessageTypeX64CpuidIntercept:
3615 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64CpuIdIntercept));
3616 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitCpuId);
3617 return nemHCWinHandleMessageCpuId(pVM, pVCpu, &pMsg->X64CpuIdIntercept, pGVCpu);
3618
3619 case HvMessageTypeX64MsrIntercept:
3620 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64MsrIntercept));
3621 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMsr);
3622 return nemHCWinHandleMessageMsr(pVCpu, &pMsg->X64MsrIntercept, pGVCpu);
3623
3624 case HvMessageTypeX64ExceptionIntercept:
3625 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64ExceptionIntercept));
3626 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitException);
3627 return nemHCWinHandleMessageException(pVCpu, &pMsg->X64ExceptionIntercept, pGVCpu);
3628
3629 case HvMessageTypeUnrecoverableException:
3630 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64InterceptHeader));
3631 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitUnrecoverable);
3632 return nemHCWinHandleMessageUnrecoverableException(pVCpu, &pMsg->X64InterceptHeader, pGVCpu);
3633
3634 case HvMessageTypeInvalidVpRegisterValue:
3635 case HvMessageTypeUnsupportedFeature:
3636 case HvMessageTypeTlbPageSizeMismatch:
3637 LogRel(("Unimplemented msg:\n%.*Rhxd\n", (int)sizeof(*pMsg), pMsg));
3638 AssertLogRelMsgFailedReturn(("Message type %#x not implemented!\n%.32Rhxd\n", pMsg->Header.MessageType, pMsg),
3639 VERR_NEM_IPE_3);
3640
3641 case HvMessageTypeX64ApicEoi:
3642 case HvMessageTypeX64LegacyFpError:
3643 case HvMessageTypeX64RegisterIntercept:
3644 case HvMessageTypeApicEoi:
3645 case HvMessageTypeFerrAsserted:
3646 case HvMessageTypeEventLogBufferComplete:
3647 case HvMessageTimerExpired:
3648 LogRel(("Unexpected msg:\n%.*Rhxd\n", (int)sizeof(*pMsg), pMsg));
3649 AssertLogRelMsgFailedReturn(("Unexpected message on CPU #%u: %#x\n", pVCpu->idCpu, pMsg->Header.MessageType),
3650 VERR_NEM_IPE_3);
3651
3652 default:
3653 LogRel(("Unknown msg:\n%.*Rhxd\n", (int)sizeof(*pMsg), pMsg));
3654 AssertLogRelMsgFailedReturn(("Unknown message on CPU #%u: %#x\n", pVCpu->idCpu, pMsg->Header.MessageType),
3655 VERR_NEM_IPE_3);
3656 }
3657 }
3658 else
3659 AssertLogRelMsgFailedReturn(("Unexpected VID message type on CPU #%u: %#x LB %u\n",
3660 pVCpu->idCpu, pMappingHeader->enmVidMsgType, pMappingHeader->cbMessage),
3661 VERR_NEM_IPE_4);
3662}
3663#elif defined(IN_RING3)
3664/**
3665 * Handles VM exits.
3666 *
3667 * @returns Strict VBox status code.
3668 * @param pVM The cross context VM structure.
3669 * @param pVCpu The cross context per CPU structure.
3670 * @param pExit The VM exit information to handle.
3671 * @sa nemHCWinHandleMessage
3672 */
3673NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExit(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
3674{
3675 switch (pExit->ExitReason)
3676 {
3677 case WHvRunVpExitReasonMemoryAccess:
3678 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemUnmapped);
3679 return nemR3WinHandleExitMemory(pVM, pVCpu, pExit);
3680
3681 case WHvRunVpExitReasonX64IoPortAccess:
3682 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitPortIo);
3683 return nemR3WinHandleExitIoPort(pVM, pVCpu, pExit);
3684
3685 case WHvRunVpExitReasonX64Halt:
3686 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitHalt);
3687 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_HALT),
3688 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
3689 Log4(("HaltExit\n"));
3690 return VINF_EM_HALT;
3691
3692 case WHvRunVpExitReasonCanceled:
3693 return VINF_SUCCESS;
3694
3695 case WHvRunVpExitReasonX64InterruptWindow:
3696 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitInterruptWindow);
3697 return nemR3WinHandleExitInterruptWindow(pVM, pVCpu, pExit);
3698
3699 case WHvRunVpExitReasonX64Cpuid:
3700 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitCpuId);
3701 return nemR3WinHandleExitCpuId(pVM, pVCpu, pExit);
3702
3703 case WHvRunVpExitReasonX64MsrAccess:
3704 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMsr);
3705 return nemR3WinHandleExitMsr(pVM, pVCpu, pExit);
3706
3707 case WHvRunVpExitReasonException:
3708 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitException);
3709 return nemR3WinHandleExitException(pVM, pVCpu, pExit);
3710
3711 case WHvRunVpExitReasonUnrecoverableException:
3712 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitUnrecoverable);
3713 return nemR3WinHandleExitUnrecoverableException(pVM, pVCpu, pExit);
3714
3715 case WHvRunVpExitReasonUnsupportedFeature:
3716 case WHvRunVpExitReasonInvalidVpRegisterValue:
3717 LogRel(("Unimplemented exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit));
3718 AssertLogRelMsgFailedReturn(("Unexpected exit on CPU #%u: %#x\n%.32Rhxd\n",
3719 pVCpu->idCpu, pExit->ExitReason, pExit), VERR_NEM_IPE_3);
3720
3721 /* Undesired exits: */
3722 case WHvRunVpExitReasonNone:
3723 default:
3724 LogRel(("Unknown exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit));
3725 AssertLogRelMsgFailedReturn(("Unknown exit on CPU #%u: %#x!\n", pVCpu->idCpu, pExit->ExitReason), VERR_NEM_IPE_3);
3726 }
3727}
3728#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
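/*
 * Note: Both dispatchers above are a straight mapping from the hypervisor's
 * exit identifier to a handler, with a statistics bump per exit type and
 * unknown identifiers treated as internal errors.  A minimal standalone
 * sketch of that shape (toy identifiers and status codes):
 */
#if 0 /* illustrative sketch only */
# include <stdint.h>

# define TOY_EXIT_HALT      0x100
# define TOY_EXIT_IO_PORT   0x101
# define TOY_VERR_IPE       (-1)

static int toyHandleHalt(void)   { return 1; /* VINF_EM_HALT stand-in */ }
static int toyHandleIoPort(void) { return 0; /* VINF_SUCCESS stand-in */ }

static int toyDispatchExit(uint32_t uExitId)
{
    switch (uExitId)
    {
        case TOY_EXIT_HALT:     return toyHandleHalt();
        case TOY_EXIT_IO_PORT:  return toyHandleIoPort();
        default:                return TOY_VERR_IPE; /* unknown exits are fatal */
    }
}
#endif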
3729
3730
3731#ifdef IN_RING0
3732/**
3733 * Perform an I/O control operation on the partition handle (VID.SYS),
3734 * restarting on alert-like behaviour.
3735 *
3736 * @returns NT status code.
3737 * @param pGVM The ring-0 VM structure.
3738 * @param pGVCpu The ring-0 CPU structure.
3739 * @param pVCpu The calling cross context CPU structure.
3740 * @param fFlags The wait flags.
3741 * @param cMillies The timeout in milliseconds.
3742 */
3743static NTSTATUS nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(PGVM pGVM, PGVMCPU pGVCpu, PVMCPU pVCpu,
3744 uint32_t fFlags, uint32_t cMillies)
3745{
3746 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu = pGVCpu->idCpu;
3747 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags = fFlags;
3748 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = cMillies;
3749 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
3750 &pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
3751 sizeof(pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext),
3752 NULL, 0);
3753 if (rcNt == STATUS_SUCCESS)
3754 { /* likely */ }
3755 /*
3756 * Generally, if we get down here, we have been interrupted between ACK'ing
3757 * a message and waiting for the next due to a NtAlertThread call. So, we
3758 * should stop ACK'ing the previous message and get on with waiting for the next.
3759 * See similar stuff in nemHCWinRunGC().
3760 */
3761 else if ( rcNt == STATUS_TIMEOUT
3762 || rcNt == STATUS_ALERTED /* just in case */
3763 || rcNt == STATUS_KERNEL_APC /* just in case */
3764 || rcNt == STATUS_USER_APC /* just in case */)
3765 {
3766 DBGFTRACE_CUSTOM(pVCpu->CTX_SUFF(pVM), "IoCtlMessageSlotHandleAndGetNextRestart/1 %#x (f=%#x)", rcNt, fFlags);
3767 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuPendingAlerts);
3768 Assert(fFlags & VID_MSHAGN_F_GET_NEXT_MESSAGE);
3769
3770 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu = pVCpu->idCpu;
3771 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags = fFlags & ~VID_MSHAGN_F_HANDLE_MESSAGE;
3772 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = cMillies;
3773 rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
3774 &pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
3775 sizeof(pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext),
3776 NULL, 0);
3777 DBGFTRACE_CUSTOM(pVCpu->CTX_SUFF(pVM), "IoCtlMessageSlotHandleAndGetNextRestart/2 %#x", rcNt);
3778 }
3779 return rcNt;
3780}
3781
3782#endif /* IN_RING0 */
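/*
 * Note: The restart above is a general pattern for alertable waits: when the
 * wait is interrupted (alert/APC) after the previous message was already
 * ACK'ed, the retry must drop the handle-message flag and only wait,
 * otherwise the same message would be acknowledged twice.  Self-contained
 * sketch with toy status codes and flags (not the VID.SYS interface):
 */
#if 0 /* illustrative sketch only */
# include <stdint.h>

# define TOY_STATUS_SUCCESS    0
# define TOY_STATUS_ALERTED    1
# define TOY_F_HANDLE_MESSAGE  UINT32_C(0x1)   /* ACK the previous message */
# define TOY_F_GET_NEXT        UINT32_C(0x2)   /* wait for the next message */

static int toyWait(uint32_t fFlags, uint32_t cMillies)  /* ioctl stand-in */
{
    (void)fFlags; (void)cMillies;
    return TOY_STATUS_SUCCESS;
}

static int toyWaitWithRestart(uint32_t fFlags, uint32_t cMillies)
{
    int rc = toyWait(fFlags, cMillies);
    if (rc == TOY_STATUS_ALERTED)
        /* Interrupted between ACK and wait: don't ACK again, just wait. */
        rc = toyWait(fFlags & ~TOY_F_HANDLE_MESSAGE, cMillies);
    return rc;
}
#endif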
3783
3784
3785#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
3786/**
3787 * Worker for nemHCWinRunGC that stops the execution on the way out.
3788 *
3789 * The CPU was running the last time we checked, so there are no messages that
3790 * need to be marked handled/whatever. Caller checks this.
3791 *
3792 * @returns rcStrict on success, error status on failure.
3793 * @param pVM The cross context VM structure.
3794 * @param pVCpu The cross context per CPU structure.
3795 * @param rcStrict The nemHCWinRunGC return status. This is a little
3796 * bit unnecessary, except in internal error cases,
3797 * since we won't need to stop the CPU if we took an
3798 * exit.
3799 * @param pMappingHeader The message slot mapping.
3800 * @param pGVM The global (ring-0) VM structure (NULL in r3).
3801 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
3802 */
3803NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinStopCpu(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict,
3804 VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader,
3805 PGVM pGVM, PGVMCPU pGVCpu)
3806{
3807# ifdef DBGFTRACE_ENABLED
3808 HV_MESSAGE const volatile *pMsgForTrace = (HV_MESSAGE const volatile *)(pMappingHeader + 1);
3809# endif
3810
3811 /*
3812 * Try stopping the processor. If we're lucky we manage to do this before it
3813 * does another VM exit.
3814 */
3815 DBGFTRACE_CUSTOM(pVM, "nemStop#0");
3816# ifdef IN_RING0
3817 pVCpu->nem.s.uIoCtlBuf.idCpu = pGVCpu->idCpu;
3818 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlStopVirtualProcessor.uFunction,
3819 &pVCpu->nem.s.uIoCtlBuf.idCpu, sizeof(pVCpu->nem.s.uIoCtlBuf.idCpu),
3820 NULL, 0);
3821 if (NT_SUCCESS(rcNt))
3822 {
3823 DBGFTRACE_CUSTOM(pVM, "nemStop#0: okay (%#x)", rcNt);
3824 Log8(("nemHCWinStopCpu: Stopping CPU succeeded (cpu status %u)\n", nemHCWinCpuGetRunningStatus(pVCpu) ));
3825 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuSuccess);
3826 return rcStrict;
3827 }
3828# else
3829 BOOL fRet = VidStopVirtualProcessor(pVM->nem.s.hPartitionDevice, pVCpu->idCpu);
3830 if (fRet)
3831 {
3832 DBGFTRACE_CUSTOM(pVM, "nemStop#0: okay");
3833 Log8(("nemHCWinStopCpu: Stopping CPU succeeded (cpu status %u)\n", nemHCWinCpuGetRunningStatus(pVCpu) ));
3834 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuSuccess);
3835 return rcStrict;
3836 }
3837 RT_NOREF(pGVM, pGVCpu);
3838# endif
3839
3840 /*
3841 * Dang. The CPU stopped by itself and we've got a couple of messages to deal with.
3842 */
3843# ifdef IN_RING0
3844 DBGFTRACE_CUSTOM(pVM, "nemStop#0: pending (%#x)", rcNt);
3845 AssertLogRelMsgReturn(rcNt == ERROR_VID_STOP_PENDING, ("rcNt=%#x\n", rcNt),
3846 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3847# else
3848 DWORD dwErr = RTNtLastErrorValue();
3849 DBGFTRACE_CUSTOM(pVM, "nemStop#0: pending (%#x)", dwErr);
3850 AssertLogRelMsgReturn(dwErr == ERROR_VID_STOP_PENDING, ("dwErr=%#u (%#x)\n", dwErr, dwErr),
3851 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3852# endif
3853 Log8(("nemHCWinStopCpu: Stopping CPU #%u pending...\n", pVCpu->idCpu));
3854 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuPending);
3855
3856 /*
3857 * First message: Exit or similar, sometimes VidMessageStopRequestComplete.
3858 * Note! We can safely ASSUME that rcStrict isn't an important informational status code here.
3859 */
3860# ifdef IN_RING0
3861 rcNt = nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(pGVM, pGVCpu, pVCpu, VID_MSHAGN_F_GET_NEXT_MESSAGE, 30000 /*ms*/);
3862 DBGFTRACE_CUSTOM(pVM, "nemStop#1: %#x / %#x %#x %#x", rcNt, pMappingHeader->enmVidMsgType, pMappingHeader->cbMessage,
3863 pMsgForTrace->Header.MessageType);
3864 AssertLogRelMsgReturn(rcNt == STATUS_SUCCESS,
3865 ("1st VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %#x\n", rcNt),
3866 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3867# else
3868 BOOL fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu,
3869 VID_MSHAGN_F_GET_NEXT_MESSAGE, 30000 /*ms*/);
3870 DBGFTRACE_CUSTOM(pVM, "nemStop#1: %d+%#x / %#x %#x %#x", fWait, RTNtLastErrorValue(), pMappingHeader->enmVidMsgType,
3871 pMappingHeader->cbMessage, pMsgForTrace->Header.MessageType);
3872 AssertLogRelMsgReturn(fWait, ("1st VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()),
3873 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3874# endif
3875
3876 VID_MESSAGE_TYPE enmVidMsgType = pMappingHeader->enmVidMsgType;
3877 if (enmVidMsgType != VidMessageStopRequestComplete)
3878 {
3879 VBOXSTRICTRC rcStrict2 = nemHCWinHandleMessage(pVM, pVCpu, pMappingHeader, pGVCpu);
3880 if (rcStrict2 != VINF_SUCCESS && RT_SUCCESS(rcStrict))
3881 rcStrict = rcStrict2;
3882 DBGFTRACE_CUSTOM(pVM, "nemStop#1: handled %#x -> %d", pMsgForTrace->Header.MessageType, VBOXSTRICTRC_VAL(rcStrict));
3883
3884 /*
3885 * Mark it as handled and get the stop request completed message, then mark
3886 * that as handled too. The CPU is then back in the fully stopped state.
3887 */
3888# ifdef IN_RING0
3889 rcNt = nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(pGVM, pGVCpu, pVCpu,
3890 VID_MSHAGN_F_HANDLE_MESSAGE | VID_MSHAGN_F_GET_NEXT_MESSAGE,
3891 30000 /*ms*/);
3892 DBGFTRACE_CUSTOM(pVM, "nemStop#2: %#x / %#x %#x %#x", rcNt, pMappingHeader->enmVidMsgType, pMappingHeader->cbMessage,
3893 pMsgForTrace->Header.MessageType);
3894 AssertLogRelMsgReturn(rcNt == STATUS_SUCCESS,
3895 ("2nd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %#x\n", rcNt),
3896 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3897# else
3898 fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu,
3899 VID_MSHAGN_F_HANDLE_MESSAGE | VID_MSHAGN_F_GET_NEXT_MESSAGE, 30000 /*ms*/);
3900 DBGFTRACE_CUSTOM(pVM, "nemStop#2: %d+%#x / %#x %#x %#x", fWait, RTNtLastErrorValue(), pMappingHeader->enmVidMsgType,
3901 pMappingHeader->cbMessage, pMsgForTrace->Header.MessageType);
3902 AssertLogRelMsgReturn(fWait, ("2nd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()),
3903 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3904# endif
3905
3906 /* It should be a stop request completed message. */
3907 enmVidMsgType = pMappingHeader->enmVidMsgType;
3908 AssertLogRelMsgReturn(enmVidMsgType == VidMessageStopRequestComplete,
3909 ("Unexpected 2nd message following ERROR_VID_STOP_PENDING: %#x LB %#x\n",
3910 enmVidMsgType, pMappingHeader->cbMessage),
3911 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3912
3913 /*
3914 * Mark the VidMessageStopRequestComplete message as handled.
3915 */
3916# ifdef IN_RING0
3917 rcNt = nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(pGVM, pGVCpu, pVCpu, VID_MSHAGN_F_HANDLE_MESSAGE, 30000 /*ms*/);
3918 DBGFTRACE_CUSTOM(pVM, "nemStop#3: %#x / %#x %#x %#x", rcNt, pMappingHeader->enmVidMsgType,
3919 pMappingHeader->cbMessage, pMsgForTrace->Header.MessageType);
3920 AssertLogRelMsgReturn(rcNt == STATUS_SUCCESS,
3921 ("3rd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %#x\n", rcNt),
3922 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3923# else
3924 fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu, VID_MSHAGN_F_HANDLE_MESSAGE, 30000 /*ms*/);
3925 DBGFTRACE_CUSTOM(pVM, "nemStop#3: %d+%#x / %#x %#x %#x", fWait, RTNtLastErrorValue(), pMappingHeader->enmVidMsgType,
3926 pMappingHeader->cbMessage, pMsgForTrace->Header.MessageType);
3927 AssertLogRelMsgReturn(fWait, ("3rd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()),
3928 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3929# endif
3930 Log8(("nemHCWinStopCpu: Stopped the CPU (rcStrict=%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict) ));
3931 }
3932 else
3933 {
3934 /** @todo I'm not so sure about this now... */
3935 DBGFTRACE_CUSTOM(pVM, "nemStop#9: %#x %#x %#x", pMappingHeader->enmVidMsgType,
3936 pMappingHeader->cbMessage, pMsgForTrace->Header.MessageType);
3937 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuPendingOdd);
3938 Log8(("nemHCWinStopCpu: Stopped the CPU (rcStrict=%Rrc) - 1st VidMessageSlotHandleAndGetNext got VidMessageStopRequestComplete.\n",
3939 VBOXSTRICTRC_VAL(rcStrict) ));
3940 }
3941 return rcStrict;
3942}
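
/*
 * Illustrative summary (annotation, not part of the original source) of the
 * pending-stop handshake above, for the case where the CPU produced an exit
 * before the stop request took effect:
 *
 *    VidStopVirtualProcessor       -> ERROR_VID_STOP_PENDING
 *    MSHAGN(GET_NEXT)              -> exit message, which we handle normally
 *    MSHAGN(HANDLE | GET_NEXT)     -> VidMessageStopRequestComplete
 *    MSHAGN(HANDLE)                -> CPU is fully stopped
 *
 * If the first MSHAGN already returns VidMessageStopRequestComplete, the CPU
 * stopped without producing another exit and we take the short path above.
 */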
3943#endif /* NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
3944
3945#if defined(NEM_WIN_TEMPLATE_MODE_OWN_RUN_API) || defined(IN_RING3)
3946
3947/**
3948 * Deals with pending interrupt related force flags, may inject interrupt.
3949 *
3950 * @returns VBox strict status code.
3951 * @param pVM The cross context VM structure.
3952 * @param pVCpu The cross context per CPU structure.
3953 * @param pGVCpu The global (ring-0) per CPU structure.
3954 * @param pfInterruptWindows Where to return interrupt window flags.
3955 */
3956NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleInterruptFF(PVM pVM, PVMCPU pVCpu, PGVMCPU pGVCpu, uint8_t *pfInterruptWindows)
3957{
3958 Assert(!TRPMHasTrap(pVCpu));
3959 RT_NOREF_PV(pVM);
3960
3961 /*
3962 * First update APIC. We ASSUME this won't need TPR/CR8.
3963 */
3964 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
3965 {
3966 APICUpdatePendingInterrupts(pVCpu);
3967 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
3968 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
3969 return VINF_SUCCESS;
3970 }
3971
3972 /*
3973 * We don't currently implement SMIs.
3974 */
3975 AssertReturn(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_SMI), VERR_NEM_IPE_0);
3976
3977 /*
3978 * Check if we've got the minimum state required for deciding whether we
3979 * can inject interrupts and NMIs. If we don't have it, get all we might require
3980 * for injection via IEM.
3981 */
3982 bool const fPendingNmi = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI);
3983 uint64_t fNeedExtrn = CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS
3984 | (fPendingNmi ? CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI : 0);
3985 if (pVCpu->cpum.GstCtx.fExtrn & fNeedExtrn)
3986 {
3987 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu,
3988 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "IntFF");
3989 if (rcStrict != VINF_SUCCESS)
3990 return rcStrict;
3991 }
3992 bool const fInhibitInterrupts = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
3993 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip;
3994
3995 /*
3996 * NMI? Try deliver it first.
3997 */
3998 if (fPendingNmi)
3999 {
4000 if ( !fInhibitInterrupts
4001 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
4002 {
4003 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu,
4004 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "NMI");
4005 if (rcStrict == VINF_SUCCESS)
4006 {
4007 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4008 rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_NMI, TRPM_HARDWARE_INT, 0, 0, 0);
4009 Log8(("Injected NMI on %u (%d)\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
4010 }
4011 return rcStrict;
4012 }
4013 *pfInterruptWindows |= NEM_WIN_INTW_F_NMI;
4014 Log8(("NMI window pending on %u\n", pVCpu->idCpu));
4015 }
4016
4017 /*
4018 * APIC or PIC interrupt?
4019 */
4020 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
4021 {
4022 if ( !fInhibitInterrupts
4023 && pVCpu->cpum.GstCtx.rflags.Bits.u1IF)
4024 {
4025 AssertCompile(NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT & CPUMCTX_EXTRN_APIC_TPR);
4026 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu,
4027 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "IRQ");
4028 if (rcStrict == VINF_SUCCESS)
4029 {
4030 uint8_t bInterrupt;
4031 int rc = PDMGetInterrupt(pVCpu, &bInterrupt);
4032 if (RT_SUCCESS(rc))
4033 {
4034 rcStrict = IEMInjectTrap(pVCpu, bInterrupt, TRPM_HARDWARE_INT, 0, 0, 0);
4035 Log8(("Injected interrupt %#x on %u (%d)\n", bInterrupt, pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
4036 }
4037 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
4038 {
4039 *pfInterruptWindows |= (bInterrupt >> 4 /*??*/) << NEM_WIN_INTW_F_PRIO_SHIFT;
4040 Log8(("VERR_APIC_INTR_MASKED_BY_TPR: *pfInterruptWindows=%#x\n", *pfInterruptWindows));
4041 }
4042 else
4043 Log8(("PDMGetInterrupt failed -> %d\n", rc));
4044 }
4045 return rcStrict;
4046 }
4047 *pfInterruptWindows |= NEM_WIN_INTW_F_REGULAR;
4048 Log8(("Interrupt window pending on %u\n", pVCpu->idCpu));
4049 }
4050
4051 return VINF_SUCCESS;
4052}
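
/*
 * Worked example (annotation, not part of the original source): on x86 the
 * interrupt priority class is the upper nibble of the vector, which is what
 * the (bInterrupt >> 4) expression above extracts for the TPR-masked case.
 * E.g. a pending vector 0x51 is priority class 5; encoding that class via
 * NEM_WIN_INTW_F_PRIO_SHIFT asks for an interrupt window exit once the TPR
 * drops far enough for the interrupt to become deliverable. The '??' remark
 * in the code marks the exact encoding as a tentative choice.
 */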
4053
4054
4055/**
4056 * Inner NEM runloop for windows.
4057 *
4058 * @returns Strict VBox status code.
4059 * @param pVM The cross context VM structure.
4060 * @param pVCpu The cross context per CPU structure.
4061 * @param pGVM The ring-0 VM structure (NULL in ring-3).
4062 * @param pGVCpu The ring-0 per CPU structure (NULL in ring-3).
4063 */
4064NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinRunGC(PVM pVM, PVMCPU pVCpu, PGVM pGVM, PGVMCPU pGVCpu)
4065{
4066 LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 <=\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags));
4067# ifdef LOG_ENABLED
4068 if (LogIs3Enabled())
4069 nemHCWinLogState(pVM, pVCpu);
4070# endif
4071# ifdef IN_RING0
4072 Assert(pVCpu->idCpu == pGVCpu->idCpu);
4073# endif
4074
4075 /*
4076 * Try switch to NEM runloop state.
4077 */
4078 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
4079 { /* likely */ }
4080 else
4081 {
4082 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
4083 LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
4084 return VINF_SUCCESS;
4085 }
4086
4087 /*
4088 * The run loop.
4089 *
4090 * The current approach to state updating is to use the sledgehammer and
4091 * sync everything every time. This will be optimized later.
4092 */
4093# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4094 VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader = (VID_MESSAGE_MAPPING_HEADER volatile *)pVCpu->nem.s.pvMsgSlotMapping;
4095# endif
4096 const bool fSingleStepping = DBGFIsStepping(pVCpu);
4097// const uint32_t fCheckVmFFs = !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK
4098// : VM_FF_HP_R0_PRE_HM_STEP_MASK;
4099// const uint32_t fCheckCpuFFs = !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK;
4100 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
4101 for (unsigned iLoop = 0;; iLoop++)
4102 {
4103# ifndef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
4104 /*
4105 * Hack alert! Unmap everything once too many pages are mapped, to keep the number of WHv mappings bounded.
4106 */
4107 uint32_t const cMappedPages = pVM->nem.s.cMappedPages;
4108 if (cMappedPages >= 4000)
4109 {
4110 PGMPhysNemEnumPagesByState(pVM, pVCpu, NEM_WIN_PAGE_STATE_READABLE, nemR3WinWHvUnmapOnePageCallback, NULL);
4111 Log(("nemHCWinRunGC: Unmapped all; cMappedPages=%u -> %u\n", cMappedPages, pVM->nem.s.cMappedPages));
4112 }
4113# endif
4114
4115 /*
4116 * Pending interrupts or such? Need to check and deal with this prior
4117 * to the state syncing.
4118 */
4119 pVCpu->nem.s.fDesiredInterruptWindows = 0;
4120 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC
4121 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
4122 {
4123# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4124 /* Make sure the CPU isn't executing. */
4125 if (pVCpu->nem.s.fHandleAndGetFlags == VID_MSHAGN_F_GET_NEXT_MESSAGE)
4126 {
4127 pVCpu->nem.s.fHandleAndGetFlags = 0;
4128 rcStrict = nemHCWinStopCpu(pVM, pVCpu, rcStrict, pMappingHeader, pGVM, pGVCpu);
4129 if (rcStrict == VINF_SUCCESS)
4130 { /* likely */ }
4131 else
4132 {
4133 LogFlow(("NEM/%u: breaking: nemHCWinStopCpu -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
4134 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
4135 break;
4136 }
4137 }
4138# endif
4139
4140 /* Try inject interrupt. */
4141 rcStrict = nemHCWinHandleInterruptFF(pVM, pVCpu, pGVCpu, &pVCpu->nem.s.fDesiredInterruptWindows);
4142 if (rcStrict == VINF_SUCCESS)
4143 { /* likely */ }
4144 else
4145 {
4146 LogFlow(("NEM/%u: breaking: nemHCWinHandleInterruptFF -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
4147 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
4148 break;
4149 }
4150 }
4151
4152 /*
4153 * Ensure that Hyper-V has the whole state.
4154 * (We always update the interrupt window settings when active, as Hyper-V
4155 * seems to forget about them after an exit.)
4156 */
4157 if ( (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK))
4158 != (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK)
4159 || ( ( pVCpu->nem.s.fDesiredInterruptWindows
4160 || pVCpu->nem.s.fCurrentInterruptWindows != pVCpu->nem.s.fDesiredInterruptWindows)
4161# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4162 && pVCpu->nem.s.fHandleAndGetFlags != VID_MSHAGN_F_GET_NEXT_MESSAGE /* not running */
4163# endif
4164 )
4165 )
4166 {
4167# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4168 AssertMsg(pVCpu->nem.s.fHandleAndGetFlags != VID_MSHAGN_F_GET_NEXT_MESSAGE /* not running */,
4169 ("%#x fExtrn=%#RX64 (%#RX64) fDesiredInterruptWindows=%d fCurrentInterruptWindows=%#x vs %#x\n",
4170 pVCpu->nem.s.fHandleAndGetFlags, pVCpu->cpum.GstCtx.fExtrn, ~pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK),
4171 pVCpu->nem.s.fDesiredInterruptWindows, pVCpu->nem.s.fCurrentInterruptWindows, pVCpu->nem.s.fDesiredInterruptWindows));
4172# endif
4173# ifdef IN_RING0
4174 int rc2 = nemR0WinExportState(pGVM, pGVCpu, &pVCpu->cpum.GstCtx);
4175# else
4176 int rc2 = nemHCWinCopyStateToHyperV(pVM, pVCpu);
4177 RT_NOREF(pGVM, pGVCpu);
4178# endif
4179 AssertRCReturn(rc2, rc2);
4180 }
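
        /* Annotation (not part of the original source): a set CPUMCTX_EXTRN_XXX bit
           means that piece of state lives in Hyper-V and the CPUMCTX copy is stale.
           The condition above therefore exports when some state has been imported
           and possibly modified locally (i.e. not all bits are set), or when the
           interrupt window configuration needs refreshing. */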
4181
4182 /*
4183 * Poll timers and run for a bit.
4184 *
4185 * With the VID approach (ring-0 or ring-3) we can specify a timeout here,
4186 * so we take the time of the next timer event and use that as a deadline.
4187 * The rounding heuristics are "tuned" so that rhel5 (1K timer) will boot fine.
4188 */
4189 /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
4190 * the whole polling job when timers have changed... */
4191 uint64_t offDeltaIgnored;
4192 uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
4193 if ( !VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
4194 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
4195 {
4196# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4197 if (pVCpu->nem.s.fHandleAndGetFlags)
4198 { /* Very likely that the CPU does NOT need starting (pending msg, running). */ }
4199 else
4200 {
4201# ifdef IN_RING0
4202 pVCpu->nem.s.uIoCtlBuf.idCpu = pGVCpu->idCpu;
4203 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlStartVirtualProcessor.uFunction,
4204 &pVCpu->nem.s.uIoCtlBuf.idCpu, sizeof(pVCpu->nem.s.uIoCtlBuf.idCpu),
4205 NULL, 0);
4206 LogFlow(("NEM/%u: IoCtlStartVirtualProcessor -> %#x\n", pVCpu->idCpu, rcNt));
4207 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("VidStartVirtualProcessor failed for CPU #%u: %#x\n", pGVCpu->idCpu, rcNt),
4208 VERR_NEM_IPE_5);
4209# else
4210 AssertLogRelMsgReturn(g_pfnVidStartVirtualProcessor(pVM->nem.s.hPartitionDevice, pVCpu->idCpu),
4211 ("VidStartVirtualProcessor failed for CPU #%u: %u (%#x, rcNt=%#x)\n",
4212 pVCpu->idCpu, RTNtLastErrorValue(), RTNtLastErrorValue(), RTNtLastStatusValue()),
4213 VERR_NEM_IPE_5);
4214# endif
4215 pVCpu->nem.s.fHandleAndGetFlags = VID_MSHAGN_F_GET_NEXT_MESSAGE;
4216 }
4217# endif /* NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
4218
4219 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_WAIT, VMCPUSTATE_STARTED_EXEC_NEM))
4220 {
4221# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4222 uint64_t const nsNow = RTTimeNanoTS();
4223 int64_t const cNsNextTimerEvt = nsNextTimerEvt - nsNow; /* ns until the next timer event */
4224 uint32_t cMsWait;
4225 if (cNsNextTimerEvt < 100000 /* ns */)
4226 cMsWait = 0;
4227 else if ((uint64_t)cNsNextTimerEvt < RT_NS_1SEC)
4228 {
4229 if ((uint32_t)cNsNextTimerEvt < 2*RT_NS_1MS)
4230 cMsWait = 1;
4231 else
4232 cMsWait = ((uint32_t)cNsNextTimerEvt - 100000 /*ns*/) / RT_NS_1MS;
4233 }
4234 else
4235 cMsWait = RT_MS_1SEC;
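                /* Worked examples for the heuristics above (annotation, not part of
                   the original source): next event in 50us -> cMsWait = 0 (just poll);
                   in 1.5ms -> 1; in 7.3ms -> (7300000 - 100000) / 1000000 = 7; and
                   anything one second or more out is capped at RT_MS_1SEC. */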
4236# ifdef IN_RING0
4237 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu = pGVCpu->idCpu;
4238 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags = pVCpu->nem.s.fHandleAndGetFlags;
4239 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = cMsWait;
4240 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
4241 &pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
4242 sizeof(pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext),
4243 NULL, 0);
4244 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
4245 if (rcNt == STATUS_SUCCESS)
4246# else
4247 BOOL fRet = VidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu,
4248 pVCpu->nem.s.fHandleAndGetFlags, cMsWait);
4249 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
4250 if (fRet)
4251# endif
4252# else
4253 WHV_RUN_VP_EXIT_CONTEXT ExitReason;
4254 RT_ZERO(ExitReason);
4255 HRESULT hrc = WHvRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, &ExitReason, sizeof(ExitReason));
4256 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
4257 if (SUCCEEDED(hrc))
4258# endif
4259 {
4260 /*
4261 * Deal with the message.
4262 */
4263# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4264 rcStrict = nemHCWinHandleMessage(pVM, pVCpu, pMappingHeader, pGVCpu);
4265 pVCpu->nem.s.fHandleAndGetFlags |= VID_MSHAGN_F_HANDLE_MESSAGE;
4266# else
4267 rcStrict = nemR3WinHandleExit(pVM, pVCpu, &ExitReason);
4268# endif
4269 if (rcStrict == VINF_SUCCESS)
4270 { /* hopefully likely */ }
4271 else
4272 {
4273 LogFlow(("NEM/%u: breaking: nemHCWinHandleMessage -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
4274 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
4275 break;
4276 }
4277 }
4278 else
4279 {
4280# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4281
4282 /* VID.SYS merges STATUS_ALERTED and STATUS_USER_APC into STATUS_TIMEOUT,
4283 so after NtAlertThread we end up here with a STATUS_TIMEOUT. And yeah,
4284 the error code conversion is into WAIT_XXX, i.e. NT status codes. */
4285# ifndef IN_RING0
4286 DWORD rcNt = GetLastError();
4287# endif
4288 LogFlow(("NEM/%u: VidMessageSlotHandleAndGetNext -> %#x\n", pVCpu->idCpu, rcNt));
4289 AssertLogRelMsgReturn( rcNt == STATUS_TIMEOUT
4290 || rcNt == STATUS_ALERTED /* just in case */
4291 || rcNt == STATUS_USER_APC /* ditto */
4292 || rcNt == STATUS_KERNEL_APC /* ditto */
4293 , ("VidMessageSlotHandleAndGetNext failed for CPU #%u: %#x (%u)\n",
4294 pVCpu->idCpu, rcNt, rcNt),
4295 VERR_NEM_IPE_0);
4296 pVCpu->nem.s.fHandleAndGetFlags = VID_MSHAGN_F_GET_NEXT_MESSAGE;
4297 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatGetMsgTimeout);
4298# else
4299 AssertLogRelMsgFailedReturn(("WHvRunVirtualProcessor failed for CPU #%u: %#x (%u)\n",
4300 pVCpu->idCpu, hrc, GetLastError()),
4301 VERR_NEM_IPE_0);
4302# endif
4303 }
4304
4305 /*
4306 * If no relevant FFs are pending, loop.
4307 */
4308 if ( !VM_FF_IS_PENDING( pVM, !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
4309 && !VMCPU_FF_IS_PENDING(pVCpu, !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
4310 continue;
4311
4312 /** @todo Try handle pending flags, not just return to EM loops. Take care
4313 * not to set important RCs here unless we've handled a message. */
4314 LogFlow(("NEM/%u: breaking: pending FF (%#x / %#x)\n",
4315 pVCpu->idCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
4316 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPost);
4317 }
4318 else
4319 {
4320 LogFlow(("NEM/%u: breaking: canceled %d (pre exec)\n", pVCpu->idCpu, VMCPU_GET_STATE(pVCpu) ));
4321 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnCancel);
4322 }
4323 }
4324 else
4325 {
4326 LogFlow(("NEM/%u: breaking: pending FF (pre exec)\n", pVCpu->idCpu));
4327 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPre);
4328 }
4329 break;
4330 } /* the run loop */
4331
4332
4333 /*
4334 * If the CPU is running, make sure to stop it before we try to sync back the
4335 * state and return to EM. We don't sync back the whole state if we can help it.
4336 */
4337# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4338 if (pVCpu->nem.s.fHandleAndGetFlags == VID_MSHAGN_F_GET_NEXT_MESSAGE)
4339 {
4340 pVCpu->nem.s.fHandleAndGetFlags = 0;
4341 rcStrict = nemHCWinStopCpu(pVM, pVCpu, rcStrict, pMappingHeader, pGVM, pGVCpu);
4342 }
4343# endif
4344
4345 if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
4346 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
4347
4348 if (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)))
4349 {
4350 /* Try to anticipate what we might need. */
4351 uint64_t fImport = IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI;
4352 if ( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
4353 || RT_FAILURE(rcStrict))
4354 fImport = CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT);
4355# ifdef IN_RING0 /* Ring-3 I/O port access optimizations: */
4356 else if ( rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
4357 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
4358 fImport = CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT;
4359 else if (rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
4360 fImport = CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT;
4361# endif
4362 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_APIC
4363 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
4364 fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;
4365
4366 if (pVCpu->cpum.GstCtx.fExtrn & fImport)
4367 {
4368# ifdef IN_RING0
4369 int rc2 = nemR0WinImportState(pGVM, pGVCpu, &pVCpu->cpum.GstCtx, fImport | CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT,
4370 true /*fCanUpdateCr3*/);
4371 if (RT_SUCCESS(rc2))
4372 pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
4373 else if (rc2 == VERR_NEM_FLUSH_TLB)
4374 {
4375 pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
4376 if (rcStrict == VINF_SUCCESS || rcStrict == -rc2)
4377 rcStrict = -rc2;
4378 else
4379 {
4380 pVCpu->nem.s.rcPending = -rc2;
4381 LogFlow(("NEM/%u: rcPending=%Rrc (rcStrict=%Rrc)\n", pVCpu->idCpu, rc2, VBOXSTRICTRC_VAL(rcStrict) ));
4382 }
4383 }
4384# else
4385 int rc2 = nemHCWinCopyStateFromHyperV(pVM, pVCpu, fImport | CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT);
4386 if (RT_SUCCESS(rc2))
4387 pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
4388# endif
4389 else if (RT_SUCCESS(rcStrict))
4390 rcStrict = rc2;
4391 if (!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT))))
4392 pVCpu->cpum.GstCtx.fExtrn = 0;
4393 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
4394 }
4395 else
4396 {
4397 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
4398 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
4399 }
4400 }
4401 else
4402 {
4403 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
4404 pVCpu->cpum.GstCtx.fExtrn = 0;
4405 }
4406
4407 LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 => %Rrc\n",
4408 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, VBOXSTRICTRC_VAL(rcStrict) ));
4409 return rcStrict;
4410}
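
/*
 * Illustrative outline (annotation, not part of the original source) of one
 * nemHCWinRunGC iteration:
 *    1. Unmap-all hack when too many pages are mapped (ring-3 WHv API only).
 *    2. Service pending interrupt force flags, stopping the CPU first if needed.
 *    3. Export any locally modified CPUMCTX state to Hyper-V.
 *    4. Poll timers, start the CPU if necessary, and wait for a message/exit.
 *    5. Handle the message and loop, unless force flags or the status say stop.
 * On the way out the CPU is stopped and the state we expect to need is imported.
 */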
4411
4412#endif /* defined(NEM_WIN_TEMPLATE_MODE_OWN_RUN_API) || defined(IN_RING3) */
4413
4414/**
4415 * @callback_method_impl{FNPGMPHYSNEMCHECKPAGE}
4416 */
4417NEM_TMPL_STATIC DECLCALLBACK(int) nemHCWinUnsetForA20CheckerCallback(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys,
4418 PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
4419{
4420 /* We'll just unmap the memory. */
4421 if (pInfo->u2NemState > NEM_WIN_PAGE_STATE_UNMAPPED)
4422 {
4423#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
4424 int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys);
4425 AssertRC(rc);
4426 if (RT_SUCCESS(rc))
4427#else
4428 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
4429 if (SUCCEEDED(hrc))
4430#endif
4431 {
4432 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4433 Log5(("NEM GPA unmapped/A20: %RGp (was %s, cMappedPages=%u)\n", GCPhys, g_apszPageStates[pInfo->u2NemState], cMappedPages));
4434 pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
4435 }
4436 else
4437 {
4438#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
4439 LogRel(("nemHCWinUnsetForA20CheckerCallback/unmap: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));
4440 return rc;
4441#else
4442 LogRel(("nemHCWinUnsetForA20CheckerCallback/unmap: GCPhys=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
4443 GCPhys, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
4444 return VERR_NEM_IPE_2;
4445#endif
4446 }
4447 }
4448 RT_NOREF(pVCpu, pvUser);
4449 return VINF_SUCCESS;
4450}
4451
4452
4453/**
4454 * Unmaps a page from Hyper-V for the purpose of emulating A20 gate behavior.
4455 *
4456 * @returns The PGMPhysNemQueryPageInfo result.
4457 * @param pVM The cross context VM structure.
4458 * @param pVCpu The cross context virtual CPU structure.
4459 * @param GCPhys The page to unmap.
4460 */
4461NEM_TMPL_STATIC int nemHCWinUnmapPageForA20Gate(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
4462{
4463 PGMPHYSNEMPAGEINFO Info;
4464 return PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhys, false /*fMakeWritable*/, &Info,
4465 nemHCWinUnsetForA20CheckerCallback, NULL);
4466}
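
/*
 * Worked example (annotation, not part of the original source): with the A20
 * gate disabled, physical address line 20 is forced to zero, so the 64KB just
 * above 1MB alias the first 64KB of RAM. Callers therefore pass the +1MB alias
 * of a page, e.g. for GCPhys 0x0000f000 the alias unmapped here would be
 * 0x0000f000 | RT_BIT_32(20) = 0x0010f000, forcing a fresh memory exit that
 * can be resolved against the correct backing page.
 */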
4467
4468
4469void nemHCNativeNotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
4470{
4471 Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
4472 NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb);
4473}
4474
4475
4476void nemHCNativeNotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
4477 int fRestoreAsRAM, bool fRestoreAsRAM2)
4478{
4479 Log5(("nemHCNativeNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d fRestoreAsRAM=%d fRestoreAsRAM2=%d\n",
4480 GCPhys, cb, enmKind, fRestoreAsRAM, fRestoreAsRAM2));
4481 NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb); NOREF(fRestoreAsRAM); NOREF(fRestoreAsRAM2);
4482}
4483
4484
4485void nemHCNativeNotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
4486 RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
4487{
4488 Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
4489 GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
4490 NOREF(pVM); NOREF(enmKind); NOREF(GCPhysOld); NOREF(GCPhysNew); NOREF(cb); NOREF(fRestoreAsRAM);
4491}
4492
4493
4494/**
4495 * Worker that maps pages into Hyper-V.
4496 *
4497 * This is used by the PGM physical page notifications as well as the memory
4498 * access VMEXIT handlers.
4499 *
4500 * @returns VBox status code.
4501 * @param pVM The cross context VM structure.
4502 * @param pVCpu The cross context virtual CPU structure of the
4503 * calling EMT.
4504 * @param GCPhysSrc The source page address.
4505 * @param GCPhysDst The hyper-V destination page. This may differ from
4506 * GCPhysSrc when A20 is disabled.
4507 * @param fPageProt NEM_PAGE_PROT_XXX.
4508 * @param pu2State Our page state (input/output).
4509 * @param fBackingChanged Set if the page backing is being changed.
4510 * @thread EMT(pVCpu)
4511 */
4512NEM_TMPL_STATIC int nemHCNativeSetPhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
4513 uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged)
4514{
4515#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
4516 /*
4517 * When using the hypercalls instead of the ring-3 APIs, we don't need to
4518 * unmap memory before modifying it. We still want to track the state though,
4519 * since unmap will fail when called on an unmapped page and we don't want to redo
4520 * upgrades/downgrades.
4521 */
4522 uint8_t const u2OldState = *pu2State;
4523 int rc;
4524 if (fPageProt == NEM_PAGE_PROT_NONE)
4525 {
4526 if (u2OldState > NEM_WIN_PAGE_STATE_UNMAPPED)
4527 {
4528 rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
4529 if (RT_SUCCESS(rc))
4530 {
4531 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
4532 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4533 Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
4534 }
4535 else
4536 AssertLogRelMsgFailed(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4537 }
4538 else
4539 rc = VINF_SUCCESS;
4540 }
4541 else if (fPageProt & NEM_PAGE_PROT_WRITE)
4542 {
4543 if (u2OldState != NEM_WIN_PAGE_STATE_WRITABLE || fBackingChanged)
4544 {
4545 rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
4546 HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE
4547 | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
4548 if (RT_SUCCESS(rc))
4549 {
4550 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
4551 uint32_t cMappedPages = u2OldState <= NEM_WIN_PAGE_STATE_UNMAPPED
4552 ? ASMAtomicIncU32(&pVM->nem.s.cMappedPages) : pVM->nem.s.cMappedPages;
4553 Log5(("NEM GPA writable/set: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
4554 NOREF(cMappedPages);
4555 }
4556 else
4557 AssertLogRelMsgFailed(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4558 }
4559 else
4560 rc = VINF_SUCCESS;
4561 }
4562 else
4563 {
4564 if (u2OldState != NEM_WIN_PAGE_STATE_READABLE || fBackingChanged)
4565 {
4566 rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
4567 HV_MAP_GPA_READABLE | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
4568 if (RT_SUCCESS(rc))
4569 {
4570 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
4571 uint32_t cMappedPages = u2OldState <= NEM_WIN_PAGE_STATE_UNMAPPED
4572 ? ASMAtomicIncU32(&pVM->nem.s.cMappedPages) : pVM->nem.s.cMappedPages;
4573 Log5(("NEM GPA read+exec/set: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
4574 NOREF(cMappedPages);
4575 }
4576 else
4577 AssertLogRelMsgFailed(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4578 }
4579 else
4580 rc = VINF_SUCCESS;
4581 }
4582
4583 return rc;
4584
4585#else
4586 /*
4587 * Looks like we need to unmap a page before we can change the backing
4588 * or even modify the protection. This is going to be *REALLY* efficient.
4589 * PGM lends us two bits to keep track of the state here.
4590 */
4591 uint8_t const u2OldState = *pu2State;
4592 uint8_t const u2NewState = fPageProt & NEM_PAGE_PROT_WRITE ? NEM_WIN_PAGE_STATE_WRITABLE
4593 : fPageProt & NEM_PAGE_PROT_READ ? NEM_WIN_PAGE_STATE_READABLE : NEM_WIN_PAGE_STATE_UNMAPPED;
4594 if ( fBackingChanged
4595 || u2NewState != u2OldState)
4596 {
4597 if (u2OldState > NEM_WIN_PAGE_STATE_UNMAPPED)
4598 {
4599# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
4600 int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
4601 AssertRC(rc);
4602 if (RT_SUCCESS(rc))
4603 {
4604 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
4605 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4606 if (u2NewState == NEM_WIN_PAGE_STATE_UNMAPPED)
4607 {
4608 Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n",
4609 GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
4610 return VINF_SUCCESS;
4611 }
4612 }
4613 else
4614 {
4615 LogRel(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4616 return rc;
4617 }
4618# else
4619 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst, X86_PAGE_SIZE);
4620 if (SUCCEEDED(hrc))
4621 {
4622 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
4623 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4624 if (u2NewState == NEM_WIN_PAGE_STATE_UNMAPPED)
4625 {
4626 Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n",
4627 GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
4628 return VINF_SUCCESS;
4629 }
4630 }
4631 else
4632 {
4633 LogRel(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
4634 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
4635 return VERR_NEM_INIT_FAILED;
4636 }
4637# endif
4638 }
4639 }
4640
4641 /*
4642 * Writeable mapping?
4643 */
4644 if (fPageProt & NEM_PAGE_PROT_WRITE)
4645 {
4646# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
4647 int rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
4648 HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE
4649 | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
4650 AssertRC(rc);
4651 if (RT_SUCCESS(rc))
4652 {
4653 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
4654 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4655 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
4656 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
4657 return VINF_SUCCESS;
4658 }
4659 LogRel(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4660 return rc;
4661# else
4662 void *pvPage;
4663 int rc = nemR3NativeGCPhys2R3PtrWriteable(pVM, GCPhysSrc, &pvPage);
4664 if (RT_SUCCESS(rc))
4665 {
4666 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvPage, GCPhysDst, X86_PAGE_SIZE,
4667 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute | WHvMapGpaRangeFlagWrite);
4668 if (SUCCEEDED(hrc))
4669 {
4670 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
4671 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4672 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
4673 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
4674 return VINF_SUCCESS;
4675 }
4676 LogRel(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
4677 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
4678 return VERR_NEM_INIT_FAILED;
4679 }
4680 LogRel(("nemHCNativeSetPhysPage/writable: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
4681 return rc;
4682# endif
4683 }
4684
4685 if (fPageProt & NEM_PAGE_PROT_READ)
4686 {
4687# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
4688 int rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
4689 HV_MAP_GPA_READABLE | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
4690 AssertRC(rc);
4691 if (RT_SUCCESS(rc))
4692 {
4693 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
4694 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4695 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
4696 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
4697 return VINF_SUCCESS;
4698 }
4699 LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4700 return rc;
4701# else
4702 const void *pvPage;
4703 int rc = nemR3NativeGCPhys2R3PtrReadOnly(pVM, GCPhysSrc, &pvPage);
4704 if (RT_SUCCESS(rc))
4705 {
4706 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, (void *)pvPage, GCPhysDst, X86_PAGE_SIZE,
4707 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
4708 if (SUCCEEDED(hrc))
4709 {
4710 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
4711 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4712 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
4713 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
4714 return VINF_SUCCESS;
4715 }
4716 LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
4717 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
4718 return VERR_NEM_INIT_FAILED;
4719 }
4720 LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
4721 return rc;
4722# endif
4723 }
4724
4725 /* We already unmapped it above. */
4726 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
4727 return VINF_SUCCESS;
4728#endif /* !NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
4729}
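
/*
 * Illustrative sketch (annotation, not part of the original source) of the
 * page state machine nemHCNativeSetPhysPage maintains in the two PGM-lent
 * bits (*pu2State):
 *
 *    UNMAPPED --NEM_PAGE_PROT_READ--->  READABLE
 *    UNMAPPED --NEM_PAGE_PROT_WRITE-->  WRITABLE
 *    READABLE/WRITABLE --protection or backing change--> unmap first, then
 *        remap with the new protection on the WHv API path; the hypercall
 *        path can remap in place and only tracks the state.
 */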
4730
4731
4732NEM_TMPL_STATIC int nemHCJustUnmapPageFromHyperV(PVM pVM, RTGCPHYS GCPhysDst, uint8_t *pu2State)
4733{
4734 if (*pu2State <= NEM_WIN_PAGE_STATE_UNMAPPED)
4735 {
4736 Log5(("nemHCJustUnmapPageFromHyperV: %RGp == unmapped\n", GCPhysDst));
4737 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
4738 return VINF_SUCCESS;
4739 }
4740
4741#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING0)
4742 PVMCPU pVCpu = VMMGetCpu(pVM);
4743 int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
4744 AssertRC(rc);
4745 if (RT_SUCCESS(rc))
4746 {
4747 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4748 Log5(("NEM GPA unmapped/just: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[*pu2State], cMappedPages));
4749 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
4750 return VINF_SUCCESS;
4751 }
4752 LogRel(("nemHCJustUnmapPageFromHyperV/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4753 return rc;
4754#else
4755 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, X86_PAGE_SIZE);
4756 if (SUCCEEDED(hrc))
4757 {
4758 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4759 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
4760 Log5(("nemHCJustUnmapPageFromHyperV: %RGp => unmapped (total %u)\n", GCPhysDst, cMappedPages));
4761 return VINF_SUCCESS;
4762 }
4763 LogRel(("nemHCJustUnmapPageFromHyperV(%RGp): failed! hrc=%Rhrc (%#x) Last=%#x/%u\n",
4764 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
4765 return VERR_NEM_IPE_6;
4766#endif
4767}
4768
4769
4770int nemHCNativeNotifyPhysPageAllocated(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
4771 PGMPAGETYPE enmType, uint8_t *pu2State)
4772{
4773 Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
4774 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
4775 RT_NOREF_PV(HCPhys); RT_NOREF_PV(enmType);
4776
4777 int rc;
4778#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING0)
4779 PVMCPU pVCpu = VMMGetCpu(pVM);
4780 if ( pVM->nem.s.fA20Enabled
4781 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
4782 rc = nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
4783 else
4784 {
4785 /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
4786 rc = nemHCWinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
4787 if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys) && RT_SUCCESS(rc))
4788 rc = nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
4789
4790 }
4791#else
4792 RT_NOREF_PV(fPageProt);
4793 if ( pVM->nem.s.fA20Enabled
4794 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
4795 rc = nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
4796 else if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
4797 rc = nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
4798 else
4799 rc = VINF_SUCCESS; /* ignore since we've got the alias page at this address. */
4800#endif
4801 return rc;
4802}
4803
4804
4805void nemHCNativeNotifyPhysPageProtChanged(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
4806 PGMPAGETYPE enmType, uint8_t *pu2State)
4807{
4808 Log5(("nemHCNativeNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
4809 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
4810 RT_NOREF_PV(HCPhys); RT_NOREF_PV(enmType);
4811
4812#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING0)
4813 PVMCPU pVCpu = VMMGetCpu(pVM);
4814 if ( pVM->nem.s.fA20Enabled
4815 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
4816 nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, false /*fBackingChanged*/);
4817 else
4818 {
4819 /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
4820 nemHCWinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
4821 if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
4822 nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, false /*fBackingChanged*/);
4823 }
4824#else
4825 RT_NOREF_PV(fPageProt);
4826 if ( pVM->nem.s.fA20Enabled
4827 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
4828 nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
4829 else if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
4830 nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
4831 /* else: ignore since we've got the alias page at this address. */
4832#endif
4833}
4834
4835
4836void nemHCNativeNotifyPhysPageChanged(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
4837 uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
4838{
4839 Log5(("nemHCNativeNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
4840 GCPhys, HCPhysPrev, HCPhysNew, fPageProt, enmType, *pu2State));
4841 RT_NOREF_PV(HCPhysPrev); RT_NOREF_PV(HCPhysNew); RT_NOREF_PV(enmType);
4842
4843#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING0)
4844 PVMCPU pVCpu = VMMGetCpu(pVM);
4845 if ( pVM->nem.s.fA20Enabled
4846 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
4847 nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
4848 else
4849 {
4850 /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
4851 nemHCWinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
4852 if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
4853 nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
4854 }
4855#else
4856 RT_NOREF_PV(fPageProt);
4857 if ( pVM->nem.s.fA20Enabled
4858 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
4859 nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
4860 else if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
4861 nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
4862 /* else: ignore since we've got the alias page at this address. */
4863#endif
4864}
4865