VirtualBox
source: vbox/trunk/src/VBox/VMM/VMMAll/NEMAllNativeTemplate-win.cpp.h @ 93133

Last change on this file since 93133 was 93115, checked in by vboxsync, 3 years ago:
scm --update-copyright-year
/* $Id: NEMAllNativeTemplate-win.cpp.h 93115 2022-01-01 11:31:46Z vboxsync $ */
/** @file
 * NEM - Native execution manager, Windows code template ring-0/3.
 */

/*
 * Copyright (C) 2018-2022 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** Copy back a segment from Hyper-V. */
#define NEM_WIN_COPY_BACK_SEG(a_Dst, a_Src) \
    do { \
        (a_Dst).u64Base  = (a_Src).Base; \
        (a_Dst).u32Limit = (a_Src).Limit; \
        (a_Dst).ValidSel = (a_Dst).Sel = (a_Src).Selector; \
        (a_Dst).Attr.u   = (a_Src).Attributes; \
        (a_Dst).fFlags   = CPUMSELREG_FLAGS_VALID; \
    } while (0)
/** @def NEMWIN_ASSERT_MSG_REG_VAL
 * Asserts the correctness of a register value in a message/context.
 */
#if 0
# define NEMWIN_NEED_GET_REGISTER
# if defined(IN_RING0) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
#  define NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_enmReg, a_Expr, a_Msg) \
    do { \
        HV_REGISTER_VALUE TmpVal; \
        nemHCWinGetRegister(a_pVCpu, a_enmReg, &TmpVal); \
        AssertMsg(a_Expr, a_Msg); \
    } while (0)
# else
#  define NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_enmReg, a_Expr, a_Msg) \
    do { \
        WHV_REGISTER_VALUE TmpVal; \
        nemR3WinGetRegister(a_pVCpu, a_enmReg, &TmpVal); \
        AssertMsg(a_Expr, a_Msg); \
    } while (0)
# endif
#else
# define NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_enmReg, a_Expr, a_Msg) do { } while (0)
#endif

/** @def NEMWIN_ASSERT_MSG_REG_VAL64
 * Asserts the correctness of a 64-bit register value in a message/context.
 */
#define NEMWIN_ASSERT_MSG_REG_VAL64(a_pVCpu, a_enmReg, a_u64Val) \
    NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_enmReg, (a_u64Val) == TmpVal.Reg64, \
                              (#a_u64Val "=%#RX64, expected %#RX64\n", (a_u64Val), TmpVal.Reg64))
/** @def NEMWIN_ASSERT_MSG_REG_SEG
 * Asserts the correctness of a segment register value in a message/context.
 */
#define NEMWIN_ASSERT_MSG_REG_SEG(a_pVCpu, a_enmReg, a_SReg) \
    NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_enmReg, \
                                 (a_SReg).Base       == TmpVal.Segment.Base \
                              && (a_SReg).Limit      == TmpVal.Segment.Limit \
                              && (a_SReg).Selector   == TmpVal.Segment.Selector \
                              && (a_SReg).Attributes == TmpVal.Segment.Attributes, \
                              ( #a_SReg "=%#RX16 {%#RX64 LB %#RX32,%#RX16} expected %#RX16 {%#RX64 LB %#RX32,%#RX16}\n", \
                                (a_SReg).Selector, (a_SReg).Base, (a_SReg).Limit, (a_SReg).Attributes, \
                                TmpVal.Segment.Selector, TmpVal.Segment.Base, TmpVal.Segment.Limit, TmpVal.Segment.Attributes))


#ifdef IN_RING3
# ifndef NTDDI_WIN10_19H1
#  define NTDDI_WIN10_19H1 0x0a000007
# endif

/** WHvRegisterPendingEvent0 was renamed to WHvRegisterPendingEvent between
 *  SDK 17134 and 18362. */
# if WDK_NTDDI_VERSION < NTDDI_WIN10_19H1
#  define WHvRegisterPendingEvent WHvRegisterPendingEvent0
# endif
#endif


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** NEM_WIN_PAGE_STATE_XXX names. */
NEM_TMPL_STATIC const char * const g_apszPageStates[4] = { "not-set", "unmapped", "readable", "writable" };

/** HV_INTERCEPT_ACCESS_TYPE names. */
static const char * const g_apszHvInterceptAccessTypes[4] = { "read", "write", "exec", "!undefined!" };


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
NEM_TMPL_STATIC int nemHCNativeSetPhysPage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
                                           uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged);



#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES

/**
 * Wrapper around VMMR0_DO_NEM_MAP_PAGES for a single page.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the caller.
 * @param   GCPhysSrc   The source page.  Does not need to be page aligned.
 * @param   GCPhysDst   The destination page.  Same as @a GCPhysSrc except for
 *                      when A20 is disabled.
 * @param   fFlags      HV_MAP_GPA_XXX.
 */
DECLINLINE(int) nemHCWinHypercallMapPage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst, uint32_t fFlags)
{
# ifdef IN_RING0
    /** @todo optimize further, caller generally has the physical address. */
    return nemR0WinMapPages(pVM, pVCpu,
                            GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
                            GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
                            1, fFlags);
# else
    pVCpu->nem.s.Hypercall.MapPages.GCPhysSrc = GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
    pVCpu->nem.s.Hypercall.MapPages.GCPhysDst = GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
    pVCpu->nem.s.Hypercall.MapPages.cPages    = 1;
    pVCpu->nem.s.Hypercall.MapPages.fFlags    = fFlags;
    return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_MAP_PAGES, 0, NULL);
# endif
}


/**
 * Wrapper around VMMR0_DO_NEM_UNMAP_PAGES for a single page.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the caller.
 * @param   GCPhys  The page to unmap.  Does not need to be page aligned.
 */
DECLINLINE(int) nemHCWinHypercallUnmapPage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys)
{
# ifdef IN_RING0
    return nemR0WinUnmapPages(pVM, pVCpu, GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, 1);
# else
    pVCpu->nem.s.Hypercall.UnmapPages.GCPhys = GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
    pVCpu->nem.s.Hypercall.UnmapPages.cPages = 1;
    return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_UNMAP_PAGES, 0, NULL);
# endif
}

#endif /* NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
#ifndef IN_RING0

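/**
 * Copies the guest state from CPUM over to Hyper-V, either via the ring-0
 * export call or directly through WHvSetVirtualProcessorRegisters.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the caller.
 */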
NEM_TMPL_STATIC int nemHCWinCopyStateToHyperV(PVMCC pVM, PVMCPUCC pVCpu)
{
# if defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) || defined(NEM_WIN_WITH_RING0_RUNLOOP)
#  if !defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) && defined(NEM_WIN_WITH_RING0_RUNLOOP)
    if (pVM->nem.s.fUseRing0Runloop)
#  endif
    {
        int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_EXPORT_STATE, 0, NULL);
        AssertLogRelRCReturn(rc, rc);
        return rc;
    }
# endif
# ifndef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS

    /*
     * The following is very similar to what nemR0WinExportState() does.
     */
    WHV_REGISTER_NAME  aenmNames[128];
    WHV_REGISTER_VALUE aValues[128];

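    /* A bit set in fExtrn means that piece of state is still held by Hyper-V
       (not yet imported), so the complement below yields exactly the state
       CPUM owns and must write back before the vCPU runs again. */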
    uint64_t const fWhat = ~pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK);
    if (   !fWhat
        && pVCpu->nem.s.fCurrentInterruptWindows == pVCpu->nem.s.fDesiredInterruptWindows)
        return VINF_SUCCESS;
    uintptr_t iReg = 0;

#  define ADD_REG64(a_enmName, a_uValue) do { \
        aenmNames[iReg] = (a_enmName); \
        aValues[iReg].Reg128.High64 = 0; \
        aValues[iReg].Reg64 = (a_uValue); \
        iReg++; \
    } while (0)
#  define ADD_REG128(a_enmName, a_uValueLo, a_uValueHi) do { \
        aenmNames[iReg] = (a_enmName); \
        aValues[iReg].Reg128.Low64  = (a_uValueLo); \
        aValues[iReg].Reg128.High64 = (a_uValueHi); \
        iReg++; \
    } while (0)

    /* GPRs */
    if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_RAX)
            ADD_REG64(WHvX64RegisterRax, pVCpu->cpum.GstCtx.rax);
        if (fWhat & CPUMCTX_EXTRN_RCX)
            ADD_REG64(WHvX64RegisterRcx, pVCpu->cpum.GstCtx.rcx);
        if (fWhat & CPUMCTX_EXTRN_RDX)
            ADD_REG64(WHvX64RegisterRdx, pVCpu->cpum.GstCtx.rdx);
        if (fWhat & CPUMCTX_EXTRN_RBX)
            ADD_REG64(WHvX64RegisterRbx, pVCpu->cpum.GstCtx.rbx);
        if (fWhat & CPUMCTX_EXTRN_RSP)
            ADD_REG64(WHvX64RegisterRsp, pVCpu->cpum.GstCtx.rsp);
        if (fWhat & CPUMCTX_EXTRN_RBP)
            ADD_REG64(WHvX64RegisterRbp, pVCpu->cpum.GstCtx.rbp);
        if (fWhat & CPUMCTX_EXTRN_RSI)
            ADD_REG64(WHvX64RegisterRsi, pVCpu->cpum.GstCtx.rsi);
        if (fWhat & CPUMCTX_EXTRN_RDI)
            ADD_REG64(WHvX64RegisterRdi, pVCpu->cpum.GstCtx.rdi);
        if (fWhat & CPUMCTX_EXTRN_R8_R15)
        {
            ADD_REG64(WHvX64RegisterR8, pVCpu->cpum.GstCtx.r8);
            ADD_REG64(WHvX64RegisterR9, pVCpu->cpum.GstCtx.r9);
            ADD_REG64(WHvX64RegisterR10, pVCpu->cpum.GstCtx.r10);
            ADD_REG64(WHvX64RegisterR11, pVCpu->cpum.GstCtx.r11);
            ADD_REG64(WHvX64RegisterR12, pVCpu->cpum.GstCtx.r12);
            ADD_REG64(WHvX64RegisterR13, pVCpu->cpum.GstCtx.r13);
            ADD_REG64(WHvX64RegisterR14, pVCpu->cpum.GstCtx.r14);
            ADD_REG64(WHvX64RegisterR15, pVCpu->cpum.GstCtx.r15);
        }
    }

    /* RIP & Flags */
    if (fWhat & CPUMCTX_EXTRN_RIP)
        ADD_REG64(WHvX64RegisterRip, pVCpu->cpum.GstCtx.rip);
    if (fWhat & CPUMCTX_EXTRN_RFLAGS)
        ADD_REG64(WHvX64RegisterRflags, pVCpu->cpum.GstCtx.rflags.u);

    /* Segments */
#  define ADD_SEG(a_enmName, a_SReg) \
        do { \
            aenmNames[iReg]                  = a_enmName; \
            aValues[iReg].Segment.Base       = (a_SReg).u64Base; \
            aValues[iReg].Segment.Limit      = (a_SReg).u32Limit; \
            aValues[iReg].Segment.Selector   = (a_SReg).Sel; \
            aValues[iReg].Segment.Attributes = (a_SReg).Attr.u; \
            iReg++; \
        } while (0)
    if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_ES)
            ADD_SEG(WHvX64RegisterEs, pVCpu->cpum.GstCtx.es);
        if (fWhat & CPUMCTX_EXTRN_CS)
            ADD_SEG(WHvX64RegisterCs, pVCpu->cpum.GstCtx.cs);
        if (fWhat & CPUMCTX_EXTRN_SS)
            ADD_SEG(WHvX64RegisterSs, pVCpu->cpum.GstCtx.ss);
        if (fWhat & CPUMCTX_EXTRN_DS)
            ADD_SEG(WHvX64RegisterDs, pVCpu->cpum.GstCtx.ds);
        if (fWhat & CPUMCTX_EXTRN_FS)
            ADD_SEG(WHvX64RegisterFs, pVCpu->cpum.GstCtx.fs);
        if (fWhat & CPUMCTX_EXTRN_GS)
            ADD_SEG(WHvX64RegisterGs, pVCpu->cpum.GstCtx.gs);
    }

    /* Descriptor tables & task segment. */
    if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_LDTR)
            ADD_SEG(WHvX64RegisterLdtr, pVCpu->cpum.GstCtx.ldtr);
        if (fWhat & CPUMCTX_EXTRN_TR)
            ADD_SEG(WHvX64RegisterTr, pVCpu->cpum.GstCtx.tr);
        if (fWhat & CPUMCTX_EXTRN_IDTR)
        {
            aenmNames[iReg] = WHvX64RegisterIdtr;
            aValues[iReg].Table.Limit = pVCpu->cpum.GstCtx.idtr.cbIdt;
            aValues[iReg].Table.Base  = pVCpu->cpum.GstCtx.idtr.pIdt;
            iReg++;
        }
        if (fWhat & CPUMCTX_EXTRN_GDTR)
        {
            aenmNames[iReg] = WHvX64RegisterGdtr;
            aValues[iReg].Table.Limit = pVCpu->cpum.GstCtx.gdtr.cbGdt;
            aValues[iReg].Table.Base  = pVCpu->cpum.GstCtx.gdtr.pGdt;
            iReg++;
        }
    }

    /* Control registers. */
    if (fWhat & CPUMCTX_EXTRN_CR_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_CR0)
            ADD_REG64(WHvX64RegisterCr0, pVCpu->cpum.GstCtx.cr0);
        if (fWhat & CPUMCTX_EXTRN_CR2)
            ADD_REG64(WHvX64RegisterCr2, pVCpu->cpum.GstCtx.cr2);
        if (fWhat & CPUMCTX_EXTRN_CR3)
            ADD_REG64(WHvX64RegisterCr3, pVCpu->cpum.GstCtx.cr3);
        if (fWhat & CPUMCTX_EXTRN_CR4)
            ADD_REG64(WHvX64RegisterCr4, pVCpu->cpum.GstCtx.cr4);
    }
    if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
        ADD_REG64(WHvX64RegisterCr8, CPUMGetGuestCR8(pVCpu));

    /* Debug registers. */
/** @todo fixme. Figure out what the Hyper-V version of KVM_SET_GUEST_DEBUG would be. */
    if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
    {
        ADD_REG64(WHvX64RegisterDr0, pVCpu->cpum.GstCtx.dr[0]); // CPUMGetHyperDR0(pVCpu));
        ADD_REG64(WHvX64RegisterDr1, pVCpu->cpum.GstCtx.dr[1]); // CPUMGetHyperDR1(pVCpu));
        ADD_REG64(WHvX64RegisterDr2, pVCpu->cpum.GstCtx.dr[2]); // CPUMGetHyperDR2(pVCpu));
        ADD_REG64(WHvX64RegisterDr3, pVCpu->cpum.GstCtx.dr[3]); // CPUMGetHyperDR3(pVCpu));
    }
    if (fWhat & CPUMCTX_EXTRN_DR6)
        ADD_REG64(WHvX64RegisterDr6, pVCpu->cpum.GstCtx.dr[6]); // CPUMGetHyperDR6(pVCpu));
    if (fWhat & CPUMCTX_EXTRN_DR7)
        ADD_REG64(WHvX64RegisterDr7, pVCpu->cpum.GstCtx.dr[7]); // CPUMGetHyperDR7(pVCpu));

    /* Floating point state. */
    if (fWhat & CPUMCTX_EXTRN_X87)
    {
        ADD_REG128(WHvX64RegisterFpMmx0, pVCpu->cpum.GstCtx.XState.x87.aRegs[0].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[0].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx1, pVCpu->cpum.GstCtx.XState.x87.aRegs[1].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[1].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx2, pVCpu->cpum.GstCtx.XState.x87.aRegs[2].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[2].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx3, pVCpu->cpum.GstCtx.XState.x87.aRegs[3].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[3].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx4, pVCpu->cpum.GstCtx.XState.x87.aRegs[4].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[4].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx5, pVCpu->cpum.GstCtx.XState.x87.aRegs[5].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[5].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx6, pVCpu->cpum.GstCtx.XState.x87.aRegs[6].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[6].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx7, pVCpu->cpum.GstCtx.XState.x87.aRegs[7].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[7].au64[1]);

        aenmNames[iReg] = WHvX64RegisterFpControlStatus;
        aValues[iReg].FpControlStatus.FpControl = pVCpu->cpum.GstCtx.XState.x87.FCW;
        aValues[iReg].FpControlStatus.FpStatus  = pVCpu->cpum.GstCtx.XState.x87.FSW;
        aValues[iReg].FpControlStatus.FpTag     = pVCpu->cpum.GstCtx.XState.x87.FTW;
        aValues[iReg].FpControlStatus.Reserved  = pVCpu->cpum.GstCtx.XState.x87.FTW >> 8;
        aValues[iReg].FpControlStatus.LastFpOp  = pVCpu->cpum.GstCtx.XState.x87.FOP;
        aValues[iReg].FpControlStatus.LastFpRip = (pVCpu->cpum.GstCtx.XState.x87.FPUIP)
                                                | ((uint64_t)pVCpu->cpum.GstCtx.XState.x87.CS << 32)
                                                | ((uint64_t)pVCpu->cpum.GstCtx.XState.x87.Rsrvd1 << 48);
        iReg++;

        aenmNames[iReg] = WHvX64RegisterXmmControlStatus;
        aValues[iReg].XmmControlStatus.LastFpRdp = (pVCpu->cpum.GstCtx.XState.x87.FPUDP)
                                                 | ((uint64_t)pVCpu->cpum.GstCtx.XState.x87.DS << 32)
                                                 | ((uint64_t)pVCpu->cpum.GstCtx.XState.x87.Rsrvd2 << 48);
        aValues[iReg].XmmControlStatus.XmmStatusControl     = pVCpu->cpum.GstCtx.XState.x87.MXCSR;
        aValues[iReg].XmmControlStatus.XmmStatusControlMask = pVCpu->cpum.GstCtx.XState.x87.MXCSR_MASK; /** @todo ??? (Isn't this an output field?) */
        iReg++;
    }

    /* Vector state. */
    if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
    {
        ADD_REG128(WHvX64RegisterXmm0,  pVCpu->cpum.GstCtx.XState.x87.aXMM[ 0].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 0].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm1,  pVCpu->cpum.GstCtx.XState.x87.aXMM[ 1].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 1].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm2,  pVCpu->cpum.GstCtx.XState.x87.aXMM[ 2].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 2].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm3,  pVCpu->cpum.GstCtx.XState.x87.aXMM[ 3].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 3].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm4,  pVCpu->cpum.GstCtx.XState.x87.aXMM[ 4].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 4].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm5,  pVCpu->cpum.GstCtx.XState.x87.aXMM[ 5].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 5].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm6,  pVCpu->cpum.GstCtx.XState.x87.aXMM[ 6].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 6].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm7,  pVCpu->cpum.GstCtx.XState.x87.aXMM[ 7].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 7].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm8,  pVCpu->cpum.GstCtx.XState.x87.aXMM[ 8].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 8].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm9,  pVCpu->cpum.GstCtx.XState.x87.aXMM[ 9].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 9].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm10, pVCpu->cpum.GstCtx.XState.x87.aXMM[10].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[10].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm11, pVCpu->cpum.GstCtx.XState.x87.aXMM[11].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[11].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm12, pVCpu->cpum.GstCtx.XState.x87.aXMM[12].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[12].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm13, pVCpu->cpum.GstCtx.XState.x87.aXMM[13].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[13].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm14, pVCpu->cpum.GstCtx.XState.x87.aXMM[14].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[14].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm15, pVCpu->cpum.GstCtx.XState.x87.aXMM[15].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[15].uXmm.s.Hi);
    }

    /* MSRs */
    // WHvX64RegisterTsc - don't touch
    if (fWhat & CPUMCTX_EXTRN_EFER)
        ADD_REG64(WHvX64RegisterEfer, pVCpu->cpum.GstCtx.msrEFER);
    if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
        ADD_REG64(WHvX64RegisterKernelGsBase, pVCpu->cpum.GstCtx.msrKERNELGSBASE);
    if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
    {
        ADD_REG64(WHvX64RegisterSysenterCs, pVCpu->cpum.GstCtx.SysEnter.cs);
        ADD_REG64(WHvX64RegisterSysenterEip, pVCpu->cpum.GstCtx.SysEnter.eip);
        ADD_REG64(WHvX64RegisterSysenterEsp, pVCpu->cpum.GstCtx.SysEnter.esp);
    }
    if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
    {
        ADD_REG64(WHvX64RegisterStar, pVCpu->cpum.GstCtx.msrSTAR);
        ADD_REG64(WHvX64RegisterLstar, pVCpu->cpum.GstCtx.msrLSTAR);
        ADD_REG64(WHvX64RegisterCstar, pVCpu->cpum.GstCtx.msrCSTAR);
        ADD_REG64(WHvX64RegisterSfmask, pVCpu->cpum.GstCtx.msrSFMASK);
    }
    if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
    {
        PCPUMCTXMSRS const pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
        if (fWhat & CPUMCTX_EXTRN_TSC_AUX)
            ADD_REG64(WHvX64RegisterTscAux, pCtxMsrs->msr.TscAux);
        if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
        {
            ADD_REG64(WHvX64RegisterApicBase, APICGetBaseMsrNoCheck(pVCpu));
            ADD_REG64(WHvX64RegisterPat, pVCpu->cpum.GstCtx.msrPAT);
#if 0 /** @todo check if WHvX64RegisterMsrMtrrCap works here... */
            ADD_REG64(WHvX64RegisterMsrMtrrCap, CPUMGetGuestIa32MtrrCap(pVCpu));
#endif
            ADD_REG64(WHvX64RegisterMsrMtrrDefType, pCtxMsrs->msr.MtrrDefType);
            ADD_REG64(WHvX64RegisterMsrMtrrFix64k00000, pCtxMsrs->msr.MtrrFix64K_00000);
            ADD_REG64(WHvX64RegisterMsrMtrrFix16k80000, pCtxMsrs->msr.MtrrFix16K_80000);
            ADD_REG64(WHvX64RegisterMsrMtrrFix16kA0000, pCtxMsrs->msr.MtrrFix16K_A0000);
            ADD_REG64(WHvX64RegisterMsrMtrrFix4kC0000, pCtxMsrs->msr.MtrrFix4K_C0000);
            ADD_REG64(WHvX64RegisterMsrMtrrFix4kC8000, pCtxMsrs->msr.MtrrFix4K_C8000);
            ADD_REG64(WHvX64RegisterMsrMtrrFix4kD0000, pCtxMsrs->msr.MtrrFix4K_D0000);
            ADD_REG64(WHvX64RegisterMsrMtrrFix4kD8000, pCtxMsrs->msr.MtrrFix4K_D8000);
            ADD_REG64(WHvX64RegisterMsrMtrrFix4kE0000, pCtxMsrs->msr.MtrrFix4K_E0000);
            ADD_REG64(WHvX64RegisterMsrMtrrFix4kE8000, pCtxMsrs->msr.MtrrFix4K_E8000);
            ADD_REG64(WHvX64RegisterMsrMtrrFix4kF0000, pCtxMsrs->msr.MtrrFix4K_F0000);
            ADD_REG64(WHvX64RegisterMsrMtrrFix4kF8000, pCtxMsrs->msr.MtrrFix4K_F8000);
#if 0 /** @todo these registers aren't available? Might explain something... */
            const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pVM);
            if (enmCpuVendor != CPUMCPUVENDOR_AMD)
            {
                ADD_REG64(HvX64RegisterIa32MiscEnable, pCtxMsrs->msr.MiscEnable);
                ADD_REG64(HvX64RegisterIa32FeatureControl, CPUMGetGuestIa32FeatureControl(pVCpu));
            }
#endif
        }
    }

    /* event injection (clear it). */
    if (fWhat & CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)
        ADD_REG64(WHvRegisterPendingInterruption, 0);

    /* Interruptibility state.  This can get a little complicated since we get
       half of the state via HV_X64_VP_EXECUTION_STATE. */
    if (   (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
        ==          (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI) )
    {
        ADD_REG64(WHvRegisterInterruptState, 0);
        if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
            && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip)
            aValues[iReg - 1].InterruptState.InterruptShadow = 1;
        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
            aValues[iReg - 1].InterruptState.NmiMasked = 1;
    }
    else if (fWhat & CPUMCTX_EXTRN_INHIBIT_INT)
    {
        if (   pVCpu->nem.s.fLastInterruptShadow
            || (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
                && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip))
        {
            ADD_REG64(WHvRegisterInterruptState, 0);
            if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
                && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip)
                aValues[iReg - 1].InterruptState.InterruptShadow = 1;
            /** @todo Retrieve NMI state, currently assuming it's zero. (yes this may happen on I/O) */
            //if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
            //    aValues[iReg - 1].InterruptState.NmiMasked = 1;
        }
    }
    else
        Assert(!(fWhat & CPUMCTX_EXTRN_INHIBIT_NMI));

    /* Interrupt windows.  Always set if active as Hyper-V seems to be forgetful. */
    uint8_t const fDesiredIntWin = pVCpu->nem.s.fDesiredInterruptWindows;
    if (   fDesiredIntWin
        || pVCpu->nem.s.fCurrentInterruptWindows != fDesiredIntWin)
    {
        pVCpu->nem.s.fCurrentInterruptWindows = pVCpu->nem.s.fDesiredInterruptWindows;
        Log8(("Setting WHvX64RegisterDeliverabilityNotifications, fDesiredIntWin=%X\n", fDesiredIntWin));
        ADD_REG64(WHvX64RegisterDeliverabilityNotifications, fDesiredIntWin);
        Assert(aValues[iReg - 1].DeliverabilityNotifications.NmiNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_NMI));
        Assert(aValues[iReg - 1].DeliverabilityNotifications.InterruptNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_REGULAR));
        Assert(aValues[iReg - 1].DeliverabilityNotifications.InterruptPriority == (unsigned)((fDesiredIntWin & NEM_WIN_INTW_F_PRIO_MASK) >> NEM_WIN_INTW_F_PRIO_SHIFT));
    }

    /// @todo WHvRegisterPendingEvent

    /*
     * Set the registers.
     */
    Assert(iReg < RT_ELEMENTS(aValues));
    Assert(iReg < RT_ELEMENTS(aenmNames));
#  ifdef NEM_WIN_INTERCEPT_NT_IO_CTLS
    Log12(("Calling WHvSetVirtualProcessorRegisters(%p, %u, %p, %u, %p)\n",
           pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues));
#  endif
    HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues);
    if (SUCCEEDED(hrc))
    {
        pVCpu->cpum.GstCtx.fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM;
        return VINF_SUCCESS;
    }
    AssertLogRelMsgFailed(("WHvSetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
                           pVM->nem.s.hPartition, pVCpu->idCpu, iReg,
                           hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
    return VERR_INTERNAL_ERROR;

#  undef ADD_REG64
#  undef ADD_REG128
#  undef ADD_SEG

# endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
}


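/**
 * Copies guest state from Hyper-V back into CPUM (the inverse of
 * nemHCWinCopyStateToHyperV).
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the caller.
 * @param   fWhat   What to import, CPUMCTX_EXTRN_XXX.
 */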
NEM_TMPL_STATIC int nemHCWinCopyStateFromHyperV(PVMCC pVM, PVMCPUCC pVCpu, uint64_t fWhat)
{
# if defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) || defined(NEM_WIN_WITH_RING0_RUNLOOP)
#  if !defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) && defined(NEM_WIN_WITH_RING0_RUNLOOP)
    if (pVM->nem.s.fUseRing0Runloop)
#  endif
    {
        /* See NEMR0ImportState */
        int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_IMPORT_STATE, fWhat, NULL);
        if (RT_SUCCESS(rc))
            return rc;
        if (rc == VERR_NEM_FLUSH_TLB)
        {
            rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /*fGlobal*/);
            return rc;
        }
        AssertLogRelRCReturn(rc, rc);
        return rc;
    }
# endif
# ifndef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS
    WHV_REGISTER_NAME aenmNames[128];

    fWhat &= pVCpu->cpum.GstCtx.fExtrn;
    uintptr_t iReg = 0;

    /* GPRs */
    if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_RAX)
            aenmNames[iReg++] = WHvX64RegisterRax;
        if (fWhat & CPUMCTX_EXTRN_RCX)
            aenmNames[iReg++] = WHvX64RegisterRcx;
        if (fWhat & CPUMCTX_EXTRN_RDX)
            aenmNames[iReg++] = WHvX64RegisterRdx;
        if (fWhat & CPUMCTX_EXTRN_RBX)
            aenmNames[iReg++] = WHvX64RegisterRbx;
        if (fWhat & CPUMCTX_EXTRN_RSP)
            aenmNames[iReg++] = WHvX64RegisterRsp;
        if (fWhat & CPUMCTX_EXTRN_RBP)
            aenmNames[iReg++] = WHvX64RegisterRbp;
        if (fWhat & CPUMCTX_EXTRN_RSI)
            aenmNames[iReg++] = WHvX64RegisterRsi;
        if (fWhat & CPUMCTX_EXTRN_RDI)
            aenmNames[iReg++] = WHvX64RegisterRdi;
        if (fWhat & CPUMCTX_EXTRN_R8_R15)
        {
            aenmNames[iReg++] = WHvX64RegisterR8;
            aenmNames[iReg++] = WHvX64RegisterR9;
            aenmNames[iReg++] = WHvX64RegisterR10;
            aenmNames[iReg++] = WHvX64RegisterR11;
            aenmNames[iReg++] = WHvX64RegisterR12;
            aenmNames[iReg++] = WHvX64RegisterR13;
            aenmNames[iReg++] = WHvX64RegisterR14;
            aenmNames[iReg++] = WHvX64RegisterR15;
        }
    }

    /* RIP & Flags */
    if (fWhat & CPUMCTX_EXTRN_RIP)
        aenmNames[iReg++] = WHvX64RegisterRip;
    if (fWhat & CPUMCTX_EXTRN_RFLAGS)
        aenmNames[iReg++] = WHvX64RegisterRflags;

    /* Segments */
    if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_ES)
            aenmNames[iReg++] = WHvX64RegisterEs;
        if (fWhat & CPUMCTX_EXTRN_CS)
            aenmNames[iReg++] = WHvX64RegisterCs;
        if (fWhat & CPUMCTX_EXTRN_SS)
            aenmNames[iReg++] = WHvX64RegisterSs;
        if (fWhat & CPUMCTX_EXTRN_DS)
            aenmNames[iReg++] = WHvX64RegisterDs;
        if (fWhat & CPUMCTX_EXTRN_FS)
            aenmNames[iReg++] = WHvX64RegisterFs;
        if (fWhat & CPUMCTX_EXTRN_GS)
            aenmNames[iReg++] = WHvX64RegisterGs;
    }

    /* Descriptor tables. */
    if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_LDTR)
            aenmNames[iReg++] = WHvX64RegisterLdtr;
        if (fWhat & CPUMCTX_EXTRN_TR)
            aenmNames[iReg++] = WHvX64RegisterTr;
        if (fWhat & CPUMCTX_EXTRN_IDTR)
            aenmNames[iReg++] = WHvX64RegisterIdtr;
        if (fWhat & CPUMCTX_EXTRN_GDTR)
            aenmNames[iReg++] = WHvX64RegisterGdtr;
    }

    /* Control registers. */
    if (fWhat & CPUMCTX_EXTRN_CR_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_CR0)
            aenmNames[iReg++] = WHvX64RegisterCr0;
        if (fWhat & CPUMCTX_EXTRN_CR2)
            aenmNames[iReg++] = WHvX64RegisterCr2;
        if (fWhat & CPUMCTX_EXTRN_CR3)
            aenmNames[iReg++] = WHvX64RegisterCr3;
        if (fWhat & CPUMCTX_EXTRN_CR4)
            aenmNames[iReg++] = WHvX64RegisterCr4;
    }
    if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
        aenmNames[iReg++] = WHvX64RegisterCr8;

    /* Debug registers. */
    if (fWhat & CPUMCTX_EXTRN_DR7)
        aenmNames[iReg++] = WHvX64RegisterDr7;
    if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
    {
        if (!(fWhat & CPUMCTX_EXTRN_DR7) && (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_DR7))
        {
            fWhat |= CPUMCTX_EXTRN_DR7;
            aenmNames[iReg++] = WHvX64RegisterDr7;
        }
        aenmNames[iReg++] = WHvX64RegisterDr0;
        aenmNames[iReg++] = WHvX64RegisterDr1;
        aenmNames[iReg++] = WHvX64RegisterDr2;
        aenmNames[iReg++] = WHvX64RegisterDr3;
    }
    if (fWhat & CPUMCTX_EXTRN_DR6)
        aenmNames[iReg++] = WHvX64RegisterDr6;

    /* Floating point state. */
    if (fWhat & CPUMCTX_EXTRN_X87)
    {
        aenmNames[iReg++] = WHvX64RegisterFpMmx0;
        aenmNames[iReg++] = WHvX64RegisterFpMmx1;
        aenmNames[iReg++] = WHvX64RegisterFpMmx2;
        aenmNames[iReg++] = WHvX64RegisterFpMmx3;
        aenmNames[iReg++] = WHvX64RegisterFpMmx4;
        aenmNames[iReg++] = WHvX64RegisterFpMmx5;
        aenmNames[iReg++] = WHvX64RegisterFpMmx6;
        aenmNames[iReg++] = WHvX64RegisterFpMmx7;
        aenmNames[iReg++] = WHvX64RegisterFpControlStatus;
    }
    if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
        aenmNames[iReg++] = WHvX64RegisterXmmControlStatus;

    /* Vector state. */
    if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
    {
        aenmNames[iReg++] = WHvX64RegisterXmm0;
        aenmNames[iReg++] = WHvX64RegisterXmm1;
        aenmNames[iReg++] = WHvX64RegisterXmm2;
        aenmNames[iReg++] = WHvX64RegisterXmm3;
        aenmNames[iReg++] = WHvX64RegisterXmm4;
        aenmNames[iReg++] = WHvX64RegisterXmm5;
        aenmNames[iReg++] = WHvX64RegisterXmm6;
        aenmNames[iReg++] = WHvX64RegisterXmm7;
        aenmNames[iReg++] = WHvX64RegisterXmm8;
        aenmNames[iReg++] = WHvX64RegisterXmm9;
        aenmNames[iReg++] = WHvX64RegisterXmm10;
        aenmNames[iReg++] = WHvX64RegisterXmm11;
        aenmNames[iReg++] = WHvX64RegisterXmm12;
        aenmNames[iReg++] = WHvX64RegisterXmm13;
        aenmNames[iReg++] = WHvX64RegisterXmm14;
        aenmNames[iReg++] = WHvX64RegisterXmm15;
    }

    /* MSRs */
    // WHvX64RegisterTsc - don't touch
    if (fWhat & CPUMCTX_EXTRN_EFER)
        aenmNames[iReg++] = WHvX64RegisterEfer;
    if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
        aenmNames[iReg++] = WHvX64RegisterKernelGsBase;
    if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
    {
        aenmNames[iReg++] = WHvX64RegisterSysenterCs;
        aenmNames[iReg++] = WHvX64RegisterSysenterEip;
        aenmNames[iReg++] = WHvX64RegisterSysenterEsp;
    }
    if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
    {
        aenmNames[iReg++] = WHvX64RegisterStar;
        aenmNames[iReg++] = WHvX64RegisterLstar;
        aenmNames[iReg++] = WHvX64RegisterCstar;
        aenmNames[iReg++] = WHvX64RegisterSfmask;
    }

//#ifdef LOG_ENABLED
//    const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pVM);
//#endif
    if (fWhat & CPUMCTX_EXTRN_TSC_AUX)
        aenmNames[iReg++] = WHvX64RegisterTscAux;
    if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
    {
        aenmNames[iReg++] = WHvX64RegisterApicBase; /// @todo APIC BASE
        aenmNames[iReg++] = WHvX64RegisterPat;
#if 0 /*def LOG_ENABLED*/ /** @todo Check if WHvX64RegisterMsrMtrrCap works... */
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrCap;
#endif
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrDefType;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix64k00000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix16k80000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix16kA0000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kC0000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kC8000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kD0000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kD8000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kE0000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kE8000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kF0000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kF8000;
        /** @todo look for HvX64RegisterIa32MiscEnable and HvX64RegisterIa32FeatureControl? */
//#ifdef LOG_ENABLED
//        if (enmCpuVendor != CPUMCPUVENDOR_AMD)
//            aenmNames[iReg++] = HvX64RegisterIa32FeatureControl;
//#endif
    }

    /* Interruptibility. */
    if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
    {
        aenmNames[iReg++] = WHvRegisterInterruptState;
        aenmNames[iReg++] = WHvX64RegisterRip;
    }

    /* event injection */
    aenmNames[iReg++] = WHvRegisterPendingInterruption;
    aenmNames[iReg++] = WHvRegisterPendingEvent;

    size_t const cRegs = iReg;
    Assert(cRegs < RT_ELEMENTS(aenmNames));

    /*
     * Get the registers.
     */
    WHV_REGISTER_VALUE aValues[128];
    RT_ZERO(aValues);
    Assert(RT_ELEMENTS(aValues) >= cRegs);
    Assert(RT_ELEMENTS(aenmNames) >= cRegs);
#  ifdef NEM_WIN_INTERCEPT_NT_IO_CTLS
    Log12(("Calling WHvGetVirtualProcessorRegisters(%p, %u, %p, %u, %p)\n",
           pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, cRegs, aValues));
#  endif
    HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, (uint32_t)cRegs, aValues);
    AssertLogRelMsgReturn(SUCCEEDED(hrc),
                          ("WHvGetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
                           pVM->nem.s.hPartition, pVCpu->idCpu, cRegs, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
                          , VERR_NEM_GET_REGISTERS_FAILED);

    iReg = 0;
#  define GET_REG64(a_DstVar, a_enmName) do { \
        Assert(aenmNames[iReg] == (a_enmName)); \
        (a_DstVar) = aValues[iReg].Reg64; \
        iReg++; \
    } while (0)
#  define GET_REG64_LOG7(a_DstVar, a_enmName, a_szLogName) do { \
        Assert(aenmNames[iReg] == (a_enmName)); \
        if ((a_DstVar) != aValues[iReg].Reg64) \
            Log7(("NEM/%u: " a_szLogName " changed %RX64 -> %RX64\n", pVCpu->idCpu, (a_DstVar), aValues[iReg].Reg64)); \
        (a_DstVar) = aValues[iReg].Reg64; \
        iReg++; \
    } while (0)
#  define GET_REG128(a_DstVarLo, a_DstVarHi, a_enmName) do { \
        Assert(aenmNames[iReg] == a_enmName); \
        (a_DstVarLo) = aValues[iReg].Reg128.Low64; \
        (a_DstVarHi) = aValues[iReg].Reg128.High64; \
        iReg++; \
    } while (0)
#  define GET_SEG(a_SReg, a_enmName) do { \
        Assert(aenmNames[iReg] == (a_enmName)); \
        NEM_WIN_COPY_BACK_SEG(a_SReg, aValues[iReg].Segment); \
        iReg++; \
    } while (0)

    /* GPRs */
    if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_RAX)
            GET_REG64(pVCpu->cpum.GstCtx.rax, WHvX64RegisterRax);
        if (fWhat & CPUMCTX_EXTRN_RCX)
            GET_REG64(pVCpu->cpum.GstCtx.rcx, WHvX64RegisterRcx);
        if (fWhat & CPUMCTX_EXTRN_RDX)
            GET_REG64(pVCpu->cpum.GstCtx.rdx, WHvX64RegisterRdx);
        if (fWhat & CPUMCTX_EXTRN_RBX)
            GET_REG64(pVCpu->cpum.GstCtx.rbx, WHvX64RegisterRbx);
        if (fWhat & CPUMCTX_EXTRN_RSP)
            GET_REG64(pVCpu->cpum.GstCtx.rsp, WHvX64RegisterRsp);
        if (fWhat & CPUMCTX_EXTRN_RBP)
            GET_REG64(pVCpu->cpum.GstCtx.rbp, WHvX64RegisterRbp);
        if (fWhat & CPUMCTX_EXTRN_RSI)
            GET_REG64(pVCpu->cpum.GstCtx.rsi, WHvX64RegisterRsi);
        if (fWhat & CPUMCTX_EXTRN_RDI)
            GET_REG64(pVCpu->cpum.GstCtx.rdi, WHvX64RegisterRdi);
        if (fWhat & CPUMCTX_EXTRN_R8_R15)
        {
            GET_REG64(pVCpu->cpum.GstCtx.r8, WHvX64RegisterR8);
            GET_REG64(pVCpu->cpum.GstCtx.r9, WHvX64RegisterR9);
            GET_REG64(pVCpu->cpum.GstCtx.r10, WHvX64RegisterR10);
            GET_REG64(pVCpu->cpum.GstCtx.r11, WHvX64RegisterR11);
            GET_REG64(pVCpu->cpum.GstCtx.r12, WHvX64RegisterR12);
            GET_REG64(pVCpu->cpum.GstCtx.r13, WHvX64RegisterR13);
            GET_REG64(pVCpu->cpum.GstCtx.r14, WHvX64RegisterR14);
            GET_REG64(pVCpu->cpum.GstCtx.r15, WHvX64RegisterR15);
        }
    }

    /* RIP & Flags */
    if (fWhat & CPUMCTX_EXTRN_RIP)
        GET_REG64(pVCpu->cpum.GstCtx.rip, WHvX64RegisterRip);
    if (fWhat & CPUMCTX_EXTRN_RFLAGS)
        GET_REG64(pVCpu->cpum.GstCtx.rflags.u, WHvX64RegisterRflags);

    /* Segments */
    if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_ES)
            GET_SEG(pVCpu->cpum.GstCtx.es, WHvX64RegisterEs);
        if (fWhat & CPUMCTX_EXTRN_CS)
            GET_SEG(pVCpu->cpum.GstCtx.cs, WHvX64RegisterCs);
        if (fWhat & CPUMCTX_EXTRN_SS)
            GET_SEG(pVCpu->cpum.GstCtx.ss, WHvX64RegisterSs);
        if (fWhat & CPUMCTX_EXTRN_DS)
            GET_SEG(pVCpu->cpum.GstCtx.ds, WHvX64RegisterDs);
        if (fWhat & CPUMCTX_EXTRN_FS)
            GET_SEG(pVCpu->cpum.GstCtx.fs, WHvX64RegisterFs);
        if (fWhat & CPUMCTX_EXTRN_GS)
            GET_SEG(pVCpu->cpum.GstCtx.gs, WHvX64RegisterGs);
    }

    /* Descriptor tables and the task segment. */
    if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_LDTR)
            GET_SEG(pVCpu->cpum.GstCtx.ldtr, WHvX64RegisterLdtr);

        if (fWhat & CPUMCTX_EXTRN_TR)
        {
            /* AMD-V is fine with loading TR in the AVAIL state, whereas Intel insists
               on BUSY.  So, to avoid triggering sanity assertions elsewhere in the
               code, always fix this up. */
            GET_SEG(pVCpu->cpum.GstCtx.tr, WHvX64RegisterTr);
            switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
            {
                case X86_SEL_TYPE_SYS_386_TSS_BUSY:
                case X86_SEL_TYPE_SYS_286_TSS_BUSY:
                    break;
                case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
                    pVCpu->cpum.GstCtx.tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
                    break;
                case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
                    pVCpu->cpum.GstCtx.tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;
                    break;
            }
        }
        if (fWhat & CPUMCTX_EXTRN_IDTR)
        {
            Assert(aenmNames[iReg] == WHvX64RegisterIdtr);
            pVCpu->cpum.GstCtx.idtr.cbIdt = aValues[iReg].Table.Limit;
            pVCpu->cpum.GstCtx.idtr.pIdt  = aValues[iReg].Table.Base;
            iReg++;
        }
        if (fWhat & CPUMCTX_EXTRN_GDTR)
        {
            Assert(aenmNames[iReg] == WHvX64RegisterGdtr);
            pVCpu->cpum.GstCtx.gdtr.cbGdt = aValues[iReg].Table.Limit;
            pVCpu->cpum.GstCtx.gdtr.pGdt  = aValues[iReg].Table.Base;
            iReg++;
        }
    }

    /* Control registers. */
    bool fMaybeChangedMode = false;
    bool fUpdateCr3        = false;
    if (fWhat & CPUMCTX_EXTRN_CR_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_CR0)
        {
            Assert(aenmNames[iReg] == WHvX64RegisterCr0);
            if (pVCpu->cpum.GstCtx.cr0 != aValues[iReg].Reg64)
            {
                CPUMSetGuestCR0(pVCpu, aValues[iReg].Reg64);
                fMaybeChangedMode = true;
            }
            iReg++;
        }
        if (fWhat & CPUMCTX_EXTRN_CR2)
            GET_REG64(pVCpu->cpum.GstCtx.cr2, WHvX64RegisterCr2);
        if (fWhat & CPUMCTX_EXTRN_CR3)
        {
            if (pVCpu->cpum.GstCtx.cr3 != aValues[iReg].Reg64)
            {
                CPUMSetGuestCR3(pVCpu, aValues[iReg].Reg64);
                fUpdateCr3 = true;
            }
            iReg++;
        }
        if (fWhat & CPUMCTX_EXTRN_CR4)
        {
            if (pVCpu->cpum.GstCtx.cr4 != aValues[iReg].Reg64)
            {
                CPUMSetGuestCR4(pVCpu, aValues[iReg].Reg64);
                fMaybeChangedMode = true;
            }
            iReg++;
        }
    }
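    /* Hyper-V hands us CR8, which holds bits 7:4 of the APIC TPR; shift it
       back up when passing the value on to the APIC. */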
    if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
    {
        Assert(aenmNames[iReg] == WHvX64RegisterCr8);
        APICSetTpr(pVCpu, (uint8_t)aValues[iReg].Reg64 << 4);
        iReg++;
    }

    /* Debug registers. */
    if (fWhat & CPUMCTX_EXTRN_DR7)
    {
        Assert(aenmNames[iReg] == WHvX64RegisterDr7);
        if (pVCpu->cpum.GstCtx.dr[7] != aValues[iReg].Reg64)
            CPUMSetGuestDR7(pVCpu, aValues[iReg].Reg64);
        pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_DR7; /* Hack alert! Avoids asserting when processing CPUMCTX_EXTRN_DR0_DR3. */
        iReg++;
    }
    if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
    {
        Assert(aenmNames[iReg] == WHvX64RegisterDr0);
        Assert(aenmNames[iReg+3] == WHvX64RegisterDr3);
        if (pVCpu->cpum.GstCtx.dr[0] != aValues[iReg].Reg64)
            CPUMSetGuestDR0(pVCpu, aValues[iReg].Reg64);
        iReg++;
        if (pVCpu->cpum.GstCtx.dr[1] != aValues[iReg].Reg64)
            CPUMSetGuestDR1(pVCpu, aValues[iReg].Reg64);
        iReg++;
        if (pVCpu->cpum.GstCtx.dr[2] != aValues[iReg].Reg64)
            CPUMSetGuestDR2(pVCpu, aValues[iReg].Reg64);
        iReg++;
        if (pVCpu->cpum.GstCtx.dr[3] != aValues[iReg].Reg64)
            CPUMSetGuestDR3(pVCpu, aValues[iReg].Reg64);
        iReg++;
    }
    if (fWhat & CPUMCTX_EXTRN_DR6)
    {
        Assert(aenmNames[iReg] == WHvX64RegisterDr6);
        if (pVCpu->cpum.GstCtx.dr[6] != aValues[iReg].Reg64)
            CPUMSetGuestDR6(pVCpu, aValues[iReg].Reg64);
        iReg++;
    }

    /* Floating point state. */
    if (fWhat & CPUMCTX_EXTRN_X87)
    {
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aRegs[0].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[0].au64[1], WHvX64RegisterFpMmx0);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aRegs[1].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[1].au64[1], WHvX64RegisterFpMmx1);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aRegs[2].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[2].au64[1], WHvX64RegisterFpMmx2);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aRegs[3].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[3].au64[1], WHvX64RegisterFpMmx3);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aRegs[4].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[4].au64[1], WHvX64RegisterFpMmx4);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aRegs[5].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[5].au64[1], WHvX64RegisterFpMmx5);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aRegs[6].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[6].au64[1], WHvX64RegisterFpMmx6);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aRegs[7].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[7].au64[1], WHvX64RegisterFpMmx7);

        Assert(aenmNames[iReg] == WHvX64RegisterFpControlStatus);
        pVCpu->cpum.GstCtx.XState.x87.FCW    = aValues[iReg].FpControlStatus.FpControl;
        pVCpu->cpum.GstCtx.XState.x87.FSW    = aValues[iReg].FpControlStatus.FpStatus;
        pVCpu->cpum.GstCtx.XState.x87.FTW    = aValues[iReg].FpControlStatus.FpTag
                                             /*| (aValues[iReg].FpControlStatus.Reserved << 8)*/;
        pVCpu->cpum.GstCtx.XState.x87.FOP    = aValues[iReg].FpControlStatus.LastFpOp;
        pVCpu->cpum.GstCtx.XState.x87.FPUIP  = (uint32_t)aValues[iReg].FpControlStatus.LastFpRip;
        pVCpu->cpum.GstCtx.XState.x87.CS     = (uint16_t)(aValues[iReg].FpControlStatus.LastFpRip >> 32);
        pVCpu->cpum.GstCtx.XState.x87.Rsrvd1 = (uint16_t)(aValues[iReg].FpControlStatus.LastFpRip >> 48);
        iReg++;
    }

    if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
    {
        Assert(aenmNames[iReg] == WHvX64RegisterXmmControlStatus);
        if (fWhat & CPUMCTX_EXTRN_X87)
        {
            pVCpu->cpum.GstCtx.XState.x87.FPUDP  = (uint32_t)aValues[iReg].XmmControlStatus.LastFpRdp;
            pVCpu->cpum.GstCtx.XState.x87.DS     = (uint16_t)(aValues[iReg].XmmControlStatus.LastFpRdp >> 32);
            pVCpu->cpum.GstCtx.XState.x87.Rsrvd2 = (uint16_t)(aValues[iReg].XmmControlStatus.LastFpRdp >> 48);
        }
        pVCpu->cpum.GstCtx.XState.x87.MXCSR      = aValues[iReg].XmmControlStatus.XmmStatusControl;
        pVCpu->cpum.GstCtx.XState.x87.MXCSR_MASK = aValues[iReg].XmmControlStatus.XmmStatusControlMask; /** @todo ??? (Isn't this an output field?) */
        iReg++;
    }

    /* Vector state. */
    if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
    {
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 0].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 0].uXmm.s.Hi, WHvX64RegisterXmm0);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 1].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 1].uXmm.s.Hi, WHvX64RegisterXmm1);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 2].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 2].uXmm.s.Hi, WHvX64RegisterXmm2);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 3].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 3].uXmm.s.Hi, WHvX64RegisterXmm3);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 4].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 4].uXmm.s.Hi, WHvX64RegisterXmm4);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 5].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 5].uXmm.s.Hi, WHvX64RegisterXmm5);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 6].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 6].uXmm.s.Hi, WHvX64RegisterXmm6);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 7].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 7].uXmm.s.Hi, WHvX64RegisterXmm7);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 8].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 8].uXmm.s.Hi, WHvX64RegisterXmm8);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 9].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 9].uXmm.s.Hi, WHvX64RegisterXmm9);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[10].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[10].uXmm.s.Hi, WHvX64RegisterXmm10);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[11].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[11].uXmm.s.Hi, WHvX64RegisterXmm11);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[12].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[12].uXmm.s.Hi, WHvX64RegisterXmm12);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[13].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[13].uXmm.s.Hi, WHvX64RegisterXmm13);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[14].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[14].uXmm.s.Hi, WHvX64RegisterXmm14);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[15].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[15].uXmm.s.Hi, WHvX64RegisterXmm15);
    }

    /* MSRs */
    // WHvX64RegisterTsc - don't touch
    if (fWhat & CPUMCTX_EXTRN_EFER)
    {
        Assert(aenmNames[iReg] == WHvX64RegisterEfer);
        if (aValues[iReg].Reg64 != pVCpu->cpum.GstCtx.msrEFER)
        {
            Log7(("NEM/%u: MSR EFER changed %RX64 -> %RX64\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.msrEFER, aValues[iReg].Reg64));
            if ((aValues[iReg].Reg64 ^ pVCpu->cpum.GstCtx.msrEFER) & MSR_K6_EFER_NXE)
                PGMNotifyNxeChanged(pVCpu, RT_BOOL(aValues[iReg].Reg64 & MSR_K6_EFER_NXE));
            pVCpu->cpum.GstCtx.msrEFER = aValues[iReg].Reg64;
            fMaybeChangedMode = true;
        }
        iReg++;
    }
    if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrKERNELGSBASE, WHvX64RegisterKernelGsBase, "MSR KERNEL_GS_BASE");
    if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
    {
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.SysEnter.cs,  WHvX64RegisterSysenterCs,  "MSR SYSENTER.CS");
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.SysEnter.eip, WHvX64RegisterSysenterEip, "MSR SYSENTER.EIP");
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.SysEnter.esp, WHvX64RegisterSysenterEsp, "MSR SYSENTER.ESP");
    }
    if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
    {
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrSTAR,   WHvX64RegisterStar,   "MSR STAR");
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrLSTAR,  WHvX64RegisterLstar,  "MSR LSTAR");
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrCSTAR,  WHvX64RegisterCstar,  "MSR CSTAR");
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrSFMASK, WHvX64RegisterSfmask, "MSR SFMASK");
    }
    if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
    {
        PCPUMCTXMSRS const pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
        if (fWhat & CPUMCTX_EXTRN_TSC_AUX)
            GET_REG64_LOG7(pCtxMsrs->msr.TscAux, WHvX64RegisterTscAux, "MSR TSC_AUX");
        if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
        {
            Assert(aenmNames[iReg] == WHvX64RegisterApicBase);
            const uint64_t uOldBase = APICGetBaseMsrNoCheck(pVCpu);
            if (aValues[iReg].Reg64 != uOldBase)
            {
                Log7(("NEM/%u: MSR APICBase changed %RX64 -> %RX64 (%RX64)\n",
                      pVCpu->idCpu, uOldBase, aValues[iReg].Reg64, aValues[iReg].Reg64 ^ uOldBase));
                int rc2 = APICSetBaseMsr(pVCpu, aValues[iReg].Reg64);
                AssertLogRelMsg(rc2 == VINF_SUCCESS, ("%Rrc %RX64\n", rc2, aValues[iReg].Reg64));
            }
            iReg++;

            GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrPAT, WHvX64RegisterPat, "MSR PAT");
#if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
            GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrPAT, WHvX64RegisterMsrMtrrCap, "MSR MTRR_CAP");
#endif
            GET_REG64_LOG7(pCtxMsrs->msr.MtrrDefType,      WHvX64RegisterMsrMtrrDefType,     "MSR MTRR_DEF_TYPE");
            GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix64K_00000, WHvX64RegisterMsrMtrrFix64k00000, "MSR MTRR_FIX_64K_00000");
            GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix16K_80000, WHvX64RegisterMsrMtrrFix16k80000, "MSR MTRR_FIX_16K_80000");
            GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix16K_A0000, WHvX64RegisterMsrMtrrFix16kA0000, "MSR MTRR_FIX_16K_A0000");
            GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_C0000,  WHvX64RegisterMsrMtrrFix4kC0000,  "MSR MTRR_FIX_4K_C0000");
            GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_C8000,  WHvX64RegisterMsrMtrrFix4kC8000,  "MSR MTRR_FIX_4K_C8000");
            GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_D0000,  WHvX64RegisterMsrMtrrFix4kD0000,  "MSR MTRR_FIX_4K_D0000");
            GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_D8000,  WHvX64RegisterMsrMtrrFix4kD8000,  "MSR MTRR_FIX_4K_D8000");
            GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_E0000,  WHvX64RegisterMsrMtrrFix4kE0000,  "MSR MTRR_FIX_4K_E0000");
            GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_E8000,  WHvX64RegisterMsrMtrrFix4kE8000,  "MSR MTRR_FIX_4K_E8000");
            GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_F0000,  WHvX64RegisterMsrMtrrFix4kF0000,  "MSR MTRR_FIX_4K_F0000");
            GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_F8000,  WHvX64RegisterMsrMtrrFix4kF8000,  "MSR MTRR_FIX_4K_F8000");
            /** @todo look for HvX64RegisterIa32MiscEnable and HvX64RegisterIa32FeatureControl? */
        }
    }

    /* Interruptibility. */
    if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
    {
        Assert(aenmNames[iReg] == WHvRegisterInterruptState);
        Assert(aenmNames[iReg + 1] == WHvX64RegisterRip);

        if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_INHIBIT_INT))
        {
            pVCpu->nem.s.fLastInterruptShadow = aValues[iReg].InterruptState.InterruptShadow;
            if (aValues[iReg].InterruptState.InterruptShadow)
                EMSetInhibitInterruptsPC(pVCpu, aValues[iReg + 1].Reg64);
            else
                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
        }

        if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_INHIBIT_NMI))
        {
            if (aValues[iReg].InterruptState.NmiMasked)
                VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
            else
                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
        }

        fWhat |= CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI;
        iReg += 2;
    }

    /* Event injection. */
    /// @todo WHvRegisterPendingInterruption
    Assert(aenmNames[iReg] == WHvRegisterPendingInterruption);
    if (aValues[iReg].PendingInterruption.InterruptionPending)
    {
        Log7(("PendingInterruption: type=%u vector=%#x errcd=%RTbool/%#x instr-len=%u nested=%u\n",
              aValues[iReg].PendingInterruption.InterruptionType, aValues[iReg].PendingInterruption.InterruptionVector,
              aValues[iReg].PendingInterruption.DeliverErrorCode, aValues[iReg].PendingInterruption.ErrorCode,
              aValues[iReg].PendingInterruption.InstructionLength, aValues[iReg].PendingInterruption.NestedEvent));
        AssertMsg((aValues[iReg].PendingInterruption.AsUINT64 & UINT64_C(0xfc00)) == 0,
                  ("%#RX64\n", aValues[iReg].PendingInterruption.AsUINT64));
    }

    /// @todo WHvRegisterPendingEvent

    /* Almost done, just update extrn flags and maybe change PGM mode. */
    pVCpu->cpum.GstCtx.fExtrn &= ~fWhat;
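    /* If nothing other than the NEM-internal event-inject flag (and keeper
       bits) remains external, everything has been imported; clear fExtrn. */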
    if (!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT))))
        pVCpu->cpum.GstCtx.fExtrn = 0;

    /* Typical. */
    if (!fMaybeChangedMode && !fUpdateCr3)
        return VINF_SUCCESS;

    /*
     * Slow.
     */
    if (fMaybeChangedMode)
    {
        int rc = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER,
                               false /* fForce */);
        AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_1);
    }

    if (fUpdateCr3)
    {
        int rc = PGMUpdateCR3(pVCpu, pVCpu->cpum.GstCtx.cr3);
        if (rc == VINF_SUCCESS)
        { /* likely */ }
        else
            AssertMsgFailedReturn(("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_2);
    }

    return VINF_SUCCESS;
# endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
}

#endif /* !IN_RING0 */


/**
 * Interface for importing state on demand (used by IEM).
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context CPU structure.
 * @param   fWhat   What to import, CPUMCTX_EXTRN_XXX.
 */
VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
{
    STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);

#ifdef IN_RING0
# ifdef NEM_WIN_WITH_RING0_RUNLOOP
    return nemR0WinImportState(pVCpu->pGVM, pVCpu, &pVCpu->cpum.GstCtx, fWhat, true /*fCanUpdateCr3*/);
# else
    RT_NOREF(pVCpu, fWhat);
    return VERR_NOT_IMPLEMENTED;
# endif
#else
    return nemHCWinCopyStateFromHyperV(pVCpu->pVMR3, pVCpu, fWhat);
#endif
}


/**
 * Query the CPU tick counter and optionally the TSC_AUX MSR value.
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context CPU structure.
 * @param   pcTicks Where to return the CPU tick count.
 * @param   puAux   Where to return the TSC_AUX register value.
 */
VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPUCC pVCpu, uint64_t *pcTicks, uint32_t *puAux)
{
    STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick);

#ifdef IN_RING3
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
    AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);

# if defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) || defined(NEM_WIN_WITH_RING0_RUNLOOP)
#  if !defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) && defined(NEM_WIN_WITH_RING0_RUNLOOP)
    if (pVM->nem.s.fUseRing0Runloop)
#  endif
    {
        /* Call ring-0 and get the values. */
        int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_QUERY_CPU_TICK, 0, NULL);
        AssertLogRelRCReturn(rc, rc);
        *pcTicks = pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks;
        if (puAux)
            *puAux = pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_TSC_AUX
                   ? pVCpu->nem.s.Hypercall.QueryCpuTick.uAux : CPUMGetGuestTscAux(pVCpu);
        return VINF_SUCCESS;
    }
# endif
# ifndef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS
    /* Call the official API. */
    WHV_REGISTER_NAME  aenmNames[2] = { WHvX64RegisterTsc, WHvX64RegisterTscAux };
    WHV_REGISTER_VALUE aValues[2]   = { {0, 0}, {0, 0} };
    Assert(RT_ELEMENTS(aenmNames) == RT_ELEMENTS(aValues));
    HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, 2, aValues);
    AssertLogRelMsgReturn(SUCCEEDED(hrc),
                          ("WHvGetVirtualProcessorRegisters(%p, %u,{tsc,tsc_aux},2,) -> %Rhrc (Last=%#x/%u)\n",
                           pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
                          , VERR_NEM_GET_REGISTERS_FAILED);
    *pcTicks = aValues[0].Reg64;
    if (puAux)
        *puAux = pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_TSC_AUX ? aValues[1].Reg64 : CPUMGetGuestTscAux(pVCpu);
    return VINF_SUCCESS;
# endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
#else /* IN_RING0 */
# ifdef NEM_WIN_WITH_RING0_RUNLOOP
    int rc = nemR0WinQueryCpuTick(pVCpu->pGVM, pVCpu, pcTicks, puAux);
    if (RT_SUCCESS(rc) && puAux && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_TSC_AUX))
        *puAux = CPUMGetGuestTscAux(pVCpu);
    return rc;
# else
    RT_NOREF(pVCpu, pcTicks, puAux);
    return VERR_NOT_IMPLEMENTED;
# endif
#endif /* IN_RING0 */
}


/**
 * Resumes CPU clock (TSC) on all virtual CPUs.
 *
 * This is called by TM when the VM is started, restored, resumed or similar.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context CPU structure of the calling EMT.
 * @param   uPausedTscValue The TSC value at the time of pausing.
 */
VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uPausedTscValue)
{
#ifdef IN_RING0
# ifdef NEM_WIN_WITH_RING0_RUNLOOP
    return nemR0WinResumeCpuTickOnAll(pVM, pVCpu, uPausedTscValue);
# else
    RT_NOREF(pVM, pVCpu, uPausedTscValue);
    return VERR_NOT_IMPLEMENTED;
# endif
#else /* IN_RING3 */
    VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
    AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);

# if defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) || defined(NEM_WIN_WITH_RING0_RUNLOOP)
#  if !defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) && defined(NEM_WIN_WITH_RING0_RUNLOOP)
    if (pVM->nem.s.fUseRing0Runloop)
#  endif
    {
        /* Call ring-0 and do it all there. */
        return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL, uPausedTscValue, NULL);
    }
# endif
# ifndef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS
    /*
     * Call the official API to do the job.
     */
    if (pVM->cCpus > 1)
        RTThreadYield(); /* Try to decrease the chance that we get rescheduled in the middle. */

    /* Start with the first CPU. */
    WHV_REGISTER_NAME  enmName = WHvX64RegisterTsc;
    WHV_REGISTER_VALUE Value   = {0, 0};
    Value.Reg64 = uPausedTscValue;
    uint64_t const uFirstTsc = ASMReadTSC();
    HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, 0 /*iCpu*/, &enmName, 1, &Value);
    AssertLogRelMsgReturn(SUCCEEDED(hrc),
                          ("WHvSetVirtualProcessorRegisters(%p, 0,{tsc},1,%#RX64) -> %Rhrc (Last=%#x/%u)\n",
                           pVM->nem.s.hPartition, uPausedTscValue, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
                          , VERR_NEM_SET_TSC);
1280
1281 /* Do the other CPUs, adjusting for elapsed TSC and keeping finger crossed
1282 that we don't introduce too much drift here. */
1283 for (VMCPUID iCpu = 1; iCpu < pVM->cCpus; iCpu++)
1284 {
1285 Assert(enmName == WHvX64RegisterTsc);
1286 const uint64_t offDelta = (ASMReadTSC() - uFirstTsc);
1287 Value.Reg64 = uPausedTscValue + offDelta;
1288 hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, iCpu, &enmName, 1, &Value);
1289 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1290 ("WHvSetVirtualProcessorRegisters(%p, 0,{tsc},2,%#RX64 + %#RX64) -> %Rhrc (Last=%#x/%u)\n",
1291 pVM->nem.s.hPartition, iCpu, uPausedTscValue, offDelta, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1292 , VERR_NEM_SET_TSC);
1293 }
1294
1295 return VINF_SUCCESS;
1296# endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
1297#endif /* IN_RING3 */
1298}
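
/*
 * A minimal sketch (not built) of the TSC drift compensation used above: each
 * additional vCPU gets the paused TSC value plus however many host cycles have
 * elapsed since the first vCPU was set, keeping the guest-visible TSCs roughly
 * in sync. The helper name is hypothetical; only ASMReadTSC() is real.
 */
#if 0
static void nemExampleResumeTscSketch(uint64_t uPausedTscValue, uint32_t cCpus, uint64_t *pauNewTsc)
{
    uint64_t const uFirstTsc = ASMReadTSC();
    pauNewTsc[0] = uPausedTscValue;
    for (uint32_t iCpu = 1; iCpu < cCpus; iCpu++)
        pauNewTsc[iCpu] = uPausedTscValue + (ASMReadTSC() - uFirstTsc); /* add host cycles spent so far */
}
#endif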
1299
1300#ifdef NEMWIN_NEED_GET_REGISTER
1301# if defined(IN_RING0) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
1302/** Worker for assertion macro. */
1303NEM_TMPL_STATIC int nemHCWinGetRegister(PVMCPUCC pVCpu, uint32_t enmReg, HV_REGISTER_VALUE *pRetValue)
1304{
1305 RT_ZERO(*pRetValue);
1306# ifdef IN_RING3
1307 RT_NOREF(pVCpu, enmReg);
1308 return VERR_NOT_IMPLEMENTED;
1309# else
1311
1312 /*
1313 * Hypercall parameters.
1314 */
1315 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pVCpu->nem.s.HypercallData.pbPage;
1316 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
1317 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
1318
1319 pInput->PartitionId = pVCpu->pGVM->nemr0.s.idHvPartition;
1320 pInput->VpIndex = pVCpu->idCpu;
1321 pInput->fFlags = 0;
1322 pInput->Names[0] = (HV_REGISTER_NAME)enmReg;
1323
1324 size_t const cbInput = RT_ALIGN_Z(RT_UOFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[1]), 32);
1325 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
1326 RT_BZERO(paValues, sizeof(paValues[0]) * 1);
1327
1328 /*
1329 * Make the hypercall and copy out the value.
1330 */
1331 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, 1),
1332 pVCpu->nem.s.HypercallData.HCPhysPage,
1333 pVCpu->nem.s.HypercallData.HCPhysPage + cbInput);
1334 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(1), ("uResult=%RX64 cRegs=%#x\n", uResult, 1),
1335 VERR_NEM_GET_REGISTERS_FAILED);
1336
1337 *pRetValue = paValues[0];
1338 return VINF_SUCCESS;
1339# endif
1340}
1341# else
1342/** Worker for assertion macro. */
1343NEM_TMPL_STATIC int nemR3WinGetRegister(PVMCPUCC pVCpu, uint32_t enmReg, WHV_REGISTER_VALUE *pRetValue)
1344{
1345 RT_ZERO(*pRetValue);
1346 RT_NOREF(pVCpu, enmReg);
1347 return VERR_NOT_IMPLEMENTED;
1348}
1349# endif
1350#endif
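
/*
 * For reference, a hedged sketch of what HV_MAKE_CALL_INFO and
 * HV_MAKE_CALL_REP_RET presumably encode per the Hyper-V TLFS: the hypercall
 * input value carries the call code in bits 0..15 and the rep count in bits
 * 32..43, while the result value reports the completed rep count in the same
 * bit range. The helper names below are made up for illustration.
 */
#if 0
DECLINLINE(uint64_t) nemExampleMakeCallInfo(uint16_t uCallCode, uint32_t cReps)
{
    return (uint64_t)uCallCode | ((uint64_t)(cReps & 0xfff) << 32);
}

DECLINLINE(uint32_t) nemExampleCallRepsCompleted(uint64_t uResult)
{
    return (uint32_t)((uResult >> 32) & 0xfff);
}
#endif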
1351
1352
1353#ifdef LOG_ENABLED
1354/**
1355 * Get the virtual processor running status.
1356 */
1357DECLINLINE(VID_PROCESSOR_STATUS) nemHCWinCpuGetRunningStatus(PVMCPUCC pVCpu)
1358{
1359# ifdef IN_RING0
1360 NOREF(pVCpu);
1361 return VidProcessorStatusUndefined;
1362# else
1363 RTERRVARS Saved;
1364 RTErrVarsSave(&Saved);
1365
1366 /*
1367 * This API is disabled in release builds, it seems. On build 17101 it requires
1368 * the following patch to be enabled (windbg): eb vid+12180 0f 84 98 00 00 00
1369 */
1370 VID_PROCESSOR_STATUS enmCpuStatus = VidProcessorStatusUndefined;
1371 NTSTATUS rcNt = g_pfnVidGetVirtualProcessorRunningStatus(pVCpu->pVMR3->nem.s.hPartitionDevice, pVCpu->idCpu, &enmCpuStatus);
1372 AssertRC(rcNt);
1373
1374 RTErrVarsRestore(&Saved);
1375 return enmCpuStatus;
1376# endif
1377}
1378#endif /* LOG_ENABLED */
1379
1380
1381#if defined(NEM_WIN_USE_OUR_OWN_RUN_API) || defined(NEM_WIN_WITH_RING0_RUNLOOP)
1382# ifdef IN_RING3 /* hopefully not needed in ring-0, as we'd need KTHREADs and KeAlertThread. */
1383/**
1384 * Our own WHvCancelRunVirtualProcessor that can later be moved to ring-0.
1385 *
1386 * This is an experiment only.
1387 *
1388 * @returns VBox status code.
1389 * @param pVM The cross context VM structure.
1390 * @param pVCpu The cross context virtual CPU structure of the
1391 * calling EMT.
1392 */
1393NEM_TMPL_STATIC int nemHCWinCancelRunVirtualProcessor(PVMCC pVM, PVMCPUCC pVCpu)
1394{
1395 /*
1396 * Work the state.
1397 *
1398 * From the looks of things, we should let the EMT call VidStopVirtualProcessor.
1399 * So, we just need to modify the state and kick the EMT if it's waiting on
1400 * messages. For the latter we use NtAlertThread / KeAlertThread.
1401 */
1402 for (;;)
1403 {
1404 VMCPUSTATE enmState = VMCPU_GET_STATE(pVCpu);
1405 switch (enmState)
1406 {
1407 case VMCPUSTATE_STARTED_EXEC_NEM:
1408 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED, VMCPUSTATE_STARTED_EXEC_NEM))
1409 {
1410 DBGFTRACE_CUSTOM(pVM, "VMCPUSTATE_STARTED_EXEC_NEM -> CANCELED");
1411 Log8(("nemHCWinCancelRunVirtualProcessor: Switched %u to canceled state\n", pVCpu->idCpu));
1412 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatCancelChangedState);
1413 return VINF_SUCCESS;
1414 }
1415 break;
1416
1417 case VMCPUSTATE_STARTED_EXEC_NEM_WAIT:
1418 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED, VMCPUSTATE_STARTED_EXEC_NEM_WAIT))
1419 {
1420 DBGFTRACE_CUSTOM(pVM, "VMCPUSTATE_STARTED_EXEC_NEM_WAIT -> CANCELED");
1421# ifdef IN_RING0
1422 NTSTATUS rcNt = KeAlertThread(??);
1423 DBGFTRACE_CUSTOM(pVM, "KeAlertThread -> %#x", rcNt);
1424# else
1425 NTSTATUS rcNt = NtAlertThread(pVCpu->nem.s.hNativeThreadHandle);
1426 DBGFTRACE_CUSTOM(pVM, "NtAlertThread -> %#x", rcNt);
1427# endif
1428 Log8(("nemHCWinCancelRunVirtualProcessor: Alerted %u: %#x\n", pVCpu->idCpu, rcNt));
1429 Assert(rcNt == STATUS_SUCCESS);
1430 if (NT_SUCCESS(rcNt))
1431 {
1432 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatCancelAlertedThread);
1433 return VINF_SUCCESS;
1434 }
1435 AssertLogRelMsgFailedReturn(("NtAlertThread failed: %#x\n", rcNt), RTErrConvertFromNtStatus(rcNt));
1436 }
1437 break;
1438
1439 default:
1440 return VINF_SUCCESS;
1441 }
1442
1443 ASMNopPause();
1444 RT_NOREF(pVM);
1445 }
1446}
1447# endif /* IN_RING3 */
1448#endif /* NEM_WIN_USE_OUR_OWN_RUN_API || NEM_WIN_WITH_RING0_RUNLOOP */
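
/*
 * Summary of the cancel state machine above:
 *   STARTED_EXEC_NEM      -> STARTED_EXEC_NEM_CANCELED: the EMT is executing
 *                            and will notice the state change on its own.
 *   STARTED_EXEC_NEM_WAIT -> STARTED_EXEC_NEM_CANCELED: the EMT is blocked on
 *                            the message slot and additionally gets an
 *                            NtAlertThread (or KeAlertThread) kick.
 *   Anything else: nothing to cancel, return immediately.
 */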
1449
1450
1451#ifdef LOG_ENABLED
1452/**
1453 * Logs the current CPU state.
1454 */
1455NEM_TMPL_STATIC void nemHCWinLogState(PVMCC pVM, PVMCPUCC pVCpu)
1456{
1457 if (LogIs3Enabled())
1458 {
1459# if 0 // def IN_RING3 - causes lazy state import assertions all over CPUM.
1460 char szRegs[4096];
1461 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
1462 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
1463 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
1464 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
1465 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
1466 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
1467 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
1468 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
1469 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
1470 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
1471 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
1472 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
1473 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
1474 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
1475 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
1476 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
1477 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
1478 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
1479 " efer=%016VR{efer}\n"
1480 " pat=%016VR{pat}\n"
1481 " sf_mask=%016VR{sf_mask}\n"
1482 "krnl_gs_base=%016VR{krnl_gs_base}\n"
1483 " lstar=%016VR{lstar}\n"
1484 " star=%016VR{star} cstar=%016VR{cstar}\n"
1485 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
1486 );
1487
1488 char szInstr[256];
1489 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
1490 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
1491 szInstr, sizeof(szInstr), NULL);
1492 Log3(("%s%s\n", szRegs, szInstr));
1493# else
1494 /** @todo state logging in ring-0 */
1495 RT_NOREF(pVM, pVCpu);
1496# endif
1497 }
1498}
1499#endif /* LOG_ENABLED */
1500
1501
1502/** Macro used by nemHCWinExecStateToLogStr and nemR3WinExecStateToLogStr. */
1503#define SWITCH_IT(a_szPrefix) \
1504 do \
1505 switch (u)\
1506 { \
1507 case 0x00: return a_szPrefix ""; \
1508 case 0x01: return a_szPrefix ",Pnd"; \
1509 case 0x02: return a_szPrefix ",Dbg"; \
1510 case 0x03: return a_szPrefix ",Pnd,Dbg"; \
1511 case 0x04: return a_szPrefix ",Shw"; \
1512 case 0x05: return a_szPrefix ",Pnd,Shw"; \
1513 case 0x06: return a_szPrefix ",Shw,Dbg"; \
1514 case 0x07: return a_szPrefix ",Pnd,Shw,Dbg"; \
1515 default: AssertFailedReturn("WTF?"); \
1516 } \
1517 while (0)
1518
1519#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
1520/**
1521 * Translates the execution state bitfield into a short log string, VID version.
1522 *
1523 * @returns Read-only log string.
1524 * @param pMsgHdr The header whose state to summarize.
1525 */
1526static const char *nemHCWinExecStateToLogStr(HV_X64_INTERCEPT_MESSAGE_HEADER const *pMsgHdr)
1527{
1528 unsigned u = (unsigned)pMsgHdr->ExecutionState.InterruptionPending
1529 | ((unsigned)pMsgHdr->ExecutionState.DebugActive << 1)
1530 | ((unsigned)pMsgHdr->ExecutionState.InterruptShadow << 2);
1531 if (pMsgHdr->ExecutionState.EferLma)
1532 SWITCH_IT("LM");
1533 else if (pMsgHdr->ExecutionState.Cr0Pe)
1534 SWITCH_IT("PM");
1535 else
1536 SWITCH_IT("RM");
1537}
1538#elif defined(IN_RING3)
1539/**
1540 * Translates the execution state bitfield into a short log string, WinHv version.
1541 *
1542 * @returns Read-only log string.
1543 * @param pExitCtx The exit context whose state to summarize.
1544 */
1545static const char *nemR3WinExecStateToLogStr(WHV_VP_EXIT_CONTEXT const *pExitCtx)
1546{
1547 unsigned u = (unsigned)pExitCtx->ExecutionState.InterruptionPending
1548 | ((unsigned)pExitCtx->ExecutionState.DebugActive << 1)
1549 | ((unsigned)pExitCtx->ExecutionState.InterruptShadow << 2);
1550 if (pExitCtx->ExecutionState.EferLma)
1551 SWITCH_IT("LM");
1552 else if (pExitCtx->ExecutionState.Cr0Pe)
1553 SWITCH_IT("PM");
1554 else
1555 SWITCH_IT("RM");
1556}
1557#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
1558#undef SWITCH_IT
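
/*
 * Worked example for the helpers above: a long mode guest with an interrupt
 * pending and an active interrupt shadow yields u = 0x05, so the returned
 * string is "LM,Pnd,Shw".
 */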
1559
1560
1561#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
1562/**
1563 * Advances the guest RIP and clears EFLAGS.RF, VID version.
1564 *
1565 * This may clear VMCPU_FF_INHIBIT_INTERRUPTS.
1566 *
1567 * @param pVCpu The cross context virtual CPU structure.
1568 * @param pMsgHdr The intercept message header.
1569 * @param cbMinInstr The minimum instruction length, or 1 if unknown.
1570 */
1571DECLINLINE(void)
1572nemHCWinAdvanceGuestRipAndClearRF(PVMCPUCC pVCpu, HV_X64_INTERCEPT_MESSAGE_HEADER const *pMsgHdr, uint8_t cbMinInstr)
1573{
1574 Assert(!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));
1575
1576 /* Advance the RIP. */
1577 Assert(pMsgHdr->InstructionLength >= cbMinInstr); RT_NOREF_PV(cbMinInstr);
1578 pVCpu->cpum.GstCtx.rip += pMsgHdr->InstructionLength;
1579 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
1580
1581 /* Update interrupt inhibition. */
1582 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1583 { /* likely */ }
1584 else if (pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
1585 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1586}
1587#elif defined(IN_RING3)
1588/**
1589 * Advances the guest RIP and clears EFLAGS.RF, WinHv version.
1590 *
1591 * This may clear VMCPU_FF_INHIBIT_INTERRUPTS.
1592 *
1593 * @param pVCpu The cross context virtual CPU structure.
1594 * @param pExitCtx The exit context.
1595 * @param cbMinInstr The minimum instruction length, or 1 if unknown.
1596 */
1597DECLINLINE(void) nemR3WinAdvanceGuestRipAndClearRF(PVMCPUCC pVCpu, WHV_VP_EXIT_CONTEXT const *pExitCtx, uint8_t cbMinInstr)
1598{
1599 Assert(!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));
1600
1601 /* Advance the RIP. */
1602 Assert(pExitCtx->InstructionLength >= cbMinInstr); RT_NOREF_PV(cbMinInstr);
1603 pVCpu->cpum.GstCtx.rip += pExitCtx->InstructionLength;
1604 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
1605
1606 /* Update interrupt inhibition. */
1607 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1608 { /* likely */ }
1609 else if (pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
1610 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1611}
1612#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
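
/*
 * Example of the inhibition logic above: an STI at RIP 0x1000 records 0x1001
 * (the address following it) as the inhibit PC. While RIP still equals that
 * PC the force flag stays set; once a later exit advances RIP past it, the
 * helpers above clear VMCPU_FF_INHIBIT_INTERRUPTS.
 */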
1613
1614#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING3)
1615
1616NEM_TMPL_STATIC DECLCALLBACK(int)
1617nemHCWinUnmapOnePageCallback(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint8_t *pu2NemState, void *pvUser)
1618{
1619 RT_NOREF_PV(pvUser);
1620# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1621 int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys);
1622 AssertRC(rc);
1623 if (RT_SUCCESS(rc))
1624# else
1625 RT_NOREF_PV(pVCpu);
1626 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
1627 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
1628 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
1629 if (SUCCEEDED(hrc))
1630# endif
1631 {
1632 Log5(("NEM GPA unmap all: %RGp (cMappedPages=%u)\n", GCPhys, pVM->nem.s.cMappedPages - 1));
1633 *pu2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
1634 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
1635 }
1636 else
1637 {
1638# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1639 LogRel(("nemHCWinUnmapOnePageCallback: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));
1640# else
1641 LogRel(("nemHCWinUnmapOnePageCallback: GCPhys=%RGp %s hrc=%Rhrc (%#x) Last=%#x/%u (cMappedPages=%u)\n",
1642 GCPhys, g_apszPageStates[*pu2NemState], hrc, hrc, RTNtLastStatusValue(),
1643 RTNtLastErrorValue(), pVM->nem.s.cMappedPages));
1644# endif
1645 *pu2NemState = NEM_WIN_PAGE_STATE_NOT_SET;
1646 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
1647 }
1648 if (pVM->nem.s.cMappedPages > 0)
1649 ASMAtomicDecU32(&pVM->nem.s.cMappedPages);
1650 return VINF_SUCCESS;
1651}
1652
1653
1654/**
1655 * State to pass between nemHCWinHandleMemoryAccess / nemR3WinWHvHandleMemoryAccess
1656 * and nemHCWinHandleMemoryAccessPageCheckerCallback.
1657 */
1658typedef struct NEMHCWINHMACPCCSTATE
1659{
1660 /** Input: Write access. */
1661 bool fWriteAccess;
1662 /** Output: Set if we did something. */
1663 bool fDidSomething;
1664 /** Output: Set if we should resume. */
1665 bool fCanResume;
1666} NEMHCWINHMACPCCSTATE;
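
/*
 * Typical usage, mirroring the exit handlers further down (sketch only, the
 * surrounding variables are assumed):
 */
#if 0
NEMHCWINHMACPCCSTATE State = { true /*fWriteAccess*/, false, false };
PGMPHYSNEMPAGEINFO Info;
int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhys, State.fWriteAccess, &Info,
                                   nemHCWinHandleMemoryAccessPageCheckerCallback, &State);
#endif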
1667
1668/**
1669 * @callback_method_impl{FNPGMPHYSNEMCHECKPAGE,
1670 * Worker for nemR3WinHandleMemoryAccess; pvUser points to a
1671 * NEMHCWINHMACPCCSTATE structure. }
1672 */
1673NEM_TMPL_STATIC DECLCALLBACK(int)
1674nemHCWinHandleMemoryAccessPageCheckerCallback(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
1675{
1676 NEMHCWINHMACPCCSTATE *pState = (NEMHCWINHMACPCCSTATE *)pvUser;
1677 pState->fDidSomething = false;
1678 pState->fCanResume = false;
1679
1680 /* If A20 is disabled, we may need to make another query on the masked
1681 page to get the correct protection information. */
1682 uint8_t u2State = pInfo->u2NemState;
1683 RTGCPHYS GCPhysSrc;
1684# ifdef NEM_WIN_WITH_A20
1685 if ( pVM->nem.s.fA20Enabled
1686 || !NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
1687# endif
1688 GCPhysSrc = GCPhys;
1689# ifdef NEM_WIN_WITH_A20
1690 else
1691 {
1692 GCPhysSrc = GCPhys & ~(RTGCPHYS)RT_BIT_32(20);
1693 PGMPHYSNEMPAGEINFO Info2;
1694 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhysSrc, pState->fWriteAccess, &Info2, NULL, NULL);
1695 AssertRCReturn(rc, rc);
1696
1697 *pInfo = Info2;
1698 pInfo->u2NemState = u2State;
1699 }
1700# endif
1701
1702 /*
1703 * Consolidate current page state with actual page protection and access type.
1704 * We don't really consider downgrades here, as they shouldn't happen.
1705 */
1706# ifndef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1707 /** @todo Someone at Microsoft please explain:
1708 * I'm not sure WTF was going on, but I ended up in a loop if I remapped a
1709 * readonly page as writable (unmap, then map again). Specifically, this was an
1710 * issue with the big VRAM mapping at 0xe0000000 when booting DSL 4.4.1. So, in
1711 * the hope of working around that we no longer pre-map anything, just unmap stuff
1712 * and do it lazily here. And here we will first unmap, restart, and then remap
1713 * with new protection or backing.
1714 */
1715# endif
1716 int rc;
1717 switch (u2State)
1718 {
1719 case NEM_WIN_PAGE_STATE_UNMAPPED:
1720 case NEM_WIN_PAGE_STATE_NOT_SET:
1721 if (pInfo->fNemProt == NEM_PAGE_PROT_NONE)
1722 {
1723 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #1\n", GCPhys));
1724 return VINF_SUCCESS;
1725 }
1726
1727 /* Don't bother remapping it if it's a write request to a non-writable page. */
1728 if ( pState->fWriteAccess
1729 && !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE))
1730 {
1731 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #1w\n", GCPhys));
1732 return VINF_SUCCESS;
1733 }
1734
1735 /* Map the page. */
1736 rc = nemHCNativeSetPhysPage(pVM,
1737 pVCpu,
1738 GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
1739 GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
1740 pInfo->fNemProt,
1741 &u2State,
1742 true /*fBackingState*/);
1743 pInfo->u2NemState = u2State;
1744 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - synced => %s + %Rrc\n",
1745 GCPhys, g_apszPageStates[u2State], rc));
1746 pState->fDidSomething = true;
1747 pState->fCanResume = true;
1748 return rc;
1749
1750 case NEM_WIN_PAGE_STATE_READABLE:
1751 if ( !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1752 && (pInfo->fNemProt & (NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE)))
1753 {
1754 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #2\n", GCPhys));
1755 return VINF_SUCCESS;
1756 }
1757
1758# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1759 /* Upgrade page to writable. */
1760/** @todo test this*/
1761 if ( (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1762 && pState->fWriteAccess)
1763 {
1764 rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhys,
1765 HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE
1766 | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
1767 AssertRC(rc);
1768 if (RT_SUCCESS(rc))
1769 {
1770 STAM_REL_COUNTER_INC(&pVM->nem.s.StatRemapPage);
1771 pInfo->u2NemState = NEM_WIN_PAGE_STATE_WRITABLE;
1772 pState->fDidSomething = true;
1773 pState->fCanResume = true;
1774 Log5(("NEM GPA write-upgrade/exit: %RGp (was %s, cMappedPages=%u)\n",
1775 GCPhys, g_apszPageStates[u2State], pVM->nem.s.cMappedPages));
1776 }
1777 else
1778 STAM_REL_COUNTER_INC(&pVM->nem.s.StatRemapPageFailed);
1779 }
1780 else
1781 {
1782 /* Need to emulate the access. */
1783 AssertBreak(pInfo->fNemProt != NEM_PAGE_PROT_NONE); /* There should be no downgrades. */
1784 rc = VINF_SUCCESS;
1785 }
1786 return rc;
1787# else
1788 break;
1789# endif
1790
1791 case NEM_WIN_PAGE_STATE_WRITABLE:
1792 if (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1793 {
1794 if (pInfo->u2OldNemState == NEM_WIN_PAGE_STATE_WRITABLE)
1795 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #3a\n", GCPhys));
1796 else
1797 {
1798 pState->fCanResume = true;
1799 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #3b (%s -> %s)\n",
1800 GCPhys, g_apszPageStates[pInfo->u2OldNemState], g_apszPageStates[u2State]));
1801 }
1802 return VINF_SUCCESS;
1803 }
1804# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1805 AssertFailed(); /* There should be no downgrades. */
1806# endif
1807 break;
1808
1809 default:
1810 AssertLogRelMsgFailedReturn(("u2State=%#x\n", u2State), VERR_NEM_IPE_4);
1811 }
1812
1813 /*
1814 * Unmap and restart the instruction.
1815 * If this fails, which it does every so often, just unmap everything for now.
1816 */
1817# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1818 rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys);
1819 AssertRC(rc);
1820 if (RT_SUCCESS(rc))
1821# else
1822 /** @todo figure out whether we mess up the state or if it's WHv. */
1823 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
1824 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
1825 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
1826 if (SUCCEEDED(hrc))
1827# endif
1828 {
1829 pState->fDidSomething = true;
1830 pState->fCanResume = true;
1831 pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
1832 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
1833 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
1834 Log5(("NEM GPA unmapped/exit: %RGp (was %s, cMappedPages=%u)\n", GCPhys, g_apszPageStates[u2State], cMappedPages));
1835 return VINF_SUCCESS;
1836 }
1837 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
1838# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1839 LogRel(("nemHCWinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhys, rc));
1840 return rc;
1841# elif defined(VBOX_WITH_PGM_NEM_MODE)
1842 LogRel(("nemHCWinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp %s hrc=%Rhrc (%#x)\n",
1843 GCPhys, g_apszPageStates[u2State], hrc, hrc));
1844 return VERR_NEM_UNMAP_PAGES_FAILED;
1845# else
1846 LogRel(("nemHCWinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp %s hrc=%Rhrc (%#x) Last=%#x/%u (cMappedPages=%u)\n",
1847 GCPhys, g_apszPageStates[u2State], hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue(),
1848 pVM->nem.s.cMappedPages));
1849
1850 PGMPhysNemEnumPagesByState(pVM, pVCpu, NEM_WIN_PAGE_STATE_READABLE, nemHCWinUnmapOnePageCallback, NULL);
1851 Log(("nemHCWinHandleMemoryAccessPageCheckerCallback: Unmapped all (cMappedPages=%u)\n", pVM->nem.s.cMappedPages));
1852 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapAllPages);
1853
1854 pState->fDidSomething = true;
1855 pState->fCanResume = true;
1856 pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
1857 return VINF_SUCCESS;
1858# endif
1859}
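
/*
 * The page state handling above in brief: UNMAPPED/NOT_SET pages are mapped
 * lazily with the currently needed protection; READABLE pages hit by a write
 * are either upgraded in place (hypercall mode) or unmapped so the restarted
 * instruction remaps them; WRITABLE pages that still look consistent simply
 * resume. Anything unresolved falls back to unmapping the page, or in the
 * worst case unmapping everything.
 */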
1860
1861#endif /* defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING3) */
1862
1863
1864#if defined(IN_RING0) && defined(NEM_WIN_TEMPLATE_MODE_OWN_RUN_API)
1865/**
1866 * Wrapper around nemR0WinImportState that converts VERR_NEM_FLUSH_TLB
1867 * into informational status codes and logs+asserts statuses.
1868 *
1869 * @returns VBox strict status code.
1870 * @param pGVM The global (ring-0) VM structure.
1871 * @param pGVCpu The global (ring-0) per CPU structure.
1872 * @param fWhat What to import.
1873 * @param pszCaller Who is doing the importing.
1874 */
1875DECLINLINE(VBOXSTRICTRC) nemR0WinImportStateStrict(PGVM pGVM, PGVMCPU pGVCpu, uint64_t fWhat, const char *pszCaller)
1876{
1877 int rc = nemR0WinImportState(pGVM, pGVCpu, &pGVCpu->cpum.GstCtx, fWhat, true /*fCanUpdateCr3*/);
1878 if (RT_SUCCESS(rc))
1879 {
1880 Assert(rc == VINF_SUCCESS);
1881 return VINF_SUCCESS;
1882 }
1883
1884 if (rc == VERR_NEM_FLUSH_TLB)
1885 {
1886 Log4(("%s/%u: nemR0WinImportState -> %Rrc\n", pszCaller, pGVCpu->idCpu, -rc));
1887 return -rc;
1888 }
1889 RT_NOREF(pszCaller);
1890 AssertMsgFailedReturn(("%s/%u: nemR0WinImportState failed: %Rrc\n", pszCaller, pGVCpu->idCpu, rc), rc);
1891}
1892#endif /* IN_RING0 && NEM_WIN_TEMPLATE_MODE_OWN_RUN_API*/
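
/*
 * Note on the -rc trick above: VBox error statuses are negative and their
 * informational counterparts positive, so negating VERR_NEM_FLUSH_TLB should
 * yield the informational code the run loop acts upon.
 */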
1893
1894#if defined(NEM_WIN_TEMPLATE_MODE_OWN_RUN_API) || defined(IN_RING3)
1895/**
1896 * Wrapper around nemR0WinImportStateStrict and nemHCWinCopyStateFromHyperV.
1897 *
1898 * Unlike the wrapped APIs, this checks whether it's necessary.
1899 *
1900 * @returns VBox strict status code.
1901 * @param pVCpu The cross context per CPU structure.
1902 * @param fWhat What to import.
1903 * @param pszCaller Who is doing the importing.
1904 */
1905DECLINLINE(VBOXSTRICTRC) nemHCWinImportStateIfNeededStrict(PVMCPUCC pVCpu, uint64_t fWhat, const char *pszCaller)
1906{
1907 if (pVCpu->cpum.GstCtx.fExtrn & fWhat)
1908 {
1909# ifdef IN_RING0
1910 return nemR0WinImportStateStrict(pVCpu->pGVM, pVCpu, fWhat, pszCaller);
1911# else
1912 RT_NOREF(pszCaller);
1913 int rc = nemHCWinCopyStateFromHyperV(pVCpu->pVMR3, pVCpu, fWhat);
1914 AssertRCReturn(rc, rc);
1915# endif
1916 }
1917 return VINF_SUCCESS;
1918}
1919#endif /* NEM_WIN_TEMPLATE_MODE_OWN_RUN_API || IN_RING3 */
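
/*
 * A hypothetical call for illustration; the mask and caller tag follow the
 * pattern used by the exit handlers below:
 */
#if 0
VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "Example");
if (rcStrict != VINF_SUCCESS)
    return rcStrict;
#endif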
1920
1921#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
1922/**
1923 * Copies register state from the X64 intercept message header.
1924 *
1925 * ASSUMES no state copied yet.
1926 *
1927 * @param pVCpu The cross context per CPU structure.
1928 * @param pHdr The X64 intercept message header.
1929 * @sa nemR3WinCopyStateFromX64Header
1930 */
1931DECLINLINE(void) nemHCWinCopyStateFromX64Header(PVMCPUCC pVCpu, HV_X64_INTERCEPT_MESSAGE_HEADER const *pHdr)
1932{
1933 Assert( (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_INHIBIT_INT))
1934 == (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_INHIBIT_INT));
1935 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.cs, pHdr->CsSegment);
1936 pVCpu->cpum.GstCtx.rip = pHdr->Rip;
1937 pVCpu->cpum.GstCtx.rflags.u = pHdr->Rflags;
1938
1939 pVCpu->nem.s.fLastInterruptShadow = pHdr->ExecutionState.InterruptShadow;
1940 if (!pHdr->ExecutionState.InterruptShadow)
1941 {
1942 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1943 { /* likely */ }
1944 else
1945 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1946 }
1947 else
1948 EMSetInhibitInterruptsPC(pVCpu, pHdr->Rip);
1949
1950 APICSetTpr(pVCpu, pHdr->Cr8 << 4);
1951
1952 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_APIC_TPR);
1953}
1954#elif defined(IN_RING3)
1955/**
1956 * Copies register state from the (common) exit context.
1957 *
1958 * ASSUMES no state copied yet.
1959 *
1960 * @param pVCpu The cross context per CPU structure.
1961 * @param pExitCtx The common exit context.
1962 * @sa nemHCWinCopyStateFromX64Header
1963 */
1964DECLINLINE(void) nemR3WinCopyStateFromX64Header(PVMCPUCC pVCpu, WHV_VP_EXIT_CONTEXT const *pExitCtx)
1965{
1966 Assert( (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_INHIBIT_INT))
1967 == (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_INHIBIT_INT));
1968 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.cs, pExitCtx->Cs);
1969 pVCpu->cpum.GstCtx.rip = pExitCtx->Rip;
1970 pVCpu->cpum.GstCtx.rflags.u = pExitCtx->Rflags;
1971
1972 pVCpu->nem.s.fLastInterruptShadow = pExitCtx->ExecutionState.InterruptShadow;
1973 if (!pExitCtx->ExecutionState.InterruptShadow)
1974 {
1975 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1976 { /* likely */ }
1977 else
1978 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1979 }
1980 else
1981 EMSetInhibitInterruptsPC(pVCpu, pExitCtx->Rip);
1982
1983 APICSetTpr(pVCpu, pExitCtx->Cr8 << 4);
1984
1985 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_APIC_TPR);
1986}
1987#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
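
/*
 * Note on the Cr8 shift above: CR8 holds the task-priority class, i.e. bits
 * 7:4 of the TPR, so a guest CR8 of 0x3 is handed to APICSetTpr as 0x30.
 */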
1988
1989
1990#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
1991/**
1992 * Deals with memory intercept message.
1993 *
1994 * @returns Strict VBox status code.
1995 * @param pVM The cross context VM structure.
1996 * @param pVCpu The cross context per CPU structure.
1997 * @param pMsg The message.
1998 * @sa nemR3WinHandleExitMemory
1999 */
2000NEM_TMPL_STATIC VBOXSTRICTRC
2001nemHCWinHandleMessageMemory(PVMCC pVM, PVMCPUCC pVCpu, HV_X64_MEMORY_INTERCEPT_MESSAGE const *pMsg)
2002{
2003 uint64_t const uHostTsc = ASMReadTSC();
2004 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
2005 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
2006 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_EXECUTE);
2007
2008 /*
2009 * Whatever we do, we must clear pending event injection upon resume.
2010 */
2011 if (pMsg->Header.ExecutionState.InterruptionPending)
2012 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
2013
2014# if 0 /* Experiment: 20K -> 34K exit/s. */
2015 if ( pMsg->Header.ExecutionState.EferLma
2016 && pMsg->Header.CsSegment.Long
2017 && pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
2018 {
2019 if ( pMsg->Header.Rip - (uint64_t)0xf65a < (uint64_t)(0xf662 - 0xf65a)
2020 && pMsg->InstructionBytes[0] == 0x89
2021 && pMsg->InstructionBytes[1] == 0x03)
2022 {
2023 pVCpu->cpum.GstCtx.rip = pMsg->Header.Rip + 2;
2024 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
2025 AssertMsg(pMsg->Header.InstructionLength == 2, ("%#x\n", pMsg->Header.InstructionLength));
2026 //Log(("%RX64 msg:\n%.80Rhxd\n", pVCpu->cpum.GstCtx.rip, pMsg));
2027 return VINF_SUCCESS;
2028 }
2029 }
2030# endif
2031
2032 /*
2033 * Ask PGM for information about the given GCPhys. We need to check if we're
2034 * out of sync first.
2035 */
2036 NEMHCWINHMACPCCSTATE State = { pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE, false, false };
2037 PGMPHYSNEMPAGEINFO Info;
2038 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pMsg->GuestPhysicalAddress, State.fWriteAccess, &Info,
2039 nemHCWinHandleMemoryAccessPageCheckerCallback, &State);
2040 if (RT_SUCCESS(rc))
2041 {
2042 if (Info.fNemProt & ( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
2043 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
2044 {
2045 if (State.fCanResume)
2046 {
2047 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting (%s)\n",
2048 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2049 pMsg->GuestPhysicalAddress, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
2050 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
2051 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pMsg->Header.InterceptAccessType]));
2052 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
2053 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, uHostTsc);
2054 return VINF_SUCCESS;
2055 }
2056 }
2057 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating (%s)\n",
2058 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2059 pMsg->GuestPhysicalAddress, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
2060 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
2061 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pMsg->Header.InterceptAccessType]));
2062 }
2063 else
2064 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp rc=%Rrc%s; emulating (%s)\n",
2065 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2066 pMsg->GuestPhysicalAddress, rc, State.fDidSomething ? " modified-backing" : "",
2067 g_apszHvInterceptAccessTypes[pMsg->Header.InterceptAccessType]));
2068
2069 /*
2070 * Emulate the memory access, either access handler or special memory.
2071 */
2072 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2073 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
2074 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
2075 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
2076 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, uHostTsc);
2077 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
2078 VBOXSTRICTRC rcStrict;
2079# ifdef IN_RING0
2080 rcStrict = nemR0WinImportStateStrict(pVM, pVCpu,
2081 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES, "MemExit");
2082 if (rcStrict != VINF_SUCCESS)
2083 return rcStrict;
2084# else
2085 rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2086 AssertRCReturn(rc, rc);
2087# endif
2088
2089 if (pMsg->Reserved1)
2090 Log(("MemExit/Reserved1=%#x\n", pMsg->Reserved1));
2091 if (pMsg->Header.ExecutionState.Reserved0 || pMsg->Header.ExecutionState.Reserved1)
2092 Log(("MemExit/Hdr/State: Reserved0=%#x Reserved1=%#x\n", pMsg->Header.ExecutionState.Reserved0, pMsg->Header.ExecutionState.Reserved1));
2093
2094 if (!pExitRec)
2095 {
2096 //if (pMsg->InstructionByteCount > 0)
2097 // Log4(("InstructionByteCount=%#x %.16Rhxs\n", pMsg->InstructionByteCount, pMsg->InstructionBytes));
2098 if (pMsg->InstructionByteCount > 0)
2099 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pMsg->Header.Rip,
2100 pMsg->InstructionBytes, pMsg->InstructionByteCount);
2101 else
2102 rcStrict = IEMExecOne(pVCpu);
2103 /** @todo do we need to do anything wrt debugging here? */
2104 }
2105 else
2106 {
2107 /* Frequent access or probing. */
2108 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2109 Log4(("MemExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2110 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2111 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2112 }
2113 return rcStrict;
2114}
2115#elif defined(IN_RING3)
2116/**
2117 * Deals with memory access exits (WHvRunVpExitReasonMemoryAccess).
2118 *
2119 * @returns Strict VBox status code.
2120 * @param pVM The cross context VM structure.
2121 * @param pVCpu The cross context per CPU structure.
2122 * @param pExit The VM exit information to handle.
2123 * @sa nemHCWinHandleMessageMemory
2124 */
2125NEM_TMPL_STATIC VBOXSTRICTRC
2126nemR3WinHandleExitMemory(PVMCC pVM, PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2127{
2128 uint64_t const uHostTsc = ASMReadTSC();
2129 Assert(pExit->MemoryAccess.AccessInfo.AccessType != 3);
2130
2131 /*
2132 * Whatever we do, we must clear pending event injection upon resume.
2133 */
2134 if (pExit->VpContext.ExecutionState.InterruptionPending)
2135 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
2136
2137 /*
2138 * Ask PGM for information about the given GCPhys. We need to check if we're
2139 * out of sync first.
2140 */
2141 NEMHCWINHMACPCCSTATE State = { pExit->MemoryAccess.AccessInfo.AccessType == WHvMemoryAccessWrite, false, false };
2142 PGMPHYSNEMPAGEINFO Info;
2143 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pExit->MemoryAccess.Gpa, State.fWriteAccess, &Info,
2144 nemHCWinHandleMemoryAccessPageCheckerCallback, &State);
2145 if (RT_SUCCESS(rc))
2146 {
2147 if (Info.fNemProt & ( pExit->MemoryAccess.AccessInfo.AccessType == WHvMemoryAccessWrite
2148 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
2149 {
2150 if (State.fCanResume)
2151 {
2152 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting (%s)\n",
2153 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2154 pExit->MemoryAccess.Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
2155 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
2156 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pExit->MemoryAccess.AccessInfo.AccessType]));
2157 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
2158 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, uHostTsc);
2159 return VINF_SUCCESS;
2160 }
2161 }
2162 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating (%s)\n",
2163 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2164 pExit->MemoryAccess.Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
2165 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
2166 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pExit->MemoryAccess.AccessInfo.AccessType]));
2167 }
2168 else
2169 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp rc=%Rrc%s; emulating (%s)\n",
2170 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2171 pExit->MemoryAccess.Gpa, rc, State.fDidSomething ? " modified-backing" : "",
2172 g_apszHvInterceptAccessTypes[pExit->MemoryAccess.AccessInfo.AccessType]));
2173
2174 /*
2175 * Emulate the memory access, either access handler or special memory.
2176 */
2177 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2178 pExit->MemoryAccess.AccessInfo.AccessType == WHvMemoryAccessWrite
2179 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
2180 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
2181 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, uHostTsc);
2182 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2183 rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2184 AssertRCReturn(rc, rc);
2185 if (pExit->VpContext.ExecutionState.Reserved0 || pExit->VpContext.ExecutionState.Reserved1)
2186 Log(("MemExit/Hdr/State: Reserved0=%#x Reserved1=%#x\n", pExit->VpContext.ExecutionState.Reserved0, pExit->VpContext.ExecutionState.Reserved1));
2187
2188 VBOXSTRICTRC rcStrict;
2189 if (!pExitRec)
2190 {
2191 //if (pExit->MemoryAccess.InstructionByteCount > 0)
2192 // Log4(("InstructionByteCount=%#x %.16Rhxs\n", pExit->MemoryAccess.InstructionByteCount, pExit->MemoryAccess.InstructionBytes));
2193 if (pExit->MemoryAccess.InstructionByteCount > 0)
2194 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pExit->VpContext.Rip,
2195 pExit->MemoryAccess.InstructionBytes, pExit->MemoryAccess.InstructionByteCount);
2196 else
2197 rcStrict = IEMExecOne(pVCpu);
2198 /** @todo do we need to do anything wrt debugging here? */
2199 }
2200 else
2201 {
2202 /* Frequent access or probing. */
2203 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2204 Log4(("MemExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2205 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2206 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2207 }
2208 return rcStrict;
2209}
2210#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
2211
2212
2213#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
2214/**
2215 * Deals with I/O port intercept message.
2216 *
2217 * @returns Strict VBox status code.
2218 * @param pVM The cross context VM structure.
2219 * @param pVCpu The cross context per CPU structure.
2220 * @param pMsg The message.
2221 */
2222NEM_TMPL_STATIC VBOXSTRICTRC
2223nemHCWinHandleMessageIoPort(PVMCC pVM, PVMCPUCC pVCpu, HV_X64_IO_PORT_INTERCEPT_MESSAGE const *pMsg)
2224{
2225 /*
2226 * Assert message sanity.
2227 */
2228 Assert( pMsg->AccessInfo.AccessSize == 1
2229 || pMsg->AccessInfo.AccessSize == 2
2230 || pMsg->AccessInfo.AccessSize == 4);
2231 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
2232 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE);
2233 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
2234 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRip, pMsg->Header.Rip);
2235 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
2236 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
2237 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRax, pMsg->Rax);
2238 if (pMsg->AccessInfo.StringOp)
2239 {
2240 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, HvX64RegisterDs, pMsg->DsSegment);
2241 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, HvX64RegisterEs, pMsg->EsSegment);
2242 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRcx, pMsg->Rcx);
2243 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRsi, pMsg->Rsi);
2244 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRdi, pMsg->Rdi);
2245 }
2246
2247 /*
2248 * Whatever we do, we must clear pending event injection upon resume.
2249 */
2250 if (pMsg->Header.ExecutionState.InterruptionPending)
2251 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
2252
2253 /*
2254 * Add history first to avoid two paths doing EMHistoryExec calls.
2255 */
2256 VBOXSTRICTRC rcStrict;
2257 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2258 !pMsg->AccessInfo.StringOp
2259 ? ( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
2260 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_WRITE)
2261 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_READ))
2262 : ( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
2263 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_WRITE)
2264 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_READ)),
2265 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
2266 if (!pExitRec)
2267 {
2268 if (!pMsg->AccessInfo.StringOp)
2269 {
2270 /*
2271 * Simple port I/O.
2272 */
2273 static uint32_t const s_fAndMask[8] =
2274 { UINT32_MAX, UINT32_C(0xff), UINT32_C(0xffff), UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX };
2275 uint32_t const fAndMask = s_fAndMask[pMsg->AccessInfo.AccessSize];
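 /* Example: a one-byte OUT (AccessSize=1) selects fAndMask=0xff, so with
 RAX=0x12345678 the value written to the port is 0x78. */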
2276
2277 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
2278 if (pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
2279 {
2280 rcStrict = IOMIOPortWrite(pVM, pVCpu, pMsg->PortNumber, (uint32_t)pMsg->Rax & fAndMask, pMsg->AccessInfo.AccessSize);
2281 Log4(("IOExit/%u: %04x:%08RX64/%s: OUT %#x, %#x LB %u rcStrict=%Rrc\n",
2282 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2283 pMsg->PortNumber, (uint32_t)pMsg->Rax & fAndMask, pMsg->AccessInfo.AccessSize, VBOXSTRICTRC_VAL(rcStrict) ));
2284 if (IOM_SUCCESS(rcStrict))
2285 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, &pMsg->Header, 1);
2286# ifdef IN_RING0
2287 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
2288 && !pVCpu->cpum.GstCtx.rflags.Bits.u1TF
2289 /** @todo check for debug breakpoints */ )
2290 return EMRZSetPendingIoPortWrite(pVCpu, pMsg->PortNumber, pMsg->Header.InstructionLength,
2291 pMsg->AccessInfo.AccessSize, (uint32_t)pMsg->Rax & fAndMask);
2292# endif
2293 else
2294 {
2295 pVCpu->cpum.GstCtx.rax = pMsg->Rax;
2296 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RAX;
2297 }
2298 }
2299 else
2300 {
2301 uint32_t uValue = 0;
2302 rcStrict = IOMIOPortRead(pVM, pVCpu, pMsg->PortNumber, &uValue, pMsg->AccessInfo.AccessSize);
2303 Log4(("IOExit/%u: %04x:%08RX64/%s: IN %#x LB %u -> %#x, rcStrict=%Rrc\n",
2304 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2305 pMsg->PortNumber, pMsg->AccessInfo.AccessSize, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2306 if (IOM_SUCCESS(rcStrict))
2307 {
2308 if (pMsg->AccessInfo.AccessSize != 4)
2309 pVCpu->cpum.GstCtx.rax = (pMsg->Rax & ~(uint64_t)fAndMask) | (uValue & fAndMask);
2310 else
2311 pVCpu->cpum.GstCtx.rax = uValue;
2312 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RAX;
2313 Log4(("IOExit/%u: RAX %#RX64 -> %#RX64\n", pVCpu->idCpu, pMsg->Rax, pVCpu->cpum.GstCtx.rax));
2314 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, &pMsg->Header, 1);
2315 }
2316 else
2317 {
2318 pVCpu->cpum.GstCtx.rax = pMsg->Rax;
2319 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RAX;
2320# ifdef IN_RING0
2321 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
2322 && !pVCpu->cpum.GstCtx.rflags.Bits.u1TF
2323 /** @todo check for debug breakpoints */ )
2324 return EMRZSetPendingIoPortRead(pVCpu, pMsg->PortNumber, pMsg->Header.InstructionLength,
2325 pMsg->AccessInfo.AccessSize);
2326# endif
2327 }
2328 }
2329 }
2330 else
2331 {
2332 /*
2333 * String port I/O.
2334 */
2335 /** @todo Someone at Microsoft please explain how we can get the address mode
2336 * from the IoPortAccess.VpContext. CS.Attributes is only sufficient for
2337 * getting the default mode; it can always be overridden by a prefix. This
2338 * forces us to interpret the instruction from opcodes, which is suboptimal.
2339 * Both AMD-V and VT-x include the address size in the exit info, at least on
2340 * CPUs that are reasonably new.
2341 *
2342 * Of course, it's possible this is undocumented and we just need to do some
2343 * experiments to figure out how it's communicated. Alternatively, we can scan
2344 * the opcode bytes for possible evil prefixes.
2345 */
2346 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
2347 pVCpu->cpum.GstCtx.fExtrn &= ~( CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDI | CPUMCTX_EXTRN_RSI
2348 | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2349 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.ds, pMsg->DsSegment);
2350 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.es, pMsg->EsSegment);
2351 pVCpu->cpum.GstCtx.rax = pMsg->Rax;
2352 pVCpu->cpum.GstCtx.rcx = pMsg->Rcx;
2353 pVCpu->cpum.GstCtx.rdi = pMsg->Rdi;
2354 pVCpu->cpum.GstCtx.rsi = pMsg->Rsi;
2355# ifdef IN_RING0
2356 rcStrict = nemR0WinImportStateStrict(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "IOExit");
2357 if (rcStrict != VINF_SUCCESS)
2358 return rcStrict;
2359# else
2360 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2361 AssertRCReturn(rc, rc);
2362# endif
2363
2364 Log4(("IOExit/%u: %04x:%08RX64/%s: %s%s %#x LB %u (emulating)\n",
2365 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2366 pMsg->AccessInfo.RepPrefix ? "REP " : "",
2367 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? "OUTS" : "INS",
2368 pMsg->PortNumber, pMsg->AccessInfo.AccessSize ));
2369 rcStrict = IEMExecOne(pVCpu);
2370 }
2371 if (IOM_SUCCESS(rcStrict))
2372 {
2373 /*
2374 * Do debug checks.
2375 */
2376 if ( pMsg->Header.ExecutionState.DebugActive /** @todo Microsoft: Does DebugActive only reflect DR7? */
2377 || (pMsg->Header.Rflags & X86_EFL_TF)
2378 || DBGFBpIsHwIoArmed(pVM) )
2379 {
2380 /** @todo Debugging. */
2381 }
2382 }
2383 return rcStrict;
2384 }
2385
2386 /*
2387 * Frequent exit or something needing probing.
2388 * Get state and call EMHistoryExec.
2389 */
2390 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
2391 if (!pMsg->AccessInfo.StringOp)
2392 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RAX;
2393 else
2394 {
2395 pVCpu->cpum.GstCtx.fExtrn &= ~( CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDI | CPUMCTX_EXTRN_RSI
2396 | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2397 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.ds, pMsg->DsSegment);
2398 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.es, pMsg->EsSegment);
2399 pVCpu->cpum.GstCtx.rcx = pMsg->Rcx;
2400 pVCpu->cpum.GstCtx.rdi = pMsg->Rdi;
2401 pVCpu->cpum.GstCtx.rsi = pMsg->Rsi;
2402 }
2403 pVCpu->cpum.GstCtx.rax = pMsg->Rax;
2404
2405# ifdef IN_RING0
2406 rcStrict = nemR0WinImportStateStrict(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "IOExit");
2407 if (rcStrict != VINF_SUCCESS)
2408 return rcStrict;
2409# else
2410 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2411 AssertRCReturn(rc, rc);
2412# endif
2413
2414 Log4(("IOExit/%u: %04x:%08RX64/%s: %s%s%s %#x LB %u -> EMHistoryExec\n",
2415 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2416 pMsg->AccessInfo.RepPrefix ? "REP " : "",
2417 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? "OUT" : "IN",
2418 pMsg->AccessInfo.StringOp ? "S" : "",
2419 pMsg->PortNumber, pMsg->AccessInfo.AccessSize));
2420 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2421 Log4(("IOExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2422 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2423 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2424 return rcStrict;
2425}
2426#elif defined(IN_RING3)
2427/**
2428 * Deals with I/O port access exits (WHvRunVpExitReasonX64IoPortAccess).
2429 *
2430 * @returns Strict VBox status code.
2431 * @param pVM The cross context VM structure.
2432 * @param pVCpu The cross context per CPU structure.
2433 * @param pExit The VM exit information to handle.
2434 * @sa nemHCWinHandleMessageIoPort
2435 */
2436NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExitIoPort(PVMCC pVM, PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2437{
2438 Assert( pExit->IoPortAccess.AccessInfo.AccessSize == 1
2439 || pExit->IoPortAccess.AccessInfo.AccessSize == 2
2440 || pExit->IoPortAccess.AccessInfo.AccessSize == 4);
2441
2442 /*
2443 * Whatever we do, we must clear pending event injection upon resume.
2444 */
2445 if (pExit->VpContext.ExecutionState.InterruptionPending)
2446 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
2447
2448 /*
2449 * Add history first to avoid two paths doing EMHistoryExec calls.
2450 */
2451 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2452 !pExit->IoPortAccess.AccessInfo.StringOp
2453 ? ( pExit->IoPortAccess.AccessInfo.IsWrite
2454 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_WRITE)
2455 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_READ))
2456 : ( pExit->IoPortAccess.AccessInfo.IsWrite
2457 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_WRITE)
2458 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_READ)),
2459 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
2460 if (!pExitRec)
2461 {
2462 VBOXSTRICTRC rcStrict;
2463 if (!pExit->IoPortAccess.AccessInfo.StringOp)
2464 {
2465 /*
2466 * Simple port I/O.
2467 */
2468 static uint32_t const s_fAndMask[8] =
2469 { UINT32_MAX, UINT32_C(0xff), UINT32_C(0xffff), UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX };
2470 uint32_t const fAndMask = s_fAndMask[pExit->IoPortAccess.AccessInfo.AccessSize];
2471 if (pExit->IoPortAccess.AccessInfo.IsWrite)
2472 {
2473 rcStrict = IOMIOPortWrite(pVM, pVCpu, pExit->IoPortAccess.PortNumber,
2474 (uint32_t)pExit->IoPortAccess.Rax & fAndMask,
2475 pExit->IoPortAccess.AccessInfo.AccessSize);
2476 Log4(("IOExit/%u: %04x:%08RX64/%s: OUT %#x, %#x LB %u rcStrict=%Rrc\n",
2477 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2478 pExit->IoPortAccess.PortNumber, (uint32_t)pExit->IoPortAccess.Rax & fAndMask,
2479 pExit->IoPortAccess.AccessInfo.AccessSize, VBOXSTRICTRC_VAL(rcStrict) ));
2480 if (IOM_SUCCESS(rcStrict))
2481 {
2482 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2483 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, &pExit->VpContext, 1);
2484 }
2485 }
2486 else
2487 {
2488 uint32_t uValue = 0;
2489 rcStrict = IOMIOPortRead(pVM, pVCpu, pExit->IoPortAccess.PortNumber, &uValue,
2490 pExit->IoPortAccess.AccessInfo.AccessSize);
2491 Log4(("IOExit/%u: %04x:%08RX64/%s: IN %#x LB %u -> %#x, rcStrict=%Rrc\n",
2492 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2493 pExit->IoPortAccess.PortNumber, pExit->IoPortAccess.AccessInfo.AccessSize, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2494 if (IOM_SUCCESS(rcStrict))
2495 {
2496 if (pExit->IoPortAccess.AccessInfo.AccessSize != 4)
2497 pVCpu->cpum.GstCtx.rax = (pExit->IoPortAccess.Rax & ~(uint64_t)fAndMask) | (uValue & fAndMask);
2498 else
2499 pVCpu->cpum.GstCtx.rax = uValue;
2500 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RAX;
2501 Log4(("IOExit/%u: RAX %#RX64 -> %#RX64\n", pVCpu->idCpu, pExit->IoPortAccess.Rax, pVCpu->cpum.GstCtx.rax));
2502 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2503 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, &pExit->VpContext, 1);
2504 }
2505 }
2506 }
2507 else
2508 {
2509 /*
2510 * String port I/O.
2511 */
2512 /** @todo Someone at Microsoft please explain how we can get the address mode
2513 * from the IoPortAccess.VpContext. CS.Attributes is only sufficient for
2514 * getting the default mode; it can always be overridden by a prefix. This
2515 * forces us to interpret the instruction from opcodes, which is suboptimal.
2516 * Both AMD-V and VT-x include the address size in the exit info, at least on
2517 * CPUs that are reasonably new.
2518 *
2519 * Of course, it's possible this is undocumented and we just need to do some
2520 * experiments to figure out how it's communicated. Alternatively, we can scan
2521 * the opcode bytes for possible evil prefixes.
2522 */
2523 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2524 pVCpu->cpum.GstCtx.fExtrn &= ~( CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDI | CPUMCTX_EXTRN_RSI
2525 | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2526 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.ds, pExit->IoPortAccess.Ds);
2527 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.es, pExit->IoPortAccess.Es);
2528 pVCpu->cpum.GstCtx.rax = pExit->IoPortAccess.Rax;
2529 pVCpu->cpum.GstCtx.rcx = pExit->IoPortAccess.Rcx;
2530 pVCpu->cpum.GstCtx.rdi = pExit->IoPortAccess.Rdi;
2531 pVCpu->cpum.GstCtx.rsi = pExit->IoPortAccess.Rsi;
2532 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2533 AssertRCReturn(rc, rc);
2534
2535 Log4(("IOExit/%u: %04x:%08RX64/%s: %s%s %#x LB %u (emulating)\n",
2536 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2537 pExit->IoPortAccess.AccessInfo.RepPrefix ? "REP " : "",
2538 pExit->IoPortAccess.AccessInfo.IsWrite ? "OUTS" : "INS",
2539 pExit->IoPortAccess.PortNumber, pExit->IoPortAccess.AccessInfo.AccessSize ));
2540 rcStrict = IEMExecOne(pVCpu);
2541 }
2542 if (IOM_SUCCESS(rcStrict))
2543 {
2544 /*
2545 * Do debug checks.
2546 */
2547 if ( pExit->VpContext.ExecutionState.DebugActive /** @todo Microsoft: Does DebugActive only reflect DR7? */
2548 || (pExit->VpContext.Rflags & X86_EFL_TF)
2549 || DBGFBpIsHwIoArmed(pVM) )
2550 {
2551 /** @todo Debugging. */
2552 }
2553 }
2554 return rcStrict;
2555 }
2556
2557 /*
2558 * Frequent exit or something needing probing.
2559 * Get state and call EMHistoryExec.
2560 */
2561 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2562 if (!pExit->IoPortAccess.AccessInfo.StringOp)
2563 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RAX;
2564 else
2565 {
2566 pVCpu->cpum.GstCtx.fExtrn &= ~( CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDI | CPUMCTX_EXTRN_RSI
2567 | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2568 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.ds, pExit->IoPortAccess.Ds);
2569 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.es, pExit->IoPortAccess.Es);
2570 pVCpu->cpum.GstCtx.rcx = pExit->IoPortAccess.Rcx;
2571 pVCpu->cpum.GstCtx.rdi = pExit->IoPortAccess.Rdi;
2572 pVCpu->cpum.GstCtx.rsi = pExit->IoPortAccess.Rsi;
2573 }
2574 pVCpu->cpum.GstCtx.rax = pExit->IoPortAccess.Rax;
2575 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2576 AssertRCReturn(rc, rc);
2577 Log4(("IOExit/%u: %04x:%08RX64/%s: %s%s%s %#x LB %u -> EMHistoryExec\n",
2578 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2579 pExit->IoPortAccess.AccessInfo.RepPrefix ? "REP " : "",
2580 pExit->IoPortAccess.AccessInfo.IsWrite ? "OUT" : "IN",
2581 pExit->IoPortAccess.AccessInfo.StringOp ? "S" : "",
2582 pExit->IoPortAccess.PortNumber, pExit->IoPortAccess.AccessInfo.AccessSize));
2583 VBOXSTRICTRC rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2584 Log4(("IOExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2585 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2586 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2587 return rcStrict;
2588}
2589#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
2590
2591
2592#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
2593/**
2594 * Deals with interrupt window message.
2595 *
2596 * @returns Strict VBox status code.
2597 * @param pVM The cross context VM structure.
2598 * @param pVCpu The cross context per CPU structure.
2599 * @param pMsg The message.
2600 * @sa nemR3WinHandleExitInterruptWindow
2601 */
2602NEM_TMPL_STATIC VBOXSTRICTRC
2603nemHCWinHandleMessageInterruptWindow(PVMCC pVM, PVMCPUCC pVCpu, HV_X64_INTERRUPT_WINDOW_MESSAGE const *pMsg)
2604{
2605 /*
2606 * Assert message sanity.
2607 */
2608 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_EXECUTE
2609 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ // READ & WRITE are probably not used here
2610 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE);
2611 AssertMsg(pMsg->Type == HvX64PendingInterrupt || pMsg->Type == HvX64PendingNmi, ("%#x\n", pMsg->Type));
2612
2613 /*
2614 * Just copy the state we've got and handle it in the loop for now.
2615 */
2616 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_INTTERRUPT_WINDOW),
2617 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
2618
2619 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
2620 Log4(("IntWinExit/%u: %04x:%08RX64/%s: %u IF=%d InterruptShadow=%d\n",
2621 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2622 pMsg->Type, RT_BOOL(pMsg->Header.Rflags & X86_EFL_IF), pMsg->Header.ExecutionState.InterruptShadow));
2623
2624 /** @todo call nemHCWinHandleInterruptFF */
2625 RT_NOREF(pVM);
2626 return VINF_SUCCESS;
2627}
2628#elif defined(IN_RING3)
2629/**
2630 * Deals with interrupt window exits (WHvRunVpExitReasonX64InterruptWindow).
2631 *
2632 * @returns Strict VBox status code.
2633 * @param pVM The cross context VM structure.
2634 * @param pVCpu The cross context per CPU structure.
2635 * @param pExit The VM exit information to handle.
2636 * @sa nemHCWinHandleMessageInterruptWindow
2637 */
2638NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExitInterruptWindow(PVMCC pVM, PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2639{
2640 /*
2641 * Assert message sanity.
2642 */
2643 AssertMsg( pExit->InterruptWindow.DeliverableType == WHvX64PendingInterrupt
2644 || pExit->InterruptWindow.DeliverableType == WHvX64PendingNmi,
2645 ("%#x\n", pExit->InterruptWindow.DeliverableType));
2646
2647 /*
2648 * Just copy the state we've got and handle it in the loop for now.
2649 */
2650 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_INTTERRUPT_WINDOW),
2651 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
2652
2653 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2654 Log4(("IntWinExit/%u: %04x:%08RX64/%s: %u IF=%d InterruptShadow=%d CR8=%#x\n",
2655 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2656 pExit->InterruptWindow.DeliverableType, RT_BOOL(pExit->VpContext.Rflags & X86_EFL_IF),
2657 pExit->VpContext.ExecutionState.InterruptShadow, pExit->VpContext.Cr8));
2658
2659 /** @todo call nemHCWinHandleInterruptFF */
2660 RT_NOREF(pVM);
2661 return VINF_SUCCESS;
2662}
2663#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
2664
2665
2666#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
2667/**
2668 * Deals with CPUID intercept message.
2669 *
2670 * @returns Strict VBox status code.
2671 * @param pVM The cross context VM structure.
2672 * @param pVCpu The cross context per CPU structure.
2673 * @param pMsg The message.
2674 * @sa nemR3WinHandleExitCpuId
2675 */
2676NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageCpuId(PVMCC pVM, PVMCPUCC pVCpu, HV_X64_CPUID_INTERCEPT_MESSAGE const *pMsg)
2677{
2678 /* Check message register value sanity. */
2679 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
2680 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRip, pMsg->Header.Rip);
2681 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
2682 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
2683 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRax, pMsg->Rax);
2684 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRcx, pMsg->Rcx);
2685 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRdx, pMsg->Rdx);
2686 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRbx, pMsg->Rbx);
2687
2688 /* Do exit history. */
2689 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_CPUID),
2690 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
2691 if (!pExitRec)
2692 {
2693 /*
2694 * Soak up state and execute the instruction.
2695 *
2696 * Note! If this grows slightly more complicated, combine into an IEMExecDecodedCpuId
2697 * function and make everyone use it.
2698 */
2699 /** @todo Combine implementations into IEMExecDecodedCpuId as this will
2700 * only get weirder with nested VT-x and AMD-V support. */
2701 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
2702
2703 /* Copy in the low register values (top is always cleared). */
2704 pVCpu->cpum.GstCtx.rax = (uint32_t)pMsg->Rax;
2705 pVCpu->cpum.GstCtx.rcx = (uint32_t)pMsg->Rcx;
2706 pVCpu->cpum.GstCtx.rdx = (uint32_t)pMsg->Rdx;
2707 pVCpu->cpum.GstCtx.rbx = (uint32_t)pMsg->Rbx;
2708 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);
2709
2710 /* Get the correct values. */
2711 CPUMGetGuestCpuId(pVCpu, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx,
2712 &pVCpu->cpum.GstCtx.eax, &pVCpu->cpum.GstCtx.ebx, &pVCpu->cpum.GstCtx.ecx, &pVCpu->cpum.GstCtx.edx);
2713
2714 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: rax=%08RX64 / rcx=%08RX64 / rdx=%08RX64 / rbx=%08RX64 -> %08RX32 / %08RX32 / %08RX32 / %08RX32 (hv: %08RX64 / %08RX64 / %08RX64 / %08RX64)\n",
2715 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2716 pMsg->Rax, pMsg->Rcx, pMsg->Rdx, pMsg->Rbx,
2717 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.ebx,
2718 pMsg->DefaultResultRax, pMsg->DefaultResultRcx, pMsg->DefaultResultRdx, pMsg->DefaultResultRbx));
2719
2720 /* Move RIP and we're done. */
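        /* CPUID is a fixed two-byte instruction (0F A2), hence the hardcoded length. */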
2721 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, &pMsg->Header, 2);
2722
2723 return VINF_SUCCESS;
2724 }
2725
2726 /*
2727 * Frequent exit or something needing probing.
2728 * Get state and call EMHistoryExec.
2729 */
2730 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
2731 pVCpu->cpum.GstCtx.rax = pMsg->Rax;
2732 pVCpu->cpum.GstCtx.rcx = pMsg->Rcx;
2733 pVCpu->cpum.GstCtx.rdx = pMsg->Rdx;
2734 pVCpu->cpum.GstCtx.rbx = pMsg->Rbx;
2735 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);
2736 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: rax=%08RX64 / rcx=%08RX64 / rdx=%08RX64 / rbx=%08RX64 (hv: %08RX64 / %08RX64 / %08RX64 / %08RX64) ==> EMHistoryExec\n",
2737 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2738 pMsg->Rax, pMsg->Rcx, pMsg->Rdx, pMsg->Rbx,
2739 pMsg->DefaultResultRax, pMsg->DefaultResultRcx, pMsg->DefaultResultRdx, pMsg->DefaultResultRbx));
2740# ifdef IN_RING0
2741 VBOXSTRICTRC rcStrict = nemR0WinImportStateStrict(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "CpuIdExit");
2742 if (rcStrict != VINF_SUCCESS)
2743 return rcStrict;
2744 RT_NOREF(pVM);
2745# else
2746 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2747 AssertRCReturn(rc, rc);
2748# endif
2749 VBOXSTRICTRC rcStrictExec = EMHistoryExec(pVCpu, pExitRec, 0);
2750 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2751 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2752 VBOXSTRICTRC_VAL(rcStrictExec), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2753 return rcStrictExec;
2754}
2755#elif defined(IN_RING3)
2756/**
2757 * Deals with CPUID exits (WHvRunVpExitReasonX64Cpuid).
2758 *
2759 * @returns Strict VBox status code.
2760 * @param pVM The cross context VM structure.
2761 * @param pVCpu The cross context per CPU structure.
2762 * @param pExit The VM exit information to handle.
2763 * @sa nemHCWinHandleMessageCpuId
2764 */
2765NEM_TMPL_STATIC VBOXSTRICTRC
2766nemR3WinHandleExitCpuId(PVMCC pVM, PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2767{
2768 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_CPUID),
2769 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
2770 if (!pExitRec)
2771 {
2772 /*
2773 * Soak up state and execute the instruction.
2774 *
2775 * Note! If this grows slightly more complicated, combine into an IEMExecDecodedCpuId
2776 * function and make everyone use it.
2777 */
2778 /** @todo Combine implementations into IEMExecDecodedCpuId as this will
2779 * only get weirder with nested VT-x and AMD-V support. */
2780 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2781
2782 /* Copy in the low register values (top is always cleared). */
2783 pVCpu->cpum.GstCtx.rax = (uint32_t)pExit->CpuidAccess.Rax;
2784 pVCpu->cpum.GstCtx.rcx = (uint32_t)pExit->CpuidAccess.Rcx;
2785 pVCpu->cpum.GstCtx.rdx = (uint32_t)pExit->CpuidAccess.Rdx;
2786 pVCpu->cpum.GstCtx.rbx = (uint32_t)pExit->CpuidAccess.Rbx;
2787 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);
2788
2789 /* Get the correct values. */
2790 CPUMGetGuestCpuId(pVCpu, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx,
2791 &pVCpu->cpum.GstCtx.eax, &pVCpu->cpum.GstCtx.ebx, &pVCpu->cpum.GstCtx.ecx, &pVCpu->cpum.GstCtx.edx);
2792
2793 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: rax=%08RX64 / rcx=%08RX64 / rdx=%08RX64 / rbx=%08RX64 -> %08RX32 / %08RX32 / %08RX32 / %08RX32 (hv: %08RX64 / %08RX64 / %08RX64 / %08RX64)\n",
2794 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2795 pExit->CpuidAccess.Rax, pExit->CpuidAccess.Rcx, pExit->CpuidAccess.Rdx, pExit->CpuidAccess.Rbx,
2796 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.ebx,
2797 pExit->CpuidAccess.DefaultResultRax, pExit->CpuidAccess.DefaultResultRcx, pExit->CpuidAccess.DefaultResultRdx, pExit->CpuidAccess.DefaultResultRbx));
2798
2799 /* Move RIP and we're done. */
2800 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, &pExit->VpContext, 2);
2801
2802 RT_NOREF_PV(pVM);
2803 return VINF_SUCCESS;
2804 }
2805
2806 /*
2807 * Frequent exit or something needing probing.
2808 * Get state and call EMHistoryExec.
2809 */
2810 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2811 pVCpu->cpum.GstCtx.rax = pExit->CpuidAccess.Rax;
2812 pVCpu->cpum.GstCtx.rcx = pExit->CpuidAccess.Rcx;
2813 pVCpu->cpum.GstCtx.rdx = pExit->CpuidAccess.Rdx;
2814 pVCpu->cpum.GstCtx.rbx = pExit->CpuidAccess.Rbx;
2815 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);
2816 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: rax=%08RX64 / rcx=%08RX64 / rdx=%08RX64 / rbx=%08RX64 (hv: %08RX64 / %08RX64 / %08RX64 / %08RX64) ==> EMHistoryExec\n",
2817 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2818 pExit->CpuidAccess.Rax, pExit->CpuidAccess.Rcx, pExit->CpuidAccess.Rdx, pExit->CpuidAccess.Rbx,
2819 pExit->CpuidAccess.DefaultResultRax, pExit->CpuidAccess.DefaultResultRcx, pExit->CpuidAccess.DefaultResultRdx, pExit->CpuidAccess.DefaultResultRbx));
2820 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2821 AssertRCReturn(rc, rc);
2822 VBOXSTRICTRC rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2823 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2824 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2825 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2826 return rcStrict;
2827}
2828#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
2829
2830
2831#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
2832/**
2833 * Deals with MSR intercept message.
2834 *
2835 * @returns Strict VBox status code.
2836 * @param pVCpu The cross context per CPU structure.
2837 * @param pMsg The message.
2838 * @sa nemR3WinHandleExitMsr
2839 */
2840NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageMsr(PVMCPUCC pVCpu, HV_X64_MSR_INTERCEPT_MESSAGE const *pMsg)
2841{
2842 /*
2843 * A wee bit of sanity first.
2844 */
2845 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
2846 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE);
2847 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
2848 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRip, pMsg->Header.Rip);
2849 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
2850 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
2851 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRax, pMsg->Rax);
2852 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRdx, pMsg->Rdx);
2853
2854 /*
2855 * Check CPL as that's common to both RDMSR and WRMSR.
2856 */
2857 VBOXSTRICTRC rcStrict;
2858 if (pMsg->Header.ExecutionState.Cpl == 0)
2859 {
2860 /*
2861 * Get all the MSR state. Since we're getting EFER, we also need to
2862 * get CR0, CR4 and CR3.
2863 */
2864 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2865 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
2866 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE)
2867 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ),
2868 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
2869
2870 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
2871 rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu,
2872 (!pExitRec ? 0 : IEM_CPUMCTX_EXTRN_MUST_MASK)
2873 | CPUMCTX_EXTRN_ALL_MSRS | CPUMCTX_EXTRN_CR0
2874 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4,
2875 "MSRs");
2876 if (rcStrict == VINF_SUCCESS)
2877 {
2878 if (!pExitRec)
2879 {
2880 /*
2881 * Handle writes.
2882 */
2883 if (pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
2884 {
2885 rcStrict = CPUMSetGuestMsr(pVCpu, pMsg->MsrNumber, RT_MAKE_U64((uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx));
2886 Log4(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc\n",
2887 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2888 pMsg->MsrNumber, (uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx, VBOXSTRICTRC_VAL(rcStrict) ));
2889 if (rcStrict == VINF_SUCCESS)
2890 {
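                        /* WRMSR and RDMSR are fixed two-byte instructions (0F 30 / 0F 32), hence the hardcoded length. */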
2891 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, &pMsg->Header, 2);
2892 return VINF_SUCCESS;
2893 }
2894# ifndef IN_RING3
2895 /* move to ring-3 and handle the trap/whatever there, as we want to LogRel this. */
2896 if (rcStrict == VERR_CPUM_RAISE_GP_0)
2897 rcStrict = VINF_CPUM_R3_MSR_WRITE;
2898 return rcStrict;
2899# else
2900 LogRel(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc!\n",
2901 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2902 pMsg->MsrNumber, (uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx, VBOXSTRICTRC_VAL(rcStrict) ));
2903# endif
2904 }
2905 /*
2906 * Handle reads.
2907 */
2908 else
2909 {
2910 uint64_t uValue = 0;
2911 rcStrict = CPUMQueryGuestMsr(pVCpu, pMsg->MsrNumber, &uValue);
2912 Log4(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n",
2913 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2914 pMsg->MsrNumber, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2915 if (rcStrict == VINF_SUCCESS)
2916 {
2917 pVCpu->cpum.GstCtx.rax = (uint32_t)uValue;
2918 pVCpu->cpum.GstCtx.rdx = uValue >> 32;
2919 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX);
2920 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, &pMsg->Header, 2);
2921 return VINF_SUCCESS;
2922 }
2923# ifndef IN_RING3
2924 /* move to ring-3 and handle the trap/whatever there, as we want to LogRel this. */
2925 if (rcStrict == VERR_CPUM_RAISE_GP_0)
2926 rcStrict = VINF_CPUM_R3_MSR_READ;
2927 return rcStrict;
2928# else
2929 LogRel(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n",
2930 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2931 pMsg->MsrNumber, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2932# endif
2933 }
2934 }
2935 else
2936 {
2937 /*
2938 * Handle frequent exit or something needing probing.
2939 */
2940 Log4(("MsrExit/%u: %04x:%08RX64/%s: %sMSR %#08x\n",
2941 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2942 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? "WR" : "RD", pMsg->MsrNumber));
2943 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2944 Log4(("MsrExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2945 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2946 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2947 return rcStrict;
2948 }
2949 }
2950 else
2951 {
2952 LogRel(("MsrExit/%u: %04x:%08RX64/%s: %sMSR %08x -> %Rrc - msr state import\n",
2953 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2954 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? "WR" : "RD",
2955 pMsg->MsrNumber, VBOXSTRICTRC_VAL(rcStrict) ));
2956 return rcStrict;
2957 }
2958 }
2959 else if (pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
2960 Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); WRMSR %08x, %08x:%08x\n",
2961 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2962 pMsg->Header.ExecutionState.Cpl, pMsg->MsrNumber, (uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx ));
2963 else
2964 Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); RDMSR %08x\n",
2965 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2966 pMsg->Header.ExecutionState.Cpl, pMsg->MsrNumber));
2967
2968 /*
2969 * If we get down here, we're supposed to #GP(0).
2970 */
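    /* Both the CPL != 0 case and a failed MSR access take a #GP with a zero error code, so no error code needs computing here. */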
2971 rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL_MSRS, "MSR");
2972 if (rcStrict == VINF_SUCCESS)
2973 {
2974 rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_GP, TRPM_TRAP, 0, 0, 0);
2975 if (rcStrict == VINF_IEM_RAISED_XCPT)
2976 rcStrict = VINF_SUCCESS;
2977 else if (rcStrict != VINF_SUCCESS)
2978 Log4(("MsrExit/%u: Injecting #GP(0) failed: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict) ));
2979 }
2980 return rcStrict;
2981}
2982#elif defined(IN_RING3)
2983/**
2984 * Deals with MSR access exits (WHvRunVpExitReasonX64MsrAccess).
2985 *
2986 * @returns Strict VBox status code.
2987 * @param pVM The cross context VM structure.
2988 * @param pVCpu The cross context per CPU structure.
2989 * @param pExit The VM exit information to handle.
2990 * @sa nemHCWinHandleMessageMsr
2991 */
2992NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExitMsr(PVMCC pVM, PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2993{
2994 /*
2995 * Check CPL as that's common to both RDMSR and WRMSR.
2996 */
2997 VBOXSTRICTRC rcStrict;
2998 if (pExit->VpContext.ExecutionState.Cpl == 0)
2999 {
3000 /*
3001 * Get all the MSR state. Since we're getting EFER, we also need to
3002 * get CR0, CR4 and CR3.
3003 */
3004 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
3005 pExit->MsrAccess.AccessInfo.IsWrite
3006 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE)
3007 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ),
3008 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
3009 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
3010 rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu,
3011 (!pExitRec ? 0 : IEM_CPUMCTX_EXTRN_MUST_MASK)
3012 | CPUMCTX_EXTRN_ALL_MSRS | CPUMCTX_EXTRN_CR0
3013 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4,
3014 "MSRs");
3015 if (rcStrict == VINF_SUCCESS)
3016 {
3017 if (!pExitRec)
3018 {
3019 /*
3020 * Handle writes.
3021 */
3022 if (pExit->MsrAccess.AccessInfo.IsWrite)
3023 {
3024 rcStrict = CPUMSetGuestMsr(pVCpu, pExit->MsrAccess.MsrNumber,
3025 RT_MAKE_U64((uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx));
3026 Log4(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3027 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->MsrAccess.MsrNumber,
3028 (uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx, VBOXSTRICTRC_VAL(rcStrict) ));
3029 if (rcStrict == VINF_SUCCESS)
3030 {
3031 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, &pExit->VpContext, 2);
3032 return VINF_SUCCESS;
3033 }
3034 LogRel(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc!\n", pVCpu->idCpu,
3035 pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
3036 pExit->MsrAccess.MsrNumber, (uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx,
3037 VBOXSTRICTRC_VAL(rcStrict) ));
3038 }
3039 /*
3040 * Handle reads.
3041 */
3042 else
3043 {
3044 uint64_t uValue = 0;
3045 rcStrict = CPUMQueryGuestMsr(pVCpu, pExit->MsrAccess.MsrNumber, &uValue);
3046 Log4(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n", pVCpu->idCpu,
3047 pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
3048 pExit->MsrAccess.MsrNumber, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
3049 if (rcStrict == VINF_SUCCESS)
3050 {
3051 pVCpu->cpum.GstCtx.rax = (uint32_t)uValue;
3052 pVCpu->cpum.GstCtx.rdx = uValue >> 32;
3053 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX);
3054 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, &pExit->VpContext, 2);
3055 return VINF_SUCCESS;
3056 }
3057 LogRel(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3058 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->MsrAccess.MsrNumber,
3059 uValue, VBOXSTRICTRC_VAL(rcStrict) ));
3060 }
3061 }
3062 else
3063 {
3064 /*
3065 * Handle frequent exit or something needing probing.
3066 */
3067 Log4(("MsrExit/%u: %04x:%08RX64/%s: %sMSR %#08x\n",
3068 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
3069 pExit->MsrAccess.AccessInfo.IsWrite ? "WR" : "RD", pExit->MsrAccess.MsrNumber));
3070 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
3071 Log4(("MsrExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
3072 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
3073 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
3074 return rcStrict;
3075 }
3076 }
3077 else
3078 {
3079 LogRel(("MsrExit/%u: %04x:%08RX64/%s: %sMSR %08x -> %Rrc - msr state import\n",
3080 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
3081 pExit->MsrAccess.AccessInfo.IsWrite ? "WR" : "RD", pExit->MsrAccess.MsrNumber, VBOXSTRICTRC_VAL(rcStrict) ));
3082 return rcStrict;
3083 }
3084 }
3085 else if (pExit->MsrAccess.AccessInfo.IsWrite)
3086 Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); WRMSR %08x, %08x:%08x\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3087 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.ExecutionState.Cpl,
3088 pExit->MsrAccess.MsrNumber, (uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx ));
3089 else
3090 Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); RDMSR %08x\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3091 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.ExecutionState.Cpl,
3092 pExit->MsrAccess.MsrNumber));
3093
3094 /*
3095 * If we get down here, we're supposed to #GP(0).
3096 */
3097 rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL_MSRS, "MSR");
3098 if (rcStrict == VINF_SUCCESS)
3099 {
3100 rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_GP, TRPM_TRAP, 0, 0, 0);
3101 if (rcStrict == VINF_IEM_RAISED_XCPT)
3102 rcStrict = VINF_SUCCESS;
3103 else if (rcStrict != VINF_SUCCESS)
3104 Log4(("MsrExit/%u: Injecting #GP(0) failed: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict) ));
3105 }
3106
3107 RT_NOREF_PV(pVM);
3108 return rcStrict;
3109}
3110#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
3111
3112
3113/**
3114 * Worker for nemHCWinHandleMessageException & nemR3WinHandleExitException that
3115 * checks if the given opcodes are of interest at all.
3116 *
3117 * @returns true if interesting, false if not.
3118 * @param cbOpcodes Number of opcode bytes available.
3119 * @param pbOpcodes The opcode bytes.
3120 * @param f64BitMode Whether we're in 64-bit mode.
3121 */
3122DECLINLINE(bool) nemHcWinIsInterestingUndefinedOpcode(uint8_t cbOpcodes, uint8_t const *pbOpcodes, bool f64BitMode)
3123{
3124 /*
3125 * Currently only interested in VMCALL and VMMCALL.
3126 */
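    /* Both are three bytes long (0F 01 C1 and 0F 01 D9), so at least three opcode bytes must be left once any prefixes have been skipped. */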
3127 while (cbOpcodes >= 3)
3128 {
3129 switch (pbOpcodes[0])
3130 {
3131 case 0x0f:
3132 switch (pbOpcodes[1])
3133 {
3134 case 0x01:
3135 switch (pbOpcodes[2])
3136 {
3137 case 0xc1: /* 0f 01 c1 VMCALL */
3138 return true;
3139 case 0xd9: /* 0f 01 d9 VMMCALL */
3140 return true;
3141 default:
3142 break;
3143 }
3144 break;
3145 }
3146 break;
3147
3148 default:
3149 return false;
3150
3151 /* prefixes */
3152 case 0x40: case 0x41: case 0x42: case 0x43: case 0x44: case 0x45: case 0x46: case 0x47:
3153 case 0x48: case 0x49: case 0x4a: case 0x4b: case 0x4c: case 0x4d: case 0x4e: case 0x4f:
3154 if (!f64BitMode)
3155 return false;
3156 RT_FALL_THRU();
3157 case X86_OP_PRF_CS:
3158 case X86_OP_PRF_SS:
3159 case X86_OP_PRF_DS:
3160 case X86_OP_PRF_ES:
3161 case X86_OP_PRF_FS:
3162 case X86_OP_PRF_GS:
3163 case X86_OP_PRF_SIZE_OP:
3164 case X86_OP_PRF_SIZE_ADDR:
3165 case X86_OP_PRF_LOCK:
3166 case X86_OP_PRF_REPZ:
3167 case X86_OP_PRF_REPNZ:
3168 cbOpcodes--;
3169 pbOpcodes++;
3170 continue;
3171 }
3172 break;
3173 }
3174 return false;
3175}
3176
3177
3178#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
3179/**
3180 * Copies state included in an exception intercept message.
3181 *
3182 * @param pVCpu The cross context per CPU structure.
3183 * @param pMsg The message.
3184 * @param fClearXcpt Clear pending exception.
3185 */
3186DECLINLINE(void)
3187nemHCWinCopyStateFromExceptionMessage(PVMCPUCC pVCpu, HV_X64_EXCEPTION_INTERCEPT_MESSAGE const *pMsg, bool fClearXcpt)
3188{
3189 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
3190 pVCpu->cpum.GstCtx.fExtrn &= ~( CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_DS
3191 | (fClearXcpt ? CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT : 0) );
3192 pVCpu->cpum.GstCtx.rax = pMsg->Rax;
3193 pVCpu->cpum.GstCtx.rcx = pMsg->Rcx;
3194 pVCpu->cpum.GstCtx.rdx = pMsg->Rdx;
3195 pVCpu->cpum.GstCtx.rbx = pMsg->Rbx;
3196 pVCpu->cpum.GstCtx.rsp = pMsg->Rsp;
3197 pVCpu->cpum.GstCtx.rbp = pMsg->Rbp;
3198 pVCpu->cpum.GstCtx.rsi = pMsg->Rsi;
3199 pVCpu->cpum.GstCtx.rdi = pMsg->Rdi;
3200 pVCpu->cpum.GstCtx.r8 = pMsg->R8;
3201 pVCpu->cpum.GstCtx.r9 = pMsg->R9;
3202 pVCpu->cpum.GstCtx.r10 = pMsg->R10;
3203 pVCpu->cpum.GstCtx.r11 = pMsg->R11;
3204 pVCpu->cpum.GstCtx.r12 = pMsg->R12;
3205 pVCpu->cpum.GstCtx.r13 = pMsg->R13;
3206 pVCpu->cpum.GstCtx.r14 = pMsg->R14;
3207 pVCpu->cpum.GstCtx.r15 = pMsg->R15;
3208 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.ds, pMsg->DsSegment);
3209 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.ss, pMsg->SsSegment);
3210}
3211#elif defined(IN_RING3)
3212/**
3213 * Copies state included in an exception intercept exit.
3214 *
3215 * @param pVCpu The cross context per CPU structure.
3216 * @param pExit The VM exit information.
3217 * @param fClearXcpt Clear pending exception.
3218 */
3219DECLINLINE(void) nemR3WinCopyStateFromExceptionMessage(PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, bool fClearXcpt)
3220{
3221 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
3222 if (fClearXcpt)
3223 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
3224}
3225#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
3226
3227
3228/**
3229 * Advances the guest RIP by the number of bytes specified in @a cb.
3230 *
3231 * @param pVCpu The cross context virtual CPU structure.
3232 * @param cb RIP increment value in bytes.
3233 */
3234DECLINLINE(void) nemHcWinAdvanceRip(PVMCPUCC pVCpu, uint32_t cb)
3235{
3236 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3237 pCtx->rip += cb;
3238
3239 /* Update interrupt shadow. */
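    /* The STI/MOV-SS shadow only covers the instruction at the recorded PC; once RIP has moved past it, the inhibition lapses. */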
3240 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
3241 && pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
3242 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3243}
3244
3245
3246/**
3247 * Hacks its way around the lovely mesa driver's backdoor accesses.
3248 *
3249 * @sa hmR0VmxHandleMesaDrvGp
3250 * @sa hmR0SvmHandleMesaDrvGp
3251 */
3252static int nemHcWinHandleMesaDrvGp(PVMCPUCC pVCpu, PCPUMCTX pCtx)
3253{
3254 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_GPRS_MASK)));
3255 RT_NOREF(pCtx);
3256
3257 /* For now we'll just skip the instruction. */
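    /* The backdoor access is a single-byte IN EAX,DX (opcode 0xED), so advancing RIP by one byte does it. */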
3258 nemHcWinAdvanceRip(pVCpu, 1);
3259 return VINF_SUCCESS;
3260}
3261
3262
3263/**
3264 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
3265 * backdoor logging w/o checking what it is running inside.
3266 *
3267 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
3268 * backdoor port and magic numbers loaded in registers.
3269 *
3270 * @returns true if it is, false if it isn't.
3271 * @sa hmR0VmxIsMesaDrvGp
3272 * @sa hmR0SvmIsMesaDrvGp
3273 */
3274DECLINLINE(bool) nemHcWinIsMesaDrvGp(PVMCPUCC pVCpu, PCPUMCTX pCtx, const uint8_t *pbInsn, uint32_t cbInsn)
3275{
3276 /* #GP(0) is already checked by caller. */
3277
3278 /* Check magic and port. */
3279 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RAX)));
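    /* DX must hold the VMware backdoor I/O port (0x5658, 'VX') and EAX the magic cookie (0x564d5868, 'VMXh'). */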
3280 if (pCtx->dx != UINT32_C(0x5658))
3281 return false;
3282 if (pCtx->rax != UINT32_C(0x564d5868))
3283 return false;
3284
3285 /* Flat ring-3 CS. */
3286 if (CPUMGetGuestCPL(pVCpu) != 3)
3287 return false;
3288 if (pCtx->cs.u64Base != 0)
3289 return false;
3290
3291 /* 0xed: IN eAX,dx */
3292 if (cbInsn < 1) /* Play safe (shouldn't happen). */
3293 {
3294 uint8_t abInstr[1];
3295 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
3296 if (RT_FAILURE(rc))
3297 return false;
3298 if (abInstr[0] != 0xed)
3299 return false;
3300 }
3301 else
3302 {
3303 if (pbInsn[0] != 0xed)
3304 return false;
3305 }
3306
3307 return true;
3308}
3309
3310
3311#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
3312/**
3313 * Deals with exception intercept message (HvMessageTypeX64ExceptionIntercept).
3314 *
3315 * @returns Strict VBox status code.
3316 * @param pVCpu The cross context per CPU structure.
3317 * @param pMsg The message.
3318 * @sa nemR3WinHandleExitException
3319 */
3320NEM_TMPL_STATIC VBOXSTRICTRC
3321nemHCWinHandleMessageException(PVMCPUCC pVCpu, HV_X64_EXCEPTION_INTERCEPT_MESSAGE const *pMsg)
3322{
3323 /*
3324 * Assert sanity.
3325 */
3326 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
3327 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
3328 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_EXECUTE);
3329 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
3330 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRip, pMsg->Header.Rip);
3331 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
3332 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
3333 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, HvX64RegisterDs, pMsg->DsSegment);
3334 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, HvX64RegisterSs, pMsg->SsSegment);
3335 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRax, pMsg->Rax);
3336 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRcx, pMsg->Rcx);
3337 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRdx, pMsg->Rdx);
3338 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRbx, pMsg->Rbx);
3339 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRsp, pMsg->Rsp);
3340 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRbp, pMsg->Rbp);
3341 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRsi, pMsg->Rsi);
3342 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRdi, pMsg->Rdi);
3343 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR8, pMsg->R8);
3344 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR9, pMsg->R9);
3345 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR10, pMsg->R10);
3346 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR11, pMsg->R11);
3347 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR12, pMsg->R12);
3348 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR13, pMsg->R13);
3349 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR14, pMsg->R14);
3350 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR15, pMsg->R15);
3351
3352 /*
3353 * Get most of the register state since we'll end up making IEM inject the
3354 * event. The exception isn't normally flagged as a pending event, so duh.
3355 *
3356 * Note! We can optimize this later with event injection.
3357 */
3358 Log4(("XcptExit/%u: %04x:%08RX64/%s: %x errcd=%#x parm=%RX64\n",
3359 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
3360 pMsg->ExceptionVector, pMsg->ErrorCode, pMsg->ExceptionParameter));
3361 nemHCWinCopyStateFromExceptionMessage(pVCpu, pMsg, true /*fClearXcpt*/);
3362 uint64_t fWhat = NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM;
3363 if (pMsg->ExceptionVector == X86_XCPT_DB)
3364 fWhat |= CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_DR6;
3365 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, fWhat, "Xcpt");
3366 if (rcStrict != VINF_SUCCESS)
3367 return rcStrict;
3368
3369 /*
3370 * Handle the intercept.
3371 */
3372 TRPMEVENT enmEvtType = TRPM_TRAP;
3373 switch (pMsg->ExceptionVector)
3374 {
3375 /*
3376 * We get undefined opcodes on VMMCALL(AMD) & VMCALL(Intel) instructions
3377 * and need to turn them over to GIM.
3378 *
3379 * Note! We do not check fGIMTrapXcptUD here ASSUMING that GIM only wants
3380 * #UD for handling non-native hypercall instructions. (IEM will
3381 * decode both and let the GIM provider decide whether to accept it.)
3382 */
3383 case X86_XCPT_UD:
3384 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUd);
3385 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_UD),
3386 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
3387
3388 if (nemHcWinIsInterestingUndefinedOpcode(pMsg->InstructionByteCount, pMsg->InstructionBytes,
3389 pMsg->Header.ExecutionState.EferLma && pMsg->Header.CsSegment.Long ))
3390 {
3391 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pMsg->Header.Rip,
3392 pMsg->InstructionBytes, pMsg->InstructionByteCount);
3393 Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD -> emulated -> %Rrc\n",
3394 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip,
3395 nemHCWinExecStateToLogStr(&pMsg->Header), VBOXSTRICTRC_VAL(rcStrict) ));
3396 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUdHandled);
3397 return rcStrict;
3398 }
3399 Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD [%.*Rhxs] -> re-injected\n", pVCpu->idCpu, pMsg->Header.CsSegment.Selector,
3400 pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), pMsg->InstructionByteCount, pMsg->InstructionBytes ));
3401 break;
3402
3403 /*
3404 * Workaround the lovely mesa driver assuming that vmsvga means vmware
3405 * hypervisor and tries to log stuff to the host.
3406 */
3407 case X86_XCPT_GP:
3408 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionGp);
3409 /** @todo r=bird: Need workaround in IEM for this, right?
3410 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_GP),
3411 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC()); */
3412 if ( !pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv
3413 || !nemHcWinIsMesaDrvGp(pVCpu, &pVCpu->cpum.GstCtx, pMsg->InstructionBytes, pMsg->InstructionByteCount))
3414 {
3415# if 1 /** @todo Need to emulate instruction or we get a triple fault when trying to inject the #GP... */
3416 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pMsg->Header.Rip,
3417 pMsg->InstructionBytes, pMsg->InstructionByteCount);
3418 Log4(("XcptExit/%u: %04x:%08RX64/%s: #GP -> emulated -> %Rrc\n",
3419 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip,
3420 nemHCWinExecStateToLogStr(&pMsg->Header), VBOXSTRICTRC_VAL(rcStrict) ));
3421 return rcStrict;
3422# else
3423 break;
3424# endif
3425 }
3426 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionGpMesa);
3427 return nemHcWinHandleMesaDrvGp(pVCpu, &pVCpu->cpum.GstCtx);
3428
3429 /*
3430 * Filter debug exceptions.
3431 */
3432 case X86_XCPT_DB:
3433 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionDb);
3434 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_DB),
3435 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
3436 Log4(("XcptExit/%u: %04x:%08RX64/%s: #DB - TODO\n",
3437 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header) ));
3438 break;
3439
3440 case X86_XCPT_BP:
3441 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionBp);
3442 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_BP),
3443 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
3444 Log4(("XcptExit/%u: %04x:%08RX64/%s: #BP - TODO - %u\n", pVCpu->idCpu, pMsg->Header.CsSegment.Selector,
3445 pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), pMsg->Header.InstructionLength));
3446 enmEvtType = TRPM_SOFTWARE_INT; /* We're at the INT3 instruction, not after it. */
3447 break;
3448
3449 /* This shouldn't happen. */
3450 default:
3451 AssertLogRelMsgFailedReturn(("ExceptionVector=%#x\n", pMsg->ExceptionVector), VERR_IEM_IPE_6);
3452 }
3453
3454 /*
3455 * Inject it.
3456 */
3457 rcStrict = IEMInjectTrap(pVCpu, pMsg->ExceptionVector, enmEvtType, pMsg->ErrorCode,
3458 pMsg->ExceptionParameter /*??*/, pMsg->Header.InstructionLength);
3459 Log4(("XcptExit/%u: %04x:%08RX64/%s: %#u -> injected -> %Rrc\n",
3460 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip,
3461 nemHCWinExecStateToLogStr(&pMsg->Header), pMsg->ExceptionVector, VBOXSTRICTRC_VAL(rcStrict) ));
3462 return rcStrict;
3463}
3464#elif defined(IN_RING3)
3465/**
3466 * Deals with exception exits (WHvRunVpExitReasonException).
3467 *
3468 * @returns Strict VBox status code.
3469 * @param pVM The cross context VM structure.
3470 * @param pVCpu The cross context per CPU structure.
3471 * @param pExit The VM exit information to handle.
3472 * @sa nemHCWinHandleMessageException
3473 */
3474NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExitException(PVMCC pVM, PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
3475{
3476 /*
3477 * Get most of the register state since we'll end up making IEM inject the
3478 * event. The exception isn't normally flagged as a pending event, so duh.
3479 *
3480 * Note! We can optimize this later with event injection.
3481 */
3482 Log4(("XcptExit/%u: %04x:%08RX64/%s: %x errcd=%#x parm=%RX64\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3483 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpException.ExceptionType,
3484 pExit->VpException.ErrorCode, pExit->VpException.ExceptionParameter ));
3485 nemR3WinCopyStateFromExceptionMessage(pVCpu, pExit, true /*fClearXcpt*/);
3486 uint64_t fWhat = NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM;
3487 if (pExit->VpException.ExceptionType == X86_XCPT_DB)
3488 fWhat |= CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_DR6;
3489 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, fWhat, "Xcpt");
3490 if (rcStrict != VINF_SUCCESS)
3491 return rcStrict;
3492
3493 /*
3494 * Handle the intercept.
3495 */
3496 TRPMEVENT enmEvtType = TRPM_TRAP;
3497 switch (pExit->VpException.ExceptionType)
3498 {
3499 /*
3500 * We get undefined opcodes on VMMCALL(AMD) & VMCALL(Intel) instructions
3501 * and need to turn them over to GIM.
3502 *
3503 * Note! We do not check fGIMTrapXcptUD here ASSUMING that GIM only wants
3504 * #UD for handling non-native hypercall instructions. (IEM will
3505 * decode both and let the GIM provider decide whether to accept it.)
3506 */
3507 case X86_XCPT_UD:
3508 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUd);
3509 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_UD),
3510 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
3511 if (nemHcWinIsInterestingUndefinedOpcode(pExit->VpException.InstructionByteCount, pExit->VpException.InstructionBytes,
3512 pExit->VpContext.ExecutionState.EferLma && pExit->VpContext.Cs.Long ))
3513 {
3514 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pExit->VpContext.Rip,
3515 pExit->VpException.InstructionBytes,
3516 pExit->VpException.InstructionByteCount);
3517 Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD -> emulated -> %Rrc\n",
3518 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip,
3519 nemR3WinExecStateToLogStr(&pExit->VpContext), VBOXSTRICTRC_VAL(rcStrict) ));
3520 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUdHandled);
3521 return rcStrict;
3522 }
3523
3524 Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD [%.*Rhxs] -> re-injected\n", pVCpu->idCpu,
3525 pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
3526 pExit->VpException.InstructionByteCount, pExit->VpException.InstructionBytes ));
3527 break;
3528
3529 /*
3530 * Workaround the lovely mesa driver assuming that vmsvga means vmware
3531 * hypervisor and tries to log stuff to the host.
3532 */
3533 case X86_XCPT_GP:
3534 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionGp);
3535 /** @todo r=bird: Need workaround in IEM for this, right?
3536 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_GP),
3537 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC()); */
3538 if ( !pVCpu->nem.s.fTrapXcptGpForLovelyMesaDrv
3539 || !nemHcWinIsMesaDrvGp(pVCpu, &pVCpu->cpum.GstCtx, pExit->VpException.InstructionBytes,
3540 pExit->VpException.InstructionByteCount))
3541 {
3542# if 1 /** @todo Need to emulate instruction or we get a triple fault when trying to inject the #GP... */
3543 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pExit->VpContext.Rip,
3544 pExit->VpException.InstructionBytes,
3545 pExit->VpException.InstructionByteCount);
3546 Log4(("XcptExit/%u: %04x:%08RX64/%s: #GP -> emulated -> %Rrc\n",
3547 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip,
3548 nemR3WinExecStateToLogStr(&pExit->VpContext), VBOXSTRICTRC_VAL(rcStrict) ));
3550 return rcStrict;
3551# else
3552 break;
3553# endif
3554 }
3555 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionGpMesa);
3556 return nemHcWinHandleMesaDrvGp(pVCpu, &pVCpu->cpum.GstCtx);
3557
3558 /*
3559 * Filter debug exceptions.
3560 */
3561 case X86_XCPT_DB:
3562 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionDb);
3563 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_DB),
3564 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
3565 Log4(("XcptExit/%u: %04x:%08RX64/%s: #DB - TODO\n",
3566 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext) ));
3567 break;
3568
3569 case X86_XCPT_BP:
3570 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionBp);
3571 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_BP),
3572 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
3573 Log4(("XcptExit/%u: %04x:%08RX64/%s: #BP - TODO - %u\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3574 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.InstructionLength));
3575 enmEvtType = TRPM_SOFTWARE_INT; /* We're at the INT3 instruction, not after it. */
3576 break;
3577
3578 /* This shouldn't happen. */
3579 default:
3580 AssertLogRelMsgFailedReturn(("ExceptionType=%#x\n", pExit->VpException.ExceptionType), VERR_IEM_IPE_6);
3581 }
3582
3583 /*
3584 * Inject it.
3585 */
3586 rcStrict = IEMInjectTrap(pVCpu, pExit->VpException.ExceptionType, enmEvtType, pExit->VpException.ErrorCode,
3587 pExit->VpException.ExceptionParameter /*??*/, pExit->VpContext.InstructionLength);
3588 Log4(("XcptExit/%u: %04x:%08RX64/%s: %#u -> injected -> %Rrc\n",
3589 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip,
3590 nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpException.ExceptionType, VBOXSTRICTRC_VAL(rcStrict) ));
3591
3592 RT_NOREF_PV(pVM);
3593 return rcStrict;
3594}
3595#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
3596
3597
3598#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
3599/**
3600 * Deals with unrecoverable exception (triple fault).
3601 *
3602 * Seen WRMSR 0x201 (IA32_MTRR_PHYSMASK0) writes from grub / debian9 ending up
3603 * here too. So we'll leave it to IEM to decide.
3604 *
3605 * @returns Strict VBox status code.
3606 * @param pVCpu The cross context per CPU structure.
3607 * @param pMsgHdr The message header.
3608 * @sa nemR3WinHandleExitUnrecoverableException
3609 */
3610NEM_TMPL_STATIC VBOXSTRICTRC
3611nemHCWinHandleMessageUnrecoverableException(PVMCPUCC pVCpu, HV_X64_INTERCEPT_MESSAGE_HEADER const *pMsgHdr)
3612{
3613 /* Check message register value sanity. */
3614 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, HvX64RegisterCs, pMsgHdr->CsSegment);
3615 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRip, pMsgHdr->Rip);
3616 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRflags, pMsgHdr->Rflags);
3617 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterCr8, (uint64_t)pMsgHdr->Cr8);
3618
3619# if 0
3620 /*
3621 * Just copy the state we've got and handle it in the loop for now.
3622 */
3623 nemHCWinCopyStateFromX64Header(pVCpu, pMsgHdr);
3624 Log(("TripleExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT\n",
3625          pVCpu->idCpu, pMsgHdr->CsSegment.Selector, pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags));
3626 return VINF_EM_TRIPLE_FAULT;
3627# else
3628 /*
3629 * Let IEM decide whether this is really it.
3630 */
3631 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_UNRECOVERABLE_EXCEPTION),
3632 pMsgHdr->Rip + pMsgHdr->CsSegment.Base, ASMReadTSC());
3633 nemHCWinCopyStateFromX64Header(pVCpu, pMsgHdr);
3634 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL, "TripleExit");
3635 if (rcStrict == VINF_SUCCESS)
3636 {
3637 rcStrict = IEMExecOne(pVCpu);
3638 if (rcStrict == VINF_SUCCESS)
3639 {
3640 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_SUCCESS\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
3641 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags ));
3642 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT; /* Make sure to reset pending #DB(0). */
3643 return VINF_SUCCESS;
3644 }
3645 if (rcStrict == VINF_EM_TRIPLE_FAULT)
3646 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT!\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
3647              pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags ));
3648 else
3649 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (IEMExecOne)\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
3650 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
3651 }
3652 else
3653 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (state import)\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
3654 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
3655 return rcStrict;
3656# endif
3657}
3658#elif defined(IN_RING3)
3659/**
3660 * Deals with unrecoverable exception exits (WHvRunVpExitReasonUnrecoverableException).
3661 *
3662 * @returns Strict VBox status code.
3663 * @param pVM The cross context VM structure.
3664 * @param pVCpu The cross context per CPU structure.
3665 * @param pExit The VM exit information to handle.
3666 * @sa nemHCWinHandleMessageUnrecoverableException
3667 */
3668NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExitUnrecoverableException(PVMCC pVM, PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
3669{
3670# if 0
3671 /*
3672 * Just copy the state we've got and handle it in the loop for now.
3673 */
3674 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
3675 Log(("TripleExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3676 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags));
3677 RT_NOREF_PV(pVM);
3678 return VINF_EM_TRIPLE_FAULT;
3679# else
3680 /*
3681 * Let IEM decide whether this is really it.
3682 */
3683 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_UNRECOVERABLE_EXCEPTION),
3684 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
3685 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
3686 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL, "TripleExit");
3687 if (rcStrict == VINF_SUCCESS)
3688 {
3689 rcStrict = IEMExecOne(pVCpu);
3690 if (rcStrict == VINF_SUCCESS)
3691 {
3692 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_SUCCESS\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3693 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags));
3694 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT; /* Make sure to reset pending #DB(0). */
3695 return VINF_SUCCESS;
3696 }
3697 if (rcStrict == VINF_EM_TRIPLE_FAULT)
3698 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT!\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3699                 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags ));
3700 else
3701 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (IEMExecOne)\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3702 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
3703 }
3704 else
3705 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (state import)\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3706 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
3707 RT_NOREF_PV(pVM);
3708 return rcStrict;
3709# endif
3710
3711}
3712#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
3713
3714
3715#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
3716/**
3717 * Handles messages (VM exits).
3718 *
3719 * @returns Strict VBox status code.
3720 * @param pVM The cross context VM structure.
3721 * @param pVCpu The cross context per CPU structure.
3722 * @param pMappingHeader The message slot mapping.
3723 * @sa nemR3WinHandleExit
3724 */
3725NEM_TMPL_STATIC VBOXSTRICTRC
3726nemHCWinHandleMessage(PVMCC pVM, PVMCPUCC pVCpu, VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader)
3727{
3728 if (pMappingHeader->enmVidMsgType == VidMessageHypervisorMessage)
3729 {
3730 AssertMsg(pMappingHeader->cbMessage == HV_MESSAGE_SIZE, ("%#x\n", pMappingHeader->cbMessage));
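        /* The hypervisor message is laid out immediately after the mapping header in the message slot. */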
3731 HV_MESSAGE const *pMsg = (HV_MESSAGE const *)(pMappingHeader + 1);
3732 switch (pMsg->Header.MessageType)
3733 {
3734 case HvMessageTypeUnmappedGpa:
3735 Assert(pMsg->Header.PayloadSize == RT_UOFFSETOF(HV_X64_MEMORY_INTERCEPT_MESSAGE, DsSegment));
3736 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemUnmapped);
3737 return nemHCWinHandleMessageMemory(pVM, pVCpu, &pMsg->X64MemoryIntercept);
3738
3739 case HvMessageTypeGpaIntercept:
3740 Assert(pMsg->Header.PayloadSize == RT_UOFFSETOF(HV_X64_MEMORY_INTERCEPT_MESSAGE, DsSegment));
3741 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemIntercept);
3742 return nemHCWinHandleMessageMemory(pVM, pVCpu, &pMsg->X64MemoryIntercept);
3743
3744 case HvMessageTypeX64IoPortIntercept:
3745 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64IoPortIntercept));
3746 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitPortIo);
3747 return nemHCWinHandleMessageIoPort(pVM, pVCpu, &pMsg->X64IoPortIntercept);
3748
3749 case HvMessageTypeX64Halt:
3750 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitHalt);
3751 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_HALT),
3752 pMsg->X64InterceptHeader.Rip + pMsg->X64InterceptHeader.CsSegment.Base, ASMReadTSC());
3753 Log4(("HaltExit\n"));
3754 return VINF_EM_HALT;
3755
3756 case HvMessageTypeX64InterruptWindow:
3757 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64InterruptWindow));
3758 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitInterruptWindow);
3759 return nemHCWinHandleMessageInterruptWindow(pVM, pVCpu, &pMsg->X64InterruptWindow);
3760
3761 case HvMessageTypeX64CpuidIntercept:
3762 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64CpuIdIntercept));
3763 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitCpuId);
3764 return nemHCWinHandleMessageCpuId(pVM, pVCpu, &pMsg->X64CpuIdIntercept);
3765
3766 case HvMessageTypeX64MsrIntercept:
3767 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64MsrIntercept));
3768 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMsr);
3769 return nemHCWinHandleMessageMsr(pVCpu, &pMsg->X64MsrIntercept);
3770
3771 case HvMessageTypeX64ExceptionIntercept:
3772 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64ExceptionIntercept));
3773 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitException);
3774 return nemHCWinHandleMessageException(pVCpu, &pMsg->X64ExceptionIntercept);
3775
3776 case HvMessageTypeUnrecoverableException:
3777 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64InterceptHeader));
3778 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitUnrecoverable);
3779 return nemHCWinHandleMessageUnrecoverableException(pVCpu, &pMsg->X64InterceptHeader);
3780
3781 case HvMessageTypeInvalidVpRegisterValue:
3782 case HvMessageTypeUnsupportedFeature:
3783 case HvMessageTypeTlbPageSizeMismatch:
3784 LogRel(("Unimplemented msg:\n%.*Rhxd\n", (int)sizeof(*pMsg), pMsg));
3785 AssertLogRelMsgFailedReturn(("Message type %#x not implemented!\n%.32Rhxd\n", pMsg->Header.MessageType, pMsg),
3786 VERR_NEM_IPE_3);
3787
3788 case HvMessageTypeX64ApicEoi:
3789 case HvMessageTypeX64LegacyFpError:
3790 case HvMessageTypeX64RegisterIntercept:
3791 case HvMessageTypeApicEoi:
3792 case HvMessageTypeFerrAsserted:
3793 case HvMessageTypeEventLogBufferComplete:
3794 case HvMessageTimerExpired:
3795 LogRel(("Unexpected msg:\n%.*Rhxd\n", (int)sizeof(*pMsg), pMsg));
3796 AssertLogRelMsgFailedReturn(("Unexpected message on CPU #%u: %#x\n", pVCpu->idCpu, pMsg->Header.MessageType),
3797 VERR_NEM_IPE_3);
3798
3799 default:
3800 LogRel(("Unknown msg:\n%.*Rhxd\n", (int)sizeof(*pMsg), pMsg));
3801 AssertLogRelMsgFailedReturn(("Unknown message on CPU #%u: %#x\n", pVCpu->idCpu, pMsg->Header.MessageType),
3802 VERR_NEM_IPE_3);
3803 }
3804 }
3805 else
3806 AssertLogRelMsgFailedReturn(("Unexpected VID message type on CPU #%u: %#x LB %u\n",
3807 pVCpu->idCpu, pMappingHeader->enmVidMsgType, pMappingHeader->cbMessage),
3808 VERR_NEM_IPE_4);
3809}
3810#elif defined(IN_RING3)
3811/**
3812 * Handles VM exits.
3813 *
3814 * @returns Strict VBox status code.
3815 * @param pVM The cross context VM structure.
3816 * @param pVCpu The cross context per CPU structure.
3817 * @param pExit The VM exit information to handle.
3818 * @sa nemHCWinHandleMessage
3819 */
3820NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExit(PVMCC pVM, PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
3821{
3822 switch (pExit->ExitReason)
3823 {
3824 case WHvRunVpExitReasonMemoryAccess:
3825 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemUnmapped);
3826 return nemR3WinHandleExitMemory(pVM, pVCpu, pExit);
3827
3828 case WHvRunVpExitReasonX64IoPortAccess:
3829 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitPortIo);
3830 return nemR3WinHandleExitIoPort(pVM, pVCpu, pExit);
3831
3832 case WHvRunVpExitReasonX64Halt:
3833 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitHalt);
3834 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_HALT),
3835 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
3836 Log4(("HaltExit/%u\n", pVCpu->idCpu));
3837 return VINF_EM_HALT;
3838
3839 case WHvRunVpExitReasonCanceled:
3840 Log4(("CanceledExit/%u\n", pVCpu->idCpu));
3841 return VINF_SUCCESS;
3842
3843 case WHvRunVpExitReasonX64InterruptWindow:
3844 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitInterruptWindow);
3845 return nemR3WinHandleExitInterruptWindow(pVM, pVCpu, pExit);
3846
3847 case WHvRunVpExitReasonX64Cpuid:
3848 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitCpuId);
3849 return nemR3WinHandleExitCpuId(pVM, pVCpu, pExit);
3850
3851 case WHvRunVpExitReasonX64MsrAccess:
3852 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMsr);
3853 return nemR3WinHandleExitMsr(pVM, pVCpu, pExit);
3854
3855 case WHvRunVpExitReasonException:
3856 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitException);
3857 return nemR3WinHandleExitException(pVM, pVCpu, pExit);
3858
3859 case WHvRunVpExitReasonUnrecoverableException:
3860 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitUnrecoverable);
3861 return nemR3WinHandleExitUnrecoverableException(pVM, pVCpu, pExit);
3862
3863 case WHvRunVpExitReasonUnsupportedFeature:
3864 case WHvRunVpExitReasonInvalidVpRegisterValue:
3865 LogRel(("Unimplemented exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit));
3866 AssertLogRelMsgFailedReturn(("Unexpected exit on CPU #%u: %#x\n%.32Rhxd\n",
3867 pVCpu->idCpu, pExit->ExitReason, pExit), VERR_NEM_IPE_3);
3868
3869 /* Undesired exits: */
3870 case WHvRunVpExitReasonNone:
3871 default:
3872 LogRel(("Unknown exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit));
3873 AssertLogRelMsgFailedReturn(("Unknown exit on CPU #%u: %#x!\n", pVCpu->idCpu, pExit->ExitReason), VERR_NEM_IPE_3);
3874 }
3875}
3876#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
3877
3878
3879#if defined(IN_RING0) && defined(NEM_WIN_WITH_RING0_RUNLOOP)
3880/**
3881 * Perform an I/O control operation on the partition handle (VID.SYS),
3882 * restarting on alert-like behaviour.
3883 *
3884 * @returns NT status code.
3885 * @param pGVM The ring-0 VM structure.
3886 * @param pGVCpu The global (ring-0) per CPU structure.
3887 * @param fFlags The wait flags.
3888 * @param cMillies The timeout in milliseconds.
3889 */
3890static NTSTATUS nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(PGVM pGVM, PGVMCPU pGVCpu, uint32_t fFlags, uint32_t cMillies)
3891{
3892 pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu = pGVCpu->idCpu;
3893 pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags = fFlags;
3894 pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = cMillies;
3895 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVCpu, pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
3896 &pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
3897 pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.cbInput,
3898 NULL, 0);
3899 if (rcNt == STATUS_SUCCESS)
3900 { /* likely */ }
3901 /*
3902 * Generally, if we get down here, we have been interrupted between ACK'ing
3903 * a message and waiting for the next one due to an NtAlertThread call. So, we
3904 * should skip ACK'ing the previous message and get on with waiting for the next.
3905 * See similar stuff in nemHCWinRunGC().
3906 */
3907 else if ( rcNt == STATUS_TIMEOUT
3908 || rcNt == STATUS_ALERTED /* just in case */
3909 || rcNt == STATUS_KERNEL_APC /* just in case */
3910 || rcNt == STATUS_USER_APC /* just in case */)
3911 {
3912 DBGFTRACE_CUSTOM(pGVCpu->CTX_SUFF(pVM), "IoCtlMessageSlotHandleAndGetNextRestart/1 %#x (f=%#x)", rcNt, fFlags);
3913 STAM_REL_COUNTER_INC(&pGVCpu->nem.s.StatStopCpuPendingAlerts);
3914 Assert(fFlags & VID_MSHAGN_F_GET_NEXT_MESSAGE);
3915
3916 pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu = pGVCpu->idCpu;
3917 pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags = fFlags & ~VID_MSHAGN_F_HANDLE_MESSAGE;
3918 pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = cMillies;
3919 rcNt = nemR0NtPerformIoControl(pGVM, pGVCpu, pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
3920 &pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
3921 pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.cbInput,
3922 NULL, 0);
3923 DBGFTRACE_CUSTOM(pGVM, "IoCtlMessageSlotHandleAndGetNextRestart/2 %#x", rcNt);
3924 }
3925 return rcNt;
3926}
3927#endif /* IN_RING0 */
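/*
 * Annotation (not part of the original source): a rough usage sketch of the
 * helper above.  The VID message-slot protocol folds the ACK of the current
 * message into the wait for the next one, so a typical caller sequence is:
 */
#if 0 /* illustrative sketch only, reusing the real flag names from this file */
    /* First wait: nothing to ACK yet, just ask for the next message. */
    NTSTATUS rcNt = nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(pGVM, pGVCpu,
                                                                   VID_MSHAGN_F_GET_NEXT_MESSAGE, 30000 /*ms*/);
    /* ... process the message in the slot mapping ... */
    /* Later waits: ACK the handled message and wait for the next one in a single call. */
    rcNt = nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(pGVM, pGVCpu,
                                                          VID_MSHAGN_F_HANDLE_MESSAGE | VID_MSHAGN_F_GET_NEXT_MESSAGE,
                                                          30000 /*ms*/);
#endif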
3928
3929
3930#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
3931/**
3932 * Worker for nemHCWinRunGC that stops the execution on the way out.
3933 *
3934 * The CPU was running the last time we checked, so there are no messages that
3935 * need to be marked handled/whatever. The caller checks this.
3936 *
3937 * @returns rcStrict on success, error status on failure.
3938 * @param pVM The cross context VM structure.
3939 * @param pVCpu The cross context per CPU structure.
3940 * @param rcStrict The nemHCWinRunGC return status. This is a little
3941 * bit unnecessary, except in internal error cases,
3942 * since we won't need to stop the CPU if we took an
3943 * exit.
3944 * @param pMappingHeader The message slot mapping.
3945 */
3946NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinStopCpu(PVMCC pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict,
3947 VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader)
3948{
3949# ifdef DBGFTRACE_ENABLED
3950 HV_MESSAGE const volatile *pMsgForTrace = (HV_MESSAGE const volatile *)(pMappingHeader + 1);
3951# endif
3952
3953 /*
3954 * Try stopping the processor. If we're lucky we manage to do this before it
3955 * does another VM exit.
3956 */
3957 DBGFTRACE_CUSTOM(pVM, "nemStop#0");
3958# ifdef IN_RING0
3959 pVCpu->nem.s.uIoCtlBuf.idCpu = pVCpu->idCpu;
3960 NTSTATUS rcNt = nemR0NtPerformIoControl(pVM, pVCpu, pVM->nemr0.s.IoCtlStopVirtualProcessor.uFunction,
3961 &pVCpu->nem.s.uIoCtlBuf.idCpu, sizeof(pVCpu->nem.s.uIoCtlBuf.idCpu),
3962 NULL, 0);
3963 if (NT_SUCCESS(rcNt))
3964 {
3965 DBGFTRACE_CUSTOM(pVM, "nemStop#0: okay (%#x)", rcNt);
3966 Log8(("nemHCWinStopCpu: Stopping CPU succeeded (cpu status %u)\n", nemHCWinCpuGetRunningStatus(pVCpu) ));
3967 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuSuccess);
3968 return rcStrict;
3969 }
3970# else
3971 BOOL fRet = VidStopVirtualProcessor(pVM->nem.s.hPartitionDevice, pVCpu->idCpu);
3972 if (fRet)
3973 {
3974 DBGFTRACE_CUSTOM(pVM, "nemStop#0: okay");
3975 Log8(("nemHCWinStopCpu: Stopping CPU succeeded (cpu status %u)\n", nemHCWinCpuGetRunningStatus(pVCpu) ));
3976 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuSuccess);
3977 return rcStrict;
3978 }
3979# endif
3980
3981 /*
3982 * Dang. The CPU stopped by itself and we've got a couple of messages to deal with.
3983 */
3984# ifdef IN_RING0
3985 DBGFTRACE_CUSTOM(pVM, "nemStop#0: pending (%#x)", rcNt);
3986 AssertLogRelMsgReturn(rcNt == ERROR_VID_STOP_PENDING, ("rcNt=%#x\n", rcNt),
3987 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3988# else
3989 DWORD dwErr = RTNtLastErrorValue();
3990 DBGFTRACE_CUSTOM(pVM, "nemStop#0: pending (%#x)", dwErr);
3991 AssertLogRelMsgReturn(dwErr == ERROR_VID_STOP_PENDING, ("dwErr=%#u (%#x)\n", dwErr, dwErr),
3992 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3993# endif
3994 Log8(("nemHCWinStopCpu: Stopping CPU #%u pending...\n", pVCpu->idCpu));
3995 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuPending);
3996
3997 /*
3998 * First message: Exit or similar, sometimes VidMessageStopRequestComplete.
3999 * Note! We can safely ASSUME that rcStrict isn't an important informational status here.
4000 */
4001# ifdef IN_RING0
4002 rcNt = nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(pVM, pVCpu, VID_MSHAGN_F_GET_NEXT_MESSAGE, 30000 /*ms*/);
4003 DBGFTRACE_CUSTOM(pVM, "nemStop#1: %#x / %#x %#x %#x", rcNt, pMappingHeader->enmVidMsgType, pMappingHeader->cbMessage,
4004 pMsgForTrace->Header.MessageType);
4005 AssertLogRelMsgReturn(rcNt == STATUS_SUCCESS,
4006 ("1st VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %#x\n", rcNt),
4007 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
4008# else
4009 BOOL fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu,
4010 VID_MSHAGN_F_GET_NEXT_MESSAGE, 30000 /*ms*/);
4011 DBGFTRACE_CUSTOM(pVM, "nemStop#1: %d+%#x / %#x %#x %#x", fWait, RTNtLastErrorValue(), pMappingHeader->enmVidMsgType,
4012 pMappingHeader->cbMessage, pMsgForTrace->Header.MessageType);
4013 AssertLogRelMsgReturn(fWait, ("1st VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()),
4014 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
4015# endif
4016
4017 VID_MESSAGE_TYPE enmVidMsgType = pMappingHeader->enmVidMsgType;
4018 if (enmVidMsgType != VidMessageStopRequestComplete)
4019 {
4020 VBOXSTRICTRC rcStrict2 = nemHCWinHandleMessage(pVM, pVCpu, pMappingHeader);
4021 if (rcStrict2 != VINF_SUCCESS && RT_SUCCESS(rcStrict))
4022 rcStrict = rcStrict2;
4023 DBGFTRACE_CUSTOM(pVM, "nemStop#1: handled %#x -> %d", pMsgForTrace->Header.MessageType, VBOXSTRICTRC_VAL(rcStrict));
4024
4025 /*
4026 * Mark it as handled and get the stop request completed message, then mark
4027 * that as handled too. The CPU is then back in the fully stopped state.
4028 */
4029# ifdef IN_RING0
4030 rcNt = nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(pVM, pVCpu,
4031 VID_MSHAGN_F_HANDLE_MESSAGE | VID_MSHAGN_F_GET_NEXT_MESSAGE,
4032 30000 /*ms*/);
4033 DBGFTRACE_CUSTOM(pVM, "nemStop#2: %#x / %#x %#x %#x", rcNt, pMappingHeader->enmVidMsgType, pMappingHeader->cbMessage,
4034 pMsgForTrace->Header.MessageType);
4035 AssertLogRelMsgReturn(rcNt == STATUS_SUCCESS,
4036 ("2nd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %#x\n", rcNt),
4037 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
4038# else
4039 fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu,
4040 VID_MSHAGN_F_HANDLE_MESSAGE | VID_MSHAGN_F_GET_NEXT_MESSAGE, 30000 /*ms*/);
4041 DBGFTRACE_CUSTOM(pVM, "nemStop#2: %d+%#x / %#x %#x %#x", fWait, RTNtLastErrorValue(), pMappingHeader->enmVidMsgType,
4042 pMappingHeader->cbMessage, pMsgForTrace->Header.MessageType);
4043 AssertLogRelMsgReturn(fWait, ("2nd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()),
4044 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
4045# endif
4046
4047 /* It should be a stop request completed message. */
4048 enmVidMsgType = pMappingHeader->enmVidMsgType;
4049 AssertLogRelMsgReturn(enmVidMsgType == VidMessageStopRequestComplete,
4050 ("Unexpected 2nd message following ERROR_VID_STOP_PENDING: %#x LB %#x\n",
4051 enmVidMsgType, pMappingHeader->cbMessage),
4052 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
4053
4054 /*
4055 * Mark the VidMessageStopRequestComplete message as handled.
4056 */
4057# ifdef IN_RING0
4058 rcNt = nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(pVM, pVCpu, VID_MSHAGN_F_HANDLE_MESSAGE, 30000 /*ms*/);
4059 DBGFTRACE_CUSTOM(pVM, "nemStop#3: %#x / %#x %#x %#x", rcNt, pMappingHeader->enmVidMsgType,
4060 pMappingHeader->cbMessage, pMsgForTrace->Header.MessageType);
4061 AssertLogRelMsgReturn(rcNt == STATUS_SUCCESS,
4062 ("3rd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %#x\n", rcNt),
4063 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
4064# else
4065 fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu, VID_MSHAGN_F_HANDLE_MESSAGE, 30000 /*ms*/);
4066 DBGFTRACE_CUSTOM(pVM, "nemStop#3: %d+%#x / %#x %#x %#x", fWait, RTNtLastErrorValue(), pMappingHeader->enmVidMsgType,
4067 pMappingHeader->cbMessage, pMsgForTrace->Header.MessageType);
4068 AssertLogRelMsgReturn(fWait, ("3rd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()),
4069 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
4070# endif
4071 Log8(("nemHCWinStopCpu: Stopped the CPU (rcStrict=%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict) ));
4072 }
4073 else
4074 {
4075 /** @todo I'm not so sure about this now... */
4076 DBGFTRACE_CUSTOM(pVM, "nemStop#9: %#x %#x %#x", pMappingHeader->enmVidMsgType,
4077 pMappingHeader->cbMessage, pMsgForTrace->Header.MessageType);
4078 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuPendingOdd);
4079 Log8(("nemHCWinStopCpu: Stopped the CPU (rcStrict=%Rrc) - 1st VidMessageSlotHandleAndGetNext got VidMessageStopRequestComplete.\n",
4080 VBOXSTRICTRC_VAL(rcStrict) ));
4081 }
4082 return rcStrict;
4083}
4084#endif /* NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
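/*
 * Annotation (not part of the original source): the stop handshake implemented
 * by nemHCWinStopCpu above, in short:
 *   1. Ask VID.SYS to stop the vCPU; ERROR_VID_STOP_PENDING means it is still
 *      busy delivering an exit.
 *   2. MSHAGN(GET_NEXT): fetch the pending exit message and pass it to
 *      nemHCWinHandleMessage.
 *   3. MSHAGN(HANDLE | GET_NEXT): ACK it and fetch what should now be
 *      VidMessageStopRequestComplete.
 *   4. MSHAGN(HANDLE): ACK the completion; the vCPU is now fully stopped.
 */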
4085
4086#if defined(NEM_WIN_TEMPLATE_MODE_OWN_RUN_API) || defined(IN_RING3)
4087
4088/**
4089 * Deals with pending interrupt related force flags, may inject interrupt.
4090 *
4091 * @returns VBox strict status code.
4092 * @param pVM The cross context VM structure.
4093 * @param pVCpu The cross context per CPU structure.
4094 * @param pfInterruptWindows Where to return interrupt window flags.
4095 */
4096NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleInterruptFF(PVMCC pVM, PVMCPUCC pVCpu, uint8_t *pfInterruptWindows)
4097{
4098 Assert(!TRPMHasTrap(pVCpu));
4099 RT_NOREF_PV(pVM);
4100
4101 /*
4102 * First update APIC. We ASSUME this won't need TPR/CR8.
4103 */
4104 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
4105 {
4106 APICUpdatePendingInterrupts(pVCpu);
4107 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
4108 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
4109 return VINF_SUCCESS;
4110 }
4111
4112 /*
4113 * We don't currently implement SMIs.
4114 */
4115 AssertReturn(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI), VERR_NEM_IPE_0);
4116
4117 /*
4118 * Check if we've got the minimum of state required for deciding whether we
4119 * can inject interrupts and NMIs. If we don't have it, get all we might require
4120 * for injection via IEM.
4121 */
4122 bool const fPendingNmi = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4123 uint64_t fNeedExtrn = CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS
4124 | (fPendingNmi ? CPUMCTX_EXTRN_INHIBIT_NMI : 0);
4125 if (pVCpu->cpum.GstCtx.fExtrn & fNeedExtrn)
4126 {
4127 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "IntFF");
4128 if (rcStrict != VINF_SUCCESS)
4129 return rcStrict;
4130 }
4131 bool const fInhibitInterrupts = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
4132 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip;
4133
4134 /*
4135 * NMI? Try deliver it first.
4136 */
4137 if (fPendingNmi)
4138 {
4139 if ( !fInhibitInterrupts
4140 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
4141 {
4142 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "NMI");
4143 if (rcStrict == VINF_SUCCESS)
4144 {
4145 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4146 rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_NMI, TRPM_HARDWARE_INT, 0, 0, 0);
4147 Log8(("Injected NMI on %u (%d)\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
4148 }
4149 return rcStrict;
4150 }
4151 *pfInterruptWindows |= NEM_WIN_INTW_F_NMI;
4152 Log8(("NMI window pending on %u\n", pVCpu->idCpu));
4153 }
4154
4155 /*
4156 * APIC or PIC interrupt?
4157 */
4158 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
4159 {
4160 if ( !fInhibitInterrupts
4161 && pVCpu->cpum.GstCtx.rflags.Bits.u1IF)
4162 {
4163 AssertCompile(NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT & CPUMCTX_EXTRN_APIC_TPR);
4164 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "Int");
4165 if (rcStrict == VINF_SUCCESS)
4166 {
4167 uint8_t bInterrupt;
4168 int rc = PDMGetInterrupt(pVCpu, &bInterrupt);
4169 if (RT_SUCCESS(rc))
4170 {
4171 Log8(("Injecting interrupt %#x on %u: %04x:%08RX64 efl=%#x\n", bInterrupt, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags));
4172 rcStrict = IEMInjectTrap(pVCpu, bInterrupt, TRPM_HARDWARE_INT, 0, 0, 0);
4173 Log8(("Injected interrupt %#x on %u (%d)\n", bInterrupt, pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
4174 }
4175 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
4176 {
4177 *pfInterruptWindows |= ((bInterrupt >> 4) << NEM_WIN_INTW_F_PRIO_SHIFT) | NEM_WIN_INTW_F_REGULAR;
4178 Log8(("VERR_APIC_INTR_MASKED_BY_TPR: *pfInterruptWindows=%#x\n", *pfInterruptWindows));
4179 }
4180 else
4181 Log8(("PDMGetInterrupt failed -> %Rrc\n", rc));
4182 }
4183 return rcStrict;
4184 }
4185
4186 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC) && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC))
4187 {
4188 /* If only an APIC interrupt is pending, we need to know its priority. Otherwise we'll
4189 * likely get pointless deliverability notifications with IF=1 but TPR still too high.
4190 */
4191 bool fPendingIntr = false;
4192 uint8_t bTpr = 0;
4193 uint8_t bPendingIntr = 0;
4194 int rc = APICGetTpr(pVCpu, &bTpr, &fPendingIntr, &bPendingIntr);
4195 AssertRC(rc);
4196 *pfInterruptWindows |= (bPendingIntr >> 4) << NEM_WIN_INTW_F_PRIO_SHIFT;
4197 Log8(("Interrupt window pending on %u: %#x (bTpr=%#x fPendingIntr=%d bPendingIntr=%#x)\n",
4198 pVCpu->idCpu, *pfInterruptWindows, bTpr, fPendingIntr, bPendingIntr));
4199 }
4200 else
4201 {
4202 *pfInterruptWindows |= NEM_WIN_INTW_F_REGULAR;
4203 Log8(("Interrupt window pending on %u: %#x\n", pVCpu->idCpu, *pfInterruptWindows));
4204 }
4205 }
4206
4207 return VINF_SUCCESS;
4208}
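/*
 * Annotation (not part of the original source): a worked example of the
 * priority encoding used above.  If APICGetTpr reports bPendingIntr = 0x51,
 * the priority class is 0x51 >> 4 = 5, so the caller requests an
 * interrupt-window exit that fires once the TPR drops below class 5:
 *
 *     *pfInterruptWindows |= (0x51 >> 4) << NEM_WIN_INTW_F_PRIO_SHIFT;
 */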
4209
4210
4211/**
4212 * Inner NEM runloop for windows.
4213 *
4214 * @returns Strict VBox status code.
4215 * @param pVM The cross context VM structure.
4216 * @param pVCpu The cross context per CPU structure.
4217 */
4218NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinRunGC(PVMCC pVM, PVMCPUCC pVCpu)
4219{
4220 LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 <=\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags));
4221# ifdef LOG_ENABLED
4222 if (LogIs3Enabled())
4223 nemHCWinLogState(pVM, pVCpu);
4224# endif
4225
4226 /*
4227 * Try switch to NEM runloop state.
4228 */
4229 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
4230 { /* likely */ }
4231 else
4232 {
4233 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
4234 LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
4235 return VINF_SUCCESS;
4236 }
4237
4238 /*
4239 * The run loop.
4240 *
4241 * Current approach to state updating to use the sledgehammer and sync
4242 * everything every time. This will be optimized later.
4243 */
4244# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4245 VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader = (VID_MESSAGE_MAPPING_HEADER volatile *)pVCpu->nem.s.pvMsgSlotMapping;
4246# endif
4247 const bool fSingleStepping = DBGFIsStepping(pVCpu);
4248// const uint32_t fCheckVmFFs = !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK
4249// : VM_FF_HP_R0_PRE_HM_STEP_MASK;
4250// const uint32_t fCheckCpuFFs = !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK;
4251 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
4252 for (unsigned iLoop = 0;; iLoop++)
4253 {
4254# if !defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) && !defined(VBOX_WITH_PGM_NEM_MODE)
4255 /*
4256 * Hack alert!
4257 */
4258 uint32_t const cMappedPages = pVM->nem.s.cMappedPages;
4259 if (cMappedPages < pVM->nem.s.cMaxMappedPages)
4260 { /* likely */ }
4261 else
4262 {
4263 PGMPhysNemEnumPagesByState(pVM, pVCpu, NEM_WIN_PAGE_STATE_READABLE, nemHCWinUnmapOnePageCallback, NULL);
4264 Log(("nemHCWinRunGC: Unmapped all; cMappedPages=%u -> %u\n", cMappedPages, pVM->nem.s.cMappedPages));
4265 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapAllPages);
4266 }
4267# endif
4268
4269 /*
4270 * Pending interrupts or such? Need to check and deal with this prior
4271 * to the state syncing.
4272 */
4273 pVCpu->nem.s.fDesiredInterruptWindows = 0;
4274 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC
4275 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
4276 {
4277# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4278 /* Make sure the CPU isn't executing. */
4279 if (pVCpu->nem.s.fHandleAndGetFlags == VID_MSHAGN_F_GET_NEXT_MESSAGE)
4280 {
4281 pVCpu->nem.s.fHandleAndGetFlags = 0;
4282 rcStrict = nemHCWinStopCpu(pVM, pVCpu, rcStrict, pMappingHeader);
4283 if (rcStrict == VINF_SUCCESS)
4284 { /* likely */ }
4285 else
4286 {
4287 LogFlow(("NEM/%u: breaking: nemHCWinStopCpu -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
4288 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
4289 break;
4290 }
4291 }
4292# endif
4293
4294 /* Try inject interrupt. */
4295 rcStrict = nemHCWinHandleInterruptFF(pVM, pVCpu, &pVCpu->nem.s.fDesiredInterruptWindows);
4296 if (rcStrict == VINF_SUCCESS)
4297 { /* likely */ }
4298 else
4299 {
4300 LogFlow(("NEM/%u: breaking: nemHCWinHandleInterruptFF -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
4301 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
4302 break;
4303 }
4304 }
4305
4306# ifndef NEM_WIN_WITH_A20
4307 /*
4308 * Do not execute in hyper-V if the A20 isn't enabled.
4309 */
4310 if (PGMPhysIsA20Enabled(pVCpu))
4311 { /* likely */ }
4312 else
4313 {
4314 rcStrict = VINF_EM_RESCHEDULE_REM;
4315 LogFlow(("NEM/%u: breaking: A20 disabled\n", pVCpu->idCpu));
4316 break;
4317 }
4318# endif
4319
4320 /*
4321 * Ensure that hyper-V has the whole state.
4322 * (We always update the interrupt windows settings when active as hyper-V seems
4323 * to forget about it after an exit.)
4324 */
4325 if ( (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK))
4326 != (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK)
4327 || ( ( pVCpu->nem.s.fDesiredInterruptWindows
4328 || pVCpu->nem.s.fCurrentInterruptWindows != pVCpu->nem.s.fDesiredInterruptWindows)
4329# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4330 && pVCpu->nem.s.fHandleAndGetFlags != VID_MSHAGN_F_GET_NEXT_MESSAGE /* not running */
4331# endif
4332 )
4333 )
4334 {
4335# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4336 AssertMsg(pVCpu->nem.s.fHandleAndGetFlags != VID_MSHAGN_F_GET_NEXT_MESSAGE /* not running */,
4337 ("%#x fExtrn=%#RX64 (%#RX64) fDesiredInterruptWindows=%d fCurrentInterruptWindows=%#x vs %#x\n",
4338 pVCpu->nem.s.fHandleAndGetFlags, pVCpu->cpum.GstCtx.fExtrn, ~pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK),
4339 pVCpu->nem.s.fDesiredInterruptWindows, pVCpu->nem.s.fCurrentInterruptWindows, pVCpu->nem.s.fDesiredInterruptWindows));
4340# endif
4341# ifdef IN_RING0
4342 int rc2 = nemR0WinExportState(pVM, pVCpu, &pVCpu->cpum.GstCtx);
4343# else
4344 int rc2 = nemHCWinCopyStateToHyperV(pVM, pVCpu);
4345# endif
4346 AssertRCReturn(rc2, rc2);
4347 }
4348
4349 /*
4350 * Poll timers and run for a bit.
4351 *
4352 * With the VID approach (ring-0 or ring-3) we can specify a timeout here,
4353 * so we take the time of the next timer event and use that as a deadline.
4354 * The rounding heuristics are "tuned" so that rhel5 (1K timer) will boot fine.
4355 */
4356 /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
4357 * the whole polling job when timers have changed... */
4358 uint64_t offDeltaIgnored;
4359 uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
4360 if ( !VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
4361 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
4362 {
4363# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4364 if (pVCpu->nem.s.fHandleAndGetFlags)
4365 { /* Very likely that the CPU does NOT need starting (pending msg, running). */ }
4366 else
4367 {
4368# ifdef IN_RING0
4369 pVCpu->nem.s.uIoCtlBuf.idCpu = pVCpu->idCpu;
4370 NTSTATUS rcNt = nemR0NtPerformIoControl(pVM, pVCpu, pVM->nemr0.s.IoCtlStartVirtualProcessor.uFunction,
4371 &pVCpu->nem.s.uIoCtlBuf.idCpu, sizeof(pVCpu->nem.s.uIoCtlBuf.idCpu),
4372 NULL, 0);
4373 LogFlow(("NEM/%u: IoCtlStartVirtualProcessor -> %#x\n", pVCpu->idCpu, rcNt));
4374 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("VidStartVirtualProcessor failed for CPU #%u: %#x\n", pVCpu->idCpu, rcNt),
4375 VERR_NEM_IPE_5);
4376# else
4377 AssertLogRelMsgReturn(g_pfnVidStartVirtualProcessor(pVM->nem.s.hPartitionDevice, pVCpu->idCpu),
4378 ("VidStartVirtualProcessor failed for CPU #%u: %u (%#x, rcNt=%#x)\n",
4379 pVCpu->idCpu, RTNtLastErrorValue(), RTNtLastErrorValue(), RTNtLastStatusValue()),
4380 VERR_NEM_IPE_5);
4381# endif
4382 pVCpu->nem.s.fHandleAndGetFlags = VID_MSHAGN_F_GET_NEXT_MESSAGE;
4383 }
4384# endif /* NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
4385
4386 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_WAIT, VMCPUSTATE_STARTED_EXEC_NEM))
4387 {
4388# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4389 uint64_t const nsNow = RTTimeNanoTS();
4390 int64_t const cNsNextTimerEvt = nsNextTimerEvt - nsNow; /* time left to the deadline */
4391 uint32_t cMsWait;
4392 if (cNsNextTimerEvt < 100000 /* ns */)
4393 cMsWait = 0;
4394 else if ((uint64_t)cNsNextTimerEvt < RT_NS_1SEC)
4395 {
4396 if ((uint32_t)cNsNextTimerEvt < 2*RT_NS_1MS)
4397 cMsWait = 1;
4398 else
4399 cMsWait = ((uint32_t)cNsNextTimerEvt - 100000 /*ns*/) / RT_NS_1MS;
4400 }
4401 else
4402 cMsWait = RT_MS_1SEC;
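/*
 * Annotation (not part of the original source): worked example of the
 * rounding above.  With 5 000 000 ns (5 ms) left to the next timer event,
 * cMsWait = (5000000 - 100000) / RT_NS_1MS = 4 ms, i.e. the wait is rounded
 * down so the host wakes us just ahead of the guest timer deadline.
 */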
4403# ifdef IN_RING0
4404 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu = pVCpu->idCpu;
4405 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags = pVCpu->nem.s.fHandleAndGetFlags;
4406 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = cMsWait;
4407 NTSTATUS rcNt = nemR0NtPerformIoControl(pVM, pVCpu, pVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
4408 &pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
4409 pVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.cbInput,
4410 NULL, 0);
4411 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
4412 if (rcNt == STATUS_SUCCESS)
4413# else
4414 BOOL fRet = VidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu,
4415 pVCpu->nem.s.fHandleAndGetFlags, cMsWait);
4416 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
4417 if (fRet)
4418# endif
4419# else
4420# ifdef LOG_ENABLED
4421 if (LogIsFlowEnabled())
4422 {
4423 static const WHV_REGISTER_NAME s_aNames[6] = { WHvX64RegisterCs, WHvX64RegisterRip, WHvX64RegisterRflags,
4424 WHvX64RegisterSs, WHvX64RegisterRsp, WHvX64RegisterCr0 };
4425 WHV_REGISTER_VALUE aRegs[RT_ELEMENTS(s_aNames)] = {0};
4426 WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, s_aNames, RT_ELEMENTS(s_aNames), aRegs);
4427 LogFlow(("NEM/%u: Entry @ %04x:%08RX64 IF=%d EFL=%#RX64 SS:RSP=%04x:%08RX64 cr0=%RX64\n",
4428 pVCpu->idCpu, aRegs[0].Segment.Selector, aRegs[1].Reg64, RT_BOOL(aRegs[2].Reg64 & X86_EFL_IF),
4429 aRegs[2].Reg64, aRegs[3].Segment.Selector, aRegs[4].Reg64, aRegs[5].Reg64));
4430 }
4431# endif
4432 WHV_RUN_VP_EXIT_CONTEXT ExitReason = {0};
4433 TMNotifyStartOfExecution(pVM, pVCpu);
4434
4435 HRESULT hrc = WHvRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, &ExitReason, sizeof(ExitReason));
4436
4437 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
4438 TMNotifyEndOfExecution(pVM, pVCpu, ASMReadTSC());
4439# ifdef LOG_ENABLED
4440 LogFlow(("NEM/%u: Exit @ %04X:%08RX64 IF=%d CR8=%#x Reason=%#x\n", pVCpu->idCpu, ExitReason.VpContext.Cs.Selector,
4441 ExitReason.VpContext.Rip, RT_BOOL(ExitReason.VpContext.Rflags & X86_EFL_IF), ExitReason.VpContext.Cr8,
4442 ExitReason.ExitReason));
4443# endif
4444 if (SUCCEEDED(hrc))
4445# endif
4446 {
4447 /*
4448 * Deal with the message.
4449 */
4450# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4451 rcStrict = nemHCWinHandleMessage(pVM, pVCpu, pMappingHeader);
4452 pVCpu->nem.s.fHandleAndGetFlags |= VID_MSHAGN_F_HANDLE_MESSAGE;
4453# else
4454 rcStrict = nemR3WinHandleExit(pVM, pVCpu, &ExitReason);
4455# endif
4456 if (rcStrict == VINF_SUCCESS)
4457 { /* hopefully likely */ }
4458 else
4459 {
4460 LogFlow(("NEM/%u: breaking: nemHCWinHandleMessage -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
4461 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
4462 break;
4463 }
4464 }
4465 else
4466 {
4467# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4468
4469 /* VID.SYS merges STATUS_ALERTED and STATUS_USER_APC into STATUS_TIMEOUT,
4470 so after NtAlertThread we end up here with a STATUS_TIMEOUT. And yeah,
4471 the error code conversion is into WAIT_XXX, i.e. NT status codes. */
4472# ifndef IN_RING0
4473 DWORD rcNt = GetLastError();
4474# endif
4475 LogFlow(("NEM/%u: VidMessageSlotHandleAndGetNext -> %#x\n", pVCpu->idCpu, rcNt));
4476 AssertLogRelMsgReturn( rcNt == STATUS_TIMEOUT
4477 || rcNt == STATUS_ALERTED /* just in case */
4478 || rcNt == STATUS_USER_APC /* ditto */
4479 || rcNt == STATUS_KERNEL_APC /* ditto */
4480 , ("VidMessageSlotHandleAndGetNext failed for CPU #%u: %#x (%u)\n",
4481 pVCpu->idCpu, rcNt, rcNt),
4482 VERR_NEM_IPE_0);
4483 pVCpu->nem.s.fHandleAndGetFlags = VID_MSHAGN_F_GET_NEXT_MESSAGE;
4484 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatGetMsgTimeout);
4485# else
4486 AssertLogRelMsgFailedReturn(("WHvRunVirtualProcessor failed for CPU #%u: %#x (%u)\n",
4487 pVCpu->idCpu, hrc, GetLastError()),
4488 VERR_NEM_IPE_0);
4489# endif
4490 }
4491
4492 /*
4493 * If no relevant FFs are pending, loop.
4494 */
4495 if ( !VM_FF_IS_ANY_SET( pVM, !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
4496 && !VMCPU_FF_IS_ANY_SET(pVCpu, !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
4497 continue;
4498
4499 /** @todo Try handle pending flags, not just return to EM loops. Take care
4500 * not to set important RCs here unless we've handled a message. */
4501 LogFlow(("NEM/%u: breaking: pending FF (%#x / %#RX64)\n",
4502 pVCpu->idCpu, pVM->fGlobalForcedActions, (uint64_t)pVCpu->fLocalForcedActions));
4503 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPost);
4504 }
4505 else
4506 {
4507 LogFlow(("NEM/%u: breaking: canceled %d (pre exec)\n", pVCpu->idCpu, VMCPU_GET_STATE(pVCpu) ));
4508 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnCancel);
4509 }
4510 }
4511 else
4512 {
4513 LogFlow(("NEM/%u: breaking: pending FF (pre exec)\n", pVCpu->idCpu));
4514 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPre);
4515 }
4516 break;
4517 } /* the run loop */
4518
4519
4520 /*
4521 * If the CPU is running, make sure to stop it before we try sync back the
4522 * state and return to EM. We don't sync back the whole state if we can help it.
4523 */
4524# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4525 if (pVCpu->nem.s.fHandleAndGetFlags == VID_MSHAGN_F_GET_NEXT_MESSAGE)
4526 {
4527 pVCpu->nem.s.fHandleAndGetFlags = 0;
4528 rcStrict = nemHCWinStopCpu(pVM, pVCpu, rcStrict, pMappingHeader);
4529 }
4530# endif
4531
4532 if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
4533 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
4534
4535 if (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)))
4536 {
4537 /* Try anticipate what we might need. */
4538 uint64_t fImport = IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI;
4539 if ( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
4540 || RT_FAILURE(rcStrict))
4541 fImport = CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT);
4542# ifdef IN_RING0 /* Ring-3 I/O port access optimizations: */
4543 else if ( rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
4544 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
4545 fImport = CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_INHIBIT_INT;
4546 else if (rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
4547 fImport = CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_INHIBIT_INT;
4548# endif
4549 else if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_APIC
4550 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
4551 fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;
4552
4553 if (pVCpu->cpum.GstCtx.fExtrn & fImport)
4554 {
4555# ifdef IN_RING0
4556 int rc2 = nemR0WinImportState(pVM, pVCpu, &pVCpu->cpum.GstCtx, fImport | CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT,
4557 true /*fCanUpdateCr3*/);
4558 if (RT_SUCCESS(rc2))
4559 pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
4560 else if (rc2 == VERR_NEM_FLUSH_TLB)
4561 {
4562 pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
4563 if (rcStrict == VINF_SUCCESS || rcStrict == -rc2)
4564 rcStrict = -rc2;
4565 else
4566 {
4567 pVCpu->nem.s.rcPending = -rc2;
4568 LogFlow(("NEM/%u: rcPending=%Rrc (rcStrict=%Rrc)\n", pVCpu->idCpu, rc2, VBOXSTRICTRC_VAL(rcStrict) ));
4569 }
4570 }
4571# else
4572 int rc2 = nemHCWinCopyStateFromHyperV(pVM, pVCpu, fImport | CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT);
4573 if (RT_SUCCESS(rc2))
4574 pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
4575# endif
4576 else if (RT_SUCCESS(rcStrict))
4577 rcStrict = rc2;
4578 if (!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT))))
4579 pVCpu->cpum.GstCtx.fExtrn = 0;
4580 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
4581 }
4582 else
4583 {
4584 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
4585 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
4586 }
4587 }
4588 else
4589 {
4590 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
4591 pVCpu->cpum.GstCtx.fExtrn = 0;
4592 }
4593
4594 LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 => %Rrc\n",
4595 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, VBOXSTRICTRC_VAL(rcStrict) ));
4596 return rcStrict;
4597}
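/*
 * Annotation (not part of the original source): a minimal sketch of how the
 * deferred-import scheme above pays off.  State left marked external in
 * fExtrn is only fetched when something actually needs it, e.g.:
 */
#if 0 /* illustrative only; the "Example" tag is a made-up caller name */
    if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP)
    {
        VBOXSTRICTRC rcStrict2 = nemHCWinImportStateIfNeededStrict(pVCpu, CPUMCTX_EXTRN_RIP, "Example");
        if (rcStrict2 != VINF_SUCCESS)
            return rcStrict2;
    }
#endif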
4598
4599#endif /* defined(NEM_WIN_TEMPLATE_MODE_OWN_RUN_API) || defined(IN_RING3) */
4600#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING3)
4601
4602/**
4603 * @callback_method_impl{FNPGMPHYSNEMCHECKPAGE}
4604 */
4605NEM_TMPL_STATIC DECLCALLBACK(int) nemHCWinUnsetForA20CheckerCallback(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys,
4606 PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
4607{
4608 /* We'll just unmap the memory. */
4609 if (pInfo->u2NemState > NEM_WIN_PAGE_STATE_UNMAPPED)
4610 {
4611# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
4612 int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys);
4613 AssertRC(rc);
4614 if (RT_SUCCESS(rc))
4615# else
4616 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
4617 if (SUCCEEDED(hrc))
4618# endif
4619 {
4620 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
4621 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4622 Log5(("NEM GPA unmapped/A20: %RGp (was %s, cMappedPages=%u)\n", GCPhys, g_apszPageStates[pInfo->u2NemState], cMappedPages));
4623 pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
4624 }
4625 else
4626 {
4627 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
4628# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
4629 LogRel(("nemHCWinUnsetForA20CheckerCallback/unmap: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));
4630 return rc;
4631# else
4632 LogRel(("nemHCWinUnsetForA20CheckerCallback/unmap: GCPhys=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
4633 GCPhys, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
4634 return VERR_NEM_IPE_2;
4635# endif
4636 }
4637 }
4638 RT_NOREF(pVCpu, pvUser);
4639 return VINF_SUCCESS;
4640}
4641
4642
4643/**
4644 * Unmaps a page from Hyper-V for the purpose of emulating A20 gate behavior.
4645 *
4646 * @returns The PGMPhysNemQueryPageInfo result.
4647 * @param pVM The cross context VM structure.
4648 * @param pVCpu The cross context virtual CPU structure.
4649 * @param GCPhys The page to unmap.
4650 */
4651NEM_TMPL_STATIC int nemHCWinUnmapPageForA20Gate(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys)
4652{
4653 PGMPHYSNEMPAGEINFO Info;
4654 return PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhys, false /*fMakeWritable*/, &Info,
4655 nemHCWinUnsetForA20CheckerCallback, NULL);
4656}
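/*
 * Annotation (not part of the original source): a worked example of the A20
 * aliasing handled here.  With the A20 gate disabled, address bit 20 is
 * masked off, so guest physical 0x100000 + X wraps to X.  When the page at X
 * changes, callers therefore unmap the alias page and let it resync lazily:
 *
 *     nemHCWinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
 */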
4657
4658#endif /* defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING3) */
4659
4660void nemHCNativeNotifyHandlerPhysicalRegister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
4661{
4662 Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
4663 NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb);
4664}
4665
4666
4667VMM_INT_DECL(void) NEMHCNotifyHandlerPhysicalDeregister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
4668 RTR3PTR pvMemR3, uint8_t *pu2State)
4669{
4670 Log5(("NEMHCNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d pvMemR3=%p pu2State=%p (%d)\n",
4671 GCPhys, cb, enmKind, pvMemR3, pu2State, *pu2State));
4672
4673 *pu2State = UINT8_MAX;
4674#if !defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) && defined(VBOX_WITH_PGM_NEM_MODE) && defined(IN_RING3)
4675 if (pvMemR3)
4676 {
4677 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
4678 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvMemR3, GCPhys, cb,
4679 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute | WHvMapGpaRangeFlagWrite);
4680 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
4681 if (SUCCEEDED(hrc))
4682 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
4683 else
4684 AssertLogRelMsgFailed(("NEMHCNotifyHandlerPhysicalDeregister: WHvMapGpaRange(,%p,%RGp,%RGp,) -> %Rhrc\n",
4685 pvMemR3, GCPhys, cb, hrc));
4686 }
4687 RT_NOREF(enmKind);
4688#else
4689 RT_NOREF(pVM, enmKind, GCPhys, cb, pvMemR3);
4690#endif
4691}
4692
4693
4694void nemHCNativeNotifyHandlerPhysicalModify(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
4695 RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
4696{
4697 Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
4698 GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
4699 NOREF(pVM); NOREF(enmKind); NOREF(GCPhysOld); NOREF(GCPhysNew); NOREF(cb); NOREF(fRestoreAsRAM);
4700}
4701
4702
4703#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING3)
4704/**
4705 * Worker that maps pages into Hyper-V.
4706 *
4707 * This is used by the PGM physical page notifications as well as the memory
4708 * access VMEXIT handlers.
4709 *
4710 * @returns VBox status code.
4711 * @param pVM The cross context VM structure.
4712 * @param pVCpu The cross context virtual CPU structure of the
4713 * calling EMT.
4714 * @param GCPhysSrc The source page address.
4715 * @param GCPhysDst The hyper-V destination page. This may differ from
4716 * GCPhysSrc when A20 is disabled.
4717 * @param fPageProt NEM_PAGE_PROT_XXX.
4718 * @param pu2State Our page state (input/output).
4719 * @param fBackingChanged Set if the page backing is being changed.
4720 * @thread EMT(pVCpu)
4721 */
4722NEM_TMPL_STATIC int nemHCNativeSetPhysPage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
4723 uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged)
4724{
4725# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
4726 /*
4727 * When using the hypercalls instead of the ring-3 APIs, we don't need to
4728 * unmap memory before modifying it. We still want to track the state though,
4729 * since unmap will fail when called on an unmapped page and we don't want to redo
4730 * upgrades/downgrades.
4731 */
4732 uint8_t const u2OldState = *pu2State;
4733 int rc;
4734 if (fPageProt == NEM_PAGE_PROT_NONE)
4735 {
4736 if (u2OldState > NEM_WIN_PAGE_STATE_UNMAPPED)
4737 {
4738 rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
4739 if (RT_SUCCESS(rc))
4740 {
4741 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
4742 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
4743 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4744 Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
4745 }
4746 else
4747 {
4748 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
4749 AssertLogRelMsgFailed(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4750 }
4751 }
4752 else
4753 rc = VINF_SUCCESS;
4754 }
4755 else if (fPageProt & NEM_PAGE_PROT_WRITE)
4756 {
4757 if (u2OldState != NEM_WIN_PAGE_STATE_WRITABLE || fBackingChanged)
4758 {
4759 rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
4760 HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE
4761 | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
4762 if (RT_SUCCESS(rc))
4763 {
4764 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
4765 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPage);
4766 uint32_t cMappedPages = u2OldState <= NEM_WIN_PAGE_STATE_UNMAPPED
4767 ? ASMAtomicIncU32(&pVM->nem.s.cMappedPages) : pVM->nem.s.cMappedPages;
4768 Log5(("NEM GPA writable/set: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
4769 NOREF(cMappedPages);
4770 }
4771 else
4772 {
4773 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
4774 AssertLogRelMsgFailed(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4775 }
4776 }
4777 else
4778 rc = VINF_SUCCESS;
4779 }
4780 else
4781 {
4782 if (u2OldState != NEM_WIN_PAGE_STATE_READABLE || fBackingChanged)
4783 {
4784 rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
4785 HV_MAP_GPA_READABLE | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
4786 if (RT_SUCCESS(rc))
4787 {
4788 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
4789 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPage);
4790 uint32_t cMappedPages = u2OldState <= NEM_WIN_PAGE_STATE_UNMAPPED
4791 ? ASMAtomicIncU32(&pVM->nem.s.cMappedPages) : pVM->nem.s.cMappedPages;
4792 Log5(("NEM GPA read+exec/set: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
4793 NOREF(cMappedPages);
4794 }
4795 else
4796 {
4797 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
4798 AssertLogRelMsgFailed(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4799 }
4800 }
4801 else
4802 rc = VINF_SUCCESS;
4803 }
4804
4805 return rc;
4806
4807# else /* !NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
4808 /*
4809 * Looks like we need to unmap a page before we can change the backing
4810 * or even modify the protection. This is going to be *REALLY* efficient.
4811 * PGM lends us two bits to keep track of the state here.
4812 */
4813 RT_NOREF(pVCpu);
4814 uint8_t const u2OldState = *pu2State;
4815 uint8_t const u2NewState = fPageProt & NEM_PAGE_PROT_WRITE ? NEM_WIN_PAGE_STATE_WRITABLE
4816 : fPageProt & NEM_PAGE_PROT_READ ? NEM_WIN_PAGE_STATE_READABLE : NEM_WIN_PAGE_STATE_UNMAPPED;
4817 if ( fBackingChanged
4818 || u2NewState != u2OldState)
4819 {
4820 if (u2OldState > NEM_WIN_PAGE_STATE_UNMAPPED)
4821 {
4822# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
4823 int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
4824 AssertRC(rc);
4825 if (RT_SUCCESS(rc))
4826 {
4827 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
4828 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
4829 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4830 if (u2NewState == NEM_WIN_PAGE_STATE_UNMAPPED)
4831 {
4832 Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n",
4833 GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
4834 return VINF_SUCCESS;
4835 }
4836 }
4837 else
4838 {
4839 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
4840 LogRel(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4841 return rc;
4842 }
4843# else
4844 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
4845 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst, X86_PAGE_SIZE);
4846 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
4847 if (SUCCEEDED(hrc))
4848 {
4849 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
4850 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
4851 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4852 if (u2NewState == NEM_WIN_PAGE_STATE_UNMAPPED)
4853 {
4854 Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n",
4855 GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
4856 return VINF_SUCCESS;
4857 }
4858 }
4859 else
4860 {
4861 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
4862 LogRel(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
4863 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
4864 return VERR_NEM_INIT_FAILED;
4865 }
4866# endif
4867 }
4868 }
4869
4870 /*
4871 * Writeable mapping?
4872 */
4873 if (fPageProt & NEM_PAGE_PROT_WRITE)
4874 {
4875# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
4876 int rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
4877 HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE
4878 | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
4879 AssertRC(rc);
4880 if (RT_SUCCESS(rc))
4881 {
4882 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
4883 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPage);
4884 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4885 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
4886 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
4887 return VINF_SUCCESS;
4888 }
4889 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
4890 LogRel(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4891 return rc;
4892# else
4893 void *pvPage;
4894 int rc = nemR3NativeGCPhys2R3PtrWriteable(pVM, GCPhysSrc, &pvPage);
4895 if (RT_SUCCESS(rc))
4896 {
4897 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvPage, GCPhysDst, X86_PAGE_SIZE,
4898 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute | WHvMapGpaRangeFlagWrite);
4899 if (SUCCEEDED(hrc))
4900 {
4901 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
4902 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPage);
4903 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4904 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
4905 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
4906 return VINF_SUCCESS;
4907 }
4908 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
4909 LogRel(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
4910 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
4911 return VERR_NEM_INIT_FAILED;
4912 }
4913 LogRel(("nemHCNativeSetPhysPage/writable: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
4914 return rc;
4915# endif
4916 }
4917
4918 if (fPageProt & NEM_PAGE_PROT_READ)
4919 {
4920# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
4921 int rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
4922 HV_MAP_GPA_READABLE | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
4923 AssertRC(rc);
4924 if (RT_SUCCESS(rc))
4925 {
4926 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
4927 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPage);
4928 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4929 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
4930 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
4931 return VINF_SUCCESS;
4932 }
4933 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
4934 LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4935 return rc;
4936# else
4937 const void *pvPage;
4938 int rc = nemR3NativeGCPhys2R3PtrReadOnly(pVM, GCPhysSrc, &pvPage);
4939 if (RT_SUCCESS(rc))
4940 {
4941 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRangePage, a);
4942 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, (void *)pvPage, GCPhysDst, X86_PAGE_SIZE,
4943 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
4944 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRangePage, a);
4945 if (SUCCEEDED(hrc))
4946 {
4947 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
4948 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPage);
4949 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4950 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
4951 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
4952 return VINF_SUCCESS;
4953 }
4954 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
4955 LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
4956 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
4957 return VERR_NEM_INIT_FAILED;
4958 }
4959 LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
4960 return rc;
4961# endif
4962 }
4963
4964 /* We already unmapped it above. */
4965 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
4966 return VINF_SUCCESS;
4967# endif /* !NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
4968}
4969#endif /* defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING3) */
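/*
 * Annotation (not part of the original source): the per-page state machine
 * driven by nemHCNativeSetPhysPage, using the two PGM-lent bits (*pu2State):
 *
 *   UNMAPPED -> WRITABLE   map read+write+exec   (NEM_PAGE_PROT_WRITE)
 *   UNMAPPED -> READABLE   map read+exec         (NEM_PAGE_PROT_READ)
 *   any      -> UNMAPPED   unmap                 (NEM_PAGE_PROT_NONE)
 *
 * Without hypercalls a page must pass through UNMAPPED for any protection or
 * backing change; with hypercalls it can be remapped in place.
 */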
4970
4971
4972NEM_TMPL_STATIC int nemHCJustUnmapPageFromHyperV(PVMCC pVM, RTGCPHYS GCPhysDst, uint8_t *pu2State)
4973{
4974 if (*pu2State <= NEM_WIN_PAGE_STATE_UNMAPPED)
4975 {
4976 Log5(("nemHCJustUnmapPageFromHyperV: %RGp == unmapped\n", GCPhysDst));
4977 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
4978 return VINF_SUCCESS;
4979 }
4980
4981#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES)
4982 PVMCPUCC pVCpu = VMMGetCpu(pVM);
4983 int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
4984 AssertRC(rc);
4985 if (RT_SUCCESS(rc))
4986 {
4987 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
4988 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4989 Log5(("NEM GPA unmapped/just: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[*pu2State], cMappedPages));
4990 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
4991 return VINF_SUCCESS;
4992 }
4993 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
4994 LogRel(("nemHCJustUnmapPageFromHyperV/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4995 return rc;
4996
4997#elif defined(IN_RING3)
4998 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
4999 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, X86_PAGE_SIZE);
5000 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
5001 if (SUCCEEDED(hrc))
5002 {
5003 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
5004 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
5005 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
5006 Log5(("nemHCJustUnmapPageFromHyperV: %RGp => unmapped (total %u)\n", GCPhysDst, cMappedPages));
5007 return VINF_SUCCESS;
5008 }
5009 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
5010 LogRel(("nemHCJustUnmapPageFromHyperV(%RGp): failed! hrc=%Rhrc (%#x) Last=%#x/%u\n",
5011 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
5012 return VERR_NEM_IPE_6;
5013#else
5014 RT_NOREF(pVM, GCPhysDst, pu2State);
5015 LogRel(("nemHCJustUnmapPageFromHyperV(%RGp): Why are we here?!?\n", GCPhysDst));
5016 return VERR_NEM_IPE_6;
5017#endif
5018}
5019
5020
5021int nemHCNativeNotifyPhysPageAllocated(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
5022 PGMPAGETYPE enmType, uint8_t *pu2State)
5023{
5024 Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
5025 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
5026 RT_NOREF_PV(HCPhys); RT_NOREF_PV(enmType);
5027
5028 int rc;
5029#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
5030 PVMCPUCC pVCpu = VMMGetCpu(pVM);
5031# ifdef NEM_WIN_WITH_A20
5032 if ( pVM->nem.s.fA20Enabled
5033 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
5034# endif
5035 rc = nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
5036# ifdef NEM_WIN_WITH_A20
5037 else
5038 {
5039 /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
5040 rc = nemHCWinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
5041 if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys) && RT_SUCCESS(rc))
5042 rc = nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
5043
5044 }
5045# endif
5046#else
5047 RT_NOREF_PV(fPageProt);
5048# ifdef NEM_WIN_WITH_A20
5049 if ( pVM->nem.s.fA20Enabled
5050 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
5051# endif
5052 rc = nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
5053# ifdef NEM_WIN_WITH_A20
5054 else if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
5055 rc = nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
5056 else
5057 rc = VINF_SUCCESS; /* ignore since we've got the alias page at this address. */
5058# endif
5059#endif
5060 return rc;
5061}
5062
5063
5064VMM_INT_DECL(void) NEMHCNotifyPhysPageProtChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, RTR3PTR pvR3, uint32_t fPageProt,
5065 PGMPAGETYPE enmType, uint8_t *pu2State)
5066{
5067 Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
5068 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
5069 Assert(VM_IS_NEM_ENABLED(pVM));
5070 RT_NOREF(HCPhys, enmType, pvR3);
5071
5072#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
5073 PVMCPUCC pVCpu = VMMGetCpu(pVM);
5074# ifdef NEM_WIN_WITH_A20
5075 if ( pVM->nem.s.fA20Enabled
5076 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
5077# endif
5078 nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, false /*fBackingChanged*/);
5079# ifdef NEM_WIN_WITH_A20
5080 else
5081 {
5082 /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
5083 nemHCWinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
5084 if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
5085 nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, false /*fBackingChanged*/);
5086 }
5087# endif
5088#else
5089 RT_NOREF_PV(fPageProt);
5090# ifdef NEM_WIN_WITH_A20
5091 if ( pVM->nem.s.fA20Enabled
5092 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
5093# endif
5094 nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
5095# ifdef NEM_WIN_WITH_A20
5096 else if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
5097 nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
5098 /* else: ignore since we've got the alias page at this address. */
5099# endif
5100#endif
5101}
5102
5103
5104VMM_INT_DECL(void) NEMHCNotifyPhysPageChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
5105 RTR3PTR pvNewR3, uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
5106{
5107 Log5(("nemHCNativeNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp pvNewR3=%p fPageProt=%#x enmType=%d *pu2State=%d\n",
5108 GCPhys, HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType, *pu2State));
5109 Assert(VM_IS_NEM_ENABLED(pVM));
5110 RT_NOREF(HCPhysPrev, HCPhysNew, pvNewR3, enmType);
5111
5112#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
5113 PVMCPUCC pVCpu = VMMGetCpu(pVM);
5114# ifdef NEM_WIN_WITH_A20
5115 if ( pVM->nem.s.fA20Enabled
5116 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
5117# endif
5118 nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
5119# ifdef NEM_WIN_WITH_A20
5120 else
5121 {
5122 /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
5123 nemHCWinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
5124 if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
5125 nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
5126 }
5127# endif
5128#else
5129 RT_NOREF_PV(fPageProt);
5130# ifdef NEM_WIN_WITH_A20
5131 if ( pVM->nem.s.fA20Enabled
5132 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
5133# endif
5134 nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
5135# ifdef NEM_WIN_WITH_A20
5136 else if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
5137 nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
5138 /* else: ignore since we've got the alias page at this address. */
5139# endif
5140#endif
5141}
5142
5143
5144/**
5145 * Returns features supported by the NEM backend.
5146 *
5147 * @returns Flags of features supported by the native NEM backend.
5148 * @param pVM The cross context VM structure.
5149 */
5150VMM_INT_DECL(uint32_t) NEMHCGetFeatures(PVMCC pVM)
5151{
5152 RT_NOREF(pVM);
5153 /** @todo Make use of the WHvGetVirtualProcessorXsaveState/WHvSetVirtualProcessorXsaveState
5154 * interface added in 2019 to enable passthrough of xsave/xrstor (and depending) features to the guest. */
5155 /** @todo Is NEM_FEAT_F_FULL_GST_EXEC always true? */
5156 return NEM_FEAT_F_NESTED_PAGING | NEM_FEAT_F_FULL_GST_EXEC;
5157}
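/*
 * Annotation (not part of the original source): callers simply test the
 * returned flag mask, e.g.:
 *
 *     if (NEMHCGetFeatures(pVM) & NEM_FEAT_F_NESTED_PAGING)
 *         ... the backend provides nested paging, no shadow page tables ...
 */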