VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/NEMAllNativeTemplate-win.cpp.h@92918

Last change on this file since 92918 was 92918, checked in by vboxsync, 3 years ago

VMM/NEM-win: Tweak for newer SDKs. bugref:10116

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 241.6 KB
1/* $Id: NEMAllNativeTemplate-win.cpp.h 92918 2021-12-15 09:44:05Z vboxsync $ */
2/** @file
3 * NEM - Native execution manager, Windows code template ring-0/3.
4 */
5
6/*
7 * Copyright (C) 2018-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Defined Constants And Macros *
21*********************************************************************************************************************************/
22/** Copy back a segment from Hyper-V. */
23#define NEM_WIN_COPY_BACK_SEG(a_Dst, a_Src) \
24 do { \
25 (a_Dst).u64Base = (a_Src).Base; \
26 (a_Dst).u32Limit = (a_Src).Limit; \
27 (a_Dst).ValidSel = (a_Dst).Sel = (a_Src).Selector; \
28 (a_Dst).Attr.u = (a_Src).Attributes; \
29 (a_Dst).fFlags = CPUMSELREG_FLAGS_VALID; \
30 } while (0)
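/* Added commentary (not in the original source): a minimal usage sketch,
 * assuming a value fetched via WHvGetVirtualProcessorRegisters:
 *     WHV_REGISTER_VALUE Val;   // Val.Segment filled in by the API
 *     NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.cs, Val.Segment);
 * The GET_SEG() macro defined further down wraps exactly this pattern. */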
31
32/** @def NEMWIN_ASSERT_MSG_REG_VAL
33 * Asserts the correctness of a register value in a message/context.
34 */
35#if 0
36# define NEMWIN_NEED_GET_REGISTER
37# if defined(IN_RING0) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
38# define NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_enmReg, a_Expr, a_Msg) \
39 do { \
40 HV_REGISTER_VALUE TmpVal; \
41 nemHCWinGetRegister(a_pVCpu, a_enmReg, &TmpVal); \
42 AssertMsg(a_Expr, a_Msg); \
43 } while (0)
44# else
45# define NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_enmReg, a_Expr, a_Msg) \
46 do { \
47 WHV_REGISTER_VALUE TmpVal; \
48 nemR3WinGetRegister(a_pVCpu, a_enmReg, &TmpVal); \
49 AssertMsg(a_Expr, a_Msg); \
50 } while (0)
51# endif
52#else
53# define NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_enmReg, a_Expr, a_Msg) do { } while (0)
54#endif
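/* Added note (not in the original source): the whole register-value assertion
 * machinery above is compiled out by the #if 0; flipping it to #if 1 defines
 * NEMWIN_NEED_GET_REGISTER, which in turn enables the nemHCWinGetRegister /
 * nemR3WinGetRegister workers near the end of this file. */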
55
56/** @def NEMWIN_ASSERT_MSG_REG_VAL64
57 * Asserts the correctness of a 64-bit register value in a message/context.
58 */
59#define NEMWIN_ASSERT_MSG_REG_VAL64(a_pVCpu, a_enmReg, a_u64Val) \
60 NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_enmReg, (a_u64Val) == TmpVal.Reg64, \
61 (#a_u64Val "=%#RX64, expected %#RX64\n", (a_u64Val), TmpVal.Reg64))
62/** @def NEMWIN_ASSERT_MSG_REG_SEG
63 * Asserts the correctness of a segment register value in a message/context.
64 */
65#define NEMWIN_ASSERT_MSG_REG_SEG(a_pVCpu, a_enmReg, a_SReg) \
66 NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_enmReg, \
67 (a_SReg).Base == TmpVal.Segment.Base \
68 && (a_SReg).Limit == TmpVal.Segment.Limit \
69 && (a_SReg).Selector == TmpVal.Segment.Selector \
70 && (a_SReg).Attributes == TmpVal.Segment.Attributes, \
71 ( #a_SReg "=%#RX16 {%#RX64 LB %#RX32,%#RX16} expected %#RX16 {%#RX64 LB %#RX32,%#RX16}\n", \
72 (a_SReg).Selector, (a_SReg).Base, (a_SReg).Limit, (a_SReg).Attributes, \
73 TmpVal.Segment.Selector, TmpVal.Segment.Base, TmpVal.Segment.Limit, TmpVal.Segment.Attributes))
74
75
76#ifdef IN_RING3
77/** WHvRegisterPendingEvent0 was renamed to WHvRegisterPendingEvent between
78 * SDK 17134 and 18362. */
79# if WDK_NTDDI_VERSION < NTDDI_WIN10_19H1
80# define WHvRegisterPendingEvent WHvRegisterPendingEvent0
81# endif
82#endif
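/* Added note (not in the original source): with pre-19H1 SDK headers the
 * #define above makes uses of the new WHvRegisterPendingEvent name resolve to
 * the old WHvRegisterPendingEvent0 enumerator, so the rest of the template can
 * use the new name unconditionally. */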
83
84
85/*********************************************************************************************************************************
86* Global Variables *
87*********************************************************************************************************************************/
88/** NEM_WIN_PAGE_STATE_XXX names. */
89NEM_TMPL_STATIC const char * const g_apszPageStates[4] = { "not-set", "unmapped", "readable", "writable" };
90
91/** HV_INTERCEPT_ACCESS_TYPE names. */
92static const char * const g_apszHvInterceptAccessTypes[4] = { "read", "write", "exec", "!undefined!" };
93
94
95/*********************************************************************************************************************************
96* Internal Functions *
97*********************************************************************************************************************************/
98NEM_TMPL_STATIC int nemHCNativeSetPhysPage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
99 uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged);
100
101
102
103#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
104
105/**
106 * Wrapper around VMMR0_DO_NEM_MAP_PAGES for a single page.
107 *
108 * @returns VBox status code.
109 * @param pVM The cross context VM structure.
110 * @param pVCpu The cross context virtual CPU structure of the caller.
111 * @param GCPhysSrc The source page. Does not need to be page aligned.
112 * @param GCPhysDst The destination page. Same as @a GCPhysSrc except for
113 * when A20 is disabled.
114 * @param fFlags HV_MAP_GPA_XXX.
115 */
116DECLINLINE(int) nemHCWinHypercallMapPage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst, uint32_t fFlags)
117{
118#ifdef IN_RING0
119 /** @todo optimize further, caller generally has the physical address. */
120 return nemR0WinMapPages(pVM, pVCpu,
121 GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
122 GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
123 1, fFlags);
124#else
125 pVCpu->nem.s.Hypercall.MapPages.GCPhysSrc = GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
126 pVCpu->nem.s.Hypercall.MapPages.GCPhysDst = GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
127 pVCpu->nem.s.Hypercall.MapPages.cPages = 1;
128 pVCpu->nem.s.Hypercall.MapPages.fFlags = fFlags;
129 return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_MAP_PAGES, 0, NULL);
130#endif
131}
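/* Added commentary (not in the original source): a hypothetical call site,
 * assuming the usual HV_MAP_GPA_XXX flag names from the Hyper-V headers:
 *     int rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhys, GCPhys,
 *                                       HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE);
 */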
132
133
134/**
135 * Wrapper around VMMR0_DO_NEM_UNMAP_PAGES for a single page.
136 *
137 * @returns VBox status code.
138 * @param pVM The cross context VM structure.
139 * @param pVCpu The cross context virtual CPU structure of the caller.
140 * @param GCPhys The page to unmap. Does not need to be page aligned.
141 */
142DECLINLINE(int) nemHCWinHypercallUnmapPage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys)
143{
144# ifdef IN_RING0
145 return nemR0WinUnmapPages(pVM, pVCpu, GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, 1);
146# else
147 pVCpu->nem.s.Hypercall.UnmapPages.GCPhys = GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
148 pVCpu->nem.s.Hypercall.UnmapPages.cPages = 1;
149 return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_UNMAP_PAGES, 0, NULL);
150# endif
151}
152
153#endif /* NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
154#ifndef IN_RING0
155
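/**
 * Added doc sketch (not in the original source): copies the state that CPUM
 * currently owns (i.e. whatever is not marked externalized in fExtrn) back to
 * Hyper-V, either through the ring-0 VMMR0_DO_NEM_EXPORT_STATE path or via
 * WHvSetVirtualProcessorRegisters() in ring-3.
 */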
156NEM_TMPL_STATIC int nemHCWinCopyStateToHyperV(PVMCC pVM, PVMCPUCC pVCpu)
157{
158# if defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) || defined(NEM_WIN_WITH_RING0_RUNLOOP)
159# if !defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) && defined(NEM_WIN_WITH_RING0_RUNLOOP)
160 if (pVM->nem.s.fUseRing0Runloop)
161# endif
162 {
163 int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_EXPORT_STATE, 0, NULL);
164 AssertLogRelRCReturn(rc, rc);
165 return rc;
166 }
167# endif
168# ifndef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS
169
170 /*
171 * The following is very similar to what nemR0WinExportState() does.
172 */
173 WHV_REGISTER_NAME aenmNames[128];
174 WHV_REGISTER_VALUE aValues[128];
175
176 uint64_t const fWhat = ~pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK);
177 if ( !fWhat
178 && pVCpu->nem.s.fCurrentInterruptWindows == pVCpu->nem.s.fDesiredInterruptWindows)
179 return VINF_SUCCESS;
180 uintptr_t iReg = 0;
181
182# define ADD_REG64(a_enmName, a_uValue) do { \
183 aenmNames[iReg] = (a_enmName); \
184 aValues[iReg].Reg128.High64 = 0; \
185 aValues[iReg].Reg64 = (a_uValue); \
186 iReg++; \
187 } while (0)
188# define ADD_REG128(a_enmName, a_uValueLo, a_uValueHi) do { \
189 aenmNames[iReg] = (a_enmName); \
190 aValues[iReg].Reg128.Low64 = (a_uValueLo); \
191 aValues[iReg].Reg128.High64 = (a_uValueHi); \
192 iReg++; \
193 } while (0)
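/* Added note (not in the original source): aenmNames[] and aValues[] are the
 * parallel name/value arrays that WHvSetVirtualProcessorRegisters() consumes;
 * the ADD_* helpers keep both arrays in lockstep through the shared iReg
 * index. */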
194
195 /* GPRs */
196 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
197 {
198 if (fWhat & CPUMCTX_EXTRN_RAX)
199 ADD_REG64(WHvX64RegisterRax, pVCpu->cpum.GstCtx.rax);
200 if (fWhat & CPUMCTX_EXTRN_RCX)
201 ADD_REG64(WHvX64RegisterRcx, pVCpu->cpum.GstCtx.rcx);
202 if (fWhat & CPUMCTX_EXTRN_RDX)
203 ADD_REG64(WHvX64RegisterRdx, pVCpu->cpum.GstCtx.rdx);
204 if (fWhat & CPUMCTX_EXTRN_RBX)
205 ADD_REG64(WHvX64RegisterRbx, pVCpu->cpum.GstCtx.rbx);
206 if (fWhat & CPUMCTX_EXTRN_RSP)
207 ADD_REG64(WHvX64RegisterRsp, pVCpu->cpum.GstCtx.rsp);
208 if (fWhat & CPUMCTX_EXTRN_RBP)
209 ADD_REG64(WHvX64RegisterRbp, pVCpu->cpum.GstCtx.rbp);
210 if (fWhat & CPUMCTX_EXTRN_RSI)
211 ADD_REG64(WHvX64RegisterRsi, pVCpu->cpum.GstCtx.rsi);
212 if (fWhat & CPUMCTX_EXTRN_RDI)
213 ADD_REG64(WHvX64RegisterRdi, pVCpu->cpum.GstCtx.rdi);
214 if (fWhat & CPUMCTX_EXTRN_R8_R15)
215 {
216 ADD_REG64(WHvX64RegisterR8, pVCpu->cpum.GstCtx.r8);
217 ADD_REG64(WHvX64RegisterR9, pVCpu->cpum.GstCtx.r9);
218 ADD_REG64(WHvX64RegisterR10, pVCpu->cpum.GstCtx.r10);
219 ADD_REG64(WHvX64RegisterR11, pVCpu->cpum.GstCtx.r11);
220 ADD_REG64(WHvX64RegisterR12, pVCpu->cpum.GstCtx.r12);
221 ADD_REG64(WHvX64RegisterR13, pVCpu->cpum.GstCtx.r13);
222 ADD_REG64(WHvX64RegisterR14, pVCpu->cpum.GstCtx.r14);
223 ADD_REG64(WHvX64RegisterR15, pVCpu->cpum.GstCtx.r15);
224 }
225 }
226
227 /* RIP & Flags */
228 if (fWhat & CPUMCTX_EXTRN_RIP)
229 ADD_REG64(WHvX64RegisterRip, pVCpu->cpum.GstCtx.rip);
230 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
231 ADD_REG64(WHvX64RegisterRflags, pVCpu->cpum.GstCtx.rflags.u);
232
233 /* Segments */
234# define ADD_SEG(a_enmName, a_SReg) \
235 do { \
236 aenmNames[iReg] = a_enmName; \
237 aValues[iReg].Segment.Base = (a_SReg).u64Base; \
238 aValues[iReg].Segment.Limit = (a_SReg).u32Limit; \
239 aValues[iReg].Segment.Selector = (a_SReg).Sel; \
240 aValues[iReg].Segment.Attributes = (a_SReg).Attr.u; \
241 iReg++; \
242 } while (0)
243 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
244 {
245 if (fWhat & CPUMCTX_EXTRN_ES)
246 ADD_SEG(WHvX64RegisterEs, pVCpu->cpum.GstCtx.es);
247 if (fWhat & CPUMCTX_EXTRN_CS)
248 ADD_SEG(WHvX64RegisterCs, pVCpu->cpum.GstCtx.cs);
249 if (fWhat & CPUMCTX_EXTRN_SS)
250 ADD_SEG(WHvX64RegisterSs, pVCpu->cpum.GstCtx.ss);
251 if (fWhat & CPUMCTX_EXTRN_DS)
252 ADD_SEG(WHvX64RegisterDs, pVCpu->cpum.GstCtx.ds);
253 if (fWhat & CPUMCTX_EXTRN_FS)
254 ADD_SEG(WHvX64RegisterFs, pVCpu->cpum.GstCtx.fs);
255 if (fWhat & CPUMCTX_EXTRN_GS)
256 ADD_SEG(WHvX64RegisterGs, pVCpu->cpum.GstCtx.gs);
257 }
258
259 /* Descriptor tables & task segment. */
260 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
261 {
262 if (fWhat & CPUMCTX_EXTRN_LDTR)
263 ADD_SEG(WHvX64RegisterLdtr, pVCpu->cpum.GstCtx.ldtr);
264 if (fWhat & CPUMCTX_EXTRN_TR)
265 ADD_SEG(WHvX64RegisterTr, pVCpu->cpum.GstCtx.tr);
266 if (fWhat & CPUMCTX_EXTRN_IDTR)
267 {
268 aenmNames[iReg] = WHvX64RegisterIdtr;
269 aValues[iReg].Table.Limit = pVCpu->cpum.GstCtx.idtr.cbIdt;
270 aValues[iReg].Table.Base = pVCpu->cpum.GstCtx.idtr.pIdt;
271 iReg++;
272 }
273 if (fWhat & CPUMCTX_EXTRN_GDTR)
274 {
275 aenmNames[iReg] = WHvX64RegisterGdtr;
276 aValues[iReg].Table.Limit = pVCpu->cpum.GstCtx.gdtr.cbGdt;
277 aValues[iReg].Table.Base = pVCpu->cpum.GstCtx.gdtr.pGdt;
278 iReg++;
279 }
280 }
281
282 /* Control registers. */
283 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
284 {
285 if (fWhat & CPUMCTX_EXTRN_CR0)
286 ADD_REG64(WHvX64RegisterCr0, pVCpu->cpum.GstCtx.cr0);
287 if (fWhat & CPUMCTX_EXTRN_CR2)
288 ADD_REG64(WHvX64RegisterCr2, pVCpu->cpum.GstCtx.cr2);
289 if (fWhat & CPUMCTX_EXTRN_CR3)
290 ADD_REG64(WHvX64RegisterCr3, pVCpu->cpum.GstCtx.cr3);
291 if (fWhat & CPUMCTX_EXTRN_CR4)
292 ADD_REG64(WHvX64RegisterCr4, pVCpu->cpum.GstCtx.cr4);
293 }
294 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
295 ADD_REG64(WHvX64RegisterCr8, CPUMGetGuestCR8(pVCpu));
296
297 /* Debug registers. */
298/** @todo fixme. Figure out what the hyper-v version of KVM_SET_GUEST_DEBUG would be. */
299 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
300 {
301 ADD_REG64(WHvX64RegisterDr0, pVCpu->cpum.GstCtx.dr[0]); // CPUMGetHyperDR0(pVCpu));
302 ADD_REG64(WHvX64RegisterDr1, pVCpu->cpum.GstCtx.dr[1]); // CPUMGetHyperDR1(pVCpu));
303 ADD_REG64(WHvX64RegisterDr2, pVCpu->cpum.GstCtx.dr[2]); // CPUMGetHyperDR2(pVCpu));
304 ADD_REG64(WHvX64RegisterDr3, pVCpu->cpum.GstCtx.dr[3]); // CPUMGetHyperDR3(pVCpu));
305 }
306 if (fWhat & CPUMCTX_EXTRN_DR6)
307 ADD_REG64(WHvX64RegisterDr6, pVCpu->cpum.GstCtx.dr[6]); // CPUMGetHyperDR6(pVCpu));
308 if (fWhat & CPUMCTX_EXTRN_DR7)
309 ADD_REG64(WHvX64RegisterDr7, pVCpu->cpum.GstCtx.dr[7]); // CPUMGetHyperDR7(pVCpu));
310
311 /* Floating point state. */
312 if (fWhat & CPUMCTX_EXTRN_X87)
313 {
314 ADD_REG128(WHvX64RegisterFpMmx0, pVCpu->cpum.GstCtx.XState.x87.aRegs[0].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[0].au64[1]);
315 ADD_REG128(WHvX64RegisterFpMmx1, pVCpu->cpum.GstCtx.XState.x87.aRegs[1].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[1].au64[1]);
316 ADD_REG128(WHvX64RegisterFpMmx2, pVCpu->cpum.GstCtx.XState.x87.aRegs[2].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[2].au64[1]);
317 ADD_REG128(WHvX64RegisterFpMmx3, pVCpu->cpum.GstCtx.XState.x87.aRegs[3].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[3].au64[1]);
318 ADD_REG128(WHvX64RegisterFpMmx4, pVCpu->cpum.GstCtx.XState.x87.aRegs[4].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[4].au64[1]);
319 ADD_REG128(WHvX64RegisterFpMmx5, pVCpu->cpum.GstCtx.XState.x87.aRegs[5].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[5].au64[1]);
320 ADD_REG128(WHvX64RegisterFpMmx6, pVCpu->cpum.GstCtx.XState.x87.aRegs[6].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[6].au64[1]);
321 ADD_REG128(WHvX64RegisterFpMmx7, pVCpu->cpum.GstCtx.XState.x87.aRegs[7].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[7].au64[1]);
322
323 aenmNames[iReg] = WHvX64RegisterFpControlStatus;
324 aValues[iReg].FpControlStatus.FpControl = pVCpu->cpum.GstCtx.XState.x87.FCW;
325 aValues[iReg].FpControlStatus.FpStatus = pVCpu->cpum.GstCtx.XState.x87.FSW;
326 aValues[iReg].FpControlStatus.FpTag = pVCpu->cpum.GstCtx.XState.x87.FTW;
327 aValues[iReg].FpControlStatus.Reserved = pVCpu->cpum.GstCtx.XState.x87.FTW >> 8;
328 aValues[iReg].FpControlStatus.LastFpOp = pVCpu->cpum.GstCtx.XState.x87.FOP;
329 aValues[iReg].FpControlStatus.LastFpRip = (pVCpu->cpum.GstCtx.XState.x87.FPUIP)
330 | ((uint64_t)pVCpu->cpum.GstCtx.XState.x87.CS << 32)
331 | ((uint64_t)pVCpu->cpum.GstCtx.XState.x87.Rsrvd1 << 48);
332 iReg++;
333
334 aenmNames[iReg] = WHvX64RegisterXmmControlStatus;
335 aValues[iReg].XmmControlStatus.LastFpRdp = (pVCpu->cpum.GstCtx.XState.x87.FPUDP)
336 | ((uint64_t)pVCpu->cpum.GstCtx.XState.x87.DS << 32)
337 | ((uint64_t)pVCpu->cpum.GstCtx.XState.x87.Rsrvd2 << 48);
338 aValues[iReg].XmmControlStatus.XmmStatusControl = pVCpu->cpum.GstCtx.XState.x87.MXCSR;
339 aValues[iReg].XmmControlStatus.XmmStatusControlMask = pVCpu->cpum.GstCtx.XState.x87.MXCSR_MASK; /** @todo ??? (Isn't this an output field?) */
340 iReg++;
341 }
342
343 /* Vector state. */
344 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
345 {
346 ADD_REG128(WHvX64RegisterXmm0, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 0].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 0].uXmm.s.Hi);
347 ADD_REG128(WHvX64RegisterXmm1, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 1].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 1].uXmm.s.Hi);
348 ADD_REG128(WHvX64RegisterXmm2, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 2].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 2].uXmm.s.Hi);
349 ADD_REG128(WHvX64RegisterXmm3, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 3].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 3].uXmm.s.Hi);
350 ADD_REG128(WHvX64RegisterXmm4, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 4].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 4].uXmm.s.Hi);
351 ADD_REG128(WHvX64RegisterXmm5, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 5].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 5].uXmm.s.Hi);
352 ADD_REG128(WHvX64RegisterXmm6, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 6].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 6].uXmm.s.Hi);
353 ADD_REG128(WHvX64RegisterXmm7, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 7].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 7].uXmm.s.Hi);
354 ADD_REG128(WHvX64RegisterXmm8, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 8].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 8].uXmm.s.Hi);
355 ADD_REG128(WHvX64RegisterXmm9, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 9].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 9].uXmm.s.Hi);
356 ADD_REG128(WHvX64RegisterXmm10, pVCpu->cpum.GstCtx.XState.x87.aXMM[10].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[10].uXmm.s.Hi);
357 ADD_REG128(WHvX64RegisterXmm11, pVCpu->cpum.GstCtx.XState.x87.aXMM[11].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[11].uXmm.s.Hi);
358 ADD_REG128(WHvX64RegisterXmm12, pVCpu->cpum.GstCtx.XState.x87.aXMM[12].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[12].uXmm.s.Hi);
359 ADD_REG128(WHvX64RegisterXmm13, pVCpu->cpum.GstCtx.XState.x87.aXMM[13].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[13].uXmm.s.Hi);
360 ADD_REG128(WHvX64RegisterXmm14, pVCpu->cpum.GstCtx.XState.x87.aXMM[14].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[14].uXmm.s.Hi);
361 ADD_REG128(WHvX64RegisterXmm15, pVCpu->cpum.GstCtx.XState.x87.aXMM[15].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[15].uXmm.s.Hi);
362 }
363
364 /* MSRs */
365 // WHvX64RegisterTsc - don't touch
366 if (fWhat & CPUMCTX_EXTRN_EFER)
367 ADD_REG64(WHvX64RegisterEfer, pVCpu->cpum.GstCtx.msrEFER);
368 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
369 ADD_REG64(WHvX64RegisterKernelGsBase, pVCpu->cpum.GstCtx.msrKERNELGSBASE);
370 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
371 {
372 ADD_REG64(WHvX64RegisterSysenterCs, pVCpu->cpum.GstCtx.SysEnter.cs);
373 ADD_REG64(WHvX64RegisterSysenterEip, pVCpu->cpum.GstCtx.SysEnter.eip);
374 ADD_REG64(WHvX64RegisterSysenterEsp, pVCpu->cpum.GstCtx.SysEnter.esp);
375 }
376 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
377 {
378 ADD_REG64(WHvX64RegisterStar, pVCpu->cpum.GstCtx.msrSTAR);
379 ADD_REG64(WHvX64RegisterLstar, pVCpu->cpum.GstCtx.msrLSTAR);
380 ADD_REG64(WHvX64RegisterCstar, pVCpu->cpum.GstCtx.msrCSTAR);
381 ADD_REG64(WHvX64RegisterSfmask, pVCpu->cpum.GstCtx.msrSFMASK);
382 }
383 if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
384 {
385 PCPUMCTXMSRS const pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
386 if (fWhat & CPUMCTX_EXTRN_TSC_AUX)
387 ADD_REG64(WHvX64RegisterTscAux, pCtxMsrs->msr.TscAux);
388 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
389 {
390 ADD_REG64(WHvX64RegisterApicBase, APICGetBaseMsrNoCheck(pVCpu));
391 ADD_REG64(WHvX64RegisterPat, pVCpu->cpum.GstCtx.msrPAT);
392#if 0 /** @todo check if WHvX64RegisterMsrMtrrCap works here... */
393 ADD_REG64(WHvX64RegisterMsrMtrrCap, CPUMGetGuestIa32MtrrCap(pVCpu));
394#endif
395 ADD_REG64(WHvX64RegisterMsrMtrrDefType, pCtxMsrs->msr.MtrrDefType);
396 ADD_REG64(WHvX64RegisterMsrMtrrFix64k00000, pCtxMsrs->msr.MtrrFix64K_00000);
397 ADD_REG64(WHvX64RegisterMsrMtrrFix16k80000, pCtxMsrs->msr.MtrrFix16K_80000);
398 ADD_REG64(WHvX64RegisterMsrMtrrFix16kA0000, pCtxMsrs->msr.MtrrFix16K_A0000);
399 ADD_REG64(WHvX64RegisterMsrMtrrFix4kC0000, pCtxMsrs->msr.MtrrFix4K_C0000);
400 ADD_REG64(WHvX64RegisterMsrMtrrFix4kC8000, pCtxMsrs->msr.MtrrFix4K_C8000);
401 ADD_REG64(WHvX64RegisterMsrMtrrFix4kD0000, pCtxMsrs->msr.MtrrFix4K_D0000);
402 ADD_REG64(WHvX64RegisterMsrMtrrFix4kD8000, pCtxMsrs->msr.MtrrFix4K_D8000);
403 ADD_REG64(WHvX64RegisterMsrMtrrFix4kE0000, pCtxMsrs->msr.MtrrFix4K_E0000);
404 ADD_REG64(WHvX64RegisterMsrMtrrFix4kE8000, pCtxMsrs->msr.MtrrFix4K_E8000);
405 ADD_REG64(WHvX64RegisterMsrMtrrFix4kF0000, pCtxMsrs->msr.MtrrFix4K_F0000);
406 ADD_REG64(WHvX64RegisterMsrMtrrFix4kF8000, pCtxMsrs->msr.MtrrFix4K_F8000);
407#if 0 /** @todo these registers aren't available? Might explain something... */
408 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pVM);
409 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
410 {
411 ADD_REG64(HvX64RegisterIa32MiscEnable, pCtxMsrs->msr.MiscEnable);
412 ADD_REG64(HvX64RegisterIa32FeatureControl, CPUMGetGuestIa32FeatureControl(pVCpu));
413 }
414#endif
415 }
416 }
417
418 /* event injection (clear it). */
419 if (fWhat & CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)
420 ADD_REG64(WHvRegisterPendingInterruption, 0);
421
422 /* Interruptibility state. This can get a little complicated since we get
423 half of the state via HV_X64_VP_EXECUTION_STATE. */
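 /* Added note (not in the original source): the interrupt shadow is the
 one-instruction inhibit window following STI and MOV SS; it only applies
 while RIP still equals the recorded inhibit PC, hence the
 EMGetInhibitInterruptsPC() == rip checks below. */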
424 if ( (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
425 == (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI) )
426 {
427 ADD_REG64(WHvRegisterInterruptState, 0);
428 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
429 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip)
430 aValues[iReg - 1].InterruptState.InterruptShadow = 1;
431 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
432 aValues[iReg - 1].InterruptState.NmiMasked = 1;
433 }
434 else if (fWhat & CPUMCTX_EXTRN_INHIBIT_INT)
435 {
436 if ( pVCpu->nem.s.fLastInterruptShadow
437 || ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
438 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip))
439 {
440 ADD_REG64(WHvRegisterInterruptState, 0);
441 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
442 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip)
443 aValues[iReg - 1].InterruptState.InterruptShadow = 1;
444 /** @todo Retrieve NMI state, currently assuming it's zero. (yes this may happen on I/O) */
445 //if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
446 // aValues[iReg - 1].InterruptState.NmiMasked = 1;
447 }
448 }
449 else
450 Assert(!(fWhat & CPUMCTX_EXTRN_INHIBIT_NMI));
451
452 /* Interrupt windows. Always set if active as Hyper-V seems to be forgetful. */
453 uint8_t const fDesiredIntWin = pVCpu->nem.s.fDesiredInterruptWindows;
454 if ( fDesiredIntWin
455 || pVCpu->nem.s.fCurrentInterruptWindows != fDesiredIntWin)
456 {
457 pVCpu->nem.s.fCurrentInterruptWindows = pVCpu->nem.s.fDesiredInterruptWindows;
458 Log8(("Setting WHvX64RegisterDeliverabilityNotifications, fDesiredIntWin=%X\n", fDesiredIntWin));
459 ADD_REG64(WHvX64RegisterDeliverabilityNotifications, fDesiredIntWin);
460 Assert(aValues[iReg - 1].DeliverabilityNotifications.NmiNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_NMI));
461 Assert(aValues[iReg - 1].DeliverabilityNotifications.InterruptNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_REGULAR));
462 Assert(aValues[iReg - 1].DeliverabilityNotifications.InterruptPriority == (unsigned)((fDesiredIntWin & NEM_WIN_INTW_F_PRIO_MASK) >> NEM_WIN_INTW_F_PRIO_SHIFT));
463 }
464
465 /// @todo WHvRegisterPendingEvent
466
467 /*
468 * Set the registers.
469 */
470 Assert(iReg < RT_ELEMENTS(aValues));
471 Assert(iReg < RT_ELEMENTS(aenmNames));
472# ifdef NEM_WIN_INTERCEPT_NT_IO_CTLS
473 Log12(("Calling WHvSetVirtualProcessorRegisters(%p, %u, %p, %u, %p)\n",
474 pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues));
475# endif
476 HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues);
477 if (SUCCEEDED(hrc))
478 {
479 pVCpu->cpum.GstCtx.fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM;
480 return VINF_SUCCESS;
481 }
482 AssertLogRelMsgFailed(("WHvSetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
483 pVM->nem.s.hPartition, pVCpu->idCpu, iReg,
484 hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
485 return VERR_INTERNAL_ERROR;
486
487# undef ADD_REG64
488# undef ADD_REG128
489# undef ADD_SEG
490
491# endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
492}
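/* Added note (not in the original source): on success the export marks the
 * entire context as externalized again (fExtrn |= CPUMCTX_EXTRN_ALL | ...),
 * meaning Hyper-V holds the authoritative register state until the next
 * import. */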
493
494
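/**
 * Added doc sketch (not in the original source): fetches the registers
 * selected by @a fWhat from Hyper-V into the CPUM guest context, batching one
 * WHvGetVirtualProcessorRegisters() call in ring-3 or deferring to the ring-0
 * import path when the ring-0 runloop is active.
 */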
495NEM_TMPL_STATIC int nemHCWinCopyStateFromHyperV(PVMCC pVM, PVMCPUCC pVCpu, uint64_t fWhat)
496{
497# if defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) || defined(NEM_WIN_WITH_RING0_RUNLOOP)
498# if !defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) && defined(NEM_WIN_WITH_RING0_RUNLOOP)
499 if (pVM->nem.s.fUseRing0Runloop)
500# endif
501 {
502 /* See NEMR0ImportState */
503 int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_IMPORT_STATE, fWhat, NULL);
504 if (RT_SUCCESS(rc))
505 return rc;
506 if (rc == VERR_NEM_FLUSH_TLB)
507 {
508 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /*fGlobal*/);
509 return rc;
510 }
511 AssertLogRelRCReturn(rc, rc);
512 return rc;
513 }
514# endif
515# ifndef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS
516 WHV_REGISTER_NAME aenmNames[128];
517
518 fWhat &= pVCpu->cpum.GstCtx.fExtrn;
519 uintptr_t iReg = 0;
520
521 /* GPRs */
522 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
523 {
524 if (fWhat & CPUMCTX_EXTRN_RAX)
525 aenmNames[iReg++] = WHvX64RegisterRax;
526 if (fWhat & CPUMCTX_EXTRN_RCX)
527 aenmNames[iReg++] = WHvX64RegisterRcx;
528 if (fWhat & CPUMCTX_EXTRN_RDX)
529 aenmNames[iReg++] = WHvX64RegisterRdx;
530 if (fWhat & CPUMCTX_EXTRN_RBX)
531 aenmNames[iReg++] = WHvX64RegisterRbx;
532 if (fWhat & CPUMCTX_EXTRN_RSP)
533 aenmNames[iReg++] = WHvX64RegisterRsp;
534 if (fWhat & CPUMCTX_EXTRN_RBP)
535 aenmNames[iReg++] = WHvX64RegisterRbp;
536 if (fWhat & CPUMCTX_EXTRN_RSI)
537 aenmNames[iReg++] = WHvX64RegisterRsi;
538 if (fWhat & CPUMCTX_EXTRN_RDI)
539 aenmNames[iReg++] = WHvX64RegisterRdi;
540 if (fWhat & CPUMCTX_EXTRN_R8_R15)
541 {
542 aenmNames[iReg++] = WHvX64RegisterR8;
543 aenmNames[iReg++] = WHvX64RegisterR9;
544 aenmNames[iReg++] = WHvX64RegisterR10;
545 aenmNames[iReg++] = WHvX64RegisterR11;
546 aenmNames[iReg++] = WHvX64RegisterR12;
547 aenmNames[iReg++] = WHvX64RegisterR13;
548 aenmNames[iReg++] = WHvX64RegisterR14;
549 aenmNames[iReg++] = WHvX64RegisterR15;
550 }
551 }
552
553 /* RIP & Flags */
554 if (fWhat & CPUMCTX_EXTRN_RIP)
555 aenmNames[iReg++] = WHvX64RegisterRip;
556 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
557 aenmNames[iReg++] = WHvX64RegisterRflags;
558
559 /* Segments */
560 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
561 {
562 if (fWhat & CPUMCTX_EXTRN_ES)
563 aenmNames[iReg++] = WHvX64RegisterEs;
564 if (fWhat & CPUMCTX_EXTRN_CS)
565 aenmNames[iReg++] = WHvX64RegisterCs;
566 if (fWhat & CPUMCTX_EXTRN_SS)
567 aenmNames[iReg++] = WHvX64RegisterSs;
568 if (fWhat & CPUMCTX_EXTRN_DS)
569 aenmNames[iReg++] = WHvX64RegisterDs;
570 if (fWhat & CPUMCTX_EXTRN_FS)
571 aenmNames[iReg++] = WHvX64RegisterFs;
572 if (fWhat & CPUMCTX_EXTRN_GS)
573 aenmNames[iReg++] = WHvX64RegisterGs;
574 }
575
576 /* Descriptor tables. */
577 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
578 {
579 if (fWhat & CPUMCTX_EXTRN_LDTR)
580 aenmNames[iReg++] = WHvX64RegisterLdtr;
581 if (fWhat & CPUMCTX_EXTRN_TR)
582 aenmNames[iReg++] = WHvX64RegisterTr;
583 if (fWhat & CPUMCTX_EXTRN_IDTR)
584 aenmNames[iReg++] = WHvX64RegisterIdtr;
585 if (fWhat & CPUMCTX_EXTRN_GDTR)
586 aenmNames[iReg++] = WHvX64RegisterGdtr;
587 }
588
589 /* Control registers. */
590 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
591 {
592 if (fWhat & CPUMCTX_EXTRN_CR0)
593 aenmNames[iReg++] = WHvX64RegisterCr0;
594 if (fWhat & CPUMCTX_EXTRN_CR2)
595 aenmNames[iReg++] = WHvX64RegisterCr2;
596 if (fWhat & CPUMCTX_EXTRN_CR3)
597 aenmNames[iReg++] = WHvX64RegisterCr3;
598 if (fWhat & CPUMCTX_EXTRN_CR4)
599 aenmNames[iReg++] = WHvX64RegisterCr4;
600 }
601 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
602 aenmNames[iReg++] = WHvX64RegisterCr8;
603
604 /* Debug registers. */
605 if (fWhat & CPUMCTX_EXTRN_DR7)
606 aenmNames[iReg++] = WHvX64RegisterDr7;
607 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
608 {
609 if (!(fWhat & CPUMCTX_EXTRN_DR7) && (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_DR7))
610 {
611 fWhat |= CPUMCTX_EXTRN_DR7;
612 aenmNames[iReg++] = WHvX64RegisterDr7;
613 }
614 aenmNames[iReg++] = WHvX64RegisterDr0;
615 aenmNames[iReg++] = WHvX64RegisterDr1;
616 aenmNames[iReg++] = WHvX64RegisterDr2;
617 aenmNames[iReg++] = WHvX64RegisterDr3;
618 }
619 if (fWhat & CPUMCTX_EXTRN_DR6)
620 aenmNames[iReg++] = WHvX64RegisterDr6;
621
622 /* Floating point state. */
623 if (fWhat & CPUMCTX_EXTRN_X87)
624 {
625 aenmNames[iReg++] = WHvX64RegisterFpMmx0;
626 aenmNames[iReg++] = WHvX64RegisterFpMmx1;
627 aenmNames[iReg++] = WHvX64RegisterFpMmx2;
628 aenmNames[iReg++] = WHvX64RegisterFpMmx3;
629 aenmNames[iReg++] = WHvX64RegisterFpMmx4;
630 aenmNames[iReg++] = WHvX64RegisterFpMmx5;
631 aenmNames[iReg++] = WHvX64RegisterFpMmx6;
632 aenmNames[iReg++] = WHvX64RegisterFpMmx7;
633 aenmNames[iReg++] = WHvX64RegisterFpControlStatus;
634 }
635 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
636 aenmNames[iReg++] = WHvX64RegisterXmmControlStatus;
637
638 /* Vector state. */
639 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
640 {
641 aenmNames[iReg++] = WHvX64RegisterXmm0;
642 aenmNames[iReg++] = WHvX64RegisterXmm1;
643 aenmNames[iReg++] = WHvX64RegisterXmm2;
644 aenmNames[iReg++] = WHvX64RegisterXmm3;
645 aenmNames[iReg++] = WHvX64RegisterXmm4;
646 aenmNames[iReg++] = WHvX64RegisterXmm5;
647 aenmNames[iReg++] = WHvX64RegisterXmm6;
648 aenmNames[iReg++] = WHvX64RegisterXmm7;
649 aenmNames[iReg++] = WHvX64RegisterXmm8;
650 aenmNames[iReg++] = WHvX64RegisterXmm9;
651 aenmNames[iReg++] = WHvX64RegisterXmm10;
652 aenmNames[iReg++] = WHvX64RegisterXmm11;
653 aenmNames[iReg++] = WHvX64RegisterXmm12;
654 aenmNames[iReg++] = WHvX64RegisterXmm13;
655 aenmNames[iReg++] = WHvX64RegisterXmm14;
656 aenmNames[iReg++] = WHvX64RegisterXmm15;
657 }
658
659 /* MSRs */
660 // WHvX64RegisterTsc - don't touch
661 if (fWhat & CPUMCTX_EXTRN_EFER)
662 aenmNames[iReg++] = WHvX64RegisterEfer;
663 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
664 aenmNames[iReg++] = WHvX64RegisterKernelGsBase;
665 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
666 {
667 aenmNames[iReg++] = WHvX64RegisterSysenterCs;
668 aenmNames[iReg++] = WHvX64RegisterSysenterEip;
669 aenmNames[iReg++] = WHvX64RegisterSysenterEsp;
670 }
671 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
672 {
673 aenmNames[iReg++] = WHvX64RegisterStar;
674 aenmNames[iReg++] = WHvX64RegisterLstar;
675 aenmNames[iReg++] = WHvX64RegisterCstar;
676 aenmNames[iReg++] = WHvX64RegisterSfmask;
677 }
678
679//#ifdef LOG_ENABLED
680// const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pVM);
681//#endif
682 if (fWhat & CPUMCTX_EXTRN_TSC_AUX)
683 aenmNames[iReg++] = WHvX64RegisterTscAux;
684 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
685 {
686 aenmNames[iReg++] = WHvX64RegisterApicBase; /// @todo APIC BASE
687 aenmNames[iReg++] = WHvX64RegisterPat;
688#if 0 /*def LOG_ENABLED*/ /** @todo Check if WHvX64RegisterMsrMtrrCap works... */
689 aenmNames[iReg++] = WHvX64RegisterMsrMtrrCap;
690#endif
691 aenmNames[iReg++] = WHvX64RegisterMsrMtrrDefType;
692 aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix64k00000;
693 aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix16k80000;
694 aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix16kA0000;
695 aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kC0000;
696 aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kC8000;
697 aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kD0000;
698 aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kD8000;
699 aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kE0000;
700 aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kE8000;
701 aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kF0000;
702 aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kF8000;
703 /** @todo look for HvX64RegisterIa32MiscEnable and HvX64RegisterIa32FeatureControl? */
704//#ifdef LOG_ENABLED
705// if (enmCpuVendor != CPUMCPUVENDOR_AMD)
706// aenmNames[iReg++] = HvX64RegisterIa32FeatureControl;
707//#endif
708 }
709
710 /* Interruptibility. */
711 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
712 {
713 aenmNames[iReg++] = WHvRegisterInterruptState;
714 aenmNames[iReg++] = WHvX64RegisterRip;
715 }
716
717 /* event injection */
718 aenmNames[iReg++] = WHvRegisterPendingInterruption;
719 aenmNames[iReg++] = WHvRegisterPendingEvent;
720
721 size_t const cRegs = iReg;
722 Assert(cRegs < RT_ELEMENTS(aenmNames));
723
724 /*
725 * Get the registers.
726 */
727 WHV_REGISTER_VALUE aValues[128];
728 RT_ZERO(aValues);
729 Assert(RT_ELEMENTS(aValues) >= cRegs);
730 Assert(RT_ELEMENTS(aenmNames) >= cRegs);
731# ifdef NEM_WIN_INTERCEPT_NT_IO_CTLS
732 Log12(("Calling WHvGetVirtualProcessorRegisters(%p, %u, %p, %u, %p)\n",
733 pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, cRegs, aValues));
734# endif
735 HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, (uint32_t)cRegs, aValues);
736 AssertLogRelMsgReturn(SUCCEEDED(hrc),
737 ("WHvGetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
738 pVM->nem.s.hPartition, pVCpu->idCpu, cRegs, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
739 , VERR_NEM_GET_REGISTERS_FAILED);
740
741 iReg = 0;
742# define GET_REG64(a_DstVar, a_enmName) do { \
743 Assert(aenmNames[iReg] == (a_enmName)); \
744 (a_DstVar) = aValues[iReg].Reg64; \
745 iReg++; \
746 } while (0)
747# define GET_REG64_LOG7(a_DstVar, a_enmName, a_szLogName) do { \
748 Assert(aenmNames[iReg] == (a_enmName)); \
749 if ((a_DstVar) != aValues[iReg].Reg64) \
750 Log7(("NEM/%u: " a_szLogName " changed %RX64 -> %RX64\n", pVCpu->idCpu, (a_DstVar), aValues[iReg].Reg64)); \
751 (a_DstVar) = aValues[iReg].Reg64; \
752 iReg++; \
753 } while (0)
754# define GET_REG128(a_DstVarLo, a_DstVarHi, a_enmName) do { \
755 Assert(aenmNames[iReg] == a_enmName); \
756 (a_DstVarLo) = aValues[iReg].Reg128.Low64; \
757 (a_DstVarHi) = aValues[iReg].Reg128.High64; \
758 iReg++; \
759 } while (0)
760# define GET_SEG(a_SReg, a_enmName) do { \
761 Assert(aenmNames[iReg] == (a_enmName)); \
762 NEM_WIN_COPY_BACK_SEG(a_SReg, aValues[iReg].Segment); \
763 iReg++; \
764 } while (0)
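/* Added note (not in the original source): the GET_* macros assume the values
 * come back in exactly the order the names were queued above; the
 * Assert(aenmNames[iReg] == ...) checks enforce that invariant. */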
765
766 /* GPRs */
767 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
768 {
769 if (fWhat & CPUMCTX_EXTRN_RAX)
770 GET_REG64(pVCpu->cpum.GstCtx.rax, WHvX64RegisterRax);
771 if (fWhat & CPUMCTX_EXTRN_RCX)
772 GET_REG64(pVCpu->cpum.GstCtx.rcx, WHvX64RegisterRcx);
773 if (fWhat & CPUMCTX_EXTRN_RDX)
774 GET_REG64(pVCpu->cpum.GstCtx.rdx, WHvX64RegisterRdx);
775 if (fWhat & CPUMCTX_EXTRN_RBX)
776 GET_REG64(pVCpu->cpum.GstCtx.rbx, WHvX64RegisterRbx);
777 if (fWhat & CPUMCTX_EXTRN_RSP)
778 GET_REG64(pVCpu->cpum.GstCtx.rsp, WHvX64RegisterRsp);
779 if (fWhat & CPUMCTX_EXTRN_RBP)
780 GET_REG64(pVCpu->cpum.GstCtx.rbp, WHvX64RegisterRbp);
781 if (fWhat & CPUMCTX_EXTRN_RSI)
782 GET_REG64(pVCpu->cpum.GstCtx.rsi, WHvX64RegisterRsi);
783 if (fWhat & CPUMCTX_EXTRN_RDI)
784 GET_REG64(pVCpu->cpum.GstCtx.rdi, WHvX64RegisterRdi);
785 if (fWhat & CPUMCTX_EXTRN_R8_R15)
786 {
787 GET_REG64(pVCpu->cpum.GstCtx.r8, WHvX64RegisterR8);
788 GET_REG64(pVCpu->cpum.GstCtx.r9, WHvX64RegisterR9);
789 GET_REG64(pVCpu->cpum.GstCtx.r10, WHvX64RegisterR10);
790 GET_REG64(pVCpu->cpum.GstCtx.r11, WHvX64RegisterR11);
791 GET_REG64(pVCpu->cpum.GstCtx.r12, WHvX64RegisterR12);
792 GET_REG64(pVCpu->cpum.GstCtx.r13, WHvX64RegisterR13);
793 GET_REG64(pVCpu->cpum.GstCtx.r14, WHvX64RegisterR14);
794 GET_REG64(pVCpu->cpum.GstCtx.r15, WHvX64RegisterR15);
795 }
796 }
797
798 /* RIP & Flags */
799 if (fWhat & CPUMCTX_EXTRN_RIP)
800 GET_REG64(pVCpu->cpum.GstCtx.rip, WHvX64RegisterRip);
801 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
802 GET_REG64(pVCpu->cpum.GstCtx.rflags.u, WHvX64RegisterRflags);
803
804 /* Segments */
805 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
806 {
807 if (fWhat & CPUMCTX_EXTRN_ES)
808 GET_SEG(pVCpu->cpum.GstCtx.es, WHvX64RegisterEs);
809 if (fWhat & CPUMCTX_EXTRN_CS)
810 GET_SEG(pVCpu->cpum.GstCtx.cs, WHvX64RegisterCs);
811 if (fWhat & CPUMCTX_EXTRN_SS)
812 GET_SEG(pVCpu->cpum.GstCtx.ss, WHvX64RegisterSs);
813 if (fWhat & CPUMCTX_EXTRN_DS)
814 GET_SEG(pVCpu->cpum.GstCtx.ds, WHvX64RegisterDs);
815 if (fWhat & CPUMCTX_EXTRN_FS)
816 GET_SEG(pVCpu->cpum.GstCtx.fs, WHvX64RegisterFs);
817 if (fWhat & CPUMCTX_EXTRN_GS)
818 GET_SEG(pVCpu->cpum.GstCtx.gs, WHvX64RegisterGs);
819 }
820
821 /* Descriptor tables and the task segment. */
822 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
823 {
824 if (fWhat & CPUMCTX_EXTRN_LDTR)
825 GET_SEG(pVCpu->cpum.GstCtx.ldtr, WHvX64RegisterLdtr);
826
827 if (fWhat & CPUMCTX_EXTRN_TR)
828 {
829 /* AMD-V is happy to load TR in the AVAIL state, whereas Intel insists on BUSY. So,
830 to avoid triggering sanity assertions around the code, always fix this up here. */
831 GET_SEG(pVCpu->cpum.GstCtx.tr, WHvX64RegisterTr);
832 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
833 {
834 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
835 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
836 break;
837 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
838 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
839 break;
840 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
841 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;
842 break;
843 }
844 }
845 if (fWhat & CPUMCTX_EXTRN_IDTR)
846 {
847 Assert(aenmNames[iReg] == WHvX64RegisterIdtr);
848 pVCpu->cpum.GstCtx.idtr.cbIdt = aValues[iReg].Table.Limit;
849 pVCpu->cpum.GstCtx.idtr.pIdt = aValues[iReg].Table.Base;
850 iReg++;
851 }
852 if (fWhat & CPUMCTX_EXTRN_GDTR)
853 {
854 Assert(aenmNames[iReg] == WHvX64RegisterGdtr);
855 pVCpu->cpum.GstCtx.gdtr.cbGdt = aValues[iReg].Table.Limit;
856 pVCpu->cpum.GstCtx.gdtr.pGdt = aValues[iReg].Table.Base;
857 iReg++;
858 }
859 }
860
861 /* Control registers. */
862 bool fMaybeChangedMode = false;
863 bool fUpdateCr3 = false;
864 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
865 {
866 if (fWhat & CPUMCTX_EXTRN_CR0)
867 {
868 Assert(aenmNames[iReg] == WHvX64RegisterCr0);
869 if (pVCpu->cpum.GstCtx.cr0 != aValues[iReg].Reg64)
870 {
871 CPUMSetGuestCR0(pVCpu, aValues[iReg].Reg64);
872 fMaybeChangedMode = true;
873 }
874 iReg++;
875 }
876 if (fWhat & CPUMCTX_EXTRN_CR2)
877 GET_REG64(pVCpu->cpum.GstCtx.cr2, WHvX64RegisterCr2);
878 if (fWhat & CPUMCTX_EXTRN_CR3)
879 {
880 if (pVCpu->cpum.GstCtx.cr3 != aValues[iReg].Reg64)
881 {
882 CPUMSetGuestCR3(pVCpu, aValues[iReg].Reg64);
883 fUpdateCr3 = true;
884 }
885 iReg++;
886 }
887 if (fWhat & CPUMCTX_EXTRN_CR4)
888 {
889 if (pVCpu->cpum.GstCtx.cr4 != aValues[iReg].Reg64)
890 {
891 CPUMSetGuestCR4(pVCpu, aValues[iReg].Reg64);
892 fMaybeChangedMode = true;
893 }
894 iReg++;
895 }
896 }
897 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
898 {
899 Assert(aenmNames[iReg] == WHvX64RegisterCr8);
900 APICSetTpr(pVCpu, (uint8_t)aValues[iReg].Reg64 << 4);
901 iReg++;
902 }
903
904 /* Debug registers. */
905 if (fWhat & CPUMCTX_EXTRN_DR7)
906 {
907 Assert(aenmNames[iReg] == WHvX64RegisterDr7);
908 if (pVCpu->cpum.GstCtx.dr[7] != aValues[iReg].Reg64)
909 CPUMSetGuestDR7(pVCpu, aValues[iReg].Reg64);
910 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_DR7; /* Hack alert! Avoids asserting when processing CPUMCTX_EXTRN_DR0_DR3. */
911 iReg++;
912 }
913 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
914 {
915 Assert(aenmNames[iReg] == WHvX64RegisterDr0);
916 Assert(aenmNames[iReg+3] == WHvX64RegisterDr3);
917 if (pVCpu->cpum.GstCtx.dr[0] != aValues[iReg].Reg64)
918 CPUMSetGuestDR0(pVCpu, aValues[iReg].Reg64);
919 iReg++;
920 if (pVCpu->cpum.GstCtx.dr[1] != aValues[iReg].Reg64)
921 CPUMSetGuestDR1(pVCpu, aValues[iReg].Reg64);
922 iReg++;
923 if (pVCpu->cpum.GstCtx.dr[2] != aValues[iReg].Reg64)
924 CPUMSetGuestDR2(pVCpu, aValues[iReg].Reg64);
925 iReg++;
926 if (pVCpu->cpum.GstCtx.dr[3] != aValues[iReg].Reg64)
927 CPUMSetGuestDR3(pVCpu, aValues[iReg].Reg64);
928 iReg++;
929 }
930 if (fWhat & CPUMCTX_EXTRN_DR6)
931 {
932 Assert(aenmNames[iReg] == WHvX64RegisterDr6);
933 if (pVCpu->cpum.GstCtx.dr[6] != aValues[iReg].Reg64)
934 CPUMSetGuestDR6(pVCpu, aValues[iReg].Reg64);
935 iReg++;
936 }
937
938 /* Floating point state. */
939 if (fWhat & CPUMCTX_EXTRN_X87)
940 {
941 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aRegs[0].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[0].au64[1], WHvX64RegisterFpMmx0);
942 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aRegs[1].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[1].au64[1], WHvX64RegisterFpMmx1);
943 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aRegs[2].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[2].au64[1], WHvX64RegisterFpMmx2);
944 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aRegs[3].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[3].au64[1], WHvX64RegisterFpMmx3);
945 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aRegs[4].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[4].au64[1], WHvX64RegisterFpMmx4);
946 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aRegs[5].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[5].au64[1], WHvX64RegisterFpMmx5);
947 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aRegs[6].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[6].au64[1], WHvX64RegisterFpMmx6);
948 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aRegs[7].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[7].au64[1], WHvX64RegisterFpMmx7);
949
950 Assert(aenmNames[iReg] == WHvX64RegisterFpControlStatus);
951 pVCpu->cpum.GstCtx.XState.x87.FCW = aValues[iReg].FpControlStatus.FpControl;
952 pVCpu->cpum.GstCtx.XState.x87.FSW = aValues[iReg].FpControlStatus.FpStatus;
953 pVCpu->cpum.GstCtx.XState.x87.FTW = aValues[iReg].FpControlStatus.FpTag
954 /*| (aValues[iReg].FpControlStatus.Reserved << 8)*/;
955 pVCpu->cpum.GstCtx.XState.x87.FOP = aValues[iReg].FpControlStatus.LastFpOp;
956 pVCpu->cpum.GstCtx.XState.x87.FPUIP = (uint32_t)aValues[iReg].FpControlStatus.LastFpRip;
957 pVCpu->cpum.GstCtx.XState.x87.CS = (uint16_t)(aValues[iReg].FpControlStatus.LastFpRip >> 32);
958 pVCpu->cpum.GstCtx.XState.x87.Rsrvd1 = (uint16_t)(aValues[iReg].FpControlStatus.LastFpRip >> 48);
959 iReg++;
960 }
961
962 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
963 {
964 Assert(aenmNames[iReg] == WHvX64RegisterXmmControlStatus);
965 if (fWhat & CPUMCTX_EXTRN_X87)
966 {
967 pVCpu->cpum.GstCtx.XState.x87.FPUDP = (uint32_t)aValues[iReg].XmmControlStatus.LastFpRdp;
968 pVCpu->cpum.GstCtx.XState.x87.DS = (uint16_t)(aValues[iReg].XmmControlStatus.LastFpRdp >> 32);
969 pVCpu->cpum.GstCtx.XState.x87.Rsrvd2 = (uint16_t)(aValues[iReg].XmmControlStatus.LastFpRdp >> 48);
970 }
971 pVCpu->cpum.GstCtx.XState.x87.MXCSR = aValues[iReg].XmmControlStatus.XmmStatusControl;
972 pVCpu->cpum.GstCtx.XState.x87.MXCSR_MASK = aValues[iReg].XmmControlStatus.XmmStatusControlMask; /** @todo ??? (Isn't this an output field?) */
973 iReg++;
974 }
975
976 /* Vector state. */
977 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
978 {
979 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 0].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 0].uXmm.s.Hi, WHvX64RegisterXmm0);
980 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 1].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 1].uXmm.s.Hi, WHvX64RegisterXmm1);
981 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 2].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 2].uXmm.s.Hi, WHvX64RegisterXmm2);
982 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 3].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 3].uXmm.s.Hi, WHvX64RegisterXmm3);
983 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 4].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 4].uXmm.s.Hi, WHvX64RegisterXmm4);
984 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 5].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 5].uXmm.s.Hi, WHvX64RegisterXmm5);
985 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 6].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 6].uXmm.s.Hi, WHvX64RegisterXmm6);
986 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 7].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 7].uXmm.s.Hi, WHvX64RegisterXmm7);
987 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 8].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 8].uXmm.s.Hi, WHvX64RegisterXmm8);
988 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 9].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 9].uXmm.s.Hi, WHvX64RegisterXmm9);
989 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[10].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[10].uXmm.s.Hi, WHvX64RegisterXmm10);
990 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[11].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[11].uXmm.s.Hi, WHvX64RegisterXmm11);
991 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[12].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[12].uXmm.s.Hi, WHvX64RegisterXmm12);
992 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[13].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[13].uXmm.s.Hi, WHvX64RegisterXmm13);
993 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[14].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[14].uXmm.s.Hi, WHvX64RegisterXmm14);
994 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[15].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[15].uXmm.s.Hi, WHvX64RegisterXmm15);
995 }
996
997 /* MSRs */
998 // WHvX64RegisterTsc - don't touch
999 if (fWhat & CPUMCTX_EXTRN_EFER)
1000 {
1001 Assert(aenmNames[iReg] == WHvX64RegisterEfer);
1002 if (aValues[iReg].Reg64 != pVCpu->cpum.GstCtx.msrEFER)
1003 {
1004 Log7(("NEM/%u: MSR EFER changed %RX64 -> %RX64\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.msrEFER, aValues[iReg].Reg64));
1005 if ((aValues[iReg].Reg64 ^ pVCpu->cpum.GstCtx.msrEFER) & MSR_K6_EFER_NXE)
1006 PGMNotifyNxeChanged(pVCpu, RT_BOOL(aValues[iReg].Reg64 & MSR_K6_EFER_NXE));
1007 pVCpu->cpum.GstCtx.msrEFER = aValues[iReg].Reg64;
1008 fMaybeChangedMode = true;
1009 }
1010 iReg++;
1011 }
1012 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1013 GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrKERNELGSBASE, WHvX64RegisterKernelGsBase, "MSR KERNEL_GS_BASE");
1014 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1015 {
1016 GET_REG64_LOG7(pVCpu->cpum.GstCtx.SysEnter.cs, WHvX64RegisterSysenterCs, "MSR SYSENTER.CS");
1017 GET_REG64_LOG7(pVCpu->cpum.GstCtx.SysEnter.eip, WHvX64RegisterSysenterEip, "MSR SYSENTER.EIP");
1018 GET_REG64_LOG7(pVCpu->cpum.GstCtx.SysEnter.esp, WHvX64RegisterSysenterEsp, "MSR SYSENTER.ESP");
1019 }
1020 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1021 {
1022 GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrSTAR, WHvX64RegisterStar, "MSR STAR");
1023 GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrLSTAR, WHvX64RegisterLstar, "MSR LSTAR");
1024 GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrCSTAR, WHvX64RegisterCstar, "MSR CSTAR");
1025 GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrSFMASK, WHvX64RegisterSfmask, "MSR SFMASK");
1026 }
1027 if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
1028 {
1029 PCPUMCTXMSRS const pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
1030 if (fWhat & CPUMCTX_EXTRN_TSC_AUX)
1031 GET_REG64_LOG7(pCtxMsrs->msr.TscAux, WHvX64RegisterTscAux, "MSR TSC_AUX");
1032 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1033 {
1034 Assert(aenmNames[iReg] == WHvX64RegisterApicBase);
1035 const uint64_t uOldBase = APICGetBaseMsrNoCheck(pVCpu);
1036 if (aValues[iReg].Reg64 != uOldBase)
1037 {
1038 Log7(("NEM/%u: MSR APICBase changed %RX64 -> %RX64 (%RX64)\n",
1039 pVCpu->idCpu, uOldBase, aValues[iReg].Reg64, aValues[iReg].Reg64 ^ uOldBase));
1040 int rc2 = APICSetBaseMsr(pVCpu, aValues[iReg].Reg64);
1041 AssertLogRelMsg(rc2 == VINF_SUCCESS, ("%Rrc %RX64\n", rc2, aValues[iReg].Reg64));
1042 }
1043 iReg++;
1044
1045 GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrPAT, WHvX64RegisterPat, "MSR PAT");
1046#if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
1047 GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrPAT, WHvX64RegisterMsrMtrrCap);
1048#endif
1049 GET_REG64_LOG7(pCtxMsrs->msr.MtrrDefType, WHvX64RegisterMsrMtrrDefType, "MSR MTRR_DEF_TYPE");
1050 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix64K_00000, WHvX64RegisterMsrMtrrFix64k00000, "MSR MTRR_FIX_64K_00000");
1051 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix16K_80000, WHvX64RegisterMsrMtrrFix16k80000, "MSR MTRR_FIX_16K_80000");
1052 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix16K_A0000, WHvX64RegisterMsrMtrrFix16kA0000, "MSR MTRR_FIX_16K_A0000");
1053 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_C0000, WHvX64RegisterMsrMtrrFix4kC0000, "MSR MTRR_FIX_4K_C0000");
1054 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_C8000, WHvX64RegisterMsrMtrrFix4kC8000, "MSR MTRR_FIX_4K_C8000");
1055 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_D0000, WHvX64RegisterMsrMtrrFix4kD0000, "MSR MTRR_FIX_4K_D0000");
1056 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_D8000, WHvX64RegisterMsrMtrrFix4kD8000, "MSR MTRR_FIX_4K_D8000");
1057 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_E0000, WHvX64RegisterMsrMtrrFix4kE0000, "MSR MTRR_FIX_4K_E0000");
1058 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_E8000, WHvX64RegisterMsrMtrrFix4kE8000, "MSR MTRR_FIX_4K_E8000");
1059 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_F0000, WHvX64RegisterMsrMtrrFix4kF0000, "MSR MTRR_FIX_4K_F0000");
1060 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_F8000, WHvX64RegisterMsrMtrrFix4kF8000, "MSR MTRR_FIX_4K_F8000");
1061 /** @todo look for HvX64RegisterIa32MiscEnable and HvX64RegisterIa32FeatureControl? */
1062 }
1063 }
1064
1065 /* Interruptibility. */
1066 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
1067 {
1068 Assert(aenmNames[iReg] == WHvRegisterInterruptState);
1069 Assert(aenmNames[iReg + 1] == WHvX64RegisterRip);
1070
1071 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_INHIBIT_INT))
1072 {
1073 pVCpu->nem.s.fLastInterruptShadow = aValues[iReg].InterruptState.InterruptShadow;
1074 if (aValues[iReg].InterruptState.InterruptShadow)
1075 EMSetInhibitInterruptsPC(pVCpu, aValues[iReg + 1].Reg64);
1076 else
1077 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1078 }
1079
1080 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_INHIBIT_NMI))
1081 {
1082 if (aValues[iReg].InterruptState.NmiMasked)
1083 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
1084 else
1085 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
1086 }
1087
1088 fWhat |= CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI;
1089 iReg += 2;
1090 }
1091
1092 /* Event injection. */
1093 /// @todo WHvRegisterPendingInterruption
1094 Assert(aenmNames[iReg] == WHvRegisterPendingInterruption);
1095 if (aValues[iReg].PendingInterruption.InterruptionPending)
1096 {
1097 Log7(("PendingInterruption: type=%u vector=%#x errcd=%RTbool/%#x instr-len=%u nested=%u\n",
1098 aValues[iReg].PendingInterruption.InterruptionType, aValues[iReg].PendingInterruption.InterruptionVector,
1099 aValues[iReg].PendingInterruption.DeliverErrorCode, aValues[iReg].PendingInterruption.ErrorCode,
1100 aValues[iReg].PendingInterruption.InstructionLength, aValues[iReg].PendingInterruption.NestedEvent));
1101 AssertMsg((aValues[iReg].PendingInterruption.AsUINT64 & UINT64_C(0xfc00)) == 0,
1102 ("%#RX64\n", aValues[iReg].PendingInterruption.AsUINT64));
1103 }
1104
1105 /// @todo WHvRegisterPendingEvent
1106
1107 /* Almost done, just update extrn flags and maybe change PGM mode. */
1108 pVCpu->cpum.GstCtx.fExtrn &= ~fWhat;
1109 if (!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT))))
1110 pVCpu->cpum.GstCtx.fExtrn = 0;
1111
1112 /* Typical. */
1113 if (!fMaybeChangedMode && !fUpdateCr3)
1114 return VINF_SUCCESS;
1115
1116 /*
1117 * Slow.
1118 */
1119 if (fMaybeChangedMode)
1120 {
1121 int rc = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER,
1122 false /* fForce */);
1123 AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_1);
1124 }
1125
1126 if (fUpdateCr3)
1127 {
1128 int rc = PGMUpdateCR3(pVCpu, pVCpu->cpum.GstCtx.cr3);
1129 if (rc == VINF_SUCCESS)
1130 { /* likely */ }
1131 else
1132 AssertMsgFailedReturn(("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_2);
1133 }
1134
1135 return VINF_SUCCESS;
1136# endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
1137}
1138
1139#endif /* !IN_RING0 */
1140
1141
1142/**
1143 * Interface for importing state on demand (used by IEM).
1144 *
1145 * @returns VBox status code.
1146 * @param pVCpu The cross context CPU structure.
1147 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
1148 */
1149VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
1150{
1151 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);
1152
1153#ifdef IN_RING0
1154# ifdef NEM_WIN_WITH_RING0_RUNLOOP
1155 return nemR0WinImportState(pVCpu->pGVM, pVCpu, &pVCpu->cpum.GstCtx, fWhat, true /*fCanUpdateCr3*/);
1156# else
1157 RT_NOREF(pVCpu, fWhat);
1158 return VERR_NOT_IMPLEMENTED;
1159# endif
1160#else
1161 return nemHCWinCopyStateFromHyperV(pVCpu->pVMR3, pVCpu, fWhat);
1162#endif
1163}
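/* Added commentary (not in the original source): a hypothetical IEM-style
 * caller doing an on-demand import before reading a possibly-externalized
 * register:
 *     if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP)
 *     {
 *         int rc = NEMImportStateOnDemand(pVCpu, CPUMCTX_EXTRN_RIP);
 *         AssertRCReturn(rc, rc);
 *     }
 */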
1164
1165
1166/**
1167 * Query the CPU tick counter and optionally the TSC_AUX MSR value.
1168 *
1169 * @returns VBox status code.
1170 * @param pVCpu The cross context CPU structure.
1171 * @param pcTicks Where to return the CPU tick count.
1172 * @param puAux Where to return the TSC_AUX register value.
1173 */
1174VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPUCC pVCpu, uint64_t *pcTicks, uint32_t *puAux)
1175{
1176 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick);
1177
1178#ifdef IN_RING3
1179 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1180 VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
1181 AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);
1182
1183# if defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) || defined(NEM_WIN_WITH_RING0_RUNLOOP)
1184# if !defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) && defined(NEM_WIN_WITH_RING0_RUNLOOP)
1185 if (pVM->nem.s.fUseRing0Runloop)
1186# endif
1187 {
1188 /* Call ring-0 and get the values. */
1189 int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_QUERY_CPU_TICK, 0, NULL);
1190 AssertLogRelRCReturn(rc, rc);
1191 *pcTicks = pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks;
1192 if (puAux)
1193 *puAux = pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_TSC_AUX
1194 ? pVCpu->nem.s.Hypercall.QueryCpuTick.uAux : CPUMGetGuestTscAux(pVCpu);
1195 return VINF_SUCCESS;
1196 }
1197# endif
1198# ifndef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS
1199 /* Call the official API. */
1200 WHV_REGISTER_NAME aenmNames[2] = { WHvX64RegisterTsc, WHvX64RegisterTscAux };
1201 WHV_REGISTER_VALUE aValues[2] = { {0, 0}, {0, 0} };
1202 Assert(RT_ELEMENTS(aenmNames) == RT_ELEMENTS(aValues));
1203 HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, 2, aValues);
1204 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1205 ("WHvGetVirtualProcessorRegisters(%p, %u,{tsc,tsc_aux},2,) -> %Rhrc (Last=%#x/%u)\n",
1206 pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1207 , VERR_NEM_GET_REGISTERS_FAILED);
1208 *pcTicks = aValues[0].Reg64;
1209 if (puAux)
1210 *puAux = pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_TSC_AUX ? aValues[1].Reg64 : CPUMGetGuestTscAux(pVCpu);
1211 return VINF_SUCCESS;
1212# endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
1213#else /* IN_RING0 */
1214# ifdef NEM_WIN_WITH_RING0_RUNLOOP
1215 int rc = nemR0WinQueryCpuTick(pVCpu->pGVM, pVCpu, pcTicks, puAux);
1216 if (RT_SUCCESS(rc) && puAux && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_TSC_AUX))
1217 *puAux = CPUMGetGuestTscAux(pVCpu);
1218 return rc;
1219# else
1220 RT_NOREF(pVCpu, pcTicks, puAux);
1221 return VERR_NOT_IMPLEMENTED;
1222# endif
1223#endif /* IN_RING0 */
1224}
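/* Added note (not in the original source): the ring-3 WHv path above reads
 * WHvX64RegisterTsc and WHvX64RegisterTscAux in a single batched call; the
 * TSC_AUX value is only taken from Hyper-V while CPUMCTX_EXTRN_TSC_AUX is
 * still externalized, otherwise the CPUM copy is returned. */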
1225
1226
1227/**
1228 * Resumes CPU clock (TSC) on all virtual CPUs.
1229 *
1230 * This is called by TM when the VM is started, restored, resumed or similar.
1231 *
1232 * @returns VBox status code.
1233 * @param pVM The cross context VM structure.
1234 * @param pVCpu The cross context CPU structure of the calling EMT.
1235 * @param uPausedTscValue The TSC value at the time of pausing.
1236 */
1237VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uPausedTscValue)
1238{
1239#ifdef IN_RING0
1240# ifdef NEM_WIN_WITH_RING0_RUNLOOP
1241 return nemR0WinResumeCpuTickOnAll(pVM, pVCpu, uPausedTscValue);
1242# else
1243 RT_NOREF(pVM, pVCpu, uPausedTscValue);
1244 return VERR_NOT_IMPLEMENTED;
1245# endif
1246#else /* IN_RING3 */
1247 VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
1248 AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);
1249
1250# if defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) || defined(NEM_WIN_WITH_RING0_RUNLOOP)
1251# if !defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) && defined(NEM_WIN_WITH_RING0_RUNLOOP)
1252 if (pVM->nem.s.fUseRing0Runloop)
1253# endif
1254 {
1255 /* Call ring-0 and do it all there. */
1256 return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL, uPausedTscValue, NULL);
1257 }
1258# endif
1259# ifndef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS
1260 /*
1261 * Call the official API to do the job.
1262 */
1263 if (pVM->cCpus > 1)
1264 RTThreadYield(); /* Try to decrease the chance that we get rescheduled in the middle. */
1265
1266 /* Start with the first CPU. */
1267 WHV_REGISTER_NAME enmName = WHvX64RegisterTsc;
1268 WHV_REGISTER_VALUE Value = {0, 0};
1269 Value.Reg64 = uPausedTscValue;
1270 uint64_t const uFirstTsc = ASMReadTSC();
1271 HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, 0 /*iCpu*/, &enmName, 1, &Value);
1272 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1273 ("WHvSetVirtualProcessorRegisters(%p, 0,{tsc},2,%#RX64) -> %Rhrc (Last=%#x/%u)\n",
1274 pVM->nem.s.hPartition, uPausedTscValue, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1275 , VERR_NEM_SET_TSC);
1276
1277 /* Do the other CPUs, adjusting for elapsed TSC and keeping fingers crossed
1278 that we don't introduce too much drift here. */
1279 for (VMCPUID iCpu = 1; iCpu < pVM->cCpus; iCpu++)
1280 {
1281 Assert(enmName == WHvX64RegisterTsc);
1282 const uint64_t offDelta = (ASMReadTSC() - uFirstTsc);
1283 Value.Reg64 = uPausedTscValue + offDelta;
1284 hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, iCpu, &enmName, 1, &Value);
1285 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1286 ("WHvSetVirtualProcessorRegisters(%p, 0,{tsc},2,%#RX64 + %#RX64) -> %Rhrc (Last=%#x/%u)\n",
1287 pVM->nem.s.hPartition, iCpu, uPausedTscValue, offDelta, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1288 , VERR_NEM_SET_TSC);
1289 }
1290
1291 return VINF_SUCCESS;
1292# endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
1293#endif /* IN_RING3 */
1294}
1295
1296#ifdef NEMWIN_NEED_GET_REGISTER
1297# if defined(IN_RING0) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
1298/** Worker for assertion macro. */
1299NEM_TMPL_STATIC int nemHCWinGetRegister(PVMCPUCC pVCpu, PGVMCPU pGVCpu, uint32_t enmReg, HV_REGISTER_VALUE *pRetValue)
1300{
1301 RT_ZERO(*pRetValue);
1302# ifdef IN_RING3
1303 RT_NOREF(pVCpu, pGVCpu, enmReg);
1304 return VERR_NOT_IMPLEMENTED;
1305# else
1306 NOREF(pVCpu);
1307
1308 /*
1309 * Hypercall parameters.
1310 */
1311 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
1312 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
1313 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
1314
1315 pInput->PartitionId = pVCpu->pGVM->nemr0.s.idHvPartition;
1316 pInput->VpIndex = pVCpu->idCpu;
1317 pInput->fFlags = 0;
1318 pInput->Names[0] = (HV_REGISTER_NAME)enmReg;
1319
1320 size_t const cbInput = RT_ALIGN_Z(RT_UOFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[1]), 32);
1321 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
1322 RT_BZERO(paValues, sizeof(paValues[0]) * 1);
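/* Assumed page layout: the input block occupies the first cbInput bytes (padded up to a
   32 byte boundary) and the hypervisor writes the HV_REGISTER_VALUE output array right
   after it, which is why the output GPA passed below is HCPhysPage + cbInput. */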
1323
1324 /*
1325 * Make the hypercall and copy out the value.
1326 */
1327 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, 1),
1328 pGVCpu->nem.s.HypercallData.HCPhysPage,
1329 pGVCpu->nem.s.HypercallData.HCPhysPage + cbInput);
1330 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(1), ("uResult=%RX64 cRegs=%#x\n", uResult, 1),
1331 VERR_NEM_GET_REGISTERS_FAILED);
1332
1333 *pRetValue = paValues[0];
1334 return VINF_SUCCESS;
1335# endif
1336}
1337# else
1338/** Worker for assertion macro. */
1339 NEM_TMPL_STATIC int nemR3WinGetRegister(PVMCPUCC pVCpu, uint32_t enmReg, WHV_REGISTER_VALUE *pRetValue)
1340 {
1341 RT_ZERO(*pRetValue);
1342 RT_NOREF(pVCpu, enmReg);
1343 return VERR_NOT_IMPLEMENTED;
1344 }
1345# endif
1346#endif
1347
1348
1349#ifdef LOG_ENABLED
1350/**
1351 * Get the virtual processor running status.
1352 */
1353DECLINLINE(VID_PROCESSOR_STATUS) nemHCWinCpuGetRunningStatus(PVMCPUCC pVCpu)
1354{
1355# ifdef IN_RING0
1356 NOREF(pVCpu);
1357 return VidProcessorStatusUndefined;
1358# else
1359 RTERRVARS Saved;
1360 RTErrVarsSave(&Saved);
1361
1362 /*
1363 * This API is disabled in release builds, it seems. On build 17101 it requires
1364 * the following patch to be enabled (windbg): eb vid+12180 0f 84 98 00 00 00
1365 */
1366 VID_PROCESSOR_STATUS enmCpuStatus = VidProcessorStatusUndefined;
1367 NTSTATUS rcNt = g_pfnVidGetVirtualProcessorRunningStatus(pVCpu->pVMR3->nem.s.hPartitionDevice, pVCpu->idCpu, &enmCpuStatus);
1368 AssertRC(rcNt);
1369
1370 RTErrVarsRestore(&Saved);
1371 return enmCpuStatus;
1372# endif
1373}
1374#endif /* LOG_ENABLED */
1375
1376
1377#if defined(NEM_WIN_USE_OUR_OWN_RUN_API) || defined(NEM_WIN_WITH_RING0_RUNLOOP)
1378# ifdef IN_RING3 /* hopefully not needed in ring-0, as we'd need KTHREADs and KeAlertThread. */
1379/**
1380 * Our own WHvCancelRunVirtualProcessor that can later be moved to ring-0.
1381 *
1382 * This is an experiment only.
1383 *
1384 * @returns VBox status code.
1385 * @param pVM The cross context VM structure.
1386 * @param pVCpu The cross context virtual CPU structure of the
1387 * calling EMT.
1388 */
1389NEM_TMPL_STATIC int nemHCWinCancelRunVirtualProcessor(PVMCC pVM, PVMCPUCC pVCpu)
1390{
1391 /*
1392 * Work the state.
1393 *
1394 * From the looks of things, we should let the EMT call VidStopVirtualProcessor.
1395 * So, we just need to modify the state and kick the EMT if it's waiting on
1396 * messages. For the latter we use QueueUserAPC / KeAlertThread.
1397 */
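/* State transition sketch for the switch below:
     STARTED_EXEC_NEM      -> STARTED_EXEC_NEM_CANCELED   (EMT is executing; it will notice the change)
     STARTED_EXEC_NEM_WAIT -> STARTED_EXEC_NEM_CANCELED   (plus NtAlertThread to wake the waiting EMT)
     anything else         -> nothing to do, return VINF_SUCCESS. */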
1398 for (;;)
1399 {
1400 VMCPUSTATE enmState = VMCPU_GET_STATE(pVCpu);
1401 switch (enmState)
1402 {
1403 case VMCPUSTATE_STARTED_EXEC_NEM:
1404 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED, VMCPUSTATE_STARTED_EXEC_NEM))
1405 {
1406 DBGFTRACE_CUSTOM(pVM, "VMCPUSTATE_STARTED_EXEC_NEM -> CANCELED");
1407 Log8(("nemHCWinCancelRunVirtualProcessor: Switched %u to canceled state\n", pVCpu->idCpu));
1408 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatCancelChangedState);
1409 return VINF_SUCCESS;
1410 }
1411 break;
1412
1413 case VMCPUSTATE_STARTED_EXEC_NEM_WAIT:
1414 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED, VMCPUSTATE_STARTED_EXEC_NEM_WAIT))
1415 {
1416 DBGFTRACE_CUSTOM(pVM, "VMCPUSTATE_STARTED_EXEC_NEM_WAIT -> CANCELED");
1417# ifdef IN_RING0
1418 NTSTATUS rcNt = KeAlertThread(??);
1419 DBGFTRACE_CUSTOM(pVM, "KeAlertThread -> %#x", rcNt);
1420# else
1421 NTSTATUS rcNt = NtAlertThread(pVCpu->nem.s.hNativeThreadHandle);
1422 DBGFTRACE_CUSTOM(pVM, "NtAlertThread -> %#x", rcNt);
1423# endif
1424 Log8(("nemHCWinCancelRunVirtualProcessor: Alerted %u: %#x\n", pVCpu->idCpu, rcNt));
1425 Assert(rcNt == STATUS_SUCCESS);
1426 if (NT_SUCCESS(rcNt))
1427 {
1428 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatCancelAlertedThread);
1429 return VINF_SUCCESS;
1430 }
1431 AssertLogRelMsgFailedReturn(("NtAlertThread failed: %#x\n", rcNt), RTErrConvertFromNtStatus(rcNt));
1432 }
1433 break;
1434
1435 default:
1436 return VINF_SUCCESS;
1437 }
1438
1439 ASMNopPause();
1440 RT_NOREF(pVM);
1441 }
1442}
1443# endif /* IN_RING3 */
1444#endif /* NEM_WIN_USE_OUR_OWN_RUN_API || NEM_WIN_WITH_RING0_RUNLOOP */
1445
1446
1447#ifdef LOG_ENABLED
1448/**
1449 * Logs the current CPU state.
1450 */
1451NEM_TMPL_STATIC void nemHCWinLogState(PVMCC pVM, PVMCPUCC pVCpu)
1452{
1453 if (LogIs3Enabled())
1454 {
1455# if 0 // def IN_RING3 - causes lazy state import assertions all over CPUM.
1456 char szRegs[4096];
1457 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
1458 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
1459 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
1460 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
1461 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
1462 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
1463 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
1464 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
1465 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
1466 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
1467 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
1468 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
1469 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
1470 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
1471 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
1472 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
1473 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
1474 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
1475 " efer=%016VR{efer}\n"
1476 " pat=%016VR{pat}\n"
1477 " sf_mask=%016VR{sf_mask}\n"
1478 "krnl_gs_base=%016VR{krnl_gs_base}\n"
1479 " lstar=%016VR{lstar}\n"
1480 " star=%016VR{star} cstar=%016VR{cstar}\n"
1481 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
1482 );
1483
1484 char szInstr[256];
1485 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
1486 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
1487 szInstr, sizeof(szInstr), NULL);
1488 Log3(("%s%s\n", szRegs, szInstr));
1489# else
1490 /** @todo state logging in ring-0 */
1491 RT_NOREF(pVM, pVCpu);
1492# endif
1493 }
1494}
1495#endif /* LOG_ENABLED */
1496
1497
1498/** Macro used by nemHCWinExecStateToLogStr and nemR3WinExecStateToLogStr. */
1499#define SWITCH_IT(a_szPrefix) \
1500 do \
1501 switch (u)\
1502 { \
1503 case 0x00: return a_szPrefix ""; \
1504 case 0x01: return a_szPrefix ",Pnd"; \
1505 case 0x02: return a_szPrefix ",Dbg"; \
1506 case 0x03: return a_szPrefix ",Pnd,Dbg"; \
1507 case 0x04: return a_szPrefix ",Shw"; \
1508 case 0x05: return a_szPrefix ",Pnd,Shw"; \
1509 case 0x06: return a_szPrefix ",Shw,Dbg"; \
1510 case 0x07: return a_szPrefix ",Pnd,Shw,Dbg"; \
1511 default: AssertFailedReturn("WTF?"); \
1512 } \
1513 while (0)
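/* Illustrative outputs: u=0 in long mode yields "LM"; u=0x5 (interruption pending plus
   interrupt shadow) yields "LM,Pnd,Shw". */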
1514
1515#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
1516/**
1517 * Translates the execution state bitfield into a short log string, VID version.
1518 *
1519 * @returns Read-only log string.
1520 * @param pMsgHdr The header whose state to summarize.
1521 */
1522static const char *nemHCWinExecStateToLogStr(HV_X64_INTERCEPT_MESSAGE_HEADER const *pMsgHdr)
1523{
1524 unsigned u = (unsigned)pMsgHdr->ExecutionState.InterruptionPending
1525 | ((unsigned)pMsgHdr->ExecutionState.DebugActive << 1)
1526 | ((unsigned)pMsgHdr->ExecutionState.InterruptShadow << 2);
1527 if (pMsgHdr->ExecutionState.EferLma)
1528 SWITCH_IT("LM");
1529 else if (pMsgHdr->ExecutionState.Cr0Pe)
1530 SWITCH_IT("PM");
1531 else
1532 SWITCH_IT("RM");
1533}
1534#elif defined(IN_RING3)
1535/**
1536 * Translates the execution state bitfield into a short log string, WinHv version.
1537 *
1538 * @returns Read-only log string.
1539 * @param pExitCtx The exit context whose state to summarize.
1540 */
1541static const char *nemR3WinExecStateToLogStr(WHV_VP_EXIT_CONTEXT const *pExitCtx)
1542{
1543 unsigned u = (unsigned)pExitCtx->ExecutionState.InterruptionPending
1544 | ((unsigned)pExitCtx->ExecutionState.DebugActive << 1)
1545 | ((unsigned)pExitCtx->ExecutionState.InterruptShadow << 2);
1546 if (pExitCtx->ExecutionState.EferLma)
1547 SWITCH_IT("LM");
1548 else if (pExitCtx->ExecutionState.Cr0Pe)
1549 SWITCH_IT("PM");
1550 else
1551 SWITCH_IT("RM");
1552}
1553#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
1554#undef SWITCH_IT
1555
1556
1557#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
1558/**
1559 * Advances the guest RIP and clears EFLAGS.RF, VID version.
1560 *
1561 * This may clear VMCPU_FF_INHIBIT_INTERRUPTS.
1562 *
1563 * @param pVCpu The cross context virtual CPU structure.
1564 * @param pMsgHdr The X64 intercept message header.
1565 * @param cbMinInstr The minimum instruction length, or 1 if unknown.
1566 */
1567DECLINLINE(void)
1568nemHCWinAdvanceGuestRipAndClearRF(PVMCPUCC pVCpu, HV_X64_INTERCEPT_MESSAGE_HEADER const *pMsgHdr, uint8_t cbMinInstr)
1569{
1570 Assert(!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));
1571
1572 /* Advance the RIP. */
1573 Assert(pMsgHdr->InstructionLength >= cbMinInstr); RT_NOREF_PV(cbMinInstr);
1574 pVCpu->cpum.GstCtx.rip += pMsgHdr->InstructionLength;
1575 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
1576
1577 /* Update interrupt inhibition. */
1578 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1579 { /* likely */ }
1580 else if (pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
1581 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1582}
1583#elif defined(IN_RING3)
1584/**
1585 * Advances the guest RIP and clears EFLAGS.RF, WinHv version.
1586 *
1587 * This may clear VMCPU_FF_INHIBIT_INTERRUPTS.
1588 *
1589 * @param pVCpu The cross context virtual CPU structure.
1590 * @param pExitCtx The exit context.
1591 * @param cbMinInstr The minimum instruction length, or 1 if unknown.
1592 */
1593DECLINLINE(void) nemR3WinAdvanceGuestRipAndClearRF(PVMCPUCC pVCpu, WHV_VP_EXIT_CONTEXT const *pExitCtx, uint8_t cbMinInstr)
1594{
1595 Assert(!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));
1596
1597 /* Advance the RIP. */
1598 Assert(pExitCtx->InstructionLength >= cbMinInstr); RT_NOREF_PV(cbMinInstr);
1599 pVCpu->cpum.GstCtx.rip += pExitCtx->InstructionLength;
1600 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
1601
1602 /* Update interrupt inhibition. */
1603 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1604 { /* likely */ }
1605 else if (pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
1606 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1607}
1608#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
1609
1610#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING3)
1611
1612NEM_TMPL_STATIC DECLCALLBACK(int)
1613nemHCWinUnmapOnePageCallback(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint8_t *pu2NemState, void *pvUser)
1614{
1615 RT_NOREF_PV(pvUser);
1616# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1617 int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys);
1618 AssertRC(rc);
1619 if (RT_SUCCESS(rc))
1620# else
1621 RT_NOREF_PV(pVCpu);
1622 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
1623 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
1624 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
1625 if (SUCCEEDED(hrc))
1626# endif
1627 {
1628 Log5(("NEM GPA unmap all: %RGp (cMappedPages=%u)\n", GCPhys, pVM->nem.s.cMappedPages - 1));
1629 *pu2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
1630 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
1631 }
1632 else
1633 {
1634# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1635 LogRel(("nemHCWinUnmapOnePageCallback: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));
1636# else
1637 LogRel(("nemHCWinUnmapOnePageCallback: GCPhys=%RGp %s hrc=%Rhrc (%#x) Last=%#x/%u (cMappedPages=%u)\n",
1638 GCPhys, g_apszPageStates[*pu2NemState], hrc, hrc, RTNtLastStatusValue(),
1639 RTNtLastErrorValue(), pVM->nem.s.cMappedPages));
1640# endif
1641 *pu2NemState = NEM_WIN_PAGE_STATE_NOT_SET;
1642 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
1643 }
1644 if (pVM->nem.s.cMappedPages > 0)
1645 ASMAtomicDecU32(&pVM->nem.s.cMappedPages);
1646 return VINF_SUCCESS;
1647}
1648
1649
1650/**
1651 * State to pass between nemHCWinHandleMemoryAccess / nemR3WinWHvHandleMemoryAccess
1652 * and nemHCWinHandleMemoryAccessPageCheckerCallback.
1653 */
1654typedef struct NEMHCWINHMACPCCSTATE
1655{
1656 /** Input: Write access. */
1657 bool fWriteAccess;
1658 /** Output: Set if we did something. */
1659 bool fDidSomething;
1660 /** Output: Set if we should resume. */
1661 bool fCanResume;
1662} NEMHCWINHMACPCCSTATE;
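/* Usage sketch: the memory exit handlers below set fWriteAccess, pass the structure as
   pvUser to PGMPhysNemPageInfoChecker, and restart the instruction when fCanResume comes
   back set instead of falling back to instruction emulation. */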
1663
1664/**
1665 * @callback_method_impl{FNPGMPHYSNEMCHECKPAGE,
1666 * Worker for nemR3WinHandleMemoryAccess; pvUser points to a
1667 * NEMHCWINHMACPCCSTATE structure. }
1668 */
1669NEM_TMPL_STATIC DECLCALLBACK(int)
1670nemHCWinHandleMemoryAccessPageCheckerCallback(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
1671{
1672 NEMHCWINHMACPCCSTATE *pState = (NEMHCWINHMACPCCSTATE *)pvUser;
1673 pState->fDidSomething = false;
1674 pState->fCanResume = false;
1675
1676 /* If A20 is disabled, we may need to make another query on the masked
1677 page to get the correct protection information. */
1678 uint8_t u2State = pInfo->u2NemState;
1679 RTGCPHYS GCPhysSrc;
1680# ifdef NEM_WIN_WITH_A20
1681 if ( pVM->nem.s.fA20Enabled
1682 || !NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
1683# endif
1684 GCPhysSrc = GCPhys;
1685# ifdef NEM_WIN_WITH_A20
1686 else
1687 {
1688 GCPhysSrc = GCPhys & ~(RTGCPHYS)RT_BIT_32(20);
1689 PGMPHYSNEMPAGEINFO Info2;
1690 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhysSrc, pState->fWriteAccess, &Info2, NULL, NULL);
1691 AssertRCReturn(rc, rc);
1692
1693 *pInfo = Info2;
1694 pInfo->u2NemState = u2State;
1695 }
1696# endif
1697
1698 /*
1699 * Consolidate current page state with actual page protection and access type.
1700 * We don't really consider downgrades here, as they shouldn't happen.
1701 */
1702# ifndef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1703 /** @todo Someone at Microsoft please explain:
1704 * I'm not sure WTF was going on, but I ended up in a loop if I remapped a
1705 * readonly page as writable (unmap, then map again). Specifically, this was an
1706 * issue with the big VRAM mapping at 0xe0000000 when booting DSL 4.4.1. So, in
1707 * the hope of working around that, we no longer pre-map anything, just unmap stuff
1708 * and do it lazily here. And here we will first unmap, restart, and then remap
1709 * with new protection or backing.
1710 */
1711# endif
1712 int rc;
1713 switch (u2State)
1714 {
1715 case NEM_WIN_PAGE_STATE_UNMAPPED:
1716 case NEM_WIN_PAGE_STATE_NOT_SET:
1717 if (pInfo->fNemProt == NEM_PAGE_PROT_NONE)
1718 {
1719 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #1\n", GCPhys));
1720 return VINF_SUCCESS;
1721 }
1722
1723 /* Don't bother remapping it if it's a write request to a non-writable page. */
1724 if ( pState->fWriteAccess
1725 && !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE))
1726 {
1727 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #1w\n", GCPhys));
1728 return VINF_SUCCESS;
1729 }
1730
1731 /* Map the page. */
1732 rc = nemHCNativeSetPhysPage(pVM,
1733 pVCpu,
1734 GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
1735 GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
1736 pInfo->fNemProt,
1737 &u2State,
1738 true /*fBackingState*/);
1739 pInfo->u2NemState = u2State;
1740 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - synced => %s + %Rrc\n",
1741 GCPhys, g_apszPageStates[u2State], rc));
1742 pState->fDidSomething = true;
1743 pState->fCanResume = true;
1744 return rc;
1745
1746 case NEM_WIN_PAGE_STATE_READABLE:
1747 if ( !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1748 && (pInfo->fNemProt & (NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE)))
1749 {
1750 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #2\n", GCPhys));
1751 return VINF_SUCCESS;
1752 }
1753
1754# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1755 /* Upgrade page to writable. */
1756/** @todo test this*/
1757 if ( (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1758 && pState->fWriteAccess)
1759 {
1760 rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhys,
1761 HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE
1762 | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
1763 AssertRC(rc);
1764 if (RT_SUCCESS(rc))
1765 {
1766 STAM_REL_COUNTER_INC(&pVM->nem.s.StatRemapPage);
1767 pInfo->u2NemState = NEM_WIN_PAGE_STATE_WRITABLE;
1768 pState->fDidSomething = true;
1769 pState->fCanResume = true;
1770 Log5(("NEM GPA write-upgrade/exit: %RGp (was %s, cMappedPages=%u)\n",
1771 GCPhys, g_apszPageStates[u2State], pVM->nem.s.cMappedPages));
1772 }
1773 else
1774 STAM_REL_COUNTER_INC(&pVM->nem.s.StatRemapPageFailed);
1775 }
1776 else
1777 {
1778 /* Need to emulate the access. */
1779 AssertBreak(pInfo->fNemProt != NEM_PAGE_PROT_NONE); /* There should be no downgrades. */
1780 rc = VINF_SUCCESS;
1781 }
1782 return rc;
1783# else
1784 break;
1785# endif
1786
1787 case NEM_WIN_PAGE_STATE_WRITABLE:
1788 if (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1789 {
1790 if (pInfo->u2OldNemState == NEM_WIN_PAGE_STATE_WRITABLE)
1791 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #3a\n", GCPhys));
1792 else
1793 {
1794 pState->fCanResume = true;
1795 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #3b (%s -> %s)\n",
1796 GCPhys, g_apszPageStates[pInfo->u2OldNemState], g_apszPageStates[u2State]));
1797 }
1798 return VINF_SUCCESS;
1799 }
1800# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1801 AssertFailed(); /* There should be no downgrades. */
1802# endif
1803 break;
1804
1805 default:
1806 AssertLogRelMsgFailedReturn(("u2State=%#x\n", u2State), VERR_NEM_IPE_4);
1807 }
1808
1809 /*
1810 * Unmap and restart the instruction.
1811 * If this fails, which it does every so often, just unmap everything for now.
1812 */
1813# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1814 rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys);
1815 AssertRC(rc);
1816 if (RT_SUCCESS(rc))
1817# else
1818 /** @todo figure out whether we mess up the state or if it's WHv. */
1819 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
1820 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
1821 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
1822 if (SUCCEEDED(hrc))
1823# endif
1824 {
1825 pState->fDidSomething = true;
1826 pState->fCanResume = true;
1827 pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
1828 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
1829 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
1830 Log5(("NEM GPA unmapped/exit: %RGp (was %s, cMappedPages=%u)\n", GCPhys, g_apszPageStates[u2State], cMappedPages));
1831 return VINF_SUCCESS;
1832 }
1833 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
1834# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1835 LogRel(("nemHCWinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhys, rc));
1836 return rc;
1837# elif defined(VBOX_WITH_PGM_NEM_MODE)
1838 LogRel(("nemHCWinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp %s hrc=%Rhrc (%#x)\n",
1839 GCPhys, g_apszPageStates[u2State], hrc, hrc));
1840 return VERR_NEM_UNMAP_PAGES_FAILED;
1841# else
1842 LogRel(("nemHCWinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp %s hrc=%Rhrc (%#x) Last=%#x/%u (cMappedPages=%u)\n",
1843 GCPhys, g_apszPageStates[u2State], hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue(),
1844 pVM->nem.s.cMappedPages));
1845
1846 PGMPhysNemEnumPagesByState(pVM, pVCpu, NEM_WIN_PAGE_STATE_READABLE, nemHCWinUnmapOnePageCallback, NULL);
1847 Log(("nemHCWinHandleMemoryAccessPageCheckerCallback: Unmapped all (cMappedPages=%u)\n", pVM->nem.s.cMappedPages));
1848 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapAllPages);
1849
1850 pState->fDidSomething = true;
1851 pState->fCanResume = true;
1852 pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
1853 return VINF_SUCCESS;
1854# endif
1855}
1856
1857#endif /* defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING3) */
1858
1859
1860#if defined(IN_RING0) && defined(NEM_WIN_TEMPLATE_MODE_OWN_RUN_API)
1861/**
1862 * Wrapper around nemR0WinImportState that converts VERR_NEM_FLUSH_TLB
1863 * into informational status codes and logs+asserts statuses.
1864 *
1865 * @returns VBox strict status code.
1866 * @param pGVM The global (ring-0) VM structure.
1867 * @param pGVCpu The global (ring-0) per CPU structure.
1868 * @param fWhat What to import.
1869 * @param pszCaller Who is doing the importing.
1870 */
1871DECLINLINE(VBOXSTRICTRC) nemR0WinImportStateStrict(PGVM pGVM, PGVMCPU pGVCpu, uint64_t fWhat, const char *pszCaller)
1872{
1873 int rc = nemR0WinImportState(pGVM, pGVCpu, &pGVCpu->cpum.GstCtx, fWhat, true /*fCanUpdateCr3*/);
1874 if (RT_SUCCESS(rc))
1875 {
1876 Assert(rc == VINF_SUCCESS);
1877 return VINF_SUCCESS;
1878 }
1879
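/* VERR_NEM_FLUSH_TLB is negative, so negating it yields the corresponding positive
   (informational) flush status, assuming the usual paired VINF/VERR definition. */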
1880 if (rc == VERR_NEM_FLUSH_TLB)
1881 {
1882 Log4(("%s/%u: nemR0WinImportState -> %Rrc\n", pszCaller, pGVCpu->idCpu, -rc));
1883 return -rc;
1884 }
1885 RT_NOREF(pszCaller);
1886 AssertMsgFailedReturn(("%s/%u: nemR0WinImportState failed: %Rrc\n", pszCaller, pGVCpu->idCpu, rc), rc);
1887}
1888#endif /* IN_RING0 && NEM_WIN_TEMPLATE_MODE_OWN_RUN_API*/
1889
1890#if defined(NEM_WIN_TEMPLATE_MODE_OWN_RUN_API) || defined(IN_RING3)
1891/**
1892 * Wrapper around nemR0WinImportStateStrict and nemHCWinCopyStateFromHyperV.
1893 *
1894 * Unlike the wrapped APIs, this checks whether it's necessary.
1895 *
1896 * @returns VBox strict status code.
1897 * @param pVCpu The cross context per CPU structure.
1898 * @param fWhat What to import.
1899 * @param pszCaller Who is doing the importing.
1900 */
1901DECLINLINE(VBOXSTRICTRC) nemHCWinImportStateIfNeededStrict(PVMCPUCC pVCpu, uint64_t fWhat, const char *pszCaller)
1902{
1903 if (pVCpu->cpum.GstCtx.fExtrn & fWhat)
1904 {
1905# ifdef IN_RING0
1906 return nemR0WinImportStateStrict(pVCpu->pGVM, pVCpu, fWhat, pszCaller);
1907# else
1908 RT_NOREF(pszCaller);
1909 int rc = nemHCWinCopyStateFromHyperV(pVCpu->pVMR3, pVCpu, fWhat);
1910 AssertRCReturn(rc, rc);
1911# endif
1912 }
1913 return VINF_SUCCESS;
1914}
1915#endif /* NEM_WIN_TEMPLATE_MODE_OWN_RUN_API || IN_RING3 */
1916
1917#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
1918/**
1919 * Copies register state from the X64 intercept message header.
1920 *
1921 * ASSUMES no state copied yet.
1922 *
1923 * @param pVCpu The cross context per CPU structure.
1924 * @param pHdr The X64 intercept message header.
1925 * @sa nemR3WinCopyStateFromX64Header
1926 */
1927DECLINLINE(void) nemHCWinCopyStateFromX64Header(PVMCPUCC pVCpu, HV_X64_INTERCEPT_MESSAGE_HEADER const *pHdr)
1928{
1929 Assert( (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_INHIBIT_INT))
1930 == (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_INHIBIT_INT));
1931 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.cs, pHdr->CsSegment);
1932 pVCpu->cpum.GstCtx.rip = pHdr->Rip;
1933 pVCpu->cpum.GstCtx.rflags.u = pHdr->Rflags;
1934
1935 pVCpu->nem.s.fLastInterruptShadow = pHdr->ExecutionState.InterruptShadow;
1936 if (!pHdr->ExecutionState.InterruptShadow)
1937 {
1938 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1939 { /* likely */ }
1940 else
1941 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1942 }
1943 else
1944 EMSetInhibitInterruptsPC(pVCpu, pHdr->Rip);
1945
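/* CR8 only exposes bits 7:4 of the APIC TPR, hence the shift to rebuild the 8-bit value. */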
1946 APICSetTpr(pVCpu, pHdr->Cr8 << 4);
1947
1948 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_APIC_TPR);
1949}
1950#elif defined(IN_RING3)
1951/**
1952 * Copies register state from the (common) exit context.
1953 *
1954 * ASSUMES no state copied yet.
1955 *
1956 * @param pVCpu The cross context per CPU structure.
1957 * @param pExitCtx The common exit context.
1958 * @sa nemHCWinCopyStateFromX64Header
1959 */
1960DECLINLINE(void) nemR3WinCopyStateFromX64Header(PVMCPUCC pVCpu, WHV_VP_EXIT_CONTEXT const *pExitCtx)
1961{
1962 Assert( (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_INHIBIT_INT))
1963 == (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_INHIBIT_INT));
1964 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.cs, pExitCtx->Cs);
1965 pVCpu->cpum.GstCtx.rip = pExitCtx->Rip;
1966 pVCpu->cpum.GstCtx.rflags.u = pExitCtx->Rflags;
1967
1968 pVCpu->nem.s.fLastInterruptShadow = pExitCtx->ExecutionState.InterruptShadow;
1969 if (!pExitCtx->ExecutionState.InterruptShadow)
1970 {
1971 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1972 { /* likely */ }
1973 else
1974 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1975 }
1976 else
1977 EMSetInhibitInterruptsPC(pVCpu, pExitCtx->Rip);
1978
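/* As in the VID variant: CR8 holds the TPR's top nibble, so shift it back up for the APIC. */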
1979 APICSetTpr(pVCpu, pExitCtx->Cr8 << 4);
1980
1981 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_APIC_TPR);
1982}
1983#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
1984
1985
1986#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
1987/**
1988 * Deals with memory intercept message.
1989 *
1990 * @returns Strict VBox status code.
1991 * @param pVM The cross context VM structure.
1992 * @param pVCpu The cross context per CPU structure.
1993 * @param pMsg The message.
1994 * @sa nemR3WinHandleExitMemory
1995 */
1996NEM_TMPL_STATIC VBOXSTRICTRC
1997nemHCWinHandleMessageMemory(PVMCC pVM, PVMCPUCC pVCpu, HV_X64_MEMORY_INTERCEPT_MESSAGE const *pMsg)
1998{
1999 uint64_t const uHostTsc = ASMReadTSC();
2000 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
2001 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
2002 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_EXECUTE);
2003
2004 /*
2005 * Whatever we do, we must clear pending event injection upon resume.
2006 */
2007 if (pMsg->Header.ExecutionState.InterruptionPending)
2008 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
2009
2010# if 0 /* Experiment: 20K -> 34K exit/s. */
2011 if ( pMsg->Header.ExecutionState.EferLma
2012 && pMsg->Header.CsSegment.Long
2013 && pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
2014 {
2015 if ( pMsg->Header.Rip - (uint64_t)0xf65a < (uint64_t)(0xf662 - 0xf65a)
2016 && pMsg->InstructionBytes[0] == 0x89
2017 && pMsg->InstructionBytes[1] == 0x03)
2018 {
2019 pVCpu->cpum.GstCtx.rip = pMsg->Header.Rip + 2;
2020 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
2021 AssertMsg(pMsg->Header.InstructionLength == 2, ("%#x\n", pMsg->Header.InstructionLength));
2022 //Log(("%RX64 msg:\n%.80Rhxd\n", pVCpu->cpum.GstCtx.rip, pMsg));
2023 return VINF_SUCCESS;
2024 }
2025 }
2026# endif
2027
2028 /*
2029 * Ask PGM for information about the given GCPhys. We need to check if we're
2030 * out of sync first.
2031 */
2032 NEMHCWINHMACPCCSTATE State = { pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE, false, false };
2033 PGMPHYSNEMPAGEINFO Info;
2034 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pMsg->GuestPhysicalAddress, State.fWriteAccess, &Info,
2035 nemHCWinHandleMemoryAccessPageCheckerCallback, &State);
2036 if (RT_SUCCESS(rc))
2037 {
2038 if (Info.fNemProt & ( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
2039 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
2040 {
2041 if (State.fCanResume)
2042 {
2043 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting (%s)\n",
2044 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2045 pMsg->GuestPhysicalAddress, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
2046 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
2047 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pMsg->Header.InterceptAccessType]));
2048 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
2049 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, uHostTsc);
2050 return VINF_SUCCESS;
2051 }
2052 }
2053 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating (%s)\n",
2054 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2055 pMsg->GuestPhysicalAddress, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
2056 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
2057 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pMsg->Header.InterceptAccessType]));
2058 }
2059 else
2060 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp rc=%Rrc%s; emulating (%s)\n",
2061 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2062 pMsg->GuestPhysicalAddress, rc, State.fDidSomething ? " modified-backing" : "",
2063 g_apszHvInterceptAccessTypes[pMsg->Header.InterceptAccessType]));
2064
2065 /*
2066 * Emulate the memory access, either access handler or special memory.
2067 */
2068 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2069 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
2070 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
2071 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
2072 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, uHostTsc);
2073 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
2074 VBOXSTRICTRC rcStrict;
2075# ifdef IN_RING0
2076 rcStrict = nemR0WinImportStateStrict(pVM, pVCpu,
2077 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES, "MemExit");
2078 if (rcStrict != VINF_SUCCESS)
2079 return rcStrict;
2080# else
2081 rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2082 AssertRCReturn(rc, rc);
2083# endif
2084
2085 if (pMsg->Reserved1)
2086 Log(("MemExit/Reserved1=%#x\n", pMsg->Reserved1));
2087 if (pMsg->Header.ExecutionState.Reserved0 || pMsg->Header.ExecutionState.Reserved1)
2088 Log(("MemExit/Hdr/State: Reserved0=%#x Reserved1=%#x\n", pMsg->Header.ExecutionState.Reserved0, pMsg->Header.ExecutionState.Reserved1));
2089
2090 if (!pExitRec)
2091 {
2092 //if (pMsg->InstructionByteCount > 0)
2093 // Log4(("InstructionByteCount=%#x %.16Rhxs\n", pMsg->InstructionByteCount, pMsg->InstructionBytes));
2094 if (pMsg->InstructionByteCount > 0)
2095 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pMsg->Header.Rip,
2096 pMsg->InstructionBytes, pMsg->InstructionByteCount);
2097 else
2098 rcStrict = IEMExecOne(pVCpu);
2099 /** @todo do we need to do anything wrt debugging here? */
2100 }
2101 else
2102 {
2103 /* Frequent access or probing. */
2104 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2105 Log4(("MemExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2106 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2107 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2108 }
2109 return rcStrict;
2110}
2111#elif defined(IN_RING3)
2112/**
2113 * Deals with memory access exits (WHvRunVpExitReasonMemoryAccess).
2114 *
2115 * @returns Strict VBox status code.
2116 * @param pVM The cross context VM structure.
2117 * @param pVCpu The cross context per CPU structure.
2118 * @param pExit The VM exit information to handle.
2119 * @sa nemHCWinHandleMessageMemory
2120 */
2121NEM_TMPL_STATIC VBOXSTRICTRC
2122nemR3WinHandleExitMemory(PVMCC pVM, PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2123{
2124 uint64_t const uHostTsc = ASMReadTSC();
2125 Assert(pExit->MemoryAccess.AccessInfo.AccessType != 3);
2126
2127 /*
2128 * Whatever we do, we must clear pending event injection upon resume.
2129 */
2130 if (pExit->VpContext.ExecutionState.InterruptionPending)
2131 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
2132
2133 /*
2134 * Ask PGM for information about the given GCPhys. We need to check if we're
2135 * out of sync first.
2136 */
2137 NEMHCWINHMACPCCSTATE State = { pExit->MemoryAccess.AccessInfo.AccessType == WHvMemoryAccessWrite, false, false };
2138 PGMPHYSNEMPAGEINFO Info;
2139 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pExit->MemoryAccess.Gpa, State.fWriteAccess, &Info,
2140 nemHCWinHandleMemoryAccessPageCheckerCallback, &State);
2141 if (RT_SUCCESS(rc))
2142 {
2143 if (Info.fNemProt & ( pExit->MemoryAccess.AccessInfo.AccessType == WHvMemoryAccessWrite
2144 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
2145 {
2146 if (State.fCanResume)
2147 {
2148 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting (%s)\n",
2149 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2150 pExit->MemoryAccess.Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
2151 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
2152 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pExit->MemoryAccess.AccessInfo.AccessType]));
2153 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
2154 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, uHostTsc);
2155 return VINF_SUCCESS;
2156 }
2157 }
2158 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating (%s)\n",
2159 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2160 pExit->MemoryAccess.Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
2161 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
2162 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pExit->MemoryAccess.AccessInfo.AccessType]));
2163 }
2164 else
2165 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp rc=%Rrc%s; emulating (%s)\n",
2166 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2167 pExit->MemoryAccess.Gpa, rc, State.fDidSomething ? " modified-backing" : "",
2168 g_apszHvInterceptAccessTypes[pExit->MemoryAccess.AccessInfo.AccessType]));
2169
2170 /*
2171 * Emulate the memory access, either access handler or special memory.
2172 */
2173 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2174 pExit->MemoryAccess.AccessInfo.AccessType == WHvMemoryAccessWrite
2175 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
2176 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
2177 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, uHostTsc);
2178 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2179 rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2180 AssertRCReturn(rc, rc);
2181 if (pExit->VpContext.ExecutionState.Reserved0 || pExit->VpContext.ExecutionState.Reserved1)
2182 Log(("MemExit/Hdr/State: Reserved0=%#x Reserved1=%#x\n", pExit->VpContext.ExecutionState.Reserved0, pExit->VpContext.ExecutionState.Reserved1));
2183
2184 VBOXSTRICTRC rcStrict;
2185 if (!pExitRec)
2186 {
2187 //if (pExit->MemoryAccess.InstructionByteCount > 0)
2188 // Log4(("InstructionByteCount=%#x %.16Rhxs\n", pExit->MemoryAccess.InstructionByteCount, pExit->MemoryAccess.InstructionBytes));
2189 if (pExit->MemoryAccess.InstructionByteCount > 0)
2190 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pExit->VpContext.Rip,
2191 pExit->MemoryAccess.InstructionBytes, pExit->MemoryAccess.InstructionByteCount);
2192 else
2193 rcStrict = IEMExecOne(pVCpu);
2194 /** @todo do we need to do anything wrt debugging here? */
2195 }
2196 else
2197 {
2198 /* Frequent access or probing. */
2199 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2200 Log4(("MemExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2201 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2202 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2203 }
2204 return rcStrict;
2205}
2206#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
2207
2208
2209#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
2210/**
2211 * Deals with I/O port intercept message.
2212 *
2213 * @returns Strict VBox status code.
2214 * @param pVM The cross context VM structure.
2215 * @param pVCpu The cross context per CPU structure.
2216 * @param pMsg The message.
2217 */
2218NEM_TMPL_STATIC VBOXSTRICTRC
2219nemHCWinHandleMessageIoPort(PVMCC pVM, PVMCPUCC pVCpu, HV_X64_IO_PORT_INTERCEPT_MESSAGE const *pMsg)
2220{
2221 /*
2222 * Assert message sanity.
2223 */
2224 Assert( pMsg->AccessInfo.AccessSize == 1
2225 || pMsg->AccessInfo.AccessSize == 2
2226 || pMsg->AccessInfo.AccessSize == 4);
2227 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
2228 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE);
2229 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
2230 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRip, pMsg->Header.Rip);
2231 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
2232 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
2233 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRax, pMsg->Rax);
2234 if (pMsg->AccessInfo.StringOp)
2235 {
2236 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, HvX64RegisterDs, pMsg->DsSegment);
2237 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, HvX64RegisterEs, pMsg->EsSegment);
2238 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRcx, pMsg->Rcx);
2239 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRsi, pMsg->Rsi);
2240 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRdi, pMsg->Rdi);
2241 }
2242
2243 /*
2244 * Whatever we do, we must clear pending event injection upon resume.
2245 */
2246 if (pMsg->Header.ExecutionState.InterruptionPending)
2247 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
2248
2249 /*
2250 * Add history first to avoid two paths doing EMHistoryExec calls.
2251 */
2252 VBOXSTRICTRC rcStrict;
2253 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2254 !pMsg->AccessInfo.StringOp
2255 ? ( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
2256 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_WRITE)
2257 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_READ))
2258 : ( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
2259 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_WRITE)
2260 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_READ)),
2261 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
2262 if (!pExitRec)
2263 {
2264 if (!pMsg->AccessInfo.StringOp)
2265 {
2266 /*
2267 * Simple port I/O.
2268 */
2269 static uint32_t const s_fAndMask[8] =
2270 { UINT32_MAX, UINT32_C(0xff), UINT32_C(0xffff), UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX };
2271 uint32_t const fAndMask = s_fAndMask[pMsg->AccessInfo.AccessSize];
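/* AccessSize (asserted above to be 1, 2 or 4) indexes the table: 1 -> 0xff, 2 -> 0xffff,
   4 -> UINT32_MAX; the remaining entries are defensive padding. */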
2272
2273 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
2274 if (pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
2275 {
2276 rcStrict = IOMIOPortWrite(pVM, pVCpu, pMsg->PortNumber, (uint32_t)pMsg->Rax & fAndMask, pMsg->AccessInfo.AccessSize);
2277 Log4(("IOExit/%u: %04x:%08RX64/%s: OUT %#x, %#x LB %u rcStrict=%Rrc\n",
2278 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2279 pMsg->PortNumber, (uint32_t)pMsg->Rax & fAndMask, pMsg->AccessInfo.AccessSize, VBOXSTRICTRC_VAL(rcStrict) ));
2280 if (IOM_SUCCESS(rcStrict))
2281 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, &pMsg->Header, 1);
2282# ifdef IN_RING0
2283 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
2284 && !pVCpu->cpum.GstCtx.rflags.Bits.u1TF
2285 /** @todo check for debug breakpoints */ )
2286 return EMRZSetPendingIoPortWrite(pVCpu, pMsg->PortNumber, pMsg->Header.InstructionLength,
2287 pMsg->AccessInfo.AccessSize, (uint32_t)pMsg->Rax & fAndMask);
2288# endif
2289 else
2290 {
2291 pVCpu->cpum.GstCtx.rax = pMsg->Rax;
2292 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RAX;
2293 }
2294 }
2295 else
2296 {
2297 uint32_t uValue = 0;
2298 rcStrict = IOMIOPortRead(pVM, pVCpu, pMsg->PortNumber, &uValue, pMsg->AccessInfo.AccessSize);
2299 Log4(("IOExit/%u: %04x:%08RX64/%s: IN %#x LB %u -> %#x, rcStrict=%Rrc\n",
2300 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2301 pMsg->PortNumber, pMsg->AccessInfo.AccessSize, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2302 if (IOM_SUCCESS(rcStrict))
2303 {
2304 if (pMsg->AccessInfo.AccessSize != 4)
2305 pVCpu->cpum.GstCtx.rax = (pMsg->Rax & ~(uint64_t)fAndMask) | (uValue & fAndMask);
2306 else
2307 pVCpu->cpum.GstCtx.rax = uValue;
2308 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RAX;
2309 Log4(("IOExit/%u: RAX %#RX64 -> %#RX64\n", pVCpu->idCpu, pMsg->Rax, pVCpu->cpum.GstCtx.rax));
2310 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, &pMsg->Header, 1);
2311 }
2312 else
2313 {
2314 pVCpu->cpum.GstCtx.rax = pMsg->Rax;
2315 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RAX;
2316# ifdef IN_RING0
2317 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
2318 && !pVCpu->cpum.GstCtx.rflags.Bits.u1TF
2319 /** @todo check for debug breakpoints */ )
2320 return EMRZSetPendingIoPortRead(pVCpu, pMsg->PortNumber, pMsg->Header.InstructionLength,
2321 pMsg->AccessInfo.AccessSize);
2322# endif
2323 }
2324 }
2325 }
2326 else
2327 {
2328 /*
2329 * String port I/O.
2330 */
2331 /** @todo Someone at Microsoft please explain how we can get the address mode
2332 * from the IoPortAccess.VpContext. CS.Attributes is only sufficient for
2333 * getting the default mode, it can always be overridden by a prefix. This
2334 * forces us to interpret the instruction from opcodes, which is suboptimal.
2336 * Both AMD-V and VT-x include the address size in the exit info, at least on
2336 * CPUs that are reasonably new.
2337 *
2338 * Of course, it's possible this is undocumented and we just need to do some
2339 * experiments to figure out how it's communicated. Alternatively, we can scan
2340 * the opcode bytes for possible evil prefixes.
2341 */
2342 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
2343 pVCpu->cpum.GstCtx.fExtrn &= ~( CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDI | CPUMCTX_EXTRN_RSI
2344 | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2345 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.ds, pMsg->DsSegment);
2346 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.es, pMsg->EsSegment);
2347 pVCpu->cpum.GstCtx.rax = pMsg->Rax;
2348 pVCpu->cpum.GstCtx.rcx = pMsg->Rcx;
2349 pVCpu->cpum.GstCtx.rdi = pMsg->Rdi;
2350 pVCpu->cpum.GstCtx.rsi = pMsg->Rsi;
2351# ifdef IN_RING0
2352 rcStrict = nemR0WinImportStateStrict(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "IOExit");
2353 if (rcStrict != VINF_SUCCESS)
2354 return rcStrict;
2355# else
2356 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2357 AssertRCReturn(rc, rc);
2358# endif
2359
2360 Log4(("IOExit/%u: %04x:%08RX64/%s: %s%s %#x LB %u (emulating)\n",
2361 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2362 pMsg->AccessInfo.RepPrefix ? "REP " : "",
2363 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? "OUTS" : "INS",
2364 pMsg->PortNumber, pMsg->AccessInfo.AccessSize ));
2365 rcStrict = IEMExecOne(pVCpu);
2366 }
2367 if (IOM_SUCCESS(rcStrict))
2368 {
2369 /*
2370 * Do debug checks.
2371 */
2372 if ( pMsg->Header.ExecutionState.DebugActive /** @todo Microsoft: Does DebugActive only reflect DR7? */
2373 || (pMsg->Header.Rflags & X86_EFL_TF)
2374 || DBGFBpIsHwIoArmed(pVM) )
2375 {
2376 /** @todo Debugging. */
2377 }
2378 }
2379 return rcStrict;
2380 }
2381
2382 /*
2383 * Frequent exit or something needing probing.
2384 * Get state and call EMHistoryExec.
2385 */
2386 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
2387 if (!pMsg->AccessInfo.StringOp)
2388 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RAX;
2389 else
2390 {
2391 pVCpu->cpum.GstCtx.fExtrn &= ~( CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDI | CPUMCTX_EXTRN_RSI
2392 | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2393 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.ds, pMsg->DsSegment);
2394 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.es, pMsg->EsSegment);
2395 pVCpu->cpum.GstCtx.rcx = pMsg->Rcx;
2396 pVCpu->cpum.GstCtx.rdi = pMsg->Rdi;
2397 pVCpu->cpum.GstCtx.rsi = pMsg->Rsi;
2398 }
2399 pVCpu->cpum.GstCtx.rax = pMsg->Rax;
2400
2401# ifdef IN_RING0
2402 rcStrict = nemR0WinImportStateStrict(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "IOExit");
2403 if (rcStrict != VINF_SUCCESS)
2404 return rcStrict;
2405# else
2406 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2407 AssertRCReturn(rc, rc);
2408# endif
2409
2410 Log4(("IOExit/%u: %04x:%08RX64/%s: %s%s%s %#x LB %u -> EMHistoryExec\n",
2411 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2412 pMsg->AccessInfo.RepPrefix ? "REP " : "",
2413 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? "OUT" : "IN",
2414 pMsg->AccessInfo.StringOp ? "S" : "",
2415 pMsg->PortNumber, pMsg->AccessInfo.AccessSize));
2416 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2417 Log4(("IOExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2418 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2419 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2420 return rcStrict;
2421}
2422#elif defined(IN_RING3)
2423/**
2424 * Deals with I/O port access exits (WHvRunVpExitReasonX64IoPortAccess).
2425 *
2426 * @returns Strict VBox status code.
2427 * @param pVM The cross context VM structure.
2428 * @param pVCpu The cross context per CPU structure.
2429 * @param pExit The VM exit information to handle.
2430 * @sa nemHCWinHandleMessageIoPort
2431 */
2432NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExitIoPort(PVMCC pVM, PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2433{
2434 Assert( pExit->IoPortAccess.AccessInfo.AccessSize == 1
2435 || pExit->IoPortAccess.AccessInfo.AccessSize == 2
2436 || pExit->IoPortAccess.AccessInfo.AccessSize == 4);
2437
2438 /*
2439 * Whatever we do, we must clear pending event injection upon resume.
2440 */
2441 if (pExit->VpContext.ExecutionState.InterruptionPending)
2442 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
2443
2444 /*
2445 * Add history first to avoid two paths doing EMHistoryExec calls.
2446 */
2447 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2448 !pExit->IoPortAccess.AccessInfo.StringOp
2449 ? ( pExit->IoPortAccess.AccessInfo.IsWrite
2450 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_WRITE)
2451 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_READ))
2452 : ( pExit->IoPortAccess.AccessInfo.IsWrite
2453 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_WRITE)
2454 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_READ)),
2455 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
2456 if (!pExitRec)
2457 {
2458 VBOXSTRICTRC rcStrict;
2459 if (!pExit->IoPortAccess.AccessInfo.StringOp)
2460 {
2461 /*
2462 * Simple port I/O.
2463 */
2464 static uint32_t const s_fAndMask[8] =
2465 { UINT32_MAX, UINT32_C(0xff), UINT32_C(0xffff), UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX };
2466 uint32_t const fAndMask = s_fAndMask[pExit->IoPortAccess.AccessInfo.AccessSize];
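/* Same masking scheme as the VID handler: AccessSize 1 -> 0xff, 2 -> 0xffff, 4 -> UINT32_MAX. */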
2467 if (pExit->IoPortAccess.AccessInfo.IsWrite)
2468 {
2469 rcStrict = IOMIOPortWrite(pVM, pVCpu, pExit->IoPortAccess.PortNumber,
2470 (uint32_t)pExit->IoPortAccess.Rax & fAndMask,
2471 pExit->IoPortAccess.AccessInfo.AccessSize);
2472 Log4(("IOExit/%u: %04x:%08RX64/%s: OUT %#x, %#x LB %u rcStrict=%Rrc\n",
2473 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2474 pExit->IoPortAccess.PortNumber, (uint32_t)pExit->IoPortAccess.Rax & fAndMask,
2475 pExit->IoPortAccess.AccessInfo.AccessSize, VBOXSTRICTRC_VAL(rcStrict) ));
2476 if (IOM_SUCCESS(rcStrict))
2477 {
2478 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2479 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, &pExit->VpContext, 1);
2480 }
2481 }
2482 else
2483 {
2484 uint32_t uValue = 0;
2485 rcStrict = IOMIOPortRead(pVM, pVCpu, pExit->IoPortAccess.PortNumber, &uValue,
2486 pExit->IoPortAccess.AccessInfo.AccessSize);
2487 Log4(("IOExit/%u: %04x:%08RX64/%s: IN %#x LB %u -> %#x, rcStrict=%Rrc\n",
2488 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2489 pExit->IoPortAccess.PortNumber, pExit->IoPortAccess.AccessInfo.AccessSize, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2490 if (IOM_SUCCESS(rcStrict))
2491 {
2492 if (pExit->IoPortAccess.AccessInfo.AccessSize != 4)
2493 pVCpu->cpum.GstCtx.rax = (pExit->IoPortAccess.Rax & ~(uint64_t)fAndMask) | (uValue & fAndMask);
2494 else
2495 pVCpu->cpum.GstCtx.rax = uValue;
2496 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RAX;
2497 Log4(("IOExit/%u: RAX %#RX64 -> %#RX64\n", pVCpu->idCpu, pExit->IoPortAccess.Rax, pVCpu->cpum.GstCtx.rax));
2498 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2499 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, &pExit->VpContext, 1);
2500 }
2501 }
2502 }
2503 else
2504 {
2505 /*
2506 * String port I/O.
2507 */
2508 /** @todo Someone at Microsoft please explain how we can get the address mode
2509 * from the IoPortAccess.VpContext. CS.Attributes is only sufficient for
2510 * getting the default mode, it can always be overridden by a prefix. This
2511 * forces us to interpret the instruction from opcodes, which is suboptimal.
2512 * Both AMD-V and VT-x include the address size in the exit info, at least on
2513 * CPUs that are reasonably new.
2514 *
2515 * Of course, it's possible this is undocumented and we just need to do some
2516 * experiments to figure out how it's communicated. Alternatively, we can scan
2517 * the opcode bytes for possible evil prefixes.
2518 */
2519 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2520 pVCpu->cpum.GstCtx.fExtrn &= ~( CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDI | CPUMCTX_EXTRN_RSI
2521 | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2522 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.ds, pExit->IoPortAccess.Ds);
2523 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.es, pExit->IoPortAccess.Es);
2524 pVCpu->cpum.GstCtx.rax = pExit->IoPortAccess.Rax;
2525 pVCpu->cpum.GstCtx.rcx = pExit->IoPortAccess.Rcx;
2526 pVCpu->cpum.GstCtx.rdi = pExit->IoPortAccess.Rdi;
2527 pVCpu->cpum.GstCtx.rsi = pExit->IoPortAccess.Rsi;
2528 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2529 AssertRCReturn(rc, rc);
2530
2531 Log4(("IOExit/%u: %04x:%08RX64/%s: %s%s %#x LB %u (emulating)\n",
2532 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2533 pExit->IoPortAccess.AccessInfo.RepPrefix ? "REP " : "",
2534 pExit->IoPortAccess.AccessInfo.IsWrite ? "OUTS" : "INS",
2535 pExit->IoPortAccess.PortNumber, pExit->IoPortAccess.AccessInfo.AccessSize ));
2536 rcStrict = IEMExecOne(pVCpu);
2537 }
2538 if (IOM_SUCCESS(rcStrict))
2539 {
2540 /*
2541 * Do debug checks.
2542 */
2543 if ( pExit->VpContext.ExecutionState.DebugActive /** @todo Microsoft: Does DebugActive only reflect DR7? */
2544 || (pExit->VpContext.Rflags & X86_EFL_TF)
2545 || DBGFBpIsHwIoArmed(pVM) )
2546 {
2547 /** @todo Debugging. */
2548 }
2549 }
2550 return rcStrict;
2551 }
2552
2553 /*
2554 * Frequent exit or something needing probing.
2555 * Get state and call EMHistoryExec.
2556 */
2557 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2558 if (!pExit->IoPortAccess.AccessInfo.StringOp)
2559 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RAX;
2560 else
2561 {
2562 pVCpu->cpum.GstCtx.fExtrn &= ~( CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDI | CPUMCTX_EXTRN_RSI
2563 | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2564 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.ds, pExit->IoPortAccess.Ds);
2565 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.es, pExit->IoPortAccess.Es);
2566 pVCpu->cpum.GstCtx.rcx = pExit->IoPortAccess.Rcx;
2567 pVCpu->cpum.GstCtx.rdi = pExit->IoPortAccess.Rdi;
2568 pVCpu->cpum.GstCtx.rsi = pExit->IoPortAccess.Rsi;
2569 }
2570 pVCpu->cpum.GstCtx.rax = pExit->IoPortAccess.Rax;
2571 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2572 AssertRCReturn(rc, rc);
2573 Log4(("IOExit/%u: %04x:%08RX64/%s: %s%s%s %#x LB %u -> EMHistoryExec\n",
2574 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2575 pExit->IoPortAccess.AccessInfo.RepPrefix ? "REP " : "",
2576 pExit->IoPortAccess.AccessInfo.IsWrite ? "OUT" : "IN",
2577 pExit->IoPortAccess.AccessInfo.StringOp ? "S" : "",
2578 pExit->IoPortAccess.PortNumber, pExit->IoPortAccess.AccessInfo.AccessSize));
2579 VBOXSTRICTRC rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2580 Log4(("IOExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2581 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2582 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2583 return rcStrict;
2584}
2585#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
2586
2587
2588#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
2589/**
2590 * Deals with interrupt window message.
2591 *
2592 * @returns Strict VBox status code.
2593 * @param pVM The cross context VM structure.
2594 * @param pVCpu The cross context per CPU structure.
2595 * @param pMsg The message.
2596 * @sa nemR3WinHandleExitInterruptWindow
2597 */
2598NEM_TMPL_STATIC VBOXSTRICTRC
2599nemHCWinHandleMessageInterruptWindow(PVMCC pVM, PVMCPUCC pVCpu, HV_X64_INTERRUPT_WINDOW_MESSAGE const *pMsg)
2600{
2601 /*
2602 * Assert message sanity.
2603 */
2604 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_EXECUTE
2605 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ // READ & WRITE are probably not used here
2606 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE);
2607 AssertMsg(pMsg->Type == HvX64PendingInterrupt || pMsg->Type == HvX64PendingNmi, ("%#x\n", pMsg->Type));
2608
2609 /*
2610 * Just copy the state we've got and handle it in the loop for now.
2611 */
2612 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_INTTERRUPT_WINDOW),
2613 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
2614
2615 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
2616 Log4(("IntWinExit/%u: %04x:%08RX64/%s: %u IF=%d InterruptShadow=%d\n",
2617 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2618 pMsg->Type, RT_BOOL(pMsg->Header.Rflags & X86_EFL_IF), pMsg->Header.ExecutionState.InterruptShadow));
2619
2620 /** @todo call nemHCWinHandleInterruptFF */
2621 RT_NOREF(pVM);
2622 return VINF_SUCCESS;
2623}
2624#elif defined(IN_RING3)
2625/**
2626 * Deals with interrupt window exits (WHvRunVpExitReasonX64InterruptWindow).
2627 *
2628 * @returns Strict VBox status code.
2629 * @param pVM The cross context VM structure.
2630 * @param pVCpu The cross context per CPU structure.
2631 * @param pExit The VM exit information to handle.
2632 * @sa nemHCWinHandleMessageInterruptWindow
2633 */
2634NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExitInterruptWindow(PVMCC pVM, PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2635{
2636 /*
2637 * Assert message sanity.
2638 */
2639 AssertMsg( pExit->InterruptWindow.DeliverableType == WHvX64PendingInterrupt
2640 || pExit->InterruptWindow.DeliverableType == WHvX64PendingNmi,
2641 ("%#x\n", pExit->InterruptWindow.DeliverableType));
2642
2643 /*
2644 * Just copy the state we've got and handle it in the loop for now.
2645 */
2646 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_INTTERRUPT_WINDOW),
2647 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
2648
2649 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2650 Log4(("IntWinExit/%u: %04x:%08RX64/%s: %u IF=%d InterruptShadow=%d CR8=%#x\n",
2651 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2652 pExit->InterruptWindow.DeliverableType, RT_BOOL(pExit->VpContext.Rflags & X86_EFL_IF),
2653 pExit->VpContext.ExecutionState.InterruptShadow, pExit->VpContext.Cr8));
2654
2655 /** @todo call nemHCWinHandleInterruptFF */
2656 RT_NOREF(pVM);
2657 return VINF_SUCCESS;
2658}
2659#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
2660
2661
2662#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
2663/**
2664 * Deals with CPUID intercept message.
2665 *
2666 * @returns Strict VBox status code.
2667 * @param pVM The cross context VM structure.
2668 * @param pVCpu The cross context per CPU structure.
2669 * @param pMsg The message.
2670 * @sa nemR3WinHandleExitCpuId
2671 */
2672NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageCpuId(PVMCC pVM, PVMCPUCC pVCpu, HV_X64_CPUID_INTERCEPT_MESSAGE const *pMsg)
2673{
2674 /* Check message register value sanity. */
2675 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
2676 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRip, pMsg->Header.Rip);
2677 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
2678 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
2679 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRax, pMsg->Rax);
2680 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRcx, pMsg->Rcx);
2681 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRdx, pMsg->Rdx);
2682 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRbx, pMsg->Rbx);
2683
2684 /* Do exit history. */
2685 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_CPUID),
2686 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
2687 if (!pExitRec)
2688 {
2689 /*
2690 * Soak up state and execute the instruction.
2691 *
2692 * Note! If this grows slightly more complicated, combine into an IEMExecDecodedCpuId
2693 * function and make everyone use it.
2694 */
2695 /** @todo Combine implementations into IEMExecDecodedCpuId as this will
2696 * only get weirder with nested VT-x and AMD-V support. */
2697 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
2698
2699 /* Copy in the low register values (top is always cleared). */
2700 pVCpu->cpum.GstCtx.rax = (uint32_t)pMsg->Rax;
2701 pVCpu->cpum.GstCtx.rcx = (uint32_t)pMsg->Rcx;
2702 pVCpu->cpum.GstCtx.rdx = (uint32_t)pMsg->Rdx;
2703 pVCpu->cpum.GstCtx.rbx = (uint32_t)pMsg->Rbx;
2704 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);
2705
2706 /* Get the correct values. */
2707 CPUMGetGuestCpuId(pVCpu, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx,
2708 &pVCpu->cpum.GstCtx.eax, &pVCpu->cpum.GstCtx.ebx, &pVCpu->cpum.GstCtx.ecx, &pVCpu->cpum.GstCtx.edx);
2709
2710 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: rax=%08RX64 / rcx=%08RX64 / rdx=%08RX64 / rbx=%08RX64 -> %08RX32 / %08RX32 / %08RX32 / %08RX32 (hv: %08RX64 / %08RX64 / %08RX64 / %08RX64)\n",
2711 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2712 pMsg->Rax, pMsg->Rcx, pMsg->Rdx, pMsg->Rbx,
2713 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.ebx,
2714 pMsg->DefaultResultRax, pMsg->DefaultResultRcx, pMsg->DefaultResultRdx, pMsg->DefaultResultRbx));
2715
2716 /* Advance RIP past the 2-byte CPUID instruction (0f a2) and we're done. */
2717 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, &pMsg->Header, 2);
2718
2719 return VINF_SUCCESS;
2720 }
2721
2722 /*
2723 * Frequent exit or something needing probing.
2724 * Get state and call EMHistoryExec.
2725 */
2726 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
2727 pVCpu->cpum.GstCtx.rax = pMsg->Rax;
2728 pVCpu->cpum.GstCtx.rcx = pMsg->Rcx;
2729 pVCpu->cpum.GstCtx.rdx = pMsg->Rdx;
2730 pVCpu->cpum.GstCtx.rbx = pMsg->Rbx;
2731 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);
2732 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: rax=%08RX64 / rcx=%08RX64 / rdx=%08RX64 / rbx=%08RX64 (hv: %08RX64 / %08RX64 / %08RX64 / %08RX64) ==> EMHistoryExec\n",
2733 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2734 pMsg->Rax, pMsg->Rcx, pMsg->Rdx, pMsg->Rbx,
2735 pMsg->DefaultResultRax, pMsg->DefaultResultRcx, pMsg->DefaultResultRdx, pMsg->DefaultResultRbx));
2736# ifdef IN_RING0
2737 VBOXSTRICTRC rcStrict = nemR0WinImportStateStrict(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "CpuIdExit");
2738 if (rcStrict != VINF_SUCCESS)
2739 return rcStrict;
2740 RT_NOREF(pVM);
2741# else
2742 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2743 AssertRCReturn(rc, rc);
2744# endif
2745 VBOXSTRICTRC rcStrictExec = EMHistoryExec(pVCpu, pExitRec, 0);
2746 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2747 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2748 VBOXSTRICTRC_VAL(rcStrictExec), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2749 return rcStrictExec;
2750}
2751#elif defined(IN_RING3)
2752/**
2753 * Deals with CPUID exits (WHvRunVpExitReasonX64Cpuid).
2754 *
2755 * @returns Strict VBox status code.
2756 * @param pVM The cross context VM structure.
2757 * @param pVCpu The cross context per CPU structure.
2758 * @param pExit The VM exit information to handle.
2759 * @sa nemHCWinHandleMessageCpuId
2760 */
2761NEM_TMPL_STATIC VBOXSTRICTRC
2762nemR3WinHandleExitCpuId(PVMCC pVM, PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2763{
2764 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_CPUID),
2765 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
2766 if (!pExitRec)
2767 {
2768 /*
2769 * Soak up state and execute the instruction.
2770 *
2771 * Note! If this grows slightly more complicated, combine into an IEMExecDecodedCpuId
2772 * function and make everyone use it.
2773 */
2774 /** @todo Combine implementations into IEMExecDecodedCpuId as this will
2775 * only get weirder with nested VT-x and AMD-V support. */
2776 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2777
2778 /* Copy in the low register values (top is always cleared). */
2779 pVCpu->cpum.GstCtx.rax = (uint32_t)pExit->CpuidAccess.Rax;
2780 pVCpu->cpum.GstCtx.rcx = (uint32_t)pExit->CpuidAccess.Rcx;
2781 pVCpu->cpum.GstCtx.rdx = (uint32_t)pExit->CpuidAccess.Rdx;
2782 pVCpu->cpum.GstCtx.rbx = (uint32_t)pExit->CpuidAccess.Rbx;
2783 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);
2784
2785 /* Get the correct values. */
2786 CPUMGetGuestCpuId(pVCpu, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx,
2787 &pVCpu->cpum.GstCtx.eax, &pVCpu->cpum.GstCtx.ebx, &pVCpu->cpum.GstCtx.ecx, &pVCpu->cpum.GstCtx.edx);
2788
2789 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: rax=%08RX64 / rcx=%08RX64 / rdx=%08RX64 / rbx=%08RX64 -> %08RX32 / %08RX32 / %08RX32 / %08RX32 (hv: %08RX64 / %08RX64 / %08RX64 / %08RX64)\n",
2790 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2791 pExit->CpuidAccess.Rax, pExit->CpuidAccess.Rcx, pExit->CpuidAccess.Rdx, pExit->CpuidAccess.Rbx,
2792 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.ebx,
2793 pExit->CpuidAccess.DefaultResultRax, pExit->CpuidAccess.DefaultResultRcx, pExit->CpuidAccess.DefaultResultRdx, pExit->CpuidAccess.DefaultResultRbx));
2794
2795 /* Advance RIP past the 2-byte CPUID instruction (0f a2) and we're done. */
2796 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, &pExit->VpContext, 2);
2797
2798 RT_NOREF_PV(pVM);
2799 return VINF_SUCCESS;
2800 }
2801
2802 /*
2803 * Frequent exit or something needing probing.
2804 * Get state and call EMHistoryExec.
2805 */
2806 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2807 pVCpu->cpum.GstCtx.rax = pExit->CpuidAccess.Rax;
2808 pVCpu->cpum.GstCtx.rcx = pExit->CpuidAccess.Rcx;
2809 pVCpu->cpum.GstCtx.rdx = pExit->CpuidAccess.Rdx;
2810 pVCpu->cpum.GstCtx.rbx = pExit->CpuidAccess.Rbx;
2811 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);
2812 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: rax=%08RX64 / rcx=%08RX64 / rdx=%08RX64 / rbx=%08RX64 (hv: %08RX64 / %08RX64 / %08RX64 / %08RX64) ==> EMHistoryExec\n",
2813 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2814 pExit->CpuidAccess.Rax, pExit->CpuidAccess.Rcx, pExit->CpuidAccess.Rdx, pExit->CpuidAccess.Rbx,
2815 pExit->CpuidAccess.DefaultResultRax, pExit->CpuidAccess.DefaultResultRcx, pExit->CpuidAccess.DefaultResultRdx, pExit->CpuidAccess.DefaultResultRbx));
2816 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2817 AssertRCReturn(rc, rc);
2818 VBOXSTRICTRC rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2819 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2820 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2821 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2822 return rcStrict;
2823}
2824#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
2825
2826
2827#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
2828/**
2829 * Deals with MSR intercept message.
2830 *
2831 * @returns Strict VBox status code.
2832 * @param pVCpu The cross context per CPU structure.
2833 * @param pMsg The message.
2834 * @sa nemR3WinHandleExitMsr
2835 */
2836NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageMsr(PVMCPUCC pVCpu, HV_X64_MSR_INTERCEPT_MESSAGE const *pMsg)
2837{
2838 /*
2839 * A wee bit of sanity first.
2840 */
2841 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
2842 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE);
2843 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
2844 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRip, pMsg->Header.Rip);
2845 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
2846 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
2847 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRax, pMsg->Rax);
2848 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRdx, pMsg->Rdx);
2849
2850 /*
2851 * Check CPL as that's common to both RDMSR and WRMSR.
2852 */
2853 VBOXSTRICTRC rcStrict;
2854 if (pMsg->Header.ExecutionState.Cpl == 0)
2855 {
2856 /*
2857 * Get all the MSR state. Since we're getting EFER, we also need to
2858 * get CR0, CR4 and CR3.
2859 */
2860 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2861 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
2862 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE)
2863 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ),
2864 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
2865
2866 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
2867 rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu,
2868 (!pExitRec ? 0 : IEM_CPUMCTX_EXTRN_MUST_MASK)
2869 | CPUMCTX_EXTRN_ALL_MSRS | CPUMCTX_EXTRN_CR0
2870 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4,
2871 "MSRs");
2872 if (rcStrict == VINF_SUCCESS)
2873 {
2874 if (!pExitRec)
2875 {
2876 /*
2877 * Handle writes.
2878 */
2879 if (pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
2880 {
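     /* WRMSR takes its input in EDX:EAX, so merge the two low dwords into the 64-bit value. */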
2881 rcStrict = CPUMSetGuestMsr(pVCpu, pMsg->MsrNumber, RT_MAKE_U64((uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx));
2882 Log4(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc\n",
2883 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2884 pMsg->MsrNumber, (uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx, VBOXSTRICTRC_VAL(rcStrict) ));
2885 if (rcStrict == VINF_SUCCESS)
2886 {
2887 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, &pMsg->Header, 2);
2888 return VINF_SUCCESS;
2889 }
2890# ifndef IN_RING3
2891 /* move to ring-3 and handle the trap/whatever there, as we want to LogRel this. */
2892 if (rcStrict == VERR_CPUM_RAISE_GP_0)
2893 rcStrict = VINF_CPUM_R3_MSR_WRITE;
2894 return rcStrict;
2895# else
2896 LogRel(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc!\n",
2897 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2898 pMsg->MsrNumber, (uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx, VBOXSTRICTRC_VAL(rcStrict) ));
2899# endif
2900 }
2901 /*
2902 * Handle reads.
2903 */
2904 else
2905 {
2906 uint64_t uValue = 0;
2907 rcStrict = CPUMQueryGuestMsr(pVCpu, pMsg->MsrNumber, &uValue);
2908 Log4(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n",
2909 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2910 pMsg->MsrNumber, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2911 if (rcStrict == VINF_SUCCESS)
2912 {
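     /* RDMSR returns the value in EDX:EAX; in 64-bit mode the upper halves are zeroed. */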
2913 pVCpu->cpum.GstCtx.rax = (uint32_t)uValue;
2914 pVCpu->cpum.GstCtx.rdx = uValue >> 32;
2915 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX);
2916 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, &pMsg->Header, 2);
2917 return VINF_SUCCESS;
2918 }
2919# ifndef IN_RING3
2920 /* move to ring-3 and handle the trap/whatever there, as we want to LogRel this. */
2921 if (rcStrict == VERR_CPUM_RAISE_GP_0)
2922 rcStrict = VINF_CPUM_R3_MSR_READ;
2923 return rcStrict;
2924# else
2925 LogRel(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n",
2926 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2927 pMsg->MsrNumber, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2928# endif
2929 }
2930 }
2931 else
2932 {
2933 /*
2934 * Handle frequent exit or something needing probing.
2935 */
2936 Log4(("MsrExit/%u: %04x:%08RX64/%s: %sMSR %#08x\n",
2937 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2938 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? "WR" : "RD", pMsg->MsrNumber));
2939 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2940 Log4(("MsrExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2941 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2942 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2943 return rcStrict;
2944 }
2945 }
2946 else
2947 {
2948 LogRel(("MsrExit/%u: %04x:%08RX64/%s: %sMSR %08x -> %Rrc - msr state import\n",
2949 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2950 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? "WR" : "RD",
2951 pMsg->MsrNumber, VBOXSTRICTRC_VAL(rcStrict) ));
2952 return rcStrict;
2953 }
2954 }
2955 else if (pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
2956 Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); WRMSR %08x, %08x:%08x\n",
2957 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2958 pMsg->Header.ExecutionState.Cpl, pMsg->MsrNumber, (uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx ));
2959 else
2960 Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); RDMSR %08x\n",
2961 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2962 pMsg->Header.ExecutionState.Cpl, pMsg->MsrNumber));
2963
2964 /*
2965 * If we get down here, we're supposed to #GP(0).
2966 */
2967 rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL_MSRS, "MSR");
2968 if (rcStrict == VINF_SUCCESS)
2969 {
2970 rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_GP, TRPM_TRAP, 0, 0, 0);
2971 if (rcStrict == VINF_IEM_RAISED_XCPT)
2972 rcStrict = VINF_SUCCESS;
2973 else if (rcStrict != VINF_SUCCESS)
2974 Log4(("MsrExit/%u: Injecting #GP(0) failed: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict) ));
2975 }
2976 return rcStrict;
2977}
2978#elif defined(IN_RING3)
2979/**
2980 * Deals with MSR access exits (WHvRunVpExitReasonX64MsrAccess).
2981 *
2982 * @returns Strict VBox status code.
2983 * @param pVM The cross context VM structure.
2984 * @param pVCpu The cross context per CPU structure.
2985 * @param pExit The VM exit information to handle.
2986 * @sa nemHCWinHandleMessageMsr
2987 */
2988NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExitMsr(PVMCC pVM, PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2989{
2990 /*
2991 * Check CPL as that's common to both RDMSR and WRMSR.
2992 */
2993 VBOXSTRICTRC rcStrict;
2994 if (pExit->VpContext.ExecutionState.Cpl == 0)
2995 {
2996 /*
2997 * Get all the MSR state. Since we're getting EFER, we also need to
2998 * get CR0, CR4 and CR3.
2999 */
3000 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
3001 pExit->MsrAccess.AccessInfo.IsWrite
3002 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE)
3003 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ),
3004 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
3005 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
3006 rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu,
3007 (!pExitRec ? 0 : IEM_CPUMCTX_EXTRN_MUST_MASK)
3008 | CPUMCTX_EXTRN_ALL_MSRS | CPUMCTX_EXTRN_CR0
3009 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4,
3010 "MSRs");
3011 if (rcStrict == VINF_SUCCESS)
3012 {
3013 if (!pExitRec)
3014 {
3015 /*
3016 * Handle writes.
3017 */
3018 if (pExit->MsrAccess.AccessInfo.IsWrite)
3019 {
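     /* WRMSR takes its input in EDX:EAX, so merge the two low dwords into the 64-bit value. */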
3020 rcStrict = CPUMSetGuestMsr(pVCpu, pExit->MsrAccess.MsrNumber,
3021 RT_MAKE_U64((uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx));
3022 Log4(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3023 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->MsrAccess.MsrNumber,
3024 (uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx, VBOXSTRICTRC_VAL(rcStrict) ));
3025 if (rcStrict == VINF_SUCCESS)
3026 {
3027 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, &pExit->VpContext, 2);
3028 return VINF_SUCCESS;
3029 }
3030 LogRel(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc!\n", pVCpu->idCpu,
3031 pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
3032 pExit->MsrAccess.MsrNumber, (uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx,
3033 VBOXSTRICTRC_VAL(rcStrict) ));
3034 }
3035 /*
3036 * Handle reads.
3037 */
3038 else
3039 {
3040 uint64_t uValue = 0;
3041 rcStrict = CPUMQueryGuestMsr(pVCpu, pExit->MsrAccess.MsrNumber, &uValue);
3042 Log4(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n", pVCpu->idCpu,
3043 pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
3044 pExit->MsrAccess.MsrNumber, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
3045 if (rcStrict == VINF_SUCCESS)
3046 {
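     /* RDMSR returns the value in EDX:EAX; in 64-bit mode the upper halves are zeroed. */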
3047 pVCpu->cpum.GstCtx.rax = (uint32_t)uValue;
3048 pVCpu->cpum.GstCtx.rdx = uValue >> 32;
3049 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX);
3050 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, &pExit->VpContext, 2);
3051 return VINF_SUCCESS;
3052 }
3053 LogRel(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3054 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->MsrAccess.MsrNumber,
3055 uValue, VBOXSTRICTRC_VAL(rcStrict) ));
3056 }
3057 }
3058 else
3059 {
3060 /*
3061 * Handle frequent exit or something needing probing.
3062 */
3063 Log4(("MsrExit/%u: %04x:%08RX64/%s: %sMSR %#08x\n",
3064 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
3065 pExit->MsrAccess.AccessInfo.IsWrite ? "WR" : "RD", pExit->MsrAccess.MsrNumber));
3066 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
3067 Log4(("MsrExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
3068 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
3069 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
3070 return rcStrict;
3071 }
3072 }
3073 else
3074 {
3075 LogRel(("MsrExit/%u: %04x:%08RX64/%s: %sMSR %08x -> %Rrc - msr state import\n",
3076 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
3077 pExit->MsrAccess.AccessInfo.IsWrite ? "WR" : "RD", pExit->MsrAccess.MsrNumber, VBOXSTRICTRC_VAL(rcStrict) ));
3078 return rcStrict;
3079 }
3080 }
3081 else if (pExit->MsrAccess.AccessInfo.IsWrite)
3082 Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); WRMSR %08x, %08x:%08x\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3083 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.ExecutionState.Cpl,
3084 pExit->MsrAccess.MsrNumber, (uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx ));
3085 else
3086 Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); RDMSR %08x\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3087 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.ExecutionState.Cpl,
3088 pExit->MsrAccess.MsrNumber));
3089
3090 /*
3091 * If we get down here, we're supposed to #GP(0).
3092 */
3093 rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL_MSRS, "MSR");
3094 if (rcStrict == VINF_SUCCESS)
3095 {
3096 rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_GP, TRPM_TRAP, 0, 0, 0);
3097 if (rcStrict == VINF_IEM_RAISED_XCPT)
3098 rcStrict = VINF_SUCCESS;
3099 else if (rcStrict != VINF_SUCCESS)
3100 Log4(("MsrExit/%u: Injecting #GP(0) failed: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict) ));
3101 }
3102
3103 RT_NOREF_PV(pVM);
3104 return rcStrict;
3105}
3106#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
3107
3108
3109/**
3110 * Worker for nemHCWinHandleMessageException & nemR3WinHandleExitException that
3111 * checks if the given opcodes are of interest at all.
3112 *
3113 * @returns true if interesting, false if not.
3114 * @param cbOpcodes Number of opcode bytes available.
3115 * @param pbOpcodes The opcode bytes.
3116 * @param f64BitMode Whether we're in 64-bit mode.
3117 */
3118DECLINLINE(bool) nemHcWinIsInterestingUndefinedOpcode(uint8_t cbOpcodes, uint8_t const *pbOpcodes, bool f64BitMode)
3119{
3120 /*
3121 * Currently only interested in VMCALL and VMMCALL.
3122 */
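     /* Skip any legal prefix bytes (REX prefixes only count in 64-bit mode) and
        match the 3-byte encodings 0f 01 c1 (VMCALL) and 0f 01 d9 (VMMCALL).
        Illustrative byte sequences this accepts:
            0f 01 c1        - VMCALL
            66 0f 01 d9     - VMMCALL with an operand size prefix
            48 0f 01 c1     - VMCALL with a REX.W prefix (64-bit mode only) */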
3123 while (cbOpcodes >= 3)
3124 {
3125 switch (pbOpcodes[0])
3126 {
3127 case 0x0f:
3128 switch (pbOpcodes[1])
3129 {
3130 case 0x01:
3131 switch (pbOpcodes[2])
3132 {
3133 case 0xc1: /* 0f 01 c1 VMCALL */
3134 return true;
3135 case 0xd9: /* 0f 01 d9 VMMCALL */
3136 return true;
3137 default:
3138 break;
3139 }
3140 break;
3141 }
3142 break;
3143
3144 default:
3145 return false;
3146
3147 /* prefixes */
3148 case 0x40: case 0x41: case 0x42: case 0x43: case 0x44: case 0x45: case 0x46: case 0x47:
3149 case 0x48: case 0x49: case 0x4a: case 0x4b: case 0x4c: case 0x4d: case 0x4e: case 0x4f:
3150 if (!f64BitMode)
3151 return false;
3152 RT_FALL_THRU();
3153 case X86_OP_PRF_CS:
3154 case X86_OP_PRF_SS:
3155 case X86_OP_PRF_DS:
3156 case X86_OP_PRF_ES:
3157 case X86_OP_PRF_FS:
3158 case X86_OP_PRF_GS:
3159 case X86_OP_PRF_SIZE_OP:
3160 case X86_OP_PRF_SIZE_ADDR:
3161 case X86_OP_PRF_LOCK:
3162 case X86_OP_PRF_REPZ:
3163 case X86_OP_PRF_REPNZ:
3164 cbOpcodes--;
3165 pbOpcodes++;
3166 continue;
3167 }
3168 break;
3169 }
3170 return false;
3171}
3172
3173
3174#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
3175/**
3176 * Copies state included in an exception intercept message.
3177 *
3178 * @param pVCpu The cross context per CPU structure.
3179 * @param pMsg The message.
3180 * @param fClearXcpt Clear pending exception.
3181 */
3182DECLINLINE(void)
3183nemHCWinCopyStateFromExceptionMessage(PVMCPUCC pVCpu, HV_X64_EXCEPTION_INTERCEPT_MESSAGE const *pMsg, bool fClearXcpt)
3184{
3185 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
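     /* The exception message body supplies all GPRs plus SS and DS (and optionally
        clears the pending-event flag), so mark those as no longer external. */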
3186 pVCpu->cpum.GstCtx.fExtrn &= ~( CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_DS
3187 | (fClearXcpt ? CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT : 0) );
3188 pVCpu->cpum.GstCtx.rax = pMsg->Rax;
3189 pVCpu->cpum.GstCtx.rcx = pMsg->Rcx;
3190 pVCpu->cpum.GstCtx.rdx = pMsg->Rdx;
3191 pVCpu->cpum.GstCtx.rbx = pMsg->Rbx;
3192 pVCpu->cpum.GstCtx.rsp = pMsg->Rsp;
3193 pVCpu->cpum.GstCtx.rbp = pMsg->Rbp;
3194 pVCpu->cpum.GstCtx.rsi = pMsg->Rsi;
3195 pVCpu->cpum.GstCtx.rdi = pMsg->Rdi;
3196 pVCpu->cpum.GstCtx.r8 = pMsg->R8;
3197 pVCpu->cpum.GstCtx.r9 = pMsg->R9;
3198 pVCpu->cpum.GstCtx.r10 = pMsg->R10;
3199 pVCpu->cpum.GstCtx.r11 = pMsg->R11;
3200 pVCpu->cpum.GstCtx.r12 = pMsg->R12;
3201 pVCpu->cpum.GstCtx.r13 = pMsg->R13;
3202 pVCpu->cpum.GstCtx.r14 = pMsg->R14;
3203 pVCpu->cpum.GstCtx.r15 = pMsg->R15;
3204 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.ds, pMsg->DsSegment);
3205 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.ss, pMsg->SsSegment);
3206}
3207#elif defined(IN_RING3)
3208/**
3209 * Copies state included in an exception intercept exit.
3210 *
3211 * @param pVCpu The cross context per CPU structure.
3212 * @param pExit The VM exit information.
3213 * @param fClearXcpt Clear pending exception.
3214 */
3215DECLINLINE(void) nemR3WinCopyStateFromExceptionMessage(PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, bool fClearXcpt)
3216{
3217 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
3218 if (fClearXcpt)
3219 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
3220}
3221#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
3222
3223
3224/**
3225 * Advances the guest RIP by the number of bytes specified in @a cb.
3226 *
3227 * @param pVCpu The cross context virtual CPU structure.
3228 * @param cb RIP increment value in bytes.
3229 */
3230DECLINLINE(void) nemHcWinAdvanceRip(PVMCPUCC pVCpu, uint32_t cb)
3231{
3232 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3233 pCtx->rip += cb;
3234
3235 /* Update the interrupt shadow: it only covers the instruction following MOV SS/STI, so clear it once RIP has moved past the recorded PC. */
3236 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
3237 && pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
3238 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3239}
3240
3241
3242/**
3243 * Hacks its way around the lovely mesa driver's backdoor accesses.
3244 *
3245 * @sa hmR0VmxHandleMesaDrvGp
3246 * @sa hmR0SvmHandleMesaDrvGp
3247 */
3248static int nemHcWinHandleMesaDrvGp(PVMCPUCC pVCpu, PCPUMCTX pCtx)
3249{
3250 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_GPRS_MASK)));
3251 RT_NOREF(pCtx);
3252
3253 /* For now we'll just skip the 1-byte IN EAX,DX instruction (0xed). */
3254 nemHcWinAdvanceRip(pVCpu, 1);
3255 return VINF_SUCCESS;
3256}
3257
3258
3259/**
3260 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
3261 * backdoor logging without checking what it is running inside.
3262 *
3263 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
3264 * backdoor port and magic numbers loaded in registers.
3265 *
3266 * @returns true if it is, false if it isn't.
3267 * @sa hmR0VmxIsMesaDrvGp
3268 * @sa hmR0SvmIsMesaDrvGp
3269 */
3270DECLINLINE(bool) nemHcWinIsMesaDrvGp(PVMCPUCC pVCpu, PCPUMCTX pCtx, const uint8_t *pbInsn, uint32_t cbInsn)
3271{
3272 /* #GP(0) is already checked by caller. */
3273
3274 /* Check the VMware backdoor magic ('VMXh' in EAX) and I/O port ('VX', 0x5658, in DX). */
3275 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RAX)));
3276 if (pCtx->dx != UINT32_C(0x5658))
3277 return false;
3278 if (pCtx->rax != UINT32_C(0x564d5868))
3279 return false;
3280
3281 /* Flat ring-3 CS. */
3282 if (CPUMGetGuestCPL(pVCpu) != 3)
3283 return false;
3284 if (pCtx->cs.u64Base != 0)
3285 return false;
3286
3287 /* 0xed: IN eAX,dx */
3288 if (cbInsn < 1) /* Play safe (shouldn't happen). */
3289 {
3290 uint8_t abInstr[1];
3291 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
3292 if (RT_FAILURE(rc))
3293 return false;
3294 if (abInstr[0] != 0xed)
3295 return false;
3296 }
3297 else
3298 {
3299 if (pbInsn[0] != 0xed)
3300 return false;
3301 }
3302
3303 return true;
3304}
3305
3306
3307#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
3308/**
3309 * Deals with exception intercept message (HvMessageTypeX64ExceptionIntercept).
3310 *
3311 * @returns Strict VBox status code.
3312 * @param pVCpu The cross context per CPU structure.
3313 * @param pMsg The message.
3314 * @sa nemR3WinHandleExitException
3315 */
3316NEM_TMPL_STATIC VBOXSTRICTRC
3317nemHCWinHandleMessageException(PVMCPUCC pVCpu, HV_X64_EXCEPTION_INTERCEPT_MESSAGE const *pMsg)
3318{
3319 /*
3320 * Assert sanity.
3321 */
3322 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
3323 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
3324 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_EXECUTE);
3325 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
3326 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRip, pMsg->Header.Rip);
3327 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
3328 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
3329 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, HvX64RegisterDs, pMsg->DsSegment);
3330 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, HvX64RegisterSs, pMsg->SsSegment);
3331 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRax, pMsg->Rax);
3332 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRcx, pMsg->Rcx);
3333 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRdx, pMsg->Rdx);
3334 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRbx, pMsg->Rbx);
3335 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRsp, pMsg->Rsp);
3336 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRbp, pMsg->Rbp);
3337 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRsi, pMsg->Rsi);
3338 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRdi, pMsg->Rdi);
3339 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR8, pMsg->R8);
3340 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR9, pMsg->R9);
3341 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR10, pMsg->R10);
3342 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR11, pMsg->R11);
3343 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR12, pMsg->R12);
3344 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR13, pMsg->R13);
3345 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR14, pMsg->R14);
3346 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR15, pMsg->R15);
3347
3348 /*
3349 * Get most of the register state since we'll end up making IEM inject the
3350 * event. The exception isn't normally flagged as a pending event, so duh.
3351 *
3352 * Note! We can optimize this later with event injection.
3353 */
3354 Log4(("XcptExit/%u: %04x:%08RX64/%s: %x errcd=%#x parm=%RX64\n",
3355 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
3356 pMsg->ExceptionVector, pMsg->ErrorCode, pMsg->ExceptionParameter));
3357 nemHCWinCopyStateFromExceptionMessage(pVCpu, pMsg, true /*fClearXcpt*/);
3358 uint64_t fWhat = NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM;
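     /* Debug exceptions additionally need DR0-DR3 as well as DR6 (status) and DR7 (configuration). */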
3359 if (pMsg->ExceptionVector == X86_XCPT_DB)
3360 fWhat |= CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_DR6;
3361 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, fWhat, "Xcpt");
3362 if (rcStrict != VINF_SUCCESS)
3363 return rcStrict;
3364
3365 /*
3366 * Handle the intercept.
3367 */
3368 TRPMEVENT enmEvtType = TRPM_TRAP;
3369 switch (pMsg->ExceptionVector)
3370 {
3371 /*
3372 * We get undefined opcodes on VMMCALL(AMD) & VMCALL(Intel) instructions
3373 * and need to turn them over to GIM.
3374 *
3375 * Note! We do not check fGIMTrapXcptUD here ASSUMING that GIM only wants
3376 * #UD for handling non-native hypercall instructions. (IEM will
3377 * decode both and let the GIM provider decide whether to accept it.)
3378 */
3379 case X86_XCPT_UD:
3380 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUd);
3381 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_UD),
3382 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
3383
3384 if (nemHcWinIsInterestingUndefinedOpcode(pMsg->InstructionByteCount, pMsg->InstructionBytes,
3385 pMsg->Header.ExecutionState.EferLma && pMsg->Header.CsSegment.Long ))
3386 {
3387 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pMsg->Header.Rip,
3388 pMsg->InstructionBytes, pMsg->InstructionByteCount);
3389 Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD -> emulated -> %Rrc\n",
3390 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip,
3391 nemHCWinExecStateToLogStr(&pMsg->Header), VBOXSTRICTRC_VAL(rcStrict) ));
3392 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUdHandled);
3393 return rcStrict;
3394 }
3395 Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD [%.*Rhxs] -> re-injected\n", pVCpu->idCpu, pMsg->Header.CsSegment.Selector,
3396 pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), pMsg->InstructionByteCount, pMsg->InstructionBytes ));
3397 break;
3398
3399 /*
3400 * Workaround the lovely mesa driver assuming that vmsvga means vmware
3401 * hypervisor and tries to log stuff to the host.
3402 */
3403 case X86_XCPT_GP:
3404 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionGp);
3405 /** @todo r=bird: Need workaround in IEM for this, right?
3406 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_GP),
3407 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC()); */
3408 if ( !pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv
3409 || !nemHcWinIsMesaDrvGp(pVCpu, &pVCpu->cpum.GstCtx, pMsg->InstructionBytes, pMsg->InstructionByteCount))
3410 {
3411# if 1 /** @todo Need to emulate instruction or we get a triple fault when trying to inject the #GP... */
3412 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pMsg->Header.Rip,
3413 pMsg->InstructionBytes, pMsg->InstructionByteCount);
3414 Log4(("XcptExit/%u: %04x:%08RX64/%s: #GP -> emulated -> %Rrc\n",
3415 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip,
3416 nemHCWinExecStateToLogStr(&pMsg->Header), VBOXSTRICTRC_VAL(rcStrict) ));
3417 return rcStrict;
3418# else
3419 break;
3420# endif
3421 }
3422 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionGpMesa);
3423 return nemHcWinHandleMesaDrvGp(pVCpu, &pVCpu->cpum.GstCtx);
3424
3425 /*
3426 * Filter debug exceptions.
3427 */
3428 case X86_XCPT_DB:
3429 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionDb);
3430 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_DB),
3431 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
3432 Log4(("XcptExit/%u: %04x:%08RX64/%s: #DB - TODO\n",
3433 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header) ));
3434 break;
3435
3436 case X86_XCPT_BP:
3437 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionBp);
3438 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_BP),
3439 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
3440 Log4(("XcptExit/%u: %04x:%08RX64/%s: #BP - TODO - %u\n", pVCpu->idCpu, pMsg->Header.CsSegment.Selector,
3441 pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), pMsg->Header.InstructionLength));
3442 enmEvtType = TRPM_SOFTWARE_INT; /* We're at the INT3 instruction, not after it. */
3443 break;
3444
3445 /* This shouldn't happen. */
3446 default:
3447 AssertLogRelMsgFailedReturn(("ExceptionVector=%#x\n", pMsg->ExceptionVector), VERR_IEM_IPE_6);
3448 }
3449
3450 /*
3451 * Inject it.
3452 */
3453 rcStrict = IEMInjectTrap(pVCpu, pMsg->ExceptionVector, enmEvtType, pMsg->ErrorCode,
3454 pMsg->ExceptionParameter /*??*/, pMsg->Header.InstructionLength);
3455 Log4(("XcptExit/%u: %04x:%08RX64/%s: %#u -> injected -> %Rrc\n",
3456 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip,
3457 nemHCWinExecStateToLogStr(&pMsg->Header), pMsg->ExceptionVector, VBOXSTRICTRC_VAL(rcStrict) ));
3458 return rcStrict;
3459}
3460#elif defined(IN_RING3)
3461/**
3462 * Deals with exception exits (WHvRunVpExitReasonException).
3463 *
3464 * @returns Strict VBox status code.
3465 * @param pVM The cross context VM structure.
3466 * @param pVCpu The cross context per CPU structure.
3467 * @param pExit The VM exit information to handle.
3468 * @sa nemHCWinHandleMessageException
3469 */
3470NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExitException(PVMCC pVM, PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
3471{
3472 /*
3473 * Get most of the register state since we'll end up making IEM inject the
3474 * event. The exception isn't normally flagged as a pending event, so duh.
3475 *
3476 * Note! We can optimize this later with event injection.
3477 */
3478 Log4(("XcptExit/%u: %04x:%08RX64/%s: %x errcd=%#x parm=%RX64\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3479 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpException.ExceptionType,
3480 pExit->VpException.ErrorCode, pExit->VpException.ExceptionParameter ));
3481 nemR3WinCopyStateFromExceptionMessage(pVCpu, pExit, true /*fClearXcpt*/);
3482 uint64_t fWhat = NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM;
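     /* Debug exceptions additionally need DR0-DR3 as well as DR6 (status) and DR7 (configuration). */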
3483 if (pExit->VpException.ExceptionType == X86_XCPT_DB)
3484 fWhat |= CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_DR6;
3485 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, fWhat, "Xcpt");
3486 if (rcStrict != VINF_SUCCESS)
3487 return rcStrict;
3488
3489 /*
3490 * Handle the intercept.
3491 */
3492 TRPMEVENT enmEvtType = TRPM_TRAP;
3493 switch (pExit->VpException.ExceptionType)
3494 {
3495 /*
3496 * We get undefined opcodes on VMMCALL(AMD) & VMCALL(Intel) instructions
3497 * and need to turn them over to GIM.
3498 *
3499 * Note! We do not check fGIMTrapXcptUD here ASSUMING that GIM only wants
3500 * #UD for handling non-native hypercall instructions. (IEM will
3501 * decode both and let the GIM provider decide whether to accept it.)
3502 */
3503 case X86_XCPT_UD:
3504 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUd);
3505 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_UD),
3506 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
3507 if (nemHcWinIsInterestingUndefinedOpcode(pExit->VpException.InstructionByteCount, pExit->VpException.InstructionBytes,
3508 pExit->VpContext.ExecutionState.EferLma && pExit->VpContext.Cs.Long ))
3509 {
3510 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pExit->VpContext.Rip,
3511 pExit->VpException.InstructionBytes,
3512 pExit->VpException.InstructionByteCount);
3513 Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD -> emulated -> %Rrc\n",
3514 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip,
3515 nemR3WinExecStateToLogStr(&pExit->VpContext), VBOXSTRICTRC_VAL(rcStrict) ));
3516 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUdHandled);
3517 return rcStrict;
3518 }
3519
3520 Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD [%.*Rhxs] -> re-injected\n", pVCpu->idCpu,
3521 pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
3522 pExit->VpException.InstructionByteCount, pExit->VpException.InstructionBytes ));
3523 break;
3524
3525 /*
3526 * Workaround the lovely mesa driver assuming that vmsvga means vmware
3527 * hypervisor and tries to log stuff to the host.
3528 */
3529 case X86_XCPT_GP:
3530 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionGp);
3531 /** @todo r=bird: Need workaround in IEM for this, right?
3532 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_GP),
3533 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC()); */
3534 if ( !pVCpu->nem.s.fTrapXcptGpForLovelyMesaDrv
3535 || !nemHcWinIsMesaDrvGp(pVCpu, &pVCpu->cpum.GstCtx, pExit->VpException.InstructionBytes,
3536 pExit->VpException.InstructionByteCount))
3537 {
3538# if 1 /** @todo Need to emulate instruction or we get a triple fault when trying to inject the #GP... */
3539 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pExit->VpContext.Rip,
3540 pExit->VpException.InstructionBytes,
3541 pExit->VpException.InstructionByteCount);
3542 Log4(("XcptExit/%u: %04x:%08RX64/%s: #GP -> emulated -> %Rrc\n",
3543 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip,
3544 nemR3WinExecStateToLogStr(&pExit->VpContext), VBOXSTRICTRC_VAL(rcStrict) ));
3545 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUdHandled);
3546 return rcStrict;
3547# else
3548 break;
3549# endif
3550 }
3551 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionGpMesa);
3552 return nemHcWinHandleMesaDrvGp(pVCpu, &pVCpu->cpum.GstCtx);
3553
3554 /*
3555 * Filter debug exceptions.
3556 */
3557 case X86_XCPT_DB:
3558 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionDb);
3559 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_DB),
3560 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
3561 Log4(("XcptExit/%u: %04x:%08RX64/%s: #DB - TODO\n",
3562 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext) ));
3563 break;
3564
3565 case X86_XCPT_BP:
3566 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionBp);
3567 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_BP),
3568 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
3569 Log4(("XcptExit/%u: %04x:%08RX64/%s: #BP - TODO - %u\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3570 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.InstructionLength));
3571 enmEvtType = TRPM_SOFTWARE_INT; /* We're at the INT3 instruction, not after it. */
3572 break;
3573
3574 /* This shouldn't happen. */
3575 default:
3576 AssertLogRelMsgFailedReturn(("ExceptionType=%#x\n", pExit->VpException.ExceptionType), VERR_IEM_IPE_6);
3577 }
3578
3579 /*
3580 * Inject it.
3581 */
3582 rcStrict = IEMInjectTrap(pVCpu, pExit->VpException.ExceptionType, enmEvtType, pExit->VpException.ErrorCode,
3583 pExit->VpException.ExceptionParameter /*??*/, pExit->VpContext.InstructionLength);
3584 Log4(("XcptExit/%u: %04x:%08RX64/%s: %#u -> injected -> %Rrc\n",
3585 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip,
3586 nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpException.ExceptionType, VBOXSTRICTRC_VAL(rcStrict) ));
3587
3588 RT_NOREF_PV(pVM);
3589 return rcStrict;
3590}
3591#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
3592
3593
3594#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
3595/**
3596 * Deals with unrecoverable exception (triple fault).
3597 *
3598 * Seen WRMSR 0x201 (IA32_MTRR_PHYSMASK0) writes from grub / debian9 ending up
3599 * here too. So we'll leave it to IEM to decide.
3600 *
3601 * @returns Strict VBox status code.
3602 * @param pVCpu The cross context per CPU structure.
3603 * @param pMsgHdr The message header.
3604 * @sa nemR3WinHandleExitUnrecoverableException
3605 */
3606NEM_TMPL_STATIC VBOXSTRICTRC
3607nemHCWinHandleMessageUnrecoverableException(PVMCPUCC pVCpu, HV_X64_INTERCEPT_MESSAGE_HEADER const *pMsgHdr)
3608{
3609 /* Check message register value sanity. */
3610 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, HvX64RegisterCs, pMsgHdr->CsSegment);
3611 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRip, pMsgHdr->Rip);
3612 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRflags, pMsgHdr->Rflags);
3613 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterCr8, (uint64_t)pMsgHdr->Cr8);
3614
3615# if 0
3616 /*
3617 * Just copy the state we've got and handle it in the loop for now.
3618 */
3619 nemHCWinCopyStateFromX64Header(pVCpu, pMsgHdr);
3620 Log(("TripleExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT\n",
3621 pVCpu->idCpu, pMsgHdr->CsSegment.Selector, pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags));
3622 return VINF_EM_TRIPLE_FAULT;
3623# else
3624 /*
3625 * Let IEM decide whether this is really it.
3626 */
3627 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_UNRECOVERABLE_EXCEPTION),
3628 pMsgHdr->Rip + pMsgHdr->CsSegment.Base, ASMReadTSC());
3629 nemHCWinCopyStateFromX64Header(pVCpu, pMsgHdr);
3630 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL, "TripleExit");
3631 if (rcStrict == VINF_SUCCESS)
3632 {
3633 rcStrict = IEMExecOne(pVCpu);
3634 if (rcStrict == VINF_SUCCESS)
3635 {
3636 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_SUCCESS\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
3637 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags ));
3638 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT; /* Make sure to reset pending #DB(0). */
3639 return VINF_SUCCESS;
3640 }
3641 if (rcStrict == VINF_EM_TRIPLE_FAULT)
3642 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT!\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
3643 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags ));
3644 else
3645 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (IEMExecOne)\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
3646 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
3647 }
3648 else
3649 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (state import)\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
3650 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
3651 return rcStrict;
3652# endif
3653}
3654#elif defined(IN_RING3)
3655/**
3656 * Deals with unrecoverable exception exits (WHvRunVpExitReasonUnrecoverableException).
3657 *
3658 * @returns Strict VBox status code.
3659 * @param pVM The cross context VM structure.
3660 * @param pVCpu The cross context per CPU structure.
3661 * @param pExit The VM exit information to handle.
3662 * @sa nemHCWinHandleMessageUnrecoverableException
3663 */
3664NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExitUnrecoverableException(PVMCC pVM, PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
3665{
3666# if 0
3667 /*
3668 * Just copy the state we've got and handle it in the loop for now.
3669 */
3670 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
3671 Log(("TripleExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3672 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags));
3673 RT_NOREF_PV(pVM);
3674 return VINF_EM_TRIPLE_FAULT;
3675# else
3676 /*
3677 * Let IEM decide whether this is really it.
3678 */
3679 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_UNRECOVERABLE_EXCEPTION),
3680 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
3681 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
3682 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL, "TripleExit");
3683 if (rcStrict == VINF_SUCCESS)
3684 {
3685 rcStrict = IEMExecOne(pVCpu);
3686 if (rcStrict == VINF_SUCCESS)
3687 {
3688 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_SUCCESS\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3689 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags));
3690 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT; /* Make sure to reset pending #DB(0). */
3691 return VINF_SUCCESS;
3692 }
3693 if (rcStrict == VINF_EM_TRIPLE_FAULT)
3694 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT!\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3695 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags ));
3696 else
3697 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (IEMExecOne)\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3698 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
3699 }
3700 else
3701 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (state import)\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3702 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
3703 RT_NOREF_PV(pVM);
3704 return rcStrict;
3705# endif
3706
3707}
3708#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
3709
3710
3711#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
3712/**
3713 * Handles messages (VM exits).
3714 *
3715 * @returns Strict VBox status code.
3716 * @param pVM The cross context VM structure.
3717 * @param pVCpu The cross context per CPU structure.
3718 * @param pMappingHeader The message slot mapping.
3719 * @sa nemR3WinHandleExit
3720 */
3721NEM_TMPL_STATIC VBOXSTRICTRC
3722nemHCWinHandleMessage(PVMCC pVM, PVMCPUCC pVCpu, VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader)
3723{
3724 if (pMappingHeader->enmVidMsgType == VidMessageHypervisorMessage)
3725 {
3726 AssertMsg(pMappingHeader->cbMessage == HV_MESSAGE_SIZE, ("%#x\n", pMappingHeader->cbMessage));
3727 HV_MESSAGE const *pMsg = (HV_MESSAGE const *)(pMappingHeader + 1);
3728 switch (pMsg->Header.MessageType)
3729 {
3730 case HvMessageTypeUnmappedGpa:
3731 Assert(pMsg->Header.PayloadSize == RT_UOFFSETOF(HV_X64_MEMORY_INTERCEPT_MESSAGE, DsSegment));
3732 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemUnmapped);
3733 return nemHCWinHandleMessageMemory(pVM, pVCpu, &pMsg->X64MemoryIntercept);
3734
3735 case HvMessageTypeGpaIntercept:
3736 Assert(pMsg->Header.PayloadSize == RT_UOFFSETOF(HV_X64_MEMORY_INTERCEPT_MESSAGE, DsSegment));
3737 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemIntercept);
3738 return nemHCWinHandleMessageMemory(pVM, pVCpu, &pMsg->X64MemoryIntercept);
3739
3740 case HvMessageTypeX64IoPortIntercept:
3741 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64IoPortIntercept));
3742 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitPortIo);
3743 return nemHCWinHandleMessageIoPort(pVM, pVCpu, &pMsg->X64IoPortIntercept);
3744
3745 case HvMessageTypeX64Halt:
3746 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitHalt);
3747 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_HALT),
3748 pMsg->X64InterceptHeader.Rip + pMsg->X64InterceptHeader.CsSegment.Base, ASMReadTSC());
3749 Log4(("HaltExit\n"));
3750 return VINF_EM_HALT;
3751
3752 case HvMessageTypeX64InterruptWindow:
3753 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64InterruptWindow));
3754 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitInterruptWindow);
3755 return nemHCWinHandleMessageInterruptWindow(pVM, pVCpu, &pMsg->X64InterruptWindow);
3756
3757 case HvMessageTypeX64CpuidIntercept:
3758 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64CpuIdIntercept));
3759 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitCpuId);
3760 return nemHCWinHandleMessageCpuId(pVM, pVCpu, &pMsg->X64CpuIdIntercept);
3761
3762 case HvMessageTypeX64MsrIntercept:
3763 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64MsrIntercept));
3764 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMsr);
3765 return nemHCWinHandleMessageMsr(pVCpu, &pMsg->X64MsrIntercept);
3766
3767 case HvMessageTypeX64ExceptionIntercept:
3768 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64ExceptionIntercept));
3769 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitException);
3770 return nemHCWinHandleMessageException(pVCpu, &pMsg->X64ExceptionIntercept);
3771
3772 case HvMessageTypeUnrecoverableException:
3773 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64InterceptHeader));
3774 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitUnrecoverable);
3775 return nemHCWinHandleMessageUnrecoverableException(pVCpu, &pMsg->X64InterceptHeader);
3776
3777 case HvMessageTypeInvalidVpRegisterValue:
3778 case HvMessageTypeUnsupportedFeature:
3779 case HvMessageTypeTlbPageSizeMismatch:
3780 LogRel(("Unimplemented msg:\n%.*Rhxd\n", (int)sizeof(*pMsg), pMsg));
3781 AssertLogRelMsgFailedReturn(("Message type %#x not implemented!\n%.32Rhxd\n", pMsg->Header.MessageType, pMsg),
3782 VERR_NEM_IPE_3);
3783
3784 case HvMessageTypeX64ApicEoi:
3785 case HvMessageTypeX64LegacyFpError:
3786 case HvMessageTypeX64RegisterIntercept:
3787 case HvMessageTypeApicEoi:
3788 case HvMessageTypeFerrAsserted:
3789 case HvMessageTypeEventLogBufferComplete:
3790 case HvMessageTimerExpired:
3791 LogRel(("Unexpected msg:\n%.*Rhxd\n", (int)sizeof(*pMsg), pMsg));
3792 AssertLogRelMsgFailedReturn(("Unexpected message on CPU #%u: %#x\n", pVCpu->idCpu, pMsg->Header.MessageType),
3793 VERR_NEM_IPE_3);
3794
3795 default:
3796 LogRel(("Unknown msg:\n%.*Rhxd\n", (int)sizeof(*pMsg), pMsg));
3797 AssertLogRelMsgFailedReturn(("Unknown message on CPU #%u: %#x\n", pVCpu->idCpu, pMsg->Header.MessageType),
3798 VERR_NEM_IPE_3);
3799 }
3800 }
3801 else
3802 AssertLogRelMsgFailedReturn(("Unexpected VID message type on CPU #%u: %#x LB %u\n",
3803 pVCpu->idCpu, pMappingHeader->enmVidMsgType, pMappingHeader->cbMessage),
3804 VERR_NEM_IPE_4);
3805}
3806#elif defined(IN_RING3)
3807/**
3808 * Handles VM exits.
3809 *
3810 * @returns Strict VBox status code.
3811 * @param pVM The cross context VM structure.
3812 * @param pVCpu The cross context per CPU structure.
3813 * @param pExit The VM exit information to handle.
3814 * @sa nemHCWinHandleMessage
3815 */
3816NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExit(PVMCC pVM, PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
3817{
3818 switch (pExit->ExitReason)
3819 {
3820 case WHvRunVpExitReasonMemoryAccess:
3821 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemUnmapped);
3822 return nemR3WinHandleExitMemory(pVM, pVCpu, pExit);
3823
3824 case WHvRunVpExitReasonX64IoPortAccess:
3825 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitPortIo);
3826 return nemR3WinHandleExitIoPort(pVM, pVCpu, pExit);
3827
3828 case WHvRunVpExitReasonX64Halt:
3829 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitHalt);
3830 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_HALT),
3831 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
3832 Log4(("HaltExit/%u\n", pVCpu->idCpu));
3833 return VINF_EM_HALT;
3834
3835 case WHvRunVpExitReasonCanceled:
3836 Log4(("CanceledExit/%u\n", pVCpu->idCpu));
3837 return VINF_SUCCESS;
3838
3839 case WHvRunVpExitReasonX64InterruptWindow:
3840 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitInterruptWindow);
3841 return nemR3WinHandleExitInterruptWindow(pVM, pVCpu, pExit);
3842
3843 case WHvRunVpExitReasonX64Cpuid:
3844 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitCpuId);
3845 return nemR3WinHandleExitCpuId(pVM, pVCpu, pExit);
3846
3847 case WHvRunVpExitReasonX64MsrAccess:
3848 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMsr);
3849 return nemR3WinHandleExitMsr(pVM, pVCpu, pExit);
3850
3851 case WHvRunVpExitReasonException:
3852 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitException);
3853 return nemR3WinHandleExitException(pVM, pVCpu, pExit);
3854
3855 case WHvRunVpExitReasonUnrecoverableException:
3856 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitUnrecoverable);
3857 return nemR3WinHandleExitUnrecoverableException(pVM, pVCpu, pExit);
3858
3859 case WHvRunVpExitReasonUnsupportedFeature:
3860 case WHvRunVpExitReasonInvalidVpRegisterValue:
3861 LogRel(("Unimplemented exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit));
3862 AssertLogRelMsgFailedReturn(("Unexpected exit on CPU #%u: %#x\n%.32Rhxd\n",
3863 pVCpu->idCpu, pExit->ExitReason, pExit), VERR_NEM_IPE_3);
3864
3865 /* Undesired exits: */
3866 case WHvRunVpExitReasonNone:
3867 default:
3868 LogRel(("Unknown exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit));
3869 AssertLogRelMsgFailedReturn(("Unknown exit on CPU #%u: %#x!\n", pVCpu->idCpu, pExit->ExitReason), VERR_NEM_IPE_3);
3870 }
3871}
3872#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
3873
3874
3875#if defined(IN_RING0) && defined(NEM_WIN_WITH_RING0_RUNLOOP)
3876/**
3877 * Perform an I/O control operation on the partition handle (VID.SYS),
3878 * restarting on alert-like behaviour.
3879 *
3880 * @returns NT status code.
3881 * @param pGVM The ring-0 VM structure.
3882 * @param pGVCpu The global (ring-0) per CPU structure.
3883 * @param fFlags The wait flags.
3884 * @param cMillies The timeout in milliseconds.
3885 */
3886static NTSTATUS nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(PGVM pGVM, PGVMCPU pGVCpu, uint32_t fFlags, uint32_t cMillies)
3887{
3888 pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu = pGVCpu->idCpu;
3889 pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags = fFlags;
3890 pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = cMillies;
3891 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVCpu, pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
3892 &pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
3893 pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.cbInput,
3894 NULL, 0);
3895 if (rcNt == STATUS_SUCCESS)
3896 { /* likely */ }
3897 /*
3898 * Generally, if we get down here, we have been interrupted between ACK'ing
3899 * a message and waiting for the next one due to an NtAlertThread call. So, we
3900 * skip ACK'ing the previous message and go straight to waiting for the next.
3901 * See similar stuff in nemHCWinRunGC().
3902 */
3903 else if ( rcNt == STATUS_TIMEOUT
3904 || rcNt == STATUS_ALERTED /* just in case */
3905 || rcNt == STATUS_KERNEL_APC /* just in case */
3906 || rcNt == STATUS_USER_APC /* just in case */)
3907 {
3908 DBGFTRACE_CUSTOM(pGVCpu->CTX_SUFF(pVM), "IoCtlMessageSlotHandleAndGetNextRestart/1 %#x (f=%#x)", rcNt, fFlags);
3909 STAM_REL_COUNTER_INC(&pGVCpu->nem.s.StatStopCpuPendingAlerts);
3910 Assert(fFlags & VID_MSHAGN_F_GET_NEXT_MESSAGE);
3911
3912 pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu = pGVCpu->idCpu;
3913 pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags = fFlags & ~VID_MSHAGN_F_HANDLE_MESSAGE;
3914 pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = cMillies;
3915 rcNt = nemR0NtPerformIoControl(pGVM, pGVCpu, pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
3916 &pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
3917 pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.cbInput,
3918 NULL, 0);
3919 DBGFTRACE_CUSTOM(pGVM, "IoCtlMessageSlotHandleAndGetNextRestart/2 %#x", rcNt);
3920 }
3921 return rcNt;
3922}
3923#endif /* IN_RING0 && NEM_WIN_WITH_RING0_RUNLOOP */
3924
3925
3926#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
3927/**
3928 * Worker for nemHCWinRunGC that stops the execution on the way out.
3929 *
3930 * The CPU was running the last time we checked, so there are no messages that
3931 * need to be marked as handled. The caller checks this.
3932 *
3933 * @returns rcStrict on success, error status on failure.
3934 * @param pVM The cross context VM structure.
3935 * @param pVCpu The cross context per CPU structure.
3936 * @param rcStrict The nemHCWinRunGC return status. This is a little
3937 * bit unnecessary, except in internal error cases,
3938 * since we won't need to stop the CPU if we took an
3939 * exit.
3940 * @param pMappingHeader The message slot mapping.
3941 */
3942NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinStopCpu(PVMCC pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict,
3943 VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader)
3944{
3945# ifdef DBGFTRACE_ENABLED
3946 HV_MESSAGE const volatile *pMsgForTrace = (HV_MESSAGE const volatile *)(pMappingHeader + 1);
3947# endif
3948
3949 /*
3950 * Try stopping the processor. If we're lucky we manage to do this before it
3951 * does another VM exit.
3952 */
3953 DBGFTRACE_CUSTOM(pVM, "nemStop#0");
3954# ifdef IN_RING0
3955 pVCpu->nem.s.uIoCtlBuf.idCpu = pVCpu->idCpu;
3956 NTSTATUS rcNt = nemR0NtPerformIoControl(pVM, pVCpu, pVM->nemr0.s.IoCtlStopVirtualProcessor.uFunction,
3957 &pVCpu->nem.s.uIoCtlBuf.idCpu, sizeof(pVCpu->nem.s.uIoCtlBuf.idCpu),
3958 NULL, 0);
3959 if (NT_SUCCESS(rcNt))
3960 {
3961 DBGFTRACE_CUSTOM(pVM, "nemStop#0: okay (%#x)", rcNt);
3962 Log8(("nemHCWinStopCpu: Stopping CPU succeeded (cpu status %u)\n", nemHCWinCpuGetRunningStatus(pVCpu) ));
3963 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuSuccess);
3964 return rcStrict;
3965 }
3966# else
3967 BOOL fRet = g_pfnVidStopVirtualProcessor(pVM->nem.s.hPartitionDevice, pVCpu->idCpu);
3968 if (fRet)
3969 {
3970 DBGFTRACE_CUSTOM(pVM, "nemStop#0: okay");
3971 Log8(("nemHCWinStopCpu: Stopping CPU succeeded (cpu status %u)\n", nemHCWinCpuGetRunningStatus(pVCpu) ));
3972 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuSuccess);
3973 return rcStrict;
3974 }
3975# endif
3976
3977 /*
3978 * Dang. The CPU stopped by itself and we got a couple of messages to deal with.
3979 */
3980# ifdef IN_RING0
3981 DBGFTRACE_CUSTOM(pVM, "nemStop#0: pending (%#x)", rcNt);
3982 AssertLogRelMsgReturn(rcNt == ERROR_VID_STOP_PENDING, ("rcNt=%#x\n", rcNt),
3983 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3984# else
3985 DWORD dwErr = RTNtLastErrorValue();
3986 DBGFTRACE_CUSTOM(pVM, "nemStop#0: pending (%#x)", dwErr);
3987 AssertLogRelMsgReturn(dwErr == ERROR_VID_STOP_PENDING, ("dwErr=%#u (%#x)\n", dwErr, dwErr),
3988 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3989# endif
3990 Log8(("nemHCWinStopCpu: Stopping CPU #%u pending...\n", pVCpu->idCpu));
3991 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuPending);
3992
3993 /*
3994 * First message: Exit or similar, sometimes VidMessageStopRequestComplete.
3995 * Note! We can safely ASSUME that rcStrict isn't carrying any important information.
3996 */
3997# ifdef IN_RING0
3998 rcNt = nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(pVM, pVCpu, VID_MSHAGN_F_GET_NEXT_MESSAGE, 30000 /*ms*/);
3999 DBGFTRACE_CUSTOM(pVM, "nemStop#1: %#x / %#x %#x %#x", rcNt, pMappingHeader->enmVidMsgType, pMappingHeader->cbMessage,
4000 pMsgForTrace->Header.MessageType);
4001 AssertLogRelMsgReturn(rcNt == STATUS_SUCCESS,
4002 ("1st VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %#x\n", rcNt),
4003 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
4004# else
4005 BOOL fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu,
4006 VID_MSHAGN_F_GET_NEXT_MESSAGE, 30000 /*ms*/);
4007 DBGFTRACE_CUSTOM(pVM, "nemStop#1: %d+%#x / %#x %#x %#x", fWait, RTNtLastErrorValue(), pMappingHeader->enmVidMsgType,
4008 pMappingHeader->cbMessage, pMsgForTrace->Header.MessageType);
4009 AssertLogRelMsgReturn(fWait, ("1st VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()),
4010 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
4011# endif
4012
4013 VID_MESSAGE_TYPE enmVidMsgType = pMappingHeader->enmVidMsgType;
4014 if (enmVidMsgType != VidMessageStopRequestComplete)
4015 {
4016 VBOXSTRICTRC rcStrict2 = nemHCWinHandleMessage(pVM, pVCpu, pMappingHeader);
4017 if (rcStrict2 != VINF_SUCCESS && RT_SUCCESS(rcStrict))
4018 rcStrict = rcStrict2;
4019 DBGFTRACE_CUSTOM(pVM, "nemStop#1: handled %#x -> %d", pMsgForTrace->Header.MessageType, VBOXSTRICTRC_VAL(rcStrict));
4020
4021 /*
4022 * Mark it as handled and get the stop request completed message, then mark
4023 * that as handled too. The CPU is then back in the fully stopped state.
4024 */
4025# ifdef IN_RING0
4026 rcNt = nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(pVM, pVCpu,
4027 VID_MSHAGN_F_HANDLE_MESSAGE | VID_MSHAGN_F_GET_NEXT_MESSAGE,
4028 30000 /*ms*/);
4029 DBGFTRACE_CUSTOM(pVM, "nemStop#2: %#x / %#x %#x %#x", rcNt, pMappingHeader->enmVidMsgType, pMappingHeader->cbMessage,
4030 pMsgForTrace->Header.MessageType);
4031 AssertLogRelMsgReturn(rcNt == STATUS_SUCCESS,
4032 ("2nd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %#x\n", rcNt),
4033 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
4034# else
4035 fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu,
4036 VID_MSHAGN_F_HANDLE_MESSAGE | VID_MSHAGN_F_GET_NEXT_MESSAGE, 30000 /*ms*/);
4037 DBGFTRACE_CUSTOM(pVM, "nemStop#2: %d+%#x / %#x %#x %#x", fWait, RTNtLastErrorValue(), pMappingHeader->enmVidMsgType,
4038 pMappingHeader->cbMessage, pMsgForTrace->Header.MessageType);
4039 AssertLogRelMsgReturn(fWait, ("2nd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()),
4040 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
4041# endif
4042
4043 /* It should be a stop request completed message. */
4044 enmVidMsgType = pMappingHeader->enmVidMsgType;
4045 AssertLogRelMsgReturn(enmVidMsgType == VidMessageStopRequestComplete,
4046 ("Unexpected 2nd message following ERROR_VID_STOP_PENDING: %#x LB %#x\n",
4047 enmVidMsgType, pMappingHeader->cbMessage),
4048 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
4049
4050 /*
4051 * Mark the VidMessageStopRequestComplete message as handled.
4052 */
4053# ifdef IN_RING0
4054 rcNt = nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(pVM, pVCpu, VID_MSHAGN_F_HANDLE_MESSAGE, 30000 /*ms*/);
4055 DBGFTRACE_CUSTOM(pVM, "nemStop#3: %#x / %#x %#x %#x", rcNt, pMappingHeader->enmVidMsgType,
4056 pMsgForTrace->Header.MessageType, pMappingHeader->cbMessage, pMsgForTrace->Header.MessageType);
4057 AssertLogRelMsgReturn(rcNt == STATUS_SUCCESS,
4058 ("3rd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %#x\n", rcNt),
4059 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
4060# else
4061 fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu, VID_MSHAGN_F_HANDLE_MESSAGE, 30000 /*ms*/);
4062 DBGFTRACE_CUSTOM(pVM, "nemStop#3: %d+%#x / %#x %#x %#x", fWait, RTNtLastErrorValue(), pMappingHeader->enmVidMsgType,
4063 pMsgForTrace->Header.MessageType, pMappingHeader->cbMessage, pMsgForTrace->Header.MessageType);
4064 AssertLogRelMsgReturn(fWait, ("3rd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()),
4065 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
4066# endif
4067 Log8(("nemHCWinStopCpu: Stopped the CPU (rcStrict=%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict) ));
4068 }
4069 else
4070 {
4071 /** @todo I'm not so sure about this now... */
4072 DBGFTRACE_CUSTOM(pVM, "nemStop#9: %#x %#x %#x", pMappingHeader->enmVidMsgType,
4073 pMappingHeader->cbMessage, pMsgForTrace->Header.MessageType);
4074 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuPendingOdd);
4075 Log8(("nemHCWinStopCpu: Stopped the CPU (rcStrict=%Rrc) - 1st VidMessageSlotHandleAndGetNext got VidMessageStopRequestComplete.\n",
4076 VBOXSTRICTRC_VAL(rcStrict) ));
4077 }
4078 return rcStrict;
4079}
4080#endif /* NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
4081
4082#if defined(NEM_WIN_TEMPLATE_MODE_OWN_RUN_API) || defined(IN_RING3)
4083
4084/**
4085 * Deals with pending interrupt-related force flags; may inject an interrupt.
4086 *
4087 * @returns VBox strict status code.
4088 * @param pVM The cross context VM structure.
4089 * @param pVCpu The cross context per CPU structure.
4090 * @param pfInterruptWindows Where to return interrupt window flags.
4091 */
4092NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleInterruptFF(PVMCC pVM, PVMCPUCC pVCpu, uint8_t *pfInterruptWindows)
4093{
4094 Assert(!TRPMHasTrap(pVCpu));
4095 RT_NOREF_PV(pVM);
4096
4097 /*
4098 * First update APIC. We ASSUME this won't need TPR/CR8.
4099 */
4100 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
4101 {
4102 APICUpdatePendingInterrupts(pVCpu);
4103 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
4104 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
4105 return VINF_SUCCESS;
4106 }
4107
4108 /*
4109 * We don't currently implement SMIs.
4110 */
4111 AssertReturn(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI), VERR_NEM_IPE_0);
4112
4113 /*
4114 * Check if we've got the minimum of state required for deciding whether we
4115 * can inject interrupts and NMIs. If we don't have it, get all we might require
4116 * for injection via IEM.
4117 */
4118 bool const fPendingNmi = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4119 uint64_t fNeedExtrn = CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS
4120 | (fPendingNmi ? CPUMCTX_EXTRN_INHIBIT_NMI : 0);
4121 if (pVCpu->cpum.GstCtx.fExtrn & fNeedExtrn)
4122 {
4123 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "IntFF");
4124 if (rcStrict != VINF_SUCCESS)
4125 return rcStrict;
4126 }
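    /* Honour the interrupt shadow only while RIP still points at the instruction
       (e.g. STI or MOV SS) that set it; otherwise the inhibit flag is stale. */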
4127 bool const fInhibitInterrupts = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
4128 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip;
4129
4130 /*
4131 * NMI? Try deliver it first.
4132 */
4133 if (fPendingNmi)
4134 {
4135 if ( !fInhibitInterrupts
4136 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
4137 {
4138 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "NMI");
4139 if (rcStrict == VINF_SUCCESS)
4140 {
4141 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4142 rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_NMI, TRPM_HARDWARE_INT, 0, 0, 0);
4143 Log8(("Injected NMI on %u (%d)\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
4144 }
4145 return rcStrict;
4146 }
4147 *pfInterruptWindows |= NEM_WIN_INTW_F_NMI;
4148 Log8(("NMI window pending on %u\n", pVCpu->idCpu));
4149 }
4150
4151 /*
4152 * APIC or PIC interrupt?
4153 */
4154 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
4155 {
4156 if ( !fInhibitInterrupts
4157 && pVCpu->cpum.GstCtx.rflags.Bits.u1IF)
4158 {
4159 AssertCompile(NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT & CPUMCTX_EXTRN_APIC_TPR);
4160 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "IRQ");
4161 if (rcStrict == VINF_SUCCESS)
4162 {
4163 uint8_t bInterrupt;
4164 int rc = PDMGetInterrupt(pVCpu, &bInterrupt);
4165 if (RT_SUCCESS(rc))
4166 {
4167 Log8(("Injecting interrupt %#x on %u: %04x:%08RX64 efl=%#x\n", bInterrupt, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags));
4168 rcStrict = IEMInjectTrap(pVCpu, bInterrupt, TRPM_HARDWARE_INT, 0, 0, 0);
4169 Log8(("Injected interrupt %#x on %u (%d)\n", bInterrupt, pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
4170 }
4171 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
4172 {
4173 *pfInterruptWindows |= ((bInterrupt >> 4) << NEM_WIN_INTW_F_PRIO_SHIFT) | NEM_WIN_INTW_F_REGULAR;
4174 Log8(("VERR_APIC_INTR_MASKED_BY_TPR: *pfInterruptWindows=%#x\n", *pfInterruptWindows));
4175 }
4176 else
4177 Log8(("PDMGetInterrupt failed -> %Rrc\n", rc));
4178 }
4179 return rcStrict;
4180 }
4181
4182 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC) && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC))
4183 {
4184 /* If only an APIC interrupt is pending, we need to know its priority. Otherwise we'll
4185 * likely get pointless deliverability notifications with IF=1 but TPR still too high.
4186 */
4187 bool fPendingIntr = false;
4188 uint8_t bTpr = 0;
4189 uint8_t bPendingIntr = 0;
4190 int rc = APICGetTpr(pVCpu, &bTpr, &fPendingIntr, &bPendingIntr);
4191 AssertRC(rc);
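    /* The upper nibble of the vector is its priority class; asking for an exit
       only once that priority becomes deliverable under the current TPR avoids
       pointless deliverability notifications. */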
4192 *pfInterruptWindows |= (bPendingIntr >> 4) << NEM_WIN_INTW_F_PRIO_SHIFT;
4193 Log8(("Interrupt window pending on %u: %#x (bTpr=%#x fPendingIntr=%d bPendingIntr=%#x)\n",
4194 pVCpu->idCpu, *pfInterruptWindows, bTpr, fPendingIntr, bPendingIntr));
4195 }
4196 else
4197 {
4198 *pfInterruptWindows |= NEM_WIN_INTW_F_REGULAR;
4199 Log8(("Interrupt window pending on %u: %#x\n", pVCpu->idCpu, *pfInterruptWindows));
4200 }
4201 }
4202
4203 return VINF_SUCCESS;
4204}
4205
4206
4207/**
4208 * Inner NEM runloop for Windows.
4209 *
4210 * @returns Strict VBox status code.
4211 * @param pVM The cross context VM structure.
4212 * @param pVCpu The cross context per CPU structure.
4213 */
4214NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinRunGC(PVMCC pVM, PVMCPUCC pVCpu)
4215{
4216 LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 <=\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags));
4217# ifdef LOG_ENABLED
4218 if (LogIs3Enabled())
4219 nemHCWinLogState(pVM, pVCpu);
4220# endif
4221
4222 /*
4223 * Try switch to NEM runloop state.
4224 */
4225 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
4226 { /* likely */ }
4227 else
4228 {
4229 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
4230 LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
4231 return VINF_SUCCESS;
4232 }
4233
4234 /*
4235 * The run loop.
4236 *
4237 * The current approach to state updating is to use the sledgehammer and sync
4238 * everything every time. This will be optimized later.
4239 */
4240# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4241 VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader = (VID_MESSAGE_MAPPING_HEADER volatile *)pVCpu->nem.s.pvMsgSlotMapping;
4242# endif
4243 const bool fSingleStepping = DBGFIsStepping(pVCpu);
4244// const uint32_t fCheckVmFFs = !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK
4245// : VM_FF_HP_R0_PRE_HM_STEP_MASK;
4246// const uint32_t fCheckCpuFFs = !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK;
4247 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
4248 for (unsigned iLoop = 0;; iLoop++)
4249 {
4250# if !defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) && !defined(VBOX_WITH_PGM_NEM_MODE)
4251 /*
4252 * Hack alert!
4253 */
4254 uint32_t const cMappedPages = pVM->nem.s.cMappedPages;
4255 if (cMappedPages < pVM->nem.s.cMaxMappedPages)
4256 { /* likely */ }
4257 else
4258 {
4259 PGMPhysNemEnumPagesByState(pVM, pVCpu, NEM_WIN_PAGE_STATE_READABLE, nemHCWinUnmapOnePageCallback, NULL);
4260 Log(("nemHCWinRunGC: Unmapped all; cMappedPages=%u -> %u\n", cMappedPages, pVM->nem.s.cMappedPages));
4261 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapAllPages);
4262 }
4263# endif
4264
4265 /*
4266 * Pending interrupts or such? Need to check and deal with this prior
4267 * to the state syncing.
4268 */
4269 pVCpu->nem.s.fDesiredInterruptWindows = 0;
4270 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC
4271 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
4272 {
4273# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4274 /* Make sure the CPU isn't executing. */
4275 if (pVCpu->nem.s.fHandleAndGetFlags == VID_MSHAGN_F_GET_NEXT_MESSAGE)
4276 {
4277 pVCpu->nem.s.fHandleAndGetFlags = 0;
4278 rcStrict = nemHCWinStopCpu(pVM, pVCpu, rcStrict, pMappingHeader);
4279 if (rcStrict == VINF_SUCCESS)
4280 { /* likely */ }
4281 else
4282 {
4283 LogFlow(("NEM/%u: breaking: nemHCWinStopCpu -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
4284 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
4285 break;
4286 }
4287 }
4288# endif
4289
4290 /* Try inject interrupt. */
4291 rcStrict = nemHCWinHandleInterruptFF(pVM, pVCpu, &pVCpu->nem.s.fDesiredInterruptWindows);
4292 if (rcStrict == VINF_SUCCESS)
4293 { /* likely */ }
4294 else
4295 {
4296 LogFlow(("NEM/%u: breaking: nemHCWinHandleInterruptFF -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
4297 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
4298 break;
4299 }
4300 }
4301
4302# ifndef NEM_WIN_WITH_A20
4303 /*
4304 * Do not execute in hyper-V if the A20 isn't enabled.
4305 */
4306 if (PGMPhysIsA20Enabled(pVCpu))
4307 { /* likely */ }
4308 else
4309 {
4310 rcStrict = VINF_EM_RESCHEDULE_REM;
4311 LogFlow(("NEM/%u: breaking: A20 disabled\n", pVCpu->idCpu));
4312 break;
4313 }
4314# endif
4315
4316 /*
4317 * Ensure that hyper-V has the whole state.
4318 * (We always update the interrupt windows settings when active as hyper-V seems
4319 * to forget about it after an exit.)
4320 */
4321 if ( (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK))
4322 != (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK)
4323 || ( ( pVCpu->nem.s.fDesiredInterruptWindows
4324 || pVCpu->nem.s.fCurrentInterruptWindows != pVCpu->nem.s.fDesiredInterruptWindows)
4325# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4326 && pVCpu->nem.s.fHandleAndGetFlags != VID_MSHAGN_F_GET_NEXT_MESSAGE /* not running */
4327# endif
4328 )
4329 )
4330 {
4331# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4332 AssertMsg(pVCpu->nem.s.fHandleAndGetFlags != VID_MSHAGN_F_GET_NEXT_MESSAGE /* not running */,
4333 ("%#x fExtrn=%#RX64 (%#RX64) fDesiredInterruptWindows=%d fCurrentInterruptWindows=%#x vs %#x\n",
4334 pVCpu->nem.s.fHandleAndGetFlags, pVCpu->cpum.GstCtx.fExtrn, ~pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK),
4335 pVCpu->nem.s.fDesiredInterruptWindows, pVCpu->nem.s.fCurrentInterruptWindows, pVCpu->nem.s.fDesiredInterruptWindows));
4336# endif
4337# ifdef IN_RING0
4338 int rc2 = nemR0WinExportState(pVM, pVCpu, &pVCpu->cpum.GstCtx);
4339# else
4340 int rc2 = nemHCWinCopyStateToHyperV(pVM, pVCpu);
4341# endif
4342 AssertRCReturn(rc2, rc2);
4343 }
4344
4345 /*
4346 * Poll timers and run for a bit.
4347 *
4348 * With the VID approach (ring-0 or ring-3) we can specify a timeout here,
4349 * so we take the time of the next timer event and use that as a deadline.
4350 * The rounding heuristics are "tuned" so that rhel5 (1K timer) will boot fine.
4351 */
4352 /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
4353 * the whole polling job when timers have changed... */
4354 uint64_t offDeltaIgnored;
4355 uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
4356 if ( !VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
4357 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
4358 {
4359# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4360 if (pVCpu->nem.s.fHandleAndGetFlags)
4361 { /* Very likely that the CPU does NOT need starting (pending msg, running). */ }
4362 else
4363 {
4364# ifdef IN_RING0
4365 pVCpu->nem.s.uIoCtlBuf.idCpu = pVCpu->idCpu;
4366 NTSTATUS rcNt = nemR0NtPerformIoControl(pVM, pVCpu, pVM->nemr0.s.IoCtlStartVirtualProcessor.uFunction,
4367 &pVCpu->nem.s.uIoCtlBuf.idCpu, sizeof(pVCpu->nem.s.uIoCtlBuf.idCpu),
4368 NULL, 0);
4369 LogFlow(("NEM/%u: IoCtlStartVirtualProcessor -> %#x\n", pVCpu->idCpu, rcNt));
4370 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("VidStartVirtualProcessor failed for CPU #%u: %#x\n", pVCpu->idCpu, rcNt),
4371 VERR_NEM_IPE_5);
4372# else
4373 AssertLogRelMsgReturn(g_pfnVidStartVirtualProcessor(pVM->nem.s.hPartitionDevice, pVCpu->idCpu),
4374 ("VidStartVirtualProcessor failed for CPU #%u: %u (%#x, rcNt=%#x)\n",
4375 pVCpu->idCpu, RTNtLastErrorValue(), RTNtLastErrorValue(), RTNtLastStatusValue()),
4376 VERR_NEM_IPE_5);
4377# endif
4378 pVCpu->nem.s.fHandleAndGetFlags = VID_MSHAGN_F_GET_NEXT_MESSAGE;
4379 }
4380# endif /* NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
4381
4382 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_WAIT, VMCPUSTATE_STARTED_EXEC_NEM))
4383 {
4384# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4385 uint64_t const nsNow = RTTimeNanoTS();
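    /* Convert the absolute timer deadline into a millisecond wait, rounding
       down by ~100us so we wake up just ahead of the event (see note above). */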
4386 int64_t const cNsNextTimerEvt = nsNextTimerEvt - nsNow;
4387 uint32_t cMsWait;
4388 if (cNsNextTimerEvt < 100000 /* ns */)
4389 cMsWait = 0;
4390 else if ((uint64_t)cNsNextTimerEvt < RT_NS_1SEC)
4391 {
4392 if ((uint32_t)cNsNextTimerEvt < 2*RT_NS_1MS)
4393 cMsWait = 1;
4394 else
4395 cMsWait = ((uint32_t)cNsNextTimerEvt - 100000 /*ns*/) / RT_NS_1MS;
4396 }
4397 else
4398 cMsWait = RT_MS_1SEC;
4399# ifdef IN_RING0
4400 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu = pVCpu->idCpu;
4401 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags = pVCpu->nem.s.fHandleAndGetFlags;
4402 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = cMsWait;
4403 NTSTATUS rcNt = nemR0NtPerformIoControl(pVM, pVCpu, pVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
4404 &pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
4405 pVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.cbInput,
4406 NULL, 0);
4407 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
4408 if (rcNt == STATUS_SUCCESS)
4409# else
4410 BOOL fRet = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu,
4411 pVCpu->nem.s.fHandleAndGetFlags, cMsWait);
4412 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
4413 if (fRet)
4414# endif
4415# else
4416# ifdef LOG_ENABLED
4417 if (LogIsFlowEnabled())
4418 {
4419 static const WHV_REGISTER_NAME s_aNames[6] = { WHvX64RegisterCs, WHvX64RegisterRip, WHvX64RegisterRflags,
4420 WHvX64RegisterSs, WHvX64RegisterRsp, WHvX64RegisterCr0 };
4421 WHV_REGISTER_VALUE aRegs[RT_ELEMENTS(s_aNames)] = {0};
4422 WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, s_aNames, RT_ELEMENTS(s_aNames), aRegs);
4423 LogFlow(("NEM/%u: Entry @ %04x:%08RX64 IF=%d EFL=%#RX64 SS:RSP=%04x:%08RX64 cr0=%RX64\n",
4424 pVCpu->idCpu, aRegs[0].Segment.Selector, aRegs[1].Reg64, RT_BOOL(aRegs[2].Reg64 & X86_EFL_IF),
4425 aRegs[2].Reg64, aRegs[3].Segment.Selector, aRegs[4].Reg64, aRegs[5].Reg64));
4426 }
4427# endif
4428 WHV_RUN_VP_EXIT_CONTEXT ExitReason = {0};
4429 TMNotifyStartOfExecution(pVM, pVCpu);
4430
4431 HRESULT hrc = WHvRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, &ExitReason, sizeof(ExitReason));
4432
4433 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
4434 TMNotifyEndOfExecution(pVM, pVCpu, ASMReadTSC());
4435# ifdef LOG_ENABLED
4436 LogFlow(("NEM/%u: Exit @ %04X:%08RX64 IF=%d CR8=%#x Reason=%#x\n", pVCpu->idCpu, ExitReason.VpContext.Cs.Selector,
4437 ExitReason.VpContext.Rip, RT_BOOL(ExitReason.VpContext.Rflags & X86_EFL_IF), ExitReason.VpContext.Cr8,
4438 ExitReason.ExitReason));
4439# endif
4440 if (SUCCEEDED(hrc))
4441# endif
4442 {
4443 /*
4444 * Deal with the message.
4445 */
4446# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4447 rcStrict = nemHCWinHandleMessage(pVM, pVCpu, pMappingHeader);
4448 pVCpu->nem.s.fHandleAndGetFlags |= VID_MSHAGN_F_HANDLE_MESSAGE;
4449# else
4450 rcStrict = nemR3WinHandleExit(pVM, pVCpu, &ExitReason);
4451# endif
4452 if (rcStrict == VINF_SUCCESS)
4453 { /* hopefully likely */ }
4454 else
4455 {
4456 LogFlow(("NEM/%u: breaking: nemHCWinHandleMessage -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
4457 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
4458 break;
4459 }
4460 }
4461 else
4462 {
4463# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4464
4465 /* VID.SYS merges STATUS_ALERTED and STATUS_USER_APC into STATUS_TIMEOUT,
4466 so after NtAlertThread we end up here with a STATUS_TIMEOUT. And yeah,
4467 the error code conversion is into WAIT_XXX, i.e. NT status codes. */
4468# ifndef IN_RING0
4469 DWORD rcNt = GetLastError();
4470# endif
4471 LogFlow(("NEM/%u: VidMessageSlotHandleAndGetNext -> %#x\n", pVCpu->idCpu, rcNt));
4472 AssertLogRelMsgReturn( rcNt == STATUS_TIMEOUT
4473 || rcNt == STATUS_ALERTED /* just in case */
4474 || rcNt == STATUS_USER_APC /* ditto */
4475 || rcNt == STATUS_KERNEL_APC /* ditto */
4476 , ("VidMessageSlotHandleAndGetNext failed for CPU #%u: %#x (%u)\n",
4477 pVCpu->idCpu, rcNt, rcNt),
4478 VERR_NEM_IPE_0);
4479 pVCpu->nem.s.fHandleAndGetFlags = VID_MSHAGN_F_GET_NEXT_MESSAGE;
4480 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatGetMsgTimeout);
4481# else
4482 AssertLogRelMsgFailedReturn(("WHvRunVirtualProcessor failed for CPU #%u: %#x (%u)\n",
4483 pVCpu->idCpu, hrc, GetLastError()),
4484 VERR_NEM_IPE_0);
4485# endif
4486 }
4487
4488 /*
4489 * If no relevant FFs are pending, loop.
4490 */
4491 if ( !VM_FF_IS_ANY_SET( pVM, !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
4492 && !VMCPU_FF_IS_ANY_SET(pVCpu, !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
4493 continue;
4494
4495 /** @todo Try handle pending flags, not just return to EM loops. Take care
4496 * not to set important RCs here unless we've handled a message. */
4497 LogFlow(("NEM/%u: breaking: pending FF (%#x / %#RX64)\n",
4498 pVCpu->idCpu, pVM->fGlobalForcedActions, (uint64_t)pVCpu->fLocalForcedActions));
4499 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPost);
4500 }
4501 else
4502 {
4503 LogFlow(("NEM/%u: breaking: canceled %d (pre exec)\n", pVCpu->idCpu, VMCPU_GET_STATE(pVCpu) ));
4504 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnCancel);
4505 }
4506 }
4507 else
4508 {
4509 LogFlow(("NEM/%u: breaking: pending FF (pre exec)\n", pVCpu->idCpu));
4510 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPre);
4511 }
4512 break;
4513 } /* the run loop */
4514
4515
4516 /*
4517 * If the CPU is running, make sure to stop it before we try sync back the
4518 * state and return to EM. We don't sync back the whole state if we can help it.
4519 */
4520# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4521 if (pVCpu->nem.s.fHandleAndGetFlags == VID_MSHAGN_F_GET_NEXT_MESSAGE)
4522 {
4523 pVCpu->nem.s.fHandleAndGetFlags = 0;
4524 rcStrict = nemHCWinStopCpu(pVM, pVCpu, rcStrict, pMappingHeader);
4525 }
4526# endif
4527
4528 if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
4529 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
4530
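    /* Bits set in fExtrn mark state still held by Hyper-V only; import what EM
       may want to look at before we leave the run loop. */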
4531 if (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)))
4532 {
4533 /* Try anticipate what we might need. */
4534 uint64_t fImport = IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI;
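        /* An EM scheduling status or a failure means we are going back to the
           outer loops, where just about anything may be inspected: get it all. */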
4535 if ( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
4536 || RT_FAILURE(rcStrict))
4537 fImport = CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT);
4538# ifdef IN_RING0 /* Ring-3 I/O port access optimizations: */
4539 else if ( rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
4540 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
4541 fImport = CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_INHIBIT_INT;
4542 else if (rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
4543 fImport = CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_INHIBIT_INT;
4544# endif
4545 else if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_APIC
4546 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
4547 fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;
4548
4549 if (pVCpu->cpum.GstCtx.fExtrn & fImport)
4550 {
4551# ifdef IN_RING0
4552 int rc2 = nemR0WinImportState(pVM, pVCpu, &pVCpu->cpum.GstCtx, fImport | CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT,
4553 true /*fCanUpdateCr3*/);
4554 if (RT_SUCCESS(rc2))
4555 pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
4556 else if (rc2 == VERR_NEM_FLUSH_TLB)
4557 {
4558 pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
4559 if (rcStrict == VINF_SUCCESS || rcStrict == -rc2)
4560 rcStrict = -rc2;
4561 else
4562 {
4563 pVCpu->nem.s.rcPending = -rc2;
4564 LogFlow(("NEM/%u: rcPending=%Rrc (rcStrict=%Rrc)\n", pVCpu->idCpu, rc2, VBOXSTRICTRC_VAL(rcStrict) ));
4565 }
4566 }
4567# else
4568 int rc2 = nemHCWinCopyStateFromHyperV(pVM, pVCpu, fImport | CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT);
4569 if (RT_SUCCESS(rc2))
4570 pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
4571# endif
4572 else if (RT_SUCCESS(rcStrict))
4573 rcStrict = rc2;
4574 if (!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT))))
4575 pVCpu->cpum.GstCtx.fExtrn = 0;
4576 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
4577 }
4578 else
4579 {
4580 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
4581 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
4582 }
4583 }
4584 else
4585 {
4586 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
4587 pVCpu->cpum.GstCtx.fExtrn = 0;
4588 }
4589
4590 LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 => %Rrc\n",
4591 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, VBOXSTRICTRC_VAL(rcStrict) ));
4592 return rcStrict;
4593}
4594
4595#endif /* defined(NEM_WIN_TEMPLATE_MODE_OWN_RUN_API) || defined(IN_RING3) */
4596#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING3)
4597
4598/**
4599 * @callback_method_impl{FNPGMPHYSNEMCHECKPAGE}
4600 */
4601NEM_TMPL_STATIC DECLCALLBACK(int) nemHCWinUnsetForA20CheckerCallback(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys,
4602 PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
4603{
4604 /* We'll just unmap the memory. */
4605 if (pInfo->u2NemState > NEM_WIN_PAGE_STATE_UNMAPPED)
4606 {
4607# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
4608 int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys);
4609 AssertRC(rc);
4610 if (RT_SUCCESS(rc))
4611# else
4612 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
4613 if (SUCCEEDED(hrc))
4614# endif
4615 {
4616 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
4617 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4618 Log5(("NEM GPA unmapped/A20: %RGp (was %s, cMappedPages=%u)\n", GCPhys, g_apszPageStates[pInfo->u2NemState], cMappedPages));
4619 pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
4620 }
4621 else
4622 {
4623 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
4624# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
4625 LogRel(("nemHCWinUnsetForA20CheckerCallback/unmap: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));
4626 return rc;
4627# else
4628 LogRel(("nemHCWinUnsetForA20CheckerCallback/unmap: GCPhys=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
4629 GCPhys, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
4630 return VERR_NEM_IPE_2;
4631# endif
4632 }
4633 }
4634 RT_NOREF(pVCpu, pvUser);
4635 return VINF_SUCCESS;
4636}
4637
4638
4639/**
4640 * Unmaps a page from Hyper-V for the purpose of emulating A20 gate behavior.
4641 *
4642 * @returns The PGMPhysNemPageInfoChecker result.
4643 * @param pVM The cross context VM structure.
4644 * @param pVCpu The cross context virtual CPU structure.
4645 * @param GCPhys The page to unmap.
4646 */
4647NEM_TMPL_STATIC int nemHCWinUnmapPageForA20Gate(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys)
4648{
4649 PGMPHYSNEMPAGEINFO Info;
4650 return PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhys, false /*fMakeWritable*/, &Info,
4651 nemHCWinUnsetForA20CheckerCallback, NULL);
4652}
4653
4654#endif /* defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING3) */
4655
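/**
 * Notification that a physical access handler has been registered.
 *
 * Currently nothing to do here beyond logging the range.
 */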
4656void nemHCNativeNotifyHandlerPhysicalRegister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
4657{
4658 Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
4659 NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb);
4660}
4661
4662
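/**
 * Notification that a physical access handler has been deregistered.
 *
 * In the simple (ring-3, non-hypercall) memory mode the backing RAM is mapped
 * straight back into the partition with full access; in the other modes only
 * our page state tracking is reset.
 */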
4663VMM_INT_DECL(void) NEMHCNotifyHandlerPhysicalDeregister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
4664 RTR3PTR pvMemR3, uint8_t *pu2State)
4665{
4666 Log5(("NEMHCNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d pvMemR3=%p pu2State=%p (%d)\n",
4667 GCPhys, cb, enmKind, pvMemR3, pu2State, *pu2State));
4668
4669 *pu2State = UINT8_MAX;
4670#if !defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) && defined(VBOX_WITH_PGM_NEM_MODE) && defined(IN_RING3)
4671 if (pvMemR3)
4672 {
4673 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
4674 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvMemR3, GCPhys, cb,
4675 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute | WHvMapGpaRangeFlagWrite);
4676 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
4677 if (SUCCEEDED(hrc))
4678 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
4679 else
4680 AssertLogRelMsgFailed(("NEMHCNotifyHandlerPhysicalDeregister: WHvMapGpaRange(,%p,%RGp,%RGp,) -> %Rhrc\n",
4681 pvMemR3, GCPhys, cb, hrc));
4682 }
4683 RT_NOREF(enmKind);
4684#else
4685 RT_NOREF(pVM, enmKind, GCPhys, cb, pvMemR3);
4686#endif
4687}
4688
4689
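/**
 * Notification that a physical access handler has been modified/relocated.
 *
 * Currently nothing to do here beyond logging the old and new ranges.
 */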
4690void nemHCNativeNotifyHandlerPhysicalModify(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
4691 RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
4692{
4693 Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
4694 GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
4695 NOREF(pVM); NOREF(enmKind); NOREF(GCPhysOld); NOREF(GCPhysNew); NOREF(cb); NOREF(fRestoreAsRAM);
4696}
4697
4698
4699#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING3)
4700/**
4701 * Worker that maps pages into Hyper-V.
4702 *
4703 * This is used by the PGM physical page notifications as well as the memory
4704 * access VMEXIT handlers.
4705 *
4706 * @returns VBox status code.
4707 * @param pVM The cross context VM structure.
4708 * @param pVCpu The cross context virtual CPU structure of the
4709 * calling EMT.
4710 * @param GCPhysSrc The source page address.
4711 * @param GCPhysDst The hyper-V destination page. This may differ from
4712 * GCPhysSrc when A20 is disabled.
4713 * @param fPageProt NEM_PAGE_PROT_XXX.
4714 * @param pu2State Our page state (input/output).
4715 * @param fBackingChanged Set if the page backing is being changed.
4716 * @thread EMT(pVCpu)
4717 */
4718NEM_TMPL_STATIC int nemHCNativeSetPhysPage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
4719 uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged)
4720{
4721# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
4722 /*
4723 * When using the hypercalls instead of the ring-3 APIs, we don't need to
4724 * unmap memory before modifying it. We still want to track the state though,
4725 * since unmap will fail when called on an unmapped page and we don't want to redo
4726 * upgrades/downgrades.
4727 */
4728 uint8_t const u2OldState = *pu2State;
4729 int rc;
4730 if (fPageProt == NEM_PAGE_PROT_NONE)
4731 {
4732 if (u2OldState > NEM_WIN_PAGE_STATE_UNMAPPED)
4733 {
4734 rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
4735 if (RT_SUCCESS(rc))
4736 {
4737 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
4738 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
4739 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4740 Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
4741 }
4742 else
4743 {
4744 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
4745 AssertLogRelMsgFailed(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4746 }
4747 }
4748 else
4749 rc = VINF_SUCCESS;
4750 }
4751 else if (fPageProt & NEM_PAGE_PROT_WRITE)
4752 {
4753 if (u2OldState != NEM_WIN_PAGE_STATE_WRITABLE || fBackingChanged)
4754 {
4755 rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
4756 HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE
4757 | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
4758 if (RT_SUCCESS(rc))
4759 {
4760 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
4761 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPage);
4762 uint32_t cMappedPages = u2OldState <= NEM_WIN_PAGE_STATE_UNMAPPED
4763 ? ASMAtomicIncU32(&pVM->nem.s.cMappedPages) : pVM->nem.s.cMappedPages;
4764 Log5(("NEM GPA writable/set: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
4765 NOREF(cMappedPages);
4766 }
4767 else
4768 {
4769 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
4770 AssertLogRelMsgFailed(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4771 }
4772 }
4773 else
4774 rc = VINF_SUCCESS;
4775 }
4776 else
4777 {
4778 if (u2OldState != NEM_WIN_PAGE_STATE_READABLE || fBackingChanged)
4779 {
4780 rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
4781 HV_MAP_GPA_READABLE | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
4782 if (RT_SUCCESS(rc))
4783 {
4784 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
4785 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPage);
4786 uint32_t cMappedPages = u2OldState <= NEM_WIN_PAGE_STATE_UNMAPPED
4787 ? ASMAtomicIncU32(&pVM->nem.s.cMappedPages) : pVM->nem.s.cMappedPages;
4788 Log5(("NEM GPA read+exec/set: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
4789 NOREF(cMappedPages);
4790 }
4791 else
4792 {
4793 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
4794 AssertLogRelMsgFailed(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4795 }
4796 }
4797 else
4798 rc = VINF_SUCCESS;
4799 }
4800
4801 return rc;
4802
4803# else /* !NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
4804 /*
4805 * Looks like we need to unmap a page before we can change the backing
4806 * or even modify the protection. This is going to be *REALLY* efficient.
4807 * PGM lends us two bits to keep track of the state here.
4808 */
4809 RT_NOREF(pVCpu);
4810 uint8_t const u2OldState = *pu2State;
4811 uint8_t const u2NewState = fPageProt & NEM_PAGE_PROT_WRITE ? NEM_WIN_PAGE_STATE_WRITABLE
4812 : fPageProt & NEM_PAGE_PROT_READ ? NEM_WIN_PAGE_STATE_READABLE : NEM_WIN_PAGE_STATE_UNMAPPED;
4813 if ( fBackingChanged
4814 || u2NewState != u2OldState)
4815 {
4816 if (u2OldState > NEM_WIN_PAGE_STATE_UNMAPPED)
4817 {
4818# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
4819 int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
4820 AssertRC(rc);
4821 if (RT_SUCCESS(rc))
4822 {
4823 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
4824 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
4825 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4826 if (u2NewState == NEM_WIN_PAGE_STATE_UNMAPPED)
4827 {
4828 Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n",
4829 GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
4830 return VINF_SUCCESS;
4831 }
4832 }
4833 else
4834 {
4835 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
4836 LogRel(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4837 return rc;
4838 }
4839# else
4840 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
4841 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst, X86_PAGE_SIZE);
4842 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
4843 if (SUCCEEDED(hrc))
4844 {
4845 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
4846 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
4847 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4848 if (u2NewState == NEM_WIN_PAGE_STATE_UNMAPPED)
4849 {
4850 Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n",
4851 GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
4852 return VINF_SUCCESS;
4853 }
4854 }
4855 else
4856 {
4857 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
4858 LogRel(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
4859 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
4860 return VERR_NEM_INIT_FAILED;
4861 }
4862# endif
4863 }
4864 }
4865
4866 /*
4867 * Writeable mapping?
4868 */
4869 if (fPageProt & NEM_PAGE_PROT_WRITE)
4870 {
4871# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
4872 int rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
4873 HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE
4874 | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
4875 AssertRC(rc);
4876 if (RT_SUCCESS(rc))
4877 {
4878 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
4879 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPage);
4880 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4881 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
4882 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
4883 return VINF_SUCCESS;
4884 }
4885 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
4886 LogRel(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4887 return rc;
4888# else
4889 void *pvPage;
4890 int rc = nemR3NativeGCPhys2R3PtrWriteable(pVM, GCPhysSrc, &pvPage);
4891 if (RT_SUCCESS(rc))
4892 {
4893 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvPage, GCPhysDst, X86_PAGE_SIZE,
4894 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute | WHvMapGpaRangeFlagWrite);
4895 if (SUCCEEDED(hrc))
4896 {
4897 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
4898 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPage);
4899 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4900 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
4901 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
4902 return VINF_SUCCESS;
4903 }
4904 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
4905 LogRel(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
4906 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
4907 return VERR_NEM_INIT_FAILED;
4908 }
4909 LogRel(("nemHCNativeSetPhysPage/writable: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
4910 return rc;
4911# endif
4912 }
4913
4914 if (fPageProt & NEM_PAGE_PROT_READ)
4915 {
4916# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
4917 int rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
4918 HV_MAP_GPA_READABLE | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
4919 AssertRC(rc);
4920 if (RT_SUCCESS(rc))
4921 {
4922 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
4923 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPage);
4924 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4925 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
4926 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
4927 return VINF_SUCCESS;
4928 }
4929 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
4930 LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4931 return rc;
4932# else
4933 const void *pvPage;
4934 int rc = nemR3NativeGCPhys2R3PtrReadOnly(pVM, GCPhysSrc, &pvPage);
4935 if (RT_SUCCESS(rc))
4936 {
4937 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRangePage, a);
4938 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, (void *)pvPage, GCPhysDst, X86_PAGE_SIZE,
4939 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
4940 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRangePage, a);
4941 if (SUCCEEDED(hrc))
4942 {
4943 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
4944 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPage);
4945 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4946 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
4947 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
4948 return VINF_SUCCESS;
4949 }
4950 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
4951 LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
4952 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
4953 return VERR_NEM_INIT_FAILED;
4954 }
4955 LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
4956 return rc;
4957# endif
4958 }
4959
4960 /* We already unmapped it above. */
4961 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
4962 return VINF_SUCCESS;
4963# endif /* !NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
4964}
4965#endif /* defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING3) */
4966
4967
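/**
 * Worker that simply unmaps a page from Hyper-V, updating our page state
 * tracking without regard to the current protection or backing.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhysDst   The guest physical page to unmap.
 * @param   pu2State    Our page state (input/output).
 */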
4968NEM_TMPL_STATIC int nemHCJustUnmapPageFromHyperV(PVMCC pVM, RTGCPHYS GCPhysDst, uint8_t *pu2State)
4969{
4970 if (*pu2State <= NEM_WIN_PAGE_STATE_UNMAPPED)
4971 {
4972 Log5(("nemHCJustUnmapPageFromHyperV: %RGp == unmapped\n", GCPhysDst));
4973 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
4974 return VINF_SUCCESS;
4975 }
4976
4977#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES)
4978 PVMCPUCC pVCpu = VMMGetCpu(pVM);
4979 int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
4980 AssertRC(rc);
4981 if (RT_SUCCESS(rc))
4982 {
4983 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
4984 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4985 Log5(("NEM GPA unmapped/just: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[*pu2State], cMappedPages));
4986 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
4987 return VINF_SUCCESS;
4988 }
4989 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
4990 LogRel(("nemHCJustUnmapPageFromHyperV/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4991 return rc;
4992
4993#elif defined(IN_RING3)
4994 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
4995 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, X86_PAGE_SIZE);
4996 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
4997 if (SUCCEEDED(hrc))
4998 {
4999 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
5000 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
5001 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
5002 Log5(("nemHCJustUnmapPageFromHyperV: %RGp => unmapped (total %u)\n", GCPhysDst, cMappedPages));
5003 return VINF_SUCCESS;
5004 }
5005 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
5006 LogRel(("nemHCJustUnmapPageFromHyperV(%RGp): failed! hrc=%Rhrc (%#x) Last=%#x/%u\n",
5007 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
5008 return VERR_NEM_IPE_6;
5009#else
5010 RT_NOREF(pVM, GCPhysDst, pu2State);
5011 LogRel(("nemHCJustUnmapPageFromHyperV(%RGp): Why are we here?!?\n", GCPhysDst));
5012 return VERR_NEM_IPE_6;
5013#endif
5014}
5015
5016
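/**
 * Notification that a RAM page has just been allocated.
 *
 * Depending on the backend mode this remaps the page with the requested
 * protection or simply unmaps it so it is faulted in again lazily, taking the
 * A20 gate into account when NEM_WIN_WITH_A20 is defined.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The guest physical address of the page.
 * @param   HCPhys      The host physical address (unused here).
 * @param   fPageProt   NEM_PAGE_PROT_XXX.
 * @param   enmType     The page type (unused here).
 * @param   pu2State    Our page state (input/output).
 */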
5017int nemHCNativeNotifyPhysPageAllocated(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
5018 PGMPAGETYPE enmType, uint8_t *pu2State)
5019{
5020 Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
5021 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
5022 RT_NOREF_PV(HCPhys); RT_NOREF_PV(enmType);
5023
5024 int rc;
5025#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
5026 PVMCPUCC pVCpu = VMMGetCpu(pVM);
5027# ifdef NEM_WIN_WITH_A20
5028 if ( pVM->nem.s.fA20Enabled
5029 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
5030# endif
5031 rc = nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
5032# ifdef NEM_WIN_WITH_A20
5033 else
5034 {
5035 /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
5036 rc = nemHCWinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
5037 if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys) && RT_SUCCESS(rc))
5038 rc = nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
5039
5040 }
5041# endif
5042#else
5043 RT_NOREF_PV(fPageProt);
5044# ifdef NEM_WIN_WITH_A20
5045 if ( pVM->nem.s.fA20Enabled
5046 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
5047# endif
5048 rc = nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
5049# ifdef NEM_WIN_WITH_A20
5050 else if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
5051 rc = nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
5052 else
5053 rc = VINF_SUCCESS; /* ignore since we've got the alias page at this address. */
5054# endif
5055#endif
5056 return rc;
5057}
5058
5059
5060VMM_INT_DECL(void) NEMHCNotifyPhysPageProtChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, RTR3PTR pvR3, uint32_t fPageProt,
5061 PGMPAGETYPE enmType, uint8_t *pu2State)
5062{
5063 Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
5064 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
5065 Assert(VM_IS_NEM_ENABLED(pVM));
5066 RT_NOREF(HCPhys, enmType, pvR3);
5067
5068#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
5069 PVMCPUCC pVCpu = VMMGetCpu(pVM);
5070# ifdef NEM_WIN_WITH_A20
5071 if ( pVM->nem.s.fA20Enabled
5072 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
5073# endif
5074 nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, false /*fBackingChanged*/);
5075# ifdef NEM_WIN_WITH_A20
5076 else
5077 {
5078 /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
5079 nemHCWinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
5080 if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
5081 nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, false /*fBackingChanged*/);
5082 }
5083# endif
5084#else
5085 RT_NOREF_PV(fPageProt);
5086# ifdef NEM_WIN_WITH_A20
5087 if ( pVM->nem.s.fA20Enabled
5088 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
5089# endif
5090 nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
5091# ifdef NEM_WIN_WITH_A20
5092 else if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
5093 nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
5094 /* else: ignore since we've got the alias page at this address. */
5095# endif
5096#endif
5097}
5098
5099
5100VMM_INT_DECL(void) NEMHCNotifyPhysPageChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
5101 RTR3PTR pvNewR3, uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
5102{
5103 Log5(("nemHCNativeNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp pvNewR3=%p fPageProt=%#x enmType=%d *pu2State=%d\n",
5104 GCPhys, HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType, *pu2State));
5105 Assert(VM_IS_NEM_ENABLED(pVM));
5106 RT_NOREF(HCPhysPrev, HCPhysNew, pvNewR3, enmType);
5107
5108#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
5109 PVMCPUCC pVCpu = VMMGetCpu(pVM);
5110# ifdef NEM_WIN_WITH_A20
5111 if ( pVM->nem.s.fA20Enabled
5112 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
5113# endif
5114 nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
5115# ifdef NEM_WIN_WITH_A20
5116 else
5117 {
5118 /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
5119 nemHCWinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
5120 if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
5121 nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
5122 }
5123# endif
5124#else
5125 RT_NOREF_PV(fPageProt);
5126# ifdef NEM_WIN_WITH_A20
5127 if ( pVM->nem.s.fA20Enabled
5128 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
5129# endif
5130 nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
5131# ifdef NEM_WIN_WITH_A20
5132 else if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
5133 nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
5134 /* else: ignore since we've got the alias page at this address. */
5135# endif
5136#endif
5137}
5138
5139
5140/**
5141 * Returns features supported by the NEM backend.
5142 *
5143 * @returns Flags of features supported by the native NEM backend.
5144 * @param pVM The cross context VM structure.
5145 */
5146VMM_INT_DECL(uint32_t) NEMHCGetFeatures(PVMCC pVM)
5147{
5148 RT_NOREF(pVM);
5149 /** @todo Make use of the WHvGetVirtualProcessorXsaveState/WHvSetVirtualProcessorXsaveState
5150 * interface added in 2019 to enable passthrough of xsave/xrstor (and depending) features to the guest. */
5151 /** @todo Is NEM_FEAT_F_FULL_GST_EXEC always true? */
5152 return NEM_FEAT_F_NESTED_PAGING | NEM_FEAT_F_FULL_GST_EXEC;
5153}