VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/NEMAllNativeTemplate-win.cpp.h@87626

Last change on this file since 87626 was 86117, checked in by vboxsync, 4 years ago

NEM: Some adjustments to r140349.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 232.7 KB
/* $Id: NEMAllNativeTemplate-win.cpp.h 86117 2020-09-14 08:03:03Z vboxsync $ */
/** @file
 * NEM - Native execution manager, Windows code template ring-0/3.
 */

/*
 * Copyright (C) 2018-2020 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** Copy back a segment from hyper-V. */
#define NEM_WIN_COPY_BACK_SEG(a_Dst, a_Src) \
    do { \
        (a_Dst).u64Base  = (a_Src).Base; \
        (a_Dst).u32Limit = (a_Src).Limit; \
        (a_Dst).ValidSel = (a_Dst).Sel = (a_Src).Selector; \
        (a_Dst).Attr.u   = (a_Src).Attributes; \
        (a_Dst).fFlags   = CPUMSELREG_FLAGS_VALID; \
    } while (0)
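
/* Usage sketch (illustrative only; names match those used later in this file):
 * copying CS back from a batch-read register value would look like
 *     NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.cs, aValues[iReg].Segment);
 * Setting fFlags to CPUMSELREG_FLAGS_VALID marks the hidden selector fields as
 * up to date, so CPUM won't try to refetch them from the descriptor tables. */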

/** @def NEMWIN_ASSERT_MSG_REG_VAL
 * Asserts the correctness of a register value in a message/context.
 */
#if 0
# define NEMWIN_NEED_GET_REGISTER
# if defined(IN_RING0) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
#  define NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_enmReg, a_Expr, a_Msg) \
    do { \
        HV_REGISTER_VALUE TmpVal; \
        nemHCWinGetRegister(a_pVCpu, a_enmReg, &TmpVal); \
        AssertMsg(a_Expr, a_Msg); \
    } while (0)
# else
#  define NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_enmReg, a_Expr, a_Msg) \
    do { \
        WHV_REGISTER_VALUE TmpVal; \
        nemR3WinGetRegister(a_pVCpu, a_enmReg, &TmpVal); \
        AssertMsg(a_Expr, a_Msg); \
    } while (0)
# endif
#else
# define NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_enmReg, a_Expr, a_Msg) do { } while (0)
#endif

/** @def NEMWIN_ASSERT_MSG_REG_VAL64
 * Asserts the correctness of a 64-bit register value in a message/context.
 */
#define NEMWIN_ASSERT_MSG_REG_VAL64(a_pVCpu, a_enmReg, a_u64Val) \
    NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_enmReg, (a_u64Val) == TmpVal.Reg64, \
                              (#a_u64Val "=%#RX64, expected %#RX64\n", (a_u64Val), TmpVal.Reg64))
/** @def NEMWIN_ASSERT_MSG_REG_SEG
 * Asserts the correctness of a segment register value in a message/context.
 */
#define NEMWIN_ASSERT_MSG_REG_SEG(a_pVCpu, a_enmReg, a_SReg) \
    NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_enmReg, \
                                 (a_SReg).Base       == TmpVal.Segment.Base \
                              && (a_SReg).Limit      == TmpVal.Segment.Limit \
                              && (a_SReg).Selector   == TmpVal.Segment.Selector \
                              && (a_SReg).Attributes == TmpVal.Segment.Attributes, \
                              ( #a_SReg "=%#RX16 {%#RX64 LB %#RX32,%#RX16} expected %#RX16 {%#RX64 LB %#RX32,%#RX16}\n", \
                                (a_SReg).Selector, (a_SReg).Base, (a_SReg).Limit, (a_SReg).Attributes, \
                                TmpVal.Segment.Selector, TmpVal.Segment.Base, TmpVal.Segment.Limit, TmpVal.Segment.Attributes))
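
/* Usage sketch (illustrative; register name assumed): after copying RIP out of
 * an exit message, one could cross-check it against Hyper-V with
 *     NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRip, pVCpu->cpum.GstCtx.rip);
 * The check only compiles to real work when the #if 0 above is flipped on,
 * which also defines NEMWIN_NEED_GET_REGISTER to pull in the worker helpers. */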


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** NEM_WIN_PAGE_STATE_XXX names. */
NEM_TMPL_STATIC const char * const g_apszPageStates[4] = { "not-set", "unmapped", "readable", "writable" };

/** HV_INTERCEPT_ACCESS_TYPE names. */
static const char * const g_apszHvInterceptAccessTypes[4] = { "read", "write", "exec", "!undefined!" };


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
NEM_TMPL_STATIC int nemHCNativeSetPhysPage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
                                           uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged);



#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES

/**
 * Wrapper around VMMR0_DO_NEM_MAP_PAGES for a single page.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the caller.
 * @param   GCPhysSrc   The source page.  Does not need to be page aligned.
 * @param   GCPhysDst   The destination page.  Same as @a GCPhysSrc except for
 *                      when A20 is disabled.
 * @param   fFlags      HV_MAP_GPA_XXX.
 */
DECLINLINE(int) nemHCWinHypercallMapPage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst, uint32_t fFlags)
{
#ifdef IN_RING0
    /** @todo optimize further, caller generally has the physical address. */
    return nemR0WinMapPages(pVM, pVCpu,
                            GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
                            GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
                            1, fFlags);
#else
    pVCpu->nem.s.Hypercall.MapPages.GCPhysSrc = GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
    pVCpu->nem.s.Hypercall.MapPages.GCPhysDst = GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
    pVCpu->nem.s.Hypercall.MapPages.cPages    = 1;
    pVCpu->nem.s.Hypercall.MapPages.fFlags    = fFlags;
    return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_MAP_PAGES, 0, NULL);
#endif
}
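
/* Usage sketch (illustrative; flag combination assumed): mapping a page
 * read/write for the guest might look like
 *     int rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhys, GCPhys,
 *                                       HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE);
 * In ring-3 the request is marshalled through VMMR3CallR0Emt(), so the actual
 * hypercall is always issued from ring-0. */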


/**
 * Wrapper around VMMR0_DO_NEM_UNMAP_PAGES for a single page.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the caller.
 * @param   GCPhys  The page to unmap.  Does not need to be page aligned.
 */
DECLINLINE(int) nemHCWinHypercallUnmapPage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys)
{
# ifdef IN_RING0
    return nemR0WinUnmapPages(pVM, pVCpu, GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, 1);
# else
    pVCpu->nem.s.Hypercall.UnmapPages.GCPhys = GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
    pVCpu->nem.s.Hypercall.UnmapPages.cPages = 1;
    return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_UNMAP_PAGES, 0, NULL);
# endif
}

#endif /* NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
#ifndef IN_RING0

NEM_TMPL_STATIC int nemHCWinCopyStateToHyperV(PVMCC pVM, PVMCPUCC pVCpu)
{
# if defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) || defined(NEM_WIN_WITH_RING0_RUNLOOP)
#  if !defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) && defined(NEM_WIN_WITH_RING0_RUNLOOP)
    if (pVM->nem.s.fUseRing0Runloop)
#  endif
    {
        int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_EXPORT_STATE, 0, NULL);
        AssertLogRelRCReturn(rc, rc);
        return rc;
    }
# endif
# ifndef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS

    /*
     * The following is very similar to what nemR0WinExportState() does.
     */
    WHV_REGISTER_NAME  aenmNames[128];
    WHV_REGISTER_VALUE aValues[128];

    uint64_t const fWhat = ~pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK);
    if (   !fWhat
        && pVCpu->nem.s.fCurrentInterruptWindows == pVCpu->nem.s.fDesiredInterruptWindows)
        return VINF_SUCCESS;
    uintptr_t iReg = 0;

#  define ADD_REG64(a_enmName, a_uValue) do { \
            aenmNames[iReg]             = (a_enmName); \
            aValues[iReg].Reg128.High64 = 0; \
            aValues[iReg].Reg64         = (a_uValue); \
            iReg++; \
        } while (0)
#  define ADD_REG128(a_enmName, a_uValueLo, a_uValueHi) do { \
            aenmNames[iReg]             = (a_enmName); \
            aValues[iReg].Reg128.Low64  = (a_uValueLo); \
            aValues[iReg].Reg128.High64 = (a_uValueHi); \
            iReg++; \
        } while (0)
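
/* A minimal sketch of the pattern these helpers implement: register writes are
 * batched into parallel name/value arrays and flushed with one
 * WHvSetVirtualProcessorRegisters() call at the end of the function, e.g.
 *     ADD_REG64(WHvX64RegisterRip, pVCpu->cpum.GstCtx.rip);
 * expands to: aenmNames[iReg] = WHvX64RegisterRip; aValues[iReg].Reg64 = rip; iReg++;
 * Batching matters because each WHv call is a kernel round trip. */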

    /* GPRs */
    if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_RAX)
            ADD_REG64(WHvX64RegisterRax, pVCpu->cpum.GstCtx.rax);
        if (fWhat & CPUMCTX_EXTRN_RCX)
            ADD_REG64(WHvX64RegisterRcx, pVCpu->cpum.GstCtx.rcx);
        if (fWhat & CPUMCTX_EXTRN_RDX)
            ADD_REG64(WHvX64RegisterRdx, pVCpu->cpum.GstCtx.rdx);
        if (fWhat & CPUMCTX_EXTRN_RBX)
            ADD_REG64(WHvX64RegisterRbx, pVCpu->cpum.GstCtx.rbx);
        if (fWhat & CPUMCTX_EXTRN_RSP)
            ADD_REG64(WHvX64RegisterRsp, pVCpu->cpum.GstCtx.rsp);
        if (fWhat & CPUMCTX_EXTRN_RBP)
            ADD_REG64(WHvX64RegisterRbp, pVCpu->cpum.GstCtx.rbp);
        if (fWhat & CPUMCTX_EXTRN_RSI)
            ADD_REG64(WHvX64RegisterRsi, pVCpu->cpum.GstCtx.rsi);
        if (fWhat & CPUMCTX_EXTRN_RDI)
            ADD_REG64(WHvX64RegisterRdi, pVCpu->cpum.GstCtx.rdi);
        if (fWhat & CPUMCTX_EXTRN_R8_R15)
        {
            ADD_REG64(WHvX64RegisterR8, pVCpu->cpum.GstCtx.r8);
            ADD_REG64(WHvX64RegisterR9, pVCpu->cpum.GstCtx.r9);
            ADD_REG64(WHvX64RegisterR10, pVCpu->cpum.GstCtx.r10);
            ADD_REG64(WHvX64RegisterR11, pVCpu->cpum.GstCtx.r11);
            ADD_REG64(WHvX64RegisterR12, pVCpu->cpum.GstCtx.r12);
            ADD_REG64(WHvX64RegisterR13, pVCpu->cpum.GstCtx.r13);
            ADD_REG64(WHvX64RegisterR14, pVCpu->cpum.GstCtx.r14);
            ADD_REG64(WHvX64RegisterR15, pVCpu->cpum.GstCtx.r15);
        }
    }

    /* RIP & Flags */
    if (fWhat & CPUMCTX_EXTRN_RIP)
        ADD_REG64(WHvX64RegisterRip, pVCpu->cpum.GstCtx.rip);
    if (fWhat & CPUMCTX_EXTRN_RFLAGS)
        ADD_REG64(WHvX64RegisterRflags, pVCpu->cpum.GstCtx.rflags.u);

    /* Segments */
#  define ADD_SEG(a_enmName, a_SReg) \
        do { \
            aenmNames[iReg]                  = a_enmName; \
            aValues[iReg].Segment.Base       = (a_SReg).u64Base; \
            aValues[iReg].Segment.Limit      = (a_SReg).u32Limit; \
            aValues[iReg].Segment.Selector   = (a_SReg).Sel; \
            aValues[iReg].Segment.Attributes = (a_SReg).Attr.u; \
            iReg++; \
        } while (0)
    if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_ES)
            ADD_SEG(WHvX64RegisterEs, pVCpu->cpum.GstCtx.es);
        if (fWhat & CPUMCTX_EXTRN_CS)
            ADD_SEG(WHvX64RegisterCs, pVCpu->cpum.GstCtx.cs);
        if (fWhat & CPUMCTX_EXTRN_SS)
            ADD_SEG(WHvX64RegisterSs, pVCpu->cpum.GstCtx.ss);
        if (fWhat & CPUMCTX_EXTRN_DS)
            ADD_SEG(WHvX64RegisterDs, pVCpu->cpum.GstCtx.ds);
        if (fWhat & CPUMCTX_EXTRN_FS)
            ADD_SEG(WHvX64RegisterFs, pVCpu->cpum.GstCtx.fs);
        if (fWhat & CPUMCTX_EXTRN_GS)
            ADD_SEG(WHvX64RegisterGs, pVCpu->cpum.GstCtx.gs);
    }

    /* Descriptor tables & task segment. */
    if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_LDTR)
            ADD_SEG(WHvX64RegisterLdtr, pVCpu->cpum.GstCtx.ldtr);
        if (fWhat & CPUMCTX_EXTRN_TR)
            ADD_SEG(WHvX64RegisterTr, pVCpu->cpum.GstCtx.tr);
        if (fWhat & CPUMCTX_EXTRN_IDTR)
        {
            aenmNames[iReg] = WHvX64RegisterIdtr;
            aValues[iReg].Table.Limit = pVCpu->cpum.GstCtx.idtr.cbIdt;
            aValues[iReg].Table.Base  = pVCpu->cpum.GstCtx.idtr.pIdt;
            iReg++;
        }
        if (fWhat & CPUMCTX_EXTRN_GDTR)
        {
            aenmNames[iReg] = WHvX64RegisterGdtr;
            aValues[iReg].Table.Limit = pVCpu->cpum.GstCtx.gdtr.cbGdt;
            aValues[iReg].Table.Base  = pVCpu->cpum.GstCtx.gdtr.pGdt;
            iReg++;
        }
    }

    /* Control registers. */
    if (fWhat & CPUMCTX_EXTRN_CR_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_CR0)
            ADD_REG64(WHvX64RegisterCr0, pVCpu->cpum.GstCtx.cr0);
        if (fWhat & CPUMCTX_EXTRN_CR2)
            ADD_REG64(WHvX64RegisterCr2, pVCpu->cpum.GstCtx.cr2);
        if (fWhat & CPUMCTX_EXTRN_CR3)
            ADD_REG64(WHvX64RegisterCr3, pVCpu->cpum.GstCtx.cr3);
        if (fWhat & CPUMCTX_EXTRN_CR4)
            ADD_REG64(WHvX64RegisterCr4, pVCpu->cpum.GstCtx.cr4);
    }
    if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
        ADD_REG64(WHvX64RegisterCr8, CPUMGetGuestCR8(pVCpu));

    /* Debug registers. */
/** @todo fixme. Figure out what the hyper-v version of KVM_SET_GUEST_DEBUG would be. */
    if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
    {
        ADD_REG64(WHvX64RegisterDr0, pVCpu->cpum.GstCtx.dr[0]); // CPUMGetHyperDR0(pVCpu));
        ADD_REG64(WHvX64RegisterDr1, pVCpu->cpum.GstCtx.dr[1]); // CPUMGetHyperDR1(pVCpu));
        ADD_REG64(WHvX64RegisterDr2, pVCpu->cpum.GstCtx.dr[2]); // CPUMGetHyperDR2(pVCpu));
        ADD_REG64(WHvX64RegisterDr3, pVCpu->cpum.GstCtx.dr[3]); // CPUMGetHyperDR3(pVCpu));
    }
    if (fWhat & CPUMCTX_EXTRN_DR6)
        ADD_REG64(WHvX64RegisterDr6, pVCpu->cpum.GstCtx.dr[6]); // CPUMGetHyperDR6(pVCpu));
    if (fWhat & CPUMCTX_EXTRN_DR7)
        ADD_REG64(WHvX64RegisterDr7, pVCpu->cpum.GstCtx.dr[7]); // CPUMGetHyperDR7(pVCpu));

    /* Floating point state. */
    if (fWhat & CPUMCTX_EXTRN_X87)
    {
        ADD_REG128(WHvX64RegisterFpMmx0, pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[0].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[0].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx1, pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[1].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[1].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx2, pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[2].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[2].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx3, pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[3].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[3].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx4, pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[4].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[4].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx5, pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[5].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[5].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx6, pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[6].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[6].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx7, pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[7].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[7].au64[1]);

        aenmNames[iReg] = WHvX64RegisterFpControlStatus;
        aValues[iReg].FpControlStatus.FpControl = pVCpu->cpum.GstCtx.pXStateR3->x87.FCW;
        aValues[iReg].FpControlStatus.FpStatus  = pVCpu->cpum.GstCtx.pXStateR3->x87.FSW;
        aValues[iReg].FpControlStatus.FpTag     = pVCpu->cpum.GstCtx.pXStateR3->x87.FTW;
        aValues[iReg].FpControlStatus.Reserved  = pVCpu->cpum.GstCtx.pXStateR3->x87.FTW >> 8;
        aValues[iReg].FpControlStatus.LastFpOp  = pVCpu->cpum.GstCtx.pXStateR3->x87.FOP;
        aValues[iReg].FpControlStatus.LastFpRip = (pVCpu->cpum.GstCtx.pXStateR3->x87.FPUIP)
                                                | ((uint64_t)pVCpu->cpum.GstCtx.pXStateR3->x87.CS << 32)
                                                | ((uint64_t)pVCpu->cpum.GstCtx.pXStateR3->x87.Rsrvd1 << 48);
        iReg++;

        aenmNames[iReg] = WHvX64RegisterXmmControlStatus;
        aValues[iReg].XmmControlStatus.LastFpRdp            = (pVCpu->cpum.GstCtx.pXStateR3->x87.FPUDP)
                                                            | ((uint64_t)pVCpu->cpum.GstCtx.pXStateR3->x87.DS << 32)
                                                            | ((uint64_t)pVCpu->cpum.GstCtx.pXStateR3->x87.Rsrvd2 << 48);
        aValues[iReg].XmmControlStatus.XmmStatusControl     = pVCpu->cpum.GstCtx.pXStateR3->x87.MXCSR;
        aValues[iReg].XmmControlStatus.XmmStatusControlMask = pVCpu->cpum.GstCtx.pXStateR3->x87.MXCSR_MASK; /** @todo ??? (Isn't this an output field?) */
        iReg++;
    }

    /* Vector state. */
    if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
    {
        ADD_REG128(WHvX64RegisterXmm0,  pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 0].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 0].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm1,  pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 1].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 1].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm2,  pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 2].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 2].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm3,  pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 3].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 3].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm4,  pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 4].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 4].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm5,  pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 5].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 5].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm6,  pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 6].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 6].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm7,  pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 7].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 7].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm8,  pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 8].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 8].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm9,  pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 9].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 9].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm10, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[10].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[10].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm11, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[11].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[11].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm12, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[12].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[12].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm13, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[13].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[13].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm14, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[14].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[14].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm15, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[15].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[15].uXmm.s.Hi);
    }

    /* MSRs */
    // WHvX64RegisterTsc - don't touch
    if (fWhat & CPUMCTX_EXTRN_EFER)
        ADD_REG64(WHvX64RegisterEfer, pVCpu->cpum.GstCtx.msrEFER);
    if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
        ADD_REG64(WHvX64RegisterKernelGsBase, pVCpu->cpum.GstCtx.msrKERNELGSBASE);
    if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
    {
        ADD_REG64(WHvX64RegisterSysenterCs, pVCpu->cpum.GstCtx.SysEnter.cs);
        ADD_REG64(WHvX64RegisterSysenterEip, pVCpu->cpum.GstCtx.SysEnter.eip);
        ADD_REG64(WHvX64RegisterSysenterEsp, pVCpu->cpum.GstCtx.SysEnter.esp);
    }
    if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
    {
        ADD_REG64(WHvX64RegisterStar, pVCpu->cpum.GstCtx.msrSTAR);
        ADD_REG64(WHvX64RegisterLstar, pVCpu->cpum.GstCtx.msrLSTAR);
        ADD_REG64(WHvX64RegisterCstar, pVCpu->cpum.GstCtx.msrCSTAR);
        ADD_REG64(WHvX64RegisterSfmask, pVCpu->cpum.GstCtx.msrSFMASK);
    }
    if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
    {
        ADD_REG64(WHvX64RegisterApicBase, APICGetBaseMsrNoCheck(pVCpu));
        ADD_REG64(WHvX64RegisterPat, pVCpu->cpum.GstCtx.msrPAT);
#if 0 /** @todo check if WHvX64RegisterMsrMtrrCap works here... */
        ADD_REG64(WHvX64RegisterMsrMtrrCap, CPUMGetGuestIa32MtrrCap(pVCpu));
#endif
        PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
        ADD_REG64(WHvX64RegisterMsrMtrrDefType, pCtxMsrs->msr.MtrrDefType);
        ADD_REG64(WHvX64RegisterMsrMtrrFix64k00000, pCtxMsrs->msr.MtrrFix64K_00000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix16k80000, pCtxMsrs->msr.MtrrFix16K_80000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix16kA0000, pCtxMsrs->msr.MtrrFix16K_A0000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kC0000, pCtxMsrs->msr.MtrrFix4K_C0000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kC8000, pCtxMsrs->msr.MtrrFix4K_C8000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kD0000, pCtxMsrs->msr.MtrrFix4K_D0000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kD8000, pCtxMsrs->msr.MtrrFix4K_D8000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kE0000, pCtxMsrs->msr.MtrrFix4K_E0000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kE8000, pCtxMsrs->msr.MtrrFix4K_E8000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kF0000, pCtxMsrs->msr.MtrrFix4K_F0000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kF8000, pCtxMsrs->msr.MtrrFix4K_F8000);
        ADD_REG64(WHvX64RegisterTscAux, pCtxMsrs->msr.TscAux);
#if 0 /** @todo these registers aren't available? Might explain something... */
        const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pVM);
        if (enmCpuVendor != CPUMCPUVENDOR_AMD)
        {
            ADD_REG64(HvX64RegisterIa32MiscEnable, pCtxMsrs->msr.MiscEnable);
            ADD_REG64(HvX64RegisterIa32FeatureControl, CPUMGetGuestIa32FeatureControl(pVCpu));
        }
#endif
    }

    /* event injection (clear it). */
    if (fWhat & CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)
        ADD_REG64(WHvRegisterPendingInterruption, 0);

    /* Interruptibility state.  This can get a little complicated since we get
       half of the state via HV_X64_VP_EXECUTION_STATE. */
    if (   (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
        ==          (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI) )
    {
        ADD_REG64(WHvRegisterInterruptState, 0);
        if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
            && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip)
            aValues[iReg - 1].InterruptState.InterruptShadow = 1;
        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
            aValues[iReg - 1].InterruptState.NmiMasked = 1;
    }
    else if (fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT)
    {
        if (   pVCpu->nem.s.fLastInterruptShadow
            || (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
                && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip))
        {
            ADD_REG64(WHvRegisterInterruptState, 0);
            if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
                && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip)
                aValues[iReg - 1].InterruptState.InterruptShadow = 1;
            /** @todo Retrieve NMI state, currently assuming it's zero. (yes this may happen on I/O) */
            //if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
            //    aValues[iReg - 1].InterruptState.NmiMasked = 1;
        }
    }
    else
        Assert(!(fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI));

    /* Interrupt windows. Always set if active as Hyper-V seems to be forgetful. */
    uint8_t const fDesiredIntWin = pVCpu->nem.s.fDesiredInterruptWindows;
    if (   fDesiredIntWin
        || pVCpu->nem.s.fCurrentInterruptWindows != fDesiredIntWin)
    {
        pVCpu->nem.s.fCurrentInterruptWindows = pVCpu->nem.s.fDesiredInterruptWindows;
        ADD_REG64(WHvX64RegisterDeliverabilityNotifications, fDesiredIntWin);
        Assert(aValues[iReg - 1].DeliverabilityNotifications.NmiNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_NMI));
        Assert(aValues[iReg - 1].DeliverabilityNotifications.InterruptNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_REGULAR));
        Assert(aValues[iReg - 1].DeliverabilityNotifications.InterruptPriority == (unsigned)((fDesiredIntWin & NEM_WIN_INTW_F_PRIO_MASK) >> NEM_WIN_INTW_F_PRIO_SHIFT));
    }

    /// @todo WHvRegisterPendingEvent

    /*
     * Set the registers.
     */
    Assert(iReg < RT_ELEMENTS(aValues));
    Assert(iReg < RT_ELEMENTS(aenmNames));
#  ifdef NEM_WIN_INTERCEPT_NT_IO_CTLS
    Log12(("Calling WHvSetVirtualProcessorRegisters(%p, %u, %p, %u, %p)\n",
           pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues));
#  endif
    HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues);
    if (SUCCEEDED(hrc))
    {
        pVCpu->cpum.GstCtx.fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM;
        return VINF_SUCCESS;
    }
    AssertLogRelMsgFailed(("WHvSetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
                           pVM->nem.s.hPartition, pVCpu->idCpu, iReg,
                           hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
    return VERR_INTERNAL_ERROR;

#  undef ADD_REG64
#  undef ADD_REG128
#  undef ADD_SEG

# endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
}
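
/* A sketch of the fExtrn bookkeeping the function above relies on: fExtrn
 * tracks which guest register groups currently live in Hyper-V rather than in
 * CPUMCTX.  Exporting hands everything back to Hyper-V, so on success the
 * function marks it all external again:
 *     pVCpu->cpum.GstCtx.fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM;
 * meaning any subsequent read must go through NEMImportStateOnDemand(). */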


NEM_TMPL_STATIC int nemHCWinCopyStateFromHyperV(PVMCC pVM, PVMCPUCC pVCpu, uint64_t fWhat)
{
# if defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) || defined(NEM_WIN_WITH_RING0_RUNLOOP)
#  if !defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) && defined(NEM_WIN_WITH_RING0_RUNLOOP)
    if (pVM->nem.s.fUseRing0Runloop)
#  endif
    {
        /* See NEMR0ImportState */
        int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_IMPORT_STATE, fWhat, NULL);
        if (RT_SUCCESS(rc))
            return rc;
        if (rc == VERR_NEM_FLUSH_TLB)
            return PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /*fGlobal*/);
        AssertLogRelRCReturn(rc, rc);
        return rc;
    }
# endif
# ifndef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS
    WHV_REGISTER_NAME aenmNames[128];

    fWhat &= pVCpu->cpum.GstCtx.fExtrn;
    uintptr_t iReg = 0;

    /* GPRs */
    if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_RAX)
            aenmNames[iReg++] = WHvX64RegisterRax;
        if (fWhat & CPUMCTX_EXTRN_RCX)
            aenmNames[iReg++] = WHvX64RegisterRcx;
        if (fWhat & CPUMCTX_EXTRN_RDX)
            aenmNames[iReg++] = WHvX64RegisterRdx;
        if (fWhat & CPUMCTX_EXTRN_RBX)
            aenmNames[iReg++] = WHvX64RegisterRbx;
        if (fWhat & CPUMCTX_EXTRN_RSP)
            aenmNames[iReg++] = WHvX64RegisterRsp;
        if (fWhat & CPUMCTX_EXTRN_RBP)
            aenmNames[iReg++] = WHvX64RegisterRbp;
        if (fWhat & CPUMCTX_EXTRN_RSI)
            aenmNames[iReg++] = WHvX64RegisterRsi;
        if (fWhat & CPUMCTX_EXTRN_RDI)
            aenmNames[iReg++] = WHvX64RegisterRdi;
        if (fWhat & CPUMCTX_EXTRN_R8_R15)
        {
            aenmNames[iReg++] = WHvX64RegisterR8;
            aenmNames[iReg++] = WHvX64RegisterR9;
            aenmNames[iReg++] = WHvX64RegisterR10;
            aenmNames[iReg++] = WHvX64RegisterR11;
            aenmNames[iReg++] = WHvX64RegisterR12;
            aenmNames[iReg++] = WHvX64RegisterR13;
            aenmNames[iReg++] = WHvX64RegisterR14;
            aenmNames[iReg++] = WHvX64RegisterR15;
        }
    }

    /* RIP & Flags */
    if (fWhat & CPUMCTX_EXTRN_RIP)
        aenmNames[iReg++] = WHvX64RegisterRip;
    if (fWhat & CPUMCTX_EXTRN_RFLAGS)
        aenmNames[iReg++] = WHvX64RegisterRflags;

    /* Segments */
    if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_ES)
            aenmNames[iReg++] = WHvX64RegisterEs;
        if (fWhat & CPUMCTX_EXTRN_CS)
            aenmNames[iReg++] = WHvX64RegisterCs;
        if (fWhat & CPUMCTX_EXTRN_SS)
            aenmNames[iReg++] = WHvX64RegisterSs;
        if (fWhat & CPUMCTX_EXTRN_DS)
            aenmNames[iReg++] = WHvX64RegisterDs;
        if (fWhat & CPUMCTX_EXTRN_FS)
            aenmNames[iReg++] = WHvX64RegisterFs;
        if (fWhat & CPUMCTX_EXTRN_GS)
            aenmNames[iReg++] = WHvX64RegisterGs;
    }

    /* Descriptor tables. */
    if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_LDTR)
            aenmNames[iReg++] = WHvX64RegisterLdtr;
        if (fWhat & CPUMCTX_EXTRN_TR)
            aenmNames[iReg++] = WHvX64RegisterTr;
        if (fWhat & CPUMCTX_EXTRN_IDTR)
            aenmNames[iReg++] = WHvX64RegisterIdtr;
        if (fWhat & CPUMCTX_EXTRN_GDTR)
            aenmNames[iReg++] = WHvX64RegisterGdtr;
    }

    /* Control registers. */
    if (fWhat & CPUMCTX_EXTRN_CR_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_CR0)
            aenmNames[iReg++] = WHvX64RegisterCr0;
        if (fWhat & CPUMCTX_EXTRN_CR2)
            aenmNames[iReg++] = WHvX64RegisterCr2;
        if (fWhat & CPUMCTX_EXTRN_CR3)
            aenmNames[iReg++] = WHvX64RegisterCr3;
        if (fWhat & CPUMCTX_EXTRN_CR4)
            aenmNames[iReg++] = WHvX64RegisterCr4;
    }
    if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
        aenmNames[iReg++] = WHvX64RegisterCr8;

    /* Debug registers. */
    if (fWhat & CPUMCTX_EXTRN_DR7)
        aenmNames[iReg++] = WHvX64RegisterDr7;
    if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
    {
        if (!(fWhat & CPUMCTX_EXTRN_DR7) && (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_DR7))
        {
            fWhat |= CPUMCTX_EXTRN_DR7;
            aenmNames[iReg++] = WHvX64RegisterDr7;
        }
        aenmNames[iReg++] = WHvX64RegisterDr0;
        aenmNames[iReg++] = WHvX64RegisterDr1;
        aenmNames[iReg++] = WHvX64RegisterDr2;
        aenmNames[iReg++] = WHvX64RegisterDr3;
    }
    if (fWhat & CPUMCTX_EXTRN_DR6)
        aenmNames[iReg++] = WHvX64RegisterDr6;

    /* Floating point state. */
    if (fWhat & CPUMCTX_EXTRN_X87)
    {
        aenmNames[iReg++] = WHvX64RegisterFpMmx0;
        aenmNames[iReg++] = WHvX64RegisterFpMmx1;
        aenmNames[iReg++] = WHvX64RegisterFpMmx2;
        aenmNames[iReg++] = WHvX64RegisterFpMmx3;
        aenmNames[iReg++] = WHvX64RegisterFpMmx4;
        aenmNames[iReg++] = WHvX64RegisterFpMmx5;
        aenmNames[iReg++] = WHvX64RegisterFpMmx6;
        aenmNames[iReg++] = WHvX64RegisterFpMmx7;
        aenmNames[iReg++] = WHvX64RegisterFpControlStatus;
    }
    if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
        aenmNames[iReg++] = WHvX64RegisterXmmControlStatus;

    /* Vector state. */
    if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
    {
        aenmNames[iReg++] = WHvX64RegisterXmm0;
        aenmNames[iReg++] = WHvX64RegisterXmm1;
        aenmNames[iReg++] = WHvX64RegisterXmm2;
        aenmNames[iReg++] = WHvX64RegisterXmm3;
        aenmNames[iReg++] = WHvX64RegisterXmm4;
        aenmNames[iReg++] = WHvX64RegisterXmm5;
        aenmNames[iReg++] = WHvX64RegisterXmm6;
        aenmNames[iReg++] = WHvX64RegisterXmm7;
        aenmNames[iReg++] = WHvX64RegisterXmm8;
        aenmNames[iReg++] = WHvX64RegisterXmm9;
        aenmNames[iReg++] = WHvX64RegisterXmm10;
        aenmNames[iReg++] = WHvX64RegisterXmm11;
        aenmNames[iReg++] = WHvX64RegisterXmm12;
        aenmNames[iReg++] = WHvX64RegisterXmm13;
        aenmNames[iReg++] = WHvX64RegisterXmm14;
        aenmNames[iReg++] = WHvX64RegisterXmm15;
    }

    /* MSRs */
    // WHvX64RegisterTsc - don't touch
    if (fWhat & CPUMCTX_EXTRN_EFER)
        aenmNames[iReg++] = WHvX64RegisterEfer;
    if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
        aenmNames[iReg++] = WHvX64RegisterKernelGsBase;
    if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
    {
        aenmNames[iReg++] = WHvX64RegisterSysenterCs;
        aenmNames[iReg++] = WHvX64RegisterSysenterEip;
        aenmNames[iReg++] = WHvX64RegisterSysenterEsp;
    }
    if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
    {
        aenmNames[iReg++] = WHvX64RegisterStar;
        aenmNames[iReg++] = WHvX64RegisterLstar;
        aenmNames[iReg++] = WHvX64RegisterCstar;
        aenmNames[iReg++] = WHvX64RegisterSfmask;
    }

//#ifdef LOG_ENABLED
//    const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pVM);
//#endif
    if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
    {
        aenmNames[iReg++] = WHvX64RegisterApicBase; /// @todo APIC BASE
        aenmNames[iReg++] = WHvX64RegisterPat;
#if 0 /*def LOG_ENABLED*/ /** @todo Check if WHvX64RegisterMsrMtrrCap works... */
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrCap;
#endif
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrDefType;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix64k00000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix16k80000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix16kA0000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kC0000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kC8000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kD0000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kD8000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kE0000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kE8000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kF0000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kF8000;
        aenmNames[iReg++] = WHvX64RegisterTscAux;
        /** @todo look for HvX64RegisterIa32MiscEnable and HvX64RegisterIa32FeatureControl? */
//#ifdef LOG_ENABLED
//        if (enmCpuVendor != CPUMCPUVENDOR_AMD)
//            aenmNames[iReg++] = HvX64RegisterIa32FeatureControl;
//#endif
    }

    /* Interruptibility. */
    if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
    {
        aenmNames[iReg++] = WHvRegisterInterruptState;
        aenmNames[iReg++] = WHvX64RegisterRip;
    }

    /* event injection */
    aenmNames[iReg++] = WHvRegisterPendingInterruption;
    aenmNames[iReg++] = WHvRegisterPendingEvent0; /** @todo renamed to WHvRegisterPendingEvent */

    size_t const cRegs = iReg;
    Assert(cRegs < RT_ELEMENTS(aenmNames));

    /*
     * Get the registers.
     */
    WHV_REGISTER_VALUE aValues[128];
    RT_ZERO(aValues);
    Assert(RT_ELEMENTS(aValues) >= cRegs);
    Assert(RT_ELEMENTS(aenmNames) >= cRegs);
#  ifdef NEM_WIN_INTERCEPT_NT_IO_CTLS
    Log12(("Calling WHvGetVirtualProcessorRegisters(%p, %u, %p, %u, %p)\n",
           pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, cRegs, aValues));
#  endif
    HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, (uint32_t)cRegs, aValues);
    AssertLogRelMsgReturn(SUCCEEDED(hrc),
                          ("WHvGetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
                           pVM->nem.s.hPartition, pVCpu->idCpu, cRegs, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
                          , VERR_NEM_GET_REGISTERS_FAILED);

    iReg = 0;
#  define GET_REG64(a_DstVar, a_enmName) do { \
            Assert(aenmNames[iReg] == (a_enmName)); \
            (a_DstVar) = aValues[iReg].Reg64; \
            iReg++; \
        } while (0)
#  define GET_REG64_LOG7(a_DstVar, a_enmName, a_szLogName) do { \
            Assert(aenmNames[iReg] == (a_enmName)); \
            if ((a_DstVar) != aValues[iReg].Reg64) \
                Log7(("NEM/%u: " a_szLogName " changed %RX64 -> %RX64\n", pVCpu->idCpu, (a_DstVar), aValues[iReg].Reg64)); \
            (a_DstVar) = aValues[iReg].Reg64; \
            iReg++; \
        } while (0)
#  define GET_REG128(a_DstVarLo, a_DstVarHi, a_enmName) do { \
            Assert(aenmNames[iReg] == a_enmName); \
            (a_DstVarLo) = aValues[iReg].Reg128.Low64; \
            (a_DstVarHi) = aValues[iReg].Reg128.High64; \
            iReg++; \
        } while (0)
#  define GET_SEG(a_SReg, a_enmName) do { \
            Assert(aenmNames[iReg] == (a_enmName)); \
            NEM_WIN_COPY_BACK_SEG(a_SReg, aValues[iReg].Segment); \
            iReg++; \
        } while (0)
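
/* These mirror the ADD_* helpers on the import side: positions in aValues[]
 * line up with the names queued in aenmNames[] above, so each GET_* consumes
 * one slot in order, e.g.
 *     GET_REG64(pVCpu->cpum.GstCtx.rax, WHvX64RegisterRax);
 * asserts the current slot really is RAX before copying aValues[iReg].Reg64 out. */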

    /* GPRs */
    if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_RAX)
            GET_REG64(pVCpu->cpum.GstCtx.rax, WHvX64RegisterRax);
        if (fWhat & CPUMCTX_EXTRN_RCX)
            GET_REG64(pVCpu->cpum.GstCtx.rcx, WHvX64RegisterRcx);
        if (fWhat & CPUMCTX_EXTRN_RDX)
            GET_REG64(pVCpu->cpum.GstCtx.rdx, WHvX64RegisterRdx);
        if (fWhat & CPUMCTX_EXTRN_RBX)
            GET_REG64(pVCpu->cpum.GstCtx.rbx, WHvX64RegisterRbx);
        if (fWhat & CPUMCTX_EXTRN_RSP)
            GET_REG64(pVCpu->cpum.GstCtx.rsp, WHvX64RegisterRsp);
        if (fWhat & CPUMCTX_EXTRN_RBP)
            GET_REG64(pVCpu->cpum.GstCtx.rbp, WHvX64RegisterRbp);
        if (fWhat & CPUMCTX_EXTRN_RSI)
            GET_REG64(pVCpu->cpum.GstCtx.rsi, WHvX64RegisterRsi);
        if (fWhat & CPUMCTX_EXTRN_RDI)
            GET_REG64(pVCpu->cpum.GstCtx.rdi, WHvX64RegisterRdi);
        if (fWhat & CPUMCTX_EXTRN_R8_R15)
        {
            GET_REG64(pVCpu->cpum.GstCtx.r8, WHvX64RegisterR8);
            GET_REG64(pVCpu->cpum.GstCtx.r9, WHvX64RegisterR9);
            GET_REG64(pVCpu->cpum.GstCtx.r10, WHvX64RegisterR10);
            GET_REG64(pVCpu->cpum.GstCtx.r11, WHvX64RegisterR11);
            GET_REG64(pVCpu->cpum.GstCtx.r12, WHvX64RegisterR12);
            GET_REG64(pVCpu->cpum.GstCtx.r13, WHvX64RegisterR13);
            GET_REG64(pVCpu->cpum.GstCtx.r14, WHvX64RegisterR14);
            GET_REG64(pVCpu->cpum.GstCtx.r15, WHvX64RegisterR15);
        }
    }

    /* RIP & Flags */
    if (fWhat & CPUMCTX_EXTRN_RIP)
        GET_REG64(pVCpu->cpum.GstCtx.rip, WHvX64RegisterRip);
    if (fWhat & CPUMCTX_EXTRN_RFLAGS)
        GET_REG64(pVCpu->cpum.GstCtx.rflags.u, WHvX64RegisterRflags);

    /* Segments */
    if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_ES)
            GET_SEG(pVCpu->cpum.GstCtx.es, WHvX64RegisterEs);
        if (fWhat & CPUMCTX_EXTRN_CS)
            GET_SEG(pVCpu->cpum.GstCtx.cs, WHvX64RegisterCs);
        if (fWhat & CPUMCTX_EXTRN_SS)
            GET_SEG(pVCpu->cpum.GstCtx.ss, WHvX64RegisterSs);
        if (fWhat & CPUMCTX_EXTRN_DS)
            GET_SEG(pVCpu->cpum.GstCtx.ds, WHvX64RegisterDs);
        if (fWhat & CPUMCTX_EXTRN_FS)
            GET_SEG(pVCpu->cpum.GstCtx.fs, WHvX64RegisterFs);
        if (fWhat & CPUMCTX_EXTRN_GS)
            GET_SEG(pVCpu->cpum.GstCtx.gs, WHvX64RegisterGs);
    }

    /* Descriptor tables and the task segment. */
    if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_LDTR)
            GET_SEG(pVCpu->cpum.GstCtx.ldtr, WHvX64RegisterLdtr);

        if (fWhat & CPUMCTX_EXTRN_TR)
        {
            /* AMD-V likes loading TR in the AVAIL state, whereas Intel insists on BUSY.  To
               avoid triggering sanity assertions elsewhere in the code, always fix this up. */
            GET_SEG(pVCpu->cpum.GstCtx.tr, WHvX64RegisterTr);
            switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
            {
                case X86_SEL_TYPE_SYS_386_TSS_BUSY:
                case X86_SEL_TYPE_SYS_286_TSS_BUSY:
                    break;
                case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
                    pVCpu->cpum.GstCtx.tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
                    break;
                case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
                    pVCpu->cpum.GstCtx.tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;
                    break;
            }
        }
        if (fWhat & CPUMCTX_EXTRN_IDTR)
        {
            Assert(aenmNames[iReg] == WHvX64RegisterIdtr);
            pVCpu->cpum.GstCtx.idtr.cbIdt = aValues[iReg].Table.Limit;
            pVCpu->cpum.GstCtx.idtr.pIdt  = aValues[iReg].Table.Base;
            iReg++;
        }
        if (fWhat & CPUMCTX_EXTRN_GDTR)
        {
            Assert(aenmNames[iReg] == WHvX64RegisterGdtr);
            pVCpu->cpum.GstCtx.gdtr.cbGdt = aValues[iReg].Table.Limit;
            pVCpu->cpum.GstCtx.gdtr.pGdt  = aValues[iReg].Table.Base;
            iReg++;
        }
    }

    /* Control registers. */
    bool fMaybeChangedMode = false;
    bool fUpdateCr3        = false;
    if (fWhat & CPUMCTX_EXTRN_CR_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_CR0)
        {
            Assert(aenmNames[iReg] == WHvX64RegisterCr0);
            if (pVCpu->cpum.GstCtx.cr0 != aValues[iReg].Reg64)
            {
                CPUMSetGuestCR0(pVCpu, aValues[iReg].Reg64);
                fMaybeChangedMode = true;
            }
            iReg++;
        }
        if (fWhat & CPUMCTX_EXTRN_CR2)
            GET_REG64(pVCpu->cpum.GstCtx.cr2, WHvX64RegisterCr2);
        if (fWhat & CPUMCTX_EXTRN_CR3)
        {
            if (pVCpu->cpum.GstCtx.cr3 != aValues[iReg].Reg64)
            {
                CPUMSetGuestCR3(pVCpu, aValues[iReg].Reg64);
                fUpdateCr3 = true;
            }
            iReg++;
        }
        if (fWhat & CPUMCTX_EXTRN_CR4)
        {
            if (pVCpu->cpum.GstCtx.cr4 != aValues[iReg].Reg64)
            {
                CPUMSetGuestCR4(pVCpu, aValues[iReg].Reg64);
                fMaybeChangedMode = true;
            }
            iReg++;
        }
    }
    if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
    {
        Assert(aenmNames[iReg] == WHvX64RegisterCr8);
        APICSetTpr(pVCpu, (uint8_t)aValues[iReg].Reg64 << 4);
        iReg++;
    }

    /* Debug registers. */
    if (fWhat & CPUMCTX_EXTRN_DR7)
    {
        Assert(aenmNames[iReg] == WHvX64RegisterDr7);
        if (pVCpu->cpum.GstCtx.dr[7] != aValues[iReg].Reg64)
            CPUMSetGuestDR7(pVCpu, aValues[iReg].Reg64);
        pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_DR7; /* Hack alert! Avoids asserting when processing CPUMCTX_EXTRN_DR0_DR3. */
        iReg++;
    }
    if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
    {
        Assert(aenmNames[iReg] == WHvX64RegisterDr0);
        Assert(aenmNames[iReg+3] == WHvX64RegisterDr3);
        if (pVCpu->cpum.GstCtx.dr[0] != aValues[iReg].Reg64)
            CPUMSetGuestDR0(pVCpu, aValues[iReg].Reg64);
        iReg++;
        if (pVCpu->cpum.GstCtx.dr[1] != aValues[iReg].Reg64)
            CPUMSetGuestDR1(pVCpu, aValues[iReg].Reg64);
        iReg++;
        if (pVCpu->cpum.GstCtx.dr[2] != aValues[iReg].Reg64)
            CPUMSetGuestDR2(pVCpu, aValues[iReg].Reg64);
        iReg++;
        if (pVCpu->cpum.GstCtx.dr[3] != aValues[iReg].Reg64)
            CPUMSetGuestDR3(pVCpu, aValues[iReg].Reg64);
        iReg++;
    }
    if (fWhat & CPUMCTX_EXTRN_DR6)
    {
        Assert(aenmNames[iReg] == WHvX64RegisterDr6);
        if (pVCpu->cpum.GstCtx.dr[6] != aValues[iReg].Reg64)
            CPUMSetGuestDR6(pVCpu, aValues[iReg].Reg64);
        iReg++;
    }

    /* Floating point state. */
    if (fWhat & CPUMCTX_EXTRN_X87)
    {
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[0].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[0].au64[1], WHvX64RegisterFpMmx0);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[1].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[1].au64[1], WHvX64RegisterFpMmx1);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[2].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[2].au64[1], WHvX64RegisterFpMmx2);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[3].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[3].au64[1], WHvX64RegisterFpMmx3);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[4].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[4].au64[1], WHvX64RegisterFpMmx4);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[5].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[5].au64[1], WHvX64RegisterFpMmx5);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[6].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[6].au64[1], WHvX64RegisterFpMmx6);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[7].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[7].au64[1], WHvX64RegisterFpMmx7);

        Assert(aenmNames[iReg] == WHvX64RegisterFpControlStatus);
        pVCpu->cpum.GstCtx.pXStateR3->x87.FCW    = aValues[iReg].FpControlStatus.FpControl;
        pVCpu->cpum.GstCtx.pXStateR3->x87.FSW    = aValues[iReg].FpControlStatus.FpStatus;
        pVCpu->cpum.GstCtx.pXStateR3->x87.FTW    = aValues[iReg].FpControlStatus.FpTag
                                        /*| (aValues[iReg].FpControlStatus.Reserved << 8)*/;
        pVCpu->cpum.GstCtx.pXStateR3->x87.FOP    = aValues[iReg].FpControlStatus.LastFpOp;
        pVCpu->cpum.GstCtx.pXStateR3->x87.FPUIP  = (uint32_t)aValues[iReg].FpControlStatus.LastFpRip;
        pVCpu->cpum.GstCtx.pXStateR3->x87.CS     = (uint16_t)(aValues[iReg].FpControlStatus.LastFpRip >> 32);
        pVCpu->cpum.GstCtx.pXStateR3->x87.Rsrvd1 = (uint16_t)(aValues[iReg].FpControlStatus.LastFpRip >> 48);
        iReg++;
    }

    if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
    {
        Assert(aenmNames[iReg] == WHvX64RegisterXmmControlStatus);
        if (fWhat & CPUMCTX_EXTRN_X87)
        {
            pVCpu->cpum.GstCtx.pXStateR3->x87.FPUDP  = (uint32_t)aValues[iReg].XmmControlStatus.LastFpRdp;
            pVCpu->cpum.GstCtx.pXStateR3->x87.DS     = (uint16_t)(aValues[iReg].XmmControlStatus.LastFpRdp >> 32);
            pVCpu->cpum.GstCtx.pXStateR3->x87.Rsrvd2 = (uint16_t)(aValues[iReg].XmmControlStatus.LastFpRdp >> 48);
        }
        pVCpu->cpum.GstCtx.pXStateR3->x87.MXCSR      = aValues[iReg].XmmControlStatus.XmmStatusControl;
        pVCpu->cpum.GstCtx.pXStateR3->x87.MXCSR_MASK = aValues[iReg].XmmControlStatus.XmmStatusControlMask; /** @todo ??? (Isn't this an output field?) */
        iReg++;
    }

    /* Vector state. */
    if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
    {
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 0].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 0].uXmm.s.Hi, WHvX64RegisterXmm0);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 1].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 1].uXmm.s.Hi, WHvX64RegisterXmm1);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 2].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 2].uXmm.s.Hi, WHvX64RegisterXmm2);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 3].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 3].uXmm.s.Hi, WHvX64RegisterXmm3);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 4].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 4].uXmm.s.Hi, WHvX64RegisterXmm4);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 5].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 5].uXmm.s.Hi, WHvX64RegisterXmm5);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 6].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 6].uXmm.s.Hi, WHvX64RegisterXmm6);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 7].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 7].uXmm.s.Hi, WHvX64RegisterXmm7);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 8].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 8].uXmm.s.Hi, WHvX64RegisterXmm8);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 9].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 9].uXmm.s.Hi, WHvX64RegisterXmm9);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[10].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[10].uXmm.s.Hi, WHvX64RegisterXmm10);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[11].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[11].uXmm.s.Hi, WHvX64RegisterXmm11);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[12].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[12].uXmm.s.Hi, WHvX64RegisterXmm12);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[13].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[13].uXmm.s.Hi, WHvX64RegisterXmm13);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[14].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[14].uXmm.s.Hi, WHvX64RegisterXmm14);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[15].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[15].uXmm.s.Hi, WHvX64RegisterXmm15);
    }

    /* MSRs */
    // WHvX64RegisterTsc - don't touch
    if (fWhat & CPUMCTX_EXTRN_EFER)
    {
        Assert(aenmNames[iReg] == WHvX64RegisterEfer);
        if (aValues[iReg].Reg64 != pVCpu->cpum.GstCtx.msrEFER)
        {
            Log7(("NEM/%u: MSR EFER changed %RX64 -> %RX64\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.msrEFER, aValues[iReg].Reg64));
            if ((aValues[iReg].Reg64 ^ pVCpu->cpum.GstCtx.msrEFER) & MSR_K6_EFER_NXE)
                PGMNotifyNxeChanged(pVCpu, RT_BOOL(aValues[iReg].Reg64 & MSR_K6_EFER_NXE));
            pVCpu->cpum.GstCtx.msrEFER = aValues[iReg].Reg64;
            fMaybeChangedMode = true;
        }
        iReg++;
    }
    if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrKERNELGSBASE, WHvX64RegisterKernelGsBase, "MSR KERNEL_GS_BASE");
    if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
    {
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.SysEnter.cs,  WHvX64RegisterSysenterCs,  "MSR SYSENTER.CS");
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.SysEnter.eip, WHvX64RegisterSysenterEip, "MSR SYSENTER.EIP");
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.SysEnter.esp, WHvX64RegisterSysenterEsp, "MSR SYSENTER.ESP");
    }
    if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
    {
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrSTAR,   WHvX64RegisterStar,   "MSR STAR");
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrLSTAR,  WHvX64RegisterLstar,  "MSR LSTAR");
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrCSTAR,  WHvX64RegisterCstar,  "MSR CSTAR");
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrSFMASK, WHvX64RegisterSfmask, "MSR SFMASK");
    }
    if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
    {
        Assert(aenmNames[iReg] == WHvX64RegisterApicBase);
        const uint64_t uOldBase = APICGetBaseMsrNoCheck(pVCpu);
        if (aValues[iReg].Reg64 != uOldBase)
        {
            Log7(("NEM/%u: MSR APICBase changed %RX64 -> %RX64 (%RX64)\n",
                  pVCpu->idCpu, uOldBase, aValues[iReg].Reg64, aValues[iReg].Reg64 ^ uOldBase));
            int rc2 = APICSetBaseMsr(pVCpu, aValues[iReg].Reg64);
            AssertLogRelMsg(rc2 == VINF_SUCCESS, ("%Rrc %RX64\n", rc2, aValues[iReg].Reg64));
        }
        iReg++;

        GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrPAT, WHvX64RegisterPat, "MSR PAT");
#if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrPAT, WHvX64RegisterMsrMtrrCap);
#endif
        PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrDefType,      WHvX64RegisterMsrMtrrDefType,     "MSR MTRR_DEF_TYPE");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix64K_00000, WHvX64RegisterMsrMtrrFix64k00000, "MSR MTRR_FIX_64K_00000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix16K_80000, WHvX64RegisterMsrMtrrFix16k80000, "MSR MTRR_FIX_16K_80000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix16K_A0000, WHvX64RegisterMsrMtrrFix16kA0000, "MSR MTRR_FIX_16K_A0000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_C0000,  WHvX64RegisterMsrMtrrFix4kC0000,  "MSR MTRR_FIX_4K_C0000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_C8000,  WHvX64RegisterMsrMtrrFix4kC8000,  "MSR MTRR_FIX_4K_C8000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_D0000,  WHvX64RegisterMsrMtrrFix4kD0000,  "MSR MTRR_FIX_4K_D0000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_D8000,  WHvX64RegisterMsrMtrrFix4kD8000,  "MSR MTRR_FIX_4K_D8000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_E0000,  WHvX64RegisterMsrMtrrFix4kE0000,  "MSR MTRR_FIX_4K_E0000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_E8000,  WHvX64RegisterMsrMtrrFix4kE8000,  "MSR MTRR_FIX_4K_E8000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_F0000,  WHvX64RegisterMsrMtrrFix4kF0000,  "MSR MTRR_FIX_4K_F0000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_F8000,  WHvX64RegisterMsrMtrrFix4kF8000,  "MSR MTRR_FIX_4K_F8000");
        GET_REG64_LOG7(pCtxMsrs->msr.TscAux,           WHvX64RegisterTscAux,             "MSR TSC_AUX");
        /** @todo look for HvX64RegisterIa32MiscEnable and HvX64RegisterIa32FeatureControl? */
    }

    /* Interruptibility. */
    if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
    {
        Assert(aenmNames[iReg] == WHvRegisterInterruptState);
        Assert(aenmNames[iReg + 1] == WHvX64RegisterRip);

        if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
        {
            pVCpu->nem.s.fLastInterruptShadow = aValues[iReg].InterruptState.InterruptShadow;
            if (aValues[iReg].InterruptState.InterruptShadow)
                EMSetInhibitInterruptsPC(pVCpu, aValues[iReg + 1].Reg64);
            else
                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
        }

        if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
        {
            if (aValues[iReg].InterruptState.NmiMasked)
                VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
            else
                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
        }

        fWhat |= CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI;
        iReg += 2;
    }

    /* Event injection. */
    /// @todo WHvRegisterPendingInterruption
    Assert(aenmNames[iReg] == WHvRegisterPendingInterruption);
    if (aValues[iReg].PendingInterruption.InterruptionPending)
    {
        Log7(("PendingInterruption: type=%u vector=%#x errcd=%RTbool/%#x instr-len=%u nested=%u\n",
              aValues[iReg].PendingInterruption.InterruptionType, aValues[iReg].PendingInterruption.InterruptionVector,
              aValues[iReg].PendingInterruption.DeliverErrorCode, aValues[iReg].PendingInterruption.ErrorCode,
              aValues[iReg].PendingInterruption.InstructionLength, aValues[iReg].PendingInterruption.NestedEvent));
        AssertMsg((aValues[iReg].PendingInterruption.AsUINT64 & UINT64_C(0xfc00)) == 0,
                  ("%#RX64\n", aValues[iReg].PendingInterruption.AsUINT64));
    }

    /// @todo WHvRegisterPendingEvent0 (renamed to WHvRegisterPendingEvent).

    /* Almost done, just update extrn flags and maybe change PGM mode. */
    pVCpu->cpum.GstCtx.fExtrn &= ~fWhat;
    if (!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT))))
        pVCpu->cpum.GstCtx.fExtrn = 0;

    /* Typical. */
    if (!fMaybeChangedMode && !fUpdateCr3)
        return VINF_SUCCESS;

    /*
     * Slow.
     */
    if (fMaybeChangedMode)
    {
        int rc = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER);
        AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_1);
    }

    if (fUpdateCr3)
    {
        int rc = PGMUpdateCR3(pVCpu, pVCpu->cpum.GstCtx.cr3);
        AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_2);
    }

    return VINF_SUCCESS;
# endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
}

#endif /* !IN_RING0 */


/**
 * Interface for importing state on demand (used by IEM).
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context CPU structure.
 * @param   fWhat   What to import, CPUMCTX_EXTRN_XXX.
 */
VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
{
    STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);

#ifdef IN_RING0
# ifdef NEM_WIN_WITH_RING0_RUNLOOP
    return nemR0WinImportState(pVCpu->pGVM, pVCpu, &pVCpu->cpum.GstCtx, fWhat, true /*fCanUpdateCr3*/);
# else
    RT_NOREF(pVCpu, fWhat);
    return VERR_NOT_IMPLEMENTED;
# endif
#else
    return nemHCWinCopyStateFromHyperV(pVCpu->pVMR3, pVCpu, fWhat);
#endif
}
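
/* Usage sketch (illustrative): callers such as IEM request exactly the state
 * they are about to touch, e.g.
 *     int rc = NEMImportStateOnDemand(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
 * In the ring-3 path only the bits still set in pVCpu->cpum.GstCtx.fExtrn are
 * actually fetched from Hyper-V; everything else is already in CPUMCTX. */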


/**
 * Query the CPU tick counter and optionally the TSC_AUX MSR value.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context CPU structure.
 * @param   pcTicks     Where to return the CPU tick count.
 * @param   puAux       Where to return the TSC_AUX register value.
 */
VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPUCC pVCpu, uint64_t *pcTicks, uint32_t *puAux)
{
    STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick);

#ifdef IN_RING3
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
    AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);

# if defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) || defined(NEM_WIN_WITH_RING0_RUNLOOP)
#  if !defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) && defined(NEM_WIN_WITH_RING0_RUNLOOP)
    if (pVM->nem.s.fUseRing0Runloop)
#  endif
    {
        /* Call ring-0 and get the values. */
        int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_QUERY_CPU_TICK, 0, NULL);
        AssertLogRelRCReturn(rc, rc);
        *pcTicks = pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks;
        if (puAux)
            *puAux = pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_TSC_AUX
                   ? pVCpu->nem.s.Hypercall.QueryCpuTick.uAux : CPUMGetGuestTscAux(pVCpu);
        return VINF_SUCCESS;
    }
# endif
# ifndef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS
    /* Call the official API. */
    WHV_REGISTER_NAME  aenmNames[2] = { WHvX64RegisterTsc, WHvX64RegisterTscAux };
    WHV_REGISTER_VALUE aValues[2]   = { {0, 0}, {0, 0} };
    Assert(RT_ELEMENTS(aenmNames) == RT_ELEMENTS(aValues));
    HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, 2, aValues);
    AssertLogRelMsgReturn(SUCCEEDED(hrc),
                          ("WHvGetVirtualProcessorRegisters(%p, %u,{tsc,tsc_aux},2,) -> %Rhrc (Last=%#x/%u)\n",
                           pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
                          , VERR_NEM_GET_REGISTERS_FAILED);
    *pcTicks = aValues[0].Reg64;
    if (puAux)
        *puAux = pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_TSC_AUX ? (uint32_t)aValues[1].Reg64 : CPUMGetGuestTscAux(pVCpu);
    return VINF_SUCCESS;
# endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
#else /* IN_RING0 */
# ifdef NEM_WIN_WITH_RING0_RUNLOOP
    int rc = nemR0WinQueryCpuTick(pVCpu->pGVM, pVCpu, pcTicks, puAux);
    if (RT_SUCCESS(rc) && puAux && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_TSC_AUX))
        *puAux = CPUMGetGuestTscAux(pVCpu);
    return rc;
# else
    RT_NOREF(pVCpu, pcTicks, puAux);
    return VERR_NOT_IMPLEMENTED;
# endif
#endif /* IN_RING0 */
}
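
/* Usage sketch (illustrative): a caller that only needs the tick count, e.g.
 * when pausing the clocks, can skip the TSC_AUX lookup entirely:
 *     uint64_t cTicks;
 *     int rc = NEMHCQueryCpuTick(pVCpu, &cTicks, NULL /*puAux*/);
 * Both registers are fetched in one WHvGetVirtualProcessorRegisters() batch
 * when puAux is wanted, saving a second kernel round trip. */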
1199
1200
1201/**
1202 * Resumes CPU clock (TSC) on all virtual CPUs.
1203 *
1204 * This is called by TM when the VM is started, restored, resumed or similar.
1205 *
1206 * @returns VBox status code.
1207 * @param pVM The cross context VM structure.
1208 * @param pVCpu The cross context CPU structure of the calling EMT.
1209 * @param uPausedTscValue The TSC value at the time of pausing.
1210 */
1211VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uPausedTscValue)
1212{
1213#ifdef IN_RING0
1214# ifdef NEM_WIN_WITH_RING0_RUNLOOP
1215 return nemR0WinResumeCpuTickOnAll(pVM, pVCpu, uPausedTscValue);
1216# else
1217 RT_NOREF(pVM, pVCpu, uPausedTscValue);
1218 return VERR_NOT_IMPLEMENTED;
1219# endif
1220#else /* IN_RING3 */
1221 VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
1222 AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);
1223
1224# if defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) || defined(NEM_WIN_WITH_RING0_RUNLOOP)
1225# if !defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) && defined(NEM_WIN_WITH_RING0_RUNLOOP)
1226 if (pVM->nem.s.fUseRing0Runloop)
1227# endif
1228 {
1229 /* Call ring-0 and do it all there. */
1230 return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL, uPausedTscValue, NULL);
1231 }
1232# endif
1233# ifndef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS
1234 /*
1235 * Call the offical API to do the job.
1236 */
1237 if (pVM->cCpus > 1)
1238 RTThreadYield(); /* Try decrease the chance that we get rescheduled in the middle. */
1239
1240 /* Start with the first CPU. */
1241 WHV_REGISTER_NAME enmName = WHvX64RegisterTsc;
1242 WHV_REGISTER_VALUE Value = {0, 0};
1243 Value.Reg64 = uPausedTscValue;
1244 uint64_t const uFirstTsc = ASMReadTSC();
1245 HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, 0 /*iCpu*/, &enmName, 1, &Value);
1246 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1247 ("WHvSetVirtualProcessorRegisters(%p, 0,{tsc},2,%#RX64) -> %Rhrc (Last=%#x/%u)\n",
1248 pVM->nem.s.hPartition, uPausedTscValue, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1249 , VERR_NEM_SET_TSC);
1250
1251 /* Do the other CPUs, adjusting for elapsed TSC and keeping finger crossed
1252 that we don't introduce too much drift here. */
1253 for (VMCPUID iCpu = 1; iCpu < pVM->cCpus; iCpu++)
1254 {
1255 Assert(enmName == WHvX64RegisterTsc);
1256 const uint64_t offDelta = (ASMReadTSC() - uFirstTsc);
1257 Value.Reg64 = uPausedTscValue + offDelta;
1258 hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, iCpu, &enmName, 1, &Value);
1259 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1260 ("WHvSetVirtualProcessorRegisters(%p, 0,{tsc},2,%#RX64 + %#RX64) -> %Rhrc (Last=%#x/%u)\n",
1261 pVM->nem.s.hPartition, iCpu, uPausedTscValue, offDelta, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1262 , VERR_NEM_SET_TSC);
1263 }
1264
1265 return VINF_SUCCESS;
1266# endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
1267#endif /* IN_RING3 */
1268}
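/*
 * Illustrative only: a minimal sketch (not part of the template) of how the
 * resume API above pairs with the NEMHCQueryCpuTick query.  The helper name
 * nemExampleSaveRestoreTsc and its calling context are assumptions.
 */
#if 0
static int nemExampleSaveRestoreTsc(PVMCC pVM, PVMCPUCC pVCpu)
{
    /* While pausing: sample the guest TSC (TSC_AUX is not needed here). */
    uint64_t uPausedTsc = 0;
    int rc = NEMHCQueryCpuTick(pVCpu, &uPausedTsc, NULL /*puAux*/);
    AssertRCReturn(rc, rc);

    /* ... the VM is paused, saved or restored here ... */

    /* While resuming: rewind every vCPU to the sampled value; the loop above
       adds the elapsed host TSC so the vCPUs stay roughly in step. */
    return NEMHCResumeCpuTickOnAll(pVM, pVCpu, uPausedTsc);
}
#endif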
1269
1270#ifdef NEMWIN_NEED_GET_REGISTER
1271# if defined(IN_RING0) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
1272/** Worker for assertion macro. */
1273NEM_TMPL_STATIC int nemHCWinGetRegister(PVMCPUCC pVCpu, PGVMCPU pGVCpu, uint32_t enmReg, HV_REGISTER_VALUE *pRetValue)
1274{
1275 RT_ZERO(*pRetValue);
1276# ifdef IN_RING3
1277 RT_NOREF(pVCpu, pGVCpu, enmReg);
1278 return VERR_NOT_IMPLEMENTED;
1279# else
1280 NOREF(pVCpu);
1281
1282 /*
1283 * Hypercall parameters.
1284 */
1285 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
1286 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
1287 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
1288
1289 pInput->PartitionId = pVCpu->pGVM->nemr0.s.idHvPartition;
1290 pInput->VpIndex = pVCpu->idCpu;
1291 pInput->fFlags = 0;
1292 pInput->Names[0] = (HV_REGISTER_NAME)enmReg;
1293
1294 size_t const cbInput = RT_ALIGN_Z(RT_UOFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[1]), 32);
1295 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
1296 RT_BZERO(paValues, sizeof(paValues[0]) * 1);
1297
1298 /*
1299 * Make the hypercall and copy out the value.
1300 */
1301 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, 1),
1302 pGVCpu->nem.s.HypercallData.HCPhysPage,
1303 pGVCpu->nem.s.HypercallData.HCPhysPage + cbInput);
1304 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(1), ("uResult=%RX64 cRegs=%#x\n", uResult, 1),
1305 VERR_NEM_GET_REGISTERS_FAILED);
1306
1307 *pRetValue = paValues[0];
1308 return VINF_SUCCESS;
1309# endif
1310}
1311# else
1312/** Worker for assertion macro. */
1313NEM_TMPL_STATIC int nemR3WinGetRegister(PVMCPUCC pVCpu, uint32_t enmReg, WHV_REGISTER_VALUE *pRetValue)
1314{
1315 RT_ZERO(*pRetValue);
1316 RT_NOREF(pVCpu, enmReg);
1317 return VERR_NOT_IMPLEMENTED;
1318}
1319# endif
1320#endif
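/*
 * Illustrative only: when NEMWIN_NEED_GET_REGISTER is defined, the workers
 * above back the NEMWIN_ASSERT_MSG_REG_* checks used by the intercept
 * handlers further down, e.g.:
 *     NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRip, pMsg->Header.Rip);
 */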
1321
1322
1323#ifdef LOG_ENABLED
1324/**
1325 * Get the virtual processor running status.
1326 */
1327DECLINLINE(VID_PROCESSOR_STATUS) nemHCWinCpuGetRunningStatus(PVMCPUCC pVCpu)
1328{
1329# ifdef IN_RING0
1330 NOREF(pVCpu);
1331 return VidProcessorStatusUndefined;
1332# else
1333 RTERRVARS Saved;
1334 RTErrVarsSave(&Saved);
1335
1336 /*
1337 * This API is disabled in release builds, it seems. On build 17101 it requires
1338 * the following patch to be enabled (windbg): eb vid+12180 0f 84 98 00 00 00
1339 */
1340 VID_PROCESSOR_STATUS enmCpuStatus = VidProcessorStatusUndefined;
1341 NTSTATUS rcNt = g_pfnVidGetVirtualProcessorRunningStatus(pVCpu->pVMR3->nem.s.hPartitionDevice, pVCpu->idCpu, &enmCpuStatus);
1342 AssertRC(rcNt);
1343
1344 RTErrVarsRestore(&Saved);
1345 return enmCpuStatus;
1346# endif
1347}
1348#endif /* LOG_ENABLED */
1349
1350
1351#if defined(NEM_WIN_USE_OUR_OWN_RUN_API) || defined(NEM_WIN_WITH_RING0_RUNLOOP)
1352# ifdef IN_RING3 /* hopefully not needed in ring-0, as we'd need KTHREADs and KeAlertThread. */
1353/**
1354 * Our own WHvCancelRunVirtualProcessor that can later be moved to ring-0.
1355 *
1356 * This is an experiment only.
1357 *
1358 * @returns VBox status code.
1359 * @param pVM The cross context VM structure.
1360 * @param pVCpu The cross context virtual CPU structure of the
1361 * calling EMT.
1362 */
1363NEM_TMPL_STATIC int nemHCWinCancelRunVirtualProcessor(PVMCC pVM, PVMCPUCC pVCpu)
1364{
1365 /*
1366 * Work the state.
1367 *
1368 * From the looks of things, we should let the EMT call VidStopVirtualProcessor.
1369 * So, we just need to modify the state and kick the EMT if it's waiting on
1370 * messages. For the latter we use QueueUserAPC / KeAlertThread.
1371 */
1372 for (;;)
1373 {
1374 VMCPUSTATE enmState = VMCPU_GET_STATE(pVCpu);
1375 switch (enmState)
1376 {
1377 case VMCPUSTATE_STARTED_EXEC_NEM:
1378 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED, VMCPUSTATE_STARTED_EXEC_NEM))
1379 {
1380 DBGFTRACE_CUSTOM(pVM, "VMCPUSTATE_STARTED_EXEC_NEM -> CANCELED");
1381 Log8(("nemHCWinCancelRunVirtualProcessor: Switched %u to canceled state\n", pVCpu->idCpu));
1382 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatCancelChangedState);
1383 return VINF_SUCCESS;
1384 }
1385 break;
1386
1387 case VMCPUSTATE_STARTED_EXEC_NEM_WAIT:
1388 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED, VMCPUSTATE_STARTED_EXEC_NEM_WAIT))
1389 {
1390 DBGFTRACE_CUSTOM(pVM, "VMCPUSTATE_STARTED_EXEC_NEM_WAIT -> CANCELED");
1391# ifdef IN_RING0
1392 NTSTATUS rcNt = KeAlertThread(??);
1393 DBGFTRACE_CUSTOM(pVM, "KeAlertThread -> %#x", rcNt);
1394# else
1395 NTSTATUS rcNt = NtAlertThread(pVCpu->nem.s.hNativeThreadHandle);
1396 DBGFTRACE_CUSTOM(pVM, "NtAlertThread -> %#x", rcNt);
1397# endif
1398 Log8(("nemHCWinCancelRunVirtualProcessor: Alerted %u: %#x\n", pVCpu->idCpu, rcNt));
1399 Assert(rcNt == STATUS_SUCCESS);
1400 if (NT_SUCCESS(rcNt))
1401 {
1402 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatCancelAlertedThread);
1403 return VINF_SUCCESS;
1404 }
1405 AssertLogRelMsgFailedReturn(("NtAlertThread failed: %#x\n", rcNt), RTErrConvertFromNtStatus(rcNt));
1406 }
1407 break;
1408
1409 default:
1410 return VINF_SUCCESS;
1411 }
1412
1413 ASMNopPause();
1414 RT_NOREF(pVM);
1415 }
1416}
1417# endif /* IN_RING3 */
1418#endif /* NEM_WIN_USE_OUR_OWN_RUN_API || NEM_WIN_WITH_RING0_RUNLOOP */
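/*
 * Illustrative only: a sketch of kicking a vCPU out of its run loop from
 * another thread via the cancel helper above.  The wrapper name and the use
 * of the VMCC_GET_CPU accessor are assumptions of this sketch.
 */
#if 0
static void nemExampleKickVCpu(PVMCC pVM, VMCPUID idCpu)
{
    PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
    int rc = nemHCWinCancelRunVirtualProcessor(pVM, pVCpu);
    AssertRC(rc); /* The EMT itself still ends up calling VidStopVirtualProcessor. */
}
#endif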
1419
1420
1421#ifdef LOG_ENABLED
1422/**
1423 * Logs the current CPU state.
1424 */
1425NEM_TMPL_STATIC void nemHCWinLogState(PVMCC pVM, PVMCPUCC pVCpu)
1426{
1427 if (LogIs3Enabled())
1428 {
1429# if 0 // def IN_RING3 - causes lazy state import assertions all over CPUM.
1430 char szRegs[4096];
1431 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
1432 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
1433 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
1434 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
1435 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
1436 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
1437 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
1438 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
1439 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
1440 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
1441 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
1442 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
1443 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
1444 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
1445 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
1446 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
1447 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
1448 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
1449 " efer=%016VR{efer}\n"
1450 " pat=%016VR{pat}\n"
1451 " sf_mask=%016VR{sf_mask}\n"
1452 "krnl_gs_base=%016VR{krnl_gs_base}\n"
1453 " lstar=%016VR{lstar}\n"
1454 " star=%016VR{star} cstar=%016VR{cstar}\n"
1455 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
1456 );
1457
1458 char szInstr[256];
1459 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
1460 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
1461 szInstr, sizeof(szInstr), NULL);
1462 Log3(("%s%s\n", szRegs, szInstr));
1463# else
1464 /** @todo state logging in ring-0 */
1465 RT_NOREF(pVM, pVCpu);
1466# endif
1467 }
1468}
1469#endif /* LOG_ENABLED */
1470
1471
1472/** Macro used by nemHCWinExecStateToLogStr and nemR3WinExecStateToLogStr. */
1473#define SWITCH_IT(a_szPrefix) \
1474 do \
1475 switch (u)\
1476 { \
1477 case 0x00: return a_szPrefix ""; \
1478 case 0x01: return a_szPrefix ",Pnd"; \
1479 case 0x02: return a_szPrefix ",Dbg"; \
1480 case 0x03: return a_szPrefix ",Pnd,Dbg"; \
1481 case 0x04: return a_szPrefix ",Shw"; \
1482 case 0x05: return a_szPrefix ",Pnd,Shw"; \
1483 case 0x06: return a_szPrefix ",Shw,Dbg"; \
1484 case 0x07: return a_szPrefix ",Pnd,Shw,Dbg"; \
1485 default: AssertFailedReturn("WTF?"); \
1486 } \
1487 while (0)
1488
1489#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
1490/**
1491 * Translates the execution state bitfield into a short log string, VID version.
1492 *
1493 * @returns Read-only log string.
1494 * @param pMsgHdr The header whose state to summarize.
1495 */
1496static const char *nemHCWinExecStateToLogStr(HV_X64_INTERCEPT_MESSAGE_HEADER const *pMsgHdr)
1497{
1498 unsigned u = (unsigned)pMsgHdr->ExecutionState.InterruptionPending
1499 | ((unsigned)pMsgHdr->ExecutionState.DebugActive << 1)
1500 | ((unsigned)pMsgHdr->ExecutionState.InterruptShadow << 2);
1501 if (pMsgHdr->ExecutionState.EferLma)
1502 SWITCH_IT("LM");
1503 else if (pMsgHdr->ExecutionState.Cr0Pe)
1504 SWITCH_IT("PM");
1505 else
1506 SWITCH_IT("RM");
1507}
1508#elif defined(IN_RING3)
1509/**
1510 * Translates the execution state bitfield into a short log string, WinHv version.
1511 *
1512 * @returns Read-only log string.
1513 * @param pExitCtx The exit context whose state to summarize.
1514 */
1515static const char *nemR3WinExecStateToLogStr(WHV_VP_EXIT_CONTEXT const *pExitCtx)
1516{
1517 unsigned u = (unsigned)pExitCtx->ExecutionState.InterruptionPending
1518 | ((unsigned)pExitCtx->ExecutionState.DebugActive << 1)
1519 | ((unsigned)pExitCtx->ExecutionState.InterruptShadow << 2);
1520 if (pExitCtx->ExecutionState.EferLma)
1521 SWITCH_IT("LM");
1522 else if (pExitCtx->ExecutionState.Cr0Pe)
1523 SWITCH_IT("PM");
1524 else
1525 SWITCH_IT("RM");
1526}
1527#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
1528#undef SWITCH_IT
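/* Example (illustrative): a 64-bit guest with an interrupt pending inside an
   interrupt shadow logs as "LM,Pnd,Shw", while an unencumbered real-mode
   guest logs as plain "RM". */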
1529
1530
1531#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
1532/**
1533 * Advances the guest RIP and clears EFLAGS.RF, VID version.
1534 *
1535 * This may clear VMCPU_FF_INHIBIT_INTERRUPTS.
1536 *
1537 * @param pVCpu The cross context virtual CPU structure.
1538 * @param pMsgHdr The intercept message header.
1539 * @param cbMinInstr The minimum instruction length, or 1 if unknown.
1540 */
1541DECLINLINE(void)
1542nemHCWinAdvanceGuestRipAndClearRF(PVMCPUCC pVCpu, HV_X64_INTERCEPT_MESSAGE_HEADER const *pMsgHdr, uint8_t cbMinInstr)
1543{
1544 Assert(!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));
1545
1546 /* Advance the RIP. */
1547 Assert(pMsgHdr->InstructionLength >= cbMinInstr); RT_NOREF_PV(cbMinInstr);
1548 pVCpu->cpum.GstCtx.rip += pMsgHdr->InstructionLength;
1549 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
1550
1551 /* Update interrupt inhibition. */
1552 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1553 { /* likely */ }
1554 else if (pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
1555 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1556}
1557#elif defined(IN_RING3)
1558/**
1559 * Advances the guest RIP and clears EFLAGS.RF, WinHv version.
1560 *
1561 * This may clear VMCPU_FF_INHIBIT_INTERRUPTS.
1562 *
1563 * @param pVCpu The cross context virtual CPU structure.
1564 * @param pExitCtx The exit context.
1565 * @param cbMinInstr The minimum instruction length, or 1 if unknown.
1566 */
1567DECLINLINE(void) nemR3WinAdvanceGuestRipAndClearRF(PVMCPUCC pVCpu, WHV_VP_EXIT_CONTEXT const *pExitCtx, uint8_t cbMinInstr)
1568{
1569 Assert(!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));
1570
1571 /* Advance the RIP. */
1572 Assert(pExitCtx->InstructionLength >= cbMinInstr); RT_NOREF_PV(cbMinInstr);
1573 pVCpu->cpum.GstCtx.rip += pExitCtx->InstructionLength;
1574 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
1575
1576 /* Update interrupt inhibition. */
1577 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1578 { /* likely */ }
1579 else if (pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
1580 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1581}
1582#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
1583
1584
1585
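/**
 * Worker callback for PGMPhysNemEnumPagesByState, used to unmap pages when
 * things have gotten out of sync.
 *
 * @returns VINF_SUCCESS (failures are logged and the page state is reset).
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context per CPU structure.
 * @param   GCPhys      The guest physical address of the page to unmap.
 * @param   pu2NemState The NEM page state to update.
 * @param   pvUser      Unused.
 */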
1586NEM_TMPL_STATIC DECLCALLBACK(int)
1587nemHCWinUnmapOnePageCallback(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint8_t *pu2NemState, void *pvUser)
1588{
1589 RT_NOREF_PV(pvUser);
1590#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1591 int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys);
1592 AssertRC(rc);
1593 if (RT_SUCCESS(rc))
1594#else
1595 RT_NOREF_PV(pVCpu);
1596 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
1597 if (SUCCEEDED(hrc))
1598#endif
1599 {
1600 Log5(("NEM GPA unmap all: %RGp (cMappedPages=%u)\n", GCPhys, pVM->nem.s.cMappedPages - 1));
1601 *pu2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
1602 }
1603 else
1604 {
1605#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1606 LogRel(("nemHCWinUnmapOnePageCallback: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));
1607#else
1608 LogRel(("nemHCWinUnmapOnePageCallback: GCPhys=%RGp %s hrc=%Rhrc (%#x) Last=%#x/%u (cMappedPages=%u)\n",
1609 GCPhys, g_apszPageStates[*pu2NemState], hrc, hrc, RTNtLastStatusValue(),
1610 RTNtLastErrorValue(), pVM->nem.s.cMappedPages));
1611#endif
1612 *pu2NemState = NEM_WIN_PAGE_STATE_NOT_SET;
1613 }
1614 if (pVM->nem.s.cMappedPages > 0)
1615 ASMAtomicDecU32(&pVM->nem.s.cMappedPages);
1616 return VINF_SUCCESS;
1617}
1618
1619
1620/**
1621 * State to pass between nemHCWinHandleMessageMemory / nemR3WinHandleExitMemory
1622 * and nemHCWinHandleMemoryAccessPageCheckerCallback.
1623 */
1624typedef struct NEMHCWINHMACPCCSTATE
1625{
1626 /** Input: Write access. */
1627 bool fWriteAccess;
1628 /** Output: Set if we did something. */
1629 bool fDidSomething;
1630 /** Output: Set if we should resume. */
1631 bool fCanResume;
1632} NEMHCWINHMACPCCSTATE;
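/*
 * Illustrative only: how the memory-exit handlers further down feed this
 * state block to the page checker.  GCPhysFault stands in for the intercepted
 * guest physical address and is an assumption of the sketch.
 */
#if 0
    NEMHCWINHMACPCCSTATE State = { true /*fWriteAccess*/, false, false };
    PGMPHYSNEMPAGEINFO   Info;
    int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhysFault, State.fWriteAccess, &Info,
                                       nemHCWinHandleMemoryAccessPageCheckerCallback, &State);
#endif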
1633
1634/**
1635 * @callback_method_impl{FNPGMPHYSNEMCHECKPAGE,
1636 * Worker for nemHCWinHandleMessageMemory / nemR3WinHandleExitMemory; pvUser points to a
1637 * NEMHCWINHMACPCCSTATE structure. }
1638 */
1639NEM_TMPL_STATIC DECLCALLBACK(int)
1640nemHCWinHandleMemoryAccessPageCheckerCallback(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
1641{
1642 NEMHCWINHMACPCCSTATE *pState = (NEMHCWINHMACPCCSTATE *)pvUser;
1643 pState->fDidSomething = false;
1644 pState->fCanResume = false;
1645
1646 /* If A20 is disabled, we may need to make another query on the masked
1647 page to get the correct protection information. */
1648 uint8_t u2State = pInfo->u2NemState;
1649 RTGCPHYS GCPhysSrc;
1650 if ( pVM->nem.s.fA20Enabled
1651 || !NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
1652 GCPhysSrc = GCPhys;
1653 else
1654 {
1655 GCPhysSrc = GCPhys & ~(RTGCPHYS)RT_BIT_32(20);
1656 PGMPHYSNEMPAGEINFO Info2;
1657 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhysSrc, pState->fWriteAccess, &Info2, NULL, NULL);
1658 AssertRCReturn(rc, rc);
1659
1660 *pInfo = Info2;
1661 pInfo->u2NemState = u2State;
1662 }
1663
1664 /*
1665 * Consolidate current page state with actual page protection and access type.
1666 * We don't really consider downgrades here, as they shouldn't happen.
1667 */
1668#ifndef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1669 /** @todo Someone at Microsoft please explain:
1670 * I'm not sure WTF was going on, but I ended up in a loop if I remapped a
1671 * readonly page as writable (unmap, then map again). Specifically, this was an
1672 * issue with the big VRAM mapping at 0xe0000000 when booting DSL 4.4.1. So, in
1673 * the hope of working around that we no longer pre-map anything, just unmap stuff
1674 * and do it lazily here. And here we will first unmap, restart, and then remap
1675 * with new protection or backing.
1676 */
1677#endif
1678 int rc;
1679 switch (u2State)
1680 {
1681 case NEM_WIN_PAGE_STATE_UNMAPPED:
1682 case NEM_WIN_PAGE_STATE_NOT_SET:
1683 if (pInfo->fNemProt == NEM_PAGE_PROT_NONE)
1684 {
1685 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #1\n", GCPhys));
1686 return VINF_SUCCESS;
1687 }
1688
1689 /* Don't bother remapping it if it's a write request to a non-writable page. */
1690 if ( pState->fWriteAccess
1691 && !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE))
1692 {
1693 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #1w\n", GCPhys));
1694 return VINF_SUCCESS;
1695 }
1696
1697 /* Map the page. */
1698 rc = nemHCNativeSetPhysPage(pVM,
1699 pVCpu,
1700 GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
1701 GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
1702 pInfo->fNemProt,
1703 &u2State,
1704 true /*fBackingState*/);
1705 pInfo->u2NemState = u2State;
1706 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - synced => %s + %Rrc\n",
1707 GCPhys, g_apszPageStates[u2State], rc));
1708 pState->fDidSomething = true;
1709 pState->fCanResume = true;
1710 return rc;
1711
1712 case NEM_WIN_PAGE_STATE_READABLE:
1713 if ( !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1714 && (pInfo->fNemProt & (NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE)))
1715 {
1716 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #2\n", GCPhys));
1717 return VINF_SUCCESS;
1718 }
1719
1720#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1721 /* Upgrade page to writable. */
1722/** @todo test this. */
1723 if ( (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1724 && pState->fWriteAccess)
1725 {
1726 rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhys,
1727 HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE
1728 | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
1729 AssertRC(rc);
1730 if (RT_SUCCESS(rc))
1731 {
1732 pInfo->u2NemState = NEM_WIN_PAGE_STATE_WRITABLE;
1733 pState->fDidSomething = true;
1734 pState->fCanResume = true;
1735 Log5(("NEM GPA write-upgrade/exit: %RGp (was %s, cMappedPages=%u)\n",
1736 GCPhys, g_apszPageStates[u2State], pVM->nem.s.cMappedPages));
1737 }
1738 }
1739 else
1740 {
1741 /* Need to emulate the access. */
1742 AssertBreak(pInfo->fNemProt != NEM_PAGE_PROT_NONE); /* There should be no downgrades. */
1743 rc = VINF_SUCCESS;
1744 }
1745 return rc;
1746#else
1747 break;
1748#endif
1749
1750 case NEM_WIN_PAGE_STATE_WRITABLE:
1751 if (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1752 {
1753 if (pInfo->u2OldNemState == NEM_WIN_PAGE_STATE_WRITABLE)
1754 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #3a\n", GCPhys));
1755 else
1756 {
1757 pState->fCanResume = true;
1758 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #3b (%s -> %s)\n",
1759 GCPhys, g_apszPageStates[pInfo->u2OldNemState], g_apszPageStates[u2State]));
1760 }
1761 return VINF_SUCCESS;
1762 }
1763#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1764 AssertFailed(); /* There should be no downgrades. */
1765#endif
1766 break;
1767
1768 default:
1769 AssertLogRelMsgFailedReturn(("u2State=%#x\n", u2State), VERR_NEM_IPE_4);
1770 }
1771
1772 /*
1773 * Unmap and restart the instruction.
1774 * If this fails, which it does every so often, just unmap everything for now.
1775 */
1776#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1777 rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys);
1778 AssertRC(rc);
1779 if (RT_SUCCESS(rc))
1780#else
1781 /** @todo figure out whether we mess up the state or if it's WHv. */
1782 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
1783 if (SUCCEEDED(hrc))
1784#endif
1785 {
1786 pState->fDidSomething = true;
1787 pState->fCanResume = true;
1788 pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
1789 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
1790 Log5(("NEM GPA unmapped/exit: %RGp (was %s, cMappedPages=%u)\n", GCPhys, g_apszPageStates[u2State], cMappedPages));
1791 return VINF_SUCCESS;
1792 }
1793#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1794 LogRel(("nemHCWinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhys, rc));
1795 return rc;
1796#else
1797 LogRel(("nemHCWinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp %s hrc=%Rhrc (%#x) Last=%#x/%u (cMappedPages=%u)\n",
1798 GCPhys, g_apszPageStates[u2State], hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue(),
1799 pVM->nem.s.cMappedPages));
1800
1801 PGMPhysNemEnumPagesByState(pVM, pVCpu, NEM_WIN_PAGE_STATE_READABLE, nemHCWinUnmapOnePageCallback, NULL);
1802 Log(("nemHCWinHandleMemoryAccessPageCheckerCallback: Unmapped all (cMappedPages=%u)\n", pVM->nem.s.cMappedPages));
1803
1804 pState->fDidSomething = true;
1805 pState->fCanResume = true;
1806 pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
1807 return VINF_SUCCESS;
1808#endif
1809}
1810
1811
1812
1813#if defined(IN_RING0) && defined(NEM_WIN_TEMPLATE_MODE_OWN_RUN_API)
1814/**
1815 * Wrapper around nemR0WinImportState that converts VERR_NEM_FLUSH_TLB
1816 * into informational status codes and logs+asserts statuses.
1817 *
1818 * @returns VBox strict status code.
1819 * @param pGVM The global (ring-0) VM structure.
1820 * @param pGVCpu The global (ring-0) per CPU structure.
1821 * @param fWhat What to import.
1822 * @param pszCaller Who is doing the importing.
1823 */
1824DECLINLINE(VBOXSTRICTRC) nemR0WinImportStateStrict(PGVM pGVM, PGVMCPU pGVCpu, uint64_t fWhat, const char *pszCaller)
1825{
1826 int rc = nemR0WinImportState(pGVM, pGVCpu, &pGVCpu->cpum.GstCtx, fWhat, true /*fCanUpdateCr3*/);
1827 if (RT_SUCCESS(rc))
1828 {
1829 Assert(rc == VINF_SUCCESS);
1830 return VINF_SUCCESS;
1831 }
1832
1833 if (rc == VERR_NEM_FLUSH_TLB)
1834 {
1835 Log4(("%s/%u: nemR0WinImportState -> %Rrc\n", pszCaller, pGVCpu->idCpu, -rc));
1836 return -rc;
1837 }
1838 RT_NOREF(pszCaller);
1839 AssertMsgFailedReturn(("%s/%u: nemR0WinImportState failed: %Rrc\n", pszCaller, pGVCpu->idCpu, rc), rc);
1840}
1841#endif /* IN_RING0 && NEM_WIN_TEMPLATE_MODE_OWN_RUN_API*/
1842
1843#if defined(NEM_WIN_TEMPLATE_MODE_OWN_RUN_API) || defined(IN_RING3)
1844/**
1845 * Wrapper around nemR0WinImportStateStrict and nemHCWinCopyStateFromHyperV.
1846 *
1847 * Unlike the wrapped APIs, this checks whether it's necessary.
1848 *
1849 * @returns VBox strict status code.
1850 * @param pVCpu The cross context per CPU structure.
1851 * @param fWhat What to import.
1852 * @param pszCaller Who is doing the importing.
1853 */
1854DECLINLINE(VBOXSTRICTRC) nemHCWinImportStateIfNeededStrict(PVMCPUCC pVCpu, uint64_t fWhat, const char *pszCaller)
1855{
1856 if (pVCpu->cpum.GstCtx.fExtrn & fWhat)
1857 {
1858# ifdef IN_RING0
1859 return nemR0WinImportStateStrict(pVCpu->pGVM, pVCpu, fWhat, pszCaller);
1860# else
1861 RT_NOREF(pszCaller);
1862 int rc = nemHCWinCopyStateFromHyperV(pVCpu->pVMR3, pVCpu, fWhat);
1863 AssertRCReturn(rc, rc);
1864# endif
1865 }
1866 return VINF_SUCCESS;
1867}
1868#endif /* NEM_WIN_TEMPLATE_MODE_OWN_RUN_API || IN_RING3 */
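/*
 * Illustrative only: the typical call pattern for the lazy importer above;
 * the mask and the "Example" caller tag are placeholders.
 */
#if 0
    VBOXSTRICTRC rcStrict2 = nemHCWinImportStateIfNeededStrict(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS, "Example");
    if (rcStrict2 != VINF_SUCCESS)
        return rcStrict2; /* Either a failure or an informational status to propagate. */
#endif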
1869
1870#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
1871/**
1872 * Copies register state from the X64 intercept message header.
1873 *
1874 * ASSUMES no state copied yet.
1875 *
1876 * @param pVCpu The cross context per CPU structure.
1877 * @param pHdr The X64 intercept message header.
1878 * @sa nemR3WinCopyStateFromX64Header
1879 */
1880DECLINLINE(void) nemHCWinCopyStateFromX64Header(PVMCPUCC pVCpu, HV_X64_INTERCEPT_MESSAGE_HEADER const *pHdr)
1881{
1882 Assert( (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
1883 == (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT));
1884 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.cs, pHdr->CsSegment);
1885 pVCpu->cpum.GstCtx.rip = pHdr->Rip;
1886 pVCpu->cpum.GstCtx.rflags.u = pHdr->Rflags;
1887
1888 pVCpu->nem.s.fLastInterruptShadow = pHdr->ExecutionState.InterruptShadow;
1889 if (!pHdr->ExecutionState.InterruptShadow)
1890 {
1891 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1892 { /* likely */ }
1893 else
1894 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1895 }
1896 else
1897 EMSetInhibitInterruptsPC(pVCpu, pHdr->Rip);
1898
1899 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT);
1900}
1901#elif defined(IN_RING3)
1902/**
1903 * Copies register state from the (common) exit context.
1904 *
1905 * ASSUMES no state copied yet.
1906 *
1907 * @param pVCpu The cross context per CPU structure.
1908 * @param pExitCtx The common exit context.
1909 * @sa nemHCWinCopyStateFromX64Header
1910 */
1911DECLINLINE(void) nemR3WinCopyStateFromX64Header(PVMCPUCC pVCpu, WHV_VP_EXIT_CONTEXT const *pExitCtx)
1912{
1913 Assert( (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
1914 == (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT));
1915 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.cs, pExitCtx->Cs);
1916 pVCpu->cpum.GstCtx.rip = pExitCtx->Rip;
1917 pVCpu->cpum.GstCtx.rflags.u = pExitCtx->Rflags;
1918
1919 pVCpu->nem.s.fLastInterruptShadow = pExitCtx->ExecutionState.InterruptShadow;
1920 if (!pExitCtx->ExecutionState.InterruptShadow)
1921 {
1922 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1923 { /* likely */ }
1924 else
1925 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1926 }
1927 else
1928 EMSetInhibitInterruptsPC(pVCpu, pExitCtx->Rip);
1929
1930 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT);
1931}
1932#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
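/*
 * Illustrative only: both copy-from-header helpers above clear the imported
 * bits from fExtrn, so afterwards a caller may rely on e.g.:
 *     nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
 *     Assert(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP));
 */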
1933
1934
1935#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
1936/**
1937 * Deals with memory intercept message.
1938 *
1939 * @returns Strict VBox status code.
1940 * @param pVM The cross context VM structure.
1941 * @param pVCpu The cross context per CPU structure.
1942 * @param pMsg The message.
1943 * @sa nemR3WinHandleExitMemory
1944 */
1945NEM_TMPL_STATIC VBOXSTRICTRC
1946nemHCWinHandleMessageMemory(PVMCC pVM, PVMCPUCC pVCpu, HV_X64_MEMORY_INTERCEPT_MESSAGE const *pMsg)
1947{
1948 uint64_t const uHostTsc = ASMReadTSC();
1949 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
1950 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
1951 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_EXECUTE);
1952
1953 /*
1954 * Whatever we do, we must clear pending event injection upon resume.
1955 */
1956 if (pMsg->Header.ExecutionState.InterruptionPending)
1957 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
1958
1959# if 0 /* Experiment: 20K -> 34K exit/s. */
1960 if ( pMsg->Header.ExecutionState.EferLma
1961 && pMsg->Header.CsSegment.Long
1962 && pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
1963 {
1964 if ( pMsg->Header.Rip - (uint64_t)0xf65a < (uint64_t)(0xf662 - 0xf65a)
1965 && pMsg->InstructionBytes[0] == 0x89
1966 && pMsg->InstructionBytes[1] == 0x03)
1967 {
1968 pVCpu->cpum.GstCtx.rip = pMsg->Header.Rip + 2;
1969 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
1970 AssertMsg(pMsg->Header.InstructionLength == 2, ("%#x\n", pMsg->Header.InstructionLength));
1971 //Log(("%RX64 msg:\n%.80Rhxd\n", pVCpu->cpum.GstCtx.rip, pMsg));
1972 return VINF_SUCCESS;
1973 }
1974 }
1975# endif
1976
1977 /*
1978 * Ask PGM for information about the given GCPhys. We need to check if we're
1979 * out of sync first.
1980 */
1981 NEMHCWINHMACPCCSTATE State = { pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE, false, false };
1982 PGMPHYSNEMPAGEINFO Info;
1983 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pMsg->GuestPhysicalAddress, State.fWriteAccess, &Info,
1984 nemHCWinHandleMemoryAccessPageCheckerCallback, &State);
1985 if (RT_SUCCESS(rc))
1986 {
1987 if (Info.fNemProt & ( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
1988 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
1989 {
1990 if (State.fCanResume)
1991 {
1992 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting (%s)\n",
1993 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
1994 pMsg->GuestPhysicalAddress, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
1995 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
1996 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pMsg->Header.InterceptAccessType]));
1997 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
1998 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, uHostTsc);
1999 return VINF_SUCCESS;
2000 }
2001 }
2002 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating (%s)\n",
2003 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2004 pMsg->GuestPhysicalAddress, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
2005 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
2006 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pMsg->Header.InterceptAccessType]));
2007 }
2008 else
2009 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp rc=%Rrc%s; emulating (%s)\n",
2010 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2011 pMsg->GuestPhysicalAddress, rc, State.fDidSomething ? " modified-backing" : "",
2012 g_apszHvInterceptAccessTypes[pMsg->Header.InterceptAccessType]));
2013
2014 /*
2015 * Emulate the memory access, either access handler or special memory.
2016 */
2017 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2018 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
2019 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
2020 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
2021 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, uHostTsc);
2022 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
2023 VBOXSTRICTRC rcStrict;
2024# ifdef IN_RING0
2025 rcStrict = nemR0WinImportStateStrict(pVM, pVCpu,
2026 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES, "MemExit");
2027 if (rcStrict != VINF_SUCCESS)
2028 return rcStrict;
2029# else
2030 rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2031 AssertRCReturn(rc, rc);
2032# endif
2033
2034 if (pMsg->Reserved1)
2035 Log(("MemExit/Reserved1=%#x\n", pMsg->Reserved1));
2036 if (pMsg->Header.ExecutionState.Reserved0 || pMsg->Header.ExecutionState.Reserved1)
2037 Log(("MemExit/Hdr/State: Reserved0=%#x Reserved1=%#x\n", pMsg->Header.ExecutionState.Reserved0, pMsg->Header.ExecutionState.Reserved1));
2038
2039 if (!pExitRec)
2040 {
2041 //if (pMsg->InstructionByteCount > 0)
2042 // Log4(("InstructionByteCount=%#x %.16Rhxs\n", pMsg->InstructionByteCount, pMsg->InstructionBytes));
2043 if (pMsg->InstructionByteCount > 0)
2044 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pMsg->Header.Rip,
2045 pMsg->InstructionBytes, pMsg->InstructionByteCount);
2046 else
2047 rcStrict = IEMExecOne(pVCpu);
2048 /** @todo do we need to do anything wrt debugging here? */
2049 }
2050 else
2051 {
2052 /* Frequent access or probing. */
2053 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2054 Log4(("MemExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2055 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2056 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2057 }
2058 return rcStrict;
2059}
2060#elif defined(IN_RING3)
2061/**
2062 * Deals with memory access exits (WHvRunVpExitReasonMemoryAccess).
2063 *
2064 * @returns Strict VBox status code.
2065 * @param pVM The cross context VM structure.
2066 * @param pVCpu The cross context per CPU structure.
2067 * @param pExit The VM exit information to handle.
2068 * @sa nemHCWinHandleMessageMemory
2069 */
2070NEM_TMPL_STATIC VBOXSTRICTRC
2071nemR3WinHandleExitMemory(PVMCC pVM, PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2072{
2073 uint64_t const uHostTsc = ASMReadTSC();
2074 Assert(pExit->MemoryAccess.AccessInfo.AccessType != 3);
2075
2076 /*
2077 * Whatever we do, we must clear pending event injection upon resume.
2078 */
2079 if (pExit->VpContext.ExecutionState.InterruptionPending)
2080 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
2081
2082 /*
2083 * Ask PGM for information about the given GCPhys. We need to check if we're
2084 * out of sync first.
2085 */
2086 NEMHCWINHMACPCCSTATE State = { pExit->MemoryAccess.AccessInfo.AccessType == WHvMemoryAccessWrite, false, false };
2087 PGMPHYSNEMPAGEINFO Info;
2088 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pExit->MemoryAccess.Gpa, State.fWriteAccess, &Info,
2089 nemHCWinHandleMemoryAccessPageCheckerCallback, &State);
2090 if (RT_SUCCESS(rc))
2091 {
2092 if (Info.fNemProt & ( pExit->MemoryAccess.AccessInfo.AccessType == WHvMemoryAccessWrite
2093 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
2094 {
2095 if (State.fCanResume)
2096 {
2097 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting (%s)\n",
2098 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2099 pExit->MemoryAccess.Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
2100 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
2101 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pExit->MemoryAccess.AccessInfo.AccessType]));
2102 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
2103 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, uHostTsc);
2104 return VINF_SUCCESS;
2105 }
2106 }
2107 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating (%s)\n",
2108 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2109 pExit->MemoryAccess.Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
2110 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
2111 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pExit->MemoryAccess.AccessInfo.AccessType]));
2112 }
2113 else
2114 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp rc=%Rrc%s; emulating (%s)\n",
2115 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2116 pExit->MemoryAccess.Gpa, rc, State.fDidSomething ? " modified-backing" : "",
2117 g_apszHvInterceptAccessTypes[pExit->MemoryAccess.AccessInfo.AccessType]));
2118
2119 /*
2120 * Emulate the memory access, either access handler or special memory.
2121 */
2122 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2123 pExit->MemoryAccess.AccessInfo.AccessType == WHvMemoryAccessWrite
2124 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
2125 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
2126 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, uHostTsc);
2127 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2128 rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2129 AssertRCReturn(rc, rc);
2130 if (pExit->VpContext.ExecutionState.Reserved0 || pExit->VpContext.ExecutionState.Reserved1)
2131 Log(("MemExit/Hdr/State: Reserved0=%#x Reserved1=%#x\n", pExit->VpContext.ExecutionState.Reserved0, pExit->VpContext.ExecutionState.Reserved1));
2132
2133 VBOXSTRICTRC rcStrict;
2134 if (!pExitRec)
2135 {
2136 //if (pExit->MemoryAccess.InstructionByteCount > 0)
2137 // Log4(("InstructionByteCount=%#x %.16Rhxs\n", pExit->MemoryAccess.InstructionByteCount, pExit->MemoryAccess.InstructionBytes));
2138 if (pExit->MemoryAccess.InstructionByteCount > 0)
2139 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pExit->VpContext.Rip,
2140 pExit->MemoryAccess.InstructionBytes, pExit->MemoryAccess.InstructionByteCount);
2141 else
2142 rcStrict = IEMExecOne(pVCpu);
2143 /** @todo do we need to do anything wrt debugging here? */
2144 }
2145 else
2146 {
2147 /* Frequent access or probing. */
2148 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2149 Log4(("MemExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2150 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2151 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2152 }
2153 return rcStrict;
2154}
2155#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
2156
2157
2158#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
2159/**
2160 * Deals with I/O port intercept message.
2161 *
2162 * @returns Strict VBox status code.
2163 * @param pVM The cross context VM structure.
2164 * @param pVCpu The cross context per CPU structure.
2165 * @param pMsg The message.
2166 */
2167NEM_TMPL_STATIC VBOXSTRICTRC
2168nemHCWinHandleMessageIoPort(PVMCC pVM, PVMCPUCC pVCpu, HV_X64_IO_PORT_INTERCEPT_MESSAGE const *pMsg)
2169{
2170 /*
2171 * Assert message sanity.
2172 */
2173 Assert( pMsg->AccessInfo.AccessSize == 1
2174 || pMsg->AccessInfo.AccessSize == 2
2175 || pMsg->AccessInfo.AccessSize == 4);
2176 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
2177 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE);
2178 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
2179 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRip, pMsg->Header.Rip);
2180 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
2181 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
2182 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRax, pMsg->Rax);
2183 if (pMsg->AccessInfo.StringOp)
2184 {
2185 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, HvX64RegisterDs, pMsg->DsSegment);
2186 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, HvX64RegisterEs, pMsg->EsSegment);
2187 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRcx, pMsg->Rcx);
2188 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRsi, pMsg->Rsi);
2189 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRdi, pMsg->Rdi);
2190 }
2191
2192 /*
2193 * Whatever we do, we must clear pending event injection upon resume.
2194 */
2195 if (pMsg->Header.ExecutionState.InterruptionPending)
2196 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
2197
2198 /*
2199 * Add history first to avoid two paths doing EMHistoryExec calls.
2200 */
2201 VBOXSTRICTRC rcStrict;
2202 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2203 !pMsg->AccessInfo.StringOp
2204 ? ( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
2205 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_WRITE)
2206 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_READ))
2207 : ( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
2208 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_WRITE)
2209 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_READ)),
2210 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
2211 if (!pExitRec)
2212 {
2213 if (!pMsg->AccessInfo.StringOp)
2214 {
2215 /*
2216 * Simple port I/O.
2217 */
2218 static uint32_t const s_fAndMask[8] =
2219 { UINT32_MAX, UINT32_C(0xff), UINT32_C(0xffff), UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX };
2220 uint32_t const fAndMask = s_fAndMask[pMsg->AccessInfo.AccessSize];
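            /* E.g. AccessSize == 1 keeps only AL (0xff) and AccessSize == 2 only AX
               (0xffff), while AccessSize == 4 uses the full 32-bit value; the
               remaining table slots are defensive padding for impossible sizes. */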
2221
2222 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
2223 if (pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
2224 {
2225 rcStrict = IOMIOPortWrite(pVM, pVCpu, pMsg->PortNumber, (uint32_t)pMsg->Rax & fAndMask, pMsg->AccessInfo.AccessSize);
2226 Log4(("IOExit/%u: %04x:%08RX64/%s: OUT %#x, %#x LB %u rcStrict=%Rrc\n",
2227 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2228 pMsg->PortNumber, (uint32_t)pMsg->Rax & fAndMask, pMsg->AccessInfo.AccessSize, VBOXSTRICTRC_VAL(rcStrict) ));
2229 if (IOM_SUCCESS(rcStrict))
2230 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, &pMsg->Header, 1);
2231# ifdef IN_RING0
2232 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
2233 && !pVCpu->cpum.GstCtx.rflags.Bits.u1TF
2234 /** @todo check for debug breakpoints */ )
2235 return EMRZSetPendingIoPortWrite(pVCpu, pMsg->PortNumber, pMsg->Header.InstructionLength,
2236 pMsg->AccessInfo.AccessSize, (uint32_t)pMsg->Rax & fAndMask);
2237# endif
2238 else
2239 {
2240 pVCpu->cpum.GstCtx.rax = pMsg->Rax;
2241 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RAX;
2242 }
2243 }
2244 else
2245 {
2246 uint32_t uValue = 0;
2247 rcStrict = IOMIOPortRead(pVM, pVCpu, pMsg->PortNumber, &uValue, pMsg->AccessInfo.AccessSize);
2248 Log4(("IOExit/%u: %04x:%08RX64/%s: IN %#x LB %u -> %#x, rcStrict=%Rrc\n",
2249 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2250 pMsg->PortNumber, pMsg->AccessInfo.AccessSize, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2251 if (IOM_SUCCESS(rcStrict))
2252 {
2253 if (pMsg->AccessInfo.AccessSize != 4)
2254 pVCpu->cpum.GstCtx.rax = (pMsg->Rax & ~(uint64_t)fAndMask) | (uValue & fAndMask);
2255 else
2256 pVCpu->cpum.GstCtx.rax = uValue;
2257 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RAX;
2258 Log4(("IOExit/%u: RAX %#RX64 -> %#RX64\n", pVCpu->idCpu, pMsg->Rax, pVCpu->cpum.GstCtx.rax));
2259 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, &pMsg->Header, 1);
2260 }
2261 else
2262 {
2263 pVCpu->cpum.GstCtx.rax = pMsg->Rax;
2264 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RAX;
2265# ifdef IN_RING0
2266 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
2267 && !pVCpu->cpum.GstCtx.rflags.Bits.u1TF
2268 /** @todo check for debug breakpoints */ )
2269 return EMRZSetPendingIoPortRead(pVCpu, pMsg->PortNumber, pMsg->Header.InstructionLength,
2270 pMsg->AccessInfo.AccessSize);
2271# endif
2272 }
2273 }
2274 }
2275 else
2276 {
2277 /*
2278 * String port I/O.
2279 */
2280 /** @todo Someone at Microsoft please explain how we can get the address mode
2281 * from the IoPortAccess.VpContext. CS.Attributes is only sufficient for
2282 * getting the default mode, as it can always be overridden by a prefix. This
2283 * forces us to interpret the instruction from opcodes, which is suboptimal.
2284 * Both AMD-V and VT-x include the address size in the exit info, at least on
2285 * CPUs that are reasonably new.
2286 *
2287 * Of course, it's possible this is simply undocumented and we just need to do some
2288 * experiments to figure out how it's communicated. Alternatively, we can scan
2289 * the opcode bytes for possible evil prefixes.
2290 */
2291 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
2292 pVCpu->cpum.GstCtx.fExtrn &= ~( CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDI | CPUMCTX_EXTRN_RSI
2293 | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2294 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.ds, pMsg->DsSegment);
2295 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.es, pMsg->EsSegment);
2296 pVCpu->cpum.GstCtx.rax = pMsg->Rax;
2297 pVCpu->cpum.GstCtx.rcx = pMsg->Rcx;
2298 pVCpu->cpum.GstCtx.rdi = pMsg->Rdi;
2299 pVCpu->cpum.GstCtx.rsi = pMsg->Rsi;
2300# ifdef IN_RING0
2301 rcStrict = nemR0WinImportStateStrict(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "IOExit");
2302 if (rcStrict != VINF_SUCCESS)
2303 return rcStrict;
2304# else
2305 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2306 AssertRCReturn(rc, rc);
2307# endif
2308
2309 Log4(("IOExit/%u: %04x:%08RX64/%s: %s%s %#x LB %u (emulating)\n",
2310 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2311 pMsg->AccessInfo.RepPrefix ? "REP " : "",
2312 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? "OUTS" : "INS",
2313 pMsg->PortNumber, pMsg->AccessInfo.AccessSize ));
2314 rcStrict = IEMExecOne(pVCpu);
2315 }
2316 if (IOM_SUCCESS(rcStrict))
2317 {
2318 /*
2319 * Do debug checks.
2320 */
2321 if ( pMsg->Header.ExecutionState.DebugActive /** @todo Microsoft: Does DebugActive only reflect DR7? */
2322 || (pMsg->Header.Rflags & X86_EFL_TF)
2323 || DBGFBpIsHwIoArmed(pVM) )
2324 {
2325 /** @todo Debugging. */
2326 }
2327 }
2328 return rcStrict;
2329 }
2330
2331 /*
2332 * Frequent exit or something needing probing.
2333 * Get state and call EMHistoryExec.
2334 */
2335 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
2336 if (!pMsg->AccessInfo.StringOp)
2337 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RAX;
2338 else
2339 {
2340 pVCpu->cpum.GstCtx.fExtrn &= ~( CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDI | CPUMCTX_EXTRN_RSI
2341 | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2342 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.ds, pMsg->DsSegment);
2343 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.es, pMsg->EsSegment);
2344 pVCpu->cpum.GstCtx.rcx = pMsg->Rcx;
2345 pVCpu->cpum.GstCtx.rdi = pMsg->Rdi;
2346 pVCpu->cpum.GstCtx.rsi = pMsg->Rsi;
2347 }
2348 pVCpu->cpum.GstCtx.rax = pMsg->Rax;
2349
2350# ifdef IN_RING0
2351 rcStrict = nemR0WinImportStateStrict(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "IOExit");
2352 if (rcStrict != VINF_SUCCESS)
2353 return rcStrict;
2354# else
2355 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2356 AssertRCReturn(rc, rc);
2357# endif
2358
2359 Log4(("IOExit/%u: %04x:%08RX64/%s: %s%s%s %#x LB %u -> EMHistoryExec\n",
2360 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2361 pMsg->AccessInfo.RepPrefix ? "REP " : "",
2362 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? "OUT" : "IN",
2363 pMsg->AccessInfo.StringOp ? "S" : "",
2364 pMsg->PortNumber, pMsg->AccessInfo.AccessSize));
2365 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2366 Log4(("IOExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2367 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2368 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2369 return rcStrict;
2370}
2371#elif defined(IN_RING3)
2372/**
2373 * Deals with I/O port access exits (WHvRunVpExitReasonX64IoPortAccess).
2374 *
2375 * @returns Strict VBox status code.
2376 * @param pVM The cross context VM structure.
2377 * @param pVCpu The cross context per CPU structure.
2378 * @param pExit The VM exit information to handle.
2379 * @sa nemHCWinHandleMessageIoPort
2380 */
2381NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExitIoPort(PVMCC pVM, PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2382{
2383 Assert( pExit->IoPortAccess.AccessInfo.AccessSize == 1
2384 || pExit->IoPortAccess.AccessInfo.AccessSize == 2
2385 || pExit->IoPortAccess.AccessInfo.AccessSize == 4);
2386
2387 /*
2388 * Whatever we do, we must clear pending event injection upon resume.
2389 */
2390 if (pExit->VpContext.ExecutionState.InterruptionPending)
2391 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
2392
2393 /*
2394 * Add history first to avoid two paths doing EMHistoryExec calls.
2395 */
2396 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2397 !pExit->IoPortAccess.AccessInfo.StringOp
2398 ? ( pExit->IoPortAccess.AccessInfo.IsWrite
2399 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_WRITE)
2400 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_READ))
2401 : ( pExit->IoPortAccess.AccessInfo.IsWrite
2402 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_WRITE)
2403 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_READ)),
2404 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
2405 if (!pExitRec)
2406 {
2407 VBOXSTRICTRC rcStrict;
2408 if (!pExit->IoPortAccess.AccessInfo.StringOp)
2409 {
2410 /*
2411 * Simple port I/O.
2412 */
2413 static uint32_t const s_fAndMask[8] =
2414 { UINT32_MAX, UINT32_C(0xff), UINT32_C(0xffff), UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX };
2415 uint32_t const fAndMask = s_fAndMask[pExit->IoPortAccess.AccessInfo.AccessSize];
2416 if (pExit->IoPortAccess.AccessInfo.IsWrite)
2417 {
2418 rcStrict = IOMIOPortWrite(pVM, pVCpu, pExit->IoPortAccess.PortNumber,
2419 (uint32_t)pExit->IoPortAccess.Rax & fAndMask,
2420 pExit->IoPortAccess.AccessInfo.AccessSize);
2421 Log4(("IOExit/%u: %04x:%08RX64/%s: OUT %#x, %#x LB %u rcStrict=%Rrc\n",
2422 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2423 pExit->IoPortAccess.PortNumber, (uint32_t)pExit->IoPortAccess.Rax & fAndMask,
2424 pExit->IoPortAccess.AccessInfo.AccessSize, VBOXSTRICTRC_VAL(rcStrict) ));
2425 if (IOM_SUCCESS(rcStrict))
2426 {
2427 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2428 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, &pExit->VpContext, 1);
2429 }
2430 }
2431 else
2432 {
2433 uint32_t uValue = 0;
2434 rcStrict = IOMIOPortRead(pVM, pVCpu, pExit->IoPortAccess.PortNumber, &uValue,
2435 pExit->IoPortAccess.AccessInfo.AccessSize);
2436 Log4(("IOExit/%u: %04x:%08RX64/%s: IN %#x LB %u -> %#x, rcStrict=%Rrc\n",
2437 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2438 pExit->IoPortAccess.PortNumber, pExit->IoPortAccess.AccessInfo.AccessSize, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2439 if (IOM_SUCCESS(rcStrict))
2440 {
2441 if (pExit->IoPortAccess.AccessInfo.AccessSize != 4)
2442 pVCpu->cpum.GstCtx.rax = (pExit->IoPortAccess.Rax & ~(uint64_t)fAndMask) | (uValue & fAndMask);
2443 else
2444 pVCpu->cpum.GstCtx.rax = uValue;
2445 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RAX;
2446 Log4(("IOExit/%u: RAX %#RX64 -> %#RX64\n", pVCpu->idCpu, pExit->IoPortAccess.Rax, pVCpu->cpum.GstCtx.rax));
2447 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2448 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, &pExit->VpContext, 1);
2449 }
2450 }
2451 }
2452 else
2453 {
2454 /*
2455 * String port I/O.
2456 */
2457 /** @todo Someone at Microsoft please explain how we can get the address mode
2458 * from the IoPortAccess.VpContext. CS.Attributes is only sufficient for
2459 * getting the default mode, as it can always be overridden by a prefix. This
2460 * forces us to interpret the instruction from opcodes, which is suboptimal.
2461 * Both AMD-V and VT-x include the address size in the exit info, at least on
2462 * CPUs that are reasonably new.
2463 *
2464 * Of course, it's possible this is simply undocumented and we just need to do some
2465 * experiments to figure out how it's communicated. Alternatively, we can scan
2466 * the opcode bytes for possible evil prefixes.
2467 */
2468 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2469 pVCpu->cpum.GstCtx.fExtrn &= ~( CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDI | CPUMCTX_EXTRN_RSI
2470 | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2471 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.ds, pExit->IoPortAccess.Ds);
2472 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.es, pExit->IoPortAccess.Es);
2473 pVCpu->cpum.GstCtx.rax = pExit->IoPortAccess.Rax;
2474 pVCpu->cpum.GstCtx.rcx = pExit->IoPortAccess.Rcx;
2475 pVCpu->cpum.GstCtx.rdi = pExit->IoPortAccess.Rdi;
2476 pVCpu->cpum.GstCtx.rsi = pExit->IoPortAccess.Rsi;
2477 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2478 AssertRCReturn(rc, rc);
2479
2480 Log4(("IOExit/%u: %04x:%08RX64/%s: %s%s %#x LB %u (emulating)\n",
2481 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2482 pExit->IoPortAccess.AccessInfo.RepPrefix ? "REP " : "",
2483 pExit->IoPortAccess.AccessInfo.IsWrite ? "OUTS" : "INS",
2484 pExit->IoPortAccess.PortNumber, pExit->IoPortAccess.AccessInfo.AccessSize ));
2485 rcStrict = IEMExecOne(pVCpu);
2486 }
2487 if (IOM_SUCCESS(rcStrict))
2488 {
2489 /*
2490 * Do debug checks.
2491 */
2492 if ( pExit->VpContext.ExecutionState.DebugActive /** @todo Microsoft: Does DebugActive only reflect DR7? */
2493 || (pExit->VpContext.Rflags & X86_EFL_TF)
2494 || DBGFBpIsHwIoArmed(pVM) )
2495 {
2496 /** @todo Debugging. */
2497 }
2498 }
2499 return rcStrict;
2500 }
2501
2502 /*
2503 * Frequent exit or something needing probing.
2504 * Get state and call EMHistoryExec.
2505 */
2506 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2507 if (!pExit->IoPortAccess.AccessInfo.StringOp)
2508 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RAX;
2509 else
2510 {
2511 pVCpu->cpum.GstCtx.fExtrn &= ~( CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDI | CPUMCTX_EXTRN_RSI
2512 | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2513 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.ds, pExit->IoPortAccess.Ds);
2514 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.es, pExit->IoPortAccess.Es);
2515 pVCpu->cpum.GstCtx.rcx = pExit->IoPortAccess.Rcx;
2516 pVCpu->cpum.GstCtx.rdi = pExit->IoPortAccess.Rdi;
2517 pVCpu->cpum.GstCtx.rsi = pExit->IoPortAccess.Rsi;
2518 }
2519 pVCpu->cpum.GstCtx.rax = pExit->IoPortAccess.Rax;
2520 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2521 AssertRCReturn(rc, rc);
2522 Log4(("IOExit/%u: %04x:%08RX64/%s: %s%s%s %#x LB %u -> EMHistoryExec\n",
2523 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2524 pExit->IoPortAccess.AccessInfo.RepPrefix ? "REP " : "",
2525 pExit->IoPortAccess.AccessInfo.IsWrite ? "OUT" : "IN",
2526 pExit->IoPortAccess.AccessInfo.StringOp ? "S" : "",
2527 pExit->IoPortAccess.PortNumber, pExit->IoPortAccess.AccessInfo.AccessSize));
2528 VBOXSTRICTRC rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2529 Log4(("IOExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2530 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2531 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2532 return rcStrict;
2533}
2534#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
2535
2536
2537#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
2538/**
2539 * Deals with interrupt window message.
2540 *
2541 * @returns Strict VBox status code.
2542 * @param pVM The cross context VM structure.
2543 * @param pVCpu The cross context per CPU structure.
2544 * @param pMsg The message.
2545 * @sa nemR3WinHandleExitInterruptWindow
2546 */
2547NEM_TMPL_STATIC VBOXSTRICTRC
2548nemHCWinHandleMessageInterruptWindow(PVMCC pVM, PVMCPUCC pVCpu, HV_X64_INTERRUPT_WINDOW_MESSAGE const *pMsg)
2549{
2550 /*
2551 * Assert message sanity.
2552 */
2553 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_EXECUTE
2554 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ // READ & WRITE are probably not used here
2555 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE);
2556 AssertMsg(pMsg->Type == HvX64PendingInterrupt || pMsg->Type == HvX64PendingNmi, ("%#x\n", pMsg->Type));
2557
2558 /*
2559 * Just copy the state we've got and handle it in the loop for now.
2560 */
2561 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_INTTERRUPT_WINDOW),
2562 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
2563
2564 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
2565 Log4(("IntWinExit/%u: %04x:%08RX64/%s: %u IF=%d InterruptShadow=%d\n",
2566 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2567 pMsg->Type, RT_BOOL(pMsg->Header.Rflags & X86_EFL_IF), pMsg->Header.ExecutionState.InterruptShadow));
2568
2569 /** @todo call nemHCWinHandleInterruptFF */
2570 RT_NOREF(pVM);
2571 return VINF_SUCCESS;
2572}
2573#elif defined(IN_RING3)
2574/**
2575 * Deals with interrupt window exits (WHvRunVpExitReasonX64InterruptWindow).
2576 *
2577 * @returns Strict VBox status code.
2578 * @param pVM The cross context VM structure.
2579 * @param pVCpu The cross context per CPU structure.
2580 * @param pExit The VM exit information to handle.
2581 * @sa nemHCWinHandleMessageInterruptWindow
2582 */
2583NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExitInterruptWindow(PVMCC pVM, PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2584{
2585 /*
2586 * Assert message sanity.
2587 */
2588 AssertMsg( pExit->InterruptWindow.DeliverableType == WHvX64PendingInterrupt
2589 || pExit->InterruptWindow.DeliverableType == WHvX64PendingNmi,
2590 ("%#x\n", pExit->InterruptWindow.DeliverableType));
2591
2592 /*
2593 * Just copy the state we've got and handle it in the loop for now.
2594 */
2595 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_INTTERRUPT_WINDOW),
2596 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
2597
2598 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2599 Log4(("IntWinExit/%u: %04x:%08RX64/%s: %u IF=%d InterruptShadow=%d\n",
2600 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2601 pExit->InterruptWindow.DeliverableType, RT_BOOL(pExit->VpContext.Rflags & X86_EFL_IF),
2602 pExit->VpContext.ExecutionState.InterruptShadow));
2603
2604 /** @todo call nemHCWinHandleInterruptFF */
2605 RT_NOREF(pVM);
2606 return VINF_SUCCESS;
2607}
2608#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
2609
2610
2611#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
2612/**
2613 * Deals with CPUID intercept message.
2614 *
2615 * @returns Strict VBox status code.
2616 * @param pVM The cross context VM structure.
2617 * @param pVCpu The cross context per CPU structure.
2618 * @param pMsg The message.
2619 * @sa nemR3WinHandleExitCpuId
2620 */
2621NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageCpuId(PVMCC pVM, PVMCPUCC pVCpu, HV_X64_CPUID_INTERCEPT_MESSAGE const *pMsg)
2622{
2623 /* Check message register value sanity. */
2624 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
2625 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRip, pMsg->Header.Rip);
2626 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
2627 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
2628 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRax, pMsg->Rax);
2629 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRcx, pMsg->Rcx);
2630 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRdx, pMsg->Rdx);
2631 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRbx, pMsg->Rbx);
2632
2633 /* Do exit history. */
2634 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_CPUID),
2635 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
2636 if (!pExitRec)
2637 {
2638 /*
2639 * Soak up state and execute the instruction.
2640 *
2641 * Note! If this grows slightly more complicated, combine into an IEMExecDecodedCpuId
2642 * function and make everyone use it.
2643 */
2644 /** @todo Combine implementations into IEMExecDecodedCpuId as this will
2645 * only get weirder with nested VT-x and AMD-V support. */
2646 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
2647
2648 /* Copy in the low register values (top is always cleared). */
2649 pVCpu->cpum.GstCtx.rax = (uint32_t)pMsg->Rax;
2650 pVCpu->cpum.GstCtx.rcx = (uint32_t)pMsg->Rcx;
2651 pVCpu->cpum.GstCtx.rdx = (uint32_t)pMsg->Rdx;
2652 pVCpu->cpum.GstCtx.rbx = (uint32_t)pMsg->Rbx;
2653 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);
2654
2655 /* Get the correct values. */
2656 CPUMGetGuestCpuId(pVCpu, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx,
2657 &pVCpu->cpum.GstCtx.eax, &pVCpu->cpum.GstCtx.ebx, &pVCpu->cpum.GstCtx.ecx, &pVCpu->cpum.GstCtx.edx);
2658
2659 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: rax=%08RX64 / rcx=%08RX64 / rdx=%08RX64 / rbx=%08RX64 -> %08RX32 / %08RX32 / %08RX32 / %08RX32 (hv: %08RX64 / %08RX64 / %08RX64 / %08RX64)\n",
2660 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2661 pMsg->Rax, pMsg->Rcx, pMsg->Rdx, pMsg->Rbx,
2662 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.ebx,
2663 pMsg->DefaultResultRax, pMsg->DefaultResultRcx, pMsg->DefaultResultRdx, pMsg->DefaultResultRbx));
2664
2665 /* Move RIP and we're done. */
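     /* (CPUID is a fixed-length 2-byte instruction: 0F A2.) */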
2666 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, &pMsg->Header, 2);
2667
2668 return VINF_SUCCESS;
2669 }
2670
2671 /*
2672 * Frequent exit or something needing probing.
2673 * Get state and call EMHistoryExec.
2674 */
2675 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
2676 pVCpu->cpum.GstCtx.rax = pMsg->Rax;
2677 pVCpu->cpum.GstCtx.rcx = pMsg->Rcx;
2678 pVCpu->cpum.GstCtx.rdx = pMsg->Rdx;
2679 pVCpu->cpum.GstCtx.rbx = pMsg->Rbx;
2680 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);
2681 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: rax=%08RX64 / rcx=%08RX64 / rdx=%08RX64 / rbx=%08RX64 (hv: %08RX64 / %08RX64 / %08RX64 / %08RX64) ==> EMHistoryExec\n",
2682 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2683 pMsg->Rax, pMsg->Rcx, pMsg->Rdx, pMsg->Rbx,
2684 pMsg->DefaultResultRax, pMsg->DefaultResultRcx, pMsg->DefaultResultRdx, pMsg->DefaultResultRbx));
2685# ifdef IN_RING0
2686 VBOXSTRICTRC rcStrict = nemR0WinImportStateStrict(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "CpuIdExit");
2687 if (rcStrict != VINF_SUCCESS)
2688 return rcStrict;
2689 RT_NOREF(pVM);
2690# else
2691 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2692 AssertRCReturn(rc, rc);
2693# endif
2694 VBOXSTRICTRC rcStrictExec = EMHistoryExec(pVCpu, pExitRec, 0);
2695 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2696 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2697 VBOXSTRICTRC_VAL(rcStrictExec), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2698 return rcStrictExec;
2699}
2700#elif defined(IN_RING3)
2701/**
2702 * Deals with CPUID exits (WHvRunVpExitReasonX64Cpuid).
2703 *
2704 * @returns Strict VBox status code.
2705 * @param pVM The cross context VM structure.
2706 * @param pVCpu The cross context per CPU structure.
2707 * @param pExit The VM exit information to handle.
2708 * @sa nemHCWinHandleMessageCpuId
2709 */
2710NEM_TMPL_STATIC VBOXSTRICTRC
2711nemR3WinHandleExitCpuId(PVMCC pVM, PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2712{
2713 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_CPUID),
2714 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
2715 if (!pExitRec)
2716 {
2717 /*
2718 * Soak up state and execute the instruction.
2719 *
2720 * Note! If this grows slightly more complicated, combine into an IEMExecDecodedCpuId
2721 * function and make everyone use it.
2722 */
2723 /** @todo Combine implementations into IEMExecDecodedCpuId as this will
2724 * only get weirder with nested VT-x and AMD-V support. */
2725 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2726
2727 /* Copy in the low register values (top is always cleared). */
2728 pVCpu->cpum.GstCtx.rax = (uint32_t)pExit->CpuidAccess.Rax;
2729 pVCpu->cpum.GstCtx.rcx = (uint32_t)pExit->CpuidAccess.Rcx;
2730 pVCpu->cpum.GstCtx.rdx = (uint32_t)pExit->CpuidAccess.Rdx;
2731 pVCpu->cpum.GstCtx.rbx = (uint32_t)pExit->CpuidAccess.Rbx;
2732 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);
2733
2734 /* Get the correct values. */
2735 CPUMGetGuestCpuId(pVCpu, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx,
2736 &pVCpu->cpum.GstCtx.eax, &pVCpu->cpum.GstCtx.ebx, &pVCpu->cpum.GstCtx.ecx, &pVCpu->cpum.GstCtx.edx);
2737
2738 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: rax=%08RX64 / rcx=%08RX64 / rdx=%08RX64 / rbx=%08RX64 -> %08RX32 / %08RX32 / %08RX32 / %08RX32 (hv: %08RX64 / %08RX64 / %08RX64 / %08RX64)\n",
2739 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2740 pExit->CpuidAccess.Rax, pExit->CpuidAccess.Rcx, pExit->CpuidAccess.Rdx, pExit->CpuidAccess.Rbx,
2741 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.ebx,
2742 pExit->CpuidAccess.DefaultResultRax, pExit->CpuidAccess.DefaultResultRcx, pExit->CpuidAccess.DefaultResultRdx, pExit->CpuidAccess.DefaultResultRbx));
2743
2744 /* Move RIP and we're done. */
2745 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, &pExit->VpContext, 2);
2746
2747 RT_NOREF_PV(pVM);
2748 return VINF_SUCCESS;
2749 }
2750
2751 /*
2752 * Frequent exit or something needing probing.
2753 * Get state and call EMHistoryExec.
2754 */
2755 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2756 pVCpu->cpum.GstCtx.rax = pExit->CpuidAccess.Rax;
2757 pVCpu->cpum.GstCtx.rcx = pExit->CpuidAccess.Rcx;
2758 pVCpu->cpum.GstCtx.rdx = pExit->CpuidAccess.Rdx;
2759 pVCpu->cpum.GstCtx.rbx = pExit->CpuidAccess.Rbx;
2760 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);
2761 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: rax=%08RX64 / rcx=%08RX64 / rdx=%08RX64 / rbx=%08RX64 (hv: %08RX64 / %08RX64 / %08RX64 / %08RX64) ==> EMHistoryExec\n",
2762 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2763 pExit->CpuidAccess.Rax, pExit->CpuidAccess.Rcx, pExit->CpuidAccess.Rdx, pExit->CpuidAccess.Rbx,
2764 pExit->CpuidAccess.DefaultResultRax, pExit->CpuidAccess.DefaultResultRcx, pExit->CpuidAccess.DefaultResultRdx, pExit->CpuidAccess.DefaultResultRbx));
2765 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2766 AssertRCReturn(rc, rc);
2767 VBOXSTRICTRC rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2768 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2769 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2770 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2771 return rcStrict;
2772}
2773#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
2774
2775
2776#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
2777/**
2778 * Deals with MSR intercept message.
2779 *
2780 * @returns Strict VBox status code.
2781 * @param pVCpu The cross context per CPU structure.
2782 * @param pMsg The message.
2783 * @sa nemR3WinHandleExitMsr
2784 */
2785NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageMsr(PVMCPUCC pVCpu, HV_X64_MSR_INTERCEPT_MESSAGE const *pMsg)
2786{
2787 /*
2788 * A wee bit of sanity first.
2789 */
2790 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
2791 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE);
2792 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
2793 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRip, pMsg->Header.Rip);
2794 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
2795 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
2796 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRax, pMsg->Rax);
2797 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRdx, pMsg->Rdx);
2798
2799 /*
2800 * Check CPL as that's common to both RDMSR and WRMSR.
2801 */
2802 VBOXSTRICTRC rcStrict;
2803 if (pMsg->Header.ExecutionState.Cpl == 0)
2804 {
2805 /*
2806 * Get all the MSR state. Since we're getting EFER, we also need to
2807 * get CR0, CR4 and CR3.
2808 */
2809 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2810 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
2811 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE)
2812 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ),
2813 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
2814
2815 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
2816 rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu,
2817 (!pExitRec ? 0 : IEM_CPUMCTX_EXTRN_MUST_MASK)
2818 | CPUMCTX_EXTRN_ALL_MSRS | CPUMCTX_EXTRN_CR0
2819 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4,
2820 "MSRs");
2821 if (rcStrict == VINF_SUCCESS)
2822 {
2823 if (!pExitRec)
2824 {
2825 /*
2826 * Handle writes.
2827 */
2828 if (pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
2829 {
2830 rcStrict = CPUMSetGuestMsr(pVCpu, pMsg->MsrNumber, RT_MAKE_U64((uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx));
2831 Log4(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc\n",
2832 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2833 pMsg->MsrNumber, (uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx, VBOXSTRICTRC_VAL(rcStrict) ));
2834 if (rcStrict == VINF_SUCCESS)
2835 {
2836 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, &pMsg->Header, 2);
2837 return VINF_SUCCESS;
2838 }
2839# ifndef IN_RING3
2840 /* move to ring-3 and handle the trap/whatever there, as we want to LogRel this. */
2841 if (rcStrict == VERR_CPUM_RAISE_GP_0)
2842 rcStrict = VINF_CPUM_R3_MSR_WRITE;
2843 return rcStrict;
2844# else
2845 LogRel(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc!\n",
2846 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2847 pMsg->MsrNumber, (uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx, VBOXSTRICTRC_VAL(rcStrict) ));
2848# endif
2849 }
2850 /*
2851 * Handle reads.
2852 */
2853 else
2854 {
2855 uint64_t uValue = 0;
2856 rcStrict = CPUMQueryGuestMsr(pVCpu, pMsg->MsrNumber, &uValue);
2857 Log4(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n",
2858 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2859 pMsg->MsrNumber, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2860 if (rcStrict == VINF_SUCCESS)
2861 {
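     /* RDMSR returns the value in EDX:EAX; in 64-bit mode the upper halves of RAX and RDX are cleared. */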
2862 pVCpu->cpum.GstCtx.rax = (uint32_t)uValue;
2863 pVCpu->cpum.GstCtx.rdx = uValue >> 32;
2864 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX);
2865 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, &pMsg->Header, 2);
2866 return VINF_SUCCESS;
2867 }
2868# ifndef IN_RING3
2869 /* move to ring-3 and handle the trap/whatever there, as we want to LogRel this. */
2870 if (rcStrict == VERR_CPUM_RAISE_GP_0)
2871 rcStrict = VINF_CPUM_R3_MSR_READ;
2872 return rcStrict;
2873# else
2874 LogRel(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n",
2875 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2876 pMsg->MsrNumber, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2877# endif
2878 }
2879 }
2880 else
2881 {
2882 /*
2883 * Handle frequent exit or something needing probing.
2884 */
2885 Log4(("MsrExit/%u: %04x:%08RX64/%s: %sMSR %#08x\n",
2886 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2887 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? "WR" : "RD", pMsg->MsrNumber));
2888 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2889 Log4(("MsrExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2890 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2891 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2892 return rcStrict;
2893 }
2894 }
2895 else
2896 {
2897 LogRel(("MsrExit/%u: %04x:%08RX64/%s: %sMSR %08x -> %Rrc - msr state import\n",
2898 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2899 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? "WR" : "RD",
2900 pMsg->MsrNumber, VBOXSTRICTRC_VAL(rcStrict) ));
2901 return rcStrict;
2902 }
2903 }
2904 else if (pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
2905 Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); WRMSR %08x, %08x:%08x\n",
2906 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2907 pMsg->Header.ExecutionState.Cpl, pMsg->MsrNumber, (uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx ));
2908 else
2909 Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); RDMSR %08x\n",
2910 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2911 pMsg->Header.ExecutionState.Cpl, pMsg->MsrNumber));
2912
2913 /*
2914 * If we get down here, we're supposed to #GP(0).
2915 */
2916 rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL_MSRS, "MSR");
2917 if (rcStrict == VINF_SUCCESS)
2918 {
2919 rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_GP, TRPM_TRAP, 0, 0, 0);
2920 if (rcStrict == VINF_IEM_RAISED_XCPT)
2921 rcStrict = VINF_SUCCESS;
2922 else if (rcStrict != VINF_SUCCESS)
2923 Log4(("MsrExit/%u: Injecting #GP(0) failed: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict) ));
2924 }
2925 return rcStrict;
2926}
2927#elif defined(IN_RING3)
2928/**
2929 * Deals with MSR access exits (WHvRunVpExitReasonX64MsrAccess).
2930 *
2931 * @returns Strict VBox status code.
2932 * @param pVM The cross context VM structure.
2933 * @param pVCpu The cross context per CPU structure.
2934 * @param pExit The VM exit information to handle.
2935 * @sa nemHCWinHandleMessageMsr
2936 */
2937NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExitMsr(PVMCC pVM, PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2938{
2939 /*
2940 * Check CPL as that's common to both RDMSR and WRMSR.
2941 */
2942 VBOXSTRICTRC rcStrict;
2943 if (pExit->VpContext.ExecutionState.Cpl == 0)
2944 {
2945 /*
2946 * Get all the MSR state. Since we're getting EFER, we also need to
2947 * get CR0, CR4 and CR3.
2948 */
2949 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2950 pExit->MsrAccess.AccessInfo.IsWrite
2951 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE)
2952 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ),
2953 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
2954 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2955 rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu,
2956 (!pExitRec ? 0 : IEM_CPUMCTX_EXTRN_MUST_MASK)
2957 | CPUMCTX_EXTRN_ALL_MSRS | CPUMCTX_EXTRN_CR0
2958 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4,
2959 "MSRs");
2960 if (rcStrict == VINF_SUCCESS)
2961 {
2962 if (!pExitRec)
2963 {
2964 /*
2965 * Handle writes.
2966 */
2967 if (pExit->MsrAccess.AccessInfo.IsWrite)
2968 {
2969 rcStrict = CPUMSetGuestMsr(pVCpu, pExit->MsrAccess.MsrNumber,
2970 RT_MAKE_U64((uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx));
2971 Log4(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
2972 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->MsrAccess.MsrNumber,
2973 (uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx, VBOXSTRICTRC_VAL(rcStrict) ));
2974 if (rcStrict == VINF_SUCCESS)
2975 {
2976 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, &pExit->VpContext, 2);
2977 return VINF_SUCCESS;
2978 }
2979 LogRel(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc!\n", pVCpu->idCpu,
2980 pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2981 pExit->MsrAccess.MsrNumber, (uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx,
2982 VBOXSTRICTRC_VAL(rcStrict) ));
2983 }
2984 /*
2985 * Handle reads.
2986 */
2987 else
2988 {
2989 uint64_t uValue = 0;
2990 rcStrict = CPUMQueryGuestMsr(pVCpu, pExit->MsrAccess.MsrNumber, &uValue);
2991 Log4(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n", pVCpu->idCpu,
2992 pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2993 pExit->MsrAccess.MsrNumber, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2994 if (rcStrict == VINF_SUCCESS)
2995 {
2996 pVCpu->cpum.GstCtx.rax = (uint32_t)uValue;
2997 pVCpu->cpum.GstCtx.rdx = uValue >> 32;
2998 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX);
2999 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, &pExit->VpContext, 2);
3000 return VINF_SUCCESS;
3001 }
3002 LogRel(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3003 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->MsrAccess.MsrNumber,
3004 uValue, VBOXSTRICTRC_VAL(rcStrict) ));
3005 }
3006 }
3007 else
3008 {
3009 /*
3010 * Handle frequent exit or something needing probing.
3011 */
3012 Log4(("MsrExit/%u: %04x:%08RX64/%s: %sMSR %#08x\n",
3013 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
3014 pExit->MsrAccess.AccessInfo.IsWrite ? "WR" : "RD", pExit->MsrAccess.MsrNumber));
3015 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
3016 Log4(("MsrExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
3017 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
3018 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
3019 return rcStrict;
3020 }
3021 }
3022 else
3023 {
3024 LogRel(("MsrExit/%u: %04x:%08RX64/%s: %sMSR %08x -> %Rrc - msr state import\n",
3025 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
3026 pExit->MsrAccess.AccessInfo.IsWrite ? "WR" : "RD", pExit->MsrAccess.MsrNumber, VBOXSTRICTRC_VAL(rcStrict) ));
3027 return rcStrict;
3028 }
3029 }
3030 else if (pExit->MsrAccess.AccessInfo.IsWrite)
3031 Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); WRMSR %08x, %08x:%08x\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3032 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.ExecutionState.Cpl,
3033 pExit->MsrAccess.MsrNumber, (uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx ));
3034 else
3035 Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); RDMSR %08x\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3036 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.ExecutionState.Cpl,
3037 pExit->MsrAccess.MsrNumber));
3038
3039 /*
3040 * If we get down here, we're supposed to #GP(0).
3041 */
3042 rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL_MSRS, "MSR");
3043 if (rcStrict == VINF_SUCCESS)
3044 {
3045 rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_GP, TRPM_TRAP, 0, 0, 0);
3046 if (rcStrict == VINF_IEM_RAISED_XCPT)
3047 rcStrict = VINF_SUCCESS;
3048 else if (rcStrict != VINF_SUCCESS)
3049 Log4(("MsrExit/%u: Injecting #GP(0) failed: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict) ));
3050 }
3051
3052 RT_NOREF_PV(pVM);
3053 return rcStrict;
3054}
3055#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
3056
3057
3058/**
3059 * Worker for nemHCWinHandleMessageException & nemR3WinHandleExitException that
3060 * checks if the given opcodes are of interest at all.
3061 *
3062 * @returns true if interesting, false if not.
3063 * @param cbOpcodes Number of opcode bytes available.
3064 * @param pbOpcodes The opcode bytes.
3065 * @param f64BitMode Whether we're in 64-bit mode.
3066 */
3067DECLINLINE(bool) nemHcWinIsInterestingUndefinedOpcode(uint8_t cbOpcodes, uint8_t const *pbOpcodes, bool f64BitMode)
3068{
3069 /*
3070 * Currently only interested in VMCALL and VMMCALL.
3071 */
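     /* Both are 3-byte opcodes (0F 01 C1 and 0F 01 D9), hence the minimum length of three. */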
3072 while (cbOpcodes >= 3)
3073 {
3074 switch (pbOpcodes[0])
3075 {
3076 case 0x0f:
3077 switch (pbOpcodes[1])
3078 {
3079 case 0x01:
3080 switch (pbOpcodes[2])
3081 {
3082 case 0xc1: /* 0f 01 c1 VMCALL */
3083 return true;
3084 case 0xd9: /* 0f 01 d9 VMMCALL */
3085 return true;
3086 default:
3087 break;
3088 }
3089 break;
3090 }
3091 break;
3092
3093 default:
3094 return false;
3095
3096 /* prefixes */
3097 case 0x40: case 0x41: case 0x42: case 0x43: case 0x44: case 0x45: case 0x46: case 0x47:
3098 case 0x48: case 0x49: case 0x4a: case 0x4b: case 0x4c: case 0x4d: case 0x4e: case 0x4f:
3099 if (!f64BitMode)
3100 return false;
3101 RT_FALL_THRU();
3102 case X86_OP_PRF_CS:
3103 case X86_OP_PRF_SS:
3104 case X86_OP_PRF_DS:
3105 case X86_OP_PRF_ES:
3106 case X86_OP_PRF_FS:
3107 case X86_OP_PRF_GS:
3108 case X86_OP_PRF_SIZE_OP:
3109 case X86_OP_PRF_SIZE_ADDR:
3110 case X86_OP_PRF_LOCK:
3111 case X86_OP_PRF_REPZ:
3112 case X86_OP_PRF_REPNZ:
3113 cbOpcodes--;
3114 pbOpcodes++;
3115 continue;
3116 }
3117 break;
3118 }
3119 return false;
3120}
3121
3122
3123#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
3124/**
3125 * Copies state included in an exception intercept message.
3126 *
3127 * @param pVCpu The cross context per CPU structure.
3128 * @param pMsg The message.
3129 * @param fClearXcpt Clear pending exception.
3130 */
3131DECLINLINE(void)
3132nemHCWinCopyStateFromExceptionMessage(PVMCPUCC pVCpu, HV_X64_EXCEPTION_INTERCEPT_MESSAGE const *pMsg, bool fClearXcpt)
3133{
3134 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
3135 pVCpu->cpum.GstCtx.fExtrn &= ~( CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_DS
3136 | (fClearXcpt ? CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT : 0) );
3137 pVCpu->cpum.GstCtx.rax = pMsg->Rax;
3138 pVCpu->cpum.GstCtx.rcx = pMsg->Rcx;
3139 pVCpu->cpum.GstCtx.rdx = pMsg->Rdx;
3140 pVCpu->cpum.GstCtx.rbx = pMsg->Rbx;
3141 pVCpu->cpum.GstCtx.rsp = pMsg->Rsp;
3142 pVCpu->cpum.GstCtx.rbp = pMsg->Rbp;
3143 pVCpu->cpum.GstCtx.rsi = pMsg->Rsi;
3144 pVCpu->cpum.GstCtx.rdi = pMsg->Rdi;
3145 pVCpu->cpum.GstCtx.r8 = pMsg->R8;
3146 pVCpu->cpum.GstCtx.r9 = pMsg->R9;
3147 pVCpu->cpum.GstCtx.r10 = pMsg->R10;
3148 pVCpu->cpum.GstCtx.r11 = pMsg->R11;
3149 pVCpu->cpum.GstCtx.r12 = pMsg->R12;
3150 pVCpu->cpum.GstCtx.r13 = pMsg->R13;
3151 pVCpu->cpum.GstCtx.r14 = pMsg->R14;
3152 pVCpu->cpum.GstCtx.r15 = pMsg->R15;
3153 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.ds, pMsg->DsSegment);
3154 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.ss, pMsg->SsSegment);
3155}
3156#elif defined(IN_RING3)
3157/**
3158 * Copies state included in an exception intercept exit.
3159 *
3160 * @param pVCpu The cross context per CPU structure.
3161 * @param pExit The VM exit information.
3162 * @param fClearXcpt Clear pending exception.
3163 */
3164DECLINLINE(void) nemR3WinCopyStateFromExceptionMessage(PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, bool fClearXcpt)
3165{
3166 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
3167 if (fClearXcpt)
3168 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
3169}
3170#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
3171
3172
3173/**
3174 * Advances the guest RIP by the number of bytes specified in @a cb.
3175 *
3176 * @param pVCpu The cross context virtual CPU structure.
3177 * @param cb RIP increment value in bytes.
3178 */
3179DECLINLINE(void) nemHcWinAdvanceRip(PVMCPUCC pVCpu, uint32_t cb)
3180{
3181 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
3182 pCtx->rip += cb;
3183
3184 /* Update interrupt shadow. */
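     /* (The shadow only covers the single instruction following MOV SS/STI, so
        once RIP has moved off the recorded PC the inhibition is over.) */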
3185 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
3186 && pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
3187 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3188}
3189
3190
3191/**
3192 * Hacks its way around the lovely mesa driver's backdoor accesses.
3193 *
3194 * @sa hmR0VmxHandleMesaDrvGp
3195 * @sa hmR0SvmHandleMesaDrvGp
3196 */
3197static int nemHcWinHandleMesaDrvGp(PVMCPUCC pVCpu, PCPUMCTX pCtx)
3198{
3199 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_GPRS_MASK)));
3200 RT_NOREF(pCtx);
3201
3202 /* For now we'll just skip the instruction. */
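     /* The backdoor access is a one-byte IN EAX,DX instruction (opcode 0xED), so skipping a single byte suffices. */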
3203 nemHcWinAdvanceRip(pVCpu, 1);
3204 return VINF_SUCCESS;
3205}
3206
3207
3208/**
3209 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
3210 * backdoor logging w/o checking what it is running inside.
3211 *
3212 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
3213 * backdoor port and magic numbers loaded in registers.
3214 *
3215 * @returns true if it is, false if it isn't.
3216 * @sa hmR0VmxIsMesaDrvGp
3217 * @sa hmR0SvmIsMesaDrvGp
3218 */
3219DECLINLINE(bool) nemHcWinIsMesaDrvGp(PVMCPUCC pVCpu, PCPUMCTX pCtx, const uint8_t *pbInsn, uint32_t cbInsn)
3220{
3221 /* #GP(0) is already checked by caller. */
3222
3223 /* Check magic and port. */
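     /* DX must hold the VMware backdoor I/O port 0x5658 ('VX') and EAX the
        backdoor magic number 0x564d5868 ('VMXh'). */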
3224 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RAX)));
3225 if (pCtx->dx != UINT32_C(0x5658))
3226 return false;
3227 if (pCtx->rax != UINT32_C(0x564d5868))
3228 return false;
3229
3230 /* Flat ring-3 CS. */
3231 if (CPUMGetGuestCPL(pVCpu) != 3)
3232 return false;
3233 if (pCtx->cs.u64Base != 0)
3234 return false;
3235
3236 /* 0xed: IN eAX,dx */
3237 if (cbInsn < 1) /* Play safe (shouldn't happen). */
3238 {
3239 uint8_t abInstr[1];
3240 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
3241 if (RT_FAILURE(rc))
3242 return false;
3243 if (abInstr[0] != 0xed)
3244 return false;
3245 }
3246 else
3247 {
3248 if (pbInsn[0] != 0xed)
3249 return false;
3250 }
3251
3252 return true;
3253}
3254
3255
3256#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
3257/**
3258 * Deals with exception intercept message (HvMessageTypeX64ExceptionIntercept).
3259 *
3260 * @returns Strict VBox status code.
3261 * @param pVCpu The cross context per CPU structure.
3262 * @param pMsg The message.
3263 * @sa nemR3WinHandleExitException
3264 */
3265NEM_TMPL_STATIC VBOXSTRICTRC
3266nemHCWinHandleMessageException(PVMCPUCC pVCpu, HV_X64_EXCEPTION_INTERCEPT_MESSAGE const *pMsg)
3267{
3268 /*
3269 * Assert sanity.
3270 */
3271 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
3272 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
3273 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_EXECUTE);
3274 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
3275 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRip, pMsg->Header.Rip);
3276 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
3277 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
3278 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, HvX64RegisterDs, pMsg->DsSegment);
3279 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, HvX64RegisterSs, pMsg->SsSegment);
3280 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRax, pMsg->Rax);
3281 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRcx, pMsg->Rcx);
3282 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRdx, pMsg->Rdx);
3283 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRbx, pMsg->Rbx);
3284 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRsp, pMsg->Rsp);
3285 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRbp, pMsg->Rbp);
3286 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRsi, pMsg->Rsi);
3287 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRdi, pMsg->Rdi);
3288 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR8, pMsg->R8);
3289 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR9, pMsg->R9);
3290 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR10, pMsg->R10);
3291 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR11, pMsg->R11);
3292 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR12, pMsg->R12);
3293 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR13, pMsg->R13);
3294 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR14, pMsg->R14);
3295 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR15, pMsg->R15);
3296
3297 /*
3298 * Get most of the register state since we'll end up making IEM inject the
3299 * event. The exception isn't normally flagged as a pending event, so duh.
3300 *
3301 * Note! We can optimize this later with event injection.
3302 */
3303 Log4(("XcptExit/%u: %04x:%08RX64/%s: %x errcd=%#x parm=%RX64\n",
3304 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
3305 pMsg->ExceptionVector, pMsg->ErrorCode, pMsg->ExceptionParameter));
3306 nemHCWinCopyStateFromExceptionMessage(pVCpu, pMsg, true /*fClearXcpt*/);
3307 uint64_t fWhat = NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM;
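     /* For #DB we also pull in the debug registers so the exception state can be examined and injected correctly. */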
3308 if (pMsg->ExceptionVector == X86_XCPT_DB)
3309 fWhat |= CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_DR6;
3310 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, fWhat, "Xcpt");
3311 if (rcStrict != VINF_SUCCESS)
3312 return rcStrict;
3313
3314 /*
3315 * Handle the intercept.
3316 */
3317 TRPMEVENT enmEvtType = TRPM_TRAP;
3318 switch (pMsg->ExceptionVector)
3319 {
3320 /*
3321 * We get undefined opcodes on VMMCALL(AMD) & VMCALL(Intel) instructions
3322 * and need to turn them over to GIM.
3323 *
3324 * Note! We do not check fGIMTrapXcptUD here ASSUMING that GIM only wants
3325 * #UD for handling non-native hypercall instructions. (IEM will
3326 * decode both and let the GIM provider decide whether to accept it.)
3327 */
3328 case X86_XCPT_UD:
3329 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUd);
3330 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_UD),
3331 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
3332
3333 if (nemHcWinIsInterestingUndefinedOpcode(pMsg->InstructionByteCount, pMsg->InstructionBytes,
3334 pMsg->Header.ExecutionState.EferLma && pMsg->Header.CsSegment.Long ))
3335 {
3336 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pMsg->Header.Rip,
3337 pMsg->InstructionBytes, pMsg->InstructionByteCount);
3338 Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD -> emulated -> %Rrc\n",
3339 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip,
3340 nemHCWinExecStateToLogStr(&pMsg->Header), VBOXSTRICTRC_VAL(rcStrict) ));
3341 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUdHandled);
3342 return rcStrict;
3343 }
3344 Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD [%.*Rhxs] -> re-injected\n", pVCpu->idCpu, pMsg->Header.CsSegment.Selector,
3345 pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), pMsg->InstructionByteCount, pMsg->InstructionBytes ));
3346 break;
3347
3348 /*
3349 * Workaround the lovely mesa driver assuming that vmsvga means vmware
3350 * hypervisor and tries to log stuff to the host.
3351 */
3352 case X86_XCPT_GP:
3353 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionGp);
3354 /** @todo r=bird: Need workaround in IEM for this, right?
3355 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_GP),
3356 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC()); */
3357 if ( !pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv
3358 || !nemHcWinIsMesaDrvGp(pVCpu, &pVCpu->cpum.GstCtx, pMsg->InstructionBytes, pMsg->InstructionByteCount))
3359 {
3360# if 1 /** @todo Need to emulate instruction or we get a triple fault when trying to inject the #GP... */
3361 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pMsg->Header.Rip,
3362 pMsg->InstructionBytes, pMsg->InstructionByteCount);
3363 Log4(("XcptExit/%u: %04x:%08RX64/%s: #GP -> emulated -> %Rrc\n",
3364 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip,
3365 nemHCWinExecStateToLogStr(&pMsg->Header), VBOXSTRICTRC_VAL(rcStrict) ));
3366 return rcStrict;
3367# else
3368 break;
3369# endif
3370 }
3371 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionGpMesa);
3372 return nemHcWinHandleMesaDrvGp(pVCpu, &pVCpu->cpum.GstCtx);
3373
3374 /*
3375 * Filter debug exceptions.
3376 */
3377 case X86_XCPT_DB:
3378 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionDb);
3379 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_DB),
3380 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
3381 Log4(("XcptExit/%u: %04x:%08RX64/%s: #DB - TODO\n",
3382 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header) ));
3383 break;
3384
3385 case X86_XCPT_BP:
3386 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionBp);
3387 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_BP),
3388 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
3389 Log4(("XcptExit/%u: %04x:%08RX64/%s: #BP - TODO - %u\n", pVCpu->idCpu, pMsg->Header.CsSegment.Selector,
3390 pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), pMsg->Header.InstructionLength));
3391 enmEvtType = TRPM_SOFTWARE_INT; /* We're at the INT3 instruction, not after it. */
3392 break;
3393
3394 /* This shouldn't happen. */
3395 default:
3396 AssertLogRelMsgFailedReturn(("ExceptionVector=%#x\n", pMsg->ExceptionVector), VERR_IEM_IPE_6);
3397 }
3398
3399 /*
3400 * Inject it.
3401 */
3402 rcStrict = IEMInjectTrap(pVCpu, pMsg->ExceptionVector, enmEvtType, pMsg->ErrorCode,
3403 pMsg->ExceptionParameter /*??*/, pMsg->Header.InstructionLength);
3404 Log4(("XcptExit/%u: %04x:%08RX64/%s: %#u -> injected -> %Rrc\n",
3405 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip,
3406 nemHCWinExecStateToLogStr(&pMsg->Header), pMsg->ExceptionVector, VBOXSTRICTRC_VAL(rcStrict) ));
3407 return rcStrict;
3408}
3409#elif defined(IN_RING3)
3410/**
3411 * Deals with exception exits (WHvRunVpExitReasonException).
3412 *
3413 * @returns Strict VBox status code.
3414 * @param pVM The cross context VM structure.
3415 * @param pVCpu The cross context per CPU structure.
3416 * @param pExit The VM exit information to handle.
3417 * @sa nemHCWinHandleMessageException
3418 */
3419NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExitException(PVMCC pVM, PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
3420{
3421 /*
3422 * Get most of the register state since we'll end up making IEM inject the
3423 * event. The exception isn't normally flagged as a pending event, so duh.
3424 *
3425 * Note! We can optimize this later with event injection.
3426 */
3427 Log4(("XcptExit/%u: %04x:%08RX64/%s: %x errcd=%#x parm=%RX64\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3428 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpException.ExceptionType,
3429 pExit->VpException.ErrorCode, pExit->VpException.ExceptionParameter ));
3430 nemR3WinCopyStateFromExceptionMessage(pVCpu, pExit, true /*fClearXcpt*/);
3431 uint64_t fWhat = NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM;
3432 if (pExit->VpException.ExceptionType == X86_XCPT_DB)
3433 fWhat |= CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_DR6;
3434 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, fWhat, "Xcpt");
3435 if (rcStrict != VINF_SUCCESS)
3436 return rcStrict;
3437
3438 /*
3439 * Handle the intercept.
3440 */
3441 TRPMEVENT enmEvtType = TRPM_TRAP;
3442 switch (pExit->VpException.ExceptionType)
3443 {
3444 /*
3445 * We get undefined opcodes on VMMCALL(AMD) & VMCALL(Intel) instructions
3446 * and need to turn them over to GIM.
3447 *
3448 * Note! We do not check fGIMTrapXcptUD here ASSUMING that GIM only wants
3449 * #UD for handling non-native hypercall instructions. (IEM will
3450 * decode both and let the GIM provider decide whether to accept it.)
3451 */
3452 case X86_XCPT_UD:
3453 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUd);
3454 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_UD),
3455 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
3456 if (nemHcWinIsInterestingUndefinedOpcode(pExit->VpException.InstructionByteCount, pExit->VpException.InstructionBytes,
3457 pExit->VpContext.ExecutionState.EferLma && pExit->VpContext.Cs.Long ))
3458 {
3459 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pExit->VpContext.Rip,
3460 pExit->VpException.InstructionBytes,
3461 pExit->VpException.InstructionByteCount);
3462 Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD -> emulated -> %Rrc\n",
3463 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip,
3464 nemR3WinExecStateToLogStr(&pExit->VpContext), VBOXSTRICTRC_VAL(rcStrict) ));
3465 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUdHandled);
3466 return rcStrict;
3467 }
3468
3469 Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD [%.*Rhxs] -> re-injected\n", pVCpu->idCpu,
3470 pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
3471 pExit->VpException.InstructionByteCount, pExit->VpException.InstructionBytes ));
3472 break;
3473
3474 /*
3475 * Workaround the lovely mesa driver assuming that vmsvga means vmware
3476 * hypervisor and tries to log stuff to the host.
3477 */
3478 case X86_XCPT_GP:
3479 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionGp);
3480 /** @todo r=bird: Need workaround in IEM for this, right?
3481 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_GP),
3482 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC()); */
3483 if ( !pVCpu->nem.s.fTrapXcptGpForLovelyMesaDrv
3484 || !nemHcWinIsMesaDrvGp(pVCpu, &pVCpu->cpum.GstCtx, pExit->VpException.InstructionBytes,
3485 pExit->VpException.InstructionByteCount))
3486 {
3487# if 1 /** @todo Need to emulate instruction or we get a triple fault when trying to inject the #GP... */
3488 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pExit->VpContext.Rip,
3489 pExit->VpException.InstructionBytes,
3490 pExit->VpException.InstructionByteCount);
3491 Log4(("XcptExit/%u: %04x:%08RX64/%s: #GP -> emulated -> %Rrc\n",
3492 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip,
3493 nemR3WinExecStateToLogStr(&pExit->VpContext), VBOXSTRICTRC_VAL(rcStrict) ));
3494 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUdHandled);
3495 return rcStrict;
3496# else
3497 break;
3498# endif
3499 }
3500 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionGpMesa);
3501 return nemHcWinHandleMesaDrvGp(pVCpu, &pVCpu->cpum.GstCtx);
3502
3503 /*
3504 * Filter debug exceptions.
3505 */
3506 case X86_XCPT_DB:
3507 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionDb);
3508 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_DB),
3509 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
3510 Log4(("XcptExit/%u: %04x:%08RX64/%s: #DB - TODO\n",
3511 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext) ));
3512 break;
3513
3514 case X86_XCPT_BP:
3515 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionBp);
3516 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_BP),
3517 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
3518 Log4(("XcptExit/%u: %04x:%08RX64/%s: #BP - TODO - %u\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3519 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.InstructionLength));
3520 enmEvtType = TRPM_SOFTWARE_INT; /* We're at the INT3 instruction, not after it. */
3521 break;
3522
3523 /* This shouldn't happen. */
3524 default:
3525 AssertLogRelMsgFailedReturn(("ExceptionType=%#x\n", pExit->VpException.ExceptionType), VERR_IEM_IPE_6);
3526 }
3527
3528 /*
3529 * Inject it.
3530 */
3531 rcStrict = IEMInjectTrap(pVCpu, pExit->VpException.ExceptionType, enmEvtType, pExit->VpException.ErrorCode,
3532 pExit->VpException.ExceptionParameter /*??*/, pExit->VpContext.InstructionLength);
3533 Log4(("XcptExit/%u: %04x:%08RX64/%s: %#u -> injected -> %Rrc\n",
3534 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip,
3535 nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpException.ExceptionType, VBOXSTRICTRC_VAL(rcStrict) ));
3536
3537 RT_NOREF_PV(pVM);
3538 return rcStrict;
3539}
3540#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
3541
3542
3543#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
3544/**
3545 * Deals with unrecoverable exception (triple fault).
3546 *
3547 * Seen WRMSR 0x201 (IA32_MTRR_PHYSMASK0) writes from grub / debian9 ending up
3548 * here too. So we'll leave it to IEM to decide.
3549 *
3550 * @returns Strict VBox status code.
3551 * @param pVCpu The cross context per CPU structure.
3552 * @param pMsgHdr The message header.
3553 * @sa nemR3WinHandleExitUnrecoverableException
3554 */
3555NEM_TMPL_STATIC VBOXSTRICTRC
3556nemHCWinHandleMessageUnrecoverableException(PVMCPUCC pVCpu, HV_X64_INTERCEPT_MESSAGE_HEADER const *pMsgHdr)
3557{
3558 /* Check message register value sanity. */
3559 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, HvX64RegisterCs, pMsgHdr->CsSegment);
3560 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRip, pMsgHdr->Rip);
3561 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRflags, pMsgHdr->Rflags);
3562 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterCr8, (uint64_t)pMsgHdr->Cr8);
3563
3564# if 0
3565 /*
3566 * Just copy the state we've got and handle it in the loop for now.
3567 */
3568 nemHCWinCopyStateFromX64Header(pVCpu, pMsgHdr);
3569 Log(("TripleExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT\n",
3570 pVCpu->idCpu, pMsgHdr->CsSegment.Selector, pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags));
3571 return VINF_EM_TRIPLE_FAULT;
3572# else
3573 /*
3574 * Let IEM decide whether this is really it.
3575 */
3576 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_UNRECOVERABLE_EXCEPTION),
3577 pMsgHdr->Rip + pMsgHdr->CsSegment.Base, ASMReadTSC());
3578 nemHCWinCopyStateFromX64Header(pVCpu, pMsgHdr);
3579 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL, "TripleExit");
3580 if (rcStrict == VINF_SUCCESS)
3581 {
3582 rcStrict = IEMExecOne(pVCpu);
3583 if (rcStrict == VINF_SUCCESS)
3584 {
3585 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_SUCCESS\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
3586 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags ));
3587 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT; /* Make sure to reset pending #DB(0). */
3588 return VINF_SUCCESS;
3589 }
3590 if (rcStrict == VINF_EM_TRIPLE_FAULT)
3591 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT!\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
3592 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags));
3593 else
3594 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (IEMExecOne)\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
3595 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
3596 }
3597 else
3598 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (state import)\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
3599 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
3600 return rcStrict;
3601# endif
3602}
3603#elif defined(IN_RING3)
3604/**
3605 * Deals with unrecoverable exception exits (WHvRunVpExitReasonUnrecoverableException).
3606 *
3607 * @returns Strict VBox status code.
3608 * @param pVM The cross context VM structure.
3609 * @param pVCpu The cross context per CPU structure.
3610 * @param pExit The VM exit information to handle.
3611 * @sa nemHCWinHandleMessageUnrecoverableException
3612 */
3613NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExitUnrecoverableException(PVMCC pVM, PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
3614{
3615# if 0
3616 /*
3617 * Just copy the state we've got and handle it in the loop for now.
3618 */
3619 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
3620 Log(("TripleExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3621 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags));
3622 RT_NOREF_PV(pVM);
3623 return VINF_EM_TRIPLE_FAULT;
3624# else
3625 /*
3626 * Let IEM decide whether this is really it.
3627 */
3628 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_UNRECOVERABLE_EXCEPTION),
3629 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
3630 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
3631 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL, "TripleExit");
3632 if (rcStrict == VINF_SUCCESS)
3633 {
3634 rcStrict = IEMExecOne(pVCpu);
3635 if (rcStrict == VINF_SUCCESS)
3636 {
3637 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_SUCCESS\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3638 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags));
3639 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT; /* Make sure to reset pending #DB(0). */
3640 return VINF_SUCCESS;
3641 }
3642 if (rcStrict == VINF_EM_TRIPLE_FAULT)
3643 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT!\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3644 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags));
3645 else
3646 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (IEMExecOne)\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3647 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
3648 }
3649 else
3650 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (state import)\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3651 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
3652 RT_NOREF_PV(pVM);
3653 return rcStrict;
3654# endif
3655
3656}
3657#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
3658
3659
3660#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
3661/**
3662 * Handles messages (VM exits).
3663 *
3664 * @returns Strict VBox status code.
3665 * @param pVM The cross context VM structure.
3666 * @param pVCpu The cross context per CPU structure.
3667 * @param pMappingHeader The message slot mapping.
3668 * @sa nemR3WinHandleExit
3669 */
3670NEM_TMPL_STATIC VBOXSTRICTRC
3671nemHCWinHandleMessage(PVMCC pVM, PVMCPUCC pVCpu, VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader)
3672{
3673 if (pMappingHeader->enmVidMsgType == VidMessageHypervisorMessage)
3674 {
3675 AssertMsg(pMappingHeader->cbMessage == HV_MESSAGE_SIZE, ("%#x\n", pMappingHeader->cbMessage));
3676 HV_MESSAGE const *pMsg = (HV_MESSAGE const *)(pMappingHeader + 1);
3677 switch (pMsg->Header.MessageType)
3678 {
3679 case HvMessageTypeUnmappedGpa:
3680 Assert(pMsg->Header.PayloadSize == RT_UOFFSETOF(HV_X64_MEMORY_INTERCEPT_MESSAGE, DsSegment));
3681 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemUnmapped);
3682 return nemHCWinHandleMessageMemory(pVM, pVCpu, &pMsg->X64MemoryIntercept);
3683
3684 case HvMessageTypeGpaIntercept:
3685 Assert(pMsg->Header.PayloadSize == RT_UOFFSETOF(HV_X64_MEMORY_INTERCEPT_MESSAGE, DsSegment));
3686 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemIntercept);
3687 return nemHCWinHandleMessageMemory(pVM, pVCpu, &pMsg->X64MemoryIntercept);
3688
3689 case HvMessageTypeX64IoPortIntercept:
3690 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64IoPortIntercept));
3691 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitPortIo);
3692 return nemHCWinHandleMessageIoPort(pVM, pVCpu, &pMsg->X64IoPortIntercept);
3693
3694 case HvMessageTypeX64Halt:
3695 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitHalt);
3696 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_HALT),
3697 pMsg->X64InterceptHeader.Rip + pMsg->X64InterceptHeader.CsSegment.Base, ASMReadTSC());
3698 Log4(("HaltExit\n"));
3699 return VINF_EM_HALT;
3700
3701 case HvMessageTypeX64InterruptWindow:
3702 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64InterruptWindow));
3703 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitInterruptWindow);
3704 return nemHCWinHandleMessageInterruptWindow(pVM, pVCpu, &pMsg->X64InterruptWindow);
3705
3706 case HvMessageTypeX64CpuidIntercept:
3707 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64CpuIdIntercept));
3708 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitCpuId);
3709 return nemHCWinHandleMessageCpuId(pVM, pVCpu, &pMsg->X64CpuIdIntercept);
3710
3711 case HvMessageTypeX64MsrIntercept:
3712 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64MsrIntercept));
3713 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMsr);
3714 return nemHCWinHandleMessageMsr(pVCpu, &pMsg->X64MsrIntercept);
3715
3716 case HvMessageTypeX64ExceptionIntercept:
3717 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64ExceptionIntercept));
3718 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitException);
3719 return nemHCWinHandleMessageException(pVCpu, &pMsg->X64ExceptionIntercept);
3720
3721 case HvMessageTypeUnrecoverableException:
3722 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64InterceptHeader));
3723 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitUnrecoverable);
3724 return nemHCWinHandleMessageUnrecoverableException(pVCpu, &pMsg->X64InterceptHeader);
3725
3726 case HvMessageTypeInvalidVpRegisterValue:
3727 case HvMessageTypeUnsupportedFeature:
3728 case HvMessageTypeTlbPageSizeMismatch:
3729 LogRel(("Unimplemented msg:\n%.*Rhxd\n", (int)sizeof(*pMsg), pMsg));
3730 AssertLogRelMsgFailedReturn(("Message type %#x not implemented!\n%.32Rhxd\n", pMsg->Header.MessageType, pMsg),
3731 VERR_NEM_IPE_3);
3732
3733 case HvMessageTypeX64ApicEoi:
3734 case HvMessageTypeX64LegacyFpError:
3735 case HvMessageTypeX64RegisterIntercept:
3736 case HvMessageTypeApicEoi:
3737 case HvMessageTypeFerrAsserted:
3738 case HvMessageTypeEventLogBufferComplete:
3739 case HvMessageTimerExpired:
3740 LogRel(("Unexpected msg:\n%.*Rhxd\n", (int)sizeof(*pMsg), pMsg));
3741 AssertLogRelMsgFailedReturn(("Unexpected message on CPU #%u: %#x\n", pVCpu->idCpu, pMsg->Header.MessageType),
3742 VERR_NEM_IPE_3);
3743
3744 default:
3745 LogRel(("Unknown msg:\n%.*Rhxd\n", (int)sizeof(*pMsg), pMsg));
3746 AssertLogRelMsgFailedReturn(("Unknown message on CPU #%u: %#x\n", pVCpu->idCpu, pMsg->Header.MessageType),
3747 VERR_NEM_IPE_3);
3748 }
3749 }
3750 else
3751 AssertLogRelMsgFailedReturn(("Unexpected VID message type on CPU #%u: %#x LB %u\n",
3752 pVCpu->idCpu, pMappingHeader->enmVidMsgType, pMappingHeader->cbMessage),
3753 VERR_NEM_IPE_4);
3754}
3755#elif defined(IN_RING3)
3756/**
3757 * Handles VM exits.
3758 *
3759 * @returns Strict VBox status code.
3760 * @param pVM The cross context VM structure.
3761 * @param pVCpu The cross context per CPU structure.
3762 * @param pExit The VM exit information to handle.
3763 * @sa nemHCWinHandleMessage
3764 */
3765NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExit(PVMCC pVM, PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
3766{
3767 switch (pExit->ExitReason)
3768 {
3769 case WHvRunVpExitReasonMemoryAccess:
3770 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemUnmapped);
3771 return nemR3WinHandleExitMemory(pVM, pVCpu, pExit);
3772
3773 case WHvRunVpExitReasonX64IoPortAccess:
3774 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitPortIo);
3775 return nemR3WinHandleExitIoPort(pVM, pVCpu, pExit);
3776
3777 case WHvRunVpExitReasonX64Halt:
3778 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitHalt);
3779 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_HALT),
3780 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
3781 Log4(("HaltExit\n"));
3782 return VINF_EM_HALT;
3783
3784 case WHvRunVpExitReasonCanceled:
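             /* The run was cancelled from the outside (e.g. WHvCancelRunVirtualProcessor); just return to the loop. */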
3785 return VINF_SUCCESS;
3786
3787 case WHvRunVpExitReasonX64InterruptWindow:
3788 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitInterruptWindow);
3789 return nemR3WinHandleExitInterruptWindow(pVM, pVCpu, pExit);
3790
3791 case WHvRunVpExitReasonX64Cpuid:
3792 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitCpuId);
3793 return nemR3WinHandleExitCpuId(pVM, pVCpu, pExit);
3794
3795 case WHvRunVpExitReasonX64MsrAccess:
3796 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMsr);
3797 return nemR3WinHandleExitMsr(pVM, pVCpu, pExit);
3798
3799 case WHvRunVpExitReasonException:
3800 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitException);
3801 return nemR3WinHandleExitException(pVM, pVCpu, pExit);
3802
3803 case WHvRunVpExitReasonUnrecoverableException:
3804 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitUnrecoverable);
3805 return nemR3WinHandleExitUnrecoverableException(pVM, pVCpu, pExit);
3806
3807 case WHvRunVpExitReasonUnsupportedFeature:
3808 case WHvRunVpExitReasonInvalidVpRegisterValue:
3809 LogRel(("Unimplemented exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit));
3810 AssertLogRelMsgFailedReturn(("Unexpected exit on CPU #%u: %#x\n%.32Rhxd\n",
3811 pVCpu->idCpu, pExit->ExitReason, pExit), VERR_NEM_IPE_3);
3812
3813 /* Undesired exits: */
3814 case WHvRunVpExitReasonNone:
3815 default:
3816 LogRel(("Unknown exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit));
3817 AssertLogRelMsgFailedReturn(("Unknown exit on CPU #%u: %#x!\n", pVCpu->idCpu, pExit->ExitReason), VERR_NEM_IPE_3);
3818 }
3819}
3820#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
3821
3822
3823#ifdef IN_RING0
3824/**
3825 * Perform an I/O control operation on the partition handle (VID.SYS),
3826 * restarting on alert-like behaviour.
3827 *
3828 * @returns NT status code.
3829 * @param pGVM The ring-0 VM structure.
3830 * @param pGVCpu The global (ring-0) per CPU structure.
3831 * @param fFlags The wait flags.
3832 * @param cMillies The timeout in milliseconds
3833 */
3834static NTSTATUS nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(PGVM pGVM, PGVMCPU pGVCpu, uint32_t fFlags, uint32_t cMillies)
3835{
3836 pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu = pGVCpu->idCpu;
3837 pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags = fFlags;
3838 pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = cMillies;
3839 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVCpu, pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
3840 &pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
3841 pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.cbInput,
3842 NULL, 0);
3843 if (rcNt == STATUS_SUCCESS)
3844 { /* likely */ }
3845 /*
3846 * Generally, if we get down here, we have been interrupted between ACK'ing
3847 * a message and waiting for the next one due to an NtAlertThread call. So, we
3848 * should stop ACK'ing the previous message and move on to waiting for the next.
3849 * See similar stuff in nemHCWinRunGC().
3850 */
3851 else if ( rcNt == STATUS_TIMEOUT
3852 || rcNt == STATUS_ALERTED /* just in case */
3853 || rcNt == STATUS_KERNEL_APC /* just in case */
3854 || rcNt == STATUS_USER_APC /* just in case */)
3855 {
3856 DBGFTRACE_CUSTOM(pGVCpu->CTX_SUFF(pVM), "IoCtlMessageSlotHandleAndGetNextRestart/1 %#x (f=%#x)", rcNt, fFlags);
3857 STAM_REL_COUNTER_INC(&pGVCpu->nem.s.StatStopCpuPendingAlerts);
3858 Assert(fFlags & VID_MSHAGN_F_GET_NEXT_MESSAGE);
3859
3860 pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu = pGVCpu->idCpu;
3861 pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags = fFlags & ~VID_MSHAGN_F_HANDLE_MESSAGE;
3862 pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = cMillies;
3863 rcNt = nemR0NtPerformIoControl(pGVM, pGVCpu, pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
3864 &pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
3865 pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.cbInput,
3866 NULL, 0);
3867 DBGFTRACE_CUSTOM(pGVM, "IoCtlMessageSlotHandleAndGetNextRestart/2 %#x", rcNt);
3868 }
3869 return rcNt;
3870}
3871#endif /* IN_RING0 */
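
/*
 * Note on the retry above: the second ioctl deliberately drops
 * VID_MSHAGN_F_HANDLE_MESSAGE.  The first call may already have acknowledged
 * the previous message before the wait got interrupted, so re-acknowledging
 * it on the retry is presumably unsafe.  In pseudo-code:
 *
 *     rcNt = IoCtl(VID_MSHAGN_F_HANDLE_MESSAGE | VID_MSHAGN_F_GET_NEXT_MESSAGE); // ack + wait
 *     if (alert-like status)
 *         rcNt = IoCtl(VID_MSHAGN_F_GET_NEXT_MESSAGE);                           // wait only
 */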


#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
/**
 * Worker for nemHCWinRunGC that stops the execution on the way out.
 *
 * The CPU was running the last time we checked, so there are no messages that
 * need to be marked as handled.  The caller checks this.
 *
 * @returns rcStrict on success, error status on failure.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context per CPU structure.
 * @param   rcStrict        The nemHCWinRunGC return status.  This is a little
 *                          bit unnecessary, except in internal error cases,
 *                          since we won't need to stop the CPU if we took an
 *                          exit.
 * @param   pMappingHeader  The message slot mapping.
 */
NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinStopCpu(PVMCC pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict,
                                             VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader)
{
# ifdef DBGFTRACE_ENABLED
    HV_MESSAGE const volatile *pMsgForTrace = (HV_MESSAGE const volatile *)(pMappingHeader + 1);
# endif

    /*
     * Try stopping the processor.  If we're lucky we manage to do this before it
     * does another VM exit.
     */
    DBGFTRACE_CUSTOM(pVM, "nemStop#0");
# ifdef IN_RING0
    pVCpu->nem.s.uIoCtlBuf.idCpu = pVCpu->idCpu;
    NTSTATUS rcNt = nemR0NtPerformIoControl(pVM, pVCpu, pVM->nemr0.s.IoCtlStopVirtualProcessor.uFunction,
                                            &pVCpu->nem.s.uIoCtlBuf.idCpu, sizeof(pVCpu->nem.s.uIoCtlBuf.idCpu),
                                            NULL, 0);
    if (NT_SUCCESS(rcNt))
    {
        DBGFTRACE_CUSTOM(pVM, "nemStop#0: okay (%#x)", rcNt);
        Log8(("nemHCWinStopCpu: Stopping CPU succeeded (cpu status %u)\n", nemHCWinCpuGetRunningStatus(pVCpu) ));
        STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuSuccess);
        return rcStrict;
    }
# else
    BOOL fRet = VidStopVirtualProcessor(pVM->nem.s.hPartitionDevice, pVCpu->idCpu);
    if (fRet)
    {
        DBGFTRACE_CUSTOM(pVM, "nemStop#0: okay");
        Log8(("nemHCWinStopCpu: Stopping CPU succeeded (cpu status %u)\n", nemHCWinCpuGetRunningStatus(pVCpu) ));
        STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuSuccess);
        return rcStrict;
    }
# endif

    /*
     * Dang.  The CPU stopped by itself and we got a couple of messages to deal with.
     */
# ifdef IN_RING0
    DBGFTRACE_CUSTOM(pVM, "nemStop#0: pending (%#x)", rcNt);
    AssertLogRelMsgReturn(rcNt == ERROR_VID_STOP_PENDING, ("rcNt=%#x\n", rcNt),
                          RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
# else
    DWORD dwErr = RTNtLastErrorValue();
    DBGFTRACE_CUSTOM(pVM, "nemStop#0: pending (%#x)", dwErr);
    AssertLogRelMsgReturn(dwErr == ERROR_VID_STOP_PENDING, ("dwErr=%#u (%#x)\n", dwErr, dwErr),
                          RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
# endif
    Log8(("nemHCWinStopCpu: Stopping CPU #%u pending...\n", pVCpu->idCpu));
    STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuPending);

    /*
     * First message: Exit or similar, sometimes VidMessageStopRequestComplete.
     * Note! We can safely ASSUME that rcStrict isn't carrying any important
     *       information at this point.
     */
# ifdef IN_RING0
    rcNt = nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(pVM, pVCpu, VID_MSHAGN_F_GET_NEXT_MESSAGE, 30000 /*ms*/);
    DBGFTRACE_CUSTOM(pVM, "nemStop#1: %#x / %#x %#x %#x", rcNt, pMappingHeader->enmVidMsgType, pMappingHeader->cbMessage,
                     pMsgForTrace->Header.MessageType);
    AssertLogRelMsgReturn(rcNt == STATUS_SUCCESS,
                          ("1st VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %#x\n", rcNt),
                          RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
# else
    BOOL fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu,
                                                     VID_MSHAGN_F_GET_NEXT_MESSAGE, 30000 /*ms*/);
    DBGFTRACE_CUSTOM(pVM, "nemStop#1: %d+%#x / %#x %#x %#x", fWait, RTNtLastErrorValue(), pMappingHeader->enmVidMsgType,
                     pMappingHeader->cbMessage, pMsgForTrace->Header.MessageType);
    AssertLogRelMsgReturn(fWait, ("1st VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()),
                          RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
# endif

    VID_MESSAGE_TYPE enmVidMsgType = pMappingHeader->enmVidMsgType;
    if (enmVidMsgType != VidMessageStopRequestComplete)
    {
        VBOXSTRICTRC rcStrict2 = nemHCWinHandleMessage(pVM, pVCpu, pMappingHeader);
        if (rcStrict2 != VINF_SUCCESS && RT_SUCCESS(rcStrict))
            rcStrict = rcStrict2;
        DBGFTRACE_CUSTOM(pVM, "nemStop#1: handled %#x -> %d", pMsgForTrace->Header.MessageType, VBOXSTRICTRC_VAL(rcStrict));

        /*
         * Mark it as handled and get the stop request completed message, then mark
         * that as handled too.  The CPU is then back in the fully stopped state.
         */
# ifdef IN_RING0
        rcNt = nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(pVM, pVCpu,
                                                              VID_MSHAGN_F_HANDLE_MESSAGE | VID_MSHAGN_F_GET_NEXT_MESSAGE,
                                                              30000 /*ms*/);
        DBGFTRACE_CUSTOM(pVM, "nemStop#2: %#x / %#x %#x %#x", rcNt, pMappingHeader->enmVidMsgType, pMappingHeader->cbMessage,
                         pMsgForTrace->Header.MessageType);
        AssertLogRelMsgReturn(rcNt == STATUS_SUCCESS,
                              ("2nd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %#x\n", rcNt),
                              RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
# else
        fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu,
                                                    VID_MSHAGN_F_HANDLE_MESSAGE | VID_MSHAGN_F_GET_NEXT_MESSAGE, 30000 /*ms*/);
        DBGFTRACE_CUSTOM(pVM, "nemStop#2: %d+%#x / %#x %#x %#x", fWait, RTNtLastErrorValue(), pMappingHeader->enmVidMsgType,
                         pMappingHeader->cbMessage, pMsgForTrace->Header.MessageType);
        AssertLogRelMsgReturn(fWait, ("2nd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()),
                              RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
# endif

        /* It should be a stop request completed message. */
        enmVidMsgType = pMappingHeader->enmVidMsgType;
        AssertLogRelMsgReturn(enmVidMsgType == VidMessageStopRequestComplete,
                              ("Unexpected 2nd message following ERROR_VID_STOP_PENDING: %#x LB %#x\n",
                               enmVidMsgType, pMappingHeader->cbMessage),
                              RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);

        /*
         * Mark the VidMessageStopRequestComplete message as handled.
         */
# ifdef IN_RING0
        rcNt = nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(pVM, pVCpu, VID_MSHAGN_F_HANDLE_MESSAGE, 30000 /*ms*/);
        DBGFTRACE_CUSTOM(pVM, "nemStop#3: %#x / %#x %#x %#x", rcNt, pMappingHeader->enmVidMsgType,
                         pMappingHeader->cbMessage, pMsgForTrace->Header.MessageType);
        AssertLogRelMsgReturn(rcNt == STATUS_SUCCESS,
                              ("3rd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %#x\n", rcNt),
                              RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
# else
        fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu, VID_MSHAGN_F_HANDLE_MESSAGE, 30000 /*ms*/);
        DBGFTRACE_CUSTOM(pVM, "nemStop#3: %d+%#x / %#x %#x %#x", fWait, RTNtLastErrorValue(), pMappingHeader->enmVidMsgType,
                         pMappingHeader->cbMessage, pMsgForTrace->Header.MessageType);
        AssertLogRelMsgReturn(fWait, ("3rd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()),
                              RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
# endif
        Log8(("nemHCWinStopCpu: Stopped the CPU (rcStrict=%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict) ));
    }
    else
    {
        /** @todo I'm not so sure about this now... */
        DBGFTRACE_CUSTOM(pVM, "nemStop#9: %#x %#x %#x", pMappingHeader->enmVidMsgType,
                         pMappingHeader->cbMessage, pMsgForTrace->Header.MessageType);
        STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuPendingOdd);
        Log8(("nemHCWinStopCpu: Stopped the CPU (rcStrict=%Rrc) - 1st VidMessageSlotHandleAndGetNext got VidMessageStopRequestComplete.\n",
              VBOXSTRICTRC_VAL(rcStrict) ));
    }
    return rcStrict;
}
#endif /* NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
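
/*
 * Summary of the stop handshake implemented above (own-run-API mode):
 *   1. Ask VID.SYS to stop the virtual processor; on plain success the CPU
 *      never re-entered guest mode and we are done.
 *   2. On ERROR_VID_STOP_PENDING the CPU beat us to an exit: fetch and handle
 *      that exit message.
 *   3. Acknowledge it and fetch the VidMessageStopRequestComplete message.
 *   4. Acknowledge that one too, leaving the CPU fully stopped.
 * Steps 2-4 collapse into a single acknowledgement when the first message
 * fetched already is VidMessageStopRequestComplete.
 */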

#if defined(NEM_WIN_TEMPLATE_MODE_OWN_RUN_API) || defined(IN_RING3)

/**
 * Deals with pending interrupt-related force flags and may inject an interrupt.
 *
 * @returns VBox strict status code.
 * @param   pVM                 The cross context VM structure.
 * @param   pVCpu               The cross context per CPU structure.
 * @param   pfInterruptWindows  Where to return interrupt window flags.
 */
NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleInterruptFF(PVMCC pVM, PVMCPUCC pVCpu, uint8_t *pfInterruptWindows)
{
    Assert(!TRPMHasTrap(pVCpu));
    RT_NOREF_PV(pVM);

    /*
     * First update APIC.  We ASSUME this won't need TPR/CR8.
     */
    if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
    {
        APICUpdatePendingInterrupts(pVCpu);
        if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
                                 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
            return VINF_SUCCESS;
    }

    /*
     * We don't currently implement SMIs.
     */
    AssertReturn(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI), VERR_NEM_IPE_0);

    /*
     * Check if we've got the minimum of state required for deciding whether we
     * can inject interrupts and NMIs.  If we don't have it, get all we might
     * require for injection via IEM.
     */
    bool const fPendingNmi = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI);
    uint64_t   fNeedExtrn  = CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS
                           | (fPendingNmi ? CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI : 0);
    if (pVCpu->cpum.GstCtx.fExtrn & fNeedExtrn)
    {
        VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "IntFF");
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
    }
    bool const fInhibitInterrupts = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
                                 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip;

    /*
     * NMI? Try to deliver it first.
     */
    if (fPendingNmi)
    {
        if (   !fInhibitInterrupts
            && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
        {
            VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "NMI");
            if (rcStrict == VINF_SUCCESS)
            {
                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
                rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_NMI, TRPM_HARDWARE_INT, 0, 0, 0);
                Log8(("Injected NMI on %u (%d)\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
            }
            return rcStrict;
        }
        *pfInterruptWindows |= NEM_WIN_INTW_F_NMI;
        Log8(("NMI window pending on %u\n", pVCpu->idCpu));
    }

    /*
     * APIC or PIC interrupt?
     */
    if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
    {
        if (   !fInhibitInterrupts
            && pVCpu->cpum.GstCtx.rflags.Bits.u1IF)
        {
            AssertCompile(NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT & CPUMCTX_EXTRN_APIC_TPR);
            VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "IRQ");
            if (rcStrict == VINF_SUCCESS)
            {
                uint8_t bInterrupt;
                int rc = PDMGetInterrupt(pVCpu, &bInterrupt);
                if (RT_SUCCESS(rc))
                {
                    rcStrict = IEMInjectTrap(pVCpu, bInterrupt, TRPM_HARDWARE_INT, 0, 0, 0);
                    Log8(("Injected interrupt %#x on %u (%d)\n", bInterrupt, pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
                }
                else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
                {
                    *pfInterruptWindows |= (bInterrupt >> 4 /*??*/) << NEM_WIN_INTW_F_PRIO_SHIFT;
                    Log8(("VERR_APIC_INTR_MASKED_BY_TPR: *pfInterruptWindows=%#x\n", *pfInterruptWindows));
                }
                else
                    Log8(("PDMGetInterrupt failed -> %d\n", rc));
            }
            return rcStrict;
        }
        *pfInterruptWindows |= NEM_WIN_INTW_F_REGULAR;
        Log8(("Interrupt window pending on %u\n", pVCpu->idCpu));
    }

    return VINF_SUCCESS;
}
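
/*
 * Note: when an event cannot be delivered immediately (interrupt shadow,
 * IF clear, NMI blocking, TPR masking), the function above does not spin.
 * It records the fact in *pfInterruptWindows (NEM_WIN_INTW_F_NMI,
 * NEM_WIN_INTW_F_REGULAR, or a priority shifted by NEM_WIN_INTW_F_PRIO_SHIFT)
 * and returns; the run loop then requests an interrupt window exit from
 * Hyper-V and injection is retried once the guest becomes interruptible.
 */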


/**
 * Inner NEM runloop for Windows.
 *
 * @returns Strict VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context per CPU structure.
 */
NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinRunGC(PVMCC pVM, PVMCPUCC pVCpu)
{
    LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 <=\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags));
# ifdef LOG_ENABLED
    if (LogIs3Enabled())
        nemHCWinLogState(pVM, pVCpu);
# endif

    /*
     * Try switch to NEM runloop state.
     */
    if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
    { /* likely */ }
    else
    {
        VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
        LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
        return VINF_SUCCESS;
    }

    /*
     * The run loop.
     *
     * The current approach to state updating is to use the sledgehammer and
     * sync everything every time.  This will be optimized later.
     */
# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
    VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader = (VID_MESSAGE_MAPPING_HEADER volatile *)pVCpu->nem.s.pvMsgSlotMapping;
# endif
    const bool   fSingleStepping = DBGFIsStepping(pVCpu);
//    const uint32_t fCheckVmFFs  = !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK
//                                                   : VM_FF_HP_R0_PRE_HM_STEP_MASK;
//    const uint32_t fCheckCpuFFs = !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK;
    VBOXSTRICTRC rcStrict        = VINF_SUCCESS;
    for (unsigned iLoop = 0;; iLoop++)
    {
# ifndef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
        /*
         * Hack alert!
         */
        uint32_t const cMappedPages = pVM->nem.s.cMappedPages;
        if (cMappedPages >= 4000)
        {
            PGMPhysNemEnumPagesByState(pVM, pVCpu, NEM_WIN_PAGE_STATE_READABLE, nemHCWinUnmapOnePageCallback, NULL);
            Log(("nemHCWinRunGC: Unmapped all; cMappedPages=%u -> %u\n", cMappedPages, pVM->nem.s.cMappedPages));
        }
# endif

        /*
         * Pending interrupts or such?  Need to check and deal with this prior
         * to the state syncing.
         */
        pVCpu->nem.s.fDesiredInterruptWindows = 0;
        if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC
                                 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
        {
# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
            /* Make sure the CPU isn't executing. */
            if (pVCpu->nem.s.fHandleAndGetFlags == VID_MSHAGN_F_GET_NEXT_MESSAGE)
            {
                pVCpu->nem.s.fHandleAndGetFlags = 0;
                rcStrict = nemHCWinStopCpu(pVM, pVCpu, rcStrict, pMappingHeader);
                if (rcStrict == VINF_SUCCESS)
                { /* likely */ }
                else
                {
                    LogFlow(("NEM/%u: breaking: nemHCWinStopCpu -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
                    STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
                    break;
                }
            }
# endif

            /* Try inject interrupt. */
            rcStrict = nemHCWinHandleInterruptFF(pVM, pVCpu, &pVCpu->nem.s.fDesiredInterruptWindows);
            if (rcStrict == VINF_SUCCESS)
            { /* likely */ }
            else
            {
                LogFlow(("NEM/%u: breaking: nemHCWinHandleInterruptFF -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
                break;
            }
        }

        /*
         * Ensure that Hyper-V has the whole state.
         * (We always update the interrupt windows settings when active as Hyper-V
         * seems to forget about it after an exit.)
         */
        if (      (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK))
               != (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK)
            || (   (   pVCpu->nem.s.fDesiredInterruptWindows
                    || pVCpu->nem.s.fCurrentInterruptWindows != pVCpu->nem.s.fDesiredInterruptWindows)
# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
                && pVCpu->nem.s.fHandleAndGetFlags != VID_MSHAGN_F_GET_NEXT_MESSAGE /* not running */
# endif
               )
           )
        {
# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
            AssertMsg(pVCpu->nem.s.fHandleAndGetFlags != VID_MSHAGN_F_GET_NEXT_MESSAGE /* not running */,
                      ("%#x fExtrn=%#RX64 (%#RX64) fDesiredInterruptWindows=%d fCurrentInterruptWindows=%#x vs %#x\n",
                       pVCpu->nem.s.fHandleAndGetFlags, pVCpu->cpum.GstCtx.fExtrn, ~pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK),
                       pVCpu->nem.s.fDesiredInterruptWindows, pVCpu->nem.s.fCurrentInterruptWindows, pVCpu->nem.s.fDesiredInterruptWindows));
# endif
# ifdef IN_RING0
            int rc2 = nemR0WinExportState(pVM, pVCpu, &pVCpu->cpum.GstCtx);
# else
            int rc2 = nemHCWinCopyStateToHyperV(pVM, pVCpu);
# endif
            AssertRCReturn(rc2, rc2);
        }

        /*
         * Poll timers and run for a bit.
         *
         * With the VID approach (ring-0 or ring-3) we can specify a timeout here,
         * so we take the time of the next timer event and use that as a deadline.
         * The rounding heuristics are "tuned" so that rhel5 (1K timer) will boot fine.
         */
        /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
         *        the whole polling job when timers have changed... */
        uint64_t       offDeltaIgnored;
        uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
        if (   !VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
            && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
        {
# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
            if (pVCpu->nem.s.fHandleAndGetFlags)
            { /* Very likely that the CPU does NOT need starting (pending msg, running). */ }
            else
            {
#  ifdef IN_RING0
                pVCpu->nem.s.uIoCtlBuf.idCpu = pVCpu->idCpu;
                NTSTATUS rcNt = nemR0NtPerformIoControl(pVM, pVCpu, pVM->nemr0.s.IoCtlStartVirtualProcessor.uFunction,
                                                        &pVCpu->nem.s.uIoCtlBuf.idCpu, sizeof(pVCpu->nem.s.uIoCtlBuf.idCpu),
                                                        NULL, 0);
                LogFlow(("NEM/%u: IoCtlStartVirtualProcessor -> %#x\n", pVCpu->idCpu, rcNt));
                AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("VidStartVirtualProcessor failed for CPU #%u: %#x\n", pVCpu->idCpu, rcNt),
                                      VERR_NEM_IPE_5);
#  else
                AssertLogRelMsgReturn(g_pfnVidStartVirtualProcessor(pVM->nem.s.hPartitionDevice, pVCpu->idCpu),
                                      ("VidStartVirtualProcessor failed for CPU #%u: %u (%#x, rcNt=%#x)\n",
                                       pVCpu->idCpu, RTNtLastErrorValue(), RTNtLastErrorValue(), RTNtLastStatusValue()),
                                      VERR_NEM_IPE_5);
#  endif
                pVCpu->nem.s.fHandleAndGetFlags = VID_MSHAGN_F_GET_NEXT_MESSAGE;
            }
# endif /* NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */

            if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_WAIT, VMCPUSTATE_STARTED_EXEC_NEM))
            {
# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
                uint64_t const nsNow = RTTimeNanoTS();
                int64_t const cNsNextTimerEvt = nsNextTimerEvt - nsNow; /* time until the next timer event */
                uint32_t      cMsWait;
                if (cNsNextTimerEvt < 100000 /* ns */)
                    cMsWait = 0;
                else if ((uint64_t)cNsNextTimerEvt < RT_NS_1SEC)
                {
                    if ((uint32_t)cNsNextTimerEvt < 2*RT_NS_1MS)
                        cMsWait = 1;
                    else
                        cMsWait = ((uint32_t)cNsNextTimerEvt - 100000 /*ns*/) / RT_NS_1MS;
                }
                else
                    cMsWait = RT_MS_1SEC;
#  ifdef IN_RING0
                pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu     = pVCpu->idCpu;
                pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags   = pVCpu->nem.s.fHandleAndGetFlags;
                pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = cMsWait;
                NTSTATUS rcNt = nemR0NtPerformIoControl(pVM, pVCpu, pVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
                                                        &pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
                                                        pVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.cbInput,
                                                        NULL, 0);
                VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
                if (rcNt == STATUS_SUCCESS)
#  else
                BOOL fRet = VidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu,
                                                           pVCpu->nem.s.fHandleAndGetFlags, cMsWait);
                VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
                if (fRet)
#  endif
# else
                WHV_RUN_VP_EXIT_CONTEXT ExitReason;
                RT_ZERO(ExitReason);
                HRESULT hrc = WHvRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, &ExitReason, sizeof(ExitReason));
                VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
                if (SUCCEEDED(hrc))
# endif
                {
                    /*
                     * Deal with the message.
                     */
# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
                    rcStrict = nemHCWinHandleMessage(pVM, pVCpu, pMappingHeader);
                    pVCpu->nem.s.fHandleAndGetFlags |= VID_MSHAGN_F_HANDLE_MESSAGE;
# else
                    rcStrict = nemR3WinHandleExit(pVM, pVCpu, &ExitReason);
# endif
                    if (rcStrict == VINF_SUCCESS)
                    { /* hopefully likely */ }
                    else
                    {
                        LogFlow(("NEM/%u: breaking: nemHCWinHandleMessage -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
                        STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
                        break;
                    }
                }
                else
                {
# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API

                    /* VID.SYS merges STATUS_ALERTED and STATUS_USER_APC into STATUS_TIMEOUT,
                       so after NtAlertThread we end up here with a STATUS_TIMEOUT.  And yeah,
                       the error code conversion is into WAIT_XXX, i.e. NT status codes. */
#  ifndef IN_RING0
                    DWORD rcNt = GetLastError();
#  endif
                    LogFlow(("NEM/%u: VidMessageSlotHandleAndGetNext -> %#x\n", pVCpu->idCpu, rcNt));
                    AssertLogRelMsgReturn(   rcNt == STATUS_TIMEOUT
                                          || rcNt == STATUS_ALERTED    /* just in case */
                                          || rcNt == STATUS_USER_APC   /* ditto */
                                          || rcNt == STATUS_KERNEL_APC /* ditto */
                                          , ("VidMessageSlotHandleAndGetNext failed for CPU #%u: %#x (%u)\n",
                                             pVCpu->idCpu, rcNt, rcNt),
                                          VERR_NEM_IPE_0);
                    pVCpu->nem.s.fHandleAndGetFlags = VID_MSHAGN_F_GET_NEXT_MESSAGE;
                    STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatGetMsgTimeout);
# else
                    AssertLogRelMsgFailedReturn(("WHvRunVirtualProcessor failed for CPU #%u: %#x (%u)\n",
                                                 pVCpu->idCpu, hrc, GetLastError()),
                                                VERR_NEM_IPE_0);
# endif
                }

                /*
                 * If no relevant FFs are pending, loop.
                 */
                if (   !VM_FF_IS_ANY_SET(   pVM,   !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK    : VM_FF_HP_R0_PRE_HM_STEP_MASK)
                    && !VMCPU_FF_IS_ANY_SET(pVCpu, !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
                    continue;

                /** @todo Try handle pending flags, not just return to EM loops.  Take care
                 *        not to set important RCs here unless we've handled a message. */
                LogFlow(("NEM/%u: breaking: pending FF (%#x / %#RX64)\n",
                         pVCpu->idCpu, pVM->fGlobalForcedActions, (uint64_t)pVCpu->fLocalForcedActions));
                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPost);
            }
            else
            {
                LogFlow(("NEM/%u: breaking: canceled %d (pre exec)\n", pVCpu->idCpu, VMCPU_GET_STATE(pVCpu) ));
                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnCancel);
            }
        }
        else
        {
            LogFlow(("NEM/%u: breaking: pending FF (pre exec)\n", pVCpu->idCpu));
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPre);
        }
        break;
    } /* the run loop */


    /*
     * If the CPU is running, make sure to stop it before we try sync back the
     * state and return to EM.  We don't sync back the whole state if we can help it.
     */
# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
    if (pVCpu->nem.s.fHandleAndGetFlags == VID_MSHAGN_F_GET_NEXT_MESSAGE)
    {
        pVCpu->nem.s.fHandleAndGetFlags = 0;
        rcStrict = nemHCWinStopCpu(pVM, pVCpu, rcStrict, pMappingHeader);
    }
# endif

    if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
        VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);

    if (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)))
    {
        /* Try to anticipate what we might need. */
        uint64_t fImport = IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI;
        if (   (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
            || RT_FAILURE(rcStrict))
            fImport = CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT);
# ifdef IN_RING0 /* Ring-3 I/O port access optimizations: */
        else if (   rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
                 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
            fImport = CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT;
        else if (rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
            fImport = CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT;
# endif
        else if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_APIC
                                     | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
            fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;

        if (pVCpu->cpum.GstCtx.fExtrn & fImport)
        {
# ifdef IN_RING0
            int rc2 = nemR0WinImportState(pVM, pVCpu, &pVCpu->cpum.GstCtx, fImport | CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT,
                                          true /*fCanUpdateCr3*/);
            if (RT_SUCCESS(rc2))
                pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
            else if (rc2 == VERR_NEM_FLUSH_TLB)
            {
                pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
                if (rcStrict == VINF_SUCCESS || rcStrict == -rc2)
                    rcStrict = -rc2;
                else
                {
                    pVCpu->nem.s.rcPending = -rc2;
                    LogFlow(("NEM/%u: rcPending=%Rrc (rcStrict=%Rrc)\n", pVCpu->idCpu, rc2, VBOXSTRICTRC_VAL(rcStrict) ));
                }
            }
# else
            int rc2 = nemHCWinCopyStateFromHyperV(pVM, pVCpu, fImport | CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT);
            if (RT_SUCCESS(rc2))
                pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
# endif
            else if (RT_SUCCESS(rcStrict))
                rcStrict = rc2;
            if (!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT))))
                pVCpu->cpum.GstCtx.fExtrn = 0;
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
        }
        else
        {
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
            pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
        }
    }
    else
    {
        STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
        pVCpu->cpum.GstCtx.fExtrn = 0;
    }

    LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 => %Rrc\n",
             pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, VBOXSTRICTRC_VAL(rcStrict) ));
    return rcStrict;
}

#endif /* defined(NEM_WIN_TEMPLATE_MODE_OWN_RUN_API) || defined(IN_RING3) */
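
/*
 * Shape of nemHCWinRunGC() at a glance:
 *   - deal with pending force flags and interrupt injection (stopping the
 *     CPU first in own-run-API mode, since it must not be executing),
 *   - export state to Hyper-V whenever fExtrn says parts of it are dirty,
 *   - execute: VidMessageSlotHandleAndGetNext (ring-0 ioctl or ring-3 VID
 *     API) with a timer-derived timeout, or WHvRunVirtualProcessor,
 *   - handle the resulting message/exit and loop until a force flag or a
 *     non-VINF_SUCCESS status breaks us out,
 *   - stop the CPU if it is still running, then import only as much state
 *     as the return path is likely to need.
 */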

/**
 * @callback_method_impl{FNPGMPHYSNEMCHECKPAGE}
 */
NEM_TMPL_STATIC DECLCALLBACK(int) nemHCWinUnsetForA20CheckerCallback(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys,
                                                                     PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
{
    /* We'll just unmap the memory. */
    if (pInfo->u2NemState > NEM_WIN_PAGE_STATE_UNMAPPED)
    {
#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
        int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys);
        AssertRC(rc);
        if (RT_SUCCESS(rc))
#else
        HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
        if (SUCCEEDED(hrc))
#endif
        {
            uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
            Log5(("NEM GPA unmapped/A20: %RGp (was %s, cMappedPages=%u)\n", GCPhys, g_apszPageStates[pInfo->u2NemState], cMappedPages));
            pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
        }
        else
        {
#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
            LogRel(("nemHCWinUnsetForA20CheckerCallback/unmap: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));
            return rc;
#else
            LogRel(("nemHCWinUnsetForA20CheckerCallback/unmap: GCPhys=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
                    GCPhys, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
            return VERR_NEM_IPE_2;
#endif
        }
    }
    RT_NOREF(pVCpu, pvUser);
    return VINF_SUCCESS;
}


/**
 * Unmaps a page from Hyper-V for the purpose of emulating A20 gate behavior.
 *
 * @returns The PGMPhysNemPageInfoChecker result.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   GCPhys  The page to unmap.
 */
NEM_TMPL_STATIC int nemHCWinUnmapPageForA20Gate(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys)
{
    PGMPHYSNEMPAGEINFO Info;
    return PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhys, false /*fMakeWritable*/, &Info,
                                     nemHCWinUnsetForA20CheckerCallback, NULL);
}
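
/*
 * Background: with the A20 gate disabled, address line 20 is forced to zero,
 * so real-mode accesses just above 1MB wrap around to the bottom of memory.
 * The page at GCPhys and its alias at (GCPhys | RT_BIT_32(20)) must therefore
 * never both be mapped into the partition at the same time.  Rather than
 * keeping both views coherent, the helpers above simply unmap whatever is
 * mapped and rely on the memory access exit handlers to remap the correct
 * backing lazily.
 */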


void nemHCNativeNotifyHandlerPhysicalRegister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
{
    Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
    NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb);
}


void nemHCNativeNotifyHandlerPhysicalDeregister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
                                                int fRestoreAsRAM, bool fRestoreAsRAM2)
{
    Log5(("nemHCNativeNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d fRestoreAsRAM=%d fRestoreAsRAM2=%d\n",
          GCPhys, cb, enmKind, fRestoreAsRAM, fRestoreAsRAM2));
    NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb); NOREF(fRestoreAsRAM); NOREF(fRestoreAsRAM2);
}


void nemHCNativeNotifyHandlerPhysicalModify(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
                                            RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
{
    Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
          GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
    NOREF(pVM); NOREF(enmKind); NOREF(GCPhysOld); NOREF(GCPhysNew); NOREF(cb); NOREF(fRestoreAsRAM);
}
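
/*
 * Note: the three handler notification callbacks above are log-only no-ops in
 * this backend; the actual Hyper-V mapping adjustments appear to happen via
 * the per-page notifications and exit handlers below rather than via these
 * range-level hooks.
 */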


/**
 * Worker that maps pages into Hyper-V.
 *
 * This is used by the PGM physical page notifications as well as the memory
 * access VMEXIT handlers.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context virtual CPU structure of the
 *                          calling EMT.
 * @param   GCPhysSrc       The source page address.
 * @param   GCPhysDst       The Hyper-V destination page.  This may differ from
 *                          GCPhysSrc when A20 is disabled.
 * @param   fPageProt       NEM_PAGE_PROT_XXX.
 * @param   pu2State        Our page state (input/output).
 * @param   fBackingChanged Set if the page backing is being changed.
 * @thread  EMT(pVCpu)
 */
NEM_TMPL_STATIC int nemHCNativeSetPhysPage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
                                           uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged)
{
#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
    /*
     * When using the hypercalls instead of the ring-3 APIs, we don't need to
     * unmap memory before modifying it.  We still want to track the state though,
     * since unmap will fail when called on an unmapped page and we don't want to
     * redo upgrades/downgrades.
     */
    uint8_t const u2OldState = *pu2State;
    int rc;
    if (fPageProt == NEM_PAGE_PROT_NONE)
    {
        if (u2OldState > NEM_WIN_PAGE_STATE_UNMAPPED)
        {
            rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
            if (RT_SUCCESS(rc))
            {
                *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
                uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
                Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
            }
            else
                AssertLogRelMsgFailed(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
        }
        else
            rc = VINF_SUCCESS;
    }
    else if (fPageProt & NEM_PAGE_PROT_WRITE)
    {
        if (u2OldState != NEM_WIN_PAGE_STATE_WRITABLE || fBackingChanged)
        {
            rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
                                          HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE
                                          | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
            if (RT_SUCCESS(rc))
            {
                *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
                uint32_t cMappedPages = u2OldState <= NEM_WIN_PAGE_STATE_UNMAPPED
                                      ? ASMAtomicIncU32(&pVM->nem.s.cMappedPages) : pVM->nem.s.cMappedPages;
                Log5(("NEM GPA writable/set: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
                NOREF(cMappedPages);
            }
            else
                AssertLogRelMsgFailed(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
        }
        else
            rc = VINF_SUCCESS;
    }
    else
    {
        if (u2OldState != NEM_WIN_PAGE_STATE_READABLE || fBackingChanged)
        {
            rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
                                          HV_MAP_GPA_READABLE | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
            if (RT_SUCCESS(rc))
            {
                *pu2State = NEM_WIN_PAGE_STATE_READABLE;
                uint32_t cMappedPages = u2OldState <= NEM_WIN_PAGE_STATE_UNMAPPED
                                      ? ASMAtomicIncU32(&pVM->nem.s.cMappedPages) : pVM->nem.s.cMappedPages;
                Log5(("NEM GPA read+exec/set: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
                NOREF(cMappedPages);
            }
            else
                AssertLogRelMsgFailed(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
        }
        else
            rc = VINF_SUCCESS;
    }

    return rc;

#else
    /*
     * Looks like we need to unmap a page before we can change the backing
     * or even modify the protection.  This is going to be *REALLY* efficient.
     * PGM lends us two bits to keep track of the state here.
     */
    uint8_t const u2OldState = *pu2State;
    uint8_t const u2NewState = fPageProt & NEM_PAGE_PROT_WRITE ? NEM_WIN_PAGE_STATE_WRITABLE
                             : fPageProt & NEM_PAGE_PROT_READ  ? NEM_WIN_PAGE_STATE_READABLE : NEM_WIN_PAGE_STATE_UNMAPPED;
    if (   fBackingChanged
        || u2NewState != u2OldState)
    {
        if (u2OldState > NEM_WIN_PAGE_STATE_UNMAPPED)
        {
# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
            int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
            AssertRC(rc);
            if (RT_SUCCESS(rc))
            {
                *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
                uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
                if (u2NewState == NEM_WIN_PAGE_STATE_UNMAPPED)
                {
                    Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n",
                          GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
                    return VINF_SUCCESS;
                }
            }
            else
            {
                LogRel(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
                return rc;
            }
# else
            HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst, X86_PAGE_SIZE);
            if (SUCCEEDED(hrc))
            {
                *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
                uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
                if (u2NewState == NEM_WIN_PAGE_STATE_UNMAPPED)
                {
                    Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n",
                          GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
                    return VINF_SUCCESS;
                }
            }
            else
            {
                LogRel(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
                        GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
                return VERR_NEM_INIT_FAILED;
            }
# endif
        }
    }

    /*
     * Writeable mapping?
     */
    if (fPageProt & NEM_PAGE_PROT_WRITE)
    {
# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
        int rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
                                          HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE
                                          | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
        AssertRC(rc);
        if (RT_SUCCESS(rc))
        {
            *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
            uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
            Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
                  GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
            return VINF_SUCCESS;
        }
        LogRel(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
        return rc;
# else
        void *pvPage;
        int rc = nemR3NativeGCPhys2R3PtrWriteable(pVM, GCPhysSrc, &pvPage);
        if (RT_SUCCESS(rc))
        {
            HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvPage, GCPhysDst, X86_PAGE_SIZE,
                                         WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute | WHvMapGpaRangeFlagWrite);
            if (SUCCEEDED(hrc))
            {
                *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
                uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
                Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
                      GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
                return VINF_SUCCESS;
            }
            LogRel(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
                    GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
            return VERR_NEM_INIT_FAILED;
        }
        LogRel(("nemHCNativeSetPhysPage/writable: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
        return rc;
# endif
    }

    if (fPageProt & NEM_PAGE_PROT_READ)
    {
# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
        int rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
                                          HV_MAP_GPA_READABLE | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
        AssertRC(rc);
        if (RT_SUCCESS(rc))
        {
            *pu2State = NEM_WIN_PAGE_STATE_READABLE;
            uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
            Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
                  GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
            return VINF_SUCCESS;
        }
        LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
        return rc;
# else
        const void *pvPage;
        int rc = nemR3NativeGCPhys2R3PtrReadOnly(pVM, GCPhysSrc, &pvPage);
        if (RT_SUCCESS(rc))
        {
            HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, (void *)pvPage, GCPhysDst, X86_PAGE_SIZE,
                                         WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
            if (SUCCEEDED(hrc))
            {
                *pu2State = NEM_WIN_PAGE_STATE_READABLE;
                uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
                Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
                      GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
                return VINF_SUCCESS;
            }
            LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
                    GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
            return VERR_NEM_INIT_FAILED;
        }
        LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
        return rc;
# endif
    }

    /* We already unmapped it above. */
    *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
    return VINF_SUCCESS;
#endif /* !NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
}
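
/*
 * Note: nemHCNativeSetPhysPage() drives a small per-page state machine kept
 * in the two PGM-provided bits behind pu2State:
 *
 *     NEM_WIN_PAGE_STATE_UNMAPPED <-> NEM_WIN_PAGE_STATE_READABLE
 *                                 <-> NEM_WIN_PAGE_STATE_WRITABLE
 *
 * The WHv API path must unmap before any backing or protection change, while
 * the hypercall path can remap in place and only uses the state to skip
 * redundant transitions.
 */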


NEM_TMPL_STATIC int nemHCJustUnmapPageFromHyperV(PVMCC pVM, RTGCPHYS GCPhysDst, uint8_t *pu2State)
{
    if (*pu2State <= NEM_WIN_PAGE_STATE_UNMAPPED)
    {
        Log5(("nemHCJustUnmapPageFromHyperV: %RGp == unmapped\n", GCPhysDst));
        *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
        return VINF_SUCCESS;
    }

#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING0)
    PVMCPUCC pVCpu = VMMGetCpu(pVM);
    int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
    {
        uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
        Log5(("NEM GPA unmapped/just: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[*pu2State], cMappedPages));
        *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
        return VINF_SUCCESS;
    }
    LogRel(("nemHCJustUnmapPageFromHyperV/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
    return rc;
#else
    HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, X86_PAGE_SIZE);
    if (SUCCEEDED(hrc))
    {
        uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
        *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
        Log5(("nemHCJustUnmapPageFromHyperV: %RGp => unmapped (total %u)\n", GCPhysDst, cMappedPages));
        return VINF_SUCCESS;
    }
    LogRel(("nemHCJustUnmapPageFromHyperV(%RGp): failed! hrc=%Rhrc (%#x) Last=%#x/%u\n",
            GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
    return VERR_NEM_IPE_6;
#endif
}
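
/*
 * Note: nemHCJustUnmapPageFromHyperV() is the cheap variant used when we have
 * no intention of remapping right away; it only keeps the u2State bits and
 * the cMappedPages statistics consistent.  The masking with
 * ~(RTGCPHYS)X86_PAGE_OFFSET_MASK in the WHv path page-aligns the address,
 * presumably because WHvUnmapGpaRange() works on page-granular ranges.
 */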


int nemHCNativeNotifyPhysPageAllocated(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
                                       PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhys, fPageProt, enmType, *pu2State));
    RT_NOREF_PV(HCPhys); RT_NOREF_PV(enmType);

    int rc;
#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING0)
    PVMCPUCC pVCpu = VMMGetCpu(pVM);
    if (   pVM->nem.s.fA20Enabled
        || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
        rc = nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
    else
    {
        /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
        rc = nemHCWinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
        if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys) && RT_SUCCESS(rc))
            rc = nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
    }
#else
    RT_NOREF_PV(fPageProt);
    if (   pVM->nem.s.fA20Enabled
        || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
        rc = nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
    else if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
        rc = nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
    else
        rc = VINF_SUCCESS; /* ignore since we've got the alias page at this address. */
#endif
    return rc;
}


void nemHCNativeNotifyPhysPageProtChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
                                          PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("nemHCNativeNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhys, fPageProt, enmType, *pu2State));
    RT_NOREF_PV(HCPhys); RT_NOREF_PV(enmType);

#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING0)
    PVMCPUCC pVCpu = VMMGetCpu(pVM);
    if (   pVM->nem.s.fA20Enabled
        || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
        nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, false /*fBackingChanged*/);
    else
    {
        /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
        nemHCWinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
        if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
            nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, false /*fBackingChanged*/);
    }
#else
    RT_NOREF_PV(fPageProt);
    if (   pVM->nem.s.fA20Enabled
        || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
        nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
    else if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
        nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
    /* else: ignore since we've got the alias page at this address. */
#endif
}


void nemHCNativeNotifyPhysPageChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
                                      uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("nemHCNativeNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhysPrev, HCPhysNew, fPageProt, enmType, *pu2State));
    RT_NOREF_PV(HCPhysPrev); RT_NOREF_PV(HCPhysNew); RT_NOREF_PV(enmType);

#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING0)
    PVMCPUCC pVCpu = VMMGetCpu(pVM);
    if (   pVM->nem.s.fA20Enabled
        || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
        nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
    else
    {
        /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
        nemHCWinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
        if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
            nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
    }
#else
    RT_NOREF_PV(fPageProt);
    if (   pVM->nem.s.fA20Enabled
        || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
        nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
    else if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
        nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
    /* else: ignore since we've got the alias page at this address. */
#endif
}
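
/*
 * Note: the three nemHCNativeNotifyPhysPage* callbacks above share one A20
 * strategy: when a page is affected by a disabled A20 gate, unmap the 1MB
 * alias (GCPhys | RT_BIT_32(20)) and let later accesses fault it back in,
 * instead of eagerly keeping both views of the page coherent.
 */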
4940