VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/NEMAllNativeTemplate-win.cpp.h@92296

Last change on this file since 92296 was 92170, checked in by vboxsync, 3 years ago:

VMM/PGM,NEM: Let NEM handle dirty VRAM (MMIO2) page tracking. Saves lots of exits when using the VBoxVGA device or VMSVGA in non-VMSVGA mode. Added a 32-bit NEM field to the PGMRAMRANGE structure called uNemRange. bugref:10122

/* $Id: NEMAllNativeTemplate-win.cpp.h 92170 2021-11-01 22:06:25Z vboxsync $ */
/** @file
 * NEM - Native execution manager, Windows code template ring-0/3.
 */

/*
 * Copyright (C) 2018-2020 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** Copy back a segment from hyper-V. */
#define NEM_WIN_COPY_BACK_SEG(a_Dst, a_Src) \
    do { \
        (a_Dst).u64Base  = (a_Src).Base; \
        (a_Dst).u32Limit = (a_Src).Limit; \
        (a_Dst).ValidSel = (a_Dst).Sel = (a_Src).Selector; \
        (a_Dst).Attr.u   = (a_Src).Attributes; \
        (a_Dst).fFlags   = CPUMSELREG_FLAGS_VALID; \
    } while (0)

/** @def NEMWIN_ASSERT_MSG_REG_VAL
 * Asserts the correctness of a register value in a message/context.
 */
#if 0
# define NEMWIN_NEED_GET_REGISTER
# if defined(IN_RING0) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
#  define NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_enmReg, a_Expr, a_Msg) \
    do { \
        HV_REGISTER_VALUE TmpVal; \
        nemHCWinGetRegister(a_pVCpu, a_enmReg, &TmpVal); \
        AssertMsg(a_Expr, a_Msg); \
    } while (0)
# else
#  define NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_enmReg, a_Expr, a_Msg) \
    do { \
        WHV_REGISTER_VALUE TmpVal; \
        nemR3WinGetRegister(a_pVCpu, a_enmReg, &TmpVal); \
        AssertMsg(a_Expr, a_Msg); \
    } while (0)
# endif
#else
# define NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_enmReg, a_Expr, a_Msg) do { } while (0)
#endif
/** @def NEMWIN_ASSERT_MSG_REG_VAL64
 * Asserts the correctness of a 64-bit register value in a message/context.
 */
#define NEMWIN_ASSERT_MSG_REG_VAL64(a_pVCpu, a_enmReg, a_u64Val) \
    NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_enmReg, (a_u64Val) == TmpVal.Reg64, \
                              (#a_u64Val "=%#RX64, expected %#RX64\n", (a_u64Val), TmpVal.Reg64))
/** @def NEMWIN_ASSERT_MSG_REG_SEG
 * Asserts the correctness of a segment register value in a message/context.
 */
#define NEMWIN_ASSERT_MSG_REG_SEG(a_pVCpu, a_enmReg, a_SReg) \
    NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_enmReg, \
                                 (a_SReg).Base       == TmpVal.Segment.Base \
                              && (a_SReg).Limit      == TmpVal.Segment.Limit \
                              && (a_SReg).Selector   == TmpVal.Segment.Selector \
                              && (a_SReg).Attributes == TmpVal.Segment.Attributes, \
                              ( #a_SReg "=%#RX16 {%#RX64 LB %#RX32,%#RX16} expected %#RX16 {%#RX64 LB %#RX32,%#RX16}\n", \
                               (a_SReg).Selector, (a_SReg).Base, (a_SReg).Limit, (a_SReg).Attributes, \
                               TmpVal.Segment.Selector, TmpVal.Segment.Base, TmpVal.Segment.Limit, TmpVal.Segment.Attributes))
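/* Example usage (hypothetical values, for illustration only; both expand to
   no-ops unless the '#if 0' guarding NEMWIN_NEED_GET_REGISTER above is enabled):
       NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, WHvX64RegisterRip, pVCpu->cpum.GstCtx.rip);
       NEMWIN_ASSERT_MSG_REG_SEG(pVCpu, WHvX64RegisterCs, pVCpu->cpum.GstCtx.cs); */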


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** NEM_WIN_PAGE_STATE_XXX names. */
NEM_TMPL_STATIC const char * const g_apszPageStates[4] = { "not-set", "unmapped", "readable", "writable" };

/** HV_INTERCEPT_ACCESS_TYPE names. */
static const char * const g_apszHvInterceptAccessTypes[4] = { "read", "write", "exec", "!undefined!" };


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
NEM_TMPL_STATIC int nemHCNativeSetPhysPage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
                                           uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged);



#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES

/**
 * Wrapper around VMMR0_DO_NEM_MAP_PAGES for a single page.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the caller.
 * @param   GCPhysSrc   The source page.  Does not need to be page aligned.
 * @param   GCPhysDst   The destination page.  Same as @a GCPhysSrc except for
 *                      when A20 is disabled.
 * @param   fFlags      HV_MAP_GPA_XXX.
 */
DECLINLINE(int) nemHCWinHypercallMapPage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst, uint32_t fFlags)
{
#ifdef IN_RING0
    /** @todo optimize further, caller generally has the physical address. */
    return nemR0WinMapPages(pVM, pVCpu,
                            GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
                            GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
                            1, fFlags);
#else
    pVCpu->nem.s.Hypercall.MapPages.GCPhysSrc = GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
    pVCpu->nem.s.Hypercall.MapPages.GCPhysDst = GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
    pVCpu->nem.s.Hypercall.MapPages.cPages    = 1;
    pVCpu->nem.s.Hypercall.MapPages.fFlags    = fFlags;
    return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_MAP_PAGES, 0, NULL);
#endif
}

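/* Example (hypothetical flags, for illustration only): mapping one guest page
   read/write would look along the lines of
       nemHCWinHypercallMapPage(pVM, pVCpu, GCPhys, GCPhys,
                                HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE); */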

/**
 * Wrapper around VMMR0_DO_NEM_UNMAP_PAGES for a single page.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the caller.
 * @param   GCPhys  The page to unmap.  Does not need to be page aligned.
 */
DECLINLINE(int) nemHCWinHypercallUnmapPage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys)
{
# ifdef IN_RING0
    return nemR0WinUnmapPages(pVM, pVCpu, GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, 1);
# else
    pVCpu->nem.s.Hypercall.UnmapPages.GCPhys = GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
    pVCpu->nem.s.Hypercall.UnmapPages.cPages = 1;
    return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_UNMAP_PAGES, 0, NULL);
# endif
}

#endif /* NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
#ifndef IN_RING0

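/**
 * Exports (copies) the guest CPU state to Hyper-V, ring-3 edition.
 *
 * Only the state VBox currently owns is written out, i.e. the bits whose
 * CPUMCTX_EXTRN_XXX flags are clear in fExtrn, either via the ring-0
 * VMMR0_DO_NEM_EXPORT_STATE call or directly through
 * WHvSetVirtualProcessorRegisters().
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the caller.
 */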
NEM_TMPL_STATIC int nemHCWinCopyStateToHyperV(PVMCC pVM, PVMCPUCC pVCpu)
{
# if defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) || defined(NEM_WIN_WITH_RING0_RUNLOOP)
#  if !defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) && defined(NEM_WIN_WITH_RING0_RUNLOOP)
    if (pVM->nem.s.fUseRing0Runloop)
#  endif
    {
        int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_EXPORT_STATE, 0, NULL);
        AssertLogRelRCReturn(rc, rc);
        return rc;
    }
# endif
# ifndef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS

    /*
     * The following is very similar to what nemR0WinExportState() does.
     */
    WHV_REGISTER_NAME  aenmNames[128];
    WHV_REGISTER_VALUE aValues[128];

    uint64_t const fWhat = ~pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK);
    if (   !fWhat
        && pVCpu->nem.s.fCurrentInterruptWindows == pVCpu->nem.s.fDesiredInterruptWindows)
        return VINF_SUCCESS;
    uintptr_t iReg = 0;

#  define ADD_REG64(a_enmName, a_uValue) do { \
        aenmNames[iReg]             = (a_enmName); \
        aValues[iReg].Reg128.High64 = 0; \
        aValues[iReg].Reg64         = (a_uValue); \
        iReg++; \
    } while (0)
#  define ADD_REG128(a_enmName, a_uValueLo, a_uValueHi) do { \
        aenmNames[iReg]             = (a_enmName); \
        aValues[iReg].Reg128.Low64  = (a_uValueLo); \
        aValues[iReg].Reg128.High64 = (a_uValueHi); \
        iReg++; \
    } while (0)

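    /* Note: aenmNames[] and aValues[] are parallel arrays; each ADD_* macro
       appends one entry to both, and iReg counts the pairs handed to
       WHvSetVirtualProcessorRegisters() at the bottom of the function. */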
    /* GPRs */
    if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_RAX)
            ADD_REG64(WHvX64RegisterRax, pVCpu->cpum.GstCtx.rax);
        if (fWhat & CPUMCTX_EXTRN_RCX)
            ADD_REG64(WHvX64RegisterRcx, pVCpu->cpum.GstCtx.rcx);
        if (fWhat & CPUMCTX_EXTRN_RDX)
            ADD_REG64(WHvX64RegisterRdx, pVCpu->cpum.GstCtx.rdx);
        if (fWhat & CPUMCTX_EXTRN_RBX)
            ADD_REG64(WHvX64RegisterRbx, pVCpu->cpum.GstCtx.rbx);
        if (fWhat & CPUMCTX_EXTRN_RSP)
            ADD_REG64(WHvX64RegisterRsp, pVCpu->cpum.GstCtx.rsp);
        if (fWhat & CPUMCTX_EXTRN_RBP)
            ADD_REG64(WHvX64RegisterRbp, pVCpu->cpum.GstCtx.rbp);
        if (fWhat & CPUMCTX_EXTRN_RSI)
            ADD_REG64(WHvX64RegisterRsi, pVCpu->cpum.GstCtx.rsi);
        if (fWhat & CPUMCTX_EXTRN_RDI)
            ADD_REG64(WHvX64RegisterRdi, pVCpu->cpum.GstCtx.rdi);
        if (fWhat & CPUMCTX_EXTRN_R8_R15)
        {
            ADD_REG64(WHvX64RegisterR8, pVCpu->cpum.GstCtx.r8);
            ADD_REG64(WHvX64RegisterR9, pVCpu->cpum.GstCtx.r9);
            ADD_REG64(WHvX64RegisterR10, pVCpu->cpum.GstCtx.r10);
            ADD_REG64(WHvX64RegisterR11, pVCpu->cpum.GstCtx.r11);
            ADD_REG64(WHvX64RegisterR12, pVCpu->cpum.GstCtx.r12);
            ADD_REG64(WHvX64RegisterR13, pVCpu->cpum.GstCtx.r13);
            ADD_REG64(WHvX64RegisterR14, pVCpu->cpum.GstCtx.r14);
            ADD_REG64(WHvX64RegisterR15, pVCpu->cpum.GstCtx.r15);
        }
    }

    /* RIP & Flags */
    if (fWhat & CPUMCTX_EXTRN_RIP)
        ADD_REG64(WHvX64RegisterRip, pVCpu->cpum.GstCtx.rip);
    if (fWhat & CPUMCTX_EXTRN_RFLAGS)
        ADD_REG64(WHvX64RegisterRflags, pVCpu->cpum.GstCtx.rflags.u);

    /* Segments */
#  define ADD_SEG(a_enmName, a_SReg) \
        do { \
            aenmNames[iReg]                  = a_enmName; \
            aValues[iReg].Segment.Base       = (a_SReg).u64Base; \
            aValues[iReg].Segment.Limit      = (a_SReg).u32Limit; \
            aValues[iReg].Segment.Selector   = (a_SReg).Sel; \
            aValues[iReg].Segment.Attributes = (a_SReg).Attr.u; \
            iReg++; \
        } while (0)
    if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_ES)
            ADD_SEG(WHvX64RegisterEs, pVCpu->cpum.GstCtx.es);
        if (fWhat & CPUMCTX_EXTRN_CS)
            ADD_SEG(WHvX64RegisterCs, pVCpu->cpum.GstCtx.cs);
        if (fWhat & CPUMCTX_EXTRN_SS)
            ADD_SEG(WHvX64RegisterSs, pVCpu->cpum.GstCtx.ss);
        if (fWhat & CPUMCTX_EXTRN_DS)
            ADD_SEG(WHvX64RegisterDs, pVCpu->cpum.GstCtx.ds);
        if (fWhat & CPUMCTX_EXTRN_FS)
            ADD_SEG(WHvX64RegisterFs, pVCpu->cpum.GstCtx.fs);
        if (fWhat & CPUMCTX_EXTRN_GS)
            ADD_SEG(WHvX64RegisterGs, pVCpu->cpum.GstCtx.gs);
    }

    /* Descriptor tables & task segment. */
    if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_LDTR)
            ADD_SEG(WHvX64RegisterLdtr, pVCpu->cpum.GstCtx.ldtr);
        if (fWhat & CPUMCTX_EXTRN_TR)
            ADD_SEG(WHvX64RegisterTr, pVCpu->cpum.GstCtx.tr);
        if (fWhat & CPUMCTX_EXTRN_IDTR)
        {
            aenmNames[iReg]           = WHvX64RegisterIdtr;
            aValues[iReg].Table.Limit = pVCpu->cpum.GstCtx.idtr.cbIdt;
            aValues[iReg].Table.Base  = pVCpu->cpum.GstCtx.idtr.pIdt;
            iReg++;
        }
        if (fWhat & CPUMCTX_EXTRN_GDTR)
        {
            aenmNames[iReg]           = WHvX64RegisterGdtr;
            aValues[iReg].Table.Limit = pVCpu->cpum.GstCtx.gdtr.cbGdt;
            aValues[iReg].Table.Base  = pVCpu->cpum.GstCtx.gdtr.pGdt;
            iReg++;
        }
    }

    /* Control registers. */
    if (fWhat & CPUMCTX_EXTRN_CR_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_CR0)
            ADD_REG64(WHvX64RegisterCr0, pVCpu->cpum.GstCtx.cr0);
        if (fWhat & CPUMCTX_EXTRN_CR2)
            ADD_REG64(WHvX64RegisterCr2, pVCpu->cpum.GstCtx.cr2);
        if (fWhat & CPUMCTX_EXTRN_CR3)
            ADD_REG64(WHvX64RegisterCr3, pVCpu->cpum.GstCtx.cr3);
        if (fWhat & CPUMCTX_EXTRN_CR4)
            ADD_REG64(WHvX64RegisterCr4, pVCpu->cpum.GstCtx.cr4);
    }
    if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
        ADD_REG64(WHvX64RegisterCr8, CPUMGetGuestCR8(pVCpu));

    /* Debug registers. */
/** @todo fixme. Figure out what the hyper-v version of KVM_SET_GUEST_DEBUG would be. */
    if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
    {
        ADD_REG64(WHvX64RegisterDr0, pVCpu->cpum.GstCtx.dr[0]); // CPUMGetHyperDR0(pVCpu));
        ADD_REG64(WHvX64RegisterDr1, pVCpu->cpum.GstCtx.dr[1]); // CPUMGetHyperDR1(pVCpu));
        ADD_REG64(WHvX64RegisterDr2, pVCpu->cpum.GstCtx.dr[2]); // CPUMGetHyperDR2(pVCpu));
        ADD_REG64(WHvX64RegisterDr3, pVCpu->cpum.GstCtx.dr[3]); // CPUMGetHyperDR3(pVCpu));
    }
    if (fWhat & CPUMCTX_EXTRN_DR6)
        ADD_REG64(WHvX64RegisterDr6, pVCpu->cpum.GstCtx.dr[6]); // CPUMGetHyperDR6(pVCpu));
    if (fWhat & CPUMCTX_EXTRN_DR7)
        ADD_REG64(WHvX64RegisterDr7, pVCpu->cpum.GstCtx.dr[7]); // CPUMGetHyperDR7(pVCpu));

    /* Floating point state. */
    if (fWhat & CPUMCTX_EXTRN_X87)
    {
        ADD_REG128(WHvX64RegisterFpMmx0, pVCpu->cpum.GstCtx.XState.x87.aRegs[0].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[0].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx1, pVCpu->cpum.GstCtx.XState.x87.aRegs[1].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[1].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx2, pVCpu->cpum.GstCtx.XState.x87.aRegs[2].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[2].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx3, pVCpu->cpum.GstCtx.XState.x87.aRegs[3].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[3].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx4, pVCpu->cpum.GstCtx.XState.x87.aRegs[4].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[4].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx5, pVCpu->cpum.GstCtx.XState.x87.aRegs[5].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[5].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx6, pVCpu->cpum.GstCtx.XState.x87.aRegs[6].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[6].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx7, pVCpu->cpum.GstCtx.XState.x87.aRegs[7].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[7].au64[1]);

        aenmNames[iReg]                         = WHvX64RegisterFpControlStatus;
        aValues[iReg].FpControlStatus.FpControl = pVCpu->cpum.GstCtx.XState.x87.FCW;
        aValues[iReg].FpControlStatus.FpStatus  = pVCpu->cpum.GstCtx.XState.x87.FSW;
        aValues[iReg].FpControlStatus.FpTag     = pVCpu->cpum.GstCtx.XState.x87.FTW;
        aValues[iReg].FpControlStatus.Reserved  = pVCpu->cpum.GstCtx.XState.x87.FTW >> 8;
        aValues[iReg].FpControlStatus.LastFpOp  = pVCpu->cpum.GstCtx.XState.x87.FOP;
        aValues[iReg].FpControlStatus.LastFpRip = (pVCpu->cpum.GstCtx.XState.x87.FPUIP)
                                                | ((uint64_t)pVCpu->cpum.GstCtx.XState.x87.CS << 32)
                                                | ((uint64_t)pVCpu->cpum.GstCtx.XState.x87.Rsrvd1 << 48);
        iReg++;

        aenmNames[iReg]                                     = WHvX64RegisterXmmControlStatus;
        aValues[iReg].XmmControlStatus.LastFpRdp            = (pVCpu->cpum.GstCtx.XState.x87.FPUDP)
                                                            | ((uint64_t)pVCpu->cpum.GstCtx.XState.x87.DS << 32)
                                                            | ((uint64_t)pVCpu->cpum.GstCtx.XState.x87.Rsrvd2 << 48);
        aValues[iReg].XmmControlStatus.XmmStatusControl     = pVCpu->cpum.GstCtx.XState.x87.MXCSR;
        aValues[iReg].XmmControlStatus.XmmStatusControlMask = pVCpu->cpum.GstCtx.XState.x87.MXCSR_MASK; /** @todo ??? (Isn't this an output field?) */
        iReg++;
    }

    /* Vector state. */
    if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
    {
        ADD_REG128(WHvX64RegisterXmm0,  pVCpu->cpum.GstCtx.XState.x87.aXMM[ 0].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 0].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm1,  pVCpu->cpum.GstCtx.XState.x87.aXMM[ 1].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 1].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm2,  pVCpu->cpum.GstCtx.XState.x87.aXMM[ 2].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 2].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm3,  pVCpu->cpum.GstCtx.XState.x87.aXMM[ 3].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 3].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm4,  pVCpu->cpum.GstCtx.XState.x87.aXMM[ 4].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 4].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm5,  pVCpu->cpum.GstCtx.XState.x87.aXMM[ 5].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 5].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm6,  pVCpu->cpum.GstCtx.XState.x87.aXMM[ 6].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 6].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm7,  pVCpu->cpum.GstCtx.XState.x87.aXMM[ 7].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 7].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm8,  pVCpu->cpum.GstCtx.XState.x87.aXMM[ 8].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 8].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm9,  pVCpu->cpum.GstCtx.XState.x87.aXMM[ 9].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 9].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm10, pVCpu->cpum.GstCtx.XState.x87.aXMM[10].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[10].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm11, pVCpu->cpum.GstCtx.XState.x87.aXMM[11].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[11].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm12, pVCpu->cpum.GstCtx.XState.x87.aXMM[12].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[12].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm13, pVCpu->cpum.GstCtx.XState.x87.aXMM[13].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[13].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm14, pVCpu->cpum.GstCtx.XState.x87.aXMM[14].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[14].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm15, pVCpu->cpum.GstCtx.XState.x87.aXMM[15].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[15].uXmm.s.Hi);
    }

    /* MSRs */
    // WHvX64RegisterTsc - don't touch
    if (fWhat & CPUMCTX_EXTRN_EFER)
        ADD_REG64(WHvX64RegisterEfer, pVCpu->cpum.GstCtx.msrEFER);
    if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
        ADD_REG64(WHvX64RegisterKernelGsBase, pVCpu->cpum.GstCtx.msrKERNELGSBASE);
    if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
    {
        ADD_REG64(WHvX64RegisterSysenterCs, pVCpu->cpum.GstCtx.SysEnter.cs);
        ADD_REG64(WHvX64RegisterSysenterEip, pVCpu->cpum.GstCtx.SysEnter.eip);
        ADD_REG64(WHvX64RegisterSysenterEsp, pVCpu->cpum.GstCtx.SysEnter.esp);
    }
    if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
    {
        ADD_REG64(WHvX64RegisterStar, pVCpu->cpum.GstCtx.msrSTAR);
        ADD_REG64(WHvX64RegisterLstar, pVCpu->cpum.GstCtx.msrLSTAR);
        ADD_REG64(WHvX64RegisterCstar, pVCpu->cpum.GstCtx.msrCSTAR);
        ADD_REG64(WHvX64RegisterSfmask, pVCpu->cpum.GstCtx.msrSFMASK);
    }
    if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
    {
        ADD_REG64(WHvX64RegisterApicBase, APICGetBaseMsrNoCheck(pVCpu));
        ADD_REG64(WHvX64RegisterPat, pVCpu->cpum.GstCtx.msrPAT);
#if 0 /** @todo check if WHvX64RegisterMsrMtrrCap works here... */
        ADD_REG64(WHvX64RegisterMsrMtrrCap, CPUMGetGuestIa32MtrrCap(pVCpu));
#endif
        PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
        ADD_REG64(WHvX64RegisterMsrMtrrDefType, pCtxMsrs->msr.MtrrDefType);
        ADD_REG64(WHvX64RegisterMsrMtrrFix64k00000, pCtxMsrs->msr.MtrrFix64K_00000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix16k80000, pCtxMsrs->msr.MtrrFix16K_80000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix16kA0000, pCtxMsrs->msr.MtrrFix16K_A0000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kC0000, pCtxMsrs->msr.MtrrFix4K_C0000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kC8000, pCtxMsrs->msr.MtrrFix4K_C8000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kD0000, pCtxMsrs->msr.MtrrFix4K_D0000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kD8000, pCtxMsrs->msr.MtrrFix4K_D8000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kE0000, pCtxMsrs->msr.MtrrFix4K_E0000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kE8000, pCtxMsrs->msr.MtrrFix4K_E8000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kF0000, pCtxMsrs->msr.MtrrFix4K_F0000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kF8000, pCtxMsrs->msr.MtrrFix4K_F8000);
        ADD_REG64(WHvX64RegisterTscAux, pCtxMsrs->msr.TscAux);
#if 0 /** @todo these registers aren't available? Might explain something... */
        const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pVM);
        if (enmCpuVendor != CPUMCPUVENDOR_AMD)
        {
            ADD_REG64(HvX64RegisterIa32MiscEnable, pCtxMsrs->msr.MiscEnable);
            ADD_REG64(HvX64RegisterIa32FeatureControl, CPUMGetGuestIa32FeatureControl(pVCpu));
        }
#endif
    }

    /* event injection (clear it). */
    if (fWhat & CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)
        ADD_REG64(WHvRegisterPendingInterruption, 0);

    /* Interruptibility state.  This can get a little complicated since we get
       half of the state via HV_X64_VP_EXECUTION_STATE. */
    if (   (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
        ==          (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI) )
    {
        ADD_REG64(WHvRegisterInterruptState, 0);
        if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
            && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip)
            aValues[iReg - 1].InterruptState.InterruptShadow = 1;
        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
            aValues[iReg - 1].InterruptState.NmiMasked = 1;
    }
    else if (fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT)
    {
        if (   pVCpu->nem.s.fLastInterruptShadow
            || (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
                && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip))
        {
            ADD_REG64(WHvRegisterInterruptState, 0);
            if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
                && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip)
                aValues[iReg - 1].InterruptState.InterruptShadow = 1;
            /** @todo Retrieve NMI state, currently assuming it's zero. (yes this may happen on I/O) */
            //if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
            //    aValues[iReg - 1].InterruptState.NmiMasked = 1;
        }
    }
    else
        Assert(!(fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI));

    /* Interrupt windows.  Always set if active as Hyper-V seems to be forgetful. */
    uint8_t const fDesiredIntWin = pVCpu->nem.s.fDesiredInterruptWindows;
    if (   fDesiredIntWin
        || pVCpu->nem.s.fCurrentInterruptWindows != fDesiredIntWin)
    {
        pVCpu->nem.s.fCurrentInterruptWindows = pVCpu->nem.s.fDesiredInterruptWindows;
        Log8(("Setting WHvX64RegisterDeliverabilityNotifications, fDesiredIntWin=%X\n", fDesiredIntWin));
        ADD_REG64(WHvX64RegisterDeliverabilityNotifications, fDesiredIntWin);
        Assert(aValues[iReg - 1].DeliverabilityNotifications.NmiNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_NMI));
        Assert(aValues[iReg - 1].DeliverabilityNotifications.InterruptNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_REGULAR));
        Assert(aValues[iReg - 1].DeliverabilityNotifications.InterruptPriority == (unsigned)((fDesiredIntWin & NEM_WIN_INTW_F_PRIO_MASK) >> NEM_WIN_INTW_F_PRIO_SHIFT));
    }

    /// @todo WHvRegisterPendingEvent

    /*
     * Set the registers.
     */
    Assert(iReg < RT_ELEMENTS(aValues));
    Assert(iReg < RT_ELEMENTS(aenmNames));
#  ifdef NEM_WIN_INTERCEPT_NT_IO_CTLS
    Log12(("Calling WHvSetVirtualProcessorRegisters(%p, %u, %p, %u, %p)\n",
           pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues));
#  endif
    HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues);
    if (SUCCEEDED(hrc))
    {
        pVCpu->cpum.GstCtx.fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM;
        return VINF_SUCCESS;
    }
    AssertLogRelMsgFailed(("WHvSetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
                           pVM->nem.s.hPartition, pVCpu->idCpu, iReg,
                           hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
    return VERR_INTERNAL_ERROR;

#  undef ADD_REG64
#  undef ADD_REG128
#  undef ADD_SEG

# endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
}


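/**
 * Imports (copies) the requested guest CPU state from Hyper-V, ring-3 edition.
 *
 * The counterpart to nemHCWinCopyStateToHyperV: queries the registers selected
 * by @a fWhat (CPUMCTX_EXTRN_XXX) via the ring-0 VMMR0_DO_NEM_IMPORT_STATE
 * call or WHvGetVirtualProcessorRegisters(), copies them into the guest
 * context, and clears the corresponding fExtrn bits.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the caller.
 * @param   fWhat   What to import, CPUMCTX_EXTRN_XXX.
 */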
NEM_TMPL_STATIC int nemHCWinCopyStateFromHyperV(PVMCC pVM, PVMCPUCC pVCpu, uint64_t fWhat)
{
# if defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) || defined(NEM_WIN_WITH_RING0_RUNLOOP)
#  if !defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) && defined(NEM_WIN_WITH_RING0_RUNLOOP)
    if (pVM->nem.s.fUseRing0Runloop)
#  endif
    {
        /* See NEMR0ImportState */
        int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_IMPORT_STATE, fWhat, NULL);
        if (RT_SUCCESS(rc))
            return rc;
        if (rc == VERR_NEM_FLUSH_TLB)
        {
            rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /*fGlobal*/, false /*fPdpesMapped*/);
            return rc;
        }
        AssertLogRelRCReturn(rc, rc);
        return rc;
    }
# endif
# ifndef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS
    WHV_REGISTER_NAME aenmNames[128];

    fWhat &= pVCpu->cpum.GstCtx.fExtrn;
    uintptr_t iReg = 0;

    /* GPRs */
    if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_RAX)
            aenmNames[iReg++] = WHvX64RegisterRax;
        if (fWhat & CPUMCTX_EXTRN_RCX)
            aenmNames[iReg++] = WHvX64RegisterRcx;
        if (fWhat & CPUMCTX_EXTRN_RDX)
            aenmNames[iReg++] = WHvX64RegisterRdx;
        if (fWhat & CPUMCTX_EXTRN_RBX)
            aenmNames[iReg++] = WHvX64RegisterRbx;
        if (fWhat & CPUMCTX_EXTRN_RSP)
            aenmNames[iReg++] = WHvX64RegisterRsp;
        if (fWhat & CPUMCTX_EXTRN_RBP)
            aenmNames[iReg++] = WHvX64RegisterRbp;
        if (fWhat & CPUMCTX_EXTRN_RSI)
            aenmNames[iReg++] = WHvX64RegisterRsi;
        if (fWhat & CPUMCTX_EXTRN_RDI)
            aenmNames[iReg++] = WHvX64RegisterRdi;
        if (fWhat & CPUMCTX_EXTRN_R8_R15)
        {
            aenmNames[iReg++] = WHvX64RegisterR8;
            aenmNames[iReg++] = WHvX64RegisterR9;
            aenmNames[iReg++] = WHvX64RegisterR10;
            aenmNames[iReg++] = WHvX64RegisterR11;
            aenmNames[iReg++] = WHvX64RegisterR12;
            aenmNames[iReg++] = WHvX64RegisterR13;
            aenmNames[iReg++] = WHvX64RegisterR14;
            aenmNames[iReg++] = WHvX64RegisterR15;
        }
    }

    /* RIP & Flags */
    if (fWhat & CPUMCTX_EXTRN_RIP)
        aenmNames[iReg++] = WHvX64RegisterRip;
    if (fWhat & CPUMCTX_EXTRN_RFLAGS)
        aenmNames[iReg++] = WHvX64RegisterRflags;

    /* Segments */
    if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_ES)
            aenmNames[iReg++] = WHvX64RegisterEs;
        if (fWhat & CPUMCTX_EXTRN_CS)
            aenmNames[iReg++] = WHvX64RegisterCs;
        if (fWhat & CPUMCTX_EXTRN_SS)
            aenmNames[iReg++] = WHvX64RegisterSs;
        if (fWhat & CPUMCTX_EXTRN_DS)
            aenmNames[iReg++] = WHvX64RegisterDs;
        if (fWhat & CPUMCTX_EXTRN_FS)
            aenmNames[iReg++] = WHvX64RegisterFs;
        if (fWhat & CPUMCTX_EXTRN_GS)
            aenmNames[iReg++] = WHvX64RegisterGs;
    }

    /* Descriptor tables. */
    if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_LDTR)
            aenmNames[iReg++] = WHvX64RegisterLdtr;
        if (fWhat & CPUMCTX_EXTRN_TR)
            aenmNames[iReg++] = WHvX64RegisterTr;
        if (fWhat & CPUMCTX_EXTRN_IDTR)
            aenmNames[iReg++] = WHvX64RegisterIdtr;
        if (fWhat & CPUMCTX_EXTRN_GDTR)
            aenmNames[iReg++] = WHvX64RegisterGdtr;
    }

    /* Control registers. */
    if (fWhat & CPUMCTX_EXTRN_CR_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_CR0)
            aenmNames[iReg++] = WHvX64RegisterCr0;
        if (fWhat & CPUMCTX_EXTRN_CR2)
            aenmNames[iReg++] = WHvX64RegisterCr2;
        if (fWhat & CPUMCTX_EXTRN_CR3)
            aenmNames[iReg++] = WHvX64RegisterCr3;
        if (fWhat & CPUMCTX_EXTRN_CR4)
            aenmNames[iReg++] = WHvX64RegisterCr4;
    }
    if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
        aenmNames[iReg++] = WHvX64RegisterCr8;

    /* Debug registers. */
    if (fWhat & CPUMCTX_EXTRN_DR7)
        aenmNames[iReg++] = WHvX64RegisterDr7;
    if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
    {
        if (!(fWhat & CPUMCTX_EXTRN_DR7) && (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_DR7))
        {
            fWhat |= CPUMCTX_EXTRN_DR7;
            aenmNames[iReg++] = WHvX64RegisterDr7;
        }
        aenmNames[iReg++] = WHvX64RegisterDr0;
        aenmNames[iReg++] = WHvX64RegisterDr1;
        aenmNames[iReg++] = WHvX64RegisterDr2;
        aenmNames[iReg++] = WHvX64RegisterDr3;
    }
    if (fWhat & CPUMCTX_EXTRN_DR6)
        aenmNames[iReg++] = WHvX64RegisterDr6;

    /* Floating point state. */
    if (fWhat & CPUMCTX_EXTRN_X87)
    {
        aenmNames[iReg++] = WHvX64RegisterFpMmx0;
        aenmNames[iReg++] = WHvX64RegisterFpMmx1;
        aenmNames[iReg++] = WHvX64RegisterFpMmx2;
        aenmNames[iReg++] = WHvX64RegisterFpMmx3;
        aenmNames[iReg++] = WHvX64RegisterFpMmx4;
        aenmNames[iReg++] = WHvX64RegisterFpMmx5;
        aenmNames[iReg++] = WHvX64RegisterFpMmx6;
        aenmNames[iReg++] = WHvX64RegisterFpMmx7;
        aenmNames[iReg++] = WHvX64RegisterFpControlStatus;
    }
    if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
        aenmNames[iReg++] = WHvX64RegisterXmmControlStatus;

    /* Vector state. */
    if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
    {
        aenmNames[iReg++] = WHvX64RegisterXmm0;
        aenmNames[iReg++] = WHvX64RegisterXmm1;
        aenmNames[iReg++] = WHvX64RegisterXmm2;
        aenmNames[iReg++] = WHvX64RegisterXmm3;
        aenmNames[iReg++] = WHvX64RegisterXmm4;
        aenmNames[iReg++] = WHvX64RegisterXmm5;
        aenmNames[iReg++] = WHvX64RegisterXmm6;
        aenmNames[iReg++] = WHvX64RegisterXmm7;
        aenmNames[iReg++] = WHvX64RegisterXmm8;
        aenmNames[iReg++] = WHvX64RegisterXmm9;
        aenmNames[iReg++] = WHvX64RegisterXmm10;
        aenmNames[iReg++] = WHvX64RegisterXmm11;
        aenmNames[iReg++] = WHvX64RegisterXmm12;
        aenmNames[iReg++] = WHvX64RegisterXmm13;
        aenmNames[iReg++] = WHvX64RegisterXmm14;
        aenmNames[iReg++] = WHvX64RegisterXmm15;
    }

    /* MSRs */
    // WHvX64RegisterTsc - don't touch
    if (fWhat & CPUMCTX_EXTRN_EFER)
        aenmNames[iReg++] = WHvX64RegisterEfer;
    if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
        aenmNames[iReg++] = WHvX64RegisterKernelGsBase;
    if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
    {
        aenmNames[iReg++] = WHvX64RegisterSysenterCs;
        aenmNames[iReg++] = WHvX64RegisterSysenterEip;
        aenmNames[iReg++] = WHvX64RegisterSysenterEsp;
    }
    if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
    {
        aenmNames[iReg++] = WHvX64RegisterStar;
        aenmNames[iReg++] = WHvX64RegisterLstar;
        aenmNames[iReg++] = WHvX64RegisterCstar;
        aenmNames[iReg++] = WHvX64RegisterSfmask;
    }

//#ifdef LOG_ENABLED
//    const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pVM);
//#endif
    if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
    {
        aenmNames[iReg++] = WHvX64RegisterApicBase; /// @todo APIC BASE
        aenmNames[iReg++] = WHvX64RegisterPat;
#if 0 /*def LOG_ENABLED*/ /** @todo Check if WHvX64RegisterMsrMtrrCap works... */
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrCap;
#endif
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrDefType;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix64k00000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix16k80000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix16kA0000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kC0000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kC8000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kD0000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kD8000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kE0000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kE8000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kF0000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kF8000;
        aenmNames[iReg++] = WHvX64RegisterTscAux;
        /** @todo look for HvX64RegisterIa32MiscEnable and HvX64RegisterIa32FeatureControl? */
//#ifdef LOG_ENABLED
//        if (enmCpuVendor != CPUMCPUVENDOR_AMD)
//            aenmNames[iReg++] = HvX64RegisterIa32FeatureControl;
//#endif
    }

    /* Interruptibility. */
    if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
    {
        aenmNames[iReg++] = WHvRegisterInterruptState;
        aenmNames[iReg++] = WHvX64RegisterRip;
    }

    /* event injection */
    aenmNames[iReg++] = WHvRegisterPendingInterruption;
    aenmNames[iReg++] = WHvRegisterPendingEvent0; /** @todo renamed to WHvRegisterPendingEvent */

    size_t const cRegs = iReg;
    Assert(cRegs < RT_ELEMENTS(aenmNames));

    /*
     * Get the registers.
     */
    WHV_REGISTER_VALUE aValues[128];
    RT_ZERO(aValues);
    Assert(RT_ELEMENTS(aValues) >= cRegs);
    Assert(RT_ELEMENTS(aenmNames) >= cRegs);
#  ifdef NEM_WIN_INTERCEPT_NT_IO_CTLS
    Log12(("Calling WHvGetVirtualProcessorRegisters(%p, %u, %p, %u, %p)\n",
           pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, cRegs, aValues));
#  endif
    HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, (uint32_t)cRegs, aValues);
    AssertLogRelMsgReturn(SUCCEEDED(hrc),
                          ("WHvGetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
                           pVM->nem.s.hPartition, pVCpu->idCpu, cRegs, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
                          , VERR_NEM_GET_REGISTERS_FAILED);

    iReg = 0;
#  define GET_REG64(a_DstVar, a_enmName) do { \
        Assert(aenmNames[iReg] == (a_enmName)); \
        (a_DstVar) = aValues[iReg].Reg64; \
        iReg++; \
    } while (0)
#  define GET_REG64_LOG7(a_DstVar, a_enmName, a_szLogName) do { \
        Assert(aenmNames[iReg] == (a_enmName)); \
        if ((a_DstVar) != aValues[iReg].Reg64) \
            Log7(("NEM/%u: " a_szLogName " changed %RX64 -> %RX64\n", pVCpu->idCpu, (a_DstVar), aValues[iReg].Reg64)); \
        (a_DstVar) = aValues[iReg].Reg64; \
        iReg++; \
    } while (0)
#  define GET_REG128(a_DstVarLo, a_DstVarHi, a_enmName) do { \
        Assert(aenmNames[iReg] == a_enmName); \
        (a_DstVarLo) = aValues[iReg].Reg128.Low64; \
        (a_DstVarHi) = aValues[iReg].Reg128.High64; \
        iReg++; \
    } while (0)
#  define GET_SEG(a_SReg, a_enmName) do { \
        Assert(aenmNames[iReg] == (a_enmName)); \
        NEM_WIN_COPY_BACK_SEG(a_SReg, aValues[iReg].Segment); \
        iReg++; \
    } while (0)

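    /* The GET_* macros below consume aValues[] in exactly the order the names
       were queued above; the Asserts guard against the two lists drifting
       out of sync. */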
    /* GPRs */
    if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_RAX)
            GET_REG64(pVCpu->cpum.GstCtx.rax, WHvX64RegisterRax);
        if (fWhat & CPUMCTX_EXTRN_RCX)
            GET_REG64(pVCpu->cpum.GstCtx.rcx, WHvX64RegisterRcx);
        if (fWhat & CPUMCTX_EXTRN_RDX)
            GET_REG64(pVCpu->cpum.GstCtx.rdx, WHvX64RegisterRdx);
        if (fWhat & CPUMCTX_EXTRN_RBX)
            GET_REG64(pVCpu->cpum.GstCtx.rbx, WHvX64RegisterRbx);
        if (fWhat & CPUMCTX_EXTRN_RSP)
            GET_REG64(pVCpu->cpum.GstCtx.rsp, WHvX64RegisterRsp);
        if (fWhat & CPUMCTX_EXTRN_RBP)
            GET_REG64(pVCpu->cpum.GstCtx.rbp, WHvX64RegisterRbp);
        if (fWhat & CPUMCTX_EXTRN_RSI)
            GET_REG64(pVCpu->cpum.GstCtx.rsi, WHvX64RegisterRsi);
        if (fWhat & CPUMCTX_EXTRN_RDI)
            GET_REG64(pVCpu->cpum.GstCtx.rdi, WHvX64RegisterRdi);
        if (fWhat & CPUMCTX_EXTRN_R8_R15)
        {
            GET_REG64(pVCpu->cpum.GstCtx.r8, WHvX64RegisterR8);
            GET_REG64(pVCpu->cpum.GstCtx.r9, WHvX64RegisterR9);
            GET_REG64(pVCpu->cpum.GstCtx.r10, WHvX64RegisterR10);
            GET_REG64(pVCpu->cpum.GstCtx.r11, WHvX64RegisterR11);
            GET_REG64(pVCpu->cpum.GstCtx.r12, WHvX64RegisterR12);
            GET_REG64(pVCpu->cpum.GstCtx.r13, WHvX64RegisterR13);
            GET_REG64(pVCpu->cpum.GstCtx.r14, WHvX64RegisterR14);
            GET_REG64(pVCpu->cpum.GstCtx.r15, WHvX64RegisterR15);
        }
    }

    /* RIP & Flags */
    if (fWhat & CPUMCTX_EXTRN_RIP)
        GET_REG64(pVCpu->cpum.GstCtx.rip, WHvX64RegisterRip);
    if (fWhat & CPUMCTX_EXTRN_RFLAGS)
        GET_REG64(pVCpu->cpum.GstCtx.rflags.u, WHvX64RegisterRflags);

    /* Segments */
    if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_ES)
            GET_SEG(pVCpu->cpum.GstCtx.es, WHvX64RegisterEs);
        if (fWhat & CPUMCTX_EXTRN_CS)
            GET_SEG(pVCpu->cpum.GstCtx.cs, WHvX64RegisterCs);
        if (fWhat & CPUMCTX_EXTRN_SS)
            GET_SEG(pVCpu->cpum.GstCtx.ss, WHvX64RegisterSs);
        if (fWhat & CPUMCTX_EXTRN_DS)
            GET_SEG(pVCpu->cpum.GstCtx.ds, WHvX64RegisterDs);
        if (fWhat & CPUMCTX_EXTRN_FS)
            GET_SEG(pVCpu->cpum.GstCtx.fs, WHvX64RegisterFs);
        if (fWhat & CPUMCTX_EXTRN_GS)
            GET_SEG(pVCpu->cpum.GstCtx.gs, WHvX64RegisterGs);
    }

    /* Descriptor tables and the task segment. */
    if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_LDTR)
            GET_SEG(pVCpu->cpum.GstCtx.ldtr, WHvX64RegisterLdtr);

        if (fWhat & CPUMCTX_EXTRN_TR)
        {
            /* AMD-V likes loading TR in the AVAIL state, whereas Intel insists on BUSY.  To
               avoid triggering sanity assertions around the code, always fix this up. */
            GET_SEG(pVCpu->cpum.GstCtx.tr, WHvX64RegisterTr);
            switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
            {
                case X86_SEL_TYPE_SYS_386_TSS_BUSY:
                case X86_SEL_TYPE_SYS_286_TSS_BUSY:
                    break;
                case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
                    pVCpu->cpum.GstCtx.tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
                    break;
                case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
                    pVCpu->cpum.GstCtx.tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;
                    break;
            }
        }
        if (fWhat & CPUMCTX_EXTRN_IDTR)
        {
            Assert(aenmNames[iReg] == WHvX64RegisterIdtr);
            pVCpu->cpum.GstCtx.idtr.cbIdt = aValues[iReg].Table.Limit;
            pVCpu->cpum.GstCtx.idtr.pIdt  = aValues[iReg].Table.Base;
            iReg++;
        }
        if (fWhat & CPUMCTX_EXTRN_GDTR)
        {
            Assert(aenmNames[iReg] == WHvX64RegisterGdtr);
            pVCpu->cpum.GstCtx.gdtr.cbGdt = aValues[iReg].Table.Limit;
            pVCpu->cpum.GstCtx.gdtr.pGdt  = aValues[iReg].Table.Base;
            iReg++;
        }
    }

    /* Control registers. */
    bool fMaybeChangedMode = false;
    bool fUpdateCr3        = false;
    if (fWhat & CPUMCTX_EXTRN_CR_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_CR0)
        {
            Assert(aenmNames[iReg] == WHvX64RegisterCr0);
            if (pVCpu->cpum.GstCtx.cr0 != aValues[iReg].Reg64)
            {
                CPUMSetGuestCR0(pVCpu, aValues[iReg].Reg64);
                fMaybeChangedMode = true;
            }
            iReg++;
        }
        if (fWhat & CPUMCTX_EXTRN_CR2)
            GET_REG64(pVCpu->cpum.GstCtx.cr2, WHvX64RegisterCr2);
        if (fWhat & CPUMCTX_EXTRN_CR3)
        {
            if (pVCpu->cpum.GstCtx.cr3 != aValues[iReg].Reg64)
            {
                CPUMSetGuestCR3(pVCpu, aValues[iReg].Reg64);
                fUpdateCr3 = true;
            }
            iReg++;
        }
        if (fWhat & CPUMCTX_EXTRN_CR4)
        {
            if (pVCpu->cpum.GstCtx.cr4 != aValues[iReg].Reg64)
            {
                CPUMSetGuestCR4(pVCpu, aValues[iReg].Reg64);
                fMaybeChangedMode = true;
            }
            iReg++;
        }
    }
    if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
    {
        Assert(aenmNames[iReg] == WHvX64RegisterCr8);
        APICSetTpr(pVCpu, (uint8_t)aValues[iReg].Reg64 << 4);
        iReg++;
    }

    /* Debug registers. */
    if (fWhat & CPUMCTX_EXTRN_DR7)
    {
        Assert(aenmNames[iReg] == WHvX64RegisterDr7);
        if (pVCpu->cpum.GstCtx.dr[7] != aValues[iReg].Reg64)
            CPUMSetGuestDR7(pVCpu, aValues[iReg].Reg64);
        pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_DR7; /* Hack alert! Avoids asserting when processing CPUMCTX_EXTRN_DR0_DR3. */
        iReg++;
    }
    if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
    {
        Assert(aenmNames[iReg]     == WHvX64RegisterDr0);
        Assert(aenmNames[iReg + 3] == WHvX64RegisterDr3);
        if (pVCpu->cpum.GstCtx.dr[0] != aValues[iReg].Reg64)
            CPUMSetGuestDR0(pVCpu, aValues[iReg].Reg64);
        iReg++;
        if (pVCpu->cpum.GstCtx.dr[1] != aValues[iReg].Reg64)
            CPUMSetGuestDR1(pVCpu, aValues[iReg].Reg64);
        iReg++;
        if (pVCpu->cpum.GstCtx.dr[2] != aValues[iReg].Reg64)
            CPUMSetGuestDR2(pVCpu, aValues[iReg].Reg64);
        iReg++;
        if (pVCpu->cpum.GstCtx.dr[3] != aValues[iReg].Reg64)
            CPUMSetGuestDR3(pVCpu, aValues[iReg].Reg64);
        iReg++;
    }
    if (fWhat & CPUMCTX_EXTRN_DR6)
    {
        Assert(aenmNames[iReg] == WHvX64RegisterDr6);
        if (pVCpu->cpum.GstCtx.dr[6] != aValues[iReg].Reg64)
            CPUMSetGuestDR6(pVCpu, aValues[iReg].Reg64);
        iReg++;
    }

    /* Floating point state. */
    if (fWhat & CPUMCTX_EXTRN_X87)
    {
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aRegs[0].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[0].au64[1], WHvX64RegisterFpMmx0);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aRegs[1].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[1].au64[1], WHvX64RegisterFpMmx1);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aRegs[2].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[2].au64[1], WHvX64RegisterFpMmx2);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aRegs[3].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[3].au64[1], WHvX64RegisterFpMmx3);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aRegs[4].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[4].au64[1], WHvX64RegisterFpMmx4);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aRegs[5].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[5].au64[1], WHvX64RegisterFpMmx5);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aRegs[6].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[6].au64[1], WHvX64RegisterFpMmx6);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aRegs[7].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[7].au64[1], WHvX64RegisterFpMmx7);

        Assert(aenmNames[iReg] == WHvX64RegisterFpControlStatus);
        pVCpu->cpum.GstCtx.XState.x87.FCW    = aValues[iReg].FpControlStatus.FpControl;
        pVCpu->cpum.GstCtx.XState.x87.FSW    = aValues[iReg].FpControlStatus.FpStatus;
        pVCpu->cpum.GstCtx.XState.x87.FTW    = aValues[iReg].FpControlStatus.FpTag
                                               /*| (aValues[iReg].FpControlStatus.Reserved << 8)*/;
        pVCpu->cpum.GstCtx.XState.x87.FOP    = aValues[iReg].FpControlStatus.LastFpOp;
        pVCpu->cpum.GstCtx.XState.x87.FPUIP  = (uint32_t)aValues[iReg].FpControlStatus.LastFpRip;
        pVCpu->cpum.GstCtx.XState.x87.CS     = (uint16_t)(aValues[iReg].FpControlStatus.LastFpRip >> 32);
        pVCpu->cpum.GstCtx.XState.x87.Rsrvd1 = (uint16_t)(aValues[iReg].FpControlStatus.LastFpRip >> 48);
        iReg++;
    }

    if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
    {
        Assert(aenmNames[iReg] == WHvX64RegisterXmmControlStatus);
        if (fWhat & CPUMCTX_EXTRN_X87)
        {
            pVCpu->cpum.GstCtx.XState.x87.FPUDP  = (uint32_t)aValues[iReg].XmmControlStatus.LastFpRdp;
            pVCpu->cpum.GstCtx.XState.x87.DS     = (uint16_t)(aValues[iReg].XmmControlStatus.LastFpRdp >> 32);
            pVCpu->cpum.GstCtx.XState.x87.Rsrvd2 = (uint16_t)(aValues[iReg].XmmControlStatus.LastFpRdp >> 48);
        }
        pVCpu->cpum.GstCtx.XState.x87.MXCSR      = aValues[iReg].XmmControlStatus.XmmStatusControl;
        pVCpu->cpum.GstCtx.XState.x87.MXCSR_MASK = aValues[iReg].XmmControlStatus.XmmStatusControlMask; /** @todo ??? (Isn't this an output field?) */
        iReg++;
    }

    /* Vector state. */
    if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
    {
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 0].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 0].uXmm.s.Hi, WHvX64RegisterXmm0);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 1].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 1].uXmm.s.Hi, WHvX64RegisterXmm1);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 2].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 2].uXmm.s.Hi, WHvX64RegisterXmm2);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 3].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 3].uXmm.s.Hi, WHvX64RegisterXmm3);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 4].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 4].uXmm.s.Hi, WHvX64RegisterXmm4);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 5].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 5].uXmm.s.Hi, WHvX64RegisterXmm5);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 6].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 6].uXmm.s.Hi, WHvX64RegisterXmm6);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 7].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 7].uXmm.s.Hi, WHvX64RegisterXmm7);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 8].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 8].uXmm.s.Hi, WHvX64RegisterXmm8);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 9].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 9].uXmm.s.Hi, WHvX64RegisterXmm9);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[10].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[10].uXmm.s.Hi, WHvX64RegisterXmm10);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[11].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[11].uXmm.s.Hi, WHvX64RegisterXmm11);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[12].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[12].uXmm.s.Hi, WHvX64RegisterXmm12);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[13].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[13].uXmm.s.Hi, WHvX64RegisterXmm13);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[14].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[14].uXmm.s.Hi, WHvX64RegisterXmm14);
        GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[15].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[15].uXmm.s.Hi, WHvX64RegisterXmm15);
    }

    /* MSRs */
    // WHvX64RegisterTsc - don't touch
    if (fWhat & CPUMCTX_EXTRN_EFER)
    {
        Assert(aenmNames[iReg] == WHvX64RegisterEfer);
        if (aValues[iReg].Reg64 != pVCpu->cpum.GstCtx.msrEFER)
        {
            Log7(("NEM/%u: MSR EFER changed %RX64 -> %RX64\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.msrEFER, aValues[iReg].Reg64));
            if ((aValues[iReg].Reg64 ^ pVCpu->cpum.GstCtx.msrEFER) & MSR_K6_EFER_NXE)
                PGMNotifyNxeChanged(pVCpu, RT_BOOL(aValues[iReg].Reg64 & MSR_K6_EFER_NXE));
            pVCpu->cpum.GstCtx.msrEFER = aValues[iReg].Reg64;
            fMaybeChangedMode = true;
        }
        iReg++;
    }
    if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrKERNELGSBASE, WHvX64RegisterKernelGsBase, "MSR KERNEL_GS_BASE");
    if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
    {
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.SysEnter.cs,  WHvX64RegisterSysenterCs,  "MSR SYSENTER.CS");
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.SysEnter.eip, WHvX64RegisterSysenterEip, "MSR SYSENTER.EIP");
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.SysEnter.esp, WHvX64RegisterSysenterEsp, "MSR SYSENTER.ESP");
    }
    if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
    {
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrSTAR,   WHvX64RegisterStar,   "MSR STAR");
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrLSTAR,  WHvX64RegisterLstar,  "MSR LSTAR");
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrCSTAR,  WHvX64RegisterCstar,  "MSR CSTAR");
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrSFMASK, WHvX64RegisterSfmask, "MSR SFMASK");
    }
    if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
    {
        Assert(aenmNames[iReg] == WHvX64RegisterApicBase);
        const uint64_t uOldBase = APICGetBaseMsrNoCheck(pVCpu);
        if (aValues[iReg].Reg64 != uOldBase)
        {
            Log7(("NEM/%u: MSR APICBase changed %RX64 -> %RX64 (%RX64)\n",
                  pVCpu->idCpu, uOldBase, aValues[iReg].Reg64, aValues[iReg].Reg64 ^ uOldBase));
            int rc2 = APICSetBaseMsr(pVCpu, aValues[iReg].Reg64);
            AssertLogRelMsg(rc2 == VINF_SUCCESS, ("%Rrc %RX64\n", rc2, aValues[iReg].Reg64));
        }
        iReg++;

        GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrPAT, WHvX64RegisterPat, "MSR PAT");
#if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrPAT, WHvX64RegisterMsrMtrrCap);
#endif
        PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrDefType,      WHvX64RegisterMsrMtrrDefType,     "MSR MTRR_DEF_TYPE");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix64K_00000, WHvX64RegisterMsrMtrrFix64k00000, "MSR MTRR_FIX_64K_00000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix16K_80000, WHvX64RegisterMsrMtrrFix16k80000, "MSR MTRR_FIX_16K_80000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix16K_A0000, WHvX64RegisterMsrMtrrFix16kA0000, "MSR MTRR_FIX_16K_A0000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_C0000,  WHvX64RegisterMsrMtrrFix4kC0000,  "MSR MTRR_FIX_4K_C0000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_C8000,  WHvX64RegisterMsrMtrrFix4kC8000,  "MSR MTRR_FIX_4K_C8000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_D0000,  WHvX64RegisterMsrMtrrFix4kD0000,  "MSR MTRR_FIX_4K_D0000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_D8000,  WHvX64RegisterMsrMtrrFix4kD8000,  "MSR MTRR_FIX_4K_D8000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_E0000,  WHvX64RegisterMsrMtrrFix4kE0000,  "MSR MTRR_FIX_4K_E0000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_E8000,  WHvX64RegisterMsrMtrrFix4kE8000,  "MSR MTRR_FIX_4K_E8000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_F0000,  WHvX64RegisterMsrMtrrFix4kF0000,  "MSR MTRR_FIX_4K_F0000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_F8000,  WHvX64RegisterMsrMtrrFix4kF8000,  "MSR MTRR_FIX_4K_F8000");
        GET_REG64_LOG7(pCtxMsrs->msr.TscAux,           WHvX64RegisterTscAux,             "MSR TSC_AUX");
        /** @todo look for HvX64RegisterIa32MiscEnable and HvX64RegisterIa32FeatureControl? */
    }

    /* Interruptibility. */
    if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
    {
        Assert(aenmNames[iReg]     == WHvRegisterInterruptState);
        Assert(aenmNames[iReg + 1] == WHvX64RegisterRip);

        if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
        {
            pVCpu->nem.s.fLastInterruptShadow = aValues[iReg].InterruptState.InterruptShadow;
            if (aValues[iReg].InterruptState.InterruptShadow)
                EMSetInhibitInterruptsPC(pVCpu, aValues[iReg + 1].Reg64);
            else
                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
        }

        if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
        {
            if (aValues[iReg].InterruptState.NmiMasked)
                VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
            else
                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
        }

        fWhat |= CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI;
        iReg += 2;
    }

    /* Event injection. */
    /// @todo WHvRegisterPendingInterruption
    Assert(aenmNames[iReg] == WHvRegisterPendingInterruption);
    if (aValues[iReg].PendingInterruption.InterruptionPending)
    {
        Log7(("PendingInterruption: type=%u vector=%#x errcd=%RTbool/%#x instr-len=%u nested=%u\n",
              aValues[iReg].PendingInterruption.InterruptionType, aValues[iReg].PendingInterruption.InterruptionVector,
              aValues[iReg].PendingInterruption.DeliverErrorCode, aValues[iReg].PendingInterruption.ErrorCode,
              aValues[iReg].PendingInterruption.InstructionLength, aValues[iReg].PendingInterruption.NestedEvent));
        AssertMsg((aValues[iReg].PendingInterruption.AsUINT64 & UINT64_C(0xfc00)) == 0,
                  ("%#RX64\n", aValues[iReg].PendingInterruption.AsUINT64));
    }

    /// @todo WHvRegisterPendingEvent0 (renamed to WHvRegisterPendingEvent).

    /* Almost done, just update extrn flags and maybe change PGM mode. */
    pVCpu->cpum.GstCtx.fExtrn &= ~fWhat;
    if (!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT))))
        pVCpu->cpum.GstCtx.fExtrn = 0;

    /* Typical. */
    if (!fMaybeChangedMode && !fUpdateCr3)
        return VINF_SUCCESS;

    /*
     * Slow.
     */
    if (fMaybeChangedMode)
    {
        int rc = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER);
        AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_1);
    }

    if (fUpdateCr3)
    {
        int rc = PGMUpdateCR3(pVCpu, pVCpu->cpum.GstCtx.cr3, false /*fPdpesMapped*/);
        if (rc == VINF_SUCCESS)
        { /* likely */ }
        else
            AssertMsgFailedReturn(("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_2);
    }

    return VINF_SUCCESS;
# endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
}

#endif /* !IN_RING0 */


/**
 * Interface for importing state on demand (used by IEM).
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context CPU structure.
 * @param   fWhat   What to import, CPUMCTX_EXTRN_XXX.
 */
VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
{
    STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);

#ifdef IN_RING0
# ifdef NEM_WIN_WITH_RING0_RUNLOOP
    return nemR0WinImportState(pVCpu->pGVM, pVCpu, &pVCpu->cpum.GstCtx, fWhat, true /*fCanUpdateCr3*/);
# else
    RT_NOREF(pVCpu, fWhat);
    return VERR_NOT_IMPLEMENTED;
# endif
#else
    return nemHCWinCopyStateFromHyperV(pVCpu->pVMR3, pVCpu, fWhat);
#endif
}


/**
 * Query the CPU tick counter and optionally the TSC_AUX MSR value.
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context CPU structure.
 * @param   pcTicks Where to return the CPU tick count.
 * @param   puAux   Where to return the TSC_AUX register value.
 */
VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPUCC pVCpu, uint64_t *pcTicks, uint32_t *puAux)
{
    STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick);

#ifdef IN_RING3
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
    AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);

# if defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) || defined(NEM_WIN_WITH_RING0_RUNLOOP)
#  if !defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) && defined(NEM_WIN_WITH_RING0_RUNLOOP)
    if (pVM->nem.s.fUseRing0Runloop)
#  endif
    {
        /* Call ring-0 and get the values. */
        int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_QUERY_CPU_TICK, 0, NULL);
        AssertLogRelRCReturn(rc, rc);
        *pcTicks = pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks;
        if (puAux)
            *puAux = pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_TSC_AUX
                   ? pVCpu->nem.s.Hypercall.QueryCpuTick.uAux : CPUMGetGuestTscAux(pVCpu);
        return VINF_SUCCESS;
    }
# endif
# ifndef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS
    /* Call the official API. */
    WHV_REGISTER_NAME  aenmNames[2] = { WHvX64RegisterTsc, WHvX64RegisterTscAux };
    WHV_REGISTER_VALUE aValues[2]   = { {0, 0}, {0, 0} };
    Assert(RT_ELEMENTS(aenmNames) == RT_ELEMENTS(aValues));
    HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, 2, aValues);
    AssertLogRelMsgReturn(SUCCEEDED(hrc),
                          ("WHvGetVirtualProcessorRegisters(%p, %u,{tsc,tsc_aux},2,) -> %Rhrc (Last=%#x/%u)\n",
                           pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
                          , VERR_NEM_GET_REGISTERS_FAILED);
    *pcTicks = aValues[0].Reg64;
    if (puAux)
        *puAux = pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_TSC_AUX ? aValues[1].Reg64 : CPUMGetGuestTscAux(pVCpu);
    return VINF_SUCCESS;
# endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
#else  /* IN_RING0 */
# ifdef NEM_WIN_WITH_RING0_RUNLOOP
    int rc = nemR0WinQueryCpuTick(pVCpu->pGVM, pVCpu, pcTicks, puAux);
    if (RT_SUCCESS(rc) && puAux && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_TSC_AUX))
        *puAux = CPUMGetGuestTscAux(pVCpu);
    return rc;
# else
    RT_NOREF(pVCpu, pcTicks, puAux);
    return VERR_NOT_IMPLEMENTED;
# endif
#endif /* IN_RING0 */
}


/**
 * Resumes CPU clock (TSC) on all virtual CPUs.
 *
 * This is called by TM when the VM is started, restored, resumed or similar.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context CPU structure of the calling EMT.
 * @param   uPausedTscValue The TSC value at the time of pausing.
 */
VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uPausedTscValue)
{
#ifdef IN_RING0
# ifdef NEM_WIN_WITH_RING0_RUNLOOP
    return nemR0WinResumeCpuTickOnAll(pVM, pVCpu, uPausedTscValue);
# else
    RT_NOREF(pVM, pVCpu, uPausedTscValue);
    return VERR_NOT_IMPLEMENTED;
# endif
#else  /* IN_RING3 */
    VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
    AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);

# if defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) || defined(NEM_WIN_WITH_RING0_RUNLOOP)
#  if !defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) && defined(NEM_WIN_WITH_RING0_RUNLOOP)
    if (pVM->nem.s.fUseRing0Runloop)
#  endif
    {
        /* Call ring-0 and do it all there. */
        return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL, uPausedTscValue, NULL);
    }
# endif
# ifndef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS
1241 /*
1242 * Call the offical API to do the job.
1243 */
1244 if (pVM->cCpus > 1)
1245 RTThreadYield(); /* Try decrease the chance that we get rescheduled in the middle. */
1246
1247 /* Start with the first CPU. */
1248 WHV_REGISTER_NAME enmName = WHvX64RegisterTsc;
1249 WHV_REGISTER_VALUE Value = {0, 0};
1250 Value.Reg64 = uPausedTscValue;
1251 uint64_t const uFirstTsc = ASMReadTSC();
1252 HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, 0 /*iCpu*/, &enmName, 1, &Value);
1253 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1254 ("WHvSetVirtualProcessorRegisters(%p, 0,{tsc},2,%#RX64) -> %Rhrc (Last=%#x/%u)\n",
1255 pVM->nem.s.hPartition, uPausedTscValue, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1256 , VERR_NEM_SET_TSC);
1257
1258 /* Do the other CPUs, adjusting for elapsed TSC and keeping finger crossed
1259 that we don't introduce too much drift here. */
1260 for (VMCPUID iCpu = 1; iCpu < pVM->cCpus; iCpu++)
1261 {
1262 Assert(enmName == WHvX64RegisterTsc);
1263 const uint64_t offDelta = (ASMReadTSC() - uFirstTsc);
1264 Value.Reg64 = uPausedTscValue + offDelta;
1265 hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, iCpu, &enmName, 1, &Value);
1266 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1267 ("WHvSetVirtualProcessorRegisters(%p, 0,{tsc},2,%#RX64 + %#RX64) -> %Rhrc (Last=%#x/%u)\n",
1268 pVM->nem.s.hPartition, iCpu, uPausedTscValue, offDelta, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1269 , VERR_NEM_SET_TSC);
1270 }
1271
1272 return VINF_SUCCESS;
1273# endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
1274#endif /* IN_RING3 */
1275}
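/* Note: the loop above re-reads the host TSC before programming each vCPU so the
   guest TSCs stay roughly in sync despite the sequential WHv calls; whatever drift
   remains is the latency of the individual calls.  Minimal sketch of the adjustment
   (illustrative only, not built; auGuestTsc and cCpus are hypothetical): */
#if 0
    uint64_t auGuestTsc[64];
    uint64_t const uFirst = ASMReadTSC();
    for (VMCPUID iCpu = 0; iCpu < cCpus; iCpu++)
        auGuestTsc[iCpu] = uPausedTscValue + (ASMReadTSC() - uFirst); /* host ticks since the first read */
#endif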
1276
1277#ifdef NEMWIN_NEED_GET_REGISTER
1278# if defined(IN_RING0) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
1279/** Worker for assertion macro. */
1280NEM_TMPL_STATIC int nemHCWinGetRegister(PVMCPUCC pVCpu, PGVMCPU pGVCpu, uint32_t enmReg, HV_REGISTER_VALUE *pRetValue)
1281{
1282 RT_ZERO(*pRetValue);
1283# ifdef IN_RING3
1284 RT_NOREF(pVCpu, pGVCpu, enmReg);
1285 return VERR_NOT_IMPLEMENTED;
1286# else
1287 NOREF(pVCpu);
1288
1289 /*
1290 * Hypercall parameters.
1291 */
1292 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
1293 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
1294 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
1295
1296 pInput->PartitionId = pVCpu->pGVM->nemr0.s.idHvPartition;
1297 pInput->VpIndex = pVCpu->idCpu;
1298 pInput->fFlags = 0;
1299 pInput->Names[0] = (HV_REGISTER_NAME)enmReg;
1300
1301 size_t const cbInput = RT_ALIGN_Z(RT_UOFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[1]), 32);
1302 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
1303 RT_BZERO(paValues, sizeof(paValues[0]) * 1);
1304
1305 /*
1306 * Make the hypercall and copy out the value.
1307 */
1308 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, 1),
1309 pGVCpu->nem.s.HypercallData.HCPhysPage,
1310 pGVCpu->nem.s.HypercallData.HCPhysPage + cbInput);
1311 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(1), ("uResult=%RX64 cRegs=%#x\n", uResult, 1),
1312 VERR_NEM_GET_REGISTERS_FAILED);
1313
1314 *pRetValue = paValues[0];
1315 return VINF_SUCCESS;
1316# endif
1317}
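/* Note: per the Hyper-V TLFS, the hypercall input value packs the call code into
   bits 15:0 and the rep count into bits 43:32, while the result value reports the
   reps completed in bits 43:32.  Roughly (the exact macros live elsewhere in the
   NEM code):
       HV_MAKE_CALL_INFO(HvCallGetVpRegisters, 1) ~= HvCallGetVpRegisters | RT_BIT_64(32)
       HV_MAKE_CALL_REP_RET(1)                    ~= HV_STATUS_SUCCESS    | RT_BIT_64(32)
   which is why a fully successful single-register call compares equal to
   HV_MAKE_CALL_REP_RET(1) above. */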
1318# else
1319/** Worker for assertion macro. */
1320NEM_TMPL_STATIC int nemR3WinGetRegister(PVMCPUCC pVCpu, uint32_t enmReg, WHV_REGISTER_VALUE *pRetValue)
1321{
1322 RT_ZERO(*pRetValue);
1323 RT_NOREF(pVCpu, enmReg);
1324 return VERR_NOT_IMPLEMENTED;
1325}
1326# endif
1327#endif
1328
1329
1330#ifdef LOG_ENABLED
1331/**
1332 * Get the virtual processor running status.
1333 */
1334DECLINLINE(VID_PROCESSOR_STATUS) nemHCWinCpuGetRunningStatus(PVMCPUCC pVCpu)
1335{
1336# ifdef IN_RING0
1337 NOREF(pVCpu);
1338 return VidProcessorStatusUndefined;
1339# else
1340 RTERRVARS Saved;
1341 RTErrVarsSave(&Saved);
1342
1343 /*
1344 * This API is disabled in release builds, it seems. On build 17101 it requires
1345 * the following patch to be enabled (windbg): eb vid+12180 0f 84 98 00 00 00
1346 */
1347 VID_PROCESSOR_STATUS enmCpuStatus = VidProcessorStatusUndefined;
1348 NTSTATUS rcNt = g_pfnVidGetVirtualProcessorRunningStatus(pVCpu->pVMR3->nem.s.hPartitionDevice, pVCpu->idCpu, &enmCpuStatus);
1349 AssertRC(rcNt);
1350
1351 RTErrVarsRestore(&Saved);
1352 return enmCpuStatus;
1353# endif
1354}
1355#endif /* LOG_ENABLED */
1356
1357
1358#if defined(NEM_WIN_USE_OUR_OWN_RUN_API) || defined(NEM_WIN_WITH_RING0_RUNLOOP)
1359# ifdef IN_RING3 /* hopefully not needed in ring-0, as we'd need KTHREADs and KeAlertThread. */
1360/**
1361 * Our own WHvCancelRunVirtualProcessor that can later be moved to ring-0.
1362 *
1363 * This is an experiment only.
1364 *
1365 * @returns VBox status code.
1366 * @param pVM The cross context VM structure.
1367 * @param pVCpu The cross context virtual CPU structure of the
1368 * calling EMT.
1369 */
1370NEM_TMPL_STATIC int nemHCWinCancelRunVirtualProcessor(PVMCC pVM, PVMCPUCC pVCpu)
1371{
1372 /*
1373 * Work the state.
1374 *
1375 * From the looks of things, we should let the EMT call VidStopVirtualProcessor.
1376 * So, we just need to modify the state and kick the EMT if it's waiting on
1377 * messages. For the latter we use QueueUserAPC / KeAlertThread.
1378 */
1379 for (;;)
1380 {
1381 VMCPUSTATE enmState = VMCPU_GET_STATE(pVCpu);
1382 switch (enmState)
1383 {
1384 case VMCPUSTATE_STARTED_EXEC_NEM:
1385 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED, VMCPUSTATE_STARTED_EXEC_NEM))
1386 {
1387 DBGFTRACE_CUSTOM(pVM, "VMCPUSTATE_STARTED_EXEC_NEM -> CANCELED");
1388 Log8(("nemHCWinCancelRunVirtualProcessor: Switched %u to canceled state\n", pVCpu->idCpu));
1389 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatCancelChangedState);
1390 return VINF_SUCCESS;
1391 }
1392 break;
1393
1394 case VMCPUSTATE_STARTED_EXEC_NEM_WAIT:
1395 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED, VMCPUSTATE_STARTED_EXEC_NEM_WAIT))
1396 {
1397 DBGFTRACE_CUSTOM(pVM, "VMCPUSTATE_STARTED_EXEC_NEM_WAIT -> CANCELED");
1398# ifdef IN_RING0
1399 NTSTATUS rcNt = KeAlertThread(??);
1400 DBGFTRACE_CUSTOM(pVM, "KeAlertThread -> %#x", rcNt);
1401# else
1402 NTSTATUS rcNt = NtAlertThread(pVCpu->nem.s.hNativeThreadHandle);
1403 DBGFTRACE_CUSTOM(pVM, "NtAlertThread -> %#x", rcNt);
1404# endif
1405 Log8(("nemHCWinCancelRunVirtualProcessor: Alerted %u: %#x\n", pVCpu->idCpu, rcNt));
1406 Assert(rcNt == STATUS_SUCCESS);
1407 if (NT_SUCCESS(rcNt))
1408 {
1409 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatCancelAlertedThread);
1410 return VINF_SUCCESS;
1411 }
1412 AssertLogRelMsgFailedReturn(("NtAlertThread failed: %#x\n", rcNt), RTErrConvertFromNtStatus(rcNt));
1413 }
1414 break;
1415
1416 default:
1417 return VINF_SUCCESS;
1418 }
1419
1420 ASMNopPause();
1421 RT_NOREF(pVM);
1422 }
1423}
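/* Illustrative sketch (not built): NtAlertThread only has an effect while the target
   thread sits in an alertable wait, which is the case when the EMT waits for VID
   messages.  Hypothetical demonstration with an alertable delay: */
# if 0
    LARGE_INTEGER Timeout;
    Timeout.QuadPart = -10000000; /* 1 second, relative (negative = relative, 100ns units). */
    NTSTATUS rcNtWait = NtDelayExecution(TRUE /*Alertable*/, &Timeout);
    /* A concurrent NtAlertThread(hThread) makes this return STATUS_ALERTED right away,
       after which the EMT notices the CANCELED state set above. */
# endif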
1424# endif /* IN_RING3 */
1425#endif /* NEM_WIN_USE_OUR_OWN_RUN_API || NEM_WIN_WITH_RING0_RUNLOOP */
1426
1427
1428#ifdef LOG_ENABLED
1429/**
1430 * Logs the current CPU state.
1431 */
1432NEM_TMPL_STATIC void nemHCWinLogState(PVMCC pVM, PVMCPUCC pVCpu)
1433{
1434 if (LogIs3Enabled())
1435 {
1436# if 0 // def IN_RING3 - causes lazy state import assertions all over CPUM.
1437 char szRegs[4096];
1438 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
1439 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
1440 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
1441 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
1442 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
1443 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
1444 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
1445 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
1446 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
1447 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
1448 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
1449 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
1450 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
1451 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
1452 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
1453 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
1454 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
1455 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
1456 " efer=%016VR{efer}\n"
1457 " pat=%016VR{pat}\n"
1458 " sf_mask=%016VR{sf_mask}\n"
1459 "krnl_gs_base=%016VR{krnl_gs_base}\n"
1460 " lstar=%016VR{lstar}\n"
1461 " star=%016VR{star} cstar=%016VR{cstar}\n"
1462 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
1463 );
1464
1465 char szInstr[256];
1466 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
1467 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
1468 szInstr, sizeof(szInstr), NULL);
1469 Log3(("%s%s\n", szRegs, szInstr));
1470# else
1471 /** @todo state logging in ring-0 */
1472 RT_NOREF(pVM, pVCpu);
1473# endif
1474 }
1475}
1476#endif /* LOG_ENABLED */
1477
1478
1479/** Macro used by nemHCWinExecStateToLogStr and nemR3WinExecStateToLogStr. */
1480#define SWITCH_IT(a_szPrefix) \
1481 do \
1482 switch (u)\
1483 { \
1484 case 0x00: return a_szPrefix ""; \
1485 case 0x01: return a_szPrefix ",Pnd"; \
1486 case 0x02: return a_szPrefix ",Dbg"; \
1487 case 0x03: return a_szPrefix ",Pnd,Dbg"; \
1488 case 0x04: return a_szPrefix ",Shw"; \
1489 case 0x05: return a_szPrefix ",Pnd,Shw"; \
1490 case 0x06: return a_szPrefix ",Shw,Dbg"; \
1491 case 0x07: return a_szPrefix ",Pnd,Shw,Dbg"; \
1492 default: AssertFailedReturn("WTF?"); \
1493 } \
1494 while (0)
1495
1496#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
1497/**
1498 * Translates the execution state bitfield into a short log string, VID version.
1499 *
1500 * @returns Read-only log string.
1501 * @param pMsgHdr The header which state to summarize.
1502 */
1503static const char *nemHCWinExecStateToLogStr(HV_X64_INTERCEPT_MESSAGE_HEADER const *pMsgHdr)
1504{
1505 unsigned u = (unsigned)pMsgHdr->ExecutionState.InterruptionPending
1506 | ((unsigned)pMsgHdr->ExecutionState.DebugActive << 1)
1507 | ((unsigned)pMsgHdr->ExecutionState.InterruptShadow << 2);
1508 if (pMsgHdr->ExecutionState.EferLma)
1509 SWITCH_IT("LM");
1510 else if (pMsgHdr->ExecutionState.Cr0Pe)
1511 SWITCH_IT("PM");
1512 else
1513 SWITCH_IT("RM");
1514}
1515#elif defined(IN_RING3)
1516/**
1517 * Translates the execution state bitfield into a short log string, WinHv version.
1518 *
1519 * @returns Read-only log string.
1520 * @param pExitCtx The exit context which state to summarize.
1521 */
1522static const char *nemR3WinExecStateToLogStr(WHV_VP_EXIT_CONTEXT const *pExitCtx)
1523{
1524 unsigned u = (unsigned)pExitCtx->ExecutionState.InterruptionPending
1525 | ((unsigned)pExitCtx->ExecutionState.DebugActive << 1)
1526 | ((unsigned)pExitCtx->ExecutionState.InterruptShadow << 2);
1527 if (pExitCtx->ExecutionState.EferLma)
1528 SWITCH_IT("LM");
1529 else if (pExitCtx->ExecutionState.Cr0Pe)
1530 SWITCH_IT("PM");
1531 else
1532 SWITCH_IT("RM");
1533}
1534#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
1535#undef SWITCH_IT
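/* Example: for a long-mode guest with an interrupt pending and an active interrupt
   shadow, u = 0x05 (Pnd | Shw), so the functions above return "LM,Pnd,Shw". */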
1536
1537
1538#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
1539/**
1540 * Advances the guest RIP and clears EFLAGS.RF, VID version.
1541 *
1542 * This may clear VMCPU_FF_INHIBIT_INTERRUPTS.
1543 *
1544 * @param pVCpu The cross context virtual CPU structure.
1545 * @param pMsgHdr The intercept message header.
1546 * @param cbMinInstr The minimum instruction length, or 1 if unknown.
1547 */
1548DECLINLINE(void)
1549nemHCWinAdvanceGuestRipAndClearRF(PVMCPUCC pVCpu, HV_X64_INTERCEPT_MESSAGE_HEADER const *pMsgHdr, uint8_t cbMinInstr)
1550{
1551 Assert(!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));
1552
1553 /* Advance the RIP. */
1554 Assert(pMsgHdr->InstructionLength >= cbMinInstr); RT_NOREF_PV(cbMinInstr);
1555 pVCpu->cpum.GstCtx.rip += pMsgHdr->InstructionLength;
1556 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
1557
1558 /* Update interrupt inhibition. */
1559 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1560 { /* likely */ }
1561 else if (pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
1562 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1563}
1564#elif defined(IN_RING3)
1565/**
1566 * Advances the guest RIP and clears EFLAGS.RF, WinHv version.
1567 *
1568 * This may clear VMCPU_FF_INHIBIT_INTERRUPTS.
1569 *
1570 * @param pVCpu The cross context virtual CPU structure.
1571 * @param pExitCtx The exit context.
1572 * @param cbMinInstr The minimum instruction length, or 1 if unknown.
1573 */
1574DECLINLINE(void) nemR3WinAdvanceGuestRipAndClearRF(PVMCPUCC pVCpu, WHV_VP_EXIT_CONTEXT const *pExitCtx, uint8_t cbMinInstr)
1575{
1576 Assert(!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));
1577
1578 /* Advance the RIP. */
1579 Assert(pExitCtx->InstructionLength >= cbMinInstr); RT_NOREF_PV(cbMinInstr);
1580 pVCpu->cpum.GstCtx.rip += pExitCtx->InstructionLength;
1581 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
1582
1583 /* Update interrupt inhibition. */
1584 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1585 { /* likely */ }
1586 else if (pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
1587 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1588}
1589#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
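/* Note on the two helpers above: the CPU sets EFLAGS.RF in the exception frame when
   delivering certain faults and clears it once an instruction completes successfully;
   since the instruction is completed in software here, RF must be cleared the same
   way or an instruction breakpoint on the next RIP would be suppressed. */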
1590
1591#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING3)
1592
1593NEM_TMPL_STATIC DECLCALLBACK(int)
1594nemHCWinUnmapOnePageCallback(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint8_t *pu2NemState, void *pvUser)
1595{
1596 RT_NOREF_PV(pvUser);
1597# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1598 int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys);
1599 AssertRC(rc);
1600 if (RT_SUCCESS(rc))
1601# else
1602 RT_NOREF_PV(pVCpu);
1603 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
1604 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
1605 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
1606 if (SUCCEEDED(hrc))
1607# endif
1608 {
1609 Log5(("NEM GPA unmap all: %RGp (cMappedPages=%u)\n", GCPhys, pVM->nem.s.cMappedPages - 1));
1610 *pu2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
1611 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
1612 }
1613 else
1614 {
1615# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1616 LogRel(("nemHCWinUnmapOnePageCallback: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));
1617# else
1618 LogRel(("nemHCWinUnmapOnePageCallback: GCPhys=%RGp %s hrc=%Rhrc (%#x) Last=%#x/%u (cMappedPages=%u)\n",
1619 GCPhys, g_apszPageStates[*pu2NemState], hrc, hrc, RTNtLastStatusValue(),
1620 RTNtLastErrorValue(), pVM->nem.s.cMappedPages));
1621# endif
1622 *pu2NemState = NEM_WIN_PAGE_STATE_NOT_SET;
1623 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
1624 }
1625 if (pVM->nem.s.cMappedPages > 0)
1626 ASMAtomicDecU32(&pVM->nem.s.cMappedPages);
1627 return VINF_SUCCESS;
1628}
1629
1630
1631/**
1632 * State to pass between nemHCWinHandleMemoryAccess / nemR3WinWHvHandleMemoryAccess
1633 * and nemHCWinHandleMemoryAccessPageCheckerCallback.
1634 */
1635typedef struct NEMHCWINHMACPCCSTATE
1636{
1637 /** Input: Write access. */
1638 bool fWriteAccess;
1639 /** Output: Set if we did something. */
1640 bool fDidSomething;
1641 /** Output: Set if we should resume. */
1642 bool fCanResume;
1643} NEMHCWINHMACPCCSTATE;
1644
1645/**
1646 * @callback_method_impl{FNPGMPHYSNEMCHECKPAGE,
1647 * Worker for nemR3WinHandleMemoryAccess; pvUser points to a
1648 * NEMHCWINHMACPCCSTATE structure. }
1649 */
1650NEM_TMPL_STATIC DECLCALLBACK(int)
1651nemHCWinHandleMemoryAccessPageCheckerCallback(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
1652{
1653 NEMHCWINHMACPCCSTATE *pState = (NEMHCWINHMACPCCSTATE *)pvUser;
1654 pState->fDidSomething = false;
1655 pState->fCanResume = false;
1656
1657 /* If A20 is disabled, we may need to make another query on the masked
1658 page to get the correct protection information. */
1659 uint8_t u2State = pInfo->u2NemState;
1660 RTGCPHYS GCPhysSrc;
1661# ifdef NEM_WIN_WITH_A20
1662 if ( pVM->nem.s.fA20Enabled
1663 || !NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
1664# endif
1665 GCPhysSrc = GCPhys;
1666# ifdef NEM_WIN_WITH_A20
1667 else
1668 {
1669 GCPhysSrc = GCPhys & ~(RTGCPHYS)RT_BIT_32(20);
1670 PGMPHYSNEMPAGEINFO Info2;
1671 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhysSrc, pState->fWriteAccess, &Info2, NULL, NULL);
1672 AssertRCReturn(rc, rc);
1673
1674 *pInfo = Info2;
1675 pInfo->u2NemState = u2State;
1676 }
1677# endif
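/* Example: with the A20 gate disabled, physical address bit 20 reads as zero, so a
   guest access at 0x00100000 aliases 0x00000000; the masked GCPhysSrc computed above
   is therefore the page whose protection must actually be queried. */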
1678
1679 /*
1680 * Consolidate current page state with actual page protection and access type.
1681 * We don't really consider downgrades here, as they shouldn't happen.
1682 */
1683# ifndef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1684 /** @todo Someone at Microsoft please explain:
1685 * I'm not sure WTF was going on, but I ended up in a loop if I remapped a
1686 * readonly page as writable (unmap, then map again). Specifically, this was an
1687 * issue with the big VRAM mapping at 0xe0000000 when booting DSL 4.4.1. So, in
1688 * the hope of working around that we no longer pre-map anything, just unmap stuff
1689 * and do it lazily here. And here we will first unmap, restart, and then remap
1690 * with new protection or backing.
1691 */
1692# endif
1693 int rc;
1694 switch (u2State)
1695 {
1696 case NEM_WIN_PAGE_STATE_UNMAPPED:
1697 case NEM_WIN_PAGE_STATE_NOT_SET:
1698 if (pInfo->fNemProt == NEM_PAGE_PROT_NONE)
1699 {
1700 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #1\n", GCPhys));
1701 return VINF_SUCCESS;
1702 }
1703
1704 /* Don't bother remapping it if it's a write request to a non-writable page. */
1705 if ( pState->fWriteAccess
1706 && !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE))
1707 {
1708 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #1w\n", GCPhys));
1709 return VINF_SUCCESS;
1710 }
1711
1712 /* Map the page. */
1713 rc = nemHCNativeSetPhysPage(pVM,
1714 pVCpu,
1715 GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
1716 GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
1717 pInfo->fNemProt,
1718 &u2State,
1719 true /*fBackingState*/);
1720 pInfo->u2NemState = u2State;
1721 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - synced => %s + %Rrc\n",
1722 GCPhys, g_apszPageStates[u2State], rc));
1723 pState->fDidSomething = true;
1724 pState->fCanResume = true;
1725 return rc;
1726
1727 case NEM_WIN_PAGE_STATE_READABLE:
1728 if ( !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1729 && (pInfo->fNemProt & (NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE)))
1730 {
1731 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #2\n", GCPhys));
1732 return VINF_SUCCESS;
1733 }
1734
1735# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1736 /* Upgrade page to writable. */
1737/** @todo test this */
1738 if ( (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1739 && pState->fWriteAccess)
1740 {
1741 rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhys,
1742 HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE
1743 | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
1744 AssertRC(rc);
1745 if (RT_SUCCESS(rc))
1746 {
1747 STAM_REL_COUNTER_INC(&pVM->nem.s.StatRemapPage);
1748 pInfo->u2NemState = NEM_WIN_PAGE_STATE_WRITABLE;
1749 pState->fDidSomething = true;
1750 pState->fCanResume = true;
1751 Log5(("NEM GPA write-upgrade/exit: %RGp (was %s, cMappedPages=%u)\n",
1752 GCPhys, g_apszPageStates[u2State], pVM->nem.s.cMappedPages));
1753 }
1754 else
1755 STAM_REL_COUNTER_INC(&pVM->nem.s.StatRemapPageFailed);
1756 }
1757 else
1758 {
1759 /* Need to emulate the access. */
1760 AssertBreak(pInfo->fNemProt != NEM_PAGE_PROT_NONE); /* There should be no downgrades. */
1761 rc = VINF_SUCCESS;
1762 }
1763 return rc;
1764# else
1765 break;
1766# endif
1767
1768 case NEM_WIN_PAGE_STATE_WRITABLE:
1769 if (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1770 {
1771 if (pInfo->u2OldNemState == NEM_WIN_PAGE_STATE_WRITABLE)
1772 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #3a\n", GCPhys));
1773 else
1774 {
1775 pState->fCanResume = true;
1776 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #3b (%s -> %s)\n",
1777 GCPhys, g_apszPageStates[pInfo->u2OldNemState], g_apszPageStates[u2State]));
1778 }
1779 return VINF_SUCCESS;
1780 }
1781# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1782 AssertFailed(); /* There should be no downgrades. */
1783# endif
1784 break;
1785
1786 default:
1787 AssertLogRelMsgFailedReturn(("u2State=%#x\n", u2State), VERR_NEM_IPE_4);
1788 }
1789
1790 /*
1791 * Unmap and restart the instruction.
1792 * If this fails, which it does every so often, just unmap everything for now.
1793 */
1794# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1795 rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys);
1796 AssertRC(rc);
1797 if (RT_SUCCESS(rc))
1798# else
1799 /** @todo figure out whether we mess up the state or if it's WHv. */
1800 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
1801 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
1802 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
1803 if (SUCCEEDED(hrc))
1804# endif
1805 {
1806 pState->fDidSomething = true;
1807 pState->fCanResume = true;
1808 pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
1809 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
1810 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
1811 Log5(("NEM GPA unmapped/exit: %RGp (was %s, cMappedPages=%u)\n", GCPhys, g_apszPageStates[u2State], cMappedPages));
1812 return VINF_SUCCESS;
1813 }
1814 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
1815# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1816 LogRel(("nemHCWinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhys, rc));
1817 return rc;
1818# elif defined(VBOX_WITH_PGM_NEM_MODE)
1819 LogRel(("nemHCWinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp %s hrc=%Rhrc (%#x)\n",
1820 GCPhys, g_apszPageStates[u2State], hrc, hrc));
1821 return VERR_NEM_UNMAP_PAGES_FAILED;
1822# else
1823 LogRel(("nemHCWinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp %s hrc=%Rhrc (%#x) Last=%#x/%u (cMappedPages=%u)\n",
1824 GCPhys, g_apszPageStates[u2State], hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue(),
1825 pVM->nem.s.cMappedPages));
1826
1827 PGMPhysNemEnumPagesByState(pVM, pVCpu, NEM_WIN_PAGE_STATE_READABLE, nemHCWinUnmapOnePageCallback, NULL);
1828 Log(("nemHCWinHandleMemoryAccessPageCheckerCallback: Unmapped all (cMappedPages=%u)\n", pVM->nem.s.cMappedPages));
1829 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapAllPages);
1830
1831 pState->fDidSomething = true;
1832 pState->fCanResume = true;
1833 pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
1834 return VINF_SUCCESS;
1835# endif
1836}
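/* Rough summary of the page state machine driven by the callback above (as read from
   the code, hedged):
       NOT_SET/UNMAPPED --nemHCNativeSetPhysPage--> READABLE or WRITABLE
       READABLE --guest write access--------------> unmap, restart, remap as WRITABLE
       WRITABLE --protection downgrade------------> not expected (asserted against)
       unmap failure (WHv only)-------------------> unmap everything and rebuild lazily
   The "unmap first, remap lazily on the next exit" discipline exists to dodge the
   WHv remap loop described in the todo above. */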
1837
1838#endif /* defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING3) */
1839
1840
1841#if defined(IN_RING0) && defined(NEM_WIN_TEMPLATE_MODE_OWN_RUN_API)
1842/**
1843 * Wrapper around nemR0WinImportState that converts VERR_NEM_FLUSH_TLB
1844 * into informational status codes and logs+asserts statuses.
1845 *
1846 * @returns VBox strict status code.
1847 * @param pGVM The global (ring-0) VM structure.
1848 * @param pGVCpu The global (ring-0) per CPU structure.
1849 * @param fWhat What to import.
1850 * @param pszCaller Who is doing the importing.
1851 */
1852DECLINLINE(VBOXSTRICTRC) nemR0WinImportStateStrict(PGVM pGVM, PGVMCPU pGVCpu, uint64_t fWhat, const char *pszCaller)
1853{
1854 int rc = nemR0WinImportState(pGVM, pGVCpu, &pGVCpu->cpum.GstCtx, fWhat, true /*fCanUpdateCr3*/);
1855 if (RT_SUCCESS(rc))
1856 {
1857 Assert(rc == VINF_SUCCESS);
1858 return VINF_SUCCESS;
1859 }
1860
1861 if (rc == VERR_NEM_FLUSH_TLB)
1862 {
1863 Log4(("%s/%u: nemR0WinImportState -> %Rrc\n", pszCaller, pGVCpu->idCpu, -rc));
1864 return -rc;
1865 }
1866 RT_NOREF(pszCaller);
1867 AssertMsgFailedReturn(("%s/%u: nemR0WinImportState failed: %Rrc\n", pszCaller, pGVCpu->idCpu, rc), rc);
1868}
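/* Illustrative note: the '-rc' conversion above relies on the informational sibling of
   the status code having the same magnitude.  Assuming VINF_NEM_FLUSH_TLB is defined
   alongside VERR_NEM_FLUSH_TLB (hypothetical compile-time check, not built): */
# if 0
    AssertCompile(VERR_NEM_FLUSH_TLB == -VINF_NEM_FLUSH_TLB);
# endif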
1869#endif /* IN_RING0 && NEM_WIN_TEMPLATE_MODE_OWN_RUN_API*/
1870
1871#if defined(NEM_WIN_TEMPLATE_MODE_OWN_RUN_API) || defined(IN_RING3)
1872/**
1873 * Wrapper around nemR0WinImportStateStrict and nemHCWinCopyStateFromHyperV.
1874 *
1875 * Unlike the wrapped APIs, this checks whether it's necessary.
1876 *
1877 * @returns VBox strict status code.
1878 * @param pVCpu The cross context per CPU structure.
1879 * @param fWhat What to import.
1880 * @param pszCaller Who is doing the importing.
1881 */
1882DECLINLINE(VBOXSTRICTRC) nemHCWinImportStateIfNeededStrict(PVMCPUCC pVCpu, uint64_t fWhat, const char *pszCaller)
1883{
1884 if (pVCpu->cpum.GstCtx.fExtrn & fWhat)
1885 {
1886# ifdef IN_RING0
1887 return nemR0WinImportStateStrict(pVCpu->pGVM, pVCpu, fWhat, pszCaller);
1888# else
1889 RT_NOREF(pszCaller);
1890 int rc = nemHCWinCopyStateFromHyperV(pVCpu->pVMR3, pVCpu, fWhat);
1891 AssertRCReturn(rc, rc);
1892# endif
1893 }
1894 return VINF_SUCCESS;
1895}
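/* Usage sketch (hypothetical caller): import only the bits still marked external and
   bail out on any non-VINF_SUCCESS status: */
# if 0
    VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS, "Example");
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
# endif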
1896#endif /* NEM_WIN_TEMPLATE_MODE_OWN_RUN_API || IN_RING3 */
1897
1898#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
1899/**
1900 * Copies register state from the X64 intercept message header.
1901 *
1902 * ASSUMES no state copied yet.
1903 *
1904 * @param pVCpu The cross context per CPU structure.
1905 * @param pHdr The X64 intercept message header.
1906 * @sa nemR3WinCopyStateFromX64Header
1907 */
1908DECLINLINE(void) nemHCWinCopyStateFromX64Header(PVMCPUCC pVCpu, HV_X64_INTERCEPT_MESSAGE_HEADER const *pHdr)
1909{
1910 Assert( (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
1911 == (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT));
1912 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.cs, pHdr->CsSegment);
1913 pVCpu->cpum.GstCtx.rip = pHdr->Rip;
1914 pVCpu->cpum.GstCtx.rflags.u = pHdr->Rflags;
1915
1916 pVCpu->nem.s.fLastInterruptShadow = pHdr->ExecutionState.InterruptShadow;
1917 if (!pHdr->ExecutionState.InterruptShadow)
1918 {
1919 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1920 { /* likely */ }
1921 else
1922 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1923 }
1924 else
1925 EMSetInhibitInterruptsPC(pVCpu, pHdr->Rip);
1926
1927 APICSetTpr(pVCpu, pHdr->Cr8 << 4);
1928
1929 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_APIC_TPR);
1930}
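/* Note: CR8 holds bits 7:4 of the APIC TPR, hence the '<< 4' when handing it to
   APICSetTpr above; e.g. CR8=0x5 corresponds to TPR=0x50. */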
1931#elif defined(IN_RING3)
1932/**
1933 * Copies register state from the (common) exit context.
1934 *
1935 * ASSUMES no state copied yet.
1936 *
1937 * @param pVCpu The cross context per CPU structure.
1938 * @param pExitCtx The common exit context.
1939 * @sa nemHCWinCopyStateFromX64Header
1940 */
1941DECLINLINE(void) nemR3WinCopyStateFromX64Header(PVMCPUCC pVCpu, WHV_VP_EXIT_CONTEXT const *pExitCtx)
1942{
1943 Assert( (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
1944 == (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT));
1945 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.cs, pExitCtx->Cs);
1946 pVCpu->cpum.GstCtx.rip = pExitCtx->Rip;
1947 pVCpu->cpum.GstCtx.rflags.u = pExitCtx->Rflags;
1948
1949 pVCpu->nem.s.fLastInterruptShadow = pExitCtx->ExecutionState.InterruptShadow;
1950 if (!pExitCtx->ExecutionState.InterruptShadow)
1951 {
1952 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1953 { /* likely */ }
1954 else
1955 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1956 }
1957 else
1958 EMSetInhibitInterruptsPC(pVCpu, pExitCtx->Rip);
1959
1960 APICSetTpr(pVCpu, pExitCtx->Cr8 << 4);
1961
1962 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_APIC_TPR);
1963}
1964#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
1965
1966
1967#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
1968/**
1969 * Deals with memory intercept message.
1970 *
1971 * @returns Strict VBox status code.
1972 * @param pVM The cross context VM structure.
1973 * @param pVCpu The cross context per CPU structure.
1974 * @param pMsg The message.
1975 * @sa nemR3WinHandleExitMemory
1976 */
1977NEM_TMPL_STATIC VBOXSTRICTRC
1978nemHCWinHandleMessageMemory(PVMCC pVM, PVMCPUCC pVCpu, HV_X64_MEMORY_INTERCEPT_MESSAGE const *pMsg)
1979{
1980 uint64_t const uHostTsc = ASMReadTSC();
1981 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
1982 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
1983 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_EXECUTE);
1984
1985 /*
1986 * Whatever we do, we must clear pending event injection upon resume.
1987 */
1988 if (pMsg->Header.ExecutionState.InterruptionPending)
1989 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
1990
1991# if 0 /* Experiment: 20K -> 34K exit/s. */
1992 if ( pMsg->Header.ExecutionState.EferLma
1993 && pMsg->Header.CsSegment.Long
1994 && pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
1995 {
1996 if ( pMsg->Header.Rip - (uint64_t)0xf65a < (uint64_t)(0xf662 - 0xf65a)
1997 && pMsg->InstructionBytes[0] == 0x89
1998 && pMsg->InstructionBytes[1] == 0x03)
1999 {
2000 pVCpu->cpum.GstCtx.rip = pMsg->Header.Rip + 2;
2001 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
2002 AssertMsg(pMsg->Header.InstructionLength == 2, ("%#x\n", pMsg->Header.InstructionLength));
2003 //Log(("%RX64 msg:\n%.80Rhxd\n", pVCpu->cpum.GstCtx.rip, pMsg));
2004 return VINF_SUCCESS;
2005 }
2006 }
2007# endif
2008
2009 /*
2010 * Ask PGM for information about the given GCPhys. We need to check if we're
2011 * out of sync first.
2012 */
2013 NEMHCWINHMACPCCSTATE State = { pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE, false, false };
2014 PGMPHYSNEMPAGEINFO Info;
2015 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pMsg->GuestPhysicalAddress, State.fWriteAccess, &Info,
2016 nemHCWinHandleMemoryAccessPageCheckerCallback, &State);
2017 if (RT_SUCCESS(rc))
2018 {
2019 if (Info.fNemProt & ( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
2020 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
2021 {
2022 if (State.fCanResume)
2023 {
2024 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting (%s)\n",
2025 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2026 pMsg->GuestPhysicalAddress, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
2027 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
2028 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pMsg->Header.InterceptAccessType]));
2029 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
2030 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, uHostTsc);
2031 return VINF_SUCCESS;
2032 }
2033 }
2034 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating (%s)\n",
2035 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2036 pMsg->GuestPhysicalAddress, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
2037 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
2038 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pMsg->Header.InterceptAccessType]));
2039 }
2040 else
2041 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp rc=%Rrc%s; emulating (%s)\n",
2042 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2043 pMsg->GuestPhysicalAddress, rc, State.fDidSomething ? " modified-backing" : "",
2044 g_apszHvInterceptAccessTypes[pMsg->Header.InterceptAccessType]));
2045
2046 /*
2047 * Emulate the memory access, either access handler or special memory.
2048 */
2049 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2050 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
2051 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
2052 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
2053 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, uHostTsc);
2054 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
2055 VBOXSTRICTRC rcStrict;
2056# ifdef IN_RING0
2057 rcStrict = nemR0WinImportStateStrict(pVM, pVCpu,
2058 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES, "MemExit");
2059 if (rcStrict != VINF_SUCCESS)
2060 return rcStrict;
2061# else
2062 rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2063 AssertRCReturn(rc, rc);
2064# endif
2065
2066 if (pMsg->Reserved1)
2067 Log(("MemExit/Reserved1=%#x\n", pMsg->Reserved1));
2068 if (pMsg->Header.ExecutionState.Reserved0 || pMsg->Header.ExecutionState.Reserved1)
2069 Log(("MemExit/Hdr/State: Reserved0=%#x Reserved1=%#x\n", pMsg->Header.ExecutionState.Reserved0, pMsg->Header.ExecutionState.Reserved1));
2070
2071 if (!pExitRec)
2072 {
2073 //if (pMsg->InstructionByteCount > 0)
2074 // Log4(("InstructionByteCount=%#x %.16Rhxs\n", pMsg->InstructionByteCount, pMsg->InstructionBytes));
2075 if (pMsg->InstructionByteCount > 0)
2076 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pMsg->Header.Rip,
2077 pMsg->InstructionBytes, pMsg->InstructionByteCount);
2078 else
2079 rcStrict = IEMExecOne(pVCpu);
2080 /** @todo do we need to do anything wrt debugging here? */
2081 }
2082 else
2083 {
2084 /* Frequent access or probing. */
2085 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2086 Log4(("MemExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2087 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2088 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2089 }
2090 return rcStrict;
2091}
2092#elif defined(IN_RING3)
2093/**
2094 * Deals with memory access exits (WHvRunVpExitReasonMemoryAccess).
2095 *
2096 * @returns Strict VBox status code.
2097 * @param pVM The cross context VM structure.
2098 * @param pVCpu The cross context per CPU structure.
2099 * @param pExit The VM exit information to handle.
2100 * @sa nemHCWinHandleMessageMemory
2101 */
2102NEM_TMPL_STATIC VBOXSTRICTRC
2103nemR3WinHandleExitMemory(PVMCC pVM, PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2104{
2105 uint64_t const uHostTsc = ASMReadTSC();
2106 Assert(pExit->MemoryAccess.AccessInfo.AccessType != 3);
2107
2108 /*
2109 * Whatever we do, we must clear pending event injection upon resume.
2110 */
2111 if (pExit->VpContext.ExecutionState.InterruptionPending)
2112 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
2113
2114 /*
2115 * Ask PGM for information about the given GCPhys. We need to check if we're
2116 * out of sync first.
2117 */
2118 NEMHCWINHMACPCCSTATE State = { pExit->MemoryAccess.AccessInfo.AccessType == WHvMemoryAccessWrite, false, false };
2119 PGMPHYSNEMPAGEINFO Info;
2120 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pExit->MemoryAccess.Gpa, State.fWriteAccess, &Info,
2121 nemHCWinHandleMemoryAccessPageCheckerCallback, &State);
2122 if (RT_SUCCESS(rc))
2123 {
2124 if (Info.fNemProt & ( pExit->MemoryAccess.AccessInfo.AccessType == WHvMemoryAccessWrite
2125 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
2126 {
2127 if (State.fCanResume)
2128 {
2129 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting (%s)\n",
2130 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2131 pExit->MemoryAccess.Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
2132 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
2133 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pExit->MemoryAccess.AccessInfo.AccessType]));
2134 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
2135 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, uHostTsc);
2136 return VINF_SUCCESS;
2137 }
2138 }
2139 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating (%s)\n",
2140 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2141 pExit->MemoryAccess.Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
2142 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
2143 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pExit->MemoryAccess.AccessInfo.AccessType]));
2144 }
2145 else
2146 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp rc=%Rrc%s; emulating (%s)\n",
2147 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2148 pExit->MemoryAccess.Gpa, rc, State.fDidSomething ? " modified-backing" : "",
2149 g_apszHvInterceptAccessTypes[pExit->MemoryAccess.AccessInfo.AccessType]));
2150
2151 /*
2152 * Emulate the memory access, either access handler or special memory.
2153 */
2154 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2155 pExit->MemoryAccess.AccessInfo.AccessType == WHvMemoryAccessWrite
2156 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
2157 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
2158 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, uHostTsc);
2159 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2160 rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2161 AssertRCReturn(rc, rc);
2162 if (pExit->VpContext.ExecutionState.Reserved0 || pExit->VpContext.ExecutionState.Reserved1)
2163 Log(("MemExit/Hdr/State: Reserved0=%#x Reserved1=%#x\n", pExit->VpContext.ExecutionState.Reserved0, pExit->VpContext.ExecutionState.Reserved1));
2164
2165 VBOXSTRICTRC rcStrict;
2166 if (!pExitRec)
2167 {
2168 //if (pMsg->InstructionByteCount > 0)
2169 // Log4(("InstructionByteCount=%#x %.16Rhxs\n", pMsg->InstructionByteCount, pMsg->InstructionBytes));
2170 if (pExit->MemoryAccess.InstructionByteCount > 0)
2171 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pExit->VpContext.Rip,
2172 pExit->MemoryAccess.InstructionBytes, pExit->MemoryAccess.InstructionByteCount);
2173 else
2174 rcStrict = IEMExecOne(pVCpu);
2175 /** @todo do we need to do anything wrt debugging here? */
2176 }
2177 else
2178 {
2179 /* Frequent access or probing. */
2180 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2181 Log4(("MemExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2182 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2183 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2184 }
2185 return rcStrict;
2186}
2187#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
2188
2189
2190#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
2191/**
2192 * Deals with I/O port intercept message.
2193 *
2194 * @returns Strict VBox status code.
2195 * @param pVM The cross context VM structure.
2196 * @param pVCpu The cross context per CPU structure.
2197 * @param pMsg The message.
2198 */
2199NEM_TMPL_STATIC VBOXSTRICTRC
2200nemHCWinHandleMessageIoPort(PVMCC pVM, PVMCPUCC pVCpu, HV_X64_IO_PORT_INTERCEPT_MESSAGE const *pMsg)
2201{
2202 /*
2203 * Assert message sanity.
2204 */
2205 Assert( pMsg->AccessInfo.AccessSize == 1
2206 || pMsg->AccessInfo.AccessSize == 2
2207 || pMsg->AccessInfo.AccessSize == 4);
2208 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
2209 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE);
2210 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
2211 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRip, pMsg->Header.Rip);
2212 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
2213 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
2214 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRax, pMsg->Rax);
2215 if (pMsg->AccessInfo.StringOp)
2216 {
2217 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, HvX64RegisterDs, pMsg->DsSegment);
2218 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, HvX64RegisterEs, pMsg->EsSegment);
2219 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRcx, pMsg->Rcx);
2220 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRsi, pMsg->Rsi);
2221 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRdi, pMsg->Rdi);
2222 }
2223
2224 /*
2225 * Whatever we do, we must clear pending event injection upon resume.
2226 */
2227 if (pMsg->Header.ExecutionState.InterruptionPending)
2228 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
2229
2230 /*
2231 * Add history first to avoid two paths doing EMHistoryExec calls.
2232 */
2233 VBOXSTRICTRC rcStrict;
2234 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2235 !pMsg->AccessInfo.StringOp
2236 ? ( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
2237 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_WRITE)
2238 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_READ))
2239 : ( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
2240 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_WRITE)
2241 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_READ)),
2242 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
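/* (EMHistoryAddExit returns a non-NULL exit record once this exit PC has been seen
   often enough to be worth probing, which is what routes us to EMHistoryExec below
   instead of the plain IOM/IEM path.) */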
2243 if (!pExitRec)
2244 {
2245 if (!pMsg->AccessInfo.StringOp)
2246 {
2247 /*
2248 * Simple port I/O.
2249 */
2250 static uint32_t const s_fAndMask[8] =
2251 { UINT32_MAX, UINT32_C(0xff), UINT32_C(0xffff), UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX };
2252 uint32_t const fAndMask = s_fAndMask[pMsg->AccessInfo.AccessSize];
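/* AccessSize indexes the table above: 1 -> 0xff (AL), 2 -> 0xffff (AX),
   4 -> UINT32_MAX (EAX); the remaining slots are defensive padding. */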
2253
2254 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
2255 if (pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
2256 {
2257 rcStrict = IOMIOPortWrite(pVM, pVCpu, pMsg->PortNumber, (uint32_t)pMsg->Rax & fAndMask, pMsg->AccessInfo.AccessSize);
2258 Log4(("IOExit/%u: %04x:%08RX64/%s: OUT %#x, %#x LB %u rcStrict=%Rrc\n",
2259 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2260 pMsg->PortNumber, (uint32_t)pMsg->Rax & fAndMask, pMsg->AccessInfo.AccessSize, VBOXSTRICTRC_VAL(rcStrict) ));
2261 if (IOM_SUCCESS(rcStrict))
2262 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, &pMsg->Header, 1);
2263# ifdef IN_RING0
2264 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
2265 && !pVCpu->cpum.GstCtx.rflags.Bits.u1TF
2266 /** @todo check for debug breakpoints */ )
2267 return EMRZSetPendingIoPortWrite(pVCpu, pMsg->PortNumber, pMsg->Header.InstructionLength,
2268 pMsg->AccessInfo.AccessSize, (uint32_t)pMsg->Rax & fAndMask);
2269# endif
2270 else
2271 {
2272 pVCpu->cpum.GstCtx.rax = pMsg->Rax;
2273 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RAX;
2274 }
2275 }
2276 else
2277 {
2278 uint32_t uValue = 0;
2279 rcStrict = IOMIOPortRead(pVM, pVCpu, pMsg->PortNumber, &uValue, pMsg->AccessInfo.AccessSize);
2280 Log4(("IOExit/%u: %04x:%08RX64/%s: IN %#x LB %u -> %#x, rcStrict=%Rrc\n",
2281 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2282 pMsg->PortNumber, pMsg->AccessInfo.AccessSize, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2283 if (IOM_SUCCESS(rcStrict))
2284 {
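/* Architectural merge semantics: 8/16-bit IN only updates AL/AX and preserves
   the rest of RAX, whereas 32-bit IN zero-extends the result into RAX. */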
2285 if (pMsg->AccessInfo.AccessSize != 4)
2286 pVCpu->cpum.GstCtx.rax = (pMsg->Rax & ~(uint64_t)fAndMask) | (uValue & fAndMask);
2287 else
2288 pVCpu->cpum.GstCtx.rax = uValue;
2289 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RAX;
2290 Log4(("IOExit/%u: RAX %#RX64 -> %#RX64\n", pVCpu->idCpu, pMsg->Rax, pVCpu->cpum.GstCtx.rax));
2291 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, &pMsg->Header, 1);
2292 }
2293 else
2294 {
2295 pVCpu->cpum.GstCtx.rax = pMsg->Rax;
2296 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RAX;
2297# ifdef IN_RING0
2298 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
2299 && !pVCpu->cpum.GstCtx.rflags.Bits.u1TF
2300 /** @todo check for debug breakpoints */ )
2301 return EMRZSetPendingIoPortRead(pVCpu, pMsg->PortNumber, pMsg->Header.InstructionLength,
2302 pMsg->AccessInfo.AccessSize);
2303# endif
2304 }
2305 }
2306 }
2307 else
2308 {
2309 /*
2310 * String port I/O.
2311 */
2312 /** @todo Someone at Microsoft please explain how we can get the address mode
2313 * from the IoPortAccess.VpContext. CS.Attributes is only sufficient for
2314 * getting the default mode, it can always be overridden by a prefix. This
2315 * forces us to interpret the instruction from opcodes, which is suboptimal.
2316 * Both AMD-V and VT-x include the address size in the exit info, at least on
2317 * CPUs that are reasonably new.
2318 *
2319 * Of course, it's possible this is undocumented and we just need to do some
2320 * experiments to figure out how it's communicated. Alternatively, we can scan
2321 * the opcode bytes for possible evil prefixes.
2322 */
2323 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
2324 pVCpu->cpum.GstCtx.fExtrn &= ~( CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDI | CPUMCTX_EXTRN_RSI
2325 | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2326 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.ds, pMsg->DsSegment);
2327 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.es, pMsg->EsSegment);
2328 pVCpu->cpum.GstCtx.rax = pMsg->Rax;
2329 pVCpu->cpum.GstCtx.rcx = pMsg->Rcx;
2330 pVCpu->cpum.GstCtx.rdi = pMsg->Rdi;
2331 pVCpu->cpum.GstCtx.rsi = pMsg->Rsi;
2332# ifdef IN_RING0
2333 rcStrict = nemR0WinImportStateStrict(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "IOExit");
2334 if (rcStrict != VINF_SUCCESS)
2335 return rcStrict;
2336# else
2337 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2338 AssertRCReturn(rc, rc);
2339# endif
2340
2341 Log4(("IOExit/%u: %04x:%08RX64/%s: %s%s %#x LB %u (emulating)\n",
2342 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2343 pMsg->AccessInfo.RepPrefix ? "REP " : "",
2344 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? "OUTS" : "INS",
2345 pMsg->PortNumber, pMsg->AccessInfo.AccessSize ));
2346 rcStrict = IEMExecOne(pVCpu);
2347 }
2348 if (IOM_SUCCESS(rcStrict))
2349 {
2350 /*
2351 * Do debug checks.
2352 */
2353 if ( pMsg->Header.ExecutionState.DebugActive /** @todo Microsoft: Does DebugActive only reflect DR7? */
2354 || (pMsg->Header.Rflags & X86_EFL_TF)
2355 || DBGFBpIsHwIoArmed(pVM) )
2356 {
2357 /** @todo Debugging. */
2358 }
2359 }
2360 return rcStrict;
2361 }
2362
2363 /*
2364 * Frequent exit or something needing probing.
2365 * Get state and call EMHistoryExec.
2366 */
2367 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
2368 if (!pMsg->AccessInfo.StringOp)
2369 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RAX;
2370 else
2371 {
2372 pVCpu->cpum.GstCtx.fExtrn &= ~( CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDI | CPUMCTX_EXTRN_RSI
2373 | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2374 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.ds, pMsg->DsSegment);
2375 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.es, pMsg->EsSegment);
2376 pVCpu->cpum.GstCtx.rcx = pMsg->Rcx;
2377 pVCpu->cpum.GstCtx.rdi = pMsg->Rdi;
2378 pVCpu->cpum.GstCtx.rsi = pMsg->Rsi;
2379 }
2380 pVCpu->cpum.GstCtx.rax = pMsg->Rax;
2381
2382# ifdef IN_RING0
2383 rcStrict = nemR0WinImportStateStrict(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "IOExit");
2384 if (rcStrict != VINF_SUCCESS)
2385 return rcStrict;
2386# else
2387 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2388 AssertRCReturn(rc, rc);
2389# endif
2390
2391 Log4(("IOExit/%u: %04x:%08RX64/%s: %s%s%s %#x LB %u -> EMHistoryExec\n",
2392 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2393 pMsg->AccessInfo.RepPrefix ? "REP " : "",
2394 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? "OUT" : "IN",
2395 pMsg->AccessInfo.StringOp ? "S" : "",
2396 pMsg->PortNumber, pMsg->AccessInfo.AccessSize));
2397 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2398 Log4(("IOExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2399 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2400 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2401 return rcStrict;
2402}
2403#elif defined(IN_RING3)
2404/**
2405 * Deals with I/O port access exits (WHvRunVpExitReasonX64IoPortAccess).
2406 *
2407 * @returns Strict VBox status code.
2408 * @param pVM The cross context VM structure.
2409 * @param pVCpu The cross context per CPU structure.
2410 * @param pExit The VM exit information to handle.
2411 * @sa nemHCWinHandleMessageIoPort
2412 */
2413NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExitIoPort(PVMCC pVM, PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2414{
2415 Assert( pExit->IoPortAccess.AccessInfo.AccessSize == 1
2416 || pExit->IoPortAccess.AccessInfo.AccessSize == 2
2417 || pExit->IoPortAccess.AccessInfo.AccessSize == 4);
2418
2419 /*
2420 * Whatever we do, we must clear pending event injection upon resume.
2421 */
2422 if (pExit->VpContext.ExecutionState.InterruptionPending)
2423 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
2424
2425 /*
2426 * Add history first to avoid two paths doing EMHistoryExec calls.
2427 */
2428 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2429 !pExit->IoPortAccess.AccessInfo.StringOp
2430 ? ( pExit->IoPortAccess.AccessInfo.IsWrite
2431 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_WRITE)
2432 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_READ))
2433 : ( pExit->IoPortAccess.AccessInfo.IsWrite
2434 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_WRITE)
2435 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_READ)),
2436 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
2437 if (!pExitRec)
2438 {
2439 VBOXSTRICTRC rcStrict;
2440 if (!pExit->IoPortAccess.AccessInfo.StringOp)
2441 {
2442 /*
2443 * Simple port I/O.
2444 */
2445 static uint32_t const s_fAndMask[8] =
2446 { UINT32_MAX, UINT32_C(0xff), UINT32_C(0xffff), UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX };
2447 uint32_t const fAndMask = s_fAndMask[pExit->IoPortAccess.AccessInfo.AccessSize];
2448 if (pExit->IoPortAccess.AccessInfo.IsWrite)
2449 {
2450 rcStrict = IOMIOPortWrite(pVM, pVCpu, pExit->IoPortAccess.PortNumber,
2451 (uint32_t)pExit->IoPortAccess.Rax & fAndMask,
2452 pExit->IoPortAccess.AccessInfo.AccessSize);
2453 Log4(("IOExit/%u: %04x:%08RX64/%s: OUT %#x, %#x LB %u rcStrict=%Rrc\n",
2454 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2455 pExit->IoPortAccess.PortNumber, (uint32_t)pExit->IoPortAccess.Rax & fAndMask,
2456 pExit->IoPortAccess.AccessInfo.AccessSize, VBOXSTRICTRC_VAL(rcStrict) ));
2457 if (IOM_SUCCESS(rcStrict))
2458 {
2459 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2460 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, &pExit->VpContext, 1);
2461 }
2462 }
2463 else
2464 {
2465 uint32_t uValue = 0;
2466 rcStrict = IOMIOPortRead(pVM, pVCpu, pExit->IoPortAccess.PortNumber, &uValue,
2467 pExit->IoPortAccess.AccessInfo.AccessSize);
2468 Log4(("IOExit/%u: %04x:%08RX64/%s: IN %#x LB %u -> %#x, rcStrict=%Rrc\n",
2469 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2470 pExit->IoPortAccess.PortNumber, pExit->IoPortAccess.AccessInfo.AccessSize, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2471 if (IOM_SUCCESS(rcStrict))
2472 {
2473 if (pExit->IoPortAccess.AccessInfo.AccessSize != 4)
2474 pVCpu->cpum.GstCtx.rax = (pExit->IoPortAccess.Rax & ~(uint64_t)fAndMask) | (uValue & fAndMask);
2475 else
2476 pVCpu->cpum.GstCtx.rax = uValue;
2477 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RAX;
2478 Log4(("IOExit/%u: RAX %#RX64 -> %#RX64\n", pVCpu->idCpu, pExit->IoPortAccess.Rax, pVCpu->cpum.GstCtx.rax));
2479 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2480 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, &pExit->VpContext, 1);
2481 }
2482 }
2483 }
2484 else
2485 {
2486 /*
2487 * String port I/O.
2488 */
2489 /** @todo Someone at Microsoft please explain how we can get the address mode
2490 * from the IoPortAccess.VpContext. CS.Attributes is only sufficient for
2491 * getting the default mode, it can always be overridden by a prefix. This
2492 * forces us to interpret the instruction from opcodes, which is suboptimal.
2493 * Both AMD-V and VT-x include the address size in the exit info, at least on
2494 * CPUs that are reasonably new.
2495 *
2496 * Of course, it's possible this is undocumented and we just need to do some
2497 * experiments to figure out how it's communicated. Alternatively, we can scan
2498 * the opcode bytes for possible evil prefixes.
2499 */
2500 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2501 pVCpu->cpum.GstCtx.fExtrn &= ~( CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDI | CPUMCTX_EXTRN_RSI
2502 | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2503 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.ds, pExit->IoPortAccess.Ds);
2504 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.es, pExit->IoPortAccess.Es);
2505 pVCpu->cpum.GstCtx.rax = pExit->IoPortAccess.Rax;
2506 pVCpu->cpum.GstCtx.rcx = pExit->IoPortAccess.Rcx;
2507 pVCpu->cpum.GstCtx.rdi = pExit->IoPortAccess.Rdi;
2508 pVCpu->cpum.GstCtx.rsi = pExit->IoPortAccess.Rsi;
2509 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2510 AssertRCReturn(rc, rc);
2511
2512 Log4(("IOExit/%u: %04x:%08RX64/%s: %s%s %#x LB %u (emulating)\n",
2513 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2514 pExit->IoPortAccess.AccessInfo.RepPrefix ? "REP " : "",
2515 pExit->IoPortAccess.AccessInfo.IsWrite ? "OUTS" : "INS",
2516 pExit->IoPortAccess.PortNumber, pExit->IoPortAccess.AccessInfo.AccessSize ));
2517 rcStrict = IEMExecOne(pVCpu);
2518 }
2519 if (IOM_SUCCESS(rcStrict))
2520 {
2521 /*
2522 * Do debug checks.
2523 */
2524 if ( pExit->VpContext.ExecutionState.DebugActive /** @todo Microsoft: Does DebugActive only reflect DR7? */
2525 || (pExit->VpContext.Rflags & X86_EFL_TF)
2526 || DBGFBpIsHwIoArmed(pVM) )
2527 {
2528 /** @todo Debugging. */
2529 }
2530 }
2531 return rcStrict;
2532 }
2533
2534 /*
2535 * Frequent exit or something needing probing.
2536 * Get state and call EMHistoryExec.
2537 */
2538 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2539 if (!pExit->IoPortAccess.AccessInfo.StringOp)
2540 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RAX;
2541 else
2542 {
2543 pVCpu->cpum.GstCtx.fExtrn &= ~( CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDI | CPUMCTX_EXTRN_RSI
2544 | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2545 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.ds, pExit->IoPortAccess.Ds);
2546 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.es, pExit->IoPortAccess.Es);
2547 pVCpu->cpum.GstCtx.rcx = pExit->IoPortAccess.Rcx;
2548 pVCpu->cpum.GstCtx.rdi = pExit->IoPortAccess.Rdi;
2549 pVCpu->cpum.GstCtx.rsi = pExit->IoPortAccess.Rsi;
2550 }
2551 pVCpu->cpum.GstCtx.rax = pExit->IoPortAccess.Rax;
2552 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2553 AssertRCReturn(rc, rc);
2554 Log4(("IOExit/%u: %04x:%08RX64/%s: %s%s%s %#x LB %u -> EMHistoryExec\n",
2555 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2556 pExit->IoPortAccess.AccessInfo.RepPrefix ? "REP " : "",
2557 pExit->IoPortAccess.AccessInfo.IsWrite ? "OUT" : "IN",
2558 pExit->IoPortAccess.AccessInfo.StringOp ? "S" : "",
2559 pExit->IoPortAccess.PortNumber, pExit->IoPortAccess.AccessInfo.AccessSize));
2560 VBOXSTRICTRC rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2561 Log4(("IOExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2562 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2563 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2564 return rcStrict;
2565}
2566#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */


#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
/**
 * Deals with interrupt window message.
 *
 * @returns Strict VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context per CPU structure.
 * @param   pMsg            The message.
 * @sa      nemR3WinHandleExitInterruptWindow
 */
NEM_TMPL_STATIC VBOXSTRICTRC
nemHCWinHandleMessageInterruptWindow(PVMCC pVM, PVMCPUCC pVCpu, HV_X64_INTERRUPT_WINDOW_MESSAGE const *pMsg)
{
    /*
     * Assert message sanity.
     */
    Assert(   pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_EXECUTE
           || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ   // READ & WRITE are probably not used here
           || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE);
    AssertMsg(pMsg->Type == HvX64PendingInterrupt || pMsg->Type == HvX64PendingNmi, ("%#x\n", pMsg->Type));

    /*
     * Just copy the state we've got and handle it in the loop for now.
     */
    EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_INTTERRUPT_WINDOW),
                     pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());

    nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
    Log4(("IntWinExit/%u: %04x:%08RX64/%s: %u IF=%d InterruptShadow=%d\n",
          pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
          pMsg->Type, RT_BOOL(pMsg->Header.Rflags & X86_EFL_IF), pMsg->Header.ExecutionState.InterruptShadow));

    /** @todo call nemHCWinHandleInterruptFF */
    RT_NOREF(pVM);
    return VINF_SUCCESS;
}
#elif defined(IN_RING3)
/**
 * Deals with interrupt window exits (WHvRunVpExitReasonX64InterruptWindow).
 *
 * @returns Strict VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context per CPU structure.
 * @param   pExit           The VM exit information to handle.
 * @sa      nemHCWinHandleMessageInterruptWindow
 */
NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExitInterruptWindow(PVMCC pVM, PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
{
    /*
     * Assert message sanity.
     */
    AssertMsg(   pExit->InterruptWindow.DeliverableType == WHvX64PendingInterrupt
              || pExit->InterruptWindow.DeliverableType == WHvX64PendingNmi,
              ("%#x\n", pExit->InterruptWindow.DeliverableType));

    /*
     * Just copy the state we've got and handle it in the loop for now.
     */
    EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_INTTERRUPT_WINDOW),
                     pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());

    nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
    Log4(("IntWinExit/%u: %04x:%08RX64/%s: %u IF=%d InterruptShadow=%d CR8=%#x\n",
          pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
          pExit->InterruptWindow.DeliverableType, RT_BOOL(pExit->VpContext.Rflags & X86_EFL_IF),
          pExit->VpContext.ExecutionState.InterruptShadow, pExit->VpContext.Cr8));

    /** @todo call nemHCWinHandleInterruptFF */
    RT_NOREF(pVM);
    return VINF_SUCCESS;
}
#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */


#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
/**
 * Deals with CPUID intercept message.
 *
 * @returns Strict VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context per CPU structure.
 * @param   pMsg            The message.
 * @sa      nemR3WinHandleExitCpuId
 */
NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageCpuId(PVMCC pVM, PVMCPUCC pVCpu, HV_X64_CPUID_INTERCEPT_MESSAGE const *pMsg)
{
    /* Check message register value sanity. */
    NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRip, pMsg->Header.Rip);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRax, pMsg->Rax);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRcx, pMsg->Rcx);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRdx, pMsg->Rdx);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRbx, pMsg->Rbx);

    /* Do exit history. */
    PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_CPUID),
                                            pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
    if (!pExitRec)
    {
        /*
         * Soak up state and execute the instruction.
         *
         * Note! If this grows slightly more complicated, combine into an IEMExecDecodedCpuId
         *       function and make everyone use it.
         */
        /** @todo Combine implementations into IEMExecDecodedCpuId as this will
         *        only get weirder with nested VT-x and AMD-V support. */
        nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);

        /* Copy in the low register values (top is always cleared). */
        pVCpu->cpum.GstCtx.rax = (uint32_t)pMsg->Rax;
        pVCpu->cpum.GstCtx.rcx = (uint32_t)pMsg->Rcx;
        pVCpu->cpum.GstCtx.rdx = (uint32_t)pMsg->Rdx;
        pVCpu->cpum.GstCtx.rbx = (uint32_t)pMsg->Rbx;
        pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);

        /* Get the correct values. */
        CPUMGetGuestCpuId(pVCpu, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx,
                          &pVCpu->cpum.GstCtx.eax, &pVCpu->cpum.GstCtx.ebx, &pVCpu->cpum.GstCtx.ecx, &pVCpu->cpum.GstCtx.edx);

        Log4(("CpuIdExit/%u: %04x:%08RX64/%s: rax=%08RX64 / rcx=%08RX64 / rdx=%08RX64 / rbx=%08RX64 -> %08RX32 / %08RX32 / %08RX32 / %08RX32 (hv: %08RX64 / %08RX64 / %08RX64 / %08RX64)\n",
              pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
              pMsg->Rax, pMsg->Rcx, pMsg->Rdx, pMsg->Rbx,
              pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.ebx,
              pMsg->DefaultResultRax, pMsg->DefaultResultRcx, pMsg->DefaultResultRdx, pMsg->DefaultResultRbx));

        /* Move RIP and we're done. */
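        /* (CPUID is a fixed 2-byte instruction, 0F A2, hence the constant advance.) */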
        nemHCWinAdvanceGuestRipAndClearRF(pVCpu, &pMsg->Header, 2);

        return VINF_SUCCESS;
    }

    /*
     * Frequent exit or something needing probing.
     * Get state and call EMHistoryExec.
     */
    nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
    pVCpu->cpum.GstCtx.rax = pMsg->Rax;
    pVCpu->cpum.GstCtx.rcx = pMsg->Rcx;
    pVCpu->cpum.GstCtx.rdx = pMsg->Rdx;
    pVCpu->cpum.GstCtx.rbx = pMsg->Rbx;
    pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);
    Log4(("CpuIdExit/%u: %04x:%08RX64/%s: rax=%08RX64 / rcx=%08RX64 / rdx=%08RX64 / rbx=%08RX64 (hv: %08RX64 / %08RX64 / %08RX64 / %08RX64) ==> EMHistoryExec\n",
          pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
          pMsg->Rax, pMsg->Rcx, pMsg->Rdx, pMsg->Rbx,
          pMsg->DefaultResultRax, pMsg->DefaultResultRcx, pMsg->DefaultResultRdx, pMsg->DefaultResultRbx));
# ifdef IN_RING0
    VBOXSTRICTRC rcStrict = nemR0WinImportStateStrict(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "CpuIdExit");
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    RT_NOREF(pVM);
# else
    int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
    AssertRCReturn(rc, rc);
# endif
    VBOXSTRICTRC rcStrictExec = EMHistoryExec(pVCpu, pExitRec, 0);
    Log4(("CpuIdExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
          pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
          VBOXSTRICTRC_VAL(rcStrictExec), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
    return rcStrictExec;
}
#elif defined(IN_RING3)
/**
 * Deals with CPUID exits (WHvRunVpExitReasonX64Cpuid).
 *
 * @returns Strict VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context per CPU structure.
 * @param   pExit           The VM exit information to handle.
 * @sa      nemHCWinHandleMessageCpuId
 */
NEM_TMPL_STATIC VBOXSTRICTRC
nemR3WinHandleExitCpuId(PVMCC pVM, PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
{
    PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_CPUID),
                                            pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
    if (!pExitRec)
    {
        /*
         * Soak up state and execute the instruction.
         *
         * Note! If this grows slightly more complicated, combine into an IEMExecDecodedCpuId
         *       function and make everyone use it.
         */
        /** @todo Combine implementations into IEMExecDecodedCpuId as this will
         *        only get weirder with nested VT-x and AMD-V support. */
        nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);

        /* Copy in the low register values (top is always cleared). */
        pVCpu->cpum.GstCtx.rax = (uint32_t)pExit->CpuidAccess.Rax;
        pVCpu->cpum.GstCtx.rcx = (uint32_t)pExit->CpuidAccess.Rcx;
        pVCpu->cpum.GstCtx.rdx = (uint32_t)pExit->CpuidAccess.Rdx;
        pVCpu->cpum.GstCtx.rbx = (uint32_t)pExit->CpuidAccess.Rbx;
        pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);

        /* Get the correct values. */
        CPUMGetGuestCpuId(pVCpu, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx,
                          &pVCpu->cpum.GstCtx.eax, &pVCpu->cpum.GstCtx.ebx, &pVCpu->cpum.GstCtx.ecx, &pVCpu->cpum.GstCtx.edx);

        Log4(("CpuIdExit/%u: %04x:%08RX64/%s: rax=%08RX64 / rcx=%08RX64 / rdx=%08RX64 / rbx=%08RX64 -> %08RX32 / %08RX32 / %08RX32 / %08RX32 (hv: %08RX64 / %08RX64 / %08RX64 / %08RX64)\n",
              pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
              pExit->CpuidAccess.Rax, pExit->CpuidAccess.Rcx, pExit->CpuidAccess.Rdx, pExit->CpuidAccess.Rbx,
              pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.ebx,
              pExit->CpuidAccess.DefaultResultRax, pExit->CpuidAccess.DefaultResultRcx, pExit->CpuidAccess.DefaultResultRdx, pExit->CpuidAccess.DefaultResultRbx));

        /* Move RIP and we're done. */
        nemR3WinAdvanceGuestRipAndClearRF(pVCpu, &pExit->VpContext, 2);

        RT_NOREF_PV(pVM);
        return VINF_SUCCESS;
    }

    /*
     * Frequent exit or something needing probing.
     * Get state and call EMHistoryExec.
     */
    nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
    pVCpu->cpum.GstCtx.rax = pExit->CpuidAccess.Rax;
    pVCpu->cpum.GstCtx.rcx = pExit->CpuidAccess.Rcx;
    pVCpu->cpum.GstCtx.rdx = pExit->CpuidAccess.Rdx;
    pVCpu->cpum.GstCtx.rbx = pExit->CpuidAccess.Rbx;
    pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);
    Log4(("CpuIdExit/%u: %04x:%08RX64/%s: rax=%08RX64 / rcx=%08RX64 / rdx=%08RX64 / rbx=%08RX64 (hv: %08RX64 / %08RX64 / %08RX64 / %08RX64) ==> EMHistoryExec\n",
          pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
          pExit->CpuidAccess.Rax, pExit->CpuidAccess.Rcx, pExit->CpuidAccess.Rdx, pExit->CpuidAccess.Rbx,
          pExit->CpuidAccess.DefaultResultRax, pExit->CpuidAccess.DefaultResultRcx, pExit->CpuidAccess.DefaultResultRdx, pExit->CpuidAccess.DefaultResultRbx));
    int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
    AssertRCReturn(rc, rc);
    VBOXSTRICTRC rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
    Log4(("CpuIdExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
          pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
          VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
    return rcStrict;
}
#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */


#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
/**
 * Deals with MSR intercept message.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context per CPU structure.
 * @param   pMsg            The message.
 * @sa      nemR3WinHandleExitMsr
 */
NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageMsr(PVMCPUCC pVCpu, HV_X64_MSR_INTERCEPT_MESSAGE const *pMsg)
{
    /*
     * A wee bit of sanity first.
     */
    Assert(   pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
           || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE);
    NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRip, pMsg->Header.Rip);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRax, pMsg->Rax);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRdx, pMsg->Rdx);

    /*
     * Check CPL as that's common to both RDMSR and WRMSR.
     */
    VBOXSTRICTRC rcStrict;
    if (pMsg->Header.ExecutionState.Cpl == 0)
    {
        /*
         * Get all the MSR state.  Since we're getting EFER, we also need to
         * get CR0, CR4 and CR3.
         */
        PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
                                                pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
                                                ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE)
                                                : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ),
                                                pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());

        nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
        rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu,
                                                     (!pExitRec ? 0 : IEM_CPUMCTX_EXTRN_MUST_MASK)
                                                     | CPUMCTX_EXTRN_ALL_MSRS | CPUMCTX_EXTRN_CR0
                                                     | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4,
                                                     "MSRs");
        if (rcStrict == VINF_SUCCESS)
        {
            if (!pExitRec)
            {
                /*
                 * Handle writes.
                 */
                if (pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
                {
                    rcStrict = CPUMSetGuestMsr(pVCpu, pMsg->MsrNumber, RT_MAKE_U64((uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx));
                    Log4(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc\n",
                          pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
                          pMsg->MsrNumber, (uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx, VBOXSTRICTRC_VAL(rcStrict) ));
                    if (rcStrict == VINF_SUCCESS)
                    {
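                        /* (WRMSR is a 2-byte instruction, 0F 30; RDMSR is 0F 32.) */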
                        nemHCWinAdvanceGuestRipAndClearRF(pVCpu, &pMsg->Header, 2);
                        return VINF_SUCCESS;
                    }
# ifndef IN_RING3
                    /* move to ring-3 and handle the trap/whatever there, as we want to LogRel this. */
                    if (rcStrict == VERR_CPUM_RAISE_GP_0)
                        rcStrict = VINF_CPUM_R3_MSR_WRITE;
                    return rcStrict;
# else
                    LogRel(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc!\n",
                            pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
                            pMsg->MsrNumber, (uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx, VBOXSTRICTRC_VAL(rcStrict) ));
# endif
                }
                /*
                 * Handle reads.
                 */
                else
                {
                    uint64_t uValue = 0;
                    rcStrict = CPUMQueryGuestMsr(pVCpu, pMsg->MsrNumber, &uValue);
                    Log4(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n",
                          pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
                          pMsg->MsrNumber, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
                    if (rcStrict == VINF_SUCCESS)
                    {
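                        /* (RDMSR returns the value in EDX:EAX; the 32-bit assignments zero the high halves.) */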
                        pVCpu->cpum.GstCtx.rax = (uint32_t)uValue;
                        pVCpu->cpum.GstCtx.rdx = uValue >> 32;
                        pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX);
                        nemHCWinAdvanceGuestRipAndClearRF(pVCpu, &pMsg->Header, 2);
                        return VINF_SUCCESS;
                    }
# ifndef IN_RING3
                    /* move to ring-3 and handle the trap/whatever there, as we want to LogRel this. */
                    if (rcStrict == VERR_CPUM_RAISE_GP_0)
                        rcStrict = VINF_CPUM_R3_MSR_READ;
                    return rcStrict;
# else
                    LogRel(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n",
                            pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
                            pMsg->MsrNumber, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
# endif
                }
            }
            else
            {
                /*
                 * Handle frequent exit or something needing probing.
                 */
                Log4(("MsrExit/%u: %04x:%08RX64/%s: %sMSR %#08x\n",
                      pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
                      pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? "WR" : "RD", pMsg->MsrNumber));
                rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
                Log4(("MsrExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
                      pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
                      VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
                return rcStrict;
            }
        }
        else
        {
            LogRel(("MsrExit/%u: %04x:%08RX64/%s: %sMSR %08x -> %Rrc - msr state import\n",
                    pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
                    pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? "WR" : "RD",
                    pMsg->MsrNumber, VBOXSTRICTRC_VAL(rcStrict) ));
            return rcStrict;
        }
    }
    else if (pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
        Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); WRMSR %08x, %08x:%08x\n",
              pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
              pMsg->Header.ExecutionState.Cpl, pMsg->MsrNumber, (uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx ));
    else
        Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); RDMSR %08x\n",
              pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
              pMsg->Header.ExecutionState.Cpl, pMsg->MsrNumber));

    /*
     * If we get down here, we're supposed to #GP(0).
     */
    rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL_MSRS, "MSR");
    if (rcStrict == VINF_SUCCESS)
    {
        rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_GP, TRPM_TRAP, 0, 0, 0);
        if (rcStrict == VINF_IEM_RAISED_XCPT)
            rcStrict = VINF_SUCCESS;
        else if (rcStrict != VINF_SUCCESS)
            Log4(("MsrExit/%u: Injecting #GP(0) failed: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict) ));
    }
    return rcStrict;
}
#elif defined(IN_RING3)
/**
 * Deals with MSR access exits (WHvRunVpExitReasonX64MsrAccess).
 *
 * @returns Strict VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context per CPU structure.
 * @param   pExit           The VM exit information to handle.
 * @sa      nemHCWinHandleMessageMsr
 */
NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExitMsr(PVMCC pVM, PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
{
    /*
     * Check CPL as that's common to both RDMSR and WRMSR.
     */
    VBOXSTRICTRC rcStrict;
    if (pExit->VpContext.ExecutionState.Cpl == 0)
    {
        /*
         * Get all the MSR state.  Since we're getting EFER, we also need to
         * get CR0, CR4 and CR3.
         */
        PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
                                                pExit->MsrAccess.AccessInfo.IsWrite
                                                ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE)
                                                : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ),
                                                pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
        nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
        rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu,
                                                     (!pExitRec ? 0 : IEM_CPUMCTX_EXTRN_MUST_MASK)
                                                     | CPUMCTX_EXTRN_ALL_MSRS | CPUMCTX_EXTRN_CR0
                                                     | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4,
                                                     "MSRs");
        if (rcStrict == VINF_SUCCESS)
        {
            if (!pExitRec)
            {
                /*
                 * Handle writes.
                 */
                if (pExit->MsrAccess.AccessInfo.IsWrite)
                {
                    rcStrict = CPUMSetGuestMsr(pVCpu, pExit->MsrAccess.MsrNumber,
                                               RT_MAKE_U64((uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx));
                    Log4(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
                          pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->MsrAccess.MsrNumber,
                          (uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx, VBOXSTRICTRC_VAL(rcStrict) ));
                    if (rcStrict == VINF_SUCCESS)
                    {
                        nemR3WinAdvanceGuestRipAndClearRF(pVCpu, &pExit->VpContext, 2);
                        return VINF_SUCCESS;
                    }
                    LogRel(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc!\n", pVCpu->idCpu,
                            pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
                            pExit->MsrAccess.MsrNumber, (uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx,
                            VBOXSTRICTRC_VAL(rcStrict) ));
                }
                /*
                 * Handle reads.
                 */
                else
                {
                    uint64_t uValue = 0;
                    rcStrict = CPUMQueryGuestMsr(pVCpu, pExit->MsrAccess.MsrNumber, &uValue);
                    Log4(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n", pVCpu->idCpu,
                          pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
                          pExit->MsrAccess.MsrNumber, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
                    if (rcStrict == VINF_SUCCESS)
                    {
                        pVCpu->cpum.GstCtx.rax = (uint32_t)uValue;
                        pVCpu->cpum.GstCtx.rdx = uValue >> 32;
                        pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX);
                        nemR3WinAdvanceGuestRipAndClearRF(pVCpu, &pExit->VpContext, 2);
                        return VINF_SUCCESS;
                    }
                    LogRel(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
                            pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->MsrAccess.MsrNumber,
                            uValue, VBOXSTRICTRC_VAL(rcStrict) ));
                }
            }
            else
            {
                /*
                 * Handle frequent exit or something needing probing.
                 */
                Log4(("MsrExit/%u: %04x:%08RX64/%s: %sMSR %#08x\n",
                      pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
                      pExit->MsrAccess.AccessInfo.IsWrite ? "WR" : "RD", pExit->MsrAccess.MsrNumber));
                rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
                Log4(("MsrExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
                      pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
                      VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
                return rcStrict;
            }
        }
        else
        {
            LogRel(("MsrExit/%u: %04x:%08RX64/%s: %sMSR %08x -> %Rrc - msr state import\n",
                    pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
                    pExit->MsrAccess.AccessInfo.IsWrite ? "WR" : "RD", pExit->MsrAccess.MsrNumber, VBOXSTRICTRC_VAL(rcStrict) ));
            return rcStrict;
        }
    }
    else if (pExit->MsrAccess.AccessInfo.IsWrite)
        Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); WRMSR %08x, %08x:%08x\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
              pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.ExecutionState.Cpl,
              pExit->MsrAccess.MsrNumber, (uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx ));
    else
        Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); RDMSR %08x\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
              pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.ExecutionState.Cpl,
              pExit->MsrAccess.MsrNumber));

    /*
     * If we get down here, we're supposed to #GP(0).
     */
    rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL_MSRS, "MSR");
    if (rcStrict == VINF_SUCCESS)
    {
        rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_GP, TRPM_TRAP, 0, 0, 0);
        if (rcStrict == VINF_IEM_RAISED_XCPT)
            rcStrict = VINF_SUCCESS;
        else if (rcStrict != VINF_SUCCESS)
            Log4(("MsrExit/%u: Injecting #GP(0) failed: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict) ));
    }

    RT_NOREF_PV(pVM);
    return rcStrict;
}
#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */


/**
 * Worker for nemHCWinHandleMessageException & nemR3WinHandleExitException that
 * checks if the given opcodes are of interest at all.
 *
 * @returns true if interesting, false if not.
 * @param   cbOpcodes           Number of opcode bytes available.
 * @param   pbOpcodes           The opcode bytes.
 * @param   f64BitMode          Whether we're in 64-bit mode.
 */
DECLINLINE(bool) nemHcWinIsInterestingUndefinedOpcode(uint8_t cbOpcodes, uint8_t const *pbOpcodes, bool f64BitMode)
{
    /*
     * Currently only interested in VMCALL and VMMCALL.
     */
    while (cbOpcodes >= 3)
    {
        switch (pbOpcodes[0])
        {
            case 0x0f:
                switch (pbOpcodes[1])
                {
                    case 0x01:
                        switch (pbOpcodes[2])
                        {
                            case 0xc1: /* 0f 01 c1  VMCALL */
                                return true;
                            case 0xd9: /* 0f 01 d9  VMMCALL */
                                return true;
                            default:
                                break;
                        }
                        break;
                }
                break;

            default:
                return false;

            /* prefixes */
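            /* (REX prefixes, 0x40..0x4f, only act as prefixes in 64-bit mode; elsewhere they encode INC/DEC.) */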
            case 0x40: case 0x41: case 0x42: case 0x43: case 0x44: case 0x45: case 0x46: case 0x47:
            case 0x48: case 0x49: case 0x4a: case 0x4b: case 0x4c: case 0x4d: case 0x4e: case 0x4f:
                if (!f64BitMode)
                    return false;
                RT_FALL_THRU();
            case X86_OP_PRF_CS:
            case X86_OP_PRF_SS:
            case X86_OP_PRF_DS:
            case X86_OP_PRF_ES:
            case X86_OP_PRF_FS:
            case X86_OP_PRF_GS:
            case X86_OP_PRF_SIZE_OP:
            case X86_OP_PRF_SIZE_ADDR:
            case X86_OP_PRF_LOCK:
            case X86_OP_PRF_REPZ:
            case X86_OP_PRF_REPNZ:
                cbOpcodes--;
                pbOpcodes++;
                continue;
        }
        break;
    }
    return false;
}


#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
3156/**
3157 * Copies state included in a exception intercept message.
3158 *
3159 * @param pVCpu The cross context per CPU structure.
3160 * @param pMsg The message.
3161 * @param fClearXcpt Clear pending exception.
3162 */
DECLINLINE(void)
nemHCWinCopyStateFromExceptionMessage(PVMCPUCC pVCpu, HV_X64_EXCEPTION_INTERCEPT_MESSAGE const *pMsg, bool fClearXcpt)
{
    nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
    pVCpu->cpum.GstCtx.fExtrn &= ~(  CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_DS
                                   | (fClearXcpt ? CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT : 0) );
    pVCpu->cpum.GstCtx.rax = pMsg->Rax;
    pVCpu->cpum.GstCtx.rcx = pMsg->Rcx;
    pVCpu->cpum.GstCtx.rdx = pMsg->Rdx;
    pVCpu->cpum.GstCtx.rbx = pMsg->Rbx;
    pVCpu->cpum.GstCtx.rsp = pMsg->Rsp;
    pVCpu->cpum.GstCtx.rbp = pMsg->Rbp;
    pVCpu->cpum.GstCtx.rsi = pMsg->Rsi;
    pVCpu->cpum.GstCtx.rdi = pMsg->Rdi;
    pVCpu->cpum.GstCtx.r8  = pMsg->R8;
    pVCpu->cpum.GstCtx.r9  = pMsg->R9;
    pVCpu->cpum.GstCtx.r10 = pMsg->R10;
    pVCpu->cpum.GstCtx.r11 = pMsg->R11;
    pVCpu->cpum.GstCtx.r12 = pMsg->R12;
    pVCpu->cpum.GstCtx.r13 = pMsg->R13;
    pVCpu->cpum.GstCtx.r14 = pMsg->R14;
    pVCpu->cpum.GstCtx.r15 = pMsg->R15;
    NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.ds, pMsg->DsSegment);
    NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.ss, pMsg->SsSegment);
}
#elif defined(IN_RING3)
3189/**
3190 * Copies state included in a exception intercept exit.
3191 *
3192 * @param pVCpu The cross context per CPU structure.
3193 * @param pExit The VM exit information.
3194 * @param fClearXcpt Clear pending exception.
3195 */
DECLINLINE(void) nemR3WinCopyStateFromExceptionMessage(PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, bool fClearXcpt)
{
    nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
    if (fClearXcpt)
        pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
}
#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */


/**
 * Advances the guest RIP by the number of bytes specified in @a cb.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   cb              RIP increment value in bytes.
 */
DECLINLINE(void) nemHcWinAdvanceRip(PVMCPUCC pVCpu, uint32_t cb)
{
    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    pCtx->rip += cb;

    /* Update interrupt shadow. */
    if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
        && pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
}


/**
 * Hacks its way around the lovely mesa driver's backdoor accesses.
 *
 * @sa hmR0VmxHandleMesaDrvGp
 * @sa hmR0SvmHandleMesaDrvGp
 */
static int nemHcWinHandleMesaDrvGp(PVMCPUCC pVCpu, PCPUMCTX pCtx)
{
    Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_GPRS_MASK)));
    RT_NOREF(pCtx);

    /* For now we'll just skip the instruction. */
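    /* (The backdoor access is a single-byte 'IN eAX,DX' instruction, 0xED, hence the +1 below.) */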
    nemHcWinAdvanceRip(pVCpu, 1);
    return VINF_SUCCESS;
}


/**
 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
 * backdoor logging w/o checking what it is running inside.
 *
 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
 * backdoor port and magic numbers loaded in registers.
 *
 * @returns true if it is, false if it isn't.
 * @sa      hmR0VmxIsMesaDrvGp
 * @sa      hmR0SvmIsMesaDrvGp
 */
DECLINLINE(bool) nemHcWinIsMesaDrvGp(PVMCPUCC pVCpu, PCPUMCTX pCtx, const uint8_t *pbInsn, uint32_t cbInsn)
{
    /* #GP(0) is already checked by caller. */

    /* Check magic and port. */
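    /* (DX = 0x5658 is the VMware backdoor I/O port 'VX'; EAX = 0x564d5868 is the magic 'VMXh'.) */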
    Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RAX)));
    if (pCtx->dx != UINT32_C(0x5658))
        return false;
    if (pCtx->rax != UINT32_C(0x564d5868))
        return false;

    /* Flat ring-3 CS. */
    if (CPUMGetGuestCPL(pVCpu) != 3)
        return false;
    if (pCtx->cs.u64Base != 0)
        return false;

    /* 0xed:  IN eAX,dx */
    if (cbInsn < 1) /* Play safe (shouldn't happen). */
    {
        uint8_t abInstr[1];
        int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
        if (RT_FAILURE(rc))
            return false;
        if (abInstr[0] != 0xed)
            return false;
    }
    else
    {
        if (pbInsn[0] != 0xed)
            return false;
    }

    return true;
}


#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
/**
 * Deals with exception intercept message (HvMessageTypeX64ExceptionIntercept).
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context per CPU structure.
 * @param   pMsg            The message.
 * @sa      nemR3WinHandleExitException
 */
NEM_TMPL_STATIC VBOXSTRICTRC
nemHCWinHandleMessageException(PVMCPUCC pVCpu, HV_X64_EXCEPTION_INTERCEPT_MESSAGE const *pMsg)
{
    /*
     * Assert sanity.
     */
    Assert(   pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
           || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
           || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_EXECUTE);
    NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRip, pMsg->Header.Rip);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
    NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, HvX64RegisterDs, pMsg->DsSegment);
    NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, HvX64RegisterSs, pMsg->SsSegment);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRax, pMsg->Rax);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRcx, pMsg->Rcx);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRdx, pMsg->Rdx);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRbx, pMsg->Rbx);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRsp, pMsg->Rsp);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRbp, pMsg->Rbp);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRsi, pMsg->Rsi);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRdi, pMsg->Rdi);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR8, pMsg->R8);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR9, pMsg->R9);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR10, pMsg->R10);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR11, pMsg->R11);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR12, pMsg->R12);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR13, pMsg->R13);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR14, pMsg->R14);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterR15, pMsg->R15);

    /*
     * Get most of the register state since we'll end up making IEM inject the
     * event.  The exception isn't normally flagged as a pending event, so duh.
     *
     * Note! We can optimize this later with event injection.
     */
    Log4(("XcptExit/%u: %04x:%08RX64/%s: %x errcd=%#x parm=%RX64\n",
          pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
          pMsg->ExceptionVector, pMsg->ErrorCode, pMsg->ExceptionParameter));
    nemHCWinCopyStateFromExceptionMessage(pVCpu, pMsg, true /*fClearXcpt*/);
    uint64_t fWhat = NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM;
    if (pMsg->ExceptionVector == X86_XCPT_DB)
        fWhat |= CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_DR6;
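    /* (#DB also needs the debug registers so IEM can look at DR6/DR7.) */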
    VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, fWhat, "Xcpt");
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /*
     * Handle the intercept.
     */
    TRPMEVENT enmEvtType = TRPM_TRAP;
    switch (pMsg->ExceptionVector)
    {
        /*
         * We get undefined opcodes on VMMCALL(AMD) & VMCALL(Intel) instructions
         * and need to turn them over to GIM.
         *
         * Note! We do not check fGIMTrapXcptUD here ASSUMING that GIM only wants
         *       #UD for handling non-native hypercall instructions.  (IEM will
         *       decode both and let the GIM provider decide whether to accept it.)
         */
        case X86_XCPT_UD:
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUd);
            EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_UD),
                             pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());

            if (nemHcWinIsInterestingUndefinedOpcode(pMsg->InstructionByteCount, pMsg->InstructionBytes,
                                                     pMsg->Header.ExecutionState.EferLma && pMsg->Header.CsSegment.Long ))
            {
                rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pMsg->Header.Rip,
                                                        pMsg->InstructionBytes, pMsg->InstructionByteCount);
                Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD -> emulated -> %Rrc\n",
                      pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip,
                      nemHCWinExecStateToLogStr(&pMsg->Header), VBOXSTRICTRC_VAL(rcStrict) ));
                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUdHandled);
                return rcStrict;
            }
            Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD [%.*Rhxs] -> re-injected\n", pVCpu->idCpu, pMsg->Header.CsSegment.Selector,
                  pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), pMsg->InstructionByteCount, pMsg->InstructionBytes ));
            break;

        /*
         * Workaround the lovely mesa driver assuming that vmsvga means vmware
         * hypervisor and tries to log stuff to the host.
         */
        case X86_XCPT_GP:
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionGp);
            /** @todo r=bird: Need workaround in IEM for this, right?
            EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_GP),
                             pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC()); */
            if (   !pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv
                || !nemHcWinIsMesaDrvGp(pVCpu, &pVCpu->cpum.GstCtx, pMsg->InstructionBytes, pMsg->InstructionByteCount))
            {
# if 1 /** @todo Need to emulate instruction or we get a triple fault when trying to inject the #GP... */
                rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pMsg->Header.Rip,
                                                        pMsg->InstructionBytes, pMsg->InstructionByteCount);
                Log4(("XcptExit/%u: %04x:%08RX64/%s: #GP -> emulated -> %Rrc\n",
                      pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip,
                      nemHCWinExecStateToLogStr(&pMsg->Header), VBOXSTRICTRC_VAL(rcStrict) ));
                return rcStrict;
# else
                break;
# endif
            }
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionGpMesa);
            return nemHcWinHandleMesaDrvGp(pVCpu, &pVCpu->cpum.GstCtx);

        /*
         * Filter debug exceptions.
         */
        case X86_XCPT_DB:
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionDb);
            EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_DB),
                             pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
            Log4(("XcptExit/%u: %04x:%08RX64/%s: #DB - TODO\n",
                  pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header) ));
            break;

        case X86_XCPT_BP:
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionBp);
            EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_BP),
                             pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
            Log4(("XcptExit/%u: %04x:%08RX64/%s: #BP - TODO - %u\n", pVCpu->idCpu, pMsg->Header.CsSegment.Selector,
                  pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), pMsg->Header.InstructionLength));
            enmEvtType = TRPM_SOFTWARE_INT; /* We're at the INT3 instruction, not after it. */
            break;

        /* This shouldn't happen. */
        default:
            AssertLogRelMsgFailedReturn(("ExceptionVector=%#x\n", pMsg->ExceptionVector), VERR_IEM_IPE_6);
    }

    /*
     * Inject it.
     */
    rcStrict = IEMInjectTrap(pVCpu, pMsg->ExceptionVector, enmEvtType, pMsg->ErrorCode,
                             pMsg->ExceptionParameter /*??*/, pMsg->Header.InstructionLength);
    Log4(("XcptExit/%u: %04x:%08RX64/%s: %#u -> injected -> %Rrc\n",
          pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip,
          nemHCWinExecStateToLogStr(&pMsg->Header), pMsg->ExceptionVector, VBOXSTRICTRC_VAL(rcStrict) ));
    return rcStrict;
}
#elif defined(IN_RING3)
3442/**
3443 * Deals with MSR access exits (WHvRunVpExitReasonException).
3444 *
3445 * @returns Strict VBox status code.
3446 * @param pVM The cross context VM structure.
3447 * @param pVCpu The cross context per CPU structure.
3448 * @param pExit The VM exit information to handle.
3449 * @sa nemR3WinHandleExitException
3450 */
NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExitException(PVMCC pVM, PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
{
    /*
     * Get most of the register state since we'll end up making IEM inject the
     * event.  The exception isn't normally flagged as a pending event, so duh.
     *
     * Note! We can optimize this later with event injection.
     */
    Log4(("XcptExit/%u: %04x:%08RX64/%s: %x errcd=%#x parm=%RX64\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
          pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpException.ExceptionType,
          pExit->VpException.ErrorCode, pExit->VpException.ExceptionParameter ));
    nemR3WinCopyStateFromExceptionMessage(pVCpu, pExit, true /*fClearXcpt*/);
    uint64_t fWhat = NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM;
    if (pExit->VpException.ExceptionType == X86_XCPT_DB)
        fWhat |= CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_DR6;
    VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, fWhat, "Xcpt");
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /*
     * Handle the intercept.
     */
    TRPMEVENT enmEvtType = TRPM_TRAP;
    switch (pExit->VpException.ExceptionType)
    {
        /*
         * We get undefined opcodes on VMMCALL(AMD) & VMCALL(Intel) instructions
         * and need to turn them over to GIM.
         *
         * Note! We do not check fGIMTrapXcptUD here ASSUMING that GIM only wants
         *       #UD for handling non-native hypercall instructions.  (IEM will
         *       decode both and let the GIM provider decide whether to accept it.)
         */
        case X86_XCPT_UD:
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUd);
            EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_UD),
                             pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
            if (nemHcWinIsInterestingUndefinedOpcode(pExit->VpException.InstructionByteCount, pExit->VpException.InstructionBytes,
                                                     pExit->VpContext.ExecutionState.EferLma && pExit->VpContext.Cs.Long ))
            {
                rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pExit->VpContext.Rip,
                                                        pExit->VpException.InstructionBytes,
                                                        pExit->VpException.InstructionByteCount);
                Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD -> emulated -> %Rrc\n",
                      pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip,
                      nemR3WinExecStateToLogStr(&pExit->VpContext), VBOXSTRICTRC_VAL(rcStrict) ));
                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUdHandled);
                return rcStrict;
            }

            Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD [%.*Rhxs] -> re-injected\n", pVCpu->idCpu,
                  pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
                  pExit->VpException.InstructionByteCount, pExit->VpException.InstructionBytes ));
            break;

        /*
         * Workaround the lovely mesa driver assuming that vmsvga means vmware
         * hypervisor and tries to log stuff to the host.
         */
        case X86_XCPT_GP:
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionGp);
            /** @todo r=bird: Need workaround in IEM for this, right?
            EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_GP),
                             pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC()); */
            if (   !pVCpu->nem.s.fTrapXcptGpForLovelyMesaDrv
                || !nemHcWinIsMesaDrvGp(pVCpu, &pVCpu->cpum.GstCtx, pExit->VpException.InstructionBytes,
                                        pExit->VpException.InstructionByteCount))
            {
# if 1 /** @todo Need to emulate instruction or we get a triple fault when trying to inject the #GP... */
                rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pExit->VpContext.Rip,
                                                        pExit->VpException.InstructionBytes,
                                                        pExit->VpException.InstructionByteCount);
                Log4(("XcptExit/%u: %04x:%08RX64/%s: #GP -> emulated -> %Rrc\n",
                      pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip,
                      nemR3WinExecStateToLogStr(&pExit->VpContext), VBOXSTRICTRC_VAL(rcStrict) ));
                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUdHandled);
                return rcStrict;
# else
                break;
# endif
            }
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionGpMesa);
            return nemHcWinHandleMesaDrvGp(pVCpu, &pVCpu->cpum.GstCtx);

        /*
         * Filter debug exceptions.
         */
        case X86_XCPT_DB:
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionDb);
            EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_DB),
                             pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
            Log4(("XcptExit/%u: %04x:%08RX64/%s: #DB - TODO\n",
                  pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext) ));
            break;

        case X86_XCPT_BP:
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionBp);
            EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_BP),
                             pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
            Log4(("XcptExit/%u: %04x:%08RX64/%s: #BP - TODO - %u\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
                  pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.InstructionLength));
            enmEvtType = TRPM_SOFTWARE_INT; /* We're at the INT3 instruction, not after it. */
            break;

        /* This shouldn't happen. */
        default:
            AssertLogRelMsgFailedReturn(("ExceptionType=%#x\n", pExit->VpException.ExceptionType), VERR_IEM_IPE_6);
    }

    /*
     * Inject it.
     */
    rcStrict = IEMInjectTrap(pVCpu, pExit->VpException.ExceptionType, enmEvtType, pExit->VpException.ErrorCode,
                             pExit->VpException.ExceptionParameter /*??*/, pExit->VpContext.InstructionLength);
    Log4(("XcptExit/%u: %04x:%08RX64/%s: %#u -> injected -> %Rrc\n",
          pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip,
          nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpException.ExceptionType, VBOXSTRICTRC_VAL(rcStrict) ));

    RT_NOREF_PV(pVM);
    return rcStrict;
}
#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */


#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
/**
 * Deals with unrecoverable exception (triple fault).
 *
 * Seen WRMSR 0x201 (IA32_MTRR_PHYSMASK0) writes from grub / debian9 ending up
 * here too.  So we'll leave it to IEM to decide.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context per CPU structure.
 * @param   pMsgHdr         The message header.
 * @sa      nemR3WinHandleExitUnrecoverableException
 */
NEM_TMPL_STATIC VBOXSTRICTRC
nemHCWinHandleMessageUnrecoverableException(PVMCPUCC pVCpu, HV_X64_INTERCEPT_MESSAGE_HEADER const *pMsgHdr)
{
    /* Check message register value sanity. */
    NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, HvX64RegisterCs, pMsgHdr->CsSegment);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRip, pMsgHdr->Rip);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterRflags, pMsgHdr->Rflags);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, HvX64RegisterCr8, (uint64_t)pMsgHdr->Cr8);

# if 0
    /*
     * Just copy the state we've got and handle it in the loop for now.
     */
    nemHCWinCopyStateFromX64Header(pVCpu, pMsgHdr);
    Log(("TripleExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT\n",
         pVCpu->idCpu, pMsgHdr->CsSegment.Selector, pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags));
    return VINF_EM_TRIPLE_FAULT;
# else
    /*
     * Let IEM decide whether this is really it.
     */
    EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_UNRECOVERABLE_EXCEPTION),
                     pMsgHdr->Rip + pMsgHdr->CsSegment.Base, ASMReadTSC());
    nemHCWinCopyStateFromX64Header(pVCpu, pMsgHdr);
    VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL, "TripleExit");
    if (rcStrict == VINF_SUCCESS)
    {
        rcStrict = IEMExecOne(pVCpu);
        if (rcStrict == VINF_SUCCESS)
        {
            Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_SUCCESS\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
                 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags ));
            pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT; /* Make sure to reset pending #DB(0). */
            return VINF_SUCCESS;
        }
        if (rcStrict == VINF_EM_TRIPLE_FAULT)
            Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT!\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
                 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags ));
        else
            Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (IEMExecOne)\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
                 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
    }
    else
        Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (state import)\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
             pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
    return rcStrict;
# endif
}
#elif defined(IN_RING3)
/**
 * Deals with unrecoverable exception exits (WHvRunVpExitReasonUnrecoverableException).
 *
 * @returns Strict VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context per CPU structure.
 * @param   pExit           The VM exit information to handle.
 * @sa      nemHCWinHandleMessageUnrecoverableException
 */
NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExitUnrecoverableException(PVMCC pVM, PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
{
# if 0
    /*
     * Just copy the state we've got and handle it in the loop for now.
     */
    nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
    Log(("TripleExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
         pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags));
    RT_NOREF_PV(pVM);
    return VINF_EM_TRIPLE_FAULT;
# else
    /*
     * Let IEM decide whether this is really it.
     */
    EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_UNRECOVERABLE_EXCEPTION),
                     pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
    nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
    VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL, "TripleExit");
    if (rcStrict == VINF_SUCCESS)
    {
        rcStrict = IEMExecOne(pVCpu);
        if (rcStrict == VINF_SUCCESS)
        {
            Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_SUCCESS\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
                 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags));
            pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT; /* Make sure to reset pending #DB(0). */
            return VINF_SUCCESS;
        }
        if (rcStrict == VINF_EM_TRIPLE_FAULT)
            Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT!\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
                 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags ));
        else
            Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (IEMExecOne)\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
                 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
    }
    else
        Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (state import)\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
             pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
    RT_NOREF_PV(pVM);
    return rcStrict;
# endif

}
#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */


#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
/**
 * Handles messages (VM exits).
 *
 * @returns Strict VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context per CPU structure.
 * @param   pMappingHeader  The message slot mapping.
 * @sa      nemR3WinHandleExit
 */
NEM_TMPL_STATIC VBOXSTRICTRC
nemHCWinHandleMessage(PVMCC pVM, PVMCPUCC pVCpu, VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader)
{
    if (pMappingHeader->enmVidMsgType == VidMessageHypervisorMessage)
    {
        AssertMsg(pMappingHeader->cbMessage == HV_MESSAGE_SIZE, ("%#x\n", pMappingHeader->cbMessage));
        HV_MESSAGE const *pMsg = (HV_MESSAGE const *)(pMappingHeader + 1);
        switch (pMsg->Header.MessageType)
        {
            case HvMessageTypeUnmappedGpa:
                Assert(pMsg->Header.PayloadSize == RT_UOFFSETOF(HV_X64_MEMORY_INTERCEPT_MESSAGE, DsSegment));
                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemUnmapped);
                return nemHCWinHandleMessageMemory(pVM, pVCpu, &pMsg->X64MemoryIntercept);

            case HvMessageTypeGpaIntercept:
                Assert(pMsg->Header.PayloadSize == RT_UOFFSETOF(HV_X64_MEMORY_INTERCEPT_MESSAGE, DsSegment));
                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemIntercept);
                return nemHCWinHandleMessageMemory(pVM, pVCpu, &pMsg->X64MemoryIntercept);

            case HvMessageTypeX64IoPortIntercept:
                Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64IoPortIntercept));
                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitPortIo);
                return nemHCWinHandleMessageIoPort(pVM, pVCpu, &pMsg->X64IoPortIntercept);

            case HvMessageTypeX64Halt:
                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitHalt);
                EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_HALT),
                                 pMsg->X64InterceptHeader.Rip + pMsg->X64InterceptHeader.CsSegment.Base, ASMReadTSC());
                Log4(("HaltExit\n"));
                return VINF_EM_HALT;

            case HvMessageTypeX64InterruptWindow:
                Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64InterruptWindow));
                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitInterruptWindow);
                return nemHCWinHandleMessageInterruptWindow(pVM, pVCpu, &pMsg->X64InterruptWindow);

            case HvMessageTypeX64CpuidIntercept:
                Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64CpuIdIntercept));
                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitCpuId);
                return nemHCWinHandleMessageCpuId(pVM, pVCpu, &pMsg->X64CpuIdIntercept);

            case HvMessageTypeX64MsrIntercept:
                Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64MsrIntercept));
                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMsr);
                return nemHCWinHandleMessageMsr(pVCpu, &pMsg->X64MsrIntercept);

            case HvMessageTypeX64ExceptionIntercept:
                Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64ExceptionIntercept));
                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitException);
                return nemHCWinHandleMessageException(pVCpu, &pMsg->X64ExceptionIntercept);

            case HvMessageTypeUnrecoverableException:
                Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64InterceptHeader));
                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitUnrecoverable);
                return nemHCWinHandleMessageUnrecoverableException(pVCpu, &pMsg->X64InterceptHeader);

            case HvMessageTypeInvalidVpRegisterValue:
            case HvMessageTypeUnsupportedFeature:
            case HvMessageTypeTlbPageSizeMismatch:
                LogRel(("Unimplemented msg:\n%.*Rhxd\n", (int)sizeof(*pMsg), pMsg));
                AssertLogRelMsgFailedReturn(("Message type %#x not implemented!\n%.32Rhxd\n", pMsg->Header.MessageType, pMsg),
                                            VERR_NEM_IPE_3);

            case HvMessageTypeX64ApicEoi:
            case HvMessageTypeX64LegacyFpError:
            case HvMessageTypeX64RegisterIntercept:
            case HvMessageTypeApicEoi:
            case HvMessageTypeFerrAsserted:
            case HvMessageTypeEventLogBufferComplete:
            case HvMessageTimerExpired:
                LogRel(("Unexpected msg:\n%.*Rhxd\n", (int)sizeof(*pMsg), pMsg));
                AssertLogRelMsgFailedReturn(("Unexpected message on CPU #%u: %#x\n", pVCpu->idCpu, pMsg->Header.MessageType),
                                            VERR_NEM_IPE_3);

            default:
                LogRel(("Unknown msg:\n%.*Rhxd\n", (int)sizeof(*pMsg), pMsg));
                AssertLogRelMsgFailedReturn(("Unknown message on CPU #%u: %#x\n", pVCpu->idCpu, pMsg->Header.MessageType),
                                            VERR_NEM_IPE_3);
        }
    }
    else
        AssertLogRelMsgFailedReturn(("Unexpected VID message type on CPU #%u: %#x LB %u\n",
                                     pVCpu->idCpu, pMappingHeader->enmVidMsgType, pMappingHeader->cbMessage),
                                    VERR_NEM_IPE_4);
}
#elif defined(IN_RING3)
/**
 * Handles VM exits.
 *
 * @returns Strict VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context per CPU structure.
 * @param   pExit           The VM exit information to handle.
 * @sa      nemHCWinHandleMessage
 */
NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExit(PVMCC pVM, PVMCPUCC pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
{
    switch (pExit->ExitReason)
    {
        case WHvRunVpExitReasonMemoryAccess:
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemUnmapped);
            return nemR3WinHandleExitMemory(pVM, pVCpu, pExit);

        case WHvRunVpExitReasonX64IoPortAccess:
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitPortIo);
            return nemR3WinHandleExitIoPort(pVM, pVCpu, pExit);

        case WHvRunVpExitReasonX64Halt:
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitHalt);
            EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_HALT),
                             pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
            Log4(("HaltExit/%u\n", pVCpu->idCpu));
            return VINF_EM_HALT;

        case WHvRunVpExitReasonCanceled:
            Log4(("CanceledExit/%u\n", pVCpu->idCpu));
            return VINF_SUCCESS;

        case WHvRunVpExitReasonX64InterruptWindow:
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitInterruptWindow);
            return nemR3WinHandleExitInterruptWindow(pVM, pVCpu, pExit);

        case WHvRunVpExitReasonX64Cpuid:
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitCpuId);
            return nemR3WinHandleExitCpuId(pVM, pVCpu, pExit);

        case WHvRunVpExitReasonX64MsrAccess:
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMsr);
            return nemR3WinHandleExitMsr(pVM, pVCpu, pExit);

        case WHvRunVpExitReasonException:
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitException);
            return nemR3WinHandleExitException(pVM, pVCpu, pExit);

        case WHvRunVpExitReasonUnrecoverableException:
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitUnrecoverable);
            return nemR3WinHandleExitUnrecoverableException(pVM, pVCpu, pExit);

        case WHvRunVpExitReasonUnsupportedFeature:
        case WHvRunVpExitReasonInvalidVpRegisterValue:
            LogRel(("Unimplemented exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit));
            AssertLogRelMsgFailedReturn(("Unexpected exit on CPU #%u: %#x\n%.32Rhxd\n",
                                         pVCpu->idCpu, pExit->ExitReason, pExit), VERR_NEM_IPE_3);

        /* Undesired exits: */
        case WHvRunVpExitReasonNone:
        default:
            LogRel(("Unknown exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit));
            AssertLogRelMsgFailedReturn(("Unknown exit on CPU #%u: %#x!\n", pVCpu->idCpu, pExit->ExitReason), VERR_NEM_IPE_3);
    }
}
#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */


#if defined(IN_RING0) && defined(NEM_WIN_WITH_RING0_RUNLOOP)
/**
 * Perform an I/O control operation on the partition handle (VID.SYS),
 * restarting on alert-like behaviour.
 *
 * @returns NT status code.
 * @param   pGVM            The ring-0 VM structure.
 * @param   pGVCpu          The global (ring-0) per CPU structure.
 * @param   fFlags          The wait flags.
3865  * @param   pGVCpu              The global (ring-0) per CPU structure.
3866 */
3867static NTSTATUS nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(PGVM pGVM, PGVMCPU pGVCpu, uint32_t fFlags, uint32_t cMillies)
3868{
3869 pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu = pGVCpu->idCpu;
3870 pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags = fFlags;
3871 pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = cMillies;
3872 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVCpu, pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
3873 &pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
3874 pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.cbInput,
3875 NULL, 0);
3876 if (rcNt == STATUS_SUCCESS)
3877 { /* likely */ }
3878 /*
3879 * Generally, if we get down here, we have been interrupted between ACK'ing
3880      * a message and waiting for the next one due to an NtAlertThread call.  So, we
3881      * should stop ACK'ing the previous message and move on to waiting for the next.
3882 * See similar stuff in nemHCWinRunGC().
3883 */
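    /* (Sketch of the VID_MSHAGN_F_XXX protocol as this template uses it, inferred
        from the code here rather than from any VID.SYS documentation:
            VID_MSHAGN_F_GET_NEXT_MESSAGE                    - just wait for a new message;
            VID_MSHAGN_F_HANDLE_MESSAGE + GET_NEXT_MESSAGE   - ACK the current message, then wait;
            VID_MSHAGN_F_HANDLE_MESSAGE                      - just ACK the current message.
        That is why the retry below masks out VID_MSHAGN_F_HANDLE_MESSAGE: the
        message we already ACK'ed must not be ACK'ed a second time.) */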
3884 else if ( rcNt == STATUS_TIMEOUT
3885 || rcNt == STATUS_ALERTED /* just in case */
3886 || rcNt == STATUS_KERNEL_APC /* just in case */
3887 || rcNt == STATUS_USER_APC /* just in case */)
3888 {
3889 DBGFTRACE_CUSTOM(pGVCpu->CTX_SUFF(pVM), "IoCtlMessageSlotHandleAndGetNextRestart/1 %#x (f=%#x)", rcNt, fFlags);
3890 STAM_REL_COUNTER_INC(&pGVCpu->nem.s.StatStopCpuPendingAlerts);
3891 Assert(fFlags & VID_MSHAGN_F_GET_NEXT_MESSAGE);
3892
3893 pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu = pGVCpu->idCpu;
3894 pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags = fFlags & ~VID_MSHAGN_F_HANDLE_MESSAGE;
3895 pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = cMillies;
3896 rcNt = nemR0NtPerformIoControl(pGVM, pGVCpu, pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
3897 &pGVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
3898 pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.cbInput,
3899 NULL, 0);
3900 DBGFTRACE_CUSTOM(pGVM, "IoCtlMessageSlotHandleAndGetNextRestart/2 %#x", rcNt);
3901 }
3902 return rcNt;
3903}
3904#endif /* IN_RING0 */
3905
3906
3907#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
3908/**
3909 * Worker for nemHCWinRunGC that stops the execution on the way out.
3910 *
3911  * The CPU was running the last time we checked, so there are no messages that
3912  * need to be marked as handled.  The caller checks this.
3913 *
3914 * @returns rcStrict on success, error status on failure.
3915 * @param pVM The cross context VM structure.
3916 * @param pVCpu The cross context per CPU structure.
3917 * @param rcStrict The nemHCWinRunGC return status. This is a little
3918 * bit unnecessary, except in internal error cases,
3919 * since we won't need to stop the CPU if we took an
3920 * exit.
3921 * @param pMappingHeader The message slot mapping.
3922 */
3923NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinStopCpu(PVMCC pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict,
3924 VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader)
3925{
3926# ifdef DBGFTRACE_ENABLED
3927 HV_MESSAGE const volatile *pMsgForTrace = (HV_MESSAGE const volatile *)(pMappingHeader + 1);
3928# endif
3929
3930 /*
3931 * Try stopping the processor. If we're lucky we manage to do this before it
3932 * does another VM exit.
3933 */
3934 DBGFTRACE_CUSTOM(pVM, "nemStop#0");
3935# ifdef IN_RING0
3936 pVCpu->nem.s.uIoCtlBuf.idCpu = pVCpu->idCpu;
3937 NTSTATUS rcNt = nemR0NtPerformIoControl(pVM, pVCpu, pVM->nemr0.s.IoCtlStopVirtualProcessor.uFunction,
3938 &pVCpu->nem.s.uIoCtlBuf.idCpu, sizeof(pVCpu->nem.s.uIoCtlBuf.idCpu),
3939 NULL, 0);
3940 if (NT_SUCCESS(rcNt))
3941 {
3942 DBGFTRACE_CUSTOM(pVM, "nemStop#0: okay (%#x)", rcNt);
3943 Log8(("nemHCWinStopCpu: Stopping CPU succeeded (cpu status %u)\n", nemHCWinCpuGetRunningStatus(pVCpu) ));
3944 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuSuccess);
3945 return rcStrict;
3946 }
3947# else
3948 BOOL fRet = VidStopVirtualProcessor(pVM->nem.s.hPartitionDevice, pVCpu->idCpu);
3949 if (fRet)
3950 {
3951 DBGFTRACE_CUSTOM(pVM, "nemStop#0: okay");
3952 Log8(("nemHCWinStopCpu: Stopping CPU succeeded (cpu status %u)\n", nemHCWinCpuGetRunningStatus(pVCpu) ));
3953 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuSuccess);
3954 return rcStrict;
3955 }
3956# endif
3957
3958 /*
3959      * Dang. The CPU stopped by itself and we got a couple of messages to deal with.
3960 */
3961# ifdef IN_RING0
3962 DBGFTRACE_CUSTOM(pVM, "nemStop#0: pending (%#x)", rcNt);
3963 AssertLogRelMsgReturn(rcNt == ERROR_VID_STOP_PENDING, ("rcNt=%#x\n", rcNt),
3964 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3965# else
3966 DWORD dwErr = RTNtLastErrorValue();
3967 DBGFTRACE_CUSTOM(pVM, "nemStop#0: pending (%#x)", dwErr);
3968 AssertLogRelMsgReturn(dwErr == ERROR_VID_STOP_PENDING, ("dwErr=%#u (%#x)\n", dwErr, dwErr),
3969 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3970# endif
3971 Log8(("nemHCWinStopCpu: Stopping CPU #%u pending...\n", pVCpu->idCpu));
3972 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuPending);
3973
3974 /*
3975 * First message: Exit or similar, sometimes VidMessageStopRequestComplete.
3976      * Note! We can safely ASSUME that rcStrict doesn't carry any important information here.
3977 */
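    /* (Educated sketch of the full drain sequence after ERROR_VID_STOP_PENDING,
        summarizing the code below rather than official documentation:
            1. GET_NEXT_MESSAGE                   -> a pending exit message, or
                                                     VidMessageStopRequestComplete right away;
            2. HANDLE_MESSAGE + GET_NEXT_MESSAGE  -> VidMessageStopRequestComplete;
            3. HANDLE_MESSAGE                     -> the CPU is fully stopped.
        Steps 2 and 3 are skipped when step 1 already returned the stop request
        completed message.) */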
3978# ifdef IN_RING0
3979 rcNt = nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(pVM, pVCpu, VID_MSHAGN_F_GET_NEXT_MESSAGE, 30000 /*ms*/);
3980 DBGFTRACE_CUSTOM(pVM, "nemStop#1: %#x / %#x %#x %#x", rcNt, pMappingHeader->enmVidMsgType, pMappingHeader->cbMessage,
3981 pMsgForTrace->Header.MessageType);
3982 AssertLogRelMsgReturn(rcNt == STATUS_SUCCESS,
3983 ("1st VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %#x\n", rcNt),
3984 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3985# else
3986 BOOL fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu,
3987 VID_MSHAGN_F_GET_NEXT_MESSAGE, 30000 /*ms*/);
3988 DBGFTRACE_CUSTOM(pVM, "nemStop#1: %d+%#x / %#x %#x %#x", fWait, RTNtLastErrorValue(), pMappingHeader->enmVidMsgType,
3989 pMappingHeader->cbMessage, pMsgForTrace->Header.MessageType);
3990 AssertLogRelMsgReturn(fWait, ("1st VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()),
3991 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3992# endif
3993
3994 VID_MESSAGE_TYPE enmVidMsgType = pMappingHeader->enmVidMsgType;
3995 if (enmVidMsgType != VidMessageStopRequestComplete)
3996 {
3997 VBOXSTRICTRC rcStrict2 = nemHCWinHandleMessage(pVM, pVCpu, pMappingHeader);
3998 if (rcStrict2 != VINF_SUCCESS && RT_SUCCESS(rcStrict))
3999 rcStrict = rcStrict2;
4000 DBGFTRACE_CUSTOM(pVM, "nemStop#1: handled %#x -> %d", pMsgForTrace->Header.MessageType, VBOXSTRICTRC_VAL(rcStrict));
4001
4002 /*
4003 * Mark it as handled and get the stop request completed message, then mark
4004          * that as handled too.  The CPU is then back in the fully stopped state.
4005 */
4006# ifdef IN_RING0
4007 rcNt = nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(pVM, pVCpu,
4008 VID_MSHAGN_F_HANDLE_MESSAGE | VID_MSHAGN_F_GET_NEXT_MESSAGE,
4009 30000 /*ms*/);
4010 DBGFTRACE_CUSTOM(pVM, "nemStop#2: %#x / %#x %#x %#x", rcNt, pMappingHeader->enmVidMsgType, pMappingHeader->cbMessage,
4011 pMsgForTrace->Header.MessageType);
4012 AssertLogRelMsgReturn(rcNt == STATUS_SUCCESS,
4013 ("2nd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %#x\n", rcNt),
4014 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
4015# else
4016 fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu,
4017 VID_MSHAGN_F_HANDLE_MESSAGE | VID_MSHAGN_F_GET_NEXT_MESSAGE, 30000 /*ms*/);
4018 DBGFTRACE_CUSTOM(pVM, "nemStop#2: %d+%#x / %#x %#x %#x", fWait, RTNtLastErrorValue(), pMappingHeader->enmVidMsgType,
4019 pMappingHeader->cbMessage, pMsgForTrace->Header.MessageType);
4020 AssertLogRelMsgReturn(fWait, ("2nd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()),
4021 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
4022# endif
4023
4024 /* It should be a stop request completed message. */
4025 enmVidMsgType = pMappingHeader->enmVidMsgType;
4026 AssertLogRelMsgReturn(enmVidMsgType == VidMessageStopRequestComplete,
4027 ("Unexpected 2nd message following ERROR_VID_STOP_PENDING: %#x LB %#x\n",
4028 enmVidMsgType, pMappingHeader->cbMessage),
4029 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
4030
4031 /*
4032 * Mark the VidMessageStopRequestComplete message as handled.
4033 */
4034# ifdef IN_RING0
4035 rcNt = nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(pVM, pVCpu, VID_MSHAGN_F_HANDLE_MESSAGE, 30000 /*ms*/);
4036         DBGFTRACE_CUSTOM(pVM, "nemStop#3: %#x / %#x %#x %#x", rcNt, pMappingHeader->enmVidMsgType, pMappingHeader->cbMessage,
4037                          pMsgForTrace->Header.MessageType);
4038 AssertLogRelMsgReturn(rcNt == STATUS_SUCCESS,
4039 ("3rd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %#x\n", rcNt),
4040 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
4041# else
4042 fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu, VID_MSHAGN_F_HANDLE_MESSAGE, 30000 /*ms*/);
4043         DBGFTRACE_CUSTOM(pVM, "nemStop#3: %d+%#x / %#x %#x %#x", fWait, RTNtLastErrorValue(), pMappingHeader->enmVidMsgType,
4044                          pMappingHeader->cbMessage, pMsgForTrace->Header.MessageType);
4045 AssertLogRelMsgReturn(fWait, ("3rd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()),
4046 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
4047# endif
4048 Log8(("nemHCWinStopCpu: Stopped the CPU (rcStrict=%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict) ));
4049 }
4050 else
4051 {
4052 /** @todo I'm not so sure about this now... */
4053 DBGFTRACE_CUSTOM(pVM, "nemStop#9: %#x %#x %#x", pMappingHeader->enmVidMsgType,
4054 pMappingHeader->cbMessage, pMsgForTrace->Header.MessageType);
4055 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuPendingOdd);
4056 Log8(("nemHCWinStopCpu: Stopped the CPU (rcStrict=%Rrc) - 1st VidMessageSlotHandleAndGetNext got VidMessageStopRequestComplete.\n",
4057 VBOXSTRICTRC_VAL(rcStrict) ));
4058 }
4059 return rcStrict;
4060}
4061#endif /* NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
4062
4063#if defined(NEM_WIN_TEMPLATE_MODE_OWN_RUN_API) || defined(IN_RING3)
4064
4065/**
4066 * Deals with pending interrupt related force flags, may inject interrupt.
4067 *
4068 * @returns VBox strict status code.
4069 * @param pVM The cross context VM structure.
4070 * @param pVCpu The cross context per CPU structure.
4071 * @param pfInterruptWindows Where to return interrupt window flags.
4072 */
4073NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleInterruptFF(PVMCC pVM, PVMCPUCC pVCpu, uint8_t *pfInterruptWindows)
4074{
4075 Assert(!TRPMHasTrap(pVCpu));
4076 RT_NOREF_PV(pVM);
4077
4078 /*
4079 * First update APIC. We ASSUME this won't need TPR/CR8.
4080 */
4081 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
4082 {
4083 APICUpdatePendingInterrupts(pVCpu);
4084 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
4085 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
4086 return VINF_SUCCESS;
4087 }
4088
4089 /*
4090 * We don't currently implement SMIs.
4091 */
4092 AssertReturn(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI), VERR_NEM_IPE_0);
4093
4094 /*
4095      * Check if we've got the minimum state required for deciding whether we
4096 * can inject interrupts and NMIs. If we don't have it, get all we might require
4097 * for injection via IEM.
4098 */
4099 bool const fPendingNmi = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4100 uint64_t fNeedExtrn = CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS
4101 | (fPendingNmi ? CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI : 0);
4102 if (pVCpu->cpum.GstCtx.fExtrn & fNeedExtrn)
4103 {
4104 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "IntFF");
4105 if (rcStrict != VINF_SUCCESS)
4106 return rcStrict;
4107 }
4108 bool const fInhibitInterrupts = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
4109 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip;
4110
4111 /*
4112 * NMI? Try deliver it first.
4113 */
4114 if (fPendingNmi)
4115 {
4116 if ( !fInhibitInterrupts
4117 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
4118 {
4119 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "NMI");
4120 if (rcStrict == VINF_SUCCESS)
4121 {
4122 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4123 rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_NMI, TRPM_HARDWARE_INT, 0, 0, 0);
4124 Log8(("Injected NMI on %u (%d)\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
4125 }
4126 return rcStrict;
4127 }
4128 *pfInterruptWindows |= NEM_WIN_INTW_F_NMI;
4129 Log8(("NMI window pending on %u\n", pVCpu->idCpu));
4130 }
4131
4132 /*
4133 * APIC or PIC interrupt?
4134 */
4135 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
4136 {
4137 if ( !fInhibitInterrupts
4138 && pVCpu->cpum.GstCtx.rflags.Bits.u1IF)
4139 {
4140 AssertCompile(NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT & CPUMCTX_EXTRN_APIC_TPR);
4141             VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "IRQ");
4142 if (rcStrict == VINF_SUCCESS)
4143 {
4144 uint8_t bInterrupt;
4145 int rc = PDMGetInterrupt(pVCpu, &bInterrupt);
4146 if (RT_SUCCESS(rc))
4147 {
4148 Log8(("Injecting interrupt %#x on %u: %04x:%08RX64 efl=%#x\n", bInterrupt, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags));
4149 rcStrict = IEMInjectTrap(pVCpu, bInterrupt, TRPM_HARDWARE_INT, 0, 0, 0);
4150 Log8(("Injected interrupt %#x on %u (%d)\n", bInterrupt, pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
4151 }
4152 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
4153 {
4154 *pfInterruptWindows |= ((bInterrupt >> 4) << NEM_WIN_INTW_F_PRIO_SHIFT) | NEM_WIN_INTW_F_REGULAR;
4155 Log8(("VERR_APIC_INTR_MASKED_BY_TPR: *pfInterruptWindows=%#x\n", *pfInterruptWindows));
4156 }
4157 else
4158 Log8(("PDMGetInterrupt failed -> %Rrc\n", rc));
4159 }
4160 return rcStrict;
4161 }
4162
4163 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC) && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC))
4164 {
4165 /* If only an APIC interrupt is pending, we need to know its priority. Otherwise we'll
4166 * likely get pointless deliverability notifications with IF=1 but TPR still too high.
4167 */
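            /* (The x86 rule being applied here, loosely: a vector is only deliverable
                when its priority class, vector >> 4, exceeds the TPR class, bTpr >> 4.
                E.g. with bTpr=0x50 a pending vector 0x53 stays masked while 0x61 would
                be deliverable.  That class is what goes into the upper bits of
                *pfInterruptWindows via NEM_WIN_INTW_F_PRIO_SHIFT below.) */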
4168 bool fPendingIntr = false;
4169 uint8_t bTpr = 0;
4170 uint8_t bPendingIntr = 0;
4171 int rc = APICGetTpr(pVCpu, &bTpr, &fPendingIntr, &bPendingIntr);
4172 AssertRC(rc);
4173 *pfInterruptWindows |= (bPendingIntr >> 4) << NEM_WIN_INTW_F_PRIO_SHIFT;
4174 Log8(("Interrupt window pending on %u: %#x (bTpr=%#x fPendingIntr=%d bPendingIntr=%#x)\n",
4175 pVCpu->idCpu, *pfInterruptWindows, bTpr, fPendingIntr, bPendingIntr));
4176 }
4177 else
4178 {
4179 *pfInterruptWindows |= NEM_WIN_INTW_F_REGULAR;
4180 Log8(("Interrupt window pending on %u: %#x\n", pVCpu->idCpu, *pfInterruptWindows));
4181 }
4182 }
4183
4184 return VINF_SUCCESS;
4185}
4186
4187
4188/**
4189 * Inner NEM runloop for windows.
4190 *
4191 * @returns Strict VBox status code.
4192 * @param pVM The cross context VM structure.
4193 * @param pVCpu The cross context per CPU structure.
4194 */
4195NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinRunGC(PVMCC pVM, PVMCPUCC pVCpu)
4196{
4197 LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 <=\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags));
4198# ifdef LOG_ENABLED
4199 if (LogIs3Enabled())
4200 nemHCWinLogState(pVM, pVCpu);
4201# endif
4202
4203 /*
4204 * Try switch to NEM runloop state.
4205 */
4206 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
4207 { /* likely */ }
4208 else
4209 {
4210 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
4211 LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
4212 return VINF_SUCCESS;
4213 }
4214
4215 /*
4216 * The run loop.
4217 *
4218      * The current approach to state updating is to use the sledgehammer and
4219      * sync everything every time.  This will be optimized later.
4220 */
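    /* (Per-iteration outline, as a reading aid for the code below:
            1. unmap-everything hack when the mapped page limit has been hit
               (only without hypercalls and without the PGM NEM mode);
            2. stop the CPU and try inject pending interrupts/NMIs when
               interrupt related force flags are set;
            3. export the guest state to hyper-V when it is dirty;
            4. poll timers, start the CPU if necessary and wait for a message
               or exit;
            5. handle the message/exit and loop, unless force flags or the
               status code say otherwise.) */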
4221# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4222 VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader = (VID_MESSAGE_MAPPING_HEADER volatile *)pVCpu->nem.s.pvMsgSlotMapping;
4223# endif
4224 const bool fSingleStepping = DBGFIsStepping(pVCpu);
4225// const uint32_t fCheckVmFFs = !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK
4226// : VM_FF_HP_R0_PRE_HM_STEP_MASK;
4227// const uint32_t fCheckCpuFFs = !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK;
4228 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
4229 for (unsigned iLoop = 0;; iLoop++)
4230 {
4231# if !defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) && !defined(VBOX_WITH_PGM_NEM_MODE)
4232 /*
4233 * Hack alert!
4234 */
4235 uint32_t const cMappedPages = pVM->nem.s.cMappedPages;
4236 if (cMappedPages < pVM->nem.s.cMaxMappedPages)
4237         { /* likely */ }
4238 else
4239 {
4240 PGMPhysNemEnumPagesByState(pVM, pVCpu, NEM_WIN_PAGE_STATE_READABLE, nemHCWinUnmapOnePageCallback, NULL);
4241 Log(("nemHCWinRunGC: Unmapped all; cMappedPages=%u -> %u\n", cMappedPages, pVM->nem.s.cMappedPages));
4242 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapAllPages);
4243 }
4244# endif
4245
4246 /*
4247 * Pending interrupts or such? Need to check and deal with this prior
4248 * to the state syncing.
4249 */
4250 pVCpu->nem.s.fDesiredInterruptWindows = 0;
4251 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC
4252 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
4253 {
4254# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4255 /* Make sure the CPU isn't executing. */
4256 if (pVCpu->nem.s.fHandleAndGetFlags == VID_MSHAGN_F_GET_NEXT_MESSAGE)
4257 {
4258 pVCpu->nem.s.fHandleAndGetFlags = 0;
4259 rcStrict = nemHCWinStopCpu(pVM, pVCpu, rcStrict, pMappingHeader);
4260 if (rcStrict == VINF_SUCCESS)
4261 { /* likely */ }
4262 else
4263 {
4264 LogFlow(("NEM/%u: breaking: nemHCWinStopCpu -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
4265 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
4266 break;
4267 }
4268 }
4269# endif
4270
4271 /* Try inject interrupt. */
4272 rcStrict = nemHCWinHandleInterruptFF(pVM, pVCpu, &pVCpu->nem.s.fDesiredInterruptWindows);
4273 if (rcStrict == VINF_SUCCESS)
4274 { /* likely */ }
4275 else
4276 {
4277 LogFlow(("NEM/%u: breaking: nemHCWinHandleInterruptFF -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
4278 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
4279 break;
4280 }
4281 }
4282
4283# ifndef NEM_WIN_WITH_A20
4284 /*
4285      * Do not execute in hyper-V if the A20 gate isn't enabled.
4286 */
4287 if (PGMPhysIsA20Enabled(pVCpu))
4288 { /* likely */ }
4289 else
4290 {
4291 rcStrict = VINF_EM_RESCHEDULE_REM;
4292 LogFlow(("NEM/%u: breaking: A20 disabled\n", pVCpu->idCpu));
4293 break;
4294 }
4295# endif
4296
4297 /*
4298 * Ensure that hyper-V has the whole state.
4299      * (We always update the interrupt window settings when active, as hyper-V
4300      * seems to forget about them after an exit.)
4301 */
4302 if ( (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK))
4303 != (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK)
4304 || ( ( pVCpu->nem.s.fDesiredInterruptWindows
4305 || pVCpu->nem.s.fCurrentInterruptWindows != pVCpu->nem.s.fDesiredInterruptWindows)
4306# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4307 && pVCpu->nem.s.fHandleAndGetFlags != VID_MSHAGN_F_GET_NEXT_MESSAGE /* not running */
4308# endif
4309 )
4310 )
4311 {
4312# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4313 AssertMsg(pVCpu->nem.s.fHandleAndGetFlags != VID_MSHAGN_F_GET_NEXT_MESSAGE /* not running */,
4314 ("%#x fExtrn=%#RX64 (%#RX64) fDesiredInterruptWindows=%d fCurrentInterruptWindows=%#x vs %#x\n",
4315 pVCpu->nem.s.fHandleAndGetFlags, pVCpu->cpum.GstCtx.fExtrn, ~pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK),
4316 pVCpu->nem.s.fDesiredInterruptWindows, pVCpu->nem.s.fCurrentInterruptWindows, pVCpu->nem.s.fDesiredInterruptWindows));
4317# endif
4318# ifdef IN_RING0
4319 int rc2 = nemR0WinExportState(pVM, pVCpu, &pVCpu->cpum.GstCtx);
4320# else
4321 int rc2 = nemHCWinCopyStateToHyperV(pVM, pVCpu);
4322# endif
4323 AssertRCReturn(rc2, rc2);
4324 }
4325
4326 /*
4327 * Poll timers and run for a bit.
4328 *
4329 * With the VID approach (ring-0 or ring-3) we can specify a timeout here,
4330      * so we take the time of the next timer event and use that as the deadline.
4331 * The rounding heuristics are "tuned" so that rhel5 (1K timer) will boot fine.
4332 */
4333 /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
4334 * the whole polling job when timers have changed... */
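    /* (Rough examples of the deadline-to-millisecond rounding done below, assuming
        cNsNextTimerEvt holds the nanoseconds left until the next timer event:
            under 0.1 ms      -> cMsWait = 0 (just poll);
            under 2 ms        -> cMsWait = 1;
            e.g. 3.6 ms       -> (3600000 - 100000) / 1000000 = 3 ms;
            1 second or more  -> capped at RT_MS_1SEC.) */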
4335 uint64_t offDeltaIgnored;
4336 uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
4337 if ( !VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
4338 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
4339 {
4340# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4341 if (pVCpu->nem.s.fHandleAndGetFlags)
4342 { /* Very likely that the CPU does NOT need starting (pending msg, running). */ }
4343 else
4344 {
4345# ifdef IN_RING0
4346 pVCpu->nem.s.uIoCtlBuf.idCpu = pVCpu->idCpu;
4347 NTSTATUS rcNt = nemR0NtPerformIoControl(pVM, pVCpu, pVM->nemr0.s.IoCtlStartVirtualProcessor.uFunction,
4348 &pVCpu->nem.s.uIoCtlBuf.idCpu, sizeof(pVCpu->nem.s.uIoCtlBuf.idCpu),
4349 NULL, 0);
4350 LogFlow(("NEM/%u: IoCtlStartVirtualProcessor -> %#x\n", pVCpu->idCpu, rcNt));
4351 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("VidStartVirtualProcessor failed for CPU #%u: %#x\n", pVCpu->idCpu, rcNt),
4352 VERR_NEM_IPE_5);
4353# else
4354 AssertLogRelMsgReturn(g_pfnVidStartVirtualProcessor(pVM->nem.s.hPartitionDevice, pVCpu->idCpu),
4355 ("VidStartVirtualProcessor failed for CPU #%u: %u (%#x, rcNt=%#x)\n",
4356 pVCpu->idCpu, RTNtLastErrorValue(), RTNtLastErrorValue(), RTNtLastStatusValue()),
4357 VERR_NEM_IPE_5);
4358# endif
4359 pVCpu->nem.s.fHandleAndGetFlags = VID_MSHAGN_F_GET_NEXT_MESSAGE;
4360 }
4361# endif /* NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
4362
4363 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_WAIT, VMCPUSTATE_STARTED_EXEC_NEM))
4364 {
4365# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4366 uint64_t const nsNow = RTTimeNanoTS();
4367 int64_t const cNsNextTimerEvt = nsNow - nsNextTimerEvt;
4368 uint32_t cMsWait;
4369 if (cNsNextTimerEvt < 100000 /* ns */)
4370 cMsWait = 0;
4371 else if ((uint64_t)cNsNextTimerEvt < RT_NS_1SEC)
4372 {
4373 if ((uint32_t)cNsNextTimerEvt < 2*RT_NS_1MS)
4374 cMsWait = 1;
4375 else
4376 cMsWait = ((uint32_t)cNsNextTimerEvt - 100000 /*ns*/) / RT_NS_1MS;
4377 }
4378 else
4379 cMsWait = RT_MS_1SEC;
4380# ifdef IN_RING0
4381 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu = pVCpu->idCpu;
4382 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags = pVCpu->nem.s.fHandleAndGetFlags;
4383 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = cMsWait;
4384 NTSTATUS rcNt = nemR0NtPerformIoControl(pVM, pVCpu, pVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
4385 &pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
4386 pVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext.cbInput,
4387 NULL, 0);
4388 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
4389 if (rcNt == STATUS_SUCCESS)
4390# else
4391 BOOL fRet = VidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu,
4392 pVCpu->nem.s.fHandleAndGetFlags, cMsWait);
4393 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
4394 if (fRet)
4395# endif
4396# else
4397# ifdef LOG_ENABLED
4398 if (LogIsFlowEnabled())
4399 {
4400 static const WHV_REGISTER_NAME s_aNames[6] = { WHvX64RegisterCs, WHvX64RegisterRip, WHvX64RegisterRflags,
4401 WHvX64RegisterSs, WHvX64RegisterRsp, WHvX64RegisterCr0 };
4402 WHV_REGISTER_VALUE aRegs[RT_ELEMENTS(s_aNames)] = {0};
4403 WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, s_aNames, RT_ELEMENTS(s_aNames), aRegs);
4404 LogFlow(("NEM/%u: Entry @ %04x:%08RX64 IF=%d EFL=%#RX64 SS:RSP=%04x:%08RX64 cr0=%RX64\n",
4405 pVCpu->idCpu, aRegs[0].Segment.Selector, aRegs[1].Reg64, RT_BOOL(aRegs[2].Reg64 & X86_EFL_IF),
4406 aRegs[2].Reg64, aRegs[3].Segment.Selector, aRegs[4].Reg64, aRegs[5].Reg64));
4407 }
4408# endif
4409 WHV_RUN_VP_EXIT_CONTEXT ExitReason = {0};
4410 TMNotifyStartOfExecution(pVM, pVCpu);
4411
4412 HRESULT hrc = WHvRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, &ExitReason, sizeof(ExitReason));
4413
4414 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
4415 TMNotifyEndOfExecution(pVM, pVCpu, ASMReadTSC());
4416# ifdef LOG_ENABLED
4417 LogFlow(("NEM/%u: Exit @ %04X:%08RX64 IF=%d CR8=%#x Reason=%#x\n", pVCpu->idCpu, ExitReason.VpContext.Cs.Selector,
4418 ExitReason.VpContext.Rip, RT_BOOL(ExitReason.VpContext.Rflags & X86_EFL_IF), ExitReason.VpContext.Cr8,
4419 ExitReason.ExitReason));
4420# endif
4421 if (SUCCEEDED(hrc))
4422# endif
4423 {
4424 /*
4425 * Deal with the message.
4426 */
4427# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4428 rcStrict = nemHCWinHandleMessage(pVM, pVCpu, pMappingHeader);
4429 pVCpu->nem.s.fHandleAndGetFlags |= VID_MSHAGN_F_HANDLE_MESSAGE;
4430# else
4431 rcStrict = nemR3WinHandleExit(pVM, pVCpu, &ExitReason);
4432# endif
4433 if (rcStrict == VINF_SUCCESS)
4434 { /* hopefully likely */ }
4435 else
4436 {
4437 LogFlow(("NEM/%u: breaking: nemHCWinHandleMessage -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
4438 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
4439 break;
4440 }
4441 }
4442 else
4443 {
4444# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4445
4446 /* VID.SYS merges STATUS_ALERTED and STATUS_USER_APC into STATUS_TIMEOUT,
4447 so after NtAlertThread we end up here with a STATUS_TIMEOUT. And yeah,
4448 the error code conversion is into WAIT_XXX, i.e. NT status codes. */
4449# ifndef IN_RING0
4450 DWORD rcNt = GetLastError();
4451# endif
4452 LogFlow(("NEM/%u: VidMessageSlotHandleAndGetNext -> %#x\n", pVCpu->idCpu, rcNt));
4453 AssertLogRelMsgReturn( rcNt == STATUS_TIMEOUT
4454 || rcNt == STATUS_ALERTED /* just in case */
4455 || rcNt == STATUS_USER_APC /* ditto */
4456 || rcNt == STATUS_KERNEL_APC /* ditto */
4457 , ("VidMessageSlotHandleAndGetNext failed for CPU #%u: %#x (%u)\n",
4458 pVCpu->idCpu, rcNt, rcNt),
4459 VERR_NEM_IPE_0);
4460 pVCpu->nem.s.fHandleAndGetFlags = VID_MSHAGN_F_GET_NEXT_MESSAGE;
4461 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatGetMsgTimeout);
4462# else
4463 AssertLogRelMsgFailedReturn(("WHvRunVirtualProcessor failed for CPU #%u: %#x (%u)\n",
4464 pVCpu->idCpu, hrc, GetLastError()),
4465 VERR_NEM_IPE_0);
4466# endif
4467 }
4468
4469 /*
4470 * If no relevant FFs are pending, loop.
4471 */
4472 if ( !VM_FF_IS_ANY_SET( pVM, !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
4473 && !VMCPU_FF_IS_ANY_SET(pVCpu, !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
4474 continue;
4475
4476             /** @todo Try to handle pending flags, not just return to the EM loops.  Take care
4477 * not to set important RCs here unless we've handled a message. */
4478 LogFlow(("NEM/%u: breaking: pending FF (%#x / %#RX64)\n",
4479 pVCpu->idCpu, pVM->fGlobalForcedActions, (uint64_t)pVCpu->fLocalForcedActions));
4480 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPost);
4481 }
4482 else
4483 {
4484 LogFlow(("NEM/%u: breaking: canceled %d (pre exec)\n", pVCpu->idCpu, VMCPU_GET_STATE(pVCpu) ));
4485 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnCancel);
4486 }
4487 }
4488 else
4489 {
4490 LogFlow(("NEM/%u: breaking: pending FF (pre exec)\n", pVCpu->idCpu));
4491 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPre);
4492 }
4493 break;
4494 } /* the run loop */
4495
4496
4497 /*
4498      * If the CPU is running, make sure to stop it before we try to sync back the
4499 * state and return to EM. We don't sync back the whole state if we can help it.
4500 */
4501# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4502 if (pVCpu->nem.s.fHandleAndGetFlags == VID_MSHAGN_F_GET_NEXT_MESSAGE)
4503 {
4504 pVCpu->nem.s.fHandleAndGetFlags = 0;
4505 rcStrict = nemHCWinStopCpu(pVM, pVCpu, rcStrict, pMappingHeader);
4506 }
4507# endif
4508
4509 if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
4510 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
4511
4512 if (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)))
4513 {
4514         /* Try to anticipate what we might need. */
4515 uint64_t fImport = IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI;
4516 if ( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
4517 || RT_FAILURE(rcStrict))
4518 fImport = CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT);
4519# ifdef IN_RING0 /* Ring-3 I/O port access optimizations: */
4520 else if ( rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
4521 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
4522 fImport = CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT;
4523 else if (rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
4524 fImport = CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT;
4525# endif
4526 else if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_APIC
4527 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
4528 fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;
4529
4530 if (pVCpu->cpum.GstCtx.fExtrn & fImport)
4531 {
4532# ifdef IN_RING0
4533 int rc2 = nemR0WinImportState(pVM, pVCpu, &pVCpu->cpum.GstCtx, fImport | CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT,
4534 true /*fCanUpdateCr3*/);
4535 if (RT_SUCCESS(rc2))
4536 pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
4537 else if (rc2 == VERR_NEM_FLUSH_TLB)
4538 {
4539 pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
4540 if (rcStrict == VINF_SUCCESS || rcStrict == -rc2)
4541 rcStrict = -rc2;
4542 else
4543 {
4544 pVCpu->nem.s.rcPending = -rc2;
4545 LogFlow(("NEM/%u: rcPending=%Rrc (rcStrict=%Rrc)\n", pVCpu->idCpu, rc2, VBOXSTRICTRC_VAL(rcStrict) ));
4546 }
4547 }
4548# else
4549 int rc2 = nemHCWinCopyStateFromHyperV(pVM, pVCpu, fImport | CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT);
4550 if (RT_SUCCESS(rc2))
4551 pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
4552# endif
4553 else if (RT_SUCCESS(rcStrict))
4554 rcStrict = rc2;
4555 if (!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT))))
4556 pVCpu->cpum.GstCtx.fExtrn = 0;
4557 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
4558 }
4559 else
4560 {
4561 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
4562 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
4563 }
4564 }
4565 else
4566 {
4567 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
4568 pVCpu->cpum.GstCtx.fExtrn = 0;
4569 }
4570
4571 LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 => %Rrc\n",
4572 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, VBOXSTRICTRC_VAL(rcStrict) ));
4573 return rcStrict;
4574}
4575
4576#endif /* defined(NEM_WIN_TEMPLATE_MODE_OWN_RUN_API) || defined(IN_RING3) */
4577#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING3)
4578
4579/**
4580 * @callback_method_impl{FNPGMPHYSNEMCHECKPAGE}
4581 */
4582NEM_TMPL_STATIC DECLCALLBACK(int) nemHCWinUnsetForA20CheckerCallback(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys,
4583 PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
4584{
4585 /* We'll just unmap the memory. */
4586 if (pInfo->u2NemState > NEM_WIN_PAGE_STATE_UNMAPPED)
4587 {
4588# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
4589 int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys);
4590 AssertRC(rc);
4591 if (RT_SUCCESS(rc))
4592# else
4593 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
4594 if (SUCCEEDED(hrc))
4595# endif
4596 {
4597 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
4598 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4599 Log5(("NEM GPA unmapped/A20: %RGp (was %s, cMappedPages=%u)\n", GCPhys, g_apszPageStates[pInfo->u2NemState], cMappedPages));
4600 pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
4601 }
4602 else
4603 {
4604 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
4605# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
4606 LogRel(("nemHCWinUnsetForA20CheckerCallback/unmap: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));
4607 return rc;
4608# else
4609 LogRel(("nemHCWinUnsetForA20CheckerCallback/unmap: GCPhys=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
4610 GCPhys, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
4611 return VERR_NEM_IPE_2;
4612# endif
4613 }
4614 }
4615 RT_NOREF(pVCpu, pvUser);
4616 return VINF_SUCCESS;
4617}
4618
4619
4620/**
4621 * Unmaps a page from Hyper-V for the purpose of emulating A20 gate behavior.
4622 *
4623 * @returns The PGMPhysNemQueryPageInfo result.
4624 * @param pVM The cross context VM structure.
4625 * @param pVCpu The cross context virtual CPU structure.
4626 * @param GCPhys The page to unmap.
4627 */
4628NEM_TMPL_STATIC int nemHCWinUnmapPageForA20Gate(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys)
4629{
4630 PGMPHYSNEMPAGEINFO Info;
4631 return PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhys, false /*fMakeWritable*/, &Info,
4632 nemHCWinUnsetForA20CheckerCallback, NULL);
4633}
4634
4635#endif /* defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING3) */
4636
4637void nemHCNativeNotifyHandlerPhysicalRegister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
4638{
4639 Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
4640 NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb);
4641}
4642
4643
4644VMM_INT_DECL(void) NEMHCNotifyHandlerPhysicalDeregister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
4645 RTR3PTR pvMemR3, uint8_t *pu2State)
4646{
4647 Log5(("NEMHCNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d pvMemR3=%p pu2State=%p (%d)\n",
4648 GCPhys, cb, enmKind, pvMemR3, pu2State, *pu2State));
4649
4650 *pu2State = UINT8_MAX;
4651#if !defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) && defined(VBOX_WITH_PGM_NEM_MODE) && defined(IN_RING3)
4652 if (pvMemR3)
4653 {
4654 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
4655 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvMemR3, GCPhys, cb,
4656 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute | WHvMapGpaRangeFlagWrite);
4657 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
4658 if (SUCCEEDED(hrc))
4659 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
4660 else
4661 AssertLogRelMsgFailed(("NEMHCNotifyHandlerPhysicalDeregister: WHvMapGpaRange(,%p,%RGp,%RGp,) -> %Rhrc\n",
4662 pvMemR3, GCPhys, cb, hrc));
4663 }
4664 RT_NOREF(enmKind);
4665#else
4666 RT_NOREF(pVM, enmKind, GCPhys, cb, pvMemR3);
4667#endif
4668}
4669
4670
4671void nemHCNativeNotifyHandlerPhysicalModify(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
4672 RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
4673{
4674 Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
4675 GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
4676 NOREF(pVM); NOREF(enmKind); NOREF(GCPhysOld); NOREF(GCPhysNew); NOREF(cb); NOREF(fRestoreAsRAM);
4677}
4678
4679
4680#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING3)
4681/**
4682 * Worker that maps pages into Hyper-V.
4683 *
4684 * This is used by the PGM physical page notifications as well as the memory
4685 * access VMEXIT handlers.
4686 *
4687 * @returns VBox status code.
4688 * @param pVM The cross context VM structure.
4689 * @param pVCpu The cross context virtual CPU structure of the
4690 * calling EMT.
4691 * @param GCPhysSrc The source page address.
4692 * @param GCPhysDst The hyper-V destination page. This may differ from
4693 * GCPhysSrc when A20 is disabled.
4694 * @param fPageProt NEM_PAGE_PROT_XXX.
4695 * @param pu2State Our page state (input/output).
4696 * @param fBackingChanged Set if the page backing is being changed.
4697 * @thread EMT(pVCpu)
4698 */
4699NEM_TMPL_STATIC int nemHCNativeSetPhysPage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
4700 uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged)
4701{
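    /* (Sketch of the *pu2State tracking done below, using the two bits PGM lends
        us; a summary of this function rather than an authoritative spec:
            UNMAPPED -> WRITABLE   when a writable mapping is established;
            UNMAPPED -> READABLE   when a read+exec mapping is established;
            mapped   -> UNMAPPED   first when changing backing/protection, except
                                   in hypercall mode where pages can be remapped
                                   in place.) */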
4702# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
4703 /*
4704 * When using the hypercalls instead of the ring-3 APIs, we don't need to
4705 * unmap memory before modifying it. We still want to track the state though,
4706      * since unmapping will fail when called on an unmapped page and we don't want to redo
4707 * upgrades/downgrades.
4708 */
4709 uint8_t const u2OldState = *pu2State;
4710 int rc;
4711 if (fPageProt == NEM_PAGE_PROT_NONE)
4712 {
4713 if (u2OldState > NEM_WIN_PAGE_STATE_UNMAPPED)
4714 {
4715 rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
4716 if (RT_SUCCESS(rc))
4717 {
4718 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
4719 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
4720 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4721 Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
4722 }
4723 else
4724 {
4725 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
4726 AssertLogRelMsgFailed(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4727 }
4728 }
4729 else
4730 rc = VINF_SUCCESS;
4731 }
4732 else if (fPageProt & NEM_PAGE_PROT_WRITE)
4733 {
4734 if (u2OldState != NEM_WIN_PAGE_STATE_WRITABLE || fBackingChanged)
4735 {
4736 rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
4737 HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE
4738 | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
4739 if (RT_SUCCESS(rc))
4740 {
4741 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
4742 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPage);
4743 uint32_t cMappedPages = u2OldState <= NEM_WIN_PAGE_STATE_UNMAPPED
4744 ? ASMAtomicIncU32(&pVM->nem.s.cMappedPages) : pVM->nem.s.cMappedPages;
4745 Log5(("NEM GPA writable/set: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
4746 NOREF(cMappedPages);
4747 }
4748 else
4749 {
4750 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
4751 AssertLogRelMsgFailed(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4752 }
4753 }
4754 else
4755 rc = VINF_SUCCESS;
4756 }
4757 else
4758 {
4759 if (u2OldState != NEM_WIN_PAGE_STATE_READABLE || fBackingChanged)
4760 {
4761 rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
4762 HV_MAP_GPA_READABLE | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
4763 if (RT_SUCCESS(rc))
4764 {
4765 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
4766 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPage);
4767 uint32_t cMappedPages = u2OldState <= NEM_WIN_PAGE_STATE_UNMAPPED
4768 ? ASMAtomicIncU32(&pVM->nem.s.cMappedPages) : pVM->nem.s.cMappedPages;
4769 Log5(("NEM GPA read+exec/set: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
4770 NOREF(cMappedPages);
4771 }
4772 else
4773 {
4774 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
4775                 AssertLogRelMsgFailed(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4776 }
4777 }
4778 else
4779 rc = VINF_SUCCESS;
4780 }
4781
4782     return rc;
4783
4784# else /* !NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
4785 /*
4786 * Looks like we need to unmap a page before we can change the backing
4787 * or even modify the protection. This is going to be *REALLY* efficient.
4788 * PGM lends us two bits to keep track of the state here.
4789 */
4790 RT_NOREF(pVCpu);
4791 uint8_t const u2OldState = *pu2State;
4792 uint8_t const u2NewState = fPageProt & NEM_PAGE_PROT_WRITE ? NEM_WIN_PAGE_STATE_WRITABLE
4793 : fPageProt & NEM_PAGE_PROT_READ ? NEM_WIN_PAGE_STATE_READABLE : NEM_WIN_PAGE_STATE_UNMAPPED;
4794 if ( fBackingChanged
4795 || u2NewState != u2OldState)
4796 {
4797 if (u2OldState > NEM_WIN_PAGE_STATE_UNMAPPED)
4798 {
4799# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
4800 int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
4801 AssertRC(rc);
4802 if (RT_SUCCESS(rc))
4803 {
4804 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
4805 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
4806 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4807 if (u2NewState == NEM_WIN_PAGE_STATE_UNMAPPED)
4808 {
4809 Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n",
4810 GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
4811 return VINF_SUCCESS;
4812 }
4813 }
4814 else
4815 {
4816 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
4817 LogRel(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4818 return rc;
4819 }
4820# else
4821 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
4822 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst, X86_PAGE_SIZE);
4823 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
4824 if (SUCCEEDED(hrc))
4825 {
4826 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
4827 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
4828 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4829 if (u2NewState == NEM_WIN_PAGE_STATE_UNMAPPED)
4830 {
4831 Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n",
4832 GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
4833 return VINF_SUCCESS;
4834 }
4835 }
4836 else
4837 {
4838 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
4839 LogRel(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
4840 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
4841 return VERR_NEM_INIT_FAILED;
4842 }
4843# endif
4844 }
4845 }
4846
4847 /*
4848 * Writeable mapping?
4849 */
4850 if (fPageProt & NEM_PAGE_PROT_WRITE)
4851 {
4852# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
4853 int rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
4854 HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE
4855 | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
4856 AssertRC(rc);
4857 if (RT_SUCCESS(rc))
4858 {
4859 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
4860 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPage);
4861 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4862 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
4863 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
4864 return VINF_SUCCESS;
4865 }
4866 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
4867 LogRel(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4868 return rc;
4869# else
4870 void *pvPage;
4871 int rc = nemR3NativeGCPhys2R3PtrWriteable(pVM, GCPhysSrc, &pvPage);
4872 if (RT_SUCCESS(rc))
4873 {
4874 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvPage, GCPhysDst, X86_PAGE_SIZE,
4875 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute | WHvMapGpaRangeFlagWrite);
4876 if (SUCCEEDED(hrc))
4877 {
4878 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
4879 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPage);
4880 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4881 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
4882 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
4883 return VINF_SUCCESS;
4884 }
4885 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
4886 LogRel(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
4887 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
4888 return VERR_NEM_INIT_FAILED;
4889 }
4890 LogRel(("nemHCNativeSetPhysPage/writable: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
4891 return rc;
4892# endif
4893 }
4894
4895 if (fPageProt & NEM_PAGE_PROT_READ)
4896 {
4897# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
4898 int rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
4899 HV_MAP_GPA_READABLE | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
4900 AssertRC(rc);
4901 if (RT_SUCCESS(rc))
4902 {
4903 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
4904 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPage);
4905 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4906 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
4907 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
4908 return VINF_SUCCESS;
4909 }
4910 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
4911 LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4912 return rc;
4913# else
4914 const void *pvPage;
4915 int rc = nemR3NativeGCPhys2R3PtrReadOnly(pVM, GCPhysSrc, &pvPage);
4916 if (RT_SUCCESS(rc))
4917 {
4918 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRangePage, a);
4919 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, (void *)pvPage, GCPhysDst, X86_PAGE_SIZE,
4920 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
4921 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRangePage, a);
4922 if (SUCCEEDED(hrc))
4923 {
4924 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
4925 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPage);
4926 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4927 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
4928 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
4929 return VINF_SUCCESS;
4930 }
4931 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
4932 LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
4933 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
4934 return VERR_NEM_INIT_FAILED;
4935 }
4936 LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
4937 return rc;
4938# endif
4939 }
4940
4941 /* We already unmapped it above. */
4942 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
4943 return VINF_SUCCESS;
4944# endif /* !NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
4945}
4946#endif /* defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING3) */
4947
4948
4949NEM_TMPL_STATIC int nemHCJustUnmapPageFromHyperV(PVMCC pVM, RTGCPHYS GCPhysDst, uint8_t *pu2State)
4950{
4951 if (*pu2State <= NEM_WIN_PAGE_STATE_UNMAPPED)
4952 {
4953 Log5(("nemHCJustUnmapPageFromHyperV: %RGp == unmapped\n", GCPhysDst));
4954 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
4955 return VINF_SUCCESS;
4956 }
4957
4958#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES)
4959 PVMCPUCC pVCpu = VMMGetCpu(pVM);
4960 int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
4961 AssertRC(rc);
4962 if (RT_SUCCESS(rc))
4963 {
4964 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
4965 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4966 Log5(("NEM GPA unmapped/just: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[*pu2State], cMappedPages));
4967 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
4968 return VINF_SUCCESS;
4969 }
4970 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
4971 LogRel(("nemHCJustUnmapPageFromHyperV/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4972 return rc;
4973
4974#elif defined(IN_RING3)
4975 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
4976 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, X86_PAGE_SIZE);
4977 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
4978 if (SUCCEEDED(hrc))
4979 {
4980 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
4981 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4982 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
4983 Log5(("nemHCJustUnmapPageFromHyperV: %RGp => unmapped (total %u)\n", GCPhysDst, cMappedPages));
4984 return VINF_SUCCESS;
4985 }
4986 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
4987 LogRel(("nemHCJustUnmapPageFromHyperV(%RGp): failed! hrc=%Rhrc (%#x) Last=%#x/%u\n",
4988 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
4989 return VERR_NEM_IPE_6;
4990#else
4991 RT_NOREF(pVM, GCPhysDst, pu2State);
4992 LogRel(("nemHCJustUnmapPageFromHyperV(%RGp): Why are we here?!?\n", GCPhysDst));
4993 return VERR_NEM_IPE_6;
4994#endif
4995}
4996
4997
4998int nemHCNativeNotifyPhysPageAllocated(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
4999 PGMPAGETYPE enmType, uint8_t *pu2State)
5000{
5001 Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
5002 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
5003 RT_NOREF_PV(HCPhys); RT_NOREF_PV(enmType);
5004
5005 int rc;
5006#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
5007 PVMCPUCC pVCpu = VMMGetCpu(pVM);
5008# ifdef NEM_WIN_WITH_A20
5009 if ( pVM->nem.s.fA20Enabled
5010 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
5011# endif
5012 rc = nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
5013# ifdef NEM_WIN_WITH_A20
5014 else
5015 {
5016 /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
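        /* (Loosely: with the A20 gate disabled, physical address bit 20 reads as
            zero, so the page at GCPhys | RT_BIT_32(20) aliases the one at GCPhys.
            Keeping that alias unmapped forces an exit on access, which lets the
            wraparound be emulated lazily.) */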
5017 rc = nemHCWinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
5018 if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys) && RT_SUCCESS(rc))
5019 rc = nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
5020
5021 }
5022# endif
5023#else
5024 RT_NOREF_PV(fPageProt);
5025# ifdef NEM_WIN_WITH_A20
5026 if ( pVM->nem.s.fA20Enabled
5027 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
5028# endif
5029 rc = nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
5030# ifdef NEM_WIN_WITH_A20
5031 else if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
5032 rc = nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
5033 else
5034 rc = VINF_SUCCESS; /* ignore since we've got the alias page at this address. */
5035# endif
5036#endif
5037 return rc;
5038}
5039
5040
5041VMM_INT_DECL(void) NEMHCNotifyPhysPageProtChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, RTR3PTR pvR3, uint32_t fPageProt,
5042 PGMPAGETYPE enmType, uint8_t *pu2State)
5043{
5044 Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
5045 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
5046 Assert(VM_IS_NEM_ENABLED(pVM));
5047 RT_NOREF(HCPhys, enmType, pvR3);
5048
5049#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
5050 PVMCPUCC pVCpu = VMMGetCpu(pVM);
5051# ifdef NEM_WIN_WITH_A20
5052 if ( pVM->nem.s.fA20Enabled
5053 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
5054# endif
5055 nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, false /*fBackingChanged*/);
5056# ifdef NEM_WIN_WITH_A20
5057 else
5058 {
5059 /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
5060 nemHCWinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
5061 if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
5062 nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, false /*fBackingChanged*/);
5063 }
5064# endif
5065#else
5066 RT_NOREF_PV(fPageProt);
5067# ifdef NEM_WIN_WITH_A20
5068 if ( pVM->nem.s.fA20Enabled
5069 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
5070# endif
5071 nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
5072# ifdef NEM_WIN_WITH_A20
5073 else if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
5074 nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
5075 /* else: ignore since we've got the alias page at this address. */
5076# endif
5077#endif
5078}
5079
5080
5081VMM_INT_DECL(void) NEMHCNotifyPhysPageChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
5082 RTR3PTR pvNewR3, uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
5083{
5084 Log5(("nemHCNativeNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp pvNewR3=%p fPageProt=%#x enmType=%d *pu2State=%d\n",
5085 GCPhys, HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType, *pu2State));
5086 Assert(VM_IS_NEM_ENABLED(pVM));
5087 RT_NOREF(HCPhysPrev, HCPhysNew, pvNewR3, enmType);
5088
5089#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
5090 PVMCPUCC pVCpu = VMMGetCpu(pVM);
5091# ifdef NEM_WIN_WITH_A20
5092 if ( pVM->nem.s.fA20Enabled
5093 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
5094# endif
5095 nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
5096# ifdef NEM_WIN_WITH_A20
5097 else
5098 {
5099 /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
5100 nemHCWinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
5101 if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
5102 nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
5103 }
5104# endif
5105#else
5106 RT_NOREF_PV(fPageProt);
5107# ifdef NEM_WIN_WITH_A20
5108 if ( pVM->nem.s.fA20Enabled
5109 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
5110# endif
5111 nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
5112# ifdef NEM_WIN_WITH_A20
5113 else if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
5114 nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
5115 /* else: ignore since we've got the alias page at this address. */
5116# endif
5117#endif
5118}
5119