VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/NEMAllNativeTemplate-win.cpp.h @ 73282

Last change on this file since 73282 was 73282, checked in by vboxsync, 6 years ago

NEM/win: Kicked out VINF/VERR_NEM_UPDATE_APIC_BASE and VINF/VERR_NEM_CHANGE_PGM_MODE and associated complications. bugref:9044

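Note: this file is a code template rather than a standalone compilation unit; it is pulled into both the ring-3 and ring-0 NEM backends, with the IN_RING0/IN_RING3 conditionals selecting the hypercall-based or WinHvPlatform-based paths. As a rough sketch of how such a template is consumed (the exact define value and including file are assumptions, not verified against the VirtualBox build system):

    /* Illustrative only -- e.g. in the ring-3 backend (NEMR3Native-win.cpp, assumed): */
    #define NEM_TMPL_STATIC static               /* controls visibility of template functions */
    #include "NEMAllNativeTemplate-win.cpp.h"    /* instantiates the template for this ring */
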
/* $Id: NEMAllNativeTemplate-win.cpp.h 73282 2018-07-20 20:04:26Z vboxsync $ */
/** @file
 * NEM - Native execution manager, Windows code template ring-0/3.
 */

/*
 * Copyright (C) 2018 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** Copy back a segment from hyper-V. */
#define NEM_WIN_COPY_BACK_SEG(a_Dst, a_Src) \
    do { \
        (a_Dst).u64Base = (a_Src).Base; \
        (a_Dst).u32Limit = (a_Src).Limit; \
        (a_Dst).ValidSel = (a_Dst).Sel = (a_Src).Selector; \
        (a_Dst).Attr.u = (a_Src).Attributes; \
        (a_Dst).fFlags = CPUMSELREG_FLAGS_VALID; \
    } while (0)

/** @def NEMWIN_ASSERT_MSG_REG_VAL
 * Asserts the correctness of a register value in a message/context.
 */
#if 0
# define NEMWIN_NEED_GET_REGISTER
# if defined(IN_RING0) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
#  define NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_pGVCpu, a_enmReg, a_Expr, a_Msg) \
    do { \
        HV_REGISTER_VALUE TmpVal; \
        nemHCWinGetRegister(a_pVCpu, a_pGVCpu, a_enmReg, &TmpVal); \
        AssertMsg(a_Expr, a_Msg); \
    } while (0)
# else
#  define NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_pGVCpu, a_enmReg, a_Expr, a_Msg) \
    do { \
        WHV_REGISTER_VALUE TmpVal; \
        nemR3WinGetRegister(a_pVCpu, a_enmReg, &TmpVal); \
        AssertMsg(a_Expr, a_Msg); \
    } while (0)
# endif
#else
# define NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_pGVCpu, a_enmReg, a_Expr, a_Msg) do { } while (0)
#endif

/** @def NEMWIN_ASSERT_MSG_REG_VAL64
 * Asserts the correctness of a 64-bit register value in a message/context.
 */
#define NEMWIN_ASSERT_MSG_REG_VAL64(a_pVCpu, a_pGVCpu, a_enmReg, a_u64Val) \
    NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_pGVCpu, a_enmReg, (a_u64Val) == TmpVal.Reg64, \
                              (#a_u64Val "=%#RX64, expected %#RX64\n", (a_u64Val), TmpVal.Reg64))
/** @def NEMWIN_ASSERT_MSG_REG_SEG
 * Asserts the correctness of a segment register value in a message/context.
 */
#define NEMWIN_ASSERT_MSG_REG_SEG(a_pVCpu, a_pGVCpu, a_enmReg, a_SReg) \
    NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_pGVCpu, a_enmReg, \
                                 (a_SReg).Base == TmpVal.Segment.Base \
                              && (a_SReg).Limit == TmpVal.Segment.Limit \
                              && (a_SReg).Selector == TmpVal.Segment.Selector \
                              && (a_SReg).Attributes == TmpVal.Segment.Attributes, \
                              ( #a_SReg "=%#RX16 {%#RX64 LB %#RX32,%#RX16} expected %#RX16 {%#RX64 LB %#RX32,%#RX16}\n", \
                               (a_SReg).Selector, (a_SReg).Base, (a_SReg).Limit, (a_SReg).Attributes, \
                               TmpVal.Segment.Selector, TmpVal.Segment.Base, TmpVal.Segment.Limit, TmpVal.Segment.Attributes))


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** NEM_WIN_PAGE_STATE_XXX names. */
NEM_TMPL_STATIC const char * const g_apszPageStates[4] = { "not-set", "unmapped", "readable", "writable" };

/** HV_INTERCEPT_ACCESS_TYPE names. */
static const char * const g_apszHvInterceptAccessTypes[4] = { "read", "write", "exec", "!undefined!" };


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
NEM_TMPL_STATIC int nemHCNativeSetPhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
                                           uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged);



#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES

/**
 * Wrapper around VMMR0_DO_NEM_MAP_PAGES for a single page.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the caller.
 * @param   GCPhysSrc   The source page.  Does not need to be page aligned.
 * @param   GCPhysDst   The destination page.  Same as @a GCPhysSrc except for
 *                      when A20 is disabled.
 * @param   fFlags      HV_MAP_GPA_XXX.
 */
DECLINLINE(int) nemHCWinHypercallMapPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst, uint32_t fFlags)
{
#ifdef IN_RING0
    /** @todo optimize further, caller generally has the physical address. */
    PGVM pGVM = GVMMR0FastGetGVMByVM(pVM);
    AssertReturn(pGVM, VERR_INVALID_VM_HANDLE);
    return nemR0WinMapPages(pGVM, pVM, &pGVM->aCpus[pVCpu->idCpu],
                            GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
                            GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
                            1, fFlags);
#else
    pVCpu->nem.s.Hypercall.MapPages.GCPhysSrc = GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
    pVCpu->nem.s.Hypercall.MapPages.GCPhysDst = GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
    pVCpu->nem.s.Hypercall.MapPages.cPages = 1;
    pVCpu->nem.s.Hypercall.MapPages.fFlags = fFlags;
    return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_MAP_PAGES, 0, NULL);
#endif
}


/**
 * Wrapper around VMMR0_DO_NEM_UNMAP_PAGES for a single page.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the caller.
 * @param   GCPhys      The page to unmap.  Does not need to be page aligned.
 */
DECLINLINE(int) nemHCWinHypercallUnmapPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
{
# ifdef IN_RING0
    PGVM pGVM = GVMMR0FastGetGVMByVM(pVM);
    AssertReturn(pGVM, VERR_INVALID_VM_HANDLE);
    return nemR0WinUnmapPages(pGVM, &pGVM->aCpus[pVCpu->idCpu], GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, 1);
# else
    pVCpu->nem.s.Hypercall.UnmapPages.GCPhys = GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
    pVCpu->nem.s.Hypercall.UnmapPages.cPages = 1;
    return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_UNMAP_PAGES, 0, NULL);
# endif
}

#endif /* NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
#ifndef IN_RING0

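/**
 * Exports (copies) the guest CPU state to Hyper-V.
 *
 * Depending on the build configuration this goes through the ring-0 run loop
 * (VMMR0_DO_NEM_EXPORT_STATE) or directly through the WinHvPlatform API
 * (WHvSetVirtualProcessorRegisters), writing back only the state bits not
 * currently marked as externally held.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the caller.
 */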
NEM_TMPL_STATIC int nemHCWinCopyStateToHyperV(PVM pVM, PVMCPU pVCpu)
{
# if defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) || defined(NEM_WIN_WITH_RING0_RUNLOOP)
#  if !defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) && defined(NEM_WIN_WITH_RING0_RUNLOOP)
    if (pVM->nem.s.fUseRing0Runloop)
#  endif
    {
        int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_EXPORT_STATE, 0, NULL);
        AssertLogRelRCReturn(rc, rc);
        return rc;
    }
# endif
# ifndef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS

    /*
     * The following is very similar to what nemR0WinExportState() does.
     */
    WHV_REGISTER_NAME aenmNames[128];
    WHV_REGISTER_VALUE aValues[128];

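    /* fExtrn tracks which context bits are still held externally (by Hyper-V);
       only the bits the VMM currently owns need to be written back. */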
    uint64_t const fWhat = ~pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK);
    if (   !fWhat
        && pVCpu->nem.s.fCurrentInterruptWindows == pVCpu->nem.s.fDesiredInterruptWindows)
        return VINF_SUCCESS;
    uintptr_t iReg = 0;

#  define ADD_REG64(a_enmName, a_uValue) do { \
        aenmNames[iReg] = (a_enmName); \
        aValues[iReg].Reg128.High64 = 0; \
        aValues[iReg].Reg64 = (a_uValue); \
        iReg++; \
    } while (0)
#  define ADD_REG128(a_enmName, a_uValueLo, a_uValueHi) do { \
        aenmNames[iReg] = (a_enmName); \
        aValues[iReg].Reg128.Low64 = (a_uValueLo); \
        aValues[iReg].Reg128.High64 = (a_uValueHi); \
        iReg++; \
    } while (0)

189
190 /* GPRs */
191 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
192 {
193 if (fWhat & CPUMCTX_EXTRN_RAX)
194 ADD_REG64(WHvX64RegisterRax, pVCpu->cpum.GstCtx.rax);
195 if (fWhat & CPUMCTX_EXTRN_RCX)
196 ADD_REG64(WHvX64RegisterRcx, pVCpu->cpum.GstCtx.rcx);
197 if (fWhat & CPUMCTX_EXTRN_RDX)
198 ADD_REG64(WHvX64RegisterRdx, pVCpu->cpum.GstCtx.rdx);
199 if (fWhat & CPUMCTX_EXTRN_RBX)
200 ADD_REG64(WHvX64RegisterRbx, pVCpu->cpum.GstCtx.rbx);
201 if (fWhat & CPUMCTX_EXTRN_RSP)
202 ADD_REG64(WHvX64RegisterRsp, pVCpu->cpum.GstCtx.rsp);
203 if (fWhat & CPUMCTX_EXTRN_RBP)
204 ADD_REG64(WHvX64RegisterRbp, pVCpu->cpum.GstCtx.rbp);
205 if (fWhat & CPUMCTX_EXTRN_RSI)
206 ADD_REG64(WHvX64RegisterRsi, pVCpu->cpum.GstCtx.rsi);
207 if (fWhat & CPUMCTX_EXTRN_RDI)
208 ADD_REG64(WHvX64RegisterRdi, pVCpu->cpum.GstCtx.rdi);
209 if (fWhat & CPUMCTX_EXTRN_R8_R15)
210 {
211 ADD_REG64(WHvX64RegisterR8, pVCpu->cpum.GstCtx.r8);
212 ADD_REG64(WHvX64RegisterR9, pVCpu->cpum.GstCtx.r9);
213 ADD_REG64(WHvX64RegisterR10, pVCpu->cpum.GstCtx.r10);
214 ADD_REG64(WHvX64RegisterR11, pVCpu->cpum.GstCtx.r11);
215 ADD_REG64(WHvX64RegisterR12, pVCpu->cpum.GstCtx.r12);
216 ADD_REG64(WHvX64RegisterR13, pVCpu->cpum.GstCtx.r13);
217 ADD_REG64(WHvX64RegisterR14, pVCpu->cpum.GstCtx.r14);
218 ADD_REG64(WHvX64RegisterR15, pVCpu->cpum.GstCtx.r15);
219 }
220 }
221
222 /* RIP & Flags */
223 if (fWhat & CPUMCTX_EXTRN_RIP)
224 ADD_REG64(WHvX64RegisterRip, pVCpu->cpum.GstCtx.rip);
225 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
226 ADD_REG64(WHvX64RegisterRflags, pVCpu->cpum.GstCtx.rflags.u);
227
228 /* Segments */
229# define ADD_SEG(a_enmName, a_SReg) \
230 do { \
231 aenmNames[iReg] = a_enmName; \
232 aValues[iReg].Segment.Base = (a_SReg).u64Base; \
233 aValues[iReg].Segment.Limit = (a_SReg).u32Limit; \
234 aValues[iReg].Segment.Selector = (a_SReg).Sel; \
235 aValues[iReg].Segment.Attributes = (a_SReg).Attr.u; \
236 iReg++; \
237 } while (0)
238 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
239 {
240 if (fWhat & CPUMCTX_EXTRN_ES)
241 ADD_SEG(WHvX64RegisterEs, pVCpu->cpum.GstCtx.es);
242 if (fWhat & CPUMCTX_EXTRN_CS)
243 ADD_SEG(WHvX64RegisterCs, pVCpu->cpum.GstCtx.cs);
244 if (fWhat & CPUMCTX_EXTRN_SS)
245 ADD_SEG(WHvX64RegisterSs, pVCpu->cpum.GstCtx.ss);
246 if (fWhat & CPUMCTX_EXTRN_DS)
247 ADD_SEG(WHvX64RegisterDs, pVCpu->cpum.GstCtx.ds);
248 if (fWhat & CPUMCTX_EXTRN_FS)
249 ADD_SEG(WHvX64RegisterFs, pVCpu->cpum.GstCtx.fs);
250 if (fWhat & CPUMCTX_EXTRN_GS)
251 ADD_SEG(WHvX64RegisterGs, pVCpu->cpum.GstCtx.gs);
252 }
253
254 /* Descriptor tables & task segment. */
255 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
256 {
257 if (fWhat & CPUMCTX_EXTRN_LDTR)
258 ADD_SEG(WHvX64RegisterLdtr, pVCpu->cpum.GstCtx.ldtr);
259 if (fWhat & CPUMCTX_EXTRN_TR)
260 ADD_SEG(WHvX64RegisterTr, pVCpu->cpum.GstCtx.tr);
261 if (fWhat & CPUMCTX_EXTRN_IDTR)
262 {
263 aenmNames[iReg] = WHvX64RegisterIdtr;
264 aValues[iReg].Table.Limit = pVCpu->cpum.GstCtx.idtr.cbIdt;
265 aValues[iReg].Table.Base = pVCpu->cpum.GstCtx.idtr.pIdt;
266 iReg++;
267 }
268 if (fWhat & CPUMCTX_EXTRN_GDTR)
269 {
270 aenmNames[iReg] = WHvX64RegisterGdtr;
271 aValues[iReg].Table.Limit = pVCpu->cpum.GstCtx.gdtr.cbGdt;
272 aValues[iReg].Table.Base = pVCpu->cpum.GstCtx.gdtr.pGdt;
273 iReg++;
274 }
275 }
276
277 /* Control registers. */
278 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
279 {
280 if (fWhat & CPUMCTX_EXTRN_CR0)
281 ADD_REG64(WHvX64RegisterCr0, pVCpu->cpum.GstCtx.cr0);
282 if (fWhat & CPUMCTX_EXTRN_CR2)
283 ADD_REG64(WHvX64RegisterCr2, pVCpu->cpum.GstCtx.cr2);
284 if (fWhat & CPUMCTX_EXTRN_CR3)
285 ADD_REG64(WHvX64RegisterCr3, pVCpu->cpum.GstCtx.cr3);
286 if (fWhat & CPUMCTX_EXTRN_CR4)
287 ADD_REG64(WHvX64RegisterCr4, pVCpu->cpum.GstCtx.cr4);
288 }
289 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
290 ADD_REG64(WHvX64RegisterCr8, CPUMGetGuestCR8(pVCpu));
291
292 /* Debug registers. */
293/** @todo fixme. Figure out what the hyper-v version of KVM_SET_GUEST_DEBUG would be. */
294 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
295 {
296 ADD_REG64(WHvX64RegisterDr0, pVCpu->cpum.GstCtx.dr[0]); // CPUMGetHyperDR0(pVCpu));
297 ADD_REG64(WHvX64RegisterDr1, pVCpu->cpum.GstCtx.dr[1]); // CPUMGetHyperDR1(pVCpu));
298 ADD_REG64(WHvX64RegisterDr2, pVCpu->cpum.GstCtx.dr[2]); // CPUMGetHyperDR2(pVCpu));
299 ADD_REG64(WHvX64RegisterDr3, pVCpu->cpum.GstCtx.dr[3]); // CPUMGetHyperDR3(pVCpu));
300 }
301 if (fWhat & CPUMCTX_EXTRN_DR6)
302 ADD_REG64(WHvX64RegisterDr6, pVCpu->cpum.GstCtx.dr[6]); // CPUMGetHyperDR6(pVCpu));
303 if (fWhat & CPUMCTX_EXTRN_DR7)
304 ADD_REG64(WHvX64RegisterDr7, pVCpu->cpum.GstCtx.dr[7]); // CPUMGetHyperDR7(pVCpu));
305
306 /* Floating point state. */
307 if (fWhat & CPUMCTX_EXTRN_X87)
308 {
309 ADD_REG128(WHvX64RegisterFpMmx0, pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[0].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[0].au64[1]);
310 ADD_REG128(WHvX64RegisterFpMmx1, pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[1].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[1].au64[1]);
311 ADD_REG128(WHvX64RegisterFpMmx2, pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[2].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[2].au64[1]);
312 ADD_REG128(WHvX64RegisterFpMmx3, pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[3].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[3].au64[1]);
313 ADD_REG128(WHvX64RegisterFpMmx4, pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[4].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[4].au64[1]);
314 ADD_REG128(WHvX64RegisterFpMmx5, pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[5].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[5].au64[1]);
315 ADD_REG128(WHvX64RegisterFpMmx6, pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[6].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[6].au64[1]);
316 ADD_REG128(WHvX64RegisterFpMmx7, pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[7].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[7].au64[1]);
317
318 aenmNames[iReg] = WHvX64RegisterFpControlStatus;
319 aValues[iReg].FpControlStatus.FpControl = pVCpu->cpum.GstCtx.pXStateR3->x87.FCW;
320 aValues[iReg].FpControlStatus.FpStatus = pVCpu->cpum.GstCtx.pXStateR3->x87.FSW;
321 aValues[iReg].FpControlStatus.FpTag = pVCpu->cpum.GstCtx.pXStateR3->x87.FTW;
322 aValues[iReg].FpControlStatus.Reserved = pVCpu->cpum.GstCtx.pXStateR3->x87.FTW >> 8;
323 aValues[iReg].FpControlStatus.LastFpOp = pVCpu->cpum.GstCtx.pXStateR3->x87.FOP;
324 aValues[iReg].FpControlStatus.LastFpRip = (pVCpu->cpum.GstCtx.pXStateR3->x87.FPUIP)
325 | ((uint64_t)pVCpu->cpum.GstCtx.pXStateR3->x87.CS << 32)
326 | ((uint64_t)pVCpu->cpum.GstCtx.pXStateR3->x87.Rsrvd1 << 48);
327 iReg++;
328
329 aenmNames[iReg] = WHvX64RegisterXmmControlStatus;
330 aValues[iReg].XmmControlStatus.LastFpRdp = (pVCpu->cpum.GstCtx.pXStateR3->x87.FPUDP)
331 | ((uint64_t)pVCpu->cpum.GstCtx.pXStateR3->x87.DS << 32)
332 | ((uint64_t)pVCpu->cpum.GstCtx.pXStateR3->x87.Rsrvd2 << 48);
333 aValues[iReg].XmmControlStatus.XmmStatusControl = pVCpu->cpum.GstCtx.pXStateR3->x87.MXCSR;
334 aValues[iReg].XmmControlStatus.XmmStatusControlMask = pVCpu->cpum.GstCtx.pXStateR3->x87.MXCSR_MASK; /** @todo ??? (Isn't this an output field?) */
335 iReg++;
336 }
337
338 /* Vector state. */
339 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
340 {
341 ADD_REG128(WHvX64RegisterXmm0, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 0].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 0].uXmm.s.Hi);
342 ADD_REG128(WHvX64RegisterXmm1, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 1].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 1].uXmm.s.Hi);
343 ADD_REG128(WHvX64RegisterXmm2, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 2].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 2].uXmm.s.Hi);
344 ADD_REG128(WHvX64RegisterXmm3, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 3].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 3].uXmm.s.Hi);
345 ADD_REG128(WHvX64RegisterXmm4, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 4].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 4].uXmm.s.Hi);
346 ADD_REG128(WHvX64RegisterXmm5, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 5].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 5].uXmm.s.Hi);
347 ADD_REG128(WHvX64RegisterXmm6, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 6].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 6].uXmm.s.Hi);
348 ADD_REG128(WHvX64RegisterXmm7, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 7].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 7].uXmm.s.Hi);
349 ADD_REG128(WHvX64RegisterXmm8, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 8].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 8].uXmm.s.Hi);
350 ADD_REG128(WHvX64RegisterXmm9, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 9].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 9].uXmm.s.Hi);
351 ADD_REG128(WHvX64RegisterXmm10, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[10].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[10].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm11, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[11].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[11].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm12, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[12].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[12].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm13, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[13].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[13].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm14, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[14].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[14].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm15, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[15].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[15].uXmm.s.Hi);
    }

    /* MSRs */
    // WHvX64RegisterTsc - don't touch
    if (fWhat & CPUMCTX_EXTRN_EFER)
        ADD_REG64(WHvX64RegisterEfer, pVCpu->cpum.GstCtx.msrEFER);
    if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
        ADD_REG64(WHvX64RegisterKernelGsBase, pVCpu->cpum.GstCtx.msrKERNELGSBASE);
    if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
    {
        ADD_REG64(WHvX64RegisterSysenterCs, pVCpu->cpum.GstCtx.SysEnter.cs);
        ADD_REG64(WHvX64RegisterSysenterEip, pVCpu->cpum.GstCtx.SysEnter.eip);
        ADD_REG64(WHvX64RegisterSysenterEsp, pVCpu->cpum.GstCtx.SysEnter.esp);
    }
    if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
    {
        ADD_REG64(WHvX64RegisterStar, pVCpu->cpum.GstCtx.msrSTAR);
        ADD_REG64(WHvX64RegisterLstar, pVCpu->cpum.GstCtx.msrLSTAR);
        ADD_REG64(WHvX64RegisterCstar, pVCpu->cpum.GstCtx.msrCSTAR);
        ADD_REG64(WHvX64RegisterSfmask, pVCpu->cpum.GstCtx.msrSFMASK);
    }
    if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
    {
        ADD_REG64(WHvX64RegisterApicBase, APICGetBaseMsrNoCheck(pVCpu));
        ADD_REG64(WHvX64RegisterPat, pVCpu->cpum.GstCtx.msrPAT);
#if 0 /** @todo check if WHvX64RegisterMsrMtrrCap works here... */
        ADD_REG64(WHvX64RegisterMsrMtrrCap, CPUMGetGuestIa32MtrrCap(pVCpu));
#endif
        PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
        ADD_REG64(WHvX64RegisterMsrMtrrDefType, pCtxMsrs->msr.MtrrDefType);
        ADD_REG64(WHvX64RegisterMsrMtrrFix64k00000, pCtxMsrs->msr.MtrrFix64K_00000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix16k80000, pCtxMsrs->msr.MtrrFix16K_80000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix16kA0000, pCtxMsrs->msr.MtrrFix16K_A0000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kC0000, pCtxMsrs->msr.MtrrFix4K_C0000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kC8000, pCtxMsrs->msr.MtrrFix4K_C8000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kD0000, pCtxMsrs->msr.MtrrFix4K_D0000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kD8000, pCtxMsrs->msr.MtrrFix4K_D8000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kE0000, pCtxMsrs->msr.MtrrFix4K_E0000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kE8000, pCtxMsrs->msr.MtrrFix4K_E8000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kF0000, pCtxMsrs->msr.MtrrFix4K_F0000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kF8000, pCtxMsrs->msr.MtrrFix4K_F8000);
        ADD_REG64(WHvX64RegisterTscAux, pCtxMsrs->msr.TscAux);
#if 0 /** @todo these registers aren't available? Might explain something... */
        const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM->pVM);
        if (enmCpuVendor != CPUMCPUVENDOR_AMD)
        {
            ADD_REG64(HvX64RegisterIa32MiscEnable, pCtxMsrs->msr.MiscEnable);
            ADD_REG64(HvX64RegisterIa32FeatureControl, CPUMGetGuestIa32FeatureControl(pVCpu));
        }
#endif
    }

    /* event injection (clear it). */
    if (fWhat & CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)
        ADD_REG64(WHvRegisterPendingInterruption, 0);

    /* Interruptibility state.  This can get a little complicated since we get
       half of the state via HV_X64_VP_EXECUTION_STATE. */
    if (   (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
        ==          (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI) )
    {
        ADD_REG64(WHvRegisterInterruptState, 0);
        if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
            && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip)
            aValues[iReg - 1].InterruptState.InterruptShadow = 1;
        if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
            aValues[iReg - 1].InterruptState.NmiMasked = 1;
    }
    else if (fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT)
    {
        if (   pVCpu->nem.s.fLastInterruptShadow
            || (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
                && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip))
        {
            ADD_REG64(WHvRegisterInterruptState, 0);
            if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
                && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip)
                aValues[iReg - 1].InterruptState.InterruptShadow = 1;
            /** @todo Retrieve NMI state, currently assuming it's zero. (yes this may happen on I/O) */
            //if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
            //    aValues[iReg - 1].InterruptState.NmiMasked = 1;
        }
    }
    else
        Assert(!(fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI));

    /* Interrupt windows. Always set if active as Hyper-V seems to be forgetful. */
    uint8_t const fDesiredIntWin = pVCpu->nem.s.fDesiredInterruptWindows;
    if (   fDesiredIntWin
        || pVCpu->nem.s.fCurrentInterruptWindows != fDesiredIntWin)
    {
        pVCpu->nem.s.fCurrentInterruptWindows = pVCpu->nem.s.fDesiredInterruptWindows;
        ADD_REG64(WHvX64RegisterDeliverabilityNotifications, fDesiredIntWin);
        Assert(aValues[iReg - 1].DeliverabilityNotifications.NmiNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_NMI));
        Assert(aValues[iReg - 1].DeliverabilityNotifications.InterruptNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_REGULAR));
        Assert(aValues[iReg - 1].DeliverabilityNotifications.InterruptPriority == (fDesiredIntWin & NEM_WIN_INTW_F_PRIO_MASK) >> NEM_WIN_INTW_F_PRIO_SHIFT);
    }

    /// @todo WHvRegisterPendingEvent0
    /// @todo WHvRegisterPendingEvent1

    /*
     * Set the registers.
     */
    Assert(iReg < RT_ELEMENTS(aValues));
    Assert(iReg < RT_ELEMENTS(aenmNames));
#  ifdef NEM_WIN_INTERCEPT_NT_IO_CTLS
    Log12(("Calling WHvSetVirtualProcessorRegisters(%p, %u, %p, %u, %p)\n",
           pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues));
#  endif
    HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues);
    if (SUCCEEDED(hrc))
    {
        pVCpu->cpum.GstCtx.fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM;
        return VINF_SUCCESS;
    }
    AssertLogRelMsgFailed(("WHvSetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
                           pVM->nem.s.hPartition, pVCpu->idCpu, iReg,
                           hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
    return VERR_INTERNAL_ERROR;

#  undef ADD_REG64
#  undef ADD_REG128
#  undef ADD_SEG

# endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
}


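/**
 * Imports (copies) the guest CPU state from Hyper-V.
 *
 * Counterpart to nemHCWinCopyStateToHyperV(): reads the requested registers
 * back via the ring-0 run loop or WHvGetVirtualProcessorRegisters and clears
 * the corresponding externally-held bits, flushing the TLB and re-evaluating
 * the PGM mode if control registers or EFER changed.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the caller.
 * @param   fWhat       What to import, CPUMCTX_EXTRN_XXX.
 */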
NEM_TMPL_STATIC int nemHCWinCopyStateFromHyperV(PVM pVM, PVMCPU pVCpu, uint64_t fWhat)
{
# if defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) || defined(NEM_WIN_WITH_RING0_RUNLOOP)
#  if !defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) && defined(NEM_WIN_WITH_RING0_RUNLOOP)
    if (pVM->nem.s.fUseRing0Runloop)
#  endif
    {
        /* See NEMR0ImportState */
        int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_IMPORT_STATE, fWhat, NULL);
        if (RT_SUCCESS(rc))
            return rc;
        if (rc == VERR_NEM_FLUSH_TLB)
            return PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /*fGlobal*/);
        AssertLogRelRCReturn(rc, rc);
        return rc;
    }
# endif
# ifndef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS
    WHV_REGISTER_NAME aenmNames[128];

    fWhat &= pVCpu->cpum.GstCtx.fExtrn;
    uintptr_t iReg = 0;

    /* GPRs */
    if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_RAX)
            aenmNames[iReg++] = WHvX64RegisterRax;
        if (fWhat & CPUMCTX_EXTRN_RCX)
            aenmNames[iReg++] = WHvX64RegisterRcx;
        if (fWhat & CPUMCTX_EXTRN_RDX)
            aenmNames[iReg++] = WHvX64RegisterRdx;
        if (fWhat & CPUMCTX_EXTRN_RBX)
            aenmNames[iReg++] = WHvX64RegisterRbx;
        if (fWhat & CPUMCTX_EXTRN_RSP)
            aenmNames[iReg++] = WHvX64RegisterRsp;
        if (fWhat & CPUMCTX_EXTRN_RBP)
            aenmNames[iReg++] = WHvX64RegisterRbp;
        if (fWhat & CPUMCTX_EXTRN_RSI)
            aenmNames[iReg++] = WHvX64RegisterRsi;
        if (fWhat & CPUMCTX_EXTRN_RDI)
            aenmNames[iReg++] = WHvX64RegisterRdi;
        if (fWhat & CPUMCTX_EXTRN_R8_R15)
        {
            aenmNames[iReg++] = WHvX64RegisterR8;
            aenmNames[iReg++] = WHvX64RegisterR9;
            aenmNames[iReg++] = WHvX64RegisterR10;
            aenmNames[iReg++] = WHvX64RegisterR11;
            aenmNames[iReg++] = WHvX64RegisterR12;
            aenmNames[iReg++] = WHvX64RegisterR13;
            aenmNames[iReg++] = WHvX64RegisterR14;
            aenmNames[iReg++] = WHvX64RegisterR15;
        }
    }

    /* RIP & Flags */
    if (fWhat & CPUMCTX_EXTRN_RIP)
        aenmNames[iReg++] = WHvX64RegisterRip;
    if (fWhat & CPUMCTX_EXTRN_RFLAGS)
        aenmNames[iReg++] = WHvX64RegisterRflags;

    /* Segments */
    if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_ES)
            aenmNames[iReg++] = WHvX64RegisterEs;
        if (fWhat & CPUMCTX_EXTRN_CS)
            aenmNames[iReg++] = WHvX64RegisterCs;
        if (fWhat & CPUMCTX_EXTRN_SS)
            aenmNames[iReg++] = WHvX64RegisterSs;
        if (fWhat & CPUMCTX_EXTRN_DS)
            aenmNames[iReg++] = WHvX64RegisterDs;
        if (fWhat & CPUMCTX_EXTRN_FS)
            aenmNames[iReg++] = WHvX64RegisterFs;
        if (fWhat & CPUMCTX_EXTRN_GS)
            aenmNames[iReg++] = WHvX64RegisterGs;
    }

    /* Descriptor tables. */
    if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_LDTR)
            aenmNames[iReg++] = WHvX64RegisterLdtr;
        if (fWhat & CPUMCTX_EXTRN_TR)
            aenmNames[iReg++] = WHvX64RegisterTr;
        if (fWhat & CPUMCTX_EXTRN_IDTR)
            aenmNames[iReg++] = WHvX64RegisterIdtr;
        if (fWhat & CPUMCTX_EXTRN_GDTR)
            aenmNames[iReg++] = WHvX64RegisterGdtr;
    }

    /* Control registers. */
    if (fWhat & CPUMCTX_EXTRN_CR_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_CR0)
            aenmNames[iReg++] = WHvX64RegisterCr0;
        if (fWhat & CPUMCTX_EXTRN_CR2)
            aenmNames[iReg++] = WHvX64RegisterCr2;
        if (fWhat & CPUMCTX_EXTRN_CR3)
            aenmNames[iReg++] = WHvX64RegisterCr3;
        if (fWhat & CPUMCTX_EXTRN_CR4)
            aenmNames[iReg++] = WHvX64RegisterCr4;
    }
    if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
        aenmNames[iReg++] = WHvX64RegisterCr8;

    /* Debug registers. */
    if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
    {
        aenmNames[iReg++] = WHvX64RegisterDr0;
        aenmNames[iReg++] = WHvX64RegisterDr1;
        aenmNames[iReg++] = WHvX64RegisterDr2;
        aenmNames[iReg++] = WHvX64RegisterDr3;
    }
    if (fWhat & CPUMCTX_EXTRN_DR6)
        aenmNames[iReg++] = WHvX64RegisterDr6;
    if (fWhat & CPUMCTX_EXTRN_DR7)
        aenmNames[iReg++] = WHvX64RegisterDr7;

    /* Floating point state. */
    if (fWhat & CPUMCTX_EXTRN_X87)
    {
        aenmNames[iReg++] = WHvX64RegisterFpMmx0;
        aenmNames[iReg++] = WHvX64RegisterFpMmx1;
        aenmNames[iReg++] = WHvX64RegisterFpMmx2;
        aenmNames[iReg++] = WHvX64RegisterFpMmx3;
        aenmNames[iReg++] = WHvX64RegisterFpMmx4;
        aenmNames[iReg++] = WHvX64RegisterFpMmx5;
        aenmNames[iReg++] = WHvX64RegisterFpMmx6;
        aenmNames[iReg++] = WHvX64RegisterFpMmx7;
        aenmNames[iReg++] = WHvX64RegisterFpControlStatus;
    }
    if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
        aenmNames[iReg++] = WHvX64RegisterXmmControlStatus;

    /* Vector state. */
    if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
    {
        aenmNames[iReg++] = WHvX64RegisterXmm0;
        aenmNames[iReg++] = WHvX64RegisterXmm1;
        aenmNames[iReg++] = WHvX64RegisterXmm2;
        aenmNames[iReg++] = WHvX64RegisterXmm3;
        aenmNames[iReg++] = WHvX64RegisterXmm4;
        aenmNames[iReg++] = WHvX64RegisterXmm5;
        aenmNames[iReg++] = WHvX64RegisterXmm6;
        aenmNames[iReg++] = WHvX64RegisterXmm7;
        aenmNames[iReg++] = WHvX64RegisterXmm8;
        aenmNames[iReg++] = WHvX64RegisterXmm9;
        aenmNames[iReg++] = WHvX64RegisterXmm10;
        aenmNames[iReg++] = WHvX64RegisterXmm11;
        aenmNames[iReg++] = WHvX64RegisterXmm12;
        aenmNames[iReg++] = WHvX64RegisterXmm13;
        aenmNames[iReg++] = WHvX64RegisterXmm14;
        aenmNames[iReg++] = WHvX64RegisterXmm15;
    }

    /* MSRs */
    // WHvX64RegisterTsc - don't touch
    if (fWhat & CPUMCTX_EXTRN_EFER)
        aenmNames[iReg++] = WHvX64RegisterEfer;
    if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
        aenmNames[iReg++] = WHvX64RegisterKernelGsBase;
    if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
    {
        aenmNames[iReg++] = WHvX64RegisterSysenterCs;
        aenmNames[iReg++] = WHvX64RegisterSysenterEip;
        aenmNames[iReg++] = WHvX64RegisterSysenterEsp;
    }
    if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
    {
        aenmNames[iReg++] = WHvX64RegisterStar;
        aenmNames[iReg++] = WHvX64RegisterLstar;
        aenmNames[iReg++] = WHvX64RegisterCstar;
        aenmNames[iReg++] = WHvX64RegisterSfmask;
    }

//#ifdef LOG_ENABLED
//    const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM->pVM);
//#endif
    if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
    {
        aenmNames[iReg++] = WHvX64RegisterApicBase; /// @todo APIC BASE
        aenmNames[iReg++] = WHvX64RegisterPat;
#if 0 /*def LOG_ENABLED*/ /** @todo Check if WHvX64RegisterMsrMtrrCap works... */
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrCap;
#endif
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrDefType;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix64k00000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix16k80000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix16kA0000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kC0000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kC8000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kD0000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kD8000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kE0000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kE8000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kF0000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kF8000;
        aenmNames[iReg++] = WHvX64RegisterTscAux;
        /** @todo look for HvX64RegisterIa32MiscEnable and HvX64RegisterIa32FeatureControl? */
//#ifdef LOG_ENABLED
//        if (enmCpuVendor != CPUMCPUVENDOR_AMD)
//            aenmNames[iReg++] = HvX64RegisterIa32FeatureControl;
//#endif
    }

    /* Interruptibility. */
    if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
    {
        aenmNames[iReg++] = WHvRegisterInterruptState;
        aenmNames[iReg++] = WHvX64RegisterRip;
    }

    /* event injection */
    aenmNames[iReg++] = WHvRegisterPendingInterruption;
    aenmNames[iReg++] = WHvRegisterPendingEvent0;
    aenmNames[iReg++] = WHvRegisterPendingEvent1;

    size_t const cRegs = iReg;
    Assert(cRegs < RT_ELEMENTS(aenmNames));

    /*
     * Get the registers.
     */
    WHV_REGISTER_VALUE aValues[128];
    RT_ZERO(aValues);
    Assert(RT_ELEMENTS(aValues) >= cRegs);
    Assert(RT_ELEMENTS(aenmNames) >= cRegs);
#  ifdef NEM_WIN_INTERCEPT_NT_IO_CTLS
    Log12(("Calling WHvGetVirtualProcessorRegisters(%p, %u, %p, %u, %p)\n",
           pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, cRegs, aValues));
#  endif
    HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, (uint32_t)cRegs, aValues);
    AssertLogRelMsgReturn(SUCCEEDED(hrc),
                          ("WHvGetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
                           pVM->nem.s.hPartition, pVCpu->idCpu, cRegs, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
                          , VERR_NEM_GET_REGISTERS_FAILED);

    iReg = 0;
#  define GET_REG64(a_DstVar, a_enmName) do { \
        Assert(aenmNames[iReg] == (a_enmName)); \
        (a_DstVar) = aValues[iReg].Reg64; \
        iReg++; \
    } while (0)
#  define GET_REG64_LOG7(a_DstVar, a_enmName, a_szLogName) do { \
        Assert(aenmNames[iReg] == (a_enmName)); \
        if ((a_DstVar) != aValues[iReg].Reg64) \
            Log7(("NEM/%u: " a_szLogName " changed %RX64 -> %RX64\n", pVCpu->idCpu, (a_DstVar), aValues[iReg].Reg64)); \
        (a_DstVar) = aValues[iReg].Reg64; \
        iReg++; \
    } while (0)
#  define GET_REG128(a_DstVarLo, a_DstVarHi, a_enmName) do { \
        Assert(aenmNames[iReg] == a_enmName); \
        (a_DstVarLo) = aValues[iReg].Reg128.Low64; \
        (a_DstVarHi) = aValues[iReg].Reg128.High64; \
        iReg++; \
    } while (0)
#  define GET_SEG(a_SReg, a_enmName) do { \
        Assert(aenmNames[iReg] == (a_enmName)); \
        NEM_WIN_COPY_BACK_SEG(a_SReg, aValues[iReg].Segment); \
        iReg++; \
    } while (0)

    /* GPRs */
    if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_RAX)
            GET_REG64(pVCpu->cpum.GstCtx.rax, WHvX64RegisterRax);
        if (fWhat & CPUMCTX_EXTRN_RCX)
            GET_REG64(pVCpu->cpum.GstCtx.rcx, WHvX64RegisterRcx);
        if (fWhat & CPUMCTX_EXTRN_RDX)
            GET_REG64(pVCpu->cpum.GstCtx.rdx, WHvX64RegisterRdx);
        if (fWhat & CPUMCTX_EXTRN_RBX)
            GET_REG64(pVCpu->cpum.GstCtx.rbx, WHvX64RegisterRbx);
        if (fWhat & CPUMCTX_EXTRN_RSP)
            GET_REG64(pVCpu->cpum.GstCtx.rsp, WHvX64RegisterRsp);
        if (fWhat & CPUMCTX_EXTRN_RBP)
            GET_REG64(pVCpu->cpum.GstCtx.rbp, WHvX64RegisterRbp);
        if (fWhat & CPUMCTX_EXTRN_RSI)
            GET_REG64(pVCpu->cpum.GstCtx.rsi, WHvX64RegisterRsi);
        if (fWhat & CPUMCTX_EXTRN_RDI)
            GET_REG64(pVCpu->cpum.GstCtx.rdi, WHvX64RegisterRdi);
        if (fWhat & CPUMCTX_EXTRN_R8_R15)
        {
            GET_REG64(pVCpu->cpum.GstCtx.r8, WHvX64RegisterR8);
            GET_REG64(pVCpu->cpum.GstCtx.r9, WHvX64RegisterR9);
            GET_REG64(pVCpu->cpum.GstCtx.r10, WHvX64RegisterR10);
            GET_REG64(pVCpu->cpum.GstCtx.r11, WHvX64RegisterR11);
            GET_REG64(pVCpu->cpum.GstCtx.r12, WHvX64RegisterR12);
            GET_REG64(pVCpu->cpum.GstCtx.r13, WHvX64RegisterR13);
            GET_REG64(pVCpu->cpum.GstCtx.r14, WHvX64RegisterR14);
            GET_REG64(pVCpu->cpum.GstCtx.r15, WHvX64RegisterR15);
        }
    }

    /* RIP & Flags */
    if (fWhat & CPUMCTX_EXTRN_RIP)
        GET_REG64(pVCpu->cpum.GstCtx.rip, WHvX64RegisterRip);
    if (fWhat & CPUMCTX_EXTRN_RFLAGS)
        GET_REG64(pVCpu->cpum.GstCtx.rflags.u, WHvX64RegisterRflags);

    /* Segments */
    if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_ES)
            GET_SEG(pVCpu->cpum.GstCtx.es, WHvX64RegisterEs);
        if (fWhat & CPUMCTX_EXTRN_CS)
            GET_SEG(pVCpu->cpum.GstCtx.cs, WHvX64RegisterCs);
        if (fWhat & CPUMCTX_EXTRN_SS)
            GET_SEG(pVCpu->cpum.GstCtx.ss, WHvX64RegisterSs);
        if (fWhat & CPUMCTX_EXTRN_DS)
            GET_SEG(pVCpu->cpum.GstCtx.ds, WHvX64RegisterDs);
        if (fWhat & CPUMCTX_EXTRN_FS)
            GET_SEG(pVCpu->cpum.GstCtx.fs, WHvX64RegisterFs);
        if (fWhat & CPUMCTX_EXTRN_GS)
            GET_SEG(pVCpu->cpum.GstCtx.gs, WHvX64RegisterGs);
    }

    /* Descriptor tables and the task segment. */
    if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_LDTR)
            GET_SEG(pVCpu->cpum.GstCtx.ldtr, WHvX64RegisterLdtr);

        if (fWhat & CPUMCTX_EXTRN_TR)
        {
            /* AMD-V likes loading TR with the AVAIL state, whereas Intel insists
               on BUSY.  So, to avoid triggering sanity assertions elsewhere in
               the code, always fix this up. */
            GET_SEG(pVCpu->cpum.GstCtx.tr, WHvX64RegisterTr);
            switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
            {
                case X86_SEL_TYPE_SYS_386_TSS_BUSY:
                case X86_SEL_TYPE_SYS_286_TSS_BUSY:
                    break;
                case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
                    pVCpu->cpum.GstCtx.tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
                    break;
                case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
                    pVCpu->cpum.GstCtx.tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;
                    break;
            }
        }
        if (fWhat & CPUMCTX_EXTRN_IDTR)
        {
            Assert(aenmNames[iReg] == WHvX64RegisterIdtr);
            pVCpu->cpum.GstCtx.idtr.cbIdt = aValues[iReg].Table.Limit;
            pVCpu->cpum.GstCtx.idtr.pIdt = aValues[iReg].Table.Base;
            iReg++;
        }
        if (fWhat & CPUMCTX_EXTRN_GDTR)
        {
            Assert(aenmNames[iReg] == WHvX64RegisterGdtr);
            pVCpu->cpum.GstCtx.gdtr.cbGdt = aValues[iReg].Table.Limit;
            pVCpu->cpum.GstCtx.gdtr.pGdt = aValues[iReg].Table.Base;
            iReg++;
        }
    }

    /* Control registers. */
    bool fMaybeChangedMode = false;
    bool fFlushTlb = false;
    bool fFlushGlobalTlb = false;
    if (fWhat & CPUMCTX_EXTRN_CR_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_CR0)
        {
            Assert(aenmNames[iReg] == WHvX64RegisterCr0);
            if (pVCpu->cpum.GstCtx.cr0 != aValues[iReg].Reg64)
            {
                CPUMSetGuestCR0(pVCpu, aValues[iReg].Reg64);
                fMaybeChangedMode = true;
                fFlushTlb = fFlushGlobalTlb = true; /// @todo fix this
            }
            iReg++;
        }
        if (fWhat & CPUMCTX_EXTRN_CR2)
            GET_REG64(pVCpu->cpum.GstCtx.cr2, WHvX64RegisterCr2);
        if (fWhat & CPUMCTX_EXTRN_CR3)
        {
            if (pVCpu->cpum.GstCtx.cr3 != aValues[iReg].Reg64)
            {
                CPUMSetGuestCR3(pVCpu, aValues[iReg].Reg64);
                fFlushTlb = true;
            }
            iReg++;
        }
        if (fWhat & CPUMCTX_EXTRN_CR4)
        {
            if (pVCpu->cpum.GstCtx.cr4 != aValues[iReg].Reg64)
            {
                CPUMSetGuestCR4(pVCpu, aValues[iReg].Reg64);
                fMaybeChangedMode = true;
                fFlushTlb = fFlushGlobalTlb = true; /// @todo fix this
            }
            iReg++;
        }
    }
    if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
    {
        Assert(aenmNames[iReg] == WHvX64RegisterCr8);
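        /* CR8 architecturally holds bits 7:4 of the TPR, so shift the value
           back up to get the full 8-bit TPR expected by APICSetTpr(). */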
        APICSetTpr(pVCpu, (uint8_t)aValues[iReg].Reg64 << 4);
        iReg++;
    }

    /* Debug registers. */
    /** @todo fixme */
    if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
    {
        Assert(aenmNames[iReg] == WHvX64RegisterDr0);
        Assert(aenmNames[iReg+3] == WHvX64RegisterDr3);
        if (pVCpu->cpum.GstCtx.dr[0] != aValues[iReg].Reg64)
            CPUMSetGuestDR0(pVCpu, aValues[iReg].Reg64);
        iReg++;
        if (pVCpu->cpum.GstCtx.dr[1] != aValues[iReg].Reg64)
            CPUMSetGuestDR1(pVCpu, aValues[iReg].Reg64);
        iReg++;
        if (pVCpu->cpum.GstCtx.dr[2] != aValues[iReg].Reg64)
            CPUMSetGuestDR2(pVCpu, aValues[iReg].Reg64);
        iReg++;
        if (pVCpu->cpum.GstCtx.dr[3] != aValues[iReg].Reg64)
            CPUMSetGuestDR3(pVCpu, aValues[iReg].Reg64);
        iReg++;
    }
    if (fWhat & CPUMCTX_EXTRN_DR6)
    {
        Assert(aenmNames[iReg] == WHvX64RegisterDr6);
        if (pVCpu->cpum.GstCtx.dr[6] != aValues[iReg].Reg64)
            CPUMSetGuestDR6(pVCpu, aValues[iReg].Reg64);
        iReg++;
    }
    if (fWhat & CPUMCTX_EXTRN_DR7)
    {
        Assert(aenmNames[iReg] == WHvX64RegisterDr7);
        if (pVCpu->cpum.GstCtx.dr[7] != aValues[iReg].Reg64)
            CPUMSetGuestDR7(pVCpu, aValues[iReg].Reg64);
        iReg++;
    }

    /* Floating point state. */
    if (fWhat & CPUMCTX_EXTRN_X87)
    {
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[0].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[0].au64[1], WHvX64RegisterFpMmx0);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[1].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[1].au64[1], WHvX64RegisterFpMmx1);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[2].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[2].au64[1], WHvX64RegisterFpMmx2);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[3].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[3].au64[1], WHvX64RegisterFpMmx3);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[4].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[4].au64[1], WHvX64RegisterFpMmx4);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[5].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[5].au64[1], WHvX64RegisterFpMmx5);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[6].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[6].au64[1], WHvX64RegisterFpMmx6);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[7].au64[0], pVCpu->cpum.GstCtx.pXStateR3->x87.aRegs[7].au64[1], WHvX64RegisterFpMmx7);

        Assert(aenmNames[iReg] == WHvX64RegisterFpControlStatus);
        pVCpu->cpum.GstCtx.pXStateR3->x87.FCW = aValues[iReg].FpControlStatus.FpControl;
        pVCpu->cpum.GstCtx.pXStateR3->x87.FSW = aValues[iReg].FpControlStatus.FpStatus;
        pVCpu->cpum.GstCtx.pXStateR3->x87.FTW = aValues[iReg].FpControlStatus.FpTag
                                                /*| (aValues[iReg].FpControlStatus.Reserved << 8)*/;
        pVCpu->cpum.GstCtx.pXStateR3->x87.FOP = aValues[iReg].FpControlStatus.LastFpOp;
        pVCpu->cpum.GstCtx.pXStateR3->x87.FPUIP = (uint32_t)aValues[iReg].FpControlStatus.LastFpRip;
        pVCpu->cpum.GstCtx.pXStateR3->x87.CS = (uint16_t)(aValues[iReg].FpControlStatus.LastFpRip >> 32);
        pVCpu->cpum.GstCtx.pXStateR3->x87.Rsrvd1 = (uint16_t)(aValues[iReg].FpControlStatus.LastFpRip >> 48);
        iReg++;
    }

    if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
    {
        Assert(aenmNames[iReg] == WHvX64RegisterXmmControlStatus);
        if (fWhat & CPUMCTX_EXTRN_X87)
        {
            pVCpu->cpum.GstCtx.pXStateR3->x87.FPUDP = (uint32_t)aValues[iReg].XmmControlStatus.LastFpRdp;
            pVCpu->cpum.GstCtx.pXStateR3->x87.DS = (uint16_t)(aValues[iReg].XmmControlStatus.LastFpRdp >> 32);
            pVCpu->cpum.GstCtx.pXStateR3->x87.Rsrvd2 = (uint16_t)(aValues[iReg].XmmControlStatus.LastFpRdp >> 48);
        }
        pVCpu->cpum.GstCtx.pXStateR3->x87.MXCSR = aValues[iReg].XmmControlStatus.XmmStatusControl;
        pVCpu->cpum.GstCtx.pXStateR3->x87.MXCSR_MASK = aValues[iReg].XmmControlStatus.XmmStatusControlMask; /** @todo ??? (Isn't this an output field?) */
        iReg++;
    }

    /* Vector state. */
    if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
    {
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 0].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 0].uXmm.s.Hi, WHvX64RegisterXmm0);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 1].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 1].uXmm.s.Hi, WHvX64RegisterXmm1);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 2].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 2].uXmm.s.Hi, WHvX64RegisterXmm2);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 3].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 3].uXmm.s.Hi, WHvX64RegisterXmm3);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 4].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 4].uXmm.s.Hi, WHvX64RegisterXmm4);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 5].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 5].uXmm.s.Hi, WHvX64RegisterXmm5);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 6].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 6].uXmm.s.Hi, WHvX64RegisterXmm6);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 7].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 7].uXmm.s.Hi, WHvX64RegisterXmm7);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 8].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 8].uXmm.s.Hi, WHvX64RegisterXmm8);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 9].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[ 9].uXmm.s.Hi, WHvX64RegisterXmm9);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[10].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[10].uXmm.s.Hi, WHvX64RegisterXmm10);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[11].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[11].uXmm.s.Hi, WHvX64RegisterXmm11);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[12].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[12].uXmm.s.Hi, WHvX64RegisterXmm12);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[13].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[13].uXmm.s.Hi, WHvX64RegisterXmm13);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[14].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[14].uXmm.s.Hi, WHvX64RegisterXmm14);
        GET_REG128(pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[15].uXmm.s.Lo, pVCpu->cpum.GstCtx.pXStateR3->x87.aXMM[15].uXmm.s.Hi, WHvX64RegisterXmm15);
    }

    /* MSRs */
    // WHvX64RegisterTsc - don't touch
    if (fWhat & CPUMCTX_EXTRN_EFER)
    {
        Assert(aenmNames[iReg] == WHvX64RegisterEfer);
        if (aValues[iReg].Reg64 != pVCpu->cpum.GstCtx.msrEFER)
        {
            Log7(("NEM/%u: MSR EFER changed %RX64 -> %RX64\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.msrEFER, aValues[iReg].Reg64));
            if ((aValues[iReg].Reg64 ^ pVCpu->cpum.GstCtx.msrEFER) & MSR_K6_EFER_NXE)
                PGMNotifyNxeChanged(pVCpu, RT_BOOL(aValues[iReg].Reg64 & MSR_K6_EFER_NXE));
            pVCpu->cpum.GstCtx.msrEFER = aValues[iReg].Reg64;
            fMaybeChangedMode = true;
        }
        iReg++;
    }
    if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrKERNELGSBASE, WHvX64RegisterKernelGsBase, "MSR KERNEL_GS_BASE");
    if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
    {
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.SysEnter.cs, WHvX64RegisterSysenterCs, "MSR SYSENTER.CS");
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.SysEnter.eip, WHvX64RegisterSysenterEip, "MSR SYSENTER.EIP");
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.SysEnter.esp, WHvX64RegisterSysenterEsp, "MSR SYSENTER.ESP");
    }
    if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
    {
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrSTAR, WHvX64RegisterStar, "MSR STAR");
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrLSTAR, WHvX64RegisterLstar, "MSR LSTAR");
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrCSTAR, WHvX64RegisterCstar, "MSR CSTAR");
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrSFMASK, WHvX64RegisterSfmask, "MSR SFMASK");
    }
    if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
    {
        Assert(aenmNames[iReg] == WHvX64RegisterApicBase);
        const uint64_t uOldBase = APICGetBaseMsrNoCheck(pVCpu);
        if (aValues[iReg].Reg64 != uOldBase)
        {
            Log7(("NEM/%u: MSR APICBase changed %RX64 -> %RX64 (%RX64)\n",
                  pVCpu->idCpu, uOldBase, aValues[iReg].Reg64, aValues[iReg].Reg64 ^ uOldBase));
            int rc2 = APICSetBaseMsr(pVCpu, aValues[iReg].Reg64);
            AssertLogRelMsg(rc2 == VINF_SUCCESS, ("%Rrc %RX64\n", rc2, aValues[iReg].Reg64));
        }
        iReg++;

        GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrPAT, WHvX64RegisterPat, "MSR PAT");
#if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
        GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrPAT, WHvX64RegisterMsrMtrrCap, "MSR MTRR_CAP");
#endif
        PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrDefType, WHvX64RegisterMsrMtrrDefType, "MSR MTRR_DEF_TYPE");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix64K_00000, WHvX64RegisterMsrMtrrFix64k00000, "MSR MTRR_FIX_64K_00000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix16K_80000, WHvX64RegisterMsrMtrrFix16k80000, "MSR MTRR_FIX_16K_80000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix16K_A0000, WHvX64RegisterMsrMtrrFix16kA0000, "MSR MTRR_FIX_16K_A0000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_C0000, WHvX64RegisterMsrMtrrFix4kC0000, "MSR MTRR_FIX_4K_C0000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_C8000, WHvX64RegisterMsrMtrrFix4kC8000, "MSR MTRR_FIX_4K_C8000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_D0000, WHvX64RegisterMsrMtrrFix4kD0000, "MSR MTRR_FIX_4K_D0000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_D8000, WHvX64RegisterMsrMtrrFix4kD8000, "MSR MTRR_FIX_4K_D8000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_E0000, WHvX64RegisterMsrMtrrFix4kE0000, "MSR MTRR_FIX_4K_E0000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_E8000, WHvX64RegisterMsrMtrrFix4kE8000, "MSR MTRR_FIX_4K_E8000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_F0000, WHvX64RegisterMsrMtrrFix4kF0000, "MSR MTRR_FIX_4K_F0000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_F8000, WHvX64RegisterMsrMtrrFix4kF8000, "MSR MTRR_FIX_4K_F8000");
        GET_REG64_LOG7(pCtxMsrs->msr.TscAux, WHvX64RegisterTscAux, "MSR TSC_AUX");
        /** @todo look for HvX64RegisterIa32MiscEnable and HvX64RegisterIa32FeatureControl? */
    }

    /* Interruptibility. */
    if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
    {
        Assert(aenmNames[iReg] == WHvRegisterInterruptState);
        Assert(aenmNames[iReg + 1] == WHvX64RegisterRip);

        if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
        {
            pVCpu->nem.s.fLastInterruptShadow = aValues[iReg].InterruptState.InterruptShadow;
            if (aValues[iReg].InterruptState.InterruptShadow)
                EMSetInhibitInterruptsPC(pVCpu, aValues[iReg + 1].Reg64);
            else
                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
        }

        if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
        {
            if (aValues[iReg].InterruptState.NmiMasked)
                VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
            else
                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
        }

        fWhat |= CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI;
        iReg += 2;
    }

    /* Event injection. */
    /// @todo WHvRegisterPendingInterruption
    Assert(aenmNames[iReg] == WHvRegisterPendingInterruption);
    if (aValues[iReg].PendingInterruption.InterruptionPending)
    {
        Log7(("PendingInterruption: type=%u vector=%#x errcd=%RTbool/%#x instr-len=%u nested=%u\n",
              aValues[iReg].PendingInterruption.InterruptionType, aValues[iReg].PendingInterruption.InterruptionVector,
              aValues[iReg].PendingInterruption.DeliverErrorCode, aValues[iReg].PendingInterruption.ErrorCode,
              aValues[iReg].PendingInterruption.InstructionLength, aValues[iReg].PendingInterruption.NestedEvent));
        AssertMsg((aValues[iReg].PendingInterruption.AsUINT64 & UINT64_C(0xfc00)) == 0,
                  ("%#RX64\n", aValues[iReg].PendingInterruption.AsUINT64));
    }

    /// @todo WHvRegisterPendingEvent0
    /// @todo WHvRegisterPendingEvent1

    /* Almost done, just update extrn flags and maybe change PGM mode. */
    pVCpu->cpum.GstCtx.fExtrn &= ~fWhat;
    if (!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT))))
        pVCpu->cpum.GstCtx.fExtrn = 0;

    /* Typical. */
    if (!fMaybeChangedMode && !fFlushTlb)
        return VINF_SUCCESS;

    /*
     * Slow.
     */
    if (fMaybeChangedMode)
    {
        int rc = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER);
        AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
    }

    if (fFlushTlb)
    {
        int rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, fFlushGlobalTlb);
        AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
    }

    return VINF_SUCCESS;
# endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
}

#endif /* !IN_RING0 */


/**
 * Interface for importing state on demand (used by IEM).
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context CPU structure.
 * @param   fWhat       What to import, CPUMCTX_EXTRN_XXX.
 */
VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPU pVCpu, uint64_t fWhat)
{
    STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);

#ifdef IN_RING0
# ifdef NEM_WIN_WITH_RING0_RUNLOOP
    /** @todo improve and secure this translation */
    PGVM pGVM = GVMMR0ByHandle(pVCpu->pVMR0->hSelf);
    AssertReturn(pGVM, VERR_INVALID_VMCPU_HANDLE);
    VMCPUID idCpu = pVCpu->idCpu;
    ASMCompilerBarrier();
    AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_VMCPU_HANDLE);

    return nemR0WinImportState(pGVM, &pGVM->aCpus[idCpu], &pVCpu->cpum.GstCtx, fWhat);
# else
    RT_NOREF(pVCpu, fWhat);
    return VERR_NOT_IMPLEMENTED;
# endif
#else
    return nemHCWinCopyStateFromHyperV(pVCpu->pVMR3, pVCpu, fWhat);
#endif
}


/**
 * Query the CPU tick counter and optionally the TSC_AUX MSR value.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context CPU structure.
 * @param   pcTicks     Where to return the CPU tick count.
 * @param   puAux       Where to return the TSC_AUX register value.
 */
VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPU pVCpu, uint64_t *pcTicks, uint32_t *puAux)
{
    STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick);

#ifdef IN_RING3
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
    AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);

# if defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) || defined(NEM_WIN_WITH_RING0_RUNLOOP)
#  if !defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) && defined(NEM_WIN_WITH_RING0_RUNLOOP)
    if (pVM->nem.s.fUseRing0Runloop)
#  endif
    {
        /* Call ring-0 and get the values. */
        int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_QUERY_CPU_TICK, 0, NULL);
        AssertLogRelRCReturn(rc, rc);
        *pcTicks = pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks;
        if (puAux)
            *puAux = pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_TSC_AUX
                   ? pVCpu->nem.s.Hypercall.QueryCpuTick.uAux : CPUMGetGuestTscAux(pVCpu);
        return VINF_SUCCESS;
    }
# endif
# ifndef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS
    /* Call the official API. */
    WHV_REGISTER_NAME aenmNames[2] = { WHvX64RegisterTsc, WHvX64RegisterTscAux };
    WHV_REGISTER_VALUE aValues[2] = { {0, 0}, {0, 0} };
    Assert(RT_ELEMENTS(aenmNames) == RT_ELEMENTS(aValues));
    HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, 2, aValues);
    AssertLogRelMsgReturn(SUCCEEDED(hrc),
                          ("WHvGetVirtualProcessorRegisters(%p, %u,{tsc,tsc_aux},2,) -> %Rhrc (Last=%#x/%u)\n",
                           pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
                          , VERR_NEM_GET_REGISTERS_FAILED);
    *pcTicks = aValues[0].Reg64;
    if (puAux)
        *puAux = pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_TSC_AUX ? aValues[1].Reg64 : CPUMGetGuestTscAux(pVCpu);
    return VINF_SUCCESS;
# endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
#else /* IN_RING0 */
# ifdef NEM_WIN_WITH_RING0_RUNLOOP
    /** @todo improve and secure this translation */
    PGVM pGVM = GVMMR0ByHandle(pVCpu->pVMR0->hSelf);
    AssertReturn(pGVM, VERR_INVALID_VMCPU_HANDLE);
    VMCPUID idCpu = pVCpu->idCpu;
    ASMCompilerBarrier();
    AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_VMCPU_HANDLE);

    int rc = nemR0WinQueryCpuTick(pGVM, &pGVM->aCpus[idCpu], pcTicks, puAux);
    if (RT_SUCCESS(rc) && puAux && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_TSC_AUX))
        *puAux = CPUMGetGuestTscAux(pVCpu);
    return rc;
# else
    RT_NOREF(pVCpu, pcTicks, puAux);
    return VERR_NOT_IMPLEMENTED;
# endif
#endif /* IN_RING0 */
}


/**
 * Resumes CPU clock (TSC) on all virtual CPUs.
 *
 * This is called by TM when the VM is started, restored, resumed or similar.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context CPU structure of the calling EMT.
 * @param   uPausedTscValue The TSC value at the time of pausing.
 */
VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVM pVM, PVMCPU pVCpu, uint64_t uPausedTscValue)
{
#ifdef IN_RING0
# ifdef NEM_WIN_WITH_RING0_RUNLOOP
    /** @todo improve and secure this translation */
    PGVM pGVM = GVMMR0ByHandle(pVM->hSelf);
    AssertReturn(pGVM, VERR_INVALID_VMCPU_HANDLE);
    VMCPUID idCpu = pVCpu->idCpu;
    ASMCompilerBarrier();
    AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_VMCPU_HANDLE);

    return nemR0WinResumeCpuTickOnAll(pGVM, &pGVM->aCpus[idCpu], uPausedTscValue);
# else
    RT_NOREF(pVM, pVCpu, uPausedTscValue);
    return VERR_NOT_IMPLEMENTED;
# endif
#else /* IN_RING3 */
    VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
    AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);

# if defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) || defined(NEM_WIN_WITH_RING0_RUNLOOP)
#  if !defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS) && defined(NEM_WIN_WITH_RING0_RUNLOOP)
    if (pVM->nem.s.fUseRing0Runloop)
#  endif
    {
        /* Call ring-0 and do it all there. */
        return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL, uPausedTscValue, NULL);
    }
# endif
# ifndef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS
    /*
     * Call the official API to do the job.
     */
    if (pVM->cCpus > 1)
        RTThreadYield(); /* Try to decrease the chance that we get rescheduled in the middle. */

1266 /* Start with the first CPU. */
1267 WHV_REGISTER_NAME enmName = WHvX64RegisterTsc;
1268 WHV_REGISTER_VALUE Value = {0, 0};
1269 Value.Reg64 = uPausedTscValue;
1270 uint64_t const uFirstTsc = ASMReadTSC();
1271 HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, 0 /*iCpu*/, &enmName, 1, &Value);
1272 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1273 ("WHvSetVirtualProcessorRegisters(%p, 0,{tsc},2,%#RX64) -> %Rhrc (Last=%#x/%u)\n",
1274 pVM->nem.s.hPartition, uPausedTscValue, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1275 , VERR_NEM_SET_TSC);
1276
1277 /* Do the other CPUs, adjusting for elapsed TSC and keeping fingers crossed
1278 that we don't introduce too much drift here. */
1279 for (VMCPUID iCpu = 1; iCpu < pVM->cCpus; iCpu++)
1280 {
1281 Assert(enmName == WHvX64RegisterTsc);
1282 const uint64_t offDelta = (ASMReadTSC() - uFirstTsc);
1283 Value.Reg64 = uPausedTscValue + offDelta;
1284 hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, iCpu, &enmName, 1, &Value);
1285 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1286 ("WHvSetVirtualProcessorRegisters(%p, 0,{tsc},2,%#RX64 + %#RX64) -> %Rhrc (Last=%#x/%u)\n",
1287 pVM->nem.s.hPartition, iCpu, uPausedTscValue, offDelta, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1288 , VERR_NEM_SET_TSC);
1289 }
1290
1291 return VINF_SUCCESS;
1292# endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
1293#endif /* IN_RING3 */
1294}
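
/*
 * Editorial sketch (not compiled): the per-CPU loop above compensates for the
 * host time that passes between the individual set-register calls by
 * re-reading the host TSC, so conceptually each vCPU i gets
 *
 *      TSC(i) = uPausedTscValue + (hostTscNow(i) - uFirstTsc)
 *
 * and all vCPUs resume with approximately the same virtual TSC. The helper
 * name below is hypothetical, for illustration only.
 */
#if 0
 uint64_t const uFirstTsc = ASMReadTSC();
 for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
 setGuestTscHypothetical(iCpu, uPausedTscValue + (ASMReadTSC() - uFirstTsc)); /* hypothetical helper */
#endif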
1295
1296#ifdef NEMWIN_NEED_GET_REGISTER
1297# if defined(IN_RING0) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
1298/** Worker for assertion macro. */
1299NEM_TMPL_STATIC int nemHCWinGetRegister(PVMCPU pVCpu, PGVMCPU pGVCpu, uint32_t enmReg, HV_REGISTER_VALUE *pRetValue)
1300{
1301 RT_ZERO(*pRetValue);
1302# ifdef IN_RING3
1303 RT_NOREF(pVCpu, pGVCpu, enmReg);
1304 return VERR_NOT_IMPLEMENTED;
1305# else
1306 NOREF(pVCpu);
1307
1308 /*
1309 * Hypercall parameters.
1310 */
1311 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
1312 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
1313 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
1314
1315 pInput->PartitionId = pGVCpu->pGVM->nem.s.idHvPartition;
1316 pInput->VpIndex = pGVCpu->idCpu;
1317 pInput->fFlags = 0;
1318 pInput->Names[0] = (HV_REGISTER_NAME)enmReg;
1319
1320 size_t const cbInput = RT_ALIGN_Z(RT_UOFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[1]), 32);
1321 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
1322 RT_BZERO(paValues, sizeof(paValues[0]) * 1);
1323
1324 /*
1325 * Make the hypercall and copy out the value.
1326 */
1327 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, 1),
1328 pGVCpu->nem.s.HypercallData.HCPhysPage,
1329 pGVCpu->nem.s.HypercallData.HCPhysPage + cbInput);
1330 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(1), ("uResult=%RX64 cRegs=%#x\n", uResult, 1),
1331 VERR_NEM_GET_REGISTERS_FAILED);
1332
1333 *pRetValue = paValues[0];
1334 return VINF_SUCCESS;
1335# endif
1336}
1337# else
1338/** Worker for assertion macro. */
1339 NEM_TMPL_STATIC int nemR3WinGetRegister(PVMCPU pVCpu, uint32_t enmReg, WHV_REGISTER_VALUE *pRetValue)
1340 {
1341 RT_ZERO(*pRetValue);
1342 RT_NOREF(pVCpu, enmReg);
1343 return VERR_NOT_IMPLEMENTED;
1344}
1345# endif
1346#endif
1347
1348
1349#ifdef LOG_ENABLED
1350/**
1351 * Get the virtual processor running status.
1352 */
1353DECLINLINE(VID_PROCESSOR_STATUS) nemHCWinCpuGetRunningStatus(PVMCPU pVCpu)
1354{
1355# ifdef IN_RING0
1356 NOREF(pVCpu);
1357 return VidProcessorStatusUndefined;
1358# else
1359 RTERRVARS Saved;
1360 RTErrVarsSave(&Saved);
1361
1362 /*
1363 * This API is disabled in release builds, it seems. On build 17101 it requires
1364 * the following patch to be enabled (windbg): eb vid+12180 0f 84 98 00 00 00
1365 */
1366 VID_PROCESSOR_STATUS enmCpuStatus = VidProcessorStatusUndefined;
1367 NTSTATUS rcNt = g_pfnVidGetVirtualProcessorRunningStatus(pVCpu->pVMR3->nem.s.hPartitionDevice, pVCpu->idCpu, &enmCpuStatus);
1368 AssertRC(rcNt);
1369
1370 RTErrVarsRestore(&Saved);
1371 return enmCpuStatus;
1372# endif
1373}
1374#endif /* LOG_ENABLED */
1375
1376
1377#if defined(NEM_WIN_USE_OUR_OWN_RUN_API) || defined(NEM_WIN_WITH_RING0_RUNLOOP)
1378# ifdef IN_RING3 /* hopefully not needed in ring-0, as we'd need KTHREADs and KeAlertThread. */
1379/**
1380 * Our own WHvCancelRunVirtualProcessor that can later be moved to ring-0.
1381 *
1382 * This is an experiment only.
1383 *
1384 * @returns VBox status code.
1385 * @param pVM The cross context VM structure.
1386 * @param pVCpu The cross context virtual CPU structure of the
1387 * calling EMT.
1388 */
1389NEM_TMPL_STATIC int nemHCWinCancelRunVirtualProcessor(PVM pVM, PVMCPU pVCpu)
1390{
1391 /*
1392 * Work the state.
1393 *
1394 * From the looks of things, we should let the EMT call VidStopVirtualProcessor.
1395 * So, we just need to modify the state and kick the EMT if it's waiting on
1396 * messages. For the latter we use QueueUserAPC / KeAlertThread.
1397 */
1398 for (;;)
1399 {
1400 VMCPUSTATE enmState = VMCPU_GET_STATE(pVCpu);
1401 switch (enmState)
1402 {
1403 case VMCPUSTATE_STARTED_EXEC_NEM:
1404 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED, VMCPUSTATE_STARTED_EXEC_NEM))
1405 {
1406 DBGFTRACE_CUSTOM(pVM, "VMCPUSTATE_STARTED_EXEC_NEM -> CANCELED");
1407 Log8(("nemHCWinCancelRunVirtualProcessor: Switched %u to canceled state\n", pVCpu->idCpu));
1408 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatCancelChangedState);
1409 return VINF_SUCCESS;
1410 }
1411 break;
1412
1413 case VMCPUSTATE_STARTED_EXEC_NEM_WAIT:
1414 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED, VMCPUSTATE_STARTED_EXEC_NEM_WAIT))
1415 {
1416 DBGFTRACE_CUSTOM(pVM, "VMCPUSTATE_STARTED_EXEC_NEM_WAIT -> CANCELED");
1417# ifdef IN_RING0
1418 NTSTATUS rcNt = KeAlertThread(??);
1419 DBGFTRACE_CUSTOM(pVM, "KeAlertThread -> %#x", rcNt);
1420# else
1421 NTSTATUS rcNt = NtAlertThread(pVCpu->nem.s.hNativeThreadHandle);
1422 DBGFTRACE_CUSTOM(pVM, "NtAlertThread -> %#x", rcNt);
1423# endif
1424 Log8(("nemHCWinCancelRunVirtualProcessor: Alerted %u: %#x\n", pVCpu->idCpu, rcNt));
1425 Assert(rcNt == STATUS_SUCCESS);
1426 if (NT_SUCCESS(rcNt))
1427 {
1428 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatCancelAlertedThread);
1429 return VINF_SUCCESS;
1430 }
1431 AssertLogRelMsgFailedReturn(("NtAlertThread failed: %#x\n", rcNt), RTErrConvertFromNtStatus(rcNt));
1432 }
1433 break;
1434
1435 default:
1436 return VINF_SUCCESS;
1437 }
1438
1439 ASMNopPause();
1440 RT_NOREF(pVM);
1441 }
1442}
1443# endif /* IN_RING3 */
1444#endif /* NEM_WIN_USE_OUR_OWN_RUN_API || NEM_WIN_WITH_RING0_RUNLOOP */
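
/*
 * Editorial sketch (not compiled) of the lock-free hand-off used above: the
 * canceller only moves EXEC_NEM / EXEC_NEM_WAIT to CANCELED via
 * compare-exchange, retries when it loses a race, and only alerts the waiting
 * thread when it actually won the WAIT -> CANCELED transition.
 */
#if 0
 for (;;)
 {
 VMCPUSTATE enmState = VMCPU_GET_STATE(pVCpu);
 if ( enmState != VMCPUSTATE_STARTED_EXEC_NEM
 && enmState != VMCPUSTATE_STARTED_EXEC_NEM_WAIT)
 break; /* nothing to cancel */
 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED, enmState))
 {
 if (enmState == VMCPUSTATE_STARTED_EXEC_NEM_WAIT)
 NtAlertThread(pVCpu->nem.s.hNativeThreadHandle); /* wake the EMT */
 break;
 }
 ASMNopPause(); /* lost the race, re-read the state */
 }
#endif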
1445
1446
1447#ifdef LOG_ENABLED
1448/**
1449 * Logs the current CPU state.
1450 */
1451NEM_TMPL_STATIC void nemHCWinLogState(PVM pVM, PVMCPU pVCpu)
1452{
1453 if (LogIs3Enabled())
1454 {
1455# if 0 // def IN_RING3 - causes lazy state import assertions all over CPUM.
1456 char szRegs[4096];
1457 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
1458 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
1459 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
1460 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
1461 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
1462 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
1463 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
1464 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
1465 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
1466 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
1467 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
1468 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
1469 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
1470 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
1471 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
1472 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
1473 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
1474 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
1475 " efer=%016VR{efer}\n"
1476 " pat=%016VR{pat}\n"
1477 " sf_mask=%016VR{sf_mask}\n"
1478 "krnl_gs_base=%016VR{krnl_gs_base}\n"
1479 " lstar=%016VR{lstar}\n"
1480 " star=%016VR{star} cstar=%016VR{cstar}\n"
1481 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
1482 );
1483
1484 char szInstr[256];
1485 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
1486 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
1487 szInstr, sizeof(szInstr), NULL);
1488 Log3(("%s%s\n", szRegs, szInstr));
1489# else
1490 /** @todo stat logging in ring-0 */
1491 RT_NOREF(pVM, pVCpu);
1492# endif
1493 }
1494}
1495#endif /* LOG_ENABLED */
1496
1497
1498/** Macro used by nemHCWinExecStateToLogStr and nemR3WinExecStateToLogStr. */
1499#define SWITCH_IT(a_szPrefix) \
1500 do \
1501 switch (u)\
1502 { \
1503 case 0x00: return a_szPrefix ""; \
1504 case 0x01: return a_szPrefix ",Pnd"; \
1505 case 0x02: return a_szPrefix ",Dbg"; \
1506 case 0x03: return a_szPrefix ",Pnd,Dbg"; \
1507 case 0x04: return a_szPrefix ",Shw"; \
1508 case 0x05: return a_szPrefix ",Pnd,Shw"; \
1509 case 0x06: return a_szPrefix ",Shw,Dbg"; \
1510 case 0x07: return a_szPrefix ",Pnd,Shw,Dbg"; \
1511 default: AssertFailedReturn("WTF?"); \
1512 } \
1513 while (0)
1514
1515#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
1516/**
1517 * Translates the execution state bitfield into a short log string, VID version.
1518 *
1519 * @returns Read-only log string.
1520 * @param pMsgHdr The header whose state to summarize.
1521 */
1522static const char *nemHCWinExecStateToLogStr(HV_X64_INTERCEPT_MESSAGE_HEADER const *pMsgHdr)
1523{
1524 unsigned u = (unsigned)pMsgHdr->ExecutionState.InterruptionPending
1525 | ((unsigned)pMsgHdr->ExecutionState.DebugActive << 1)
1526 | ((unsigned)pMsgHdr->ExecutionState.InterruptShadow << 2);
1527 if (pMsgHdr->ExecutionState.EferLma)
1528 SWITCH_IT("LM");
1529 else if (pMsgHdr->ExecutionState.Cr0Pe)
1530 SWITCH_IT("PM");
1531 else
1532 SWITCH_IT("RM");
1533}
1534#elif defined(IN_RING3)
1535/**
1536 * Translates the execution state bitfield into a short log string, WinHv version.
1537 *
1538 * @returns Read-only log string.
1539 * @param pExitCtx The exit context whose state to summarize.
1540 */
1541static const char *nemR3WinExecStateToLogStr(WHV_VP_EXIT_CONTEXT const *pExitCtx)
1542{
1543 unsigned u = (unsigned)pExitCtx->ExecutionState.InterruptionPending
1544 | ((unsigned)pExitCtx->ExecutionState.DebugActive << 1)
1545 | ((unsigned)pExitCtx->ExecutionState.InterruptShadow << 2);
1546 if (pExitCtx->ExecutionState.EferLma)
1547 SWITCH_IT("LM");
1548 else if (pExitCtx->ExecutionState.Cr0Pe)
1549 SWITCH_IT("PM");
1550 else
1551 SWITCH_IT("RM");
1552}
1553#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
1554#undef SWITCH_IT
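
/*
 * Editorial worked example: the 3-bit value decoded by SWITCH_IT packs
 * InterruptionPending into bit 0, DebugActive into bit 1 and InterruptShadow
 * into bit 2. E.g. a long-mode guest with a pending interruption inside an
 * interrupt shadow gives u = 0x05 and logs as "LM,Pnd,Shw".
 */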
1555
1556
1557#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
1558/**
1559 * Advances the guest RIP and clears EFLAGS.RF, VID version.
1560 *
1561 * This may clear VMCPU_FF_INHIBIT_INTERRUPTS.
1562 *
1563 * @param pVCpu The cross context virtual CPU structure.
1564 * @param pMsgHdr The X64 intercept message header.
1565 * @param cbMinInstr The minimum instruction length, or 1 if unknown.
1566 */
1567DECLINLINE(void)
1568nemHCWinAdvanceGuestRipAndClearRF(PVMCPU pVCpu, HV_X64_INTERCEPT_MESSAGE_HEADER const *pMsgHdr, uint8_t cbMinInstr)
1569{
1570 Assert(!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));
1571
1572 /* Advance the RIP. */
1573 Assert(pMsgHdr->InstructionLength >= cbMinInstr); RT_NOREF_PV(cbMinInstr);
1574 pVCpu->cpum.GstCtx.rip += pMsgHdr->InstructionLength;
1575 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
1576
1577 /* Update interrupt inhibition. */
1578 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1579 { /* likely */ }
1580 else if (pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
1581 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1582}
1583#elif defined(IN_RING3)
1584/**
1585 * Advances the guest RIP and clears EFLAGS.RF, WinHv version.
1586 *
1587 * This may clear VMCPU_FF_INHIBIT_INTERRUPTS.
1588 *
1589 * @param pVCpu The cross context virtual CPU structure.
1590 * @param pExitCtx The exit context.
1591 * @param cbMinInstr The minimum instruction length, or 1 if unknown.
1592 */
1593DECLINLINE(void) nemR3WinAdvanceGuestRipAndClearRF(PVMCPU pVCpu, WHV_VP_EXIT_CONTEXT const *pExitCtx, uint8_t cbMinInstr)
1594{
1595 Assert(!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));
1596
1597 /* Advance the RIP. */
1598 Assert(pExitCtx->InstructionLength >= cbMinInstr); RT_NOREF_PV(cbMinInstr);
1599 pVCpu->cpum.GstCtx.rip += pExitCtx->InstructionLength;
1600 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
1601
1602 /* Update interrupt inhibition. */
1603 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1604 { /* likely */ }
1605 else if (pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
1606 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1607}
1608#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
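
/*
 * Editorial worked example: if the guest executes STI, the exit records
 * EMSetInhibitInterruptsPC() with the RIP following STI and raises
 * VMCPU_FF_INHIBIT_INTERRUPTS. Once the helpers above advance RIP past that
 * recorded PC (rip != EMGetInhibitInterruptsPC()), the one-instruction
 * interrupt shadow has expired and the force flag is cleared.
 */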
1609
1610
1611
1612NEM_TMPL_STATIC DECLCALLBACK(int)
1613nemHCWinUnmapOnePageCallback(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint8_t *pu2NemState, void *pvUser)
1614{
1615 RT_NOREF_PV(pvUser);
1616#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1617 int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys);
1618 AssertRC(rc);
1619 if (RT_SUCCESS(rc))
1620#else
1621 RT_NOREF_PV(pVCpu);
1622 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
1623 if (SUCCEEDED(hrc))
1624#endif
1625 {
1626 Log5(("NEM GPA unmap all: %RGp (cMappedPages=%u)\n", GCPhys, pVM->nem.s.cMappedPages - 1));
1627 *pu2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
1628 }
1629 else
1630 {
1631#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1632 LogRel(("nemR3WinUnmapOnePageCallback: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));
1633#else
1634 LogRel(("nemR3WinUnmapOnePageCallback: GCPhys=%RGp %s hrc=%Rhrc (%#x) Last=%#x/%u (cMappedPages=%u)\n",
1635 GCPhys, g_apszPageStates[*pu2NemState], hrc, hrc, RTNtLastStatusValue(),
1636 RTNtLastErrorValue(), pVM->nem.s.cMappedPages));
1637#endif
1638 *pu2NemState = NEM_WIN_PAGE_STATE_NOT_SET;
1639 }
1640 if (pVM->nem.s.cMappedPages > 0)
1641 ASMAtomicDecU32(&pVM->nem.s.cMappedPages);
1642 return VINF_SUCCESS;
1643}
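
/*
 * Editorial usage sketch (not compiled): this callback is handed to
 * PGMPhysNemEnumPagesByState() to unmap every page at or above a given state,
 * e.g. as the fall-back when a single-page unmap fails further down:
 */
#if 0
 PGMPhysNemEnumPagesByState(pVM, pVCpu, NEM_WIN_PAGE_STATE_READABLE,
 nemHCWinUnmapOnePageCallback, NULL /*pvUser*/);
#endif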
1644
1645
1646/**
1647 * State to pass between nemHCWinHandleMessageMemory / nemR3WinHandleExitMemory
1648 * and nemHCWinHandleMemoryAccessPageCheckerCallback.
1649 */
1650typedef struct NEMHCWINHMACPCCSTATE
1651{
1652 /** Input: Write access. */
1653 bool fWriteAccess;
1654 /** Output: Set if we did something. */
1655 bool fDidSomething;
1656 /** Output: Set if we should resume. */
1657 bool fCanResume;
1658} NEMHCWINHMACPCCSTATE;
1659
1660/**
1661 * @callback_method_impl{FNPGMPHYSNEMCHECKPAGE,
1662 * Worker for nemR3WinHandleMemoryAccess; pvUser points to a
1663 * NEMHCWINHMACPCCSTATE structure. }
1664 */
1665NEM_TMPL_STATIC DECLCALLBACK(int)
1666nemHCWinHandleMemoryAccessPageCheckerCallback(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
1667{
1668 NEMHCWINHMACPCCSTATE *pState = (NEMHCWINHMACPCCSTATE *)pvUser;
1669 pState->fDidSomething = false;
1670 pState->fCanResume = false;
1671
1672 /* If A20 is disabled, we may need to make another query on the masked
1673 page to get the correct protection information. */
1674 uint8_t u2State = pInfo->u2NemState;
1675 RTGCPHYS GCPhysSrc;
1676 if ( pVM->nem.s.fA20Enabled
1677 || !NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
1678 GCPhysSrc = GCPhys;
1679 else
1680 {
1681 GCPhysSrc = GCPhys & ~(RTGCPHYS)RT_BIT_32(20);
1682 PGMPHYSNEMPAGEINFO Info2;
1683 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhysSrc, pState->fWriteAccess, &Info2, NULL, NULL);
1684 AssertRCReturn(rc, rc);
1685
1686 *pInfo = Info2;
1687 pInfo->u2NemState = u2State;
1688 }
1689
1690 /*
1691 * Consolidate current page state with actual page protection and access type.
1692 * We don't really consider downgrades here, as they shouldn't happen.
1693 */
1694#ifndef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1695 /** @todo Someone at microsoft please explain:
1696 * I'm not sure WTF was going on, but I ended up in a loop if I remapped a
1697 * readonly page as writable (unmap, then map again). Specifically, this was an
1698 * issue with the big VRAM mapping at 0xe0000000 when booting DSL 4.4.1. So, in
1699 * the hope of working around that we no longer pre-map anything, just unmap stuff
1700 * and do it lazily here. And here we will first unmap, restart, and then remap
1701 * with new protection or backing.
1702 */
1703#endif
1704 int rc;
1705 switch (u2State)
1706 {
1707 case NEM_WIN_PAGE_STATE_UNMAPPED:
1708 case NEM_WIN_PAGE_STATE_NOT_SET:
1709 if (pInfo->fNemProt == NEM_PAGE_PROT_NONE)
1710 {
1711 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #1\n", GCPhys));
1712 return VINF_SUCCESS;
1713 }
1714
1715 /* Don't bother remapping it if it's a write request to a non-writable page. */
1716 if ( pState->fWriteAccess
1717 && !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE))
1718 {
1719 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #1w\n", GCPhys));
1720 return VINF_SUCCESS;
1721 }
1722
1723 /* Map the page. */
1724 rc = nemHCNativeSetPhysPage(pVM,
1725 pVCpu,
1726 GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
1727 GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
1728 pInfo->fNemProt,
1729 &u2State,
1730 true /*fBackingState*/);
1731 pInfo->u2NemState = u2State;
1732 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - synced => %s + %Rrc\n",
1733 GCPhys, g_apszPageStates[u2State], rc));
1734 pState->fDidSomething = true;
1735 pState->fCanResume = true;
1736 return rc;
1737
1738 case NEM_WIN_PAGE_STATE_READABLE:
1739 if ( !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1740 && (pInfo->fNemProt & (NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE)))
1741 {
1742 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #2\n", GCPhys));
1743 return VINF_SUCCESS;
1744 }
1745
1746#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1747 /* Upgrade page to writable. */
1748/** @todo test this */
1749 if ( (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1750 && pState->fWriteAccess)
1751 {
1752 rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhys,
1753 HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE
1754 | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
1755 AssertRC(rc);
1756 if (RT_SUCCESS(rc))
1757 {
1758 pInfo->u2NemState = NEM_WIN_PAGE_STATE_WRITABLE;
1759 pState->fDidSomething = true;
1760 pState->fCanResume = true;
1761 Log5(("NEM GPA write-upgrade/exit: %RGp (was %s, cMappedPages=%u)\n",
1762 GCPhys, g_apszPageStates[u2State], pVM->nem.s.cMappedPages));
1763 }
1764 }
1765 else
1766 {
1767 /* Need to emulate the access. */
1768 AssertBreak(pInfo->fNemProt != NEM_PAGE_PROT_NONE); /* There should be no downgrades. */
1769 rc = VINF_SUCCESS;
1770 }
1771 return rc;
1772#else
1773 break;
1774#endif
1775
1776 case NEM_WIN_PAGE_STATE_WRITABLE:
1777 if (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1778 {
1779 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #3\n", GCPhys));
1780 return VINF_SUCCESS;
1781 }
1782#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1783 AssertFailed(); /* There should be no downgrades. */
1784#endif
1785 break;
1786
1787 default:
1788 AssertLogRelMsgFailedReturn(("u2State=%#x\n", u2State), VERR_NEM_IPE_4);
1789 }
1790
1791 /*
1792 * Unmap and restart the instruction.
1793 * If this fails, which it does every so often, just unmap everything for now.
1794 */
1795#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1796 rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys);
1797 AssertRC(rc);
1798 if (RT_SUCCESS(rc))
1799#else
1800 /** @todo figure out whether we mess up the state or if it's WHv. */
1801 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
1802 if (SUCCEEDED(hrc))
1803#endif
1804 {
1805 pState->fDidSomething = true;
1806 pState->fCanResume = true;
1807 pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
1808 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
1809 Log5(("NEM GPA unmapped/exit: %RGp (was %s, cMappedPages=%u)\n", GCPhys, g_apszPageStates[u2State], cMappedPages));
1810 return VINF_SUCCESS;
1811 }
1812#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1813 LogRel(("nemHCWinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhys, rc));
1814 return rc;
1815#else
1816 LogRel(("nemHCWinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp %s hrc=%Rhrc (%#x) Last=%#x/%u (cMappedPages=%u)\n",
1817 GCPhys, g_apszPageStates[u2State], hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue(),
1818 pVM->nem.s.cMappedPages));
1819
1820 PGMPhysNemEnumPagesByState(pVM, pVCpu, NEM_WIN_PAGE_STATE_READABLE, nemHCWinUnmapOnePageCallback, NULL);
1821 Log(("nemHCWinHandleMemoryAccessPageCheckerCallback: Unmapped all (cMappedPages=%u)\n", pVM->nem.s.cMappedPages));
1822
1823 pState->fDidSomething = true;
1824 pState->fCanResume = true;
1825 pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
1826 return VINF_SUCCESS;
1827#endif
1828}
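
/*
 * Editorial summary of the page state transitions handled above:
 *
 *   UNMAPPED/NOT_SET -> map via nemHCNativeSetPhysPage() when fNemProt permits
 *                       the access; otherwise leave it to the emulator.
 *   READABLE         -> on a permitted write, upgrade to WRITABLE (hypercall
 *                       builds) or unmap and restart (WHv builds).
 *   WRITABLE         -> resume if still writable; downgrades are unexpected.
 *
 * When the single-page unmap fails, everything READABLE and up is unmapped via
 * nemHCWinUnmapOnePageCallback and the instruction is restarted cleanly.
 */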
1829
1830
1831
1832#if defined(IN_RING0) && defined(NEM_WIN_TEMPLATE_MODE_OWN_RUN_API)
1833/**
1834 * Wrapper around nemR0WinImportState that converts VERR_NEM_FLUSH_TLB
1835 * into informational status codes and logs+asserts statuses.
1836 *
1837 * @returns VBox strict status code.
1838 * @param pGVM The global (ring-0) VM structure.
1839 * @param pGVCpu The global (ring-0) per CPU structure.
1840 * @param pVCpu The cross context per CPU structure.
1841 * @param fWhat What to import.
1842 * @param pszCaller Who is doing the importing.
1843 */
1844DECLINLINE(VBOXSTRICTRC) nemR0WinImportStateStrict(PGVM pGVM, PGVMCPU pGVCpu, PVMCPU pVCpu, uint64_t fWhat, const char *pszCaller)
1845{
1846 int rc = nemR0WinImportState(pGVM, pGVCpu, &pVCpu->cpum.GstCtx, fWhat);
1847 if (RT_SUCCESS(rc))
1848 {
1849 Assert(rc == VINF_SUCCESS);
1850 return VINF_SUCCESS;
1851 }
1852
1853 if (rc == VERR_NEM_FLUSH_TLB)
1854 {
1855 Log4(("%s/%u: nemR0WinImportState -> %Rrc\n", pszCaller, pGVCpu->idCpu, -rc));
1856 return -rc;
1857 }
1858 RT_NOREF(pszCaller);
1859 AssertMsgFailedReturn(("%s/%u: nemR0WinImportState failed: %Rrc\n", pszCaller, pGVCpu->idCpu, rc), rc);
1860}
1861#endif /* IN_RING0 && NEM_WIN_TEMPLATE_MODE_OWN_RUN_API*/
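
/*
 * Editorial note on the "return -rc" above: VBox informational statuses are
 * (by convention) the positive mirrors of their VERR_ siblings, so negating
 * VERR_NEM_FLUSH_TLB yields the matching VINF_NEM_FLUSH_TLB value, which the
 * run loop treats as "flush and continue" rather than as a failure.
 */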
1862
1863#if defined(NEM_WIN_TEMPLATE_MODE_OWN_RUN_API) || defined(IN_RING3)
1864/**
1865 * Wrapper around nemR0WinImportStateStrict and nemHCWinCopyStateFromHyperV.
1866 *
1867 * Unlike the wrapped APIs, this checks whether it's necessary.
1868 *
1869 * @returns VBox strict status code.
1870 * @param pVCpu The cross context per CPU structure.
1871 * @param pGVCpu The global (ring-0) per CPU structure (NULL in ring-3).
1872 * @param fWhat What to import.
1873 * @param pszCaller Who is doing the importing.
1874 */
1875DECLINLINE(VBOXSTRICTRC) nemHCWinImportStateIfNeededStrict(PVMCPU pVCpu, PGVMCPU pGVCpu, uint64_t fWhat, const char *pszCaller)
1876{
1877 if (pVCpu->cpum.GstCtx.fExtrn & fWhat)
1878 {
1879# ifdef IN_RING0
1880 return nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pVCpu, fWhat, pszCaller);
1881# else
1882 RT_NOREF(pGVCpu, pszCaller);
1883 int rc = nemHCWinCopyStateFromHyperV(pVCpu->pVMR3, pVCpu, fWhat);
1884 AssertRCReturn(rc, rc);
1885# endif
1886 }
1887 return VINF_SUCCESS;
1888}
1889#endif /* NEM_WIN_TEMPLATE_MODE_OWN_RUN_API || IN_RING3 */
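
/*
 * Editorial usage sketch (not compiled): callers pass the CPUMCTX_EXTRN_* mask
 * of the state they are about to touch; the wrapper is a no-op when that state
 * has already been imported from Hyper-V:
 */
#if 0
 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu,
 CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS, "MyExitHandler");
 if (rcStrict != VINF_SUCCESS)
 return rcStrict;
#endif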
1890
1891#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
1892/**
1893 * Copies register state from the X64 intercept message header.
1894 *
1895 * ASSUMES no state copied yet.
1896 *
1897 * @param pVCpu The cross context per CPU structure.
1898 * @param pHdr The X64 intercept message header.
1899 * @sa nemR3WinCopyStateFromX64Header
1900 */
1901DECLINLINE(void) nemHCWinCopyStateFromX64Header(PVMCPU pVCpu, HV_X64_INTERCEPT_MESSAGE_HEADER const *pHdr)
1902{
1903 Assert( (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
1904 == (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT));
1905 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.cs, pHdr->CsSegment);
1906 pVCpu->cpum.GstCtx.rip = pHdr->Rip;
1907 pVCpu->cpum.GstCtx.rflags.u = pHdr->Rflags;
1908
1909 pVCpu->nem.s.fLastInterruptShadow = pHdr->ExecutionState.InterruptShadow;
1910 if (!pHdr->ExecutionState.InterruptShadow)
1911 {
1912 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1913 { /* likely */ }
1914 else
1915 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1916 }
1917 else
1918 EMSetInhibitInterruptsPC(pVCpu, pHdr->Rip);
1919
1920 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT);
1921}
1922#elif defined(IN_RING3)
1923/**
1924 * Copies register state from the (common) exit context.
1925 *
1926 * ASSUMES no state copied yet.
1927 *
1928 * @param pVCpu The cross context per CPU structure.
1929 * @param pExitCtx The common exit context.
1930 * @sa nemHCWinCopyStateFromX64Header
1931 */
1932DECLINLINE(void) nemR3WinCopyStateFromX64Header(PVMCPU pVCpu, WHV_VP_EXIT_CONTEXT const *pExitCtx)
1933{
1934 Assert( (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
1935 == (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT));
1936 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.cs, pExitCtx->Cs);
1937 pVCpu->cpum.GstCtx.rip = pExitCtx->Rip;
1938 pVCpu->cpum.GstCtx.rflags.u = pExitCtx->Rflags;
1939
1940 pVCpu->nem.s.fLastInterruptShadow = pExitCtx->ExecutionState.InterruptShadow;
1941 if (!pExitCtx->ExecutionState.InterruptShadow)
1942 {
1943 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1944 { /* likely */ }
1945 else
1946 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1947 }
1948 else
1949 EMSetInhibitInterruptsPC(pVCpu, pExitCtx->Rip);
1950
1951 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT);
1952}
1953#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
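
/*
 * Editorial note: both header-copy helpers above clear the corresponding
 * CPUMCTX_EXTRN_* bits once RIP, RFLAGS, CS and the interrupt shadow have been
 * copied in; the Assert(!(fExtrn & ...)) checks in the RIP-advance helpers
 * rely on exactly this bookkeeping.
 */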
1954
1955
1956#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
1957/**
1958 * Deals with memory intercept message.
1959 *
1960 * @returns Strict VBox status code.
1961 * @param pVM The cross context VM structure.
1962 * @param pVCpu The cross context per CPU structure.
1963 * @param pMsg The message.
1964 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
1965 * @sa nemR3WinHandleExitMemory
1966 */
1967NEM_TMPL_STATIC VBOXSTRICTRC
1968nemHCWinHandleMessageMemory(PVM pVM, PVMCPU pVCpu, HV_X64_MEMORY_INTERCEPT_MESSAGE const *pMsg, PGVMCPU pGVCpu)
1969{
1970 uint64_t const uHostTsc = ASMReadTSC();
1971 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
1972 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
1973 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_EXECUTE);
1974
1975 /*
1976 * Whatever we do, we must clear pending event injection upon resume.
1977 */
1978 if (pMsg->Header.ExecutionState.InterruptionPending)
1979 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
1980
1981# if 0 /* Experiment: 20K -> 34K exit/s. */
1982 if ( pMsg->Header.ExecutionState.EferLma
1983 && pMsg->Header.CsSegment.Long
1984 && pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
1985 {
1986 if ( pMsg->Header.Rip - (uint64_t)0xf65a < (uint64_t)(0xf662 - 0xf65a)
1987 && pMsg->InstructionBytes[0] == 0x89
1988 && pMsg->InstructionBytes[1] == 0x03)
1989 {
1990 pVCpu->cpum.GstCtx.rip = pMsg->Header.Rip + 2;
1991 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RIP;
1992 AssertMsg(pMsg->Header.InstructionLength == 2, ("%#x\n", pMsg->Header.InstructionLength));
1993 //Log(("%RX64 msg:\n%.80Rhxd\n", pVCpu->cpum.GstCtx.rip, pMsg));
1994 return VINF_SUCCESS;
1995 }
1996 }
1997# endif
1998
1999 /*
2000 * Ask PGM for information about the given GCPhys. We need to check if we're
2001 * out of sync first.
2002 */
2003 NEMHCWINHMACPCCSTATE State = { pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE, false, false };
2004 PGMPHYSNEMPAGEINFO Info;
2005 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pMsg->GuestPhysicalAddress, State.fWriteAccess, &Info,
2006 nemHCWinHandleMemoryAccessPageCheckerCallback, &State);
2007 if (RT_SUCCESS(rc))
2008 {
2009 if (Info.fNemProt & ( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
2010 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
2011 {
2012 if (State.fCanResume)
2013 {
2014 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting (%s)\n",
2015 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2016 pMsg->GuestPhysicalAddress, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
2017 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
2018 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pMsg->Header.InterceptAccessType]));
2019 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
2020 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, uHostTsc);
2021 return VINF_SUCCESS;
2022 }
2023 }
2024 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating (%s)\n",
2025 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2026 pMsg->GuestPhysicalAddress, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
2027 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
2028 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pMsg->Header.InterceptAccessType]));
2029 }
2030 else
2031 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp rc=%Rrc%s; emulating (%s)\n",
2032 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2033 pMsg->GuestPhysicalAddress, rc, State.fDidSomething ? " modified-backing" : "",
2034 g_apszHvInterceptAccessTypes[pMsg->Header.InterceptAccessType]));
2035
2036 /*
2037 * Emulate the memory access, either access handler or special memory.
2038 */
2039 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2040 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
2041 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
2042 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
2043 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, uHostTsc);
2044 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
2045 VBOXSTRICTRC rcStrict;
2046# ifdef IN_RING0
2047 rcStrict = nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pVCpu,
2048 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES, "MemExit");
2049 if (rcStrict != VINF_SUCCESS)
2050 return rcStrict;
2051# else
2052 rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2053 AssertRCReturn(rc, rc);
2054 NOREF(pGVCpu);
2055# endif
2056
2057 if (pMsg->Reserved1)
2058 Log(("MemExit/Reserved1=%#x\n", pMsg->Reserved1));
2059 if (pMsg->Header.ExecutionState.Reserved0 || pMsg->Header.ExecutionState.Reserved1)
2060 Log(("MemExit/Hdr/State: Reserved0=%#x Reserved1=%#x\n", pMsg->Header.ExecutionState.Reserved0, pMsg->Header.ExecutionState.Reserved1));
2061
2062 if (!pExitRec)
2063 {
2064 //if (pMsg->InstructionByteCount > 0)
2065 // Log4(("InstructionByteCount=%#x %.16Rhxs\n", pMsg->InstructionByteCount, pMsg->InstructionBytes));
2066 if (pMsg->InstructionByteCount > 0)
2067 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pMsg->Header.Rip,
2068 pMsg->InstructionBytes, pMsg->InstructionByteCount);
2069 else
2070 rcStrict = IEMExecOne(pVCpu);
2071 /** @todo do we need to do anything wrt debugging here? */
2072 }
2073 else
2074 {
2075 /* Frequent access or probing. */
2076 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2077 Log4(("MemExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2078 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2079 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2080 }
2081 return rcStrict;
2082}
2083#elif defined(IN_RING3)
2084/**
2085 * Deals with memory access exits (WHvRunVpExitReasonMemoryAccess).
2086 *
2087 * @returns Strict VBox status code.
2088 * @param pVM The cross context VM structure.
2089 * @param pVCpu The cross context per CPU structure.
2090 * @param pExit The VM exit information to handle.
2091 * @sa nemHCWinHandleMessageMemory
2092 */
2093NEM_TMPL_STATIC VBOXSTRICTRC
2094nemR3WinHandleExitMemory(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2095{
2096 uint64_t const uHostTsc = ASMReadTSC();
2097 Assert(pExit->MemoryAccess.AccessInfo.AccessType != 3);
2098
2099 /*
2100 * Whatever we do, we must clear pending event injection upon resume.
2101 */
2102 if (pExit->VpContext.ExecutionState.InterruptionPending)
2103 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
2104
2105 /*
2106 * Ask PGM for information about the given GCPhys. We need to check if we're
2107 * out of sync first.
2108 */
2109 NEMHCWINHMACPCCSTATE State = { pExit->MemoryAccess.AccessInfo.AccessType == WHvMemoryAccessWrite, false, false };
2110 PGMPHYSNEMPAGEINFO Info;
2111 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pExit->MemoryAccess.Gpa, State.fWriteAccess, &Info,
2112 nemHCWinHandleMemoryAccessPageCheckerCallback, &State);
2113 if (RT_SUCCESS(rc))
2114 {
2115 if (Info.fNemProt & ( pExit->MemoryAccess.AccessInfo.AccessType == WHvMemoryAccessWrite
2116 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
2117 {
2118 if (State.fCanResume)
2119 {
2120 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting (%s)\n",
2121 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2122 pExit->MemoryAccess.Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
2123 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
2124 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pExit->MemoryAccess.AccessInfo.AccessType]));
2125 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
2126 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, uHostTsc);
2127 return VINF_SUCCESS;
2128 }
2129 }
2130 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating (%s)\n",
2131 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2132 pExit->MemoryAccess.Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
2133 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
2134 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pExit->MemoryAccess.AccessInfo.AccessType]));
2135 }
2136 else
2137 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp rc=%Rrc%s; emulating (%s)\n",
2138 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2139 pExit->MemoryAccess.Gpa, rc, State.fDidSomething ? " modified-backing" : "",
2140 g_apszHvInterceptAccessTypes[pExit->MemoryAccess.AccessInfo.AccessType]));
2141
2142 /*
2143 * Emulate the memory access, either access handler or special memory.
2144 */
2145 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2146 pExit->MemoryAccess.AccessInfo.AccessType == WHvMemoryAccessWrite
2147 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
2148 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
2149 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, uHostTsc);
2150 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2151 rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2152 AssertRCReturn(rc, rc);
2153 if (pExit->VpContext.ExecutionState.Reserved0 || pExit->VpContext.ExecutionState.Reserved1)
2154 Log(("MemExit/Hdr/State: Reserved0=%#x Reserved1=%#x\n", pExit->VpContext.ExecutionState.Reserved0, pExit->VpContext.ExecutionState.Reserved1));
2155
2156 VBOXSTRICTRC rcStrict;
2157 if (!pExitRec)
2158 {
2159 //if (pMsg->InstructionByteCount > 0)
2160 // Log4(("InstructionByteCount=%#x %.16Rhxs\n", pMsg->InstructionByteCount, pMsg->InstructionBytes));
2161 if (pExit->MemoryAccess.InstructionByteCount > 0)
2162 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pExit->VpContext.Rip,
2163 pExit->MemoryAccess.InstructionBytes, pExit->MemoryAccess.InstructionByteCount);
2164 else
2165 rcStrict = IEMExecOne(pVCpu);
2166 /** @todo do we need to do anything wrt debugging here? */
2167 }
2168 else
2169 {
2170 /* Frequent access or probing. */
2171 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2172 Log4(("MemExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2173 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2174 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2175 }
2176 return rcStrict;
2177}
2178#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
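
/*
 * Editorial summary of the decision flow shared by both memory exit handlers
 * above:
 *
 *   1. PGMPhysNemPageInfoChecker() with
 *      nemHCWinHandleMemoryAccessPageCheckerCallback tries to (re)map the
 *      page; if that succeeds and the access is now permitted, the instruction
 *      is simply restarted (VINF_SUCCESS).
 *   2. Otherwise the IEM-relevant state is imported and the access emulated:
 *      one instruction via IEMExecOneWithPrefetchedByPC() / IEMExecOne(), or
 *      EMHistoryExec() when the exit history marks this RIP as hot.
 */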
2179
2180
2181#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
2182/**
2183 * Deals with I/O port intercept message.
2184 *
2185 * @returns Strict VBox status code.
2186 * @param pVM The cross context VM structure.
2187 * @param pVCpu The cross context per CPU structure.
2188 * @param pMsg The message.
2189 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
2190 */
2191NEM_TMPL_STATIC VBOXSTRICTRC
2192nemHCWinHandleMessageIoPort(PVM pVM, PVMCPU pVCpu, HV_X64_IO_PORT_INTERCEPT_MESSAGE const *pMsg, PGVMCPU pGVCpu)
2193{
2194 /*
2195 * Assert message sanity.
2196 */
2197 Assert( pMsg->AccessInfo.AccessSize == 1
2198 || pMsg->AccessInfo.AccessSize == 2
2199 || pMsg->AccessInfo.AccessSize == 4);
2200 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
2201 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE);
2202 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, pGVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
2203 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRip, pMsg->Header.Rip);
2204 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
2205 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
2206 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRax, pMsg->Rax);
2207 if (pMsg->AccessInfo.StringOp)
2208 {
2209 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, pGVCpu, HvX64RegisterDs, pMsg->DsSegment);
2210 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, pGVCpu, HvX64RegisterEs, pMsg->EsSegment);
2211 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRcx, pMsg->Rcx);
2212 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRsi, pMsg->Rsi);
2213 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRdi, pMsg->Rdi);
2214 }
2215
2216 /*
2217 * Whatever we do, we must clear pending event injection upon resume.
2218 */
2219 if (pMsg->Header.ExecutionState.InterruptionPending)
2220 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
2221
2222 /*
2223 * Add history first to avoid two paths doing EMHistoryExec calls.
2224 */
2225 VBOXSTRICTRC rcStrict;
2226 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2227 !pMsg->AccessInfo.StringOp
2228 ? ( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
2229 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_WRITE)
2230 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_READ))
2231 : ( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
2232 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_WRITE)
2233 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_READ)),
2234 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
2235 if (!pExitRec)
2236 {
2237 if (!pMsg->AccessInfo.StringOp)
2238 {
2239 /*
2240 * Simple port I/O.
2241 */
2242 static uint32_t const s_fAndMask[8] =
2243 { UINT32_MAX, UINT32_C(0xff), UINT32_C(0xffff), UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX };
2244 uint32_t const fAndMask = s_fAndMask[pMsg->AccessInfo.AccessSize];
2245
2246 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
2247 if (pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
2248 {
2249 rcStrict = IOMIOPortWrite(pVM, pVCpu, pMsg->PortNumber, (uint32_t)pMsg->Rax & fAndMask, pMsg->AccessInfo.AccessSize);
2250 Log4(("IOExit/%u: %04x:%08RX64/%s: OUT %#x, %#x LB %u rcStrict=%Rrc\n",
2251 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2252 pMsg->PortNumber, (uint32_t)pMsg->Rax & fAndMask, pMsg->AccessInfo.AccessSize, VBOXSTRICTRC_VAL(rcStrict) ));
2253 if (IOM_SUCCESS(rcStrict))
2254 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, &pMsg->Header, 1);
2255# ifdef IN_RING0
2256 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
2257 && !pVCpu->cpum.GstCtx.rflags.Bits.u1TF
2258 /** @todo check for debug breakpoints */ )
2259 return EMRZSetPendingIoPortWrite(pVCpu, pMsg->PortNumber, pMsg->Header.InstructionLength,
2260 pMsg->AccessInfo.AccessSize, (uint32_t)pMsg->Rax & fAndMask);
2261# endif
2262 else
2263 {
2264 pVCpu->cpum.GstCtx.rax = pMsg->Rax;
2265 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RAX;
2266 }
2267 }
2268 else
2269 {
2270 uint32_t uValue = 0;
2271 rcStrict = IOMIOPortRead(pVM, pVCpu, pMsg->PortNumber, &uValue, pMsg->AccessInfo.AccessSize);
2272 Log4(("IOExit/%u: %04x:%08RX64/%s: IN %#x LB %u -> %#x, rcStrict=%Rrc\n",
2273 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2274 pMsg->PortNumber, pMsg->AccessInfo.AccessSize, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2275 if (IOM_SUCCESS(rcStrict))
2276 {
2277 if (pMsg->AccessInfo.AccessSize != 4)
2278 pVCpu->cpum.GstCtx.rax = (pMsg->Rax & ~(uint64_t)fAndMask) | (uValue & fAndMask);
2279 else
2280 pVCpu->cpum.GstCtx.rax = uValue;
2281 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RAX;
2282 Log4(("IOExit/%u: RAX %#RX64 -> %#RX64\n", pVCpu->idCpu, pMsg->Rax, pVCpu->cpum.GstCtx.rax));
2283 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, &pMsg->Header, 1);
2284 }
2285 else
2286 {
2287 pVCpu->cpum.GstCtx.rax = pMsg->Rax;
2288 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RAX;
2289# ifdef IN_RING0
2290 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
2291 && !pVCpu->cpum.GstCtx.rflags.Bits.u1TF
2292 /** @todo check for debug breakpoints */ )
2293 return EMRZSetPendingIoPortRead(pVCpu, pMsg->PortNumber, pMsg->Header.InstructionLength,
2294 pMsg->AccessInfo.AccessSize);
2295# endif
2296 }
2297 }
2298 }
2299 else
2300 {
2301 /*
2302 * String port I/O.
2303 */
2304 /** @todo Someone at Microsoft please explain how we can get the address mode
2305 * from the IoPortAccess.VpContext. CS.Attributes is only sufficient for
2306 * getting the default mode, it can always be overridden by a prefix. This
2307 * forces us to interpret the instruction from opcodes, which is suboptimal.
2308 * Both AMD-V and VT-x include the address size in the exit info, at least on
2309 * CPUs that are reasonably new.
2310 *
2311 * Of course, it's possible this is simply undocumented and we just need to do some
2312 * experiments to figure out how it's communicated. Alternatively, we can scan
2313 * the opcode bytes for possible evil prefixes.
2314 */
2315 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
2316 pVCpu->cpum.GstCtx.fExtrn &= ~( CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDI | CPUMCTX_EXTRN_RSI
2317 | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2318 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.ds, pMsg->DsSegment);
2319 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.es, pMsg->EsSegment);
2320 pVCpu->cpum.GstCtx.rax = pMsg->Rax;
2321 pVCpu->cpum.GstCtx.rcx = pMsg->Rcx;
2322 pVCpu->cpum.GstCtx.rdi = pMsg->Rdi;
2323 pVCpu->cpum.GstCtx.rsi = pMsg->Rsi;
2324# ifdef IN_RING0
2325 rcStrict = nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "IOExit");
2326 if (rcStrict != VINF_SUCCESS)
2327 return rcStrict;
2328# else
2329 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2330 AssertRCReturn(rc, rc);
2331 RT_NOREF(pGVCpu);
2332# endif
2333
2334 Log4(("IOExit/%u: %04x:%08RX64/%s: %s%s %#x LB %u (emulating)\n",
2335 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2336 pMsg->AccessInfo.RepPrefix ? "REP " : "",
2337 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? "OUTS" : "INS",
2338 pMsg->PortNumber, pMsg->AccessInfo.AccessSize ));
2339 rcStrict = IEMExecOne(pVCpu);
2340 }
2341 if (IOM_SUCCESS(rcStrict))
2342 {
2343 /*
2344 * Do debug checks.
2345 */
2346 if ( pMsg->Header.ExecutionState.DebugActive /** @todo Microsoft: Does DebugActive only reflect DR7? */
2347 || (pMsg->Header.Rflags & X86_EFL_TF)
2348 || DBGFBpIsHwIoArmed(pVM) )
2349 {
2350 /** @todo Debugging. */
2351 }
2352 }
2353 return rcStrict;
2354 }
2355
2356 /*
2357 * Frequent exit or something needing probing.
2358 * Get state and call EMHistoryExec.
2359 */
2360 nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
2361 if (!pMsg->AccessInfo.StringOp)
2362 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RAX;
2363 else
2364 {
2365 pVCpu->cpum.GstCtx.fExtrn &= ~( CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDI | CPUMCTX_EXTRN_RSI
2366 | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2367 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.ds, pMsg->DsSegment);
2368 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.es, pMsg->EsSegment);
2369 pVCpu->cpum.GstCtx.rcx = pMsg->Rcx;
2370 pVCpu->cpum.GstCtx.rdi = pMsg->Rdi;
2371 pVCpu->cpum.GstCtx.rsi = pMsg->Rsi;
2372 }
2373 pVCpu->cpum.GstCtx.rax = pMsg->Rax;
2374
2375# ifdef IN_RING0
2376 rcStrict = nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "IOExit");
2377 if (rcStrict != VINF_SUCCESS)
2378 return rcStrict;
2379# else
2380 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2381 AssertRCReturn(rc, rc);
2382 RT_NOREF(pGVCpu);
2383# endif
2384
2385 Log4(("IOExit/%u: %04x:%08RX64/%s: %s%s%s %#x LB %u -> EMHistoryExec\n",
2386 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2387 pMsg->AccessInfo.RepPrefix ? "REP " : "",
2388 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? "OUT" : "IN",
2389 pMsg->AccessInfo.StringOp ? "S" : "",
2390 pMsg->PortNumber, pMsg->AccessInfo.AccessSize));
2391 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2392 Log4(("IOExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2393 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2394 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2395 return rcStrict;
2396}
2397#elif defined(IN_RING3)
2398/**
2399 * Deals with I/O port access exits (WHvRunVpExitReasonX64IoPortAccess).
2400 *
2401 * @returns Strict VBox status code.
2402 * @param pVM The cross context VM structure.
2403 * @param pVCpu The cross context per CPU structure.
2404 * @param pExit The VM exit information to handle.
2405 * @sa nemHCWinHandleMessageIoPort
2406 */
2407NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExitIoPort(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2408{
2409 Assert( pExit->IoPortAccess.AccessInfo.AccessSize == 1
2410 || pExit->IoPortAccess.AccessInfo.AccessSize == 2
2411 || pExit->IoPortAccess.AccessInfo.AccessSize == 4);
2412
2413 /*
2414 * Whatever we do, we must clear pending event injection upon resume.
2415 */
2416 if (pExit->VpContext.ExecutionState.InterruptionPending)
2417 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
2418
2419 /*
2420 * Add history first to avoid two paths doing EMHistoryExec calls.
2421 */
2422 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2423 !pExit->IoPortAccess.AccessInfo.StringOp
2424 ? ( pExit->IoPortAccess.AccessInfo.IsWrite
2425 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_WRITE)
2426 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_READ))
2427 : ( pExit->IoPortAccess.AccessInfo.IsWrite
2428 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_WRITE)
2429 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_READ)),
2430 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
2431 if (!pExitRec)
2432 {
2433 VBOXSTRICTRC rcStrict;
2434 if (!pExit->IoPortAccess.AccessInfo.StringOp)
2435 {
2436 /*
2437 * Simple port I/O.
2438 */
2439 static uint32_t const s_fAndMask[8] =
2440 { UINT32_MAX, UINT32_C(0xff), UINT32_C(0xffff), UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX };
2441 uint32_t const fAndMask = s_fAndMask[pExit->IoPortAccess.AccessInfo.AccessSize];
2442 if (pExit->IoPortAccess.AccessInfo.IsWrite)
2443 {
2444 rcStrict = IOMIOPortWrite(pVM, pVCpu, pExit->IoPortAccess.PortNumber,
2445 (uint32_t)pExit->IoPortAccess.Rax & fAndMask,
2446 pExit->IoPortAccess.AccessInfo.AccessSize);
2447 Log4(("IOExit/%u: %04x:%08RX64/%s: OUT %#x, %#x LB %u rcStrict=%Rrc\n",
2448 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2449 pExit->IoPortAccess.PortNumber, (uint32_t)pExit->IoPortAccess.Rax & fAndMask,
2450 pExit->IoPortAccess.AccessInfo.AccessSize, VBOXSTRICTRC_VAL(rcStrict) ));
2451 if (IOM_SUCCESS(rcStrict))
2452 {
2453 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2454 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, &pExit->VpContext, 1);
2455 }
2456 }
2457 else
2458 {
2459 uint32_t uValue = 0;
2460 rcStrict = IOMIOPortRead(pVM, pVCpu, pExit->IoPortAccess.PortNumber, &uValue,
2461 pExit->IoPortAccess.AccessInfo.AccessSize);
2462 Log4(("IOExit/%u: %04x:%08RX64/%s: IN %#x LB %u -> %#x, rcStrict=%Rrc\n",
2463 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2464 pExit->IoPortAccess.PortNumber, pExit->IoPortAccess.AccessInfo.AccessSize, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2465 if (IOM_SUCCESS(rcStrict))
2466 {
2467 if (pExit->IoPortAccess.AccessInfo.AccessSize != 4)
2468 pVCpu->cpum.GstCtx.rax = (pExit->IoPortAccess.Rax & ~(uint64_t)fAndMask) | (uValue & fAndMask);
2469 else
2470 pVCpu->cpum.GstCtx.rax = uValue;
2471 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RAX;
2472 Log4(("IOExit/%u: RAX %#RX64 -> %#RX64\n", pVCpu->idCpu, pExit->IoPortAccess.Rax, pVCpu->cpum.GstCtx.rax));
2473 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2474 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, &pExit->VpContext, 1);
2475 }
2476 }
2477 }
2478 else
2479 {
2480 /*
2481 * String port I/O.
2482 */
2483 /** @todo Someone at Microsoft please explain how we can get the address mode
2484 * from the IoPortAccess.VpContext. CS.Attributes is only sufficient for
2485 * getting the default mode, it can always be overridden by a prefix. This
2486 * forces us to interpret the instruction from opcodes, which is suboptimal.
2487 * Both AMD-V and VT-x include the address size in the exit info, at least on
2488 * CPUs that are reasonably new.
2489 *
2490 * Of course, it's possible this is simply undocumented and we just need to do some
2491 * experiments to figure out how it's communicated. Alternatively, we can scan
2492 * the opcode bytes for possible evil prefixes.
2493 */
2494 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2495 pVCpu->cpum.GstCtx.fExtrn &= ~( CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDI | CPUMCTX_EXTRN_RSI
2496 | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2497 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.ds, pExit->IoPortAccess.Ds);
2498 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.es, pExit->IoPortAccess.Es);
2499 pVCpu->cpum.GstCtx.rax = pExit->IoPortAccess.Rax;
2500 pVCpu->cpum.GstCtx.rcx = pExit->IoPortAccess.Rcx;
2501 pVCpu->cpum.GstCtx.rdi = pExit->IoPortAccess.Rdi;
2502 pVCpu->cpum.GstCtx.rsi = pExit->IoPortAccess.Rsi;
2503 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2504 AssertRCReturn(rc, rc);
2505
2506 Log4(("IOExit/%u: %04x:%08RX64/%s: %s%s %#x LB %u (emulating)\n",
2507 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2508 pExit->IoPortAccess.AccessInfo.RepPrefix ? "REP " : "",
2509 pExit->IoPortAccess.AccessInfo.IsWrite ? "OUTS" : "INS",
2510 pExit->IoPortAccess.PortNumber, pExit->IoPortAccess.AccessInfo.AccessSize ));
2511 rcStrict = IEMExecOne(pVCpu);
2512 }
2513 if (IOM_SUCCESS(rcStrict))
2514 {
2515 /*
2516 * Do debug checks.
2517 */
2518 if ( pExit->VpContext.ExecutionState.DebugActive /** @todo Microsoft: Does DebugActive only reflect DR7? */
2519 || (pExit->VpContext.Rflags & X86_EFL_TF)
2520 || DBGFBpIsHwIoArmed(pVM) )
2521 {
2522 /** @todo Debugging. */
2523 }
2524 }
2525 return rcStrict;
2526 }
2527
2528 /*
2529 * Frequent exit or something needing probing.
2530 * Get state and call EMHistoryExec.
2531 */
2532 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2533 if (!pExit->IoPortAccess.AccessInfo.StringOp)
2534 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_RAX;
2535 else
2536 {
2537 pVCpu->cpum.GstCtx.fExtrn &= ~( CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDI | CPUMCTX_EXTRN_RSI
2538 | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2539 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.ds, pExit->IoPortAccess.Ds);
2540 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.es, pExit->IoPortAccess.Es);
2541 pVCpu->cpum.GstCtx.rcx = pExit->IoPortAccess.Rcx;
2542 pVCpu->cpum.GstCtx.rdi = pExit->IoPortAccess.Rdi;
2543 pVCpu->cpum.GstCtx.rsi = pExit->IoPortAccess.Rsi;
2544 }
2545 pVCpu->cpum.GstCtx.rax = pExit->IoPortAccess.Rax;
2546 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2547 AssertRCReturn(rc, rc);
2548 Log4(("IOExit/%u: %04x:%08RX64/%s: %s%s%s %#x LB %u -> EMHistoryExec\n",
2549 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2550 pExit->IoPortAccess.AccessInfo.RepPrefix ? "REP " : "",
2551 pExit->IoPortAccess.AccessInfo.IsWrite ? "OUT" : "IN",
2552 pExit->IoPortAccess.AccessInfo.StringOp ? "S" : "",
2553 pExit->IoPortAccess.PortNumber, pExit->IoPortAccess.AccessInfo.AccessSize));
2554 VBOXSTRICTRC rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2555 Log4(("IOExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2556 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2557 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2558 return rcStrict;
2559}
2560#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
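
/*
 * Editorial worked example for the s_fAndMask merge used by both I/O port
 * handlers above: a 1-byte IN with RAX=0x1122334455667788 and a device
 * returning 0x9a yields
 *
 *      rax = (0x1122334455667788 & ~0xffULL) | (0x9a & 0xff)
 *          = 0x112233445566779a
 *
 * i.e. only AL is replaced, while a 4-byte IN stores the zero-extended 32-bit
 * value into the whole RAX, matching x86 register semantics.
 */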


#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
/**
 * Deals with interrupt window message.
 *
 * @returns Strict VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context per CPU structure.
 * @param   pMsg            The message.
 * @param   pGVCpu          The global (ring-0) per CPU structure (NULL in r3).
 * @sa      nemR3WinHandleExitInterruptWindow
 */
NEM_TMPL_STATIC VBOXSTRICTRC
nemHCWinHandleMessageInterruptWindow(PVM pVM, PVMCPU pVCpu, HV_X64_INTERRUPT_WINDOW_MESSAGE const *pMsg, PGVMCPU pGVCpu)
{
    /*
     * Assert message sanity.
     */
    Assert(   pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_EXECUTE
           || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ   // READ & WRITE are probably not used here
           || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE);
    AssertMsg(pMsg->Type == HvX64PendingInterrupt || pMsg->Type == HvX64PendingNmi, ("%#x\n", pMsg->Type));

    /*
     * Just copy the state we've got and handle it in the loop for now.
     */
    EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_INTTERRUPT_WINDOW),
                     pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());

    nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
    Log4(("IntWinExit/%u: %04x:%08RX64/%s: %u IF=%d InterruptShadow=%d\n",
          pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
          pMsg->Type, RT_BOOL(pMsg->Header.Rflags & X86_EFL_IF), pMsg->Header.ExecutionState.InterruptShadow));

    /** @todo call nemHCWinHandleInterruptFF */
    RT_NOREF(pVM, pGVCpu);
    return VINF_SUCCESS;
}
#elif defined(IN_RING3)
/**
 * Deals with interrupt window exits (WHvRunVpExitReasonX64InterruptWindow).
 *
 * @returns Strict VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context per CPU structure.
 * @param   pExit           The VM exit information to handle.
 * @sa      nemHCWinHandleMessageInterruptWindow
 */
NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExitInterruptWindow(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
{
    /*
     * Assert message sanity.
     */
    AssertMsg(   pExit->InterruptWindow.DeliverableType == WHvX64PendingInterrupt
              || pExit->InterruptWindow.DeliverableType == WHvX64PendingNmi,
              ("%#x\n", pExit->InterruptWindow.DeliverableType));

    /*
     * Just copy the state we've got and handle it in the loop for now.
     */
    EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_INTTERRUPT_WINDOW),
                     pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());

    nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
    Log4(("IntWinExit/%u: %04x:%08RX64/%s: %u IF=%d InterruptShadow=%d\n",
          pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
          pExit->InterruptWindow.DeliverableType, RT_BOOL(pExit->VpContext.Rflags & X86_EFL_IF),
          pExit->VpContext.ExecutionState.InterruptShadow));

    /** @todo call nemHCWinHandleInterruptFF */
    RT_NOREF(pVM);
    return VINF_SUCCESS;
}
#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
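
/*
 * Note: the interrupt window handlers above only record the exit and copy back
 * the header state; the actual delivery of the pending interrupt/NMI is left
 * to the run loop (see the nemHCWinHandleInterruptFF todo above).
 */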


#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
/**
 * Deals with CPUID intercept message.
 *
 * @returns Strict VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context per CPU structure.
 * @param   pMsg            The message.
 * @param   pGVCpu          The global (ring-0) per CPU structure (NULL in r3).
 * @sa      nemR3WinHandleExitCpuId
 */
NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageCpuId(PVM pVM, PVMCPU pVCpu, HV_X64_CPUID_INTERCEPT_MESSAGE const *pMsg,
                                                        PGVMCPU pGVCpu)
{
    /* Check message register value sanity. */
    NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, pGVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRip, pMsg->Header.Rip);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRax, pMsg->Rax);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRcx, pMsg->Rcx);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRdx, pMsg->Rdx);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRbx, pMsg->Rbx);

    /* Do exit history. */
    PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_CPUID),
                                            pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
    if (!pExitRec)
    {
        /*
         * Soak up state and execute the instruction.
         *
         * Note! If this grows slightly more complicated, combine into an IEMExecDecodedCpuId
         *       function and make everyone use it.
         */
        /** @todo Combine implementations into IEMExecDecodedCpuId as this will
         *        only get weirder with nested VT-x and AMD-V support. */
        nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);

        /* Copy in the low register values (top is always cleared). */
        pVCpu->cpum.GstCtx.rax = (uint32_t)pMsg->Rax;
        pVCpu->cpum.GstCtx.rcx = (uint32_t)pMsg->Rcx;
        pVCpu->cpum.GstCtx.rdx = (uint32_t)pMsg->Rdx;
        pVCpu->cpum.GstCtx.rbx = (uint32_t)pMsg->Rbx;
        pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);

        /* Get the correct values. */
        CPUMGetGuestCpuId(pVCpu, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx,
                          &pVCpu->cpum.GstCtx.eax, &pVCpu->cpum.GstCtx.ebx, &pVCpu->cpum.GstCtx.ecx, &pVCpu->cpum.GstCtx.edx);

        Log4(("CpuIdExit/%u: %04x:%08RX64/%s: rax=%08RX64 / rcx=%08RX64 / rdx=%08RX64 / rbx=%08RX64 -> %08RX32 / %08RX32 / %08RX32 / %08RX32 (hv: %08RX64 / %08RX64 / %08RX64 / %08RX64)\n",
              pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
              pMsg->Rax, pMsg->Rcx, pMsg->Rdx, pMsg->Rbx,
              pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.ebx,
              pMsg->DefaultResultRax, pMsg->DefaultResultRcx, pMsg->DefaultResultRdx, pMsg->DefaultResultRbx));

        /* Move RIP and we're done. */
        nemHCWinAdvanceGuestRipAndClearRF(pVCpu, &pMsg->Header, 2);

        return VINF_SUCCESS;
    }

    /*
     * Frequent exit or something needing probing.
     * Get state and call EMHistoryExec.
     */
    nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
    pVCpu->cpum.GstCtx.rax = pMsg->Rax;
    pVCpu->cpum.GstCtx.rcx = pMsg->Rcx;
    pVCpu->cpum.GstCtx.rdx = pMsg->Rdx;
    pVCpu->cpum.GstCtx.rbx = pMsg->Rbx;
    pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);
    Log4(("CpuIdExit/%u: %04x:%08RX64/%s: rax=%08RX64 / rcx=%08RX64 / rdx=%08RX64 / rbx=%08RX64 (hv: %08RX64 / %08RX64 / %08RX64 / %08RX64) ==> EMHistoryExec\n",
          pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
          pMsg->Rax, pMsg->Rcx, pMsg->Rdx, pMsg->Rbx,
          pMsg->DefaultResultRax, pMsg->DefaultResultRcx, pMsg->DefaultResultRdx, pMsg->DefaultResultRbx));
# ifdef IN_RING0
    VBOXSTRICTRC rcStrict = nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "CpuIdExit");
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    RT_NOREF(pVM);
# else
    int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
    AssertRCReturn(rc, rc);
    RT_NOREF(pGVCpu);
# endif
    VBOXSTRICTRC rcStrictExec = EMHistoryExec(pVCpu, pExitRec, 0);
    Log4(("CpuIdExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
          pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
          VBOXSTRICTRC_VAL(rcStrictExec), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
    return rcStrictExec;
}
#elif defined(IN_RING3)
/**
 * Deals with CPUID exits (WHvRunVpExitReasonX64Cpuid).
 *
 * @returns Strict VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context per CPU structure.
 * @param   pExit           The VM exit information to handle.
 * @sa      nemHCWinHandleMessageCpuId
 */
NEM_TMPL_STATIC VBOXSTRICTRC
nemR3WinHandleExitCpuId(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
{
    PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_CPUID),
                                            pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
    if (!pExitRec)
    {
        /*
         * Soak up state and execute the instruction.
         *
         * Note! If this grows slightly more complicated, combine into an IEMExecDecodedCpuId
         *       function and make everyone use it.
         */
        /** @todo Combine implementations into IEMExecDecodedCpuId as this will
         *        only get weirder with nested VT-x and AMD-V support. */
        nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);

        /* Copy in the low register values (top is always cleared). */
        pVCpu->cpum.GstCtx.rax = (uint32_t)pExit->CpuidAccess.Rax;
        pVCpu->cpum.GstCtx.rcx = (uint32_t)pExit->CpuidAccess.Rcx;
        pVCpu->cpum.GstCtx.rdx = (uint32_t)pExit->CpuidAccess.Rdx;
        pVCpu->cpum.GstCtx.rbx = (uint32_t)pExit->CpuidAccess.Rbx;
        pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);

        /* Get the correct values. */
        CPUMGetGuestCpuId(pVCpu, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx,
                          &pVCpu->cpum.GstCtx.eax, &pVCpu->cpum.GstCtx.ebx, &pVCpu->cpum.GstCtx.ecx, &pVCpu->cpum.GstCtx.edx);

        Log4(("CpuIdExit/%u: %04x:%08RX64/%s: rax=%08RX64 / rcx=%08RX64 / rdx=%08RX64 / rbx=%08RX64 -> %08RX32 / %08RX32 / %08RX32 / %08RX32 (hv: %08RX64 / %08RX64 / %08RX64 / %08RX64)\n",
              pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
              pExit->CpuidAccess.Rax, pExit->CpuidAccess.Rcx, pExit->CpuidAccess.Rdx, pExit->CpuidAccess.Rbx,
              pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.ebx,
              pExit->CpuidAccess.DefaultResultRax, pExit->CpuidAccess.DefaultResultRcx, pExit->CpuidAccess.DefaultResultRdx, pExit->CpuidAccess.DefaultResultRbx));

        /* Move RIP and we're done. */
        nemR3WinAdvanceGuestRipAndClearRF(pVCpu, &pExit->VpContext, 2);

        RT_NOREF_PV(pVM);
        return VINF_SUCCESS;
    }

    /*
     * Frequent exit or something needing probing.
     * Get state and call EMHistoryExec.
     */
    nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
    pVCpu->cpum.GstCtx.rax = pExit->CpuidAccess.Rax;
    pVCpu->cpum.GstCtx.rcx = pExit->CpuidAccess.Rcx;
    pVCpu->cpum.GstCtx.rdx = pExit->CpuidAccess.Rdx;
    pVCpu->cpum.GstCtx.rbx = pExit->CpuidAccess.Rbx;
    pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);
    Log4(("CpuIdExit/%u: %04x:%08RX64/%s: rax=%08RX64 / rcx=%08RX64 / rdx=%08RX64 / rbx=%08RX64 (hv: %08RX64 / %08RX64 / %08RX64 / %08RX64) ==> EMHistoryExec\n",
          pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
          pExit->CpuidAccess.Rax, pExit->CpuidAccess.Rcx, pExit->CpuidAccess.Rdx, pExit->CpuidAccess.Rbx,
          pExit->CpuidAccess.DefaultResultRax, pExit->CpuidAccess.DefaultResultRcx, pExit->CpuidAccess.DefaultResultRdx, pExit->CpuidAccess.DefaultResultRbx));
    int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
    AssertRCReturn(rc, rc);
    VBOXSTRICTRC rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
    Log4(("CpuIdExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
          pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
          VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
    return rcStrict;
}
#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
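
/*
 * Illustrative sketch (not part of the template): the register convention the
 * CPUID handlers above implement.  The guest's EAX selects the leaf and ECX
 * the sub-leaf; the result comes back in EAX/EBX/ECX/EDX with the upper
 * halves of RAX/RCX/RDX/RBX cleared, and RIP is advanced by 2 because CPUID
 * encodes as the two byte opcode 0F A2.  The hypothetical host-side probe
 * below uses the MSVC __cpuidex intrinsic and is compiled out.
 */
#if 0
# include <intrin.h>
static void nemWinDemoCpuidConvention(void)
{
    int aInfo[4] = { 0, 0, 0, 0 }; /* EAX, EBX, ECX, EDX */
    __cpuidex(aInfo, 0 /*leaf (EAX)*/, 0 /*sub-leaf (ECX)*/);
    /* aInfo[0] is now the highest standard leaf; aInfo[1..3] hold the vendor
       string (in EBX, EDX, ECX order). */
}
#endif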


#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
/**
 * Deals with MSR intercept message.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context per CPU structure.
 * @param   pMsg            The message.
 * @param   pGVCpu          The global (ring-0) per CPU structure (NULL in r3).
 * @sa      nemR3WinHandleExitMsr
 */
NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageMsr(PVMCPU pVCpu, HV_X64_MSR_INTERCEPT_MESSAGE const *pMsg, PGVMCPU pGVCpu)
{
    /*
     * A wee bit of sanity first.
     */
    Assert(   pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
           || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE);
    NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, pGVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRip, pMsg->Header.Rip);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRax, pMsg->Rax);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRdx, pMsg->Rdx);

    /*
     * Check CPL as that's common to both RDMSR and WRMSR.
     */
    VBOXSTRICTRC rcStrict;
    if (pMsg->Header.ExecutionState.Cpl == 0)
    {
        /*
         * Get all the MSR state.  Since we're getting EFER, we also need to
         * get CR0, CR4 and CR3.
         */
        PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
                                                pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
                                                ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE)
                                                : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ),
                                                pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());

        nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
        rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu,
                                                     (!pExitRec ? 0 : IEM_CPUMCTX_EXTRN_MUST_MASK)
                                                     | CPUMCTX_EXTRN_ALL_MSRS | CPUMCTX_EXTRN_CR0
                                                     | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4,
                                                     "MSRs");
        if (rcStrict == VINF_SUCCESS)
        {
            if (!pExitRec)
            {
                /*
                 * Handle writes.
                 */
                if (pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
                {
                    rcStrict = CPUMSetGuestMsr(pVCpu, pMsg->MsrNumber, RT_MAKE_U64((uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx));
                    Log4(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc\n",
                          pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
                          pMsg->MsrNumber, (uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx, VBOXSTRICTRC_VAL(rcStrict) ));
                    if (rcStrict == VINF_SUCCESS)
                    {
                        nemHCWinAdvanceGuestRipAndClearRF(pVCpu, &pMsg->Header, 2);
                        return VINF_SUCCESS;
                    }
# ifndef IN_RING3
                    /* move to ring-3 and handle the trap/whatever there, as we want to LogRel this. */
                    if (rcStrict == VERR_CPUM_RAISE_GP_0)
                        rcStrict = VINF_CPUM_R3_MSR_WRITE;
                    return rcStrict;
# else
                    LogRel(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc!\n",
                            pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
                            pMsg->MsrNumber, (uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx, VBOXSTRICTRC_VAL(rcStrict) ));
# endif
                }
                /*
                 * Handle reads.
                 */
                else
                {
                    uint64_t uValue = 0;
                    rcStrict = CPUMQueryGuestMsr(pVCpu, pMsg->MsrNumber, &uValue);
                    Log4(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n",
                          pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
                          pMsg->MsrNumber, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
                    if (rcStrict == VINF_SUCCESS)
                    {
                        pVCpu->cpum.GstCtx.rax = (uint32_t)uValue;
                        pVCpu->cpum.GstCtx.rdx = uValue >> 32;
                        pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX);
                        nemHCWinAdvanceGuestRipAndClearRF(pVCpu, &pMsg->Header, 2);
                        return VINF_SUCCESS;
                    }
# ifndef IN_RING3
                    /* move to ring-3 and handle the trap/whatever there, as we want to LogRel this. */
                    if (rcStrict == VERR_CPUM_RAISE_GP_0)
                        rcStrict = VINF_CPUM_R3_MSR_READ;
                    return rcStrict;
# else
                    LogRel(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n",
                            pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
                            pMsg->MsrNumber, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
# endif
                }
            }
            else
            {
                /*
                 * Handle frequent exit or something needing probing.
                 */
                Log4(("MsrExit/%u: %04x:%08RX64/%s: %sMSR %#08x\n",
                      pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
                      pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? "WR" : "RD", pMsg->MsrNumber));
                rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
                Log4(("MsrExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
                      pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
                      VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
                return rcStrict;
            }
        }
        else
        {
            LogRel(("MsrExit/%u: %04x:%08RX64/%s: %sMSR %08x -> %Rrc - msr state import\n",
                    pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
                    pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? "WR" : "RD",
                    pMsg->MsrNumber, VBOXSTRICTRC_VAL(rcStrict) ));
            return rcStrict;
        }
    }
    else if (pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
        Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); WRMSR %08x, %08x:%08x\n",
              pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
              pMsg->Header.ExecutionState.Cpl, pMsg->MsrNumber, (uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx ));
    else
        Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); RDMSR %08x\n",
              pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
              pMsg->Header.ExecutionState.Cpl, pMsg->MsrNumber));

    /*
     * If we get down here, we're supposed to #GP(0).
     */
    rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL_MSRS, "MSR");
    if (rcStrict == VINF_SUCCESS)
    {
        rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_GP, TRPM_TRAP, 0, 0, 0);
        if (rcStrict == VINF_IEM_RAISED_XCPT)
            rcStrict = VINF_SUCCESS;
        else if (rcStrict != VINF_SUCCESS)
            Log4(("MsrExit/%u: Injecting #GP(0) failed: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict) ));
    }
    return rcStrict;
}
#elif defined(IN_RING3)
/**
 * Deals with MSR access exits (WHvRunVpExitReasonX64MsrAccess).
 *
 * @returns Strict VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context per CPU structure.
 * @param   pExit           The VM exit information to handle.
 * @sa      nemHCWinHandleMessageMsr
 */
NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExitMsr(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
{
    /*
     * Check CPL as that's common to both RDMSR and WRMSR.
     */
    VBOXSTRICTRC rcStrict;
    if (pExit->VpContext.ExecutionState.Cpl == 0)
    {
        /*
         * Get all the MSR state.  Since we're getting EFER, we also need to
         * get CR0, CR4 and CR3.
         */
        PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
                                                pExit->MsrAccess.AccessInfo.IsWrite
                                                ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE)
                                                : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ),
                                                pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
        nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
        rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL,
                                                     (!pExitRec ? 0 : IEM_CPUMCTX_EXTRN_MUST_MASK)
                                                     | CPUMCTX_EXTRN_ALL_MSRS | CPUMCTX_EXTRN_CR0
                                                     | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4,
                                                     "MSRs");
        if (rcStrict == VINF_SUCCESS)
        {
            if (!pExitRec)
            {
                /*
                 * Handle writes.
                 */
                if (pExit->MsrAccess.AccessInfo.IsWrite)
                {
                    rcStrict = CPUMSetGuestMsr(pVCpu, pExit->MsrAccess.MsrNumber,
                                               RT_MAKE_U64((uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx));
                    Log4(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
                          pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->MsrAccess.MsrNumber,
                          (uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx, VBOXSTRICTRC_VAL(rcStrict) ));
                    if (rcStrict == VINF_SUCCESS)
                    {
                        nemR3WinAdvanceGuestRipAndClearRF(pVCpu, &pExit->VpContext, 2);
                        return VINF_SUCCESS;
                    }
                    LogRel(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc!\n", pVCpu->idCpu,
                            pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
                            pExit->MsrAccess.MsrNumber, (uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx,
                            VBOXSTRICTRC_VAL(rcStrict) ));
                }
                /*
                 * Handle reads.
                 */
                else
                {
                    uint64_t uValue = 0;
                    rcStrict = CPUMQueryGuestMsr(pVCpu, pExit->MsrAccess.MsrNumber, &uValue);
                    Log4(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n", pVCpu->idCpu,
                          pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
                          pExit->MsrAccess.MsrNumber, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
                    if (rcStrict == VINF_SUCCESS)
                    {
                        pVCpu->cpum.GstCtx.rax = (uint32_t)uValue;
                        pVCpu->cpum.GstCtx.rdx = uValue >> 32;
                        pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX);
                        nemR3WinAdvanceGuestRipAndClearRF(pVCpu, &pExit->VpContext, 2);
                        return VINF_SUCCESS;
                    }
                    LogRel(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
                            pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->MsrAccess.MsrNumber,
                            uValue, VBOXSTRICTRC_VAL(rcStrict) ));
                }
            }
            else
            {
                /*
                 * Handle frequent exit or something needing probing.
                 */
                Log4(("MsrExit/%u: %04x:%08RX64/%s: %sMSR %#08x\n",
                      pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
                      pExit->MsrAccess.AccessInfo.IsWrite ? "WR" : "RD", pExit->MsrAccess.MsrNumber));
                rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
                Log4(("MsrExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
                      pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
                      VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
                return rcStrict;
            }
        }
        else
        {
            LogRel(("MsrExit/%u: %04x:%08RX64/%s: %sMSR %08x -> %Rrc - msr state import\n",
                    pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
                    pExit->MsrAccess.AccessInfo.IsWrite ? "WR" : "RD", pExit->MsrAccess.MsrNumber, VBOXSTRICTRC_VAL(rcStrict) ));
            return rcStrict;
        }
    }
    else if (pExit->MsrAccess.AccessInfo.IsWrite)
        Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); WRMSR %08x, %08x:%08x\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
              pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.ExecutionState.Cpl,
              pExit->MsrAccess.MsrNumber, (uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx ));
    else
        Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); RDMSR %08x\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
              pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.ExecutionState.Cpl,
              pExit->MsrAccess.MsrNumber));

    /*
     * If we get down here, we're supposed to #GP(0).
     */
    rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL,
                                                 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL_MSRS, "MSR");
    if (rcStrict == VINF_SUCCESS)
    {
        rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_GP, TRPM_TRAP, 0, 0, 0);
        if (rcStrict == VINF_IEM_RAISED_XCPT)
            rcStrict = VINF_SUCCESS;
        else if (rcStrict != VINF_SUCCESS)
            Log4(("MsrExit/%u: Injecting #GP(0) failed: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict) ));
    }

    RT_NOREF_PV(pVM);
    return rcStrict;
}
#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
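
/*
 * Illustrative sketch (not part of the template): the EDX:EAX split convention
 * the MSR handlers above implement.  WRMSR takes the 64-bit value from
 * EDX:EAX, and RDMSR returns it there with the upper halves of RAX/RDX
 * cleared.  The demo values are hypothetical; the block is compiled out.
 */
#if 0
static void nemWinDemoMsrEdxEaxSplit(void)
{
    uint32_t const uEax   = UINT32_C(0xdeadbeef);    /* hypothetical low half */
    uint32_t const uEdx   = UINT32_C(0x00000001);    /* hypothetical high half */
    uint64_t const uValue = RT_MAKE_U64(uEax, uEdx); /* what CPUMSetGuestMsr is given on WRMSR */
    Assert(uValue == UINT64_C(0x00000001deadbeef));
    Assert((uint32_t)uValue         == uEax);        /* what goes back into RAX on RDMSR */
    Assert((uint32_t)(uValue >> 32) == uEdx);        /* what goes back into RDX on RDMSR */
}
#endif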


/**
 * Worker for nemHCWinHandleMessageException & nemR3WinHandleExitException that
 * checks if the given opcodes are of interest at all.
 *
 * @returns true if interesting, false if not.
 * @param   cbOpcodes           Number of opcode bytes available.
 * @param   pbOpcodes           The opcode bytes.
 * @param   f64BitMode          Whether we're in 64-bit mode.
 */
DECLINLINE(bool) nemHcWinIsInterestingUndefinedOpcode(uint8_t cbOpcodes, uint8_t const *pbOpcodes, bool f64BitMode)
{
    /*
     * Currently only interested in VMCALL and VMMCALL.
     */
    while (cbOpcodes >= 3)
    {
        switch (pbOpcodes[0])
        {
            case 0x0f:
                switch (pbOpcodes[1])
                {
                    case 0x01:
                        switch (pbOpcodes[2])
                        {
                            case 0xc1: /* 0f 01 c1  VMCALL */
                                return true;
                            case 0xd9: /* 0f 01 d9  VMMCALL */
                                return true;
                            default:
                                break;
                        }
                        break;
                }
                break;

            default:
                return false;

            /* prefixes */
            case 0x40: case 0x41: case 0x42: case 0x43: case 0x44: case 0x45: case 0x46: case 0x47:
            case 0x48: case 0x49: case 0x4a: case 0x4b: case 0x4c: case 0x4d: case 0x4e: case 0x4f:
                if (!f64BitMode)
                    return false;
                RT_FALL_THRU();
            case X86_OP_PRF_CS:
            case X86_OP_PRF_SS:
            case X86_OP_PRF_DS:
            case X86_OP_PRF_ES:
            case X86_OP_PRF_FS:
            case X86_OP_PRF_GS:
            case X86_OP_PRF_SIZE_OP:
            case X86_OP_PRF_SIZE_ADDR:
            case X86_OP_PRF_LOCK:
            case X86_OP_PRF_REPZ:
            case X86_OP_PRF_REPNZ:
                cbOpcodes--;
                pbOpcodes++;
                continue;
        }
        break;
    }
    return false;
}
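
/*
 * Illustrative sketch (not part of the template): how the scanner above
 * classifies a few byte sequences.  The buffers are hypothetical examples
 * documenting the expected behaviour; the block is compiled out.
 */
#if 0
static void nemHcWinDemoInterestingUndefinedOpcode(void)
{
    static uint8_t const s_abVmCall[]  = { 0x0f, 0x01, 0xc1 };       /* VMCALL - interesting. */
    static uint8_t const s_abVmmCall[] = { 0xf3, 0x0f, 0x01, 0xd9 }; /* REPZ prefix + VMMCALL - prefix skipped, still interesting. */
    static uint8_t const s_abRexUd2[]  = { 0x48, 0x0f, 0x0b };       /* REX.W + UD2 - not interesting. */
    Assert( nemHcWinIsInterestingUndefinedOpcode(sizeof(s_abVmCall),  s_abVmCall,  false /*f64BitMode*/));
    Assert( nemHcWinIsInterestingUndefinedOpcode(sizeof(s_abVmmCall), s_abVmmCall, false /*f64BitMode*/));
    Assert(!nemHcWinIsInterestingUndefinedOpcode(sizeof(s_abRexUd2),  s_abRexUd2,  true  /*f64BitMode*/));
}
#endif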


#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
/**
 * Copies state included in an exception intercept message.
 *
 * @param   pVCpu           The cross context per CPU structure.
 * @param   pMsg            The message.
 * @param   fClearXcpt      Clear pending exception.
 */
DECLINLINE(void)
nemHCWinCopyStateFromExceptionMessage(PVMCPU pVCpu, HV_X64_EXCEPTION_INTERCEPT_MESSAGE const *pMsg, bool fClearXcpt)
{
    nemHCWinCopyStateFromX64Header(pVCpu, &pMsg->Header);
    pVCpu->cpum.GstCtx.fExtrn &= ~(  CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_DS
                                   | (fClearXcpt ? CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT : 0) );
    pVCpu->cpum.GstCtx.rax = pMsg->Rax;
    pVCpu->cpum.GstCtx.rcx = pMsg->Rcx;
    pVCpu->cpum.GstCtx.rdx = pMsg->Rdx;
    pVCpu->cpum.GstCtx.rbx = pMsg->Rbx;
    pVCpu->cpum.GstCtx.rsp = pMsg->Rsp;
    pVCpu->cpum.GstCtx.rbp = pMsg->Rbp;
    pVCpu->cpum.GstCtx.rsi = pMsg->Rsi;
    pVCpu->cpum.GstCtx.rdi = pMsg->Rdi;
    pVCpu->cpum.GstCtx.r8  = pMsg->R8;
    pVCpu->cpum.GstCtx.r9  = pMsg->R9;
    pVCpu->cpum.GstCtx.r10 = pMsg->R10;
    pVCpu->cpum.GstCtx.r11 = pMsg->R11;
    pVCpu->cpum.GstCtx.r12 = pMsg->R12;
    pVCpu->cpum.GstCtx.r13 = pMsg->R13;
    pVCpu->cpum.GstCtx.r14 = pMsg->R14;
    pVCpu->cpum.GstCtx.r15 = pMsg->R15;
    NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.ds, pMsg->DsSegment);
    NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.ss, pMsg->SsSegment);
}
#elif defined(IN_RING3)
/**
 * Copies state included in an exception intercept exit.
 *
 * @param   pVCpu           The cross context per CPU structure.
 * @param   pExit           The VM exit information.
 * @param   fClearXcpt      Clear pending exception.
 */
DECLINLINE(void) nemR3WinCopyStateFromExceptionMessage(PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, bool fClearXcpt)
{
    nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
    if (fClearXcpt)
        pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
}
#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
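
/*
 * Note on the fExtrn updates in the helpers above: a set CPUMCTX_EXTRN_* bit
 * means the corresponding register state is still external (i.e. left with
 * Hyper-V), so after copying a register into CPUMCTX its bit is cleared to
 * mark the value as present.
 */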


#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
/**
 * Deals with exception intercept message (HvMessageTypeX64ExceptionIntercept).
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context per CPU structure.
 * @param   pMsg            The message.
 * @param   pGVCpu          The global (ring-0) per CPU structure (NULL in r3).
 * @sa      nemR3WinHandleExitException
 */
NEM_TMPL_STATIC VBOXSTRICTRC
nemHCWinHandleMessageException(PVMCPU pVCpu, HV_X64_EXCEPTION_INTERCEPT_MESSAGE const *pMsg, PGVMCPU pGVCpu)
{
    /*
     * Assert sanity.
     */
    Assert(   pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
           || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
           || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_EXECUTE);
    NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, pGVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRip, pMsg->Header.Rip);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
    NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, pGVCpu, HvX64RegisterDs, pMsg->DsSegment);
    NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, pGVCpu, HvX64RegisterSs, pMsg->SsSegment);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRax, pMsg->Rax);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRcx, pMsg->Rcx);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRdx, pMsg->Rdx);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRbx, pMsg->Rbx);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRsp, pMsg->Rsp);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRbp, pMsg->Rbp);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRsi, pMsg->Rsi);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRdi, pMsg->Rdi);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterR8, pMsg->R8);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterR9, pMsg->R9);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterR10, pMsg->R10);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterR11, pMsg->R11);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterR12, pMsg->R12);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterR13, pMsg->R13);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterR14, pMsg->R14);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterR15, pMsg->R15);

    /*
     * Get most of the register state since we'll end up making IEM inject the
     * event.  The exception isn't normally flagged as a pending event, so duh.
     *
     * Note! We can optimize this later with event injection.
     */
    Log4(("XcptExit/%u: %04x:%08RX64/%s: %x errcd=%#x parm=%RX64\n",
          pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
          pMsg->ExceptionVector, pMsg->ErrorCode, pMsg->ExceptionParameter));
    nemHCWinCopyStateFromExceptionMessage(pVCpu, pMsg, true /*fClearXcpt*/);
    uint64_t fWhat = NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM;
    if (pMsg->ExceptionVector == X86_XCPT_DB)
        fWhat |= CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_DR6;
    VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, fWhat, "Xcpt");
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /*
     * Handle the intercept.
     */
    TRPMEVENT enmEvtType = TRPM_TRAP;
    switch (pMsg->ExceptionVector)
    {
        /*
         * We get undefined opcodes on VMMCALL(AMD) & VMCALL(Intel) instructions
         * and need to turn them over to GIM.
         *
         * Note! We do not check fGIMTrapXcptUD here ASSUMING that GIM only wants
         *       #UD for handling non-native hypercall instructions.  (IEM will
         *       decode both and let the GIM provider decide whether to accept it.)
         */
        case X86_XCPT_UD:
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUd);
            EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_UD),
                             pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());

            if (nemHcWinIsInterestingUndefinedOpcode(pMsg->InstructionByteCount, pMsg->InstructionBytes,
                                                     pMsg->Header.ExecutionState.EferLma && pMsg->Header.CsSegment.Long ))
            {
                rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pMsg->Header.Rip,
                                                        pMsg->InstructionBytes, pMsg->InstructionByteCount);
                Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD -> emulated -> %Rrc\n",
                      pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip,
                      nemHCWinExecStateToLogStr(&pMsg->Header), VBOXSTRICTRC_VAL(rcStrict) ));
                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUdHandled);
                return rcStrict;
            }
            Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD [%.*Rhxs] -> re-injected\n", pVCpu->idCpu, pMsg->Header.CsSegment.Selector,
                  pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), pMsg->InstructionByteCount, pMsg->InstructionBytes ));
            break;

        /*
         * Filter debug exceptions.
         */
        case X86_XCPT_DB:
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionDb);
            EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_DB),
                             pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
            Log4(("XcptExit/%u: %04x:%08RX64/%s: #DB - TODO\n",
                  pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header) ));
            break;

        case X86_XCPT_BP:
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionBp);
            EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_BP),
                             pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
            Log4(("XcptExit/%u: %04x:%08RX64/%s: #BP - TODO - %u\n", pVCpu->idCpu, pMsg->Header.CsSegment.Selector,
                  pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), pMsg->Header.InstructionLength));
            enmEvtType = TRPM_SOFTWARE_INT; /* We're at the INT3 instruction, not after it. */
            break;

        /* This shouldn't happen. */
        default:
            AssertLogRelMsgFailedReturn(("ExceptionVector=%#x\n", pMsg->ExceptionVector), VERR_IEM_IPE_6);
    }

    /*
     * Inject it.
     */
    rcStrict = IEMInjectTrap(pVCpu, pMsg->ExceptionVector, enmEvtType, pMsg->ErrorCode,
                             pMsg->ExceptionParameter /*??*/, pMsg->Header.InstructionLength);
    Log4(("XcptExit/%u: %04x:%08RX64/%s: %#u -> injected -> %Rrc\n",
          pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip,
          nemHCWinExecStateToLogStr(&pMsg->Header), pMsg->ExceptionVector, VBOXSTRICTRC_VAL(rcStrict) ));
    return rcStrict;
}
#elif defined(IN_RING3)
/**
 * Deals with exception exits (WHvRunVpExitReasonException).
 *
 * @returns Strict VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context per CPU structure.
 * @param   pExit           The VM exit information to handle.
 * @sa      nemHCWinHandleMessageException
 */
NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExitException(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
{
    /*
     * Get most of the register state since we'll end up making IEM inject the
     * event.  The exception isn't normally flagged as a pending event, so duh.
     *
     * Note! We can optimize this later with event injection.
     */
    Log4(("XcptExit/%u: %04x:%08RX64/%s: %x errcd=%#x parm=%RX64\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
          pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpException.ExceptionType,
          pExit->VpException.ErrorCode, pExit->VpException.ExceptionParameter ));
    nemR3WinCopyStateFromExceptionMessage(pVCpu, pExit, true /*fClearXcpt*/);
    uint64_t fWhat = NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM;
    if (pExit->VpException.ExceptionType == X86_XCPT_DB)
        fWhat |= CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_DR6;
    VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL, fWhat, "Xcpt");
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /*
     * Handle the intercept.
     */
    TRPMEVENT enmEvtType = TRPM_TRAP;
    switch (pExit->VpException.ExceptionType)
    {
        /*
         * We get undefined opcodes on VMMCALL(AMD) & VMCALL(Intel) instructions
         * and need to turn them over to GIM.
         *
         * Note! We do not check fGIMTrapXcptUD here ASSUMING that GIM only wants
         *       #UD for handling non-native hypercall instructions.  (IEM will
         *       decode both and let the GIM provider decide whether to accept it.)
         */
        case X86_XCPT_UD:
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUd);
            EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_UD),
                             pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
            if (nemHcWinIsInterestingUndefinedOpcode(pExit->VpException.InstructionByteCount, pExit->VpException.InstructionBytes,
                                                     pExit->VpContext.ExecutionState.EferLma && pExit->VpContext.Cs.Long ))
            {
                rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pExit->VpContext.Rip,
                                                        pExit->VpException.InstructionBytes,
                                                        pExit->VpException.InstructionByteCount);
                Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD -> emulated -> %Rrc\n",
                      pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip,
                      nemR3WinExecStateToLogStr(&pExit->VpContext), VBOXSTRICTRC_VAL(rcStrict) ));
                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUdHandled);
                return rcStrict;
            }

            Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD [%.*Rhxs] -> re-injected\n", pVCpu->idCpu,
                  pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
                  pExit->VpException.InstructionByteCount, pExit->VpException.InstructionBytes ));
            break;

        /*
         * Filter debug exceptions.
         */
        case X86_XCPT_DB:
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionDb);
            EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_DB),
                             pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
            Log4(("XcptExit/%u: %04x:%08RX64/%s: #DB - TODO\n",
                  pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext) ));
            break;

        case X86_XCPT_BP:
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionBp);
            EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_BP),
                             pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
            Log4(("XcptExit/%u: %04x:%08RX64/%s: #BP - TODO - %u\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
                  pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.InstructionLength));
            enmEvtType = TRPM_SOFTWARE_INT; /* We're at the INT3 instruction, not after it. */
            break;

        /* This shouldn't happen. */
        default:
            AssertLogRelMsgFailedReturn(("ExceptionType=%#x\n", pExit->VpException.ExceptionType), VERR_IEM_IPE_6);
    }

    /*
     * Inject it.
     */
    rcStrict = IEMInjectTrap(pVCpu, pExit->VpException.ExceptionType, enmEvtType, pExit->VpException.ErrorCode,
                             pExit->VpException.ExceptionParameter /*??*/, pExit->VpContext.InstructionLength);
    Log4(("XcptExit/%u: %04x:%08RX64/%s: %#u -> injected -> %Rrc\n",
          pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip,
          nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpException.ExceptionType, VBOXSTRICTRC_VAL(rcStrict) ));

    RT_NOREF_PV(pVM);
    return rcStrict;
}
#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */


#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
/**
 * Deals with unrecoverable exception (triple fault).
 *
 * Seen WRMSR 0x201 (IA32_MTRR_PHYSMASK0) writes from grub / debian9 ending up
 * here too.  So we'll leave it to IEM to decide.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context per CPU structure.
 * @param   pMsgHdr         The message header.
 * @param   pGVCpu          The global (ring-0) per CPU structure (NULL in r3).
 * @sa      nemR3WinHandleExitUnrecoverableException
 */
NEM_TMPL_STATIC VBOXSTRICTRC
nemHCWinHandleMessageUnrecoverableException(PVMCPU pVCpu, HV_X64_INTERCEPT_MESSAGE_HEADER const *pMsgHdr, PGVMCPU pGVCpu)
{
    /* Check message register value sanity. */
    NEMWIN_ASSERT_MSG_REG_SEG(  pVCpu, pGVCpu, HvX64RegisterCs, pMsgHdr->CsSegment);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRip, pMsgHdr->Rip);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRflags, pMsgHdr->Rflags);
    NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterCr8, (uint64_t)pMsgHdr->Cr8);

# if 0
    /*
     * Just copy the state we've got and handle it in the loop for now.
     */
    nemHCWinCopyStateFromX64Header(pVCpu, pMsgHdr);
    Log(("TripleExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT\n",
         pVCpu->idCpu, pMsgHdr->CsSegment.Selector, pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags));
    return VINF_EM_TRIPLE_FAULT;
# else
    /*
     * Let IEM decide whether this is really it.
     */
    EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_UNRECOVERABLE_EXCEPTION),
                     pMsgHdr->Rip + pMsgHdr->CsSegment.Base, ASMReadTSC());
    nemHCWinCopyStateFromX64Header(pVCpu, pMsgHdr);
    VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu,
                                                              NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL, "TripleExit");
    if (rcStrict == VINF_SUCCESS)
    {
        rcStrict = IEMExecOne(pVCpu);
        if (rcStrict == VINF_SUCCESS)
        {
            Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_SUCCESS\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
                 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags ));
            pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT; /* Make sure to reset pending #DB(0). */
            return VINF_SUCCESS;
        }
        if (rcStrict == VINF_EM_TRIPLE_FAULT)
            Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT!\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
                 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags ));
        else
            Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (IEMExecOne)\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
                 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
    }
    else
        Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (state import)\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
             pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
    return rcStrict;
# endif
}
#elif defined(IN_RING3)
/**
 * Deals with unrecoverable exception exits (WHvRunVpExitReasonUnrecoverableException).
 *
 * @returns Strict VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context per CPU structure.
 * @param   pExit           The VM exit information to handle.
 * @sa      nemHCWinHandleMessageUnrecoverableException
 */
NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExitUnrecoverableException(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
{
# if 0
    /*
     * Just copy the state we've got and handle it in the loop for now.
     */
    nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
    Log(("TripleExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
         pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags));
    RT_NOREF_PV(pVM);
    return VINF_EM_TRIPLE_FAULT;
# else
    /*
     * Let IEM decide whether this is really it.
     */
    EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_UNRECOVERABLE_EXCEPTION),
                     pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
    nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
    VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL,
                                                              NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL, "TripleExit");
    if (rcStrict == VINF_SUCCESS)
    {
        rcStrict = IEMExecOne(pVCpu);
        if (rcStrict == VINF_SUCCESS)
        {
            Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_SUCCESS\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
                 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags));
            pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT; /* Make sure to reset pending #DB(0). */
            return VINF_SUCCESS;
        }
        if (rcStrict == VINF_EM_TRIPLE_FAULT)
            Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT!\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
                 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags ));
        else
            Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (IEMExecOne)\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
                 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
    }
    else
        Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (state import)\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
             pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
    RT_NOREF_PV(pVM);
    return rcStrict;
# endif
}
#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */


#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
/**
 * Handles messages (VM exits).
 *
 * @returns Strict VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context per CPU structure.
 * @param   pMappingHeader  The message slot mapping.
 * @param   pGVCpu          The global (ring-0) per CPU structure (NULL in r3).
 * @sa      nemR3WinHandleExit
 */
NEM_TMPL_STATIC VBOXSTRICTRC
nemHCWinHandleMessage(PVM pVM, PVMCPU pVCpu, VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader, PGVMCPU pGVCpu)
{
    if (pMappingHeader->enmVidMsgType == VidMessageHypervisorMessage)
    {
        AssertMsg(pMappingHeader->cbMessage == HV_MESSAGE_SIZE, ("%#x\n", pMappingHeader->cbMessage));
        HV_MESSAGE const *pMsg = (HV_MESSAGE const *)(pMappingHeader + 1);
        switch (pMsg->Header.MessageType)
        {
            case HvMessageTypeUnmappedGpa:
                Assert(pMsg->Header.PayloadSize == RT_UOFFSETOF(HV_X64_MEMORY_INTERCEPT_MESSAGE, DsSegment));
                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemUnmapped);
                return nemHCWinHandleMessageMemory(pVM, pVCpu, &pMsg->X64MemoryIntercept, pGVCpu);

            case HvMessageTypeGpaIntercept:
                Assert(pMsg->Header.PayloadSize == RT_UOFFSETOF(HV_X64_MEMORY_INTERCEPT_MESSAGE, DsSegment));
                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemIntercept);
                return nemHCWinHandleMessageMemory(pVM, pVCpu, &pMsg->X64MemoryIntercept, pGVCpu);

            case HvMessageTypeX64IoPortIntercept:
                Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64IoPortIntercept));
                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitPortIo);
                return nemHCWinHandleMessageIoPort(pVM, pVCpu, &pMsg->X64IoPortIntercept, pGVCpu);

            case HvMessageTypeX64Halt:
                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitHalt);
                EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_HALT),
                                 pMsg->X64InterceptHeader.Rip + pMsg->X64InterceptHeader.CsSegment.Base, ASMReadTSC());
                Log4(("HaltExit\n"));
                return VINF_EM_HALT;

            case HvMessageTypeX64InterruptWindow:
                Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64InterruptWindow));
                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitInterruptWindow);
                return nemHCWinHandleMessageInterruptWindow(pVM, pVCpu, &pMsg->X64InterruptWindow, pGVCpu);

            case HvMessageTypeX64CpuidIntercept:
                Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64CpuIdIntercept));
                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitCpuId);
                return nemHCWinHandleMessageCpuId(pVM, pVCpu, &pMsg->X64CpuIdIntercept, pGVCpu);

            case HvMessageTypeX64MsrIntercept:
                Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64MsrIntercept));
                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMsr);
                return nemHCWinHandleMessageMsr(pVCpu, &pMsg->X64MsrIntercept, pGVCpu);

            case HvMessageTypeX64ExceptionIntercept:
                Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64ExceptionIntercept));
                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitException);
                return nemHCWinHandleMessageException(pVCpu, &pMsg->X64ExceptionIntercept, pGVCpu);

            case HvMessageTypeUnrecoverableException:
                Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64InterceptHeader));
                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitUnrecoverable);
                return nemHCWinHandleMessageUnrecoverableException(pVCpu, &pMsg->X64InterceptHeader, pGVCpu);

            case HvMessageTypeInvalidVpRegisterValue:
            case HvMessageTypeUnsupportedFeature:
            case HvMessageTypeTlbPageSizeMismatch:
                LogRel(("Unimplemented msg:\n%.*Rhxd\n", (int)sizeof(*pMsg), pMsg));
                AssertLogRelMsgFailedReturn(("Message type %#x not implemented!\n%.32Rhxd\n", pMsg->Header.MessageType, pMsg),
                                            VERR_NEM_IPE_3);

            case HvMessageTypeX64ApicEoi:
            case HvMessageTypeX64LegacyFpError:
            case HvMessageTypeX64RegisterIntercept:
            case HvMessageTypeApicEoi:
            case HvMessageTypeFerrAsserted:
            case HvMessageTypeEventLogBufferComplete:
            case HvMessageTimerExpired:
                LogRel(("Unexpected msg:\n%.*Rhxd\n", (int)sizeof(*pMsg), pMsg));
                AssertLogRelMsgFailedReturn(("Unexpected message on CPU #%u: %#x\n", pVCpu->idCpu, pMsg->Header.MessageType),
                                            VERR_NEM_IPE_3);

            default:
                LogRel(("Unknown msg:\n%.*Rhxd\n", (int)sizeof(*pMsg), pMsg));
                AssertLogRelMsgFailedReturn(("Unknown message on CPU #%u: %#x\n", pVCpu->idCpu, pMsg->Header.MessageType),
                                            VERR_NEM_IPE_3);
        }
    }
    else
        AssertLogRelMsgFailedReturn(("Unexpected VID message type on CPU #%u: %#x LB %u\n",
                                     pVCpu->idCpu, pMappingHeader->enmVidMsgType, pMappingHeader->cbMessage),
                                    VERR_NEM_IPE_4);
}
#elif defined(IN_RING3)
/**
 * Handles VM exits.
 *
 * @returns Strict VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context per CPU structure.
 * @param   pExit           The VM exit information to handle.
 * @sa      nemHCWinHandleMessage
 */
NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExit(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
{
    switch (pExit->ExitReason)
    {
        case WHvRunVpExitReasonMemoryAccess:
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemUnmapped);
            return nemR3WinHandleExitMemory(pVM, pVCpu, pExit);

        case WHvRunVpExitReasonX64IoPortAccess:
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitPortIo);
            return nemR3WinHandleExitIoPort(pVM, pVCpu, pExit);

        case WHvRunVpExitReasonX64Halt:
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitHalt);
            EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_HALT),
                             pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
            Log4(("HaltExit\n"));
            return VINF_EM_HALT;

        case WHvRunVpExitReasonCanceled:
            return VINF_SUCCESS;

        case WHvRunVpExitReasonX64InterruptWindow:
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitInterruptWindow);
            return nemR3WinHandleExitInterruptWindow(pVM, pVCpu, pExit);

        case WHvRunVpExitReasonX64Cpuid:
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitCpuId);
            return nemR3WinHandleExitCpuId(pVM, pVCpu, pExit);

        case WHvRunVpExitReasonX64MsrAccess:
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMsr);
            return nemR3WinHandleExitMsr(pVM, pVCpu, pExit);

        case WHvRunVpExitReasonException:
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitException);
            return nemR3WinHandleExitException(pVM, pVCpu, pExit);

        case WHvRunVpExitReasonUnrecoverableException:
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitUnrecoverable);
            return nemR3WinHandleExitUnrecoverableException(pVM, pVCpu, pExit);

        case WHvRunVpExitReasonUnsupportedFeature:
        case WHvRunVpExitReasonInvalidVpRegisterValue:
            LogRel(("Unimplemented exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit));
            AssertLogRelMsgFailedReturn(("Unexpected exit on CPU #%u: %#x\n%.32Rhxd\n",
                                         pVCpu->idCpu, pExit->ExitReason, pExit), VERR_NEM_IPE_3);

        /* Undesired exits: */
        case WHvRunVpExitReasonNone:
        default:
            LogRel(("Unknown exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit));
            AssertLogRelMsgFailedReturn(("Unknown exit on CPU #%u: %#x!\n", pVCpu->idCpu, pExit->ExitReason), VERR_NEM_IPE_3);
    }
}
#endif /* IN_RING3 && !NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */


#ifdef IN_RING0
/**
 * Perform an I/O control operation on the partition handle (VID.SYS),
 * restarting on alert-like behaviour.
 *
 * @returns NT status code.
 * @param   pGVM            The ring-0 VM structure.
 * @param   pGVCpu          The ring-0 CPU structure.
 * @param   pVCpu           The calling cross context CPU structure.
 * @param   fFlags          The wait flags.
 * @param   cMillies        The timeout in milliseconds.
 */
static NTSTATUS nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(PGVM pGVM, PGVMCPU pGVCpu, PVMCPU pVCpu,
                                                               uint32_t fFlags, uint32_t cMillies)
{
    pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu     = pGVCpu->idCpu;
    pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags   = fFlags;
    pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = cMillies;
    NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
                                            &pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
                                            sizeof(pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext),
                                            NULL, 0);
    if (rcNt == STATUS_SUCCESS)
    { /* likely */ }
    /*
     * Generally, if we get down here, we have been interrupted between ACK'ing
     * a message and waiting for the next due to a NtAlertThread call.  So, we
     * should stop ACK'ing the previous message and get on with waiting for the
     * next one.  See similar stuff in nemHCWinRunGC().
     */
    else if (   rcNt == STATUS_TIMEOUT
             || rcNt == STATUS_ALERTED    /* just in case */
             || rcNt == STATUS_KERNEL_APC /* just in case */
             || rcNt == STATUS_USER_APC   /* just in case */)
    {
        DBGFTRACE_CUSTOM(pVCpu->CTX_SUFF(pVM), "IoCtlMessageSlotHandleAndGetNextRestart/1 %#x (f=%#x)", rcNt, fFlags);
        STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuPendingAlerts);
        Assert(fFlags & VID_MSHAGN_F_GET_NEXT_MESSAGE);

        pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu     = pVCpu->idCpu;
        pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags   = fFlags & ~VID_MSHAGN_F_HANDLE_MESSAGE;
        pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = cMillies;
        rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
                                       &pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
                                       sizeof(pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext),
                                       NULL, 0);
        DBGFTRACE_CUSTOM(pVCpu->CTX_SUFF(pVM), "IoCtlMessageSlotHandleAndGetNextRestart/2 %#x", rcNt);
    }
    return rcNt;
}

#endif /* IN_RING0 */
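
/*
 * Note on the VID_MSHAGN flags used above and below: VID_MSHAGN_F_HANDLE_MESSAGE
 * acknowledges the message currently in the slot, while
 * VID_MSHAGN_F_GET_NEXT_MESSAGE waits for the next one.  The restart path above
 * masks out the former so an already acknowledged message isn't ACK'ed twice
 * after an alert-style interruption.
 */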
3774
3775
3776#ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
3777/**
3778 * Worker for nemHCWinRunGC that stops the execution on the way out.
3779 *
3780 * The CPU was running the last time we checked, no there are no messages that
3781 * needs being marked handled/whatever. Caller checks this.
3782 *
3783 * @returns rcStrict on success, error status on failure.
3784 * @param pVM The cross context VM structure.
3785 * @param pVCpu The cross context per CPU structure.
3786 * @param rcStrict The nemHCWinRunGC return status. This is a little
3787 * bit unnecessary, except in internal error cases,
3788 * since we won't need to stop the CPU if we took an
3789 * exit.
3790 * @param pMappingHeader The message slot mapping.
3791 * @param pGVM The global (ring-0) VM structure (NULL in r3).
3792 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
3793 */
3794NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinStopCpu(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict,
3795 VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader,
3796 PGVM pGVM, PGVMCPU pGVCpu)
3797{
3798# ifdef DBGFTRACE_ENABLED
3799 HV_MESSAGE const volatile *pMsgForTrace = (HV_MESSAGE const volatile *)(pMappingHeader + 1);
3800# endif
3801
3802 /*
3803 * Try stopping the processor. If we're lucky we manage to do this before it
3804 * does another VM exit.
3805 */
3806 DBGFTRACE_CUSTOM(pVM, "nemStop#0");
3807# ifdef IN_RING0
3808 pVCpu->nem.s.uIoCtlBuf.idCpu = pGVCpu->idCpu;
3809 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlStopVirtualProcessor.uFunction,
3810 &pVCpu->nem.s.uIoCtlBuf.idCpu, sizeof(pVCpu->nem.s.uIoCtlBuf.idCpu),
3811 NULL, 0);
3812 if (NT_SUCCESS(rcNt))
3813 {
3814 DBGFTRACE_CUSTOM(pVM, "nemStop#0: okay (%#x)", rcNt);
3815 Log8(("nemHCWinStopCpu: Stopping CPU succeeded (cpu status %u)\n", nemHCWinCpuGetRunningStatus(pVCpu) ));
3816 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuSuccess);
3817 return rcStrict;
3818 }
3819# else
3820 BOOL fRet = VidStopVirtualProcessor(pVM->nem.s.hPartitionDevice, pVCpu->idCpu);
3821 if (fRet)
3822 {
3823 DBGFTRACE_CUSTOM(pVM, "nemStop#0: okay");
3824 Log8(("nemHCWinStopCpu: Stopping CPU succeeded (cpu status %u)\n", nemHCWinCpuGetRunningStatus(pVCpu) ));
3825 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuSuccess);
3826 return rcStrict;
3827 }
3828 RT_NOREF(pGVM, pGVCpu);
3829# endif
3830
3831 /*
3832 * Dang. The CPU stopped by itself and we got a couple of messages to deal with.
3833 */
3834# ifdef IN_RING0
3835 DBGFTRACE_CUSTOM(pVM, "nemStop#0: pending (%#x)", rcNt);
3836 AssertLogRelMsgReturn(rcNt == ERROR_VID_STOP_PENDING, ("rcNt=%#x\n", rcNt),
3837 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3838# else
3839 DWORD dwErr = RTNtLastErrorValue();
3840 DBGFTRACE_CUSTOM(pVM, "nemStop#0: pending (%#x)", dwErr);
3841 AssertLogRelMsgReturn(dwErr == ERROR_VID_STOP_PENDING, ("dwErr=%#u (%#x)\n", dwErr, dwErr),
3842 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3843# endif
3844 Log8(("nemHCWinStopCpu: Stopping CPU #%u pending...\n", pVCpu->idCpu));
3845 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuPending);
3846
3847 /*
3848 * First message: Exit or similar, sometimes VidMessageStopRequestComplete.
3849 * Note! We can safely ASSUME that rcStrict isn't carrying important information.
3850 */
3851# ifdef IN_RING0
3852 rcNt = nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(pGVM, pGVCpu, pVCpu, VID_MSHAGN_F_GET_NEXT_MESSAGE, 30000 /*ms*/);
3853 DBGFTRACE_CUSTOM(pVM, "nemStop#1: %#x / %#x %#x %#x", rcNt, pMappingHeader->enmVidMsgType, pMappingHeader->cbMessage,
3854 pMsgForTrace->Header.MessageType);
3855 AssertLogRelMsgReturn(rcNt == STATUS_SUCCESS,
3856 ("1st VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %#x\n", rcNt),
3857 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3858# else
3859 BOOL fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu,
3860 VID_MSHAGN_F_GET_NEXT_MESSAGE, 30000 /*ms*/);
3861 DBGFTRACE_CUSTOM(pVM, "nemStop#1: %d+%#x / %#x %#x %#x", fWait, RTNtLastErrorValue(), pMappingHeader->enmVidMsgType,
3862 pMappingHeader->cbMessage, pMsgForTrace->Header.MessageType);
3863 AssertLogRelMsgReturn(fWait, ("1st VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()),
3864 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3865# endif
3866
3867 VID_MESSAGE_TYPE enmVidMsgType = pMappingHeader->enmVidMsgType;
3868 if (enmVidMsgType != VidMessageStopRequestComplete)
3869 {
3870 VBOXSTRICTRC rcStrict2 = nemHCWinHandleMessage(pVM, pVCpu, pMappingHeader, pGVCpu);
3871 if (rcStrict2 != VINF_SUCCESS && RT_SUCCESS(rcStrict))
3872 rcStrict = rcStrict2;
3873 DBGFTRACE_CUSTOM(pVM, "nemStop#1: handled %#x -> %d", pMsgForTrace->Header.MessageType, VBOXSTRICTRC_VAL(rcStrict));
3874
3875 /*
3876 * Mark it as handled and get the stop request completed message, then mark
3877 * that as handled too. The CPU is then back in the fully stopped state.
3878 */
3879# ifdef IN_RING0
3880 rcNt = nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(pGVM, pGVCpu, pVCpu,
3881 VID_MSHAGN_F_HANDLE_MESSAGE | VID_MSHAGN_F_GET_NEXT_MESSAGE,
3882 30000 /*ms*/);
3883 DBGFTRACE_CUSTOM(pVM, "nemStop#2: %#x / %#x %#x %#x", rcNt, pMappingHeader->enmVidMsgType, pMappingHeader->cbMessage,
3884 pMsgForTrace->Header.MessageType);
3885 AssertLogRelMsgReturn(rcNt == STATUS_SUCCESS,
3886 ("2nd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %#x\n", rcNt),
3887 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3888# else
3889 fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu,
3890 VID_MSHAGN_F_HANDLE_MESSAGE | VID_MSHAGN_F_GET_NEXT_MESSAGE, 30000 /*ms*/);
3891 DBGFTRACE_CUSTOM(pVM, "nemStop#2: %d+%#x / %#x %#x %#x", fWait, RTNtLastErrorValue(), pMappingHeader->enmVidMsgType,
3892 pMappingHeader->cbMessage, pMsgForTrace->Header.MessageType);
3893 AssertLogRelMsgReturn(fWait, ("2nd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()),
3894 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3895# endif
3896
3897 /* It should be a stop request completed message. */
3898 enmVidMsgType = pMappingHeader->enmVidMsgType;
3899 AssertLogRelMsgReturn(enmVidMsgType == VidMessageStopRequestComplete,
3900 ("Unexpected 2nd message following ERROR_VID_STOP_PENDING: %#x LB %#x\n",
3901 enmVidMsgType, pMappingHeader->cbMessage),
3902 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3903
3904 /*
3905 * Mark the VidMessageStopRequestComplete message as handled.
3906 */
3907# ifdef IN_RING0
3908 rcNt = nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(pGVM, pGVCpu, pVCpu, VID_MSHAGN_F_HANDLE_MESSAGE, 30000 /*ms*/);
3909 DBGFTRACE_CUSTOM(pVM, "nemStop#3: %#x / %#x %#x %#x", rcNt, pMappingHeader->enmVidMsgType, pMappingHeader->cbMessage,
3910 pMsgForTrace->Header.MessageType);
3911 AssertLogRelMsgReturn(rcNt == STATUS_SUCCESS,
3912 ("3rd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %#x\n", rcNt),
3913 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3914# else
3915 fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu, VID_MSHAGN_F_HANDLE_MESSAGE, 30000 /*ms*/);
3916 DBGFTRACE_CUSTOM(pVM, "nemStop#3: %d+%#x / %#x %#x %#x", fWait, RTNtLastErrorValue(), pMappingHeader->enmVidMsgType,
3917 pMappingHeader->cbMessage, pMsgForTrace->Header.MessageType);
3918 AssertLogRelMsgReturn(fWait, ("3rd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()),
3919 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3920# endif
3921 Log8(("nemHCWinStopCpu: Stopped the CPU (rcStrict=%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict) ));
3922 }
3923 else
3924 {
3925 /** @todo I'm not so sure about this now... */
3926 DBGFTRACE_CUSTOM(pVM, "nemStop#9: %#x %#x %#x", pMappingHeader->enmVidMsgType,
3927 pMappingHeader->cbMessage, pMsgForTrace->Header.MessageType);
3928 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuPendingOdd);
3929 Log8(("nemHCWinStopCpu: Stopped the CPU (rcStrict=%Rrc) - 1st VidMessageSlotHandleAndGetNext got VidMessageStopRequestComplete.\n",
3930 VBOXSTRICTRC_VAL(rcStrict) ));
3931 }
3932 return rcStrict;
3933}
3934#endif /* NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
3935
3936#if defined(NEM_WIN_TEMPLATE_MODE_OWN_RUN_API) || defined(IN_RING3)
3937
3938/**
3939 * Deals with pending interrupt related force flags, may inject interrupt.
3940 *
3941 * @returns VBox strict status code.
3942 * @param pVM The cross context VM structure.
3943 * @param pVCpu The cross context per CPU structure.
3944 * @param pGVCpu The global (ring-0) per CPU structure.
3945 * @param pfInterruptWindows Where to return interrupt window flags.
3946 */
3947NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleInterruptFF(PVM pVM, PVMCPU pVCpu, PGVMCPU pGVCpu, uint8_t *pfInterruptWindows)
3948{
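    /*
     * Delivery order below: update pending APIC interrupts first, then try
     * NMIs before APIC/PIC interrupts.  Whenever delivery is blocked by an
     * interrupt shadow, a cleared IF, NMI blocking or the TPR, we request
     * the corresponding exit window via *pfInterruptWindows instead, so
     * that hyper-V tells us when injection becomes possible.
     */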
3949 Assert(!TRPMHasTrap(pVCpu));
3950 RT_NOREF_PV(pVM);
3951
3952 /*
3953 * First update APIC. We ASSUME this won't need TPR/CR8.
3954 */
3955 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
3956 {
3957 APICUpdatePendingInterrupts(pVCpu);
3958 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
3959 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
3960 return VINF_SUCCESS;
3961 }
3962
3963 /*
3964 * We don't currently implement SMIs.
3965 */
3966 AssertReturn(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_SMI), VERR_NEM_IPE_0);
3967
3968 /*
3969 * Check if we've got the minimum of state required for deciding whether we
3970 * can inject interrupts and NMIs. If we don't have it, get all we might require
3971 * for injection via IEM.
3972 */
3973 bool const fPendingNmi = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI);
3974 uint64_t fNeedExtrn = CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS
3975 | (fPendingNmi ? CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI : 0);
3976 if (pVCpu->cpum.GstCtx.fExtrn & fNeedExtrn)
3977 {
3978 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu,
3979 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "IntFF");
3980 if (rcStrict != VINF_SUCCESS)
3981 return rcStrict;
3982 }
3983 bool const fInhibitInterrupts = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
3984 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip;
3985
3986 /*
3987 * NMI? Try deliver it first.
3988 */
3989 if (fPendingNmi)
3990 {
3991 if ( !fInhibitInterrupts
3992 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
3993 {
3994 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu,
3995 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "NMI");
3996 if (rcStrict == VINF_SUCCESS)
3997 {
3998 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
3999 rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_NMI, TRPM_HARDWARE_INT, 0, 0, 0);
4000 Log8(("Injected NMI on %u (%d)\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
4001 }
4002 return rcStrict;
4003 }
4004 *pfInterruptWindows |= NEM_WIN_INTW_F_NMI;
4005 Log8(("NMI window pending on %u\n", pVCpu->idCpu));
4006 }
4007
4008 /*
4009 * APIC or PIC interrupt?
4010 */
4011 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
4012 {
4013 if ( !fInhibitInterrupts
4014 && pVCpu->cpum.GstCtx.rflags.Bits.u1IF)
4015 {
4016 AssertCompile(NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT & CPUMCTX_EXTRN_APIC_TPR);
4017 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu,
4018 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "Int");
4019 if (rcStrict == VINF_SUCCESS)
4020 {
4021 uint8_t bInterrupt;
4022 int rc = PDMGetInterrupt(pVCpu, &bInterrupt);
4023 if (RT_SUCCESS(rc))
4024 {
4025 rcStrict = IEMInjectTrap(pVCpu, bInterrupt, TRPM_HARDWARE_INT, 0, 0, 0);
4026 Log8(("Injected interrupt %#x on %u (%d)\n", bInterrupt, pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
4027 }
4028 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
4029 {
4030 *pfInterruptWindows |= (bInterrupt >> 4 /*??*/) << NEM_WIN_INTW_F_PRIO_SHIFT;
4031 Log8(("VERR_APIC_INTR_MASKED_BY_TPR: *pfInterruptWindows=%#x\n", *pfInterruptWindows));
4032 }
4033 else
4034 Log8(("PDMGetInterrupt failed -> %d\n", rc));
4035 }
4036 return rcStrict;
4037 }
4038 *pfInterruptWindows |= NEM_WIN_INTW_F_REGULAR;
4039 Log8(("Interrupt window pending on %u\n", pVCpu->idCpu));
4040 }
4041
4042 return VINF_SUCCESS;
4043}
4044
4045
4046/**
4047 * Inner NEM runloop for windows.
4048 *
4049 * @returns Strict VBox status code.
4050 * @param pVM The cross context VM structure.
4051 * @param pVCpu The cross context per CPU structure.
4052 * @param pGVM The ring-0 VM structure (NULL in ring-3).
4053 * @param pGVCpu The ring-0 per CPU structure (NULL in ring-3).
4054 */
4055NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinRunGC(PVM pVM, PVMCPU pVCpu, PGVM pGVM, PGVMCPU pGVCpu)
4056{
4057 LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 <=\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags));
4058# ifdef LOG_ENABLED
4059 if (LogIs3Enabled())
4060 nemHCWinLogState(pVM, pVCpu);
4061# endif
4062# ifdef IN_RING0
4063 Assert(pVCpu->idCpu == pGVCpu->idCpu);
4064# endif
4065
4066 /*
4067 * Try switch to NEM runloop state.
4068 */
4069 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
4070 { /* likely */ }
4071 else
4072 {
4073 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
4074 LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
4075 return VINF_SUCCESS;
4076 }
4077
4078 /*
4079 * The run loop.
4080 *
4081 * The current approach to state updating is to use the sledgehammer and
4082 * sync everything every time. This will be optimized later.
4083 */
4084# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4085 VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader = (VID_MESSAGE_MAPPING_HEADER volatile *)pVCpu->nem.s.pvMsgSlotMapping;
4086# endif
4087 const bool fSingleStepping = DBGFIsStepping(pVCpu);
4088// const uint32_t fCheckVmFFs = !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK
4089// : VM_FF_HP_R0_PRE_HM_STEP_MASK;
4090// const uint32_t fCheckCpuFFs = !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK;
4091 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
4092 for (unsigned iLoop = 0;; iLoop++)
4093 {
4094# ifndef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
4095 /*
4096 * Hack alert! Unmap everything once too many pages are mapped, keeping the mapping count bounded.
4097 */
4098 uint32_t const cMappedPages = pVM->nem.s.cMappedPages;
4099 if (cMappedPages >= 4000)
4100 {
4101 PGMPhysNemEnumPagesByState(pVM, pVCpu, NEM_WIN_PAGE_STATE_READABLE, nemR3WinWHvUnmapOnePageCallback, NULL);
4102 Log(("nemHCWinRunGC: Unmapped all; cMappedPages=%u -> %u\n", cMappedPages, pVM->nem.s.cMappedPages));
4103 }
4104# endif
4105
4106 /*
4107 * Pending interrupts or such? Need to check and deal with this prior
4108 * to the state syncing.
4109 */
4110 pVCpu->nem.s.fDesiredInterruptWindows = 0;
4111 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC
4112 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
4113 {
4114# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4115 /* Make sure the CPU isn't executing. */
4116 if (pVCpu->nem.s.fHandleAndGetFlags == VID_MSHAGN_F_GET_NEXT_MESSAGE)
4117 {
4118 pVCpu->nem.s.fHandleAndGetFlags = 0;
4119 rcStrict = nemHCWinStopCpu(pVM, pVCpu, rcStrict, pMappingHeader, pGVM, pGVCpu);
4120 if (rcStrict == VINF_SUCCESS)
4121 { /* likely */ }
4122 else
4123 {
4124 LogFlow(("NEM/%u: breaking: nemHCWinStopCpu -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
4125 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
4126 break;
4127 }
4128 }
4129# endif
4130
4131 /* Try inject interrupt. */
4132 rcStrict = nemHCWinHandleInterruptFF(pVM, pVCpu, pGVCpu, &pVCpu->nem.s.fDesiredInterruptWindows);
4133 if (rcStrict == VINF_SUCCESS)
4134 { /* likely */ }
4135 else
4136 {
4137 LogFlow(("NEM/%u: breaking: nemHCWinHandleInterruptFF -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
4138 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
4139 break;
4140 }
4141 }
4142
4143 /*
4144 * Ensure that hyper-V has the whole state.
4145 * (We always update the interrupt window settings when active, as hyper-V seems
4146 * to forget about it after an exit.)
4147 */
4148 if ( (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK))
4149 != (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK)
4150 || ( ( pVCpu->nem.s.fDesiredInterruptWindows
4151 || pVCpu->nem.s.fCurrentInterruptWindows != pVCpu->nem.s.fDesiredInterruptWindows)
4152# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4153 && pVCpu->nem.s.fHandleAndGetFlags != VID_MSHAGN_F_GET_NEXT_MESSAGE /* not running */
4154# endif
4155 )
4156 )
4157 {
4158# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4159 AssertMsg(pVCpu->nem.s.fHandleAndGetFlags != VID_MSHAGN_F_GET_NEXT_MESSAGE /* not running */,
4160 ("%#x fExtrn=%#RX64 (%#RX64) fDesiredInterruptWindows=%d fCurrentInterruptWindows=%#x vs %#x\n",
4161 pVCpu->nem.s.fHandleAndGetFlags, pVCpu->cpum.GstCtx.fExtrn, ~pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK),
4162 pVCpu->nem.s.fDesiredInterruptWindows, pVCpu->nem.s.fCurrentInterruptWindows, pVCpu->nem.s.fDesiredInterruptWindows));
4163# endif
4164# ifdef IN_RING0
4165 int rc2 = nemR0WinExportState(pGVM, pGVCpu, &pVCpu->cpum.GstCtx);
4166# else
4167 int rc2 = nemHCWinCopyStateToHyperV(pVM, pVCpu);
4168 RT_NOREF(pGVM, pGVCpu);
4169# endif
4170 AssertRCReturn(rc2, rc2);
4171 }
4172
4173 /*
4174 * Poll timers and run for a bit.
4175 *
4176 * With the VID approach (ring-0 or ring-3) we can specify a timeout here,
4177 * so we take the time of the next timer event and use that as a deadline.
4178 * The rounding heuristics are "tuned" so that rhel5 (1K timer) will boot fine.
4179 */
4180 /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
4181 * the whole polling job when timers have changed... */
4182 uint64_t offDeltaIgnored;
4183 uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
4184 if ( !VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
4185 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
4186 {
4187# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4188 if (pVCpu->nem.s.fHandleAndGetFlags)
4189 { /* Very likely that the CPU does NOT need starting (pending msg, running). */ }
4190 else
4191 {
4192# ifdef IN_RING0
4193 pVCpu->nem.s.uIoCtlBuf.idCpu = pGVCpu->idCpu;
4194 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlStartVirtualProcessor.uFunction,
4195 &pVCpu->nem.s.uIoCtlBuf.idCpu, sizeof(pVCpu->nem.s.uIoCtlBuf.idCpu),
4196 NULL, 0);
4197 LogFlow(("NEM/%u: IoCtlStartVirtualProcessor -> %#x\n", pVCpu->idCpu, rcNt));
4198 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("VidStartVirtualProcessor failed for CPU #%u: %#x\n", pGVCpu->idCpu, rcNt),
4199 VERR_NEM_IPE_5);
4200# else
4201 AssertLogRelMsgReturn(g_pfnVidStartVirtualProcessor(pVM->nem.s.hPartitionDevice, pVCpu->idCpu),
4202 ("VidStartVirtualProcessor failed for CPU #%u: %u (%#x, rcNt=%#x)\n",
4203 pVCpu->idCpu, RTNtLastErrorValue(), RTNtLastErrorValue(), RTNtLastStatusValue()),
4204 VERR_NEM_IPE_5);
4205# endif
4206 pVCpu->nem.s.fHandleAndGetFlags = VID_MSHAGN_F_GET_NEXT_MESSAGE;
4207 }
4208# endif /* NEM_WIN_TEMPLATE_MODE_OWN_RUN_API */
4209
4210 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_WAIT, VMCPUSTATE_STARTED_EXEC_NEM))
4211 {
4212# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
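            /*
             * Converting the deadline to a millisecond wait, per the code
             * below: under 100 us away we poll (0 ms), under 2 ms we wait a
             * single millisecond, and 1 second or more is clamped to
             * RT_MS_1SEC.  Worked example: an event 5.1 ms out gives
             * (5100000 - 100000) / RT_NS_1MS = 5 ms.
             */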
4213 uint64_t const nsNow = RTTimeNanoTS();
4214 int64_t const cNsNextTimerEvt = nsNextTimerEvt - nsNow;
4215 uint32_t cMsWait;
4216 if (cNsNextTimerEvt < 100000 /* ns */)
4217 cMsWait = 0;
4218 else if ((uint64_t)cNsNextTimerEvt < RT_NS_1SEC)
4219 {
4220 if ((uint32_t)cNsNextTimerEvt < 2*RT_NS_1MS)
4221 cMsWait = 1;
4222 else
4223 cMsWait = ((uint32_t)cNsNextTimerEvt - 100000 /*ns*/) / RT_NS_1MS;
4224 }
4225 else
4226 cMsWait = RT_MS_1SEC;
4227# ifdef IN_RING0
4228 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu = pGVCpu->idCpu;
4229 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags = pVCpu->nem.s.fHandleAndGetFlags;
4230 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = cMsWait;
4231 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
4232 &pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
4233 sizeof(pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext),
4234 NULL, 0);
4235 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
4236 if (rcNt == STATUS_SUCCESS)
4237# else
4238 BOOL fRet = VidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu,
4239 pVCpu->nem.s.fHandleAndGetFlags, cMsWait);
4240 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
4241 if (fRet)
4242# endif
4243# else
4244 WHV_RUN_VP_EXIT_CONTEXT ExitReason;
4245 RT_ZERO(ExitReason);
4246 HRESULT hrc = WHvRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, &ExitReason, sizeof(ExitReason));
4247 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
4248 if (SUCCEEDED(hrc))
4249# endif
4250 {
4251 /*
4252 * Deal with the message.
4253 */
4254# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4255 rcStrict = nemHCWinHandleMessage(pVM, pVCpu, pMappingHeader, pGVCpu);
4256 pVCpu->nem.s.fHandleAndGetFlags |= VID_MSHAGN_F_HANDLE_MESSAGE;
4257# else
4258 rcStrict = nemR3WinHandleExit(pVM, pVCpu, &ExitReason);
4259# endif
4260 if (rcStrict == VINF_SUCCESS)
4261 { /* hopefully likely */ }
4262 else
4263 {
4264 LogFlow(("NEM/%u: breaking: nemHCWinHandleMessage -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
4265 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
4266 break;
4267 }
4268 }
4269 else
4270 {
4271# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4272
4273 /* VID.SYS merges STATUS_ALERTED and STATUS_USER_APC into STATUS_TIMEOUT,
4274 so after NtAlertThread we end up here with a STATUS_TIMEOUT. And yeah,
4275 the error code conversion is into WAIT_XXX, i.e. NT status codes. */
4276# ifndef IN_RING0
4277 DWORD rcNt = GetLastError();
4278# endif
4279 LogFlow(("NEM/%u: VidMessageSlotHandleAndGetNext -> %#x\n", pVCpu->idCpu, rcNt));
4280 AssertLogRelMsgReturn( rcNt == STATUS_TIMEOUT
4281 || rcNt == STATUS_ALERTED /* just in case */
4282 || rcNt == STATUS_USER_APC /* ditto */
4283 || rcNt == STATUS_KERNEL_APC /* ditto */
4284 , ("VidMessageSlotHandleAndGetNext failed for CPU #%u: %#x (%u)\n",
4285 pVCpu->idCpu, rcNt, rcNt),
4286 VERR_NEM_IPE_0);
4287 pVCpu->nem.s.fHandleAndGetFlags = VID_MSHAGN_F_GET_NEXT_MESSAGE;
4288 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatGetMsgTimeout);
4289# else
4290 AssertLogRelMsgFailedReturn(("WHvRunVirtualProcessor failed for CPU #%u: %#x (%u)\n",
4291 pVCpu->idCpu, hrc, GetLastError()),
4292 VERR_NEM_IPE_0);
4293# endif
4294 }
4295
4296 /*
4297 * If no relevant FFs are pending, loop.
4298 */
4299 if ( !VM_FF_IS_PENDING( pVM, !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
4300 && !VMCPU_FF_IS_PENDING(pVCpu, !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
4301 continue;
4302
4303 /** @todo Try handle pending flags, not just return to EM loops. Take care
4304 * not to set important RCs here unless we've handled a message. */
4305 LogFlow(("NEM/%u: breaking: pending FF (%#x / %#x)\n",
4306 pVCpu->idCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
4307 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPost);
4308 }
4309 else
4310 {
4311 LogFlow(("NEM/%u: breaking: canceled %d (pre exec)\n", pVCpu->idCpu, VMCPU_GET_STATE(pVCpu) ));
4312 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnCancel);
4313 }
4314 }
4315 else
4316 {
4317 LogFlow(("NEM/%u: breaking: pending FF (pre exec)\n", pVCpu->idCpu));
4318 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPre);
4319 }
4320 break;
4321 } /* the run loop */
4322
4323
4324 /*
4325 * If the CPU is running, make sure to stop it before we try sync back the
4326 * state and return to EM. We don't sync back the whole state if we can help it.
4327 */
4328# ifdef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
4329 if (pVCpu->nem.s.fHandleAndGetFlags == VID_MSHAGN_F_GET_NEXT_MESSAGE)
4330 {
4331 pVCpu->nem.s.fHandleAndGetFlags = 0;
4332 rcStrict = nemHCWinStopCpu(pVM, pVCpu, rcStrict, pMappingHeader, pGVM, pGVCpu);
4333 }
4334# endif
4335
4336 if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
4337 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
4338
4339 if (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)))
4340 {
4341 /* Try to anticipate what we might need. */
4342 uint64_t fImport = IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI;
4343 if ( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
4344 || RT_FAILURE(rcStrict))
4345 fImport = CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT);
4346# ifdef IN_RING0 /* Ring-3 I/O port access optimizations: */
4347 else if ( rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
4348 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
4349 fImport = CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT;
4350 else if (rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
4351 fImport = CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT;
4352# endif
4353 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_APIC
4354 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
4355 fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;
4356
4357 if (pVCpu->cpum.GstCtx.fExtrn & fImport)
4358 {
4359# ifdef IN_RING0
4360 int rc2 = nemR0WinImportState(pGVM, pGVCpu, &pVCpu->cpum.GstCtx, fImport | CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT);
4361 if (RT_SUCCESS(rc2))
4362 pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
4363 else if (rc2 == VERR_NEM_FLUSH_TLB)
4364 {
4365 pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
4366 if (rcStrict == VINF_SUCCESS || rcStrict == -rc2)
4367 rcStrict = -rc2;
4368 else
4369 {
4370 pVCpu->nem.s.rcPending = -rc2;
4371 LogFlow(("NEM/%u: rcPending=%Rrc (rcStrict=%Rrc)\n", pVCpu->idCpu, rc2, VBOXSTRICTRC_VAL(rcStrict) ));
4372 }
4373 }
4374# else
4375 int rc2 = nemHCWinCopyStateFromHyperV(pVM, pVCpu, fImport | CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT);
4376 if (RT_SUCCESS(rc2))
4377 pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
4378# endif
4379 else if (RT_SUCCESS(rcStrict))
4380 rcStrict = rc2;
4381 if (!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT))))
4382 pVCpu->cpum.GstCtx.fExtrn = 0;
4383 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
4384 }
4385 else
4386 {
4387 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
4388 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
4389 }
4390 }
4391 else
4392 {
4393 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
4394 pVCpu->cpum.GstCtx.fExtrn = 0;
4395 }
4396
4397 LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 => %Rrc\n",
4398 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, VBOXSTRICTRC_VAL(rcStrict) ));
4399 return rcStrict;
4400}
4401
4402#endif /* defined(NEM_WIN_TEMPLATE_MODE_OWN_RUN_API) || defined(IN_RING3) */
4403
4404/**
4405 * @callback_method_impl{FNPGMPHYSNEMCHECKPAGE}
4406 */
4407NEM_TMPL_STATIC DECLCALLBACK(int) nemHCWinUnsetForA20CheckerCallback(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys,
4408 PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
4409{
4410 /* We'll just unmap the memory. */
4411 if (pInfo->u2NemState > NEM_WIN_PAGE_STATE_UNMAPPED)
4412 {
4413#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
4414 int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys);
4415 AssertRC(rc);
4416 if (RT_SUCCESS(rc))
4417#else
4418 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
4419 if (SUCCEEDED(hrc))
4420#endif
4421 {
4422 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4423 Log5(("NEM GPA unmapped/A20: %RGp (was %s, cMappedPages=%u)\n", GCPhys, g_apszPageStates[pInfo->u2NemState], cMappedPages));
4424 pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
4425 }
4426 else
4427 {
4428#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
4429 LogRel(("nemHCWinUnsetForA20CheckerCallback/unmap: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));
4430 return rc;
4431#else
4432 LogRel(("nemHCWinUnsetForA20CheckerCallback/unmap: GCPhys=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
4433 GCPhys, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
4434 return VERR_NEM_IPE_2;
4435#endif
4436 }
4437 }
4438 RT_NOREF(pVCpu, pvUser);
4439 return VINF_SUCCESS;
4440}
4441
4442
4443/**
4444 * Unmaps a page from Hyper-V for the purpose of emulating A20 gate behavior.
4445 *
4446 * @returns The PGMPhysNemQueryPageInfo result.
4447 * @param pVM The cross context VM structure.
4448 * @param pVCpu The cross context virtual CPU structure.
4449 * @param GCPhys The page to unmap.
4450 */
4451NEM_TMPL_STATIC int nemHCWinUnmapPageForA20Gate(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
4452{
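    /*
     * With the A20 gate disabled, physical address bit 20 reads as zero, so
     * the page at GCPhys aliases the one at GCPhys | RT_BIT_32(20).  Rather
     * than mapping both, we unmap the page and let the memory access exit
     * handlers map it again on demand with the correct backing.
     */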
4453 PGMPHYSNEMPAGEINFO Info;
4454 return PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhys, false /*fMakeWritable*/, &Info,
4455 nemHCWinUnsetForA20CheckerCallback, NULL);
4456}
4457
4458
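/**
 * Notification that a physical access handler was registered.
 *
 * Nothing for us to do here; we just log the event.
 */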
4459void nemHCNativeNotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
4460{
4461 Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
4462 NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb);
4463}
4464
4465
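/**
 * Notification that a physical access handler was deregistered.
 *
 * As with registration, no action is currently required beyond logging.
 */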
4466void nemHCNativeNotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
4467 int fRestoreAsRAM, bool fRestoreAsRAM2)
4468{
4469 Log5(("nemHCNativeNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d fRestoreAsRAM=%d fRestoreAsRAM2=%d\n",
4470 GCPhys, cb, enmKind, fRestoreAsRAM, fRestoreAsRAM2));
4471 NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb); NOREF(fRestoreAsRAM); NOREF(fRestoreAsRAM2);
4472}
4473
4474
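/**
 * Notification that a physical access handler was relocated.
 *
 * Logging only, same as the other handler notifications above.
 */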
4475void nemHCNativeNotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
4476 RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
4477{
4478 Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
4479 GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
4480 NOREF(pVM); NOREF(enmKind); NOREF(GCPhysOld); NOREF(GCPhysNew); NOREF(cb); NOREF(fRestoreAsRAM);
4481}
4482
4483
4484/**
4485 * Worker that maps pages into Hyper-V.
4486 *
4487 * This is used by the PGM physical page notifications as well as the memory
4488 * access VMEXIT handlers.
4489 *
4490 * @returns VBox status code.
4491 * @param pVM The cross context VM structure.
4492 * @param pVCpu The cross context virtual CPU structure of the
4493 * calling EMT.
4494 * @param GCPhysSrc The source page address.
4495 * @param GCPhysDst The hyper-V destination page. This may differ from
4496 * GCPhysSrc when A20 is disabled.
4497 * @param fPageProt NEM_PAGE_PROT_XXX.
4498 * @param pu2State Our page state (input/output).
4499 * @param fBackingChanged Set if the page backing is being changed.
4500 * @thread EMT(pVCpu)
4501 */
4502NEM_TMPL_STATIC int nemHCNativeSetPhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
4503 uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged)
4504{
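    /*
     * PGM lends us two bits per page for state tracking (*pu2State); the
     * values used here are NEM_WIN_PAGE_STATE_UNMAPPED, _READABLE and
     * _WRITABLE.  The job below is to transition that state to match
     * fPageProt, unmapping first whenever the backing changed or the
     * API requires it.
     */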
4505#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
4506 /*
4507 * When using the hypercalls instead of the ring-3 APIs, we don't need to
4508 * unmap memory before modifying it. We still want to track the state though,
4509 * since unmap will fail when called on an unmapped page and we don't want to redo
4510 * upgrades/downgrades.
4511 */
4512 uint8_t const u2OldState = *pu2State;
4513 int rc;
4514 if (fPageProt == NEM_PAGE_PROT_NONE)
4515 {
4516 if (u2OldState > NEM_WIN_PAGE_STATE_UNMAPPED)
4517 {
4518 rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
4519 if (RT_SUCCESS(rc))
4520 {
4521 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
4522 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4523 Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
4524 }
4525 else
4526 AssertLogRelMsgFailed(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4527 }
4528 else
4529 rc = VINF_SUCCESS;
4530 }
4531 else if (fPageProt & NEM_PAGE_PROT_WRITE)
4532 {
4533 if (u2OldState != NEM_WIN_PAGE_STATE_WRITABLE || fBackingChanged)
4534 {
4535 rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
4536 HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE
4537 | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
4538 if (RT_SUCCESS(rc))
4539 {
4540 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
4541 uint32_t cMappedPages = u2OldState <= NEM_WIN_PAGE_STATE_UNMAPPED
4542 ? ASMAtomicIncU32(&pVM->nem.s.cMappedPages) : pVM->nem.s.cMappedPages;
4543 Log5(("NEM GPA writable/set: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
4544 NOREF(cMappedPages);
4545 }
4546 else
4547 AssertLogRelMsgFailed(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4548 }
4549 else
4550 rc = VINF_SUCCESS;
4551 }
4552 else
4553 {
4554 if (u2OldState != NEM_WIN_PAGE_STATE_READABLE || fBackingChanged)
4555 {
4556 rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
4557 HV_MAP_GPA_READABLE | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
4558 if (RT_SUCCESS(rc))
4559 {
4560 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
4561 uint32_t cMappedPages = u2OldState <= NEM_WIN_PAGE_STATE_UNMAPPED
4562 ? ASMAtomicIncU32(&pVM->nem.s.cMappedPages) : pVM->nem.s.cMappedPages;
4563 Log5(("NEM GPA read+exec/set: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
4564 NOREF(cMappedPages);
4565 }
4566 else
4567 AssertLogRelMsgFailed(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4568 }
4569 else
4570 rc = VINF_SUCCESS;
4571 }
4572
4573 return rc;
4574
4575#else
4576 /*
4577 * Looks like we need to unmap a page before we can change the backing
4578 * or even modify the protection. This is going to be *REALLY* efficient.
4579 * PGM lends us two bits to keep track of the state here.
4580 */
4581 uint8_t const u2OldState = *pu2State;
4582 uint8_t const u2NewState = fPageProt & NEM_PAGE_PROT_WRITE ? NEM_WIN_PAGE_STATE_WRITABLE
4583 : fPageProt & NEM_PAGE_PROT_READ ? NEM_WIN_PAGE_STATE_READABLE : NEM_WIN_PAGE_STATE_UNMAPPED;
4584 if ( fBackingChanged
4585 || u2NewState != u2OldState)
4586 {
4587 if (u2OldState > NEM_WIN_PAGE_STATE_UNMAPPED)
4588 {
4589# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
4590 int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
4591 AssertRC(rc);
4592 if (RT_SUCCESS(rc))
4593 {
4594 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
4595 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4596 if (u2NewState == NEM_WIN_PAGE_STATE_UNMAPPED)
4597 {
4598 Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n",
4599 GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
4600 return VINF_SUCCESS;
4601 }
4602 }
4603 else
4604 {
4605 LogRel(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4606 return rc;
4607 }
4608# else
4609 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst, X86_PAGE_SIZE);
4610 if (SUCCEEDED(hrc))
4611 {
4612 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
4613 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4614 if (u2NewState == NEM_WIN_PAGE_STATE_UNMAPPED)
4615 {
4616 Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n",
4617 GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
4618 return VINF_SUCCESS;
4619 }
4620 }
4621 else
4622 {
4623 LogRel(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
4624 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
4625 return VERR_NEM_INIT_FAILED;
4626 }
4627# endif
4628 }
4629 }
4630
4631 /*
4632 * Writeable mapping?
4633 */
4634 if (fPageProt & NEM_PAGE_PROT_WRITE)
4635 {
4636# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
4637 int rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
4638 HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE
4639 | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
4640 AssertRC(rc);
4641 if (RT_SUCCESS(rc))
4642 {
4643 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
4644 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4645 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
4646 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
4647 return VINF_SUCCESS;
4648 }
4649 LogRel(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4650 return rc;
4651# else
4652 void *pvPage;
4653 int rc = nemR3NativeGCPhys2R3PtrWriteable(pVM, GCPhysSrc, &pvPage);
4654 if (RT_SUCCESS(rc))
4655 {
4656 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvPage, GCPhysDst, X86_PAGE_SIZE,
4657 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute | WHvMapGpaRangeFlagWrite);
4658 if (SUCCEEDED(hrc))
4659 {
4660 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
4661 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4662 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
4663 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
4664 return VINF_SUCCESS;
4665 }
4666 LogRel(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
4667 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
4668 return VERR_NEM_INIT_FAILED;
4669 }
4670 LogRel(("nemHCNativeSetPhysPage/writable: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
4671 return rc;
4672# endif
4673 }
4674
4675 if (fPageProt & NEM_PAGE_PROT_READ)
4676 {
4677# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
4678 int rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
4679 HV_MAP_GPA_READABLE | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
4680 AssertRC(rc);
4681 if (RT_SUCCESS(rc))
4682 {
4683 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
4684 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4685 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
4686 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
4687 return VINF_SUCCESS;
4688 }
4689 LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4690 return rc;
4691# else
4692 const void *pvPage;
4693 int rc = nemR3NativeGCPhys2R3PtrReadOnly(pVM, GCPhysSrc, &pvPage);
4694 if (RT_SUCCESS(rc))
4695 {
4696 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, (void *)pvPage, GCPhysDst, X86_PAGE_SIZE,
4697 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
4698 if (SUCCEEDED(hrc))
4699 {
4700 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
4701 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4702 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
4703 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
4704 return VINF_SUCCESS;
4705 }
4706 LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
4707 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
4708 return VERR_NEM_INIT_FAILED;
4709 }
4710 LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
4711 return rc;
4712# endif
4713 }
4714
4715 /* We already unmapped it above. */
4716 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
4717 return VINF_SUCCESS;
4718#endif /* !NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
4719}
4720
4721
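/**
 * Worker that simply unmaps a page from Hyper-V and updates our page state
 * tracking accordingly.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhysDst   The page to unmap.
 * @param   pu2State    Our page state (input/output).
 */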
4722NEM_TMPL_STATIC int nemHCJustUnmapPageFromHyperV(PVM pVM, RTGCPHYS GCPhysDst, uint8_t *pu2State)
4723{
4724 if (*pu2State <= NEM_WIN_PAGE_STATE_UNMAPPED)
4725 {
4726 Log5(("nemHCJustUnmapPageFromHyperV: %RGp == unmapped\n", GCPhysDst));
4727 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
4728 return VINF_SUCCESS;
4729 }
4730
4731#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING0)
4732 PVMCPU pVCpu = VMMGetCpu(pVM);
4733 int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
4734 AssertRC(rc);
4735 if (RT_SUCCESS(rc))
4736 {
4737 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4738 Log5(("NEM GPA unmapped/just: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[*pu2State], cMappedPages));
4739 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
4740 return VINF_SUCCESS;
4741 }
4742 LogRel(("nemHCJustUnmapPageFromHyperV/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4743 return rc;
4744#else
4745 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, X86_PAGE_SIZE);
4746 if (SUCCEEDED(hrc))
4747 {
4748 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4749 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
4750 Log5(("nemHCJustUnmapPageFromHyperV: %RGp => unmapped (total %u)\n", GCPhysDst, cMappedPages));
4751 return VINF_SUCCESS;
4752 }
4753 LogRel(("nemHCJustUnmapPageFromHyperV(%RGp): failed! hrc=%Rhrc (%#x) Last=%#x/%u\n",
4754 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
4755 return VERR_NEM_IPE_6;
4756#endif
4757}
4758
4759
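/**
 * Notification that a RAM page was allocated or its backing replaced; maps
 * it into Hyper-V in the hypercall case, or unmaps it in the ring-3 API
 * case so it can be re-mapped lazily on the next access exit.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The guest physical address of the page.
 * @param   HCPhys      The new host physical backing (unused here).
 * @param   fPageProt   NEM_PAGE_PROT_XXX.
 * @param   enmType     The page type (unused here).
 * @param   pu2State    Our page state (input/output).
 */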
4760int nemHCNativeNotifyPhysPageAllocated(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
4761 PGMPAGETYPE enmType, uint8_t *pu2State)
4762{
4763 Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
4764 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
4765 RT_NOREF_PV(HCPhys); RT_NOREF_PV(enmType);
4766
4767 int rc;
4768#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING0)
4769 PVMCPU pVCpu = VMMGetCpu(pVM);
4770 if ( pVM->nem.s.fA20Enabled
4771 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
4772 rc = nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
4773 else
4774 {
4775 /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
4776 rc = nemHCWinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
4777 if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys) && RT_SUCCESS(rc))
4778 rc = nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
4779
4780 }
4781#else
4782 RT_NOREF_PV(fPageProt);
4783 if ( pVM->nem.s.fA20Enabled
4784 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
4785 rc = nemR3JustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
4786 else if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
4787 rc = nemR3JustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
4788 else
4789 rc = VINF_SUCCESS; /* ignore since we've got the alias page at this address. */
4790#endif
4791 return rc;
4792}
4793
4794
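/**
 * Notification that the protection of a RAM page changed.
 *
 * Handled like allocation, except the backing is unchanged
 * (fBackingChanged=false), so an up-to-date mapping may be kept.
 */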
4795void nemHCNativeNotifyPhysPageProtChanged(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
4796 PGMPAGETYPE enmType, uint8_t *pu2State)
4797{
4798 Log5(("nemHCNativeNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
4799 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
4800 RT_NOREF_PV(HCPhys); RT_NOREF_PV(enmType);
4801
4802#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING0)
4803 PVMCPU pVCpu = VMMGetCpu(pVM);
4804 if ( pVM->nem.s.fA20Enabled
4805 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
4806 nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, false /*fBackingChanged*/);
4807 else
4808 {
4809 /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
4810 nemHCWinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
4811 if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
4812 nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, false /*fBackingChanged*/);
4813 }
4814#else
4815 RT_NOREF_PV(fPageProt);
4816 if ( pVM->nem.s.fA20Enabled
4817 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
4818 nemR3JustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
4819 else if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
4820 nemR3JustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
4821 /* else: ignore since we've got the alias page at this address. */
4822#endif
4823}
4824
4825
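/**
 * Notification that the host physical backing of a RAM page was replaced.
 *
 * Treated like a fresh allocation (fBackingChanged=true), forcing a remap
 * in the hypercall case and an unmap in the ring-3 API case.
 */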
4826void nemHCNativeNotifyPhysPageChanged(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
4827 uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
4828{
4829 Log5(("nemHCNativeNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
4830 GCPhys, HCPhysPrev, HCPhysNew, fPageProt, enmType, *pu2State));
4831 RT_NOREF_PV(HCPhysPrev); RT_NOREF_PV(HCPhysNew); RT_NOREF_PV(enmType);
4832
4833#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING0)
4834 PVMCPU pVCpu = VMMGetCpu(pVM);
4835 if ( pVM->nem.s.fA20Enabled
4836 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
4837 nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
4838 else
4839 {
4840 /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
4841 nemHCWinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
4842 if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
4843 nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
4844 }
4845#else
4846 RT_NOREF_PV(fPageProt);
4847 if ( pVM->nem.s.fA20Enabled
4848 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
4849 nemR3JustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
4850 else if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
4851 nemR3JustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
4852 /* else: ignore since we've got the alias page at this address. */
4853#endif
4854}
4855