VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/NEMAllNativeTemplate-win.cpp.h@72727

Last change on this file since 72727 was 72690, checked in by vboxsync, 7 years ago

NEM: Separate stats for odd exits and alerts when stopping the CPU. bugref:9044.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 225.3 KB
1/* $Id: NEMAllNativeTemplate-win.cpp.h 72690 2018-06-26 02:54:37Z vboxsync $ */
2/** @file
3 * NEM - Native execution manager, Windows code template ring-0/3.
4 */
5
6/*
7 * Copyright (C) 2018 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Defined Constants And Macros *
21*********************************************************************************************************************************/
22/** Copy back a segment from Hyper-V. */
23#define NEM_WIN_COPY_BACK_SEG(a_Dst, a_Src) \
24 do { \
25 (a_Dst).u64Base = (a_Src).Base; \
26 (a_Dst).u32Limit = (a_Src).Limit; \
27 (a_Dst).ValidSel = (a_Dst).Sel = (a_Src).Selector; \
28 (a_Dst).Attr.u = (a_Src).Attributes; \
29 (a_Dst).fFlags = CPUMSELREG_FLAGS_VALID; \
30 } while (0)
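
To make the copy direction concrete, here is a minimal, self-contained sketch of the same copy-back pattern using simplified stand-in structs; the real CPUMSELREG and WHV_X64_SEGMENT_REGISTER carry more fields and packing, so everything below is illustrative rather than the VirtualBox definitions:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-ins for WHV_X64_SEGMENT_REGISTER and CPUMSELREG. */
    struct HvSegment  { uint64_t Base; uint32_t Limit; uint16_t Selector; uint16_t Attributes; };
    struct VBoxSelReg { uint64_t u64Base; uint32_t u32Limit; uint16_t Sel, ValidSel; uint32_t AttrU; uint32_t fFlags; };

    #define SELREG_FLAGS_VALID 0x1 /* stand-in for CPUMSELREG_FLAGS_VALID */

    /* Same shape as NEM_WIN_COPY_BACK_SEG: hypervisor layout -> VMM layout. */
    static void copyBackSeg(VBoxSelReg &dst, const HvSegment &src)
    {
        dst.u64Base  = src.Base;
        dst.u32Limit = src.Limit;
        dst.ValidSel = dst.Sel = src.Selector;
        dst.AttrU    = src.Attributes;
        dst.fFlags   = SELREG_FLAGS_VALID; /* mark the hidden parts as loaded */
    }

    int main()
    {
        HvSegment  cs  = { 0, 0xffffffff, 0x08, 0xa09b }; /* a flat 64-bit code segment */
        VBoxSelReg dst = {};
        copyBackSeg(dst, cs);
        printf("sel=%#x base=%#llx limit=%#x attr=%#x\n",
               dst.Sel, (unsigned long long)dst.u64Base, dst.u32Limit, dst.AttrU);
        return 0;
    }
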
31
32/** @def NEMWIN_ASSERT_MSG_REG_VAL
33 * Asserts the correctness of a register value in a message/context.
34 */
35#if 0
36# define NEMWIN_NEED_GET_REGISTER
37# if defined(IN_RING0) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
38# define NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_pGVCpu, a_enmReg, a_Expr, a_Msg) \
39 do { \
40 HV_REGISTER_VALUE TmpVal; \
41 nemHCWinGetRegister(a_pVCpu, a_pGVCpu, a_enmReg, &TmpVal); \
42 AssertMsg(a_Expr, a_Msg); \
43 } while (0)
44# else
45# define NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_pGVCpu, a_enmReg, a_Expr, a_Msg) \
46 do { \
47 WHV_REGISTER_VALUE TmpVal; \
48 nemR3WinGetRegister(a_pVCpu, a_enmReg, &TmpVal); \
49 AssertMsg(a_Expr, a_Msg); \
50 } while (0)
51# endif
52#else
53# define NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_pGVCpu, a_enmReg, a_Expr, a_Msg) do { } while (0)
54#endif
55
56/** @def NEMWIN_ASSERT_MSG_REG_VAL64
57 * Asserts the correctness of a 64-bit register value in a message/context.
58 */
59#define NEMWIN_ASSERT_MSG_REG_VAL64(a_pVCpu, a_pGVCpu, a_enmReg, a_u64Val) \
60 NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_pGVCpu, a_enmReg, (a_u64Val) == TmpVal.Reg64, \
61 (#a_u64Val "=%#RX64, expected %#RX64\n", (a_u64Val), TmpVal.Reg64))
62/** @def NEMWIN_ASSERT_MSG_REG_SEG
63 * Asserts the correctness of a segment register value in a message/context.
64 */
65#define NEMWIN_ASSERT_MSG_REG_SEG(a_pVCpu, a_pGVCpu, a_enmReg, a_SReg) \
66 NEMWIN_ASSERT_MSG_REG_VAL(a_pVCpu, a_pGVCpu, a_enmReg, \
67 (a_SReg).Base == TmpVal.Segment.Base \
68 && (a_SReg).Limit == TmpVal.Segment.Limit \
69 && (a_SReg).Selector == TmpVal.Segment.Selector \
70 && (a_SReg).Attributes == TmpVal.Segment.Attributes, \
71 ( #a_SReg "=%#RX16 {%#RX64 LB %#RX32,%#RX16} expected %#RX16 {%#RX64 LB %#RX32,%#RX16}\n", \
72 (a_SReg).Selector, (a_SReg).Base, (a_SReg).Limit, (a_SReg).Attributes, \
73 TmpVal.Segment.Selector, TmpVal.Segment.Base, TmpVal.Segment.Limit, TmpVal.Segment.Attributes))
74
75
76/*********************************************************************************************************************************
77* Global Variables *
78*********************************************************************************************************************************/
79/** NEM_WIN_PAGE_STATE_XXX names. */
80NEM_TMPL_STATIC const char * const g_apszPageStates[4] = { "not-set", "unmapped", "readable", "writable" };
81
82/** HV_INTERCEPT_ACCESS_TYPE names. */
83static const char * const g_apszHvInterceptAccessTypes[4] = { "read", "write", "exec", "!undefined!" };
84
85
86/*********************************************************************************************************************************
87* Internal Functions *
88*********************************************************************************************************************************/
89NEM_TMPL_STATIC int nemHCNativeSetPhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
90 uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged);
91
92
93
94#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
95
96/**
97 * Wrapper around VMMR0_DO_NEM_MAP_PAGES for a single page.
98 *
99 * @returns VBox status code.
100 * @param pVM The cross context VM structure.
101 * @param pVCpu The cross context virtual CPU structure of the caller.
102 * @param GCPhysSrc The source page. Does not need to be page aligned.
103 * @param GCPhysDst The destination page. Same as @a GCPhysSrc except for
104 * when A20 is disabled.
105 * @param fFlags HV_MAP_GPA_XXX.
106 */
107DECLINLINE(int) nemHCWinHypercallMapPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst, uint32_t fFlags)
108{
109#ifdef IN_RING0
110 /** @todo optimize further, caller generally has the physical address. */
111 PGVM pGVM = GVMMR0FastGetGVMByVM(pVM);
112 AssertReturn(pGVM, VERR_INVALID_VM_HANDLE);
113 return nemR0WinMapPages(pGVM, pVM, &pGVM->aCpus[pVCpu->idCpu],
114 GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
115 GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
116 1, fFlags);
117#else
118 pVCpu->nem.s.Hypercall.MapPages.GCPhysSrc = GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
119 pVCpu->nem.s.Hypercall.MapPages.GCPhysDst = GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
120 pVCpu->nem.s.Hypercall.MapPages.cPages = 1;
121 pVCpu->nem.s.Hypercall.MapPages.fFlags = fFlags;
122 return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_MAP_PAGES, 0, NULL);
123#endif
124}
125
126
127/**
128 * Wrapper around VMMR0_DO_NEM_UNMAP_PAGES for a single page.
129 *
130 * @returns VBox status code.
131 * @param pVM The cross context VM structure.
132 * @param pVCpu The cross context virtual CPU structure of the caller.
133 * @param GCPhys The page to unmap. Does not need to be page aligned.
134 */
135DECLINLINE(int) nemHCWinHypercallUnmapPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
136{
137# ifdef IN_RING0
138 PGVM pGVM = GVMMR0FastGetGVMByVM(pVM);
139 AssertReturn(pGVM, VERR_INVALID_VM_HANDLE);
140 return nemR0WinUnmapPages(pGVM, &pGVM->aCpus[pVCpu->idCpu], GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, 1);
141# else
142 pVCpu->nem.s.Hypercall.UnmapPages.GCPhys = GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
143 pVCpu->nem.s.Hypercall.UnmapPages.cPages = 1;
144 return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_UNMAP_PAGES, 0, NULL);
145# endif
146}
147
148#endif /* NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
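
Both wrappers round the incoming guest-physical address down to a page boundary with GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK before handing it on. A short sketch of that arithmetic, assuming 4 KiB x86 pages; the names below are illustrative stand-ins:

    #include <stdint.h>
    #include <assert.h>

    typedef uint64_t GCPhysAddr;                       /* stand-in for RTGCPHYS */
    static const GCPhysAddr PAGE_OFFSET_MASK = 0xfff;  /* as X86_PAGE_OFFSET_MASK for 4 KiB pages */

    static GCPhysAddr pageAlignDown(GCPhysAddr GCPhys)
    {
        return GCPhys & ~PAGE_OFFSET_MASK;             /* clear the low 12 bits */
    }

    int main()
    {
        assert(pageAlignDown(UINT64_C(0x12345678)) == UINT64_C(0x12345000));
        assert(pageAlignDown(UINT64_C(0x12345000)) == UINT64_C(0x12345000)); /* already aligned */
        return 0;
    }
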
149#ifndef IN_RING0
150
151NEM_TMPL_STATIC int nemHCWinCopyStateToHyperV(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
152{
153# ifdef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS
154 NOREF(pCtx);
155 int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_EXPORT_STATE, 0, NULL);
156 AssertLogRelRCReturn(rc, rc);
157 return rc;
158
159# else
160 /*
161 * The following is very similar to what nemR0WinExportState() does.
162 */
163 WHV_REGISTER_NAME aenmNames[128];
164 WHV_REGISTER_VALUE aValues[128];
165
166 uint64_t const fWhat = ~pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK);
167 if ( !fWhat
168 && pVCpu->nem.s.fCurrentInterruptWindows == pVCpu->nem.s.fDesiredInterruptWindows)
169 return VINF_SUCCESS;
170 uintptr_t iReg = 0;
171
172# define ADD_REG64(a_enmName, a_uValue) do { \
173 aenmNames[iReg] = (a_enmName); \
174 aValues[iReg].Reg128.High64 = 0; \
175 aValues[iReg].Reg64 = (a_uValue); \
176 iReg++; \
177 } while (0)
178# define ADD_REG128(a_enmName, a_uValueLo, a_uValueHi) do { \
179 aenmNames[iReg] = (a_enmName); \
180 aValues[iReg].Reg128.Low64 = (a_uValueLo); \
181 aValues[iReg].Reg128.High64 = (a_uValueHi); \
182 iReg++; \
183 } while (0)
184
185 /* GPRs */
186 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
187 {
188 if (fWhat & CPUMCTX_EXTRN_RAX)
189 ADD_REG64(WHvX64RegisterRax, pCtx->rax);
190 if (fWhat & CPUMCTX_EXTRN_RCX)
191 ADD_REG64(WHvX64RegisterRcx, pCtx->rcx);
192 if (fWhat & CPUMCTX_EXTRN_RDX)
193 ADD_REG64(WHvX64RegisterRdx, pCtx->rdx);
194 if (fWhat & CPUMCTX_EXTRN_RBX)
195 ADD_REG64(WHvX64RegisterRbx, pCtx->rbx);
196 if (fWhat & CPUMCTX_EXTRN_RSP)
197 ADD_REG64(WHvX64RegisterRsp, pCtx->rsp);
198 if (fWhat & CPUMCTX_EXTRN_RBP)
199 ADD_REG64(WHvX64RegisterRbp, pCtx->rbp);
200 if (fWhat & CPUMCTX_EXTRN_RSI)
201 ADD_REG64(WHvX64RegisterRsi, pCtx->rsi);
202 if (fWhat & CPUMCTX_EXTRN_RDI)
203 ADD_REG64(WHvX64RegisterRdi, pCtx->rdi);
204 if (fWhat & CPUMCTX_EXTRN_R8_R15)
205 {
206 ADD_REG64(WHvX64RegisterR8, pCtx->r8);
207 ADD_REG64(WHvX64RegisterR9, pCtx->r9);
208 ADD_REG64(WHvX64RegisterR10, pCtx->r10);
209 ADD_REG64(WHvX64RegisterR11, pCtx->r11);
210 ADD_REG64(WHvX64RegisterR12, pCtx->r12);
211 ADD_REG64(WHvX64RegisterR13, pCtx->r13);
212 ADD_REG64(WHvX64RegisterR14, pCtx->r14);
213 ADD_REG64(WHvX64RegisterR15, pCtx->r15);
214 }
215 }
216
217 /* RIP & Flags */
218 if (fWhat & CPUMCTX_EXTRN_RIP)
219 ADD_REG64(WHvX64RegisterRip, pCtx->rip);
220 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
221 ADD_REG64(WHvX64RegisterRflags, pCtx->rflags.u);
222
223 /* Segments */
224# define ADD_SEG(a_enmName, a_SReg) \
225 do { \
226 aenmNames[iReg] = a_enmName; \
227 aValues[iReg].Segment.Base = (a_SReg).u64Base; \
228 aValues[iReg].Segment.Limit = (a_SReg).u32Limit; \
229 aValues[iReg].Segment.Selector = (a_SReg).Sel; \
230 aValues[iReg].Segment.Attributes = (a_SReg).Attr.u; \
231 iReg++; \
232 } while (0)
233 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
234 {
235 if (fWhat & CPUMCTX_EXTRN_ES)
236 ADD_SEG(WHvX64RegisterEs, pCtx->es);
237 if (fWhat & CPUMCTX_EXTRN_CS)
238 ADD_SEG(WHvX64RegisterCs, pCtx->cs);
239 if (fWhat & CPUMCTX_EXTRN_SS)
240 ADD_SEG(WHvX64RegisterSs, pCtx->ss);
241 if (fWhat & CPUMCTX_EXTRN_DS)
242 ADD_SEG(WHvX64RegisterDs, pCtx->ds);
243 if (fWhat & CPUMCTX_EXTRN_FS)
244 ADD_SEG(WHvX64RegisterFs, pCtx->fs);
245 if (fWhat & CPUMCTX_EXTRN_GS)
246 ADD_SEG(WHvX64RegisterGs, pCtx->gs);
247 }
248
249 /* Descriptor tables & task segment. */
250 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
251 {
252 if (fWhat & CPUMCTX_EXTRN_LDTR)
253 ADD_SEG(WHvX64RegisterLdtr, pCtx->ldtr);
254 if (fWhat & CPUMCTX_EXTRN_TR)
255 ADD_SEG(WHvX64RegisterTr, pCtx->tr);
256 if (fWhat & CPUMCTX_EXTRN_IDTR)
257 {
258 aenmNames[iReg] = WHvX64RegisterIdtr;
259 aValues[iReg].Table.Limit = pCtx->idtr.cbIdt;
260 aValues[iReg].Table.Base = pCtx->idtr.pIdt;
261 iReg++;
262 }
263 if (fWhat & CPUMCTX_EXTRN_GDTR)
264 {
265 aenmNames[iReg] = WHvX64RegisterGdtr;
266 aValues[iReg].Table.Limit = pCtx->gdtr.cbGdt;
267 aValues[iReg].Table.Base = pCtx->gdtr.pGdt;
268 iReg++;
269 }
270 }
271
272 /* Control registers. */
273 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
274 {
275 if (fWhat & CPUMCTX_EXTRN_CR0)
276 ADD_REG64(WHvX64RegisterCr0, pCtx->cr0);
277 if (fWhat & CPUMCTX_EXTRN_CR2)
278 ADD_REG64(WHvX64RegisterCr2, pCtx->cr2);
279 if (fWhat & CPUMCTX_EXTRN_CR3)
280 ADD_REG64(WHvX64RegisterCr3, pCtx->cr3);
281 if (fWhat & CPUMCTX_EXTRN_CR4)
282 ADD_REG64(WHvX64RegisterCr4, pCtx->cr4);
283 }
284 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
285 ADD_REG64(WHvX64RegisterCr8, CPUMGetGuestCR8(pVCpu));
286
287 /* Debug registers. */
288/** @todo fixme. Figure out what the hyper-v version of KVM_SET_GUEST_DEBUG would be. */
289 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
290 {
291 ADD_REG64(WHvX64RegisterDr0, pCtx->dr[0]); // CPUMGetHyperDR0(pVCpu));
292 ADD_REG64(WHvX64RegisterDr1, pCtx->dr[1]); // CPUMGetHyperDR1(pVCpu));
293 ADD_REG64(WHvX64RegisterDr2, pCtx->dr[2]); // CPUMGetHyperDR2(pVCpu));
294 ADD_REG64(WHvX64RegisterDr3, pCtx->dr[3]); // CPUMGetHyperDR3(pVCpu));
295 }
296 if (fWhat & CPUMCTX_EXTRN_DR6)
297 ADD_REG64(WHvX64RegisterDr6, pCtx->dr[6]); // CPUMGetHyperDR6(pVCpu));
298 if (fWhat & CPUMCTX_EXTRN_DR7)
299 ADD_REG64(WHvX64RegisterDr7, pCtx->dr[7]); // CPUMGetHyperDR7(pVCpu));
300
301 /* Floating point state. */
302 if (fWhat & CPUMCTX_EXTRN_X87)
303 {
304 ADD_REG128(WHvX64RegisterFpMmx0, pCtx->pXStateR3->x87.aRegs[0].au64[0], pCtx->pXStateR3->x87.aRegs[0].au64[1]);
305 ADD_REG128(WHvX64RegisterFpMmx1, pCtx->pXStateR3->x87.aRegs[1].au64[0], pCtx->pXStateR3->x87.aRegs[1].au64[1]);
306 ADD_REG128(WHvX64RegisterFpMmx2, pCtx->pXStateR3->x87.aRegs[2].au64[0], pCtx->pXStateR3->x87.aRegs[2].au64[1]);
307 ADD_REG128(WHvX64RegisterFpMmx3, pCtx->pXStateR3->x87.aRegs[3].au64[0], pCtx->pXStateR3->x87.aRegs[3].au64[1]);
308 ADD_REG128(WHvX64RegisterFpMmx4, pCtx->pXStateR3->x87.aRegs[4].au64[0], pCtx->pXStateR3->x87.aRegs[4].au64[1]);
309 ADD_REG128(WHvX64RegisterFpMmx5, pCtx->pXStateR3->x87.aRegs[5].au64[0], pCtx->pXStateR3->x87.aRegs[5].au64[1]);
310 ADD_REG128(WHvX64RegisterFpMmx6, pCtx->pXStateR3->x87.aRegs[6].au64[0], pCtx->pXStateR3->x87.aRegs[6].au64[1]);
311 ADD_REG128(WHvX64RegisterFpMmx7, pCtx->pXStateR3->x87.aRegs[7].au64[0], pCtx->pXStateR3->x87.aRegs[7].au64[1]);
312
313 aenmNames[iReg] = WHvX64RegisterFpControlStatus;
314 aValues[iReg].FpControlStatus.FpControl = pCtx->pXStateR3->x87.FCW;
315 aValues[iReg].FpControlStatus.FpStatus = pCtx->pXStateR3->x87.FSW;
316 aValues[iReg].FpControlStatus.FpTag = pCtx->pXStateR3->x87.FTW;
317 aValues[iReg].FpControlStatus.Reserved = pCtx->pXStateR3->x87.FTW >> 8;
318 aValues[iReg].FpControlStatus.LastFpOp = pCtx->pXStateR3->x87.FOP;
319 aValues[iReg].FpControlStatus.LastFpRip = (pCtx->pXStateR3->x87.FPUIP)
320 | ((uint64_t)pCtx->pXStateR3->x87.CS << 32)
321 | ((uint64_t)pCtx->pXStateR3->x87.Rsrvd1 << 48);
322 iReg++;
323
324 aenmNames[iReg] = WHvX64RegisterXmmControlStatus;
325 aValues[iReg].XmmControlStatus.LastFpRdp = (pCtx->pXStateR3->x87.FPUDP)
326 | ((uint64_t)pCtx->pXStateR3->x87.DS << 32)
327 | ((uint64_t)pCtx->pXStateR3->x87.Rsrvd2 << 48);
328 aValues[iReg].XmmControlStatus.XmmStatusControl = pCtx->pXStateR3->x87.MXCSR;
329 aValues[iReg].XmmControlStatus.XmmStatusControlMask = pCtx->pXStateR3->x87.MXCSR_MASK; /** @todo ??? (Isn't this an output field?) */
330 iReg++;
331 }
332
333 /* Vector state. */
334 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
335 {
336 ADD_REG128(WHvX64RegisterXmm0, pCtx->pXStateR3->x87.aXMM[ 0].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 0].uXmm.s.Hi);
337 ADD_REG128(WHvX64RegisterXmm1, pCtx->pXStateR3->x87.aXMM[ 1].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 1].uXmm.s.Hi);
338 ADD_REG128(WHvX64RegisterXmm2, pCtx->pXStateR3->x87.aXMM[ 2].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 2].uXmm.s.Hi);
339 ADD_REG128(WHvX64RegisterXmm3, pCtx->pXStateR3->x87.aXMM[ 3].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 3].uXmm.s.Hi);
340 ADD_REG128(WHvX64RegisterXmm4, pCtx->pXStateR3->x87.aXMM[ 4].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 4].uXmm.s.Hi);
341 ADD_REG128(WHvX64RegisterXmm5, pCtx->pXStateR3->x87.aXMM[ 5].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 5].uXmm.s.Hi);
342 ADD_REG128(WHvX64RegisterXmm6, pCtx->pXStateR3->x87.aXMM[ 6].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 6].uXmm.s.Hi);
343 ADD_REG128(WHvX64RegisterXmm7, pCtx->pXStateR3->x87.aXMM[ 7].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 7].uXmm.s.Hi);
344 ADD_REG128(WHvX64RegisterXmm8, pCtx->pXStateR3->x87.aXMM[ 8].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 8].uXmm.s.Hi);
345 ADD_REG128(WHvX64RegisterXmm9, pCtx->pXStateR3->x87.aXMM[ 9].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 9].uXmm.s.Hi);
346 ADD_REG128(WHvX64RegisterXmm10, pCtx->pXStateR3->x87.aXMM[10].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[10].uXmm.s.Hi);
347 ADD_REG128(WHvX64RegisterXmm11, pCtx->pXStateR3->x87.aXMM[11].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[11].uXmm.s.Hi);
348 ADD_REG128(WHvX64RegisterXmm12, pCtx->pXStateR3->x87.aXMM[12].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[12].uXmm.s.Hi);
349 ADD_REG128(WHvX64RegisterXmm13, pCtx->pXStateR3->x87.aXMM[13].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[13].uXmm.s.Hi);
350 ADD_REG128(WHvX64RegisterXmm14, pCtx->pXStateR3->x87.aXMM[14].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[14].uXmm.s.Hi);
351 ADD_REG128(WHvX64RegisterXmm15, pCtx->pXStateR3->x87.aXMM[15].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[15].uXmm.s.Hi);
352 }
353
354 /* MSRs */
355 // WHvX64RegisterTsc - don't touch
356 if (fWhat & CPUMCTX_EXTRN_EFER)
357 ADD_REG64(WHvX64RegisterEfer, pCtx->msrEFER);
358 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
359 ADD_REG64(WHvX64RegisterKernelGsBase, pCtx->msrKERNELGSBASE);
360 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
361 {
362 ADD_REG64(WHvX64RegisterSysenterCs, pCtx->SysEnter.cs);
363 ADD_REG64(WHvX64RegisterSysenterEip, pCtx->SysEnter.eip);
364 ADD_REG64(WHvX64RegisterSysenterEsp, pCtx->SysEnter.esp);
365 }
366 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
367 {
368 ADD_REG64(WHvX64RegisterStar, pCtx->msrSTAR);
369 ADD_REG64(WHvX64RegisterLstar, pCtx->msrLSTAR);
370 ADD_REG64(WHvX64RegisterCstar, pCtx->msrCSTAR);
371 ADD_REG64(WHvX64RegisterSfmask, pCtx->msrSFMASK);
372 }
373 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
374 {
375 ADD_REG64(WHvX64RegisterApicBase, APICGetBaseMsrNoCheck(pVCpu));
376 ADD_REG64(WHvX64RegisterPat, pCtx->msrPAT);
377#if 0 /** @todo check if WHvX64RegisterMsrMtrrCap works here... */
378 ADD_REG64(WHvX64RegisterMsrMtrrCap, CPUMGetGuestIa32MtrrCap(pVCpu));
379#endif
380 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
381 ADD_REG64(WHvX64RegisterMsrMtrrDefType, pCtxMsrs->msr.MtrrDefType);
382 ADD_REG64(WHvX64RegisterMsrMtrrFix64k00000, pCtxMsrs->msr.MtrrFix64K_00000);
383 ADD_REG64(WHvX64RegisterMsrMtrrFix16k80000, pCtxMsrs->msr.MtrrFix16K_80000);
384 ADD_REG64(WHvX64RegisterMsrMtrrFix16kA0000, pCtxMsrs->msr.MtrrFix16K_A0000);
385 ADD_REG64(WHvX64RegisterMsrMtrrFix4kC0000, pCtxMsrs->msr.MtrrFix4K_C0000);
386 ADD_REG64(WHvX64RegisterMsrMtrrFix4kC8000, pCtxMsrs->msr.MtrrFix4K_C8000);
387 ADD_REG64(WHvX64RegisterMsrMtrrFix4kD0000, pCtxMsrs->msr.MtrrFix4K_D0000);
388 ADD_REG64(WHvX64RegisterMsrMtrrFix4kD8000, pCtxMsrs->msr.MtrrFix4K_D8000);
389 ADD_REG64(WHvX64RegisterMsrMtrrFix4kE0000, pCtxMsrs->msr.MtrrFix4K_E0000);
390 ADD_REG64(WHvX64RegisterMsrMtrrFix4kE8000, pCtxMsrs->msr.MtrrFix4K_E8000);
391 ADD_REG64(WHvX64RegisterMsrMtrrFix4kF0000, pCtxMsrs->msr.MtrrFix4K_F0000);
392 ADD_REG64(WHvX64RegisterMsrMtrrFix4kF8000, pCtxMsrs->msr.MtrrFix4K_F8000);
393 ADD_REG64(WHvX64RegisterTscAux, pCtxMsrs->msr.TscAux);
394#if 0 /** @todo these registers aren't available? Might explain something... */
395 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM->pVM);
396 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
397 {
398 ADD_REG64(HvX64RegisterIa32MiscEnable, pCtxMsrs->msr.MiscEnable);
399 ADD_REG64(HvX64RegisterIa32FeatureControl, CPUMGetGuestIa32FeatureControl(pVCpu));
400 }
401#endif
402 }
403
404 /* event injection (clear it). */
405 if (fWhat & CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)
406 ADD_REG64(WHvRegisterPendingInterruption, 0);
407
408 /* Interruptibility state. This can get a little complicated since we get
409 half of the state via HV_X64_VP_EXECUTION_STATE. */
410 if ( (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
411 == (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI) )
412 {
413 ADD_REG64(WHvRegisterInterruptState, 0);
414 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
415 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
416 aValues[iReg - 1].InterruptState.InterruptShadow = 1;
417 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
418 aValues[iReg - 1].InterruptState.NmiMasked = 1;
419 }
420 else if (fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT)
421 {
422 if ( pVCpu->nem.s.fLastInterruptShadow
423 || ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
424 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip))
425 {
426 ADD_REG64(WHvRegisterInterruptState, 0);
427 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
428 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
429 aValues[iReg - 1].InterruptState.InterruptShadow = 1;
430 /** @todo Retrieve NMI state, currently assuming it's zero. (yes this may happen on I/O) */
431 //if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
432 // aValues[iReg - 1].InterruptState.NmiMasked = 1;
433 }
434 }
435 else
436 Assert(!(fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI));
437
438 /* Interrupt windows. Always set when active, as Hyper-V seems to be forgetful. */
439 uint8_t const fDesiredIntWin = pVCpu->nem.s.fDesiredInterruptWindows;
440 if ( fDesiredIntWin
441 || pVCpu->nem.s.fCurrentInterruptWindows != fDesiredIntWin)
442 {
443 pVCpu->nem.s.fCurrentInterruptWindows = pVCpu->nem.s.fDesiredInterruptWindows;
444 ADD_REG64(WHvX64RegisterDeliverabilityNotifications, fDesiredIntWin);
445 Assert(aValues[iReg - 1].DeliverabilityNotifications.NmiNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_NMI));
446 Assert(aValues[iReg - 1].DeliverabilityNotifications.InterruptNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_REGULAR));
447 Assert(aValues[iReg - 1].DeliverabilityNotifications.InterruptPriority == (fDesiredIntWin & NEM_WIN_INTW_F_PRIO_MASK) >> NEM_WIN_INTW_F_PRIO_SHIFT);
448 }
449
450 /// @todo WHvRegisterPendingEvent0
451 /// @todo WHvRegisterPendingEvent1
452
453 /*
454 * Set the registers.
455 */
456 Assert(iReg < RT_ELEMENTS(aValues));
457 Assert(iReg < RT_ELEMENTS(aenmNames));
458# ifdef NEM_WIN_INTERCEPT_NT_IO_CTLS
459 Log12(("Calling WHvSetVirtualProcessorRegisters(%p, %u, %p, %u, %p)\n",
460 pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues));
461# endif
462 HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues);
463 if (SUCCEEDED(hrc))
464 {
465 pCtx->fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM;
466 return VINF_SUCCESS;
467 }
468 AssertLogRelMsgFailed(("WHvSetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
469 pVM->nem.s.hPartition, pVCpu->idCpu, iReg,
470 hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
471 return VERR_INTERNAL_ERROR;
472
473# undef ADD_REG64
474# undef ADD_REG128
475# undef ADD_SEG
476
477# endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
478}
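
Outside VirtualBox, the batched register-write pattern above can be exercised directly against the Windows Hypervisor Platform API. A minimal sketch, assuming a partition and vCPU 0 were already set up with WHvCreatePartition/WHvCreateVirtualProcessor; hPartition is a placeholder and error handling is trimmed:

    #include <Windows.h>
    #include <WinHvPlatform.h>   /* link against WinHvPlatform.lib */

    /* Push RIP, RSP and RFLAGS to vCPU 0 in one batch, mirroring the
       ADD_REG64 accumulation used by nemHCWinCopyStateToHyperV. */
    static HRESULT setInitialCpuState(WHV_PARTITION_HANDLE hPartition)
    {
        WHV_REGISTER_NAME  aenmNames[3];
        WHV_REGISTER_VALUE aValues[3] = {};
        UINT32 iReg = 0;

        aenmNames[iReg] = WHvX64RegisterRip;    aValues[iReg].Reg64 = 0xfff0; iReg++;
        aenmNames[iReg] = WHvX64RegisterRsp;    aValues[iReg].Reg64 = 0;      iReg++;
        aenmNames[iReg] = WHvX64RegisterRflags; aValues[iReg].Reg64 = 0x2;    iReg++;

        /* One call for the whole batch is much cheaper than one per register. */
        return WHvSetVirtualProcessorRegisters(hPartition, 0 /*idCpu*/, aenmNames, iReg, aValues);
    }
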
479
480
481NEM_TMPL_STATIC int nemHCWinCopyStateFromHyperV(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fWhat)
482{
483# ifdef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS
484 /* See NEMR0ImportState */
485 NOREF(pCtx);
486 int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_IMPORT_STATE, fWhat, NULL);
487 if (RT_SUCCESS(rc))
488 return rc;
489 if (rc == VERR_NEM_FLUSH_TLB)
490 return PGMFlushTLB(pVCpu, pCtx->cr3, true /*fGlobal*/);
491 if (rc == VERR_NEM_CHANGE_PGM_MODE)
492 return PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
493 AssertLogRelRCReturn(rc, rc);
494 return rc;
495
496# else
497 WHV_REGISTER_NAME aenmNames[128];
498
499 fWhat &= pCtx->fExtrn;
500 uintptr_t iReg = 0;
501
502 /* GPRs */
503 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
504 {
505 if (fWhat & CPUMCTX_EXTRN_RAX)
506 aenmNames[iReg++] = WHvX64RegisterRax;
507 if (fWhat & CPUMCTX_EXTRN_RCX)
508 aenmNames[iReg++] = WHvX64RegisterRcx;
509 if (fWhat & CPUMCTX_EXTRN_RDX)
510 aenmNames[iReg++] = WHvX64RegisterRdx;
511 if (fWhat & CPUMCTX_EXTRN_RBX)
512 aenmNames[iReg++] = WHvX64RegisterRbx;
513 if (fWhat & CPUMCTX_EXTRN_RSP)
514 aenmNames[iReg++] = WHvX64RegisterRsp;
515 if (fWhat & CPUMCTX_EXTRN_RBP)
516 aenmNames[iReg++] = WHvX64RegisterRbp;
517 if (fWhat & CPUMCTX_EXTRN_RSI)
518 aenmNames[iReg++] = WHvX64RegisterRsi;
519 if (fWhat & CPUMCTX_EXTRN_RDI)
520 aenmNames[iReg++] = WHvX64RegisterRdi;
521 if (fWhat & CPUMCTX_EXTRN_R8_R15)
522 {
523 aenmNames[iReg++] = WHvX64RegisterR8;
524 aenmNames[iReg++] = WHvX64RegisterR9;
525 aenmNames[iReg++] = WHvX64RegisterR10;
526 aenmNames[iReg++] = WHvX64RegisterR11;
527 aenmNames[iReg++] = WHvX64RegisterR12;
528 aenmNames[iReg++] = WHvX64RegisterR13;
529 aenmNames[iReg++] = WHvX64RegisterR14;
530 aenmNames[iReg++] = WHvX64RegisterR15;
531 }
532 }
533
534 /* RIP & Flags */
535 if (fWhat & CPUMCTX_EXTRN_RIP)
536 aenmNames[iReg++] = WHvX64RegisterRip;
537 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
538 aenmNames[iReg++] = WHvX64RegisterRflags;
539
540 /* Segments */
541 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
542 {
543 if (fWhat & CPUMCTX_EXTRN_ES)
544 aenmNames[iReg++] = WHvX64RegisterEs;
545 if (fWhat & CPUMCTX_EXTRN_CS)
546 aenmNames[iReg++] = WHvX64RegisterCs;
547 if (fWhat & CPUMCTX_EXTRN_SS)
548 aenmNames[iReg++] = WHvX64RegisterSs;
549 if (fWhat & CPUMCTX_EXTRN_DS)
550 aenmNames[iReg++] = WHvX64RegisterDs;
551 if (fWhat & CPUMCTX_EXTRN_FS)
552 aenmNames[iReg++] = WHvX64RegisterFs;
553 if (fWhat & CPUMCTX_EXTRN_GS)
554 aenmNames[iReg++] = WHvX64RegisterGs;
555 }
556
557 /* Descriptor tables. */
558 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
559 {
560 if (fWhat & CPUMCTX_EXTRN_LDTR)
561 aenmNames[iReg++] = WHvX64RegisterLdtr;
562 if (fWhat & CPUMCTX_EXTRN_TR)
563 aenmNames[iReg++] = WHvX64RegisterTr;
564 if (fWhat & CPUMCTX_EXTRN_IDTR)
565 aenmNames[iReg++] = WHvX64RegisterIdtr;
566 if (fWhat & CPUMCTX_EXTRN_GDTR)
567 aenmNames[iReg++] = WHvX64RegisterGdtr;
568 }
569
570 /* Control registers. */
571 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
572 {
573 if (fWhat & CPUMCTX_EXTRN_CR0)
574 aenmNames[iReg++] = WHvX64RegisterCr0;
575 if (fWhat & CPUMCTX_EXTRN_CR2)
576 aenmNames[iReg++] = WHvX64RegisterCr2;
577 if (fWhat & CPUMCTX_EXTRN_CR3)
578 aenmNames[iReg++] = WHvX64RegisterCr3;
579 if (fWhat & CPUMCTX_EXTRN_CR4)
580 aenmNames[iReg++] = WHvX64RegisterCr4;
581 }
582 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
583 aenmNames[iReg++] = WHvX64RegisterCr8;
584
585 /* Debug registers. */
586 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
587 {
588 aenmNames[iReg++] = WHvX64RegisterDr0;
589 aenmNames[iReg++] = WHvX64RegisterDr1;
590 aenmNames[iReg++] = WHvX64RegisterDr2;
591 aenmNames[iReg++] = WHvX64RegisterDr3;
592 }
593 if (fWhat & CPUMCTX_EXTRN_DR6)
594 aenmNames[iReg++] = WHvX64RegisterDr6;
595 if (fWhat & CPUMCTX_EXTRN_DR7)
596 aenmNames[iReg++] = WHvX64RegisterDr7;
597
598 /* Floating point state. */
599 if (fWhat & CPUMCTX_EXTRN_X87)
600 {
601 aenmNames[iReg++] = WHvX64RegisterFpMmx0;
602 aenmNames[iReg++] = WHvX64RegisterFpMmx1;
603 aenmNames[iReg++] = WHvX64RegisterFpMmx2;
604 aenmNames[iReg++] = WHvX64RegisterFpMmx3;
605 aenmNames[iReg++] = WHvX64RegisterFpMmx4;
606 aenmNames[iReg++] = WHvX64RegisterFpMmx5;
607 aenmNames[iReg++] = WHvX64RegisterFpMmx6;
608 aenmNames[iReg++] = WHvX64RegisterFpMmx7;
609 aenmNames[iReg++] = WHvX64RegisterFpControlStatus;
610 }
611 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
612 aenmNames[iReg++] = WHvX64RegisterXmmControlStatus;
613
614 /* Vector state. */
615 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
616 {
617 aenmNames[iReg++] = WHvX64RegisterXmm0;
618 aenmNames[iReg++] = WHvX64RegisterXmm1;
619 aenmNames[iReg++] = WHvX64RegisterXmm2;
620 aenmNames[iReg++] = WHvX64RegisterXmm3;
621 aenmNames[iReg++] = WHvX64RegisterXmm4;
622 aenmNames[iReg++] = WHvX64RegisterXmm5;
623 aenmNames[iReg++] = WHvX64RegisterXmm6;
624 aenmNames[iReg++] = WHvX64RegisterXmm7;
625 aenmNames[iReg++] = WHvX64RegisterXmm8;
626 aenmNames[iReg++] = WHvX64RegisterXmm9;
627 aenmNames[iReg++] = WHvX64RegisterXmm10;
628 aenmNames[iReg++] = WHvX64RegisterXmm11;
629 aenmNames[iReg++] = WHvX64RegisterXmm12;
630 aenmNames[iReg++] = WHvX64RegisterXmm13;
631 aenmNames[iReg++] = WHvX64RegisterXmm14;
632 aenmNames[iReg++] = WHvX64RegisterXmm15;
633 }
634
635 /* MSRs */
636 // WHvX64RegisterTsc - don't touch
637 if (fWhat & CPUMCTX_EXTRN_EFER)
638 aenmNames[iReg++] = WHvX64RegisterEfer;
639 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
640 aenmNames[iReg++] = WHvX64RegisterKernelGsBase;
641 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
642 {
643 aenmNames[iReg++] = WHvX64RegisterSysenterCs;
644 aenmNames[iReg++] = WHvX64RegisterSysenterEip;
645 aenmNames[iReg++] = WHvX64RegisterSysenterEsp;
646 }
647 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
648 {
649 aenmNames[iReg++] = WHvX64RegisterStar;
650 aenmNames[iReg++] = WHvX64RegisterLstar;
651 aenmNames[iReg++] = WHvX64RegisterCstar;
652 aenmNames[iReg++] = WHvX64RegisterSfmask;
653 }
654
655//#ifdef LOG_ENABLED
656// const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM->pVM);
657//#endif
658 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
659 {
660 aenmNames[iReg++] = WHvX64RegisterApicBase; /// @todo APIC BASE
661 aenmNames[iReg++] = WHvX64RegisterPat;
662#if 0 /*def LOG_ENABLED*/ /** @todo Check if WHvX64RegisterMsrMtrrCap works... */
663 aenmNames[iReg++] = WHvX64RegisterMsrMtrrCap;
664#endif
665 aenmNames[iReg++] = WHvX64RegisterMsrMtrrDefType;
666 aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix64k00000;
667 aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix16k80000;
668 aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix16kA0000;
669 aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kC0000;
670 aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kC8000;
671 aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kD0000;
672 aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kD8000;
673 aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kE0000;
674 aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kE8000;
675 aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kF0000;
676 aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kF8000;
677 aenmNames[iReg++] = WHvX64RegisterTscAux;
678 /** @todo look for HvX64RegisterIa32MiscEnable and HvX64RegisterIa32FeatureControl? */
679//#ifdef LOG_ENABLED
680// if (enmCpuVendor != CPUMCPUVENDOR_AMD)
681// aenmNames[iReg++] = HvX64RegisterIa32FeatureControl;
682//#endif
683 }
684
685 /* Interruptibility. */
686 if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
687 {
688 aenmNames[iReg++] = WHvRegisterInterruptState;
689 aenmNames[iReg++] = WHvX64RegisterRip;
690 }
691
692 /* event injection */
693 aenmNames[iReg++] = WHvRegisterPendingInterruption;
694 aenmNames[iReg++] = WHvRegisterPendingEvent0;
695 aenmNames[iReg++] = WHvRegisterPendingEvent1;
696
697 size_t const cRegs = iReg;
698 Assert(cRegs < RT_ELEMENTS(aenmNames));
699
700 /*
701 * Get the registers.
702 */
703 WHV_REGISTER_VALUE aValues[128];
704 RT_ZERO(aValues);
705 Assert(RT_ELEMENTS(aValues) >= cRegs);
706 Assert(RT_ELEMENTS(aenmNames) >= cRegs);
707# ifdef NEM_WIN_INTERCEPT_NT_IO_CTLS
708 Log12(("Calling WHvGetVirtualProcessorRegisters(%p, %u, %p, %u, %p)\n",
709 pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, cRegs, aValues));
710# endif
711 HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, (uint32_t)cRegs, aValues);
712 AssertLogRelMsgReturn(SUCCEEDED(hrc),
713 ("WHvGetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
714 pVM->nem.s.hPartition, pVCpu->idCpu, cRegs, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
715 , VERR_NEM_GET_REGISTERS_FAILED);
716
717 iReg = 0;
718# define GET_REG64(a_DstVar, a_enmName) do { \
719 Assert(aenmNames[iReg] == (a_enmName)); \
720 (a_DstVar) = aValues[iReg].Reg64; \
721 iReg++; \
722 } while (0)
723# define GET_REG64_LOG7(a_DstVar, a_enmName, a_szLogName) do { \
724 Assert(aenmNames[iReg] == (a_enmName)); \
725 if ((a_DstVar) != aValues[iReg].Reg64) \
726 Log7(("NEM/%u: " a_szLogName " changed %RX64 -> %RX64\n", pVCpu->idCpu, (a_DstVar), aValues[iReg].Reg64)); \
727 (a_DstVar) = aValues[iReg].Reg64; \
728 iReg++; \
729 } while (0)
730# define GET_REG128(a_DstVarLo, a_DstVarHi, a_enmName) do { \
731 Assert(aenmNames[iReg] == a_enmName); \
732 (a_DstVarLo) = aValues[iReg].Reg128.Low64; \
733 (a_DstVarHi) = aValues[iReg].Reg128.High64; \
734 iReg++; \
735 } while (0)
736# define GET_SEG(a_SReg, a_enmName) do { \
737 Assert(aenmNames[iReg] == (a_enmName)); \
738 NEM_WIN_COPY_BACK_SEG(a_SReg, aValues[iReg].Segment); \
739 iReg++; \
740 } while (0)
741
742 /* GPRs */
743 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
744 {
745 if (fWhat & CPUMCTX_EXTRN_RAX)
746 GET_REG64(pCtx->rax, WHvX64RegisterRax);
747 if (fWhat & CPUMCTX_EXTRN_RCX)
748 GET_REG64(pCtx->rcx, WHvX64RegisterRcx);
749 if (fWhat & CPUMCTX_EXTRN_RDX)
750 GET_REG64(pCtx->rdx, WHvX64RegisterRdx);
751 if (fWhat & CPUMCTX_EXTRN_RBX)
752 GET_REG64(pCtx->rbx, WHvX64RegisterRbx);
753 if (fWhat & CPUMCTX_EXTRN_RSP)
754 GET_REG64(pCtx->rsp, WHvX64RegisterRsp);
755 if (fWhat & CPUMCTX_EXTRN_RBP)
756 GET_REG64(pCtx->rbp, WHvX64RegisterRbp);
757 if (fWhat & CPUMCTX_EXTRN_RSI)
758 GET_REG64(pCtx->rsi, WHvX64RegisterRsi);
759 if (fWhat & CPUMCTX_EXTRN_RDI)
760 GET_REG64(pCtx->rdi, WHvX64RegisterRdi);
761 if (fWhat & CPUMCTX_EXTRN_R8_R15)
762 {
763 GET_REG64(pCtx->r8, WHvX64RegisterR8);
764 GET_REG64(pCtx->r9, WHvX64RegisterR9);
765 GET_REG64(pCtx->r10, WHvX64RegisterR10);
766 GET_REG64(pCtx->r11, WHvX64RegisterR11);
767 GET_REG64(pCtx->r12, WHvX64RegisterR12);
768 GET_REG64(pCtx->r13, WHvX64RegisterR13);
769 GET_REG64(pCtx->r14, WHvX64RegisterR14);
770 GET_REG64(pCtx->r15, WHvX64RegisterR15);
771 }
772 }
773
774 /* RIP & Flags */
775 if (fWhat & CPUMCTX_EXTRN_RIP)
776 GET_REG64(pCtx->rip, WHvX64RegisterRip);
777 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
778 GET_REG64(pCtx->rflags.u, WHvX64RegisterRflags);
779
780 /* Segments */
781 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
782 {
783 if (fWhat & CPUMCTX_EXTRN_ES)
784 GET_SEG(pCtx->es, WHvX64RegisterEs);
785 if (fWhat & CPUMCTX_EXTRN_CS)
786 GET_SEG(pCtx->cs, WHvX64RegisterCs);
787 if (fWhat & CPUMCTX_EXTRN_SS)
788 GET_SEG(pCtx->ss, WHvX64RegisterSs);
789 if (fWhat & CPUMCTX_EXTRN_DS)
790 GET_SEG(pCtx->ds, WHvX64RegisterDs);
791 if (fWhat & CPUMCTX_EXTRN_FS)
792 GET_SEG(pCtx->fs, WHvX64RegisterFs);
793 if (fWhat & CPUMCTX_EXTRN_GS)
794 GET_SEG(pCtx->gs, WHvX64RegisterGs);
795 }
796
797 /* Descriptor tables and the task segment. */
798 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
799 {
800 if (fWhat & CPUMCTX_EXTRN_LDTR)
801 GET_SEG(pCtx->ldtr, WHvX64RegisterLdtr);
802
803 if (fWhat & CPUMCTX_EXTRN_TR)
804 {
805 /* AMD-V likes loading TR in the AVAIL state, whereas Intel insists on BUSY. So,
806 to avoid triggering sanity assertions around the code, always fix it up here. */
807 GET_SEG(pCtx->tr, WHvX64RegisterTr);
808 switch (pCtx->tr.Attr.n.u4Type)
809 {
810 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
811 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
812 break;
813 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
814 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
815 break;
816 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
817 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;
818 break;
819 }
820 }
821 if (fWhat & CPUMCTX_EXTRN_IDTR)
822 {
823 Assert(aenmNames[iReg] == WHvX64RegisterIdtr);
824 pCtx->idtr.cbIdt = aValues[iReg].Table.Limit;
825 pCtx->idtr.pIdt = aValues[iReg].Table.Base;
826 iReg++;
827 }
828 if (fWhat & CPUMCTX_EXTRN_GDTR)
829 {
830 Assert(aenmNames[iReg] == WHvX64RegisterGdtr);
831 pCtx->gdtr.cbGdt = aValues[iReg].Table.Limit;
832 pCtx->gdtr.pGdt = aValues[iReg].Table.Base;
833 iReg++;
834 }
835 }
836
837 /* Control registers. */
838 bool fMaybeChangedMode = false;
839 bool fFlushTlb = false;
840 bool fFlushGlobalTlb = false;
841 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
842 {
843 if (fWhat & CPUMCTX_EXTRN_CR0)
844 {
845 Assert(aenmNames[iReg] == WHvX64RegisterCr0);
846 if (pCtx->cr0 != aValues[iReg].Reg64)
847 {
848 CPUMSetGuestCR0(pVCpu, aValues[iReg].Reg64);
849 fMaybeChangedMode = true;
850 fFlushTlb = fFlushGlobalTlb = true; /// @todo fix this
851 }
852 iReg++;
853 }
854 if (fWhat & CPUMCTX_EXTRN_CR2)
855 GET_REG64(pCtx->cr2, WHvX64RegisterCr2);
856 if (fWhat & CPUMCTX_EXTRN_CR3)
857 {
858 if (pCtx->cr3 != aValues[iReg].Reg64)
859 {
860 CPUMSetGuestCR3(pVCpu, aValues[iReg].Reg64);
861 fFlushTlb = true;
862 }
863 iReg++;
864 }
865 if (fWhat & CPUMCTX_EXTRN_CR4)
866 {
867 if (pCtx->cr4 != aValues[iReg].Reg64)
868 {
869 CPUMSetGuestCR4(pVCpu, aValues[iReg].Reg64);
870 fMaybeChangedMode = true;
871 fFlushTlb = fFlushGlobalTlb = true; /// @todo fix this
872 }
873 iReg++;
874 }
875 }
876 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
877 {
878 Assert(aenmNames[iReg] == WHvX64RegisterCr8);
879 APICSetTpr(pVCpu, (uint8_t)aValues[iReg].Reg64 << 4);
880 iReg++;
881 }
882
883 /* Debug registers. */
884 /** @todo fixme */
885 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
886 {
887 Assert(aenmNames[iReg] == WHvX64RegisterDr0);
888 Assert(aenmNames[iReg+3] == WHvX64RegisterDr3);
889 if (pCtx->dr[0] != aValues[iReg].Reg64)
890 CPUMSetGuestDR0(pVCpu, aValues[iReg].Reg64);
891 iReg++;
892 if (pCtx->dr[1] != aValues[iReg].Reg64)
893 CPUMSetGuestDR1(pVCpu, aValues[iReg].Reg64);
894 iReg++;
895 if (pCtx->dr[2] != aValues[iReg].Reg64)
896 CPUMSetGuestDR2(pVCpu, aValues[iReg].Reg64);
897 iReg++;
898 if (pCtx->dr[3] != aValues[iReg].Reg64)
899 CPUMSetGuestDR3(pVCpu, aValues[iReg].Reg64);
900 iReg++;
901 }
902 if (fWhat & CPUMCTX_EXTRN_DR6)
903 {
904 Assert(aenmNames[iReg] == WHvX64RegisterDr6);
905 if (pCtx->dr[6] != aValues[iReg].Reg64)
906 CPUMSetGuestDR6(pVCpu, aValues[iReg].Reg64);
907 iReg++;
908 }
909 if (fWhat & CPUMCTX_EXTRN_DR7)
910 {
911 Assert(aenmNames[iReg] == WHvX64RegisterDr7);
912 if (pCtx->dr[7] != aValues[iReg].Reg64)
913 CPUMSetGuestDR7(pVCpu, aValues[iReg].Reg64);
914 iReg++;
915 }
916
917 /* Floating point state. */
918 if (fWhat & CPUMCTX_EXTRN_X87)
919 {
920 GET_REG128(pCtx->pXStateR3->x87.aRegs[0].au64[0], pCtx->pXStateR3->x87.aRegs[0].au64[1], WHvX64RegisterFpMmx0);
921 GET_REG128(pCtx->pXStateR3->x87.aRegs[1].au64[0], pCtx->pXStateR3->x87.aRegs[1].au64[1], WHvX64RegisterFpMmx1);
922 GET_REG128(pCtx->pXStateR3->x87.aRegs[2].au64[0], pCtx->pXStateR3->x87.aRegs[2].au64[1], WHvX64RegisterFpMmx2);
923 GET_REG128(pCtx->pXStateR3->x87.aRegs[3].au64[0], pCtx->pXStateR3->x87.aRegs[3].au64[1], WHvX64RegisterFpMmx3);
924 GET_REG128(pCtx->pXStateR3->x87.aRegs[4].au64[0], pCtx->pXStateR3->x87.aRegs[4].au64[1], WHvX64RegisterFpMmx4);
925 GET_REG128(pCtx->pXStateR3->x87.aRegs[5].au64[0], pCtx->pXStateR3->x87.aRegs[5].au64[1], WHvX64RegisterFpMmx5);
926 GET_REG128(pCtx->pXStateR3->x87.aRegs[6].au64[0], pCtx->pXStateR3->x87.aRegs[6].au64[1], WHvX64RegisterFpMmx6);
927 GET_REG128(pCtx->pXStateR3->x87.aRegs[7].au64[0], pCtx->pXStateR3->x87.aRegs[7].au64[1], WHvX64RegisterFpMmx7);
928
929 Assert(aenmNames[iReg] == WHvX64RegisterFpControlStatus);
930 pCtx->pXStateR3->x87.FCW = aValues[iReg].FpControlStatus.FpControl;
931 pCtx->pXStateR3->x87.FSW = aValues[iReg].FpControlStatus.FpStatus;
932 pCtx->pXStateR3->x87.FTW = aValues[iReg].FpControlStatus.FpTag
933 /*| (aValues[iReg].FpControlStatus.Reserved << 8)*/;
934 pCtx->pXStateR3->x87.FOP = aValues[iReg].FpControlStatus.LastFpOp;
935 pCtx->pXStateR3->x87.FPUIP = (uint32_t)aValues[iReg].FpControlStatus.LastFpRip;
936 pCtx->pXStateR3->x87.CS = (uint16_t)(aValues[iReg].FpControlStatus.LastFpRip >> 32);
937 pCtx->pXStateR3->x87.Rsrvd1 = (uint16_t)(aValues[iReg].FpControlStatus.LastFpRip >> 48);
938 iReg++;
939 }
940
941 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
942 {
943 Assert(aenmNames[iReg] == WHvX64RegisterXmmControlStatus);
944 if (fWhat & CPUMCTX_EXTRN_X87)
945 {
946 pCtx->pXStateR3->x87.FPUDP = (uint32_t)aValues[iReg].XmmControlStatus.LastFpRdp;
947 pCtx->pXStateR3->x87.DS = (uint16_t)(aValues[iReg].XmmControlStatus.LastFpRdp >> 32);
948 pCtx->pXStateR3->x87.Rsrvd2 = (uint16_t)(aValues[iReg].XmmControlStatus.LastFpRdp >> 48);
949 }
950 pCtx->pXStateR3->x87.MXCSR = aValues[iReg].XmmControlStatus.XmmStatusControl;
951 pCtx->pXStateR3->x87.MXCSR_MASK = aValues[iReg].XmmControlStatus.XmmStatusControlMask; /** @todo ??? (Isn't this an output field?) */
952 iReg++;
953 }
954
955 /* Vector state. */
956 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
957 {
958 GET_REG128(pCtx->pXStateR3->x87.aXMM[ 0].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 0].uXmm.s.Hi, WHvX64RegisterXmm0);
959 GET_REG128(pCtx->pXStateR3->x87.aXMM[ 1].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 1].uXmm.s.Hi, WHvX64RegisterXmm1);
960 GET_REG128(pCtx->pXStateR3->x87.aXMM[ 2].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 2].uXmm.s.Hi, WHvX64RegisterXmm2);
961 GET_REG128(pCtx->pXStateR3->x87.aXMM[ 3].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 3].uXmm.s.Hi, WHvX64RegisterXmm3);
962 GET_REG128(pCtx->pXStateR3->x87.aXMM[ 4].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 4].uXmm.s.Hi, WHvX64RegisterXmm4);
963 GET_REG128(pCtx->pXStateR3->x87.aXMM[ 5].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 5].uXmm.s.Hi, WHvX64RegisterXmm5);
964 GET_REG128(pCtx->pXStateR3->x87.aXMM[ 6].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 6].uXmm.s.Hi, WHvX64RegisterXmm6);
965 GET_REG128(pCtx->pXStateR3->x87.aXMM[ 7].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 7].uXmm.s.Hi, WHvX64RegisterXmm7);
966 GET_REG128(pCtx->pXStateR3->x87.aXMM[ 8].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 8].uXmm.s.Hi, WHvX64RegisterXmm8);
967 GET_REG128(pCtx->pXStateR3->x87.aXMM[ 9].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 9].uXmm.s.Hi, WHvX64RegisterXmm9);
968 GET_REG128(pCtx->pXStateR3->x87.aXMM[10].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[10].uXmm.s.Hi, WHvX64RegisterXmm10);
969 GET_REG128(pCtx->pXStateR3->x87.aXMM[11].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[11].uXmm.s.Hi, WHvX64RegisterXmm11);
970 GET_REG128(pCtx->pXStateR3->x87.aXMM[12].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[12].uXmm.s.Hi, WHvX64RegisterXmm12);
971 GET_REG128(pCtx->pXStateR3->x87.aXMM[13].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[13].uXmm.s.Hi, WHvX64RegisterXmm13);
972 GET_REG128(pCtx->pXStateR3->x87.aXMM[14].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[14].uXmm.s.Hi, WHvX64RegisterXmm14);
973 GET_REG128(pCtx->pXStateR3->x87.aXMM[15].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[15].uXmm.s.Hi, WHvX64RegisterXmm15);
974 }
975
976 /* MSRs */
977 // WHvX64RegisterTsc - don't touch
978 if (fWhat & CPUMCTX_EXTRN_EFER)
979 {
980 Assert(aenmNames[iReg] == WHvX64RegisterEfer);
981 if (aValues[iReg].Reg64 != pCtx->msrEFER)
982 {
983 Log7(("NEM/%u: MSR EFER changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrEFER, aValues[iReg].Reg64));
984 if ((aValues[iReg].Reg64 ^ pCtx->msrEFER) & MSR_K6_EFER_NXE)
985 PGMNotifyNxeChanged(pVCpu, RT_BOOL(aValues[iReg].Reg64 & MSR_K6_EFER_NXE));
986 pCtx->msrEFER = aValues[iReg].Reg64;
987 fMaybeChangedMode = true;
988 }
989 iReg++;
990 }
991 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
992 GET_REG64_LOG7(pCtx->msrKERNELGSBASE, WHvX64RegisterKernelGsBase, "MSR KERNEL_GS_BASE");
993 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
994 {
995 GET_REG64_LOG7(pCtx->SysEnter.cs, WHvX64RegisterSysenterCs, "MSR SYSENTER.CS");
996 GET_REG64_LOG7(pCtx->SysEnter.eip, WHvX64RegisterSysenterEip, "MSR SYSENTER.EIP");
997 GET_REG64_LOG7(pCtx->SysEnter.esp, WHvX64RegisterSysenterEsp, "MSR SYSENTER.ESP");
998 }
999 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1000 {
1001 GET_REG64_LOG7(pCtx->msrSTAR, WHvX64RegisterStar, "MSR STAR");
1002 GET_REG64_LOG7(pCtx->msrLSTAR, WHvX64RegisterLstar, "MSR LSTAR");
1003 GET_REG64_LOG7(pCtx->msrCSTAR, WHvX64RegisterCstar, "MSR CSTAR");
1004 GET_REG64_LOG7(pCtx->msrSFMASK, WHvX64RegisterSfmask, "MSR SFMASK");
1005 }
1006 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1007 {
1008 Assert(aenmNames[iReg] == WHvX64RegisterApicBase);
1009 const uint64_t uOldBase = APICGetBaseMsrNoCheck(pVCpu);
1010 if (aValues[iReg].Reg64 != uOldBase)
1011 {
1012 Log7(("NEM/%u: MSR APICBase changed %RX64 -> %RX64 (%RX64)\n",
1013 pVCpu->idCpu, uOldBase, aValues[iReg].Reg64, aValues[iReg].Reg64 ^ uOldBase));
1014 VBOXSTRICTRC rc2 = APICSetBaseMsr(pVCpu, aValues[iReg].Reg64);
1015 AssertLogRelMsg(rc2 == VINF_SUCCESS, ("%Rrc %RX64\n", VBOXSTRICTRC_VAL(rc2), aValues[iReg].Reg64));
1016 }
1017 iReg++;
1018
1019 GET_REG64_LOG7(pCtx->msrPAT, WHvX64RegisterPat, "MSR PAT");
1020#if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
1021 GET_REG64_LOG7(pCtx->msrPAT, WHvX64RegisterMsrMtrrCap);
1022#endif
1023 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
1024 GET_REG64_LOG7(pCtxMsrs->msr.MtrrDefType, WHvX64RegisterMsrMtrrDefType, "MSR MTRR_DEF_TYPE");
1025 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix64K_00000, WHvX64RegisterMsrMtrrFix64k00000, "MSR MTRR_FIX_64K_00000");
1026 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix16K_80000, WHvX64RegisterMsrMtrrFix16k80000, "MSR MTRR_FIX_16K_80000");
1027 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix16K_A0000, WHvX64RegisterMsrMtrrFix16kA0000, "MSR MTRR_FIX_16K_A0000");
1028 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_C0000, WHvX64RegisterMsrMtrrFix4kC0000, "MSR MTRR_FIX_4K_C0000");
1029 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_C8000, WHvX64RegisterMsrMtrrFix4kC8000, "MSR MTRR_FIX_4K_C8000");
1030 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_D0000, WHvX64RegisterMsrMtrrFix4kD0000, "MSR MTRR_FIX_4K_D0000");
1031 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_D8000, WHvX64RegisterMsrMtrrFix4kD8000, "MSR MTRR_FIX_4K_D8000");
1032 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_E0000, WHvX64RegisterMsrMtrrFix4kE0000, "MSR MTRR_FIX_4K_E0000");
1033 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_E8000, WHvX64RegisterMsrMtrrFix4kE8000, "MSR MTRR_FIX_4K_E8000");
1034 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_F0000, WHvX64RegisterMsrMtrrFix4kF0000, "MSR MTRR_FIX_4K_F0000");
1035 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_F8000, WHvX64RegisterMsrMtrrFix4kF8000, "MSR MTRR_FIX_4K_F8000");
1036 GET_REG64_LOG7(pCtxMsrs->msr.TscAux, WHvX64RegisterTscAux, "MSR TSC_AUX");
1037 /** @todo look for HvX64RegisterIa32MiscEnable and HvX64RegisterIa32FeatureControl? */
1038 }
1039
1040 /* Interruptibility. */
1041 if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
1042 {
1043 Assert(aenmNames[iReg] == WHvRegisterInterruptState);
1044 Assert(aenmNames[iReg + 1] == WHvX64RegisterRip);
1045
1046 if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
1047 {
1048 pVCpu->nem.s.fLastInterruptShadow = aValues[iReg].InterruptState.InterruptShadow;
1049 if (aValues[iReg].InterruptState.InterruptShadow)
1050 EMSetInhibitInterruptsPC(pVCpu, aValues[iReg + 1].Reg64);
1051 else
1052 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1053 }
1054
1055 if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
1056 {
1057 if (aValues[iReg].InterruptState.NmiMasked)
1058 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
1059 else
1060 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
1061 }
1062
1063 fWhat |= CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI;
1064 iReg += 2;
1065 }
1066
1067 /* Event injection. */
1068 /// @todo WHvRegisterPendingInterruption
1069 Assert(aenmNames[iReg] == WHvRegisterPendingInterruption);
1070 if (aValues[iReg].PendingInterruption.InterruptionPending)
1071 {
1072 Log7(("PendingInterruption: type=%u vector=%#x errcd=%RTbool/%#x instr-len=%u nested=%u\n",
1073 aValues[iReg].PendingInterruption.InterruptionType, aValues[iReg].PendingInterruption.InterruptionVector,
1074 aValues[iReg].PendingInterruption.DeliverErrorCode, aValues[iReg].PendingInterruption.ErrorCode,
1075 aValues[iReg].PendingInterruption.InstructionLength, aValues[iReg].PendingInterruption.NestedEvent));
1076 AssertMsg((aValues[iReg].PendingInterruption.AsUINT64 & UINT64_C(0xfc00)) == 0,
1077 ("%#RX64\n", aValues[iReg].PendingInterruption.AsUINT64));
1078 }
1079
1080 /// @todo WHvRegisterPendingEvent0
1081 /// @todo WHvRegisterPendingEvent1
1082
1083 /* Almost done, just update extrn flags and maybe change PGM mode. */
1084 pCtx->fExtrn &= ~fWhat;
1085 if (!(pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT))))
1086 pCtx->fExtrn = 0;
1087
1088 /* Typical. */
1089 if (!fMaybeChangedMode && !fFlushTlb)
1090 return VINF_SUCCESS;
1091
1092 /*
1093 * Slow.
1094 */
1095 if (fMaybeChangedMode)
1096 {
1097 int rc = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
1098 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
1099 }
1100
1101 if (fFlushTlb)
1102 {
1103 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, fFlushGlobalTlb);
1104 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
1105 }
1106
1107 return VINF_SUCCESS;
1108# endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
1109}
1110
1111#endif /* !IN_RING0 */
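
The fExtrn bits used by the two copy routines implement a lazy-import scheme: a set bit means the field still lives in Hyper-V, and importing a field clears its bit. A small self-contained sketch of that bookkeeping; the flag values and field names are made up for illustration and are not the CPUMCTX_EXTRN_XXX definitions:

    #include <stdint.h>
    #include <stdio.h>

    enum : uint64_t {                            /* illustrative stand-ins for CPUMCTX_EXTRN_XXX */
        EXTRN_RIP = UINT64_C(1) << 0,
        EXTRN_RSP = UINT64_C(1) << 1,
        EXTRN_CR3 = UINT64_C(1) << 2,
        EXTRN_ALL = EXTRN_RIP | EXTRN_RSP | EXTRN_CR3
    };

    struct GuestCtx { uint64_t fExtrn, rip, rsp, cr3; };

    /* Import only what is requested AND still external, then clear those bits. */
    static void importOnDemand(GuestCtx &ctx, uint64_t fWhat)
    {
        fWhat &= ctx.fExtrn;                     /* skip fields already imported */
        if (fWhat & EXTRN_RIP) ctx.rip = 0x1000; /* would read from the hypervisor */
        if (fWhat & EXTRN_RSP) ctx.rsp = 0x8000;
        if (fWhat & EXTRN_CR3) ctx.cr3 = 0x3000;
        ctx.fExtrn &= ~fWhat;                    /* mark as now held locally */
    }

    int main()
    {
        GuestCtx ctx = { EXTRN_ALL, 0, 0, 0 };
        importOnDemand(ctx, EXTRN_RIP);          /* partial, on-demand import */
        printf("fExtrn=%#llx rip=%#llx\n",
               (unsigned long long)ctx.fExtrn, (unsigned long long)ctx.rip);
        return 0;
    }
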
1112
1113
1114/**
1115 * Interface for importing state on demand (used by IEM).
1116 *
1117 * @returns VBox status code.
1118 * @param pVCpu The cross context CPU structure.
1119 * @param pCtx The target CPU context.
1120 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
1121 */
1122VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fWhat)
1123{
1124 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);
1125
1126#ifdef IN_RING0
1127 /** @todo improve and secure this translation */
1128 PGVM pGVM = GVMMR0ByHandle(pVCpu->pVMR0->hSelf);
1129 AssertReturn(pGVM, VERR_INVALID_VMCPU_HANDLE);
1130 VMCPUID idCpu = pVCpu->idCpu;
1131 ASMCompilerBarrier();
1132 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_VMCPU_HANDLE);
1133
1134 return nemR0WinImportState(pGVM, &pGVM->aCpus[idCpu], pCtx, fWhat);
1135#else
1136 return nemHCWinCopyStateFromHyperV(pVCpu->pVMR3, pVCpu, pCtx, fWhat);
1137#endif
1138}
1139
1140
1141/**
1142 * Query the CPU tick counter and optionally the TSC_AUX MSR value.
1143 *
1144 * @returns VBox status code.
1145 * @param pVCpu The cross context CPU structure.
1146 * @param pcTicks Where to return the CPU tick count.
1147 * @param puAux Where to return the TSC_AUX register value.
1148 */
1149VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPU pVCpu, uint64_t *pcTicks, uint32_t *puAux)
1150{
1151 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick);
1152
1153#ifdef IN_RING3
1154 PVM pVM = pVCpu->CTX_SUFF(pVM);
1155 VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
1156 AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);
1157
1158# ifdef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS
1159 /* Call ring-0 and get the values. */
1160 int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_QUERY_CPU_TICK, 0, NULL);
1161 AssertLogRelRCReturn(rc, rc);
1162 *pcTicks = pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks;
1163 if (puAux)
1164 *puAux = pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_TSC_AUX
1165 ? pVCpu->nem.s.Hypercall.QueryCpuTick.uAux : CPUMGetGuestTscAux(pVCpu);
1166 return VINF_SUCCESS;
1167
1168# else
1169 /* Call the official API. */
1170 WHV_REGISTER_NAME aenmNames[2] = { WHvX64RegisterTsc, WHvX64RegisterTscAux };
1171 WHV_REGISTER_VALUE aValues[2] = { {0, 0}, {0, 0} };
1172 Assert(RT_ELEMENTS(aenmNames) == RT_ELEMENTS(aValues));
1173 HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, 2, aValues);
1174 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1175 ("WHvGetVirtualProcessorRegisters(%p, %u,{tsc,tsc_aux},2,) -> %Rhrc (Last=%#x/%u)\n",
1176 pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1177 , VERR_NEM_GET_REGISTERS_FAILED);
1178 *pcTicks = aValues[0].Reg64;
1179 if (puAux)
1180 *puAux = pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_TSC_AUX ? aValues[1].Reg64 : CPUMGetGuestTscAux(pVCpu);
1181 return VINF_SUCCESS;
1182# endif
1183#else /* IN_RING0 */
1184 /** @todo improve and secure this translation */
1185 PGVM pGVM = GVMMR0ByHandle(pVCpu->pVMR0->hSelf);
1186 AssertReturn(pGVM, VERR_INVALID_VMCPU_HANDLE);
1187 VMCPUID idCpu = pVCpu->idCpu;
1188 ASMCompilerBarrier();
1189 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_VMCPU_HANDLE);
1190
1191 int rc = nemR0WinQueryCpuTick(pGVM, &pGVM->aCpus[idCpu], pcTicks, puAux);
1192 if (RT_SUCCESS(rc) && puAux && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_TSC_AUX))
1193 *puAux = CPUMGetGuestTscAux(pVCpu);
1194 return rc;
1195#endif /* IN_RING0 */
1196}
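
For reference, the same two registers can be read through the official API outside VirtualBox as well; a sketch assuming an already-created partition, with hPartition as a placeholder:

    #include <Windows.h>
    #include <WinHvPlatform.h>

    /* Read the guest TSC and TSC_AUX of vCPU 0 in a single batch. */
    static HRESULT queryTsc(WHV_PARTITION_HANDLE hPartition, UINT64 *pcTicks, UINT32 *puAux)
    {
        WHV_REGISTER_NAME  aenmNames[2] = { WHvX64RegisterTsc, WHvX64RegisterTscAux };
        WHV_REGISTER_VALUE aValues[2]   = {};
        HRESULT hrc = WHvGetVirtualProcessorRegisters(hPartition, 0 /*idCpu*/, aenmNames, 2, aValues);
        if (SUCCEEDED(hrc))
        {
            *pcTicks = aValues[0].Reg64;
            *puAux   = (UINT32)aValues[1].Reg64;
        }
        return hrc;
    }
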
1197
1198
1199/**
1200 * Resumes CPU clock (TSC) on all virtual CPUs.
1201 *
1202 * This is called by TM when the VM is started, restored, resumed or similar.
1203 *
1204 * @returns VBox status code.
1205 * @param pVM The cross context VM structure.
1206 * @param pVCpu The cross context CPU structure of the calling EMT.
1207 * @param uPausedTscValue The TSC value at the time of pausing.
1208 */
1209VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVM pVM, PVMCPU pVCpu, uint64_t uPausedTscValue)
1210{
1211#ifdef IN_RING0
1212 /** @todo improve and secure this translation */
1213 PGVM pGVM = GVMMR0ByHandle(pVM->hSelf);
1214 AssertReturn(pGVM, VERR_INVALID_VMCPU_HANDLE);
1215 VMCPUID idCpu = pVCpu->idCpu;
1216 ASMCompilerBarrier();
1217 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_VMCPU_HANDLE);
1218
1219 return nemR0WinResumeCpuTickOnAll(pGVM, &pGVM->aCpus[idCpu], uPausedTscValue);
1220#else /* IN_RING3 */
1221 VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
1222 AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);
1223
1224# ifdef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS
1225 /* Call ring-0 and do it all there. */
1226 return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL, uPausedTscValue, NULL);
1227
1228# else
1229 /*
1230 * Call the official API to do the job.
1231 */
1232 if (pVM->cCpus > 1)
1233 RTThreadYield(); /* Try to decrease the chance that we get rescheduled in the middle. */
1234
1235 /* Start with the first CPU. */
1236 WHV_REGISTER_NAME enmName = WHvX64RegisterTsc;
1237 WHV_REGISTER_VALUE Value = {0, 0};
1238 Value.Reg64 = uPausedTscValue;
1239 uint64_t const uFirstTsc = ASMReadTSC();
1240 HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, 0 /*iCpu*/, &enmName, 1, &Value);
1241 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1242 ("WHvSetVirtualProcessorRegisters(%p, 0,{tsc},2,%#RX64) -> %Rhrc (Last=%#x/%u)\n",
1243 pVM->nem.s.hPartition, uPausedTscValue, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1244 , VERR_NEM_SET_TSC);
1245
1246 /* Do the other CPUs, adjusting for elapsed TSC and keeping fingers crossed
1247 that we don't introduce too much drift here. */
1248 for (VMCPUID iCpu = 1; iCpu < pVM->cCpus; iCpu++)
1249 {
1250 Assert(enmName == WHvX64RegisterTsc);
1251 const uint64_t offDelta = (ASMReadTSC() - uFirstTsc);
1252 Value.Reg64 = uPausedTscValue + offDelta;
1253 HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, iCpu, &enmName, 1, &Value);
1254 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1255 ("WHvSetVirtualProcessorRegisters(%p, 0,{tsc},2,%#RX64 + %#RX64) -> %Rhrc (Last=%#x/%u)\n",
1256 pVM->nem.s.hPartition, iCpu, uPausedTscValue, offDelta, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1257 , VERR_NEM_SET_TSC);
1258 }
1259
1260 return VINF_SUCCESS;
1261# endif
1262#endif /* IN_RING3 */
1263}
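
The drift compensation above boils down to "paused value plus however many host cycles elapsed since the first vCPU was written". A hedged, self-contained sketch of that calculation using the __rdtsc intrinsic; the setTsc callback is a placeholder for the WHvSetVirtualProcessorRegisters call:

    #include <stdint.h>
    #include <intrin.h>   /* __rdtsc() on MSVC; other compilers have equivalents */

    /* Re-arm the TSC on each vCPU, compensating for the host cycles that
       elapse between the per-CPU writes. */
    static void resumeTscOnAll(unsigned cCpus, uint64_t uPausedTscValue,
                               void (*setTsc)(unsigned idCpu, uint64_t uTsc))
    {
        const uint64_t uFirstTsc = __rdtsc();                /* baseline at the first write */
        setTsc(0, uPausedTscValue);
        for (unsigned idCpu = 1; idCpu < cCpus; idCpu++)
        {
            const uint64_t offDelta = __rdtsc() - uFirstTsc; /* elapsed host cycles */
            setTsc(idCpu, uPausedTscValue + offDelta);
        }
    }

    static void dummySetTsc(unsigned, uint64_t) { /* would call WHvSetVirtualProcessorRegisters */ }
    int main() { resumeTscOnAll(4, UINT64_C(0x123456789a), dummySetTsc); return 0; }
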
1264
1265#ifdef NEMWIN_NEED_GET_REGISTER
1266# if defined(IN_RING0) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
1267/** Worker for assertion macro. */
1268NEM_TMPL_STATIC int nemHCWinGetRegister(PVMCPU pVCpu, PGVMCPU pGVCpu, uint32_t enmReg, HV_REGISTER_VALUE *pRetValue)
1269{
1270 RT_ZERO(*pRetValue);
1271# ifdef IN_RING3
1272 RT_NOREF(pVCpu, pGVCpu, enmReg);
1273 return VERR_NOT_IMPLEMENTED;
1274# else
1275 NOREF(pVCpu);
1276
1277 /*
1278 * Hypercall parameters.
1279 */
1280 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
1281 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
1282 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
1283
1284 pInput->PartitionId = pGVCpu->pGVM->nem.s.idHvPartition;
1285 pInput->VpIndex = pGVCpu->idCpu;
1286 pInput->fFlags = 0;
1287 pInput->Names[0] = (HV_REGISTER_NAME)enmReg;
1288
1289 size_t const cbInput = RT_ALIGN_Z(RT_OFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[1]), 32);
1290 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
1291 RT_BZERO(paValues, sizeof(paValues[0]) * 1);
1292
1293 /*
1294 * Make the hypercall and copy out the value.
1295 */
1296 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, 1),
1297 pGVCpu->nem.s.HypercallData.HCPhysPage,
1298 pGVCpu->nem.s.HypercallData.HCPhysPage + cbInput);
1299 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(1), ("uResult=%RX64 cRegs=%#x\n", uResult, 1),
1300 VERR_NEM_GET_REGISTERS_FAILED);
1301
1302 *pRetValue = paValues[0];
1303 return VINF_SUCCESS;
1304# endif
1305}
1306# else
1307/** Worker for assertion macro. */
1308NEM_TMPL_STATIC int nemR3WinGetRegister(PVMCPU pVCpu, uint32_t enmReg, WHV_REGISTER_VALUE *pRetValue)
1309{
1310 RT_ZERO(*pRetValue);
1311 RT_NOREF(pVCpu, enmReg);
1312 return VERR_NOT_IMPLEMENTED;
1313}
1314# endif
1315#endif
1316
1317
1318#ifdef LOG_ENABLED
1319/**
1320 * Get the virtual processor running status.
1321 */
1322DECLINLINE(VID_PROCESSOR_STATUS) nemHCWinCpuGetRunningStatus(PVMCPU pVCpu)
1323{
1324# ifdef IN_RING0
1325 NOREF(pVCpu);
1326 return VidProcessorStatusUndefined;
1327# else
1328 RTERRVARS Saved;
1329 RTErrVarsSave(&Saved);
1330
1331 /*
1332 * This API is disabled in release builds, it seems. On build 17101 it requires
1333 * the following patch to be enabled (windbg): eb vid+12180 0f 84 98 00 00 00
1334 */
1335 VID_PROCESSOR_STATUS enmCpuStatus = VidProcessorStatusUndefined;
1336 NTSTATUS rcNt = g_pfnVidGetVirtualProcessorRunningStatus(pVCpu->pVMR3->nem.s.hPartitionDevice, pVCpu->idCpu, &enmCpuStatus);
1337 AssertRC(rcNt);
1338
1339 RTErrVarsRestore(&Saved);
1340 return enmCpuStatus;
1341# endif
1342}
1343#endif /* LOG_ENABLED */
1344
1345
1346#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
1347# ifdef IN_RING3 /* hopefully not needed in ring-0, as we'd need KTHREADs and KeAlertThread. */
1348/**
1349 * Our own WHvCancelRunVirtualProcessor that can later be moved to ring-0.
1350 *
1351 * This is an experiment only.
1352 *
1353 * @returns VBox status code.
1354 * @param pVM The cross context VM structure.
1355 * @param pVCpu The cross context virtual CPU structure of the
1356 * calling EMT.
1357 */
1358NEM_TMPL_STATIC int nemHCWinCancelRunVirtualProcessor(PVM pVM, PVMCPU pVCpu)
1359{
1360 /*
1361 * Work the state.
1362 *
1363 * From the looks of things, we should let the EMT call VidStopVirtualProcessor.
1364 * So, we just need to modify the state and kick the EMT if it's waiting on
1365 * messages. For the latter we use NtAlertThread / KeAlertThread.
1366 */
1367 for (;;)
1368 {
1369 VMCPUSTATE enmState = VMCPU_GET_STATE(pVCpu);
1370 switch (enmState)
1371 {
1372 case VMCPUSTATE_STARTED_EXEC_NEM:
1373 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED, VMCPUSTATE_STARTED_EXEC_NEM))
1374 {
1375 DBGFTRACE_CUSTOM(pVM, "VMCPUSTATE_STARTED_EXEC_NEM -> CANCELED");
1376 Log8(("nemHCWinCancelRunVirtualProcessor: Switched %u to canceled state\n", pVCpu->idCpu));
1377 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatCancelChangedState);
1378 return VINF_SUCCESS;
1379 }
1380 break;
1381
1382 case VMCPUSTATE_STARTED_EXEC_NEM_WAIT:
1383 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED, VMCPUSTATE_STARTED_EXEC_NEM_WAIT))
1384 {
1385 DBGFTRACE_CUSTOM(pVM, "VMCPUSTATE_STARTED_EXEC_NEM_WAIT -> CANCELED");
1386# ifdef IN_RING0
1387 NTSTATUS rcNt = KeAlertThread(??);
1388 DBGFTRACE_CUSTOM(pVM, "KeAlertThread -> %#x", rcNt);
1389# else
1390 NTSTATUS rcNt = NtAlertThread(pVCpu->nem.s.hNativeThreadHandle);
1391 DBGFTRACE_CUSTOM(pVM, "NtAlertThread -> %#x", rcNt);
1392# endif
1393 Log8(("nemHCWinCancelRunVirtualProcessor: Alerted %u: %#x\n", pVCpu->idCpu, rcNt));
1394 Assert(rcNt == STATUS_SUCCESS);
1395 if (NT_SUCCESS(rcNt))
1396 {
1397 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatCancelAlertedThread);
1398 return VINF_SUCCESS;
1399 }
1400 AssertLogRelMsgFailedReturn(("NtAlertThread failed: %#x\n", rcNt), RTErrConvertFromNtStatus(rcNt));
1401 }
1402 break;
1403
1404 default:
1405 return VINF_SUCCESS;
1406 }
1407
1408 ASMNopPause();
1409 RT_NOREF(pVM);
1410 }
1411}
1412# endif /* IN_RING3 */
1413#endif /* NEM_WIN_USE_OUR_OWN_RUN_API */
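
/*
 * Annotation (minimal sketch, not part of the original source; hypothetical
 * helper name): the essence of the cancel protocol above.  A non-EMT thread
 * CAS-moves the vCPU out of the NEM-exec states; only when the EMT is parked
 * waiting for messages is an NT alert needed to interrupt the wait.
 */
#if 0
static void exampleCancel(PVMCPU pVCpu)
{
    for (;;)
    {
        VMCPUSTATE const enmState = VMCPU_GET_STATE(pVCpu);
        if (enmState == VMCPUSTATE_STARTED_EXEC_NEM)
        {
            if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED, enmState))
                return;                     /* The EMT notices the state change on its own. */
        }
        else if (enmState == VMCPUSTATE_STARTED_EXEC_NEM_WAIT)
        {
            if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED, enmState))
            {
                NtAlertThread(pVCpu->nem.s.hNativeThreadHandle); /* Kick it out of the message wait. */
                return;
            }
        }
        else
            return;                         /* Not executing in NEM, nothing to cancel. */
        ASMNopPause();                      /* Lost a state race, retry. */
    }
}
#endif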
1414
1415
1416#ifdef LOG_ENABLED
1417/**
1418 * Logs the current CPU state.
1419 */
1420NEM_TMPL_STATIC void nemHCWinLogState(PVM pVM, PVMCPU pVCpu)
1421{
1422 if (LogIs3Enabled())
1423 {
1424# if 0 // def IN_RING3 - causes lazy state import assertions all over CPUM.
1425 char szRegs[4096];
1426 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
1427 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
1428 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
1429 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
1430 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
1431 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
1432 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
1433 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
1434 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
1435 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
1436 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
1437 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
1438 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
1439 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
1440 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
1441 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
1442 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
1443 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
1444 " efer=%016VR{efer}\n"
1445 " pat=%016VR{pat}\n"
1446 " sf_mask=%016VR{sf_mask}\n"
1447 "krnl_gs_base=%016VR{krnl_gs_base}\n"
1448 " lstar=%016VR{lstar}\n"
1449 " star=%016VR{star} cstar=%016VR{cstar}\n"
1450 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
1451 );
1452
1453 char szInstr[256];
1454 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
1455 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
1456 szInstr, sizeof(szInstr), NULL);
1457 Log3(("%s%s\n", szRegs, szInstr));
1458# else
1459 /** @todo state logging in ring-0 */
1460 RT_NOREF(pVM, pVCpu);
1461# endif
1462 }
1463}
1464#endif /* LOG_ENABLED */
1465
1466
1467/** Macro used by nemHCWinExecStateToLogStr and nemR3WinExecStateToLogStr. */
1468#define SWITCH_IT(a_szPrefix) \
1469 do \
1470 switch (u)\
1471 { \
1472 case 0x00: return a_szPrefix ""; \
1473 case 0x01: return a_szPrefix ",Pnd"; \
1474 case 0x02: return a_szPrefix ",Dbg"; \
1475 case 0x03: return a_szPrefix ",Pnd,Dbg"; \
1476 case 0x04: return a_szPrefix ",Shw"; \
1477 case 0x05: return a_szPrefix ",Pnd,Shw"; \
1478 case 0x06: return a_szPrefix ",Shw,Dbg"; \
1479 case 0x07: return a_szPrefix ",Pnd,Shw,Dbg"; \
1480 default: AssertFailedReturn("WTF?"); \
1481 } \
1482 while (0)
1483
1484#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
1485/**
1486 * Translates the execution state bitfield into a short log string, VID version.
1487 *
1488 * @returns Read-only log string.
1489 * @param pMsgHdr The header which state to summarize.
1490 */
1491static const char *nemHCWinExecStateToLogStr(HV_X64_INTERCEPT_MESSAGE_HEADER const *pMsgHdr)
1492{
1493 unsigned u = (unsigned)pMsgHdr->ExecutionState.InterruptionPending
1494 | ((unsigned)pMsgHdr->ExecutionState.DebugActive << 1)
1495 | ((unsigned)pMsgHdr->ExecutionState.InterruptShadow << 2);
1496 if (pMsgHdr->ExecutionState.EferLma)
1497 SWITCH_IT("LM");
1498 else if (pMsgHdr->ExecutionState.Cr0Pe)
1499 SWITCH_IT("PM");
1500 else
1501 SWITCH_IT("RM");
1502}
1503#elif defined(IN_RING3)
1504/**
1505 * Translates the execution state bitfield into a short log string, WinHv version.
1506 *
1507 * @returns Read-only log string.
1508 * @param pExitCtx The exit context which state to summarize.
1509 */
1510static const char *nemR3WinExecStateToLogStr(WHV_VP_EXIT_CONTEXT const *pExitCtx)
1511{
1512 unsigned u = (unsigned)pExitCtx->ExecutionState.InterruptionPending
1513 | ((unsigned)pExitCtx->ExecutionState.DebugActive << 1)
1514 | ((unsigned)pExitCtx->ExecutionState.InterruptShadow << 2);
1515 if (pExitCtx->ExecutionState.EferLma)
1516 SWITCH_IT("LM");
1517 else if (pExitCtx->ExecutionState.Cr0Pe)
1518 SWITCH_IT("PM");
1519 else
1520 SWITCH_IT("RM");
1521}
1522#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
1523#undef SWITCH_IT
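
/* Annotation (worked example): with EferLma set and an execution state of
   InterruptionPending=1, DebugActive=0, InterruptShadow=1, the encoding is
   u = 1 | (0 << 1) | (1 << 2) = 0x05, so the functions above return "LM,Pnd,Shw". */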
1524
1525
1526#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
1527/**
1528 * Advances the guest RIP and clears EFLAGS.RF, VID version.
1529 *
1530 * This may clear VMCPU_FF_INHIBIT_INTERRUPTS.
1531 *
1532 * @param pVCpu The cross context virtual CPU structure.
1533 * @param pCtx The CPU context to update.
1534 * @param pMsgHdr The X64 intercept message header.
1535 * @param cbMinInstr The minimum instruction length, or 1 if unknown.
1536 */
1537DECLINLINE(void) nemHCWinAdvanceGuestRipAndClearRF(PVMCPU pVCpu, PCPUMCTX pCtx,
1538 HV_X64_INTERCEPT_MESSAGE_HEADER const *pMsgHdr, uint8_t cbMinInstr)
1539{
1540 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));
1541
1542 /* Advance the RIP. */
1543 Assert(pMsgHdr->InstructionLength >= cbMinInstr); RT_NOREF_PV(cbMinInstr);
1544 pCtx->rip += pMsgHdr->InstructionLength;
1545 pCtx->rflags.Bits.u1RF = 0;
1546
1547 /* Update interrupt inhibition. */
1548 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1549 { /* likely */ }
1550 else if (pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
1551 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1552}
1553#elif defined(IN_RING3)
1554/**
1555 * Advances the guest RIP and clears EFLAGS.RF, WinHv version.
1556 *
1557 * This may clear VMCPU_FF_INHIBIT_INTERRUPTS.
1558 *
1559 * @param pVCpu The cross context virtual CPU structure.
1560 * @param pCtx The CPU context to update.
1561 * @param pExitCtx The exit context.
1562 * @param cbMinInstr The minimum instruction length, or 1 if unknown.
1563 */
1564DECLINLINE(void) nemR3WinAdvanceGuestRipAndClearRF(PVMCPU pVCpu, PCPUMCTX pCtx,
1565 WHV_VP_EXIT_CONTEXT const *pExitCtx, uint8_t cbMinInstr)
1566{
1567 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));
1568
1569 /* Advance the RIP. */
1570 Assert(pExitCtx->InstructionLength >= cbMinInstr); RT_NOREF_PV(cbMinInstr);
1571 pCtx->rip += pExitCtx->InstructionLength;
1572 pCtx->rflags.Bits.u1RF = 0;
1573
1574 /* Update interrupt inhibition. */
1575 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1576 { /* likely */ }
1577 else if (pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
1578 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1579}
1580#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
1581
1582
1583
1584NEM_TMPL_STATIC DECLCALLBACK(int)
1585nemHCWinUnmapOnePageCallback(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint8_t *pu2NemState, void *pvUser)
1586{
1587 RT_NOREF_PV(pvUser);
1588#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1589 int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys);
1590 AssertRC(rc);
1591 if (RT_SUCCESS(rc))
1592#else
1593 RT_NOREF_PV(pVCpu);
1594 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
1595 if (SUCCEEDED(hrc))
1596#endif
1597 {
1598 Log5(("NEM GPA unmap all: %RGp (cMappedPages=%u)\n", GCPhys, pVM->nem.s.cMappedPages - 1));
1599 *pu2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
1600 }
1601 else
1602 {
1603#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1604 LogRel(("nemHCWinUnmapOnePageCallback: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));
1605#else
1606 LogRel(("nemHCWinUnmapOnePageCallback: GCPhys=%RGp %s hrc=%Rhrc (%#x) Last=%#x/%u (cMappedPages=%u)\n",
1607 GCPhys, g_apszPageStates[*pu2NemState], hrc, hrc, RTNtLastStatusValue(),
1608 RTNtLastErrorValue(), pVM->nem.s.cMappedPages));
1609#endif
1610 *pu2NemState = NEM_WIN_PAGE_STATE_NOT_SET;
1611 }
1612 if (pVM->nem.s.cMappedPages > 0)
1613 ASMAtomicDecU32(&pVM->nem.s.cMappedPages);
1614 return VINF_SUCCESS;
1615}
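
/* Annotation (added note): this callback is handed to PGMPhysNemEnumPagesByState
   further down to unmap everything when unmapping a single page keeps failing,
   resetting each page's NEM state and decrementing the mapped-page count. */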
1616
1617
1618/**
1619 * State to pass between nemHCWinHandleMemoryAccess / nemR3WinWHvHandleMemoryAccess
1620 * and nemHCWinHandleMemoryAccessPageCheckerCallback.
1621 */
1622typedef struct NEMHCWINHMACPCCSTATE
1623{
1624 /** Input: Write access. */
1625 bool fWriteAccess;
1626 /** Output: Set if we did something. */
1627 bool fDidSomething;
1628 /** Output: Set if we should resume. */
1629 bool fCanResume;
1630} NEMHCWINHMACPCCSTATE;
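
/* Annotation (added note): the exit handlers fill in fWriteAccess and pass this
   to PGMPhysNemPageInfoChecker; on return, fCanResume set means the faulting
   instruction can simply be restarted, otherwise the access must be emulated. */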
1631
1632/**
1633 * @callback_method_impl{FNPGMPHYSNEMCHECKPAGE,
1634 * Worker for nemR3WinHandleMemoryAccess; pvUser points to a
1635 * NEMHCWINHMACPCCSTATE structure. }
1636 */
1637NEM_TMPL_STATIC DECLCALLBACK(int)
1638nemHCWinHandleMemoryAccessPageCheckerCallback(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
1639{
1640 NEMHCWINHMACPCCSTATE *pState = (NEMHCWINHMACPCCSTATE *)pvUser;
1641 pState->fDidSomething = false;
1642 pState->fCanResume = false;
1643
1644 /* If A20 is disabled, we may need to make another query on the masked
1645 page to get the correct protection information. */
1646 uint8_t u2State = pInfo->u2NemState;
1647 RTGCPHYS GCPhysSrc;
1648 if ( pVM->nem.s.fA20Enabled
1649 || !NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
1650 GCPhysSrc = GCPhys;
1651 else
1652 {
1653 GCPhysSrc = GCPhys & ~(RTGCPHYS)RT_BIT_32(20);
1654 PGMPHYSNEMPAGEINFO Info2;
1655 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhysSrc, pState->fWriteAccess, &Info2, NULL, NULL);
1656 AssertRCReturn(rc, rc);
1657
1658 *pInfo = Info2;
1659 pInfo->u2NemState = u2State;
1660 }
1661
1662 /*
1663 * Consolidate current page state with actual page protection and access type.
1664 * We don't really consider downgrades here, as they shouldn't happen.
1665 */
1666#ifndef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1667 /** @todo Someone at microsoft please explain:
1668 * I'm not sure WTF was going on, but I ended up in a loop if I remapped a
1669 * readonly page as writable (unmap, then map again). Specifically, this was an
1670 * issue with the big VRAM mapping at 0xe0000000 when booting DSL 4.4.1. So, in
1671 * the hope of working around that we no longer pre-map anything, just unmap stuff
1672 * and do it lazily here. And here we will first unmap, restart, and then remap
1673 * with new protection or backing.
1674 */
1675#endif
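 /* Annotation (sketch of the transitions handled below):
      NOT_SET/UNMAPPED: map the page, then restart the instruction;
      READABLE + write: upgrade it (hypercall path) or unmap and restart;
      WRITABLE: restart if still writable; protection downgrades are not expected. */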
1676 int rc;
1677 switch (u2State)
1678 {
1679 case NEM_WIN_PAGE_STATE_UNMAPPED:
1680 case NEM_WIN_PAGE_STATE_NOT_SET:
1681 if (pInfo->fNemProt == NEM_PAGE_PROT_NONE)
1682 {
1683 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #1\n", GCPhys));
1684 return VINF_SUCCESS;
1685 }
1686
1687 /* Don't bother remapping it if it's a write request to a non-writable page. */
1688 if ( pState->fWriteAccess
1689 && !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE))
1690 {
1691 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #1w\n", GCPhys));
1692 return VINF_SUCCESS;
1693 }
1694
1695 /* Map the page. */
1696 rc = nemHCNativeSetPhysPage(pVM,
1697 pVCpu,
1698 GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
1699 GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
1700 pInfo->fNemProt,
1701 &u2State,
1702 true /*fBackingState*/);
1703 pInfo->u2NemState = u2State;
1704 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - synced => %s + %Rrc\n",
1705 GCPhys, g_apszPageStates[u2State], rc));
1706 pState->fDidSomething = true;
1707 pState->fCanResume = true;
1708 return rc;
1709
1710 case NEM_WIN_PAGE_STATE_READABLE:
1711 if ( !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1712 && (pInfo->fNemProt & (NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE)))
1713 {
1714 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #2\n", GCPhys));
1715 return VINF_SUCCESS;
1716 }
1717
1718#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1719 /* Upgrade page to writable. */
1720/** @todo test this*/
1721 if ( (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1722 && pState->fWriteAccess)
1723 {
1724 rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhys,
1725 HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE
1726 | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
1727 AssertRC(rc);
1728 if (RT_SUCCESS(rc))
1729 {
1730 pInfo->u2NemState = NEM_WIN_PAGE_STATE_WRITABLE;
1731 pState->fDidSomething = true;
1732 pState->fCanResume = true;
1733 Log5(("NEM GPA write-upgrade/exit: %RGp (was %s, cMappedPages=%u)\n",
1734 GCPhys, g_apszPageStates[u2State], pVM->nem.s.cMappedPages));
1735 }
1736 }
1737 else
1738 {
1739 /* Need to emulate the access. */
1740 AssertBreak(pInfo->fNemProt != NEM_PAGE_PROT_NONE); /* There should be no downgrades. */
1741 rc = VINF_SUCCESS;
1742 }
1743 return rc;
1744#else
1745 break;
1746#endif
1747
1748 case NEM_WIN_PAGE_STATE_WRITABLE:
1749 if (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1750 {
1751 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #3\n", GCPhys));
1752 return VINF_SUCCESS;
1753 }
1754#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1755 AssertFailed(); /* There should be no downgrades. */
1756#endif
1757 break;
1758
1759 default:
1760 AssertLogRelMsgFailedReturn(("u2State=%#x\n", u2State), VERR_NEM_IPE_4);
1761 }
1762
1763 /*
1764 * Unmap and restart the instruction.
1765 * If this fails, which it does every so often, just unmap everything for now.
1766 */
1767#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1768 rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys);
1769 AssertRC(rc);
1770 if (RT_SUCCESS(rc))
1771#else
1772 /** @todo figure out whether we mess up the state or if it's WHv. */
1773 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
1774 if (SUCCEEDED(hrc))
1775#endif
1776 {
1777 pState->fDidSomething = true;
1778 pState->fCanResume = true;
1779 pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
1780 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
1781 Log5(("NEM GPA unmapped/exit: %RGp (was %s, cMappedPages=%u)\n", GCPhys, g_apszPageStates[u2State], cMappedPages));
1782 return VINF_SUCCESS;
1783 }
1784#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1785 LogRel(("nemHCWinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhys, rc));
1786 return rc;
1787#else
1788 LogRel(("nemHCWinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp %s hrc=%Rhrc (%#x) Last=%#x/%u (cMappedPages=%u)\n",
1789 GCPhys, g_apszPageStates[u2State], hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue(),
1790 pVM->nem.s.cMappedPages));
1791
1792 PGMPhysNemEnumPagesByState(pVM, pVCpu, NEM_WIN_PAGE_STATE_READABLE, nemHCWinUnmapOnePageCallback, NULL);
1793 Log(("nemHCWinHandleMemoryAccessPageCheckerCallback: Unmapped all (cMappedPages=%u)\n", pVM->nem.s.cMappedPages));
1794
1795 pState->fDidSomething = true;
1796 pState->fCanResume = true;
1797 pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
1798 return VINF_SUCCESS;
1799#endif
1800}
1801
1802
1803
1804#if defined(IN_RING0) && defined(NEM_WIN_USE_OUR_OWN_RUN_API)
1805/**
1806 * Wrapper around nemR0WinImportState that converts VERR_NEM_CHANGE_PGM_MODE and
1807 * VERR_NEM_FLUSH_TLB (and VERR_NEM_UPDATE_APIC_BASE) into informational status codes, logging and asserting on other statuses.
1808 *
1809 * @returns VBox strict status code.
1810 * @param pGVM The global (ring-0) VM structure.
1811 * @param pGVCpu The global (ring-0) per CPU structure.
1812 * @param pCtx The CPU context to import into.
1813 * @param fWhat What to import.
1814 * @param pszCaller Who is doing the importing.
1815 */
1816DECLINLINE(VBOXSTRICTRC) nemR0WinImportStateStrict(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat, const char *pszCaller)
1817{
1818 int rc = nemR0WinImportState(pGVM, pGVCpu, pCtx, fWhat);
1819 if (RT_SUCCESS(rc))
1820 {
1821 Assert(rc == VINF_SUCCESS);
1822 return VINF_SUCCESS;
1823 }
1824
1825 if (rc == VERR_NEM_CHANGE_PGM_MODE || rc == VERR_NEM_FLUSH_TLB || rc == VERR_NEM_UPDATE_APIC_BASE)
1826 {
1827 Log4(("%s/%u: nemR0WinImportState -> %Rrc\n", pszCaller, pGVCpu->idCpu, -rc));
1828 return -rc; /* Return the negated (positive, i.e. informational) version of the status. */
1829 }
1830 RT_NOREF(pszCaller);
1831 AssertMsgFailedReturn(("%s/%u: nemR0WinImportState failed: %Rrc\n", pszCaller, pGVCpu->idCpu, rc), rc);
1832}
1833#endif /* IN_RING0 && NEM_WIN_USE_OUR_OWN_RUN_API*/
1834
1835#if defined(NEM_WIN_USE_OUR_OWN_RUN_API) || defined(IN_RING3)
1836/**
1837 * Wrapper around nemR0WinImportStateStrict and nemHCWinCopyStateFromHyperV.
1838 *
1839 * Unlike the wrapped APIs, this checks whether it's necessary.
1840 *
1841 * @param pVCpu The cross context per CPU structure.
1842 * @param pGVCpu The global (ring-0) per CPU structure (NULL in ring-3).
1843 * @param pGVCpu The global (ring-0) per CPU structure.
1844 * @param pCtx The CPU context to import into.
1845 * @param fWhat What to import.
1846 * @param pszCaller Who is doing the importing.
1847 */
1848DECLINLINE(VBOXSTRICTRC) nemHCWinImportStateIfNeededStrict(PVMCPU pVCpu, PGVMCPU pGVCpu, PCPUMCTX pCtx,
1849 uint64_t fWhat, const char *pszCaller)
1850{
1851 if (pCtx->fExtrn & fWhat)
1852 {
1853#ifdef IN_RING0
1854 RT_NOREF(pVCpu);
1855 return nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pCtx, fWhat, pszCaller);
1856#else
1857 RT_NOREF(pGVCpu, pszCaller);
1858 int rc = nemHCWinCopyStateFromHyperV(pVCpu->pVMR3, pVCpu, pCtx, fWhat);
1859 AssertRCReturn(rc, rc);
1860#endif
1861 }
1862 return VINF_SUCCESS;
1863}
1864#endif /* NEM_WIN_USE_OUR_OWN_RUN_API || IN_RING3 */
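
/*
 * Annotation (hypothetical usage sketch, not part of the original source): an
 * exit handler about to enter the interpreter would pull in whatever IEM needs
 * like this ("SomeExit" is a made-up caller tag):
 */
#if 0
VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx,
                                                          NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "SomeExit");
if (rcStrict != VINF_SUCCESS)
    return rcStrict;
#endif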
1865
1866#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
1867/**
1868 * Copies register state from the X64 intercept message header.
1869 *
1870 * ASSUMES no state copied yet.
1871 *
1872 * @param pVCpu The cross context per CPU structure.
1873 * @param pCtx The register context.
1874 * @param pHdr The X64 intercept message header.
1875 * @sa nemR3WinCopyStateFromX64Header
1876 */
1877DECLINLINE(void) nemHCWinCopyStateFromX64Header(PVMCPU pVCpu, PCPUMCTX pCtx, HV_X64_INTERCEPT_MESSAGE_HEADER const *pHdr)
1878{
1879 Assert( (pCtx->fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
1880 == (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT));
1881 NEM_WIN_COPY_BACK_SEG(pCtx->cs, pHdr->CsSegment);
1882 pCtx->rip = pHdr->Rip;
1883 pCtx->rflags.u = pHdr->Rflags;
1884
1885 pVCpu->nem.s.fLastInterruptShadow = pHdr->ExecutionState.InterruptShadow;
1886 if (!pHdr->ExecutionState.InterruptShadow)
1887 {
1888 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1889 { /* likely */ }
1890 else
1891 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1892 }
1893 else
1894 EMSetInhibitInterruptsPC(pVCpu, pHdr->Rip);
1895
1896 pCtx->fExtrn &= ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT);
1897}
1898#elif defined(IN_RING3)
1899/**
1900 * Copies register state from the (common) exit context.
1901 *
1902 * ASSUMES no state copied yet.
1903 *
1904 * @param pVCpu The cross context per CPU structure.
1905 * @param pCtx The register context.
1906 * @param pExitCtx The common exit context.
1907 * @sa nemHCWinCopyStateFromX64Header
1908 */
1909DECLINLINE(void) nemR3WinCopyStateFromX64Header(PVMCPU pVCpu, PCPUMCTX pCtx, WHV_VP_EXIT_CONTEXT const *pExitCtx)
1910{
1911 Assert( (pCtx->fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
1912 == (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT));
1913 NEM_WIN_COPY_BACK_SEG(pCtx->cs, pExitCtx->Cs);
1914 pCtx->rip = pExitCtx->Rip;
1915 pCtx->rflags.u = pExitCtx->Rflags;
1916
1917 pVCpu->nem.s.fLastInterruptShadow = pExitCtx->ExecutionState.InterruptShadow;
1918 if (!pExitCtx->ExecutionState.InterruptShadow)
1919 {
1920 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1921 { /* likely */ }
1922 else
1923 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1924 }
1925 else
1926 EMSetInhibitInterruptsPC(pVCpu, pExitCtx->Rip);
1927
1928 pCtx->fExtrn &= ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT);
1929}
1930#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
1931
1932
1933#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
1934/**
1935 * Deals with memory intercept message.
1936 *
1937 * @returns Strict VBox status code.
1938 * @param pVM The cross context VM structure.
1939 * @param pVCpu The cross context per CPU structure.
1940 * @param pMsg The message.
1941 * @param pCtx The register context.
1942 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
1943 * @sa nemR3WinHandleExitMemory
1944 */
1945NEM_TMPL_STATIC VBOXSTRICTRC
1946nemHCWinHandleMessageMemory(PVM pVM, PVMCPU pVCpu, HV_X64_MEMORY_INTERCEPT_MESSAGE const *pMsg, PCPUMCTX pCtx, PGVMCPU pGVCpu)
1947{
1948 uint64_t const uHostTsc = ASMReadTSC();
1949 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
1950 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
1951 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_EXECUTE);
1952
1953 /*
1954 * Whatever we do, we must clear pending event injection upon resume.
1955 */
1956 if (pMsg->Header.ExecutionState.InterruptionPending)
1957 pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
1958
1959# if 0 /* Experiment: 20K -> 34K exit/s. */
1960 if ( pMsg->Header.ExecutionState.EferLma
1961 && pMsg->Header.CsSegment.Long
1962 && pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
1963 {
1964 if ( pMsg->Header.Rip - (uint64_t)0xf65a < (uint64_t)(0xf662 - 0xf65a)
1965 && pMsg->InstructionBytes[0] == 0x89
1966 && pMsg->InstructionBytes[1] == 0x03)
1967 {
1968 pCtx->rip = pMsg->Header.Rip + 2;
1969 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RIP;
1970 AssertMsg(pMsg->Header.InstructionLength == 2, ("%#x\n", pMsg->Header.InstructionLength));
1971 //Log(("%RX64 msg:\n%.80Rhxd\n", pCtx->rip, pMsg));
1972 return VINF_SUCCESS;
1973 }
1974 }
1975# endif
1976
1977 /*
1978 * Ask PGM for information about the given GCPhys. We need to check if we're
1979 * out of sync first.
1980 */
1981 NEMHCWINHMACPCCSTATE State = { pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE, false, false };
1982 PGMPHYSNEMPAGEINFO Info;
1983 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pMsg->GuestPhysicalAddress, State.fWriteAccess, &Info,
1984 nemHCWinHandleMemoryAccessPageCheckerCallback, &State);
1985 if (RT_SUCCESS(rc))
1986 {
1987 if (Info.fNemProt & ( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
1988 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
1989 {
1990 if (State.fCanResume)
1991 {
1992 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting (%s)\n",
1993 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
1994 pMsg->GuestPhysicalAddress, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
1995 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
1996 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pMsg->Header.InterceptAccessType]));
1997 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
1998 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, uHostTsc);
1999 return VINF_SUCCESS;
2000 }
2001 }
2002 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating (%s)\n",
2003 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2004 pMsg->GuestPhysicalAddress, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
2005 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
2006 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pMsg->Header.InterceptAccessType]));
2007 }
2008 else
2009 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp rc=%Rrc%s; emulating (%s)\n",
2010 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2011 pMsg->GuestPhysicalAddress, rc, State.fDidSomething ? " modified-backing" : "",
2012 g_apszHvInterceptAccessTypes[pMsg->Header.InterceptAccessType]));
2013
2014 /*
2015 * Emulate the memory access, either access handler or special memory.
2016 */
2017 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2018 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
2019 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
2020 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
2021 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, uHostTsc);
2022 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, &pMsg->Header);
2023 VBOXSTRICTRC rcStrict;
2024# ifdef IN_RING0
2025 rcStrict = nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pCtx,
2026 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES, "MemExit");
2027 if (rcStrict != VINF_SUCCESS)
2028 return rcStrict;
2029# else
2030 rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2031 AssertRCReturn(rc, rc);
2032 NOREF(pGVCpu);
2033# endif
2034
2035 if (pMsg->Reserved1)
2036 Log(("MemExit/Reserved1=%#x\n", pMsg->Reserved1));
2037 if (pMsg->Header.ExecutionState.Reserved0 || pMsg->Header.ExecutionState.Reserved1)
2038 Log(("MemExit/Hdr/State: Reserved0=%#x Reserved1=%#x\n", pMsg->Header.ExecutionState.Reserved0, pMsg->Header.ExecutionState.Reserved1));
2039
2040 if (!pExitRec)
2041 {
2042 //if (pMsg->InstructionByteCount > 0)
2043 // Log4(("InstructionByteCount=%#x %.16Rhxs\n", pMsg->InstructionByteCount, pMsg->InstructionBytes));
2044 if (pMsg->InstructionByteCount > 0)
2045 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pMsg->Header.Rip,
2046 pMsg->InstructionBytes, pMsg->InstructionByteCount);
2047 else
2048 rcStrict = IEMExecOne(pVCpu);
2049 /** @todo do we need to do anything wrt debugging here? */
2050 }
2051 else
2052 {
2053 /* Frequent access or probing. */
2054 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2055 Log4(("MemExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2056 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2057 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2058 }
2059 return rcStrict;
2060}
2061#elif defined(IN_RING3)
2062/**
2063 * Deals with memory access exits (WHvRunVpExitReasonMemoryAccess).
2064 *
2065 * @returns Strict VBox status code.
2066 * @param pVM The cross context VM structure.
2067 * @param pVCpu The cross context per CPU structure.
2068 * @param pExit The VM exit information to handle.
2069 * @param pCtx The register context.
2070 * @sa nemHCWinHandleMessageMemory
2071 */
2072NEM_TMPL_STATIC VBOXSTRICTRC
2073nemR3WinHandleExitMemory(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx)
2074{
2075 uint64_t const uHostTsc = ASMReadTSC();
2076 Assert(pExit->MemoryAccess.AccessInfo.AccessType != 3); /* Read=0, Write=1, Execute=2; 3 would be invalid. */
2077
2078 /*
2079 * Whatever we do, we must clear pending event injection upon resume.
2080 */
2081 if (pExit->VpContext.ExecutionState.InterruptionPending)
2082 pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
2083
2084 /*
2085 * Ask PGM for information about the given GCPhys. We need to check if we're
2086 * out of sync first.
2087 */
2088 NEMHCWINHMACPCCSTATE State = { pExit->MemoryAccess.AccessInfo.AccessType == WHvMemoryAccessWrite, false, false };
2089 PGMPHYSNEMPAGEINFO Info;
2090 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pExit->MemoryAccess.Gpa, State.fWriteAccess, &Info,
2091 nemHCWinHandleMemoryAccessPageCheckerCallback, &State);
2092 if (RT_SUCCESS(rc))
2093 {
2094 if (Info.fNemProt & ( pExit->MemoryAccess.AccessInfo.AccessType == WHvMemoryAccessWrite
2095 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
2096 {
2097 if (State.fCanResume)
2098 {
2099 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting (%s)\n",
2100 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2101 pExit->MemoryAccess.Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
2102 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
2103 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pExit->MemoryAccess.AccessInfo.AccessType]));
2104 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
2105 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, uHostTsc);
2106 return VINF_SUCCESS;
2107 }
2108 }
2109 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating (%s)\n",
2110 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2111 pExit->MemoryAccess.Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
2112 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
2113 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pExit->MemoryAccess.AccessInfo.AccessType]));
2114 }
2115 else
2116 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp rc=%Rrc%s; emulating (%s)\n",
2117 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2118 pExit->MemoryAccess.Gpa, rc, State.fDidSomething ? " modified-backing" : "",
2119 g_apszHvInterceptAccessTypes[pExit->MemoryAccess.AccessInfo.AccessType]));
2120
2121 /*
2122 * Emulate the memory access, either access handler or special memory.
2123 */
2124 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2125 pExit->MemoryAccess.AccessInfo.AccessType == WHvMemoryAccessWrite
2126 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
2127 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
2128 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, uHostTsc);
2129 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
2130 rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2131 AssertRCReturn(rc, rc);
2132 if (pExit->VpContext.ExecutionState.Reserved0 || pExit->VpContext.ExecutionState.Reserved1)
2133 Log(("MemExit/Hdr/State: Reserved0=%#x Reserved1=%#x\n", pExit->VpContext.ExecutionState.Reserved0, pExit->VpContext.ExecutionState.Reserved1));
2134
2135 VBOXSTRICTRC rcStrict;
2136 if (!pExitRec)
2137 {
2138 //if (pExit->MemoryAccess.InstructionByteCount > 0)
2139 // Log4(("InstructionByteCount=%#x %.16Rhxs\n", pExit->MemoryAccess.InstructionByteCount, pExit->MemoryAccess.InstructionBytes));
2140 if (pExit->MemoryAccess.InstructionByteCount > 0)
2141 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pExit->VpContext.Rip,
2142 pExit->MemoryAccess.InstructionBytes, pExit->MemoryAccess.InstructionByteCount);
2143 else
2144 rcStrict = IEMExecOne(pVCpu);
2145 /** @todo do we need to do anything wrt debugging here? */
2146 }
2147 else
2148 {
2149 /* Frequent access or probing. */
2150 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2151 Log4(("MemExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2152 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2153 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2154 }
2155 return rcStrict;
2156}
2157#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
2158
2159
2160#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
2161/**
2162 * Deals with I/O port intercept message.
2163 *
2164 * @returns Strict VBox status code.
2165 * @param pVM The cross context VM structure.
2166 * @param pVCpu The cross context per CPU structure.
2167 * @param pMsg The message.
2168 * @param pCtx The register context.
2169 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
2170 */
2171NEM_TMPL_STATIC VBOXSTRICTRC
2172nemHCWinHandleMessageIoPort(PVM pVM, PVMCPU pVCpu, HV_X64_IO_PORT_INTERCEPT_MESSAGE const *pMsg, PCPUMCTX pCtx, PGVMCPU pGVCpu)
2173{
2174 /*
2175 * Assert message sanity.
2176 */
2177 Assert( pMsg->AccessInfo.AccessSize == 1
2178 || pMsg->AccessInfo.AccessSize == 2
2179 || pMsg->AccessInfo.AccessSize == 4);
2180 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
2181 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE);
2182 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, pGVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
2183 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRip, pMsg->Header.Rip);
2184 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
2185 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
2186 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRax, pMsg->Rax);
2187 if (pMsg->AccessInfo.StringOp)
2188 {
2189 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, pGVCpu, HvX64RegisterDs, pMsg->DsSegment);
2190 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, pGVCpu, HvX64RegisterEs, pMsg->EsSegment);
2191 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRcx, pMsg->Rcx);
2192 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRsi, pMsg->Rsi);
2193 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRdi, pMsg->Rdi);
2194 }
2195
2196 /*
2197 * Whatever we do, we must clear pending event injection upon resume.
2198 */
2199 if (pMsg->Header.ExecutionState.InterruptionPending)
2200 pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
2201
2202 /*
2203 * Add history first to avoid two paths doing EMHistoryExec calls.
2204 */
2205 VBOXSTRICTRC rcStrict;
2206 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2207 !pMsg->AccessInfo.StringOp
2208 ? ( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
2209 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_WRITE)
2210 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_READ))
2211 : ( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
2212 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_WRITE)
2213 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_READ)),
2214 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
2215 if (!pExitRec)
2216 {
2217 if (!pMsg->AccessInfo.StringOp)
2218 {
2219 /*
2220 * Simple port I/O.
2221 */
2222 static uint32_t const s_fAndMask[8] =
2223 { UINT32_MAX, UINT32_C(0xff), UINT32_C(0xffff), UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX };
2224 uint32_t const fAndMask = s_fAndMask[pMsg->AccessInfo.AccessSize];
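 /* Annotation (worked example): AccessSize 1 -> mask 0xff, 2 -> 0xffff, 4 -> UINT32_MAX;
    e.g. a 2-byte OUT sends only AX, and a 2-byte IN merges the result into AX below
    while preserving the upper part of RAX. */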
2225
2226 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, &pMsg->Header);
2227 if (pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
2228 {
2229 rcStrict = IOMIOPortWrite(pVM, pVCpu, pMsg->PortNumber, (uint32_t)pMsg->Rax & fAndMask, pMsg->AccessInfo.AccessSize);
2230 Log4(("IOExit/%u: %04x:%08RX64/%s: OUT %#x, %#x LB %u rcStrict=%Rrc\n",
2231 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2232 pMsg->PortNumber, (uint32_t)pMsg->Rax & fAndMask, pMsg->AccessInfo.AccessSize, VBOXSTRICTRC_VAL(rcStrict) ));
2233 if (IOM_SUCCESS(rcStrict))
2234 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pMsg->Header, 1);
2235# ifdef IN_RING0
2236 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
2237 && !pCtx->rflags.Bits.u1TF
2238 /** @todo check for debug breakpoints */ )
2239 return EMRZSetPendingIoPortWrite(pVCpu, pMsg->PortNumber, pMsg->Header.InstructionLength,
2240 pMsg->AccessInfo.AccessSize, (uint32_t)pMsg->Rax & fAndMask);
2241# endif
2242 else
2243 {
2244 pCtx->rax = pMsg->Rax;
2245 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RAX;
2246 }
2247 }
2248 else
2249 {
2250 uint32_t uValue = 0;
2251 rcStrict = IOMIOPortRead(pVM, pVCpu, pMsg->PortNumber, &uValue, pMsg->AccessInfo.AccessSize);
2252 Log4(("IOExit/%u: %04x:%08RX64/%s: IN %#x LB %u -> %#x, rcStrict=%Rrc\n",
2253 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2254 pMsg->PortNumber, pMsg->AccessInfo.AccessSize, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2255 if (IOM_SUCCESS(rcStrict))
2256 {
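 /* Annotation: x86 semantics: a 32-bit IN zero-extends into RAX, while 8-bit
    and 16-bit results merge into the low bits and leave the rest of RAX untouched. */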
2257 if (pMsg->AccessInfo.AccessSize != 4)
2258 pCtx->rax = (pMsg->Rax & ~(uint64_t)fAndMask) | (uValue & fAndMask);
2259 else
2260 pCtx->rax = uValue;
2261 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RAX;
2262 Log4(("IOExit/%u: RAX %#RX64 -> %#RX64\n", pVCpu->idCpu, pMsg->Rax, pCtx->rax));
2263 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pMsg->Header, 1);
2264 }
2265 else
2266 {
2267 pCtx->rax = pMsg->Rax;
2268 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RAX;
2269# ifdef IN_RING0
2270 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
2271 && !pCtx->rflags.Bits.u1TF
2272 /** @todo check for debug breakpoints */ )
2273 return EMRZSetPendingIoPortRead(pVCpu, pMsg->PortNumber, pMsg->Header.InstructionLength,
2274 pMsg->AccessInfo.AccessSize);
2275# endif
2276 }
2277 }
2278 }
2279 else
2280 {
2281 /*
2282 * String port I/O.
2283 */
2284 /** @todo Someone at Microsoft please explain how we can get the address mode
2285 * from the IoPortAccess.VpContext. CS.Attributes is only sufficient for
2286 * getting the default mode, it can always be overridden by a prefix. This
2287 * forces us to interpret the instruction from opcodes, which is suboptimal.
2288 * Both AMD-V and VT-x include the address size in the exit info, at least on
2289 * CPUs that are reasonably new.
2290 *
2291 * Of course, it's possible this is undocumented and we just need to do some
2292 * experiments to figure out how it's communicated. Alternatively, we can scan
2293 * the opcode bytes for possible evil prefixes.
2294 */
2295 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, &pMsg->Header);
2296 pCtx->fExtrn &= ~( CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDI | CPUMCTX_EXTRN_RSI
2297 | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2298 NEM_WIN_COPY_BACK_SEG(pCtx->ds, pMsg->DsSegment);
2299 NEM_WIN_COPY_BACK_SEG(pCtx->es, pMsg->EsSegment);
2300 pCtx->rax = pMsg->Rax;
2301 pCtx->rcx = pMsg->Rcx;
2302 pCtx->rdi = pMsg->Rdi;
2303 pCtx->rsi = pMsg->Rsi;
2304# ifdef IN_RING0
2305 rcStrict = nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "IOExit");
2306 if (rcStrict != VINF_SUCCESS)
2307 return rcStrict;
2308# else
2309 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2310 AssertRCReturn(rc, rc);
2311 RT_NOREF(pGVCpu);
2312# endif
2313
2314 Log4(("IOExit/%u: %04x:%08RX64/%s: %s%s %#x LB %u (emulating)\n",
2315 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2316 pMsg->AccessInfo.RepPrefix ? "REP " : "",
2317 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? "OUTS" : "INS",
2318 pMsg->PortNumber, pMsg->AccessInfo.AccessSize ));
2319 rcStrict = IEMExecOne(pVCpu);
2320 }
2321 if (IOM_SUCCESS(rcStrict))
2322 {
2323 /*
2324 * Do debug checks.
2325 */
2326 if ( pMsg->Header.ExecutionState.DebugActive /** @todo Microsoft: Does DebugActive only reflect DR7? */
2327 || (pMsg->Header.Rflags & X86_EFL_TF)
2328 || DBGFBpIsHwIoArmed(pVM) )
2329 {
2330 /** @todo Debugging. */
2331 }
2332 }
2333 return rcStrict;
2334 }
2335
2336 /*
2337 * Frequent exit or something needing probing.
2338 * Get state and call EMHistoryExec.
2339 */
2340 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, &pMsg->Header);
2341 if (!pMsg->AccessInfo.StringOp)
2342 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RAX;
2343 else
2344 {
2345 pCtx->fExtrn &= ~( CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDI | CPUMCTX_EXTRN_RSI
2346 | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2347 NEM_WIN_COPY_BACK_SEG(pCtx->ds, pMsg->DsSegment);
2348 NEM_WIN_COPY_BACK_SEG(pCtx->es, pMsg->EsSegment);
2349 pCtx->rcx = pMsg->Rcx;
2350 pCtx->rdi = pMsg->Rdi;
2351 pCtx->rsi = pMsg->Rsi;
2352 }
2353 pCtx->rax = pMsg->Rax;
2354
2355# ifdef IN_RING0
2356 rcStrict = nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "IOExit");
2357 if (rcStrict != VINF_SUCCESS)
2358 return rcStrict;
2359# else
2360 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2361 AssertRCReturn(rc, rc);
2362 RT_NOREF(pGVCpu);
2363# endif
2364
2365 Log4(("IOExit/%u: %04x:%08RX64/%s: %s%s%s %#x LB %u -> EMHistoryExec\n",
2366 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2367 pMsg->AccessInfo.RepPrefix ? "REP " : "",
2368 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? "OUT" : "IN",
2369 pMsg->AccessInfo.StringOp ? "S" : "",
2370 pMsg->PortNumber, pMsg->AccessInfo.AccessSize));
2371 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2372 Log4(("IOExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2373 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2374 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2375 return rcStrict;
2376}
2377#elif defined(IN_RING3)
2378/**
2379 * Deals with I/O port access exits (WHvRunVpExitReasonX64IoPortAccess).
2380 *
2381 * @returns Strict VBox status code.
2382 * @param pVM The cross context VM structure.
2383 * @param pVCpu The cross context per CPU structure.
2384 * @param pExit The VM exit information to handle.
2385 * @param pCtx The register context.
2386 * @sa nemHCWinHandleMessageIoPort
2387 */
2388NEM_TMPL_STATIC VBOXSTRICTRC
2389nemR3WinHandleExitIoPort(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx)
2390{
2391 Assert( pExit->IoPortAccess.AccessInfo.AccessSize == 1
2392 || pExit->IoPortAccess.AccessInfo.AccessSize == 2
2393 || pExit->IoPortAccess.AccessInfo.AccessSize == 4);
2394
2395 /*
2396 * Whatever we do, we must clear pending event injection upon resume.
2397 */
2398 if (pExit->VpContext.ExecutionState.InterruptionPending)
2399 pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
2400
2401 /*
2402 * Add history first to avoid two paths doing EMHistoryExec calls.
2403 */
2404 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2405 !pExit->IoPortAccess.AccessInfo.StringOp
2406 ? ( pExit->IoPortAccess.AccessInfo.IsWrite
2407 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_WRITE)
2408 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_READ))
2409 : ( pExit->IoPortAccess.AccessInfo.IsWrite
2410 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_WRITE)
2411 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_READ)),
2412 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
2413 if (!pExitRec)
2414 {
2415 VBOXSTRICTRC rcStrict;
2416 if (!pExit->IoPortAccess.AccessInfo.StringOp)
2417 {
2418 /*
2419 * Simple port I/O.
2420 */
2421 static uint32_t const s_fAndMask[8] =
2422 { UINT32_MAX, UINT32_C(0xff), UINT32_C(0xffff), UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX };
2423 uint32_t const fAndMask = s_fAndMask[pExit->IoPortAccess.AccessInfo.AccessSize];
2424 if (pExit->IoPortAccess.AccessInfo.IsWrite)
2425 {
2426 rcStrict = IOMIOPortWrite(pVM, pVCpu, pExit->IoPortAccess.PortNumber,
2427 (uint32_t)pExit->IoPortAccess.Rax & fAndMask,
2428 pExit->IoPortAccess.AccessInfo.AccessSize);
2429 Log4(("IOExit/%u: %04x:%08RX64/%s: OUT %#x, %#x LB %u rcStrict=%Rrc\n",
2430 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2431 pExit->IoPortAccess.PortNumber, (uint32_t)pExit->IoPortAccess.Rax & fAndMask,
2432 pExit->IoPortAccess.AccessInfo.AccessSize, VBOXSTRICTRC_VAL(rcStrict) ));
2433 if (IOM_SUCCESS(rcStrict))
2434 {
2435 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
2436 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pExit->VpContext, 1);
2437 }
2438 }
2439 else
2440 {
2441 uint32_t uValue = 0;
2442 rcStrict = IOMIOPortRead(pVM, pVCpu, pExit->IoPortAccess.PortNumber, &uValue,
2443 pExit->IoPortAccess.AccessInfo.AccessSize);
2444 Log4(("IOExit/%u: %04x:%08RX64/%s: IN %#x LB %u -> %#x, rcStrict=%Rrc\n",
2445 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2446 pExit->IoPortAccess.PortNumber, pExit->IoPortAccess.AccessInfo.AccessSize, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2447 if (IOM_SUCCESS(rcStrict))
2448 {
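 /* Annotation: x86 semantics: a 32-bit IN zero-extends into RAX, while 8-bit
    and 16-bit results merge into the low bits and leave the rest of RAX untouched. */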
2449 if (pExit->IoPortAccess.AccessInfo.AccessSize != 4)
2450 pCtx->rax = (pExit->IoPortAccess.Rax & ~(uint64_t)fAndMask) | (uValue & fAndMask);
2451 else
2452 pCtx->rax = uValue;
2453 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RAX;
2454 Log4(("IOExit/%u: RAX %#RX64 -> %#RX64\n", pVCpu->idCpu, pExit->IoPortAccess.Rax, pCtx->rax));
2455 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
2456 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pExit->VpContext, 1);
2457 }
2458 }
2459 }
2460 else
2461 {
2462 /*
2463 * String port I/O.
2464 */
2465 /** @todo Someone at Microsoft please explain how we can get the address mode
2466 * from the IoPortAccess.VpContext. CS.Attributes is only sufficient for
2467 * getting the default mode, it can always be overridden by a prefix. This
2468 * forces us to interpret the instruction from opcodes, which is suboptimal.
2469 * Both AMD-V and VT-x include the address size in the exit info, at least on
2470 * CPUs that are reasonably new.
2471 *
2472 * Of course, it's possible this is undocumented and we just need to do some
2473 * experiments to figure out how it's communicated. Alternatively, we can scan
2474 * the opcode bytes for possible evil prefixes.
2475 */
2476 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
2477 pCtx->fExtrn &= ~( CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDI | CPUMCTX_EXTRN_RSI
2478 | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2479 NEM_WIN_COPY_BACK_SEG(pCtx->ds, pExit->IoPortAccess.Ds);
2480 NEM_WIN_COPY_BACK_SEG(pCtx->es, pExit->IoPortAccess.Es);
2481 pCtx->rax = pExit->IoPortAccess.Rax;
2482 pCtx->rcx = pExit->IoPortAccess.Rcx;
2483 pCtx->rdi = pExit->IoPortAccess.Rdi;
2484 pCtx->rsi = pExit->IoPortAccess.Rsi;
2485 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2486 AssertRCReturn(rc, rc);
2487
2488 Log4(("IOExit/%u: %04x:%08RX64/%s: %s%s %#x LB %u (emulating)\n",
2489 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2490 pExit->IoPortAccess.AccessInfo.RepPrefix ? "REP " : "",
2491 pExit->IoPortAccess.AccessInfo.IsWrite ? "OUTS" : "INS",
2492 pExit->IoPortAccess.PortNumber, pExit->IoPortAccess.AccessInfo.AccessSize ));
2493 rcStrict = IEMExecOne(pVCpu);
2494 }
2495 if (IOM_SUCCESS(rcStrict))
2496 {
2497 /*
2498 * Do debug checks.
2499 */
2500 if ( pExit->VpContext.ExecutionState.DebugActive /** @todo Microsoft: Does DebugActive only reflect DR7? */
2501 || (pExit->VpContext.Rflags & X86_EFL_TF)
2502 || DBGFBpIsHwIoArmed(pVM) )
2503 {
2504 /** @todo Debugging. */
2505 }
2506 }
2507 return rcStrict;
2508 }
2509
2510 /*
2511 * Frequent exit or something needing probing.
2512 * Get state and call EMHistoryExec.
2513 */
2514 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
2515 if (!pExit->IoPortAccess.AccessInfo.StringOp)
2516 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RAX;
2517 else
2518 {
2519 pCtx->fExtrn &= ~( CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDI | CPUMCTX_EXTRN_RSI
2520 | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2521 NEM_WIN_COPY_BACK_SEG(pCtx->ds, pExit->IoPortAccess.Ds);
2522 NEM_WIN_COPY_BACK_SEG(pCtx->es, pExit->IoPortAccess.Es);
2523 pCtx->rcx = pExit->IoPortAccess.Rcx;
2524 pCtx->rdi = pExit->IoPortAccess.Rdi;
2525 pCtx->rsi = pExit->IoPortAccess.Rsi;
2526 }
2527 pCtx->rax = pExit->IoPortAccess.Rax;
2528 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2529 AssertRCReturn(rc, rc);
2530 Log4(("IOExit/%u: %04x:%08RX64/%s: %s%s%s %#x LB %u -> EMHistoryExec\n",
2531 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2532 pExit->IoPortAccess.AccessInfo.RepPrefix ? "REP " : "",
2533 pExit->IoPortAccess.AccessInfo.IsWrite ? "OUT" : "IN",
2534 pExit->IoPortAccess.AccessInfo.StringOp ? "S" : "",
2535 pExit->IoPortAccess.PortNumber, pExit->IoPortAccess.AccessInfo.AccessSize));
2536 VBOXSTRICTRC rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2537 Log4(("IOExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2538 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2539 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2540 return rcStrict;
2541}
2542#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
2543
2544
2545#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
2546/**
2547 * Deals with interrupt window message.
2548 *
2549 * @returns Strict VBox status code.
2550 * @param pVM The cross context VM structure.
2551 * @param pVCpu The cross context per CPU structure.
2552 * @param pMsg The message.
2553 * @param pCtx The register context.
2554 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
2555 * @sa nemR3WinHandleExitInterruptWindow
2556 */
2557NEM_TMPL_STATIC VBOXSTRICTRC
2558nemHCWinHandleMessageInterruptWindow(PVM pVM, PVMCPU pVCpu, HV_X64_INTERRUPT_WINDOW_MESSAGE const *pMsg,
2559 PCPUMCTX pCtx, PGVMCPU pGVCpu)
2560{
2561 /*
2562 * Assert message sanity.
2563 */
2564 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_EXECUTE
2565 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ // READ & WRITE are probably not used here
2566 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE);
2567 AssertMsg(pMsg->Type == HvX64PendingInterrupt || pMsg->Type == HvX64PendingNmi, ("%#x\n", pMsg->Type));
2568
2569 /*
2570 * Just copy the state we've got and handle it in the loop for now.
2571 */
2572 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_INTTERRUPT_WINDOW),
2573 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
2574
2575 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, &pMsg->Header);
2576 Log4(("IntWinExit/%u: %04x:%08RX64/%s: %u IF=%d InterruptShadow=%d\n",
2577 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2578 pMsg->Type, RT_BOOL(pMsg->Header.Rflags & X86_EFL_IF), pMsg->Header.ExecutionState.InterruptShadow));
2579
2580 /** @todo call nemHCWinHandleInterruptFF */
2581 RT_NOREF(pVM, pGVCpu);
2582 return VINF_SUCCESS;
2583}
2584#elif defined(IN_RING3)
2585/**
2586 * Deals with interrupt window exits (WHvRunVpExitReasonX64InterruptWindow).
2587 *
2588 * @returns Strict VBox status code.
2589 * @param pVM The cross context VM structure.
2590 * @param pVCpu The cross context per CPU structure.
2591 * @param pExit The VM exit information to handle.
2592 * @param pCtx The register context.
2593 * @sa nemHCWinHandleMessageInterruptWindow
2594 */
2595NEM_TMPL_STATIC VBOXSTRICTRC
2596nemR3WinHandleExitInterruptWindow(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx)
2597{
2598 /*
2599 * Assert message sanity.
2600 */
2601 AssertMsg( pExit->InterruptWindow.DeliverableType == WHvX64PendingInterrupt
2602 || pExit->InterruptWindow.DeliverableType == WHvX64PendingNmi,
2603 ("%#x\n", pExit->InterruptWindow.DeliverableType));
2604
2605 /*
2606 * Just copy the state we've got and handle it in the loop for now.
2607 */
2608 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_INTTERRUPT_WINDOW),
2609 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
2610
2611 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
2612 Log4(("IntWinExit/%u: %04x:%08RX64/%s: %u IF=%d InterruptShadow=%d\n",
2613 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2614 pExit->InterruptWindow.DeliverableType, RT_BOOL(pExit->VpContext.Rflags & X86_EFL_IF),
2615 pExit->VpContext.ExecutionState.InterruptShadow));
2616
2617 /** @todo call nemHCWinHandleInterruptFF */
2618 RT_NOREF(pVM);
2619 return VINF_SUCCESS;
2620}
2621#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
2622
2623
2624#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
2625/**
2626 * Deals with CPUID intercept message.
2627 *
2628 * @returns Strict VBox status code.
2629 * @param pVM The cross context VM structure.
2630 * @param pVCpu The cross context per CPU structure.
2631 * @param pMsg The message.
2632 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
2633 * @sa nemR3WinHandleExitCpuId
2634 */
2635NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageCpuId(PVM pVM, PVMCPU pVCpu, HV_X64_CPUID_INTERCEPT_MESSAGE const *pMsg,
2636 PGVMCPU pGVCpu)
2637{
2638 /* Check message register value sanity. */
2639 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, pGVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
2640 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRip, pMsg->Header.Rip);
2641 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
2642 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
2643 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRax, pMsg->Rax);
2644 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRcx, pMsg->Rcx);
2645 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRdx, pMsg->Rdx);
2646 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRbx, pMsg->Rbx);
2647
2648 /* Do exit history. */
2649 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_CPUID),
2650 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
2651 if (!pExitRec)
2652 {
2653 /*
2654 * Soak up state and execute the instruction.
2655 *
2656 * Note! If this grows slightly more complicated, combine into an IEMExecDecodedCpuId
2657 * function and make everyone use it.
2658 */
2659 /** @todo Combine implementations into IEMExecDecodedCpuId as this will
2660 * only get weirder with nested VT-x and AMD-V support. */
2661 nemHCWinCopyStateFromX64Header(pVCpu, &pVCpu->cpum.GstCtx, &pMsg->Header);
2662
2663 /* Copy in the low register values (top is always cleared). */
2664 pVCpu->cpum.GstCtx.rax = (uint32_t)pMsg->Rax;
2665 pVCpu->cpum.GstCtx.rcx = (uint32_t)pMsg->Rcx;
2666 pVCpu->cpum.GstCtx.rdx = (uint32_t)pMsg->Rdx;
2667 pVCpu->cpum.GstCtx.rbx = (uint32_t)pMsg->Rbx;
2668 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);
2669
2670 /* Get the correct values. */
2671 CPUMGetGuestCpuId(pVCpu, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx,
2672 &pVCpu->cpum.GstCtx.eax, &pVCpu->cpum.GstCtx.ebx, &pVCpu->cpum.GstCtx.ecx, &pVCpu->cpum.GstCtx.edx);
2673
2674 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: rax=%08RX64 / rcx=%08RX64 / rdx=%08RX64 / rbx=%08RX64 -> %08RX32 / %08RX32 / %08RX32 / %08RX32 (hv: %08RX64 / %08RX64 / %08RX64 / %08RX64)\n",
2675 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2676 pMsg->Rax, pMsg->Rcx, pMsg->Rdx, pMsg->Rbx,
2677 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.ebx,
2678 pMsg->DefaultResultRax, pMsg->DefaultResultRcx, pMsg->DefaultResultRdx, pMsg->DefaultResultRbx));
2679
2692 /* Move RIP over the two byte CPUID instruction (0F A2) and we're done. */
2681 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, &pVCpu->cpum.GstCtx, &pMsg->Header, 2);
2682
2683 return VINF_SUCCESS;
2684 }
2685
2686 /*
2687 * Frequent exit or something needing probing.
2688 * Get state and call EMHistoryExec.
2689 */
2690 nemHCWinCopyStateFromX64Header(pVCpu, &pVCpu->cpum.GstCtx, &pMsg->Header);
2691 pVCpu->cpum.GstCtx.rax = pMsg->Rax;
2692 pVCpu->cpum.GstCtx.rcx = pMsg->Rcx;
2693 pVCpu->cpum.GstCtx.rdx = pMsg->Rdx;
2694 pVCpu->cpum.GstCtx.rbx = pMsg->Rbx;
2695 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);
2696 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: rax=%08RX64 / rcx=%08RX64 / rdx=%08RX64 / rbx=%08RX64 (hv: %08RX64 / %08RX64 / %08RX64 / %08RX64) ==> EMHistoryExec\n",
2697 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2698 pMsg->Rax, pMsg->Rcx, pMsg->Rdx, pMsg->Rbx,
2699 pMsg->DefaultResultRax, pMsg->DefaultResultRcx, pMsg->DefaultResultRdx, pMsg->DefaultResultRbx));
2700# ifdef IN_RING0
2701 VBOXSTRICTRC rcStrict = nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, &pVCpu->cpum.GstCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "CpuIdExit");
2702 if (rcStrict != VINF_SUCCESS)
2703 return rcStrict;
2704 RT_NOREF(pVM);
2705# else
2706 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, &pVCpu->cpum.GstCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2707 AssertRCReturn(rc, rc);
2708 RT_NOREF(pGVCpu);
2709# endif
2710 VBOXSTRICTRC rcStrictExec = EMHistoryExec(pVCpu, pExitRec, 0);
2711 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2712 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2713 VBOXSTRICTRC_VAL(rcStrictExec), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2714 return rcStrictExec;
2715}
2716#elif defined(IN_RING3)
2717/**
2718 * Deals with CPUID exits (WHvRunVpExitReasonX64Cpuid).
2719 *
2720 * @returns Strict VBox status code.
2721 * @param pVM The cross context VM structure.
2722 * @param pVCpu The cross context per CPU structure.
2723 * @param pExit The VM exit information to handle.
2724 * @sa nemHCWinHandleMessageCpuId
2725 */
2726NEM_TMPL_STATIC VBOXSTRICTRC
2727nemR3WinHandleExitCpuId(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2728{
2729 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_CPUID),
2730 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
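 /* No exit record means this is an infrequent exit that we handle inline below;
    otherwise it is handed to EMHistoryExec further down for probing/re-execution. */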
2731 if (!pExitRec)
2732 {
2733 /*
2734 * Soak up state and execute the instruction.
2735 *
2736 * Note! If this grows slightly more complicated, combine into an IEMExecDecodedCpuId
2737 * function and make everyone use it.
2738 */
2739 /** @todo Combine implementations into IEMExecDecodedCpuId as this will
2740 * only get weirder with nested VT-x and AMD-V support. */
2741 nemR3WinCopyStateFromX64Header(pVCpu, &pVCpu->cpum.GstCtx, &pExit->VpContext);
2742
2743 /* Copy in the low register values (top is always cleared). */
2744 pVCpu->cpum.GstCtx.rax = (uint32_t)pExit->CpuidAccess.Rax;
2745 pVCpu->cpum.GstCtx.rcx = (uint32_t)pExit->CpuidAccess.Rcx;
2746 pVCpu->cpum.GstCtx.rdx = (uint32_t)pExit->CpuidAccess.Rdx;
2747 pVCpu->cpum.GstCtx.rbx = (uint32_t)pExit->CpuidAccess.Rbx;
2748 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);
2749
2750 /* Get the correct values. */
2751 CPUMGetGuestCpuId(pVCpu, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx,
2752 &pVCpu->cpum.GstCtx.eax, &pVCpu->cpum.GstCtx.ebx, &pVCpu->cpum.GstCtx.ecx, &pVCpu->cpum.GstCtx.edx);
2753
2754 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: rax=%08RX64 / rcx=%08RX64 / rdx=%08RX64 / rbx=%08RX64 -> %08RX32 / %08RX32 / %08RX32 / %08RX32 (hv: %08RX64 / %08RX64 / %08RX64 / %08RX64)\n",
2755 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2756 pExit->CpuidAccess.Rax, pExit->CpuidAccess.Rcx, pExit->CpuidAccess.Rdx, pExit->CpuidAccess.Rbx,
2757 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.ebx,
2758 pExit->CpuidAccess.DefaultResultRax, pExit->CpuidAccess.DefaultResultRcx, pExit->CpuidAccess.DefaultResultRdx, pExit->CpuidAccess.DefaultResultRbx));
2759
2760 /* Move RIP over the two byte CPUID instruction (0F A2) and we're done. */
2761 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, &pVCpu->cpum.GstCtx, &pExit->VpContext, 2);
2762
2763 RT_NOREF_PV(pVM);
2764 return VINF_SUCCESS;
2765 }
2766
2767 /*
2768 * Frequent exit or something needing probing.
2769 * Get state and call EMHistoryExec.
2770 */
2771 nemR3WinCopyStateFromX64Header(pVCpu, &pVCpu->cpum.GstCtx, &pExit->VpContext);
2772 pVCpu->cpum.GstCtx.rax = pExit->CpuidAccess.Rax;
2773 pVCpu->cpum.GstCtx.rcx = pExit->CpuidAccess.Rcx;
2774 pVCpu->cpum.GstCtx.rdx = pExit->CpuidAccess.Rdx;
2775 pVCpu->cpum.GstCtx.rbx = pExit->CpuidAccess.Rbx;
2776 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);
2777 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: rax=%08RX64 / rcx=%08RX64 / rdx=%08RX64 / rbx=%08RX64 (hv: %08RX64 / %08RX64 / %08RX64 / %08RX64) ==> EMHistoryExec\n",
2778 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2779 pExit->CpuidAccess.Rax, pExit->CpuidAccess.Rcx, pExit->CpuidAccess.Rdx, pExit->CpuidAccess.Rbx,
2780 pExit->CpuidAccess.DefaultResultRax, pExit->CpuidAccess.DefaultResultRcx, pExit->CpuidAccess.DefaultResultRdx, pExit->CpuidAccess.DefaultResultRbx));
2781 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, &pVCpu->cpum.GstCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2782 AssertRCReturn(rc, rc);
2783 VBOXSTRICTRC rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2784 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2785 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2786 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2787 return rcStrict;
2788}
2789#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
2790
2791
2792#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
2793/**
2794 * Deals with MSR intercept message.
2795 *
2796 * @returns Strict VBox status code.
2797 * @param pVCpu The cross context per CPU structure.
2798 * @param pMsg The message.
2799 * @param pCtx The register context.
2800 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
2801 * @sa nemR3WinHandleExitMsr
2802 */
2803NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageMsr(PVMCPU pVCpu, HV_X64_MSR_INTERCEPT_MESSAGE const *pMsg,
2804 PCPUMCTX pCtx, PGVMCPU pGVCpu)
2805{
2806 /*
2807 * A wee bit of sanity first.
2808 */
2809 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
2810 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE);
2811 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, pGVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
2812 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRip, pMsg->Header.Rip);
2813 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
2814 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
2815 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRax, pMsg->Rax);
2816 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRdx, pMsg->Rdx);
2817
2818 /*
2819 * Check CPL as that's common to both RDMSR and WRMSR.
2820 */
2821 VBOXSTRICTRC rcStrict;
2822 if (pMsg->Header.ExecutionState.Cpl == 0)
2823 {
2824 /*
2825 * Get all the MSR state. Since we're getting EFER, we also need to
2826 * get CR0, CR4 and CR3.
2827 */
2828 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2829 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
2830 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE)
2831 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ),
2832 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
2833
2834 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, &pMsg->Header);
2835 rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx,
2836 (!pExitRec ? 0 : IEM_CPUMCTX_EXTRN_MUST_MASK)
2837 | CPUMCTX_EXTRN_ALL_MSRS | CPUMCTX_EXTRN_CR0
2838 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4,
2839 "MSRs");
2840 if (rcStrict == VINF_SUCCESS)
2841 {
2842 if (!pExitRec)
2843 {
2844 /*
2845 * Handle writes.
2846 */
2847 if (pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
2848 {
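 /* WRMSR takes the new value in EDX:EAX; only the low 32 bits of RAX and RDX are significant. */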
2849 rcStrict = CPUMSetGuestMsr(pVCpu, pMsg->MsrNumber, RT_MAKE_U64((uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx));
2850 Log4(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc\n",
2851 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2852 pMsg->MsrNumber, (uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx, VBOXSTRICTRC_VAL(rcStrict) ));
2853 if (rcStrict == VINF_SUCCESS)
2854 {
2855 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pMsg->Header, 2);
2856 return VINF_SUCCESS;
2857 }
2858# ifndef IN_RING3
2859 /* move to ring-3 and handle the trap/whatever there, as we want to LogRel this. */
2860 if (rcStrict == VERR_CPUM_RAISE_GP_0)
2861 rcStrict = VINF_CPUM_R3_MSR_WRITE;
2862 return rcStrict;
2863# else
2864 LogRel(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc!\n",
2865 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2866 pMsg->MsrNumber, (uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx, VBOXSTRICTRC_VAL(rcStrict) ));
2867# endif
2868 }
2869 /*
2870 * Handle reads.
2871 */
2872 else
2873 {
2874 uint64_t uValue = 0;
2875 rcStrict = CPUMQueryGuestMsr(pVCpu, pMsg->MsrNumber, &uValue);
2876 Log4(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n",
2877 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2878 pMsg->MsrNumber, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2879 if (rcStrict == VINF_SUCCESS)
2880 {
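 /* RDMSR returns the value in EDX:EAX; the cast and shift below also clear
    the high halves of RAX/RDX, just like the real instruction does. */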
2881 pCtx->rax = (uint32_t)uValue;
2882 pCtx->rdx = uValue >> 32;
2883 pCtx->fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX);
2884 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pMsg->Header, 2);
2885 return VINF_SUCCESS;
2886 }
2887# ifndef IN_RING3
2888 /* move to ring-3 and handle the trap/whatever there, as we want to LogRel this. */
2889 if (rcStrict == VERR_CPUM_RAISE_GP_0)
2890 rcStrict = VINF_CPUM_R3_MSR_READ;
2891 return rcStrict;
2892# else
2893 LogRel(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n",
2894 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2895 pMsg->MsrNumber, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2896# endif
2897 }
2898 }
2899 else
2900 {
2901 /*
2902 * Handle frequent exit or something needing probing.
2903 */
2904 Log4(("MsrExit/%u: %04x:%08RX64/%s: %sMSR %#08x\n",
2905 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2906 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? "WR" : "RD", pMsg->MsrNumber));
2907 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2908 Log4(("MsrExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2909 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2910 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2911 return rcStrict;
2912 }
2913 }
2914 else
2915 {
2916 LogRel(("MsrExit/%u: %04x:%08RX64/%s: %sMSR %08x -> %Rrc - msr state import\n",
2917 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2918 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? "WR" : "RD",
2919 pMsg->MsrNumber, VBOXSTRICTRC_VAL(rcStrict) ));
2920 return rcStrict;
2921 }
2922 }
2923 else if (pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
2924 Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); WRMSR %08x, %08x:%08x\n",
2925 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2926 pMsg->Header.ExecutionState.Cpl, pMsg->MsrNumber, (uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx ));
2927 else
2928 Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); RDMSR %08x\n",
2929 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2930 pMsg->Header.ExecutionState.Cpl, pMsg->MsrNumber));
2931
2932 /*
2933 * If we get down here, we're supposed to #GP(0).
2934 */
2935 rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx,
2936 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL_MSRS, "MSR");
2937 if (rcStrict == VINF_SUCCESS)
2938 {
2939 rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_GP, TRPM_TRAP, 0, 0, 0);
2940 if (rcStrict == VINF_IEM_RAISED_XCPT)
2941 rcStrict = VINF_SUCCESS;
2942 else if (rcStrict != VINF_SUCCESS)
2943 Log4(("MsrExit/%u: Injecting #GP(0) failed: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict) ));
2944 }
2945 return rcStrict;
2946}
2947#elif defined(IN_RING3)
2948/**
2949 * Deals with MSR access exits (WHvRunVpExitReasonX64MsrAccess).
2950 *
2951 * @returns Strict VBox status code.
2952 * @param pVM The cross context VM structure.
2953 * @param pVCpu The cross context per CPU structure.
2954 * @param pExit The VM exit information to handle.
2955 * @param pCtx The register context.
2956 * @sa nemHCWinHandleMessageMsr
2957 */
2958NEM_TMPL_STATIC VBOXSTRICTRC
2959nemR3WinHandleExitMsr(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx)
2960{
2961 /*
2962 * Check CPL as that's common to both RDMSR and WRMSR.
2963 */
2964 VBOXSTRICTRC rcStrict;
2965 if (pExit->VpContext.ExecutionState.Cpl == 0)
2966 {
2967 /*
2968 * Get all the MSR state. Since we're getting EFER, we also need to
2969 * get CR0, CR4 and CR3.
2970 */
2971 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2972 pExit->MsrAccess.AccessInfo.IsWrite
2973 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE)
2974 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ),
2975 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
2976 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
2977 rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL, pCtx,
2978 (!pExitRec ? 0 : IEM_CPUMCTX_EXTRN_MUST_MASK)
2979 | CPUMCTX_EXTRN_ALL_MSRS | CPUMCTX_EXTRN_CR0
2980 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4,
2981 "MSRs");
2982 if (rcStrict == VINF_SUCCESS)
2983 {
2984 if (!pExitRec)
2985 {
2986 /*
2987 * Handle writes.
2988 */
2989 if (pExit->MsrAccess.AccessInfo.IsWrite)
2990 {
2991 rcStrict = CPUMSetGuestMsr(pVCpu, pExit->MsrAccess.MsrNumber,
2992 RT_MAKE_U64((uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx));
2993 Log4(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
2994 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->MsrAccess.MsrNumber,
2995 (uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx, VBOXSTRICTRC_VAL(rcStrict) ));
2996 if (rcStrict == VINF_SUCCESS)
2997 {
2998 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pExit->VpContext, 2);
2999 return VINF_SUCCESS;
3000 }
3001 LogRel(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc!\n", pVCpu->idCpu,
3002 pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
3003 pExit->MsrAccess.MsrNumber, (uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx,
3004 VBOXSTRICTRC_VAL(rcStrict) ));
3005 }
3006 /*
3007 * Handle reads.
3008 */
3009 else
3010 {
3011 uint64_t uValue = 0;
3012 rcStrict = CPUMQueryGuestMsr(pVCpu, pExit->MsrAccess.MsrNumber, &uValue);
3013 Log4(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n", pVCpu->idCpu,
3014 pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
3015 pExit->MsrAccess.MsrNumber, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
3016 if (rcStrict == VINF_SUCCESS)
3017 {
3018 pCtx->rax = (uint32_t)uValue;
3019 pCtx->rdx = uValue >> 32;
3020 pCtx->fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX);
3021 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pExit->VpContext, 2);
3022 return VINF_SUCCESS;
3023 }
3024 LogRel(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3025 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->MsrAccess.MsrNumber,
3026 uValue, VBOXSTRICTRC_VAL(rcStrict) ));
3027 }
3028 }
3029 else
3030 {
3031 /*
3032 * Handle frequent exit or something needing probing.
3033 */
3034 Log4(("MsrExit/%u: %04x:%08RX64/%s: %sMSR %#08x\n",
3035 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
3036 pExit->MsrAccess.AccessInfo.IsWrite ? "WR" : "RD", pExit->MsrAccess.MsrNumber));
3037 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
3038 Log4(("MsrExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
3039 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
3040 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
3041 return rcStrict;
3042 }
3043 }
3044 else
3045 {
3046 LogRel(("MsrExit/%u: %04x:%08RX64/%s: %sMSR %08x -> %Rrc - msr state import\n",
3047 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
3048 pExit->MsrAccess.AccessInfo.IsWrite ? "WR" : "RD", pExit->MsrAccess.MsrNumber, VBOXSTRICTRC_VAL(rcStrict) ));
3049 return rcStrict;
3050 }
3051 }
3052 else if (pExit->MsrAccess.AccessInfo.IsWrite)
3053 Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); WRMSR %08x, %08x:%08x\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3054 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.ExecutionState.Cpl,
3055 pExit->MsrAccess.MsrNumber, (uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx ));
3056 else
3057 Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); RDMSR %08x\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3058 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.ExecutionState.Cpl,
3059 pExit->MsrAccess.MsrNumber));
3060
3061 /*
3062 * If we get down here, we're supposed to #GP(0).
3063 */
3064 rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL, pCtx,
3065 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL_MSRS, "MSR");
3066 if (rcStrict == VINF_SUCCESS)
3067 {
3068 rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_GP, TRPM_TRAP, 0, 0, 0);
3069 if (rcStrict == VINF_IEM_RAISED_XCPT)
3070 rcStrict = VINF_SUCCESS;
3071 else if (rcStrict != VINF_SUCCESS)
3072 Log4(("MsrExit/%u: Injecting #GP(0) failed: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict) ));
3073 }
3074
3075 RT_NOREF_PV(pVM);
3076 return rcStrict;
3077}
3078#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
3079
3080
3081/**
3082 * Worker for nemHCWinHandleMessageException & nemR3WinHandleExitException that
3083 * checks if the given opcodes are of interest at all.
3084 *
3085 * @returns true if interesting, false if not.
3086 * @param cbOpcodes Number of opcode bytes available.
3087 * @param pbOpcodes The opcode bytes.
3088 * @param f64BitMode Whether we're in 64-bit mode.
3089 */
3090DECLINLINE(bool) nemHcWinIsInterestingUndefinedOpcode(uint8_t cbOpcodes, uint8_t const *pbOpcodes, bool f64BitMode)
3091{
3092 /*
3093 * Currently only interested in VMCALL and VMMCALL.
3094 */
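 /* We need at least three bytes for the 0f 01 xx encodings; skip over any instruction prefixes first. */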
3095 while (cbOpcodes >= 3)
3096 {
3097 switch (pbOpcodes[0])
3098 {
3099 case 0x0f:
3100 switch (pbOpcodes[1])
3101 {
3102 case 0x01:
3103 switch (pbOpcodes[2])
3104 {
3105 case 0xc1: /* 0f 01 c1 VMCALL */
3106 return true;
3107 case 0xd9: /* 0f 01 d9 VMMCALL */
3108 return true;
3109 default:
3110 break;
3111 }
3112 break;
3113 }
3114 break;
3115
3116 default:
3117 return false;
3118
3119 /* Instruction prefixes; REX prefixes (40h..4fh) are only valid in 64-bit mode. */
3120 case 0x40: case 0x41: case 0x42: case 0x43: case 0x44: case 0x45: case 0x46: case 0x47:
3121 case 0x48: case 0x49: case 0x4a: case 0x4b: case 0x4c: case 0x4d: case 0x4e: case 0x4f:
3122 if (!f64BitMode)
3123 return false;
3124 RT_FALL_THRU();
3125 case X86_OP_PRF_CS:
3126 case X86_OP_PRF_SS:
3127 case X86_OP_PRF_DS:
3128 case X86_OP_PRF_ES:
3129 case X86_OP_PRF_FS:
3130 case X86_OP_PRF_GS:
3131 case X86_OP_PRF_SIZE_OP:
3132 case X86_OP_PRF_SIZE_ADDR:
3133 case X86_OP_PRF_LOCK:
3134 case X86_OP_PRF_REPZ:
3135 case X86_OP_PRF_REPNZ:
3136 cbOpcodes--;
3137 pbOpcodes++;
3138 continue;
3139 }
3140 break;
3141 }
3142 return false;
3143}
3144
3145
3146#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
3147/**
3148 * Copies state included in an exception intercept message.
3149 *
3150 * @param pVCpu The cross context per CPU structure.
3151 * @param pMsg The message.
3152 * @param pCtx The register context.
3153 * @param fClearXcpt Clear pending exception.
3154 */
3155DECLINLINE(void) nemHCWinCopyStateFromExceptionMessage(PVMCPU pVCpu, HV_X64_EXCEPTION_INTERCEPT_MESSAGE const *pMsg,
3156 PCPUMCTX pCtx, bool fClearXcpt)
3157{
3158 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, &pMsg->Header);
3159 pCtx->fExtrn &= ~( CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_DS
3160 | (fClearXcpt ? CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT : 0) );
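 /* The exception intercept message payload includes all the GPRs as well as
    DS and SS, so all of those can be marked as up to date. */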
3161 pCtx->rax = pMsg->Rax;
3162 pCtx->rcx = pMsg->Rcx;
3163 pCtx->rdx = pMsg->Rdx;
3164 pCtx->rbx = pMsg->Rbx;
3165 pCtx->rsp = pMsg->Rsp;
3166 pCtx->rbp = pMsg->Rbp;
3167 pCtx->rsi = pMsg->Rsi;
3168 pCtx->rdi = pMsg->Rdi;
3169 pCtx->r8 = pMsg->R8;
3170 pCtx->r9 = pMsg->R9;
3171 pCtx->r10 = pMsg->R10;
3172 pCtx->r11 = pMsg->R11;
3173 pCtx->r12 = pMsg->R12;
3174 pCtx->r13 = pMsg->R13;
3175 pCtx->r14 = pMsg->R14;
3176 pCtx->r15 = pMsg->R15;
3177 NEM_WIN_COPY_BACK_SEG(pCtx->ds, pMsg->DsSegment);
3178 NEM_WIN_COPY_BACK_SEG(pCtx->ss, pMsg->SsSegment);
3179}
3180#elif defined(IN_RING3)
3181/**
3182 * Copies state included in an exception intercept exit.
3183 *
3184 * @param pVCpu The cross context per CPU structure.
3185 * @param pExit The VM exit information.
3186 * @param pCtx The register context.
3187 * @param fClearXcpt Clear pending exception.
3188 */
3189DECLINLINE(void) nemR3WinCopyStateFromExceptionMessage(PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit,
3190 PCPUMCTX pCtx, bool fClearXcpt)
3191{
3192 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
3193 if (fClearXcpt)
3194 pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
3195}
3196#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
3197
3198
3199#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
3200/**
3201 * Deals with exception intercept message (HvMessageTypeX64ExceptionIntercept).
3202 *
3203 * @returns Strict VBox status code.
3204 * @param pVCpu The cross context per CPU structure.
3205 * @param pMsg The message.
3206 * @param pCtx The register context.
3207 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
3208 * @sa nemR3WinHandleExitException
3209 */
3210NEM_TMPL_STATIC VBOXSTRICTRC
3211nemHCWinHandleMessageException(PVMCPU pVCpu, HV_X64_EXCEPTION_INTERCEPT_MESSAGE const *pMsg, PCPUMCTX pCtx, PGVMCPU pGVCpu)
3212{
3213 /*
3214 * Assert sanity.
3215 */
3216 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
3217 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
3218 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_EXECUTE);
3219 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, pGVCpu, HvX64RegisterCs, pMsg->Header.CsSegment);
3220 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRip, pMsg->Header.Rip);
3221 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRflags, pMsg->Header.Rflags);
3222 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterCr8, (uint64_t)pMsg->Header.Cr8);
3223 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, pGVCpu, HvX64RegisterDs, pMsg->DsSegment);
3224 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, pGVCpu, HvX64RegisterSs, pMsg->SsSegment);
3225 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRax, pMsg->Rax);
3226 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRcx, pMsg->Rcx);
3227 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRdx, pMsg->Rdx);
3228 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRbx, pMsg->Rbx);
3229 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRsp, pMsg->Rsp);
3230 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRbp, pMsg->Rbp);
3231 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRsi, pMsg->Rsi);
3232 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRdi, pMsg->Rdi);
3233 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterR8, pMsg->R8);
3234 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterR9, pMsg->R9);
3235 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterR10, pMsg->R10);
3236 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterR11, pMsg->R11);
3237 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterR12, pMsg->R12);
3238 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterR13, pMsg->R13);
3239 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterR14, pMsg->R14);
3240 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterR15, pMsg->R15);
3241
3242 /*
3243 * Get most of the register state since we'll end up making IEM inject the
3244 * event. The exception isn't normally flagged as a pending event, so duh.
3245 *
3246 * Note! We can optimize this later with event injection.
3247 */
3248 Log4(("XcptExit/%u: %04x:%08RX64/%s: %x errcd=%#x parm=%RX64\n",
3249 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
3250 pMsg->ExceptionVector, pMsg->ErrorCode, pMsg->ExceptionParameter));
3251 nemHCWinCopyStateFromExceptionMessage(pVCpu, pMsg, pCtx, true /*fClearXcpt*/);
3252 uint64_t fWhat = NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM;
3253 if (pMsg->ExceptionVector == X86_XCPT_DB)
3254 fWhat |= CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_DR6;
3255 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx, fWhat, "Xcpt");
3256 if (rcStrict != VINF_SUCCESS)
3257 return rcStrict;
3258
3259 /*
3260 * Handle the intercept.
3261 */
3262 TRPMEVENT enmEvtType = TRPM_TRAP;
3263 switch (pMsg->ExceptionVector)
3264 {
3265 /*
3266 * We get undefined opcodes on VMMCALL(AMD) & VMCALL(Intel) instructions
3267 * and need to turn them over to GIM.
3268 *
3269 * Note! We do not check fGIMTrapXcptUD here ASSUMING that GIM only wants
3270 * #UD for handling non-native hypercall instructions. (IEM will
3271 * decode both and let the GIM provider decide whether to accept it.)
3272 */
3273 case X86_XCPT_UD:
3274 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUd);
3275 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_UD),
3276 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
3277
3278 if (nemHcWinIsInterestingUndefinedOpcode(pMsg->InstructionByteCount, pMsg->InstructionBytes,
3279 pMsg->Header.ExecutionState.EferLma && pMsg->Header.CsSegment.Long ))
3280 {
3281 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pMsg->Header.Rip, pMsg->InstructionBytes,
3282 pMsg->InstructionByteCount);
3283 Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD -> emulated -> %Rrc\n",
3284 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip,
3285 nemHCWinExecStateToLogStr(&pMsg->Header), VBOXSTRICTRC_VAL(rcStrict) ));
3286 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUdHandled);
3287 return rcStrict;
3288 }
3289 Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD [%.*Rhxs] -> re-injected\n", pVCpu->idCpu, pMsg->Header.CsSegment.Selector,
3290 pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), pMsg->InstructionByteCount, pMsg->InstructionBytes ));
3291 break;
3292
3293 /*
3294 * Filter debug exceptions.
3295 */
3296 case X86_XCPT_DB:
3297 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionDb);
3298 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_DB),
3299 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
3300 Log4(("XcptExit/%u: %04x:%08RX64/%s: #DB - TODO\n",
3301 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header) ));
3302 break;
3303
3304 case X86_XCPT_BP:
3305 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionBp);
3306 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_BP),
3307 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
3308 Log4(("XcptExit/%u: %04x:%08RX64/%s: #BP - TODO - %u\n", pVCpu->idCpu, pMsg->Header.CsSegment.Selector,
3309 pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), pMsg->Header.InstructionLength));
3310 enmEvtType = TRPM_SOFTWARE_INT; /* We're at the INT3 instruction, not after it. */
3311 break;
3312
3313 /* This shouldn't happen. */
3314 default:
3315 AssertLogRelMsgFailedReturn(("ExceptionVector=%#x\n", pMsg->ExceptionVector), VERR_IEM_IPE_6);
3316 }
3317
3318 /*
3319 * Inject it.
3320 */
3321 rcStrict = IEMInjectTrap(pVCpu, pMsg->ExceptionVector, enmEvtType, pMsg->ErrorCode,
3322 pMsg->ExceptionParameter /*??*/, pMsg->Header.InstructionLength);
3323 Log4(("XcptExit/%u: %04x:%08RX64/%s: %#u -> injected -> %Rrc\n",
3324 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip,
3325 nemHCWinExecStateToLogStr(&pMsg->Header), pMsg->ExceptionVector, VBOXSTRICTRC_VAL(rcStrict) ));
3326 return rcStrict;
3327}
3328#elif defined(IN_RING3)
3329/**
3330 * Deals with exception exits (WHvRunVpExitReasonException).
3331 *
3332 * @returns Strict VBox status code.
3333 * @param pVM The cross context VM structure.
3334 * @param pVCpu The cross context per CPU structure.
3335 * @param pExit The VM exit information to handle.
3336 * @param pCtx The register context.
3337 * @sa nemHCWinHandleMessageException
3338 */
3339NEM_TMPL_STATIC VBOXSTRICTRC
3340nemR3WinHandleExitException(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx)
3341{
3342 /*
3343 * Get most of the register state since we'll end up making IEM inject the
3344 * event. The exception isn't normally flagged as a pending event, so duh.
3345 *
3346 * Note! We can optimize this later with event injection.
3347 */
3348 Log4(("XcptExit/%u: %04x:%08RX64/%s: %x errcd=%#x parm=%RX64\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3349 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpException.ExceptionType,
3350 pExit->VpException.ErrorCode, pExit->VpException.ExceptionParameter ));
3351 nemR3WinCopyStateFromExceptionMessage(pVCpu, pExit, pCtx, true /*fClearXcpt*/);
3352 uint64_t fWhat = NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM;
3353 if (pExit->VpException.ExceptionType == X86_XCPT_DB)
3354 fWhat |= CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_DR6;
3355 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL, pCtx, fWhat, "Xcpt");
3356 if (rcStrict != VINF_SUCCESS)
3357 return rcStrict;
3358
3359 /*
3360 * Handle the intercept.
3361 */
3362 TRPMEVENT enmEvtType = TRPM_TRAP;
3363 switch (pExit->VpException.ExceptionType)
3364 {
3365 /*
3366 * We get undefined opcodes on VMMCALL(AMD) & VMCALL(Intel) instructions
3367 * and need to turn them over to GIM.
3368 *
3369 * Note! We do not check fGIMTrapXcptUD here ASSUMING that GIM only wants
3370 * #UD for handling non-native hypercall instructions. (IEM will
3371 * decode both and let the GIM provider decide whether to accept it.)
3372 */
3373 case X86_XCPT_UD:
3374 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUd);
3375 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_UD),
3376 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
3377 if (nemHcWinIsInterestingUndefinedOpcode(pExit->VpException.InstructionByteCount, pExit->VpException.InstructionBytes,
3378 pExit->VpContext.ExecutionState.EferLma && pExit->VpContext.Cs.Long ))
3379 {
3380 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pExit->VpContext.Rip,
3381 pExit->VpException.InstructionBytes,
3382 pExit->VpException.InstructionByteCount);
3383 Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD -> emulated -> %Rrc\n",
3384 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip,
3385 nemR3WinExecStateToLogStr(&pExit->VpContext), VBOXSTRICTRC_VAL(rcStrict) ));
3386 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUdHandled);
3387 return rcStrict;
3388 }
3389
3390 Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD [%.*Rhxs] -> re-injected\n", pVCpu->idCpu,
3391 pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
3392 pExit->VpException.InstructionByteCount, pExit->VpException.InstructionBytes ));
3393 break;
3394
3395 /*
3396 * Filter debug exceptions.
3397 */
3398 case X86_XCPT_DB:
3399 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionDb);
3400 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_DB),
3401 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
3402 Log4(("XcptExit/%u: %04x:%08RX64/%s: #DB - TODO\n",
3403 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext) ));
3404 break;
3405
3406 case X86_XCPT_BP:
3407 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionBp);
3408 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_BP),
3409 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
3410 Log4(("XcptExit/%u: %04x:%08RX64/%s: #BP - TODO - %u\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3411 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.InstructionLength));
3412 enmEvtType = TRPM_SOFTWARE_INT; /* We're at the INT3 instruction, not after it. */
3413 break;
3414
3415 /* This shouldn't happen. */
3416 default:
3417 AssertLogRelMsgFailedReturn(("ExceptionType=%#x\n", pExit->VpException.ExceptionType), VERR_IEM_IPE_6);
3418 }
3419
3420 /*
3421 * Inject it.
3422 */
3423 rcStrict = IEMInjectTrap(pVCpu, pExit->VpException.ExceptionType, enmEvtType, pExit->VpException.ErrorCode,
3424 pExit->VpException.ExceptionParameter /*??*/, pExit->VpContext.InstructionLength);
3425 Log4(("XcptExit/%u: %04x:%08RX64/%s: %#u -> injected -> %Rrc\n",
3426 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip,
3427 nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpException.ExceptionType, VBOXSTRICTRC_VAL(rcStrict) ));
3428
3429 RT_NOREF_PV(pVM);
3430 return rcStrict;
3431}
3432#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
3433
3434
3435#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
3436/**
3437 * Deals with unrecoverable exception (triple fault).
3438 *
3439 * Seen WRMSR 0x201 (IA32_MTRR_PHYSMASK0) writes from grub / debian9 ending up
3440 * here too. So we'll leave it to IEM to decide.
3441 *
3442 * @returns Strict VBox status code.
3443 * @param pVCpu The cross context per CPU structure.
3444 * @param pMsgHdr The message header.
3445 * @param pCtx The register context.
3446 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
3447 * @sa nemR3WinHandleExitUnrecoverableException
3448 */
3449NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageUnrecoverableException(PVMCPU pVCpu,
3450 HV_X64_INTERCEPT_MESSAGE_HEADER const *pMsgHdr,
3451 PCPUMCTX pCtx, PGVMCPU pGVCpu)
3452{
3453 /* Check message register value sanity. */
3454 NEMWIN_ASSERT_MSG_REG_SEG( pVCpu, pGVCpu, HvX64RegisterCs, pMsgHdr->CsSegment);
3455 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRip, pMsgHdr->Rip);
3456 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterRflags, pMsgHdr->Rflags);
3457 NEMWIN_ASSERT_MSG_REG_VAL64(pVCpu, pGVCpu, HvX64RegisterCr8, (uint64_t)pMsgHdr->Cr8);
3458
3459# if 0
3460 /*
3461 * Just copy the state we've got and handle it in the loop for now.
3462 */
3463 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, pMsgHdr);
3464 Log(("TripleExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT\n",
3465 pVCpu->idCpu, pMsgHdr->CsSegment.Selector, pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags));
3466 return VINF_EM_TRIPLE_FAULT;
3467# else
3468 /*
3469 * Let IEM decide whether this is really it.
3470 */
3471 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_UNRECOVERABLE_EXCEPTION),
3472 pMsgHdr->Rip + pMsgHdr->CsSegment.Base, ASMReadTSC());
3473 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, pMsgHdr);
3474 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx,
3475 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL, "TripleExit");
3476 if (rcStrict == VINF_SUCCESS)
3477 {
3478 rcStrict = IEMExecOne(pVCpu);
3479 if (rcStrict == VINF_SUCCESS)
3480 {
3481 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_SUCCESS\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
3482 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags ));
3483 pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT; /* Make sure to reset pending #DB(0). */
3484 return VINF_SUCCESS;
3485 }
3486 if (rcStrict == VINF_EM_TRIPLE_FAULT)
3487 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT!\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
3488 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags ));
3489 else
3490 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (IEMExecOne)\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
3491 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
3492 }
3493 else
3494 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (state import)\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
3495 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
3496 return rcStrict;
3497# endif
3498}
3499#elif defined(IN_RING3)
3500/**
3501 * Deals with unrecoverable exception exits (WHvRunVpExitReasonUnrecoverableException).
3502 *
3503 * @returns Strict VBox status code.
3504 * @param pVM The cross context VM structure.
3505 * @param pVCpu The cross context per CPU structure.
3506 * @param pExit The VM exit information to handle.
3507 * @param pCtx The register context.
3508 * @sa nemHCWinHandleMessageUnrecoverableException
3509 */
3510NEM_TMPL_STATIC VBOXSTRICTRC
3511nemR3WinHandleExitUnrecoverableException(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx)
3512{
3513# if 0
3514 /*
3515 * Just copy the state we've got and handle it in the loop for now.
3516 */
3517 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
3518 Log(("TripleExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3519 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags));
3520 RT_NOREF_PV(pVM);
3521 return VINF_EM_TRIPLE_FAULT;
3522# else
3523 /*
3524 * Let IEM decide whether this is really it.
3525 */
3526 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_UNRECOVERABLE_EXCEPTION),
3527 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
3528 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
3529 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL, pCtx,
3530 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL, "TripleExit");
3531 if (rcStrict == VINF_SUCCESS)
3532 {
3533 rcStrict = IEMExecOne(pVCpu);
3534 if (rcStrict == VINF_SUCCESS)
3535 {
3536 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_SUCCESS\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3537 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags));
3538 pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT; /* Make sure to reset pending #DB(0). */
3539 return VINF_SUCCESS;
3540 }
3541 if (rcStrict == VINF_EM_TRIPLE_FAULT)
3542 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT!\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3543 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags ));
3544 else
3545 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (IEMExecOne)\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3546 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
3547 }
3548 else
3549 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (state import)\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3550 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
3551 RT_NOREF_PV(pVM);
3552 return rcStrict;
3553# endif
3554
3555}
3556#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
3557
3558
3559#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
3560/**
3561 * Handles messages (VM exits).
3562 *
3563 * @returns Strict VBox status code.
3564 * @param pVM The cross context VM structure.
3565 * @param pVCpu The cross context per CPU structure.
3566 * @param pMappingHeader The message slot mapping.
3567 * @param pCtx The register context.
3568 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
3569 * @sa nemR3WinHandleExit
3570 */
3571NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessage(PVM pVM, PVMCPU pVCpu, VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader,
3572 PCPUMCTX pCtx, PGVMCPU pGVCpu)
3573{
3574 if (pMappingHeader->enmVidMsgType == VidMessageHypervisorMessage)
3575 {
3576 AssertMsg(pMappingHeader->cbMessage == HV_MESSAGE_SIZE, ("%#x\n", pMappingHeader->cbMessage));
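 /* The HV_MESSAGE payload is mapped immediately after the slot header. */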
3577 HV_MESSAGE const *pMsg = (HV_MESSAGE const *)(pMappingHeader + 1);
3578 switch (pMsg->Header.MessageType)
3579 {
3580 case HvMessageTypeUnmappedGpa:
3581 Assert(pMsg->Header.PayloadSize == RT_UOFFSETOF(HV_X64_MEMORY_INTERCEPT_MESSAGE, DsSegment));
3582 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemUnmapped);
3583 return nemHCWinHandleMessageMemory(pVM, pVCpu, &pMsg->X64MemoryIntercept, pCtx, pGVCpu);
3584
3585 case HvMessageTypeGpaIntercept:
3586 Assert(pMsg->Header.PayloadSize == RT_UOFFSETOF(HV_X64_MEMORY_INTERCEPT_MESSAGE, DsSegment));
3587 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemIntercept);
3588 return nemHCWinHandleMessageMemory(pVM, pVCpu, &pMsg->X64MemoryIntercept, pCtx, pGVCpu);
3589
3590 case HvMessageTypeX64IoPortIntercept:
3591 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64IoPortIntercept));
3592 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitPortIo);
3593 return nemHCWinHandleMessageIoPort(pVM, pVCpu, &pMsg->X64IoPortIntercept, pCtx, pGVCpu);
3594
3595 case HvMessageTypeX64Halt:
3596 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitHalt);
3597 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_HALT),
3598 pMsg->X64InterceptHeader.Rip + pMsg->X64InterceptHeader.CsSegment.Base, ASMReadTSC());
3599 Log4(("HaltExit\n"));
3600 return VINF_EM_HALT;
3601
3602 case HvMessageTypeX64InterruptWindow:
3603 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64InterruptWindow));
3604 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitInterruptWindow);
3605 return nemHCWinHandleMessageInterruptWindow(pVM, pVCpu, &pMsg->X64InterruptWindow, pCtx, pGVCpu);
3606
3607 case HvMessageTypeX64CpuidIntercept:
3608 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64CpuIdIntercept));
3609 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitCpuId);
3610 return nemHCWinHandleMessageCpuId(pVM, pVCpu, &pMsg->X64CpuIdIntercept, pGVCpu);
3611
3612 case HvMessageTypeX64MsrIntercept:
3613 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64MsrIntercept));
3614 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMsr);
3615 return nemHCWinHandleMessageMsr(pVCpu, &pMsg->X64MsrIntercept, pCtx, pGVCpu);
3616
3617 case HvMessageTypeX64ExceptionIntercept:
3618 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64ExceptionIntercept));
3619 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitException);
3620 return nemHCWinHandleMessageException(pVCpu, &pMsg->X64ExceptionIntercept, pCtx, pGVCpu);
3621
3622 case HvMessageTypeUnrecoverableException:
3623 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64InterceptHeader));
3624 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitUnrecoverable);
3625 return nemHCWinHandleMessageUnrecoverableException(pVCpu, &pMsg->X64InterceptHeader, pCtx, pGVCpu);
3626
3627 case HvMessageTypeInvalidVpRegisterValue:
3628 case HvMessageTypeUnsupportedFeature:
3629 case HvMessageTypeTlbPageSizeMismatch:
3630 LogRel(("Unimplemented msg:\n%.*Rhxd\n", (int)sizeof(*pMsg), pMsg));
3631 AssertLogRelMsgFailedReturn(("Message type %#x not implemented!\n%.32Rhxd\n", pMsg->Header.MessageType, pMsg),
3632 VERR_NEM_IPE_3);
3633
3634 case HvMessageTypeX64ApicEoi:
3635 case HvMessageTypeX64LegacyFpError:
3636 case HvMessageTypeX64RegisterIntercept:
3637 case HvMessageTypeApicEoi:
3638 case HvMessageTypeFerrAsserted:
3639 case HvMessageTypeEventLogBufferComplete:
3640 case HvMessageTimerExpired:
3641 LogRel(("Unexpected msg:\n%.*Rhxd\n", (int)sizeof(*pMsg), pMsg));
3642 AssertLogRelMsgFailedReturn(("Unexpected message on CPU #%u: %#x\n", pVCpu->idCpu, pMsg->Header.MessageType),
3643 VERR_NEM_IPE_3);
3644
3645 default:
3646 LogRel(("Unknown msg:\n%.*Rhxd\n", (int)sizeof(*pMsg), pMsg));
3647 AssertLogRelMsgFailedReturn(("Unknown message on CPU #%u: %#x\n", pVCpu->idCpu, pMsg->Header.MessageType),
3648 VERR_NEM_IPE_3);
3649 }
3650 }
3651 else
3652 AssertLogRelMsgFailedReturn(("Unexpected VID message type on CPU #%u: %#x LB %u\n",
3653 pVCpu->idCpu, pMappingHeader->enmVidMsgType, pMappingHeader->cbMessage),
3654 VERR_NEM_IPE_4);
3655}
3656#elif defined(IN_RING3)
3657/**
3658 * Handles VM exits.
3659 *
3660 * @returns Strict VBox status code.
3661 * @param pVM The cross context VM structure.
3662 * @param pVCpu The cross context per CPU structure.
3663 * @param pExit The VM exit information to handle.
3664 * @param pCtx The register context.
3665 * @sa nemHCWinHandleMessage
3666 */
3667NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExit(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx)
3668{
3669 switch (pExit->ExitReason)
3670 {
3671 case WHvRunVpExitReasonMemoryAccess:
3672 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemUnmapped);
3673 return nemR3WinHandleExitMemory(pVM, pVCpu, pExit, pCtx);
3674
3675 case WHvRunVpExitReasonX64IoPortAccess:
3676 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitPortIo);
3677 return nemR3WinHandleExitIoPort(pVM, pVCpu, pExit, pCtx);
3678
3679 case WHvRunVpExitReasonX64Halt:
3680 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitHalt);
3681 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_HALT),
3682 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
3683 Log4(("HaltExit\n"));
3684 return VINF_EM_HALT;
3685
3686 case WHvRunVpExitReasonCanceled:
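 /* The run was canceled (WHvCancelRunVirtualProcessor), typically so force
    flags can be processed; nothing to do here but return to the loop. */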
3687 return VINF_SUCCESS;
3688
3689 case WHvRunVpExitReasonX64InterruptWindow:
3690 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitInterruptWindow);
3691 return nemR3WinHandleExitInterruptWindow(pVM, pVCpu, pExit, pCtx);
3692
3693 case WHvRunVpExitReasonX64Cpuid:
3694 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitCpuId);
3695 return nemR3WinHandleExitCpuId(pVM, pVCpu, pExit);
3696
3697 case WHvRunVpExitReasonX64MsrAccess:
3698 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMsr);
3699 return nemR3WinHandleExitMsr(pVM, pVCpu, pExit, pCtx);
3700
3701 case WHvRunVpExitReasonException:
3702 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitException);
3703 return nemR3WinHandleExitException(pVM, pVCpu, pExit, pCtx);
3704
3705 case WHvRunVpExitReasonUnrecoverableException:
3706 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitUnrecoverable);
3707 return nemR3WinHandleExitUnrecoverableException(pVM, pVCpu, pExit, pCtx);
3708
3709 case WHvRunVpExitReasonUnsupportedFeature:
3710 case WHvRunVpExitReasonInvalidVpRegisterValue:
3711 LogRel(("Unimplemented exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit));
3712 AssertLogRelMsgFailedReturn(("Unexpected exit on CPU #%u: %#x\n%.32Rhxd\n",
3713 pVCpu->idCpu, pExit->ExitReason, pExit), VERR_NEM_IPE_3);
3714
3715 /* Undesired exits: */
3716 case WHvRunVpExitReasonNone:
3717 default:
3718 LogRel(("Unknown exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit));
3719 AssertLogRelMsgFailedReturn(("Unknown exit on CPU #%u: %#x!\n", pVCpu->idCpu, pExit->ExitReason), VERR_NEM_IPE_3);
3720 }
3721}
3722#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
3723
3724
3725#ifdef IN_RING0
3726/**
3727 * Perform an I/O control operation on the partition handle (VID.SYS),
3728 * restarting on alert-like behaviour.
3729 *
3730 * @returns NT status code.
3731 * @param pGVM The ring-0 VM structure.
3732 * @param pGVCpu The ring-0 CPU structure.
3733 * @param pVCpu The calling cross context CPU structure.
3734 * @param fFlags The wait flags.
3735 * @param cMillies The timeout in milliseconds.
3736 */
3737static NTSTATUS nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(PGVM pGVM, PGVMCPU pGVCpu, PVMCPU pVCpu,
3738 uint32_t fFlags, uint32_t cMillies)
3739{
3740 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu = pGVCpu->idCpu;
3741 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags = fFlags;
3742 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = cMillies;
3743 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
3744 &pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
3745 sizeof(pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext),
3746 NULL, 0);
3747 if (rcNt == STATUS_SUCCESS)
3748 { /* likely */ }
3749 /*
3750 * Generally, if we get down here, we have been interrupted between ACK'ing
3751 * a message and waiting for the next due to a NtAlertThread call. So, we
3752 * should stop ACK'ing the previous message and get on waiting on the next.
3753 * See similar stuff in nemHCWinRunGC().
3754 */
3755 else if ( rcNt == STATUS_TIMEOUT
3756 || rcNt == STATUS_ALERTED /* just in case */
3757 || rcNt == STATUS_KERNEL_APC /* just in case */
3758 || rcNt == STATUS_USER_APC /* just in case */)
3759 {
3760 DBGFTRACE_CUSTOM(pVCpu->CTX_SUFF(pVM), "IoCtlMessageSlotHandleAndGetNextRestart/1 %#x (f=%#x)", rcNt, fFlags);
3761 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuPendingAlerts);
3762 Assert(fFlags & VID_MSHAGN_F_GET_NEXT_MESSAGE);
3763
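 /* Retry without VID_MSHAGN_F_HANDLE_MESSAGE: the previous message has already
    been ACK'ed, so we only need to wait for the next one. */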
3764 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu = pVCpu->idCpu;
3765 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags = fFlags & ~VID_MSHAGN_F_HANDLE_MESSAGE;
3766 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = cMillies;
3767 rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
3768 &pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
3769 sizeof(pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext),
3770 NULL, 0);
3771 DBGFTRACE_CUSTOM(pVCpu->CTX_SUFF(pVM), "IoCtlMessageSlotHandleAndGetNextRestart/2 %#x", rcNt);
3772 }
3773 return rcNt;
3774}
3775
3776#endif /* IN_RING0 */
3777
3778
3779#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
3780/**
3781 * Worker for nemHCWinRunGC that stops the execution on the way out.
3782 *
3783 * The CPU was running the last time we checked, so there are no messages that
3784 * need to be marked as handled. The caller checks this.
3785 *
3786 * @returns rcStrict on success, error status on failure.
3787 * @param pVM The cross context VM structure.
3788 * @param pVCpu The cross context per CPU structure.
3789 * @param rcStrict The nemHCWinRunGC return status. This is a little
3790 * bit unnecessary, except in internal error cases,
3791 * since we won't need to stop the CPU if we took an
3792 * exit.
3793 * @param pMappingHeader The message slot mapping.
3794 * @param pGVM The global (ring-0) VM structure (NULL in r3).
3795 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
3796 */
3797NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinStopCpu(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict,
3798 VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader,
3799 PGVM pGVM, PGVMCPU pGVCpu)
3800{
3801# ifdef DBGFTRACE_ENABLED
3802 HV_MESSAGE const volatile *pMsgForTrace = (HV_MESSAGE const volatile *)(pMappingHeader + 1);
3803# endif
3804
3805 /*
3806 * Try stopping the processor. If we're lucky we manage to do this before it
3807 * does another VM exit.
3808 */
3809 DBGFTRACE_CUSTOM(pVM, "nemStop#0");
3810# ifdef IN_RING0
3811 pVCpu->nem.s.uIoCtlBuf.idCpu = pGVCpu->idCpu;
3812 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlStopVirtualProcessor.uFunction,
3813 &pVCpu->nem.s.uIoCtlBuf.idCpu, sizeof(pVCpu->nem.s.uIoCtlBuf.idCpu),
3814 NULL, 0);
3815 if (NT_SUCCESS(rcNt))
3816 {
3817 DBGFTRACE_CUSTOM(pVM, "nemStop#0: okay (%#x)", rcNt);
3818 Log8(("nemHCWinStopCpu: Stopping CPU succeeded (cpu status %u)\n", nemHCWinCpuGetRunningStatus(pVCpu) ));
3819 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuSuccess);
3820 return rcStrict;
3821 }
3822# else
3823 BOOL fRet = VidStopVirtualProcessor(pVM->nem.s.hPartitionDevice, pVCpu->idCpu);
3824 if (fRet)
3825 {
3826 DBGFTRACE_CUSTOM(pVM, "nemStop#0: okay");
3827 Log8(("nemHCWinStopCpu: Stopping CPU succeeded (cpu status %u)\n", nemHCWinCpuGetRunningStatus(pVCpu) ));
3828 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuSuccess);
3829 return rcStrict;
3830 }
3831 RT_NOREF(pGVM, pGVCpu);
3832# endif
3833
3834 /*
3835 * Dang. The CPU stopped by itself and we got a couple of messages to deal with.
3836 */
3837# ifdef IN_RING0
3838 DBGFTRACE_CUSTOM(pVM, "nemStop#0: pending (%#x)", rcNt);
3839 AssertLogRelMsgReturn(rcNt == ERROR_VID_STOP_PENDING, ("rcNt=%#x\n", rcNt),
3840 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3841# else
3842 DWORD dwErr = RTNtLastErrorValue();
3843 DBGFTRACE_CUSTOM(pVM, "nemStop#0: pending (%#x)", dwErr);
3844 AssertLogRelMsgReturn(dwErr == ERROR_VID_STOP_PENDING, ("dwErr=%#u (%#x)\n", dwErr, dwErr),
3845 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3846# endif
3847 Log8(("nemHCWinStopCpu: Stopping CPU #%u pending...\n", pVCpu->idCpu));
3848 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuPending);
3849
3850 /*
3851 * First message: Exit or similar, sometimes VidMessageStopRequestComplete.
3852 * Note! We can safely ASSUME that rcStrict doesn't carry any important information.
3853 */
3854# ifdef IN_RING0
3855 rcNt = nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(pGVM, pGVCpu, pVCpu, VID_MSHAGN_F_GET_NEXT_MESSAGE, 30000 /*ms*/);
3856 DBGFTRACE_CUSTOM(pVM, "nemStop#1: %#x / %#x %#x %#x", rcNt, pMappingHeader->enmVidMsgType, pMappingHeader->cbMessage,
3857 pMsgForTrace->Header.MessageType);
3858 AssertLogRelMsgReturn(rcNt == STATUS_SUCCESS,
3859 ("1st VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %#x\n", rcNt),
3860 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3861# else
3862 BOOL fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu,
3863 VID_MSHAGN_F_GET_NEXT_MESSAGE, 30000 /*ms*/);
3864 DBGFTRACE_CUSTOM(pVM, "nemStop#1: %d+%#x / %#x %#x %#x", fWait, RTNtLastErrorValue(), pMappingHeader->enmVidMsgType,
3865 pMappingHeader->cbMessage, pMsgForTrace->Header.MessageType);
3866 AssertLogRelMsgReturn(fWait, ("1st VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()),
3867 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3868# endif
3869
3870 VID_MESSAGE_TYPE enmVidMsgType = pMappingHeader->enmVidMsgType;
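 /* Either we raced a regular exit message (handle it, then fetch and ACK the
    stop-complete message), or the stop request completed straight away. */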
3871 if (enmVidMsgType != VidMessageStopRequestComplete)
3872 {
3873 VBOXSTRICTRC rcStrict2 = nemHCWinHandleMessage(pVM, pVCpu, pMappingHeader, CPUMQueryGuestCtxPtr(pVCpu), pGVCpu);
3874 if (rcStrict2 != VINF_SUCCESS && RT_SUCCESS(rcStrict))
3875 rcStrict = rcStrict2;
3876 DBGFTRACE_CUSTOM(pVM, "nemStop#1: handled %#x -> %d", pMsgForTrace->Header.MessageType, VBOXSTRICTRC_VAL(rcStrict));
3877
3878 /*
3879 * Mark it as handled and get the stop request completed message, then mark
3880 * that as handled too. The CPU is then back in the fully stopped state.
3881 */
3882# ifdef IN_RING0
3883 rcNt = nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(pGVM, pGVCpu, pVCpu,
3884 VID_MSHAGN_F_HANDLE_MESSAGE | VID_MSHAGN_F_GET_NEXT_MESSAGE,
3885 30000 /*ms*/);
3886 DBGFTRACE_CUSTOM(pVM, "nemStop#2: %#x / %#x %#x %#x", rcNt, pMappingHeader->enmVidMsgType, pMappingHeader->cbMessage,
3887 pMsgForTrace->Header.MessageType);
3888 AssertLogRelMsgReturn(rcNt == STATUS_SUCCESS,
3889 ("2nd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %#x\n", rcNt),
3890 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3891# else
3892 fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu,
3893 VID_MSHAGN_F_HANDLE_MESSAGE | VID_MSHAGN_F_GET_NEXT_MESSAGE, 30000 /*ms*/);
3894 DBGFTRACE_CUSTOM(pVM, "nemStop#2: %d+%#x / %#x %#x %#x", fWait, RTNtLastErrorValue(), pMappingHeader->enmVidMsgType,
3895 pMappingHeader->cbMessage, pMsgForTrace->Header.MessageType);
3896 AssertLogRelMsgReturn(fWait, ("2nd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()),
3897 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3898# endif
3899
3900 /* It should be a stop request completed message. */
3901 enmVidMsgType = pMappingHeader->enmVidMsgType;
3902 AssertLogRelMsgReturn(enmVidMsgType == VidMessageStopRequestComplete,
3903 ("Unexpected 2nd message following ERROR_VID_STOP_PENDING: %#x LB %#x\n",
3904 enmVidMsgType, pMappingHeader->cbMessage),
3905 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3906
3907 /*
3908 * Mark the VidMessageStopRequestComplete message as handled.
3909 */
3910# ifdef IN_RING0
3911 rcNt = nemR0NtPerformIoCtlMessageSlotHandleAndGetNext(pGVM, pGVCpu, pVCpu, VID_MSHAGN_F_HANDLE_MESSAGE, 30000 /*ms*/);
3912 DBGFTRACE_CUSTOM(pVM, "nemStop#3: %#x / %#x %#x %#x", rcNt, pMappingHeader->enmVidMsgType,
3913 pMappingHeader->cbMessage, pMsgForTrace->Header.MessageType);
3914 AssertLogRelMsgReturn(rcNt == STATUS_SUCCESS,
3915 ("3rd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %#x\n", rcNt),
3916 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3917# else
3918 fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu, VID_MSHAGN_F_HANDLE_MESSAGE, 30000 /*ms*/);
3919 DBGFTRACE_CUSTOM(pVM, "nemStop#3: %d+%#x / %#x %#x %#x", fWait, RTNtLastErrorValue(), pMappingHeader->enmVidMsgType,
3920 pMappingHeader->cbMessage, pMsgForTrace->Header.MessageType);
3921 AssertLogRelMsgReturn(fWait, ("3rd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()),
3922 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3923# endif
3924 Log8(("nemHCWinStopCpu: Stopped the CPU (rcStrict=%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict) ));
3925 }
3926 else
3927 {
3928 /** @todo I'm not so sure about this now... */
3929 DBGFTRACE_CUSTOM(pVM, "nemStop#9: %#x %#x %#x", pMappingHeader->enmVidMsgType,
3930 pMappingHeader->cbMessage, pMsgForTrace->Header.MessageType);
3931 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuPendingOdd);
3932 Log8(("nemHCWinStopCpu: Stopped the CPU (rcStrict=%Rrc) - 1st VidMessageSlotHandleAndGetNext got VidMessageStopRequestComplete.\n",
3933 VBOXSTRICTRC_VAL(rcStrict) ));
3934 }
3935 return rcStrict;
3936}
3937#endif /* NEM_WIN_USE_OUR_OWN_RUN_API */
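
#if 0 /* Illustrative sketch, not part of the build: the stop handshake above
       * reduced to its skeleton. vidGetNext() and sketchHandleMessage() are
       * hypothetical stand-ins for the ring-0/ring-3 variants of
       * VidMessageSlotHandleAndGetNext and nemHCWinHandleMessage. */
extern VID_MESSAGE_TYPE g_enmVidMsgTypeSketch;  /* hypothetical: last message type */
extern void vidGetNext(uint32_t fFlags);        /* hypothetical wrapper */
extern void sketchHandleMessage(void);          /* hypothetical wrapper */

static VBOXSTRICTRC sketchStopCpuHandshake(VBOXSTRICTRC rcStrict)
{
    /* 1st call: fetch either a straggling exit message or the completion. */
    vidGetNext(VID_MSHAGN_F_GET_NEXT_MESSAGE);
    if (g_enmVidMsgTypeSketch != VidMessageStopRequestComplete)
    {
        sketchHandleMessage();                  /* deal with the final exit */
        /* 2nd call: ack it and fetch the next message, which must now be
           the stop request completion. */
        vidGetNext(VID_MSHAGN_F_HANDLE_MESSAGE | VID_MSHAGN_F_GET_NEXT_MESSAGE);
        Assert(g_enmVidMsgTypeSketch == VidMessageStopRequestComplete);
        /* 3rd call: ack the completion; the CPU is now fully stopped. */
        vidGetNext(VID_MSHAGN_F_HANDLE_MESSAGE);
    }
    return rcStrict;
}
#endif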
3938
3939#if defined(NEM_WIN_USE_OUR_OWN_RUN_API) || defined(IN_RING3)
3940
3941/**
3942 * Deals with pending interrupt related force flags, may inject interrupt.
3943 *
3944 * @returns VBox strict status code.
3945 * @param pVM The cross context VM structure.
3946 * @param pVCpu The cross context per CPU structure.
3947 * @param pGVCpu The global (ring-0) per CPU structure.
3948 * @param pCtx The register context.
3949 * @param pfInterruptWindows Where to return interrupt window flags.
3950 */
3951NEM_TMPL_STATIC VBOXSTRICTRC
3952nemHCWinHandleInterruptFF(PVM pVM, PVMCPU pVCpu, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint8_t *pfInterruptWindows)
3953{
3954 Assert(!TRPMHasTrap(pVCpu));
3955 RT_NOREF_PV(pVM);
3956
3957 /*
3958 * First update APIC. We ASSUME this won't need TPR/CR8.
3959 */
3960 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
3961 {
3962 APICUpdatePendingInterrupts(pVCpu);
3963 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
3964 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
3965 return VINF_SUCCESS;
3966 }
3967
3968 /*
3969 * We don't currently implement SMIs.
3970 */
3971 AssertReturn(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_SMI), VERR_NEM_IPE_0);
3972
3973 /*
3974 * Check if we've got the minimum of state required for deciding whether we
3975 * can inject interrupts and NMIs. If we don't have it, get all we might require
3976 * for injection via IEM.
3977 */
3978 bool const fPendingNmi = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI);
3979 uint64_t fNeedExtrn = CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS
3980 | (fPendingNmi ? CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI : 0);
3981 if (pCtx->fExtrn & fNeedExtrn)
3982 {
3983 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx,
3984 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "IntFF");
3985 if (rcStrict != VINF_SUCCESS)
3986 return rcStrict;
3987 }
3988 bool const fInhibitInterrupts = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
3989 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip;
3990
3991 /*
3992 * NMI? Try deliver it first.
3993 */
3994 if (fPendingNmi)
3995 {
3996 if ( !fInhibitInterrupts
3997 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
3998 {
3999 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx,
4000 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "NMI");
4001 if (rcStrict == VINF_SUCCESS)
4002 {
4003 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
4004 rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_NMI, TRPM_HARDWARE_INT, 0, 0, 0);
4005 Log8(("Injected NMI on %u (%d)\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
4006 }
4007 return rcStrict;
4008 }
4009 *pfInterruptWindows |= NEM_WIN_INTW_F_NMI;
4010 Log8(("NMI window pending on %u\n", pVCpu->idCpu));
4011 }
4012
4013 /*
4014 * APIC or PIC interrupt?
4015 */
4016 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
4017 {
4018 if ( !fInhibitInterrupts
4019 && pCtx->rflags.Bits.u1IF)
4020 {
4021 AssertCompile(NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT & CPUMCTX_EXTRN_APIC_TPR);
4022 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx,
4023 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "Int");
4024 if (rcStrict == VINF_SUCCESS)
4025 {
4026 uint8_t bInterrupt;
4027 int rc = PDMGetInterrupt(pVCpu, &bInterrupt);
4028 if (RT_SUCCESS(rc))
4029 {
4030 rcStrict = IEMInjectTrap(pVCpu, bInterrupt, TRPM_HARDWARE_INT, 0, 0, 0);
4031 Log8(("Injected interrupt %#x on %u (%d)\n", bInterrupt, pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
4032 }
4033 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
4034 {
4035 *pfInterruptWindows |= (bInterrupt >> 4 /*??*/) << NEM_WIN_INTW_F_PRIO_SHIFT;
4036 Log8(("VERR_APIC_INTR_MASKED_BY_TPR: *pfInterruptWindows=%#x\n", *pfInterruptWindows));
4037 }
4038 else
4039 Log8(("PDMGetInterrupt failed -> %d\n", rc));
4040 }
4041 return rcStrict;
4042 }
4043 *pfInterruptWindows |= NEM_WIN_INTW_F_REGULAR;
4044 Log8(("Interrupt window pending on %u\n", pVCpu->idCpu));
4045 }
4046
4047 return VINF_SUCCESS;
4048}
4049
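#if 0 /* Illustrative sketch, not part of the build: the injection priority
       * implemented above with the state import plumbing stripped out. NMIs
       * beat external interrupts; anything currently blocked turns into a
       * request for the corresponding interrupt window exit. The two inject
       * helpers are hypothetical stand-ins for the IEMInjectTrap calls. */
extern VBOXSTRICTRC sketchInjectNmi(void);   /* hypothetical */
extern VBOXSTRICTRC sketchInjectIntr(void);  /* hypothetical */

static VBOXSTRICTRC sketchInterruptFF(bool fPendingNmi, bool fPendingIntr, bool fInhibit,
                                      bool fBlockNmis, bool fIfSet, uint8_t *pfIntWindows)
{
    if (fPendingNmi)
    {
        if (!fInhibit && !fBlockNmis)
            return sketchInjectNmi();            /* NMI has priority; done either way. */
        *pfIntWindows |= NEM_WIN_INTW_F_NMI;     /* Blocked: request an NMI window. */
    }
    if (fPendingIntr)
    {
        if (!fInhibit && fIfSet)
            return sketchInjectIntr();           /* PIC/APIC vector via IEM. */
        *pfIntWindows |= NEM_WIN_INTW_F_REGULAR; /* Blocked: request an interrupt window. */
    }
    return VINF_SUCCESS;
}
#endif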
4050
4051/**
4052 * Inner NEM runloop for Windows.
4053 *
4054 * @returns Strict VBox status code.
4055 * @param pVM The cross context VM structure.
4056 * @param pVCpu The cross context per CPU structure.
4057 * @param pGVM The ring-0 VM structure (NULL in ring-3).
4058 * @param pGVCpu The ring-0 per CPU structure (NULL in ring-3).
4059 */
4060NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinRunGC(PVM pVM, PVMCPU pVCpu, PGVM pGVM, PGVMCPU pGVCpu)
4061{
4062 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
4063 LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 <=\n", pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, pCtx->rflags));
4064# ifdef LOG_ENABLED
4065 if (LogIs3Enabled())
4066 nemHCWinLogState(pVM, pVCpu);
4067# endif
4068# ifdef IN_RING0
4069 Assert(pVCpu->idCpu == pGVCpu->idCpu);
4070# endif
4071
4072 /*
4073 * Try switch to NEM runloop state.
4074 */
4075 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
4076 { /* likely */ }
4077 else
4078 {
4079 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
4080 LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
4081 return VINF_SUCCESS;
4082 }
4083
4084 /*
4085 * The run loop.
4086 *
4087 * The current approach to state updating is to use the sledgehammer and sync
4088 * everything every time. This will be optimized later.
4089 */
4090# ifdef NEM_WIN_USE_OUR_OWN_RUN_API
4091 VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader = (VID_MESSAGE_MAPPING_HEADER volatile *)pVCpu->nem.s.pvMsgSlotMapping;
4092 uint32_t cMillies = 5000; /** @todo lower this later... */
4093# endif
4094 const bool fSingleStepping = DBGFIsStepping(pVCpu);
4095// const uint32_t fCheckVmFFs = !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK
4096// : VM_FF_HP_R0_PRE_HM_STEP_MASK;
4097// const uint32_t fCheckCpuFFs = !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK;
4098 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
4099 for (unsigned iLoop = 0;; iLoop++)
4100 {
4101# ifndef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
4102 /*
4103 * Hack alert!
4104 */
4105 uint32_t const cMappedPages = pVM->nem.s.cMappedPages;
4106 if (cMappedPages >= 4000)
4107 {
4108 PGMPhysNemEnumPagesByState(pVM, pVCpu, NEM_WIN_PAGE_STATE_READABLE, nemR3WinWHvUnmapOnePageCallback, NULL);
4109 Log(("nemHCWinRunGC: Unmapped all; cMappedPages=%u -> %u\n", cMappedPages, pVM->nem.s.cMappedPages));
4110 }
4111# endif
4112
4113 /*
4114 * Pending interrupts or such? Need to check and deal with this prior
4115 * to the state syncing.
4116 */
4117 pVCpu->nem.s.fDesiredInterruptWindows = 0;
4118 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC
4119 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
4120 {
4121# ifdef NEM_WIN_USE_OUR_OWN_RUN_API
4122 /* Make sure the CPU isn't executing. */
4123 if (pVCpu->nem.s.fHandleAndGetFlags == VID_MSHAGN_F_GET_NEXT_MESSAGE)
4124 {
4125 pVCpu->nem.s.fHandleAndGetFlags = 0;
4126 rcStrict = nemHCWinStopCpu(pVM, pVCpu, rcStrict, pMappingHeader, pGVM, pGVCpu);
4127 if (rcStrict == VINF_SUCCESS)
4128 { /* likely */ }
4129 else
4130 {
4131 LogFlow(("NEM/%u: breaking: nemHCWinStopCpu -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
4132 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
4133 break;
4134 }
4135 }
4136# endif
4137
4138 /* Try inject interrupt. */
4139 rcStrict = nemHCWinHandleInterruptFF(pVM, pVCpu, pGVCpu, pCtx, &pVCpu->nem.s.fDesiredInterruptWindows);
4140 if (rcStrict == VINF_SUCCESS)
4141 { /* likely */ }
4142 else
4143 {
4144 LogFlow(("NEM/%u: breaking: nemHCWinHandleInterruptFF -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
4145 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
4146 break;
4147 }
4148 }
4149
4150 /*
4151 * Ensure that hyper-V has the whole state.
4152 * (We always update the interrupt window settings when active, as hyper-V seems
4153 * to forget about them after an exit.)
4154 */
4155 if ( (pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK))
4156 != (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK)
4157 || pVCpu->nem.s.fDesiredInterruptWindows
4158 || pVCpu->nem.s.fCurrentInterruptWindows != pVCpu->nem.s.fDesiredInterruptWindows)
4159 {
4160# ifdef NEM_WIN_USE_OUR_OWN_RUN_API
4161 Assert(pVCpu->nem.s.fHandleAndGetFlags != VID_MSHAGN_F_GET_NEXT_MESSAGE /* not running */);
4162# endif
4163# ifdef IN_RING0
4164 int rc2 = nemR0WinExportState(pGVM, pGVCpu, pCtx);
4165# else
4166 int rc2 = nemHCWinCopyStateToHyperV(pVM, pVCpu, pCtx);
4167 RT_NOREF(pGVM, pGVCpu);
4168# endif
4169 AssertRCReturn(rc2, rc2);
4170 }
4171
4172 /*
4173 * Run a bit.
4174 */
4175 if ( !VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
4176 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
4177 {
4178# ifdef NEM_WIN_USE_OUR_OWN_RUN_API
4179 if (pVCpu->nem.s.fHandleAndGetFlags)
4180 { /* Very likely that the CPU does NOT need starting (pending msg, running). */ }
4181 else
4182 {
4183# ifdef IN_RING0
4184 pVCpu->nem.s.uIoCtlBuf.idCpu = pGVCpu->idCpu;
4185 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlStartVirtualProcessor.uFunction,
4186 &pVCpu->nem.s.uIoCtlBuf.idCpu, sizeof(pVCpu->nem.s.uIoCtlBuf.idCpu),
4187 NULL, 0);
4188 LogFlow(("NEM/%u: IoCtlStartVirtualProcessor -> %#x\n", pVCpu->idCpu, rcNt));
4189 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("VidStartVirtualProcessor failed for CPU #%u: %#x\n", pGVCpu->idCpu, rcNt),
4190 VERR_NEM_IPE_5);
4191# else
4192 AssertLogRelMsgReturn(g_pfnVidStartVirtualProcessor(pVM->nem.s.hPartitionDevice, pVCpu->idCpu),
4193 ("VidStartVirtualProcessor failed for CPU #%u: %u (%#x, rcNt=%#x)\n",
4194 pVCpu->idCpu, RTNtLastErrorValue(), RTNtLastErrorValue(), RTNtLastStatusValue()),
4195 VERR_NEM_IPE_5);
4196# endif
4197 pVCpu->nem.s.fHandleAndGetFlags = VID_MSHAGN_F_GET_NEXT_MESSAGE;
4198 }
4199# endif /* NEM_WIN_USE_OUR_OWN_RUN_API */
4200
4201 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_WAIT, VMCPUSTATE_STARTED_EXEC_NEM))
4202 {
4203# ifdef NEM_WIN_USE_OUR_OWN_RUN_API
4204# ifdef IN_RING0
4205 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu = pGVCpu->idCpu;
4206 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags = pVCpu->nem.s.fHandleAndGetFlags;
4207 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = cMillies;
4208 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
4209 &pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
4210 sizeof(pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext),
4211 NULL, 0);
4212 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
4213 if (rcNt == STATUS_SUCCESS)
4214# else
4215 BOOL fRet = VidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu,
4216 pVCpu->nem.s.fHandleAndGetFlags, cMillies);
4217 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
4218 if (fRet)
4219# endif
4220# else
4221 WHV_RUN_VP_EXIT_CONTEXT ExitReason;
4222 RT_ZERO(ExitReason);
4223 HRESULT hrc = WHvRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, &ExitReason, sizeof(ExitReason));
4224 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
4225 if (SUCCEEDED(hrc))
4226# endif
4227 {
4228 /*
4229 * Deal with the message.
4230 */
4231# ifdef NEM_WIN_USE_OUR_OWN_RUN_API
4232 rcStrict = nemHCWinHandleMessage(pVM, pVCpu, pMappingHeader, pCtx, pGVCpu);
4233 pVCpu->nem.s.fHandleAndGetFlags |= VID_MSHAGN_F_HANDLE_MESSAGE;
4234# else
4235 rcStrict = nemR3WinHandleExit(pVM, pVCpu, &ExitReason, pCtx);
4236# endif
4237 if (rcStrict == VINF_SUCCESS)
4238 { /* hopefully likely */ }
4239 else
4240 {
4241 LogFlow(("NEM/%u: breaking: nemHCWinHandleMessage -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
4242 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
4243 break;
4244 }
4245 }
4246 else
4247 {
4248# ifdef NEM_WIN_USE_OUR_OWN_RUN_API
4249
4250 /* VID.SYS merges STATUS_ALERTED and STATUS_USER_APC into STATUS_TIMEOUT,
4251 so after NtAlertThread we end up here with a STATUS_TIMEOUT. And yeah,
4252 the error code conversion is into WAIT_XXX, i.e. NT status codes. */
4253# ifndef IN_RING0
4254 DWORD rcNt = GetLastError();
4255# endif
4256 LogFlow(("NEM/%u: VidMessageSlotHandleAndGetNext -> %#x\n", pVCpu->idCpu, rcNt));
4257 AssertLogRelMsgReturn( rcNt == STATUS_TIMEOUT
4258 || rcNt == STATUS_ALERTED /* just in case */
4259 || rcNt == STATUS_USER_APC /* ditto */
4260 || rcNt == STATUS_KERNEL_APC /* ditto */
4261 , ("VidMessageSlotHandleAndGetNext failed for CPU #%u: %#x (%u)\n",
4262 pVCpu->idCpu, rcNt, rcNt),
4263 VERR_NEM_IPE_0);
4264 pVCpu->nem.s.fHandleAndGetFlags = VID_MSHAGN_F_GET_NEXT_MESSAGE;
4265 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatGetMsgTimeout);
4266# else
4267 AssertLogRelMsgFailedReturn(("WHvRunVirtualProcessor failed for CPU #%u: %#x (%u)\n",
4268 pVCpu->idCpu, hrc, GetLastError()),
4269 VERR_NEM_IPE_0);
4270# endif
4271 }
4272
4273 /*
4274 * If no relevant FFs are pending, loop.
4275 */
4276 if ( !VM_FF_IS_PENDING( pVM, !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
4277 && !VMCPU_FF_IS_PENDING(pVCpu, !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
4278 continue;
4279
4280 /** @todo Try to handle pending flags, not just return to EM loops. Take care
4281 * not to set important RCs here unless we've handled a message. */
4282 LogFlow(("NEM/%u: breaking: pending FF (%#x / %#x)\n",
4283 pVCpu->idCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
4284 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPost);
4285 }
4286 else
4287 {
4288 LogFlow(("NEM/%u: breaking: canceled %d (pre exec)\n", pVCpu->idCpu, VMCPU_GET_STATE(pVCpu) ));
4289 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnCancel);
4290 }
4291 }
4292 else
4293 {
4294 LogFlow(("NEM/%u: breaking: pending FF (pre exec)\n", pVCpu->idCpu));
4295 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPre);
4296 }
4297 break;
4298 } /* the run loop */
4299
4300
4301 /*
4302 * If the CPU is running, make sure to stop it before we try to sync back the
4303 * state and return to EM. We don't sync back the whole state if we can help it.
4304 */
4305# ifdef NEM_WIN_USE_OUR_OWN_RUN_API
4306 if (pVCpu->nem.s.fHandleAndGetFlags == VID_MSHAGN_F_GET_NEXT_MESSAGE)
4307 {
4308 pVCpu->nem.s.fHandleAndGetFlags = 0;
4309 rcStrict = nemHCWinStopCpu(pVM, pVCpu, rcStrict, pMappingHeader, pGVM, pGVCpu);
4310 }
4311# endif
4312
4313 if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
4314 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
4315
4316 if (pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)))
4317 {
4318 /* Try to anticipate what we might need. */
4319 uint64_t fImport = IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI;
4320 if ( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
4321 || RT_FAILURE(rcStrict))
4322 fImport = CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT);
4323# ifdef IN_RING0 /* Ring-3 I/O port access optimizations: */
4324 else if ( rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
4325 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
4326 fImport = CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT;
4327 else if (rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
4328 fImport = CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT;
4329# endif
4330 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_APIC
4331 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
4332 fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;
4333
4334 if (pCtx->fExtrn & fImport)
4335 {
4336# ifdef IN_RING0
4337 int rc2 = nemR0WinImportState(pGVM, pGVCpu, pCtx, fImport | CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT);
4338 if (RT_SUCCESS(rc2))
4339 pCtx->fExtrn &= ~fImport;
4340 else if (rc2 == VERR_NEM_CHANGE_PGM_MODE || rc2 == VERR_NEM_FLUSH_TLB || rc2 == VERR_NEM_UPDATE_APIC_BASE)
4341 {
4342 pCtx->fExtrn &= ~fImport;
4343 if (rcStrict == VINF_SUCCESS || rcStrict == -rc2)
4344 rcStrict = -rc2;
4345 else
4346 {
4347 pVCpu->nem.s.rcPending = -rc2;
4348 LogFlow(("NEM/%u: rcPending=%Rrc (rcStrict=%Rrc)\n", pVCpu->idCpu, rc2, VBOXSTRICTRC_VAL(rcStrict) ));
4349 }
4350 }
4351# else
4352 int rc2 = nemHCWinCopyStateFromHyperV(pVM, pVCpu, pCtx, fImport | CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT);
4353 if (RT_SUCCESS(rc2))
4354 pCtx->fExtrn &= ~fImport;
4355# endif
4356 else if (RT_SUCCESS(rcStrict))
4357 rcStrict = rc2;
4358 if (!(pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT))))
4359 pCtx->fExtrn = 0;
4360 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
4361 }
4362 else
4363 {
4364 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
4365 pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
4366 }
4367 }
4368 else
4369 {
4370 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
4371 pCtx->fExtrn = 0;
4372 }
4373
4374 LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 => %Rrc\n",
4375 pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, pCtx->rflags, VBOXSTRICTRC_VAL(rcStrict) ));
4376 return rcStrict;
4377}
4378
4379#endif /* defined(NEM_WIN_USE_OUR_OWN_RUN_API) || defined(IN_RING3) */
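
#if 0 /* Illustrative sketch, not part of the build: how the tail of the run
       * loop above picks what to import from Hyper-V. fExtrn marks state
       * still living externally; only what the return path will plausibly
       * touch gets pulled, everything else stays external until needed. */
static uint64_t sketchAnticipateImportMask(VBOXSTRICTRC rcStrict)
{
    /* Default: just what IEM and the interrupt FF code must have. */
    uint64_t fImport = IEM_CPUMCTX_EXTRN_MUST_MASK
                     | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT
                     | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI;
    /* EM status codes and failures head for generic handling: import it all. */
    if (   (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
        || RT_FAILURE(rcStrict))
        fImport = CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT);
    return fImport;
}
#endif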
4380
4381/**
4382 * @callback_method_impl{FNPGMPHYSNEMCHECKPAGE}
4383 */
4384NEM_TMPL_STATIC DECLCALLBACK(int) nemHCWinUnsetForA20CheckerCallback(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys,
4385 PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
4386{
4387 /* We'll just unmap the memory. */
4388 if (pInfo->u2NemState > NEM_WIN_PAGE_STATE_UNMAPPED)
4389 {
4390#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
4391 int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys);
4392 AssertRC(rc);
4393 if (RT_SUCCESS(rc))
4394#else
4395 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
4396 if (SUCCEEDED(hrc))
4397#endif
4398 {
4399 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4400 Log5(("NEM GPA unmapped/A20: %RGp (was %s, cMappedPages=%u)\n", GCPhys, g_apszPageStates[pInfo->u2NemState], cMappedPages));
4401 pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
4402 }
4403 else
4404 {
4405#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
4406 LogRel(("nemHCWinUnsetForA20CheckerCallback/unmap: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));
4407 return rc;
4408#else
4409 LogRel(("nemHCWinUnsetForA20CheckerCallback/unmap: GCPhys=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
4410 GCPhys, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
4411 return VERR_NEM_IPE_2;
4412#endif
4413 }
4414 }
4415 RT_NOREF(pVCpu, pvUser);
4416 return VINF_SUCCESS;
4417}
4418
4419
4420/**
4421 * Unmaps a page from Hyper-V for the purpose of emulating A20 gate behavior.
4422 *
4423 * @returns The PGMPhysNemQueryPageInfo result.
4424 * @param pVM The cross context VM structure.
4425 * @param pVCpu The cross context virtual CPU structure.
4426 * @param GCPhys The page to unmap.
4427 */
4428NEM_TMPL_STATIC int nemHCWinUnmapPageForA20Gate(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
4429{
4430 PGMPHYSNEMPAGEINFO Info;
4431 return PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhys, false /*fMakeWritable*/, &Info,
4432 nemHCWinUnsetForA20CheckerCallback, NULL);
4433}
4434
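#if 0 /* Illustrative sketch, not part of the build: the address aliasing the
       * A20 gate creates. With A20 disabled, physical address bit 20 is
       * forced to zero, so accesses just above 1 MB wrap to the bottom of
       * memory; the helper above emulates this by unmapping the alias page
       * at GCPhys | RT_BIT_32(20) and resyncing it lazily. */
static RTGCPHYS sketchApplyA20Gate(RTGCPHYS GCPhys, bool fA20Enabled)
{
    if (!fA20Enabled)
        GCPhys &= ~(RTGCPHYS)RT_BIT_32(20);   /* force bit 20 to zero */
    return GCPhys;
}
#endif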
4435
4436void nemHCNativeNotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
4437{
4438 Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
4439 NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb);
4440}
4441
4442
4443void nemHCNativeNotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
4444 int fRestoreAsRAM, bool fRestoreAsRAM2)
4445{
4446 Log5(("nemHCNativeNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d fRestoreAsRAM=%d fRestoreAsRAM2=%d\n",
4447 GCPhys, cb, enmKind, fRestoreAsRAM, fRestoreAsRAM2));
4448 NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb); NOREF(fRestoreAsRAM); NOREF(fRestoreAsRAM2);
4449}
4450
4451
4452void nemHCNativeNotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
4453 RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
4454{
4455 Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
4456 GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
4457 NOREF(pVM); NOREF(enmKind); NOREF(GCPhysOld); NOREF(GCPhysNew); NOREF(cb); NOREF(fRestoreAsRAM);
4458}
4459
4460
4461/**
4462 * Worker that maps pages into Hyper-V.
4463 *
4464 * This is used by the PGM physical page notifications as well as the memory
4465 * access VMEXIT handlers.
4466 *
4467 * @returns VBox status code.
4468 * @param pVM The cross context VM structure.
4469 * @param pVCpu The cross context virtual CPU structure of the
4470 * calling EMT.
4471 * @param GCPhysSrc The source page address.
4472 * @param GCPhysDst The hyper-V destination page. This may differ from
4473 * GCPhysSrc when A20 is disabled.
4474 * @param fPageProt NEM_PAGE_PROT_XXX.
4475 * @param pu2State Our page state (input/output).
4476 * @param fBackingChanged Set if the page backing is being changed.
4477 * @thread EMT(pVCpu)
4478 */
4479NEM_TMPL_STATIC int nemHCNativeSetPhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
4480 uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged)
4481{
4482#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
4483 /*
4484 * When using the hypercalls instead of the ring-3 APIs, we don't need to
4485 * unmap memory before modifying it. We still want to track the state though,
4486 * since unmap will fail when called on an unmapped page and we don't want to redo
4487 * upgrades/downgrades.
4488 */
4489 uint8_t const u2OldState = *pu2State;
4490 int rc;
4491 if (fPageProt == NEM_PAGE_PROT_NONE)
4492 {
4493 if (u2OldState > NEM_WIN_PAGE_STATE_UNMAPPED)
4494 {
4495 rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
4496 if (RT_SUCCESS(rc))
4497 {
4498 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
4499 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4500 Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
4501 }
4502 else
4503 AssertLogRelMsgFailed(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4504 }
4505 else
4506 rc = VINF_SUCCESS;
4507 }
4508 else if (fPageProt & NEM_PAGE_PROT_WRITE)
4509 {
4510 if (u2OldState != NEM_WIN_PAGE_STATE_WRITABLE || fBackingChanged)
4511 {
4512 rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
4513 HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE
4514 | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
4515 if (RT_SUCCESS(rc))
4516 {
4517 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
4518 uint32_t cMappedPages = u2OldState <= NEM_WIN_PAGE_STATE_UNMAPPED
4519 ? ASMAtomicIncU32(&pVM->nem.s.cMappedPages) : pVM->nem.s.cMappedPages;
4520 Log5(("NEM GPA writable/set: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
4521 NOREF(cMappedPages);
4522 }
4523 else
4524 AssertLogRelMsgFailed(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4525 }
4526 else
4527 rc = VINF_SUCCESS;
4528 }
4529 else
4530 {
4531 if (u2OldState != NEM_WIN_PAGE_STATE_READABLE || fBackingChanged)
4532 {
4533 rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
4534 HV_MAP_GPA_READABLE | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
4535 if (RT_SUCCESS(rc))
4536 {
4537 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
4538 uint32_t cMappedPages = u2OldState <= NEM_WIN_PAGE_STATE_UNMAPPED
4539 ? ASMAtomicIncU32(&pVM->nem.s.cMappedPages) : pVM->nem.s.cMappedPages;
4540 Log5(("NEM GPA read+exec/set: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
4541 NOREF(cMappedPages);
4542 }
4543 else
4544 AssertLogRelMsgFailed(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4545 }
4546 else
4547 rc = VINF_SUCCESS;
4548 }
4549
4550 return rc;
4551
4552#else
4553 /*
4554 * Looks like we need to unmap a page before we can change the backing
4555 * or even modify the protection. This is going to be *REALLY* efficient.
4556 * PGM lends us two bits to keep track of the state here.
4557 */
4558 uint8_t const u2OldState = *pu2State;
4559 uint8_t const u2NewState = fPageProt & NEM_PAGE_PROT_WRITE ? NEM_WIN_PAGE_STATE_WRITABLE
4560 : fPageProt & NEM_PAGE_PROT_READ ? NEM_WIN_PAGE_STATE_READABLE : NEM_WIN_PAGE_STATE_UNMAPPED;
4561 if ( fBackingChanged
4562 || u2NewState != u2OldState)
4563 {
4564 if (u2OldState > NEM_WIN_PAGE_STATE_UNMAPPED)
4565 {
4566# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
4567 int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
4568 AssertRC(rc);
4569 if (RT_SUCCESS(rc))
4570 {
4571 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
4572 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4573 if (u2NewState == NEM_WIN_PAGE_STATE_UNMAPPED)
4574 {
4575 Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n",
4576 GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
4577 return VINF_SUCCESS;
4578 }
4579 }
4580 else
4581 {
4582 LogRel(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4583 return rc;
4584 }
4585# else
4586 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst, X86_PAGE_SIZE);
4587 if (SUCCEEDED(hrc))
4588 {
4589 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
4590 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4591 if (u2NewState == NEM_WIN_PAGE_STATE_UNMAPPED)
4592 {
4593 Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n",
4594 GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
4595 return VINF_SUCCESS;
4596 }
4597 }
4598 else
4599 {
4600 LogRel(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
4601 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
4602 return VERR_NEM_INIT_FAILED;
4603 }
4604# endif
4605 }
4606 }
4607
4608 /*
4609 * Writeable mapping?
4610 */
4611 if (fPageProt & NEM_PAGE_PROT_WRITE)
4612 {
4613# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
4614 int rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
4615 HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE
4616 | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
4617 AssertRC(rc);
4618 if (RT_SUCCESS(rc))
4619 {
4620 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
4621 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4622 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
4623 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
4624 return VINF_SUCCESS;
4625 }
4626 LogRel(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4627 return rc;
4628# else
4629 void *pvPage;
4630 int rc = nemR3NativeGCPhys2R3PtrWriteable(pVM, GCPhysSrc, &pvPage);
4631 if (RT_SUCCESS(rc))
4632 {
4633 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvPage, GCPhysDst, X86_PAGE_SIZE,
4634 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute | WHvMapGpaRangeFlagWrite);
4635 if (SUCCEEDED(hrc))
4636 {
4637 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
4638 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4639 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
4640 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
4641 return VINF_SUCCESS;
4642 }
4643 LogRel(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
4644 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
4645 return VERR_NEM_INIT_FAILED;
4646 }
4647 LogRel(("nemHCNativeSetPhysPage/writable: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
4648 return rc;
4649# endif
4650 }
4651
4652 if (fPageProt & NEM_PAGE_PROT_READ)
4653 {
4654# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
4655 int rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
4656 HV_MAP_GPA_READABLE | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
4657 AssertRC(rc);
4658 if (RT_SUCCESS(rc))
4659 {
4660 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
4661 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4662 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
4663 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
4664 return VINF_SUCCESS;
4665 }
4666 LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4667 return rc;
4668# else
4669 const void *pvPage;
4670 int rc = nemR3NativeGCPhys2R3PtrReadOnly(pVM, GCPhysSrc, &pvPage);
4671 if (RT_SUCCESS(rc))
4672 {
4673 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, (void *)pvPage, GCPhysDst, X86_PAGE_SIZE,
4674 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
4675 if (SUCCEEDED(hrc))
4676 {
4677 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
4678 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4679 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
4680 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
4681 return VINF_SUCCESS;
4682 }
4683 LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
4684 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
4685 return VERR_NEM_INIT_FAILED;
4686 }
4687 LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
4688 return rc;
4689# endif
4690 }
4691
4692 /* We already unmapped it above. */
4693 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
4694 return VINF_SUCCESS;
4695#endif /* !NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
4696}
4697
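#if 0 /* Illustrative sketch, not part of the build: the protection-to-state
       * mapping nemHCNativeSetPhysPage implements. PGM lends us two bits per
       * page (*pu2State) to remember which of these states Hyper-V currently
       * has for the page, so redundant remappings can be skipped. */
static uint8_t sketchProtToPageState(uint32_t fPageProt)
{
    if (fPageProt & NEM_PAGE_PROT_WRITE)
        return NEM_WIN_PAGE_STATE_WRITABLE;   /* mapped read+write+exec */
    if (fPageProt & NEM_PAGE_PROT_READ)
        return NEM_WIN_PAGE_STATE_READABLE;   /* mapped read+exec */
    return NEM_WIN_PAGE_STATE_UNMAPPED;       /* no access: unmap */
}
#endif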
4698
4699NEM_TMPL_STATIC int nemHCJustUnmapPageFromHyperV(PVM pVM, RTGCPHYS GCPhysDst, uint8_t *pu2State)
4700{
4701 if (*pu2State <= NEM_WIN_PAGE_STATE_UNMAPPED)
4702 {
4703 Log5(("nemHCJustUnmapPageFromHyperV: %RGp == unmapped\n", GCPhysDst));
4704 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
4705 return VINF_SUCCESS;
4706 }
4707
4708#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING0)
4709 PVMCPU pVCpu = VMMGetCpu(pVM);
4710 int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
4711 AssertRC(rc);
4712 if (RT_SUCCESS(rc))
4713 {
4714 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4715 Log5(("NEM GPA unmapped/just: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[*pu2State], cMappedPages));
4716 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
4717 return VINF_SUCCESS;
4718 }
4719 LogRel(("nemHCJustUnmapPageFromHyperV/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4720 return rc;
4721#else
4722 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, X86_PAGE_SIZE);
4723 if (SUCCEEDED(hrc))
4724 {
4725 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4726 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
4727 Log5(("nemHCJustUnmapPageFromHyperV: %RGp => unmapped (total %u)\n", GCPhysDst, cMappedPages));
4728 return VINF_SUCCESS;
4729 }
4730 LogRel(("nemHCJustUnmapPageFromHyperV(%RGp): failed! hrc=%Rhrc (%#x) Last=%#x/%u\n",
4731 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
4732 return VERR_NEM_IPE_6;
4733#endif
4734}
4735
4736
4737int nemHCNativeNotifyPhysPageAllocated(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
4738 PGMPAGETYPE enmType, uint8_t *pu2State)
4739{
4740 Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
4741 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
4742 RT_NOREF_PV(HCPhys); RT_NOREF_PV(enmType);
4743
4744 int rc;
4745#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING0)
4746 PVMCPU pVCpu = VMMGetCpu(pVM);
4747 if ( pVM->nem.s.fA20Enabled
4748 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
4749 rc = nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
4750 else
4751 {
4752 /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
4753 rc = nemHCWinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
4754 if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys) && RT_SUCCESS(rc))
4755 rc = nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
4756
4757 }
4758#else
4759 RT_NOREF_PV(fPageProt);
4760 if ( pVM->nem.s.fA20Enabled
4761 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
4762 rc = nemR3JustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
4763 else if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
4764 rc = nemR3JustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
4765 else
4766 rc = VINF_SUCCESS; /* ignore since we've got the alias page at this address. */
4767#endif
4768 return rc;
4769}
4770
4771
4772void nemHCNativeNotifyPhysPageProtChanged(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
4773 PGMPAGETYPE enmType, uint8_t *pu2State)
4774{
4775 Log5(("nemHCNativeNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
4776 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
4777 RT_NOREF_PV(HCPhys); RT_NOREF_PV(enmType);
4778
4779#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING0)
4780 PVMCPU pVCpu = VMMGetCpu(pVM);
4781 if ( pVM->nem.s.fA20Enabled
4782 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
4783 nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, false /*fBackingChanged*/);
4784 else
4785 {
4786 /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
4787 nemHCWinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
4788 if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
4789 nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, false /*fBackingChanged*/);
4790 }
4791#else
4792 RT_NOREF_PV(fPageProt);
4793 if ( pVM->nem.s.fA20Enabled
4794 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
4795 nemR3JustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
4796 else if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
4797 nemR3JustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
4798 /* else: ignore since we've got the alias page at this address. */
4799#endif
4800}
4801
4802
4803void nemHCNativeNotifyPhysPageChanged(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
4804 uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
4805{
4806 Log5(("nemHCNativeNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
4807 GCPhys, HCPhysPrev, HCPhysNew, fPageProt, enmType, *pu2State));
4808 RT_NOREF_PV(HCPhysPrev); RT_NOREF_PV(HCPhysNew); RT_NOREF_PV(enmType);
4809
4810#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING0)
4811 PVMCPU pVCpu = VMMGetCpu(pVM);
4812 if ( pVM->nem.s.fA20Enabled
4813 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
4814 nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
4815 else
4816 {
4817 /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
4818 nemHCWinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
4819 if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
4820 nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
4821 }
4822#else
4823 RT_NOREF_PV(fPageProt);
4824 if ( pVM->nem.s.fA20Enabled
4825 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
4826 nemR3JustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
4827 else if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
4828 nemR3JustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
4829 /* else: ignore since we've got the alias page at this address. */
4830#endif
4831}
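
#if 0 /* Illustrative sketch, not part of the build: the pattern shared by the
       * three page notification callbacks above, with the two helpers as
       * hypothetical stand-ins for nemHCNativeSetPhysPage and
       * nemHCWinUnmapPageForA20Gate. */
extern void sketchSetPage(RTGCPHYS GCPhys);        /* hypothetical */
extern void sketchUnmapA20Alias(RTGCPHYS GCPhys);  /* hypothetical */

static void sketchNotifyPage(PVM pVM, RTGCPHYS GCPhys)
{
    if (   pVM->nem.s.fA20Enabled                  /* gate open: no aliasing */
        || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))    /* page unaffected by A20 */
        sketchSetPage(GCPhys);
    else
    {
        /* Unmap the HMA alias page and let it resync lazily when touched. */
        sketchUnmapA20Alias(GCPhys | RT_BIT_32(20));
        if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
            sketchSetPage(GCPhys);
    }
}
#endif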
4832