VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/NEMAllNativeTemplate-win.cpp.h@72619

Last change on this file since 72619 was 72575, checked in by vboxsync, 7 years ago

NEM/win: Implemented exit optimizations for MMIO, I/O ports, CPUID and MSR exits. Gives a nice speedup (8-10x), except for return-to-ring-3 scenarios. bugref:9044

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 214.5 KB
1/* $Id: NEMAllNativeTemplate-win.cpp.h 72575 2018-06-15 21:25:32Z vboxsync $ */
2/** @file
3 * NEM - Native execution manager, Windows code template ring-0/3.
4 */
5
6/*
7 * Copyright (C) 2018 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Defined Constants And Macros *
21*********************************************************************************************************************************/
22/** Copy back a segment from Hyper-V. */
23#define NEM_WIN_COPY_BACK_SEG(a_Dst, a_Src) \
24 do { \
25 (a_Dst).u64Base = (a_Src).Base; \
26 (a_Dst).u32Limit = (a_Src).Limit; \
27 (a_Dst).ValidSel = (a_Dst).Sel = (a_Src).Selector; \
28 (a_Dst).Attr.u = (a_Src).Attributes; \
29 (a_Dst).fFlags = CPUMSELREG_FLAGS_VALID; \
30 } while (0)
31
32
33/*********************************************************************************************************************************
34* Global Variables *
35*********************************************************************************************************************************/
36/** NEM_WIN_PAGE_STATE_XXX names. */
37NEM_TMPL_STATIC const char * const g_apszPageStates[4] = { "not-set", "unmapped", "readable", "writable" };
38
39/** HV_INTERCEPT_ACCESS_TYPE names. */
40static const char * const g_apszHvInterceptAccessTypes[4] = { "read", "write", "exec", "!undefined!" };
41
42
43/*********************************************************************************************************************************
44* Internal Functions *
45*********************************************************************************************************************************/
46NEM_TMPL_STATIC int nemHCNativeSetPhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
47 uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged);
48
49
50#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
51
52/**
53 * Wrapper around VMMR0_DO_NEM_MAP_PAGES for a single page.
54 *
55 * @returns VBox status code.
56 * @param pVM The cross context VM structure.
57 * @param pVCpu The cross context virtual CPU structure of the caller.
58 * @param GCPhysSrc The source page. Does not need to be page aligned.
59 * @param GCPhysDst The destination page. Same as @a GCPhysSrc except for
60 * when A20 is disabled.
61 * @param fFlags HV_MAP_GPA_XXX.
62 */
63DECLINLINE(int) nemHCWinHypercallMapPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst, uint32_t fFlags)
64{
65# ifdef IN_RING0
66 /** @todo optimize further, caller generally has the physical address. */
67 PGVM pGVM = GVMMR0FastGetGVMByVM(pVM);
68 AssertReturn(pGVM, VERR_INVALID_VM_HANDLE);
69 return nemR0WinMapPages(pGVM, pVM, &pGVM->aCpus[pVCpu->idCpu],
70 GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
71 GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
72 1, fFlags);
73# else
74 pVCpu->nem.s.Hypercall.MapPages.GCPhysSrc = GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
75 pVCpu->nem.s.Hypercall.MapPages.GCPhysDst = GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
76 pVCpu->nem.s.Hypercall.MapPages.cPages = 1;
77 pVCpu->nem.s.Hypercall.MapPages.fFlags = fFlags;
78 return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_MAP_PAGES, 0, NULL);
79# endif
80}
81
82
83/**
84 * Wrapper around VMMR0_DO_NEM_UNMAP_PAGES for a single page.
85 *
86 * @returns VBox status code.
87 * @param pVM The cross context VM structure.
88 * @param pVCpu The cross context virtual CPU structure of the caller.
89 * @param GCPhys The page to unmap. Does not need to be page aligned.
90 */
91DECLINLINE(int) nemHCWinHypercallUnmapPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
92{
93# ifdef IN_RING0
94 PGVM pGVM = GVMMR0FastGetGVMByVM(pVM);
95 AssertReturn(pGVM, VERR_INVALID_VM_HANDLE);
96 return nemR0WinUnmapPages(pGVM, &pGVM->aCpus[pVCpu->idCpu], GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, 1);
97# else
98 pVCpu->nem.s.Hypercall.UnmapPages.GCPhys = GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
99 pVCpu->nem.s.Hypercall.UnmapPages.cPages = 1;
100 return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_UNMAP_PAGES, 0, NULL);
101# endif
102}
103
104#endif /* NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
105#ifndef IN_RING0
106
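/**
 * Exports (copies) the guest state to Hyper-V, ring-3 variant.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   pCtx    The CPU context to export.
 */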
107NEM_TMPL_STATIC int nemHCWinCopyStateToHyperV(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
108{
109# ifdef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS
110 NOREF(pCtx);
111 int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_EXPORT_STATE, 0, NULL);
112 AssertLogRelRCReturn(rc, rc);
113 return rc;
114
115# else
116 /*
117 * The following is very similar to what nemR0WinExportState() does.
118 */
119 WHV_REGISTER_NAME aenmNames[128];
120 WHV_REGISTER_VALUE aValues[128];
121
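 /* Bits set in pCtx->fExtrn mark state that still lives in Hyper-V, so the
    inverted mask is the state held in CPUMCTX that may need exporting. */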
122 uint64_t const fWhat = ~pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK);
123 if ( !fWhat
124 && pVCpu->nem.s.fCurrentInterruptWindows == pVCpu->nem.s.fDesiredInterruptWindows)
125 return VINF_SUCCESS;
126 uintptr_t iReg = 0;
127
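 /* Helpers for appending a register name/value pair to the arrays above. */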
128# define ADD_REG64(a_enmName, a_uValue) do { \
129 aenmNames[iReg] = (a_enmName); \
130 aValues[iReg].Reg128.High64 = 0; \
131 aValues[iReg].Reg64 = (a_uValue); \
132 iReg++; \
133 } while (0)
134# define ADD_REG128(a_enmName, a_uValueLo, a_uValueHi) do { \
135 aenmNames[iReg] = (a_enmName); \
136 aValues[iReg].Reg128.Low64 = (a_uValueLo); \
137 aValues[iReg].Reg128.High64 = (a_uValueHi); \
138 iReg++; \
139 } while (0)
140
141 /* GPRs */
142 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
143 {
144 if (fWhat & CPUMCTX_EXTRN_RAX)
145 ADD_REG64(WHvX64RegisterRax, pCtx->rax);
146 if (fWhat & CPUMCTX_EXTRN_RCX)
147 ADD_REG64(WHvX64RegisterRcx, pCtx->rcx);
148 if (fWhat & CPUMCTX_EXTRN_RDX)
149 ADD_REG64(WHvX64RegisterRdx, pCtx->rdx);
150 if (fWhat & CPUMCTX_EXTRN_RBX)
151 ADD_REG64(WHvX64RegisterRbx, pCtx->rbx);
152 if (fWhat & CPUMCTX_EXTRN_RSP)
153 ADD_REG64(WHvX64RegisterRsp, pCtx->rsp);
154 if (fWhat & CPUMCTX_EXTRN_RBP)
155 ADD_REG64(WHvX64RegisterRbp, pCtx->rbp);
156 if (fWhat & CPUMCTX_EXTRN_RSI)
157 ADD_REG64(WHvX64RegisterRsi, pCtx->rsi);
158 if (fWhat & CPUMCTX_EXTRN_RDI)
159 ADD_REG64(WHvX64RegisterRdi, pCtx->rdi);
160 if (fWhat & CPUMCTX_EXTRN_R8_R15)
161 {
162 ADD_REG64(WHvX64RegisterR8, pCtx->r8);
163 ADD_REG64(WHvX64RegisterR9, pCtx->r9);
164 ADD_REG64(WHvX64RegisterR10, pCtx->r10);
165 ADD_REG64(WHvX64RegisterR11, pCtx->r11);
166 ADD_REG64(WHvX64RegisterR12, pCtx->r12);
167 ADD_REG64(WHvX64RegisterR13, pCtx->r13);
168 ADD_REG64(WHvX64RegisterR14, pCtx->r14);
169 ADD_REG64(WHvX64RegisterR15, pCtx->r15);
170 }
171 }
172
173 /* RIP & Flags */
174 if (fWhat & CPUMCTX_EXTRN_RIP)
175 ADD_REG64(WHvX64RegisterRip, pCtx->rip);
176 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
177 ADD_REG64(WHvX64RegisterRflags, pCtx->rflags.u);
178
179 /* Segments */
180# define ADD_SEG(a_enmName, a_SReg) \
181 do { \
182 aenmNames[iReg] = a_enmName; \
183 aValues[iReg].Segment.Base = (a_SReg).u64Base; \
184 aValues[iReg].Segment.Limit = (a_SReg).u32Limit; \
185 aValues[iReg].Segment.Selector = (a_SReg).Sel; \
186 aValues[iReg].Segment.Attributes = (a_SReg).Attr.u; \
187 iReg++; \
188 } while (0)
189 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
190 {
191 if (fWhat & CPUMCTX_EXTRN_ES)
192 ADD_SEG(WHvX64RegisterEs, pCtx->es);
193 if (fWhat & CPUMCTX_EXTRN_CS)
194 ADD_SEG(WHvX64RegisterCs, pCtx->cs);
195 if (fWhat & CPUMCTX_EXTRN_SS)
196 ADD_SEG(WHvX64RegisterSs, pCtx->ss);
197 if (fWhat & CPUMCTX_EXTRN_DS)
198 ADD_SEG(WHvX64RegisterDs, pCtx->ds);
199 if (fWhat & CPUMCTX_EXTRN_FS)
200 ADD_SEG(WHvX64RegisterFs, pCtx->fs);
201 if (fWhat & CPUMCTX_EXTRN_GS)
202 ADD_SEG(WHvX64RegisterGs, pCtx->gs);
203 }
204
205 /* Descriptor tables & task segment. */
206 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
207 {
208 if (fWhat & CPUMCTX_EXTRN_LDTR)
209 ADD_SEG(WHvX64RegisterLdtr, pCtx->ldtr);
210 if (fWhat & CPUMCTX_EXTRN_TR)
211 ADD_SEG(WHvX64RegisterTr, pCtx->tr);
212 if (fWhat & CPUMCTX_EXTRN_IDTR)
213 {
214 aenmNames[iReg] = WHvX64RegisterIdtr;
215 aValues[iReg].Table.Limit = pCtx->idtr.cbIdt;
216 aValues[iReg].Table.Base = pCtx->idtr.pIdt;
217 iReg++;
218 }
219 if (fWhat & CPUMCTX_EXTRN_GDTR)
220 {
221 aenmNames[iReg] = WHvX64RegisterGdtr;
222 aValues[iReg].Table.Limit = pCtx->gdtr.cbGdt;
223 aValues[iReg].Table.Base = pCtx->gdtr.pGdt;
224 iReg++;
225 }
226 }
227
228 /* Control registers. */
229 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
230 {
231 if (fWhat & CPUMCTX_EXTRN_CR0)
232 ADD_REG64(WHvX64RegisterCr0, pCtx->cr0);
233 if (fWhat & CPUMCTX_EXTRN_CR2)
234 ADD_REG64(WHvX64RegisterCr2, pCtx->cr2);
235 if (fWhat & CPUMCTX_EXTRN_CR3)
236 ADD_REG64(WHvX64RegisterCr3, pCtx->cr3);
237 if (fWhat & CPUMCTX_EXTRN_CR4)
238 ADD_REG64(WHvX64RegisterCr4, pCtx->cr4);
239 }
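 /* The APIC TPR is exported via CR8, which carries TPR bits 7:4 as a 4-bit value. */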
240 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
241 ADD_REG64(WHvX64RegisterCr8, CPUMGetGuestCR8(pVCpu));
242
243 /* Debug registers. */
244/** @todo fixme. Figure out what the Hyper-V version of KVM_SET_GUEST_DEBUG would be. */
245 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
246 {
247 ADD_REG64(WHvX64RegisterDr0, pCtx->dr[0]); // CPUMGetHyperDR0(pVCpu));
248 ADD_REG64(WHvX64RegisterDr1, pCtx->dr[1]); // CPUMGetHyperDR1(pVCpu));
249 ADD_REG64(WHvX64RegisterDr2, pCtx->dr[2]); // CPUMGetHyperDR2(pVCpu));
250 ADD_REG64(WHvX64RegisterDr3, pCtx->dr[3]); // CPUMGetHyperDR3(pVCpu));
251 }
252 if (fWhat & CPUMCTX_EXTRN_DR6)
253 ADD_REG64(WHvX64RegisterDr6, pCtx->dr[6]); // CPUMGetHyperDR6(pVCpu));
254 if (fWhat & CPUMCTX_EXTRN_DR7)
255 ADD_REG64(WHvX64RegisterDr7, pCtx->dr[7]); // CPUMGetHyperDR7(pVCpu));
256
257 /* Floating point state. */
258 if (fWhat & CPUMCTX_EXTRN_X87)
259 {
260 ADD_REG128(WHvX64RegisterFpMmx0, pCtx->pXStateR3->x87.aRegs[0].au64[0], pCtx->pXStateR3->x87.aRegs[0].au64[1]);
261 ADD_REG128(WHvX64RegisterFpMmx1, pCtx->pXStateR3->x87.aRegs[1].au64[0], pCtx->pXStateR3->x87.aRegs[1].au64[1]);
262 ADD_REG128(WHvX64RegisterFpMmx2, pCtx->pXStateR3->x87.aRegs[2].au64[0], pCtx->pXStateR3->x87.aRegs[2].au64[1]);
263 ADD_REG128(WHvX64RegisterFpMmx3, pCtx->pXStateR3->x87.aRegs[3].au64[0], pCtx->pXStateR3->x87.aRegs[3].au64[1]);
264 ADD_REG128(WHvX64RegisterFpMmx4, pCtx->pXStateR3->x87.aRegs[4].au64[0], pCtx->pXStateR3->x87.aRegs[4].au64[1]);
265 ADD_REG128(WHvX64RegisterFpMmx5, pCtx->pXStateR3->x87.aRegs[5].au64[0], pCtx->pXStateR3->x87.aRegs[5].au64[1]);
266 ADD_REG128(WHvX64RegisterFpMmx6, pCtx->pXStateR3->x87.aRegs[6].au64[0], pCtx->pXStateR3->x87.aRegs[6].au64[1]);
267 ADD_REG128(WHvX64RegisterFpMmx7, pCtx->pXStateR3->x87.aRegs[7].au64[0], pCtx->pXStateR3->x87.aRegs[7].au64[1]);
268
269 aenmNames[iReg] = WHvX64RegisterFpControlStatus;
270 aValues[iReg].FpControlStatus.FpControl = pCtx->pXStateR3->x87.FCW;
271 aValues[iReg].FpControlStatus.FpStatus = pCtx->pXStateR3->x87.FSW;
272 aValues[iReg].FpControlStatus.FpTag = pCtx->pXStateR3->x87.FTW;
273 aValues[iReg].FpControlStatus.Reserved = pCtx->pXStateR3->x87.FTW >> 8;
274 aValues[iReg].FpControlStatus.LastFpOp = pCtx->pXStateR3->x87.FOP;
275 aValues[iReg].FpControlStatus.LastFpRip = (pCtx->pXStateR3->x87.FPUIP)
276 | ((uint64_t)pCtx->pXStateR3->x87.CS << 32)
277 | ((uint64_t)pCtx->pXStateR3->x87.Rsrvd1 << 48);
278 iReg++;
279
280 aenmNames[iReg] = WHvX64RegisterXmmControlStatus;
281 aValues[iReg].XmmControlStatus.LastFpRdp = (pCtx->pXStateR3->x87.FPUDP)
282 | ((uint64_t)pCtx->pXStateR3->x87.DS << 32)
283 | ((uint64_t)pCtx->pXStateR3->x87.Rsrvd2 << 48);
284 aValues[iReg].XmmControlStatus.XmmStatusControl = pCtx->pXStateR3->x87.MXCSR;
285 aValues[iReg].XmmControlStatus.XmmStatusControlMask = pCtx->pXStateR3->x87.MXCSR_MASK; /** @todo ??? (Isn't this an output field?) */
286 iReg++;
287 }
288
289 /* Vector state. */
290 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
291 {
292 ADD_REG128(WHvX64RegisterXmm0, pCtx->pXStateR3->x87.aXMM[ 0].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 0].uXmm.s.Hi);
293 ADD_REG128(WHvX64RegisterXmm1, pCtx->pXStateR3->x87.aXMM[ 1].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 1].uXmm.s.Hi);
294 ADD_REG128(WHvX64RegisterXmm2, pCtx->pXStateR3->x87.aXMM[ 2].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 2].uXmm.s.Hi);
295 ADD_REG128(WHvX64RegisterXmm3, pCtx->pXStateR3->x87.aXMM[ 3].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 3].uXmm.s.Hi);
296 ADD_REG128(WHvX64RegisterXmm4, pCtx->pXStateR3->x87.aXMM[ 4].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 4].uXmm.s.Hi);
297 ADD_REG128(WHvX64RegisterXmm5, pCtx->pXStateR3->x87.aXMM[ 5].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 5].uXmm.s.Hi);
298 ADD_REG128(WHvX64RegisterXmm6, pCtx->pXStateR3->x87.aXMM[ 6].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 6].uXmm.s.Hi);
299 ADD_REG128(WHvX64RegisterXmm7, pCtx->pXStateR3->x87.aXMM[ 7].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 7].uXmm.s.Hi);
300 ADD_REG128(WHvX64RegisterXmm8, pCtx->pXStateR3->x87.aXMM[ 8].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 8].uXmm.s.Hi);
301 ADD_REG128(WHvX64RegisterXmm9, pCtx->pXStateR3->x87.aXMM[ 9].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 9].uXmm.s.Hi);
302 ADD_REG128(WHvX64RegisterXmm10, pCtx->pXStateR3->x87.aXMM[10].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[10].uXmm.s.Hi);
303 ADD_REG128(WHvX64RegisterXmm11, pCtx->pXStateR3->x87.aXMM[11].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[11].uXmm.s.Hi);
304 ADD_REG128(WHvX64RegisterXmm12, pCtx->pXStateR3->x87.aXMM[12].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[12].uXmm.s.Hi);
305 ADD_REG128(WHvX64RegisterXmm13, pCtx->pXStateR3->x87.aXMM[13].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[13].uXmm.s.Hi);
306 ADD_REG128(WHvX64RegisterXmm14, pCtx->pXStateR3->x87.aXMM[14].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[14].uXmm.s.Hi);
307 ADD_REG128(WHvX64RegisterXmm15, pCtx->pXStateR3->x87.aXMM[15].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[15].uXmm.s.Hi);
308 }
309
310 /* MSRs */
311 // WHvX64RegisterTsc - don't touch
312 if (fWhat & CPUMCTX_EXTRN_EFER)
313 ADD_REG64(WHvX64RegisterEfer, pCtx->msrEFER);
314 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
315 ADD_REG64(WHvX64RegisterKernelGsBase, pCtx->msrKERNELGSBASE);
316 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
317 {
318 ADD_REG64(WHvX64RegisterSysenterCs, pCtx->SysEnter.cs);
319 ADD_REG64(WHvX64RegisterSysenterEip, pCtx->SysEnter.eip);
320 ADD_REG64(WHvX64RegisterSysenterEsp, pCtx->SysEnter.esp);
321 }
322 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
323 {
324 ADD_REG64(WHvX64RegisterStar, pCtx->msrSTAR);
325 ADD_REG64(WHvX64RegisterLstar, pCtx->msrLSTAR);
326 ADD_REG64(WHvX64RegisterCstar, pCtx->msrCSTAR);
327 ADD_REG64(WHvX64RegisterSfmask, pCtx->msrSFMASK);
328 }
329 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
330 {
331 ADD_REG64(WHvX64RegisterApicBase, APICGetBaseMsrNoCheck(pVCpu));
332 ADD_REG64(WHvX64RegisterPat, pCtx->msrPAT);
333#if 0 /** @todo check if WHvX64RegisterMsrMtrrCap works here... */
334 ADD_REG64(WHvX64RegisterMsrMtrrCap, CPUMGetGuestIa32MtrrCap(pVCpu));
335#endif
336 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
337 ADD_REG64(WHvX64RegisterMsrMtrrDefType, pCtxMsrs->msr.MtrrDefType);
338 ADD_REG64(WHvX64RegisterMsrMtrrFix64k00000, pCtxMsrs->msr.MtrrFix64K_00000);
339 ADD_REG64(WHvX64RegisterMsrMtrrFix16k80000, pCtxMsrs->msr.MtrrFix16K_80000);
340 ADD_REG64(WHvX64RegisterMsrMtrrFix16kA0000, pCtxMsrs->msr.MtrrFix16K_A0000);
341 ADD_REG64(WHvX64RegisterMsrMtrrFix4kC0000, pCtxMsrs->msr.MtrrFix4K_C0000);
342 ADD_REG64(WHvX64RegisterMsrMtrrFix4kC8000, pCtxMsrs->msr.MtrrFix4K_C8000);
343 ADD_REG64(WHvX64RegisterMsrMtrrFix4kD0000, pCtxMsrs->msr.MtrrFix4K_D0000);
344 ADD_REG64(WHvX64RegisterMsrMtrrFix4kD8000, pCtxMsrs->msr.MtrrFix4K_D8000);
345 ADD_REG64(WHvX64RegisterMsrMtrrFix4kE0000, pCtxMsrs->msr.MtrrFix4K_E0000);
346 ADD_REG64(WHvX64RegisterMsrMtrrFix4kE8000, pCtxMsrs->msr.MtrrFix4K_E8000);
347 ADD_REG64(WHvX64RegisterMsrMtrrFix4kF0000, pCtxMsrs->msr.MtrrFix4K_F0000);
348 ADD_REG64(WHvX64RegisterMsrMtrrFix4kF8000, pCtxMsrs->msr.MtrrFix4K_F8000);
349 ADD_REG64(WHvX64RegisterTscAux, pCtxMsrs->msr.TscAux);
350#if 0 /** @todo these registers aren't available? Might explain something... */
351 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pVM);
352 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
353 {
354 ADD_REG64(HvX64RegisterIa32MiscEnable, pCtxMsrs->msr.MiscEnable);
355 ADD_REG64(HvX64RegisterIa32FeatureControl, CPUMGetGuestIa32FeatureControl(pVCpu));
356 }
357#endif
358 }
359
360 /* event injection (clear it). */
361 if (fWhat & CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)
362 ADD_REG64(WHvRegisterPendingInterruption, 0);
363
364 /* Interruptibility state. This can get a little complicated since we get
365 half of the state via HV_X64_VP_EXECUTION_STATE. */
366 if ( (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
367 == (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI) )
368 {
369 ADD_REG64(WHvRegisterInterruptState, 0);
370 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
371 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
372 aValues[iReg - 1].InterruptState.InterruptShadow = 1;
373 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
374 aValues[iReg - 1].InterruptState.NmiMasked = 1;
375 }
376 else if (fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT)
377 {
378 if ( pVCpu->nem.s.fLastInterruptShadow
379 || ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
380 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip))
381 {
382 ADD_REG64(WHvRegisterInterruptState, 0);
383 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
384 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
385 aValues[iReg - 1].InterruptState.InterruptShadow = 1;
386 /** @todo Retrieve NMI state, currently assuming it's zero. (yes this may happen on I/O) */
387 //if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
388 // aValues[iReg - 1].InterruptState.NmiMasked = 1;
389 }
390 }
391 else
392 Assert(!(fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI));
393
394 /* Interrupt windows. Always set if active as Hyper-V seems to be forgetful. */
395 uint8_t const fDesiredIntWin = pVCpu->nem.s.fDesiredInterruptWindows;
396 if ( fDesiredIntWin
397 || pVCpu->nem.s.fCurrentInterruptWindows != fDesiredIntWin)
398 {
399 pVCpu->nem.s.fCurrentInterruptWindows = pVCpu->nem.s.fDesiredInterruptWindows;
400 ADD_REG64(WHvX64RegisterDeliverabilityNotifications, fDesiredIntWin);
401 Assert(aValues[iReg - 1].DeliverabilityNotifications.NmiNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_NMI));
402 Assert(aValues[iReg - 1].DeliverabilityNotifications.InterruptNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_REGULAR));
403 Assert(aValues[iReg - 1].DeliverabilityNotifications.InterruptPriority == (fDesiredIntWin & NEM_WIN_INTW_F_PRIO_MASK) >> NEM_WIN_INTW_F_PRIO_SHIFT);
404 }
405
406 /// @todo WHvRegisterPendingEvent0
407 /// @todo WHvRegisterPendingEvent1
408
409 /*
410 * Set the registers.
411 */
412 Assert(iReg < RT_ELEMENTS(aValues));
413 Assert(iReg < RT_ELEMENTS(aenmNames));
414# ifdef NEM_WIN_INTERCEPT_NT_IO_CTLS
415 Log12(("Calling WHvSetVirtualProcessorRegisters(%p, %u, %p, %u, %p)\n",
416 pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues));
417# endif
418 HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues);
419 if (SUCCEEDED(hrc))
420 {
421 pCtx->fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM;
422 return VINF_SUCCESS;
423 }
424 AssertLogRelMsgFailed(("WHvSetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
425 pVM->nem.s.hPartition, pVCpu->idCpu, iReg,
426 hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
427 return VERR_INTERNAL_ERROR;
428
429# undef ADD_REG64
430# undef ADD_REG128
431# undef ADD_SEG
432
433# endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
434}
435
436
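/**
 * Imports (copies) the guest state from Hyper-V, ring-3 variant.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   pCtx    The CPU context to update.
 * @param   fWhat   What to import, CPUMCTX_EXTRN_XXX.
 */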
437NEM_TMPL_STATIC int nemHCWinCopyStateFromHyperV(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fWhat)
438{
439# ifdef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS
440 /* See NEMR0ImportState */
441 NOREF(pCtx);
442 int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_IMPORT_STATE, fWhat, NULL);
443 if (RT_SUCCESS(rc))
444 return rc;
445 if (rc == VERR_NEM_FLUSH_TLB)
446 return PGMFlushTLB(pVCpu, pCtx->cr3, true /*fGlobal*/);
447 if (rc == VERR_NEM_CHANGE_PGM_MODE)
448 return PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
449 AssertLogRelRCReturn(rc, rc);
450 return rc;
451
452# else
453 WHV_REGISTER_NAME aenmNames[128];
454
455 fWhat &= pCtx->fExtrn;
456 uintptr_t iReg = 0;
457
458 /* GPRs */
459 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
460 {
461 if (fWhat & CPUMCTX_EXTRN_RAX)
462 aenmNames[iReg++] = WHvX64RegisterRax;
463 if (fWhat & CPUMCTX_EXTRN_RCX)
464 aenmNames[iReg++] = WHvX64RegisterRcx;
465 if (fWhat & CPUMCTX_EXTRN_RDX)
466 aenmNames[iReg++] = WHvX64RegisterRdx;
467 if (fWhat & CPUMCTX_EXTRN_RBX)
468 aenmNames[iReg++] = WHvX64RegisterRbx;
469 if (fWhat & CPUMCTX_EXTRN_RSP)
470 aenmNames[iReg++] = WHvX64RegisterRsp;
471 if (fWhat & CPUMCTX_EXTRN_RBP)
472 aenmNames[iReg++] = WHvX64RegisterRbp;
473 if (fWhat & CPUMCTX_EXTRN_RSI)
474 aenmNames[iReg++] = WHvX64RegisterRsi;
475 if (fWhat & CPUMCTX_EXTRN_RDI)
476 aenmNames[iReg++] = WHvX64RegisterRdi;
477 if (fWhat & CPUMCTX_EXTRN_R8_R15)
478 {
479 aenmNames[iReg++] = WHvX64RegisterR8;
480 aenmNames[iReg++] = WHvX64RegisterR9;
481 aenmNames[iReg++] = WHvX64RegisterR10;
482 aenmNames[iReg++] = WHvX64RegisterR11;
483 aenmNames[iReg++] = WHvX64RegisterR12;
484 aenmNames[iReg++] = WHvX64RegisterR13;
485 aenmNames[iReg++] = WHvX64RegisterR14;
486 aenmNames[iReg++] = WHvX64RegisterR15;
487 }
488 }
489
490 /* RIP & Flags */
491 if (fWhat & CPUMCTX_EXTRN_RIP)
492 aenmNames[iReg++] = WHvX64RegisterRip;
493 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
494 aenmNames[iReg++] = WHvX64RegisterRflags;
495
496 /* Segments */
497 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
498 {
499 if (fWhat & CPUMCTX_EXTRN_ES)
500 aenmNames[iReg++] = WHvX64RegisterEs;
501 if (fWhat & CPUMCTX_EXTRN_CS)
502 aenmNames[iReg++] = WHvX64RegisterCs;
503 if (fWhat & CPUMCTX_EXTRN_SS)
504 aenmNames[iReg++] = WHvX64RegisterSs;
505 if (fWhat & CPUMCTX_EXTRN_DS)
506 aenmNames[iReg++] = WHvX64RegisterDs;
507 if (fWhat & CPUMCTX_EXTRN_FS)
508 aenmNames[iReg++] = WHvX64RegisterFs;
509 if (fWhat & CPUMCTX_EXTRN_GS)
510 aenmNames[iReg++] = WHvX64RegisterGs;
511 }
512
513 /* Descriptor tables. */
514 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
515 {
516 if (fWhat & CPUMCTX_EXTRN_LDTR)
517 aenmNames[iReg++] = WHvX64RegisterLdtr;
518 if (fWhat & CPUMCTX_EXTRN_TR)
519 aenmNames[iReg++] = WHvX64RegisterTr;
520 if (fWhat & CPUMCTX_EXTRN_IDTR)
521 aenmNames[iReg++] = WHvX64RegisterIdtr;
522 if (fWhat & CPUMCTX_EXTRN_GDTR)
523 aenmNames[iReg++] = WHvX64RegisterGdtr;
524 }
525
526 /* Control registers. */
527 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
528 {
529 if (fWhat & CPUMCTX_EXTRN_CR0)
530 aenmNames[iReg++] = WHvX64RegisterCr0;
531 if (fWhat & CPUMCTX_EXTRN_CR2)
532 aenmNames[iReg++] = WHvX64RegisterCr2;
533 if (fWhat & CPUMCTX_EXTRN_CR3)
534 aenmNames[iReg++] = WHvX64RegisterCr3;
535 if (fWhat & CPUMCTX_EXTRN_CR4)
536 aenmNames[iReg++] = WHvX64RegisterCr4;
537 }
538 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
539 aenmNames[iReg++] = WHvX64RegisterCr8;
540
541 /* Debug registers. */
542 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
543 {
544 aenmNames[iReg++] = WHvX64RegisterDr0;
545 aenmNames[iReg++] = WHvX64RegisterDr1;
546 aenmNames[iReg++] = WHvX64RegisterDr2;
547 aenmNames[iReg++] = WHvX64RegisterDr3;
548 }
549 if (fWhat & CPUMCTX_EXTRN_DR6)
550 aenmNames[iReg++] = WHvX64RegisterDr6;
551 if (fWhat & CPUMCTX_EXTRN_DR7)
552 aenmNames[iReg++] = WHvX64RegisterDr7;
553
554 /* Floating point state. */
555 if (fWhat & CPUMCTX_EXTRN_X87)
556 {
557 aenmNames[iReg++] = WHvX64RegisterFpMmx0;
558 aenmNames[iReg++] = WHvX64RegisterFpMmx1;
559 aenmNames[iReg++] = WHvX64RegisterFpMmx2;
560 aenmNames[iReg++] = WHvX64RegisterFpMmx3;
561 aenmNames[iReg++] = WHvX64RegisterFpMmx4;
562 aenmNames[iReg++] = WHvX64RegisterFpMmx5;
563 aenmNames[iReg++] = WHvX64RegisterFpMmx6;
564 aenmNames[iReg++] = WHvX64RegisterFpMmx7;
565 aenmNames[iReg++] = WHvX64RegisterFpControlStatus;
566 }
567 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
568 aenmNames[iReg++] = WHvX64RegisterXmmControlStatus;
569
570 /* Vector state. */
571 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
572 {
573 aenmNames[iReg++] = WHvX64RegisterXmm0;
574 aenmNames[iReg++] = WHvX64RegisterXmm1;
575 aenmNames[iReg++] = WHvX64RegisterXmm2;
576 aenmNames[iReg++] = WHvX64RegisterXmm3;
577 aenmNames[iReg++] = WHvX64RegisterXmm4;
578 aenmNames[iReg++] = WHvX64RegisterXmm5;
579 aenmNames[iReg++] = WHvX64RegisterXmm6;
580 aenmNames[iReg++] = WHvX64RegisterXmm7;
581 aenmNames[iReg++] = WHvX64RegisterXmm8;
582 aenmNames[iReg++] = WHvX64RegisterXmm9;
583 aenmNames[iReg++] = WHvX64RegisterXmm10;
584 aenmNames[iReg++] = WHvX64RegisterXmm11;
585 aenmNames[iReg++] = WHvX64RegisterXmm12;
586 aenmNames[iReg++] = WHvX64RegisterXmm13;
587 aenmNames[iReg++] = WHvX64RegisterXmm14;
588 aenmNames[iReg++] = WHvX64RegisterXmm15;
589 }
590
591 /* MSRs */
592 // WHvX64RegisterTsc - don't touch
593 if (fWhat & CPUMCTX_EXTRN_EFER)
594 aenmNames[iReg++] = WHvX64RegisterEfer;
595 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
596 aenmNames[iReg++] = WHvX64RegisterKernelGsBase;
597 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
598 {
599 aenmNames[iReg++] = WHvX64RegisterSysenterCs;
600 aenmNames[iReg++] = WHvX64RegisterSysenterEip;
601 aenmNames[iReg++] = WHvX64RegisterSysenterEsp;
602 }
603 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
604 {
605 aenmNames[iReg++] = WHvX64RegisterStar;
606 aenmNames[iReg++] = WHvX64RegisterLstar;
607 aenmNames[iReg++] = WHvX64RegisterCstar;
608 aenmNames[iReg++] = WHvX64RegisterSfmask;
609 }
610
611//#ifdef LOG_ENABLED
612// const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM->pVM);
613//#endif
614 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
615 {
616 aenmNames[iReg++] = WHvX64RegisterApicBase; /// @todo APIC BASE
617 aenmNames[iReg++] = WHvX64RegisterPat;
618#if 0 /*def LOG_ENABLED*/ /** @todo Check if WHvX64RegisterMsrMtrrCap works... */
619 aenmNames[iReg++] = WHvX64RegisterMsrMtrrCap;
620#endif
621 aenmNames[iReg++] = WHvX64RegisterMsrMtrrDefType;
622 aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix64k00000;
623 aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix16k80000;
624 aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix16kA0000;
625 aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kC0000;
626 aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kC8000;
627 aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kD0000;
628 aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kD8000;
629 aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kE0000;
630 aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kE8000;
631 aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kF0000;
632 aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kF8000;
633 aenmNames[iReg++] = WHvX64RegisterTscAux;
634 /** @todo look for HvX64RegisterIa32MiscEnable and HvX64RegisterIa32FeatureControl? */
635//#ifdef LOG_ENABLED
636// if (enmCpuVendor != CPUMCPUVENDOR_AMD)
637// aenmNames[iReg++] = HvX64RegisterIa32FeatureControl;
638//#endif
639 }
640
641 /* Interruptibility. */
642 if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
643 {
644 aenmNames[iReg++] = WHvRegisterInterruptState;
645 aenmNames[iReg++] = WHvX64RegisterRip;
646 }
647
648 /* event injection */
649 aenmNames[iReg++] = WHvRegisterPendingInterruption;
650 aenmNames[iReg++] = WHvRegisterPendingEvent0;
651 aenmNames[iReg++] = WHvRegisterPendingEvent1;
652
653 size_t const cRegs = iReg;
654 Assert(cRegs < RT_ELEMENTS(aenmNames));
655
656 /*
657 * Get the registers.
658 */
659 WHV_REGISTER_VALUE aValues[128];
660 RT_ZERO(aValues);
661 Assert(RT_ELEMENTS(aValues) >= cRegs);
662 Assert(RT_ELEMENTS(aenmNames) >= cRegs);
663# ifdef NEM_WIN_INTERCEPT_NT_IO_CTLS
664 Log12(("Calling WHvGetVirtualProcessorRegisters(%p, %u, %p, %u, %p)\n",
665 pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, cRegs, aValues));
666# endif
667 HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, (uint32_t)cRegs, aValues);
668 AssertLogRelMsgReturn(SUCCEEDED(hrc),
669 ("WHvGetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
670 pVM->nem.s.hPartition, pVCpu->idCpu, cRegs, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
671 , VERR_NEM_GET_REGISTERS_FAILED);
672
673 iReg = 0;
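 /* Helpers for consuming aValues[] in order, asserting that each entry matches
    the register name queued above. */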
674# define GET_REG64(a_DstVar, a_enmName) do { \
675 Assert(aenmNames[iReg] == (a_enmName)); \
676 (a_DstVar) = aValues[iReg].Reg64; \
677 iReg++; \
678 } while (0)
679# define GET_REG64_LOG7(a_DstVar, a_enmName, a_szLogName) do { \
680 Assert(aenmNames[iReg] == (a_enmName)); \
681 if ((a_DstVar) != aValues[iReg].Reg64) \
682 Log7(("NEM/%u: " a_szLogName " changed %RX64 -> %RX64\n", pVCpu->idCpu, (a_DstVar), aValues[iReg].Reg64)); \
683 (a_DstVar) = aValues[iReg].Reg64; \
684 iReg++; \
685 } while (0)
686# define GET_REG128(a_DstVarLo, a_DstVarHi, a_enmName) do { \
687 Assert(aenmNames[iReg] == a_enmName); \
688 (a_DstVarLo) = aValues[iReg].Reg128.Low64; \
689 (a_DstVarHi) = aValues[iReg].Reg128.High64; \
690 iReg++; \
691 } while (0)
692# define GET_SEG(a_SReg, a_enmName) do { \
693 Assert(aenmNames[iReg] == (a_enmName)); \
694 NEM_WIN_COPY_BACK_SEG(a_SReg, aValues[iReg].Segment); \
695 iReg++; \
696 } while (0)
697
698 /* GPRs */
699 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
700 {
701 if (fWhat & CPUMCTX_EXTRN_RAX)
702 GET_REG64(pCtx->rax, WHvX64RegisterRax);
703 if (fWhat & CPUMCTX_EXTRN_RCX)
704 GET_REG64(pCtx->rcx, WHvX64RegisterRcx);
705 if (fWhat & CPUMCTX_EXTRN_RDX)
706 GET_REG64(pCtx->rdx, WHvX64RegisterRdx);
707 if (fWhat & CPUMCTX_EXTRN_RBX)
708 GET_REG64(pCtx->rbx, WHvX64RegisterRbx);
709 if (fWhat & CPUMCTX_EXTRN_RSP)
710 GET_REG64(pCtx->rsp, WHvX64RegisterRsp);
711 if (fWhat & CPUMCTX_EXTRN_RBP)
712 GET_REG64(pCtx->rbp, WHvX64RegisterRbp);
713 if (fWhat & CPUMCTX_EXTRN_RSI)
714 GET_REG64(pCtx->rsi, WHvX64RegisterRsi);
715 if (fWhat & CPUMCTX_EXTRN_RDI)
716 GET_REG64(pCtx->rdi, WHvX64RegisterRdi);
717 if (fWhat & CPUMCTX_EXTRN_R8_R15)
718 {
719 GET_REG64(pCtx->r8, WHvX64RegisterR8);
720 GET_REG64(pCtx->r9, WHvX64RegisterR9);
721 GET_REG64(pCtx->r10, WHvX64RegisterR10);
722 GET_REG64(pCtx->r11, WHvX64RegisterR11);
723 GET_REG64(pCtx->r12, WHvX64RegisterR12);
724 GET_REG64(pCtx->r13, WHvX64RegisterR13);
725 GET_REG64(pCtx->r14, WHvX64RegisterR14);
726 GET_REG64(pCtx->r15, WHvX64RegisterR15);
727 }
728 }
729
730 /* RIP & Flags */
731 if (fWhat & CPUMCTX_EXTRN_RIP)
732 GET_REG64(pCtx->rip, WHvX64RegisterRip);
733 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
734 GET_REG64(pCtx->rflags.u, WHvX64RegisterRflags);
735
736 /* Segments */
737 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
738 {
739 if (fWhat & CPUMCTX_EXTRN_ES)
740 GET_SEG(pCtx->es, WHvX64RegisterEs);
741 if (fWhat & CPUMCTX_EXTRN_CS)
742 GET_SEG(pCtx->cs, WHvX64RegisterCs);
743 if (fWhat & CPUMCTX_EXTRN_SS)
744 GET_SEG(pCtx->ss, WHvX64RegisterSs);
745 if (fWhat & CPUMCTX_EXTRN_DS)
746 GET_SEG(pCtx->ds, WHvX64RegisterDs);
747 if (fWhat & CPUMCTX_EXTRN_FS)
748 GET_SEG(pCtx->fs, WHvX64RegisterFs);
749 if (fWhat & CPUMCTX_EXTRN_GS)
750 GET_SEG(pCtx->gs, WHvX64RegisterGs);
751 }
752
753 /* Descriptor tables and the task segment. */
754 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
755 {
756 if (fWhat & CPUMCTX_EXTRN_LDTR)
757 GET_SEG(pCtx->ldtr, WHvX64RegisterLdtr);
758
759 if (fWhat & CPUMCTX_EXTRN_TR)
760 {
761 /* AMD-V likes loading TR in the AVAIL state, whereas Intel insists on BUSY. So,
762 to avoid triggering sanity assertions around the code, always fix this up. */
763 GET_SEG(pCtx->tr, WHvX64RegisterTr);
764 switch (pCtx->tr.Attr.n.u4Type)
765 {
766 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
767 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
768 break;
769 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
770 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
771 break;
772 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
773 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;
774 break;
775 }
776 }
777 if (fWhat & CPUMCTX_EXTRN_IDTR)
778 {
779 Assert(aenmNames[iReg] == WHvX64RegisterIdtr);
780 pCtx->idtr.cbIdt = aValues[iReg].Table.Limit;
781 pCtx->idtr.pIdt = aValues[iReg].Table.Base;
782 iReg++;
783 }
784 if (fWhat & CPUMCTX_EXTRN_GDTR)
785 {
786 Assert(aenmNames[iReg] == WHvX64RegisterGdtr);
787 pCtx->gdtr.cbGdt = aValues[iReg].Table.Limit;
788 pCtx->gdtr.pGdt = aValues[iReg].Table.Base;
789 iReg++;
790 }
791 }
792
793 /* Control registers. */
794 bool fMaybeChangedMode = false;
795 bool fFlushTlb = false;
796 bool fFlushGlobalTlb = false;
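 /* CR0/CR3/CR4/EFER changes may require a PGM mode change or TLB flush; the
    flags collected here are acted upon at the bottom of the function. */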
797 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
798 {
799 if (fWhat & CPUMCTX_EXTRN_CR0)
800 {
801 Assert(aenmNames[iReg] == WHvX64RegisterCr0);
802 if (pCtx->cr0 != aValues[iReg].Reg64)
803 {
804 CPUMSetGuestCR0(pVCpu, aValues[iReg].Reg64);
805 fMaybeChangedMode = true;
806 fFlushTlb = fFlushGlobalTlb = true; /// @todo fix this
807 }
808 iReg++;
809 }
810 if (fWhat & CPUMCTX_EXTRN_CR2)
811 GET_REG64(pCtx->cr2, WHvX64RegisterCr2);
812 if (fWhat & CPUMCTX_EXTRN_CR3)
813 {
814 if (pCtx->cr3 != aValues[iReg].Reg64)
815 {
816 CPUMSetGuestCR3(pVCpu, aValues[iReg].Reg64);
817 fFlushTlb = true;
818 }
819 iReg++;
820 }
821 if (fWhat & CPUMCTX_EXTRN_CR4)
822 {
823 if (pCtx->cr4 != aValues[iReg].Reg64)
824 {
825 CPUMSetGuestCR4(pVCpu, aValues[iReg].Reg64);
826 fMaybeChangedMode = true;
827 fFlushTlb = fFlushGlobalTlb = true; /// @todo fix this
828 }
829 iReg++;
830 }
831 }
832 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
833 {
834 Assert(aenmNames[iReg] == WHvX64RegisterCr8);
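 /* CR8 is the TPR in 4-bit form (bits 7:4), hence the shift when handing it to the APIC. */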
835 APICSetTpr(pVCpu, (uint8_t)aValues[iReg].Reg64 << 4);
836 iReg++;
837 }
838
839 /* Debug registers. */
840 /** @todo fixme */
841 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
842 {
843 Assert(aenmNames[iReg] == WHvX64RegisterDr0);
844 Assert(aenmNames[iReg+3] == WHvX64RegisterDr3);
845 if (pCtx->dr[0] != aValues[iReg].Reg64)
846 CPUMSetGuestDR0(pVCpu, aValues[iReg].Reg64);
847 iReg++;
848 if (pCtx->dr[1] != aValues[iReg].Reg64)
849 CPUMSetGuestDR1(pVCpu, aValues[iReg].Reg64);
850 iReg++;
851 if (pCtx->dr[2] != aValues[iReg].Reg64)
852 CPUMSetGuestDR2(pVCpu, aValues[iReg].Reg64);
853 iReg++;
854 if (pCtx->dr[3] != aValues[iReg].Reg64)
855 CPUMSetGuestDR3(pVCpu, aValues[iReg].Reg64);
856 iReg++;
857 }
858 if (fWhat & CPUMCTX_EXTRN_DR6)
859 {
860 Assert(aenmNames[iReg] == WHvX64RegisterDr6);
861 if (pCtx->dr[6] != aValues[iReg].Reg64)
862 CPUMSetGuestDR6(pVCpu, aValues[iReg].Reg64);
863 iReg++;
864 }
865 if (fWhat & CPUMCTX_EXTRN_DR7)
866 {
867 Assert(aenmNames[iReg] == WHvX64RegisterDr7);
868 if (pCtx->dr[7] != aValues[iReg].Reg64)
869 CPUMSetGuestDR7(pVCpu, aValues[iReg].Reg64);
870 iReg++;
871 }
872
873 /* Floating point state. */
874 if (fWhat & CPUMCTX_EXTRN_X87)
875 {
876 GET_REG128(pCtx->pXStateR3->x87.aRegs[0].au64[0], pCtx->pXStateR3->x87.aRegs[0].au64[1], WHvX64RegisterFpMmx0);
877 GET_REG128(pCtx->pXStateR3->x87.aRegs[1].au64[0], pCtx->pXStateR3->x87.aRegs[1].au64[1], WHvX64RegisterFpMmx1);
878 GET_REG128(pCtx->pXStateR3->x87.aRegs[2].au64[0], pCtx->pXStateR3->x87.aRegs[2].au64[1], WHvX64RegisterFpMmx2);
879 GET_REG128(pCtx->pXStateR3->x87.aRegs[3].au64[0], pCtx->pXStateR3->x87.aRegs[3].au64[1], WHvX64RegisterFpMmx3);
880 GET_REG128(pCtx->pXStateR3->x87.aRegs[4].au64[0], pCtx->pXStateR3->x87.aRegs[4].au64[1], WHvX64RegisterFpMmx4);
881 GET_REG128(pCtx->pXStateR3->x87.aRegs[5].au64[0], pCtx->pXStateR3->x87.aRegs[5].au64[1], WHvX64RegisterFpMmx5);
882 GET_REG128(pCtx->pXStateR3->x87.aRegs[6].au64[0], pCtx->pXStateR3->x87.aRegs[6].au64[1], WHvX64RegisterFpMmx6);
883 GET_REG128(pCtx->pXStateR3->x87.aRegs[7].au64[0], pCtx->pXStateR3->x87.aRegs[7].au64[1], WHvX64RegisterFpMmx7);
884
885 Assert(aenmNames[iReg] == WHvX64RegisterFpControlStatus);
886 pCtx->pXStateR3->x87.FCW = aValues[iReg].FpControlStatus.FpControl;
887 pCtx->pXStateR3->x87.FSW = aValues[iReg].FpControlStatus.FpStatus;
888 pCtx->pXStateR3->x87.FTW = aValues[iReg].FpControlStatus.FpTag
889 /*| (aValues[iReg].FpControlStatus.Reserved << 8)*/;
890 pCtx->pXStateR3->x87.FOP = aValues[iReg].FpControlStatus.LastFpOp;
891 pCtx->pXStateR3->x87.FPUIP = (uint32_t)aValues[iReg].FpControlStatus.LastFpRip;
892 pCtx->pXStateR3->x87.CS = (uint16_t)(aValues[iReg].FpControlStatus.LastFpRip >> 32);
893 pCtx->pXStateR3->x87.Rsrvd1 = (uint16_t)(aValues[iReg].FpControlStatus.LastFpRip >> 48);
894 iReg++;
895 }
896
897 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
898 {
899 Assert(aenmNames[iReg] == WHvX64RegisterXmmControlStatus);
900 if (fWhat & CPUMCTX_EXTRN_X87)
901 {
902 pCtx->pXStateR3->x87.FPUDP = (uint32_t)aValues[iReg].XmmControlStatus.LastFpRdp;
903 pCtx->pXStateR3->x87.DS = (uint16_t)(aValues[iReg].XmmControlStatus.LastFpRdp >> 32);
904 pCtx->pXStateR3->x87.Rsrvd2 = (uint16_t)(aValues[iReg].XmmControlStatus.LastFpRdp >> 48);
905 }
906 pCtx->pXStateR3->x87.MXCSR = aValues[iReg].XmmControlStatus.XmmStatusControl;
907 pCtx->pXStateR3->x87.MXCSR_MASK = aValues[iReg].XmmControlStatus.XmmStatusControlMask; /** @todo ??? (Isn't this an output field?) */
908 iReg++;
909 }
910
911 /* Vector state. */
912 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
913 {
914 GET_REG128(pCtx->pXStateR3->x87.aXMM[ 0].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 0].uXmm.s.Hi, WHvX64RegisterXmm0);
915 GET_REG128(pCtx->pXStateR3->x87.aXMM[ 1].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 1].uXmm.s.Hi, WHvX64RegisterXmm1);
916 GET_REG128(pCtx->pXStateR3->x87.aXMM[ 2].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 2].uXmm.s.Hi, WHvX64RegisterXmm2);
917 GET_REG128(pCtx->pXStateR3->x87.aXMM[ 3].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 3].uXmm.s.Hi, WHvX64RegisterXmm3);
918 GET_REG128(pCtx->pXStateR3->x87.aXMM[ 4].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 4].uXmm.s.Hi, WHvX64RegisterXmm4);
919 GET_REG128(pCtx->pXStateR3->x87.aXMM[ 5].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 5].uXmm.s.Hi, WHvX64RegisterXmm5);
920 GET_REG128(pCtx->pXStateR3->x87.aXMM[ 6].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 6].uXmm.s.Hi, WHvX64RegisterXmm6);
921 GET_REG128(pCtx->pXStateR3->x87.aXMM[ 7].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 7].uXmm.s.Hi, WHvX64RegisterXmm7);
922 GET_REG128(pCtx->pXStateR3->x87.aXMM[ 8].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 8].uXmm.s.Hi, WHvX64RegisterXmm8);
923 GET_REG128(pCtx->pXStateR3->x87.aXMM[ 9].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 9].uXmm.s.Hi, WHvX64RegisterXmm9);
924 GET_REG128(pCtx->pXStateR3->x87.aXMM[10].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[10].uXmm.s.Hi, WHvX64RegisterXmm10);
925 GET_REG128(pCtx->pXStateR3->x87.aXMM[11].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[11].uXmm.s.Hi, WHvX64RegisterXmm11);
926 GET_REG128(pCtx->pXStateR3->x87.aXMM[12].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[12].uXmm.s.Hi, WHvX64RegisterXmm12);
927 GET_REG128(pCtx->pXStateR3->x87.aXMM[13].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[13].uXmm.s.Hi, WHvX64RegisterXmm13);
928 GET_REG128(pCtx->pXStateR3->x87.aXMM[14].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[14].uXmm.s.Hi, WHvX64RegisterXmm14);
929 GET_REG128(pCtx->pXStateR3->x87.aXMM[15].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[15].uXmm.s.Hi, WHvX64RegisterXmm15);
930 }
931
932 /* MSRs */
933 // WHvX64RegisterTsc - don't touch
934 if (fWhat & CPUMCTX_EXTRN_EFER)
935 {
936 Assert(aenmNames[iReg] == WHvX64RegisterEfer);
937 if (aValues[iReg].Reg64 != pCtx->msrEFER)
938 {
939 Log7(("NEM/%u: MSR EFER changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrEFER, aValues[iReg].Reg64));
940 if ((aValues[iReg].Reg64 ^ pCtx->msrEFER) & MSR_K6_EFER_NXE)
941 PGMNotifyNxeChanged(pVCpu, RT_BOOL(aValues[iReg].Reg64 & MSR_K6_EFER_NXE));
942 pCtx->msrEFER = aValues[iReg].Reg64;
943 fMaybeChangedMode = true;
944 }
945 iReg++;
946 }
947 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
948 GET_REG64_LOG7(pCtx->msrKERNELGSBASE, WHvX64RegisterKernelGsBase, "MSR KERNEL_GS_BASE");
949 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
950 {
951 GET_REG64_LOG7(pCtx->SysEnter.cs, WHvX64RegisterSysenterCs, "MSR SYSENTER.CS");
952 GET_REG64_LOG7(pCtx->SysEnter.eip, WHvX64RegisterSysenterEip, "MSR SYSENTER.EIP");
953 GET_REG64_LOG7(pCtx->SysEnter.esp, WHvX64RegisterSysenterEsp, "MSR SYSENTER.ESP");
954 }
955 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
956 {
957 GET_REG64_LOG7(pCtx->msrSTAR, WHvX64RegisterStar, "MSR STAR");
958 GET_REG64_LOG7(pCtx->msrLSTAR, WHvX64RegisterLstar, "MSR LSTAR");
959 GET_REG64_LOG7(pCtx->msrCSTAR, WHvX64RegisterCstar, "MSR CSTAR");
960 GET_REG64_LOG7(pCtx->msrSFMASK, WHvX64RegisterSfmask, "MSR SFMASK");
961 }
962 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
963 {
964 Assert(aenmNames[iReg] == WHvX64RegisterApicBase);
965 const uint64_t uOldBase = APICGetBaseMsrNoCheck(pVCpu);
966 if (aValues[iReg].Reg64 != uOldBase)
967 {
968 Log7(("NEM/%u: MSR APICBase changed %RX64 -> %RX64 (%RX64)\n",
969 pVCpu->idCpu, uOldBase, aValues[iReg].Reg64, aValues[iReg].Reg64 ^ uOldBase));
970 VBOXSTRICTRC rc2 = APICSetBaseMsr(pVCpu, aValues[iReg].Reg64);
971 AssertLogRelMsg(rc2 == VINF_SUCCESS, ("%Rrc %RX64\n", VBOXSTRICTRC_VAL(rc2), aValues[iReg].Reg64));
972 }
973 iReg++;
974
975 GET_REG64_LOG7(pCtx->msrPAT, WHvX64RegisterPat, "MSR PAT");
976#if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
977 GET_REG64_LOG7(pCtx->msrPAT, WHvX64RegisterMsrMtrrCap, "MSR MTRR_CAP");
978#endif
979 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
980 GET_REG64_LOG7(pCtxMsrs->msr.MtrrDefType, WHvX64RegisterMsrMtrrDefType, "MSR MTRR_DEF_TYPE");
981 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix64K_00000, WHvX64RegisterMsrMtrrFix64k00000, "MSR MTRR_FIX_64K_00000");
982 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix16K_80000, WHvX64RegisterMsrMtrrFix16k80000, "MSR MTRR_FIX_16K_80000");
983 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix16K_A0000, WHvX64RegisterMsrMtrrFix16kA0000, "MSR MTRR_FIX_16K_A0000");
984 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_C0000, WHvX64RegisterMsrMtrrFix4kC0000, "MSR MTRR_FIX_4K_C0000");
985 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_C8000, WHvX64RegisterMsrMtrrFix4kC8000, "MSR MTRR_FIX_4K_C8000");
986 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_D0000, WHvX64RegisterMsrMtrrFix4kD0000, "MSR MTRR_FIX_4K_D0000");
987 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_D8000, WHvX64RegisterMsrMtrrFix4kD8000, "MSR MTRR_FIX_4K_D8000");
988 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_E0000, WHvX64RegisterMsrMtrrFix4kE0000, "MSR MTRR_FIX_4K_E0000");
989 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_E8000, WHvX64RegisterMsrMtrrFix4kE8000, "MSR MTRR_FIX_4K_E8000");
990 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_F0000, WHvX64RegisterMsrMtrrFix4kF0000, "MSR MTRR_FIX_4K_F0000");
991 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_F8000, WHvX64RegisterMsrMtrrFix4kF8000, "MSR MTRR_FIX_4K_F8000");
992 GET_REG64_LOG7(pCtxMsrs->msr.TscAux, WHvX64RegisterTscAux, "MSR TSC_AUX");
993 /** @todo look for HvX64RegisterIa32MiscEnable and HvX64RegisterIa32FeatureControl? */
994 }
995
996 /* Interruptibility. */
997 if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
998 {
999 Assert(aenmNames[iReg] == WHvRegisterInterruptState);
1000 Assert(aenmNames[iReg + 1] == WHvX64RegisterRip);
1001
1002 if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
1003 {
1004 pVCpu->nem.s.fLastInterruptShadow = aValues[iReg].InterruptState.InterruptShadow;
1005 if (aValues[iReg].InterruptState.InterruptShadow)
1006 EMSetInhibitInterruptsPC(pVCpu, aValues[iReg + 1].Reg64);
1007 else
1008 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1009 }
1010
1011 if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
1012 {
1013 if (aValues[iReg].InterruptState.NmiMasked)
1014 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
1015 else
1016 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
1017 }
1018
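 /* The interrupt-state register covers both interrupt and NMI inhibition,
    so mark both as imported regardless of which one was asked for. */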
1019 fWhat |= CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI;
1020 iReg += 2;
1021 }
1022
1023 /* Event injection. */
1024 /// @todo WHvRegisterPendingInterruption
1025 Assert(aenmNames[iReg] == WHvRegisterPendingInterruption);
1026 if (aValues[iReg].PendingInterruption.InterruptionPending)
1027 {
1028 Log7(("PendingInterruption: type=%u vector=%#x errcd=%RTbool/%#x instr-len=%u nested=%u\n",
1029 aValues[iReg].PendingInterruption.InterruptionType, aValues[iReg].PendingInterruption.InterruptionVector,
1030 aValues[iReg].PendingInterruption.DeliverErrorCode, aValues[iReg].PendingInterruption.ErrorCode,
1031 aValues[iReg].PendingInterruption.InstructionLength, aValues[iReg].PendingInterruption.NestedEvent));
1032 AssertMsg((aValues[iReg].PendingInterruption.AsUINT64 & UINT64_C(0xfc00)) == 0,
1033 ("%#RX64\n", aValues[iReg].PendingInterruption.AsUINT64));
1034 }
1035
1036 /// @todo WHvRegisterPendingEvent0
1037 /// @todo WHvRegisterPendingEvent1
1038
1039 /* Almost done, just update extrn flags and maybe change PGM mode. */
1040 pCtx->fExtrn &= ~fWhat;
1041 if (!(pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT))))
1042 pCtx->fExtrn = 0;
1043
1044 /* Typical. */
1045 if (!fMaybeChangedMode && !fFlushTlb)
1046 return VINF_SUCCESS;
1047
1048 /*
1049 * Slow.
1050 */
1051 if (fMaybeChangedMode)
1052 {
1053 int rc = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
1054 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
1055 }
1056
1057 if (fFlushTlb)
1058 {
1059 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, fFlushGlobalTlb);
1060 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
1061 }
1062
1063 return VINF_SUCCESS;
1064# endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
1065}
1066
1067#endif /* !IN_RING0 */
1068
1069
1070/**
1071 * Interface for importing state on demand (used by IEM).
1072 *
1073 * @returns VBox status code.
1074 * @param pVCpu The cross context CPU structure.
1075 * @param pCtx The target CPU context.
1076 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
1077 */
1078VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fWhat)
1079{
1080 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);
1081
1082#ifdef IN_RING0
1083 /** @todo improve and secure this translation */
1084 PGVM pGVM = GVMMR0ByHandle(pVCpu->pVMR0->hSelf);
1085 AssertReturn(pGVM, VERR_INVALID_VMCPU_HANDLE);
1086 VMCPUID idCpu = pVCpu->idCpu;
1087 ASMCompilerBarrier();
1088 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_VMCPU_HANDLE);
1089
1090 return nemR0WinImportState(pGVM, &pGVM->aCpus[idCpu], pCtx, fWhat);
1091#else
1092 return nemHCWinCopyStateFromHyperV(pVCpu->pVMR3, pVCpu, pCtx, fWhat);
1093#endif
1094}
1095
1096
1097/**
1098 * Query the CPU tick counter and optionally the TSC_AUX MSR value.
1099 *
1100 * @returns VBox status code.
1101 * @param pVCpu The cross context CPU structure.
1102 * @param pcTicks Where to return the CPU tick count.
1103 * @param puAux Where to return the TSC_AUX register value.
1104 */
1105VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPU pVCpu, uint64_t *pcTicks, uint32_t *puAux)
1106{
1107 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick);
1108
1109#ifdef IN_RING3
1110 PVM pVM = pVCpu->CTX_SUFF(pVM);
1111 VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
1112 AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);
1113
1114# ifdef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS
1115 /* Call ring-0 and get the values. */
1116 int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_QUERY_CPU_TICK, 0, NULL);
1117 AssertLogRelRCReturn(rc, rc);
1118 *pcTicks = pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks;
1119 if (puAux)
1120 *puAux = pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_TSC_AUX
1121 ? pVCpu->nem.s.Hypercall.QueryCpuTick.uAux : CPUMGetGuestTscAux(pVCpu);
1122 return VINF_SUCCESS;
1123
1124# else
1125 /* Call the official API. */
1126 WHV_REGISTER_NAME aenmNames[2] = { WHvX64RegisterTsc, WHvX64RegisterTscAux };
1127 WHV_REGISTER_VALUE aValues[2] = { {0, 0}, {0, 0} };
1128 Assert(RT_ELEMENTS(aenmNames) == RT_ELEMENTS(aValues));
1129 HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, 2, aValues);
1130 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1131 ("WHvGetVirtualProcessorRegisters(%p, %u,{tsc,tsc_aux},2,) -> %Rhrc (Last=%#x/%u)\n",
1132 pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1133 , VERR_NEM_GET_REGISTERS_FAILED);
1134 *pcTicks = aValues[0].Reg64;
1135 if (puAux)
1136 *puAux = pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_TSC_AUX ? aValues[1].Reg64 : CPUMGetGuestTscAux(pVCpu);
1137 return VINF_SUCCESS;
1138# endif
1139#else /* IN_RING0 */
1140 /** @todo improve and secure this translation */
1141 PGVM pGVM = GVMMR0ByHandle(pVCpu->pVMR0->hSelf);
1142 AssertReturn(pGVM, VERR_INVALID_VMCPU_HANDLE);
1143 VMCPUID idCpu = pVCpu->idCpu;
1144 ASMCompilerBarrier();
1145 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_VMCPU_HANDLE);
1146
1147 int rc = nemR0WinQueryCpuTick(pGVM, &pGVM->aCpus[idCpu], pcTicks, puAux);
1148 if (RT_SUCCESS(rc) && puAux && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_TSC_AUX))
1149 *puAux = CPUMGetGuestTscAux(pVCpu);
1150 return rc;
1151#endif /* IN_RING0 */
1152}
1153
1154
1155/**
1156 * Resumes CPU clock (TSC) on all virtual CPUs.
1157 *
1158 * This is called by TM when the VM is started, restored, resumed or similar.
1159 *
1160 * @returns VBox status code.
1161 * @param pVM The cross context VM structure.
1162 * @param pVCpu The cross context CPU structure of the calling EMT.
1163 * @param uPausedTscValue The TSC value at the time of pausing.
1164 */
1165VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVM pVM, PVMCPU pVCpu, uint64_t uPausedTscValue)
1166{
1167#ifdef IN_RING0
1168 /** @todo improve and secure this translation */
1169 PGVM pGVM = GVMMR0ByHandle(pVM->hSelf);
1170 AssertReturn(pGVM, VERR_INVALID_VMCPU_HANDLE);
1171 VMCPUID idCpu = pVCpu->idCpu;
1172 ASMCompilerBarrier();
1173 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_VMCPU_HANDLE);
1174
1175 return nemR0WinResumeCpuTickOnAll(pGVM, &pGVM->aCpus[idCpu], uPausedTscValue);
1176#else /* IN_RING3 */
1177 VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
1178 AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);
1179
1180# ifdef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS
1181 /* Call ring-0 and do it all there. */
1182 return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL, uPausedTscValue, NULL);
1183
1184# else
1185 /*
1186 * Call the official API to do the job.
1187 */
1188 if (pVM->cCpus > 1)
1189 RTThreadYield(); /* Try to decrease the chance that we get rescheduled in the middle. */
1190
1191 /* Start with the first CPU. */
1192 WHV_REGISTER_NAME enmName = WHvX64RegisterTsc;
1193 WHV_REGISTER_VALUE Value = {0, 0};
1194 Value.Reg64 = uPausedTscValue;
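 /* Take a host TSC baseline so the loop below can compensate the remaining
    vCPUs for the time spent programming each one. */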
1195 uint64_t const uFirstTsc = ASMReadTSC();
1196 HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, 0 /*iCpu*/, &enmName, 1, &Value);
1197 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1198 ("WHvSetVirtualProcessorRegisters(%p, 0,{tsc},2,%#RX64) -> %Rhrc (Last=%#x/%u)\n",
1199 pVM->nem.s.hPartition, uPausedTscValue, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1200 , VERR_NEM_SET_TSC);
1201
1202 /* Do the other CPUs, adjusting for elapsed TSC and keeping our fingers crossed
1203 that we don't introduce too much drift here. */
1204 for (VMCPUID iCpu = 1; iCpu < pVM->cCpus; iCpu++)
1205 {
1206 Assert(enmName == WHvX64RegisterTsc);
1207 const uint64_t offDelta = (ASMReadTSC() - uFirstTsc);
1208 Value.Reg64 = uPausedTscValue + offDelta;
1209 hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, iCpu, &enmName, 1, &Value);
1210 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1211 ("WHvSetVirtualProcessorRegisters(%p, %u,{tsc},1,%#RX64 + %#RX64) -> %Rhrc (Last=%#x/%u)\n",
1212 pVM->nem.s.hPartition, iCpu, uPausedTscValue, offDelta, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1213 , VERR_NEM_SET_TSC);
1214 }
1215
1216 return VINF_SUCCESS;
1217# endif
1218#endif /* IN_RING3 */
1219}
1220
1221
1222#ifdef LOG_ENABLED
1223/**
1224 * Get the virtual processor running status.
1225 */
1226DECLINLINE(VID_PROCESSOR_STATUS) nemHCWinCpuGetRunningStatus(PVMCPU pVCpu)
1227{
1228# ifdef IN_RING0
1229 NOREF(pVCpu);
1230 return VidProcessorStatusUndefined;
1231# else
1232 RTERRVARS Saved;
1233 RTErrVarsSave(&Saved);
1234
1235 /*
1236 * This API is disabled in release builds, it seems. On build 17101 it requires
1237 * the following patch to be enabled (windbg): eb vid+12180 0f 84 98 00 00 00
1238 */
1239 VID_PROCESSOR_STATUS enmCpuStatus = VidProcessorStatusUndefined;
1240 NTSTATUS rcNt = g_pfnVidGetVirtualProcessorRunningStatus(pVCpu->pVMR3->nem.s.hPartitionDevice, pVCpu->idCpu, &enmCpuStatus);
1241 AssertRC(rcNt);
1242
1243 RTErrVarsRestore(&Saved);
1244 return enmCpuStatus;
1245# endif
1246}
1247#endif /* LOG_ENABLED */
1248
1249
1250#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
1251# ifdef IN_RING3 /* hopefully not needed in ring-0, as we'd need KTHREADs and KeAlertThread. */
1252/**
1253 * Our own WHvCancelRunVirtualProcessor that can later be moved to ring-0.
1254 *
1255 * This is an experiment only.
1256 *
1257 * @returns VBox status code.
1258 * @param pVM The cross context VM structure.
1259 * @param pVCpu The cross context virtual CPU structure of the
1260 * calling EMT.
1261 */
1262NEM_TMPL_STATIC int nemHCWinCancelRunVirtualProcessor(PVM pVM, PVMCPU pVCpu)
1263{
1264 /*
1265 * Work the state.
1266 *
1267 * From the looks of things, we should let the EMT call VidStopVirtualProcessor.
1268 * So, we just need to modify the state and kick the EMT if it's waiting on
1269 * messages. For the latter we use QueueUserAPC / KeAlertThread.
1270 */
1271 for (;;)
1272 {
1273 VMCPUSTATE enmState = VMCPU_GET_STATE(pVCpu);
1274 switch (enmState)
1275 {
1276 case VMCPUSTATE_STARTED_EXEC_NEM:
1277 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED, VMCPUSTATE_STARTED_EXEC_NEM))
1278 {
1279 Log8(("nemHCWinCancelRunVirtualProcessor: Switched %u to canceled state\n", pVCpu->idCpu));
1280 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatCancelChangedState);
1281 return VINF_SUCCESS;
1282 }
1283 break;
1284
1285 case VMCPUSTATE_STARTED_EXEC_NEM_WAIT:
1286 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED, VMCPUSTATE_STARTED_EXEC_NEM_WAIT))
1287 {
1288# ifdef IN_RING0
1289 NTSTATUS rcNt = KeAlertThread(??);
1290# else
1291 NTSTATUS rcNt = NtAlertThread(pVCpu->nem.s.hNativeThreadHandle);
1292# endif
1293 Log8(("nemHCWinCancelRunVirtualProcessor: Alerted %u: %#x\n", pVCpu->idCpu, rcNt));
1294 Assert(rcNt == STATUS_SUCCESS);
1295 if (NT_SUCCESS(rcNt))
1296 {
1297 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatCancelAlertedThread);
1298 return VINF_SUCCESS;
1299 }
1300 AssertLogRelMsgFailedReturn(("NtAlertThread failed: %#x\n", rcNt), RTErrConvertFromNtStatus(rcNt));
1301 }
1302 break;
1303
1304 default:
1305 return VINF_SUCCESS;
1306 }
1307
1308 ASMNopPause();
1309 RT_NOREF(pVM);
1310 }
1311}
1312# endif /* IN_RING3 */
1313#endif /* NEM_WIN_USE_OUR_OWN_RUN_API */
1314
1315
1316#ifdef LOG_ENABLED
1317/**
1318 * Logs the current CPU state.
1319 */
1320NEM_TMPL_STATIC void nemHCWinLogState(PVM pVM, PVMCPU pVCpu)
1321{
1322 if (LogIs3Enabled())
1323 {
1324# ifdef IN_RING3
1325 char szRegs[4096];
1326 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
1327 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
1328 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
1329 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
1330 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
1331 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
1332 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
1333 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
1334 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
1335 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
1336 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
1337 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
1338 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
1339 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
1340 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
1341 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
1342 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
1343 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
1344 " efer=%016VR{efer}\n"
1345 " pat=%016VR{pat}\n"
1346 " sf_mask=%016VR{sf_mask}\n"
1347 "krnl_gs_base=%016VR{krnl_gs_base}\n"
1348 " lstar=%016VR{lstar}\n"
1349 " star=%016VR{star} cstar=%016VR{cstar}\n"
1350 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
1351 );
1352
1353 char szInstr[256];
1354 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
1355 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
1356 szInstr, sizeof(szInstr), NULL);
1357 Log3(("%s%s\n", szRegs, szInstr));
1358# else
1359 /** @todo state logging in ring-0 */
1360 RT_NOREF(pVM, pVCpu);
1361# endif
1362 }
1363}
1364#endif /* LOG_ENABLED */
1365
1366
1367/** Macro used by nemHCWinExecStateToLogStr and nemR3WinExecStateToLogStr. */
1368#define SWITCH_IT(a_szPrefix) \
1369 do \
1370 switch (u)\
1371 { \
1372 case 0x00: return a_szPrefix ""; \
1373 case 0x01: return a_szPrefix ",Pnd"; \
1374 case 0x02: return a_szPrefix ",Dbg"; \
1375 case 0x03: return a_szPrefix ",Pnd,Dbg"; \
1376 case 0x04: return a_szPrefix ",Shw"; \
1377 case 0x05: return a_szPrefix ",Pnd,Shw"; \
1378 case 0x06: return a_szPrefix ",Shw,Dbg"; \
1379 case 0x07: return a_szPrefix ",Pnd,Shw,Dbg"; \
1380 default: AssertFailedReturn("WTF?"); \
1381 } \
1382 while (0)
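/* Illustrative examples of the encoding handled above: a long-mode guest with
 an interrupt pending and an active interrupt shadow encodes as u = 0x05 and
 yields "LM,Pnd,Shw"; a real-mode guest with none of the bits set yields
 plain "RM". */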
1383
1384#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
1385/**
1386 * Translates the execution state bitfield into a short log string, VID version.
1387 *
1388 * @returns Read-only log string.
1389 * @param pMsgHdr The message header whose state to summarize.
1390 */
1391static const char *nemHCWinExecStateToLogStr(HV_X64_INTERCEPT_MESSAGE_HEADER const *pMsgHdr)
1392{
1393 unsigned u = (unsigned)pMsgHdr->ExecutionState.InterruptionPending
1394 | ((unsigned)pMsgHdr->ExecutionState.DebugActive << 1)
1395 | ((unsigned)pMsgHdr->ExecutionState.InterruptShadow << 2);
1396 if (pMsgHdr->ExecutionState.EferLma)
1397 SWITCH_IT("LM");
1398 else if (pMsgHdr->ExecutionState.Cr0Pe)
1399 SWITCH_IT("PM");
1400 else
1401 SWITCH_IT("RM");
1402}
1403#elif defined(IN_RING3)
1404/**
1405 * Translates the execution state bitfield into a short log string, WinHv version.
1406 *
1407 * @returns Read-only log string.
1408 * @param pExitCtx The exit context whose state to summarize.
1409 */
1410static const char *nemR3WinExecStateToLogStr(WHV_VP_EXIT_CONTEXT const *pExitCtx)
1411{
1412 unsigned u = (unsigned)pExitCtx->ExecutionState.InterruptionPending
1413 | ((unsigned)pExitCtx->ExecutionState.DebugActive << 1)
1414 | ((unsigned)pExitCtx->ExecutionState.InterruptShadow << 2);
1415 if (pExitCtx->ExecutionState.EferLma)
1416 SWITCH_IT("LM");
1417 else if (pExitCtx->ExecutionState.Cr0Pe)
1418 SWITCH_IT("PM");
1419 else
1420 SWITCH_IT("RM");
1421}
1422#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
1423#undef SWITCH_IT
1424
1425
1426#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
1427/**
1428 * Advances the guest RIP and clears EFLAGS.RF, VID version.
1429 *
1430 * This may clear VMCPU_FF_INHIBIT_INTERRUPTS.
1431 *
1432 * @param pVCpu The cross context virtual CPU structure.
1433 * @param pCtx The CPU context to update.
1434 * @param pMsgHdr The X64 intercept message header.
1435 */
1436DECLINLINE(void) nemHCWinAdvanceGuestRipAndClearRF(PVMCPU pVCpu, PCPUMCTX pCtx, HV_X64_INTERCEPT_MESSAGE_HEADER const *pMsgHdr)
1437{
1438 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));
1439
1440 /* Advance the RIP. */
1441 Assert(pMsgHdr->InstructionLength > 0 && pMsgHdr->InstructionLength < 16);
1442 pCtx->rip += pMsgHdr->InstructionLength;
1443 pCtx->rflags.Bits.u1RF = 0;
1444
1445 /* Update interrupt inhibition. */
1446 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1447 { /* likely */ }
1448 else if (pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
1449 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1450}
1451#elif defined(IN_RING3)
1452/**
1453 * Advances the guest RIP and clears EFLAGS.RF, WinHv version.
1454 *
1455 * This may clear VMCPU_FF_INHIBIT_INTERRUPTS.
1456 *
1457 * @param pVCpu The cross context virtual CPU structure.
1458 * @param pCtx The CPU context to update.
1459 * @param pExitCtx The exit context.
1460 */
1461DECLINLINE(void) nemR3WinAdvanceGuestRipAndClearRF(PVMCPU pVCpu, PCPUMCTX pCtx, WHV_VP_EXIT_CONTEXT const *pExitCtx)
1462{
1463 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));
1464
1465 /* Advance the RIP. */
1466 Assert(pExitCtx->InstructionLength > 0 && pExitCtx->InstructionLength < 16);
1467 pCtx->rip += pExitCtx->InstructionLength;
1468 pCtx->rflags.Bits.u1RF = 0;
1469
1470 /* Update interrupt inhibition. */
1471 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1472 { /* likely */ }
1473 else if (pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
1474 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1475}
1476#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
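/* Editorial note: the interrupt-inhibition update above matters after
 emulating instructions like STI or MOV SS. EMSetInhibitInterruptsPC records
 the RIP at which the shadow applies; once RIP has advanced past that address
 the shadow is stale, so VMCPU_FF_INHIBIT_INTERRUPTS is cleared. */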
1477
1478
1479
1480NEM_TMPL_STATIC DECLCALLBACK(int)
1481nemHCWinUnmapOnePageCallback(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint8_t *pu2NemState, void *pvUser)
1482{
1483 RT_NOREF_PV(pvUser);
1484#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1485 int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys);
1486 AssertRC(rc);
1487 if (RT_SUCCESS(rc))
1488#else
1489 RT_NOREF_PV(pVCpu);
1490 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
1491 if (SUCCEEDED(hrc))
1492#endif
1493 {
1494 Log5(("NEM GPA unmap all: %RGp (cMappedPages=%u)\n", GCPhys, pVM->nem.s.cMappedPages - 1));
1495 *pu2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
1496 }
1497 else
1498 {
1499#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1500 LogRel(("nemR3WinUnmapOnePageCallback: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));
1501#else
1502 LogRel(("nemR3WinUnmapOnePageCallback: GCPhys=%RGp %s hrc=%Rhrc (%#x) Last=%#x/%u (cMappedPages=%u)\n",
1503 GCPhys, g_apszPageStates[*pu2NemState], hrc, hrc, RTNtLastStatusValue(),
1504 RTNtLastErrorValue(), pVM->nem.s.cMappedPages));
1505#endif
1506 *pu2NemState = NEM_WIN_PAGE_STATE_NOT_SET;
1507 }
1508 if (pVM->nem.s.cMappedPages > 0)
1509 ASMAtomicDecU32(&pVM->nem.s.cMappedPages);
1510 return VINF_SUCCESS;
1511}
1512
1513
1514/**
1515 * State to pass between nemHCWinHandleMessageMemory / nemR3WinHandleExitMemory
1516 * and nemHCWinHandleMemoryAccessPageCheckerCallback.
1517 */
1518typedef struct NEMHCWINHMACPCCSTATE
1519{
1520 /** Input: Write access. */
1521 bool fWriteAccess;
1522 /** Output: Set if we did something. */
1523 bool fDidSomething;
1524 /** Output: Set if we should resume. */
1525 bool fCanResume;
1526} NEMHCWINHMACPCCSTATE;
1527
1528/**
1529 * @callback_method_impl{FNPGMPHYSNEMCHECKPAGE,
1530 * Worker for nemHCWinHandleMessageMemory and nemR3WinHandleExitMemory; pvUser points to a
1531 * NEMHCWINHMACPCCSTATE structure. }
1532 */
1533NEM_TMPL_STATIC DECLCALLBACK(int)
1534nemHCWinHandleMemoryAccessPageCheckerCallback(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
1535{
1536 NEMHCWINHMACPCCSTATE *pState = (NEMHCWINHMACPCCSTATE *)pvUser;
1537 pState->fDidSomething = false;
1538 pState->fCanResume = false;
1539
1540 /* If A20 is disabled, we may need to make another query on the masked
1541 page to get the correct protection information. */
1542 uint8_t u2State = pInfo->u2NemState;
1543 RTGCPHYS GCPhysSrc;
1544 if ( pVM->nem.s.fA20Enabled
1545 || !NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
1546 GCPhysSrc = GCPhys;
1547 else
1548 {
1549 GCPhysSrc = GCPhys & ~(RTGCPHYS)RT_BIT_32(20);
1550 PGMPHYSNEMPAGEINFO Info2;
1551 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhysSrc, pState->fWriteAccess, &Info2, NULL, NULL);
1552 AssertRCReturn(rc, rc);
1553
1554 *pInfo = Info2;
1555 pInfo->u2NemState = u2State;
1556 }
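/* Worked example (editorial): with A20 disabled, an access at GCPhys
 0x00145678 is queried as 0x00045678 above (bit 20 masked off), mimicking the
 address wrap-around of the A20 gate. */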
1557
1558 /*
1559 * Consolidate current page state with actual page protection and access type.
1560 * We don't really consider downgrades here, as they shouldn't happen.
1561 */
1562#ifndef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1563 /** @todo Someone at Microsoft please explain:
1564 * I'm not sure WTF was going on, but I ended up in a loop if I remapped a
1565 * readonly page as writable (unmap, then map again). Specifically, this was an
1566 * issue with the big VRAM mapping at 0xe0000000 when booting DSL 4.4.1. So, in
1567 * the hope of working around that we no longer pre-map anything, just unmap stuff
1568 * and do it lazily here. And here we will first unmap, restart, and then remap
1569 * with new protection or backing.
1570 */
1571#endif
1572 int rc;
1573 switch (u2State)
1574 {
1575 case NEM_WIN_PAGE_STATE_UNMAPPED:
1576 case NEM_WIN_PAGE_STATE_NOT_SET:
1577 if (pInfo->fNemProt == NEM_PAGE_PROT_NONE)
1578 {
1579 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #1\n", GCPhys));
1580 return VINF_SUCCESS;
1581 }
1582
1583 /* Don't bother remapping it if it's a write request to a non-writable page. */
1584 if ( pState->fWriteAccess
1585 && !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE))
1586 {
1587 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #1w\n", GCPhys));
1588 return VINF_SUCCESS;
1589 }
1590
1591 /* Map the page. */
1592 rc = nemHCNativeSetPhysPage(pVM,
1593 pVCpu,
1594 GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
1595 GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
1596 pInfo->fNemProt,
1597 &u2State,
1598 true /*fBackingChanged*/);
1599 pInfo->u2NemState = u2State;
1600 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - synced => %s + %Rrc\n",
1601 GCPhys, g_apszPageStates[u2State], rc));
1602 pState->fDidSomething = true;
1603 pState->fCanResume = true;
1604 return rc;
1605
1606 case NEM_WIN_PAGE_STATE_READABLE:
1607 if ( !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1608 && (pInfo->fNemProt & (NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE)))
1609 {
1610 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #2\n", GCPhys));
1611 return VINF_SUCCESS;
1612 }
1613
1614#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1615 /* Upgrade page to writable. */
1616/** @todo test this */
1617 if ( (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1618 && pState->fWriteAccess)
1619 {
1620 rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhys,
1621 HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE
1622 | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
1623 AssertRC(rc);
1624 if (RT_SUCCESS(rc))
1625 {
1626 pInfo->u2NemState = NEM_WIN_PAGE_STATE_WRITABLE;
1627 pState->fDidSomething = true;
1628 pState->fCanResume = true;
1629 Log5(("NEM GPA write-upgrade/exit: %RGp (was %s, cMappedPages=%u)\n",
1630 GCPhys, g_apszPageStates[u2State], pVM->nem.s.cMappedPages));
1631 }
1632 }
1633 else
1634 {
1635 /* Need to emulate the access. */
1636 AssertBreak(pInfo->fNemProt != NEM_PAGE_PROT_NONE); /* There should be no downgrades. */
1637 rc = VINF_SUCCESS;
1638 }
1639 return rc;
1640#else
1641 break;
1642#endif
1643
1644 case NEM_WIN_PAGE_STATE_WRITABLE:
1645 if (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1646 {
1647 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #3\n", GCPhys));
1648 return VINF_SUCCESS;
1649 }
1650#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1651 AssertFailed(); /* There should be no downgrades. */
1652#endif
1653 break;
1654
1655 default:
1656 AssertLogRelMsgFailedReturn(("u2State=%#x\n", u2State), VERR_NEM_IPE_4);
1657 }
1658
1659 /*
1660 * Unmap and restart the instruction.
1661 * If this fails, which it does every so often, just unmap everything for now.
1662 */
1663#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1664 rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys);
1665 AssertRC(rc);
1666 if (RT_SUCCESS(rc))
1667#else
1668 /** @todo figure out whether we mess up the state or if it's WHv. */
1669 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
1670 if (SUCCEEDED(hrc))
1671#endif
1672 {
1673 pState->fDidSomething = true;
1674 pState->fCanResume = true;
1675 pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
1676 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
1677 Log5(("NEM GPA unmapped/exit: %RGp (was %s, cMappedPages=%u)\n", GCPhys, g_apszPageStates[u2State], cMappedPages));
1678 return VINF_SUCCESS;
1679 }
1680#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1681 LogRel(("nemHCWinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhys, rc));
1682 return rc;
1683#else
1684 LogRel(("nemHCWinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp %s hrc=%Rhrc (%#x) Last=%#x/%u (cMappedPages=%u)\n",
1685 GCPhys, g_apszPageStates[u2State], hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue(),
1686 pVM->nem.s.cMappedPages));
1687
1688 PGMPhysNemEnumPagesByState(pVM, pVCpu, NEM_WIN_PAGE_STATE_READABLE, nemHCWinUnmapOnePageCallback, NULL);
1689 Log(("nemHCWinHandleMemoryAccessPageCheckerCallback: Unmapped all (cMappedPages=%u)\n", pVM->nem.s.cMappedPages));
1690
1691 pState->fDidSomething = true;
1692 pState->fCanResume = true;
1693 pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
1694 return VINF_SUCCESS;
1695#endif
1696}
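/*
 * Editorial summary of the state handling above:
 * - unmapped/not-set + sufficient protection -> map the page and resume.
 * - readable + read/exec-only access -> resume as-is.
 * - readable + write access -> upgrade to writable (hypercall interface) or
 * unmap and restart (WHv interface).
 * - writable + write access -> resume as-is.
 * - everything else -> unmap and restart the instruction, falling back to
 * unmapping all pages if the single-page unmap fails.
 */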
1697
1698
1699
1700#if defined(IN_RING0) && defined(NEM_WIN_USE_OUR_OWN_RUN_API)
1701/**
1702 * Wrapper around nemR0WinImportState that converts VERR_NEM_CHANGE_PGM_MODE,
1703 * VERR_NEM_FLUSH_TLB and VERR_NEM_UPDATE_APIC_BASE into informational status codes and logs+asserts statuses.
1704 *
1705 * @returns VBox strict status code.
1706 * @param pGVM The global (ring-0) VM structure.
1707 * @param pGVCpu The global (ring-0) per CPU structure.
1708 * @param pCtx The CPU context to import into.
1709 * @param fWhat What to import.
1710 * @param pszCaller Who is doing the importing.
1711 */
1712DECLINLINE(VBOXSTRICTRC) nemR0WinImportStateStrict(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat, const char *pszCaller)
1713{
1714 int rc = nemR0WinImportState(pGVM, pGVCpu, pCtx, fWhat);
1715 if (RT_SUCCESS(rc))
1716 {
1717 Assert(rc == VINF_SUCCESS);
1718 return VINF_SUCCESS;
1719 }
1720
1721 if (rc == VERR_NEM_CHANGE_PGM_MODE || rc == VERR_NEM_FLUSH_TLB || rc == VERR_NEM_UPDATE_APIC_BASE)
1722 {
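/* Negating the (negative) VERR_ value yields a positive, i.e. informational,
 status code, so callers propagate it as a return-to-ring-3 hint rather than
 treating it as a hard failure. */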
1723 Log4(("%s/%u: nemR0WinImportState -> %Rrc\n", pszCaller, pGVCpu->idCpu, -rc));
1724 return -rc;
1725 }
1726 RT_NOREF(pszCaller);
1727 AssertMsgFailedReturn(("%s/%u: nemR0WinImportState failed: %Rrc\n", pszCaller, pGVCpu->idCpu, rc), rc);
1728}
1729#endif /* IN_RING0 && NEM_WIN_USE_OUR_OWN_RUN_API*/
1730
1731#if defined(NEM_WIN_USE_OUR_OWN_RUN_API) || defined(IN_RING3)
1732/**
1733 * Wrapper around nemR0WinImportStateStrict and nemHCWinCopyStateFromHyperV.
1734 *
1735 * Unlike the wrapped APIs, this checks whether it's necessary.
1736 *
1737 * @returns VBox strict status code.
1738 * @param pVCpu The cross context per CPU structure.
1739 * @param pGVCpu The global (ring-0) per CPU structure (NULL in ring-3).
1740 * @param pCtx The CPU context to import into.
1741 * @param fWhat What to import.
1742 * @param pszCaller Who is doing the importing.
1743 */
1744DECLINLINE(VBOXSTRICTRC) nemHCWinImportStateIfNeededStrict(PVMCPU pVCpu, PGVMCPU pGVCpu, PCPUMCTX pCtx,
1745 uint64_t fWhat, const char *pszCaller)
1746{
1747 if (pCtx->fExtrn & fWhat)
1748 {
1749#ifdef IN_RING0
1750 RT_NOREF(pVCpu);
1751 return nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pCtx, fWhat, pszCaller);
1752#else
1753 RT_NOREF(pGVCpu, pszCaller);
1754 int rc = nemHCWinCopyStateFromHyperV(pVCpu->pVMR3, pVCpu, pCtx, fWhat);
1755 AssertRCReturn(rc, rc);
1756#endif
1757 }
1758 return VINF_SUCCESS;
1759}
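/* Usage note (editorial): pCtx->fExtrn tracks which registers still live only
 in Hyper-V. E.g. while CPUMCTX_EXTRN_RAX is still set, RAX has not been
 fetched yet and the overlap test above triggers the actual import; when
 nothing in fWhat remains external, the call is a no-op. */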
1760#endif /* NEM_WIN_USE_OUR_OWN_RUN_API || IN_RING3 */
1761
1762#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
1763/**
1764 * Copies register state from the X64 intercept message header.
1765 *
1766 * ASSUMES no state copied yet.
1767 *
1768 * @param pVCpu The cross context per CPU structure.
1769 * @param pCtx The register context.
1770 * @param pHdr The X64 intercept message header.
1771 * @sa nemR3WinCopyStateFromX64Header
1772 */
1773DECLINLINE(void) nemHCWinCopyStateFromX64Header(PVMCPU pVCpu, PCPUMCTX pCtx, HV_X64_INTERCEPT_MESSAGE_HEADER const *pHdr)
1774{
1775 Assert( (pCtx->fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
1776 == (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT));
1777 NEM_WIN_COPY_BACK_SEG(pCtx->cs, pHdr->CsSegment);
1778 pCtx->rip = pHdr->Rip;
1779 pCtx->rflags.u = pHdr->Rflags;
1780
1781 pVCpu->nem.s.fLastInterruptShadow = pHdr->ExecutionState.InterruptShadow;
1782 if (!pHdr->ExecutionState.InterruptShadow)
1783 {
1784 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1785 { /* likely */ }
1786 else
1787 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1788 }
1789 else
1790 EMSetInhibitInterruptsPC(pVCpu, pHdr->Rip);
1791
1792 pCtx->fExtrn &= ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT);
1793}
1794#elif defined(IN_RING3)
1795/**
1796 * Copies register state from the (common) exit context.
1797 *
1798 * ASSUMES no state copied yet.
1799 *
1800 * @param pVCpu The cross context per CPU structure.
1801 * @param pCtx The register context.
1802 * @param pExitCtx The common exit context.
1803 * @sa nemHCWinCopyStateFromX64Header
1804 */
1805DECLINLINE(void) nemR3WinCopyStateFromX64Header(PVMCPU pVCpu, PCPUMCTX pCtx, WHV_VP_EXIT_CONTEXT const *pExitCtx)
1806{
1807 Assert( (pCtx->fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
1808 == (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT));
1809 NEM_WIN_COPY_BACK_SEG(pCtx->cs, pExitCtx->Cs);
1810 pCtx->rip = pExitCtx->Rip;
1811 pCtx->rflags.u = pExitCtx->Rflags;
1812
1813 pVCpu->nem.s.fLastInterruptShadow = pExitCtx->ExecutionState.InterruptShadow;
1814 if (!pExitCtx->ExecutionState.InterruptShadow)
1815 {
1816 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1817 { /* likely */ }
1818 else
1819 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1820 }
1821 else
1822 EMSetInhibitInterruptsPC(pVCpu, pExitCtx->Rip);
1823
1824 pCtx->fExtrn &= ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT);
1825}
1826#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
1827
1828
1829#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
1830/**
1831 * Deals with memory intercept message.
1832 *
1833 * @returns Strict VBox status code.
1834 * @param pVM The cross context VM structure.
1835 * @param pVCpu The cross context per CPU structure.
1836 * @param pMsg The message.
1837 * @param pCtx The register context.
1838 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
1839 * @sa nemR3WinHandleExitMemory
1840 */
1841NEM_TMPL_STATIC VBOXSTRICTRC
1842nemHCWinHandleMessageMemory(PVM pVM, PVMCPU pVCpu, HV_X64_MEMORY_INTERCEPT_MESSAGE const *pMsg, PCPUMCTX pCtx, PGVMCPU pGVCpu)
1843{
1844 uint64_t const uHostTsc = ASMReadTSC();
1845 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
1846 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
1847 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_EXECUTE);
1848 AssertMsg(pMsg->Header.InstructionLength < 0x10, ("%#x\n", pMsg->Header.InstructionLength));
1849
1850 /*
1851 * Whatever we do, we must clear pending event injection upon resume.
1852 */
1853 if (pMsg->Header.ExecutionState.InterruptionPending)
1854 pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
1855
1856#if 0 /* Experiment: 20K -> 34K exit/s. */
1857 if ( pMsg->Header.ExecutionState.EferLma
1858 && pMsg->Header.CsSegment.Long
1859 && pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
1860 {
1861 if ( pMsg->Header.Rip - (uint64_t)0xf65a < (uint64_t)(0xf662 - 0xf65a)
1862 && pMsg->InstructionBytes[0] == 0x89
1863 && pMsg->InstructionBytes[1] == 0x03)
1864 {
1865 pCtx->rip = pMsg->Header.Rip + 2;
1866 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RIP;
1867 AssertMsg(pMsg->Header.InstructionLength == 2, ("%#x\n", pMsg->Header.InstructionLength));
1868 //Log(("%RX64 msg:\n%.80Rhxd\n", pCtx->rip, pMsg));
1869 return VINF_SUCCESS;
1870 }
1871 }
1872#endif
1873
1874 /*
1875 * Ask PGM for information about the given GCPhys. We need to check if we're
1876 * out of sync first.
1877 */
1878 NEMHCWINHMACPCCSTATE State = { pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE, false, false };
1879 PGMPHYSNEMPAGEINFO Info;
1880 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pMsg->GuestPhysicalAddress, State.fWriteAccess, &Info,
1881 nemHCWinHandleMemoryAccessPageCheckerCallback, &State);
1882 if (RT_SUCCESS(rc))
1883 {
1884 if (Info.fNemProt & ( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
1885 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
1886 {
1887 if (State.fCanResume)
1888 {
1889 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting (%s)\n",
1890 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
1891 pMsg->GuestPhysicalAddress, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
1892 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
1893 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pMsg->Header.InterceptAccessType]));
1894 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
1895 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, uHostTsc);
1896 return VINF_SUCCESS;
1897 }
1898 }
1899 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating (%s)\n",
1900 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
1901 pMsg->GuestPhysicalAddress, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
1902 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
1903 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pMsg->Header.InterceptAccessType]));
1904 }
1905 else
1906 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp rc=%Rrc%s; emulating (%s)\n",
1907 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
1908 pMsg->GuestPhysicalAddress, rc, State.fDidSomething ? " modified-backing" : "",
1909 g_apszHvInterceptAccessTypes[pMsg->Header.InterceptAccessType]));
1910
1911 /*
1912 * Emulate the memory access, either access handler or special memory.
1913 */
1914 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
1915 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
1916 ? EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
1917 : EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
1918 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, uHostTsc);
1919 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, &pMsg->Header);
1920 VBOXSTRICTRC rcStrict;
1921# ifdef IN_RING0
1922 rcStrict = nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pCtx,
1923 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES, "MemExit");
1924 if (rcStrict != VINF_SUCCESS)
1925 return rcStrict;
1926# else
1927 rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
1928 AssertRCReturn(rc, rc);
1929 NOREF(pGVCpu);
1930# endif
1931
1932 if (pMsg->Reserved1)
1933 Log(("MemExit/Reserved1=%#x\n", pMsg->Reserved1));
1934 if (pMsg->Header.ExecutionState.Reserved0 || pMsg->Header.ExecutionState.Reserved1)
1935 Log(("MemExit/Hdr/State: Reserved0=%#x Reserved1=%#x\n", pMsg->Header.ExecutionState.Reserved0, pMsg->Header.ExecutionState.Reserved1));
1936
1937 if (!pExitRec)
1938 {
1939 //if (pMsg->InstructionByteCount > 0)
1940 // Log4(("InstructionByteCount=%#x %.16Rhxs\n", pMsg->InstructionByteCount, pMsg->InstructionBytes));
1941 if (pMsg->InstructionByteCount > 0)
1942 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pMsg->Header.Rip,
1943 pMsg->InstructionBytes, pMsg->InstructionByteCount);
1944 else
1945 rcStrict = IEMExecOne(pVCpu);
1946 /** @todo do we need to do anything wrt debugging here? */
1947 }
1948 else
1949 {
1950 /* Frequent access or probing. */
1951 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
1952 Log4(("MemExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
1953 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
1954 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
1955 }
1956 return rcStrict;
1957}
1958#elif defined(IN_RING3)
1959/**
1960 * Deals with memory access exits (WHvRunVpExitReasonMemoryAccess).
1961 *
1962 * @returns Strict VBox status code.
1963 * @param pVM The cross context VM structure.
1964 * @param pVCpu The cross context per CPU structure.
1965 * @param pExit The VM exit information to handle.
1966 * @param pCtx The register context.
1967 * @sa nemHCWinHandleMessageMemory
1968 */
1969NEM_TMPL_STATIC VBOXSTRICTRC
1970nemR3WinHandleExitMemory(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx)
1971{
1972 uint64_t const uHostTsc = ASMReadTSC();
1973 Assert(pExit->MemoryAccess.AccessInfo.AccessType != 3);
1974 AssertMsg(pExit->VpContext.InstructionLength < 0x10, ("%#x\n", pExit->VpContext.InstructionLength));
1975
1976 /*
1977 * Whatever we do, we must clear pending event injection upon resume.
1978 */
1979 if (pExit->VpContext.ExecutionState.InterruptionPending)
1980 pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
1981
1982 /*
1983 * Ask PGM for information about the given GCPhys. We need to check if we're
1984 * out of sync first.
1985 */
1986 NEMHCWINHMACPCCSTATE State = { pExit->MemoryAccess.AccessInfo.AccessType == WHvMemoryAccessWrite, false, false };
1987 PGMPHYSNEMPAGEINFO Info;
1988 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pExit->MemoryAccess.Gpa, State.fWriteAccess, &Info,
1989 nemHCWinHandleMemoryAccessPageCheckerCallback, &State);
1990 if (RT_SUCCESS(rc))
1991 {
1992 if (Info.fNemProt & ( pExit->MemoryAccess.AccessInfo.AccessType == WHvMemoryAccessWrite
1993 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
1994 {
1995 if (State.fCanResume)
1996 {
1997 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting (%s)\n",
1998 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
1999 pExit->MemoryAccess.Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
2000 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
2001 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pExit->MemoryAccess.AccessInfo.AccessType]));
2002 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
2003 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, uHostTsc);
2004 return VINF_SUCCESS;
2005 }
2006 }
2007 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating (%s)\n",
2008 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2009 pExit->MemoryAccess.Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
2010 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
2011 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pExit->MemoryAccess.AccessInfo.AccessType]));
2012 }
2013 else
2014 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp rc=%Rrc%s; emulating (%s)\n",
2015 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2016 pExit->MemoryAccess.Gpa, rc, State.fDidSomething ? " modified-backing" : "",
2017 g_apszHvInterceptAccessTypes[pExit->MemoryAccess.AccessInfo.AccessType]));
2018
2019 /*
2020 * Emulate the memory access, either access handler or special memory.
2021 */
2022 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2023 pExit->MemoryAccess.AccessInfo.AccessType == WHvMemoryAccessWrite
2024 ? EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
2025 : EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
2026 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, uHostTsc);
2027 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
2028 rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2029 AssertRCReturn(rc, rc);
2030 if (pExit->VpContext.ExecutionState.Reserved0 || pExit->VpContext.ExecutionState.Reserved1)
2031 Log(("MemExit/Hdr/State: Reserved0=%#x Reserved1=%#x\n", pExit->VpContext.ExecutionState.Reserved0, pExit->VpContext.ExecutionState.Reserved1));
2032
2033 VBOXSTRICTRC rcStrict;
2034 if (!pExitRec)
2035 {
2036 //if (pExit->MemoryAccess.InstructionByteCount > 0)
2037 // Log4(("InstructionByteCount=%#x %.16Rhxs\n", pExit->MemoryAccess.InstructionByteCount, pExit->MemoryAccess.InstructionBytes));
2038 if (pExit->MemoryAccess.InstructionByteCount > 0)
2039 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pExit->VpContext.Rip,
2040 pExit->MemoryAccess.InstructionBytes, pExit->MemoryAccess.InstructionByteCount);
2041 else
2042 rcStrict = IEMExecOne(pVCpu);
2043 /** @todo do we need to do anything wrt debugging here? */
2044 }
2045 else
2046 {
2047 /* Frequent access or probing. */
2048 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2049 Log4(("MemExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2050 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2051 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2052 }
2053 return rcStrict;
2054}
2055#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
2056
2057
2058#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
2059/**
2060 * Deals with I/O port intercept message.
2061 *
2062 * @returns Strict VBox status code.
2063 * @param pVM The cross context VM structure.
2064 * @param pVCpu The cross context per CPU structure.
2065 * @param pMsg The message.
2066 * @param pCtx The register context.
2067 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
2068 */
2069NEM_TMPL_STATIC VBOXSTRICTRC
2070nemHCWinHandleMessageIoPort(PVM pVM, PVMCPU pVCpu, HV_X64_IO_PORT_INTERCEPT_MESSAGE const *pMsg, PCPUMCTX pCtx, PGVMCPU pGVCpu)
2071{
2072 Assert( pMsg->AccessInfo.AccessSize == 1
2073 || pMsg->AccessInfo.AccessSize == 2
2074 || pMsg->AccessInfo.AccessSize == 4);
2075 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
2076 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE);
2077 AssertMsg(pMsg->Header.InstructionLength < 0x10, ("%#x\n", pMsg->Header.InstructionLength));
2078
2079 /*
2080 * Whatever we do, we must clear pending event injection upon resume.
2081 */
2082 if (pMsg->Header.ExecutionState.InterruptionPending)
2083 pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
2084
2085 /*
2086 * Add history first to avoid two paths doing EMHistoryExec calls.
2087 */
2088 VBOXSTRICTRC rcStrict;
2089 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2090 !pMsg->AccessInfo.StringOp
2091 ? ( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
2092 ? EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_WRITE)
2093 : EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_READ))
2094 : ( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
2095 ? EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_WRITE)
2096 : EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_READ)),
2097 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
2098 if (!pExitRec)
2099 {
2100 if (!pMsg->AccessInfo.StringOp)
2101 {
2102 /*
2103 * Simple port I/O.
2104 */
2105 static uint32_t const s_fAndMask[8] =
2106 { UINT32_MAX, UINT32_C(0xff), UINT32_C(0xffff), UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX };
2107 uint32_t const fAndMask = s_fAndMask[pMsg->AccessInfo.AccessSize];
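/* Editorial sketch of the masking semantics used further down (sub-dword
 accesses merge into RAX, mirroring how IN AL/AX preserve the upper register
 bits while IN EAX zero-extends): */
# if 0 /* illustrative only, not part of the build */
 uint64_t const uOldRax = UINT64_C(0x1122334455667788);
 uint64_t const uNewRax = (uOldRax & ~(uint64_t)UINT32_C(0xff)) | UINT32_C(0x5a); /* 1-byte IN */
 Assert(uNewRax == UINT64_C(0x112233445566775a)); /* only AL is replaced */
# endif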
2108
2109 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, &pMsg->Header);
2110 if (pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
2111 {
2112 rcStrict = IOMIOPortWrite(pVM, pVCpu, pMsg->PortNumber, (uint32_t)pMsg->Rax & fAndMask, pMsg->AccessInfo.AccessSize);
2113 Log4(("IOExit/%u: %04x:%08RX64/%s: OUT %#x, %#x LB %u rcStrict=%Rrc\n",
2114 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2115 pMsg->PortNumber, (uint32_t)pMsg->Rax & fAndMask, pMsg->AccessInfo.AccessSize, VBOXSTRICTRC_VAL(rcStrict) ));
2116 if (IOM_SUCCESS(rcStrict))
2117 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pMsg->Header);
2118# ifdef IN_RING0
2119 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
2120 && !pCtx->rflags.Bits.u1TF
2121 /** @todo check for debug breakpoints */ )
2122 return EMRZSetPendingIoPortWrite(pVCpu, pMsg->PortNumber, pMsg->Header.InstructionLength,
2123 pMsg->AccessInfo.AccessSize, (uint32_t)pMsg->Rax & fAndMask);
2124# endif
2125 else
2126 {
2127 pCtx->rax = pMsg->Rax;
2128 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RAX;
2129 }
2130 }
2131 else
2132 {
2133 uint32_t uValue = 0;
2134 rcStrict = IOMIOPortRead(pVM, pVCpu, pMsg->PortNumber, &uValue, pMsg->AccessInfo.AccessSize);
2135 Log4(("IOExit/%u: %04x:%08RX64/%s: IN %#x LB %u -> %#x, rcStrict=%Rrc\n",
2136 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2137 pMsg->PortNumber, pMsg->AccessInfo.AccessSize, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2138 if (IOM_SUCCESS(rcStrict))
2139 {
2140 if (pMsg->AccessInfo.AccessSize != 4)
2141 pCtx->rax = (pMsg->Rax & ~(uint64_t)fAndMask) | (uValue & fAndMask);
2142 else
2143 pCtx->rax = uValue;
2144 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RAX;
2145 Log4(("IOExit/%u: RAX %#RX64 -> %#RX64\n", pVCpu->idCpu, pMsg->Rax, pCtx->rax));
2146 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pMsg->Header);
2147 }
2148 else
2149 {
2150 pCtx->rax = pMsg->Rax;
2151 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RAX;
2152# ifdef IN_RING0
2153 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
2154 && !pCtx->rflags.Bits.u1TF
2155 /** @todo check for debug breakpoints */ )
2156 return EMRZSetPendingIoPortRead(pVCpu, pMsg->PortNumber, pMsg->Header.InstructionLength,
2157 pMsg->AccessInfo.AccessSize);
2158# endif
2159 }
2160 }
2161 }
2162 else
2163 {
2164 /*
2165 * String port I/O.
2166 */
2167 /** @todo Someone at Microsoft please explain how we can get the address mode
2168 * from the IoPortAccess.VpContext. CS.Attributes is only sufficient for
2169 * getting the default mode, it can always be overridden by a prefix. This
2170 * forces us to interpret the instruction from opcodes, which is suboptimal.
2171 * Both AMD-V and VT-x include the address size in the exit info, at least on
2172 * CPUs that are reasonably new.
2173 *
2174 * Of course, it's possible this is undocumented and we just need to do some
2175 * experiments to figure out how it's communicated. Alternatively, we can scan
2176 * the opcode bytes for possible evil prefixes.
2177 */
2178 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, &pMsg->Header);
2179 pCtx->fExtrn &= ~( CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDI | CPUMCTX_EXTRN_RSI
2180 | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2181 NEM_WIN_COPY_BACK_SEG(pCtx->ds, pMsg->DsSegment);
2182 NEM_WIN_COPY_BACK_SEG(pCtx->es, pMsg->EsSegment);
2183 pCtx->rax = pMsg->Rax;
2184 pCtx->rcx = pMsg->Rcx;
2185 pCtx->rdi = pMsg->Rdi;
2186 pCtx->rsi = pMsg->Rsi;
2187# ifdef IN_RING0
2188 rcStrict = nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "IOExit");
2189 if (rcStrict != VINF_SUCCESS)
2190 return rcStrict;
2191# else
2192 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2193 AssertRCReturn(rc, rc);
2194 RT_NOREF(pGVCpu);
2195# endif
2196
2197 Log4(("IOExit/%u: %04x:%08RX64/%s: %s%s %#x LB %u (emulating)\n",
2198 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2199 pMsg->AccessInfo.RepPrefix ? "REP " : "",
2200 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? "OUTS" : "INS",
2201 pMsg->PortNumber, pMsg->AccessInfo.AccessSize ));
2202 rcStrict = IEMExecOne(pVCpu);
2203 }
2204 if (IOM_SUCCESS(rcStrict))
2205 {
2206 /*
2207 * Do debug checks.
2208 */
2209 if ( pMsg->Header.ExecutionState.DebugActive /** @todo Microsoft: Does DebugActive only reflect DR7? */
2210 || (pMsg->Header.Rflags & X86_EFL_TF)
2211 || DBGFBpIsHwIoArmed(pVM) )
2212 {
2213 /** @todo Debugging. */
2214 }
2215 }
2216 return rcStrict;
2217 }
2218
2219 /*
2220 * Frequent exit or something needing probing.
2221 * Get state and call EMHistoryExec.
2222 */
2223 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, &pMsg->Header);
2224 if (!pMsg->AccessInfo.StringOp)
2225 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RAX;
2226 else
2227 {
2228 pCtx->fExtrn &= ~( CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDI | CPUMCTX_EXTRN_RSI
2229 | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2230 NEM_WIN_COPY_BACK_SEG(pCtx->ds, pMsg->DsSegment);
2231 NEM_WIN_COPY_BACK_SEG(pCtx->es, pMsg->EsSegment);
2232 pCtx->rcx = pMsg->Rcx;
2233 pCtx->rdi = pMsg->Rdi;
2234 pCtx->rsi = pMsg->Rsi;
2235 }
2236 pCtx->rax = pMsg->Rax;
2237
2238# ifdef IN_RING0
2239 rcStrict = nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "IOExit");
2240 if (rcStrict != VINF_SUCCESS)
2241 return rcStrict;
2242# else
2243 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2244 AssertRCReturn(rc, rc);
2245 RT_NOREF(pGVCpu);
2246# endif
2247
2248 Log4(("IOExit/%u: %04x:%08RX64/%s: %s%s%s %#x LB %u -> EMHistoryExec\n",
2249 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2250 pMsg->AccessInfo.RepPrefix ? "REP " : "",
2251 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? "OUT" : "IN",
2252 pMsg->AccessInfo.StringOp ? "S" : "",
2253 pMsg->PortNumber, pMsg->AccessInfo.AccessSize));
2254 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2255 Log4(("IOExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2256 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2257 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2258 return rcStrict;
2259}
2260#elif defined(IN_RING3)
2261/**
2262 * Deals with I/O port access exits (WHvRunVpExitReasonX64IoPortAccess).
2263 *
2264 * @returns Strict VBox status code.
2265 * @param pVM The cross context VM structure.
2266 * @param pVCpu The cross context per CPU structure.
2267 * @param pExit The VM exit information to handle.
2268 * @param pCtx The register context.
2269 * @sa nemHCWinHandleMessageIoPort
2270 */
2271NEM_TMPL_STATIC VBOXSTRICTRC
2272nemR3WinHandleExitIoPort(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx)
2273{
2274 Assert( pExit->IoPortAccess.AccessInfo.AccessSize == 1
2275 || pExit->IoPortAccess.AccessInfo.AccessSize == 2
2276 || pExit->IoPortAccess.AccessInfo.AccessSize == 4);
2277 AssertMsg(pExit->VpContext.InstructionLength < 0x10, ("%#x\n", pExit->VpContext.InstructionLength));
2278
2279 /*
2280 * Whatever we do, we must clear pending event injection upon resume.
2281 */
2282 if (pExit->VpContext.ExecutionState.InterruptionPending)
2283 pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
2284
2285 /*
2286 * Add history first to avoid two paths doing EMHistoryExec calls.
2287 */
2288 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2289 !pExit->IoPortAccess.AccessInfo.StringOp
2290 ? ( pExit->IoPortAccess.AccessInfo.IsWrite
2291 ? EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_WRITE)
2292 : EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_READ))
2293 : ( pExit->IoPortAccess.AccessInfo.IsWrite
2294 ? EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_WRITE)
2295 : EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_READ)),
2296 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
2297 if (!pExitRec)
2298 {
2299 VBOXSTRICTRC rcStrict;
2300 if (!pExit->IoPortAccess.AccessInfo.StringOp)
2301 {
2302 /*
2303 * Simple port I/O.
2304 */
2305 static uint32_t const s_fAndMask[8] =
2306 { UINT32_MAX, UINT32_C(0xff), UINT32_C(0xffff), UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX };
2307 uint32_t const fAndMask = s_fAndMask[pExit->IoPortAccess.AccessInfo.AccessSize];
2308 if (pExit->IoPortAccess.AccessInfo.IsWrite)
2309 {
2310 rcStrict = IOMIOPortWrite(pVM, pVCpu, pExit->IoPortAccess.PortNumber,
2311 (uint32_t)pExit->IoPortAccess.Rax & fAndMask,
2312 pExit->IoPortAccess.AccessInfo.AccessSize);
2313 Log4(("IOExit/%u: %04x:%08RX64/%s: OUT %#x, %#x LB %u rcStrict=%Rrc\n",
2314 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2315 pExit->IoPortAccess.PortNumber, (uint32_t)pExit->IoPortAccess.Rax & fAndMask,
2316 pExit->IoPortAccess.AccessInfo.AccessSize, VBOXSTRICTRC_VAL(rcStrict) ));
2317 if (IOM_SUCCESS(rcStrict))
2318 {
2319 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
2320 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pExit->VpContext);
2321 }
2322 }
2323 else
2324 {
2325 uint32_t uValue = 0;
2326 rcStrict = IOMIOPortRead(pVM, pVCpu, pExit->IoPortAccess.PortNumber, &uValue,
2327 pExit->IoPortAccess.AccessInfo.AccessSize);
2328 Log4(("IOExit/%u: %04x:%08RX64/%s: IN %#x LB %u -> %#x, rcStrict=%Rrc\n",
2329 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2330 pExit->IoPortAccess.PortNumber, pExit->IoPortAccess.AccessInfo.AccessSize, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2331 if (IOM_SUCCESS(rcStrict))
2332 {
2333 if (pExit->IoPortAccess.AccessInfo.AccessSize != 4)
2334 pCtx->rax = (pExit->IoPortAccess.Rax & ~(uint64_t)fAndMask) | (uValue & fAndMask);
2335 else
2336 pCtx->rax = uValue;
2337 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RAX;
2338 Log4(("IOExit/%u: RAX %#RX64 -> %#RX64\n", pVCpu->idCpu, pExit->IoPortAccess.Rax, pCtx->rax));
2339 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
2340 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pExit->VpContext);
2341 }
2342 }
2343 }
2344 else
2345 {
2346 /*
2347 * String port I/O.
2348 */
2349 /** @todo Someone at Microsoft please explain how we can get the address mode
2350 * from the IoPortAccess.VpContext. CS.Attributes is only sufficient for
2351 * getting the default mode, it can always be overridden by a prefix. This
2352 * forces us to interpret the instruction from opcodes, which is suboptimal.
2353 * Both AMD-V and VT-x include the address size in the exit info, at least on
2354 * CPUs that are reasonably new.
2355 *
2356 * Of course, it's possible this is undocumented and we just need to do some
2357 * experiments to figure out how it's communicated. Alternatively, we can scan
2358 * the opcode bytes for possible evil prefixes.
2359 */
2360 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
2361 pCtx->fExtrn &= ~( CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDI | CPUMCTX_EXTRN_RSI
2362 | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2363 NEM_WIN_COPY_BACK_SEG(pCtx->ds, pExit->IoPortAccess.Ds);
2364 NEM_WIN_COPY_BACK_SEG(pCtx->es, pExit->IoPortAccess.Es);
2365 pCtx->rax = pExit->IoPortAccess.Rax;
2366 pCtx->rcx = pExit->IoPortAccess.Rcx;
2367 pCtx->rdi = pExit->IoPortAccess.Rdi;
2368 pCtx->rsi = pExit->IoPortAccess.Rsi;
2369 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2370 AssertRCReturn(rc, rc);
2371
2372 Log4(("IOExit/%u: %04x:%08RX64/%s: %s%s %#x LB %u (emulating)\n",
2373 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2374 pExit->IoPortAccess.AccessInfo.RepPrefix ? "REP " : "",
2375 pExit->IoPortAccess.AccessInfo.IsWrite ? "OUTS" : "INS",
2376 pExit->IoPortAccess.PortNumber, pExit->IoPortAccess.AccessInfo.AccessSize ));
2377 rcStrict = IEMExecOne(pVCpu);
2378 }
2379 if (IOM_SUCCESS(rcStrict))
2380 {
2381 /*
2382 * Do debug checks.
2383 */
2384 if ( pExit->VpContext.ExecutionState.DebugActive /** @todo Microsoft: Does DebugActive only reflect DR7? */
2385 || (pExit->VpContext.Rflags & X86_EFL_TF)
2386 || DBGFBpIsHwIoArmed(pVM) )
2387 {
2388 /** @todo Debugging. */
2389 }
2390 }
2391 return rcStrict;
2392 }
2393
2394 /*
2395 * Frequent exit or something needing probing.
2396 * Get state and call EMHistoryExec.
2397 */
2398 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
2399 if (!pExit->IoPortAccess.AccessInfo.StringOp)
2400 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RAX;
2401 else
2402 {
2403 pCtx->fExtrn &= ~( CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDI | CPUMCTX_EXTRN_RSI
2404 | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2405 NEM_WIN_COPY_BACK_SEG(pCtx->ds, pExit->IoPortAccess.Ds);
2406 NEM_WIN_COPY_BACK_SEG(pCtx->es, pExit->IoPortAccess.Es);
2407 pCtx->rcx = pExit->IoPortAccess.Rcx;
2408 pCtx->rdi = pExit->IoPortAccess.Rdi;
2409 pCtx->rsi = pExit->IoPortAccess.Rsi;
2410 }
2411 pCtx->rax = pExit->IoPortAccess.Rax;
2412 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2413 AssertRCReturn(rc, rc);
2414 Log4(("IOExit/%u: %04x:%08RX64/%s: %s%s%s %#x LB %u -> EMHistoryExec\n",
2415 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2416 pExit->IoPortAccess.AccessInfo.RepPrefix ? "REP " : "",
2417 pExit->IoPortAccess.AccessInfo.IsWrite ? "OUT" : "IN",
2418 pExit->IoPortAccess.AccessInfo.StringOp ? "S" : "",
2419 pExit->IoPortAccess.PortNumber, pExit->IoPortAccess.AccessInfo.AccessSize));
2420 VBOXSTRICTRC rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2421 Log4(("IOExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2422 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2423 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2424 return rcStrict;
2425}
2426#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
2427
2428
2429#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
2430/**
2431 * Deals with interrupt window message.
2432 *
2433 * @returns Strict VBox status code.
2434 * @param pVM The cross context VM structure.
2435 * @param pVCpu The cross context per CPU structure.
2436 * @param pMsg The message.
2437 * @param pCtx The register context.
2438 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
2439 * @sa nemR3WinHandleExitInterruptWindow
2440 */
2441NEM_TMPL_STATIC VBOXSTRICTRC
2442nemHCWinHandleMessageInterruptWindow(PVM pVM, PVMCPU pVCpu, HV_X64_INTERRUPT_WINDOW_MESSAGE const *pMsg,
2443 PCPUMCTX pCtx, PGVMCPU pGVCpu)
2444{
2445 /*
2446 * Assert message sanity.
2447 */
2448 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_EXECUTE
2449 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ // READ & WRITE are probably not used here
2450 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE);
2451 AssertMsg(pMsg->Header.InstructionLength < 0x10, ("%#x\n", pMsg->Header.InstructionLength));
2452 AssertMsg(pMsg->Type == HvX64PendingInterrupt || pMsg->Type == HvX64PendingNmi, ("%#x\n", pMsg->Type));
2453
2454 /*
2455 * Just copy the state we've got and handle it in the loop for now.
2456 */
2457 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_NEM, NEMEXITTYPE_INTTERRUPT_WINDOW),
2458 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
2459
2460 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, &pMsg->Header);
2461 Log4(("IntWinExit/%u: %04x:%08RX64/%s: %u IF=%d InterruptShadow=%d\n",
2462 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2463 pMsg->Type, RT_BOOL(pMsg->Header.Rflags & X86_EFL_IF), pMsg->Header.ExecutionState.InterruptShadow));
2464
2465 /** @todo call nemHCWinHandleInterruptFF */
2466 RT_NOREF(pVM, pGVCpu);
2467 return VINF_SUCCESS;
2468}
2469#elif defined(IN_RING3)
2470/**
2471 * Deals with interrupt window exits (WHvRunVpExitReasonX64InterruptWindow).
2472 *
2473 * @returns Strict VBox status code.
2474 * @param pVM The cross context VM structure.
2475 * @param pVCpu The cross context per CPU structure.
2476 * @param pExit The VM exit information to handle.
2477 * @param pCtx The register context.
2478 * @sa nemHCWinHandleMessageInterruptWindow
2479 */
2480NEM_TMPL_STATIC VBOXSTRICTRC
2481nemR3WinHandleExitInterruptWindow(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx)
2482{
2483 /*
2484 * Assert message sanity.
2485 */
2486 AssertMsg(pExit->VpContext.InstructionLength < 0x10, ("%#x\n", pExit->VpContext.InstructionLength));
2487 AssertMsg( pExit->InterruptWindow.DeliverableType == WHvX64PendingInterrupt
2488 || pExit->InterruptWindow.DeliverableType == WHvX64PendingNmi,
2489 ("%#x\n", pExit->InterruptWindow.DeliverableType));
2490
2491 /*
2492 * Just copy the state we've got and handle it in the loop for now.
2493 */
2494 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_NEM, NEMEXITTYPE_INTTERRUPT_WINDOW),
2495 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
2496
2497 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
2498 Log4(("IntWinExit/%u: %04x:%08RX64/%s: %u IF=%d InterruptShadow=%d\n",
2499 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2500 pExit->InterruptWindow.DeliverableType, RT_BOOL(pExit->VpContext.Rflags & X86_EFL_IF),
2501 pExit->VpContext.ExecutionState.InterruptShadow));
2502
2503 /** @todo call nemHCWinHandleInterruptFF */
2504 RT_NOREF(pVM);
2505 return VINF_SUCCESS;
2506}
2507#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
2508
2509
2510#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
2511/**
2512 * Deals with CPUID intercept message.
2513 *
2514 * @returns Strict VBox status code.
2515 * @param pVM The cross context VM structure.
2516 * @param pVCpu The cross context per CPU structure.
2517 * @param pMsg The message.
2518 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
2519 * @sa nemR3WinHandleExitCpuId
2520 */
2521NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageCpuId(PVM pVM, PVMCPU pVCpu, HV_X64_CPUID_INTERCEPT_MESSAGE const *pMsg,
2522 PGVMCPU pGVCpu)
2523{
2524 AssertMsg(pMsg->Header.InstructionLength < 0x10, ("%#x\n", pMsg->Header.InstructionLength));
2525 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM, EMEXITTYPE_CPUID),
2526 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
2527 if (!pExitRec)
2528 {
2529 /*
2530 * Soak up state and execute the instruction.
2531 *
2532 * Note! If this grows slightly more complicated, combine into an IEMExecDecodedCpuId
2533 * function and make everyone use it.
2534 */
2535 /** @todo Combine implementations into IEMExecDecodedCpuId as this will
2536 * only get weirder with nested VT-x and AMD-V support. */
2537 nemHCWinCopyStateFromX64Header(pVCpu, &pVCpu->cpum.GstCtx, &pMsg->Header);
2538
2539 /* Copy in the low register values (top is always cleared). */
2540 pVCpu->cpum.GstCtx.rax = (uint32_t)pMsg->Rax;
2541 pVCpu->cpum.GstCtx.rcx = (uint32_t)pMsg->Rcx;
2542 pVCpu->cpum.GstCtx.rdx = (uint32_t)pMsg->Rdx;
2543 pVCpu->cpum.GstCtx.rbx = (uint32_t)pMsg->Rbx;
2544 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);
2545
2546 /* Get the correct values. */
2547 CPUMGetGuestCpuId(pVCpu, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx,
2548 &pVCpu->cpum.GstCtx.eax, &pVCpu->cpum.GstCtx.ebx, &pVCpu->cpum.GstCtx.ecx, &pVCpu->cpum.GstCtx.edx);
2549
2550 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: rax=%08RX64 / rcx=%08RX64 / rdx=%08RX64 / rbx=%08RX64 -> %08RX32 / %08RX32 / %08RX32 / %08RX32 (hv: %08RX64 / %08RX64 / %08RX64 / %08RX64)\n",
2551 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2552 pMsg->Rax, pMsg->Rcx, pMsg->Rdx, pMsg->Rbx,
2553 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.ebx,
2554 pMsg->DefaultResultRax, pMsg->DefaultResultRcx, pMsg->DefaultResultRdx, pMsg->DefaultResultRbx));
2555
2556 /* Move RIP and we're done. */
2557 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, &pVCpu->cpum.GstCtx, &pMsg->Header);
2558
2559 return VINF_SUCCESS;
2560 }
2561
2562 /*
2563 * Frequent exit or something needing probing.
2564 * Get state and call EMHistoryExec.
2565 */
2566 nemHCWinCopyStateFromX64Header(pVCpu, &pVCpu->cpum.GstCtx, &pMsg->Header);
2567 pVCpu->cpum.GstCtx.rax = pMsg->Rax;
2568 pVCpu->cpum.GstCtx.rcx = pMsg->Rcx;
2569 pVCpu->cpum.GstCtx.rdx = pMsg->Rdx;
2570 pVCpu->cpum.GstCtx.rbx = pMsg->Rbx;
2571 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);
2572 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: rax=%08RX64 / rcx=%08RX64 / rdx=%08RX64 / rbx=%08RX64 (hv: %08RX64 / %08RX64 / %08RX64 / %08RX64) ==> EMHistoryExec\n",
2573 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2574 pMsg->Rax, pMsg->Rcx, pMsg->Rdx, pMsg->Rbx,
2575 pMsg->DefaultResultRax, pMsg->DefaultResultRcx, pMsg->DefaultResultRdx, pMsg->DefaultResultRbx));
2576# ifdef IN_RING0
2577 VBOXSTRICTRC rcStrict = nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, &pVCpu->cpum.GstCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "CpuIdExit");
2578 if (rcStrict != VINF_SUCCESS)
2579 return rcStrict;
2580 RT_NOREF(pVM);
2581# else
2582 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, &pVCpu->cpum.GstCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2583 AssertRCReturn(rc, rc);
2584 RT_NOREF(pGVCpu);
2585# endif
2586 VBOXSTRICTRC rcStrictExec = EMHistoryExec(pVCpu, pExitRec, 0);
2587 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2588 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2589 VBOXSTRICTRC_VAL(rcStrictExec), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2590 return rcStrictExec;
2591}
2592#elif defined(IN_RING3)
2593/**
2594 * Deals with CPUID exits (WHvRunVpExitReasonX64Cpuid).
2595 *
2596 * @returns Strict VBox status code.
2597 * @param pVM The cross context VM structure.
2598 * @param pVCpu The cross context per CPU structure.
2599 * @param pExit The VM exit information to handle.
2600 * @sa nemHCWinHandleMessageCpuId
2601 */
2602NEM_TMPL_STATIC VBOXSTRICTRC
2603nemR3WinHandleExitCpuId(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2604{
2605 AssertMsg(pExit->VpContext.InstructionLength < 0x10, ("%#x\n", pExit->VpContext.InstructionLength));
2606 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM, EMEXITTYPE_CPUID),
2607 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
2608 if (!pExitRec)
2609 {
2610 /*
2611 * Soak up state and execute the instruction.
2612 *
2613 * Note! If this grows slightly more complicated, combine into an IEMExecDecodedCpuId
2614 * function and make everyone use it.
2615 */
2616 /** @todo Combine implementations into IEMExecDecodedCpuId as this will
2617 * only get weirder with nested VT-x and AMD-V support. */
2618 nemR3WinCopyStateFromX64Header(pVCpu, &pVCpu->cpum.GstCtx, &pExit->VpContext);
2619
2620 /* Copy in the low register values (top is always cleared). */
2621 pVCpu->cpum.GstCtx.rax = (uint32_t)pExit->CpuidAccess.Rax;
2622 pVCpu->cpum.GstCtx.rcx = (uint32_t)pExit->CpuidAccess.Rcx;
2623 pVCpu->cpum.GstCtx.rdx = (uint32_t)pExit->CpuidAccess.Rdx;
2624 pVCpu->cpum.GstCtx.rbx = (uint32_t)pExit->CpuidAccess.Rbx;
2625 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);
2626
2627 /* Get the correct values. */
2628 CPUMGetGuestCpuId(pVCpu, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx,
2629 &pVCpu->cpum.GstCtx.eax, &pVCpu->cpum.GstCtx.ebx, &pVCpu->cpum.GstCtx.ecx, &pVCpu->cpum.GstCtx.edx);
2630
2631 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: rax=%08RX64 / rcx=%08RX64 / rdx=%08RX64 / rbx=%08RX64 -> %08RX32 / %08RX32 / %08RX32 / %08RX32 (hv: %08RX64 / %08RX64 / %08RX64 / %08RX64)\n",
2632 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2633 pExit->CpuidAccess.Rax, pExit->CpuidAccess.Rcx, pExit->CpuidAccess.Rdx, pExit->CpuidAccess.Rbx,
2634 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.ebx,
2635 pExit->CpuidAccess.DefaultResultRax, pExit->CpuidAccess.DefaultResultRcx, pExit->CpuidAccess.DefaultResultRdx, pExit->CpuidAccess.DefaultResultRbx));
2636
2637 /* Move RIP and we're done. */
2638 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, &pVCpu->cpum.GstCtx, &pExit->VpContext);
2639
2640 RT_NOREF_PV(pVM);
2641 return VINF_SUCCESS;
2642 }
2643
2644 /*
2645 * Frequent exit or something needing probing.
2646 * Get state and call EMHistoryExec.
2647 */
2648 nemR3WinCopyStateFromX64Header(pVCpu, &pVCpu->cpum.GstCtx, &pExit->VpContext);
2649 pVCpu->cpum.GstCtx.rax = pExit->CpuidAccess.Rax;
2650 pVCpu->cpum.GstCtx.rcx = pExit->CpuidAccess.Rcx;
2651 pVCpu->cpum.GstCtx.rdx = pExit->CpuidAccess.Rdx;
2652 pVCpu->cpum.GstCtx.rbx = pExit->CpuidAccess.Rbx;
2653 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);
2654 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: rax=%08RX64 / rcx=%08RX64 / rdx=%08RX64 / rbx=%08RX64 (hv: %08RX64 / %08RX64 / %08RX64 / %08RX64) ==> EMHistoryExec\n",
2655 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2656 pExit->CpuidAccess.Rax, pExit->CpuidAccess.Rcx, pExit->CpuidAccess.Rdx, pExit->CpuidAccess.Rbx,
2657 pExit->CpuidAccess.DefaultResultRax, pExit->CpuidAccess.DefaultResultRcx, pExit->CpuidAccess.DefaultResultRdx, pExit->CpuidAccess.DefaultResultRbx));
2658 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, &pVCpu->cpum.GstCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2659 AssertRCReturn(rc, rc);
2660 VBOXSTRICTRC rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2661 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2662 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2663 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2664 return rcStrict;
2665}
2666#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
2667
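/*
 * Illustrative sketch (added commentary, not part of the original file): the
 * MSR intercept handlers below both rely on the architectural RDMSR/WRMSR
 * convention of splitting the 64-bit MSR value across EDX:EAX.  Hypothetical
 * helpers capturing just that convention could look like this:
 */
#if 0
DECLINLINE(uint64_t) nemHcWinSketchMsrValueFromGprs(uint64_t uRax, uint64_t uRdx)
{
    /* WRMSR takes the low half from EAX and the high half from EDX; the
       upper halves of RAX/RDX are ignored. */
    return RT_MAKE_U64((uint32_t)uRax, (uint32_t)uRdx);
}

DECLINLINE(void) nemHcWinSketchMsrValueToGprs(PCPUMCTX pCtx, uint64_t uValue)
{
    /* RDMSR returns the low half in RAX and the high half in RDX, with both
       registers zero-extended (matching what the handlers below do). */
    pCtx->rax = (uint32_t)uValue;
    pCtx->rdx = uValue >> 32;
}
#endif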
2668
2669#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
2670/**
2671 * Deals with MSR intercept message.
2672 *
2673 * @returns Strict VBox status code.
2674 * @param pVCpu The cross context per CPU structure.
2675 * @param pMsg The message.
2676 * @param pCtx The register context.
2677 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
2678 * @sa nemR3WinHandleExitMsr
2679 */
2680NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageMsr(PVMCPU pVCpu, HV_X64_MSR_INTERCEPT_MESSAGE const *pMsg,
2681 PCPUMCTX pCtx, PGVMCPU pGVCpu)
2682{
2683 /*
2684 * A wee bit of sanity first.
2685 */
2686 AssertMsg(pMsg->Header.InstructionLength < 0x10, ("%#x\n", pMsg->Header.InstructionLength));
2687 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
2688 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE);
2689
2690 /*
2691 * Check CPL as that's common to both RDMSR and WRMSR.
2692 */
2693 VBOXSTRICTRC rcStrict;
2694 if (pMsg->Header.ExecutionState.Cpl == 0)
2695 {
2696 /*
2697 * Get all the MSR state. Since we're getting EFER, we also need to
2698 * get CR0, CR4 and CR3.
2699 */
2700 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2701 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
2702 ? EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE)
2703 : EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ),
2704 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
2705
2706 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, &pMsg->Header);
2707 rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx,
2708 (!pExitRec ? 0 : IEM_CPUMCTX_EXTRN_MUST_MASK)
2709 | CPUMCTX_EXTRN_ALL_MSRS | CPUMCTX_EXTRN_CR0
2710 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4,
2711 "MSRs");
2712 if (rcStrict == VINF_SUCCESS)
2713 {
2714 if (!pExitRec)
2715 {
2716 /*
2717 * Handle writes.
2718 */
2719 if (pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
2720 {
2721 rcStrict = CPUMSetGuestMsr(pVCpu, pMsg->MsrNumber, RT_MAKE_U64((uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx));
2722 Log4(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc\n",
2723 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2724 pMsg->MsrNumber, (uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx, VBOXSTRICTRC_VAL(rcStrict) ));
2725 if (rcStrict == VINF_SUCCESS)
2726 {
2727 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pMsg->Header);
2728 return VINF_SUCCESS;
2729 }
2730# ifndef IN_RING3
2731 /* move to ring-3 and handle the trap/whatever there, as we want to LogRel this. */
2732 if (rcStrict == VERR_CPUM_RAISE_GP_0)
2733 rcStrict = VINF_CPUM_R3_MSR_WRITE;
2734 return rcStrict;
2735# else
2736 LogRel(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc!\n",
2737 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2738 pMsg->MsrNumber, (uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx, VBOXSTRICTRC_VAL(rcStrict) ));
2739# endif
2740 }
2741 /*
2742 * Handle reads.
2743 */
2744 else
2745 {
2746 uint64_t uValue = 0;
2747 rcStrict = CPUMQueryGuestMsr(pVCpu, pMsg->MsrNumber, &uValue);
2748 Log4(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n",
2749 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2750 pMsg->MsrNumber, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2751 if (rcStrict == VINF_SUCCESS)
2752 {
2753 pCtx->rax = (uint32_t)uValue;
2754 pCtx->rdx = uValue >> 32;
2755 pCtx->fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX);
2756 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pMsg->Header);
2757 return VINF_SUCCESS;
2758 }
2759# ifndef IN_RING3
2760 /* move to ring-3 and handle the trap/whatever there, as we want to LogRel this. */
2761 if (rcStrict == VERR_CPUM_RAISE_GP_0)
2762 rcStrict = VINF_CPUM_R3_MSR_READ;
2763 return rcStrict;
2764# else
2765 LogRel(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n",
2766 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2767 pMsg->MsrNumber, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2768# endif
2769 }
2770 }
2771 else
2772 {
2773 /*
2774 * Handle frequent exit or something needing probing.
2775 */
2776 Log4(("MsrExit/%u: %04x:%08RX64/%s: %sMSR %#08x\n",
2777 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2778 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? "WR" : "RD", pMsg->MsrNumber));
2779 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2780 Log4(("MsrExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2781 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2782 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2783 return rcStrict;
2784 }
2785 }
2786 else
2787 {
2788 LogRel(("MsrExit/%u: %04x:%08RX64/%s: %sMSR %08x -> %Rrc - msr state import\n",
2789 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2790 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? "WR" : "RD",
2791 pMsg->MsrNumber, VBOXSTRICTRC_VAL(rcStrict) ));
2792 return rcStrict;
2793 }
2794 }
2795 else if (pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
2796 Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); WRMSR %08x, %08x:%08x\n",
2797 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2798 pMsg->Header.ExecutionState.Cpl, pMsg->MsrNumber, (uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx ));
2799 else
2800 Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); RDMSR %08x\n",
2801 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2802 pMsg->Header.ExecutionState.Cpl, pMsg->MsrNumber));
2803
2804 /*
2805 * If we get down here, we're supposed to #GP(0).
2806 */
2807 rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx,
2808 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL_MSRS, "MSR");
2809 if (rcStrict == VINF_SUCCESS)
2810 {
2811 rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_GP, TRPM_TRAP, 0, 0, 0);
2812 if (rcStrict == VINF_IEM_RAISED_XCPT)
2813 rcStrict = VINF_SUCCESS;
2814 else if (rcStrict != VINF_SUCCESS)
2815 Log4(("MsrExit/%u: Injecting #GP(0) failed: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict) ));
2816 }
2817 return rcStrict;
2818}
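
/*
 * Added note (not part of the original file): the CPL check above implements
 * the architectural rule that RDMSR/WRMSR are privileged instructions; at
 * CPL > 0 they must raise #GP(0), which is what the IEMInjectTrap fallback
 * at the end of the function does.
 */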
2819#elif defined(IN_RING3)
2820/**
2821 * Deals with MSR access exits (WHvRunVpExitReasonX64MsrAccess).
2822 *
2823 * @returns Strict VBox status code.
2824 * @param pVM The cross context VM structure.
2825 * @param pVCpu The cross context per CPU structure.
2826 * @param pExit The VM exit information to handle.
2827 * @param pCtx The register context.
2828 * @sa nemHCWinHandleMessageMsr
2829 */
2830NEM_TMPL_STATIC VBOXSTRICTRC
2831nemR3WinHandleExitMsr(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx)
2832{
2833 AssertMsg(pExit->VpContext.InstructionLength < 0x10, ("%#x\n", pExit->VpContext.InstructionLength));
2834
2835 /*
2836 * Check CPL as that's common to both RDMSR and WRMSR.
2837 */
2838 VBOXSTRICTRC rcStrict;
2839 if (pExit->VpContext.ExecutionState.Cpl == 0)
2840 {
2841 /*
2842 * Get all the MSR state. Since we're getting EFER, we also need to
2843 * get CR0, CR4 and CR3.
2844 */
2845 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2846 pExit->MsrAccess.AccessInfo.IsWrite
2847 ? EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE)
2848 : EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ),
2849 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
2850 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
2851 rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL, pCtx,
2852 (!pExitRec ? 0 : IEM_CPUMCTX_EXTRN_MUST_MASK)
2853 | CPUMCTX_EXTRN_ALL_MSRS | CPUMCTX_EXTRN_CR0
2854 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4,
2855 "MSRs");
2856 if (rcStrict == VINF_SUCCESS)
2857 {
2858 if (!pExitRec)
2859 {
2860 /*
2861 * Handle writes.
2862 */
2863 if (pExit->MsrAccess.AccessInfo.IsWrite)
2864 {
2865 rcStrict = CPUMSetGuestMsr(pVCpu, pExit->MsrAccess.MsrNumber,
2866 RT_MAKE_U64((uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx));
2867 Log4(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
2868 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->MsrAccess.MsrNumber,
2869 (uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx, VBOXSTRICTRC_VAL(rcStrict) ));
2870 if (rcStrict == VINF_SUCCESS)
2871 {
2872 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pExit->VpContext);
2873 return VINF_SUCCESS;
2874 }
2875 LogRel(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc!\n", pVCpu->idCpu,
2876 pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2877 pExit->MsrAccess.MsrNumber, (uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx,
2878 VBOXSTRICTRC_VAL(rcStrict) ));
2879 }
2880 /*
2881 * Handle reads.
2882 */
2883 else
2884 {
2885 uint64_t uValue = 0;
2886 rcStrict = CPUMQueryGuestMsr(pVCpu, pExit->MsrAccess.MsrNumber, &uValue);
2887 Log4(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n", pVCpu->idCpu,
2888 pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2889 pExit->MsrAccess.MsrNumber, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2890 if (rcStrict == VINF_SUCCESS)
2891 {
2892 pCtx->rax = (uint32_t)uValue;
2893 pCtx->rdx = uValue >> 32;
2894 pCtx->fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX);
2895 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pExit->VpContext);
2896 return VINF_SUCCESS;
2897 }
2898 LogRel(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
2899 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->MsrAccess.MsrNumber,
2900 uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2901 }
2902 }
2903 else
2904 {
2905 /*
2906 * Handle frequent exit or something needing probing.
2907 */
2908 Log4(("MsrExit/%u: %04x:%08RX64/%s: %sMSR %#08x\n",
2909 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2910 pExit->MsrAccess.AccessInfo.IsWrite ? "WR" : "RD", pExit->MsrAccess.MsrNumber));
2911 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
2912 Log4(("MsrExit/%u: %04x:%08RX64/%s: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
2913 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2914 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2915 return rcStrict;
2916 }
2917 }
2918 else
2919 {
2920 LogRel(("MsrExit/%u: %04x:%08RX64/%s: %sMSR %08x -> %Rrc - msr state import\n",
2921 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2922 pExit->MsrAccess.AccessInfo.IsWrite ? "WR" : "RD", pExit->MsrAccess.MsrNumber, VBOXSTRICTRC_VAL(rcStrict) ));
2923 return rcStrict;
2924 }
2925 }
2926 else if (pExit->MsrAccess.AccessInfo.IsWrite)
2927 Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); WRMSR %08x, %08x:%08x\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
2928 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.ExecutionState.Cpl,
2929 pExit->MsrAccess.MsrNumber, (uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx ));
2930 else
2931 Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); RDMSR %08x\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
2932 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.ExecutionState.Cpl,
2933 pExit->MsrAccess.MsrNumber));
2934
2935 /*
2936 * If we get down here, we're supposed to #GP(0).
2937 */
2938 rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL, pCtx,
2939 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL_MSRS, "MSR");
2940 if (rcStrict == VINF_SUCCESS)
2941 {
2942 rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_GP, TRPM_TRAP, 0, 0, 0);
2943 if (rcStrict == VINF_IEM_RAISED_XCPT)
2944 rcStrict = VINF_SUCCESS;
2945 else if (rcStrict != VINF_SUCCESS)
2946 Log4(("MsrExit/%u: Injecting #GP(0) failed: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict) ));
2947 }
2948
2949 RT_NOREF_PV(pVM);
2950 return rcStrict;
2951}
2952#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
2953
2954
2955/**
2956 * Worker for nemHCWinHandleMessageException & nemR3WinHandleExitException that
2957 * checks if the given opcodes are of interest at all.
2958 *
2959 * @returns true if interesting, false if not.
2960 * @param cbOpcodes Number of opcode bytes available.
2961 * @param pbOpcodes The opcode bytes.
2962 * @param f64BitMode Whether we're in 64-bit mode.
2963 */
2964DECLINLINE(bool) nemHcWinIsInterestingUndefinedOpcode(uint8_t cbOpcodes, uint8_t const *pbOpcodes, bool f64BitMode)
2965{
2966 /*
2967 * Currently only interested in VMCALL and VMMCALL.
2968 */
2969 while (cbOpcodes >= 3)
2970 {
2971 switch (pbOpcodes[0])
2972 {
2973 case 0x0f:
2974 switch (pbOpcodes[1])
2975 {
2976 case 0x01:
2977 switch (pbOpcodes[2])
2978 {
2979 case 0xc1: /* 0f 01 c1 VMCALL */
2980 return true;
2981 case 0xd9: /* 0f 01 d9 VMMCALL */
2982 return true;
2983 default:
2984 break;
2985 }
2986 break;
2987 }
2988 break;
2989
2990 default:
2991 return false;
2992
2993 /* prefixes */
2994 case 0x40: case 0x41: case 0x42: case 0x43: case 0x44: case 0x45: case 0x46: case 0x47:
2995 case 0x48: case 0x49: case 0x4a: case 0x4b: case 0x4c: case 0x4d: case 0x4e: case 0x4f:
2996 if (!f64BitMode)
2997 return false;
2998 RT_FALL_THRU();
2999 case X86_OP_PRF_CS:
3000 case X86_OP_PRF_SS:
3001 case X86_OP_PRF_DS:
3002 case X86_OP_PRF_ES:
3003 case X86_OP_PRF_FS:
3004 case X86_OP_PRF_GS:
3005 case X86_OP_PRF_SIZE_OP:
3006 case X86_OP_PRF_SIZE_ADDR:
3007 case X86_OP_PRF_LOCK:
3008 case X86_OP_PRF_REPZ:
3009 case X86_OP_PRF_REPNZ:
3010 cbOpcodes--;
3011 pbOpcodes++;
3012 continue;
3013 }
3014 break;
3015 }
3016 return false;
3017}
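
/*
 * Illustrative usage (added commentary, not part of the original file): the
 * filter above accepts VMCALL/VMMCALL with any number of recognized prefixes
 * in front.  A hypothetical self-test could exercise it like this:
 */
#if 0
static void nemHcWinSketchTestUndefinedOpcodeFilter(void)
{
    static uint8_t const s_abVmCall[]  = { 0x0f, 0x01, 0xc1 };       /* VMCALL */
    static uint8_t const s_abVmmCall[] = { 0xf3, 0x0f, 0x01, 0xd9 }; /* REPZ prefix + VMMCALL; the prefix is skipped */
    static uint8_t const s_abRdtsc[]   = { 0x0f, 0x31 };             /* RDTSC - too short and not interesting */
    Assert( nemHcWinIsInterestingUndefinedOpcode(sizeof(s_abVmCall),  s_abVmCall,  false /*f64BitMode*/));
    Assert( nemHcWinIsInterestingUndefinedOpcode(sizeof(s_abVmmCall), s_abVmmCall, false /*f64BitMode*/));
    Assert(!nemHcWinIsInterestingUndefinedOpcode(sizeof(s_abRdtsc),   s_abRdtsc,   false /*f64BitMode*/));
}
#endif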
3018
3019
3020#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
3021/**
3022 * Copies state included in an exception intercept message.
3023 *
3024 * @param pVCpu The cross context per CPU structure.
3025 * @param pMsg The message.
3026 * @param pCtx The register context.
3027 * @param fClearXcpt Clear pending exception.
3028 */
3029DECLINLINE(void) nemHCWinCopyStateFromExceptionMessage(PVMCPU pVCpu, HV_X64_EXCEPTION_INTERCEPT_MESSAGE const *pMsg,
3030 PCPUMCTX pCtx, bool fClearXcpt)
3031{
3032 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, &pMsg->Header);
3033 pCtx->fExtrn &= ~( CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_DS
3034 | (fClearXcpt ? CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT : 0) );
3035 pCtx->rax = pMsg->Rax;
3036 pCtx->rcx = pMsg->Rcx;
3037 pCtx->rdx = pMsg->Rdx;
3038 pCtx->rbx = pMsg->Rbx;
3039 pCtx->rsp = pMsg->Rsp;
3040 pCtx->rbp = pMsg->Rbp;
3041 pCtx->rsi = pMsg->Rsi;
3042 pCtx->rdi = pMsg->Rdi;
3043 pCtx->r8 = pMsg->R8;
3044 pCtx->r9 = pMsg->R9;
3045 pCtx->r10 = pMsg->R10;
3046 pCtx->r11 = pMsg->R11;
3047 pCtx->r12 = pMsg->R12;
3048 pCtx->r13 = pMsg->R13;
3049 pCtx->r14 = pMsg->R14;
3050 pCtx->r15 = pMsg->R15;
3051 NEM_WIN_COPY_BACK_SEG(pCtx->ds, pMsg->DsSegment);
3052 NEM_WIN_COPY_BACK_SEG(pCtx->ss, pMsg->SsSegment);
3053}
3054#elif defined(IN_RING3)
3055/**
3056 * Copies state included in an exception intercept exit.
3057 *
3058 * @param pVCpu The cross context per CPU structure.
3059 * @param pExit The VM exit information.
3060 * @param pCtx The register context.
3061 * @param fClearXcpt Clear pending exception.
3062 */
3063DECLINLINE(void) nemR3WinCopyStateFromExceptionMessage(PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit,
3064 PCPUMCTX pCtx, bool fClearXcpt)
3065{
3066 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
3067 if (fClearXcpt)
3068 pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
3069}
3070#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
3071
3072
3073#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
3074/**
3075 * Deals with exception intercept message (HvMessageTypeX64ExceptionIntercept).
3076 *
3077 * @returns Strict VBox status code.
3078 * @param pVCpu The cross context per CPU structure.
3079 * @param pMsg The message.
3080 * @param pCtx The register context.
3081 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
3082 * @sa nemR3WinHandleExitException
3083 */
3084NEM_TMPL_STATIC VBOXSTRICTRC
3085nemHCWinHandleMessageException(PVMCPU pVCpu, HV_X64_EXCEPTION_INTERCEPT_MESSAGE const *pMsg, PCPUMCTX pCtx, PGVMCPU pGVCpu)
3086{
3087 /*
3088 * Assert sanity.
3089 */
3090 AssertMsg(pMsg->Header.InstructionLength < 0x10, ("%#x\n", pMsg->Header.InstructionLength));
3091 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
3092 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
3093 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_EXECUTE);
3094
3095 /*
3096 * Get most of the register state since we'll end up making IEM inject the
3097 * event. The exception isn't normally flagged as a pending event, so duh.
3098 *
3099 * Note! We can optimize this later with event injection.
3100 */
3101 Log4(("XcptExit/%u: %04x:%08RX64/%s: %x errcd=%#x parm=%RX64\n",
3102 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
3103 pMsg->ExceptionVector, pMsg->ErrorCode, pMsg->ExceptionParameter));
3104 nemHCWinCopyStateFromExceptionMessage(pVCpu, pMsg, pCtx, true /*fClearXcpt*/);
3105 uint64_t fWhat = NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM;
3106 if (pMsg->ExceptionVector == X86_XCPT_DB)
3107 fWhat |= CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_DR6;
3108 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx, fWhat, "Xcpt");
3109 if (rcStrict != VINF_SUCCESS)
3110 return rcStrict;
3111
3112 /*
3113 * Handle the intercept.
3114 */
3115 TRPMEVENT enmEvtType = TRPM_TRAP;
3116 switch (pMsg->ExceptionVector)
3117 {
3118 /*
3119 * We get undefined opcodes on VMMCALL(AMD) & VMCALL(Intel) instructions
3120 * and need to turn them over to GIM.
3121 *
3122 * Note! We do not check fGIMTrapXcptUD here ASSUMING that GIM only wants
3123 * #UD for handling non-native hypercall instructions. (IEM will
3124 * decode both and let the GIM provider decide whether to accept it.)
3125 */
3126 case X86_XCPT_UD:
3127 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUd);
3128 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_UD),
3129 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
3130
3131 if (nemHcWinIsInterestingUndefinedOpcode(pMsg->InstructionByteCount, pMsg->InstructionBytes,
3132 pMsg->Header.ExecutionState.EferLma && pMsg->Header.CsSegment.Long ))
3133 {
3134 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pMsg->Header.Rip, pMsg->InstructionBytes,
3135 pMsg->InstructionByteCount);
3136 Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD -> emulated -> %Rrc\n",
3137 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip,
3138 nemHCWinExecStateToLogStr(&pMsg->Header), VBOXSTRICTRC_VAL(rcStrict) ));
3139 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUdHandled);
3140 return rcStrict;
3141 }
3142 Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD [%.*Rhxs] -> re-injected\n", pVCpu->idCpu, pMsg->Header.CsSegment.Selector,
3143 pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), pMsg->InstructionByteCount, pMsg->InstructionBytes ));
3144 break;
3145
3146 /*
3147 * Filter debug exceptions.
3148 */
3149 case X86_XCPT_DB:
3150 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionDb);
3151 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_DB),
3152 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
3153 Log4(("XcptExit/%u: %04x:%08RX64/%s: #DB - TODO\n",
3154 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header) ));
3155 break;
3156
3157 case X86_XCPT_BP:
3158 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionBp);
3159 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_BP),
3160 pMsg->Header.Rip + pMsg->Header.CsSegment.Base, ASMReadTSC());
3161 Log4(("XcptExit/%u: %04x:%08RX64/%s: #BP - TODO - %u\n", pVCpu->idCpu, pMsg->Header.CsSegment.Selector,
3162 pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), pMsg->Header.InstructionLength));
3163 enmEvtType = TRPM_SOFTWARE_INT; /* We're at the INT3 instruction, not after it. */
3164 break;
3165
3166 /* This shouldn't happen. */
3167 default:
3168 AssertLogRelMsgFailedReturn(("ExceptionVector=%#x\n", pMsg->ExceptionVector), VERR_IEM_IPE_6);
3169 }
3170
3171 /*
3172 * Inject it.
3173 */
3174 rcStrict = IEMInjectTrap(pVCpu, pMsg->ExceptionVector, enmEvtType, pMsg->ErrorCode,
3175 pMsg->ExceptionParameter /*??*/, pMsg->Header.InstructionLength);
3176 Log4(("XcptExit/%u: %04x:%08RX64/%s: %#u -> injected -> %Rrc\n",
3177 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip,
3178 nemHCWinExecStateToLogStr(&pMsg->Header), pMsg->ExceptionVector, VBOXSTRICTRC_VAL(rcStrict) ));
3179 return rcStrict;
3180}
3181#elif defined(IN_RING3)
3182/**
3183 * Deals with exception exits (WHvRunVpExitReasonException).
3184 *
3185 * @returns Strict VBox status code.
3186 * @param pVM The cross context VM structure.
3187 * @param pVCpu The cross context per CPU structure.
3188 * @param pExit The VM exit information to handle.
3189 * @param pCtx The register context.
3190 * @sa nemHCWinHandleMessageException
3191 */
3192NEM_TMPL_STATIC VBOXSTRICTRC
3193nemR3WinHandleExitException(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx)
3194{
3195 /*
3196 * Assert sanity.
3197 */
3198 AssertMsg(pExit->VpContext.InstructionLength < 0x10, ("%#x\n", pExit->VpContext.InstructionLength));
3199
3200 /*
3201 * Get most of the register state since we'll end up making IEM inject the
3202 * event. The exception isn't normally flagged as a pending event, so duh.
3203 *
3204 * Note! We can optimize this later with event injection.
3205 */
3206 Log4(("XcptExit/%u: %04x:%08RX64/%s: %x errcd=%#x parm=%RX64\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3207 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpException.ExceptionType,
3208 pExit->VpException.ErrorCode, pExit->VpException.ExceptionParameter ));
3209 nemR3WinCopyStateFromExceptionMessage(pVCpu, pExit, pCtx, true /*fClearXcpt*/);
3210 uint64_t fWhat = NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM;
3211 if (pExit->VpException.ExceptionType == X86_XCPT_DB)
3212 fWhat |= CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_DR6;
3213 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL, pCtx, fWhat, "Xcpt");
3214 if (rcStrict != VINF_SUCCESS)
3215 return rcStrict;
3216
3217 /*
3218 * Handle the intercept.
3219 */
3220 TRPMEVENT enmEvtType = TRPM_TRAP;
3221 switch (pExit->VpException.ExceptionType)
3222 {
3223 /*
3224 * We get undefined opcodes on VMMCALL(AMD) & VMCALL(Intel) instructions
3225 * and need to turn them over to GIM.
3226 *
3227 * Note! We do not check fGIMTrapXcptUD here ASSUMING that GIM only wants
3228 * #UD for handling non-native hypercall instructions. (IEM will
3229 * decode both and let the GIM provider decide whether to accept it.)
3230 */
3231 case X86_XCPT_UD:
3232 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUd);
3233 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_UD),
3234 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
3235 if (nemHcWinIsInterestingUndefinedOpcode(pExit->VpException.InstructionByteCount, pExit->VpException.InstructionBytes,
3236 pExit->VpContext.ExecutionState.EferLma && pExit->VpContext.Cs.Long ))
3237 {
3238 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pExit->VpContext.Rip,
3239 pExit->VpException.InstructionBytes,
3240 pExit->VpException.InstructionByteCount);
3241 Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD -> emulated -> %Rrc\n",
3242 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip,
3243 nemR3WinExecStateToLogStr(&pExit->VpContext), VBOXSTRICTRC_VAL(rcStrict) ));
3244 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUdHandled);
3245 return rcStrict;
3246 }
3247
3248 Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD [%.*Rhxs] -> re-injected\n", pVCpu->idCpu,
3249 pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
3250 pExit->VpException.InstructionByteCount, pExit->VpException.InstructionBytes ));
3251 break;
3252
3253 /*
3254 * Filter debug exceptions.
3255 */
3256 case X86_XCPT_DB:
3257 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionDb);
3258 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_DB),
3259 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
3260 Log4(("XcptExit/%u: %04x:%08RX64/%s: #DB - TODO\n",
3261 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext) ));
3262 break;
3263
3264 case X86_XCPT_BP:
3265 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionBp);
3266 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_NEM, NEMEXITTYPE_XCPT_BP),
3267 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
3268 Log4(("XcptExit/%u: %04x:%08RX64/%s: #BP - TODO - %u\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3269 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.InstructionLength));
3270 enmEvtType = TRPM_SOFTWARE_INT; /* We're at the INT3 instruction, not after it. */
3271 break;
3272
3273 /* This shouldn't happen. */
3274 default:
3275 AssertLogRelMsgFailedReturn(("ExceptionType=%#x\n", pExit->VpException.ExceptionType), VERR_IEM_IPE_6);
3276 }
3277
3278 /*
3279 * Inject it.
3280 */
3281 rcStrict = IEMInjectTrap(pVCpu, pExit->VpException.ExceptionType, enmEvtType, pExit->VpException.ErrorCode,
3282 pExit->VpException.ExceptionParameter /*??*/, pExit->VpContext.InstructionLength);
3283 Log4(("XcptExit/%u: %04x:%08RX64/%s: %#u -> injected -> %Rrc\n",
3284 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip,
3285 nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpException.ExceptionType, VBOXSTRICTRC_VAL(rcStrict) ));
3286
3287 RT_NOREF_PV(pVM);
3288 return rcStrict;
3289}
3290#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
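
/*
 * Illustrative sketch (added commentary, not part of the original file): both
 * exception handlers above choose the TRPM event type before re-injecting via
 * IEMInjectTrap.  The mapping they implement boils down to:
 */
#if 0
static TRPMEVENT nemHcWinSketchEventTypeForXcpt(uint8_t bVector)
{
    /* #BP is reported with RIP still at the INT3 instruction, so it has to be
       re-injected as a software interrupt; the other vectors re-injected here
       are treated as traps. */
    return bVector == X86_XCPT_BP ? TRPM_SOFTWARE_INT : TRPM_TRAP;
}
#endif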
3291
3292
3293#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
3294/**
3295 * Deals with unrecoverable exception (triple fault).
3296 *
3297 * Seen WRMSR 0x201 (IA32_MTRR_PHYSMASK0) writes from grub / debian9 ending up
3298 * here too. So we'll leave it to IEM to decide.
3299 *
3300 * @returns Strict VBox status code.
3301 * @param pVCpu The cross context per CPU structure.
3302 * @param pMsgHdr The message header.
3303 * @param pCtx The register context.
3304 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
3305 * @sa nemR3WinHandleExitUnrecoverableException
3306 */
3307NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageUnrecoverableException(PVMCPU pVCpu,
3308 HV_X64_INTERCEPT_MESSAGE_HEADER const *pMsgHdr,
3309 PCPUMCTX pCtx, PGVMCPU pGVCpu)
3310{
3311 AssertMsg(pMsgHdr->InstructionLength < 0x10, ("%#x\n", pMsgHdr->InstructionLength));
3312
3313# if 0
3314 /*
3315 * Just copy the state we've got and handle it in the loop for now.
3316 */
3317 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, pMsgHdr);
3318 Log(("TripleExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT\n",
3319 pVCpu->idCpu, pMsgHdr->CsSegment.Selector, pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags));
3320 return VINF_EM_TRIPLE_FAULT;
3321# else
3322 /*
3323 * Let IEM decide whether this is really it.
3324 */
3325 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_NEM, NEMEXITTYPE_UNRECOVERABLE_EXCEPTION),
3326 pMsgHdr->Rip + pMsgHdr->CsSegment.Base, ASMReadTSC());
3327 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, pMsgHdr);
3328 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx,
3329 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL, "TripleExit");
3330 if (rcStrict == VINF_SUCCESS)
3331 {
3332 rcStrict = IEMExecOne(pVCpu);
3333 if (rcStrict == VINF_SUCCESS)
3334 {
3335 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_SUCCESS\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
3336 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags ));
3337 pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT; /* Make sure to reset pending #DB(0). */
3338 return VINF_SUCCESS;
3339 }
3340 if (rcStrict == VINF_EM_TRIPLE_FAULT)
3341 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT!\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
3342 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags));
3343 else
3344 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (IEMExecOne)\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
3345 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
3346 }
3347 else
3348 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (state import)\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
3349 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
3350 return rcStrict;
3351# endif
3352}
3353#elif defined(IN_RING3)
3354/**
3355 * Deals with unrecoverable exception exits (WHvRunVpExitReasonUnrecoverableException).
3356 *
3357 * @returns Strict VBox status code.
3358 * @param pVM The cross context VM structure.
3359 * @param pVCpu The cross context per CPU structure.
3360 * @param pExit The VM exit information to handle.
3361 * @param pCtx The register context.
3362 * @sa nemHCWinHandleMessageUnrecoverableException
3363 */
3364NEM_TMPL_STATIC VBOXSTRICTRC
3365nemR3WinHandleExitUnrecoverableException(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx)
3366{
3367 AssertMsg(pExit->VpContext.InstructionLength < 0x10, ("%#x\n", pExit->VpContext.InstructionLength));
3368
3369# if 0
3370 /*
3371 * Just copy the state we've got and handle it in the loop for now.
3372 */
3373 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
3374 Log(("TripleExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3375 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags));
3376 RT_NOREF_PV(pVM);
3377 return VINF_EM_TRIPLE_FAULT;
3378# else
3379 /*
3380 * Let IEM decide whether this is really it.
3381 */
3382 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_NEM, NEMEXITTYPE_UNRECOVERABLE_EXCEPTION),
3383 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
3384 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
3385 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL, pCtx,
3386 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL, "TripleExit");
3387 if (rcStrict == VINF_SUCCESS)
3388 {
3389 rcStrict = IEMExecOne(pVCpu);
3390 if (rcStrict == VINF_SUCCESS)
3391 {
3392 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_SUCCESS\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3393 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags));
3394 pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT; /* Make sure to reset pending #DB(0). */
3395 return VINF_SUCCESS;
3396 }
3397 if (rcStrict == VINF_EM_TRIPLE_FAULT)
3398 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT!\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3399 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags));
3400 else
3401 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (IEMExecOne)\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3402 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
3403 }
3404 else
3405 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (state import)\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3406 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
3407 RT_NOREF_PV(pVM);
3408 return rcStrict;
3409# endif
3410
3411}
3412#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
3413
3414
3415#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
3416/**
3417 * Handles messages (VM exits).
3418 *
3419 * @returns Strict VBox status code.
3420 * @param pVM The cross context VM structure.
3421 * @param pVCpu The cross context per CPU structure.
3422 * @param pMappingHeader The message slot mapping.
3423 * @param pCtx The register context.
3424 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
3425 * @sa nemR3WinHandleExit
3426 */
3427NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessage(PVM pVM, PVMCPU pVCpu, VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader,
3428 PCPUMCTX pCtx, PGVMCPU pGVCpu)
3429{
3430 if (pMappingHeader->enmVidMsgType == VidMessageHypervisorMessage)
3431 {
3432 AssertMsg(pMappingHeader->cbMessage == HV_MESSAGE_SIZE, ("%#x\n", pMappingHeader->cbMessage));
3433 HV_MESSAGE const *pMsg = (HV_MESSAGE const *)(pMappingHeader + 1);
3434 switch (pMsg->Header.MessageType)
3435 {
3436 case HvMessageTypeUnmappedGpa:
3437 Assert(pMsg->Header.PayloadSize == RT_UOFFSETOF(HV_X64_MEMORY_INTERCEPT_MESSAGE, DsSegment));
3438 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemUnmapped);
3439 return nemHCWinHandleMessageMemory(pVM, pVCpu, &pMsg->X64MemoryIntercept, pCtx, pGVCpu);
3440
3441 case HvMessageTypeGpaIntercept:
3442 Assert(pMsg->Header.PayloadSize == RT_UOFFSETOF(HV_X64_MEMORY_INTERCEPT_MESSAGE, DsSegment));
3443 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemIntercept);
3444 return nemHCWinHandleMessageMemory(pVM, pVCpu, &pMsg->X64MemoryIntercept, pCtx, pGVCpu);
3445
3446 case HvMessageTypeX64IoPortIntercept:
3447 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64IoPortIntercept));
3448 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitPortIo);
3449 return nemHCWinHandleMessageIoPort(pVM, pVCpu, &pMsg->X64IoPortIntercept, pCtx, pGVCpu);
3450
3451 case HvMessageTypeX64Halt:
3452 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitHalt);
3453 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_NEM, NEMEXITTYPE_HALT),
3454 pMsg->X64InterceptHeader.Rip + pMsg->X64InterceptHeader.CsSegment.Base, ASMReadTSC());
3455 Log4(("HaltExit\n"));
3456 return VINF_EM_HALT;
3457
3458 case HvMessageTypeX64InterruptWindow:
3459 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64InterruptWindow));
3460 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitInterruptWindow);
3461 return nemHCWinHandleMessageInterruptWindow(pVM, pVCpu, &pMsg->X64InterruptWindow, pCtx, pGVCpu);
3462
3463 case HvMessageTypeX64CpuidIntercept:
3464 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64CpuIdIntercept));
3465 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitCpuId);
3466 return nemHCWinHandleMessageCpuId(pVM, pVCpu, &pMsg->X64CpuIdIntercept, pGVCpu);
3467
3468 case HvMessageTypeX64MsrIntercept:
3469 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64MsrIntercept));
3470 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMsr);
3471 return nemHCWinHandleMessageMsr(pVCpu, &pMsg->X64MsrIntercept, pCtx, pGVCpu);
3472
3473 case HvMessageTypeX64ExceptionIntercept:
3474 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64ExceptionIntercept));
3475 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitException);
3476 return nemHCWinHandleMessageException(pVCpu, &pMsg->X64ExceptionIntercept, pCtx, pGVCpu);
3477
3478 case HvMessageTypeUnrecoverableException:
3479 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64InterceptHeader));
3480 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitUnrecoverable);
3481 return nemHCWinHandleMessageUnrecoverableException(pVCpu, &pMsg->X64InterceptHeader, pCtx, pGVCpu);
3482
3483 case HvMessageTypeInvalidVpRegisterValue:
3484 case HvMessageTypeUnsupportedFeature:
3485 case HvMessageTypeTlbPageSizeMismatch:
3486 LogRel(("Unimplemented msg:\n%.*Rhxd\n", (int)sizeof(*pMsg), pMsg));
3487 AssertLogRelMsgFailedReturn(("Message type %#x not implemented!\n%.32Rhxd\n", pMsg->Header.MessageType, pMsg),
3488 VERR_NEM_IPE_3);
3489
3490 case HvMessageTypeX64ApicEoi:
3491 case HvMessageTypeX64LegacyFpError:
3492 case HvMessageTypeX64RegisterIntercept:
3493 case HvMessageTypeApicEoi:
3494 case HvMessageTypeFerrAsserted:
3495 case HvMessageTypeEventLogBufferComplete:
3496 case HvMessageTimerExpired:
3497 LogRel(("Unexpected msg:\n%.*Rhxd\n", (int)sizeof(*pMsg), pMsg));
3498 AssertLogRelMsgFailedReturn(("Unexpected message on CPU #%u: %#x\n", pVCpu->idCpu, pMsg->Header.MessageType),
3499 VERR_NEM_IPE_3);
3500
3501 default:
3502 LogRel(("Unknown msg:\n%.*Rhxd\n", (int)sizeof(*pMsg), pMsg));
3503 AssertLogRelMsgFailedReturn(("Unknown message on CPU #%u: %#x\n", pVCpu->idCpu, pMsg->Header.MessageType),
3504 VERR_NEM_IPE_3);
3505 }
3506 }
3507 else
3508 AssertLogRelMsgFailedReturn(("Unexpected VID message type on CPU #%u: %#x LB %u\n",
3509 pVCpu->idCpu, pMappingHeader->enmVidMsgType, pMappingHeader->cbMessage),
3510 VERR_NEM_IPE_4);
3511}
3512#elif defined(IN_RING3)
3513/**
3514 * Handles VM exits.
3515 *
3516 * @returns Strict VBox status code.
3517 * @param pVM The cross context VM structure.
3518 * @param pVCpu The cross context per CPU structure.
3519 * @param pExit The VM exit information to handle.
3520 * @param pCtx The register context.
3521 * @sa nemHCWinHandleMessage
3522 */
3523NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExit(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx)
3524{
3525 switch (pExit->ExitReason)
3526 {
3527 case WHvRunVpExitReasonMemoryAccess:
3528 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemUnmapped);
3529 return nemR3WinHandleExitMemory(pVM, pVCpu, pExit, pCtx);
3530
3531 case WHvRunVpExitReasonX64IoPortAccess:
3532 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitPortIo);
3533 return nemR3WinHandleExitIoPort(pVM, pVCpu, pExit, pCtx);
3534
3535 case WHvRunVpExitReasonX64Halt:
3536 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitHalt);
3537 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_NEM, NEMEXITTYPE_HALT),
3538 pExit->VpContext.Rip + pExit->VpContext.Cs.Base, ASMReadTSC());
3539 Log4(("HaltExit\n"));
3540 return VINF_EM_HALT;
3541
3542 case WHvRunVpExitReasonCanceled:
3543 return VINF_SUCCESS;
3544
3545 case WHvRunVpExitReasonX64InterruptWindow:
3546 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitInterruptWindow);
3547 return nemR3WinHandleExitInterruptWindow(pVM, pVCpu, pExit, pCtx);
3548
3549 case WHvRunVpExitReasonX64Cpuid:
3550 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitCpuId);
3551 return nemR3WinHandleExitCpuId(pVM, pVCpu, pExit);
3552
3553 case WHvRunVpExitReasonX64MsrAccess:
3554 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMsr);
3555 return nemR3WinHandleExitMsr(pVM, pVCpu, pExit, pCtx);
3556
3557 case WHvRunVpExitReasonException:
3558 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitException);
3559 return nemR3WinHandleExitException(pVM, pVCpu, pExit, pCtx);
3560
3561 case WHvRunVpExitReasonUnrecoverableException:
3562 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitUnrecoverable);
3563 return nemR3WinHandleExitUnrecoverableException(pVM, pVCpu, pExit, pCtx);
3564
3565 case WHvRunVpExitReasonUnsupportedFeature:
3566 case WHvRunVpExitReasonInvalidVpRegisterValue:
3567 LogRel(("Unimplemented exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit));
3568 AssertLogRelMsgFailedReturn(("Unexpected exit on CPU #%u: %#x\n%.32Rhxd\n",
3569 pVCpu->idCpu, pExit->ExitReason, pExit), VERR_NEM_IPE_3);
3570
3571 /* Undesired exits: */
3572 case WHvRunVpExitReasonNone:
3573 default:
3574 LogRel(("Unknown exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit));
3575 AssertLogRelMsgFailedReturn(("Unknown exit on CPU #%u: %#x!\n", pVCpu->idCpu, pExit->ExitReason), VERR_NEM_IPE_3);
3576 }
3577}
3578#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
3579
3580
3581#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
3582/**
3583 * Worker for nemHCWinRunGC that stops the execution on the way out.
3584 *
3585 * The CPU was running the last time we checked, so there are no messages that
3586 * need to be marked as handled. The caller checks this.
3587 *
3588 * @returns rcStrict on success, error status on failure.
3589 * @param pVM The cross context VM structure.
3590 * @param pVCpu The cross context per CPU structure.
3591 * @param rcStrict The nemHCWinRunGC return status. This is a little
3592 * bit unnecessary, except in internal error cases,
3593 * since we won't need to stop the CPU if we took an
3594 * exit.
3595 * @param pMappingHeader The message slot mapping.
3596 * @param pGVM The global (ring-0) VM structure (NULL in r3).
3597 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
3598 */
3599NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinStopCpu(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict,
3600 VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader,
3601 PGVM pGVM, PGVMCPU pGVCpu)
3602{
3603 /*
3604 * Try stopping the processor. If we're lucky we manage to do this before it
3605 * does another VM exit.
3606 */
3607# ifdef IN_RING0
3608 pVCpu->nem.s.uIoCtlBuf.idCpu = pGVCpu->idCpu;
3609 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlStopVirtualProcessor.uFunction,
3610 &pVCpu->nem.s.uIoCtlBuf.idCpu, sizeof(pVCpu->nem.s.uIoCtlBuf.idCpu),
3611 NULL, 0);
3612 if (NT_SUCCESS(rcNt))
3613 {
3614 Log8(("nemHCWinStopCpu: Stopping CPU succeeded (cpu status %u)\n", nemHCWinCpuGetRunningStatus(pVCpu) ));
3615 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuSuccess);
3616 return rcStrict;
3617 }
3618# else
3619 BOOL fRet = VidStopVirtualProcessor(pVM->nem.s.hPartitionDevice, pVCpu->idCpu);
3620 if (fRet)
3621 {
3622 Log8(("nemHCWinStopCpu: Stopping CPU succeeded (cpu status %u)\n", nemHCWinCpuGetRunningStatus(pVCpu) ));
3623 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuSuccess);
3624 return rcStrict;
3625 }
3626 RT_NOREF(pGVM, pGVCpu);
3627# endif
3628
3629 /*
3630 * Dang. The CPU stopped by itself and we got a couple of messages to deal with.
3631 */
3632# ifdef IN_RING0
3633 AssertLogRelMsgReturn(rcNt == ERROR_VID_STOP_PENDING, ("rcNt=%#x\n", rcNt),
3634 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3635# else
3636 DWORD dwErr = RTNtLastErrorValue();
3637 AssertLogRelMsgReturn(dwErr == ERROR_VID_STOP_PENDING, ("dwErr=%#u (%#x)\n", dwErr, dwErr),
3638 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3639# endif
3640 Log8(("nemHCWinStopCpu: Stopping CPU #%u pending...\n", pVCpu->idCpu));
3641 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuPending);
3642
3643 /*
3644 * First message: Exit or similar, sometimes VidMessageStopRequestComplete.
3645 * Note! We can safely ASSUME that rcStrict isn't carrying any important information here.
3646 */
3647# ifdef IN_RING0
3648 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu = pGVCpu->idCpu;
3649 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags = VID_MSHAGN_F_GET_NEXT_MESSAGE;
3650 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = 30000; /*ms*/
3651 rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
3652 &pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
3653 sizeof(pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext),
3654 NULL, 0);
3655 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("1st VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %#x\n", rcNt),
3656 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3657# else
3658 BOOL fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu,
3659 VID_MSHAGN_F_GET_NEXT_MESSAGE, 30000 /*ms*/);
3660 AssertLogRelMsgReturn(fWait, ("1st VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()),
3661 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3662# endif
3663
3664 VID_MESSAGE_TYPE enmVidMsgType = pMappingHeader->enmVidMsgType;
3665 if (enmVidMsgType != VidMessageStopRequestComplete)
3666 {
3667 VBOXSTRICTRC rcStrict2 = nemHCWinHandleMessage(pVM, pVCpu, pMappingHeader, CPUMQueryGuestCtxPtr(pVCpu), pGVCpu);
3668 if (rcStrict2 != VINF_SUCCESS && RT_SUCCESS(rcStrict))
3669 rcStrict = rcStrict2;
3670
3671 /*
3672 * Mark it as handled and get the stop request completed message, then mark
3673 * that as handled too. The CPU is then back in the fully stopped state.
3674 */
3675# ifdef IN_RING0
3676 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu = pGVCpu->idCpu;
3677 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags = VID_MSHAGN_F_HANDLE_MESSAGE | VID_MSHAGN_F_GET_NEXT_MESSAGE;
3678 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = 30000; /*ms*/
3679 rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
3680 &pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
3681 sizeof(pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext),
3682 NULL, 0);
3683 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("2nd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %#x\n", rcNt),
3684 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3685# else
3686 fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu,
3687 VID_MSHAGN_F_HANDLE_MESSAGE | VID_MSHAGN_F_GET_NEXT_MESSAGE, 30000 /*ms*/);
3688 AssertLogRelMsgReturn(fWait, ("2nd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()),
3689 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3690# endif
3691
3692 /* It should be a stop request completed message. */
3693 enmVidMsgType = pMappingHeader->enmVidMsgType;
3694 AssertLogRelMsgReturn(enmVidMsgType == VidMessageStopRequestComplete,
3695 ("Unexpected 2nd message following ERROR_VID_STOP_PENDING: %#x LB %#x\n",
3696 enmVidMsgType, pMappingHeader->cbMessage),
3697 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3698
3699 /*
3700 * Mark the VidMessageStopRequestComplete message as handled.
3701 */
3702# ifdef IN_RING0
3703 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu = pGVCpu->idCpu;
3704 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags = VID_MSHAGN_F_HANDLE_MESSAGE;
3705 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = 30000; /*ms*/
3706 rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
3707 &pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
3708 sizeof(pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext),
3709 NULL, 0);
3710 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("3rd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %#x\n", rcNt),
3711 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3712# else
3713 fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu, VID_MSHAGN_F_HANDLE_MESSAGE, 30000 /*ms*/);
3714 AssertLogRelMsgReturn(fWait, ("3rd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()),
3715 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3716# endif
3717 Log8(("nemHCWinStopCpu: Stopped the CPU (rcStrict=%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict) ));
3718 }
3719 else
3720 {
3721 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuPendingOdd);
3722 Log8(("nemHCWinStopCpu: Stopped the CPU (rcStrict=%Rrc) - 1st VidMessageSlotHandleAndGetNext got VidMessageStopRequestComplete.\n",
3723 VBOXSTRICTRC_VAL(rcStrict) ));
3724 }
3725 return rcStrict;
3726}
3727#endif /* NEM_WIN_USE_OUR_OWN_RUN_API */
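
/*
 * Added summary (not part of the original file) of the stop handshake that
 * nemHCWinStopCpu performs when the stop request comes back as
 * ERROR_VID_STOP_PENDING:
 *
 *   1. VidMessageSlotHandleAndGetNext(GET_NEXT)        -> pending exit message; handle it.
 *   2. VidMessageSlotHandleAndGetNext(HANDLE|GET_NEXT) -> VidMessageStopRequestComplete.
 *   3. VidMessageSlotHandleAndGetNext(HANDLE)          -> marks that handled; CPU fully stopped.
 *
 * If step 1 already yields VidMessageStopRequestComplete, steps 2 and 3 are
 * skipped (the StatStopCpuPendingOdd case).
 */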
3728
3729#if defined(NEM_WIN_USE_OUR_OWN_RUN_API) || defined(IN_RING3)
3730
3731/**
3732 * Deals with pending interrupt related force flags, may inject interrupt.
3733 *
3734 * @returns VBox strict status code.
3735 * @param pVM The cross context VM structure.
3736 * @param pVCpu The cross context per CPU structure.
3737 * @param pGVCpu The global (ring-0) per CPU structure.
3738 * @param pCtx The register context.
3739 * @param pfInterruptWindows Where to return interrupt window flags.
3740 */
3741NEM_TMPL_STATIC VBOXSTRICTRC
3742nemHCWinHandleInterruptFF(PVM pVM, PVMCPU pVCpu, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint8_t *pfInterruptWindows)
3743{
3744 Assert(!TRPMHasTrap(pVCpu));
3745 RT_NOREF_PV(pVM);
3746
3747 /*
3748 * First update APIC. We ASSUME this won't need TPR/CR8.
3749 */
3750 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
3751 {
3752 APICUpdatePendingInterrupts(pVCpu);
3753 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
3754 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
3755 return VINF_SUCCESS;
3756 }
3757
3758 /*
3759 * We don't currently implement SMIs.
3760 */
3761 AssertReturn(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_SMI), VERR_NEM_IPE_0);
3762
3763 /*
3764 * Check if we've got the minimum of state required for deciding whether we
3765 * can inject interrupts and NMIs. If we don't have it, get all we might require
3766 * for injection via IEM.
3767 */
3768 bool const fPendingNmi = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI);
3769 uint64_t fNeedExtrn = CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS
3770 | (fPendingNmi ? CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI : 0);
3771 if (pCtx->fExtrn & fNeedExtrn)
3772 {
3773 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx,
3774 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "IntFF");
3775 if (rcStrict != VINF_SUCCESS)
3776 return rcStrict;
3777 }
3778 bool const fInhibitInterrupts = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
3779 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip;
3780
3781 /*
3782 * NMI? Try deliver it first.
3783 */
3784 if (fPendingNmi)
3785 {
3786 if ( !fInhibitInterrupts
3787 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
3788 {
3789 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx,
3790 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "NMI");
3791 if (rcStrict == VINF_SUCCESS)
3792 {
3793 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
3794 rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_NMI, TRPM_HARDWARE_INT, 0, 0, 0);
3795 Log8(("Injected NMI on %u (%d)\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
3796 }
3797 return rcStrict;
3798 }
3799 *pfInterruptWindows |= NEM_WIN_INTW_F_NMI;
3800 Log8(("NMI window pending on %u\n", pVCpu->idCpu));
3801 }
3802
3803 /*
3804 * APIC or PIC interrupt?
3805 */
3806 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
3807 {
3808 if ( !fInhibitInterrupts
3809 && pCtx->rflags.Bits.u1IF)
3810 {
3811 AssertCompile(NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT & CPUMCTX_EXTRN_APIC_TPR);
3812 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx,
3813 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "IntFF");
3814 if (rcStrict == VINF_SUCCESS)
3815 {
3816 uint8_t bInterrupt;
3817 int rc = PDMGetInterrupt(pVCpu, &bInterrupt);
3818 if (RT_SUCCESS(rc))
3819 {
3820 rcStrict = IEMInjectTrap(pVCpu, bInterrupt, TRPM_HARDWARE_INT, 0, 0, 0);
3821 Log8(("Injected interrupt %#x on %u (%d)\n", bInterrupt, pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
3822 }
3823 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
3824 {
3825 *pfInterruptWindows |= (bInterrupt >> 4 /*??*/) << NEM_WIN_INTW_F_PRIO_SHIFT;
3826 Log8(("VERR_APIC_INTR_MASKED_BY_TPR: *pfInterruptWindows=%#x\n", *pfInterruptWindows));
3827 }
3828 else
3829 Log8(("PDMGetInterrupt failed -> %d\n", rc));
3830 }
3831 return rcStrict;
3832 }
3833 *pfInterruptWindows |= NEM_WIN_INTW_F_REGULAR;
3834 Log8(("Interrupt window pending on %u\n", pVCpu->idCpu));
3835 }
3836
3837 return VINF_SUCCESS;
3838}
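
/*
 * Illustrative sketch (added commentary, not part of the original file): the
 * VERR_APIC_INTR_MASKED_BY_TPR case above encodes the pending vector's
 * priority class - its upper nibble - into the interrupt window flags:
 */
#if 0
static uint8_t nemHcWinSketchIntWindowFlagsForTprMaskedIntr(uint8_t bVector)
{
    /* E.g. vector 0x51 is in priority class 5; requesting an interrupt window
       for that class makes the hypervisor exit once the TPR drops below it. */
    return (uint8_t)((bVector >> 4) << NEM_WIN_INTW_F_PRIO_SHIFT);
}
#endif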
3839
3840
3841/**
3842 * Inner NEM runloop for windows.
3843 *
3844 * @returns Strict VBox status code.
3845 * @param pVM The cross context VM structure.
3846 * @param pVCpu The cross context per CPU structure.
3847 * @param pGVM The ring-0 VM structure (NULL in ring-3).
3848 * @param pGVCpu The ring-0 per CPU structure (NULL in ring-3).
3849 */
3850NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinRunGC(PVM pVM, PVMCPU pVCpu, PGVM pGVM, PGVMCPU pGVCpu)
3851{
3852 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
3853 LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 <=\n", pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, pCtx->rflags));
3854# ifdef LOG_ENABLED
3855 if (LogIs3Enabled())
3856 nemHCWinLogState(pVM, pVCpu);
3857# endif
3858# ifdef IN_RING0
3859 Assert(pVCpu->idCpu == pGVCpu->idCpu);
3860# endif
3861
3862 /*
3863 * Try switch to NEM runloop state.
3864 */
3865 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
3866 { /* likely */ }
3867 else
3868 {
3869 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
3870 LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
3871 return VINF_SUCCESS;
3872 }
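
    /* Added summary (not part of the original file) of the VMCPU state
       transitions driving this run loop:
           STARTED               -> STARTED_EXEC_NEM      (attempted above on entry)
           STARTED_EXEC_NEM      -> STARTED_EXEC_NEM_WAIT (around the blocking run/wait call below)
           STARTED_EXEC_NEM_WAIT -> STARTED_EXEC_NEM      (once that call returns)
       A canceling thread instead moves the CPU to STARTED_EXEC_NEM_CANCELED,
       which the failed cmpxchg above detects and turns into an immediate
       VINF_SUCCESS return. */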
3873
3874 /*
3875 * The run loop.
3876 *
3877 * The current approach to state updating is to use the sledgehammer and sync
3878 * everything every time. This will be optimized later.
3879 */
3880# ifdef NEM_WIN_USE_OUR_OWN_RUN_API
3881 VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader = (VID_MESSAGE_MAPPING_HEADER volatile *)pVCpu->nem.s.pvMsgSlotMapping;
3882 uint32_t cMillies = 5000; /** @todo lower this later... */
3883# endif
3884 const bool fSingleStepping = DBGFIsStepping(pVCpu);
3885// const uint32_t fCheckVmFFs = !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK
3886// : VM_FF_HP_R0_PRE_HM_STEP_MASK;
3887// const uint32_t fCheckCpuFFs = !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK;
3888 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
3889 for (unsigned iLoop = 0;; iLoop++)
3890 {
3891# ifndef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
3892 /*
3893 * Hack alert!
3894 */
3895 uint32_t const cMappedPages = pVM->nem.s.cMappedPages;
3896 if (cMappedPages >= 4000)
3897 {
3898 PGMPhysNemEnumPagesByState(pVM, pVCpu, NEM_WIN_PAGE_STATE_READABLE, nemR3WinWHvUnmapOnePageCallback, NULL);
3899 Log(("nemHCWinRunGC: Unmapped all; cMappedPages=%u -> %u\n", cMappedPages, pVM->nem.s.cMappedPages));
3900 }
3901# endif
3902
3903 /*
3904 * Pending interrupts or such? Need to check and deal with this prior
3905 * to the state syncing.
3906 */
3907 pVCpu->nem.s.fDesiredInterruptWindows = 0;
3908 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC
3909 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
3910 {
3911# ifdef NEM_WIN_USE_OUR_OWN_RUN_API
3912 /* Make sure the CPU isn't executing. */
3913 if (pVCpu->nem.s.fHandleAndGetFlags == VID_MSHAGN_F_GET_NEXT_MESSAGE)
3914 {
3915 pVCpu->nem.s.fHandleAndGetFlags = 0;
3916 rcStrict = nemHCWinStopCpu(pVM, pVCpu, rcStrict, pMappingHeader, pGVM, pGVCpu);
3917 if (rcStrict == VINF_SUCCESS)
3918 { /* likely */ }
3919 else
3920 {
3921 LogFlow(("NEM/%u: breaking: nemHCWinStopCpu -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
3922 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
3923 break;
3924 }
3925 }
3926# endif
3927
3928 /* Try inject interrupt. */
3929 rcStrict = nemHCWinHandleInterruptFF(pVM, pVCpu, pGVCpu, pCtx, &pVCpu->nem.s.fDesiredInterruptWindows);
3930 if (rcStrict == VINF_SUCCESS)
3931 { /* likely */ }
3932 else
3933 {
3934 LogFlow(("NEM/%u: breaking: nemHCWinHandleInterruptFF -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
3935 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
3936 break;
3937 }
3938 }
3939
3940 /*
3941 * Ensure that hyper-V has the whole state.
3942 * (We always update the interrupt window settings when active, as hyper-V seems
3943 * to forget about it after an exit.)
3944 */
3945 if ( (pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK))
3946 != (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK)
3947 || pVCpu->nem.s.fDesiredInterruptWindows
3948 || pVCpu->nem.s.fCurrentInterruptWindows != pVCpu->nem.s.fDesiredInterruptWindows)
3949 {
3950# ifdef NEM_WIN_USE_OUR_OWN_RUN_API
3951 Assert(pVCpu->nem.s.fHandleAndGetFlags != VID_MSHAGN_F_GET_NEXT_MESSAGE /* not running */);
3952# endif
3953# ifdef IN_RING0
3954 int rc2 = nemR0WinExportState(pGVM, pGVCpu, pCtx);
3955# else
3956 int rc2 = nemHCWinCopyStateToHyperV(pVM, pVCpu, pCtx);
3957 RT_NOREF(pGVM, pGVCpu);
3958# endif
3959 AssertRCReturn(rc2, rc2);
3960 }
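
        /*
         * A minimal sketch of the CPUMCTX_EXTRN_* bookkeeping relied on above
         * (illustration only, hypothetical helper name): a bit set in
         * pCtx->fExtrn means that piece of guest state still lives in hyper-V
         * and must be imported before the VMM may read it; a clear bit means
         * pCtx holds the authoritative copy and must be exported back:
         *
         *     if (pCtx->fExtrn & CPUMCTX_EXTRN_RIP)      // RIP still lives in hyper-V.
         *     {
         *         importGuestRipFromHyperV(pVCpu, pCtx); // hypothetical helper
         *         pCtx->fExtrn &= ~CPUMCTX_EXTRN_RIP;    // pCtx->rip is now valid.
         *     }
         */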

        /*
         * Run a bit.
         */
        if (   !VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
            && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
        {
# ifdef NEM_WIN_USE_OUR_OWN_RUN_API
            if (pVCpu->nem.s.fHandleAndGetFlags)
            { /* Very likely that the CPU does NOT need starting (pending msg, running). */ }
            else
            {
#  ifdef IN_RING0
                pVCpu->nem.s.uIoCtlBuf.idCpu = pGVCpu->idCpu;
                NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlStartVirtualProcessor.uFunction,
                                                        &pVCpu->nem.s.uIoCtlBuf.idCpu, sizeof(pVCpu->nem.s.uIoCtlBuf.idCpu),
                                                        NULL, 0);
                LogFlow(("NEM/%u: IoCtlStartVirtualProcessor -> %#x\n", pVCpu->idCpu, rcNt));
                AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("VidStartVirtualProcessor failed for CPU #%u: %#x\n", pGVCpu->idCpu, rcNt),
                                      VERR_NEM_IPE_5);
#  else
                AssertLogRelMsgReturn(g_pfnVidStartVirtualProcessor(pVM->nem.s.hPartitionDevice, pVCpu->idCpu),
                                      ("VidStartVirtualProcessor failed for CPU #%u: %u (%#x, rcNt=%#x)\n",
                                       pVCpu->idCpu, RTNtLastErrorValue(), RTNtLastErrorValue(), RTNtLastStatusValue()),
                                      VERR_NEM_IPE_5);
#  endif
                pVCpu->nem.s.fHandleAndGetFlags = VID_MSHAGN_F_GET_NEXT_MESSAGE;
            }
# endif /* NEM_WIN_USE_OUR_OWN_RUN_API */

            if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_WAIT, VMCPUSTATE_STARTED_EXEC_NEM))
            {
# ifdef NEM_WIN_USE_OUR_OWN_RUN_API
#  ifdef IN_RING0
                pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu     = pGVCpu->idCpu;
                pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags   = pVCpu->nem.s.fHandleAndGetFlags;
                pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = cMillies;
                NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
                                                        &pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
                                                        sizeof(pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext),
                                                        NULL, 0);
                VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
                if (rcNt == STATUS_SUCCESS)
#  else
                BOOL fRet = VidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu,
                                                           pVCpu->nem.s.fHandleAndGetFlags, cMillies);
                VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
                if (fRet)
#  endif
# else
                WHV_RUN_VP_EXIT_CONTEXT ExitReason;
                RT_ZERO(ExitReason);
                HRESULT hrc = WHvRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, &ExitReason, sizeof(ExitReason));
                VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
                if (SUCCEEDED(hrc))
# endif
                {
                    /*
                     * Deal with the message.
                     */
# ifdef NEM_WIN_USE_OUR_OWN_RUN_API
                    rcStrict = nemHCWinHandleMessage(pVM, pVCpu, pMappingHeader, pCtx, pGVCpu);
                    pVCpu->nem.s.fHandleAndGetFlags |= VID_MSHAGN_F_HANDLE_MESSAGE;
# else
                    rcStrict = nemR3WinHandleExit(pVM, pVCpu, &ExitReason, pCtx);
# endif
                    if (rcStrict == VINF_SUCCESS)
                    { /* hopefully likely */ }
                    else
                    {
                        LogFlow(("NEM/%u: breaking: nemHCWinHandleMessage -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
                        STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
                        break;
                    }
                }
                else
                {
# ifdef NEM_WIN_USE_OUR_OWN_RUN_API

                    /* VID.SYS merges STATUS_ALERTED and STATUS_USER_APC into STATUS_TIMEOUT,
                       so after NtAlertThread we end up here with a STATUS_TIMEOUT.  Note that
                       the ring-3 error code conversion yields WAIT_XXX values, which happen
                       to be numerically identical to the corresponding NT status codes. */
#  ifndef IN_RING0
                    DWORD rcNt = GetLastError();
#  endif
                    LogFlow(("NEM/%u: VidMessageSlotHandleAndGetNext -> %#x\n", pVCpu->idCpu, rcNt));
                    AssertLogRelMsgReturn(   rcNt == STATUS_TIMEOUT
                                          || rcNt == STATUS_ALERTED    /* just in case */
                                          || rcNt == STATUS_USER_APC   /* ditto */
                                          , ("VidMessageSlotHandleAndGetNext failed for CPU #%u: %#x (%u)\n",
                                             pVCpu->idCpu, rcNt, rcNt),
                                          VERR_NEM_IPE_0);
                    pVCpu->nem.s.fHandleAndGetFlags = VID_MSHAGN_F_GET_NEXT_MESSAGE;
                    STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatGetMsgTimeout);
# else
                    AssertLogRelMsgFailedReturn(("WHvRunVirtualProcessor failed for CPU #%u: %#x (%u)\n",
                                                 pVCpu->idCpu, hrc, GetLastError()),
                                                VERR_NEM_IPE_0);
# endif
                }

                /*
                 * If no relevant FFs are pending, loop.
                 */
                if (   !VM_FF_IS_PENDING(   pVM,   !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK    : VM_FF_HP_R0_PRE_HM_STEP_MASK)
                    && !VMCPU_FF_IS_PENDING(pVCpu, !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
                    continue;

                /** @todo Try handle pending flags, not just return to EM loops.  Take care
                 *        not to set important RCs here unless we've handled a message. */
                LogFlow(("NEM/%u: breaking: pending FF (%#x / %#x)\n",
                         pVCpu->idCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPost);
            }
            else
            {
                LogFlow(("NEM/%u: breaking: canceled %d (pre exec)\n", pVCpu->idCpu, VMCPU_GET_STATE(pVCpu) ));
                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnCancel);
            }
        }
        else
        {
            LogFlow(("NEM/%u: breaking: pending FF (pre exec)\n", pVCpu->idCpu));
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPre);
        }
        break;
    } /* the run loop */


    /*
     * If the CPU is running, make sure to stop it before we try to sync back
     * the state and return to EM.  We don't sync back the whole state if we
     * can help it.
     */
# ifdef NEM_WIN_USE_OUR_OWN_RUN_API
    if (pVCpu->nem.s.fHandleAndGetFlags == VID_MSHAGN_F_GET_NEXT_MESSAGE)
    {
        pVCpu->nem.s.fHandleAndGetFlags = 0;
        rcStrict = nemHCWinStopCpu(pVM, pVCpu, rcStrict, pMappingHeader, pGVM, pGVCpu);
    }
# endif

    if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
        VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);

    if (pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)))
    {
        /* Try to anticipate what we might need. */
        uint64_t fImport = IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI;
        if (   (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
            || RT_FAILURE(rcStrict))
            fImport = CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT);
# ifdef IN_RING0 /* Ring-3 I/O port access optimizations: */
        else if (   rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
                 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
            fImport = CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT;
        else if (rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
            fImport = CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT;
# endif
        else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_APIC
                                          | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
            fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;

        if (pCtx->fExtrn & fImport)
        {
# ifdef IN_RING0
            int rc2 = nemR0WinImportState(pGVM, pGVCpu, pCtx, fImport | CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT);
            if (RT_SUCCESS(rc2))
                pCtx->fExtrn &= ~fImport;
            else if (rc2 == VERR_NEM_CHANGE_PGM_MODE || rc2 == VERR_NEM_FLUSH_TLB || rc2 == VERR_NEM_UPDATE_APIC_BASE)
            {
                pCtx->fExtrn &= ~fImport;
                if (rcStrict == VINF_SUCCESS || rcStrict == -rc2)
                    rcStrict = -rc2;
                else
                {
                    pVCpu->nem.s.rcPending = -rc2;
                    LogFlow(("NEM/%u: rcPending=%Rrc (rcStrict=%Rrc)\n", pVCpu->idCpu, rc2, VBOXSTRICTRC_VAL(rcStrict) ));
                }
            }
# else
            int rc2 = nemHCWinCopyStateFromHyperV(pVM, pVCpu, pCtx, fImport | CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT);
            if (RT_SUCCESS(rc2))
                pCtx->fExtrn &= ~fImport;
# endif
            else if (RT_SUCCESS(rcStrict))
                rcStrict = rc2;
            if (!(pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT))))
                pCtx->fExtrn = 0;
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
        }
        else
        {
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
            pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
        }
    }
    else
    {
        STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
        pCtx->fExtrn = 0;
    }

    LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 => %Rrc\n",
             pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, pCtx->rflags, VBOXSTRICTRC_VAL(rcStrict) ));
    return rcStrict;
}
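
/*
 * A minimal sketch of the run-loop shape implemented above (hypothetical,
 * simplified to the ring-3 WHv path; FF checks, interrupt injection and
 * state export are omitted for brevity):
 *
 *     VBOXSTRICTRC rcStrict = VINF_SUCCESS;
 *     for (;;)
 *     {
 *         // 1. Inject any pending interrupt and push dirty state to hyper-V.
 *         // 2. Execute guest code until the next exit.
 *         WHV_RUN_VP_EXIT_CONTEXT ExitCtx;
 *         RT_ZERO(ExitCtx);
 *         HRESULT hrc = WHvRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, &ExitCtx, sizeof(ExitCtx));
 *         if (FAILED(hrc))
 *             return VERR_NEM_IPE_0;
 *         // 3. Handle the exit; anything but VINF_SUCCESS goes back to EM.
 *         rcStrict = nemR3WinHandleExit(pVM, pVCpu, &ExitCtx, pCtx);
 *         if (rcStrict != VINF_SUCCESS)
 *             break;
 *     }
 *     // 4. Import whatever guest state the caller will need, then return.
 */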

#endif /* defined(NEM_WIN_USE_OUR_OWN_RUN_API) || defined(IN_RING3) */

/**
 * @callback_method_impl{FNPGMPHYSNEMCHECKPAGE}
 */
NEM_TMPL_STATIC DECLCALLBACK(int) nemHCWinUnsetForA20CheckerCallback(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys,
                                                                     PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
{
    /* We'll just unmap the memory. */
    if (pInfo->u2NemState > NEM_WIN_PAGE_STATE_UNMAPPED)
    {
#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
        int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys);
        AssertRC(rc);
        if (RT_SUCCESS(rc))
#else
        HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
        if (SUCCEEDED(hrc))
#endif
        {
            uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
            Log5(("NEM GPA unmapped/A20: %RGp (was %s, cMappedPages=%u)\n", GCPhys, g_apszPageStates[pInfo->u2NemState], cMappedPages));
            pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
        }
        else
        {
#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
            LogRel(("nemHCWinUnsetForA20CheckerCallback/unmap: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));
            return rc;
#else
            LogRel(("nemHCWinUnsetForA20CheckerCallback/unmap: GCPhys=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
                    GCPhys, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
            return VERR_NEM_IPE_2;
#endif
        }
    }
    RT_NOREF(pVCpu, pvUser);
    return VINF_SUCCESS;
}


/**
 * Unmaps a page from Hyper-V for the purpose of emulating A20 gate behavior.
 *
 * @returns The PGMPhysNemPageInfoChecker result.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPhys      The page to unmap.
 */
NEM_TMPL_STATIC int nemHCWinUnmapPageForA20Gate(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
{
    PGMPHYSNEMPAGEINFO Info;
    return PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhys, false /*fMakeWritable*/, &Info,
                                     nemHCWinUnsetForA20CheckerCallback, NULL);
}
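
/*
 * A quick illustration of the aliasing the A20-gate helpers deal with
 * (hypothetical values, assuming the usual 1 MB wrap-around semantics):
 * with A20 disabled, physical address bit 20 is forced to zero, so a guest
 * access to 0x10FFEF lands on 0x00FFEF.  The alias page Hyper-V must not keep
 * mapped independently is therefore the one with bit 20 set:
 *
 *     RTGCPHYS GCPhys      = 0x000FFE00;              // page in the first MB
 *     RTGCPHYS GCPhysAlias = GCPhys | RT_BIT_32(20);  // 0x001FFE00, the HMA alias
 *     nemHCWinUnmapPageForA20Gate(pVM, pVCpu, GCPhysAlias);
 */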


void nemHCNativeNotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
{
    Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
    NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb);
}


void nemHCNativeNotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
                                                int fRestoreAsRAM, bool fRestoreAsRAM2)
{
    Log5(("nemHCNativeNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d fRestoreAsRAM=%d fRestoreAsRAM2=%d\n",
          GCPhys, cb, enmKind, fRestoreAsRAM, fRestoreAsRAM2));
    NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb); NOREF(fRestoreAsRAM); NOREF(fRestoreAsRAM2);
}


void nemHCNativeNotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
                                            RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
{
    Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
          GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
    NOREF(pVM); NOREF(enmKind); NOREF(GCPhysOld); NOREF(GCPhysNew); NOREF(cb); NOREF(fRestoreAsRAM);
}


/**
 * Worker that maps pages into Hyper-V.
 *
 * This is used by the PGM physical page notifications as well as the memory
 * access VMEXIT handlers.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context virtual CPU structure of the
 *                          calling EMT.
 * @param   GCPhysSrc       The source page address.
 * @param   GCPhysDst       The hyper-V destination page.  This may differ from
 *                          GCPhysSrc when A20 is disabled.
 * @param   fPageProt       NEM_PAGE_PROT_XXX.
 * @param   pu2State        Our page state (input/output).
 * @param   fBackingChanged Set if the page backing is being changed.
 * @thread  EMT(pVCpu)
 */
NEM_TMPL_STATIC int nemHCNativeSetPhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
                                           uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged)
{
#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
    /*
     * When using the hypercalls instead of the ring-3 APIs, we don't need to
     * unmap memory before modifying it.  We still want to track the state
     * though, since unmap will fail when called on an unmapped page and we
     * don't want to redo upgrades/downgrades.
     */
    uint8_t const u2OldState = *pu2State;
    int rc;
    if (fPageProt == NEM_PAGE_PROT_NONE)
    {
        if (u2OldState > NEM_WIN_PAGE_STATE_UNMAPPED)
        {
            rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
            if (RT_SUCCESS(rc))
            {
                *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
                uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
                Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
            }
            else
                AssertLogRelMsgFailed(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
        }
        else
            rc = VINF_SUCCESS;
    }
    else if (fPageProt & NEM_PAGE_PROT_WRITE)
    {
        if (u2OldState != NEM_WIN_PAGE_STATE_WRITABLE || fBackingChanged)
        {
            rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
                                          HV_MAP_GPA_READABLE   | HV_MAP_GPA_WRITABLE
                                          | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
            if (RT_SUCCESS(rc))
            {
                *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
                uint32_t cMappedPages = u2OldState <= NEM_WIN_PAGE_STATE_UNMAPPED
                                      ? ASMAtomicIncU32(&pVM->nem.s.cMappedPages) : pVM->nem.s.cMappedPages;
                Log5(("NEM GPA writable/set: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
                NOREF(cMappedPages);
            }
            else
                AssertLogRelMsgFailed(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
        }
        else
            rc = VINF_SUCCESS;
    }
    else
    {
        if (u2OldState != NEM_WIN_PAGE_STATE_READABLE || fBackingChanged)
        {
            rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
                                          HV_MAP_GPA_READABLE | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
            if (RT_SUCCESS(rc))
            {
                *pu2State = NEM_WIN_PAGE_STATE_READABLE;
                uint32_t cMappedPages = u2OldState <= NEM_WIN_PAGE_STATE_UNMAPPED
                                      ? ASMAtomicIncU32(&pVM->nem.s.cMappedPages) : pVM->nem.s.cMappedPages;
                Log5(("NEM GPA read+exec/set: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
                NOREF(cMappedPages);
            }
            else
                AssertLogRelMsgFailed(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
        }
        else
            rc = VINF_SUCCESS;
    }

    return rc;

#else
    /*
     * Looks like we need to unmap a page before we can change the backing
     * or even modify the protection.  This is going to be *REALLY* efficient.
     * PGM lends us two bits to keep track of the state here.
     */
    uint8_t const u2OldState = *pu2State;
    uint8_t const u2NewState = fPageProt & NEM_PAGE_PROT_WRITE ? NEM_WIN_PAGE_STATE_WRITABLE
                             : fPageProt & NEM_PAGE_PROT_READ  ? NEM_WIN_PAGE_STATE_READABLE : NEM_WIN_PAGE_STATE_UNMAPPED;
    if (   fBackingChanged
        || u2NewState != u2OldState)
    {
        if (u2OldState > NEM_WIN_PAGE_STATE_UNMAPPED)
        {
# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
            int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
            AssertRC(rc);
            if (RT_SUCCESS(rc))
            {
                *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
                uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
                if (u2NewState == NEM_WIN_PAGE_STATE_UNMAPPED)
                {
                    Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n",
                          GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
                    return VINF_SUCCESS;
                }
            }
            else
            {
                LogRel(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
                return rc;
            }
# else
            HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst, X86_PAGE_SIZE);
            if (SUCCEEDED(hrc))
            {
                *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
                uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
                if (u2NewState == NEM_WIN_PAGE_STATE_UNMAPPED)
                {
                    Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n",
                          GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
                    return VINF_SUCCESS;
                }
            }
            else
            {
                LogRel(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
                        GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
                return VERR_NEM_INIT_FAILED;
            }
# endif
        }
    }

    /*
     * Writable mapping?
     */
    if (fPageProt & NEM_PAGE_PROT_WRITE)
    {
# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
        int rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
                                          HV_MAP_GPA_READABLE   | HV_MAP_GPA_WRITABLE
                                          | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
        AssertRC(rc);
        if (RT_SUCCESS(rc))
        {
            *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
            uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
            Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
                  GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
            return VINF_SUCCESS;
        }
        LogRel(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
        return rc;
# else
        void *pvPage;
        int rc = nemR3NativeGCPhys2R3PtrWriteable(pVM, GCPhysSrc, &pvPage);
        if (RT_SUCCESS(rc))
        {
            HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvPage, GCPhysDst, X86_PAGE_SIZE,
                                         WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute | WHvMapGpaRangeFlagWrite);
            if (SUCCEEDED(hrc))
            {
                *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
                uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
                Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
                      GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
                return VINF_SUCCESS;
            }
            LogRel(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
                    GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
            return VERR_NEM_INIT_FAILED;
        }
        LogRel(("nemHCNativeSetPhysPage/writable: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
        return rc;
# endif
    }

    if (fPageProt & NEM_PAGE_PROT_READ)
    {
# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
        int rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
                                          HV_MAP_GPA_READABLE | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
        AssertRC(rc);
        if (RT_SUCCESS(rc))
        {
            *pu2State = NEM_WIN_PAGE_STATE_READABLE;
            uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
            Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
                  GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
            return VINF_SUCCESS;
        }
        LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
        return rc;
# else
        const void *pvPage;
        int rc = nemR3NativeGCPhys2R3PtrReadOnly(pVM, GCPhysSrc, &pvPage);
        if (RT_SUCCESS(rc))
        {
            HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, (void *)pvPage, GCPhysDst, X86_PAGE_SIZE,
                                         WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
            if (SUCCEEDED(hrc))
            {
                *pu2State = NEM_WIN_PAGE_STATE_READABLE;
                uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
                Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
                      GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
                return VINF_SUCCESS;
            }
            LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
                    GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
            return VERR_NEM_INIT_FAILED;
        }
        LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
        return rc;
# endif
    }

    /* We already unmapped it above. */
    *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
    return VINF_SUCCESS;
#endif /* !NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
}
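
/*
 * For reference, the two PGM-lent state bits used above encode the four page
 * states in g_apszPageStates: not-set, unmapped, readable and writable.  An
 * informal transition sketch for the WHv path, where any protection or
 * backing change must first pass through "unmapped":
 *
 *     unmapped  --WHvMapGpaRange(R+X)----->  readable
 *     unmapped  --WHvMapGpaRange(R+W+X)--->  writable
 *     readable/writable  --WHvUnmapGpaRange-->  unmapped
 *
 * The hypercall path can upgrade/downgrade in place, which is why it only
 * tracks the state and skips the intermediate unmap.
 */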


NEM_TMPL_STATIC int nemHCJustUnmapPageFromHyperV(PVM pVM, RTGCPHYS GCPhysDst, uint8_t *pu2State)
{
    if (*pu2State <= NEM_WIN_PAGE_STATE_UNMAPPED)
    {
        Log5(("nemHCJustUnmapPageFromHyperV: %RGp == unmapped\n", GCPhysDst));
        *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
        return VINF_SUCCESS;
    }

#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING0)
    PVMCPU pVCpu = VMMGetCpu(pVM);
    int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
    {
        uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
        Log5(("NEM GPA unmapped/just: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[*pu2State], cMappedPages));
        *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
        return VINF_SUCCESS;
    }
    LogRel(("nemHCJustUnmapPageFromHyperV/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
    return rc;
#else
    HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, X86_PAGE_SIZE);
    if (SUCCEEDED(hrc))
    {
        uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
        *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
        Log5(("nemHCJustUnmapPageFromHyperV: %RGp => unmapped (total %u)\n", GCPhysDst, cMappedPages));
        return VINF_SUCCESS;
    }
    LogRel(("nemHCJustUnmapPageFromHyperV(%RGp): failed! hrc=%Rhrc (%#x) Last=%#x/%u\n",
            GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
    return VERR_NEM_IPE_6;
#endif
}
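
/*
 * The three page notification callbacks below share one A20 dispatch pattern;
 * a condensed sketch (hypothetical helper, illustration only):
 *
 *     static void notifyPageCommon(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys,
 *                                  uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged)
 *     {
 *         if (pVM->nem.s.fA20Enabled || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
 *             nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, fBackingChanged);
 *         else
 *         {
 *             // Drop the +1MB alias; it gets re-synced lazily when touched.
 *             nemHCWinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
 *             if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
 *                 nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, fBackingChanged);
 *         }
 *     }
 */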


int nemHCNativeNotifyPhysPageAllocated(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
                                       PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhys, fPageProt, enmType, *pu2State));
    RT_NOREF_PV(HCPhys); RT_NOREF_PV(enmType);

    int rc;
#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING0)
    PVMCPU pVCpu = VMMGetCpu(pVM);
    if (   pVM->nem.s.fA20Enabled
        || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
        rc = nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
    else
    {
        /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
        rc = nemHCWinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
        if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys) && RT_SUCCESS(rc))
            rc = nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
    }
#else
    RT_NOREF_PV(fPageProt);
    if (   pVM->nem.s.fA20Enabled
        || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
        rc = nemR3JustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
    else if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
        rc = nemR3JustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
    else
        rc = VINF_SUCCESS; /* ignore since we've got the alias page at this address. */
#endif
    return rc;
}


void nemHCNativeNotifyPhysPageProtChanged(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
                                          PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("nemHCNativeNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhys, fPageProt, enmType, *pu2State));
    RT_NOREF_PV(HCPhys); RT_NOREF_PV(enmType);

#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING0)
    PVMCPU pVCpu = VMMGetCpu(pVM);
    if (   pVM->nem.s.fA20Enabled
        || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
        nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, false /*fBackingChanged*/);
    else
    {
        /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
        nemHCWinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
        if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
            nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, false /*fBackingChanged*/);
    }
#else
    RT_NOREF_PV(fPageProt);
    if (   pVM->nem.s.fA20Enabled
        || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
        nemR3JustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
    else if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
        nemR3JustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
    /* else: ignore since we've got the alias page at this address. */
#endif
}


void nemHCNativeNotifyPhysPageChanged(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
                                      uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("nemHCNativeNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhysPrev, HCPhysNew, fPageProt, enmType, *pu2State));
    RT_NOREF_PV(HCPhysPrev); RT_NOREF_PV(HCPhysNew); RT_NOREF_PV(enmType);

#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING0)
    PVMCPU pVCpu = VMMGetCpu(pVM);
    if (   pVM->nem.s.fA20Enabled
        || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
        nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
    else
    {
        /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
        nemHCWinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
        if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
            nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
    }
#else
    RT_NOREF_PV(fPageProt);
    if (   pVM->nem.s.fA20Enabled
        || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
        nemR3JustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
    else if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
        nemR3JustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
    /* else: ignore since we've got the alias page at this address. */
#endif
}
