
source: vbox/trunk/src/VBox/VMM/VMMR3/NEMR3Native-win.cpp@ 71082

Last change on this file since 71082 was 71082, checked in by vboxsync, 7 years ago

VMM,SUPDrv: More NEM/win page hacking. bugref:9044

1/* $Id: NEMR3Native-win.cpp 71082 2018-02-21 11:18:47Z vboxsync $ */
2/** @file
3 * NEM - Native execution manager, native ring-3 Windows backend.
4 *
5 * Log group 2: Exit logging.
6 * Log group 3: Log context on exit.
7 * Log group 5: Ring-3 memory management
8 * Log group 6: Ring-0 memory management
9 * Log group 12: API intercepts.
10 */
11
12/*
13 * Copyright (C) 2018 Oracle Corporation
14 *
15 * This file is part of VirtualBox Open Source Edition (OSE), as
16 * available from http://www.virtualbox.org. This file is free software;
17 * you can redistribute it and/or modify it under the terms of the GNU
18 * General Public License (GPL) as published by the Free Software
19 * Foundation, in version 2 as it comes in the "COPYING" file of the
20 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
21 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
22 */
23
24
25/*********************************************************************************************************************************
26* Header Files *
27*********************************************************************************************************************************/
28#define LOG_GROUP LOG_GROUP_NEM
29#include <iprt/nt/nt-and-windows.h>
30#include <iprt/nt/hyperv.h>
31#include <WinHvPlatform.h>
32
33#ifndef _WIN32_WINNT_WIN10
34# error "Missing _WIN32_WINNT_WIN10"
35#endif
36#ifndef _WIN32_WINNT_WIN10_RS1 /* Missing define, causing trouble for us. */
37# define _WIN32_WINNT_WIN10_RS1 (_WIN32_WINNT_WIN10 + 1)
38#endif
39#include <sysinfoapi.h>
40#include <debugapi.h>
41#include <errhandlingapi.h>
42#include <fileapi.h>
43#include <winerror.h> /* no api header for this. */
44
45#include <VBox/vmm/nem.h>
46#include <VBox/vmm/iem.h>
47#include <VBox/vmm/em.h>
48#include <VBox/vmm/apic.h>
49#include "NEMInternal.h"
50#include <VBox/vmm/vm.h>
51
52#include <iprt/ldr.h>
53#include <iprt/path.h>
54#include <iprt/string.h>
55
56
57/*********************************************************************************************************************************
58* Defined Constants And Macros *
59*********************************************************************************************************************************/
60#ifdef LOG_ENABLED
61# define NEM_WIN_INTERCEPT_NT_IO_CTLS
62#endif
63
64/** @name Our two-bit physical page state for PGMPAGE
65 * @{ */
66#define NEM_WIN_PAGE_STATE_NOT_SET 0
67#define NEM_WIN_PAGE_STATE_UNMAPPED 1
68#define NEM_WIN_PAGE_STATE_READABLE 2
69#define NEM_WIN_PAGE_STATE_WRITABLE 3
70/** @} */
71
72/** Checks if a_GCPhys is subject to the limited A20 gate emulation. */
73#define NEM_WIN_IS_SUBJECT_TO_A20(a_GCPhys) ((RTGCPHYS)((a_GCPhys) - _1M) < (RTGCPHYS)_64K)
74
75/** Checks if a_GCPhys is relevant to the limited A20 gate emulation. */
76#define NEM_WIN_IS_RELEVANT_TO_A20(a_GCPhys) \
77 ( ((RTGCPHYS)((a_GCPhys) - _1M) < (RTGCPHYS)_64K) || ((RTGCPHYS)(a_GCPhys) < (RTGCPHYS)_64K) )
78
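/* Illustrative compile-time examples (a sketch, just to make the arithmetic above
   concrete): only the 64 KB directly above 1 MB is "subject to" the limited A20
   emulation, while the first 64 KB (which that window aliases onto when the gate
   is masked) is merely "relevant". */
AssertCompile( NEM_WIN_IS_SUBJECT_TO_A20(_1M));             /* 0x100000 wraps to 0 with A20 masked. */
AssertCompile( NEM_WIN_IS_SUBJECT_TO_A20(_1M + _64K - 1));  /* Last byte of the wrap-around window. */
AssertCompile(!NEM_WIN_IS_SUBJECT_TO_A20(_1M + _64K));      /* Just past the window. */
AssertCompile(!NEM_WIN_IS_SUBJECT_TO_A20(0));               /* The low alias itself... */
AssertCompile( NEM_WIN_IS_RELEVANT_TO_A20(0));              /* ...is only "relevant". */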
79
80#define NEM_WIN_USE_HYPERCALLS
81
82
83/*********************************************************************************************************************************
84* Structures and Typedefs *
85*********************************************************************************************************************************/
86/** WHvRegisterInterruptState layout, reconstructed from the v7.1 DDK. */
87typedef union MISSINGINTERUPTSTATEREG
88{
89 /** 64-bit view. */
90 uint64_t au64[2];
91 struct /* unnamed */
92 {
93 uint64_t fInterruptShadow : 1;
94 uint64_t fNmiMasked : 2;
95 uint64_t uReserved0 : 61;
96 uint64_t uReserved1;
97 };
98} MISSINGINTERUPTSTATEREG;
99AssertCompileSize(MISSINGINTERUPTSTATEREG, 16);
100
101/** Used by MISSINGPENDINGINTERRUPTIONREG. */
102typedef enum MISSINGPENDINGINTERRUPTIONTYPE
103{
104 kPendingIntType_Interrupt = 0,
105 kPendingIntType_Nmi,
106 kPendingIntType_Xcpt,
107 kPendingIntType_Dunno,
108 kPendingIntType_SoftwareInterrupt
109} MISSINGPENDINGINTERRUPTIONTYPE;
110
111/** WHvRegisterPendingInterruption layout, reconstructed from the v7.1 DDK. */
112typedef union MISSINGPENDINGINTERRUPTIONREG
113{
114 /** 64-bit view. */
115 uint64_t au64[2];
116 struct /* unnamed */
117 {
118 uint32_t fInterruptionPending : 1;
119 uint32_t enmInterruptionType : 3; /**< MISSINGPENDINGINTERRUPTIONTYPE */
120 uint32_t fDeliverErrCd : 1;
121 uint32_t fUnknown0 : 1;
122 uint32_t fUnknown1 : 1; /**< Observed set when software interrupt was issued. */
123 uint32_t uReserved0 : 9;
124 uint32_t InterruptionVector : 16;
125 uint32_t uErrCd;
126 uint64_t uReserved1;
127 };
128} MISSINGPENDINGINTERRUPTIONREG;
129AssertCompileSize(MISSINGPENDINGINTERRUPTIONREG, 16);
130
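/* Minimal usage sketch for the reconstructed layout above (illustrative only, and the
   helper name is made up; the field semantics are reverse engineered, not documented): */
#if 0 /* sketch */
static void nemR3WinLogPendingInterruption(WHV_REGISTER_VALUE const *pValue)
{
    MISSINGPENDINGINTERRUPTIONREG Pending;
    Pending.au64[0] = pValue->Reg64;    /* The interesting bits live in the low 64 bits. */
    Pending.au64[1] = 0;
    if (Pending.fInterruptionPending)
        Log(("Pending interruption: type=%u vector=%#x fErrCd=%u uErrCd=%#x\n",
             Pending.enmInterruptionType, Pending.InterruptionVector,
             Pending.fDeliverErrCd, Pending.uErrCd));
}
#endif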
131
132/*********************************************************************************************************************************
133* Global Variables *
134*********************************************************************************************************************************/
135/** @name APIs imported from WinHvPlatform.dll
136 * @{ */
137static decltype(WHvGetCapability) * g_pfnWHvGetCapability;
138static decltype(WHvCreatePartition) * g_pfnWHvCreatePartition;
139static decltype(WHvSetupPartition) * g_pfnWHvSetupPartition;
140static decltype(WHvDeletePartition) * g_pfnWHvDeletePartition;
141static decltype(WHvGetPartitionProperty) * g_pfnWHvGetPartitionProperty;
142static decltype(WHvSetPartitionProperty) * g_pfnWHvSetPartitionProperty;
143static decltype(WHvMapGpaRange) * g_pfnWHvMapGpaRange;
144static decltype(WHvUnmapGpaRange) * g_pfnWHvUnmapGpaRange;
145static decltype(WHvTranslateGva) * g_pfnWHvTranslateGva;
146static decltype(WHvCreateVirtualProcessor) * g_pfnWHvCreateVirtualProcessor;
147static decltype(WHvDeleteVirtualProcessor) * g_pfnWHvDeleteVirtualProcessor;
148static decltype(WHvRunVirtualProcessor) * g_pfnWHvRunVirtualProcessor;
149static decltype(WHvGetRunExitContextSize) * g_pfnWHvGetRunExitContextSize;
150static decltype(WHvCancelRunVirtualProcessor) * g_pfnWHvCancelRunVirtualProcessor;
151static decltype(WHvGetVirtualProcessorRegisters) * g_pfnWHvGetVirtualProcessorRegisters;
152static decltype(WHvSetVirtualProcessorRegisters) * g_pfnWHvSetVirtualProcessorRegisters;
153/** @} */
154
155/** @name APIs imported from Vid.dll
156 * @{ */
157static BOOL (WINAPI *g_pfnVidGetHvPartitionId)(HANDLE hPartition, HV_PARTITION_ID *pidPartition);
158/** @} */
159
160
161/**
162 * Import instructions.
163 */
164static const struct
165{
166 uint8_t idxDll; /**< 0 for WinHvPlatform.dll, 1 for vid.dll. */
167 bool fOptional; /**< Set if import is optional. */
168 PFNRT *ppfn; /**< The function pointer variable. */
169 const char *pszName; /**< The function name. */
170} g_aImports[] =
171{
172#define NEM_WIN_IMPORT(a_idxDll, a_fOptional, a_Name) { (a_idxDll), (a_fOptional), (PFNRT *)&RT_CONCAT(g_pfn,a_Name), #a_Name }
173 NEM_WIN_IMPORT(0, false, WHvGetCapability),
174 NEM_WIN_IMPORT(0, false, WHvCreatePartition),
175 NEM_WIN_IMPORT(0, false, WHvSetupPartition),
176 NEM_WIN_IMPORT(0, false, WHvDeletePartition),
177 NEM_WIN_IMPORT(0, false, WHvGetPartitionProperty),
178 NEM_WIN_IMPORT(0, false, WHvSetPartitionProperty),
179 NEM_WIN_IMPORT(0, false, WHvMapGpaRange),
180 NEM_WIN_IMPORT(0, false, WHvUnmapGpaRange),
181 NEM_WIN_IMPORT(0, false, WHvTranslateGva),
182 NEM_WIN_IMPORT(0, false, WHvCreateVirtualProcessor),
183 NEM_WIN_IMPORT(0, false, WHvDeleteVirtualProcessor),
184 NEM_WIN_IMPORT(0, false, WHvRunVirtualProcessor),
185 NEM_WIN_IMPORT(0, false, WHvGetRunExitContextSize),
186 NEM_WIN_IMPORT(0, false, WHvCancelRunVirtualProcessor),
187 NEM_WIN_IMPORT(0, false, WHvGetVirtualProcessorRegisters),
188 NEM_WIN_IMPORT(0, false, WHvSetVirtualProcessorRegisters),
189 NEM_WIN_IMPORT(1, false, VidGetHvPartitionId),
190#undef NEM_WIN_IMPORT
191};
192
193
194/*
195 * Let the preprocessor alias the APIs to import variables for better autocompletion.
196 */
197#ifndef IN_SLICKEDIT
198# define WHvGetCapability g_pfnWHvGetCapability
199# define WHvCreatePartition g_pfnWHvCreatePartition
200# define WHvSetupPartition g_pfnWHvSetupPartition
201# define WHvDeletePartition g_pfnWHvDeletePartition
202# define WHvGetPartitionProperty g_pfnWHvGetPartitionProperty
203# define WHvSetPartitionProperty g_pfnWHvSetPartitionProperty
204# define WHvMapGpaRange g_pfnWHvMapGpaRange
205# define WHvUnmapGpaRange g_pfnWHvUnmapGpaRange
206# define WHvTranslateGva g_pfnWHvTranslateGva
207# define WHvCreateVirtualProcessor g_pfnWHvCreateVirtualProcessor
208# define WHvDeleteVirtualProcessor g_pfnWHvDeleteVirtualProcessor
209# define WHvRunVirtualProcessor g_pfnWHvRunVirtualProcessor
210# define WHvGetRunExitContextSize g_pfnWHvGetRunExitContextSize
211# define WHvCancelRunVirtualProcessor g_pfnWHvCancelRunVirtualProcessor
212# define WHvGetVirtualProcessorRegisters g_pfnWHvGetVirtualProcessorRegisters
213# define WHvSetVirtualProcessorRegisters g_pfnWHvSetVirtualProcessorRegisters
214#endif
215
216/** NEM_WIN_PAGE_STATE_XXX names. */
217static const char * const g_apszPageStates[4] = { "not-set", "unmapped", "readable", "writable" };
218/** WHV_MEMORY_ACCESS_TYPE names */
219static const char * const g_apszWHvMemAccesstypes[4] = { "read", "write", "exec", "!undefined!" };
220
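/* Hypothetical convenience helper (a sketch, not used by the code below): map the
   two-bit NEM_WIN_PAGE_STATE_XXX value kept in PGMPAGE to its name for logging. */
#if 0 /* sketch */
DECLINLINE(const char *) nemR3WinPageStateName(uint8_t u2State)
{
    return g_apszPageStates[u2State & 3]; /* NEM_WIN_PAGE_STATE_NOT_SET thru _WRITABLE */
}
#endif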
221
222/*********************************************************************************************************************************
223* Internal Functions *
224*********************************************************************************************************************************/
225static int nemR3NativeSetPhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst, uint32_t fPageProt,
226 uint8_t *pu2State, bool fBackingChanged);
227
228
229#ifdef NEM_WIN_INTERCEPT_NT_IO_CTLS
230
231/** The real NtDeviceIoControlFile API in NTDLL. */
232static decltype(NtDeviceIoControlFile) *g_pfnNtDeviceIoControlFile;
233
234/**
235 * Wrapper that logs the call from VID.DLL.
236 *
237 * This is very handy for figuring out why an API call fails.
238 */
239static NTSTATUS WINAPI
240nemR3WinLogWrapper_NtDeviceIoControlFile(HANDLE hFile, HANDLE hEvt, PIO_APC_ROUTINE pfnApcCallback, PVOID pvApcCtx,
241 PIO_STATUS_BLOCK pIos, ULONG uFunction, PVOID pvInput, ULONG cbInput,
242 PVOID pvOutput, ULONG cbOutput)
243{
244 NTSTATUS rcNt = g_pfnNtDeviceIoControlFile(hFile, hEvt, pfnApcCallback, pvApcCtx, pIos, uFunction,
245 pvInput, cbInput, pvOutput, cbOutput);
246 if (!hEvt && !pfnApcCallback && !pvApcCtx)
247 Log12(("VID!NtDeviceIoControlFile: hFile=%#zx pIos=%p->{s:%#x, i:%#zx} uFunction=%#x Input=%p LB %#x Output=%p LB %#x) -> %#x; Caller=%p\n",
248 hFile, pIos, pIos->Status, pIos->Information, uFunction, pvInput, cbInput, pvOutput, cbOutput, rcNt, ASMReturnAddress()));
249 else
250 Log12(("VID!NtDeviceIoControlFile: hFile=%#zx hEvt=%#zx Apc=%p/%p pIos=%p->{s:%#x, i:%#zx} uFunction=%#x Input=%p LB %#x Output=%p LB %#x) -> %#x; Caller=%p\n",
251 hFile, hEvt, pfnApcCallback, pvApcCtx, pIos, pIos->Status, pIos->Information, uFunction,
252 pvInput, cbInput, pvOutput, cbOutput, rcNt, ASMReturnAddress()));
253 return rcNt;
254}
255
256
257/**
258 * Patches the call table of VID.DLL so we can intercept and log
259 * NtDeviceIoControlFile.
260 *
261 * This is for DEBUGGING only.
262 *
263 * @param hLdrModVid The VID module handle.
264 */
265static void nemR3WinInitVidIntercepts(RTLDRMOD hLdrModVid)
266{
267 /*
268 * Locate the real API.
269 */
270 g_pfnNtDeviceIoControlFile = (decltype(NtDeviceIoControlFile) *)RTLdrGetSystemSymbol("NTDLL.DLL", "NtDeviceIoControlFile");
271 AssertReturnVoid(g_pfnNtDeviceIoControlFile != NULL);
272
273 /*
274 * Locate the PE header and get what we need from it.
275 */
276 uint8_t const *pbImage = (uint8_t const *)RTLdrGetNativeHandle(hLdrModVid);
277 IMAGE_DOS_HEADER const *pMzHdr = (IMAGE_DOS_HEADER const *)pbImage;
278 AssertReturnVoid(pMzHdr->e_magic == IMAGE_DOS_SIGNATURE);
279 IMAGE_NT_HEADERS const *pNtHdrs = (IMAGE_NT_HEADERS const *)&pbImage[pMzHdr->e_lfanew];
280 AssertReturnVoid(pNtHdrs->Signature == IMAGE_NT_SIGNATURE);
281
282 uint32_t const cbImage = pNtHdrs->OptionalHeader.SizeOfImage;
283 IMAGE_DATA_DIRECTORY const ImportDir = pNtHdrs->OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_IMPORT];
284
285 /*
286 * Walk the import descriptor table looking for NTDLL.DLL.
287 */
288 bool fSuccess = false; /* set when the import thunk gets patched below */
289 AssertReturnVoid(ImportDir.Size > 0);
290 AssertReturnVoid(ImportDir.Size < cbImage);
291 AssertReturnVoid(ImportDir.VirtualAddress > 0);
292 AssertReturnVoid(ImportDir.VirtualAddress < cbImage);
293 for (PIMAGE_IMPORT_DESCRIPTOR pImps = (PIMAGE_IMPORT_DESCRIPTOR)&pbImage[ImportDir.VirtualAddress];
294 pImps->Name != 0 && pImps->FirstThunk != 0;
295 pImps++)
296 {
297 AssertReturnVoid(pImps->Name < cbImage);
298 const char *pszModName = (const char *)&pbImage[pImps->Name];
299 if (RTStrICmpAscii(pszModName, "ntdll.dll"))
300 continue;
301 AssertReturnVoid(pImps->FirstThunk < cbImage);
302 AssertReturnVoid(pImps->OriginalFirstThunk < cbImage);
303
304 /*
305 * Walk the thunks table(s) looking for NtDeviceIoControlFile.
306 */
307 PIMAGE_THUNK_DATA pFirstThunk = (PIMAGE_THUNK_DATA)&pbImage[pImps->FirstThunk]; /* update this. */
308 PIMAGE_THUNK_DATA pThunk = pImps->OriginalFirstThunk == 0 /* read from this. */
309 ? (PIMAGE_THUNK_DATA)&pbImage[pImps->FirstThunk]
310 : (PIMAGE_THUNK_DATA)&pbImage[pImps->OriginalFirstThunk];
311 while (pThunk->u1.Ordinal != 0)
312 {
313 if (!(pThunk->u1.Ordinal & IMAGE_ORDINAL_FLAG32))
314 {
315 AssertReturnVoid(pThunk->u1.Ordinal > 0 && pThunk->u1.Ordinal < cbImage);
316
317 const char *pszSymbol = (const char *)&pbImage[(uintptr_t)pThunk->u1.AddressOfData + 2];
318 if (strcmp(pszSymbol, "NtDeviceIoControlFile") == 0)
319 {
320 DWORD fOldProt = PAGE_EXECUTE;
321 VirtualProtect(&pFirstThunk->u1.Function, sizeof(uintptr_t), PAGE_EXECUTE_READWRITE, &fOldProt);
322 pFirstThunk->u1.Function = (uintptr_t)nemR3WinLogWrapper_NtDeviceIoControlFile;
323 VirtualProtect(&pFirstThunk->u1.Function, sizeof(uintptr_t), fOldProt, &fOldProt);
324 fSuccess = true;
325 }
326 }
327
328 pThunk++;
329 pFirstThunk++;
330 }
331 }
332 Assert(fSuccess);
333}
334#endif
335
336
337
338/**
339 * Worker for nemR3NativeInit that probes and loads the native API.
340 *
341 * @returns VBox status code.
342 * @param fForced Whether the HMForced flag is set and we should
343 * fail if we cannot initialize.
344 * @param pErrInfo Where to always return error info.
345 */
346static int nemR3WinInitProbeAndLoad(bool fForced, PRTERRINFO pErrInfo)
347{
348 /*
349 * Check that the DLL files we need are present, but without loading them.
350 * We'd like to avoid loading them unnecessarily.
351 */
352 WCHAR wszPath[MAX_PATH + 64];
353 UINT cwcPath = GetSystemDirectoryW(wszPath, MAX_PATH);
354 if (cwcPath >= MAX_PATH || cwcPath < 2)
355 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "GetSystemDirectoryW failed (%#x / %u)", cwcPath, GetLastError());
356
357 if (wszPath[cwcPath - 1] != '\\' && wszPath[cwcPath - 1] != '/')
358 wszPath[cwcPath++] = '\\';
359 RTUtf16CopyAscii(&wszPath[cwcPath], RT_ELEMENTS(wszPath) - cwcPath, "WinHvPlatform.dll");
360 if (GetFileAttributesW(wszPath) == INVALID_FILE_ATTRIBUTES)
361 return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE, "The native API dll was not found (%ls)", wszPath);
362
363 /*
364 * Check that we're in a VM and that the hypervisor identifies itself as Hyper-V.
365 */
366 if (!ASMHasCpuId())
367 return RTErrInfoSet(pErrInfo, VERR_NEM_NOT_AVAILABLE, "No CPUID support");
368 if (!ASMIsValidStdRange(ASMCpuId_EAX(0)))
369 return RTErrInfoSet(pErrInfo, VERR_NEM_NOT_AVAILABLE, "No CPUID leaf #1");
370 if (!(ASMCpuId_ECX(1) & X86_CPUID_FEATURE_ECX_HVP))
371 return RTErrInfoSet(pErrInfo, VERR_NEM_NOT_AVAILABLE, "Not in a hypervisor partition (HVP=0)");
372
373 uint32_t cMaxHyperLeaf = 0;
374 uint32_t uEbx = 0;
375 uint32_t uEcx = 0;
376 uint32_t uEdx = 0;
377 ASMCpuIdExSlow(0x40000000, 0, 0, 0, &cMaxHyperLeaf, &uEbx, &uEcx, &uEdx);
378 if (!ASMIsValidHypervisorRange(cMaxHyperLeaf))
379 return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE, "Invalid hypervisor CPUID range (%#x %#x %#x %#x)",
380 cMaxHyperLeaf, uEbx, uEcx, uEdx);
381 if ( uEbx != UINT32_C(0x7263694d) /* Micr */
382 || uEcx != UINT32_C(0x666f736f) /* osof */
383 || uEdx != UINT32_C(0x76482074) /* t Hv */)
384 return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE,
385 "Not Hyper-V CPUID signature: %#x %#x %#x (expected %#x %#x %#x)",
386 uEbx, uEcx, uEdx, UINT32_C(0x7263694d), UINT32_C(0x666f736f), UINT32_C(0x76482074));
387 if (cMaxHyperLeaf < UINT32_C(0x40000005))
388 return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE, "Too narrow hypervisor CPUID range (%#x)", cMaxHyperLeaf);
389
390 /** @todo would be great if we could recognize a root partition from the
391 * CPUID info, but I currently don't dare do that. */
392
393 /*
394 * Now try load the DLLs and resolve the APIs.
395 */
396 static const char * const s_apszDllNames[2] = { "WinHvPlatform.dll", "vid.dll" };
397 RTLDRMOD ahMods[2] = { NIL_RTLDRMOD, NIL_RTLDRMOD };
398 int rc = VINF_SUCCESS;
399 for (unsigned i = 0; i < RT_ELEMENTS(s_apszDllNames); i++)
400 {
401 int rc2 = RTLdrLoadSystem(s_apszDllNames[i], true /*fNoUnload*/, &ahMods[i]);
402 if (RT_FAILURE(rc2))
403 {
404 if (!RTErrInfoIsSet(pErrInfo))
405 RTErrInfoSetF(pErrInfo, rc2, "Failed to load API DLL: %s: %Rrc", s_apszDllNames[i], rc2);
406 else
407 RTErrInfoAddF(pErrInfo, rc2, "; %s: %Rrc", s_apszDllNames[i], rc2);
408 ahMods[i] = NIL_RTLDRMOD;
409 rc = VERR_NEM_INIT_FAILED;
410 }
411 }
412 if (RT_SUCCESS(rc))
413 {
414#ifdef NEM_WIN_INTERCEPT_NT_IO_CTLS
415 nemR3WinInitVidIntercepts(ahMods[1]);
416#endif
417 for (unsigned i = 0; i < RT_ELEMENTS(g_aImports); i++)
418 {
419 int rc2 = RTLdrGetSymbol(ahMods[g_aImports[i].idxDll], g_aImports[i].pszName, (void **)g_aImports[i].ppfn);
420 if (RT_FAILURE(rc2))
421 {
422 *g_aImports[i].ppfn = NULL;
423
424 LogRel(("NEM: %s: Failed to import %s!%s: %Rrc",
425 g_aImports[i].fOptional ? "info" : fForced ? "fatal" : "error",
426 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName, rc2));
427 if (!g_aImports[i].fOptional)
428 {
429 if (RTErrInfoIsSet(pErrInfo))
430 RTErrInfoAddF(pErrInfo, rc2, ", %s!%s",
431 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName);
432 else
433 rc = RTErrInfoSetF(pErrInfo, rc2, "Failed to import: %s!%s",
434 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName);
435 Assert(RT_FAILURE(rc));
436 }
437 }
438 }
439 if (RT_SUCCESS(rc))
440 Assert(!RTErrInfoIsSet(pErrInfo));
441 }
442
443 for (unsigned i = 0; i < RT_ELEMENTS(ahMods); i++)
444 RTLdrClose(ahMods[i]);
445 return rc;
446}
447
448
449/**
450 * Worker for nemR3NativeInit that gets the hypervisor capabilities.
451 *
452 * @returns VBox status code.
453 * @param pVM The cross context VM structure.
454 * @param pErrInfo Where to always return error info.
455 */
456static int nemR3WinInitCheckCapabilities(PVM pVM, PRTERRINFO pErrInfo)
457{
458#define NEM_LOG_REL_CAP_EX(a_szField, a_szFmt, a_Value) LogRel(("NEM: %-38s= " a_szFmt "\n", a_szField, a_Value))
459#define NEM_LOG_REL_CAP_SUB_EX(a_szField, a_szFmt, a_Value) LogRel(("NEM: %36s: " a_szFmt "\n", a_szField, a_Value))
460#define NEM_LOG_REL_CAP_SUB(a_szField, a_Value) NEM_LOG_REL_CAP_SUB_EX(a_szField, "%d", a_Value)
461
462 /*
463 * Is the hypervisor present with the desired capability?
464 *
465 * In build 17083 this translates into:
466 * - CPUID[0x00000001].HVP is set
467 * - CPUID[0x40000000] == "Microsoft Hv"
468 * - CPUID[0x40000001].eax == "Hv#1"
469 * - CPUID[0x40000003].ebx[12] is set.
470 * - VidGetExoPartitionProperty(INVALID_HANDLE_VALUE, 0x60000, &Ignored) returns
471 * a non-zero value.
472 */
473 /**
474 * @todo Someone at Microsoft please explain weird API design:
475 * 1. Pointless CapabilityCode duplication in the output;
476 * 2. No output size.
477 */
478 WHV_CAPABILITY Caps;
479 RT_ZERO(Caps);
480 SetLastError(0);
481 HRESULT hrc = WHvGetCapability(WHvCapabilityCodeHypervisorPresent, &Caps, sizeof(Caps));
482 DWORD rcWin = GetLastError();
483 if (FAILED(hrc))
484 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
485 "WHvGetCapability/WHvCapabilityCodeHypervisorPresent failed: %Rhrc (Last=%#x/%u)",
486 hrc, RTNtCurrentTeb()->LastStatusValue, RTNtCurrentTeb()->LastErrorValue);
487 if (!Caps.HypervisorPresent)
488 {
489 if (!RTPathExists(RTPATH_NT_PASSTHRU_PREFIX "Device\\VidExo"))
490 return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE,
491 "WHvCapabilityCodeHypervisorPresent is FALSE! Make sure you have enabled the 'Windows Hypervisor Platform' feature.");
492 return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE, "WHvCapabilityCodeHypervisorPresent is FALSE! (%u)", rcWin);
493 }
494 LogRel(("NEM: WHvCapabilityCodeHypervisorPresent is TRUE, so this might work...\n"));
495
496
497 /*
498 * Check what extended VM exits are supported.
499 */
500 RT_ZERO(Caps);
501 hrc = WHvGetCapability(WHvCapabilityCodeExtendedVmExits, &Caps, sizeof(Caps));
502 if (FAILED(hrc))
503 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
504 "WHvGetCapability/WHvCapabilityCodeExtendedVmExits failed: %Rhrc (Last=%#x/%u)",
505 hrc, RTNtCurrentTeb()->LastStatusValue, RTNtCurrentTeb()->LastErrorValue);
506 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeExtendedVmExits", "%'#018RX64", Caps.ExtendedVmExits.AsUINT64);
507 pVM->nem.s.fExtendedMsrExit = RT_BOOL(Caps.ExtendedVmExits.X64MsrExit);
508 pVM->nem.s.fExtendedCpuIdExit = RT_BOOL(Caps.ExtendedVmExits.X64CpuidExit);
509 pVM->nem.s.fExtendedXcptExit = RT_BOOL(Caps.ExtendedVmExits.ExceptionExit);
510 NEM_LOG_REL_CAP_SUB("fExtendedMsrExit", pVM->nem.s.fExtendedMsrExit);
511 NEM_LOG_REL_CAP_SUB("fExtendedCpuIdExit", pVM->nem.s.fExtendedCpuIdExit);
512 NEM_LOG_REL_CAP_SUB("fExtendedXcptExit", pVM->nem.s.fExtendedXcptExit);
513 if (Caps.ExtendedVmExits.AsUINT64 & ~(uint64_t)7)
514 LogRel(("NEM: Warning! Unknown VM exit definitions: %#RX64\n", Caps.ExtendedVmExits.AsUINT64));
515 /** @todo RECHECK: WHV_EXTENDED_VM_EXITS typedef. */
516
517 /*
518 * Check features in case they end up defining any.
519 */
520 RT_ZERO(Caps);
521 hrc = WHvGetCapability(WHvCapabilityCodeFeatures, &Caps, sizeof(Caps));
522 if (FAILED(hrc))
523 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
524 "WHvGetCapability/WHvCapabilityCodeFeatures failed: %Rhrc (Last=%#x/%u)",
525 hrc, RTNtCurrentTeb()->LastStatusValue, RTNtCurrentTeb()->LastErrorValue);
526 if (Caps.Features.AsUINT64 & ~(uint64_t)0)
527 LogRel(("NEM: Warning! Unknown feature definitions: %#RX64\n", Caps.Features.AsUINT64));
528 /** @todo RECHECK: WHV_CAPABILITY_FEATURES typedef. */
529
530 /*
531 * Check that the CPU vendor is supported.
532 */
533 RT_ZERO(Caps);
534 hrc = WHvGetCapability(WHvCapabilityCodeProcessorVendor, &Caps, sizeof(Caps));
535 if (FAILED(hrc))
536 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
537 "WHvGetCapability/WHvCapabilityCodeProcessorVendor failed: %Rhrc (Last=%#x/%u)",
538 hrc, RTNtCurrentTeb()->LastStatusValue, RTNtCurrentTeb()->LastErrorValue);
539 switch (Caps.ProcessorVendor)
540 {
541 /** @todo RECHECK: WHV_PROCESSOR_VENDOR typedef. */
542 case WHvProcessorVendorIntel:
543 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorVendor", "%d - Intel", Caps.ProcessorVendor);
544 pVM->nem.s.enmCpuVendor = CPUMCPUVENDOR_INTEL;
545 break;
546 case WHvProcessorVendorAmd:
547 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorVendor", "%d - AMD", Caps.ProcessorVendor);
548 pVM->nem.s.enmCpuVendor = CPUMCPUVENDOR_AMD;
549 break;
550 default:
551 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorVendor", "%d", Caps.ProcessorVendor);
552 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Unknown processor vendor: %d", Caps.ProcessorVendor);
553 }
554
555 /*
556 * CPU features, guessing these are virtual CPU features?
557 */
558 RT_ZERO(Caps);
559 hrc = WHvGetCapability(WHvCapabilityCodeProcessorFeatures, &Caps, sizeof(Caps));
560 if (FAILED(hrc))
561 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
562 "WHvGetCapability/WHvCapabilityCodeProcessorFeatures failed: %Rhrc (Last=%#x/%u)",
563 hrc, RTNtCurrentTeb()->LastStatusValue, RTNtCurrentTeb()->LastErrorValue);
564 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorFeatures", "%'#018RX64", Caps.ProcessorFeatures.AsUINT64);
565#define NEM_LOG_REL_CPU_FEATURE(a_Field) NEM_LOG_REL_CAP_SUB(#a_Field, Caps.ProcessorFeatures.a_Field)
566 NEM_LOG_REL_CPU_FEATURE(Sse3Support);
567 NEM_LOG_REL_CPU_FEATURE(LahfSahfSupport);
568 NEM_LOG_REL_CPU_FEATURE(Ssse3Support);
569 NEM_LOG_REL_CPU_FEATURE(Sse4_1Support);
570 NEM_LOG_REL_CPU_FEATURE(Sse4_2Support);
571 NEM_LOG_REL_CPU_FEATURE(Sse4aSupport);
572 NEM_LOG_REL_CPU_FEATURE(XopSupport);
573 NEM_LOG_REL_CPU_FEATURE(PopCntSupport);
574 NEM_LOG_REL_CPU_FEATURE(Cmpxchg16bSupport);
575 NEM_LOG_REL_CPU_FEATURE(Altmovcr8Support);
576 NEM_LOG_REL_CPU_FEATURE(LzcntSupport);
577 NEM_LOG_REL_CPU_FEATURE(MisAlignSseSupport);
578 NEM_LOG_REL_CPU_FEATURE(MmxExtSupport);
579 NEM_LOG_REL_CPU_FEATURE(Amd3DNowSupport);
580 NEM_LOG_REL_CPU_FEATURE(ExtendedAmd3DNowSupport);
581 NEM_LOG_REL_CPU_FEATURE(Page1GbSupport);
582 NEM_LOG_REL_CPU_FEATURE(AesSupport);
583 NEM_LOG_REL_CPU_FEATURE(PclmulqdqSupport);
584 NEM_LOG_REL_CPU_FEATURE(PcidSupport);
585 NEM_LOG_REL_CPU_FEATURE(Fma4Support);
586 NEM_LOG_REL_CPU_FEATURE(F16CSupport);
587 NEM_LOG_REL_CPU_FEATURE(RdRandSupport);
588 NEM_LOG_REL_CPU_FEATURE(RdWrFsGsSupport);
589 NEM_LOG_REL_CPU_FEATURE(SmepSupport);
590 NEM_LOG_REL_CPU_FEATURE(EnhancedFastStringSupport);
591 NEM_LOG_REL_CPU_FEATURE(Bmi1Support);
592 NEM_LOG_REL_CPU_FEATURE(Bmi2Support);
593 /* two reserved bits here, see below */
594 NEM_LOG_REL_CPU_FEATURE(MovbeSupport);
595 NEM_LOG_REL_CPU_FEATURE(Npiep1Support);
596 NEM_LOG_REL_CPU_FEATURE(DepX87FPUSaveSupport);
597 NEM_LOG_REL_CPU_FEATURE(RdSeedSupport);
598 NEM_LOG_REL_CPU_FEATURE(AdxSupport);
599 NEM_LOG_REL_CPU_FEATURE(IntelPrefetchSupport);
600 NEM_LOG_REL_CPU_FEATURE(SmapSupport);
601 NEM_LOG_REL_CPU_FEATURE(HleSupport);
602 NEM_LOG_REL_CPU_FEATURE(RtmSupport);
603 NEM_LOG_REL_CPU_FEATURE(RdtscpSupport);
604 NEM_LOG_REL_CPU_FEATURE(ClflushoptSupport);
605 NEM_LOG_REL_CPU_FEATURE(ClwbSupport);
606 NEM_LOG_REL_CPU_FEATURE(ShaSupport);
607 NEM_LOG_REL_CPU_FEATURE(X87PointersSavedSupport);
608#undef NEM_LOG_REL_CPU_FEATURE
609 if (Caps.ProcessorFeatures.AsUINT64 & (~(RT_BIT_64(43) - 1) | RT_BIT_64(27) | RT_BIT_64(28)))
610 LogRel(("NEM: Warning! Unknown CPU features: %#RX64\n", Caps.ProcessorFeatures.AsUINT64));
611 pVM->nem.s.uCpuFeatures.u64 = Caps.ProcessorFeatures.AsUINT64;
612 /** @todo RECHECK: WHV_PROCESSOR_FEATURES typedef. */
613
614 /*
615 * The cache line flush size.
616 */
617 RT_ZERO(Caps);
618 hrc = WHvGetCapability(WHvCapabilityCodeProcessorClFlushSize, &Caps, sizeof(Caps));
619 if (FAILED(hrc))
620 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
621 "WHvGetCapability/WHvCapabilityCodeProcessorClFlushSize failed: %Rhrc (Last=%#x/%u)",
622 hrc, RTNtCurrentTeb()->LastStatusValue, RTNtCurrentTeb()->LastErrorValue);
623 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorClFlushSize", "2^%u", Caps.ProcessorClFlushSize);
624 if (Caps.ProcessorClFlushSize < 8 || Caps.ProcessorClFlushSize > 9)
625 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Unsupported cache line flush size: %u", Caps.ProcessorClFlushSize);
626 pVM->nem.s.cCacheLineFlushShift = Caps.ProcessorClFlushSize;
627
628 /*
629 * See if they've added more properties that we're not aware of.
630 */
631 /** @todo RECHECK: WHV_CAPABILITY_CODE typedef. */
632 if (!IsDebuggerPresent()) /* Too noisy when in debugger, so skip. */
633 {
634 static const struct
635 {
636 uint32_t iMin, iMax; } s_aUnknowns[] =
637 {
638 { 0x0003, 0x000f },
639 { 0x1003, 0x100f },
640 { 0x2000, 0x200f },
641 { 0x3000, 0x300f },
642 { 0x4000, 0x400f },
643 };
644 for (uint32_t j = 0; j < RT_ELEMENTS(s_aUnknowns); j++)
645 for (uint32_t i = s_aUnknowns[j].iMin; i <= s_aUnknowns[j].iMax; i++)
646 {
647 RT_ZERO(Caps);
648 hrc = WHvGetCapability((WHV_CAPABILITY_CODE)i, &Caps, sizeof(Caps));
649 if (SUCCEEDED(hrc))
650 LogRel(("NEM: Warning! Unknown capability %#x returning: %.*Rhxs\n", i, sizeof(Caps), &Caps));
651 }
652 }
653
654#undef NEM_LOG_REL_CAP_EX
655#undef NEM_LOG_REL_CAP_SUB_EX
656#undef NEM_LOG_REL_CAP_SUB
657 return VINF_SUCCESS;
658}
659
660
661/**
662 * Creates and sets up a Hyper-V (exo) partition.
663 *
664 * @returns VBox status code.
665 * @param pVM The cross context VM structure.
666 * @param pErrInfo Where to always return error info.
667 */
668static int nemR3WinInitCreatePartition(PVM pVM, PRTERRINFO pErrInfo)
669{
670 AssertReturn(!pVM->nem.s.hPartition, RTErrInfoSet(pErrInfo, VERR_WRONG_ORDER, "Wrong initialization order"));
671 AssertReturn(!pVM->nem.s.hPartitionDevice, RTErrInfoSet(pErrInfo, VERR_WRONG_ORDER, "Wrong initialization order"));
672
673 /*
674 * Create the partition.
675 */
676 WHV_PARTITION_HANDLE hPartition;
677 HRESULT hrc = WHvCreatePartition(&hPartition);
678 if (FAILED(hrc))
679 return RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED, "WHvCreatePartition failed with %Rhrc (Last=%#x/%u)",
680 hrc, RTNtCurrentTeb()->LastStatusValue, RTNtCurrentTeb()->LastErrorValue);
681
682 int rc;
683
684 /*
685 * Set partition properties, most importantly the CPU count.
686 */
687 /**
688 * @todo Someone at Microsoft please explain another weird API:
689 * - Why this API doesn't take the WHV_PARTITION_PROPERTY_CODE value as an
690 * argument rather than as part of the struct. That is so weird if you've
691 * used any other NT or Windows API, including WHvGetCapability().
692 * - Why use PVOID when WHV_PARTITION_PROPERTY is what's expected. We
693 * technically only need 9 bytes for setting/getting
694 * WHVPartitionPropertyCodeProcessorClFlushSize, but the API insists on 16. */
695 WHV_PARTITION_PROPERTY Property;
696 RT_ZERO(Property);
697 Property.PropertyCode = WHvPartitionPropertyCodeProcessorCount;
698 Property.ProcessorCount = pVM->cCpus;
699 hrc = WHvSetPartitionProperty(hPartition, &Property, sizeof(Property));
700 if (SUCCEEDED(hrc))
701 {
702 RT_ZERO(Property);
703 Property.PropertyCode = WHvPartitionPropertyCodeExtendedVmExits;
704 Property.ExtendedVmExits.X64CpuidExit = pVM->nem.s.fExtendedCpuIdExit;
705 Property.ExtendedVmExits.X64MsrExit = pVM->nem.s.fExtendedMsrExit;
706 Property.ExtendedVmExits.ExceptionExit = pVM->nem.s.fExtendedXcptExit;
707 hrc = WHvSetPartitionProperty(hPartition, &Property, sizeof(Property));
708 if (SUCCEEDED(hrc))
709 {
710 /*
711 * We'll continue setup in nemR3NativeInitAfterCPUM.
712 */
713 pVM->nem.s.fCreatedEmts = false;
714 pVM->nem.s.hPartition = hPartition;
715 LogRel(("NEM: Created partition %p.\n", hPartition));
716 return VINF_SUCCESS;
717 }
718
719 rc = RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED,
720 "Failed setting WHvPartitionPropertyCodeExtendedVmExits to %'#RX64: %Rhrc",
721 Property.ExtendedVmExits.AsUINT64, hrc);
722 }
723 else
724 rc = RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED,
725 "Failed setting WHvPartitionPropertyCodeProcessorCount to %u: %Rhrc (Last=%#x/%u)",
726 pVM->cCpus, hrc, RTNtCurrentTeb()->LastStatusValue, RTNtCurrentTeb()->LastErrorValue);
727 WHvDeletePartition(hPartition);
728
729 Assert(!pVM->nem.s.hPartitionDevice);
730 Assert(!pVM->nem.s.hPartition);
731 return rc;
732}
733
734
735/**
736 * Try to initialize the native API.
737 *
738 * This may only do part of the job, more can be done in
739 * nemR3NativeInitAfterCPUM() and nemR3NativeInitCompleted().
740 *
741 * @returns VBox status code.
742 * @param pVM The cross context VM structure.
743 * @param fFallback Whether we're in fallback mode or use-NEM mode. In
744 * the latter we'll fail if we cannot initialize.
745 * @param fForced Whether the HMForced flag is set and we should
746 * fail if we cannot initialize.
747 */
748int nemR3NativeInit(PVM pVM, bool fFallback, bool fForced)
749{
750 /*
751 * Error state.
752 * The error message will be non-empty on failure and 'rc' will be set too.
753 */
754 RTERRINFOSTATIC ErrInfo;
755 PRTERRINFO pErrInfo = RTErrInfoInitStatic(&ErrInfo);
756 int rc = nemR3WinInitProbeAndLoad(fForced, pErrInfo);
757 if (RT_SUCCESS(rc))
758 {
759 /*
760 * Check the capabilities of the hypervisor, starting with whether it's present.
761 */
762 rc = nemR3WinInitCheckCapabilities(pVM, pErrInfo);
763 if (RT_SUCCESS(rc))
764 {
765 /*
766 * Check out our ring-0 capabilities.
767 */
768 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_NEM_INIT_VM, 0, NULL);
769 if (RT_SUCCESS(rc))
770 {
771 /*
772 * Create and initialize a partition.
773 */
774 rc = nemR3WinInitCreatePartition(pVM, pErrInfo);
775 if (RT_SUCCESS(rc))
776 {
777 VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_NATIVE_API);
778 Log(("NEM: Marked active!\n"));
779 }
780 }
781 }
782 }
783
784 /*
785 * We only fail if in forced mode, otherwise just log the complaint and return.
786 */
787 Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API || RTErrInfoIsSet(pErrInfo));
788 if ( (fForced || !fFallback)
789 && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NATIVE_API)
790 return VMSetError(pVM, RT_SUCCESS_NP(rc) ? VERR_NEM_NOT_AVAILABLE : rc, RT_SRC_POS, "%s", pErrInfo->pszMsg);
791
792 if (RTErrInfoIsSet(pErrInfo))
793 LogRel(("NEM: Not available: %s\n", pErrInfo->pszMsg));
794 return VINF_SUCCESS;
795}
796
797
798/**
799 * This is called after CPUMR3Init is done.
800 *
801 * @returns VBox status code.
802 * @param pVM The VM handle.
803 */
804int nemR3NativeInitAfterCPUM(PVM pVM)
805{
806 /*
807 * Validate sanity.
808 */
809 WHV_PARTITION_HANDLE hPartition = pVM->nem.s.hPartition;
810 AssertReturn(hPartition != NULL, VERR_WRONG_ORDER);
811 AssertReturn(!pVM->nem.s.hPartitionDevice, VERR_WRONG_ORDER);
812 AssertReturn(!pVM->nem.s.fCreatedEmts, VERR_WRONG_ORDER);
813 AssertReturn(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API, VERR_WRONG_ORDER);
814
815 /*
816 * Continue setting up the partition now that we've got most of the CPUID feature stuff.
817 */
818
819 /* Not sure if we really need to set the vendor. */
820 WHV_PARTITION_PROPERTY Property;
821 RT_ZERO(Property);
822 Property.PropertyCode = WHvPartitionPropertyCodeProcessorVendor;
823 Property.ProcessorVendor = pVM->nem.s.enmCpuVendor == CPUMCPUVENDOR_AMD ? WHvProcessorVendorAmd
824 : WHvProcessorVendorIntel;
825 HRESULT hrc = WHvSetPartitionProperty(hPartition, &Property, sizeof(Property));
826 if (FAILED(hrc))
827 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
828 "Failed to set WHvPartitionPropertyCodeProcessorVendor to %u: %Rhrc (Last=%#x/%u)",
829 Property.ProcessorVendor, hrc, RTNtCurrentTeb()->LastStatusValue, RTNtCurrentTeb()->LastErrorValue);
830
831 /* Not sure if we really need to set the cache line flush size. */
832 RT_ZERO(Property);
833 Property.PropertyCode = WHvPartitionPropertyCodeProcessorClFlushSize;
834 Property.ProcessorClFlushSize = pVM->nem.s.cCacheLineFlushShift;
835 hrc = WHvSetPartitionProperty(hPartition, &Property, sizeof(Property));
836 if (FAILED(hrc))
837 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
838 "Failed to set WHvPartitionPropertyCodeProcessorClFlushSize to %u: %Rhrc (Last=%#x/%u)",
839 pVM->nem.s.cCacheLineFlushShift, hrc, RTNtCurrentTeb()->LastStatusValue, RTNtCurrentTeb()->LastErrorValue);
840
841 /*
842 * Sync CPU features with CPUM.
843 */
844 /** @todo sync CPU features with CPUM. */
845
846 /* Set the partition property. */
847 RT_ZERO(Property);
848 Property.PropertyCode = WHvPartitionPropertyCodeProcessorFeatures;
849 Property.ProcessorFeatures.AsUINT64 = pVM->nem.s.uCpuFeatures.u64;
850 hrc = WHvSetPartitionProperty(hPartition, &Property, sizeof(Property));
851 if (FAILED(hrc))
852 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
853 "Failed to set WHvPartitionPropertyCodeProcessorFeatures to %'#RX64: %Rhrc (Last=%#x/%u)",
854 pVM->nem.s.uCpuFeatures.u64, hrc, RTNtCurrentTeb()->LastStatusValue, RTNtCurrentTeb()->LastErrorValue);
855
856 /*
857 * Set up the partition and create EMTs.
858 *
859 * Seems like this is where the partition is actually instantiated and we get
860 * a handle to it.
861 */
862 hrc = WHvSetupPartition(hPartition);
863 if (FAILED(hrc))
864 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
865 "Call to WHvSetupPartition failed: %Rhrc (Last=%#x/%u)",
866 hrc, RTNtCurrentTeb()->LastStatusValue, RTNtCurrentTeb()->LastErrorValue);
867
868 /* Get the handle. */
869 HANDLE hPartitionDevice;
870 __try
871 {
872 hPartitionDevice = ((HANDLE *)hPartition)[1];
873 }
874 __except(EXCEPTION_EXECUTE_HANDLER)
875 {
876 hrc = GetExceptionCode();
877 hPartitionDevice = NULL;
878 }
879 if ( hPartitionDevice == NULL
880 || hPartitionDevice == (HANDLE)(intptr_t)-1)
881 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
882 "Failed to get device handle for partition %p: %Rhrc", hPartition, hrc);
883
884 HV_PARTITION_ID idHvPartition = HV_PARTITION_ID_INVALID;
885 if (!g_pfnVidGetHvPartitionId(hPartitionDevice, &idHvPartition))
886 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
887 "Failed to get device handle and/or partition ID for %p (hPartitionDevice=%p, Last=%#x/%u)",
888 hPartition, hPartitionDevice, RTNtCurrentTeb()->LastStatusValue, RTNtCurrentTeb()->LastErrorValue);
889 pVM->nem.s.hPartitionDevice = hPartitionDevice;
890 pVM->nem.s.idHvPartition = idHvPartition;
891
892 /*
893 * Create EMTs.
894 */
895 VMCPUID iCpu;
896 for (iCpu = 0; iCpu < pVM->cCpus; iCpu++)
897 {
898 hrc = WHvCreateVirtualProcessor(hPartition, iCpu, 0 /*fFlags*/);
899 if (FAILED(hrc))
900 {
901 NTSTATUS const rcNtLast = RTNtCurrentTeb()->LastStatusValue;
902 DWORD const dwErrLast = RTNtCurrentTeb()->LastErrorValue;
903 while (iCpu-- > 0)
904 {
905 HRESULT hrc2 = WHvDeleteVirtualProcessor(hPartition, iCpu);
906 AssertLogRelMsg(SUCCEEDED(hrc2), ("WHvDeleteVirtualProcessor(%p, %u) -> %Rhrc (Last=%#x/%u)\n",
907 hPartition, iCpu, hrc2, RTNtCurrentTeb()->LastStatusValue,
908 RTNtCurrentTeb()->LastErrorValue));
909 }
910 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
911 "Call to WHvSetupPartition failed: %Rhrc (Last=%#x/%u)", hrc, rcNtLast, dwErrLast);
912 }
913 }
914 pVM->nem.s.fCreatedEmts = true;
915
916 LogRel(("NEM: Successfully set up partition (device handle %p, partition ID %#llx)\n", hPartitionDevice, idHvPartition));
917 return VINF_SUCCESS;
918}
919
920
921int nemR3NativeInitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
922{
923 NOREF(pVM); NOREF(enmWhat);
924 return VINF_SUCCESS;
925}
926
927
928int nemR3NativeTerm(PVM pVM)
929{
930 /*
931 * Delete the partition.
932 */
933 WHV_PARTITION_HANDLE hPartition = pVM->nem.s.hPartition;
934 pVM->nem.s.hPartition = NULL;
935 pVM->nem.s.hPartitionDevice = NULL;
936 if (hPartition != NULL)
937 {
938 VMCPUID iCpu = pVM->nem.s.fCreatedEmts ? pVM->cCpus : 0;
939 LogRel(("NEM: Destroying partition %p with its %u VCpus...\n", hPartition, iCpu));
940 while (iCpu-- > 0)
941 {
942 HRESULT hrc = WHvDeleteVirtualProcessor(hPartition, iCpu);
943 AssertLogRelMsg(SUCCEEDED(hrc), ("WHvDeleteVirtualProcessor(%p, %u) -> %Rhrc (Last=%#x/%u)\n",
944 hPartition, iCpu, hrc, RTNtCurrentTeb()->LastStatusValue,
945 RTNtCurrentTeb()->LastErrorValue));
946 }
947 WHvDeletePartition(hPartition);
948 }
949 pVM->nem.s.fCreatedEmts = false;
950 return VINF_SUCCESS;
951}
952
953
954/**
955 * VM reset notification.
956 *
957 * @param pVM The cross context VM structure.
958 */
959void nemR3NativeReset(PVM pVM)
960{
961 /* Unfix the A20 gate. */
962 pVM->nem.s.fA20Fixed = false;
963}
964
965
966/**
967 * Reset CPU due to INIT IPI or hot (un)plugging.
968 *
969 * @param pVCpu The cross context virtual CPU structure of the CPU being
970 * reset.
971 * @param fInitIpi Whether this is the INIT IPI or hot (un)plugging case.
972 */
973void nemR3NativeResetCpu(PVMCPU pVCpu, bool fInitIpi)
974{
975 /* Lock the A20 gate if INIT IPI, make sure it's enabled. */
976 if (fInitIpi && pVCpu->idCpu > 0)
977 {
978 PVM pVM = pVCpu->CTX_SUFF(pVM);
979 if (!pVM->nem.s.fA20Enabled)
980 nemR3NativeNotifySetA20(pVCpu, true);
981 pVM->nem.s.fA20Enabled = true;
982 pVM->nem.s.fA20Fixed = true;
983 }
984}
985
986
987#ifdef NEM_WIN_USE_HYPERCALLS
988
989/**
990 * Wrapper around VMMR0_DO_NEM_MAP_PAGES for a single page.
991 *
992 * @returns VBox status code.
993 * @param pVM The cross context VM structure.
994 * @param pVCpu The cross context virtual CPU structure of the caller.
995 * @param GCPhysSrc The source page. Does not need to be page aligned.
996 * @param GCPhysDst The destination page. Same as @a GCPhysSrc except for
997 * when A20 is disabled.
998 * @param fFlags HV_MAP_GPA_XXX.
999 */
1000DECLINLINE(int) nemR3WinHypercallMapPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst, uint32_t fFlags)
1001{
1002 pVCpu->nem.s.Hypercall.MapPages.GCPhysSrc = GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
1003 pVCpu->nem.s.Hypercall.MapPages.GCPhysDst = GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
1004 pVCpu->nem.s.Hypercall.MapPages.cPages = 1;
1005 pVCpu->nem.s.Hypercall.MapPages.fFlags = fFlags;
1006 return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_MAP_PAGES, 0, NULL);
1007}
1008
1009
1010/**
1011 * Wrapper around VMMR0_DO_NEM_UNMAP_PAGES for a single page.
1012 *
1013 * @returns VBox status code.
1014 * @param pVM The cross context VM structure.
1015 * @param pVCpu The cross context virtual CPU structure of the caller.
1016 * @param GCPhys The page to unmap. Does not need to be page aligned.
1017 */
1018DECLINLINE(int) nemR3WinHypercallUnmapPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
1019{
1020 pVCpu->nem.s.Hypercall.UnmapPages.GCPhys = GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
1021 pVCpu->nem.s.Hypercall.UnmapPages.cPages = 1;
1022 return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_UNMAP_PAGES, 0, NULL);
1023}
1024
1025#endif /* NEM_WIN_USE_HYPERCALLS */
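/* Usage sketch (hypothetical helper and flag names; the real callers come further down
   in nemR3NativeSetPhysPage and friends): map a guest page through ring-0, redirecting
   the 1 MB wrap-around window down to the low 64 KB while A20 is masked. */
#if 0 /* sketch */
static int nemR3WinExampleMapPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
{
    RTGCPHYS const GCPhysDst = !pVM->nem.s.fA20Enabled && NEM_WIN_IS_SUBJECT_TO_A20(GCPhys)
                             ? GCPhys - _1M /* clear bit 20: 1M..1M+64K aliases 0..64K */
                             : GCPhys;
    return nemR3WinHypercallMapPage(pVM, pVCpu, GCPhys, GCPhysDst,
                                    HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE | HV_MAP_GPA_EXECUTABLE);
}
#endif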
1026
1027static int nemR3WinCopyStateToHyperV(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1028{
1029 WHV_REGISTER_NAME aenmNames[128];
1030 WHV_REGISTER_VALUE aValues[128];
1031
1032 /* GPRs */
1033 aenmNames[0] = WHvX64RegisterRax;
1034 aValues[0].Reg64 = pCtx->rax;
1035 aenmNames[1] = WHvX64RegisterRcx;
1036 aValues[1].Reg64 = pCtx->rcx;
1037 aenmNames[2] = WHvX64RegisterRdx;
1038 aValues[2].Reg64 = pCtx->rdx;
1039 aenmNames[3] = WHvX64RegisterRbx;
1040 aValues[3].Reg64 = pCtx->rbx;
1041 aenmNames[4] = WHvX64RegisterRsp;
1042 aValues[4].Reg64 = pCtx->rsp;
1043 aenmNames[5] = WHvX64RegisterRbp;
1044 aValues[5].Reg64 = pCtx->rbp;
1045 aenmNames[6] = WHvX64RegisterRsi;
1046 aValues[6].Reg64 = pCtx->rsi;
1047 aenmNames[7] = WHvX64RegisterRdi;
1048 aValues[7].Reg64 = pCtx->rdi;
1049 aenmNames[8] = WHvX64RegisterR8;
1050 aValues[8].Reg64 = pCtx->r8;
1051 aenmNames[9] = WHvX64RegisterR9;
1052 aValues[9].Reg64 = pCtx->r9;
1053 aenmNames[10] = WHvX64RegisterR10;
1054 aValues[10].Reg64 = pCtx->r10;
1055 aenmNames[11] = WHvX64RegisterR11;
1056 aValues[11].Reg64 = pCtx->r11;
1057 aenmNames[12] = WHvX64RegisterR12;
1058 aValues[12].Reg64 = pCtx->r12;
1059 aenmNames[13] = WHvX64RegisterR13;
1060 aValues[13].Reg64 = pCtx->r13;
1061 aenmNames[14] = WHvX64RegisterR14;
1062 aValues[14].Reg64 = pCtx->r14;
1063 aenmNames[15] = WHvX64RegisterR15;
1064 aValues[15].Reg64 = pCtx->r15;
1065
1066 /* RIP & Flags */
1067 aenmNames[16] = WHvX64RegisterRip;
1068 aValues[16].Reg64 = pCtx->rip;
1069 aenmNames[17] = WHvX64RegisterRflags;
1070 aValues[17].Reg64 = pCtx->rflags.u;
1071
1072 /* Segments */
1073#define COPY_OUT_SEG(a_idx, a_enmName, a_SReg) \
1074 do { \
1075 aenmNames[a_idx] = a_enmName; \
1076 aValues[a_idx].Segment.Base = (a_SReg).u64Base; \
1077 aValues[a_idx].Segment.Limit = (a_SReg).u32Limit; \
1078 aValues[a_idx].Segment.Selector = (a_SReg).Sel; \
1079 aValues[a_idx].Segment.Attributes = (a_SReg).Attr.u; \
1080 } while (0)
1081 COPY_OUT_SEG(18, WHvX64RegisterEs, pCtx->es);
1082 COPY_OUT_SEG(19, WHvX64RegisterCs, pCtx->cs);
1083 COPY_OUT_SEG(20, WHvX64RegisterSs, pCtx->ss);
1084 COPY_OUT_SEG(21, WHvX64RegisterDs, pCtx->ds);
1085 COPY_OUT_SEG(22, WHvX64RegisterFs, pCtx->fs);
1086 COPY_OUT_SEG(23, WHvX64RegisterGs, pCtx->gs);
1087 COPY_OUT_SEG(24, WHvX64RegisterLdtr, pCtx->ldtr);
1088 COPY_OUT_SEG(25, WHvX64RegisterTr, pCtx->tr);
1089
1090 uintptr_t iReg = 26;
1091 /* Descriptor tables. */
1092 aenmNames[iReg] = WHvX64RegisterIdtr;
1093 aValues[iReg].Table.Limit = pCtx->idtr.cbIdt;
1094 aValues[iReg].Table.Base = pCtx->idtr.pIdt;
1095 iReg++;
1096 aenmNames[iReg] = WHvX64RegisterGdtr;
1097 aValues[iReg].Table.Limit = pCtx->gdtr.cbGdt;
1098 aValues[iReg].Table.Base = pCtx->gdtr.pGdt;
1099 iReg++;
1100
1101 /* Control registers. */
1102 aenmNames[iReg] = WHvX64RegisterCr0;
1103 aValues[iReg].Reg64 = pCtx->cr0;
1104 iReg++;
1105 aenmNames[iReg] = WHvX64RegisterCr2;
1106 aValues[iReg].Reg64 = pCtx->cr2;
1107 iReg++;
1108 aenmNames[iReg] = WHvX64RegisterCr3;
1109 aValues[iReg].Reg64 = pCtx->cr3;
1110 iReg++;
1111 aenmNames[iReg] = WHvX64RegisterCr4;
1112 aValues[iReg].Reg64 = pCtx->cr4;
1113 iReg++;
1114 aenmNames[iReg] = WHvX64RegisterCr8;
1115 aValues[iReg].Reg64 = CPUMGetGuestCR8(pVCpu);
1116 iReg++;
1117
1118 /* Debug registers. */
1119/** @todo fixme. Figure out what the Hyper-V version of KVM_SET_GUEST_DEBUG would be. */
1120 aenmNames[iReg] = WHvX64RegisterDr0;
1121 //aValues[iReg].Reg64 = CPUMGetHyperDR0(pVCpu);
1122 aValues[iReg].Reg64 = pCtx->dr[0];
1123 iReg++;
1124 aenmNames[iReg] = WHvX64RegisterDr1;
1125 //aValues[iReg].Reg64 = CPUMGetHyperDR1(pVCpu);
1126 aValues[iReg].Reg64 = pCtx->dr[1];
1127 iReg++;
1128 aenmNames[iReg] = WHvX64RegisterDr2;
1129 //aValues[iReg].Reg64 = CPUMGetHyperDR2(pVCpu);
1130 aValues[iReg].Reg64 = pCtx->dr[2];
1131 iReg++;
1132 aenmNames[iReg] = WHvX64RegisterDr3;
1133 //aValues[iReg].Reg64 = CPUMGetHyperDR3(pVCpu);
1134 aValues[iReg].Reg64 = pCtx->dr[3];
1135 iReg++;
1136 aenmNames[iReg] = WHvX64RegisterDr6;
1137 //aValues[iReg].Reg64 = CPUMGetHyperDR6(pVCpu);
1138 aValues[iReg].Reg64 = pCtx->dr[6];
1139 iReg++;
1140 aenmNames[iReg] = WHvX64RegisterDr7;
1141 //aValues[iReg].Reg64 = CPUMGetHyperDR7(pVCpu);
1142 aValues[iReg].Reg64 = pCtx->dr[7];
1143 iReg++;
1144
1145 /* Vector state. */
1146 aenmNames[iReg] = WHvX64RegisterXmm0;
1147 aValues[iReg].Reg128.Low64 = pCtx->pXStateR3->x87.aXMM[0].uXmm.s.Lo;
1148 aValues[iReg].Reg128.High64 = pCtx->pXStateR3->x87.aXMM[0].uXmm.s.Hi;
1149 iReg++;
1150 aenmNames[iReg] = WHvX64RegisterXmm1;
1151 aValues[iReg].Reg128.Low64 = pCtx->pXStateR3->x87.aXMM[1].uXmm.s.Lo;
1152 aValues[iReg].Reg128.High64 = pCtx->pXStateR3->x87.aXMM[1].uXmm.s.Hi;
1153 iReg++;
1154 aenmNames[iReg] = WHvX64RegisterXmm2;
1155 aValues[iReg].Reg128.Low64 = pCtx->pXStateR3->x87.aXMM[2].uXmm.s.Lo;
1156 aValues[iReg].Reg128.High64 = pCtx->pXStateR3->x87.aXMM[2].uXmm.s.Hi;
1157 iReg++;
1158 aenmNames[iReg] = WHvX64RegisterXmm3;
1159 aValues[iReg].Reg128.Low64 = pCtx->pXStateR3->x87.aXMM[3].uXmm.s.Lo;
1160 aValues[iReg].Reg128.High64 = pCtx->pXStateR3->x87.aXMM[3].uXmm.s.Hi;
1161 iReg++;
1162 aenmNames[iReg] = WHvX64RegisterXmm4;
1163 aValues[iReg].Reg128.Low64 = pCtx->pXStateR3->x87.aXMM[4].uXmm.s.Lo;
1164 aValues[iReg].Reg128.High64 = pCtx->pXStateR3->x87.aXMM[4].uXmm.s.Hi;
1165 iReg++;
1166 aenmNames[iReg] = WHvX64RegisterXmm5;
1167 aValues[iReg].Reg128.Low64 = pCtx->pXStateR3->x87.aXMM[5].uXmm.s.Lo;
1168 aValues[iReg].Reg128.High64 = pCtx->pXStateR3->x87.aXMM[5].uXmm.s.Hi;
1169 iReg++;
1170 aenmNames[iReg] = WHvX64RegisterXmm6;
1171 aValues[iReg].Reg128.Low64 = pCtx->pXStateR3->x87.aXMM[6].uXmm.s.Lo;
1172 aValues[iReg].Reg128.High64 = pCtx->pXStateR3->x87.aXMM[6].uXmm.s.Hi;
1173 iReg++;
1174 aenmNames[iReg] = WHvX64RegisterXmm7;
1175 aValues[iReg].Reg128.Low64 = pCtx->pXStateR3->x87.aXMM[7].uXmm.s.Lo;
1176 aValues[iReg].Reg128.High64 = pCtx->pXStateR3->x87.aXMM[7].uXmm.s.Hi;
1177 iReg++;
1178 aenmNames[iReg] = WHvX64RegisterXmm8;
1179 aValues[iReg].Reg128.Low64 = pCtx->pXStateR3->x87.aXMM[8].uXmm.s.Lo;
1180 aValues[iReg].Reg128.High64 = pCtx->pXStateR3->x87.aXMM[8].uXmm.s.Hi;
1181 iReg++;
1182 aenmNames[iReg] = WHvX64RegisterXmm9;
1183 aValues[iReg].Reg128.Low64 = pCtx->pXStateR3->x87.aXMM[9].uXmm.s.Lo;
1184 aValues[iReg].Reg128.High64 = pCtx->pXStateR3->x87.aXMM[9].uXmm.s.Hi;
1185 iReg++;
1186 aenmNames[iReg] = WHvX64RegisterXmm10;
1187 aValues[iReg].Reg128.Low64 = pCtx->pXStateR3->x87.aXMM[10].uXmm.s.Lo;
1188 aValues[iReg].Reg128.High64 = pCtx->pXStateR3->x87.aXMM[10].uXmm.s.Hi;
1189 iReg++;
1190 aenmNames[iReg] = WHvX64RegisterXmm11;
1191 aValues[iReg].Reg128.Low64 = pCtx->pXStateR3->x87.aXMM[11].uXmm.s.Lo;
1192 aValues[iReg].Reg128.High64 = pCtx->pXStateR3->x87.aXMM[11].uXmm.s.Hi;
1193 iReg++;
1194 aenmNames[iReg] = WHvX64RegisterXmm12;
1195 aValues[iReg].Reg128.Low64 = pCtx->pXStateR3->x87.aXMM[12].uXmm.s.Lo;
1196 aValues[iReg].Reg128.High64 = pCtx->pXStateR3->x87.aXMM[12].uXmm.s.Hi;
1197 iReg++;
1198 aenmNames[iReg] = WHvX64RegisterXmm13;
1199 aValues[iReg].Reg128.Low64 = pCtx->pXStateR3->x87.aXMM[13].uXmm.s.Lo;
1200 aValues[iReg].Reg128.High64 = pCtx->pXStateR3->x87.aXMM[13].uXmm.s.Hi;
1201 iReg++;
1202 aenmNames[iReg] = WHvX64RegisterXmm14;
1203 aValues[iReg].Reg128.Low64 = pCtx->pXStateR3->x87.aXMM[14].uXmm.s.Lo;
1204 aValues[iReg].Reg128.High64 = pCtx->pXStateR3->x87.aXMM[14].uXmm.s.Hi;
1205 iReg++;
1206 aenmNames[iReg] = WHvX64RegisterXmm15;
1207 aValues[iReg].Reg128.Low64 = pCtx->pXStateR3->x87.aXMM[15].uXmm.s.Lo;
1208 aValues[iReg].Reg128.High64 = pCtx->pXStateR3->x87.aXMM[15].uXmm.s.Hi;
1209 iReg++;
1210
1211 /* Floating point state. */
1212 aenmNames[iReg] = WHvX64RegisterFpMmx0;
1213 aValues[iReg].Fp.AsUINT128.Low64 = pCtx->pXStateR3->x87.aRegs[0].au64[0];
1214 aValues[iReg].Fp.AsUINT128.High64 = pCtx->pXStateR3->x87.aRegs[0].au64[1];
1215 iReg++;
1216 aenmNames[iReg] = WHvX64RegisterFpMmx1;
1217 aValues[iReg].Fp.AsUINT128.Low64 = pCtx->pXStateR3->x87.aRegs[1].au64[0];
1218 aValues[iReg].Fp.AsUINT128.High64 = pCtx->pXStateR3->x87.aRegs[1].au64[1];
1219 iReg++;
1220 aenmNames[iReg] = WHvX64RegisterFpMmx2;
1221 aValues[iReg].Fp.AsUINT128.Low64 = pCtx->pXStateR3->x87.aRegs[2].au64[0];
1222 aValues[iReg].Fp.AsUINT128.High64 = pCtx->pXStateR3->x87.aRegs[2].au64[1];
1223 iReg++;
1224 aenmNames[iReg] = WHvX64RegisterFpMmx3;
1225 aValues[iReg].Fp.AsUINT128.Low64 = pCtx->pXStateR3->x87.aRegs[3].au64[0];
1226 aValues[iReg].Fp.AsUINT128.High64 = pCtx->pXStateR3->x87.aRegs[3].au64[1];
1227 iReg++;
1228 aenmNames[iReg] = WHvX64RegisterFpMmx4;
1229 aValues[iReg].Fp.AsUINT128.Low64 = pCtx->pXStateR3->x87.aRegs[4].au64[0];
1230 aValues[iReg].Fp.AsUINT128.High64 = pCtx->pXStateR3->x87.aRegs[4].au64[1];
1231 iReg++;
1232 aenmNames[iReg] = WHvX64RegisterFpMmx5;
1233 aValues[iReg].Fp.AsUINT128.Low64 = pCtx->pXStateR3->x87.aRegs[5].au64[0];
1234 aValues[iReg].Fp.AsUINT128.High64 = pCtx->pXStateR3->x87.aRegs[5].au64[1];
1235 iReg++;
1236 aenmNames[iReg] = WHvX64RegisterFpMmx6;
1237 aValues[iReg].Fp.AsUINT128.Low64 = pCtx->pXStateR3->x87.aRegs[6].au64[0];
1238 aValues[iReg].Fp.AsUINT128.High64 = pCtx->pXStateR3->x87.aRegs[6].au64[1];
1239 iReg++;
1240 aenmNames[iReg] = WHvX64RegisterFpMmx7;
1241 aValues[iReg].Fp.AsUINT128.Low64 = pCtx->pXStateR3->x87.aRegs[7].au64[0];
1242 aValues[iReg].Fp.AsUINT128.High64 = pCtx->pXStateR3->x87.aRegs[7].au64[1];
1243 iReg++;
1244
1245 aenmNames[iReg] = WHvX64RegisterFpControlStatus;
1246 aValues[iReg].FpControlStatus.FpControl = pCtx->pXStateR3->x87.FCW;
1247 aValues[iReg].FpControlStatus.FpStatus = pCtx->pXStateR3->x87.FSW;
1248 aValues[iReg].FpControlStatus.FpTag = pCtx->pXStateR3->x87.FTW;
1249 aValues[iReg].FpControlStatus.Reserved = pCtx->pXStateR3->x87.FTW >> 8;
1250 aValues[iReg].FpControlStatus.LastFpOp = pCtx->pXStateR3->x87.FOP;
1251 aValues[iReg].FpControlStatus.LastFpRip = (pCtx->pXStateR3->x87.FPUIP)
1252 | ((uint64_t)pCtx->pXStateR3->x87.CS << 32)
1253 | ((uint64_t)pCtx->pXStateR3->x87.Rsrvd1 << 48);
1254 iReg++;
1255
1256 aenmNames[iReg] = WHvX64RegisterXmmControlStatus;
1257 aValues[iReg].XmmControlStatus.LastFpRdp = (pCtx->pXStateR3->x87.FPUDP)
1258 | ((uint64_t)pCtx->pXStateR3->x87.DS << 32)
1259 | ((uint64_t)pCtx->pXStateR3->x87.Rsrvd2 << 48);
1260 aValues[iReg].XmmControlStatus.XmmStatusControl = pCtx->pXStateR3->x87.MXCSR;
1261 aValues[iReg].XmmControlStatus.XmmStatusControlMask = pCtx->pXStateR3->x87.MXCSR_MASK; /** @todo ??? (Isn't this an output field?) */
1262 iReg++;
1263
1264 /* MSRs */
1265 // WHvX64RegisterTsc - don't touch
1266 aenmNames[iReg] = WHvX64RegisterEfer;
1267 aValues[iReg].Reg64 = pCtx->msrEFER;
1268 iReg++;
1269 aenmNames[iReg] = WHvX64RegisterKernelGsBase;
1270 aValues[iReg].Reg64 = pCtx->msrKERNELGSBASE;
1271 iReg++;
1272 aenmNames[iReg] = WHvX64RegisterApicBase;
1273 aValues[iReg].Reg64 = APICGetBaseMsrNoCheck(pVCpu);
1274 iReg++;
1275 aenmNames[iReg] = WHvX64RegisterPat;
1276 aValues[iReg].Reg64 = pCtx->msrPAT;
1277 iReg++;
1278 /// @todo WHvX64RegisterSysenterCs
1279 /// @todo WHvX64RegisterSysenterEip
1280 /// @todo WHvX64RegisterSysenterEsp
1281 aenmNames[iReg] = WHvX64RegisterStar;
1282 aValues[iReg].Reg64 = pCtx->msrSTAR;
1283 iReg++;
1284 aenmNames[iReg] = WHvX64RegisterLstar;
1285 aValues[iReg].Reg64 = pCtx->msrLSTAR;
1286 iReg++;
1287 aenmNames[iReg] = WHvX64RegisterCstar;
1288 aValues[iReg].Reg64 = pCtx->msrCSTAR;
1289 iReg++;
1290 aenmNames[iReg] = WHvX64RegisterSfmask;
1291 aValues[iReg].Reg64 = pCtx->msrSFMASK;
1292 iReg++;
1293
1294 /* event injection (always clear it). */
1295 /** @todo Someone at Microsoft please explain why HV_X64_PENDING_INTERRUPTION_REGISTER
1296 * and HV_X64_INTERRUPT_STATE_REGISTER are missing from the headers. Ditto for
1297 * whatever structures WHvRegisterPendingEvent0/1 use. */
1298 aenmNames[iReg] = WHvRegisterPendingInterruption;
1299 aValues[iReg].Reg64 = 0;
1300 iReg++;
1301 /// @todo WHvRegisterInterruptState
1302 /// @todo WHvRegisterPendingEvent0
1303 /// @todo WHvRegisterPendingEvent1
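/* What actually filling in the pending interruption (rather than clearing it) might look
   like, using the reconstructed MISSINGPENDINGINTERRUPTIONREG from the top of the file
   (purely a sketch; u8Vector is a made-up local): */
#if 0 /* sketch */
    MISSINGPENDINGINTERRUPTIONREG Pending;
    RT_ZERO(Pending);
    Pending.fInterruptionPending = 1;
    Pending.enmInterruptionType  = kPendingIntType_Interrupt;
    Pending.InterruptionVector   = u8Vector;
    aValues[iReg].Reg64          = Pending.au64[0]; /* would replace the zero assignment above */
#endif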
1304
1305 /*
1306 * Set the registers.
1307 */
1308 Assert(iReg < RT_ELEMENTS(aValues));
1309 Assert(iReg < RT_ELEMENTS(aenmNames));
1310#ifdef NEM_WIN_INTERCEPT_NT_IO_CTLS
1311 Log12(("Calling WHvSetVirtualProcessorRegisters(%p, %u, %p, %u, %p)\n",
1312 pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues));
1313#endif
1314 HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues);
1315 if (SUCCEEDED(hrc))
1316 return VINF_SUCCESS;
1317 AssertLogRelMsgFailed(("WHvSetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
1318 pVM->nem.s.hPartition, pVCpu->idCpu, iReg,
1319 hrc, RTNtCurrentTeb()->LastStatusValue, RTNtCurrentTeb()->LastErrorValue));
1320 return VERR_INTERNAL_ERROR;
1321}
1322
1323static int nemR3WinCopyStateFromHyperV(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1324{
1325 WHV_REGISTER_NAME aenmNames[128];
1326
1327 /* GPRs */
1328 aenmNames[0] = WHvX64RegisterRax;
1329 aenmNames[1] = WHvX64RegisterRcx;
1330 aenmNames[2] = WHvX64RegisterRdx;
1331 aenmNames[3] = WHvX64RegisterRbx;
1332 aenmNames[4] = WHvX64RegisterRsp;
1333 aenmNames[5] = WHvX64RegisterRbp;
1334 aenmNames[6] = WHvX64RegisterRsi;
1335 aenmNames[7] = WHvX64RegisterRdi;
1336 aenmNames[8] = WHvX64RegisterR8;
1337 aenmNames[9] = WHvX64RegisterR9;
1338 aenmNames[10] = WHvX64RegisterR10;
1339 aenmNames[11] = WHvX64RegisterR11;
1340 aenmNames[12] = WHvX64RegisterR12;
1341 aenmNames[13] = WHvX64RegisterR13;
1342 aenmNames[14] = WHvX64RegisterR14;
1343 aenmNames[15] = WHvX64RegisterR15;
1344
1345 /* RIP & Flags */
1346 aenmNames[16] = WHvX64RegisterRip;
1347 aenmNames[17] = WHvX64RegisterRflags;
1348
1349 /* Segments */
1350 aenmNames[18] = WHvX64RegisterEs;
1351 aenmNames[19] = WHvX64RegisterCs;
1352 aenmNames[20] = WHvX64RegisterSs;
1353 aenmNames[21] = WHvX64RegisterDs;
1354 aenmNames[22] = WHvX64RegisterFs;
1355 aenmNames[23] = WHvX64RegisterGs;
1356 aenmNames[24] = WHvX64RegisterLdtr;
1357 aenmNames[25] = WHvX64RegisterTr;
1358
1359 /* Descriptor tables. */
1360 aenmNames[26] = WHvX64RegisterIdtr;
1361 aenmNames[27] = WHvX64RegisterGdtr;
1362
1363 /* Control registers. */
1364 aenmNames[28] = WHvX64RegisterCr0;
1365 aenmNames[29] = WHvX64RegisterCr2;
1366 aenmNames[30] = WHvX64RegisterCr3;
1367 aenmNames[31] = WHvX64RegisterCr4;
1368 aenmNames[32] = WHvX64RegisterCr8;
1369
1370 /* Debug registers. */
1371 aenmNames[33] = WHvX64RegisterDr0;
1372 aenmNames[34] = WHvX64RegisterDr1;
1373 aenmNames[35] = WHvX64RegisterDr2;
1374 aenmNames[36] = WHvX64RegisterDr3;
1375 aenmNames[37] = WHvX64RegisterDr6;
1376 aenmNames[38] = WHvX64RegisterDr7;
1377
1378 /* Vector state. */
1379 aenmNames[39] = WHvX64RegisterXmm0;
1380 aenmNames[40] = WHvX64RegisterXmm1;
1381 aenmNames[41] = WHvX64RegisterXmm2;
1382 aenmNames[42] = WHvX64RegisterXmm3;
1383 aenmNames[43] = WHvX64RegisterXmm4;
1384 aenmNames[44] = WHvX64RegisterXmm5;
1385 aenmNames[45] = WHvX64RegisterXmm6;
1386 aenmNames[46] = WHvX64RegisterXmm7;
1387 aenmNames[47] = WHvX64RegisterXmm8;
1388 aenmNames[48] = WHvX64RegisterXmm9;
1389 aenmNames[49] = WHvX64RegisterXmm10;
1390 aenmNames[50] = WHvX64RegisterXmm11;
1391 aenmNames[51] = WHvX64RegisterXmm12;
1392 aenmNames[52] = WHvX64RegisterXmm13;
1393 aenmNames[53] = WHvX64RegisterXmm14;
1394 aenmNames[54] = WHvX64RegisterXmm15;
1395
1396 /* Floating point state. */
1397 aenmNames[55] = WHvX64RegisterFpMmx0;
1398 aenmNames[56] = WHvX64RegisterFpMmx1;
1399 aenmNames[57] = WHvX64RegisterFpMmx2;
1400 aenmNames[58] = WHvX64RegisterFpMmx3;
1401 aenmNames[59] = WHvX64RegisterFpMmx4;
1402 aenmNames[60] = WHvX64RegisterFpMmx5;
1403 aenmNames[61] = WHvX64RegisterFpMmx6;
1404 aenmNames[62] = WHvX64RegisterFpMmx7;
1405 aenmNames[63] = WHvX64RegisterFpControlStatus;
1406 aenmNames[64] = WHvX64RegisterXmmControlStatus;
1407
1408 /* MSRs */
1409 // WHvX64RegisterTsc - don't touch
1410 aenmNames[65] = WHvX64RegisterEfer;
1411 aenmNames[66] = WHvX64RegisterKernelGsBase;
1412 aenmNames[67] = WHvX64RegisterApicBase;
1413 aenmNames[68] = WHvX64RegisterPat;
1414 aenmNames[69] = WHvX64RegisterSysenterCs;
1415 aenmNames[70] = WHvX64RegisterSysenterEip;
1416 aenmNames[71] = WHvX64RegisterSysenterEsp;
1417 aenmNames[72] = WHvX64RegisterStar;
1418 aenmNames[73] = WHvX64RegisterLstar;
1419 aenmNames[74] = WHvX64RegisterCstar;
1420 aenmNames[75] = WHvX64RegisterSfmask;
1421
1422 /* event injection */
1423 aenmNames[76] = WHvRegisterPendingInterruption;
1424 aenmNames[77] = WHvRegisterInterruptState;
 1425 aenmNames[78] = WHvRegisterPendingEvent0;
 1426 aenmNames[79] = WHvRegisterPendingEvent1;
 1427 unsigned const cRegs = 80;
1429
1430 /*
1431 * Get the registers.
1432 */
1433 WHV_REGISTER_VALUE aValues[cRegs];
1434 RT_ZERO(aValues);
1435 Assert(RT_ELEMENTS(aValues) >= cRegs);
1436 Assert(RT_ELEMENTS(aenmNames) >= cRegs);
1437#ifdef NEM_WIN_INTERCEPT_NT_IO_CTLS
1438 Log12(("Calling WHvGetVirtualProcessorRegisters(%p, %u, %p, %u, %p)\n",
1439 pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, cRegs, aValues));
1440#endif
1441 HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, cRegs, aValues);
1442 if (SUCCEEDED(hrc))
1443 {
1444 /* GPRs */
1445 Assert(aenmNames[0] == WHvX64RegisterRax);
1446 Assert(aenmNames[15] == WHvX64RegisterR15);
1447 pCtx->rax = aValues[0].Reg64;
1448 pCtx->rcx = aValues[1].Reg64;
1449 pCtx->rdx = aValues[2].Reg64;
1450 pCtx->rbx = aValues[3].Reg64;
1451 pCtx->rsp = aValues[4].Reg64;
1452 pCtx->rbp = aValues[5].Reg64;
1453 pCtx->rsi = aValues[6].Reg64;
1454 pCtx->rdi = aValues[7].Reg64;
1455 pCtx->r8 = aValues[8].Reg64;
1456 pCtx->r9 = aValues[9].Reg64;
1457 pCtx->r10 = aValues[10].Reg64;
1458 pCtx->r11 = aValues[11].Reg64;
1459 pCtx->r12 = aValues[12].Reg64;
1460 pCtx->r13 = aValues[13].Reg64;
1461 pCtx->r14 = aValues[14].Reg64;
1462 pCtx->r15 = aValues[15].Reg64;
1463
1464 /* RIP & Flags */
1465 Assert(aenmNames[16] == WHvX64RegisterRip);
1466 pCtx->rip = aValues[16].Reg64;
1467 pCtx->rflags.u = aValues[17].Reg64;
1468
1469 /* Segments */
1470#define COPY_BACK_SEG(a_idx, a_enmName, a_SReg) \
1471 do { \
1472 Assert(aenmNames[a_idx] == a_enmName); \
1473 (a_SReg).u64Base = aValues[a_idx].Segment.Base; \
1474 (a_SReg).u32Limit = aValues[a_idx].Segment.Limit; \
1475 (a_SReg).ValidSel = (a_SReg).Sel = aValues[a_idx].Segment.Selector; \
1476 (a_SReg).Attr.u = aValues[a_idx].Segment.Attributes; \
1477 (a_SReg).fFlags = CPUMSELREG_FLAGS_VALID; \
1478 } while (0)
1479 COPY_BACK_SEG(18, WHvX64RegisterEs, pCtx->es);
1480 COPY_BACK_SEG(19, WHvX64RegisterCs, pCtx->cs);
1481 COPY_BACK_SEG(20, WHvX64RegisterSs, pCtx->ss);
1482 COPY_BACK_SEG(21, WHvX64RegisterDs, pCtx->ds);
1483 COPY_BACK_SEG(22, WHvX64RegisterFs, pCtx->fs);
1484 COPY_BACK_SEG(23, WHvX64RegisterGs, pCtx->gs);
1485 COPY_BACK_SEG(24, WHvX64RegisterLdtr, pCtx->ldtr);
1486 COPY_BACK_SEG(25, WHvX64RegisterTr, pCtx->tr);
1487
1488 /* Descriptor tables. */
1489 Assert(aenmNames[26] == WHvX64RegisterIdtr);
1490 pCtx->idtr.cbIdt = aValues[26].Table.Limit;
1491 pCtx->idtr.pIdt = aValues[26].Table.Base;
1492 Assert(aenmNames[27] == WHvX64RegisterGdtr);
1493 pCtx->gdtr.cbGdt = aValues[27].Table.Limit;
1494 pCtx->gdtr.pGdt = aValues[27].Table.Base;
1495
1496 /* Control registers. */
1497 Assert(aenmNames[28] == WHvX64RegisterCr0);
1498 bool fMaybeChangedMode = false;
1499 bool fFlushTlb = false;
1500 bool fFlushGlobalTlb = false;
1501 if (pCtx->cr0 != aValues[28].Reg64)
1502 {
1503 CPUMSetGuestCR0(pVCpu, aValues[28].Reg64);
1504 fMaybeChangedMode = true;
1505 fFlushTlb = fFlushGlobalTlb = true; /// @todo fix this
1506 }
1507 Assert(aenmNames[29] == WHvX64RegisterCr2);
1508 pCtx->cr2 = aValues[29].Reg64;
1509 if (pCtx->cr3 != aValues[30].Reg64)
1510 {
1511 CPUMSetGuestCR3(pVCpu, aValues[30].Reg64);
1512 fFlushTlb = true;
1513 }
1514 if (pCtx->cr4 != aValues[31].Reg64)
1515 {
1516 CPUMSetGuestCR4(pVCpu, aValues[31].Reg64);
1517 fMaybeChangedMode = true;
1518 fFlushTlb = fFlushGlobalTlb = true; /// @todo fix this
1519 }
1520 APICSetTpr(pVCpu, (uint8_t)aValues[32].Reg64 << 4);
1521
1522 /* Debug registers. */
1523 Assert(aenmNames[33] == WHvX64RegisterDr0);
1524 /** @todo fixme */
1525 if (pCtx->dr[0] != aValues[33].Reg64)
1526 CPUMSetGuestDR0(pVCpu, aValues[33].Reg64);
1527 if (pCtx->dr[1] != aValues[34].Reg64)
1528 CPUMSetGuestDR1(pVCpu, aValues[34].Reg64);
1529 if (pCtx->dr[2] != aValues[35].Reg64)
1530 CPUMSetGuestDR2(pVCpu, aValues[35].Reg64);
1531 if (pCtx->dr[3] != aValues[36].Reg64)
1532 CPUMSetGuestDR3(pVCpu, aValues[36].Reg64);
1533 Assert(aenmNames[37] == WHvX64RegisterDr6);
1534 Assert(aenmNames[38] == WHvX64RegisterDr7);
1535 if (pCtx->dr[6] != aValues[37].Reg64)
1536 CPUMSetGuestDR6(pVCpu, aValues[37].Reg64);
1537 if (pCtx->dr[7] != aValues[38].Reg64)
 1538 CPUMSetGuestDR7(pVCpu, aValues[38].Reg64);
1539
1540 /* Vector state. */
1541 Assert(aenmNames[39] == WHvX64RegisterXmm0);
1542 Assert(aenmNames[54] == WHvX64RegisterXmm15);
1543 pCtx->pXStateR3->x87.aXMM[0].uXmm.s.Lo = aValues[39].Reg128.Low64;
1544 pCtx->pXStateR3->x87.aXMM[0].uXmm.s.Hi = aValues[39].Reg128.High64;
1545 pCtx->pXStateR3->x87.aXMM[1].uXmm.s.Lo = aValues[40].Reg128.Low64;
1546 pCtx->pXStateR3->x87.aXMM[1].uXmm.s.Hi = aValues[40].Reg128.High64;
1547 pCtx->pXStateR3->x87.aXMM[2].uXmm.s.Lo = aValues[41].Reg128.Low64;
1548 pCtx->pXStateR3->x87.aXMM[2].uXmm.s.Hi = aValues[41].Reg128.High64;
1549 pCtx->pXStateR3->x87.aXMM[3].uXmm.s.Lo = aValues[42].Reg128.Low64;
1550 pCtx->pXStateR3->x87.aXMM[3].uXmm.s.Hi = aValues[42].Reg128.High64;
1551 pCtx->pXStateR3->x87.aXMM[4].uXmm.s.Lo = aValues[43].Reg128.Low64;
1552 pCtx->pXStateR3->x87.aXMM[4].uXmm.s.Hi = aValues[43].Reg128.High64;
1553 pCtx->pXStateR3->x87.aXMM[5].uXmm.s.Lo = aValues[44].Reg128.Low64;
1554 pCtx->pXStateR3->x87.aXMM[5].uXmm.s.Hi = aValues[44].Reg128.High64;
1555 pCtx->pXStateR3->x87.aXMM[6].uXmm.s.Lo = aValues[45].Reg128.Low64;
1556 pCtx->pXStateR3->x87.aXMM[6].uXmm.s.Hi = aValues[45].Reg128.High64;
1557 pCtx->pXStateR3->x87.aXMM[7].uXmm.s.Lo = aValues[46].Reg128.Low64;
1558 pCtx->pXStateR3->x87.aXMM[7].uXmm.s.Hi = aValues[46].Reg128.High64;
1559 pCtx->pXStateR3->x87.aXMM[8].uXmm.s.Lo = aValues[47].Reg128.Low64;
1560 pCtx->pXStateR3->x87.aXMM[8].uXmm.s.Hi = aValues[47].Reg128.High64;
1561 pCtx->pXStateR3->x87.aXMM[9].uXmm.s.Lo = aValues[48].Reg128.Low64;
1562 pCtx->pXStateR3->x87.aXMM[9].uXmm.s.Hi = aValues[48].Reg128.High64;
1563 pCtx->pXStateR3->x87.aXMM[10].uXmm.s.Lo = aValues[49].Reg128.Low64;
1564 pCtx->pXStateR3->x87.aXMM[10].uXmm.s.Hi = aValues[49].Reg128.High64;
1565 pCtx->pXStateR3->x87.aXMM[11].uXmm.s.Lo = aValues[50].Reg128.Low64;
1566 pCtx->pXStateR3->x87.aXMM[11].uXmm.s.Hi = aValues[50].Reg128.High64;
1567 pCtx->pXStateR3->x87.aXMM[12].uXmm.s.Lo = aValues[51].Reg128.Low64;
1568 pCtx->pXStateR3->x87.aXMM[12].uXmm.s.Hi = aValues[51].Reg128.High64;
1569 pCtx->pXStateR3->x87.aXMM[13].uXmm.s.Lo = aValues[52].Reg128.Low64;
1570 pCtx->pXStateR3->x87.aXMM[13].uXmm.s.Hi = aValues[52].Reg128.High64;
1571 pCtx->pXStateR3->x87.aXMM[14].uXmm.s.Lo = aValues[53].Reg128.Low64;
1572 pCtx->pXStateR3->x87.aXMM[14].uXmm.s.Hi = aValues[53].Reg128.High64;
1573 pCtx->pXStateR3->x87.aXMM[15].uXmm.s.Lo = aValues[54].Reg128.Low64;
1574 pCtx->pXStateR3->x87.aXMM[15].uXmm.s.Hi = aValues[54].Reg128.High64;
1575
1576 /* Floating point state. */
1577 Assert(aenmNames[55] == WHvX64RegisterFpMmx0);
1578 Assert(aenmNames[62] == WHvX64RegisterFpMmx7);
1579 pCtx->pXStateR3->x87.aRegs[0].au64[0] = aValues[55].Fp.AsUINT128.Low64;
1580 pCtx->pXStateR3->x87.aRegs[0].au64[1] = aValues[55].Fp.AsUINT128.High64;
1581 pCtx->pXStateR3->x87.aRegs[1].au64[0] = aValues[56].Fp.AsUINT128.Low64;
1582 pCtx->pXStateR3->x87.aRegs[1].au64[1] = aValues[56].Fp.AsUINT128.High64;
1583 pCtx->pXStateR3->x87.aRegs[2].au64[0] = aValues[57].Fp.AsUINT128.Low64;
1584 pCtx->pXStateR3->x87.aRegs[2].au64[1] = aValues[57].Fp.AsUINT128.High64;
1585 pCtx->pXStateR3->x87.aRegs[3].au64[0] = aValues[58].Fp.AsUINT128.Low64;
1586 pCtx->pXStateR3->x87.aRegs[3].au64[1] = aValues[58].Fp.AsUINT128.High64;
1587 pCtx->pXStateR3->x87.aRegs[4].au64[0] = aValues[59].Fp.AsUINT128.Low64;
1588 pCtx->pXStateR3->x87.aRegs[4].au64[1] = aValues[59].Fp.AsUINT128.High64;
1589 pCtx->pXStateR3->x87.aRegs[5].au64[0] = aValues[60].Fp.AsUINT128.Low64;
1590 pCtx->pXStateR3->x87.aRegs[5].au64[1] = aValues[60].Fp.AsUINT128.High64;
1591 pCtx->pXStateR3->x87.aRegs[6].au64[0] = aValues[61].Fp.AsUINT128.Low64;
1592 pCtx->pXStateR3->x87.aRegs[6].au64[1] = aValues[61].Fp.AsUINT128.High64;
1593 pCtx->pXStateR3->x87.aRegs[7].au64[0] = aValues[62].Fp.AsUINT128.Low64;
1594 pCtx->pXStateR3->x87.aRegs[7].au64[1] = aValues[62].Fp.AsUINT128.High64;
1595
1596 Assert(aenmNames[63] == WHvX64RegisterFpControlStatus);
1597 pCtx->pXStateR3->x87.FCW = aValues[63].FpControlStatus.FpControl;
1598 pCtx->pXStateR3->x87.FSW = aValues[63].FpControlStatus.FpStatus;
1599 pCtx->pXStateR3->x87.FTW = aValues[63].FpControlStatus.FpTag
1600 /*| (aValues[63].FpControlStatus.Reserved << 8)*/;
1601 pCtx->pXStateR3->x87.FOP = aValues[63].FpControlStatus.LastFpOp;
1602 pCtx->pXStateR3->x87.FPUIP = (uint32_t)aValues[63].FpControlStatus.LastFpRip;
1603 pCtx->pXStateR3->x87.CS = (uint16_t)(aValues[63].FpControlStatus.LastFpRip >> 32);
1604 pCtx->pXStateR3->x87.Rsrvd1 = (uint16_t)(aValues[63].FpControlStatus.LastFpRip >> 48);
1605
1606 Assert(aenmNames[64] == WHvX64RegisterXmmControlStatus);
1607 pCtx->pXStateR3->x87.FPUDP = (uint32_t)aValues[64].XmmControlStatus.LastFpRdp;
1608 pCtx->pXStateR3->x87.DS = (uint16_t)(aValues[64].XmmControlStatus.LastFpRdp >> 32);
1609 pCtx->pXStateR3->x87.Rsrvd2 = (uint16_t)(aValues[64].XmmControlStatus.LastFpRdp >> 48);
1610 pCtx->pXStateR3->x87.MXCSR = aValues[64].XmmControlStatus.XmmStatusControl;
1611 pCtx->pXStateR3->x87.MXCSR_MASK = aValues[64].XmmControlStatus.XmmStatusControlMask; /** @todo ??? (Isn't this an output field?) */
1612
1613 /* MSRs */
1614 // WHvX64RegisterTsc - don't touch
1615 Assert(aenmNames[65] == WHvX64RegisterEfer);
1616 if (aValues[65].Reg64 != pCtx->msrEFER)
1617 {
1618 pCtx->msrEFER = aValues[65].Reg64;
1619 fMaybeChangedMode = true;
1620 }
1621
1622 Assert(aenmNames[66] == WHvX64RegisterKernelGsBase);
1623 pCtx->msrKERNELGSBASE = aValues[66].Reg64;
1624
1625 Assert(aenmNames[67] == WHvX64RegisterApicBase);
1626 if (aValues[67].Reg64 != APICGetBaseMsrNoCheck(pVCpu))
1627 {
1628 VBOXSTRICTRC rc2 = APICSetBaseMsr(pVCpu, aValues[67].Reg64);
1629 Assert(rc2 == VINF_SUCCESS); NOREF(rc2);
1630 }
1631
1632 Assert(aenmNames[68] == WHvX64RegisterPat);
1633 pCtx->msrPAT = aValues[68].Reg64;
1634 /// @todo WHvX64RegisterSysenterCs
1635 /// @todo WHvX64RegisterSysenterEip
1636 /// @todo WHvX64RegisterSysenterEsp
1637 Assert(aenmNames[72] == WHvX64RegisterStar);
1638 pCtx->msrSTAR = aValues[72].Reg64;
1639 Assert(aenmNames[73] == WHvX64RegisterLstar);
1640 pCtx->msrLSTAR = aValues[73].Reg64;
1641 Assert(aenmNames[74] == WHvX64RegisterCstar);
1642 pCtx->msrCSTAR = aValues[74].Reg64;
1643 Assert(aenmNames[75] == WHvX64RegisterSfmask);
1644 pCtx->msrSFMASK = aValues[75].Reg64;
1645
1646 /// @todo WHvRegisterPendingInterruption
1647 Assert(aenmNames[76] == WHvRegisterPendingInterruption);
 1648 /** @todo Someone at Microsoft please explain why HV_X64_PENDING_INTERRUPTION_REGISTER
 1649 * and HV_X64_INTERRUPT_STATE_REGISTER are missing from the headers. Ditto for
 1650 * whatever structures WHvRegisterPendingEvent0/1 use. */
1651 MISSINGPENDINGINTERRUPTIONREG const * pPendingInt = (MISSINGPENDINGINTERRUPTIONREG const *)&aValues[76];
1652 if (pPendingInt->fInterruptionPending)
1653 {
1654 Log7(("PendingInterruption: type=%u vector=%#x errcd=%RTbool/%#x unk0=%u unk1=%u\n",
1655 pPendingInt->enmInterruptionType, pPendingInt->InterruptionVector, pPendingInt->fDeliverErrCd,
1656 pPendingInt->uErrCd, pPendingInt->fUnknown0, pPendingInt->fUnknown1));
1657 AssertMsg(pPendingInt->uReserved0 == 0 && pPendingInt->uReserved1 == 0,
1658 ("%#RX64 %#RX64\n", pPendingInt->au64[0], pPendingInt->au64[1]));
1659 }
1660
1661 /// @todo WHvRegisterInterruptState
1662 /// @todo WHvRegisterPendingEvent0
1663 /// @todo WHvRegisterPendingEvent1
1664
1665
1666 if (fMaybeChangedMode)
1667 {
1668 int rc = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
1669 AssertRC(rc);
1670 }
1671 if (fFlushTlb)
1672 {
1673 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, fFlushGlobalTlb);
1674 AssertRC(rc);
1675 }
1676
1677 return VINF_SUCCESS;
1678 }
1679
1680 AssertLogRelMsgFailed(("WHvGetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
1681 pVM->nem.s.hPartition, pVCpu->idCpu, cRegs,
1682 hrc, RTNtCurrentTeb()->LastStatusValue, RTNtCurrentTeb()->LastErrorValue));
1683 return VERR_INTERNAL_ERROR;
1684}
1685
1686
1687#ifdef LOG_ENABLED
1688/**
1689 * Log the full details of an exit reason.
1690 *
1691 * @param pExitReason The exit reason to log.
1692 */
1693static void nemR3WinLogExitReason(WHV_RUN_VP_EXIT_CONTEXT const *pExitReason)
1694{
1695 bool fExitCtx = false;
1696 bool fExitInstr = false;
1697 switch (pExitReason->ExitReason)
1698 {
1699 case WHvRunVpExitReasonMemoryAccess:
1700 Log2(("Exit: Memory access: GCPhys=%RGp GCVirt=%RGv %s %s %s\n",
1701 pExitReason->MemoryAccess.Gpa, pExitReason->MemoryAccess.Gva,
1702 g_apszWHvMemAccesstypes[pExitReason->MemoryAccess.AccessInfo.AccessType],
1703 pExitReason->MemoryAccess.AccessInfo.GpaUnmapped ? "unmapped" : "mapped",
1704 pExitReason->MemoryAccess.AccessInfo.GvaValid ? "" : "invalid-gc-virt"));
1705 AssertMsg(!(pExitReason->MemoryAccess.AccessInfo.AsUINT32 & ~UINT32_C(0xf)),
1706 ("MemoryAccess.AccessInfo=%#x\n", pExitReason->MemoryAccess.AccessInfo.AsUINT32));
1707 fExitCtx = fExitInstr = true;
1708 break;
1709
1710 case WHvRunVpExitReasonX64IoPortAccess:
1711 Log2(("Exit: I/O port access: IoPort=%#x LB %u %s%s%s rax=%#RX64 rcx=%#RX64 rsi=%#RX64 rdi=%#RX64\n",
1712 pExitReason->IoPortAccess.PortNumber,
1713 pExitReason->IoPortAccess.AccessInfo.AccessSize,
1714 pExitReason->IoPortAccess.AccessInfo.IsWrite ? "out" : "in",
1715 pExitReason->IoPortAccess.AccessInfo.StringOp ? " string" : "",
1716 pExitReason->IoPortAccess.AccessInfo.RepPrefix ? " rep" : "",
1717 pExitReason->IoPortAccess.Rax,
1718 pExitReason->IoPortAccess.Rcx,
1719 pExitReason->IoPortAccess.Rsi,
1720 pExitReason->IoPortAccess.Rdi));
1721 Log2(("Exit: + ds=%#x:{%#RX64 LB %#RX32, %#x} es=%#x:{%#RX64 LB %#RX32, %#x}\n",
1722 pExitReason->IoPortAccess.Ds.Selector,
1723 pExitReason->IoPortAccess.Ds.Base,
1724 pExitReason->IoPortAccess.Ds.Limit,
1725 pExitReason->IoPortAccess.Ds.Attributes,
1726 pExitReason->IoPortAccess.Es.Selector,
1727 pExitReason->IoPortAccess.Es.Base,
1728 pExitReason->IoPortAccess.Es.Limit,
1729 pExitReason->IoPortAccess.Es.Attributes ));
1730
1731 AssertMsg( pExitReason->IoPortAccess.AccessInfo.AccessSize == 1
1732 || pExitReason->IoPortAccess.AccessInfo.AccessSize == 2
1733 || pExitReason->IoPortAccess.AccessInfo.AccessSize == 4,
1734 ("IoPortAccess.AccessInfo.AccessSize=%d\n", pExitReason->IoPortAccess.AccessInfo.AccessSize));
1735 AssertMsg(!(pExitReason->IoPortAccess.AccessInfo.AsUINT32 & ~UINT32_C(0x3f)),
1736 ("IoPortAccess.AccessInfo=%#x\n", pExitReason->IoPortAccess.AccessInfo.AsUINT32));
1737 fExitCtx = fExitInstr = true;
1738 break;
1739
1740# if 0
1741 case WHvRunVpExitReasonUnrecoverableException:
1742 case WHvRunVpExitReasonInvalidVpRegisterValue:
1743 case WHvRunVpExitReasonUnsupportedFeature:
1744 case WHvRunVpExitReasonX64InterruptWindow:
1745 case WHvRunVpExitReasonX64Halt:
1746 case WHvRunVpExitReasonX64MsrAccess:
1747 case WHvRunVpExitReasonX64Cpuid:
1748 case WHvRunVpExitReasonException:
1749 case WHvRunVpExitReasonCanceled:
1750 case WHvRunVpExitReasonAlerted:
1751 WHV_X64_MSR_ACCESS_CONTEXT MsrAccess;
1752 WHV_X64_CPUID_ACCESS_CONTEXT CpuidAccess;
1753 WHV_VP_EXCEPTION_CONTEXT VpException;
1754 WHV_X64_INTERRUPTION_DELIVERABLE_CONTEXT InterruptWindow;
1755 WHV_UNRECOVERABLE_EXCEPTION_CONTEXT UnrecoverableException;
1756 WHV_X64_UNSUPPORTED_FEATURE_CONTEXT UnsupportedFeature;
1757 WHV_RUN_VP_CANCELED_CONTEXT CancelReason;
1758#endif
1759
1760 case WHvRunVpExitReasonNone:
1761 Log2(("Exit: No reason\n"));
1762 AssertFailed();
1763 break;
1764
1765 default:
1766 Log(("Exit: %#x\n", pExitReason->ExitReason));
1767 break;
1768 }
1769
1770 /*
1771 * Context and maybe instruction details.
1772 */
1773 if (fExitCtx)
1774 {
1775 const WHV_VP_EXIT_CONTEXT *pVpCtx = &pExitReason->IoPortAccess.VpContext;
1776 Log2(("Exit: + CS:RIP=%04x:%08RX64 RFLAGS=%06RX64 cbInstr=%u CS={%RX64 L %#RX32, %#x}\n",
1777 pVpCtx->Cs.Selector,
1778 pVpCtx->Rip,
1779 pVpCtx->Rflags,
1780 pVpCtx->InstructionLength,
1781 pVpCtx->Cs.Base, pVpCtx->Cs.Limit, pVpCtx->Cs.Attributes));
1782 Log2(("Exit: + cpl=%d CR0.PE=%d CR0.AM=%d EFER.LMA=%d DebugActive=%d InterruptionPending=%d InterruptShadow=%d\n",
1783 pVpCtx->ExecutionState.Cpl,
1784 pVpCtx->ExecutionState.Cr0Pe,
1785 pVpCtx->ExecutionState.Cr0Am,
1786 pVpCtx->ExecutionState.EferLma,
1787 pVpCtx->ExecutionState.DebugActive,
1788 pVpCtx->ExecutionState.InterruptionPending,
1789 pVpCtx->ExecutionState.InterruptShadow));
1790 AssertMsg(!(pVpCtx->ExecutionState.AsUINT16 & ~UINT16_C(0x107f)),
1791 ("ExecutionState.AsUINT16=%#x\n", pVpCtx->ExecutionState.AsUINT16));
1792
 1793 /** @todo Someone at Microsoft please explain why the InstructionBytes fields
 1794 * are 16 bytes long, when 15 would've been sufficient and saved 3-7 bytes of
 1795 * alignment padding. Intel max length is 15, so is this some ARM stuff?
 1796 * Aren't ARM instructions at most 32 bits wide? Confused. */
1798 if (fExitInstr && pExitReason->IoPortAccess.InstructionByteCount > 0)
1799 Log2(("Exit: + Instruction %.*Rhxs\n",
1800 pExitReason->IoPortAccess.InstructionByteCount, pExitReason->IoPortAccess.InstructionBytes));
1801 }
1802}
1803
1804
1805/**
1806 * Logs the current CPU state.
1807 */
1808static void nemR3WinLogState(PVM pVM, PVMCPU pVCpu)
1809{
1810 if (LogIs3Enabled())
1811 {
1812 char szRegs[4096];
1813 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
1814 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
1815 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
1816 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
1817 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
1818 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
1819 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
1820 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
1821 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
1822 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
1823 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
1824 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
1825 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
1826 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
1827 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
1828 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
1829 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
1830 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
1831 " efer=%016VR{efer}\n"
1832 " pat=%016VR{pat}\n"
1833 " sf_mask=%016VR{sf_mask}\n"
1834 "krnl_gs_base=%016VR{krnl_gs_base}\n"
1835 " lstar=%016VR{lstar}\n"
1836 " star=%016VR{star} cstar=%016VR{cstar}\n"
1837 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
1838 );
1839
1840 char szInstr[256];
1841 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
1842 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
1843 szInstr, sizeof(szInstr), NULL);
1844 Log3(("%s%s\n", szRegs, szInstr));
1845 }
1846}
1847
1848#endif /* LOG_ENABLED */
1849
1850
1851/**
 1852 * Advances the guest RIP and clears EFLAGS.RF.
1853 *
1854 * This may clear VMCPU_FF_INHIBIT_INTERRUPTS.
1855 *
1856 * @param pVCpu The cross context virtual CPU structure.
1857 * @param pCtx The CPU context to update.
1858 * @param pExitCtx The exit context.
1859 */
1860DECLINLINE(void) nemR3WinAdvanceGuestRipAndClearRF(PVMCPU pVCpu, PCPUMCTX pCtx, WHV_VP_EXIT_CONTEXT const *pExitCtx)
1861{
1862 /* Advance the RIP. */
1863 Assert(pExitCtx->InstructionLength > 0 && pExitCtx->InstructionLength < 16);
1864 pCtx->rip += pExitCtx->InstructionLength;
1865 pCtx->rflags.Bits.u1RF = 0;
1866
1867 /* Update interrupt inhibition. */
1868 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1869 { /* likely */ }
1870 else if (pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
1871 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1872}
1873
1874
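/**
 * Handles an X64 halt exit by asking EM to halt the CPU.
 *
 * @returns VINF_EM_HALT.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pCtx    The CPU context (currently unused).
 */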
1875static VBOXSTRICTRC nemR3WinHandleHalt(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1876{
1877 NOREF(pVM); NOREF(pVCpu); NOREF(pCtx);
1878 LogFlow(("nemR3WinHandleHalt\n"));
1879 return VINF_EM_HALT;
1880}
1881
1882
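/**
 * Page enumeration callback that unmaps a single page from Hyper-V.
 *
 * Used with PGMPhysNemEnumPagesByState when we fall back to unmapping
 * everything; updates the page state and the mapped page count.
 */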
1883static DECLCALLBACK(int) nemR3WinUnmapOnePageCallback(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint8_t *pu2NemState, void *pvUser)
1884{
1885 RT_NOREF_PV(pvUser);
1886#ifdef NEM_WIN_USE_HYPERCALLS
1887 int rc = nemR3WinHypercallUnmapPage(pVM, pVCpu, GCPhys);
1888 AssertRC(rc);
1889 if (RT_SUCCESS(rc))
1890#else
1891 RT_NOREF_PV(pVCpu);
1892 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
1893 if (SUCCEEDED(hrc))
1894#endif
1895 {
1896 Log5(("NEM GPA unmap all: %RGp (cMappedPages=%u)\n", GCPhys, pVM->nem.s.cMappedPages - 1));
1897 *pu2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
1898 }
1899 else
1900 {
1901#ifdef NEM_WIN_USE_HYPERCALLS
1902 LogRel(("nemR3WinUnmapOnePageCallback: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));
1903#else
1904 LogRel(("nemR3WinUnmapOnePageCallback: GCPhys=%RGp %s hrc=%Rhrc (%#x) Last=%#x/%u (cMappedPages=%u)\n",
1905 GCPhys, g_apszPageStates[*pu2NemState], hrc, hrc, RTNtCurrentTeb()->LastStatusValue,
1906 RTNtCurrentTeb()->LastErrorValue, pVM->nem.s.cMappedPages));
1907#endif
1908 *pu2NemState = NEM_WIN_PAGE_STATE_NOT_SET;
1909 }
1910 if (pVM->nem.s.cMappedPages > 0)
1911 ASMAtomicDecU32(&pVM->nem.s.cMappedPages);
1912 return VINF_SUCCESS;
1913}
1914
1915
1916/**
1917 * State to pass between nemR3WinHandleMemoryAccess and
1918 * nemR3WinHandleMemoryAccessPageCheckerCallback.
1919 */
1920typedef struct NEMR3WINHMACPCCSTATE
1921{
1922 /** Input: Write access. */
1923 bool fWriteAccess;
1924 /** Output: Set if we did something. */
1925 bool fDidSomething;
 1926 /** Output: Set if we should resume. */
1927 bool fCanResume;
1928} NEMR3WINHMACPCCSTATE;
1929
1930/**
1931 * @callback_method_impl{FNPGMPHYSNEMCHECKPAGE,
1932 * Worker for nemR3WinHandleMemoryAccess; pvUser points to a
1933 * NEMR3WINHMACPCCSTATE structure. }
1934 */
1935static DECLCALLBACK(int) nemR3WinHandleMemoryAccessPageCheckerCallback(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys,
1936 PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
1937{
1938 NEMR3WINHMACPCCSTATE *pState = (NEMR3WINHMACPCCSTATE *)pvUser;
1939 pState->fDidSomething = false;
1940 pState->fCanResume = false;
1941
1942 /* If A20 is disabled, we may need to make another query on the masked
1943 page to get the correct protection information. */
1944 uint8_t u2State = pInfo->u2NemState;
1945 RTGCPHYS GCPhysSrc;
1946 if ( pVM->nem.s.fA20Enabled
1947 || !NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
1948 GCPhysSrc = GCPhys;
1949 else
1950 {
1951 GCPhysSrc = GCPhys & ~(RTGCPHYS)RT_BIT_32(20);
1952 PGMPHYSNEMPAGEINFO Info2;
1953 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhysSrc, pState->fWriteAccess, &Info2, NULL, NULL);
1954 AssertRCReturn(rc, rc);
1955
1956 *pInfo = Info2;
1957 pInfo->u2NemState = u2State;
1958 }
1959
1960 /*
1961 * Consolidate current page state with actual page protection and access type.
1962 * We don't really consider downgrades here, as they shouldn't happen.
1963 */
1964#ifndef NEM_WIN_USE_HYPERCALLS
 1965 /** @todo Someone at Microsoft please explain:
 1966 * I'm not sure WTF was going on, but I ended up in a loop if I remapped a
 1967 * readonly page as writable (unmap, then map again). Specifically, this was an
 1968 * issue with the big VRAM mapping at 0xe0000000 when booting DSL 4.4.1. So, in
 1969 * the hope of working around that we no longer pre-map anything, just unmap
 1970 * stuff and do it lazily here. And here we will first unmap, restart, and then
 1971 * remap with new protection or backing.
1972 */
1973#endif
1974 int rc;
1975 switch (u2State)
1976 {
1977 case NEM_WIN_PAGE_STATE_UNMAPPED:
1978 case NEM_WIN_PAGE_STATE_NOT_SET:
1979 if (pInfo->fNemProt == NEM_PAGE_PROT_NONE)
1980 {
1981 Log4(("nemR3WinHandleMemoryAccessPageCheckerCallback: %RGp - #1\n", GCPhys));
1982 return VINF_SUCCESS;
1983 }
1984
1985 /* Don't bother remapping it if it's a write request to a non-writable page. */
1986 if ( pState->fWriteAccess
1987 && !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE))
1988 {
1989 Log4(("nemR3WinHandleMemoryAccessPageCheckerCallback: %RGp - #1w\n", GCPhys));
1990 return VINF_SUCCESS;
1991 }
1992
1993 /* Map the page. */
1994 rc = nemR3NativeSetPhysPage(pVM,
1995 pVCpu,
1996 GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
1997 GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
1998 pInfo->fNemProt,
1999 &u2State,
 2000 true /*fBackingChanged*/);
2001 pInfo->u2NemState = u2State;
2002 Log4(("nemR3WinHandleMemoryAccessPageCheckerCallback: %RGp - synced => %s + %Rrc\n",
2003 GCPhys, g_apszPageStates[u2State], rc));
2004 pState->fDidSomething = true;
2005 pState->fCanResume = true;
2006 return rc;
2007
2008 case NEM_WIN_PAGE_STATE_READABLE:
2009 if ( !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
2010 && (pInfo->fNemProt & (NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE)))
2011 {
2012 Log4(("nemR3WinHandleMemoryAccessPageCheckerCallback: %RGp - #2\n", GCPhys));
2013 return VINF_SUCCESS;
2014 }
2015
2016#ifdef NEM_WIN_USE_HYPERCALLS
2017 /* Upgrade page to writable. */
2018/** @todo test this*/
2019 if ( (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
2020 && pState->fWriteAccess)
2021 {
2022 rc = nemR3WinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhys,
2023 HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE
2024 | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
2025 AssertRC(rc);
2026 if (RT_SUCCESS(rc))
2027 {
2028 pInfo->u2NemState = NEM_WIN_PAGE_STATE_WRITABLE;
2029 pState->fDidSomething = true;
2030 pState->fCanResume = true;
2031 Log5(("NEM GPA write-upgrade/exit: %RGp (was %s, cMappedPages=%u)\n",
2032 GCPhys, g_apszPageStates[u2State], pVM->nem.s.cMappedPages));
2033 }
2034 }
2035 else
2036 {
 2037 /* Need to emulate the access. */
2038 AssertBreak(pInfo->fNemProt != NEM_PAGE_PROT_NONE); /* There should be no downgrades. */
2039 rc = VINF_SUCCESS;
2040 }
2041 return rc;
2042#else
2043 break;
2044#endif
2045
2046 case NEM_WIN_PAGE_STATE_WRITABLE:
2047 if (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
2048 {
2049 Log4(("nemR3WinHandleMemoryAccessPageCheckerCallback: %RGp - #3\n", GCPhys));
2050 return VINF_SUCCESS;
2051 }
2052#ifdef NEM_WIN_USE_HYPERCALLS
2053 AssertFailed(); /* There should be no downgrades. */
2054#endif
2055 break;
2056
2057 default:
2058 AssertLogRelMsgFailedReturn(("u2State=%#x\n", u2State), VERR_INTERNAL_ERROR_3);
2059 }
2060
2061 /*
2062 * Unmap and restart the instruction.
2063 * If this fails, which it does every so often, just unmap everything for now.
2064 */
2065#ifdef NEM_WIN_USE_HYPERCALLS
2066 rc = nemR3WinHypercallUnmapPage(pVM, pVCpu, GCPhys);
2067 AssertRC(rc);
2068 if (RT_SUCCESS(rc))
2069#else
2070 /** @todo figure out whether we mess up the state or if it's WHv. */
2071 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
2072 if (SUCCEEDED(hrc))
2073#endif
2074 {
2075 pState->fDidSomething = true;
2076 pState->fCanResume = true;
2077 pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
2078 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
2079 Log5(("NEM GPA unmapped/exit: %RGp (was %s, cMappedPages=%u)\n", GCPhys, g_apszPageStates[u2State], cMappedPages));
2080 return VINF_SUCCESS;
2081 }
2082#ifdef NEM_WIN_USE_HYPERCALLS
2083 LogRel(("nemR3WinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhys, rc));
2084 return rc;
2085#else
2086 LogRel(("nemR3WinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp %s hrc=%Rhrc (%#x) Last=%#x/%u (cMappedPages=%u)\n",
2087 GCPhys, g_apszPageStates[u2State], hrc, hrc, RTNtCurrentTeb()->LastStatusValue, RTNtCurrentTeb()->LastErrorValue,
2088 pVM->nem.s.cMappedPages));
2089
2090 PGMPhysNemEnumPagesByState(pVM, pVCpu, NEM_WIN_PAGE_STATE_READABLE, nemR3WinUnmapOnePageCallback, NULL);
2091 Log(("nemR3WinHandleMemoryAccessPageCheckerCallback: Unmapped all (cMappedPages=%u)\n", pVM->nem.s.cMappedPages));
2092
2093 pState->fDidSomething = true;
2094 pState->fCanResume = true;
2095 pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
2096 return VINF_SUCCESS;
2097#endif
2098}
2099
2100
2101/**
 2102 * Handles a memory access VMEXIT.
2103 *
2104 * This can be triggered by a number of things.
2105 *
2106 * @returns Strict VBox status code.
2107 * @param pVM The cross context VM structure.
2108 * @param pVCpu The cross context virtual CPU structure.
2109 * @param pCtx The CPU context to update.
2110 * @param pMemCtx The exit reason information.
2111 */
2112static VBOXSTRICTRC nemR3WinHandleMemoryAccess(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, WHV_MEMORY_ACCESS_CONTEXT const *pMemCtx)
2113{
2114 /*
2115 * Ask PGM for information about the given GCPhys. We need to check if we're
2116 * out of sync first.
2117 */
2118 NEMR3WINHMACPCCSTATE State = { pMemCtx->AccessInfo.AccessType == WHvMemoryAccessWrite, false, false };
2119 PGMPHYSNEMPAGEINFO Info;
2120 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pMemCtx->Gpa, State.fWriteAccess, &Info,
2121 nemR3WinHandleMemoryAccessPageCheckerCallback, &State);
2122 if (RT_SUCCESS(rc))
2123 {
2124 if (Info.fNemProt & (pMemCtx->AccessInfo.AccessType == WHvMemoryAccessWrite ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
2125 {
2126 if (State.fCanResume)
2127 {
2128 Log4(("MemExit: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting (%s)\n",
2129 pMemCtx->Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
2130 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
2131 State.fDidSomething ? "" : " no-change", g_apszWHvMemAccesstypes[pMemCtx->AccessInfo.AccessType]));
2132 return VINF_SUCCESS;
2133 }
2134 }
2135 Log4(("MemExit: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating (%s)\n",
2136 pMemCtx->Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
2137 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
2138 State.fDidSomething ? "" : " no-change", g_apszWHvMemAccesstypes[pMemCtx->AccessInfo.AccessType]));
2139 }
2140 else
2141 Log4(("MemExit: %RGp rc=%Rrc%s; emulating (%s)\n", pMemCtx->Gpa, rc,
2142 State.fDidSomething ? " modified-backing" : "", g_apszWHvMemAccesstypes[pMemCtx->AccessInfo.AccessType]));
2143
2144 /*
2145 * Emulate the memory access, either access handler or special memory.
2146 */
2147 VBOXSTRICTRC rcStrict;
2148 if (pMemCtx->InstructionByteCount > 0)
2149 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pMemCtx->VpContext.Rip,
2150 pMemCtx->InstructionBytes, pMemCtx->InstructionByteCount);
2151 else
2152 rcStrict = IEMExecOne(pVCpu);
2153 /** @todo do we need to do anything wrt debugging here? */
2154 return rcStrict;
2155}
2156
2157
2158/**
2159 * Handles an I/O port access VMEXIT.
2160 *
 2161 * We ASSUME that the hypervisor doesn't do any I/O port access control.
2162 *
2163 * @returns Strict VBox status code.
2164 * @param pVM The cross context VM structure.
2165 * @param pVCpu The cross context virtual CPU structure.
2166 * @param pCtx The CPU context to update.
2167 * @param pIoPortCtx The exit reason information.
2168 */
2169static VBOXSTRICTRC nemR3WinHandleIoPortAccess(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx,
2170 WHV_X64_IO_PORT_ACCESS_CONTEXT const *pIoPortCtx)
2171{
2172 Assert( pIoPortCtx->AccessInfo.AccessSize == 1
2173 || pIoPortCtx->AccessInfo.AccessSize == 2
2174 || pIoPortCtx->AccessInfo.AccessSize == 4);
2175
2176 VBOXSTRICTRC rcStrict;
2177 if (!pIoPortCtx->AccessInfo.StringOp)
2178 {
2179 /*
2180 * Simple port I/O.
2181 */
2182 Assert(pCtx->rax == pIoPortCtx->Rax);
2183
2184 static uint32_t const s_fAndMask[8] =
2185 { UINT32_MAX, UINT32_C(0xff), UINT32_C(0xffff), UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX };
2186 uint32_t const fAndMask = s_fAndMask[pIoPortCtx->AccessInfo.AccessSize];
2187 if (pIoPortCtx->AccessInfo.IsWrite)
2188 {
2189 rcStrict = IOMIOPortWrite(pVM, pVCpu, pIoPortCtx->PortNumber, (uint32_t)pIoPortCtx->Rax & fAndMask,
2190 pIoPortCtx->AccessInfo.AccessSize);
2191 if (IOM_SUCCESS(rcStrict))
2192 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pIoPortCtx->VpContext);
2193 }
2194 else
2195 {
2196 uint32_t uValue = 0;
2197 rcStrict = IOMIOPortRead(pVM, pVCpu, pIoPortCtx->PortNumber, &uValue,
2198 pIoPortCtx->AccessInfo.AccessSize);
2199 if (IOM_SUCCESS(rcStrict))
2200 {
2201 pCtx->eax = (pCtx->eax & ~fAndMask) | (uValue & fAndMask);
2202 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pIoPortCtx->VpContext);
2203 }
2204 }
2205 }
2206 else
2207 {
2208 /*
2209 * String port I/O.
2210 */
2211 /** @todo Someone at Microsoft please explain how we can get the address mode
2212 * from the IoPortAccess.VpContext. CS.Attributes is only sufficient for
2213 * getting the default mode, it can always be overridden by a prefix. This
2214 * forces us to interpret the instruction from opcodes, which is suboptimal.
2215 * Both AMD-V and VT-x includes the address size in the exit info, at least on
2216 * CPUs that are reasonably new. */
2217 Assert( pIoPortCtx->Ds.Base == pCtx->ds.u64Base
2218 && pIoPortCtx->Ds.Limit == pCtx->ds.u32Limit
2219 && pIoPortCtx->Ds.Selector == pCtx->ds.Sel);
2220 Assert( pIoPortCtx->Es.Base == pCtx->es.u64Base
2221 && pIoPortCtx->Es.Limit == pCtx->es.u32Limit
2222 && pIoPortCtx->Es.Selector == pCtx->es.Sel);
2223 Assert(pIoPortCtx->Rdi == pCtx->rdi);
2224 Assert(pIoPortCtx->Rsi == pCtx->rsi);
2225 Assert(pIoPortCtx->Rcx == pCtx->rcx);
2227
2228 rcStrict = IEMExecOne(pVCpu);
2229 }
2230 if (IOM_SUCCESS(rcStrict))
2231 {
2232 /*
2233 * Do debug checks.
2234 */
 2235 if ( pIoPortCtx->VpContext.ExecutionState.DebugActive /** @todo Microsoft: Does DebugActive only reflect DR7? */
2236 || (pIoPortCtx->VpContext.Rflags & X86_EFL_TF)
2237 || DBGFBpIsHwIoArmed(pVM) )
2238 {
2239 /** @todo Debugging. */
2240 }
2241 }
2242 return rcStrict;
2243}
2244
2245
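/*
 * The following exit handlers are placeholders for exits we either have not
 * configured or do not handle yet; they just assert and return
 * VERR_NOT_IMPLEMENTED.
 */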
2246static VBOXSTRICTRC nemR3WinHandleInterruptWindow(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, WHV_RUN_VP_EXIT_CONTEXT const *pExitReason)
2247{
2248 NOREF(pVM); NOREF(pVCpu); NOREF(pCtx); NOREF(pExitReason);
2249 AssertLogRelFailedReturn(VERR_NOT_IMPLEMENTED);
2250}
2251
2252
2253static VBOXSTRICTRC nemR3WinHandleMsrAccess(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, WHV_RUN_VP_EXIT_CONTEXT const *pExitReason)
2254{
2255 NOREF(pVM); NOREF(pVCpu); NOREF(pCtx); NOREF(pExitReason);
2256 AssertLogRelFailedReturn(VERR_NOT_IMPLEMENTED);
2257}
2258
2259
2260static VBOXSTRICTRC nemR3WinHandleCpuId(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, WHV_RUN_VP_EXIT_CONTEXT const *pExitReason)
2261{
2262 NOREF(pVM); NOREF(pVCpu); NOREF(pCtx); NOREF(pExitReason);
2263 AssertLogRelFailedReturn(VERR_NOT_IMPLEMENTED);
2264}
2265
2266
2267static VBOXSTRICTRC nemR3WinHandleException(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, WHV_RUN_VP_EXIT_CONTEXT const *pExitReason)
2268{
2269 NOREF(pVM); NOREF(pVCpu); NOREF(pCtx); NOREF(pExitReason);
2270 AssertLogRelFailedReturn(VERR_NOT_IMPLEMENTED);
2271}
2272
2273
2274static VBOXSTRICTRC nemR3WinHandleUD(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, WHV_RUN_VP_EXIT_CONTEXT const *pExitReason)
2275{
2276 NOREF(pVM); NOREF(pVCpu); NOREF(pCtx); NOREF(pExitReason);
2277 AssertLogRelFailedReturn(VERR_NOT_IMPLEMENTED);
2278}
2279
2280
2281static VBOXSTRICTRC nemR3WinHandleTripleFault(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, WHV_RUN_VP_EXIT_CONTEXT const *pExitReason)
2282{
2283 NOREF(pVM); NOREF(pVCpu); NOREF(pCtx); NOREF(pExitReason);
2284 AssertLogRelFailedReturn(VERR_NOT_IMPLEMENTED);
2285}
2286
2287
2288static VBOXSTRICTRC nemR3WinHandleInvalidState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, WHV_RUN_VP_EXIT_CONTEXT const *pExitReason)
2289{
2290 NOREF(pVM); NOREF(pVCpu); NOREF(pCtx); NOREF(pExitReason);
2291 AssertLogRelFailedReturn(VERR_NOT_IMPLEMENTED);
2292}
2293
2294
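/**
 * The native run loop.
 *
 * Pushes the guest state to Hyper-V, runs the virtual processor, pulls the
 * state back, and dispatches the exit to the relevant handler, repeating until
 * a handler returns a non-VINF_SUCCESS status or a force flag is pending.
 */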
2295VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu)
2296{
2297#ifdef LOG_ENABLED
2298 if (LogIs3Enabled())
2299 {
2300 Log3(("nemR3NativeRunGC: Entering #%u\n", pVCpu->idCpu));
2301 nemR3WinLogState(pVM, pVCpu);
2302 }
2303#endif
2304
2305 /*
2306 * The run loop.
2307 *
 2308 * The current approach to state updating is to use the sledgehammer and sync
2309 * everything every time. This will be optimized later.
2310 */
2311 const bool fSingleStepping = false; /** @todo get this from somewhere. */
2312 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2313 for (unsigned iLoop = 0;;iLoop++)
2314 {
2315 /*
2316 * Copy the state.
2317 */
2318 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
2319 int rc2 = nemR3WinCopyStateToHyperV(pVM, pVCpu, pCtx);
2320 AssertRCBreakStmt(rc2, rcStrict = rc2);
2321
2322 /*
2323 * Run a bit.
2324 */
2325 WHV_RUN_VP_EXIT_CONTEXT ExitReason;
2326 RT_ZERO(ExitReason);
2327 if ( !VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
2328 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
2329 {
2330 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED);
2331 HRESULT hrc = WHvRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, &ExitReason, sizeof(ExitReason));
2332 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM);
2333 AssertLogRelMsgBreakStmt(SUCCEEDED(hrc),
2334 ("WHvRunVirtualProcessor(%p, %u,,) -> %Rhrc (Last=%#x/%u)\n", pVM->nem.s.hPartition, pVCpu->idCpu,
2335 hrc, RTNtCurrentTeb()->LastStatusValue, RTNtCurrentTeb()->LastErrorValue),
2336 rcStrict = VERR_INTERNAL_ERROR);
2337 Log2(("WHvRunVirtualProcessor -> %#x; exit code %#x (%d)\n", hrc, ExitReason.ExitReason, ExitReason.ExitReason));
2338 }
2339 else
2340 {
2341 LogFlow(("nemR3NativeRunGC: returning: pending FF (pre exec)\n"));
2342 break;
2343 }
2344
2345 /*
2346 * Copy back the state.
2347 */
2348 rc2 = nemR3WinCopyStateFromHyperV(pVM, pVCpu, pCtx);
2349 AssertRCBreakStmt(rc2, rcStrict = rc2);
2350
2351#ifdef LOG_ENABLED
2352 /*
2353 * Do some logging.
2354 */
2355 if (LogIs2Enabled())
2356 nemR3WinLogExitReason(&ExitReason);
2357 if (LogIs3Enabled())
2358 nemR3WinLogState(pVM, pVCpu);
2359#endif
2360
2361#ifdef VBOX_STRICT
2362 /* Assert that the VpContext field makes sense. */
2363 switch (ExitReason.ExitReason)
2364 {
2365 case WHvRunVpExitReasonMemoryAccess:
2366 case WHvRunVpExitReasonX64IoPortAccess:
2367 case WHvRunVpExitReasonX64MsrAccess:
2368 case WHvRunVpExitReasonX64Cpuid:
2369 case WHvRunVpExitReasonException:
2370 case WHvRunVpExitReasonUnrecoverableException:
2371 Assert( ExitReason.IoPortAccess.VpContext.InstructionLength > 0
2372 || ( ExitReason.ExitReason == WHvRunVpExitReasonMemoryAccess
2373 && ExitReason.MemoryAccess.AccessInfo.AccessType == WHvMemoryAccessExecute));
2374 Assert(ExitReason.IoPortAccess.VpContext.InstructionLength < 16);
2375 Assert(ExitReason.IoPortAccess.VpContext.ExecutionState.Cpl == CPUMGetGuestCPL(pVCpu));
2376 Assert(ExitReason.IoPortAccess.VpContext.ExecutionState.Cr0Pe == RT_BOOL(pCtx->cr0 & X86_CR0_PE));
2377 Assert(ExitReason.IoPortAccess.VpContext.ExecutionState.Cr0Am == RT_BOOL(pCtx->cr0 & X86_CR0_AM));
2378 Assert(ExitReason.IoPortAccess.VpContext.ExecutionState.EferLma == RT_BOOL(pCtx->msrEFER & MSR_K6_EFER_LMA));
2379 Assert(ExitReason.IoPortAccess.VpContext.ExecutionState.DebugActive == RT_BOOL(pCtx->dr[7] & X86_DR7_ENABLED_MASK));
2380 Assert(ExitReason.IoPortAccess.VpContext.ExecutionState.Reserved0 == 0);
2381 Assert(ExitReason.IoPortAccess.VpContext.ExecutionState.Reserved1 == 0);
2382 Assert(ExitReason.IoPortAccess.VpContext.Rip == pCtx->rip);
2383 Assert(ExitReason.IoPortAccess.VpContext.Rflags == pCtx->rflags.u);
2384 Assert( ExitReason.IoPortAccess.VpContext.Cs.Base == pCtx->cs.u64Base
2385 && ExitReason.IoPortAccess.VpContext.Cs.Limit == pCtx->cs.u32Limit
2386 && ExitReason.IoPortAccess.VpContext.Cs.Selector == pCtx->cs.Sel);
2387 break;
2388 default: break; /* shut up compiler. */
2389 }
2390#endif
2391
2392 /*
2393 * Deal with the exit.
2394 */
2395 switch (ExitReason.ExitReason)
2396 {
2397 /* Frequent exits: */
2398 case WHvRunVpExitReasonCanceled:
2399 case WHvRunVpExitReasonAlerted:
2400 rcStrict = VINF_SUCCESS;
2401 break;
2402
2403 case WHvRunVpExitReasonX64Halt:
2404 rcStrict = nemR3WinHandleHalt(pVM, pVCpu, pCtx);
2405 break;
2406
2407 case WHvRunVpExitReasonMemoryAccess:
2408 rcStrict = nemR3WinHandleMemoryAccess(pVM, pVCpu, pCtx, &ExitReason.MemoryAccess);
2409 break;
2410
2411 case WHvRunVpExitReasonX64IoPortAccess:
2412 rcStrict = nemR3WinHandleIoPortAccess(pVM, pVCpu, pCtx, &ExitReason.IoPortAccess);
2413 break;
2414
2415 case WHvRunVpExitReasonX64InterruptWindow:
2416 rcStrict = nemR3WinHandleInterruptWindow(pVM, pVCpu, pCtx, &ExitReason);
2417 break;
2418
2419 case WHvRunVpExitReasonX64MsrAccess: /* needs configuring */
2420 rcStrict = nemR3WinHandleMsrAccess(pVM, pVCpu, pCtx, &ExitReason);
2421 break;
2422
2423 case WHvRunVpExitReasonX64Cpuid: /* needs configuring */
2424 rcStrict = nemR3WinHandleCpuId(pVM, pVCpu, pCtx, &ExitReason);
2425 break;
2426
2427 case WHvRunVpExitReasonException: /* needs configuring */
2428 rcStrict = nemR3WinHandleException(pVM, pVCpu, pCtx, &ExitReason);
2429 break;
2430
2431 /* Unlikely exits: */
2432 case WHvRunVpExitReasonUnsupportedFeature:
2433 rcStrict = nemR3WinHandleUD(pVM, pVCpu, pCtx, &ExitReason);
2434 break;
2435
2436 case WHvRunVpExitReasonUnrecoverableException:
2437 rcStrict = nemR3WinHandleTripleFault(pVM, pVCpu, pCtx, &ExitReason);
2438 break;
2439
2440 case WHvRunVpExitReasonInvalidVpRegisterValue:
2441 rcStrict = nemR3WinHandleInvalidState(pVM, pVCpu, pCtx, &ExitReason);
2442 break;
2443
2444 /* Undesired exits: */
2445 case WHvRunVpExitReasonNone:
2446 default:
2447 AssertLogRelMsgFailed(("Unknown ExitReason: %#x\n", ExitReason.ExitReason));
2448 rcStrict = VERR_INTERNAL_ERROR_3;
2449 break;
2450 }
2451 if (rcStrict != VINF_SUCCESS)
2452 {
2453 LogFlow(("nemR3NativeRunGC: returning: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2454 break;
2455 }
2456
2457#ifndef NEM_WIN_USE_HYPERCALLS
2458 /* Hack alert! */
2459 uint32_t const cMappedPages = pVM->nem.s.cMappedPages;
2460 if (cMappedPages < 4000)
2461 { /* likely */ }
2462 else
2463 {
2464 PGMPhysNemEnumPagesByState(pVM, pVCpu, NEM_WIN_PAGE_STATE_READABLE, nemR3WinUnmapOnePageCallback, NULL);
2465 Log(("nemR3NativeRunGC: Unmapped all; cMappedPages=%u -> %u\n", cMappedPages, pVM->nem.s.cMappedPages));
2466 }
2467#endif
2468
2469 /* If any FF is pending, return to the EM loops. That's okay for the
2470 current sledgehammer approach. */
2471 if ( VM_FF_IS_PENDING( pVM, !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
2472 || VMCPU_FF_IS_PENDING(pVCpu, !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
2473 {
2474 LogFlow(("nemR3NativeRunGC: returning: pending FF (%#x / %#x)\n", pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
2475 break;
2476 }
2477 }
2478
2479 return rcStrict;
2480}
2481
2482
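/**
 * Checks whether NEM can execute the guest in its current state; this backend
 * currently always says yes.
 */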
2483bool nemR3NativeCanExecuteGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
2484{
2485 NOREF(pVM); NOREF(pVCpu); NOREF(pCtx);
2486 return true;
2487}
2488
2489
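/**
 * Single instruction stepping is not supported by this backend, so this always
 * returns false.
 */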
2490bool nemR3NativeSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
2491{
2492 NOREF(pVM); NOREF(pVCpu); NOREF(fEnable);
2493 return false;
2494}
2495
2496
2497/**
2498 * Forced flag notification call from VMEmt.h.
2499 *
2500 * This is only called when pVCpu is in the VMCPUSTATE_STARTED_EXEC_NEM state.
2501 *
2502 * @param pVM The cross context VM structure.
2503 * @param pVCpu The cross context virtual CPU structure of the CPU
2504 * to be notified.
2505 * @param fFlags Notification flags, VMNOTIFYFF_FLAGS_XXX.
2506 */
2507void nemR3NativeNotifyFF(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
2508{
2509 HRESULT hrc = WHvCancelRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, 0);
2510 AssertMsg(SUCCEEDED(hrc), ("WHvCancelRunVirtualProcessor -> hrc=%Rhrc\n", hrc));
2511
2512 RT_NOREF_PV(hrc);
2513 RT_NOREF_PV(fFlags);
2514}
2515
2516
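/**
 * Translates a guest physical address to a read-only ring-3 pointer, releasing
 * the page mapping lock again before returning.
 */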
2517DECLINLINE(int) nemR3NativeGCPhys2R3PtrReadOnly(PVM pVM, RTGCPHYS GCPhys, const void **ppv)
2518{
2519 PGMPAGEMAPLOCK Lock;
2520 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, ppv, &Lock);
2521 if (RT_SUCCESS(rc))
2522 PGMPhysReleasePageMappingLock(pVM, &Lock);
2523 return rc;
2524}
2525
2526
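/**
 * Translates a guest physical address to a writeable ring-3 pointer, releasing
 * the page mapping lock again before returning.
 */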
2527DECLINLINE(int) nemR3NativeGCPhys2R3PtrWriteable(PVM pVM, RTGCPHYS GCPhys, void **ppv)
2528{
2529 PGMPAGEMAPLOCK Lock;
2530 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, ppv, &Lock);
2531 if (RT_SUCCESS(rc))
2532 PGMPhysReleasePageMappingLock(pVM, &Lock);
2533 return rc;
2534}
2535
2536
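/**
 * Notification from PGM that a RAM range has been registered.
 *
 * Currently this only logs the range; pages are mapped lazily from the memory
 * access exit handler instead.
 */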
2537int nemR3NativeNotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb)
2538{
2539 LogRel(("nemR3NativeNotifyPhysRamRegister: %RGp LB %RGp\n", GCPhys, cb));
2540 NOREF(pVM); NOREF(GCPhys); NOREF(cb);
2541 return VINF_SUCCESS;
2542}
2543
2544
2545int nemR3NativeNotifyPhysMmioExMap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, void *pvMmio2)
2546{
2547 LogRel(("nemR3NativeNotifyPhysMmioExMap: %RGp LB %RGp fFlags=%#x pvMmio2=%p\n", GCPhys, cb, fFlags, pvMmio2));
2548 NOREF(pVM); NOREF(GCPhys); NOREF(cb); NOREF(fFlags); NOREF(pvMmio2);
2549 return VINF_SUCCESS;
2550}
2551
2552
2553int nemR3NativeNotifyPhysMmioExUnmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags)
2554{
2555 LogRel(("nemR3NativeNotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x\n", GCPhys, cb, fFlags));
2556 NOREF(pVM); NOREF(GCPhys); NOREF(cb); NOREF(fFlags);
2557 return VINF_SUCCESS;
2558}
2559
2560
2561/**
2562 * Called early during ROM registration, right after the pages have been
2563 * allocated and the RAM range updated.
2564 *
2565 * This will be succeeded by a number of NEMHCNotifyPhysPageProtChanged() calls
 2566 * and finally a NEMR3NotifyPhysRomRegisterLate() call.
2567 *
2568 * @returns VBox status code
2569 * @param pVM The cross context VM structure.
2570 * @param GCPhys The ROM address (page aligned).
2571 * @param cb The size (page aligned).
2572 * @param fFlags NEM_NOTIFY_PHYS_ROM_F_XXX.
2573 */
2574int nemR3NativeNotifyPhysRomRegisterEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags)
2575{
2576 LogRel(("nemR3NativeNotifyPhysRomRegisterEarly: %RGp LB %RGp fFlags=%#x\n", GCPhys, cb, fFlags));
 2577#if 0 /* Let's not do this after all. We'll get protection change notifications for each page and, if not, we'll map them lazily. */
2578 RTGCPHYS const cPages = cb >> X86_PAGE_SHIFT;
2579 for (RTGCPHYS iPage = 0; iPage < cPages; iPage++, GCPhys += X86_PAGE_SIZE)
2580 {
2581 const void *pvPage;
2582 int rc = nemR3NativeGCPhys2R3PtrReadOnly(pVM, GCPhys, &pvPage);
2583 if (RT_SUCCESS(rc))
2584 {
2585 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, (void *)pvPage, GCPhys, X86_PAGE_SIZE,
2586 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
2587 if (SUCCEEDED(hrc))
2588 { /* likely */ }
2589 else
2590 {
2591 LogRel(("nemR3NativeNotifyPhysRomRegisterEarly: GCPhys=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
2592 GCPhys, hrc, hrc, RTNtCurrentTeb()->LastStatusValue, RTNtCurrentTeb()->LastErrorValue));
2593 return VERR_NEM_INIT_FAILED;
2594 }
2595 }
2596 else
2597 {
2598 LogRel(("nemR3NativeNotifyPhysRomRegisterEarly: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));
2599 return rc;
2600 }
2601 }
2602#else
2603 NOREF(pVM); NOREF(GCPhys); NOREF(cb);
2604#endif
2605 RT_NOREF_PV(fFlags);
2606 return VINF_SUCCESS;
2607}
2608
2609
2610/**
2611 * Called after the ROM range has been fully completed.
2612 *
 2613 * This will be preceded by a NEMR3NotifyPhysRomRegisterEarly() call as well as a
2614 * number of NEMHCNotifyPhysPageProtChanged calls.
2615 *
2616 * @returns VBox status code
2617 * @param pVM The cross context VM structure.
2618 * @param GCPhys The ROM address (page aligned).
2619 * @param cb The size (page aligned).
2620 * @param fFlags NEM_NOTIFY_PHYS_ROM_F_XXX.
2621 */
2622int nemR3NativeNotifyPhysRomRegisterLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags)
2623{
2624 LogRel(("nemR3NativeNotifyPhysRomRegisterLate: %RGp LB %RGp fFlags=%#x\n", GCPhys, cb, fFlags));
2625 NOREF(pVM); NOREF(GCPhys); NOREF(cb); NOREF(fFlags);
2626 return VINF_SUCCESS;
2627}
2628
2629
2630/**
2631 * @callback_method_impl{FNPGMPHYSNEMCHECKPAGE}
2632 */
2633static DECLCALLBACK(int) nemR3WinUnsetForA20CheckerCallback(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys,
2634 PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
2635{
2636 /* We'll just unmap the memory. */
2637 if (pInfo->u2NemState > NEM_WIN_PAGE_STATE_UNMAPPED)
2638 {
2639#ifdef NEM_WIN_USE_HYPERCALLS
2640 int rc = nemR3WinHypercallUnmapPage(pVM, pVCpu, GCPhys);
2641 AssertRC(rc);
2642 if (RT_SUCCESS(rc))
2643#else
2644 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
2645 if (SUCCEEDED(hrc))
2646#endif
2647 {
2648 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
2649 Log5(("NEM GPA unmapped/A20: %RGp (was %s, cMappedPages=%u)\n", GCPhys, g_apszPageStates[pInfo->u2NemState], cMappedPages));
2650 pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
2651 }
2652 else
2653 {
2654#ifdef NEM_WIN_USE_HYPERCALLS
2655 LogRel(("nemR3WinUnsetForA20CheckerCallback/unmap: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));
2656 return rc;
2657#else
2658 LogRel(("nemR3WinUnsetForA20CheckerCallback/unmap: GCPhys=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
2659 GCPhys, hrc, hrc, RTNtCurrentTeb()->LastStatusValue, RTNtCurrentTeb()->LastErrorValue));
2660 return VERR_INTERNAL_ERROR_2;
2661#endif
2662 }
2663 }
2664 RT_NOREF(pVCpu, pvUser);
2665 return VINF_SUCCESS;
2666}
2667
2668
2669/**
2670 * Unmaps a page from Hyper-V for the purpose of emulating A20 gate behavior.
2671 *
2672 * @returns The PGMPhysNemQueryPageInfo result.
2673 * @param pVM The cross context VM structure.
2674 * @param pVCpu The cross context virtual CPU structure.
2675 * @param GCPhys The page to unmap.
2676 */
2677static int nemR3WinUnmapPageForA20Gate(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
2678{
2679 PGMPHYSNEMPAGEINFO Info;
2680 return PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhys, false /*fMakeWritable*/, &Info,
2681 nemR3WinUnsetForA20CheckerCallback, NULL);
2682}
2683
2684
2685/**
2686 * Called when the A20 state changes.
2687 *
2688 * Hyper-V doesn't seem to offer a simple way of implementing the A20 line
2689 * features of PCs. So, we do a very minimal emulation of the HMA to make DOS
2690 * happy.
2691 *
2692 * @param pVCpu The CPU the A20 state changed on.
2693 * @param fEnabled Whether it was enabled (true) or disabled.
2694 */
2695void nemR3NativeNotifySetA20(PVMCPU pVCpu, bool fEnabled)
2696{
2697 Log(("nemR3NativeNotifySetA20: fEnabled=%RTbool\n", fEnabled));
2698 PVM pVM = pVCpu->CTX_SUFF(pVM);
2699 if (!pVM->nem.s.fA20Fixed)
2700 {
2701 pVM->nem.s.fA20Enabled = fEnabled;
2702 for (RTGCPHYS GCPhys = _1M; GCPhys < _1M + _64K; GCPhys += X86_PAGE_SIZE)
2703 nemR3WinUnmapPageForA20Gate(pVM, pVCpu, GCPhys);
2704 }
2705}
2706
2707
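/**
 * Notification that a physical access handler has been registered; like the
 * other handler notifications below, this is currently only logged.
 */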
2708void nemR3NativeNotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
2709{
2710 LogRel(("nemR3NativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
2711 NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb);
2712}
2713
2714
2715void nemR3NativeNotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
2716 int fRestoreAsRAM, bool fRestoreAsRAM2)
2717{
2718 LogRel(("nemR3NativeNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d fRestoreAsRAM=%d fRestoreAsRAM2=%d\n",
2719 GCPhys, cb, enmKind, fRestoreAsRAM, fRestoreAsRAM2));
2720 NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb); NOREF(fRestoreAsRAM); NOREF(fRestoreAsRAM2);
2721}
2722
2723
2724void nemR3NativeNotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
2725 RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
2726{
2727 LogRel(("nemR3NativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
2728 GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
2729 NOREF(pVM); NOREF(enmKind); NOREF(GCPhysOld); NOREF(GCPhysNew); NOREF(cb); NOREF(fRestoreAsRAM);
2730}
2731
2732
2733/**
2734 * Worker that maps pages into Hyper-V.
2735 *
2736 * This is used by the PGM physical page notifications as well as the memory
2737 * access VMEXIT handlers.
2738 *
2739 * @returns VBox status code.
2740 * @param pVM The cross context VM structure.
2741 * @param pVCpu The cross context virtual CPU structure of the
2742 * calling EMT.
2743 * @param GCPhysSrc The source page address.
2744 * @param GCPhysDst The hyper-V destination page. This may differ from
2745 * GCPhysSrc when A20 is disabled.
2746 * @param fPageProt NEM_PAGE_PROT_XXX.
2747 * @param pu2State Our page state (input/output).
2748 * @param fBackingChanged Set if the page backing is being changed.
2749 * @thread EMT(pVCpu)
2750 */
2751static int nemR3NativeSetPhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst, uint32_t fPageProt,
2752 uint8_t *pu2State, bool fBackingChanged)
2753{
2754#ifdef NEM_WIN_USE_HYPERCALLS
2755 /*
2756 * When using the hypercalls instead of the ring-3 APIs, we don't need to
2757 * unmap memory before modifying it. We still want to track the state though,
 2758 * since unmap will fail when called on an unmapped page and we don't want to redo
2759 * upgrades/downgrades.
2760 */
2761 uint8_t const u2OldState = *pu2State;
2762 int rc;
2763 if (fPageProt == NEM_PAGE_PROT_NONE)
2764 {
2765 if (u2OldState > NEM_WIN_PAGE_STATE_UNMAPPED)
2766 {
2767 rc = nemR3WinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
2768 if (RT_SUCCESS(rc))
2769 {
2770 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
2771 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
2772 Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
2773 }
2774 else
2775 AssertLogRelMsgFailed(("nemR3NativeSetPhysPage/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
2776 }
2777 else
2778 rc = VINF_SUCCESS;
2779 }
2780 else if (fPageProt & NEM_PAGE_PROT_WRITE)
2781 {
2782 if (u2OldState != NEM_WIN_PAGE_STATE_WRITABLE || fBackingChanged)
2783 {
2784 rc = nemR3WinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
2785 HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE
2786 | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
2787 if (RT_SUCCESS(rc))
2788 {
2789 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
2790 uint32_t cMappedPages = u2OldState <= NEM_WIN_PAGE_STATE_UNMAPPED
2791 ? ASMAtomicIncU32(&pVM->nem.s.cMappedPages) : pVM->nem.s.cMappedPages;
2792 Log5(("NEM GPA writable/set: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
2793 NOREF(cMappedPages);
2794 }
2795 else
2796 AssertLogRelMsgFailed(("nemR3NativeSetPhysPage/writable: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
2797 }
2798 else
2799 rc = VINF_SUCCESS;
2800 }
2801 else
2802 {
2803 if (u2OldState != NEM_WIN_PAGE_STATE_READABLE || fBackingChanged)
2804 {
2805 rc = nemR3WinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
2806 HV_MAP_GPA_READABLE | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
2807 if (RT_SUCCESS(rc))
2808 {
2809 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
2810 uint32_t cMappedPages = u2OldState <= NEM_WIN_PAGE_STATE_UNMAPPED
2811 ? ASMAtomicIncU32(&pVM->nem.s.cMappedPages) : pVM->nem.s.cMappedPages;
2812 Log5(("NEM GPA read+exec/set: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
2813 NOREF(cMappedPages);
2814 }
2815 else
2816                AssertLogRelMsgFailed(("nemR3NativeSetPhysPage/readonly: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
2817 }
2818 else
2819 rc = VINF_SUCCESS;
2820 }
2821
2822    return rc;
2823
2824#else
2825 /*
2826 * Looks like we need to unmap a page before we can change the backing
2827 * or even modify the protection. This is going to be *REALLY* efficient.
2828 * PGM lends us two bits to keep track of the state here.
2829 */
2830 uint8_t const u2OldState = *pu2State;
2831 uint8_t const u2NewState = fPageProt & NEM_PAGE_PROT_WRITE ? NEM_WIN_PAGE_STATE_WRITABLE
2832 : fPageProt & NEM_PAGE_PROT_READ ? NEM_WIN_PAGE_STATE_READABLE : NEM_WIN_PAGE_STATE_UNMAPPED;
2833 if ( fBackingChanged
2834 || u2NewState != u2OldState)
2835 {
2836 if (u2OldState > NEM_WIN_PAGE_STATE_UNMAPPED)
2837 {
2838# ifdef NEM_WIN_USE_HYPERCALLS
2839 int rc = nemR3WinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
2840 AssertRC(rc);
2841 if (RT_SUCCESS(rc))
2842 {
2843 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
2844 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
2845 if (u2NewState == NEM_WIN_PAGE_STATE_UNMAPPED)
2846 {
2847 Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n",
2848 GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
2849 return VINF_SUCCESS;
2850 }
2851 }
2852 else
2853 {
2854 LogRel(("nemR3NativeSetPhysPage/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
2855 return rc;
2856 }
2857# else
2858 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst, X86_PAGE_SIZE);
2859 if (SUCCEEDED(hrc))
2860 {
2861 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
2862 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
2863 if (u2NewState == NEM_WIN_PAGE_STATE_UNMAPPED)
2864 {
2865 Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n",
2866 GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
2867 return VINF_SUCCESS;
2868 }
2869 }
2870 else
2871 {
2872 LogRel(("nemR3NativeSetPhysPage/unmap: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
2873 GCPhysDst, hrc, hrc, RTNtCurrentTeb()->LastStatusValue, RTNtCurrentTeb()->LastErrorValue));
2874 return VERR_NEM_INIT_FAILED;
2875 }
2876# endif
2877 }
2878 }
2879
2880 /*
2881 * Writeable mapping?
2882 */
2883 if (fPageProt & NEM_PAGE_PROT_WRITE)
2884 {
2885# ifdef NEM_WIN_USE_HYPERCALLS
2886 int rc = nemR3WinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
2887 HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE
2888 | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
2889 AssertRC(rc);
2890 if (RT_SUCCESS(rc))
2891 {
2892 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
2893 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
2894 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
2895 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
2896 return VINF_SUCCESS;
2897 }
2898 LogRel(("nemR3NativeSetPhysPage/writable: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
2899 return rc;
2900# else
2901 void *pvPage;
2902 int rc = nemR3NativeGCPhys2R3PtrWriteable(pVM, GCPhysSrc, &pvPage);
2903 if (RT_SUCCESS(rc))
2904 {
2905 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvPage, GCPhysDst, X86_PAGE_SIZE,
2906 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute | WHvMapGpaRangeFlagWrite);
2907 if (SUCCEEDED(hrc))
2908 {
2909 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
2910 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
2911 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
2912 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
2913 return VINF_SUCCESS;
2914 }
2915 LogRel(("nemR3NativeSetPhysPage/writable: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
2916 GCPhysDst, hrc, hrc, RTNtCurrentTeb()->LastStatusValue, RTNtCurrentTeb()->LastErrorValue));
2917 return VERR_NEM_INIT_FAILED;
2918 }
2919 LogRel(("nemR3NativeSetPhysPage/writable: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
2920 return rc;
2921# endif
2922 }
2923
2924 if (fPageProt & NEM_PAGE_PROT_READ)
2925 {
2926# ifdef NEM_WIN_USE_HYPERCALLS
2927 int rc = nemR3WinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
2928 HV_MAP_GPA_READABLE | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
2929 AssertRC(rc);
2930 if (RT_SUCCESS(rc))
2931 {
2932 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
2933 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
2934 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
2935 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
2936 return VINF_SUCCESS;
2937 }
2938 LogRel(("nemR3NativeSetPhysPage/readonly: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
2939 return rc;
2940# else
2941 const void *pvPage;
2942 int rc = nemR3NativeGCPhys2R3PtrReadOnly(pVM, GCPhysSrc, &pvPage);
2943 if (RT_SUCCESS(rc))
2944 {
2945 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, (void *)pvPage, GCPhysDst, X86_PAGE_SIZE,
2946 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
2947 if (SUCCEEDED(hrc))
2948 {
2949 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
2950 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
2951 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
2952 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
2953 return VINF_SUCCESS;
2954 }
2955 LogRel(("nemR3NativeSetPhysPage/readonly: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
2956 GCPhysDst, hrc, hrc, RTNtCurrentTeb()->LastStatusValue, RTNtCurrentTeb()->LastErrorValue));
2957 return VERR_NEM_INIT_FAILED;
2958 }
2959 LogRel(("nemR3NativeSetPhysPage/readonly: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
2960 return rc;
2961# endif
2962 }
2963
2964 /* We already unmapped it above. */
2965 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
2966 return VINF_SUCCESS;
2967#endif /* !NEM_WIN_USE_HYPERCALLS */
2968}
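

/*
 * Illustrative sketch (not compiled): nemR3NativeSetPhysPage above tracks each
 * guest page with the two-bit NEM_WIN_PAGE_STATE_XXX value that PGM lends us,
 * so redundant map/unmap calls can be skipped.  The hypothetical helper below
 * merely restates the fPageProt -> target state mapping used by that worker;
 * nothing calls it and it is not part of the original file.
 */
#if 0
DECLINLINE(uint8_t) nemR3WinSketchTargetPageState(uint32_t fPageProt)
{
    if (fPageProt & NEM_PAGE_PROT_WRITE)    /* mapped read+write+exec. */
        return NEM_WIN_PAGE_STATE_WRITABLE;
    if (fPageProt & NEM_PAGE_PROT_READ)     /* mapped read+exec only. */
        return NEM_WIN_PAGE_STATE_READABLE;
    return NEM_WIN_PAGE_STATE_UNMAPPED;     /* NEM_PAGE_PROT_NONE -> unmapped. */
}
#endif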
2969
2970
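/**
 * Worker that simply unmaps a page from Hyper-V, updating the page state.
 *
 * Unlike nemR3NativeSetPhysPage this does not remap anything; it is used when
 * a page just needs to go away.  Already unmapped pages are quietly accepted.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhysDst   The Hyper-V page address to unmap.
 * @param   pu2State    Our page state (input/output).
 */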
2971static int nemR3JustUnmapPageFromHyperV(PVM pVM, RTGCPHYS GCPhysDst, uint8_t *pu2State)
2972{
2973 if (*pu2State <= NEM_WIN_PAGE_STATE_UNMAPPED)
2974 {
2975 Log5(("nemR3JustUnmapPageFromHyperV: %RGp == unmapped\n", GCPhysDst));
2976 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
2977 return VINF_SUCCESS;
2978 }
2979
2980#ifdef NEM_WIN_USE_HYPERCALLS
2981 PVMCPU pVCpu = VMMGetCpu(pVM);
2982 int rc = nemR3WinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
2983 AssertRC(rc);
2984 if (RT_SUCCESS(rc))
2985 {
2986 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
2987 Log5(("NEM GPA unmapped/just: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[*pu2State], cMappedPages));
2988 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
2989 return VINF_SUCCESS;
2990 }
2991 LogRel(("nemR3JustUnmapPageFromHyperV/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
2992 return rc;
2993#else
2994 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, X86_PAGE_SIZE);
2995 if (SUCCEEDED(hrc))
2996 {
2997 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
2998 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
2999 Log5(("nemR3JustUnmapPageFromHyperV: %RGp => unmapped (total %u)\n", GCPhysDst, cMappedPages));
3000 return VINF_SUCCESS;
3001 }
3002 LogRel(("nemR3JustUnmapPageFromHyperV(%RGp): failed! hrc=%Rhrc (%#x) Last=%#x/%u\n",
3003 GCPhysDst, hrc, hrc, RTNtCurrentTeb()->LastStatusValue, RTNtCurrentTeb()->LastErrorValue));
3004 return VERR_INTERNAL_ERROR_3;
3005#endif
3006}
3007
3008
3009int nemR3NativeNotifyPhysPageAllocated(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
3010 PGMPAGETYPE enmType, uint8_t *pu2State)
3011{
3012 LogRel(("nemR3NativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
3013 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
3014 RT_NOREF_PV(HCPhys); RT_NOREF_PV(enmType);
3015
3016 int rc;
3017#ifdef NEM_WIN_USE_HYPERCALLS
3018 PVMCPU pVCpu = VMMGetCpu(pVM);
3019 if ( pVM->nem.s.fA20Enabled
3020 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
3021 rc = nemR3NativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
3022 else
3023 {
3024 /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
3025 rc = nemR3WinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
3026 if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys) && RT_SUCCESS(rc))
3027 rc = nemR3NativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
3028
3029 }
3030#else
3031 RT_NOREF_PV(fPageProt);
3032 if ( pVM->nem.s.fA20Enabled
3033 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
3034 rc = nemR3JustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
3035 else if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
3036 rc = nemR3JustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
3037 else
3038 rc = VINF_SUCCESS; /* ignore since we've got the alias page at this address. */
3039#endif
3040 return rc;
3041}
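

/*
 * Illustrative sketch (not compiled): the A20 handling above only concerns the
 * low 64 KB and its 1 MB alias.  For a page that is merely "relevant" (below
 * 64 KB), the alias page at GCPhys | RT_BIT_32(20) is unmapped and the real
 * page is (re)mapped; for a page inside the 1 MB..1 MB+64 KB window itself
 * ("subject" to the gate) only the unmap is done and the mapping is resynced
 * lazily when needed.  The constants below are just concrete examples of the
 * macros defined at the top of this file; they are not part of the original.
 */
#if 0
AssertCompile( NEM_WIN_IS_RELEVANT_TO_A20(UINT32_C(0x00001000)));   /* 4 KB: low 64 KB, has an alias. */
AssertCompile(!NEM_WIN_IS_SUBJECT_TO_A20( UINT32_C(0x00001000)));
AssertCompile( NEM_WIN_IS_SUBJECT_TO_A20( UINT32_C(0x00101000)));   /* 1 MB + 4 KB: the alias itself. */
AssertCompile((UINT32_C(0x00001000) | RT_BIT_32(20)) == UINT32_C(0x00101000));
#endif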
3042
3043
3044void nemR3NativeNotifyPhysPageProtChanged(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
3045 PGMPAGETYPE enmType, uint8_t *pu2State)
3046{
3047 LogRel(("nemR3NativeNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
3048 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
3049 RT_NOREF_PV(HCPhys); RT_NOREF_PV(enmType);
3050
3051#ifdef NEM_WIN_USE_HYPERCALLS
3052 PVMCPU pVCpu = VMMGetCpu(pVM);
3053 if ( pVM->nem.s.fA20Enabled
3054 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
3055 nemR3NativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, false /*fBackingChanged*/);
3056 else
3057 {
3058 /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
3059 nemR3WinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
3060 if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
3061 nemR3NativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, false /*fBackingChanged*/);
3062 }
3063#else
3064 RT_NOREF_PV(fPageProt);
3065 if ( pVM->nem.s.fA20Enabled
3066 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
3067 nemR3JustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
3068 else if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
3069 nemR3JustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
3070 /* else: ignore since we've got the alias page at this address. */
3071#endif
3072}
3073
3074
3075void nemR3NativeNotifyPhysPageChanged(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
3076 uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
3077{
3078    LogRel(("nemR3NativeNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
3079 GCPhys, HCPhysPrev, HCPhysNew, fPageProt, enmType, *pu2State));
3080 RT_NOREF_PV(HCPhysPrev); RT_NOREF_PV(HCPhysNew); RT_NOREF_PV(enmType);
3081
3082#ifdef NEM_WIN_USE_HYPERCALLS
3083 PVMCPU pVCpu = VMMGetCpu(pVM);
3084 if ( pVM->nem.s.fA20Enabled
3085 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
3086 nemR3NativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
3087 else
3088 {
3089 /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
3090 nemR3WinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
3091 if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
3092 nemR3NativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
3093 }
3094#else
3095 RT_NOREF_PV(fPageProt);
3096 if ( pVM->nem.s.fA20Enabled
3097 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
3098 nemR3JustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
3099 else if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
3100 nemR3JustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
3101 /* else: ignore since we've got the alias page at this address. */
3102#endif
3103}
3104