VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/NEMR3Native-win.cpp@80074

Last change on this file since 80074 was 76553, checked in by vboxsync, 6 years ago

scm --update-copyright-year

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 135.7 KB
1/* $Id: NEMR3Native-win.cpp 76553 2019-01-01 01:45:53Z vboxsync $ */
2/** @file
3 * NEM - Native execution manager, native ring-3 Windows backend.
4 *
5 * Log group 2: Exit logging.
6 * Log group 3: Log context on exit.
7 * Log group 5: Ring-3 memory management
8 * Log group 6: Ring-0 memory management
9 * Log group 12: API intercepts.
10 */
11
12/*
13 * Copyright (C) 2018-2019 Oracle Corporation
14 *
15 * This file is part of VirtualBox Open Source Edition (OSE), as
16 * available from http://www.virtualbox.org. This file is free software;
17 * you can redistribute it and/or modify it under the terms of the GNU
18 * General Public License (GPL) as published by the Free Software
19 * Foundation, in version 2 as it comes in the "COPYING" file of the
20 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
21 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
22 */
23
24
25/*********************************************************************************************************************************
26* Header Files *
27*********************************************************************************************************************************/
28#define LOG_GROUP LOG_GROUP_NEM
29#define VMCPU_INCL_CPUM_GST_CTX
30#include <iprt/nt/nt-and-windows.h>
31#include <iprt/nt/hyperv.h>
32#include <iprt/nt/vid.h>
33#include <WinHvPlatform.h>
34
35#ifndef _WIN32_WINNT_WIN10
36# error "Missing _WIN32_WINNT_WIN10"
37#endif
38#ifndef _WIN32_WINNT_WIN10_RS1 /* Missing define, causing trouble for us. */
39# define _WIN32_WINNT_WIN10_RS1 (_WIN32_WINNT_WIN10 + 1)
40#endif
41#include <sysinfoapi.h>
42#include <debugapi.h>
43#include <errhandlingapi.h>
44#include <fileapi.h>
45#include <winerror.h> /* no api header for this. */
46
47#include <VBox/vmm/nem.h>
48#include <VBox/vmm/iem.h>
49#include <VBox/vmm/em.h>
50#include <VBox/vmm/apic.h>
51#include <VBox/vmm/pdm.h>
52#include <VBox/vmm/dbgftrace.h>
53#include "NEMInternal.h"
54#include <VBox/vmm/vm.h>
55
56#include <iprt/ldr.h>
57#include <iprt/path.h>
58#include <iprt/string.h>
59#include <iprt/system.h>
60#include <iprt/utf16.h>
61
62
63/*********************************************************************************************************************************
64* Defined Constants And Macros *
65*********************************************************************************************************************************/
66#ifdef LOG_ENABLED
67# define NEM_WIN_INTERCEPT_NT_IO_CTLS
68#endif
69
70/** VID I/O control detection: Fake partition handle input. */
71#define NEM_WIN_IOCTL_DETECTOR_FAKE_HANDLE ((HANDLE)(uintptr_t)38479125)
72/** VID I/O control detection: Fake partition ID return. */
73#define NEM_WIN_IOCTL_DETECTOR_FAKE_PARTITION_ID UINT64_C(0xfa1e000042424242)
74/** VID I/O control detection: Fake CPU index input. */
75#define NEM_WIN_IOCTL_DETECTOR_FAKE_VP_INDEX UINT32_C(42)
76/** VID I/O control detection: Fake timeout input. */
77#define NEM_WIN_IOCTL_DETECTOR_FAKE_TIMEOUT UINT32_C(0x00080286)
78
79
80/*********************************************************************************************************************************
81* Global Variables *
82*********************************************************************************************************************************/
83/** @name APIs imported from WinHvPlatform.dll
84 * @{ */
85static decltype(WHvGetCapability) * g_pfnWHvGetCapability;
86static decltype(WHvCreatePartition) * g_pfnWHvCreatePartition;
87static decltype(WHvSetupPartition) * g_pfnWHvSetupPartition;
88static decltype(WHvDeletePartition) * g_pfnWHvDeletePartition;
89static decltype(WHvGetPartitionProperty) * g_pfnWHvGetPartitionProperty;
90static decltype(WHvSetPartitionProperty) * g_pfnWHvSetPartitionProperty;
91static decltype(WHvMapGpaRange) * g_pfnWHvMapGpaRange;
92static decltype(WHvUnmapGpaRange) * g_pfnWHvUnmapGpaRange;
93static decltype(WHvTranslateGva) * g_pfnWHvTranslateGva;
94#ifndef NEM_WIN_USE_OUR_OWN_RUN_API
95static decltype(WHvCreateVirtualProcessor) * g_pfnWHvCreateVirtualProcessor;
96static decltype(WHvDeleteVirtualProcessor) * g_pfnWHvDeleteVirtualProcessor;
97static decltype(WHvRunVirtualProcessor) * g_pfnWHvRunVirtualProcessor;
98static decltype(WHvCancelRunVirtualProcessor) * g_pfnWHvCancelRunVirtualProcessor;
99static decltype(WHvGetVirtualProcessorRegisters) * g_pfnWHvGetVirtualProcessorRegisters;
100static decltype(WHvSetVirtualProcessorRegisters) * g_pfnWHvSetVirtualProcessorRegisters;
101#endif
102/** @} */
103
104/** @name APIs imported from Vid.dll
105 * @{ */
106static decltype(VidGetHvPartitionId) *g_pfnVidGetHvPartitionId;
107static decltype(VidStartVirtualProcessor) *g_pfnVidStartVirtualProcessor;
108static decltype(VidStopVirtualProcessor) *g_pfnVidStopVirtualProcessor;
109static decltype(VidMessageSlotMap) *g_pfnVidMessageSlotMap;
110static decltype(VidMessageSlotHandleAndGetNext) *g_pfnVidMessageSlotHandleAndGetNext;
111#ifdef LOG_ENABLED
112static decltype(VidGetVirtualProcessorState) *g_pfnVidGetVirtualProcessorState;
113static decltype(VidSetVirtualProcessorState) *g_pfnVidSetVirtualProcessorState;
114static decltype(VidGetVirtualProcessorRunningStatus) *g_pfnVidGetVirtualProcessorRunningStatus;
115#endif
116/** @} */
117
118/** The Windows build number. */
119static uint32_t g_uBuildNo = 17134;
120
121
122
123/**
124 * Import instructions.
125 */
126static const struct
127{
128 uint8_t idxDll; /**< 0 for WinHvPlatform.dll, 1 for vid.dll. */
129 bool fOptional; /**< Set if import is optional. */
130 PFNRT *ppfn; /**< The function pointer variable. */
131 const char *pszName; /**< The function name. */
132} g_aImports[] =
133{
134#define NEM_WIN_IMPORT(a_idxDll, a_fOptional, a_Name) { (a_idxDll), (a_fOptional), (PFNRT *)&RT_CONCAT(g_pfn,a_Name), #a_Name }
135 NEM_WIN_IMPORT(0, false, WHvGetCapability),
136 NEM_WIN_IMPORT(0, false, WHvCreatePartition),
137 NEM_WIN_IMPORT(0, false, WHvSetupPartition),
138 NEM_WIN_IMPORT(0, false, WHvDeletePartition),
139 NEM_WIN_IMPORT(0, false, WHvGetPartitionProperty),
140 NEM_WIN_IMPORT(0, false, WHvSetPartitionProperty),
141 NEM_WIN_IMPORT(0, false, WHvMapGpaRange),
142 NEM_WIN_IMPORT(0, false, WHvUnmapGpaRange),
143 NEM_WIN_IMPORT(0, false, WHvTranslateGva),
144#ifndef NEM_WIN_USE_OUR_OWN_RUN_API
145 NEM_WIN_IMPORT(0, false, WHvCreateVirtualProcessor),
146 NEM_WIN_IMPORT(0, false, WHvDeleteVirtualProcessor),
147 NEM_WIN_IMPORT(0, false, WHvRunVirtualProcessor),
148 NEM_WIN_IMPORT(0, false, WHvCancelRunVirtualProcessor),
149 NEM_WIN_IMPORT(0, false, WHvGetVirtualProcessorRegisters),
150 NEM_WIN_IMPORT(0, false, WHvSetVirtualProcessorRegisters),
151#endif
152 NEM_WIN_IMPORT(1, false, VidGetHvPartitionId),
153 NEM_WIN_IMPORT(1, false, VidMessageSlotMap),
154 NEM_WIN_IMPORT(1, false, VidMessageSlotHandleAndGetNext),
155 NEM_WIN_IMPORT(1, false, VidStartVirtualProcessor),
156 NEM_WIN_IMPORT(1, false, VidStopVirtualProcessor),
157#ifdef LOG_ENABLED
158 NEM_WIN_IMPORT(1, false, VidGetVirtualProcessorState),
159 NEM_WIN_IMPORT(1, false, VidSetVirtualProcessorState),
160 NEM_WIN_IMPORT(1, false, VidGetVirtualProcessorRunningStatus),
161#endif
162#undef NEM_WIN_IMPORT
163};
164
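/* Editor's illustrative note (not from the original file): each NEM_WIN_IMPORT()
 * entry above expands to an initializer that ties the exported symbol name to the
 * matching g_pfn pointer variable; e.g. NEM_WIN_IMPORT(0, false, WHvGetCapability)
 * becomes:
 *
 *     { 0, false, (PFNRT *)&g_pfnWHvGetCapability, "WHvGetCapability" },
 */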
165
166/** The real NtDeviceIoControlFile API in NTDLL. */
167static decltype(NtDeviceIoControlFile) *g_pfnNtDeviceIoControlFile;
168/** Pointer to the NtDeviceIoControlFile import table entry. */
169static decltype(NtDeviceIoControlFile) **g_ppfnVidNtDeviceIoControlFile;
170/** Info about the VidGetHvPartitionId I/O control interface. */
171static NEMWINIOCTL g_IoCtlGetHvPartitionId;
172/** Info about the VidStartVirtualProcessor I/O control interface. */
173static NEMWINIOCTL g_IoCtlStartVirtualProcessor;
174/** Info about the VidStopVirtualProcessor I/O control interface. */
175static NEMWINIOCTL g_IoCtlStopVirtualProcessor;
176/** Info about the VidMessageSlotHandleAndGetNext I/O control interface. */
177static NEMWINIOCTL g_IoCtlMessageSlotHandleAndGetNext;
178#ifdef LOG_ENABLED
179/** Info about the VidMessageSlotMap I/O control interface - for logging. */
180static NEMWINIOCTL g_IoCtlMessageSlotMap;
181/** Info about the VidGetVirtualProcessorState I/O control interface - for logging. */
182static NEMWINIOCTL g_IoCtlGetVirtualProcessorState;
183/** Info about the VidSetVirtualProcessorState I/O control interface - for logging. */
184static NEMWINIOCTL g_IoCtlSetVirtualProcessorState;
185/** Pointer to what nemR3WinIoctlDetector_ForLogging should fill in. */
186static NEMWINIOCTL *g_pIoCtlDetectForLogging;
187#endif
188
189#ifdef NEM_WIN_INTERCEPT_NT_IO_CTLS
190/** Mapping slot for CPU #0.
191 * @{ */
192static VID_MESSAGE_MAPPING_HEADER *g_pMsgSlotMapping = NULL;
193static const HV_MESSAGE_HEADER *g_pHvMsgHdr;
194static const HV_X64_INTERCEPT_MESSAGE_HEADER *g_pX64MsgHdr;
195/** @} */
196#endif
197
198
199/*
200 * Let the preprocessor alias the APIs to import variables for better autocompletion.
201 */
202#ifndef IN_SLICKEDIT
203# define WHvGetCapability g_pfnWHvGetCapability
204# define WHvCreatePartition g_pfnWHvCreatePartition
205# define WHvSetupPartition g_pfnWHvSetupPartition
206# define WHvDeletePartition g_pfnWHvDeletePartition
207# define WHvGetPartitionProperty g_pfnWHvGetPartitionProperty
208# define WHvSetPartitionProperty g_pfnWHvSetPartitionProperty
209# define WHvMapGpaRange g_pfnWHvMapGpaRange
210# define WHvUnmapGpaRange g_pfnWHvUnmapGpaRange
211# define WHvTranslateGva g_pfnWHvTranslateGva
212# define WHvCreateVirtualProcessor g_pfnWHvCreateVirtualProcessor
213# define WHvDeleteVirtualProcessor g_pfnWHvDeleteVirtualProcessor
214# define WHvRunVirtualProcessor g_pfnWHvRunVirtualProcessor
215# define WHvGetRunExitContextSize g_pfnWHvGetRunExitContextSize
216# define WHvCancelRunVirtualProcessor g_pfnWHvCancelRunVirtualProcessor
217# define WHvGetVirtualProcessorRegisters g_pfnWHvGetVirtualProcessorRegisters
218# define WHvSetVirtualProcessorRegisters g_pfnWHvSetVirtualProcessorRegisters
219
220# define VidMessageSlotHandleAndGetNext g_pfnVidMessageSlotHandleAndGetNext
221# define VidStartVirtualProcessor g_pfnVidStartVirtualProcessor
222# define VidStopVirtualProcessor g_pfnVidStopVirtualProcessor
223
224#endif
225
226/** WHV_MEMORY_ACCESS_TYPE names */
227static const char * const g_apszWHvMemAccesstypes[4] = { "read", "write", "exec", "!undefined!" };
228
229
230/*********************************************************************************************************************************
231* Internal Functions *
232*********************************************************************************************************************************/
233
234/*
235 * Instantiate the code we share with ring-0.
236 */
237#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
238# define NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
239#else
240# undef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
241#endif
242#include "../VMMAll/NEMAllNativeTemplate-win.cpp.h"
243
244
245
246#ifdef NEM_WIN_INTERCEPT_NT_IO_CTLS
247/**
248 * Wrapper that logs the call from VID.DLL.
249 *
250 * This is very handy for figuring out why an API call fails.
251 */
252static NTSTATUS WINAPI
253nemR3WinLogWrapper_NtDeviceIoControlFile(HANDLE hFile, HANDLE hEvt, PIO_APC_ROUTINE pfnApcCallback, PVOID pvApcCtx,
254 PIO_STATUS_BLOCK pIos, ULONG uFunction, PVOID pvInput, ULONG cbInput,
255 PVOID pvOutput, ULONG cbOutput)
256{
257
258 char szFunction[32];
259 const char *pszFunction;
260 if (uFunction == g_IoCtlMessageSlotHandleAndGetNext.uFunction)
261 pszFunction = "VidMessageSlotHandleAndGetNext";
262 else if (uFunction == g_IoCtlStartVirtualProcessor.uFunction)
263 pszFunction = "VidStartVirtualProcessor";
264 else if (uFunction == g_IoCtlStopVirtualProcessor.uFunction)
265 pszFunction = "VidStopVirtualProcessor";
266 else if (uFunction == g_IoCtlMessageSlotMap.uFunction)
267 pszFunction = "VidMessageSlotMap";
268 else if (uFunction == g_IoCtlGetVirtualProcessorState.uFunction)
269 pszFunction = "VidGetVirtualProcessorState";
270 else if (uFunction == g_IoCtlSetVirtualProcessorState.uFunction)
271 pszFunction = "VidSetVirtualProcessorState";
272 else
273 {
274 RTStrPrintf(szFunction, sizeof(szFunction), "%#x", uFunction);
275 pszFunction = szFunction;
276 }
277
278 if (cbInput > 0 && pvInput)
279 Log12(("VID!NtDeviceIoControlFile: %s/input: %.*Rhxs\n", pszFunction, RT_MIN(cbInput, 32), pvInput));
280 NTSTATUS rcNt = g_pfnNtDeviceIoControlFile(hFile, hEvt, pfnApcCallback, pvApcCtx, pIos, uFunction,
281 pvInput, cbInput, pvOutput, cbOutput);
282 if (!hEvt && !pfnApcCallback && !pvApcCtx)
283 Log12(("VID!NtDeviceIoControlFile: hFile=%#zx pIos=%p->{s:%#x, i:%#zx} uFunction=%s Input=%p LB %#x Output=%p LB %#x) -> %#x; Caller=%p\n",
284 hFile, pIos, pIos->Status, pIos->Information, pszFunction, pvInput, cbInput, pvOutput, cbOutput, rcNt, ASMReturnAddress()));
285 else
286 Log12(("VID!NtDeviceIoControlFile: hFile=%#zx hEvt=%#zx Apc=%p/%p pIos=%p->{s:%#x, i:%#zx} uFunction=%s Input=%p LB %#x Output=%p LB %#x) -> %#x; Caller=%p\n",
287 hFile, hEvt, pfnApcCallback, pvApcCtx, pIos, pIos->Status, pIos->Information, pszFunction,
288 pvInput, cbInput, pvOutput, cbOutput, rcNt, ASMReturnAddress()));
289 if (cbOutput > 0 && pvOutput)
290 {
291 Log12(("VID!NtDeviceIoControlFile: %s/output: %.*Rhxs\n", pszFunction, RT_MIN(cbOutput, 32), pvOutput));
292 if (uFunction == 0x2210cc && g_pMsgSlotMapping == NULL && cbOutput >= sizeof(void *))
293 {
294 g_pMsgSlotMapping = *(VID_MESSAGE_MAPPING_HEADER **)pvOutput;
295 g_pHvMsgHdr = (const HV_MESSAGE_HEADER *)(g_pMsgSlotMapping + 1);
296 g_pX64MsgHdr = (const HV_X64_INTERCEPT_MESSAGE_HEADER *)(g_pHvMsgHdr + 1);
297 Log12(("VID!NtDeviceIoControlFile: Message slot mapping: %p\n", g_pMsgSlotMapping));
298 }
299 }
300 if ( g_pMsgSlotMapping
301 && ( uFunction == g_IoCtlMessageSlotHandleAndGetNext.uFunction
302 || uFunction == g_IoCtlStopVirtualProcessor.uFunction
303 || uFunction == g_IoCtlMessageSlotMap.uFunction
304 ))
305 Log12(("VID!NtDeviceIoControlFile: enmVidMsgType=%#x cb=%#x msg=%#x payload=%u cs:rip=%04x:%08RX64 (%s)\n",
306 g_pMsgSlotMapping->enmVidMsgType, g_pMsgSlotMapping->cbMessage,
307 g_pHvMsgHdr->MessageType, g_pHvMsgHdr->PayloadSize,
308 g_pX64MsgHdr->CsSegment.Selector, g_pX64MsgHdr->Rip, pszFunction));
309
310 return rcNt;
311}
312#endif /* NEM_WIN_INTERCEPT_NT_IO_CTLS */
313
314
315/**
316 * Patches the call table of VID.DLL so we can intercept NtDeviceIoControlFile.
317 *
318 * This is used to figure out the I/O control codes and, in logging builds,
319 * to log the API calls that WinHvPlatform.dll makes.
320 *
321 * @returns VBox status code.
322 * @param hLdrModVid The VID module handle.
323 * @param pErrInfo Where to return additional error information.
324 */
325static int nemR3WinInitVidIntercepts(RTLDRMOD hLdrModVid, PRTERRINFO pErrInfo)
326{
327 /*
328 * Locate the real API.
329 */
330 g_pfnNtDeviceIoControlFile = (decltype(NtDeviceIoControlFile) *)RTLdrGetSystemSymbol("NTDLL.DLL", "NtDeviceIoControlFile");
331 AssertReturn(g_pfnNtDeviceIoControlFile != NULL,
332 RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Failed to resolve NtDeviceIoControlFile from NTDLL.DLL"));
333
334 /*
335 * Locate the PE header and get what we need from it.
336 */
337 uint8_t const *pbImage = (uint8_t const *)RTLdrGetNativeHandle(hLdrModVid);
338 IMAGE_DOS_HEADER const *pMzHdr = (IMAGE_DOS_HEADER const *)pbImage;
339 AssertReturn(pMzHdr->e_magic == IMAGE_DOS_SIGNATURE,
340 RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "VID.DLL mapping doesn't start with MZ signature: %#x", pMzHdr->e_magic));
341 IMAGE_NT_HEADERS const *pNtHdrs = (IMAGE_NT_HEADERS const *)&pbImage[pMzHdr->e_lfanew];
342 AssertReturn(pNtHdrs->Signature == IMAGE_NT_SIGNATURE,
343 RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "VID.DLL has invalid PE signature: %#x @%#x",
344 pNtHdrs->Signature, pMzHdr->e_lfanew));
345
346 uint32_t const cbImage = pNtHdrs->OptionalHeader.SizeOfImage;
347 IMAGE_DATA_DIRECTORY const ImportDir = pNtHdrs->OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_IMPORT];
348
349 /*
350 * Walk the import descriptor table looking for NTDLL.DLL.
351 */
352 AssertReturn( ImportDir.Size > 0
353 && ImportDir.Size < cbImage,
354 RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "VID.DLL bad import directory size: %#x", ImportDir.Size));
355 AssertReturn( ImportDir.VirtualAddress > 0
356 && ImportDir.VirtualAddress <= cbImage - ImportDir.Size,
357 RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "VID.DLL bad import directory RVA: %#x", ImportDir.VirtualAddress));
358
359 for (PIMAGE_IMPORT_DESCRIPTOR pImps = (PIMAGE_IMPORT_DESCRIPTOR)&pbImage[ImportDir.VirtualAddress];
360 pImps->Name != 0 && pImps->FirstThunk != 0;
361 pImps++)
362 {
363 AssertReturn(pImps->Name < cbImage,
364 RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "VID.DLL bad import directory entry name: %#x", pImps->Name));
365 const char *pszModName = (const char *)&pbImage[pImps->Name];
366 if (RTStrICmpAscii(pszModName, "ntdll.dll"))
367 continue;
368 AssertReturn(pImps->FirstThunk < cbImage,
369 RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "VID.DLL bad FirstThunk: %#x", pImps->FirstThunk));
370 AssertReturn(pImps->OriginalFirstThunk < cbImage,
371 RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "VID.DLL bad OriginalFirstThunk: %#x", pImps->OriginalFirstThunk));
372
373 /*
374 * Walk the thunks table(s) looking for NtDeviceIoControlFile.
375 */
376 PIMAGE_THUNK_DATA pFirstThunk = (PIMAGE_THUNK_DATA)&pbImage[pImps->FirstThunk]; /* update this. */
377 PIMAGE_THUNK_DATA pThunk = pImps->OriginalFirstThunk == 0 /* read from this. */
378 ? (PIMAGE_THUNK_DATA)&pbImage[pImps->FirstThunk]
379 : (PIMAGE_THUNK_DATA)&pbImage[pImps->OriginalFirstThunk];
380 while (pThunk->u1.Ordinal != 0)
381 {
382 if (!(pThunk->u1.Ordinal & IMAGE_ORDINAL_FLAG32))
383 {
384 AssertReturn(pThunk->u1.Ordinal > 0 && pThunk->u1.Ordinal < cbImage,
385 RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "VID.DLL bad FirstThunk: %#x", pImps->FirstThunk));
386
387 const char *pszSymbol = (const char *)&pbImage[(uintptr_t)pThunk->u1.AddressOfData + 2];
388 if (strcmp(pszSymbol, "NtDeviceIoControlFile") == 0)
389 {
390 DWORD fOldProt = PAGE_READONLY;
391 VirtualProtect(&pFirstThunk->u1.Function, sizeof(uintptr_t), PAGE_EXECUTE_READWRITE, &fOldProt);
392 g_ppfnVidNtDeviceIoControlFile = (decltype(NtDeviceIoControlFile) **)&pFirstThunk->u1.Function;
393 /* Don't restore the protection here, so we can modify the NtDeviceIoControlFile pointer later. */
394 }
395 }
396
397 pThunk++;
398 pFirstThunk++;
399 }
400 }
401
402 if (g_ppfnVidNtDeviceIoControlFile && *g_ppfnVidNtDeviceIoControlFile)
403 {
404#ifdef NEM_WIN_INTERCEPT_NT_IO_CTLS
405 *g_ppfnVidNtDeviceIoControlFile = nemR3WinLogWrapper_NtDeviceIoControlFile;
406#endif
407 return VINF_SUCCESS;
408 }
409 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Failed to patch NtDeviceIoControlFile import in VID.DLL!");
410}
411
412
413/**
414 * Worker for nemR3NativeInit that probes and load the native API.
415 *
416 * @returns VBox status code.
417 * @param fForced Whether the HMForced flag is set and we should
418 * fail if we cannot initialize.
419 * @param pErrInfo Where to always return error info.
420 */
421static int nemR3WinInitProbeAndLoad(bool fForced, PRTERRINFO pErrInfo)
422{
423 /*
424 * Check that the DLL files we need are present, but without loading them.
425 * We'd like to avoid loading them unnecessarily.
426 */
427 WCHAR wszPath[MAX_PATH + 64];
428 UINT cwcPath = GetSystemDirectoryW(wszPath, MAX_PATH);
429 if (cwcPath >= MAX_PATH || cwcPath < 2)
430 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "GetSystemDirectoryW failed (%#x / %u)", cwcPath, GetLastError());
431
432 if (wszPath[cwcPath - 1] != '\\' && wszPath[cwcPath - 1] != '/')
433 wszPath[cwcPath++] = '\\';
434 RTUtf16CopyAscii(&wszPath[cwcPath], RT_ELEMENTS(wszPath) - cwcPath, "WinHvPlatform.dll");
435 if (GetFileAttributesW(wszPath) == INVALID_FILE_ATTRIBUTES)
436 return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE, "The native API dll was not found (%ls)", wszPath);
437
438 /*
439 * Check that we're in a VM and that the hypervisor identifies itself as Hyper-V.
440 */
441 if (!ASMHasCpuId())
442 return RTErrInfoSet(pErrInfo, VERR_NEM_NOT_AVAILABLE, "No CPUID support");
443 if (!ASMIsValidStdRange(ASMCpuId_EAX(0)))
444 return RTErrInfoSet(pErrInfo, VERR_NEM_NOT_AVAILABLE, "No CPUID leaf #1");
445 if (!(ASMCpuId_ECX(1) & X86_CPUID_FEATURE_ECX_HVP))
446 return RTErrInfoSet(pErrInfo, VERR_NEM_NOT_AVAILABLE, "Not in a hypervisor partition (HVP=0)");
447
448 uint32_t cMaxHyperLeaf = 0;
449 uint32_t uEbx = 0;
450 uint32_t uEcx = 0;
451 uint32_t uEdx = 0;
452 ASMCpuIdExSlow(0x40000000, 0, 0, 0, &cMaxHyperLeaf, &uEbx, &uEcx, &uEdx);
453 if (!ASMIsValidHypervisorRange(cMaxHyperLeaf))
454 return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE, "Invalid hypervisor CPUID range (%#x %#x %#x %#x)",
455 cMaxHyperLeaf, uEbx, uEcx, uEdx);
456 if ( uEbx != UINT32_C(0x7263694d) /* Micr */
457 || uEcx != UINT32_C(0x666f736f) /* osof */
458 || uEdx != UINT32_C(0x76482074) /* t Hv */)
459 return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE,
460 "Not Hyper-V CPUID signature: %#x %#x %#x (expected %#x %#x %#x)",
461 uEbx, uEcx, uEdx, UINT32_C(0x7263694d), UINT32_C(0x666f736f), UINT32_C(0x76482074));
462 if (cMaxHyperLeaf < UINT32_C(0x40000005))
463 return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE, "Too narrow hypervisor CPUID range (%#x)", cMaxHyperLeaf);
464
465 /** @todo would be great if we could recognize a root partition from the
466 * CPUID info, but I currently don't dare do that. */
467
468 /*
469 * Now try load the DLLs and resolve the APIs.
470 */
471 static const char * const s_apszDllNames[2] = { "WinHvPlatform.dll", "vid.dll" };
472 RTLDRMOD ahMods[2] = { NIL_RTLDRMOD, NIL_RTLDRMOD };
473 int rc = VINF_SUCCESS;
474 for (unsigned i = 0; i < RT_ELEMENTS(s_apszDllNames); i++)
475 {
476 int rc2 = RTLdrLoadSystem(s_apszDllNames[i], true /*fNoUnload*/, &ahMods[i]);
477 if (RT_FAILURE(rc2))
478 {
479 if (!RTErrInfoIsSet(pErrInfo))
480 RTErrInfoSetF(pErrInfo, rc2, "Failed to load API DLL: %s: %Rrc", s_apszDllNames[i], rc2);
481 else
482 RTErrInfoAddF(pErrInfo, rc2, "; %s: %Rrc", s_apszDllNames[i], rc2);
483 ahMods[i] = NIL_RTLDRMOD;
484 rc = VERR_NEM_INIT_FAILED;
485 }
486 }
487 if (RT_SUCCESS(rc))
488 rc = nemR3WinInitVidIntercepts(ahMods[1], pErrInfo);
489 if (RT_SUCCESS(rc))
490 {
491 for (unsigned i = 0; i < RT_ELEMENTS(g_aImports); i++)
492 {
493 int rc2 = RTLdrGetSymbol(ahMods[g_aImports[i].idxDll], g_aImports[i].pszName, (void **)g_aImports[i].ppfn);
494 if (RT_FAILURE(rc2))
495 {
496 *g_aImports[i].ppfn = NULL;
497
498 LogRel(("NEM: %s: Failed to import %s!%s: %Rrc",
499 g_aImports[i].fOptional ? "info" : fForced ? "fatal" : "error",
500 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName, rc2));
501 if (!g_aImports[i].fOptional)
502 {
503 if (RTErrInfoIsSet(pErrInfo))
504 RTErrInfoAddF(pErrInfo, rc2, ", %s!%s",
505 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName);
506 else
507 rc = RTErrInfoSetF(pErrInfo, rc2, "Failed to import: %s!%s",
508 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName);
509 Assert(RT_FAILURE(rc));
510 }
511 }
512 }
513 if (RT_SUCCESS(rc))
514 {
515 Assert(!RTErrInfoIsSet(pErrInfo));
516 }
517 }
518
519 for (unsigned i = 0; i < RT_ELEMENTS(ahMods); i++)
520 RTLdrClose(ahMods[i]);
521 return rc;
522}
523
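/* Illustrative sketch (not part of the original file): the Hyper-V presence probe
 * performed above, expressed with the MSVC __cpuid intrinsic instead of the IPRT
 * ASMCpuId* helpers.  ExampleIsHyperVPresent is a hypothetical name; error handling
 * and the hypervisor leaf-range check are trimmed for brevity. */
#if 0
# include <intrin.h>
static bool ExampleIsHyperVPresent(void)
{
    int aInfo[4];
    __cpuid(aInfo, 1);
    if (!((unsigned)aInfo[2] & (1u << 31)))     /* ECX[31]: hypervisor present bit. */
        return false;
    __cpuid(aInfo, 0x40000000);                 /* Hypervisor vendor leaf. */
    return aInfo[1] == 0x7263694d               /* EBX = "Micr" */
        && aInfo[2] == 0x666f736f               /* ECX = "osof" */
        && aInfo[3] == 0x76482074;              /* EDX = "t Hv" */
}
#endif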
524
525/**
526 * Wrapper for different WHvGetCapability signatures.
527 */
528DECLINLINE(HRESULT) WHvGetCapabilityWrapper(WHV_CAPABILITY_CODE enmCap, WHV_CAPABILITY *pOutput, uint32_t cbOutput)
529{
530 return g_pfnWHvGetCapability(enmCap, pOutput, cbOutput, NULL);
531}
532
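/* Illustrative sketch (not part of the original file): the documented four-argument
 * form of WHvGetCapability that the three-argument wrapper above forwards to,
 * including the optional WrittenSizeInBytes output the wrapper simply passes NULL
 * for.  ExampleQueryFeatures is a hypothetical name. */
#if 0
static HRESULT ExampleQueryFeatures(WHV_CAPABILITY *pCaps)
{
    UINT32 cbWritten = 0;
    return WHvGetCapability(WHvCapabilityCodeFeatures, pCaps, sizeof(*pCaps), &cbWritten);
}
#endif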
533
534/**
535 * Worker for nemR3NativeInit that gets the hypervisor capabilities.
536 *
537 * @returns VBox status code.
538 * @param pVM The cross context VM structure.
539 * @param pErrInfo Where to always return error info.
540 */
541static int nemR3WinInitCheckCapabilities(PVM pVM, PRTERRINFO pErrInfo)
542{
543#define NEM_LOG_REL_CAP_EX(a_szField, a_szFmt, a_Value) LogRel(("NEM: %-38s= " a_szFmt "\n", a_szField, a_Value))
544#define NEM_LOG_REL_CAP_SUB_EX(a_szField, a_szFmt, a_Value) LogRel(("NEM: %36s: " a_szFmt "\n", a_szField, a_Value))
545#define NEM_LOG_REL_CAP_SUB(a_szField, a_Value) NEM_LOG_REL_CAP_SUB_EX(a_szField, "%d", a_Value)
546
547 /*
548 * Is the hypervisor present with the desired capability?
549 *
550 * In build 17083 this translates into:
551 * - CPUID[0x00000001].HVP is set
552 * - CPUID[0x40000000] == "Microsoft Hv"
553 * - CPUID[0x40000001].eax == "Hv#1"
554 * - CPUID[0x40000003].ebx[12] is set.
555 * - VidGetExoPartitionProperty(INVALID_HANDLE_VALUE, 0x60000, &Ignored) returns
556 * a non-zero value.
557 */
558 /**
559 * @todo Someone at Microsoft please explain weird API design:
560 * 1. Pointless CapabilityCode duplication int the output;
561 * 2. No output size.
562 */
563 WHV_CAPABILITY Caps;
564 RT_ZERO(Caps);
565 SetLastError(0);
566 HRESULT hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeHypervisorPresent, &Caps, sizeof(Caps));
567 DWORD rcWin = GetLastError();
568 if (FAILED(hrc))
569 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
570 "WHvGetCapability/WHvCapabilityCodeHypervisorPresent failed: %Rhrc (Last=%#x/%u)",
571 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
572 if (!Caps.HypervisorPresent)
573 {
574 if (!RTPathExists(RTPATH_NT_PASSTHRU_PREFIX "Device\\VidExo"))
575 return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE,
576 "WHvCapabilityCodeHypervisorPresent is FALSE! Make sure you have enabled the 'Windows Hypervisor Platform' feature.");
577 return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE, "WHvCapabilityCodeHypervisorPresent is FALSE! (%u)", rcWin);
578 }
579 LogRel(("NEM: WHvCapabilityCodeHypervisorPresent is TRUE, so this might work...\n"));
580
581
582 /*
583 * Check what extended VM exits are supported.
584 */
585 RT_ZERO(Caps);
586 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeExtendedVmExits, &Caps, sizeof(Caps));
587 if (FAILED(hrc))
588 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
589 "WHvGetCapability/WHvCapabilityCodeExtendedVmExits failed: %Rhrc (Last=%#x/%u)",
590 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
591 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeExtendedVmExits", "%'#018RX64", Caps.ExtendedVmExits.AsUINT64);
592 pVM->nem.s.fExtendedMsrExit = RT_BOOL(Caps.ExtendedVmExits.X64MsrExit);
593 pVM->nem.s.fExtendedCpuIdExit = RT_BOOL(Caps.ExtendedVmExits.X64CpuidExit);
594 pVM->nem.s.fExtendedXcptExit = RT_BOOL(Caps.ExtendedVmExits.ExceptionExit);
595 NEM_LOG_REL_CAP_SUB("fExtendedMsrExit", pVM->nem.s.fExtendedMsrExit);
596 NEM_LOG_REL_CAP_SUB("fExtendedCpuIdExit", pVM->nem.s.fExtendedCpuIdExit);
597 NEM_LOG_REL_CAP_SUB("fExtendedXcptExit", pVM->nem.s.fExtendedXcptExit);
598 if (Caps.ExtendedVmExits.AsUINT64 & ~(uint64_t)7)
599 LogRel(("NEM: Warning! Unknown VM exit definitions: %#RX64\n", Caps.ExtendedVmExits.AsUINT64));
600 /** @todo RECHECK: WHV_EXTENDED_VM_EXITS typedef. */
601
602 /*
603 * Check features in case they end up defining any.
604 */
605 RT_ZERO(Caps);
606 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeFeatures, &Caps, sizeof(Caps));
607 if (FAILED(hrc))
608 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
609 "WHvGetCapability/WHvCapabilityCodeFeatures failed: %Rhrc (Last=%#x/%u)",
610 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
611 if (Caps.Features.AsUINT64 & ~(uint64_t)0)
612 LogRel(("NEM: Warning! Unknown feature definitions: %#RX64\n", Caps.Features.AsUINT64));
613 /** @todo RECHECK: WHV_CAPABILITY_FEATURES typedef. */
614
615 /*
616 * Check supported exception exit bitmap bits.
617 * We don't currently require this, so we just log failure.
618 */
619 RT_ZERO(Caps);
620 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeExceptionExitBitmap, &Caps, sizeof(Caps));
621 if (SUCCEEDED(hrc))
622 LogRel(("NEM: Supported exception exit bitmap: %#RX64\n", Caps.ExceptionExitBitmap));
623 else
624 LogRel(("NEM: Warning! WHvGetCapability/WHvCapabilityCodeExceptionExitBitmap failed: %Rhrc (Last=%#x/%u)\n",
625 hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
626
627 /*
628 * Check that the CPU vendor is supported.
629 */
630 RT_ZERO(Caps);
631 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeProcessorVendor, &Caps, sizeof(Caps));
632 if (FAILED(hrc))
633 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
634 "WHvGetCapability/WHvCapabilityCodeProcessorVendor failed: %Rhrc (Last=%#x/%u)",
635 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
636 switch (Caps.ProcessorVendor)
637 {
638 /** @todo RECHECK: WHV_PROCESSOR_VENDOR typedef. */
639 case WHvProcessorVendorIntel:
640 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorVendor", "%d - Intel", Caps.ProcessorVendor);
641 pVM->nem.s.enmCpuVendor = CPUMCPUVENDOR_INTEL;
642 break;
643 case WHvProcessorVendorAmd:
644 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorVendor", "%d - AMD", Caps.ProcessorVendor);
645 pVM->nem.s.enmCpuVendor = CPUMCPUVENDOR_AMD;
646 break;
647 default:
648 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorVendor", "%d", Caps.ProcessorVendor);
649 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Unknown processor vendor: %d", Caps.ProcessorVendor);
650 }
651
652 /*
653 * CPU features, guessing these are virtual CPU features?
654 */
655 RT_ZERO(Caps);
656 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeProcessorFeatures, &Caps, sizeof(Caps));
657 if (FAILED(hrc))
658 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
659 "WHvGetCapability/WHvCapabilityCodeProcessorFeatures failed: %Rhrc (Last=%#x/%u)",
660 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
661 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorFeatures", "%'#018RX64", Caps.ProcessorFeatures.AsUINT64);
662#define NEM_LOG_REL_CPU_FEATURE(a_Field) NEM_LOG_REL_CAP_SUB(#a_Field, Caps.ProcessorFeatures.a_Field)
663 NEM_LOG_REL_CPU_FEATURE(Sse3Support);
664 NEM_LOG_REL_CPU_FEATURE(LahfSahfSupport);
665 NEM_LOG_REL_CPU_FEATURE(Ssse3Support);
666 NEM_LOG_REL_CPU_FEATURE(Sse4_1Support);
667 NEM_LOG_REL_CPU_FEATURE(Sse4_2Support);
668 NEM_LOG_REL_CPU_FEATURE(Sse4aSupport);
669 NEM_LOG_REL_CPU_FEATURE(XopSupport);
670 NEM_LOG_REL_CPU_FEATURE(PopCntSupport);
671 NEM_LOG_REL_CPU_FEATURE(Cmpxchg16bSupport);
672 NEM_LOG_REL_CPU_FEATURE(Altmovcr8Support);
673 NEM_LOG_REL_CPU_FEATURE(LzcntSupport);
674 NEM_LOG_REL_CPU_FEATURE(MisAlignSseSupport);
675 NEM_LOG_REL_CPU_FEATURE(MmxExtSupport);
676 NEM_LOG_REL_CPU_FEATURE(Amd3DNowSupport);
677 NEM_LOG_REL_CPU_FEATURE(ExtendedAmd3DNowSupport);
678 NEM_LOG_REL_CPU_FEATURE(Page1GbSupport);
679 NEM_LOG_REL_CPU_FEATURE(AesSupport);
680 NEM_LOG_REL_CPU_FEATURE(PclmulqdqSupport);
681 NEM_LOG_REL_CPU_FEATURE(PcidSupport);
682 NEM_LOG_REL_CPU_FEATURE(Fma4Support);
683 NEM_LOG_REL_CPU_FEATURE(F16CSupport);
684 NEM_LOG_REL_CPU_FEATURE(RdRandSupport);
685 NEM_LOG_REL_CPU_FEATURE(RdWrFsGsSupport);
686 NEM_LOG_REL_CPU_FEATURE(SmepSupport);
687 NEM_LOG_REL_CPU_FEATURE(EnhancedFastStringSupport);
688 NEM_LOG_REL_CPU_FEATURE(Bmi1Support);
689 NEM_LOG_REL_CPU_FEATURE(Bmi2Support);
690 /* two reserved bits here, see below */
691 NEM_LOG_REL_CPU_FEATURE(MovbeSupport);
692 NEM_LOG_REL_CPU_FEATURE(Npiep1Support);
693 NEM_LOG_REL_CPU_FEATURE(DepX87FPUSaveSupport);
694 NEM_LOG_REL_CPU_FEATURE(RdSeedSupport);
695 NEM_LOG_REL_CPU_FEATURE(AdxSupport);
696 NEM_LOG_REL_CPU_FEATURE(IntelPrefetchSupport);
697 NEM_LOG_REL_CPU_FEATURE(SmapSupport);
698 NEM_LOG_REL_CPU_FEATURE(HleSupport);
699 NEM_LOG_REL_CPU_FEATURE(RtmSupport);
700 NEM_LOG_REL_CPU_FEATURE(RdtscpSupport);
701 NEM_LOG_REL_CPU_FEATURE(ClflushoptSupport);
702 NEM_LOG_REL_CPU_FEATURE(ClwbSupport);
703 NEM_LOG_REL_CPU_FEATURE(ShaSupport);
704 NEM_LOG_REL_CPU_FEATURE(X87PointersSavedSupport);
705#undef NEM_LOG_REL_CPU_FEATURE
706 if (Caps.ProcessorFeatures.AsUINT64 & (~(RT_BIT_64(43) - 1) | RT_BIT_64(27) | RT_BIT_64(28)))
707 LogRel(("NEM: Warning! Unknown CPU features: %#RX64\n", Caps.ProcessorFeatures.AsUINT64));
708 pVM->nem.s.uCpuFeatures.u64 = Caps.ProcessorFeatures.AsUINT64;
709 /** @todo RECHECK: WHV_PROCESSOR_FEATURES typedef. */
710
711 /*
712 * The cache line flush size.
713 */
714 RT_ZERO(Caps);
715 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeProcessorClFlushSize, &Caps, sizeof(Caps));
716 if (FAILED(hrc))
717 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
718 "WHvGetCapability/WHvCapabilityCodeProcessorClFlushSize failed: %Rhrc (Last=%#x/%u)",
719 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
720 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorClFlushSize", "2^%u", Caps.ProcessorClFlushSize);
721 if (Caps.ProcessorClFlushSize < 8 || Caps.ProcessorClFlushSize > 9)
722 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Unsupported cache line flush size: %u", Caps.ProcessorClFlushSize);
723 pVM->nem.s.cCacheLineFlushShift = Caps.ProcessorClFlushSize;
724
725 /*
726 * See if they've added more properties that we're not aware of.
727 */
728 /** @todo RECHECK: WHV_CAPABILITY_CODE typedef. */
729 if (!IsDebuggerPresent()) /* Too noisy when in debugger, so skip. */
730 {
731 static const struct
732 {
733 uint32_t iMin, iMax; } s_aUnknowns[] =
734 {
735 { 0x0004, 0x000f },
736 { 0x1003, 0x100f },
737 { 0x2000, 0x200f },
738 { 0x3000, 0x300f },
739 { 0x4000, 0x400f },
740 };
741 for (uint32_t j = 0; j < RT_ELEMENTS(s_aUnknowns); j++)
742 for (uint32_t i = s_aUnknowns[j].iMin; i <= s_aUnknowns[j].iMax; i++)
743 {
744 RT_ZERO(Caps);
745 hrc = WHvGetCapabilityWrapper((WHV_CAPABILITY_CODE)i, &Caps, sizeof(Caps));
746 if (SUCCEEDED(hrc))
747 LogRel(("NEM: Warning! Unknown capability %#x returning: %.*Rhxs\n", i, sizeof(Caps), &Caps));
748 }
749 }
750
751 /*
752 * For proper operation, we require CPUID exits.
753 */
754 if (!pVM->nem.s.fExtendedCpuIdExit)
755 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Missing required extended CPUID exit support");
756 if (!pVM->nem.s.fExtendedMsrExit)
757 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Missing required extended MSR exit support");
758 if (!pVM->nem.s.fExtendedXcptExit)
759 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Missing required extended exception exit support");
760
761#undef NEM_LOG_REL_CAP_EX
762#undef NEM_LOG_REL_CAP_SUB_EX
763#undef NEM_LOG_REL_CAP_SUB
764 return VINF_SUCCESS;
765}
766
767
768/**
769 * Used to fill in g_IoCtlGetHvPartitionId.
770 */
771static NTSTATUS WINAPI
772nemR3WinIoctlDetector_GetHvPartitionId(HANDLE hFile, HANDLE hEvt, PIO_APC_ROUTINE pfnApcCallback, PVOID pvApcCtx,
773 PIO_STATUS_BLOCK pIos, ULONG uFunction, PVOID pvInput, ULONG cbInput,
774 PVOID pvOutput, ULONG cbOutput)
775{
776 AssertLogRelMsgReturn(hFile == NEM_WIN_IOCTL_DETECTOR_FAKE_HANDLE, ("hFile=%p\n", hFile), STATUS_INVALID_PARAMETER_1);
777 RT_NOREF(hEvt); RT_NOREF(pfnApcCallback); RT_NOREF(pvApcCtx);
778 AssertLogRelMsgReturn(RT_VALID_PTR(pIos), ("pIos=%p\n", pIos), STATUS_INVALID_PARAMETER_5);
779 AssertLogRelMsgReturn(cbInput == 0, ("cbInput=%#x\n", cbInput), STATUS_INVALID_PARAMETER_8);
780 RT_NOREF(pvInput);
781
782 AssertLogRelMsgReturn(RT_VALID_PTR(pvOutput), ("pvOutput=%p\n", pvOutput), STATUS_INVALID_PARAMETER_9);
783 AssertLogRelMsgReturn(cbOutput == sizeof(HV_PARTITION_ID), ("cbOutput=%#x\n", cbOutput), STATUS_INVALID_PARAMETER_10);
784 *(HV_PARTITION_ID *)pvOutput = NEM_WIN_IOCTL_DETECTOR_FAKE_PARTITION_ID;
785
786 g_IoCtlGetHvPartitionId.cbInput = cbInput;
787 g_IoCtlGetHvPartitionId.cbOutput = cbOutput;
788 g_IoCtlGetHvPartitionId.uFunction = uFunction;
789
790 return STATUS_SUCCESS;
791}
792
793
794/**
795 * Used to fill in g_IoCtlStartVirtualProcessor.
796 */
797static NTSTATUS WINAPI
798nemR3WinIoctlDetector_StartVirtualProcessor(HANDLE hFile, HANDLE hEvt, PIO_APC_ROUTINE pfnApcCallback, PVOID pvApcCtx,
799 PIO_STATUS_BLOCK pIos, ULONG uFunction, PVOID pvInput, ULONG cbInput,
800 PVOID pvOutput, ULONG cbOutput)
801{
802 AssertLogRelMsgReturn(hFile == NEM_WIN_IOCTL_DETECTOR_FAKE_HANDLE, ("hFile=%p\n", hFile), STATUS_INVALID_PARAMETER_1);
803 RT_NOREF(hEvt); RT_NOREF(pfnApcCallback); RT_NOREF(pvApcCtx);
804 AssertLogRelMsgReturn(RT_VALID_PTR(pIos), ("pIos=%p\n", pIos), STATUS_INVALID_PARAMETER_5);
805 AssertLogRelMsgReturn(cbInput == sizeof(HV_VP_INDEX), ("cbInput=%#x\n", cbInput), STATUS_INVALID_PARAMETER_8);
806 AssertLogRelMsgReturn(RT_VALID_PTR(pvInput), ("pvInput=%p\n", pvInput), STATUS_INVALID_PARAMETER_9);
807 AssertLogRelMsgReturn(*(HV_VP_INDEX *)pvInput == NEM_WIN_IOCTL_DETECTOR_FAKE_VP_INDEX,
808 ("*piCpu=%u\n", *(HV_VP_INDEX *)pvInput), STATUS_INVALID_PARAMETER_9);
809 AssertLogRelMsgReturn(cbOutput == 0, ("cbOutput=%#x\n", cbOutput), STATUS_INVALID_PARAMETER_10);
810 RT_NOREF(pvOutput);
811
812 g_IoCtlStartVirtualProcessor.cbInput = cbInput;
813 g_IoCtlStartVirtualProcessor.cbOutput = cbOutput;
814 g_IoCtlStartVirtualProcessor.uFunction = uFunction;
815
816 return STATUS_SUCCESS;
817}
818
819
820/**
821 * Used to fill in g_IoCtlStopVirtualProcessor.
822 */
823static NTSTATUS WINAPI
824nemR3WinIoctlDetector_StopVirtualProcessor(HANDLE hFile, HANDLE hEvt, PIO_APC_ROUTINE pfnApcCallback, PVOID pvApcCtx,
825 PIO_STATUS_BLOCK pIos, ULONG uFunction, PVOID pvInput, ULONG cbInput,
826 PVOID pvOutput, ULONG cbOutput)
827{
828 AssertLogRelMsgReturn(hFile == NEM_WIN_IOCTL_DETECTOR_FAKE_HANDLE, ("hFile=%p\n", hFile), STATUS_INVALID_PARAMETER_1);
829 RT_NOREF(hEvt); RT_NOREF(pfnApcCallback); RT_NOREF(pvApcCtx);
830 AssertLogRelMsgReturn(RT_VALID_PTR(pIos), ("pIos=%p\n", pIos), STATUS_INVALID_PARAMETER_5);
831 AssertLogRelMsgReturn(cbInput == sizeof(HV_VP_INDEX), ("cbInput=%#x\n", cbInput), STATUS_INVALID_PARAMETER_8);
832 AssertLogRelMsgReturn(RT_VALID_PTR(pvInput), ("pvInput=%p\n", pvInput), STATUS_INVALID_PARAMETER_9);
833 AssertLogRelMsgReturn(*(HV_VP_INDEX *)pvInput == NEM_WIN_IOCTL_DETECTOR_FAKE_VP_INDEX,
834 ("*piCpu=%u\n", *(HV_VP_INDEX *)pvInput), STATUS_INVALID_PARAMETER_9);
835 AssertLogRelMsgReturn(cbOutput == 0, ("cbOutput=%#x\n", cbOutput), STATUS_INVALID_PARAMETER_10);
836 RT_NOREF(pvOutput);
837
838 g_IoCtlStopVirtualProcessor.cbInput = cbInput;
839 g_IoCtlStopVirtualProcessor.cbOutput = cbOutput;
840 g_IoCtlStopVirtualProcessor.uFunction = uFunction;
841
842 return STATUS_SUCCESS;
843}
844
845
846/**
847 * Used to fill in g_IoCtlMessageSlotHandleAndGetNext
848 */
849static NTSTATUS WINAPI
850nemR3WinIoctlDetector_MessageSlotHandleAndGetNext(HANDLE hFile, HANDLE hEvt, PIO_APC_ROUTINE pfnApcCallback, PVOID pvApcCtx,
851 PIO_STATUS_BLOCK pIos, ULONG uFunction, PVOID pvInput, ULONG cbInput,
852 PVOID pvOutput, ULONG cbOutput)
853{
854 AssertLogRelMsgReturn(hFile == NEM_WIN_IOCTL_DETECTOR_FAKE_HANDLE, ("hFile=%p\n", hFile), STATUS_INVALID_PARAMETER_1);
855 RT_NOREF(hEvt); RT_NOREF(pfnApcCallback); RT_NOREF(pvApcCtx);
856 AssertLogRelMsgReturn(RT_VALID_PTR(pIos), ("pIos=%p\n", pIos), STATUS_INVALID_PARAMETER_5);
857
858 if (g_uBuildNo >= 17758)
859 {
860 /* No timeout since about build 17758, it's now always an infinite wait. So, a somewhat compatible change. */
861 AssertLogRelMsgReturn(cbInput == RT_UOFFSETOF(VID_IOCTL_INPUT_MESSAGE_SLOT_HANDLE_AND_GET_NEXT, cMillies),
862 ("cbInput=%#x\n", cbInput),
863 STATUS_INVALID_PARAMETER_8);
864 AssertLogRelMsgReturn(RT_VALID_PTR(pvInput), ("pvInput=%p\n", pvInput), STATUS_INVALID_PARAMETER_9);
865 PCVID_IOCTL_INPUT_MESSAGE_SLOT_HANDLE_AND_GET_NEXT pVidIn = (PCVID_IOCTL_INPUT_MESSAGE_SLOT_HANDLE_AND_GET_NEXT)pvInput;
866 AssertLogRelMsgReturn( pVidIn->iCpu == NEM_WIN_IOCTL_DETECTOR_FAKE_VP_INDEX
867 && pVidIn->fFlags == VID_MSHAGN_F_HANDLE_MESSAGE,
868 ("iCpu=%u fFlags=%#x cMillies=%#x\n", pVidIn->iCpu, pVidIn->fFlags, pVidIn->cMillies),
869 STATUS_INVALID_PARAMETER_9);
870 AssertLogRelMsgReturn(cbOutput == 0, ("cbOutput=%#x\n", cbOutput), STATUS_INVALID_PARAMETER_10);
871 }
872 else
873 {
874 AssertLogRelMsgReturn(cbInput == sizeof(VID_IOCTL_INPUT_MESSAGE_SLOT_HANDLE_AND_GET_NEXT), ("cbInput=%#x\n", cbInput),
875 STATUS_INVALID_PARAMETER_8);
876 AssertLogRelMsgReturn(RT_VALID_PTR(pvInput), ("pvInput=%p\n", pvInput), STATUS_INVALID_PARAMETER_9);
877 PCVID_IOCTL_INPUT_MESSAGE_SLOT_HANDLE_AND_GET_NEXT pVidIn = (PCVID_IOCTL_INPUT_MESSAGE_SLOT_HANDLE_AND_GET_NEXT)pvInput;
878 AssertLogRelMsgReturn( pVidIn->iCpu == NEM_WIN_IOCTL_DETECTOR_FAKE_VP_INDEX
879 && pVidIn->fFlags == VID_MSHAGN_F_HANDLE_MESSAGE
880 && pVidIn->cMillies == NEM_WIN_IOCTL_DETECTOR_FAKE_TIMEOUT,
881 ("iCpu=%u fFlags=%#x cMillies=%#x\n", pVidIn->iCpu, pVidIn->fFlags, pVidIn->cMillies),
882 STATUS_INVALID_PARAMETER_9);
883 AssertLogRelMsgReturn(cbOutput == 0, ("cbOutput=%#x\n", cbOutput), STATUS_INVALID_PARAMETER_10);
884 RT_NOREF(pvOutput);
885 }
886
887 g_IoCtlMessageSlotHandleAndGetNext.cbInput = cbInput;
888 g_IoCtlMessageSlotHandleAndGetNext.cbOutput = cbOutput;
889 g_IoCtlMessageSlotHandleAndGetNext.uFunction = uFunction;
890
891 return STATUS_SUCCESS;
892}
893
894
895#ifdef LOG_ENABLED
896/**
897 * Used to fill in what g_pIoCtlDetectForLogging points to.
898 */
899static NTSTATUS WINAPI nemR3WinIoctlDetector_ForLogging(HANDLE hFile, HANDLE hEvt, PIO_APC_ROUTINE pfnApcCallback, PVOID pvApcCtx,
900 PIO_STATUS_BLOCK pIos, ULONG uFunction, PVOID pvInput, ULONG cbInput,
901 PVOID pvOutput, ULONG cbOutput)
902{
903 RT_NOREF(hFile, hEvt, pfnApcCallback, pvApcCtx, pIos, pvInput, pvOutput);
904
905 g_pIoCtlDetectForLogging->cbInput = cbInput;
906 g_pIoCtlDetectForLogging->cbOutput = cbOutput;
907 g_pIoCtlDetectForLogging->uFunction = uFunction;
908
909 return STATUS_SUCCESS;
910}
911#endif
912
913
914/**
915 * Worker for nemR3NativeInit that detect I/O control function numbers for VID.
916 *
917 * We use the function numbers directly in ring-0 and to name functions when
918 * logging NtDeviceIoControlFile calls.
919 *
920 * @note We could alternatively do this by disassembling the respective
921 * functions, but hooking NtDeviceIoControlFile and making fake calls
922 * more easily provides the desired information.
923 *
924 * @returns VBox status code.
925 * @param pVM The cross context VM structure. Will set I/O
926 * control info members.
927 * @param pErrInfo Where to always return error info.
928 */
929static int nemR3WinInitDiscoverIoControlProperties(PVM pVM, PRTERRINFO pErrInfo)
930{
931 /*
932 * Probe the I/O control information for select VID APIs so we can use
933 * them directly from ring-0 and better log them.
934 *
935 */
936 decltype(NtDeviceIoControlFile) * const pfnOrg = *g_ppfnVidNtDeviceIoControlFile;
937
938 /* VidGetHvPartitionId - must work due to memory. */
939 *g_ppfnVidNtDeviceIoControlFile = nemR3WinIoctlDetector_GetHvPartitionId;
940 HV_PARTITION_ID idHvPartition = HV_PARTITION_ID_INVALID;
941 BOOL fRet = g_pfnVidGetHvPartitionId(NEM_WIN_IOCTL_DETECTOR_FAKE_HANDLE, &idHvPartition);
942 *g_ppfnVidNtDeviceIoControlFile = pfnOrg;
943 AssertReturn(fRet && idHvPartition == NEM_WIN_IOCTL_DETECTOR_FAKE_PARTITION_ID && g_IoCtlGetHvPartitionId.uFunction != 0,
944 RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
945 "Problem figuring out VidGetHvPartitionId: fRet=%u idHvPartition=%#x dwErr=%u",
946 fRet, idHvPartition, GetLastError()) );
947 LogRel(("NEM: VidGetHvPartitionId -> fun:%#x in:%#x out:%#x\n",
948 g_IoCtlGetHvPartitionId.uFunction, g_IoCtlGetHvPartitionId.cbInput, g_IoCtlGetHvPartitionId.cbOutput));
949
950 int rcRet = VINF_SUCCESS;
951 /* VidStartVirtualProcessor */
952 *g_ppfnVidNtDeviceIoControlFile = nemR3WinIoctlDetector_StartVirtualProcessor;
953 fRet = g_pfnVidStartVirtualProcessor(NEM_WIN_IOCTL_DETECTOR_FAKE_HANDLE, NEM_WIN_IOCTL_DETECTOR_FAKE_VP_INDEX);
954 *g_ppfnVidNtDeviceIoControlFile = pfnOrg;
955 AssertStmt(fRet && g_IoCtlStartVirtualProcessor.uFunction != 0,
956 rcRet = RTERRINFO_LOG_REL_SET_F(pErrInfo, VERR_NEM_RING3_ONLY,
957 "Problem figuring out VidStartVirtualProcessor: fRet=%u dwErr=%u",
958 fRet, GetLastError()) );
959 LogRel(("NEM: VidStartVirtualProcessor -> fun:%#x in:%#x out:%#x\n", g_IoCtlStartVirtualProcessor.uFunction,
960 g_IoCtlStartVirtualProcessor.cbInput, g_IoCtlStartVirtualProcessor.cbOutput));
961
962 /* VidStopVirtualProcessor */
963 *g_ppfnVidNtDeviceIoControlFile = nemR3WinIoctlDetector_StopVirtualProcessor;
964 fRet = g_pfnVidStopVirtualProcessor(NEM_WIN_IOCTL_DETECTOR_FAKE_HANDLE, NEM_WIN_IOCTL_DETECTOR_FAKE_VP_INDEX);
965 *g_ppfnVidNtDeviceIoControlFile = pfnOrg;
966 AssertStmt(fRet && g_IoCtlStopVirtualProcessor.uFunction != 0,
967 rcRet = RTERRINFO_LOG_REL_SET_F(pErrInfo, VERR_NEM_RING3_ONLY,
968 "Problem figuring out VidStopVirtualProcessor: fRet=%u dwErr=%u",
969 fRet, GetLastError()) );
970 LogRel(("NEM: VidStopVirtualProcessor -> fun:%#x in:%#x out:%#x\n", g_IoCtlStopVirtualProcessor.uFunction,
971 g_IoCtlStopVirtualProcessor.cbInput, g_IoCtlStopVirtualProcessor.cbOutput));
972
973 /* VidMessageSlotHandleAndGetNext */
974 *g_ppfnVidNtDeviceIoControlFile = nemR3WinIoctlDetector_MessageSlotHandleAndGetNext;
975 fRet = g_pfnVidMessageSlotHandleAndGetNext(NEM_WIN_IOCTL_DETECTOR_FAKE_HANDLE,
976 NEM_WIN_IOCTL_DETECTOR_FAKE_VP_INDEX, VID_MSHAGN_F_HANDLE_MESSAGE,
977 NEM_WIN_IOCTL_DETECTOR_FAKE_TIMEOUT);
978 *g_ppfnVidNtDeviceIoControlFile = pfnOrg;
979 AssertStmt(fRet && g_IoCtlMessageSlotHandleAndGetNext.uFunction != 0,
980 rcRet = RTERRINFO_LOG_REL_SET_F(pErrInfo, VERR_NEM_RING3_ONLY,
981 "Problem figuring out VidMessageSlotHandleAndGetNext: fRet=%u dwErr=%u",
982 fRet, GetLastError()) );
983 LogRel(("NEM: VidMessageSlotHandleAndGetNext -> fun:%#x in:%#x out:%#x\n",
984 g_IoCtlMessageSlotHandleAndGetNext.uFunction, g_IoCtlMessageSlotHandleAndGetNext.cbInput,
985 g_IoCtlMessageSlotHandleAndGetNext.cbOutput));
986
987#ifdef LOG_ENABLED
988 /* The following are only for logging: */
989 union
990 {
991 VID_MAPPED_MESSAGE_SLOT MapSlot;
992 HV_REGISTER_NAME Name;
993 HV_REGISTER_VALUE Value;
994 } uBuf;
995
996 /* VidMessageSlotMap */
997 g_pIoCtlDetectForLogging = &g_IoCtlMessageSlotMap;
998 *g_ppfnVidNtDeviceIoControlFile = nemR3WinIoctlDetector_ForLogging;
999 fRet = g_pfnVidMessageSlotMap(NEM_WIN_IOCTL_DETECTOR_FAKE_HANDLE, &uBuf.MapSlot, NEM_WIN_IOCTL_DETECTOR_FAKE_VP_INDEX);
1000 *g_ppfnVidNtDeviceIoControlFile = pfnOrg;
1001 Assert(fRet);
1002 LogRel(("NEM: VidMessageSlotMap -> fun:%#x in:%#x out:%#x\n", g_pIoCtlDetectForLogging->uFunction,
1003 g_pIoCtlDetectForLogging->cbInput, g_pIoCtlDetectForLogging->cbOutput));
1004
1005 /* VidGetVirtualProcessorState */
1006 uBuf.Name = HvRegisterExplicitSuspend;
1007 g_pIoCtlDetectForLogging = &g_IoCtlGetVirtualProcessorState;
1008 *g_ppfnVidNtDeviceIoControlFile = nemR3WinIoctlDetector_ForLogging;
1009 fRet = g_pfnVidGetVirtualProcessorState(NEM_WIN_IOCTL_DETECTOR_FAKE_HANDLE, NEM_WIN_IOCTL_DETECTOR_FAKE_VP_INDEX,
1010 &uBuf.Name, 1, &uBuf.Value);
1011 *g_ppfnVidNtDeviceIoControlFile = pfnOrg;
1012 Assert(fRet);
1013 LogRel(("NEM: VidGetVirtualProcessorState -> fun:%#x in:%#x out:%#x\n", g_pIoCtlDetectForLogging->uFunction,
1014 g_pIoCtlDetectForLogging->cbInput, g_pIoCtlDetectForLogging->cbOutput));
1015
1016 /* VidSetVirtualProcessorState */
1017 uBuf.Name = HvRegisterExplicitSuspend;
1018 g_pIoCtlDetectForLogging = &g_IoCtlSetVirtualProcessorState;
1019 *g_ppfnVidNtDeviceIoControlFile = nemR3WinIoctlDetector_ForLogging;
1020 fRet = g_pfnVidSetVirtualProcessorState(NEM_WIN_IOCTL_DETECTOR_FAKE_HANDLE, NEM_WIN_IOCTL_DETECTOR_FAKE_VP_INDEX,
1021 &uBuf.Name, 1, &uBuf.Value);
1022 *g_ppfnVidNtDeviceIoControlFile = pfnOrg;
1023 Assert(fRet);
1024 LogRel(("NEM: VidSetVirtualProcessorState -> fun:%#x in:%#x out:%#x\n", g_pIoCtlDetectForLogging->uFunction,
1025 g_pIoCtlDetectForLogging->cbInput, g_pIoCtlDetectForLogging->cbOutput));
1026
1027 g_pIoCtlDetectForLogging = NULL;
1028#endif
1029
1030 /* Done. */
1031 pVM->nem.s.IoCtlGetHvPartitionId = g_IoCtlGetHvPartitionId;
1032 pVM->nem.s.IoCtlStartVirtualProcessor = g_IoCtlStartVirtualProcessor;
1033 pVM->nem.s.IoCtlStopVirtualProcessor = g_IoCtlStopVirtualProcessor;
1034 pVM->nem.s.IoCtlMessageSlotHandleAndGetNext = g_IoCtlMessageSlotHandleAndGetNext;
1035 return rcRet;
1036}
1037
1038
1039/**
1040 * Creates and sets up a Hyper-V (exo) partition.
1041 *
1042 * @returns VBox status code.
1043 * @param pVM The cross context VM structure.
1044 * @param pErrInfo Where to always return error info.
1045 */
1046static int nemR3WinInitCreatePartition(PVM pVM, PRTERRINFO pErrInfo)
1047{
1048 AssertReturn(!pVM->nem.s.hPartition, RTErrInfoSet(pErrInfo, VERR_WRONG_ORDER, "Wrong initialization order"));
1049 AssertReturn(!pVM->nem.s.hPartitionDevice, RTErrInfoSet(pErrInfo, VERR_WRONG_ORDER, "Wrong initialization order"));
1050
1051 /*
1052 * Create the partition.
1053 */
1054 WHV_PARTITION_HANDLE hPartition;
1055 HRESULT hrc = WHvCreatePartition(&hPartition);
1056 if (FAILED(hrc))
1057 return RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED, "WHvCreatePartition failed with %Rhrc (Last=%#x/%u)",
1058 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
1059
1060 int rc;
1061
1062 /*
1063 * Set partition properties, most importantly the CPU count.
1064 */
1065 /**
1066 * @todo Someone at Microsoft please explain another weird API:
1067 * - Why this API doesn't take the WHV_PARTITION_PROPERTY_CODE value as an
1068 * argument rather than as part of the struct. That is so weird if you've
1069 * used any other NT or windows API, including WHvGetCapability().
1070 * - Why use PVOID when WHV_PARTITION_PROPERTY is what's expected. We
1071 * technically only need 9 bytes for setting/getting
1072 * WHVPartitionPropertyCodeProcessorClFlushSize, but the API insists on 16. */
1073 WHV_PARTITION_PROPERTY Property;
1074 RT_ZERO(Property);
1075 Property.ProcessorCount = pVM->cCpus;
1076 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorCount, &Property, sizeof(Property));
1077 if (SUCCEEDED(hrc))
1078 {
1079 RT_ZERO(Property);
1080 Property.ExtendedVmExits.X64CpuidExit = pVM->nem.s.fExtendedCpuIdExit; /** @todo Register fixed results and restrict cpuid exits */
1081 Property.ExtendedVmExits.X64MsrExit = pVM->nem.s.fExtendedMsrExit;
1082 Property.ExtendedVmExits.ExceptionExit = pVM->nem.s.fExtendedXcptExit;
1083 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeExtendedVmExits, &Property, sizeof(Property));
1084 if (SUCCEEDED(hrc))
1085 {
1086 /*
1087 * We'll continue setup in nemR3NativeInitAfterCPUM.
1088 */
1089 pVM->nem.s.fCreatedEmts = false;
1090 pVM->nem.s.hPartition = hPartition;
1091 LogRel(("NEM: Created partition %p.\n", hPartition));
1092 return VINF_SUCCESS;
1093 }
1094
1095 rc = RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED,
1096 "Failed setting WHvPartitionPropertyCodeExtendedVmExits to %'#RX64: %Rhrc",
1097 Property.ExtendedVmExits.AsUINT64, hrc);
1098 }
1099 else
1100 rc = RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED,
1101 "Failed setting WHvPartitionPropertyCodeProcessorCount to %u: %Rhrc (Last=%#x/%u)",
1102 pVM->cCpus, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
1103 WHvDeletePartition(hPartition);
1104
1105 Assert(!pVM->nem.s.hPartitionDevice);
1106 Assert(!pVM->nem.s.hPartition);
1107 return rc;
1108}
1109
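/* Illustrative sketch (not part of the original file): the bare WinHvPlatform
 * partition life cycle that the function above wraps, calling the documented API
 * directly with a hypothetical single-CPU configuration; error handling and the
 * extended-exit properties are trimmed for brevity.  ExampleCreateThrowawayPartition
 * is a hypothetical name. */
#if 0
static HRESULT ExampleCreateThrowawayPartition(void)
{
    WHV_PARTITION_HANDLE hPart;
    HRESULT hrc = WHvCreatePartition(&hPart);
    if (FAILED(hrc))
        return hrc;

    WHV_PARTITION_PROPERTY Prop;
    RT_ZERO(Prop);
    Prop.ProcessorCount = 1;                    /* Must be set before WHvSetupPartition(). */
    hrc = WHvSetPartitionProperty(hPart, WHvPartitionPropertyCodeProcessorCount, &Prop, sizeof(Prop));
    if (SUCCEEDED(hrc))
        hrc = WHvSetupPartition(hPart);         /* Commits the configuration. */

    WHvDeletePartition(hPart);
    return hrc;
}
#endif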
1110
1111/**
1112 * Makes sure APIC and firmware will not allow X2APIC mode.
1113 *
1114 * This is rather ugly.
1115 *
1116 * @returns VBox status code
1117 * @param pVM The cross context VM structure.
1118 */
1119static int nemR3WinDisableX2Apic(PVM pVM)
1120{
1121 /*
1122 * First make sure the 'Mode' config value of the APIC isn't set to X2APIC.
1123 * This defaults to APIC, so no need to change unless it's X2APIC.
1124 */
1125 PCFGMNODE pCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "/Devices/apic/0/Config");
1126 if (pCfg)
1127 {
1128 uint8_t bMode = 0;
1129 int rc = CFGMR3QueryU8(pCfg, "Mode", &bMode);
1130 AssertLogRelMsgReturn(RT_SUCCESS(rc) || rc == VERR_CFGM_VALUE_NOT_FOUND, ("%Rrc\n", rc), rc);
1131 if (RT_SUCCESS(rc) && bMode == PDMAPICMODE_X2APIC)
1132 {
1133 LogRel(("NEM: Adjusting APIC configuration from X2APIC to APIC max mode. X2APIC is not supported by the WinHvPlatform API!\n"));
1134 LogRel(("NEM: Disable Hyper-V if you need X2APIC for your guests!\n"));
1135 rc = CFGMR3RemoveValue(pCfg, "Mode");
1136 rc = CFGMR3InsertInteger(pCfg, "Mode", PDMAPICMODE_APIC);
1137 AssertLogRelRCReturn(rc, rc);
1138 }
1139 }
1140
1141 /*
1142 * Now the firmwares.
1143 * These also default to APIC and only need adjusting if configured for X2APIC (2).
1144 */
1145 static const char * const s_apszFirmwareConfigs[] =
1146 {
1147 "/Devices/efi/0/Config",
1148 "/Devices/pcbios/0/Config",
1149 };
1150 for (unsigned i = 0; i < RT_ELEMENTS(s_apszFirmwareConfigs); i++)
1151 {
1152 pCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), s_apszFirmwareConfigs[i]);
1153 if (pCfg)
1154 {
1155 uint8_t bMode = 0;
1156 int rc = CFGMR3QueryU8(pCfg, "APIC", &bMode);
1157 AssertLogRelMsgReturn(RT_SUCCESS(rc) || rc == VERR_CFGM_VALUE_NOT_FOUND, ("%Rrc\n", rc), rc);
1158 if (RT_SUCCESS(rc) && bMode == 2)
1159 {
1160 LogRel(("NEM: Adjusting %s/Mode from 2 (X2APIC) to 1 (APIC).\n", s_apszFirmwareConfigs[i]));
1161 rc = CFGMR3RemoveValue(pCfg, "APIC");
1162 rc = CFGMR3InsertInteger(pCfg, "APIC", 1);
1163 AssertLogRelRCReturn(rc, rc);
1164 }
1165 }
1166 }
1167
1168 return VINF_SUCCESS;
1169}
1170
1171
1172/**
1173 * Try initialize the native API.
1174 *
1175 * This may only do part of the job, more can be done in
1176 * nemR3NativeInitAfterCPUM() and nemR3NativeInitCompleted().
1177 *
1178 * @returns VBox status code.
1179 * @param pVM The cross context VM structure.
1180 * @param fFallback Whether we're in fallback mode or use-NEM mode. In
1181 * the latter we'll fail if we cannot initialize.
1182 * @param fForced Whether the HMForced flag is set and we should
1183 * fail if we cannot initialize.
1184 */
1185int nemR3NativeInit(PVM pVM, bool fFallback, bool fForced)
1186{
1187 g_uBuildNo = RTSystemGetNtBuildNo();
1188
1189 /*
1190 * Some state init.
1191 */
1192 pVM->nem.s.fA20Enabled = true;
1193#if 0
1194 for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
1195 {
1196 PNEMCPU pNemCpu = &pVM->aCpus[iCpu].nem.s;
1197 }
1198#endif
1199
1200 /*
1201 * Error state.
1202 * The error message will be non-empty on failure and 'rc' will be set too.
1203 */
1204 RTERRINFOSTATIC ErrInfo;
1205 PRTERRINFO pErrInfo = RTErrInfoInitStatic(&ErrInfo);
1206 int rc = nemR3WinInitProbeAndLoad(fForced, pErrInfo);
1207 if (RT_SUCCESS(rc))
1208 {
1209 /*
1210 * Check the capabilities of the hypervisor, starting with whether it's present.
1211 */
1212 rc = nemR3WinInitCheckCapabilities(pVM, pErrInfo);
1213 if (RT_SUCCESS(rc))
1214 {
1215 /*
1216 * Discover the VID I/O control function numbers we need.
1217 */
1218 rc = nemR3WinInitDiscoverIoControlProperties(pVM, pErrInfo);
1219 if (rc == VERR_NEM_RING3_ONLY)
1220 {
1221 if (pVM->nem.s.fUseRing0Runloop)
1222 {
1223 LogRel(("NEM: Disabling UseRing0Runloop.\n"));
1224 pVM->nem.s.fUseRing0Runloop = false;
1225 }
1226 rc = VINF_SUCCESS;
1227 }
1228 if (RT_SUCCESS(rc))
1229 {
1230 /*
1231 * Check out our ring-0 capabilities.
1232 */
1233 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_NEM_INIT_VM, 0, NULL);
1234 if (RT_SUCCESS(rc))
1235 {
1236 /*
1237 * Create and initialize a partition.
1238 */
1239 rc = nemR3WinInitCreatePartition(pVM, pErrInfo);
1240 if (RT_SUCCESS(rc))
1241 {
1242 VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_NATIVE_API);
1243 Log(("NEM: Marked active!\n"));
1244 nemR3WinDisableX2Apic(pVM);
1245
1246 /* Register release statistics */
1247 for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
1248 {
1249 PNEMCPU pNemCpu = &pVM->aCpus[iCpu].nem.s;
1250 STAMR3RegisterF(pVM, &pNemCpu->StatExitPortIo, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of port I/O exits", "/NEM/CPU%u/ExitPortIo", iCpu);
1251 STAMR3RegisterF(pVM, &pNemCpu->StatExitMemUnmapped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of unmapped memory exits", "/NEM/CPU%u/ExitMemUnmapped", iCpu);
1252 STAMR3RegisterF(pVM, &pNemCpu->StatExitMemIntercept, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of intercepted memory exits", "/NEM/CPU%u/ExitMemIntercept", iCpu);
1253 STAMR3RegisterF(pVM, &pNemCpu->StatExitHalt, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of HLT exits", "/NEM/CPU%u/ExitHalt", iCpu);
1254 STAMR3RegisterF(pVM, &pNemCpu->StatExitInterruptWindow, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of interrupt window exits", "/NEM/CPU%u/ExitInterruptWindow", iCpu);
1255 STAMR3RegisterF(pVM, &pNemCpu->StatExitCpuId, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of CPUID exits", "/NEM/CPU%u/ExitCpuId", iCpu);
1256 STAMR3RegisterF(pVM, &pNemCpu->StatExitMsr, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of MSR access exits", "/NEM/CPU%u/ExitMsr", iCpu);
1257 STAMR3RegisterF(pVM, &pNemCpu->StatExitException, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of exception exits", "/NEM/CPU%u/ExitException", iCpu);
1258 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionBp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #BP exits", "/NEM/CPU%u/ExitExceptionBp", iCpu);
1259 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionDb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #DB exits", "/NEM/CPU%u/ExitExceptionDb", iCpu);
1260 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionUd, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #UD exits", "/NEM/CPU%u/ExitExceptionUd", iCpu);
1261 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionUdHandled, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of handled #UD exits", "/NEM/CPU%u/ExitExceptionUdHandled", iCpu);
1262 STAMR3RegisterF(pVM, &pNemCpu->StatExitUnrecoverable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of unrecoverable exits", "/NEM/CPU%u/ExitUnrecoverable", iCpu);
1263 STAMR3RegisterF(pVM, &pNemCpu->StatGetMsgTimeout, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of get message timeouts/alerts", "/NEM/CPU%u/GetMsgTimeout", iCpu);
1264 STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuSuccess, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of successful CPU stops", "/NEM/CPU%u/StopCpuSuccess", iCpu);
1265 STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuPending, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of pending CPU stops", "/NEM/CPU%u/StopCpuPending", iCpu);
1266 STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuPendingAlerts,STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of pending CPU stop alerts", "/NEM/CPU%u/StopCpuPendingAlerts", iCpu);
1267 STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuPendingOdd, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of odd pending CPU stops (see code)", "/NEM/CPU%u/StopCpuPendingOdd", iCpu);
1268 STAMR3RegisterF(pVM, &pNemCpu->StatCancelChangedState, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of cancel changed state", "/NEM/CPU%u/CancelChangedState", iCpu);
1269 STAMR3RegisterF(pVM, &pNemCpu->StatCancelAlertedThread, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of cancel alerted EMT", "/NEM/CPU%u/CancelAlertedEMT", iCpu);
1270 STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnFFPre, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of pre execution FF breaks", "/NEM/CPU%u/BreakOnFFPre", iCpu);
1271 STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnFFPost, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of post execution FF breaks", "/NEM/CPU%u/BreakOnFFPost", iCpu);
1272 STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnCancel, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of cancel execution breaks", "/NEM/CPU%u/BreakOnCancel", iCpu);
1273 STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnStatus, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of status code breaks", "/NEM/CPU%u/BreakOnStatus", iCpu);
1274 STAMR3RegisterF(pVM, &pNemCpu->StatImportOnDemand, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of on-demand state imports", "/NEM/CPU%u/ImportOnDemand", iCpu);
1275 STAMR3RegisterF(pVM, &pNemCpu->StatImportOnReturn, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of state imports on loop return", "/NEM/CPU%u/ImportOnReturn", iCpu);
1276 STAMR3RegisterF(pVM, &pNemCpu->StatImportOnReturnSkipped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of skipped state imports on loop return", "/NEM/CPU%u/ImportOnReturnSkipped", iCpu);
1277 STAMR3RegisterF(pVM, &pNemCpu->StatQueryCpuTick, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TSC queries", "/NEM/CPU%u/QueryCpuTick", iCpu);
1278 }
1279
1280 PUVM pUVM = pVM->pUVM;
1281 STAMR3RegisterRefresh(pUVM, &pVM->nem.s.R0Stats.cPagesAvailable, STAMTYPE_U64, STAMVISIBILITY_ALWAYS,
1282 STAMUNIT_PAGES, STAM_REFRESH_GRP_NEM, "Free pages available to the hypervisor",
1283 "/NEM/R0Stats/cPagesAvailable");
1284 STAMR3RegisterRefresh(pUVM, &pVM->nem.s.R0Stats.cPagesInUse, STAMTYPE_U64, STAMVISIBILITY_ALWAYS,
1285 STAMUNIT_PAGES, STAM_REFRESH_GRP_NEM, "Pages in use by hypervisor",
1286 "/NEM/R0Stats/cPagesInUse");
1287 }
1288 }
1289 }
1290 }
1291 }
1292
1293 /*
1294 * We only fail if in forced mode, otherwise just log the complaint and return.
1295 */
1296 Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API || RTErrInfoIsSet(pErrInfo));
1297 if ( (fForced || !fFallback)
1298 && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NATIVE_API)
1299 return VMSetError(pVM, RT_SUCCESS_NP(rc) ? VERR_NEM_NOT_AVAILABLE : rc, RT_SRC_POS, "%s", pErrInfo->pszMsg);
1300
1301 if (RTErrInfoIsSet(pErrInfo))
1302 LogRel(("NEM: Not available: %s\n", pErrInfo->pszMsg));
1303 return VINF_SUCCESS;
1304}
1305
1306
1307/**
1308 * This is called after CPUMR3Init is done.
1309 *
1310 * @returns VBox status code.
1311 * @param   pVM     The cross context VM structure.
1312 */
1313int nemR3NativeInitAfterCPUM(PVM pVM)
1314{
1315 /*
1316 * Validate sanity.
1317 */
1318 WHV_PARTITION_HANDLE hPartition = pVM->nem.s.hPartition;
1319 AssertReturn(hPartition != NULL, VERR_WRONG_ORDER);
1320 AssertReturn(!pVM->nem.s.hPartitionDevice, VERR_WRONG_ORDER);
1321 AssertReturn(!pVM->nem.s.fCreatedEmts, VERR_WRONG_ORDER);
1322 AssertReturn(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API, VERR_WRONG_ORDER);
1323
1324 /*
1325 * Continue setting up the partition now that we've got most of the CPUID feature stuff.
1326 */
1327 WHV_PARTITION_PROPERTY Property;
1328 HRESULT hrc;
1329
1330#if 0
1331 /* Not sure if we really need to set the vendor.
1332 Update: Apparently we don't. WHvPartitionPropertyCodeProcessorVendor was removed in 17110. */
1333 RT_ZERO(Property);
1334 Property.ProcessorVendor = pVM->nem.s.enmCpuVendor == CPUMCPUVENDOR_AMD ? WHvProcessorVendorAmd
1335 : WHvProcessorVendorIntel;
1336 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorVendor, &Property, sizeof(Property));
1337 if (FAILED(hrc))
1338 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
1339 "Failed to set WHvPartitionPropertyCodeProcessorVendor to %u: %Rhrc (Last=%#x/%u)",
1340 Property.ProcessorVendor, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
1341#endif
1342
1343 /* Not sure if we really need to set the cache line flush size. */
1344 RT_ZERO(Property);
1345 Property.ProcessorClFlushSize = pVM->nem.s.cCacheLineFlushShift;
1346 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorClFlushSize, &Property, sizeof(Property));
1347 if (FAILED(hrc))
1348 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
1349 "Failed to set WHvPartitionPropertyCodeProcessorClFlushSize to %u: %Rhrc (Last=%#x/%u)",
1350 pVM->nem.s.cCacheLineFlushShift, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
1351
1352 /* Intercept #DB, #BP and #UD exceptions. */
1353 RT_ZERO(Property);
1354 Property.ExceptionExitBitmap = RT_BIT_64(WHvX64ExceptionTypeDebugTrapOrFault)
1355 | RT_BIT_64(WHvX64ExceptionTypeBreakpointTrap)
1356 | RT_BIT_64(WHvX64ExceptionTypeInvalidOpcodeFault);
1357 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeExceptionExitBitmap, &Property, sizeof(Property));
1358 if (FAILED(hrc))
1359 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
1360 "Failed to set WHvPartitionPropertyCodeExceptionExitBitmap to %#RX64: %Rhrc (Last=%#x/%u)",
1361 Property.ExceptionExitBitmap, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
1362
1363
1364 /*
1365 * Sync CPU features with CPUM.
1366 */
1367 /** @todo sync CPU features with CPUM. */
1368
1369 /* Set the partition property. */
1370 RT_ZERO(Property);
1371 Property.ProcessorFeatures.AsUINT64 = pVM->nem.s.uCpuFeatures.u64;
1372 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorFeatures, &Property, sizeof(Property));
1373 if (FAILED(hrc))
1374 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
1375 "Failed to set WHvPartitionPropertyCodeProcessorFeatures to %'#RX64: %Rhrc (Last=%#x/%u)",
1376 pVM->nem.s.uCpuFeatures.u64, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
1377
1378 /*
1379 * Set up the partition and create EMTs.
1380 *
1381 * Seems like this is where the partition is actually instantiated and we get
1382 * a handle to it.
1383 */
1384 hrc = WHvSetupPartition(hPartition);
1385 if (FAILED(hrc))
1386 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
1387 "Call to WHvSetupPartition failed: %Rhrc (Last=%#x/%u)",
1388 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
1389
1390 /* Get the handle. */
1391 HANDLE hPartitionDevice;
1392 __try
1393 {
1394 hPartitionDevice = ((HANDLE *)hPartition)[1];
1395 }
1396 __except(EXCEPTION_EXECUTE_HANDLER)
1397 {
1398 hrc = GetExceptionCode();
1399 hPartitionDevice = NULL;
1400 }
1401 if ( hPartitionDevice == NULL
1402 || hPartitionDevice == (HANDLE)(intptr_t)-1)
1403 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
1404 "Failed to get device handle for partition %p: %Rhrc", hPartition, hrc);
1405
1406 HV_PARTITION_ID idHvPartition = HV_PARTITION_ID_INVALID;
1407 if (!g_pfnVidGetHvPartitionId(hPartitionDevice, &idHvPartition))
1408 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
1409 "Failed to get device handle and/or partition ID for %p (hPartitionDevice=%p, Last=%#x/%u)",
1410 hPartition, hPartitionDevice, RTNtLastStatusValue(), RTNtLastErrorValue());
1411 pVM->nem.s.hPartitionDevice = hPartitionDevice;
1412 pVM->nem.s.idHvPartition = idHvPartition;
1413
1414 /*
1415 * Setup the EMTs.
1416 */
1417 VMCPUID iCpu;
1418 for (iCpu = 0; iCpu < pVM->cCpus; iCpu++)
1419 {
1420 PVMCPU pVCpu = &pVM->aCpus[iCpu];
1421
1422 pVCpu->nem.s.hNativeThreadHandle = (RTR3PTR)RTThreadGetNativeHandle(VMR3GetThreadHandle(pVCpu->pUVCpu));
1423 Assert((HANDLE)pVCpu->nem.s.hNativeThreadHandle != INVALID_HANDLE_VALUE);
1424
1425#ifndef NEM_WIN_USE_OUR_OWN_RUN_API
1426# ifdef NEM_WIN_WITH_RING0_RUNLOOP
1427 if (!pVM->nem.s.fUseRing0Runloop)
1428# endif
1429 {
1430 hrc = WHvCreateVirtualProcessor(hPartition, iCpu, 0 /*fFlags*/);
1431 if (FAILED(hrc))
1432 {
1433 NTSTATUS const rcNtLast = RTNtLastStatusValue();
1434 DWORD const dwErrLast = RTNtLastErrorValue();
1435 while (iCpu-- > 0)
1436 {
1437 HRESULT hrc2 = WHvDeleteVirtualProcessor(hPartition, iCpu);
1438 AssertLogRelMsg(SUCCEEDED(hrc2), ("WHvDeleteVirtualProcessor(%p, %u) -> %Rhrc (Last=%#x/%u)\n",
1439 hPartition, iCpu, hrc2, RTNtLastStatusValue(),
1440 RTNtLastErrorValue()));
1441 }
1442 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
1443                                   "Call to WHvCreateVirtualProcessor failed: %Rhrc (Last=%#x/%u)", hrc, rcNtLast, dwErrLast);
1444 }
1445 }
1446# ifdef NEM_WIN_WITH_RING0_RUNLOOP
1447 else
1448# endif
1449#endif /* !NEM_WIN_USE_OUR_OWN_RUN_API */
1450#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_OUR_OWN_RUN_API)
1451 {
1452 VID_MAPPED_MESSAGE_SLOT MappedMsgSlot = { NULL, UINT32_MAX, UINT32_MAX };
1453 if (g_pfnVidMessageSlotMap(hPartitionDevice, &MappedMsgSlot, iCpu))
1454 {
1455 AssertLogRelMsg(MappedMsgSlot.iCpu == iCpu && MappedMsgSlot.uParentAdvisory == UINT32_MAX,
1456 ("%#x %#x (iCpu=%#x)\n", MappedMsgSlot.iCpu, MappedMsgSlot.uParentAdvisory, iCpu));
1457 pVCpu->nem.s.pvMsgSlotMapping = MappedMsgSlot.pMsgBlock;
1458 }
1459 else
1460 {
1461 NTSTATUS const rcNtLast = RTNtLastStatusValue();
1462 DWORD const dwErrLast = RTNtLastErrorValue();
1463 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
1464                                   "Call to VidMessageSlotMap failed: Last=%#x/%u", rcNtLast, dwErrLast);
1465 }
1466 }
1467#endif
1468 }
1469 pVM->nem.s.fCreatedEmts = true;
1470
1471 /*
1472 * Do some more ring-0 initialization now that we've got the partition handle.
1473 */
1474 int rc = VMMR3CallR0Emt(pVM, &pVM->aCpus[0], VMMR0_DO_NEM_INIT_VM_PART_2, 0, NULL);
1475 if (RT_SUCCESS(rc))
1476 {
1477 LogRel(("NEM: Successfully set up partition (device handle %p, partition ID %#llx)\n", hPartitionDevice, idHvPartition));
1478
1479#if 1
1480 VMMR3CallR0Emt(pVM, &pVM->aCpus[0], VMMR0_DO_NEM_UPDATE_STATISTICS, 0, NULL);
1481 LogRel(("NEM: Memory balance: %#RX64 out of %#RX64 pages in use\n",
1482 pVM->nem.s.R0Stats.cPagesInUse, pVM->nem.s.R0Stats.cPagesAvailable));
1483#endif
1484
1485 /*
1486 * Register statistics on shared pages.
1487 */
1488 /** @todo HvCallMapStatsPage */
1489
1490 /*
1491 * Adjust features.
1492 * Note! We've already disabled X2APIC via CFGM during the first init call.
1493 */
1494
1495#if 0 && defined(DEBUG_bird)
1496 /*
1497 * Poke and probe a little.
1498 */
1499 PVMCPU pVCpu = &pVM->aCpus[0];
1500 uint32_t aRegNames[1024];
1501 HV_REGISTER_VALUE aRegValues[1024];
1502 uint32_t aPropCodes[128];
1503 uint64_t aPropValues[128];
1504 for (int iOuter = 0; iOuter < 5; iOuter++)
1505 {
1506 LogRel(("\niOuter %d\n", iOuter));
1507# if 1
1508 /* registers */
1509 uint32_t iRegValue = 0;
1510 uint32_t cRegChanges = 0;
1511 for (uint32_t iReg = 0; iReg < 0x001101ff; iReg++)
1512 {
1513 if (iOuter != 0 && aRegNames[iRegValue] > iReg)
1514 continue;
1515 RT_ZERO(pVCpu->nem.s.Hypercall.Experiment);
1516 pVCpu->nem.s.Hypercall.Experiment.uItem = iReg;
1517 int rc2 = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_EXPERIMENT, 0, NULL);
1518 AssertLogRelRCBreak(rc2);
1519 if (pVCpu->nem.s.Hypercall.Experiment.fSuccess)
1520 {
1521 LogRel(("Register %#010x = %#18RX64, %#18RX64\n", iReg,
1522 pVCpu->nem.s.Hypercall.Experiment.uLoValue, pVCpu->nem.s.Hypercall.Experiment.uHiValue));
1523 if (iReg == HvX64RegisterTsc)
1524 {
1525 uint64_t uTsc = ASMReadTSC();
1526 LogRel(("TSC = %#18RX64; Delta %#18RX64 or %#18RX64\n",
1527 uTsc, pVCpu->nem.s.Hypercall.Experiment.uLoValue - uTsc, uTsc - pVCpu->nem.s.Hypercall.Experiment.uLoValue));
1528 }
1529
1530 if (iOuter == 0)
1531 aRegNames[iRegValue] = iReg;
1532 else if( aRegValues[iRegValue].Reg128.Low64 != pVCpu->nem.s.Hypercall.Experiment.uLoValue
1533 || aRegValues[iRegValue].Reg128.High64 != pVCpu->nem.s.Hypercall.Experiment.uHiValue)
1534 {
1535 LogRel(("Changed from %#18RX64, %#18RX64 !!\n",
1536 aRegValues[iRegValue].Reg128.Low64, aRegValues[iRegValue].Reg128.High64));
1537 LogRel(("Delta %#18RX64, %#18RX64 !!\n",
1538 pVCpu->nem.s.Hypercall.Experiment.uLoValue - aRegValues[iRegValue].Reg128.Low64,
1539 pVCpu->nem.s.Hypercall.Experiment.uHiValue - aRegValues[iRegValue].Reg128.High64));
1540 cRegChanges++;
1541 }
1542 aRegValues[iRegValue].Reg128.Low64 = pVCpu->nem.s.Hypercall.Experiment.uLoValue;
1543 aRegValues[iRegValue].Reg128.High64 = pVCpu->nem.s.Hypercall.Experiment.uHiValue;
1544 iRegValue++;
1545 AssertBreak(iRegValue < RT_ELEMENTS(aRegValues));
1546 }
1547 }
1548 LogRel(("Found %u registers, %u changed\n", iRegValue, cRegChanges));
1549# endif
1550# if 1
1551 /* partition properties */
1552 uint32_t iPropValue = 0;
1553 uint32_t cPropChanges = 0;
1554 for (uint32_t iProp = 0; iProp < 0xc11ff; iProp++)
1555 {
1556 if (iProp == HvPartitionPropertyDebugChannelId /* hangs host */)
1557 continue;
1558 if (iOuter != 0 && aPropCodes[iPropValue] > iProp)
1559 continue;
1560 RT_ZERO(pVCpu->nem.s.Hypercall.Experiment);
1561 pVCpu->nem.s.Hypercall.Experiment.uItem = iProp;
1562 int rc2 = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_EXPERIMENT, 1, NULL);
1563 AssertLogRelRCBreak(rc2);
1564 if (pVCpu->nem.s.Hypercall.Experiment.fSuccess)
1565 {
1566 LogRel(("Property %#010x = %#18RX64\n", iProp, pVCpu->nem.s.Hypercall.Experiment.uLoValue));
1567 if (iOuter == 0)
1568 aPropCodes[iPropValue] = iProp;
1569 else if (aPropValues[iPropValue] != pVCpu->nem.s.Hypercall.Experiment.uLoValue)
1570 {
1571 LogRel(("Changed from %#18RX64, delta %#18RX64!!\n",
1572 aPropValues[iPropValue], pVCpu->nem.s.Hypercall.Experiment.uLoValue - aPropValues[iPropValue]));
1573                     cPropChanges++;
1574 }
1575 aPropValues[iPropValue] = pVCpu->nem.s.Hypercall.Experiment.uLoValue;
1576 iPropValue++;
1577 AssertBreak(iPropValue < RT_ELEMENTS(aPropValues));
1578 }
1579 }
1580 LogRel(("Found %u properties, %u changed\n", iPropValue, cPropChanges));
1581# endif
1582
1583 /* Modify the TSC register value and see what changes. */
1584 if (iOuter != 0)
1585 {
1586 RT_ZERO(pVCpu->nem.s.Hypercall.Experiment);
1587 pVCpu->nem.s.Hypercall.Experiment.uItem = HvX64RegisterTsc;
1588 pVCpu->nem.s.Hypercall.Experiment.uHiValue = UINT64_C(0x00000fffffffffff) >> iOuter;
1589 pVCpu->nem.s.Hypercall.Experiment.uLoValue = UINT64_C(0x0011100000000000) << iOuter;
1590 VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_EXPERIMENT, 2, NULL);
1591 LogRel(("Setting HvX64RegisterTsc -> %RTbool (%#RX64)\n", pVCpu->nem.s.Hypercall.Experiment.fSuccess, pVCpu->nem.s.Hypercall.Experiment.uStatus));
1592 }
1593
1594 RT_ZERO(pVCpu->nem.s.Hypercall.Experiment);
1595 pVCpu->nem.s.Hypercall.Experiment.uItem = HvX64RegisterTsc;
1596 VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_EXPERIMENT, 0, NULL);
1597 LogRel(("HvX64RegisterTsc = %#RX64, %#RX64\n", pVCpu->nem.s.Hypercall.Experiment.uLoValue, pVCpu->nem.s.Hypercall.Experiment.uHiValue));
1598 }
1599
1600#endif
1601 return VINF_SUCCESS;
1602 }
1603 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS, "Call to NEMR0InitVMPart2 failed: %Rrc", rc);
1604}
1605
1606
1607int nemR3NativeInitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
1608{
1609 //BOOL fRet = SetThreadPriority(GetCurrentThread(), 0);
1610 //AssertLogRel(fRet);
1611
1612 NOREF(pVM); NOREF(enmWhat);
1613 return VINF_SUCCESS;
1614}
1615
1616
1617int nemR3NativeTerm(PVM pVM)
1618{
1619 /*
1620 * Delete the partition.
1621 */
1622 WHV_PARTITION_HANDLE hPartition = pVM->nem.s.hPartition;
1623 pVM->nem.s.hPartition = NULL;
1624 pVM->nem.s.hPartitionDevice = NULL;
1625 if (hPartition != NULL)
1626 {
1627 VMCPUID iCpu = pVM->nem.s.fCreatedEmts ? pVM->cCpus : 0;
1628 LogRel(("NEM: Destroying partition %p with its %u VCpus...\n", hPartition, iCpu));
1629 while (iCpu-- > 0)
1630 {
1631 pVM->aCpus[iCpu].nem.s.pvMsgSlotMapping = NULL;
1632#ifndef NEM_WIN_USE_OUR_OWN_RUN_API
1633# ifdef NEM_WIN_WITH_RING0_RUNLOOP
1634 if (!pVM->nem.s.fUseRing0Runloop)
1635# endif
1636 {
1637 HRESULT hrc = WHvDeleteVirtualProcessor(hPartition, iCpu);
1638 AssertLogRelMsg(SUCCEEDED(hrc), ("WHvDeleteVirtualProcessor(%p, %u) -> %Rhrc (Last=%#x/%u)\n",
1639 hPartition, iCpu, hrc, RTNtLastStatusValue(),
1640 RTNtLastErrorValue()));
1641 }
1642#endif
1643 }
1644 WHvDeletePartition(hPartition);
1645 }
1646 pVM->nem.s.fCreatedEmts = false;
1647 return VINF_SUCCESS;
1648}
1649
1650
1651/**
1652 * VM reset notification.
1653 *
1654 * @param pVM The cross context VM structure.
1655 */
1656void nemR3NativeReset(PVM pVM)
1657{
1658 /* Unfix the A20 gate. */
1659 pVM->nem.s.fA20Fixed = false;
1660}
1661
1662
1663/**
1664 * Reset CPU due to INIT IPI or hot (un)plugging.
1665 *
1666 * @param pVCpu The cross context virtual CPU structure of the CPU being
1667 * reset.
1668 * @param fInitIpi Whether this is the INIT IPI or hot (un)plugging case.
1669 */
1670void nemR3NativeResetCpu(PVMCPU pVCpu, bool fInitIpi)
1671{
1672 /* Lock the A20 gate if INIT IPI, make sure it's enabled. */
1673 if (fInitIpi && pVCpu->idCpu > 0)
1674 {
1675 PVM pVM = pVCpu->CTX_SUFF(pVM);
1676 if (!pVM->nem.s.fA20Enabled)
1677 nemR3NativeNotifySetA20(pVCpu, true);
1678 pVM->nem.s.fA20Enabled = true;
1679 pVM->nem.s.fA20Fixed = true;
1680 }
1681}
1682
1683
1684VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu)
1685{
1686#ifdef NEM_WIN_WITH_RING0_RUNLOOP
1687 if (pVM->nem.s.fUseRing0Runloop)
1688 {
1689 for (;;)
1690 {
1691 VBOXSTRICTRC rcStrict = VMMR3CallR0EmtFast(pVM, pVCpu, VMMR0_DO_NEM_RUN);
1692 if (RT_SUCCESS(rcStrict))
1693 {
1694 /*
1695 * We deal with VINF_NEM_FLUSH_TLB here, since we're running the risk of
1696 * getting these while we already got another RC (I/O ports).
1697 */
1698 /* Status codes: */
1699 VBOXSTRICTRC rcPending = pVCpu->nem.s.rcPending;
1700 pVCpu->nem.s.rcPending = VINF_SUCCESS;
1701 if (rcStrict == VINF_NEM_FLUSH_TLB || rcPending == VINF_NEM_FLUSH_TLB)
1702 {
1703 LogFlow(("nemR3NativeRunGC: calling PGMFlushTLB...\n"));
1704 int rc = PGMFlushTLB(pVCpu, CPUMGetGuestCR3(pVCpu), true);
1705 AssertRCReturn(rc, rc);
1706 if (rcStrict == VINF_NEM_FLUSH_TLB)
1707 {
1708 if ( !VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_POST_MASK | VM_FF_HP_R0_PRE_HM_MASK)
1709 && !VMCPU_FF_IS_ANY_SET(pVCpu, (VMCPU_FF_HIGH_PRIORITY_POST_MASK | VMCPU_FF_HP_R0_PRE_HM_MASK)
1710 & ~VMCPU_FF_RESUME_GUEST_MASK))
1711 {
1712 VMCPU_FF_CLEAR_MASK(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
1713 continue;
1714 }
1715 rcStrict = VINF_SUCCESS;
1716 }
1717 }
1718 else
1719 AssertMsg(rcPending == VINF_SUCCESS, ("rcPending=%Rrc\n", VBOXSTRICTRC_VAL(rcPending) ));
1720 }
1721 LogFlow(("nemR3NativeRunGC: returns %Rrc\n", VBOXSTRICTRC_VAL(rcStrict) ));
1722 return rcStrict;
1723 }
1724 }
1725#endif
1726 return nemHCWinRunGC(pVM, pVCpu, NULL /*pGVM*/, NULL /*pGVCpu*/);
1727}
1728
1729
1730bool nemR3NativeCanExecuteGuest(PVM pVM, PVMCPU pVCpu)
1731{
1732 NOREF(pVM); NOREF(pVCpu);
1733 return true;
1734}
1735
1736
1737bool nemR3NativeSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
1738{
1739 NOREF(pVM); NOREF(pVCpu); NOREF(fEnable);
1740 return false;
1741}
1742
1743
1744/**
1745 * Forced flag notification call from VMEmt.h.
1746 *
1747 * This is only called when pVCpu is in the VMCPUSTATE_STARTED_EXEC_NEM state.
1748 *
1749 * @param pVM The cross context VM structure.
1750 * @param pVCpu The cross context virtual CPU structure of the CPU
1751 * to be notified.
1752 * @param fFlags Notification flags, VMNOTIFYFF_FLAGS_XXX.
1753 */
1754void nemR3NativeNotifyFF(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
1755{
1756#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
1757 nemHCWinCancelRunVirtualProcessor(pVM, pVCpu);
1758#else
1759# ifdef NEM_WIN_WITH_RING0_RUNLOOP
1760 if (pVM->nem.s.fUseRing0Runloop)
1761 nemHCWinCancelRunVirtualProcessor(pVM, pVCpu);
1762 else
1763# endif
1764 {
1765 Log8(("nemR3NativeNotifyFF: canceling %u\n", pVCpu->idCpu));
1766 HRESULT hrc = WHvCancelRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, 0);
1767 AssertMsg(SUCCEEDED(hrc), ("WHvCancelRunVirtualProcessor -> hrc=%Rhrc\n", hrc));
1768 RT_NOREF_PV(hrc);
1769 }
1770#endif
1771 RT_NOREF_PV(fFlags);
1772}
1773
1774
1775DECLINLINE(int) nemR3NativeGCPhys2R3PtrReadOnly(PVM pVM, RTGCPHYS GCPhys, const void **ppv)
1776{
1777 PGMPAGEMAPLOCK Lock;
1778 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, ppv, &Lock);
1779 if (RT_SUCCESS(rc))
1780 PGMPhysReleasePageMappingLock(pVM, &Lock);
1781 return rc;
1782}
1783
1784
1785DECLINLINE(int) nemR3NativeGCPhys2R3PtrWriteable(PVM pVM, RTGCPHYS GCPhys, void **ppv)
1786{
1787 PGMPAGEMAPLOCK Lock;
1788 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, ppv, &Lock);
1789 if (RT_SUCCESS(rc))
1790 PGMPhysReleasePageMappingLock(pVM, &Lock);
1791 return rc;
1792}
1793
1794
1795int nemR3NativeNotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb)
1796{
1797 Log5(("nemR3NativeNotifyPhysRamRegister: %RGp LB %RGp\n", GCPhys, cb));
1798 NOREF(pVM); NOREF(GCPhys); NOREF(cb);
1799 return VINF_SUCCESS;
1800}
1801
1802
1803int nemR3NativeNotifyPhysMmioExMap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, void *pvMmio2)
1804{
1805 Log5(("nemR3NativeNotifyPhysMmioExMap: %RGp LB %RGp fFlags=%#x pvMmio2=%p\n", GCPhys, cb, fFlags, pvMmio2));
1806 NOREF(pVM); NOREF(GCPhys); NOREF(cb); NOREF(fFlags); NOREF(pvMmio2);
1807 return VINF_SUCCESS;
1808}
1809
1810
1811int nemR3NativeNotifyPhysMmioExUnmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags)
1812{
1813 Log5(("nemR3NativeNotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x\n", GCPhys, cb, fFlags));
1814 NOREF(pVM); NOREF(GCPhys); NOREF(cb); NOREF(fFlags);
1815 return VINF_SUCCESS;
1816}
1817
1818
1819/**
1820 * Called early during ROM registration, right after the pages have been
1821 * allocated and the RAM range updated.
1822 *
1823 * This will be succeeded by a number of NEMHCNotifyPhysPageProtChanged() calls
1824 * and finally a NEMR3NotifyPhysRomRegisterEarly().
1825 *
1826 * @returns VBox status code
1827 * @param pVM The cross context VM structure.
1828 * @param GCPhys The ROM address (page aligned).
1829 * @param cb The size (page aligned).
1830 * @param fFlags NEM_NOTIFY_PHYS_ROM_F_XXX.
1831 */
1832int nemR3NativeNotifyPhysRomRegisterEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags)
1833{
1834 Log5(("nemR3NativeNotifyPhysRomRegisterEarly: %RGp LB %RGp fFlags=%#x\n", GCPhys, cb, fFlags));
1835#if 0 /* Let's not do this after all.  We'll get protection change notifications for each page, and if not we'll map them lazily. */
1836 RTGCPHYS const cPages = cb >> X86_PAGE_SHIFT;
1837 for (RTGCPHYS iPage = 0; iPage < cPages; iPage++, GCPhys += X86_PAGE_SIZE)
1838 {
1839 const void *pvPage;
1840 int rc = nemR3NativeGCPhys2R3PtrReadOnly(pVM, GCPhys, &pvPage);
1841 if (RT_SUCCESS(rc))
1842 {
1843 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, (void *)pvPage, GCPhys, X86_PAGE_SIZE,
1844 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
1845 if (SUCCEEDED(hrc))
1846 { /* likely */ }
1847 else
1848 {
1849 LogRel(("nemR3NativeNotifyPhysRomRegisterEarly: GCPhys=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
1850 GCPhys, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
1851 return VERR_NEM_INIT_FAILED;
1852 }
1853 }
1854 else
1855 {
1856 LogRel(("nemR3NativeNotifyPhysRomRegisterEarly: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));
1857 return rc;
1858 }
1859 }
1860#else
1861 NOREF(pVM); NOREF(GCPhys); NOREF(cb);
1862#endif
1863 RT_NOREF_PV(fFlags);
1864 return VINF_SUCCESS;
1865}
1866
1867
1868/**
1869 * Called after the ROM range has been fully completed.
1870 *
1871 * This will be preceded by a NEMR3NotifyPhysRomRegisterEarly() call as well as a
1872 * number of NEMHCNotifyPhysPageProtChanged calls.
1873 *
1874 * @returns VBox status code
1875 * @param pVM The cross context VM structure.
1876 * @param GCPhys The ROM address (page aligned).
1877 * @param cb The size (page aligned).
1878 * @param fFlags NEM_NOTIFY_PHYS_ROM_F_XXX.
1879 */
1880int nemR3NativeNotifyPhysRomRegisterLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags)
1881{
1882 Log5(("nemR3NativeNotifyPhysRomRegisterLate: %RGp LB %RGp fFlags=%#x\n", GCPhys, cb, fFlags));
1883 NOREF(pVM); NOREF(GCPhys); NOREF(cb); NOREF(fFlags);
1884 return VINF_SUCCESS;
1885}
1886
1887
1888/**
1889 * @callback_method_impl{FNPGMPHYSNEMCHECKPAGE}
1890 */
1891static DECLCALLBACK(int) nemR3WinUnsetForA20CheckerCallback(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys,
1892 PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
1893{
1894 /* We'll just unmap the memory. */
1895 if (pInfo->u2NemState > NEM_WIN_PAGE_STATE_UNMAPPED)
1896 {
1897#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1898 int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys);
1899 AssertRC(rc);
1900 if (RT_SUCCESS(rc))
1901#else
1902 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
1903 if (SUCCEEDED(hrc))
1904#endif
1905 {
1906 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
1907 Log5(("NEM GPA unmapped/A20: %RGp (was %s, cMappedPages=%u)\n", GCPhys, g_apszPageStates[pInfo->u2NemState], cMappedPages));
1908 pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
1909 }
1910 else
1911 {
1912#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1913 LogRel(("nemR3WinUnsetForA20CheckerCallback/unmap: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));
1914 return rc;
1915#else
1916 LogRel(("nemR3WinUnsetForA20CheckerCallback/unmap: GCPhys=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
1917 GCPhys, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
1918 return VERR_INTERNAL_ERROR_2;
1919#endif
1920 }
1921 }
1922 RT_NOREF(pVCpu, pvUser);
1923 return VINF_SUCCESS;
1924}
1925
1926
1927/**
1928 * Unmaps a page from Hyper-V for the purpose of emulating A20 gate behavior.
1929 *
1930 * @returns The PGMPhysNemQueryPageInfo result.
1931 * @param pVM The cross context VM structure.
1932 * @param pVCpu The cross context virtual CPU structure.
1933 * @param GCPhys The page to unmap.
1934 */
1935static int nemR3WinUnmapPageForA20Gate(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
1936{
1937 PGMPHYSNEMPAGEINFO Info;
1938 return PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhys, false /*fMakeWritable*/, &Info,
1939 nemR3WinUnsetForA20CheckerCallback, NULL);
1940}
1941
1942
1943/**
1944 * Called when the A20 state changes.
1945 *
1946 * Hyper-V doesn't seem to offer a simple way of implementing the A20 line
1947 * features of PCs. So, we do a very minimal emulation of the HMA to make DOS
1948 * happy.
1949 *
1950 * @param pVCpu The CPU the A20 state changed on.
1951 * @param fEnabled Whether it was enabled (true) or disabled.
1952 */
1953void nemR3NativeNotifySetA20(PVMCPU pVCpu, bool fEnabled)
1954{
1955 Log(("nemR3NativeNotifySetA20: fEnabled=%RTbool\n", fEnabled));
1956 PVM pVM = pVCpu->CTX_SUFF(pVM);
1957 if (!pVM->nem.s.fA20Fixed)
1958 {
1959 pVM->nem.s.fA20Enabled = fEnabled;
1960 for (RTGCPHYS GCPhys = _1M; GCPhys < _1M + _64K; GCPhys += X86_PAGE_SIZE)
1961 nemR3WinUnmapPageForA20Gate(pVM, pVCpu, GCPhys);
1962 }
1963}
1964
1965
1966/** @page pg_nem_win NEM/win - Native Execution Manager, Windows.
1967 *
1968 * On Windows the Hyper-V root partition (dom0 in Xen terminology) does not have
1969 * nested VT-x or AMD-V capabilities. Early on raw-mode worked inside it, but
1970 * for a while now we've been getting \#GPs when trying to modify CR4 in the
1971 * world switcher. So, when Hyper-V is active on Windows we have little choice
1972 * but to use Hyper-V to run our VMs.
1973 *
1974 *
1975 * @section sub_nem_win_whv The WinHvPlatform API
1976 *
1977 * Since Windows 10 build 17083 there is a documented API for managing Hyper-V
1978 * VMs: header file WinHvPlatform.h and implementation in WinHvPlatform.dll.
1979 * This interface is a wrapper around the undocumented Virtualization
1980 * Infrastructure Driver (VID) API - VID.DLL and VID.SYS. The wrapper is
1981 * written in C++, namespaced, early versions (at least) was using standard C++
1982 * written in C++ and namespaced; early versions (at least) were using standard C++
1983 *
1984 * When creating a VM using WHvCreatePartition, it will only create the
1985 * WinHvPlatform structures for it, to which you get an abstract pointer. The
1986 * VID API that actually creates the partition is first engaged when you call
1987 * WHvSetupPartition after first setting a lot of properties using
1988 * WHvSetPartitionProperty. Since the VID API is just a very thin wrapper
1989 * around CreateFile and NtDeviceIoControlFile, it returns an actual HANDLE for
1990 * the partition to WinHvPlatform. We fish this HANDLE out of the WinHvPlatform
1991 * partition structures because we need to talk directly to VID for reasons
1992 * we'll get to in a bit. (Btw. we could also intercept the CreateFileW or
1993 * NtDeviceIoControlFile calls from VID.DLL to get the HANDLE should fishing in
1994 * the partition structures become difficult.)
1995 *
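 * As a rough sketch of the sequence just described (error handling trimmed; the
 * single-CPU ProcessorCount value and the variable names are placeholders for the
 * example, not the actual VBox code):
 * @verbatim
    WHV_PARTITION_HANDLE hPartition;
    HRESULT hrc = WHvCreatePartition(&hPartition);        // only creates the WinHvPlatform structures

    WHV_PARTITION_PROPERTY Prop;
    RT_ZERO(Prop);
    Prop.ProcessorCount = 1;                              // placeholder: a single guest CPU
    hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorCount, &Prop, sizeof(Prop));

    hrc = WHvSetupPartition(hPartition);                  // this is where VID.SYS actually creates the partition
    hrc = WHvCreateVirtualProcessor(hPartition, 0 /*VpIndex*/, 0 /*Flags*/);
 * @endverbatim
 *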
1996 * The WinHvPlatform API requires us to both set the number of guest CPUs before
1997 * setting up the partition and call WHvCreateVirtualProcessor for each of them.
1998 * The CPU creation function boils down to a VidMessageSlotMap call that sets up
1999 * and maps a message buffer into ring-3 for async communication with hyper-V
2000 * and/or the VID.SYS thread actually running the CPU thru
2001 * WinHvRunVpDispatchLoop(). When for instance a VMEXIT is encountered, hyper-V
2002 * sends a message that the WHvRunVirtualProcessor API retrieves (and later
2003 * acknowledges) via VidMessageSlotHandleAndGetNext. Since or about build
2004 * 17757 a register page is also mapped into user space when creating the
2005 * virtual CPU.  It should be noted that WHvDeleteVirtualProcessor doesn't do
2006 * much, as there seems to be no partner function to VidMessageSlotMap that
2007 * reverses what it did.
2008 *
2009 * Memory is managed thru calls to WHvMapGpaRange and WHvUnmapGpaRange (GPA does
2010 * not mean grade point average here, but rather guest physical address space),
2011 * which correspond to VidCreateVaGpaRangeSpecifyUserVa and VidDestroyGpaRange
2012 * respectively.  As 'UserVa' indicates, the functions work on user process
2013 * memory.  The mappings are also subject to quota restrictions, so the number
2014 * of ranges is limited and probably their total size as well.  Obviously
2015 * VID.SYS keeps track of the ranges, but so does WinHvPlatform, which means
2016 * there is a bit of overhead involved and the quota restrictions make sense.
2017 *
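 * As a small illustration of the shape of these two calls (not the VBox memory
 * management code itself; pvR3Pages and GCPhys are assumed, page-aligned inputs):
 * @verbatim
    // Map 2 MB of ring-3 memory read/write/execute into the guest, then tear it down again.
    HRESULT hrc = WHvMapGpaRange(hPartition, pvR3Pages, GCPhys, _2M,
                                 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagWrite | WHvMapGpaRangeFlagExecute);
    /* ... run the guest ... */
    hrc = WHvUnmapGpaRange(hPartition, GCPhys, _2M);
 * @endverbatim
 *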
2018 * Running guest code is done through the WHvRunVirtualProcessor function. It
2019 * asynchronously starts or resumes hyper-V CPU execution and then waits for a
2020 * VMEXIT message.  Hyper-V / VID.SYS will return information about the message
2021 * in the message buffer mapping, and WHvRunVirtualProcessor will convert that
2022 * into its own WHV_RUN_VP_EXIT_CONTEXT format.
2023 *
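 * To illustrate the pattern, a bare-bones ring-3 run loop over this API could look
 * like the sketch below; this is not how the VBox code is structured, just the
 * minimal shape with two exit reasons picked out:
 * @verbatim
    for (;;)
    {
        WHV_RUN_VP_EXIT_CONTEXT ExitCtx;
        RT_ZERO(ExitCtx);
        HRESULT hrc = WHvRunVirtualProcessor(hPartition, 0 /*VpIndex*/, &ExitCtx, sizeof(ExitCtx));
        if (FAILED(hrc))
            break;
        if (ExitCtx.ExitReason == WHvRunVpExitReasonX64IoPortAccess)
        {
            /* emulate the port access, advance RIP, then loop. */
        }
        else if (ExitCtx.ExitReason == WHvRunVpExitReasonCanceled)
            break;  /* another thread asked us to stop */
        /* ... handle the other exit reasons ... */
    }
 * @endverbatim
 *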
2024 * Other threads can interrupt the execution by using WHvCancelVirtualProcessor,
2025 * which since or about build 17757 uses VidMessageSlotHandleAndGetNext to do
2026 * the work (earlier builds would open the waiting thread, do a dummy
2027 * QueueUserAPC on it, and let it upon return use VidStopVirtualProcessor to
2028 * do the actual stopping). While there is certainly a race between cancelation
2029 * and the CPU causing a natural VMEXIT, it is not known whether this still
2030 * causes extra work on subsequent WHvRunVirtualProcessor calls (it did in
2031 * 17134 and earlier).
2032 *
2033 * Registers are retrieved and set via WHvGetVirtualProcessorRegisters and
2034 * WHvSetVirtualProcessorRegisters. In addition, several VMEXITs include
2035 * essential register state in the exit context information, potentially making
2036 * it possible to emulate the instruction causing the exit without involving
2037 * WHvGetVirtualProcessorRegisters.
2038 *
2039 *
2040 * @subsection subsec_nem_win_whv_cons Issues & Feedback
2041 *
2042 * Here are some observations (mostly against build 17101):
2043 *
2044 * - The VMEXIT performance is dismal (build 17134).
2045 *
2046 * Our proof of concept implementation with a kernel runloop (i.e. not using
2047 * WHvRunVirtualProcessor and friends, but calling VID.SYS fast I/O control
2048 * entry point directly) delivers 9-10% of the port I/O performance and only
2049 * 6-7% of the MMIO performance that we have with our own hypervisor.
2050 *
2051 *    When using the official WinHvPlatform API, the numbers are 3% for port I/O
2052 * and 5% for MMIO.
2053 *
2054 *    While the tests we've done are using tight loops only doing port I/O
2055 * and MMIO, the problem is clearly visible when running regular guest OSes.
2056 * Anything that hammers the VGA device would be suffering, for example:
2057 *
2058 * - Windows 2000 boot screen animation overloads us with MMIO exits
2059 * and won't even boot because all the time is spent in interrupt
2060 *         handlers and redrawing the screen.
2061 *
2062 *       - DSL 4.4 and its bootmenu logo is slower than molasses in January.
2063 *
2064 * We have not found a workaround for this yet.
2065 *
2066 * Something that might improve the issue a little is to detect blocks with
2067 * excessive MMIO and port I/O exits and emulate instructions to cover
2068 * multiple exits before letting Hyper-V have a go at the guest execution
2069 * again. This will only improve the situation under some circumstances,
2070 * since emulating instructions without recompilation can be expensive, so
2071 *    there will only be real gains if the exiting instructions are tightly
2072 * packed.
2073 *
2074 * Update: Security fixes during the summer of 2018 caused the performance to
2075 *    drop even more.
2076 *
2077 * Update [build 17757]: Some performance improvements here, but they don't
2078 * yet make up for what was lost this summer.
2079 *
2080 *
2081 * - We need a way to directly modify the TSC offset (or bias if you like).
2082 *
2083 * The current approach of setting the WHvX64RegisterTsc register one by one
2084 * on each virtual CPU in sequence will introduce random inaccuracies,
2085 *    especially if the thread doing the job is rescheduled at a bad time.
2086 *
2087 *
2088 * - Unable to access WHvX64RegisterMsrMtrrCap (build 17134).
2089 *
2090 *
2091 *  - On AMD Ryzen grub/debian 9.0 ends up with an unrecoverable exception
2092 * when IA32_MTRR_PHYSMASK0 is written.
2093 *
2094 *
2095 * - The IA32_APIC_BASE register does not work right:
2096 *
2097 * - Attempts by the guest to clear bit 11 (EN) are ignored, both the
2098 * guest and the VMM reads back the old value.
2099 *
2100 * - Attempts to modify the base address (bits NN:12) seems to be ignored
2101 * in the same way.
2102 *
2103 *       - The VMM can modify both the base address as well as the EN and
2104 * BSP bits, however this is useless if we cannot intercept the WRMSR.
2105 *
2106 * - Attempts by the guest to set the EXTD bit (X2APIC) result in \#GP(0),
2107 *         while the VMM ends up with ERROR_HV_INVALID_PARAMETER.  Seems
2108 * there is no way to support X2APIC.
2109 *
2110 *
2111 *  - Not sure if this is a thing, but WHvCancelVirtualProcessor seems to cause
2112 *    a lot more spurious WHvRunVirtualProcessor returns than what we get
2113 *    with the replacement code.  By spurious returns we mean that the
2114 * subsequent call to WHvRunVirtualProcessor would return immediately.
2115 *
2116 * Update [build 17757]: New cancelation code might have addressed this, but
2117 *    we haven't had time to test it yet.
2118 *
2119 *
2120 * - There is no API for modifying protection of a page within a GPA range.
2121 *
2122 * From what we can tell, the only way to modify the protection (like readonly
2123 * -> writable, or vice versa) is to first unmap the range and then remap it
2124 * with the new protection.
2125 *
2126 * We are for instance doing this quite a bit in order to track dirty VRAM
2127 *    pages.  VRAM pages start out as readonly; when the guest writes to a page
2128 *    we take an exit, note down which page it is, make it writable and restart
2129 *    the instruction.  After refreshing the display, we reset all the writable
2130 *    pages to readonly again, in bulk fashion.
2131 *
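 *    Concretely, the readonly -> writable transition therefore ends up as an unmap
 *    immediately followed by a remap of the same single page, roughly as sketched
 *    below (pvR3Page and GCPhysPage are assumed inputs, not variables from this file):
 * @verbatim
    // Flip one guest page from readonly to writable by remapping it.
    HRESULT hrc = WHvUnmapGpaRange(hPartition, GCPhysPage, X86_PAGE_SIZE);
    hrc = WHvMapGpaRange(hPartition, pvR3Page, GCPhysPage, X86_PAGE_SIZE,
                         WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagWrite | WHvMapGpaRangeFlagExecute);
 * @endverbatim
 *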
2132 * Now to work around this issue, we do page sized GPA ranges. In addition to
2133 *    adding a lot of tracking overhead to WinHvPlatform and VID.SYS, this also
2134 * causes us to exceed our quota before we've even mapped a default sized
2135 * (128MB) VRAM page-by-page. So, to work around this quota issue we have to
2136 * lazily map pages and actively restrict the number of mappings.
2137 *
2138 * Our best workaround thus far is bypassing WinHvPlatform and VID entirely
2139 *    when it comes to guest memory management and instead use the underlying
2140 * hypercalls (HvCallMapGpaPages, HvCallUnmapGpaPages) to do it ourselves.
2141 * (This also maps a whole lot better into our own guest page management
2142 * infrastructure.)
2143 *
2144 *    Update [build 17757]: Introduces a KVM-like dirty logging API which could
2145 *    help track dirty VGA pages, while being useless for shadow ROM and
2146 *    devices trying to catch the guest updating descriptors and such.
2147 *
2148 *
2149 * - Observed problems doing WHvUnmapGpaRange immediately followed by
2150 * WHvMapGpaRange.
2151 *
2152 * As mentioned above, we've been forced to use this sequence when modifying
2153 * page protection. However, when transitioning from readonly to writable,
2154 * we've ended up looping forever with the same write to readonly memory
2155 * VMEXIT. We're wondering if this issue might be related to the lazy mapping
2156 * logic in WinHvPlatform.
2157 *
2158 * Workaround: Insert a WHvRunVirtualProcessor call and make sure to get a GPA
2159 *    unmapped exit between the two calls.  Not entirely great performance-wise
2160 *    (or for the sanity of our code).
2161 *
2162 *
2163 *  - Implementing A20 gate behavior is tedious, whereas correctly emulating the
2164 *    A20M# pin (present on 486 and later) is near impossible for SMP setups
2165 *    (e.g. the possibility of two CPUs with different A20 status).
2166 *
2167 * Workaround: Only do A20 on CPU 0, restricting the emulation to HMA. We
2168 * unmap all pages related to HMA (0x100000..0x10ffff) when the A20 state
2169 * changes, lazily syncing the right pages back when accessed.
2170 *
2171 *
2172 *  - WHvRunVirtualProcessor wastes time converting VID/Hyper-V messages to its
2173 * own format (WHV_RUN_VP_EXIT_CONTEXT).
2174 *
2175 * We understand this might be because Microsoft wishes to remain free to
2176 * modify the VID/Hyper-V messages, but it's still rather silly and does slow
2177 * things down a little. We'd much rather just process the messages directly.
2178 *
2179 *
2180 *  - WHvRunVirtualProcessor would've benefited from using a callback interface:
2181 *
2182 * - The potential size changes of the exit context structure wouldn't be
2183 * an issue, since the function could manage that itself.
2184 *
2185 * - State handling could probably be simplified (like cancelation).
2186 *
2187 *
2188 * - WHvGetVirtualProcessorRegisters and WHvSetVirtualProcessorRegisters
2189 * internally converts register names, probably using temporary heap buffers.
2190 *
2191 * From the looks of things, they are converting from WHV_REGISTER_NAME to
2192 *    HV_REGISTER_NAME as found in the "Virtual Processor Register Names" section in
2193 * the "Hypervisor Top-Level Functional Specification" document. This feels
2194 * like an awful waste of time.
2195 *
2196 * We simply cannot understand why HV_REGISTER_NAME isn't used directly here,
2197 *    or at least the same values, making any conversion redundant.  Restricting
2198 *    access to certain registers could easily be implemented by scanning the
2199 * inputs.
2200 *
2201 * To avoid the heap + conversion overhead, we're currently using the
2202 * HvCallGetVpRegisters and HvCallSetVpRegisters calls directly, at least for
2203 * the ring-0 code.
2204 *
2205 * Update [build 17757]: Register translation has been very cleverly
2206 * optimized and made table driven (2 top level tables, 4 + 1 leaf tables).
2207 * Register information consists of the 32-bit HV register name, register page
2208 * offset, and flags (giving valid offset, size and more). Register
2209 * getting/settings seems to be done by hoping that the register page provides
2210 *    getting/setting seems to be done by hoping that the register page provides
2211 * registers are not available there.
2212 *
2213 * Note! We have currently not updated our ring-0 code to take the register
2214 * page into account, so it's suffering a little compared to the ring-3 code
2215 *    that now uses the official APIs for registers.
2216 *
2217 *
2218 * - The YMM and XCR0 registers are not yet named (17083). This probably
2219 * wouldn't be a problem if HV_REGISTER_NAME was used, see previous point.
2220 *
2221 *    Update [build 17757]: XCR0 is added.  YMM register values seem to be put
2222 *    into an as yet undocumented XsaveState interface.  The approach is a little bulky,
2223 *    but saves on the number of enums and dispenses with register translation.  Also,
2224 * the underlying Vid setter API duplicates the input buffer on the heap,
2225 * adding a 16 byte header.
2226 *
2227 *
2228 *  - Why does VID.SYS only query/set 32 registers at a time thru the
2229 * HvCallGetVpRegisters and HvCallSetVpRegisters hypercalls?
2230 *
2231 *    We've had no trouble getting/setting all the registers defined by
2232 * WHV_REGISTER_NAME in one hypercall (around 80). Some kind of stack
2233 * buffering or similar?
2234 *
2235 *
2236 * - To handle the VMMCALL / VMCALL instructions, it seems we need to intercept
2237 * \#UD exceptions and inspect the opcodes. A dedicated exit for hypercalls
2238 *    would be more efficient, esp. for guests using \#UD for other purposes.
2239 *
2240 *
2241 * - Wrong instruction length in the VpContext with unmapped GPA memory exit
2242 * contexts on 17115/AMD.
2243 *
2244 * One byte "PUSH CS" was reported as 2 bytes, while a two byte
2245 *    "MOV [EBX],EAX" was reported with a 1 byte instruction length.  The problem is
2246 *    naturally present in the untranslated hyper-V messages too.
2247 *
2248 *
2249 * - The I/O port exit context information seems to be missing the address size
2250 * information needed for correct string I/O emulation.
2251 *
2252 * VT-x provides this information in bits 7:9 in the instruction information
2253 * field on newer CPUs. AMD-V in bits 7:9 in the EXITINFO1 field in the VMCB.
2254 *
2255 * We can probably work around this by scanning the instruction bytes for
2256 * address size prefixes. Haven't investigated it any further yet.
2257 *
2258 *
2259 * - Querying WHvCapabilityCodeExceptionExitBitmap returns zero even when
2260 *    intercepts demonstrably work (17134).
2261 *
2262 *
2263 * - Querying HvPartitionPropertyDebugChannelId via HvCallGetPartitionProperty
2264 * (hypercall) hangs the host (17134).
2265 *
2266 *
2267 *
2268 * Old concerns that have been addressed:
2269 *
2270 * - The WHvCancelVirtualProcessor API schedules a dummy usermode APC callback
2271 * in order to cancel any current or future alertable wait in VID.SYS during
2272 * the VidMessageSlotHandleAndGetNext call.
2273 *
2274 * IIRC this will make the kernel schedule the specified callback thru
2275 * NTDLL!KiUserApcDispatcher by modifying the thread context and quite
2276 * possibly the userland thread stack. When the APC callback returns to
2277 * KiUserApcDispatcher, it will call NtContinue to restore the old thread
2278 * context and resume execution from there. This naturally adds up to some
2279 * CPU cycles, ring transitions aren't for free, especially after Spectre &
2280 *    CPU cycles; ring transitions aren't free, especially after Spectre &
2281 *
2282 *    Using an NtAlertThread call could do the same without the thread context
2283 * modifications and the extra kernel call.
2284 *
2285 *    Update: All concerns have been addressed in or about build 17757.
2286 *
2287 * The WHvCancelVirtualProcessor API is now implemented using a new
2288 * VidMessageSlotHandleAndGetNext() flag (4). Codepath is slightly longer
2289 * than NtAlertThread, but has the added benefit that spurious wakeups can be
2290 * more easily reduced.
2291 *
2292 *
2293 * - When WHvRunVirtualProcessor returns without a message, or on a terse
2294 * VID message like HLT, it will make a kernel call to get some registers.
2295 * This is potentially inefficient if the caller decides he needs more
2296 * register state.
2297 *
2298 * It would be better to just return what's available and let the caller fetch
2299 * what is missing from his point of view in a single kernel call.
2300 *
2301 * Update: All concerns have been addressed in or about build 17757. Selected
2302 * registers are now available via shared memory and thus HLT should (not
2303 * verified) no longer require a system call to compose the exit context data.
2304 *
2305 *
2306 * - The WHvRunVirtualProcessor implementation does lazy GPA range mappings when
2307 *    an unmapped GPA message is received from hyper-V.
2308 *
2309 * Since MMIO is currently realized as unmapped GPA, this will slow down all
2310 * MMIO accesses a tiny little bit as WHvRunVirtualProcessor looks up the
2311 * guest physical address to check if it is a pending lazy mapping.
2312 *
2313 * The lazy mapping feature makes no sense to us. We as API user have all the
2314 * information and can do lazy mapping ourselves if we want/have to (see next
2315 * point).
2316 *
2317 * Update: All concerns have been addressed in or about build 17757.
2318 *
2319 *
2320 * - The WHvGetCapability function has a weird design:
2321 * - The CapabilityCode parameter is pointlessly duplicated in the output
2322 * structure (WHV_CAPABILITY).
2323 *
2324 * - API takes void pointer, but everyone will probably be using
2325 * WHV_CAPABILITY due to WHV_CAPABILITY::CapabilityCode making it
2326 * impractical to use anything else.
2327 *
2328 * - No output size.
2329 *
2330 * - See GetFileAttributesEx, GetFileInformationByHandleEx,
2331 * FindFirstFileEx, and others for typical pattern for generic
2332 * information getters.
2333 *
2334 * Update: All concerns have been addressed in build 17110.
2335 *
2336 *
2337 * - The WHvGetPartitionProperty function uses the same weird design as
2338 * WHvGetCapability, see above.
2339 *
2340 * Update: All concerns have been addressed in build 17110.
2341 *
2342 *
2343 * - The WHvSetPartitionProperty function has a totally weird design too:
2344 * - In contrast to its partner WHvGetPartitionProperty, the property code
2345 * is not a separate input parameter here but part of the input
2346 * structure.
2347 *
2348 * - The input structure is a void pointer rather than a pointer to
2349 * WHV_PARTITION_PROPERTY which everyone probably will be using because
2350 * of the WHV_PARTITION_PROPERTY::PropertyCode field.
2351 *
2352 * - Really, why use PVOID for the input when the function isn't accepting
2353 * minimal sizes. E.g. WHVPartitionPropertyCodeProcessorClFlushSize only
2354 * requires a 9 byte input, but the function insists on 16 bytes (17083).
2355 *
2356 * - See GetFileAttributesEx, SetFileInformationByHandle, FindFirstFileEx,
2357 * and others for typical pattern for generic information setters and
2358 * getters.
2359 *
2360 * Update: All concerns have been addressed in build 17110.
2361 *
2362 *
2363 *
2364 * @section sec_nem_win_impl Our implementation.
2365 *
2366 * We set out with the goal of wanting to run as much as possible in ring-0,
2367 * reasoning that this would give us the best performance.
2368 *
2369 * This goal was approached gradually, starting out with a pure WinHvPlatform
2370 * implementation, gradually replacing parts: register access, guest memory
2371 * handling, running virtual processors. Then finally moving it all into
2372 * ring-0, while keeping most of it configurable so that we could make
2373 * comparisons (see NEMInternal.h and nemR3NativeRunGC()).
2374 *
2375 *
2376 * @subsection subsect_nem_win_impl_ioctl VID.SYS I/O control calls
2377 *
2378 * To run things in ring-0 we need to talk directly to VID.SYS thru its I/O
2379 * control interface. Looking at changes between like build 17083 and 17101 (if
2380 * control interface.  Looking at changes between, say, build 17083 and 17101 (if
2381 * memory serves) a set of the VID I/O control numbers shifted a little, which
2382 * means we need to determine them dynamically.  We currently do this by hooking
2383 * the NtDeviceIoControlFile API call from VID.DLL and snooping up the
2384 * parameters when making dummy calls to relevant APIs.  (We could also
2385 * disassemble the relevant APIs and try to fish out the information from that, but
2386 *
2387 * Issuing I/O control calls from ring-0 is facing a small challenge with
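 * The interception itself is conceptually tiny; an illustrative detour for the
 * NtDeviceIoControlFile import could look like the sketch below (the names are
 * made up for the example and the actual import patching is left out), simply
 * recording the I/O control code before forwarding the call:
 * @verbatim
    static ULONG volatile g_uLastSeenIoCtl = 0;   /* illustrative only */

    static NTSTATUS NTAPI
    myNtDeviceIoControlFileDetour(HANDLE hFile, HANDLE hEvt, PIO_APC_ROUTINE pfnApc, PVOID pvApcCtx,
                                  PIO_STATUS_BLOCK pIos, ULONG uFunction, PVOID pvInput, ULONG cbInput,
                                  PVOID pvOutput, ULONG cbOutput)
    {
        g_uLastSeenIoCtl = uFunction;             /* snoop up the I/O control number */
        return NtDeviceIoControlFile(hFile, hEvt, pfnApc, pvApcCtx, pIos,
                                     uFunction, pvInput, cbInput, pvOutput, cbOutput);
    }
 * @endverbatim
 * After pointing VID.DLL's import at the detour, a dummy call into the relevant
 * wrapper API leaves the control code behind in the recorded variable.
 *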
2388 * respect to direct buffering. When using direct buffering the device will
2389 * typically check that the buffer is actually in the user address space range
2390 * and reject kernel addresses. Fortunately, we've got the cross context VM
2391 * structure that is mapped into both kernel and user space; it's also locked
2392 * and safe to access from kernel space. So, we place the I/O control buffers
2393 * in the per-CPU part of it (NEMCPU::uIoCtlBuf) and give the driver the user
2394 * address when direct buffering is used, or the kernel address if not.
2395 *
2396 * The I/O control calls are 'abstracted' in the support driver, see
2397 * SUPR0IoCtlSetupForHandle(), SUPR0IoCtlPerform() and SUPR0IoCtlCleanup().
2398 *
2399 *
2400 * @subsection subsect_nem_win_impl_cpumctx CPUMCTX
2401 *
2402 * Since the CPU state needs to live in Hyper-V when executing, we probably
2403 * should not transfer more than necessary when handling VMEXITs. To help us
2404 * manage this, CPUMCTX got a new field, CPUMCTX::fExtrn, to indicate which
2405 * part of the state is currently externalized (== in Hyper-V).
2406 *
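 * The usage pattern is to test the relevant CPUMCTX_EXTRN_XXX bit before touching
 * a piece of state and to import it from Hyper-V on demand if it is still
 * externalized; a minimal, illustrative sketch (the import helper named below is
 * a placeholder, not the actual function):
 * @verbatim
    if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_RIP)
        rc = myImportGuestStateFromHyperV(pVCpu, CPUMCTX_EXTRN_RIP);  /* placeholder helper */
    uint64_t const uGuestRip = pVCpu->cpum.GstCtx.rip;                /* now safe to read */
 * @endverbatim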
2407 *
2408 * @subsection sec_nem_win_benchmarks Benchmarks.
2409 *
2410 * @subsubsection subsect_nem_win_benchmarks_bs2t1 17134/2018-06-22: Bootsector2-test1
2411 *
2412 * This is ValidationKit/bootsectors/bootsector2-test1.asm as of 2018-06-22
2413 * (internal r123172) running the release build of VirtualBox from the same
2414 * source, though with exit optimizations disabled.  Host is an AMD Threadripper 1950X
2415 * running an up-to-date 64-bit Windows 10 build 17134.
2416 *
2417 * The baseline column is using the official WinHv API for everything but physical
2418 * memory mapping. The 2nd column is the default NEM/win configuration where we
2419 * put the main execution loop in ring-0, using hypercalls when we can and VID for
2420 * managing execution. The 3rd column is regular VirtualBox using AMD-V directly,
2421 * with Hyper-V disabled and the main execution loop in ring-0.
2422 *
2423 * @verbatim
2424TESTING... WinHv API Hypercalls + VID VirtualBox AMD-V
2425 32-bit paged protected mode, CPUID : 108 874 ins/sec 113% / 123 602 1198% / 1 305 113
2426 32-bit pae protected mode, CPUID : 106 722 ins/sec 115% / 122 740 1232% / 1 315 201
2427 64-bit long mode, CPUID : 106 798 ins/sec 114% / 122 111 1198% / 1 280 404
2428 16-bit unpaged protected mode, CPUID : 106 835 ins/sec 114% / 121 994 1216% / 1 299 665
2429 32-bit unpaged protected mode, CPUID : 105 257 ins/sec 115% / 121 772 1235% / 1 300 860
2430 real mode, CPUID : 104 507 ins/sec 116% / 121 800 1228% / 1 283 848
2431CPUID EAX=1 : PASSED
2432 32-bit paged protected mode, RDTSC : 99 581 834 ins/sec 100% / 100 323 307 93% / 93 473 299
2433 32-bit pae protected mode, RDTSC : 99 620 585 ins/sec 100% / 99 960 952 84% / 83 968 839
2434 64-bit long mode, RDTSC : 100 540 009 ins/sec 100% / 100 946 372 93% / 93 652 826
2435 16-bit unpaged protected mode, RDTSC : 99 688 473 ins/sec 100% / 100 097 751 76% / 76 281 287
2436 32-bit unpaged protected mode, RDTSC : 98 385 857 ins/sec 102% / 100 510 404 94% / 93 379 536
2437 real mode, RDTSC : 100 087 967 ins/sec 101% / 101 386 138 93% / 93 234 999
2438RDTSC : PASSED
2439 32-bit paged protected mode, Read CR4 : 2 156 102 ins/sec 98% / 2 121 967 17114% / 369 009 009
2440 32-bit pae protected mode, Read CR4 : 2 163 820 ins/sec 98% / 2 133 804 17469% / 377 999 261
2441 64-bit long mode, Read CR4 : 2 164 822 ins/sec 98% / 2 128 698 18875% / 408 619 313
2442 16-bit unpaged protected mode, Read CR4 : 2 162 367 ins/sec 100% / 2 168 508 17132% / 370 477 568
2443 32-bit unpaged protected mode, Read CR4 : 2 163 189 ins/sec 100% / 2 169 808 16768% / 362 734 679
2444 real mode, Read CR4 : 2 162 436 ins/sec 100% / 2 164 914 15551% / 336 288 998
2445Read CR4 : PASSED
2446 real mode, 32-bit IN : 104 649 ins/sec 118% / 123 513 1028% / 1 075 831
2447 real mode, 32-bit OUT : 107 102 ins/sec 115% / 123 660 982% / 1 052 259
2448 real mode, 32-bit IN-to-ring-3 : 105 697 ins/sec 98% / 104 471 201% / 213 216
2449 real mode, 32-bit OUT-to-ring-3 : 105 830 ins/sec 98% / 104 598 198% / 210 495
2450 16-bit unpaged protected mode, 32-bit IN : 104 855 ins/sec 117% / 123 174 1029% / 1 079 591
2451 16-bit unpaged protected mode, 32-bit OUT : 107 529 ins/sec 115% / 124 250 992% / 1 067 053
2452 16-bit unpaged protected mode, 32-bit IN-to-ring-3 : 106 337 ins/sec 103% / 109 565 196% / 209 367
2453 16-bit unpaged protected mode, 32-bit OUT-to-ring-3 : 107 558 ins/sec 100% / 108 237 191% / 206 387
2454 32-bit unpaged protected mode, 32-bit IN : 106 351 ins/sec 116% / 123 584 1016% / 1 081 325
2455 32-bit unpaged protected mode, 32-bit OUT : 106 424 ins/sec 116% / 124 252 995% / 1 059 408
2456 32-bit unpaged protected mode, 32-bit IN-to-ring-3 : 104 035 ins/sec 101% / 105 305 202% / 210 750
2457 32-bit unpaged protected mode, 32-bit OUT-to-ring-3 : 103 831 ins/sec 102% / 106 919 205% / 213 198
2458 32-bit paged protected mode, 32-bit IN : 103 356 ins/sec 119% / 123 870 1041% / 1 076 463
2459 32-bit paged protected mode, 32-bit OUT : 107 177 ins/sec 115% / 124 302 998% / 1 069 655
2460 32-bit paged protected mode, 32-bit IN-to-ring-3 : 104 491 ins/sec 100% / 104 744 200% / 209 264
2461 32-bit paged protected mode, 32-bit OUT-to-ring-3 : 106 603 ins/sec 97% / 103 849 197% / 210 219
2462 32-bit pae protected mode, 32-bit IN : 105 923 ins/sec 115% / 122 759 1041% / 1 103 261
2463 32-bit pae protected mode, 32-bit OUT : 107 083 ins/sec 117% / 126 057 1024% / 1 096 667
2464 32-bit pae protected mode, 32-bit IN-to-ring-3 : 106 114 ins/sec 97% / 103 496 199% / 211 312
2465 32-bit pae protected mode, 32-bit OUT-to-ring-3 : 105 675 ins/sec 96% / 102 096 198% / 209 890
2466 64-bit long mode, 32-bit IN : 105 800 ins/sec 113% / 120 006 1013% / 1 072 116
2467 64-bit long mode, 32-bit OUT : 105 635 ins/sec 113% / 120 375 997% / 1 053 655
2468 64-bit long mode, 32-bit IN-to-ring-3 : 105 274 ins/sec 95% / 100 763 197% / 208 026
2469 64-bit long mode, 32-bit OUT-to-ring-3 : 106 262 ins/sec 94% / 100 749 196% / 209 288
2470NOP I/O Port Access : PASSED
2471 32-bit paged protected mode, 32-bit read : 57 687 ins/sec 119% / 69 136 1197% / 690 548
2472 32-bit paged protected mode, 32-bit write : 57 957 ins/sec 118% / 68 935 1183% / 685 930
2473 32-bit paged protected mode, 32-bit read-to-ring-3 : 57 958 ins/sec 95% / 55 432 276% / 160 505
2474 32-bit paged protected mode, 32-bit write-to-ring-3 : 57 922 ins/sec 100% / 58 340 304% / 176 464
2475 32-bit pae protected mode, 32-bit read : 57 478 ins/sec 119% / 68 453 1141% / 656 159
2476 32-bit pae protected mode, 32-bit write : 57 226 ins/sec 118% / 68 097 1157% / 662 504
2477 32-bit pae protected mode, 32-bit read-to-ring-3 : 57 582 ins/sec 94% / 54 651 268% / 154 867
2478 32-bit pae protected mode, 32-bit write-to-ring-3 : 57 697 ins/sec 100% / 57 750 299% / 173 030
2479 64-bit long mode, 32-bit read : 57 128 ins/sec 118% / 67 779 1071% / 611 949
2480 64-bit long mode, 32-bit write : 57 127 ins/sec 118% / 67 632 1084% / 619 395
2481 64-bit long mode, 32-bit read-to-ring-3 : 57 181 ins/sec 94% / 54 123 265% / 151 937
2482 64-bit long mode, 32-bit write-to-ring-3 : 57 297 ins/sec 99% / 57 286 294% / 168 694
2483 16-bit unpaged protected mode, 32-bit read : 58 827 ins/sec 118% / 69 545 1185% / 697 602
2484 16-bit unpaged protected mode, 32-bit write : 58 678 ins/sec 118% / 69 442 1183% / 694 387
2485 16-bit unpaged protected mode, 32-bit read-to-ring-3 : 57 841 ins/sec 96% / 55 730 275% / 159 163
2486 16-bit unpaged protected mode, 32-bit write-to-ring-3 : 57 855 ins/sec 101% / 58 834 304% / 176 169
2487 32-bit unpaged protected mode, 32-bit read : 58 063 ins/sec 120% / 69 690 1233% / 716 444
2488 32-bit unpaged protected mode, 32-bit write : 57 936 ins/sec 120% / 69 633 1199% / 694 753
2489 32-bit unpaged protected mode, 32-bit read-to-ring-3 : 58 451 ins/sec 96% / 56 183 273% / 159 972
2490 32-bit unpaged protected mode, 32-bit write-to-ring-3 : 58 962 ins/sec 99% / 58 955 298% / 175 936
2491 real mode, 32-bit read : 58 571 ins/sec 118% / 69 478 1160% / 679 917
2492 real mode, 32-bit write : 58 418 ins/sec 118% / 69 320 1185% / 692 513
2493 real mode, 32-bit read-to-ring-3 : 58 072 ins/sec 96% / 55 751 274% / 159 145
2494 real mode, 32-bit write-to-ring-3 : 57 870 ins/sec 101% / 58 755 307% / 178 042
2495NOP MMIO Access : PASSED
2496SUCCESS
2497 * @endverbatim
2498 *
2499 * What we see here is:
2500 *
2501 * - The WinHv API approach is 10 to 12 times slower for exits we can
2502 * handle directly in ring-0 in the VBox AMD-V code.
2503 *
2504 * - The WinHv API approach is 2 to 3 times slower for exits we have to
2505 * go to ring-3 to handle with the VBox AMD-V code.
2506 *
2507 * - By using hypercalls and VID.SYS from ring-0 we gain between
2508 * 13% and 20% over the WinHv API on exits handled in ring-0.
2509 *
2510 *    - Exits requiring ring-3 handling are between 6% slower and 3% faster
2511 *      than with the WinHv API.
2512 *
2513 *
2514 * As a side note, it looks like Hyper-V doesn't let the guest read CR4
2515 * directly but triggers an exit every time. This isn't all that important these days since
2516 * OSes like Linux cache the CR4 value specifically to avoid these kinds of exits.
2517 *
2518 *
2519 * @subsubsection subsect_nem_win_benchmarks_bs2t1u1 17134/2018-10-02: Bootsector2-test1
2520 *
2521 * Update on 17134. We tested a couple of newer builds (17758, 17763), hoping
2522 * for some increase in performance, but the numbers turned out altogether
2523 * worse than in the June test run. So, we went back to the 1803 (17134)
2524 * installation, made sure it was fully up to date (as per 2018-10-02)
2525 * and re-tested.
2526 *
2527 * The numbers had somehow turned significantly worse over the last 3-4 months,
2528 * dropping around 70% for the WinHv API test and even more for Hypercalls + VID.
2529 *
2530 * @verbatim
2531TESTING... WinHv API Hypercalls + VID VirtualBox AMD-V *
2532 32-bit paged protected mode, CPUID : 33 270 ins/sec 33 154
2533 real mode, CPUID : 33 534 ins/sec 32 711
2534 [snip]
2535 32-bit paged protected mode, RDTSC : 102 216 011 ins/sec 98 225 419
2536 real mode, RDTSC : 102 492 243 ins/sec 98 225 419
2537 [snip]
2538 32-bit paged protected mode, Read CR4 : 2 096 165 ins/sec 2 123 815
2539 real mode, Read CR4 : 2 081 047 ins/sec 2 075 151
2540 [snip]
2541 32-bit paged protected mode, 32-bit IN : 32 739 ins/sec 33 655
2542 32-bit paged protected mode, 32-bit OUT : 32 702 ins/sec 33 777
2543 32-bit paged protected mode, 32-bit IN-to-ring-3 : 32 579 ins/sec 29 985
2544 32-bit paged protected mode, 32-bit OUT-to-ring-3 : 32 750 ins/sec 29 757
2545 [snip]
2546 32-bit paged protected mode, 32-bit read : 20 042 ins/sec 21 489
2547 32-bit paged protected mode, 32-bit write : 20 036 ins/sec 21 493
2548 32-bit paged protected mode, 32-bit read-to-ring-3 : 19 985 ins/sec 19 143
2549 32-bit paged protected mode, 32-bit write-to-ring-3 : 19 972 ins/sec 19 595
2550
2551 * @endverbatim
2552 *
2553 * Suspects are security updates and/or microcode updates installed since then.
2554 * Given that the RDTSC and CR4 numbers are reasonably unchanged, it seems that
2555 * the Hyper-V core loop (in hvax64.exe) isn't affected. Our ring-0 run loop
2556 * is affected just as much as the ring-3 based one, so it cannot be ring
2557 * switching as such (unless the ring-0 loop is borked and we didn't notice yet).
2558 *
2559 * The issue is probably in the thread / process switching area; it could be
2560 * something special for Hyper-V interrupt delivery or worker thread switching.
2561 *
2562 * Really wish this thread ping-pong going on in VID.SYS could be eliminated!
2563 *
2564 *
2565 * @subsubsection subsect_nem_win_benchmarks_bs2t1u2 17763: Bootsector2-test1
2566 *
2567 * Some preliminary numbers for build 17763 on the 3.4 GHz AMD 1950X; the second
2568 * column should improve once we get time to have a look at the register page.
2569 *
2570 * There is a 50% performance loss here compared to the June numbers with
2571 * build 17134. The RDTSC numbers hint that the problem isn't in the Hyper-V
2572 * core (hvax64.exe), but somewhere on the NT side.
2573 *
2574 * Clearing bit 20 in nt!KiSpeculationFeatures speeds things up (i.e. changing
2575 * the dword from 0x00300065 to 0x00200065 in windbg). The bit is checked by
2576 * nt!KePrepareToDispatchVirtualProcessor, which becomes a no-op when the flag
2577 * is clear. winhvr!WinHvpVpDispatchLoop calls that function before making
2578 * hypercall 0xc2, which presumably does the heavy VCpu lifting in hvcax64.exe.
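 *
 * A minimal kernel debugger sketch of that tweak (a hedged illustration, not a
 * transcript of an actual session; dd/ed are the standard windbg dump/edit
 * dword commands, and the value is the one observed above, which may differ
 * between builds):
 * @verbatim
kd> $$ value was 0x00300065 on this build; write it back with bit 20 cleared
kd> dd nt!KiSpeculationFeatures L1
kd> ed nt!KiSpeculationFeatures 0x00200065
 * @endverbatim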
2579 *
2580 * @verbatim
2581TESTING... WinHv API Hypercalls + VID clr(bit-20) + WinHv API
2582 32-bit paged protected mode, CPUID : 54 145 ins/sec 51 436 130 076
2583 real mode, CPUID : 54 178 ins/sec 51 713 130 449
2584 [snip]
2585 32-bit paged protected mode, RDTSC : 98 927 639 ins/sec 100 254 552 100 549 882
2586 real mode, RDTSC : 99 601 206 ins/sec 100 886 699 100 470 957
2587 [snip]
2588 32-bit paged protected mode, 32-bit IN : 54 621 ins/sec 51 524 128 294
2589 32-bit paged protected mode, 32-bit OUT : 54 870 ins/sec 51 671 129 397
2590 32-bit paged protected mode, 32-bit IN-to-ring-3 : 54 624 ins/sec 43 964 127 874
2591 32-bit paged protected mode, 32-bit OUT-to-ring-3 : 54 803 ins/sec 44 087 129 443
2592 [snip]
2593 32-bit paged protected mode, 32-bit read : 28 230 ins/sec 34 042 48 113
2594 32-bit paged protected mode, 32-bit write : 27 962 ins/sec 34 050 48 069
2595 32-bit paged protected mode, 32-bit read-to-ring-3 : 27 841 ins/sec 28 397 48 146
2596 32-bit paged protected mode, 32-bit write-to-ring-3 : 27 896 ins/sec 29 455 47 970
2597 * @endverbatim
2598 *
2599 *
2600 * @subsubsection subsect_nem_win_benchmarks_w2k 17134/2018-06-22: Windows 2000 Boot & Shutdown
2601 *
2602 * Timing the startup and automatic shutdown of a Windows 2000 SP4 guest serves
2603 * as a real world benchmark and an example of why exit performance is important.
2604 * When Windows 2000 boots up, it does a lot of VGA redrawing of the boot
2605 * animation, which is very costly. Not having installed guest additions leaves
2606 * it in a VGA mode after the bootup sequence is done, keeping up the screen access
2607 * expenses, though the graphics driver is more economical than the bootvid code.
2608 *
2609 * The VM was configured to log on automatically. A startup script was installed
2610 * to perform the automatic shutdown and powering off of the VM (thru
2611 * vts_shutdown.exe -f -p). An offline snapshot of the VM was taken and restored
2612 * before each test run. The test run time is calculated from the monotonic
2613 * VBox.log timestamps, starting with the state change to 'RUNNING' and stopping
2614 * at 'POWERING_OFF'.
2615 *
2616 * The host OS and VirtualBox build are the same as for the bootsector2-test1
2617 * scenario.
2618 *
2619 * Results:
2620 *
2621 * - WinHv API for all but physical page mappings:
2622 * 32 min 12.19 seconds
2623 *
2624 * - The default NEM/win configuration where we put the main execution loop
2625 * in ring-0, using hypercalls when we can and VID for managing execution:
2626 * 3 min 23.18 seconds
2627 *
2628 *    - Regular VirtualBox using AMD-V directly, Hyper-V is disabled, main
2629 * execution loop in ring-0:
2630 * 58.09 seconds
2631 *
2632 * - WinHv API with exit history based optimizations:
2633 * 58.66 seconds
2634 *
2635 *    - Hypercall + VID.SYS with exit history based optimizations:
2636 * 58.94 seconds
2637 *
2638 * A well above average machine needing over half an hour to boot a nearly
2639 * 20 year old guest kind of says it all. The 13%-20% exit performance
2640 * increase we get by using hypercalls and VID.SYS directly pays off a lot here.
2641 * The 3m23s is almost acceptable in comparison to the half an hour.
2642 *
2643 * The similarity between the last three results strongly hints that Windows 2000
2644 * does a lot of waiting during boot and shutdown and isn't the best testcase
2645 * once a basic performance level is reached.
2646 *
2647 *
2648 * @subsubsection subsection_iem_win_benchmarks_deb9_nat Debian 9 NAT performance
2649 *
2650 * This benchmark is about network performance over NAT from a 64-bit Debian 9
2651 * VM with a single CPU. For network performance measurements, we use our own
2652 * NetPerf tool (ValidationKit/utils/network/NetPerf.cpp) to measure latency
2653 * and throughput.
2654 *
2655 * The setups, builds and configurations are as in the previous benchmarks
2656 * (release r123172 on the 1950X running 64-bit W10/17134, 2018-06-xx). Please
2657 * note that the exit optimizations haven't yet been tuned with NetPerf in mind.
2658 *
2659 * The NAT network setup was selected here since it's the default one and the
2660 * slowest one. There is quite a bit of IPC with worker threads and packet
2661 * processing involved.
2662 *
2663 * The latency test is first up. This is a classic back and forth between the
2664 * two NetPerf instances, where the key measurement is the roundtrip latency.
2665 * The values here are the lowest result over 3-6 runs.
2666 *
2667 * Against host system:
2668 * - 152 258 ns/roundtrip - 100% - regular VirtualBox SVM
2669 * - 271 059 ns/roundtrip - 178% - Hypercalls + VID.SYS in ring-0 with exit optimizations.
2670 * - 280 149 ns/roundtrip - 184% - Hypercalls + VID.SYS in ring-0
2671 * - 317 735 ns/roundtrip - 209% - Win HV API with exit optimizations.
2672 * - 342 440 ns/roundtrip - 225% - Win HV API
2673 *
2674 * Against a remote Windows 10 system over a 10Gbps link:
2675 * - 243 969 ns/roundtrip - 100% - regular VirtualBox SVM
2676 * - 384 427 ns/roundtrip - 158% - Win HV API with exit optimizations.
2677 * - 402 411 ns/roundtrip - 165% - Hypercalls + VID.SYS in ring-0
2678 * - 406 313 ns/roundtrip - 167% - Win HV API
2679 * - 413 160 ns/roundtrip - 169% - Hypercalls + VID.SYS in ring-0 with exit optimizations.
2680 *
2681 * What we see here is:
2682 *
2683 *    - A consistent and significant latency increase using Hyper-V compared
2684 * to directly harnessing AMD-V ourselves.
2685 *
2686 * - When talking to the host, it's clear that the hypercalls + VID.SYS
2687 * in ring-0 method pays off.
2688 *
2689 * - When talking to a different host, the numbers are closer and it
2690 *      is no longer clear which Hyper-V execution method is better.
2691 *
2692 *
2693 * Throughput benchmarks are performed by one side pushing data full throttle
2694 * for 10 seconds (minus 1 second at each end of the test), then reversing
2695 * the roles and measuring it in the other direction. The tests were run 3-5 times
2696 * and below are the highest and lowest results in each direction.
2697 *
2698 * Receiving from host system:
2699 * - Regular VirtualBox SVM:
2700 * Max: 96 907 549 bytes/s - 100%
2701 * Min: 86 912 095 bytes/s - 100%
2702 * - Hypercalls + VID.SYS in ring-0:
2703 * Max: 84 036 544 bytes/s - 87%
2704 * Min: 64 978 112 bytes/s - 75%
2705 * - Hypercalls + VID.SYS in ring-0 with exit optimizations:
2706 * Max: 77 760 699 bytes/s - 80%
2707 * Min: 72 677 171 bytes/s - 84%
2708 * - Win HV API with exit optimizations:
2709 * Max: 64 465 905 bytes/s - 67%
2710 * Min: 62 286 369 bytes/s - 72%
2711 * - Win HV API:
2712 * Max: 62 466 631 bytes/s - 64%
2713 * Min: 61 362 782 bytes/s - 70%
2714 *
2715 * Sending to the host system:
2716 * - Regular VirtualBox SVM:
2717 * Max: 87 728 652 bytes/s - 100%
2718 * Min: 86 923 198 bytes/s - 100%
2719 * - Hypercalls + VID.SYS in ring-0:
2720 * Max: 84 280 749 bytes/s - 96%
2721 * Min: 78 369 842 bytes/s - 90%
2722 * - Hypercalls + VID.SYS in ring-0 with exit optimizations:
2723 * Max: 84 119 932 bytes/s - 96%
2724 * Min: 77 396 811 bytes/s - 89%
2725 * - Win HV API:
2726 * Max: 81 714 377 bytes/s - 93%
2727 * Min: 78 697 419 bytes/s - 91%
2728 * - Win HV API with exit optimizations:
2729 * Max: 80 502 488 bytes/s - 91%
2730 * Min: 71 164 978 bytes/s - 82%
2731 *
2732 * Receiving from a remote Windows 10 system over a 10Gbps link:
2733 * - Hypercalls + VID.SYS in ring-0:
2734 * Max: 115 346 922 bytes/s - 136%
2735 * Min: 112 912 035 bytes/s - 137%
2736 * - Regular VirtualBox SVM:
2737 * Max: 84 517 504 bytes/s - 100%
2738 * Min: 82 597 049 bytes/s - 100%
2739 * - Hypercalls + VID.SYS in ring-0 with exit optimizations:
2740 * Max: 77 736 251 bytes/s - 92%
2741 * Min: 73 813 784 bytes/s - 89%
2742 * - Win HV API with exit optimizations:
2743 * Max: 63 035 587 bytes/s - 75%
2744 * Min: 57 538 380 bytes/s - 70%
2745 * - Win HV API:
2746 * Max: 62 279 185 bytes/s - 74%
2747 * Min: 56 813 866 bytes/s - 69%
2748 *
2749 * Sending to a remote Windows 10 system over a 10Gbps link:
2750 * - Win HV API with exit optimizations:
2751 * Max: 116 502 357 bytes/s - 103%
2752 * Min: 49 046 550 bytes/s - 59%
2753 * - Regular VirtualBox SVM:
2754 * Max: 113 030 991 bytes/s - 100%
2755 * Min: 83 059 511 bytes/s - 100%
2756 * - Hypercalls + VID.SYS in ring-0:
2757 * Max: 106 435 031 bytes/s - 94%
2758 * Min: 47 253 510 bytes/s - 57%
2759 * - Hypercalls + VID.SYS in ring-0 with exit optimizations:
2760 * Max: 94 842 287 bytes/s - 84%
2761 * Min: 68 362 172 bytes/s - 82%
2762 * - Win HV API:
2763 * Max: 65 165 225 bytes/s - 58%
2764 * Min: 47 246 573 bytes/s - 57%
2765 *
2766 * What we see here is:
2767 *
2768 *    - Again, consistent numbers when talking to the host, showing that the
2769 *      ring-0 approach is preferable to the ring-3 one.
2770 *
2771 *    - Again, when talking to a remote host, things get more difficult to
2772 *      make sense of. The spread is larger and direct AMD-V gets beaten by
2773 *      a different Hyper-V approach in each direction.
2774 *
2775 *    - However, if we treat the first entry in each direction (remote host) as
2776 *      a weird spike, the other entries are consistently worse than direct
2777 *      AMD-V. For the send case we get really bad results for WinHV.
2778 *
2779 */
2780