VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/NEMR3Native-win-armv8.cpp@ 108368

Last change on this file since 108368 was 108366, checked in by vboxsync, 2 months ago

VMM/NEMR3Native-win-armv8.cpp: Fix todo, don't import the whole VM state when handling a VM exit except for when state logging is enabled, bugref:10392

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 147.5 KB
Line 
1/* $Id: NEMR3Native-win-armv8.cpp 108366 2025-02-25 10:35:57Z vboxsync $ */
2/** @file
3 * NEM - Native execution manager, native ring-3 Windows backend.
4 *
5 * Log group 2: Exit logging.
6 * Log group 3: Log context on exit.
7 * Log group 5: Ring-3 memory management
8 * Log group 6: Ring-0 memory management
9 * Log group 12: API intercepts.
10 */
11
12/*
13 * Copyright (C) 2018-2024 Oracle and/or its affiliates.
14 *
15 * This file is part of VirtualBox base platform packages, as
16 * available from https://www.virtualbox.org.
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation, in version 3 of the
21 * License.
22 *
23 * This program is distributed in the hope that it will be useful, but
24 * WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
26 * General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, see <https://www.gnu.org/licenses>.
30 *
31 * SPDX-License-Identifier: GPL-3.0-only
32 */
33
34
35/*********************************************************************************************************************************
36* Header Files *
37*********************************************************************************************************************************/
38#define LOG_GROUP LOG_GROUP_NEM
39#define VMCPU_INCL_CPUM_GST_CTX
40#include <iprt/nt/nt-and-windows.h>
41#include <iprt/nt/hyperv.h>
42#include <WinHvPlatform.h>
43
44#ifndef _WIN32_WINNT_WIN10
45# error "Missing _WIN32_WINNT_WIN10"
46#endif
47#ifndef _WIN32_WINNT_WIN10_RS1 /* Missing define, causing trouble for us. */
48# define _WIN32_WINNT_WIN10_RS1 (_WIN32_WINNT_WIN10 + 1)
49#endif
50#include <sysinfoapi.h>
51#include <debugapi.h>
52#include <errhandlingapi.h>
53#include <fileapi.h>
54#include <winerror.h> /* no api header for this. */
55
56#include <VBox/dis.h>
57#include <VBox/vmm/nem.h>
58#include <VBox/vmm/iem.h>
59#include <VBox/vmm/em.h>
60#include <VBox/vmm/pdmapic.h>
61#include <VBox/vmm/pdm.h>
62#include <VBox/vmm/dbgftrace.h>
63#include "NEMInternal.h"
64#include <VBox/vmm/vmcc.h>
65
66#include <iprt/formats/arm-psci.h>
67
68#include <iprt/ldr.h>
69#include <iprt/path.h>
70#include <iprt/string.h>
71#include <iprt/system.h>
72#include <iprt/utf16.h>
73
#ifndef NTDDI_WIN10_VB /* Present in W10 2004 SDK, quite possibly earlier. */
/* Compat shim for older SDKs: declare the dirty-page bitmap API and the map
   flag ourselves.  The function is resolved dynamically (see g_aImports /
   g_pfnWHvQueryGpaRangeDirtyBitmap below), so no import library is needed. */
HRESULT WINAPI WHvQueryGpaRangeDirtyBitmap(WHV_PARTITION_HANDLE, WHV_GUEST_PHYSICAL_ADDRESS, UINT64, UINT64 *, UINT32);
# define WHvMapGpaRangeFlagTrackDirtyPages      ((WHV_MAP_GPA_RANGE_FLAGS)0x00000008)
#endif
78
79
/*
 * The following definitions appeared in build 27744 allow configuring the base address of the GICv3 controller,
 * (there is no official SDK for this yet).
 */
/** @todo Better way of defining these which doesn't require casting later on when calling APIs. */
#define WHV_PARTITION_PROPERTY_CODE_ARM64_IC_PARAMETERS UINT32_C(0x00001012)
/** No GIC present. */
#define WHV_ARM64_IC_EMULATION_MODE_NONE  0
/** Hyper-V emulates a GICv3. */
#define WHV_ARM64_IC_EMULATION_MODE_GICV3 1

/**
 * Configures the interrupt controller emulated by Hyper-V.
 *
 * Layout must match what the hypervisor expects (see the AssertCompileSize
 * below) -- do not reorder or resize members.
 */
typedef struct MY_WHV_ARM64_IC_PARAMETERS
{
    uint32_t u32EmulationMode;              /**< WHV_ARM64_IC_EMULATION_MODE_XXX. */
    uint32_t u32Rsvd;                       /**< Reserved / padding. */
    union
    {
        /** Parameters when u32EmulationMode is WHV_ARM64_IC_EMULATION_MODE_GICV3. */
        struct
        {
            RTGCPHYS GCPhysGicdBase;            /**< Guest physical base of the GIC distributor. */
            RTGCPHYS GCPhysGitsTranslaterBase;  /**< Guest physical base of the GIC ITS translator. */
            uint32_t u32Rsvd;                   /**< Reserved. */
            uint32_t cLpiIntIdBits;             /**< Number of LPI INTID bits. */
            uint32_t u32PpiCntvOverflw;         /**< Absolute INTID of the virtual timer PPI (see nemR3WinGicCreate). */
            uint32_t u32PpiPmu;                 /**< Absolute INTID of the PMU PPI. */
            uint32_t au32Rsvd[6];               /**< Reserved / padding up to 64 bytes. */
        } GicV3;
    } u;
} MY_WHV_ARM64_IC_PARAMETERS;
AssertCompileSize(MY_WHV_ARM64_IC_PARAMETERS, 64);
113
114
/**
 * The hypercall exit context.
 *
 * Size is checked below against the hypervisor ABI; do not alter the layout.
 */
typedef struct MY_WHV_HYPERCALL_CONTEXT
{
    WHV_INTERCEPT_MESSAGE_HEADER Header;    /**< Common intercept message header. */
    uint16_t Immediate;                     /**< Immediate value of the hypercall instruction. */
    uint16_t u16Rsvd;                       /**< Reserved. */
    uint32_t u32Rsvd;                       /**< Reserved. */
    uint64_t X[18];                         /**< Guest GPRs; presumably X0..X17 -- TODO confirm against Hyper-V docs. */
} MY_WHV_HYPERCALL_CONTEXT;
/** Pointer to a hypercall exit context. */
typedef MY_WHV_HYPERCALL_CONTEXT *PMY_WHV_HYPERCALL_CONTEXT;
AssertCompileSize(MY_WHV_HYPERCALL_CONTEXT, 24 + 19 * sizeof(uint64_t));
128
129
/**
 * The ARM64 reset context.
 */
typedef struct MY_WHV_ARM64_RESET_CONTEXT
{
    WHV_INTERCEPT_MESSAGE_HEADER Header;    /**< Common intercept message header. */
    uint32_t ResetType;                     /**< WHV_ARM64_RESET_CONTEXT_TYPE_XXX. */
    uint32_t u32Rsvd;                       /**< Reserved. */
} MY_WHV_ARM64_RESET_CONTEXT;
/** Pointer to an ARM64 reset context. */
typedef MY_WHV_ARM64_RESET_CONTEXT *PMY_WHV_ARM64_RESET_CONTEXT;
AssertCompileSize(MY_WHV_ARM64_RESET_CONTEXT, 24 + 2 * sizeof(uint32_t));


/** The guest requested a power off. */
#define WHV_ARM64_RESET_CONTEXT_TYPE_POWER_OFF  0
/** The guest requested a reset. */
#define WHV_ARM64_RESET_CONTEXT_TYPE_RESET      1
145
146
/**
 * The exit reason context for arm64, the size is different
 * from the default SDK we build against.
 */
typedef struct MY_WHV_RUN_VP_EXIT_CONTEXT
{
    WHV_RUN_VP_EXIT_REASON ExitReason;      /**< Why WHvRunVirtualProcessor returned. */
    uint32_t u32Rsvd;                       /**< Reserved. */
    uint64_t u64Rsvd;                       /**< Reserved. */
    union
    {
        WHV_MEMORY_ACCESS_CONTEXT           MemoryAccess;           /**< Guest memory access (MMIO) exit. */
        WHV_RUN_VP_CANCELED_CONTEXT         CancelReason;           /**< WHvCancelRunVirtualProcessor exit. */
        MY_WHV_HYPERCALL_CONTEXT            Hypercall;              /**< Hypercall exit. */
        WHV_UNRECOVERABLE_EXCEPTION_CONTEXT UnrecoverableException; /**< Unrecoverable exception exit. */
        MY_WHV_ARM64_RESET_CONTEXT          Arm64Reset;             /**< Power-off / reset request exit. */
        uint64_t                            au64Rsvd2[32];          /**< Pads the union to the full 256 bytes. */
    };
} MY_WHV_RUN_VP_EXIT_CONTEXT;
/** Pointer to an arm64 exit reason context. */
typedef MY_WHV_RUN_VP_EXIT_CONTEXT *PMY_WHV_RUN_VP_EXIT_CONTEXT;
AssertCompileSize(MY_WHV_RUN_VP_EXIT_CONTEXT, 272);

/** Register name for the GIC re-distributor base (not in the SDK headers we build against). */
#define My_WHvArm64RegisterGicrBaseGpa ((WHV_REGISTER_NAME)UINT32_C(0x00063000))
170
171
/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** @name APIs imported from WinHvPlatform.dll
 * (Resolved at runtime by nemR3WinInitProbeAndLoad via the g_aImports table below.)
 * @{ */
static decltype(WHvGetCapability) *                 g_pfnWHvGetCapability;
static decltype(WHvCreatePartition) *               g_pfnWHvCreatePartition;
static decltype(WHvSetupPartition) *                g_pfnWHvSetupPartition;
static decltype(WHvDeletePartition) *               g_pfnWHvDeletePartition;
static decltype(WHvGetPartitionProperty) *          g_pfnWHvGetPartitionProperty;
static decltype(WHvSetPartitionProperty) *          g_pfnWHvSetPartitionProperty;
static decltype(WHvMapGpaRange) *                   g_pfnWHvMapGpaRange;
static decltype(WHvUnmapGpaRange) *                 g_pfnWHvUnmapGpaRange;
static decltype(WHvTranslateGva) *                  g_pfnWHvTranslateGva;
static decltype(WHvQueryGpaRangeDirtyBitmap) *      g_pfnWHvQueryGpaRangeDirtyBitmap;
static decltype(WHvCreateVirtualProcessor) *        g_pfnWHvCreateVirtualProcessor;
static decltype(WHvDeleteVirtualProcessor) *        g_pfnWHvDeleteVirtualProcessor;
static decltype(WHvRunVirtualProcessor) *           g_pfnWHvRunVirtualProcessor;
static decltype(WHvCancelRunVirtualProcessor) *     g_pfnWHvCancelRunVirtualProcessor;
static decltype(WHvGetVirtualProcessorRegisters) *  g_pfnWHvGetVirtualProcessorRegisters;
static decltype(WHvSetVirtualProcessorRegisters) *  g_pfnWHvSetVirtualProcessorRegisters;
//static decltype(WHvGetVirtualProcessorState) *      g_pfnWHvGetVirtualProcessorState;
decltype(WHvRequestInterrupt) *                     g_pfnWHvRequestInterrupt; /**< Non-static: presumably referenced from another compilation unit -- TODO confirm. */
/** @} */

/** The Windows build number.
 * Defaults to 17134 (the W10 1803 release); presumably overwritten with the
 * detected value during init -- TODO confirm. */
static uint32_t g_uBuildNo = 17134;



/**
 * Import instructions.
 *
 * Drives the dynamic symbol resolution in nemR3WinInitProbeAndLoad.  Only
 * idxDll == 0 (WinHvPlatform.dll) is actually loaded by that function.
 */
static const struct
{
    uint8_t     idxDll;     /**< 0 for WinHvPlatform.dll, 1 for vid.dll. */
    bool        fOptional;  /**< Set if import is optional. */
    PFNRT      *ppfn;       /**< The function pointer variable. */
    const char *pszName;    /**< The function name. */
} g_aImports[] =
{
#define NEM_WIN_IMPORT(a_idxDll, a_fOptional, a_Name) { (a_idxDll), (a_fOptional), (PFNRT *)&RT_CONCAT(g_pfn,a_Name), #a_Name }
    NEM_WIN_IMPORT(0, false, WHvGetCapability),
    NEM_WIN_IMPORT(0, false, WHvCreatePartition),
    NEM_WIN_IMPORT(0, false, WHvSetupPartition),
    NEM_WIN_IMPORT(0, false, WHvDeletePartition),
    NEM_WIN_IMPORT(0, false, WHvGetPartitionProperty),
    NEM_WIN_IMPORT(0, false, WHvSetPartitionProperty),
    NEM_WIN_IMPORT(0, false, WHvMapGpaRange),
    NEM_WIN_IMPORT(0, false, WHvUnmapGpaRange),
    NEM_WIN_IMPORT(0, false, WHvTranslateGva),
    NEM_WIN_IMPORT(0, true,  WHvQueryGpaRangeDirtyBitmap),
    NEM_WIN_IMPORT(0, false, WHvCreateVirtualProcessor),
    NEM_WIN_IMPORT(0, false, WHvDeleteVirtualProcessor),
    NEM_WIN_IMPORT(0, false, WHvRunVirtualProcessor),
    NEM_WIN_IMPORT(0, false, WHvCancelRunVirtualProcessor),
    NEM_WIN_IMPORT(0, false, WHvGetVirtualProcessorRegisters),
    NEM_WIN_IMPORT(0, false, WHvSetVirtualProcessorRegisters),
//    NEM_WIN_IMPORT(0, false, WHvGetVirtualProcessorState),
    NEM_WIN_IMPORT(0, false, WHvRequestInterrupt),
#undef NEM_WIN_IMPORT
};
240
/*
 * Let the preprocessor alias the APIs to import variables for better autocompletion.
 */
#ifndef IN_SLICKEDIT
# define WHvGetCapability                           g_pfnWHvGetCapability
# define WHvCreatePartition                         g_pfnWHvCreatePartition
# define WHvSetupPartition                          g_pfnWHvSetupPartition
# define WHvDeletePartition                         g_pfnWHvDeletePartition
# define WHvGetPartitionProperty                    g_pfnWHvGetPartitionProperty
# define WHvSetPartitionProperty                    g_pfnWHvSetPartitionProperty
# define WHvMapGpaRange                             g_pfnWHvMapGpaRange
# define WHvUnmapGpaRange                           g_pfnWHvUnmapGpaRange
# define WHvTranslateGva                            g_pfnWHvTranslateGva
# define WHvQueryGpaRangeDirtyBitmap                g_pfnWHvQueryGpaRangeDirtyBitmap
# define WHvCreateVirtualProcessor                  g_pfnWHvCreateVirtualProcessor
# define WHvDeleteVirtualProcessor                  g_pfnWHvDeleteVirtualProcessor
# define WHvRunVirtualProcessor                     g_pfnWHvRunVirtualProcessor
/* NOTE(review): No g_pfnWHvGetRunExitContextSize variable is declared in this
   file; this alias (and the Vid* ones below) look like leftovers from the x86
   backend and are harmless unless the names are actually used -- TODO confirm. */
# define WHvGetRunExitContextSize                   g_pfnWHvGetRunExitContextSize
# define WHvCancelRunVirtualProcessor               g_pfnWHvCancelRunVirtualProcessor
# define WHvGetVirtualProcessorRegisters            g_pfnWHvGetVirtualProcessorRegisters
# define WHvSetVirtualProcessorRegisters            g_pfnWHvSetVirtualProcessorRegisters
//# define WHvGetVirtualProcessorState                g_pfnWHvGetVirtualProcessorState
# define WHvRequestInterrupt                        g_pfnWHvRequestInterrupt

# define VidMessageSlotHandleAndGetNext             g_pfnVidMessageSlotHandleAndGetNext
# define VidStartVirtualProcessor                   g_pfnVidStartVirtualProcessor
# define VidStopVirtualProcessor                    g_pfnVidStopVirtualProcessor

#endif

#if 0 /* unused */
/** WHV_MEMORY_ACCESS_TYPE names */
static const char * const g_apszWHvMemAccesstypes[4] = { "read", "write", "exec", "!undefined!" };
#endif
/** NEM_WIN_PAGE_STATE_XXX names. */
NEM_TMPL_STATIC const char * const g_apszPageStates[4] = { "not-set", "unmapped", "readable", "writable" };
#ifdef LOG_ENABLED
/** HV_INTERCEPT_ACCESS_TYPE names. */
static const char * const g_apszHvInterceptAccessTypes[4] = { "read", "write", "exec", "!undefined!" };
#endif
282
/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
/* Translate a guest physical address to a host R3 pointer (read-only / writeable). */
DECLINLINE(int) nemR3NativeGCPhys2R3PtrReadOnly(PVM pVM, RTGCPHYS GCPhys, const void **ppv);
DECLINLINE(int) nemR3NativeGCPhys2R3PtrWriteable(PVM pVM, RTGCPHYS GCPhys, void **ppv);

/* Worker updating the mapping/protection state of a single guest page. */
NEM_TMPL_STATIC int nemHCNativeSetPhysPage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
                                           uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged);
291
292/**
293 * Worker for nemR3NativeInit that probes and load the native API.
294 *
295 * @returns VBox status code.
296 * @param fForced Whether the HMForced flag is set and we should
297 * fail if we cannot initialize.
298 * @param pErrInfo Where to always return error info.
299 */
300static int nemR3WinInitProbeAndLoad(bool fForced, PRTERRINFO pErrInfo)
301{
302 /*
303 * Check that the DLL files we need are present, but without loading them.
304 * We'd like to avoid loading them unnecessarily.
305 */
306 WCHAR wszPath[MAX_PATH + 64];
307 UINT cwcPath = GetSystemDirectoryW(wszPath, MAX_PATH);
308 if (cwcPath >= MAX_PATH || cwcPath < 2)
309 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "GetSystemDirectoryW failed (%#x / %u)", cwcPath, GetLastError());
310
311 if (wszPath[cwcPath - 1] != '\\' || wszPath[cwcPath - 1] != '/')
312 wszPath[cwcPath++] = '\\';
313 RTUtf16CopyAscii(&wszPath[cwcPath], RT_ELEMENTS(wszPath) - cwcPath, "WinHvPlatform.dll");
314 if (GetFileAttributesW(wszPath) == INVALID_FILE_ATTRIBUTES)
315 return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE, "The native API dll was not found (%ls)", wszPath);
316
317 /*
318 * Check that we're in a VM and that the hypervisor identifies itself as Hyper-V.
319 */
320 /** @todo */
321
322 /** @todo would be great if we could recognize a root partition from the
323 * CPUID info, but I currently don't dare do that. */
324
325 /*
326 * Now try load the DLLs and resolve the APIs.
327 */
328 static const char * const s_apszDllNames[1] = { "WinHvPlatform.dll" };
329 RTLDRMOD ahMods[1] = { NIL_RTLDRMOD };
330 int rc = VINF_SUCCESS;
331 for (unsigned i = 0; i < RT_ELEMENTS(s_apszDllNames); i++)
332 {
333 int rc2 = RTLdrLoadSystem(s_apszDllNames[i], true /*fNoUnload*/, &ahMods[i]);
334 if (RT_FAILURE(rc2))
335 {
336 if (!RTErrInfoIsSet(pErrInfo))
337 RTErrInfoSetF(pErrInfo, rc2, "Failed to load API DLL: %s: %Rrc", s_apszDllNames[i], rc2);
338 else
339 RTErrInfoAddF(pErrInfo, rc2, "; %s: %Rrc", s_apszDllNames[i], rc2);
340 ahMods[i] = NIL_RTLDRMOD;
341 rc = VERR_NEM_INIT_FAILED;
342 }
343 }
344 if (RT_SUCCESS(rc))
345 {
346 for (unsigned i = 0; i < RT_ELEMENTS(g_aImports); i++)
347 {
348 int rc2 = RTLdrGetSymbol(ahMods[g_aImports[i].idxDll], g_aImports[i].pszName, (void **)g_aImports[i].ppfn);
349 if (RT_SUCCESS(rc2))
350 {
351 if (g_aImports[i].fOptional)
352 LogRel(("NEM: info: Found optional import %s!%s.\n",
353 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName));
354 }
355 else
356 {
357 *g_aImports[i].ppfn = NULL;
358
359 LogRel(("NEM: %s: Failed to import %s!%s: %Rrc",
360 g_aImports[i].fOptional ? "info" : fForced ? "fatal" : "error",
361 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName, rc2));
362 if (!g_aImports[i].fOptional)
363 {
364 if (RTErrInfoIsSet(pErrInfo))
365 RTErrInfoAddF(pErrInfo, rc2, ", %s!%s",
366 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName);
367 else
368 rc = RTErrInfoSetF(pErrInfo, rc2, "Failed to import: %s!%s",
369 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName);
370 Assert(RT_FAILURE(rc));
371 }
372 }
373 }
374 if (RT_SUCCESS(rc))
375 {
376 Assert(!RTErrInfoIsSet(pErrInfo));
377 }
378 }
379
380 for (unsigned i = 0; i < RT_ELEMENTS(ahMods); i++)
381 RTLdrClose(ahMods[i]);
382 return rc;
383}
384
385
386/**
387 * Wrapper for different WHvGetCapability signatures.
388 */
389DECLINLINE(HRESULT) WHvGetCapabilityWrapper(WHV_CAPABILITY_CODE enmCap, WHV_CAPABILITY *pOutput, uint32_t cbOutput)
390{
391 return g_pfnWHvGetCapability(enmCap, pOutput, cbOutput, NULL);
392}
393
394
395/**
396 * Worker for nemR3NativeInit that gets the hypervisor capabilities.
397 *
398 * @returns VBox status code.
399 * @param pVM The cross context VM structure.
400 * @param pErrInfo Where to always return error info.
401 */
402static int nemR3WinInitCheckCapabilities(PVM pVM, PRTERRINFO pErrInfo)
403{
404#define NEM_LOG_REL_CAP_EX(a_szField, a_szFmt, a_Value) LogRel(("NEM: %-38s= " a_szFmt "\n", a_szField, a_Value))
405#define NEM_LOG_REL_CAP_SUB_EX(a_szField, a_szFmt, a_Value) LogRel(("NEM: %36s: " a_szFmt "\n", a_szField, a_Value))
406#define NEM_LOG_REL_CAP_SUB(a_szField, a_Value) NEM_LOG_REL_CAP_SUB_EX(a_szField, "%d", a_Value)
407
408 /*
409 * Is the hypervisor present with the desired capability?
410 *
411 * In build 17083 this translates into:
412 * - CPUID[0x00000001].HVP is set
413 * - CPUID[0x40000000] == "Microsoft Hv"
414 * - CPUID[0x40000001].eax == "Hv#1"
415 * - CPUID[0x40000003].ebx[12] is set.
416 * - VidGetExoPartitionProperty(INVALID_HANDLE_VALUE, 0x60000, &Ignored) returns
417 * a non-zero value.
418 */
419 /**
420 * @todo Someone at Microsoft please explain weird API design:
421 * 1. Pointless CapabilityCode duplication int the output;
422 * 2. No output size.
423 */
424 WHV_CAPABILITY Caps;
425 RT_ZERO(Caps);
426 SetLastError(0);
427 HRESULT hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeHypervisorPresent, &Caps, sizeof(Caps));
428 DWORD rcWin = GetLastError();
429 if (FAILED(hrc))
430 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
431 "WHvGetCapability/WHvCapabilityCodeHypervisorPresent failed: %Rhrc (Last=%#x/%u)",
432 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
433 if (!Caps.HypervisorPresent)
434 {
435 if (!RTPathExists(RTPATH_NT_PASSTHRU_PREFIX "Device\\VidExo"))
436 return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE,
437 "WHvCapabilityCodeHypervisorPresent is FALSE! Make sure you have enabled the 'Windows Hypervisor Platform' feature.");
438 return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE, "WHvCapabilityCodeHypervisorPresent is FALSE! (%u)", rcWin);
439 }
440 LogRel(("NEM: WHvCapabilityCodeHypervisorPresent is TRUE, so this might work...\n"));
441
442
443 /*
444 * Check what extended VM exits are supported.
445 */
446 RT_ZERO(Caps);
447 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeExtendedVmExits, &Caps, sizeof(Caps));
448 if (FAILED(hrc))
449 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
450 "WHvGetCapability/WHvCapabilityCodeExtendedVmExits failed: %Rhrc (Last=%#x/%u)",
451 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
452 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeExtendedVmExits", "%'#018RX64", Caps.ExtendedVmExits.AsUINT64);
453 pVM->nem.s.fHypercallExit = RT_BOOL(Caps.ExtendedVmExits.HypercallExit);
454 pVM->nem.s.fGpaAccessFaultExit = RT_BOOL(Caps.ExtendedVmExits.GpaAccessFaultExit);
455 NEM_LOG_REL_CAP_SUB("fHypercallExit", pVM->nem.s.fHypercallExit);
456 NEM_LOG_REL_CAP_SUB("fGpaAccessFaultExit", pVM->nem.s.fGpaAccessFaultExit);
457 if (Caps.ExtendedVmExits.AsUINT64 & ~(uint64_t)7)
458 LogRel(("NEM: Warning! Unknown VM exit definitions: %#RX64\n", Caps.ExtendedVmExits.AsUINT64));
459 /** @todo RECHECK: WHV_EXTENDED_VM_EXITS typedef. */
460
461 /*
462 * Check features in case they end up defining any.
463 */
464 RT_ZERO(Caps);
465 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeFeatures, &Caps, sizeof(Caps));
466 if (FAILED(hrc))
467 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
468 "WHvGetCapability/WHvCapabilityCodeFeatures failed: %Rhrc (Last=%#x/%u)",
469 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
470 if (Caps.Features.AsUINT64 & ~(uint64_t)0)
471 LogRel(("NEM: Warning! Unknown feature definitions: %#RX64\n", Caps.Features.AsUINT64));
472 /** @todo RECHECK: WHV_CAPABILITY_FEATURES typedef. */
473
474 /*
475 * Check that the CPU vendor is supported.
476 */
477 RT_ZERO(Caps);
478 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeProcessorVendor, &Caps, sizeof(Caps));
479 if (FAILED(hrc))
480 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
481 "WHvGetCapability/WHvCapabilityCodeProcessorVendor failed: %Rhrc (Last=%#x/%u)",
482 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
483 switch (Caps.ProcessorVendor)
484 {
485 /** @todo RECHECK: WHV_PROCESSOR_VENDOR typedef. */
486 case WHvProcessorVendorArm:
487 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorVendor", "%d - ARM", Caps.ProcessorVendor);
488 pVM->nem.s.enmCpuVendor = CPUMCPUVENDOR_UNKNOWN;
489 break;
490 default:
491 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorVendor", "%d", Caps.ProcessorVendor);
492 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Unknown processor vendor: %d", Caps.ProcessorVendor);
493 }
494
495 /*
496 * CPU features, guessing these are virtual CPU features?
497 */
498 RT_ZERO(Caps);
499 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeProcessorFeatures, &Caps, sizeof(Caps));
500 if (FAILED(hrc))
501 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
502 "WHvGetCapability/WHvCapabilityCodeProcessorFeatures failed: %Rhrc (Last=%#x/%u)",
503 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
504 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorFeatures", "%'#018RX64", Caps.ProcessorFeatures.AsUINT64);
505#define NEM_LOG_REL_CPU_FEATURE(a_Field) NEM_LOG_REL_CAP_SUB(#a_Field, Caps.ProcessorFeatures.a_Field)
506 NEM_LOG_REL_CPU_FEATURE(Asid16);
507 NEM_LOG_REL_CPU_FEATURE(TGran16);
508 NEM_LOG_REL_CPU_FEATURE(TGran64);
509 NEM_LOG_REL_CPU_FEATURE(Haf);
510 NEM_LOG_REL_CPU_FEATURE(Hdbs);
511 NEM_LOG_REL_CPU_FEATURE(Pan);
512 NEM_LOG_REL_CPU_FEATURE(AtS1E1);
513 NEM_LOG_REL_CPU_FEATURE(Uao);
514 NEM_LOG_REL_CPU_FEATURE(El0Aarch32);
515 NEM_LOG_REL_CPU_FEATURE(Fp);
516 NEM_LOG_REL_CPU_FEATURE(FpHp);
517 NEM_LOG_REL_CPU_FEATURE(AdvSimd);
518 NEM_LOG_REL_CPU_FEATURE(AdvSimdHp);
519 NEM_LOG_REL_CPU_FEATURE(GicV3V4);
520 NEM_LOG_REL_CPU_FEATURE(GicV41);
521 NEM_LOG_REL_CPU_FEATURE(Ras);
522 NEM_LOG_REL_CPU_FEATURE(PmuV3);
523 NEM_LOG_REL_CPU_FEATURE(PmuV3ArmV81);
524 NEM_LOG_REL_CPU_FEATURE(PmuV3ArmV84);
525 NEM_LOG_REL_CPU_FEATURE(PmuV3ArmV85);
526 NEM_LOG_REL_CPU_FEATURE(Aes);
527 NEM_LOG_REL_CPU_FEATURE(PolyMul);
528 NEM_LOG_REL_CPU_FEATURE(Sha1);
529 NEM_LOG_REL_CPU_FEATURE(Sha256);
530 NEM_LOG_REL_CPU_FEATURE(Sha512);
531 NEM_LOG_REL_CPU_FEATURE(Crc32);
532 NEM_LOG_REL_CPU_FEATURE(Atomic);
533 NEM_LOG_REL_CPU_FEATURE(Rdm);
534 NEM_LOG_REL_CPU_FEATURE(Sha3);
535 NEM_LOG_REL_CPU_FEATURE(Sm3);
536 NEM_LOG_REL_CPU_FEATURE(Sm4);
537 NEM_LOG_REL_CPU_FEATURE(Dp);
538 NEM_LOG_REL_CPU_FEATURE(Fhm);
539 NEM_LOG_REL_CPU_FEATURE(DcCvap);
540 NEM_LOG_REL_CPU_FEATURE(DcCvadp);
541 NEM_LOG_REL_CPU_FEATURE(ApaBase);
542 NEM_LOG_REL_CPU_FEATURE(ApaEp);
543 NEM_LOG_REL_CPU_FEATURE(ApaEp2);
544 NEM_LOG_REL_CPU_FEATURE(ApaEp2Fp);
545 NEM_LOG_REL_CPU_FEATURE(ApaEp2Fpc);
546 NEM_LOG_REL_CPU_FEATURE(Jscvt);
547 NEM_LOG_REL_CPU_FEATURE(Fcma);
548 NEM_LOG_REL_CPU_FEATURE(RcpcV83);
549 NEM_LOG_REL_CPU_FEATURE(RcpcV84);
550 NEM_LOG_REL_CPU_FEATURE(Gpa);
551 NEM_LOG_REL_CPU_FEATURE(L1ipPipt);
552 NEM_LOG_REL_CPU_FEATURE(DzPermitted);
553
554#undef NEM_LOG_REL_CPU_FEATURE
555 if (Caps.ProcessorFeatures.AsUINT64 & (~(RT_BIT_64(47) - 1)))
556 LogRel(("NEM: Warning! Unknown CPU features: %#RX64\n", Caps.ProcessorFeatures.AsUINT64));
557 pVM->nem.s.uCpuFeatures.u64 = Caps.ProcessorFeatures.AsUINT64;
558 /** @todo RECHECK: WHV_PROCESSOR_FEATURES typedef. */
559
560 /*
561 * The cache line flush size.
562 */
563 RT_ZERO(Caps);
564 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeProcessorClFlushSize, &Caps, sizeof(Caps));
565 if (FAILED(hrc))
566 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
567 "WHvGetCapability/WHvCapabilityCodeProcessorClFlushSize failed: %Rhrc (Last=%#x/%u)",
568 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
569 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorClFlushSize", "2^%u", Caps.ProcessorClFlushSize);
570 if (Caps.ProcessorClFlushSize < 8 && Caps.ProcessorClFlushSize > 9)
571 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Unsupported cache line flush size: %u", Caps.ProcessorClFlushSize);
572 pVM->nem.s.cCacheLineFlushShift = Caps.ProcessorClFlushSize;
573
574 RT_ZERO(Caps);
575 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodePhysicalAddressWidth, &Caps, sizeof(Caps));
576 if (FAILED(hrc))
577 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
578 "WHvGetCapability/WHvCapabilityCodePhysicalAddressWidth failed: %Rhrc (Last=%#x/%u)",
579 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
580 NEM_LOG_REL_CAP_EX("WHvCapabilityCodePhysicalAddressWidth", "2^%u", Caps.PhysicalAddressWidth);
581 if (Caps.PhysicalAddressWidth < 32 && Caps.PhysicalAddressWidth > 52)
582 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Unsupported physical address width: %u", Caps.ProcessorClFlushSize);
583 pVM->nem.s.cPhysicalAddressWidth = Caps.PhysicalAddressWidth;
584
585
586 /*
587 * See if they've added more properties that we're not aware of.
588 */
589 /** @todo RECHECK: WHV_CAPABILITY_CODE typedef. */
590 if (!IsDebuggerPresent()) /* Too noisy when in debugger, so skip. */
591 {
592 static const struct
593 {
594 uint32_t iMin, iMax; } s_aUnknowns[] =
595 {
596 { 0x0004, 0x000f },
597 { 0x1003, 0x100f },
598 { 0x2000, 0x200f },
599 { 0x3000, 0x300f },
600 { 0x4000, 0x400f },
601 };
602 for (uint32_t j = 0; j < RT_ELEMENTS(s_aUnknowns); j++)
603 for (uint32_t i = s_aUnknowns[j].iMin; i <= s_aUnknowns[j].iMax; i++)
604 {
605 RT_ZERO(Caps);
606 hrc = WHvGetCapabilityWrapper((WHV_CAPABILITY_CODE)i, &Caps, sizeof(Caps));
607 if (SUCCEEDED(hrc))
608 LogRel(("NEM: Warning! Unknown capability %#x returning: %.*Rhxs\n", i, sizeof(Caps), &Caps));
609 }
610 }
611
612 /*
613 * For proper operation, we require CPUID exits.
614 */
615 /** @todo Any? */
616
617#undef NEM_LOG_REL_CAP_EX
618#undef NEM_LOG_REL_CAP_SUB_EX
619#undef NEM_LOG_REL_CAP_SUB
620 return VINF_SUCCESS;
621}
622
623
624/**
625 * Initializes the GIC controller emulation provided by Hyper-V.
626 *
627 * @returns VBox status code.
628 * @param pVM The cross context VM structure.
629 *
630 * @note Needs to be done early when setting up the partition so this has to live here and not in GICNem-win.cpp
631 */
632static int nemR3WinGicCreate(PVM pVM)
633{
634 PCFGMNODE pGicCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "Devices/gic-nem/0/Config");
635 AssertPtrReturn(pGicCfg, VERR_NEM_IPE_5);
636
637 /*
638 * Query the MMIO ranges.
639 */
640 RTGCPHYS GCPhysMmioBaseDist = 0;
641 int rc = CFGMR3QueryU64(pGicCfg, "DistributorMmioBase", &GCPhysMmioBaseDist);
642 if (RT_FAILURE(rc))
643 return VMSetError(pVM, rc, RT_SRC_POS,
644 "Configuration error: Failed to get the \"DistributorMmioBase\" value\n");
645
646 RTGCPHYS GCPhysMmioBaseReDist = 0;
647 rc = CFGMR3QueryU64(pGicCfg, "RedistributorMmioBase", &GCPhysMmioBaseReDist);
648 if (RT_FAILURE(rc))
649 return VMSetError(pVM, rc, RT_SRC_POS,
650 "Configuration error: Failed to get the \"RedistributorMmioBase\" value\n");
651
652 RTGCPHYS GCPhysMmioBaseIts = 0;
653 rc = CFGMR3QueryU64(pGicCfg, "ItsMmioBase", &GCPhysMmioBaseIts);
654 if (RT_FAILURE(rc))
655 return VMSetError(pVM, rc, RT_SRC_POS,
656 "Configuration error: Failed to get the \"ItsMmioBase\" value\n");
657
658 /*
659 * One can only set the GIC distributor base. The re-distributor regions for the individual
660 * vCPUs are configured when the vCPUs are created, so we need to save the base of the MMIO region.
661 */
662 pVM->nem.s.GCPhysMmioBaseReDist = GCPhysMmioBaseReDist;
663
664 WHV_PARTITION_HANDLE hPartition = pVM->nem.s.hPartition;
665
666 MY_WHV_ARM64_IC_PARAMETERS Property; RT_ZERO(Property);
667 Property.u32EmulationMode = WHV_ARM64_IC_EMULATION_MODE_GICV3;
668 Property.u.GicV3.GCPhysGicdBase = GCPhysMmioBaseDist;
669 Property.u.GicV3.GCPhysGitsTranslaterBase = GCPhysMmioBaseIts;
670 Property.u.GicV3.cLpiIntIdBits = 1; /** @todo LPIs are currently not supported with our device emulations. */
671 Property.u.GicV3.u32PpiCntvOverflw = pVM->nem.s.u32GicPpiVTimer + 16; /* Calculate the absolute timer INTID. */
672 Property.u.GicV3.u32PpiPmu = 23; /** @todo Configure dynamically (from SBSA, needs a PMU/NEM emulation just like with the GIC probably). */
673 HRESULT hrc = WHvSetPartitionProperty(hPartition, (WHV_PARTITION_PROPERTY_CODE)WHV_PARTITION_PROPERTY_CODE_ARM64_IC_PARAMETERS, &Property, sizeof(Property));
674 if (FAILED(hrc))
675 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
676 "Failed to set WHvPartitionPropertyCodeArm64IcParameters: %Rhrc (Last=%#x/%u)",
677 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
678
679 return rc;
680}
681
682
683/**
684 * Creates and sets up a Hyper-V (exo) partition.
685 *
686 * @returns VBox status code.
687 * @param pVM The cross context VM structure.
688 * @param pErrInfo Where to always return error info.
689 */
690static int nemR3WinInitCreatePartition(PVM pVM, PRTERRINFO pErrInfo)
691{
692 AssertReturn(!pVM->nem.s.hPartition, RTErrInfoSet(pErrInfo, VERR_WRONG_ORDER, "Wrong initalization order"));
693 AssertReturn(!pVM->nem.s.hPartitionDevice, RTErrInfoSet(pErrInfo, VERR_WRONG_ORDER, "Wrong initalization order"));
694
695 /*
696 * Create the partition.
697 */
698 WHV_PARTITION_HANDLE hPartition;
699 HRESULT hrc = WHvCreatePartition(&hPartition);
700 if (FAILED(hrc))
701 return RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED, "WHvCreatePartition failed with %Rhrc (Last=%#x/%u)",
702 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
703
704 int rc;
705
706 /*
707 * Set partition properties, most importantly the CPU count.
708 */
709 /**
710 * @todo Someone at Microsoft please explain another weird API:
711 * - Why this API doesn't take the WHV_PARTITION_PROPERTY_CODE value as an
712 * argument rather than as part of the struct. That is so weird if you've
713 * used any other NT or windows API, including WHvGetCapability().
714 * - Why use PVOID when WHV_PARTITION_PROPERTY is what's expected. We
715 * technically only need 9 bytes for setting/getting
716 * WHVPartitionPropertyCodeProcessorClFlushSize, but the API insists on 16. */
717 WHV_PARTITION_PROPERTY Property;
718 RT_ZERO(Property);
719 Property.ProcessorCount = pVM->cCpus;
720 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorCount, &Property, sizeof(Property));
721 if (SUCCEEDED(hrc))
722 {
723 RT_ZERO(Property);
724 Property.ExtendedVmExits.HypercallExit = pVM->nem.s.fHypercallExit;
725 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeExtendedVmExits, &Property, sizeof(Property));
726 if (SUCCEEDED(hrc))
727 {
728 /*
729 * We'll continue setup in nemR3NativeInitAfterCPUM.
730 */
731 pVM->nem.s.fCreatedEmts = false;
732 pVM->nem.s.hPartition = hPartition;
733 LogRel(("NEM: Created partition %p.\n", hPartition));
734 return VINF_SUCCESS;
735 }
736
737 rc = RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED,
738 "Failed setting WHvPartitionPropertyCodeExtendedVmExits to %'#RX64: %Rhrc",
739 Property.ExtendedVmExits.AsUINT64, hrc);
740 }
741 else
742 rc = RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED,
743 "Failed setting WHvPartitionPropertyCodeProcessorCount to %u: %Rhrc (Last=%#x/%u)",
744 pVM->cCpus, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
745 WHvDeletePartition(hPartition);
746
747 Assert(!pVM->nem.s.hPartitionDevice);
748 Assert(!pVM->nem.s.hPartition);
749 return rc;
750}
751
752
753static int nemR3NativeInitSetupVm(PVM pVM)
754{
755 WHV_PARTITION_HANDLE hPartition = pVM->nem.s.hPartition;
756 AssertReturn(hPartition != NULL, VERR_WRONG_ORDER);
757 AssertReturn(!pVM->nem.s.hPartitionDevice, VERR_WRONG_ORDER);
758 AssertReturn(!pVM->nem.s.fCreatedEmts, VERR_WRONG_ORDER);
759
760 /*
761 * Continue setting up the partition now that we've got most of the CPUID feature stuff.
762 */
763 WHV_PARTITION_PROPERTY Property;
764 HRESULT hrc;
765
766 /* Not sure if we really need to set the cache line flush size. */
767 RT_ZERO(Property);
768 Property.ProcessorClFlushSize = pVM->nem.s.cCacheLineFlushShift;
769 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorClFlushSize, &Property, sizeof(Property));
770 if (FAILED(hrc))
771 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
772 "Failed to set WHvPartitionPropertyCodeProcessorClFlushSize to %u: %Rhrc (Last=%#x/%u)",
773 pVM->nem.s.cCacheLineFlushShift, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
774
775 /*
776 * Sync CPU features with CPUM.
777 */
778 /** @todo sync CPU features with CPUM. */
779
780 /* Set the partition property. */
781 RT_ZERO(Property);
782 Property.ProcessorFeatures.AsUINT64 = pVM->nem.s.uCpuFeatures.u64;
783 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorFeatures, &Property, sizeof(Property));
784 if (FAILED(hrc))
785 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
786 "Failed to set WHvPartitionPropertyCodeProcessorFeatures to %'#RX64: %Rhrc (Last=%#x/%u)",
787 pVM->nem.s.uCpuFeatures.u64, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
788
789 /* Configure the GIC. */
790 int rc = nemR3WinGicCreate(pVM);
791 if (RT_FAILURE(rc))
792 return rc;
793
794 /*
795 * Set up the partition.
796 *
797 * Seems like this is where the partition is actually instantiated and we get
798 * a handle to it.
799 */
800 hrc = WHvSetupPartition(hPartition);
801 if (FAILED(hrc))
802 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
803 "Call to WHvSetupPartition failed: %Rhrc (Last=%#x/%u)",
804 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
805
806 /*
807 * Setup the EMTs.
808 */
809 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
810 {
811 hrc = WHvCreateVirtualProcessor(hPartition, idCpu, 0 /*fFlags*/);
812 if (FAILED(hrc))
813 {
814 NTSTATUS const rcNtLast = RTNtLastStatusValue();
815 DWORD const dwErrLast = RTNtLastErrorValue();
816 while (idCpu-- > 0)
817 {
818 HRESULT hrc2 = WHvDeleteVirtualProcessor(hPartition, idCpu);
819 AssertLogRelMsg(SUCCEEDED(hrc2), ("WHvDeleteVirtualProcessor(%p, %u) -> %Rhrc (Last=%#x/%u)\n",
820 hPartition, idCpu, hrc2, RTNtLastStatusValue(),
821 RTNtLastErrorValue()));
822 }
823 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
824 "Call to WHvCreateVirtualProcessor failed: %Rhrc (Last=%#x/%u)", hrc, rcNtLast, dwErrLast);
825 }
826
827 if (idCpu == 0)
828 {
829 /*
830 * Need to query the ID registers and populate CPUM,
831 * these are partition wide registers and need to be queried/set with WHV_ANY_VP.
832 */
833 CPUMARMV8IDREGS IdRegs; RT_ZERO(IdRegs);
834
835 WHV_REGISTER_NAME aenmNames[10];
836 WHV_REGISTER_VALUE aValues[10];
837 RT_ZERO(aValues);
838
839 aenmNames[0] = WHvArm64RegisterIdAa64Dfr0El1;
840 aenmNames[1] = WHvArm64RegisterIdAa64Dfr1El1;
841 aenmNames[2] = WHvArm64RegisterIdAa64Isar0El1;
842 aenmNames[3] = WHvArm64RegisterIdAa64Isar1El1;
843 aenmNames[4] = WHvArm64RegisterIdAa64Isar2El1;
844 aenmNames[5] = WHvArm64RegisterIdAa64Mmfr0El1;
845 aenmNames[6] = WHvArm64RegisterIdAa64Mmfr1El1;
846 aenmNames[7] = WHvArm64RegisterIdAa64Mmfr2El1;
847 aenmNames[8] = WHvArm64RegisterIdAa64Pfr0El1;
848 aenmNames[9] = WHvArm64RegisterIdAa64Pfr1El1;
849
850 hrc = WHvGetVirtualProcessorRegisters(hPartition, WHV_ANY_VP /*idCpu*/, aenmNames, RT_ELEMENTS(aenmNames), aValues);
851 AssertLogRelMsgReturn(SUCCEEDED(hrc),
852 ("WHvGetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
853 hPartition, WHV_ANY_VP, RT_ELEMENTS(aenmNames), hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
854 , VERR_NEM_GET_REGISTERS_FAILED);
855
856 IdRegs.u64RegIdAa64Pfr0El1 = aValues[8].Reg64;
857 IdRegs.u64RegIdAa64Pfr1El1 = aValues[9].Reg64;
858 IdRegs.u64RegIdAa64Dfr0El1 = aValues[0].Reg64;
859 IdRegs.u64RegIdAa64Dfr1El1 = aValues[1].Reg64;
860 IdRegs.u64RegIdAa64Isar0El1 = aValues[2].Reg64;
861 IdRegs.u64RegIdAa64Isar1El1 = aValues[3].Reg64;
862 IdRegs.u64RegIdAa64Isar2El1 = aValues[4].Reg64;
863 IdRegs.u64RegIdAa64Mmfr0El1 = aValues[5].Reg64;
864 IdRegs.u64RegIdAa64Mmfr1El1 = aValues[6].Reg64;
865 IdRegs.u64RegIdAa64Mmfr2El1 = aValues[7].Reg64;
866
867 rc = CPUMR3PopulateFeaturesByIdRegisters(pVM, &IdRegs);
868 if (RT_FAILURE(rc))
869 return rc;
870
871 /* Apply any overrides to the partition. */
872 PCCPUMARMV8IDREGS pIdRegsGst = NULL;
873 rc = CPUMR3QueryGuestIdRegs(pVM, &pIdRegsGst);
874 AssertRCReturn(rc, rc);
875
876 aValues[0].Reg64 = pIdRegsGst->u64RegIdAa64Dfr0El1;
877 aValues[1].Reg64 = pIdRegsGst->u64RegIdAa64Dfr1El1;
878 aValues[2].Reg64 = pIdRegsGst->u64RegIdAa64Isar0El1;
879 aValues[3].Reg64 = pIdRegsGst->u64RegIdAa64Isar1El1;
880 aValues[4].Reg64 = pIdRegsGst->u64RegIdAa64Isar2El1;
881 aValues[5].Reg64 = pIdRegsGst->u64RegIdAa64Mmfr0El1;
882 aValues[6].Reg64 = pIdRegsGst->u64RegIdAa64Mmfr1El1;
883 aValues[7].Reg64 = pIdRegsGst->u64RegIdAa64Mmfr2El1;
884 aValues[8].Reg64 = pIdRegsGst->u64RegIdAa64Pfr0El1;
885 aValues[9].Reg64 = pIdRegsGst->u64RegIdAa64Pfr1El1;
886
887 hrc = WHvSetVirtualProcessorRegisters(hPartition, WHV_ANY_VP /*idCpu*/, aenmNames, RT_ELEMENTS(aenmNames), aValues);
888 AssertLogRelMsgReturn(SUCCEEDED(hrc),
889 ("WHvGetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
890 hPartition, WHV_ANY_VP, RT_ELEMENTS(aenmNames), hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
891 , VERR_NEM_SET_REGISTERS_FAILED);
892
893 /* Save the amount of break-/watchpoints supported for syncing the guest register state later. */
894 pVM->nem.s.cBreakpoints = RT_BF_GET(pIdRegsGst->u64RegIdAa64Dfr0El1, ARMV8_ID_AA64DFR0_EL1_BRPS) + 1;
895 pVM->nem.s.cWatchpoints = RT_BF_GET(pIdRegsGst->u64RegIdAa64Dfr0El1, ARMV8_ID_AA64DFR0_EL1_WRPS) + 1;
896 }
897
898 /* Configure the GIC re-distributor region for the GIC. */
899 WHV_REGISTER_NAME enmName = My_WHvArm64RegisterGicrBaseGpa;
900 WHV_REGISTER_VALUE Value;
901 Value.Reg64 = pVM->nem.s.GCPhysMmioBaseReDist + idCpu * _128K;
902
903 hrc = WHvSetVirtualProcessorRegisters(hPartition, idCpu, &enmName, 1, &Value);
904 AssertLogRelMsgReturn(SUCCEEDED(hrc),
905 ("WHvSetVirtualProcessorRegisters(%p, %u, WHvArm64RegisterGicrBaseGpa,) -> %Rhrc (Last=%#x/%u)\n",
906 hPartition, idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
907 , VERR_NEM_SET_REGISTERS_FAILED);
908 }
909
910 pVM->nem.s.fCreatedEmts = true;
911
912 LogRel(("NEM: Successfully set up partition\n"));
913 return VINF_SUCCESS;
914}
915
916
917/**
918 * Try initialize the native API.
919 *
920 * This may only do part of the job, more can be done in
921 * nemR3NativeInitAfterCPUM() and nemR3NativeInitCompleted().
922 *
923 * @returns VBox status code.
924 * @param pVM The cross context VM structure.
925 * @param fFallback Whether we're in fallback mode or use-NEM mode. In
926 * the latter we'll fail if we cannot initialize.
927 * @param fForced Whether the HMForced flag is set and we should
928 * fail if we cannot initialize.
929 */
930int nemR3NativeInit(PVM pVM, bool fFallback, bool fForced)
931{
932 g_uBuildNo = RTSystemGetNtBuildNo();
933
934 /*
935 * Error state.
936 * The error message will be non-empty on failure and 'rc' will be set too.
937 */
938 RTERRINFOSTATIC ErrInfo;
939 PRTERRINFO pErrInfo = RTErrInfoInitStatic(&ErrInfo);
940 int rc = nemR3WinInitProbeAndLoad(fForced, pErrInfo);
941 if (RT_SUCCESS(rc))
942 {
943 /*
944 * Check the capabilties of the hypervisor, starting with whether it's present.
945 */
946 rc = nemR3WinInitCheckCapabilities(pVM, pErrInfo);
947 if (RT_SUCCESS(rc))
948 {
949 /*
950 * Create and initialize a partition.
951 */
952 rc = nemR3WinInitCreatePartition(pVM, pErrInfo);
953 if (RT_SUCCESS(rc))
954 {
955 rc = nemR3NativeInitSetupVm(pVM);
956 if (RT_SUCCESS(rc))
957 {
958 /*
959 * Set ourselves as the execution engine and make config adjustments.
960 */
961 VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_NATIVE_API);
962 Log(("NEM: Marked active!\n"));
963 PGMR3EnableNemMode(pVM);
964
965 /*
966 * Register release statistics
967 */
968 STAMR3Register(pVM, (void *)&pVM->nem.s.cMappedPages, STAMTYPE_U32, STAMVISIBILITY_ALWAYS,
969 "/NEM/PagesCurrentlyMapped", STAMUNIT_PAGES, "Number guest pages currently mapped by the VM");
970 STAMR3Register(pVM, (void *)&pVM->nem.s.StatMapPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
971 "/NEM/PagesMapCalls", STAMUNIT_PAGES, "Calls to WHvMapGpaRange/HvCallMapGpaPages");
972 STAMR3Register(pVM, (void *)&pVM->nem.s.StatMapPageFailed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
973 "/NEM/PagesMapFails", STAMUNIT_PAGES, "Calls to WHvMapGpaRange/HvCallMapGpaPages that failed");
974 STAMR3Register(pVM, (void *)&pVM->nem.s.StatUnmapPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
975 "/NEM/PagesUnmapCalls", STAMUNIT_PAGES, "Calls to WHvUnmapGpaRange/HvCallUnmapGpaPages");
976 STAMR3Register(pVM, (void *)&pVM->nem.s.StatUnmapPageFailed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
977 "/NEM/PagesUnmapFails", STAMUNIT_PAGES, "Calls to WHvUnmapGpaRange/HvCallUnmapGpaPages that failed");
978 STAMR3Register(pVM, &pVM->nem.s.StatProfMapGpaRange, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
979 "/NEM/PagesMapGpaRange", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvMapGpaRange for bigger stuff");
980 STAMR3Register(pVM, &pVM->nem.s.StatProfUnmapGpaRange, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
981 "/NEM/PagesUnmapGpaRange", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvUnmapGpaRange for bigger stuff");
982 STAMR3Register(pVM, &pVM->nem.s.StatProfMapGpaRangePage, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
983 "/NEM/PagesMapGpaRangePage", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvMapGpaRange for single pages");
984 STAMR3Register(pVM, &pVM->nem.s.StatProfUnmapGpaRangePage, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
985 "/NEM/PagesUnmapGpaRangePage", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvUnmapGpaRange for single pages");
986
987 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
988 {
989 PNEMCPU pNemCpu = &pVM->apCpusR3[idCpu]->nem.s;
990 STAMR3RegisterF(pVM, &pNemCpu->StatExitPortIo, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of port I/O exits", "/NEM/CPU%u/ExitPortIo", idCpu);
991 STAMR3RegisterF(pVM, &pNemCpu->StatExitMemUnmapped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of unmapped memory exits", "/NEM/CPU%u/ExitMemUnmapped", idCpu);
992 STAMR3RegisterF(pVM, &pNemCpu->StatExitMemIntercept, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of intercepted memory exits", "/NEM/CPU%u/ExitMemIntercept", idCpu);
993 STAMR3RegisterF(pVM, &pNemCpu->StatExitHalt, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of HLT exits", "/NEM/CPU%u/ExitHalt", idCpu);
994 STAMR3RegisterF(pVM, &pNemCpu->StatExitInterruptWindow, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of interrupt window exits", "/NEM/CPU%u/ExitInterruptWindow", idCpu);
995 STAMR3RegisterF(pVM, &pNemCpu->StatExitCpuId, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of CPUID exits", "/NEM/CPU%u/ExitCpuId", idCpu);
996 STAMR3RegisterF(pVM, &pNemCpu->StatExitMsr, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of MSR access exits", "/NEM/CPU%u/ExitMsr", idCpu);
997 STAMR3RegisterF(pVM, &pNemCpu->StatExitException, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of exception exits", "/NEM/CPU%u/ExitException", idCpu);
998 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionBp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #BP exits", "/NEM/CPU%u/ExitExceptionBp", idCpu);
999 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionDb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #DB exits", "/NEM/CPU%u/ExitExceptionDb", idCpu);
1000 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionGp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #GP exits", "/NEM/CPU%u/ExitExceptionGp", idCpu);
1001 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionGpMesa, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #GP exits from mesa driver", "/NEM/CPU%u/ExitExceptionGpMesa", idCpu);
1002 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionUd, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #UD exits", "/NEM/CPU%u/ExitExceptionUd", idCpu);
1003 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionUdHandled, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of handled #UD exits", "/NEM/CPU%u/ExitExceptionUdHandled", idCpu);
1004 STAMR3RegisterF(pVM, &pNemCpu->StatExitUnrecoverable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of unrecoverable exits", "/NEM/CPU%u/ExitUnrecoverable", idCpu);
1005 STAMR3RegisterF(pVM, &pNemCpu->StatGetMsgTimeout, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of get message timeouts/alerts", "/NEM/CPU%u/GetMsgTimeout", idCpu);
1006 STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuSuccess, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of successful CPU stops", "/NEM/CPU%u/StopCpuSuccess", idCpu);
1007 STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuPending, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of pending CPU stops", "/NEM/CPU%u/StopCpuPending", idCpu);
1008 STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuPendingAlerts,STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of pending CPU stop alerts", "/NEM/CPU%u/StopCpuPendingAlerts", idCpu);
1009 STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuPendingOdd, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of odd pending CPU stops (see code)", "/NEM/CPU%u/StopCpuPendingOdd", idCpu);
1010 STAMR3RegisterF(pVM, &pNemCpu->StatCancelChangedState, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of cancel changed state", "/NEM/CPU%u/CancelChangedState", idCpu);
1011 STAMR3RegisterF(pVM, &pNemCpu->StatCancelAlertedThread, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of cancel alerted EMT", "/NEM/CPU%u/CancelAlertedEMT", idCpu);
1012 STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnFFPre, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of pre execution FF breaks", "/NEM/CPU%u/BreakOnFFPre", idCpu);
1013 STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnFFPost, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of post execution FF breaks", "/NEM/CPU%u/BreakOnFFPost", idCpu);
1014 STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnCancel, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of cancel execution breaks", "/NEM/CPU%u/BreakOnCancel", idCpu);
1015 STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnStatus, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of status code breaks", "/NEM/CPU%u/BreakOnStatus", idCpu);
1016 STAMR3RegisterF(pVM, &pNemCpu->StatImportOnDemand, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of on-demand state imports", "/NEM/CPU%u/ImportOnDemand", idCpu);
1017 STAMR3RegisterF(pVM, &pNemCpu->StatImportOnReturn, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of state imports on loop return", "/NEM/CPU%u/ImportOnReturn", idCpu);
1018 STAMR3RegisterF(pVM, &pNemCpu->StatImportOnReturnSkipped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of skipped state imports on loop return", "/NEM/CPU%u/ImportOnReturnSkipped", idCpu);
1019 STAMR3RegisterF(pVM, &pNemCpu->StatQueryCpuTick, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TSC queries", "/NEM/CPU%u/QueryCpuTick", idCpu);
1020 }
1021
1022#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
1023 if (!SUPR3IsDriverless())
1024 {
1025 PUVM pUVM = pVM->pUVM;
1026 STAMR3RegisterRefresh(pUVM, &pVM->nem.s.R0Stats.cPagesAvailable, STAMTYPE_U64, STAMVISIBILITY_ALWAYS,
1027 STAMUNIT_PAGES, STAM_REFRESH_GRP_NEM, "Free pages available to the hypervisor",
1028 "/NEM/R0Stats/cPagesAvailable");
1029 STAMR3RegisterRefresh(pUVM, &pVM->nem.s.R0Stats.cPagesInUse, STAMTYPE_U64, STAMVISIBILITY_ALWAYS,
1030 STAMUNIT_PAGES, STAM_REFRESH_GRP_NEM, "Pages in use by hypervisor",
1031 "/NEM/R0Stats/cPagesInUse");
1032 }
1033#endif /* VBOX_WITH_R0_MODULES && !VBOX_WITH_MINIMAL_R0 */
1034 }
1035 }
1036 }
1037 }
1038
1039 /*
1040 * We only fail if in forced mode, otherwise just log the complaint and return.
1041 */
1042 Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API || RTErrInfoIsSet(pErrInfo));
1043 if ( (fForced || !fFallback)
1044 && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NATIVE_API)
1045 return VMSetError(pVM, RT_SUCCESS_NP(rc) ? VERR_NEM_NOT_AVAILABLE : rc, RT_SRC_POS, "%s", pErrInfo->pszMsg);
1046
1047 if (RTErrInfoIsSet(pErrInfo))
1048 LogRel(("NEM: Not available: %s\n", pErrInfo->pszMsg));
1049 return VINF_SUCCESS;
1050}
1051
1052
1053/**
1054 * This is called after CPUMR3Init is done.
1055 *
1056 * @returns VBox status code.
1057 * @param pVM The VM handle..
1058 */
1059int nemR3NativeInitAfterCPUM(PVM pVM)
1060{
1061 /*
1062 * Validate sanity.
1063 */
1064 AssertReturn(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API, VERR_WRONG_ORDER);
1065
1066 /** @todo */
1067
1068 /*
1069 * Any hyper-v statistics we can get at now? HvCallMapStatsPage isn't accessible any more.
1070 */
1071 /** @todo stats */
1072
1073 /*
1074 * Adjust features.
1075 *
1076 * Note! We've already disabled X2APIC and MONITOR/MWAIT via CFGM during
1077 * the first init call.
1078 */
1079
1080 return VINF_SUCCESS;
1081}
1082
1083
1084int nemR3NativeInitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
1085{
1086 //BOOL fRet = SetThreadPriority(GetCurrentThread(), 0);
1087 //AssertLogRel(fRet);
1088
1089 NOREF(pVM); NOREF(enmWhat);
1090 return VINF_SUCCESS;
1091}
1092
1093
1094int nemR3NativeTerm(PVM pVM)
1095{
1096 /*
1097 * Delete the partition.
1098 */
1099 WHV_PARTITION_HANDLE hPartition = pVM->nem.s.hPartition;
1100 pVM->nem.s.hPartition = NULL;
1101 pVM->nem.s.hPartitionDevice = NULL;
1102 if (hPartition != NULL)
1103 {
1104 VMCPUID idCpu = pVM->nem.s.fCreatedEmts ? pVM->cCpus : 0;
1105 LogRel(("NEM: Destroying partition %p with its %u VCpus...\n", hPartition, idCpu));
1106 while (idCpu-- > 0)
1107 {
1108 HRESULT hrc = WHvDeleteVirtualProcessor(hPartition, idCpu);
1109 AssertLogRelMsg(SUCCEEDED(hrc), ("WHvDeleteVirtualProcessor(%p, %u) -> %Rhrc (Last=%#x/%u)\n",
1110 hPartition, idCpu, hrc, RTNtLastStatusValue(),
1111 RTNtLastErrorValue()));
1112 }
1113 WHvDeletePartition(hPartition);
1114 }
1115 pVM->nem.s.fCreatedEmts = false;
1116 return VINF_SUCCESS;
1117}
1118
1119
1120/**
1121 * VM reset notification.
1122 *
1123 * @param pVM The cross context VM structure.
1124 */
1125void nemR3NativeReset(PVM pVM)
1126{
1127 RT_NOREF(pVM);
1128}
1129
1130
1131/**
1132 * Reset CPU due to INIT IPI or hot (un)plugging.
1133 *
1134 * @param pVCpu The cross context virtual CPU structure of the CPU being
1135 * reset.
1136 * @param fInitIpi Whether this is the INIT IPI or hot (un)plugging case.
1137 */
1138void nemR3NativeResetCpu(PVMCPU pVCpu, bool fInitIpi)
1139{
1140 RT_NOREF(pVCpu, fInitIpi);
1141}
1142
1143
1144NEM_TMPL_STATIC int nemHCWinCopyStateToHyperV(PVMCC pVM, PVMCPUCC pVCpu)
1145{
1146 WHV_REGISTER_NAME aenmNames[128];
1147 WHV_REGISTER_VALUE aValues[128];
1148
1149 uint64_t const fWhat = ~pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL;
1150 if (!fWhat)
1151 return VINF_SUCCESS;
1152 uintptr_t iReg = 0;
1153
1154#define ADD_REG64(a_enmName, a_uValue) do { \
1155 aenmNames[iReg] = (a_enmName); \
1156 aValues[iReg].Reg128.High64 = 0; \
1157 aValues[iReg].Reg64 = (a_uValue).x; \
1158 iReg++; \
1159 } while (0)
1160#define ADD_REG64_RAW(a_enmName, a_uValue) do { \
1161 aenmNames[iReg] = (a_enmName); \
1162 aValues[iReg].Reg128.High64 = 0; \
1163 aValues[iReg].Reg64 = (a_uValue); \
1164 iReg++; \
1165 } while (0)
1166#define ADD_SYSREG64(a_enmName, a_uValue) do { \
1167 aenmNames[iReg] = (a_enmName); \
1168 aValues[iReg].Reg128.High64 = 0; \
1169 aValues[iReg].Reg64 = (a_uValue).u64; \
1170 iReg++; \
1171 } while (0)
1172#define ADD_REG128(a_enmName, a_uValue) do { \
1173 aenmNames[iReg] = (a_enmName); \
1174 aValues[iReg].Reg128.Low64 = (a_uValue).au64[0]; \
1175 aValues[iReg].Reg128.High64 = (a_uValue).au64[1]; \
1176 iReg++; \
1177 } while (0)
1178
1179 /* GPRs */
1180 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1181 {
1182 if (fWhat & CPUMCTX_EXTRN_X0)
1183 ADD_REG64(WHvArm64RegisterX0, pVCpu->cpum.GstCtx.aGRegs[0]);
1184 if (fWhat & CPUMCTX_EXTRN_X1)
1185 ADD_REG64(WHvArm64RegisterX1, pVCpu->cpum.GstCtx.aGRegs[1]);
1186 if (fWhat & CPUMCTX_EXTRN_X2)
1187 ADD_REG64(WHvArm64RegisterX2, pVCpu->cpum.GstCtx.aGRegs[2]);
1188 if (fWhat & CPUMCTX_EXTRN_X3)
1189 ADD_REG64(WHvArm64RegisterX3, pVCpu->cpum.GstCtx.aGRegs[3]);
1190 if (fWhat & CPUMCTX_EXTRN_X4_X28)
1191 {
1192 ADD_REG64(WHvArm64RegisterX4, pVCpu->cpum.GstCtx.aGRegs[4]);
1193 ADD_REG64(WHvArm64RegisterX5, pVCpu->cpum.GstCtx.aGRegs[5]);
1194 ADD_REG64(WHvArm64RegisterX6, pVCpu->cpum.GstCtx.aGRegs[6]);
1195 ADD_REG64(WHvArm64RegisterX7, pVCpu->cpum.GstCtx.aGRegs[7]);
1196 ADD_REG64(WHvArm64RegisterX8, pVCpu->cpum.GstCtx.aGRegs[8]);
1197 ADD_REG64(WHvArm64RegisterX9, pVCpu->cpum.GstCtx.aGRegs[9]);
1198 ADD_REG64(WHvArm64RegisterX10, pVCpu->cpum.GstCtx.aGRegs[10]);
1199 ADD_REG64(WHvArm64RegisterX11, pVCpu->cpum.GstCtx.aGRegs[11]);
1200 ADD_REG64(WHvArm64RegisterX12, pVCpu->cpum.GstCtx.aGRegs[12]);
1201 ADD_REG64(WHvArm64RegisterX13, pVCpu->cpum.GstCtx.aGRegs[13]);
1202 ADD_REG64(WHvArm64RegisterX14, pVCpu->cpum.GstCtx.aGRegs[14]);
1203 ADD_REG64(WHvArm64RegisterX15, pVCpu->cpum.GstCtx.aGRegs[15]);
1204 ADD_REG64(WHvArm64RegisterX16, pVCpu->cpum.GstCtx.aGRegs[16]);
1205 ADD_REG64(WHvArm64RegisterX17, pVCpu->cpum.GstCtx.aGRegs[17]);
1206 ADD_REG64(WHvArm64RegisterX18, pVCpu->cpum.GstCtx.aGRegs[18]);
1207 ADD_REG64(WHvArm64RegisterX19, pVCpu->cpum.GstCtx.aGRegs[19]);
1208 ADD_REG64(WHvArm64RegisterX20, pVCpu->cpum.GstCtx.aGRegs[20]);
1209 ADD_REG64(WHvArm64RegisterX21, pVCpu->cpum.GstCtx.aGRegs[21]);
1210 ADD_REG64(WHvArm64RegisterX22, pVCpu->cpum.GstCtx.aGRegs[22]);
1211 ADD_REG64(WHvArm64RegisterX23, pVCpu->cpum.GstCtx.aGRegs[23]);
1212 ADD_REG64(WHvArm64RegisterX24, pVCpu->cpum.GstCtx.aGRegs[24]);
1213 ADD_REG64(WHvArm64RegisterX25, pVCpu->cpum.GstCtx.aGRegs[25]);
1214 ADD_REG64(WHvArm64RegisterX26, pVCpu->cpum.GstCtx.aGRegs[26]);
1215 ADD_REG64(WHvArm64RegisterX27, pVCpu->cpum.GstCtx.aGRegs[27]);
1216 ADD_REG64(WHvArm64RegisterX28, pVCpu->cpum.GstCtx.aGRegs[28]);
1217 }
1218 if (fWhat & CPUMCTX_EXTRN_LR)
1219 ADD_REG64(WHvArm64RegisterLr, pVCpu->cpum.GstCtx.aGRegs[30]);
1220 if (fWhat & CPUMCTX_EXTRN_FP)
1221 ADD_REG64(WHvArm64RegisterFp, pVCpu->cpum.GstCtx.aGRegs[29]);
1222 }
1223
1224 /* RIP & Flags */
1225 if (fWhat & CPUMCTX_EXTRN_PC)
1226 ADD_SYSREG64(WHvArm64RegisterPc, pVCpu->cpum.GstCtx.Pc);
1227 if (fWhat & CPUMCTX_EXTRN_PSTATE)
1228 ADD_REG64_RAW(WHvArm64RegisterPstate, pVCpu->cpum.GstCtx.fPState);
1229 if (fWhat & CPUMCTX_EXTRN_SPSR)
1230 ADD_SYSREG64(WHvArm64RegisterSpsrEl1, pVCpu->cpum.GstCtx.Spsr);
1231 if (fWhat & CPUMCTX_EXTRN_ELR)
1232 ADD_SYSREG64(WHvArm64RegisterElrEl1, pVCpu->cpum.GstCtx.Elr);
1233 if (fWhat & CPUMCTX_EXTRN_SP)
1234 {
1235 ADD_SYSREG64(WHvArm64RegisterSpEl0, pVCpu->cpum.GstCtx.aSpReg[0]);
1236 ADD_SYSREG64(WHvArm64RegisterSpEl1, pVCpu->cpum.GstCtx.aSpReg[1]);
1237 }
1238 if (fWhat & CPUMCTX_EXTRN_SCTLR_TCR_TTBR)
1239 {
1240 ADD_SYSREG64(WHvArm64RegisterSctlrEl1, pVCpu->cpum.GstCtx.Sctlr);
1241 ADD_SYSREG64(WHvArm64RegisterTcrEl1, pVCpu->cpum.GstCtx.Tcr);
1242 ADD_SYSREG64(WHvArm64RegisterTtbr0El1, pVCpu->cpum.GstCtx.Ttbr0);
1243 ADD_SYSREG64(WHvArm64RegisterTtbr1El1, pVCpu->cpum.GstCtx.Ttbr1);
1244 }
1245
1246 /* Vector state. */
1247 if (fWhat & CPUMCTX_EXTRN_V0_V31)
1248 {
1249 ADD_REG128(WHvArm64RegisterQ0, pVCpu->cpum.GstCtx.aVRegs[0]);
1250 ADD_REG128(WHvArm64RegisterQ1, pVCpu->cpum.GstCtx.aVRegs[1]);
1251 ADD_REG128(WHvArm64RegisterQ2, pVCpu->cpum.GstCtx.aVRegs[2]);
1252 ADD_REG128(WHvArm64RegisterQ3, pVCpu->cpum.GstCtx.aVRegs[3]);
1253 ADD_REG128(WHvArm64RegisterQ4, pVCpu->cpum.GstCtx.aVRegs[4]);
1254 ADD_REG128(WHvArm64RegisterQ5, pVCpu->cpum.GstCtx.aVRegs[5]);
1255 ADD_REG128(WHvArm64RegisterQ6, pVCpu->cpum.GstCtx.aVRegs[6]);
1256 ADD_REG128(WHvArm64RegisterQ7, pVCpu->cpum.GstCtx.aVRegs[7]);
1257 ADD_REG128(WHvArm64RegisterQ8, pVCpu->cpum.GstCtx.aVRegs[8]);
1258 ADD_REG128(WHvArm64RegisterQ9, pVCpu->cpum.GstCtx.aVRegs[9]);
1259 ADD_REG128(WHvArm64RegisterQ10, pVCpu->cpum.GstCtx.aVRegs[10]);
1260 ADD_REG128(WHvArm64RegisterQ11, pVCpu->cpum.GstCtx.aVRegs[11]);
1261 ADD_REG128(WHvArm64RegisterQ12, pVCpu->cpum.GstCtx.aVRegs[12]);
1262 ADD_REG128(WHvArm64RegisterQ13, pVCpu->cpum.GstCtx.aVRegs[13]);
1263 ADD_REG128(WHvArm64RegisterQ14, pVCpu->cpum.GstCtx.aVRegs[14]);
1264 ADD_REG128(WHvArm64RegisterQ15, pVCpu->cpum.GstCtx.aVRegs[15]);
1265 ADD_REG128(WHvArm64RegisterQ16, pVCpu->cpum.GstCtx.aVRegs[16]);
1266 ADD_REG128(WHvArm64RegisterQ17, pVCpu->cpum.GstCtx.aVRegs[17]);
1267 ADD_REG128(WHvArm64RegisterQ18, pVCpu->cpum.GstCtx.aVRegs[18]);
1268 ADD_REG128(WHvArm64RegisterQ19, pVCpu->cpum.GstCtx.aVRegs[19]);
1269 ADD_REG128(WHvArm64RegisterQ20, pVCpu->cpum.GstCtx.aVRegs[20]);
1270 ADD_REG128(WHvArm64RegisterQ21, pVCpu->cpum.GstCtx.aVRegs[21]);
1271 ADD_REG128(WHvArm64RegisterQ22, pVCpu->cpum.GstCtx.aVRegs[22]);
1272 ADD_REG128(WHvArm64RegisterQ23, pVCpu->cpum.GstCtx.aVRegs[23]);
1273 ADD_REG128(WHvArm64RegisterQ24, pVCpu->cpum.GstCtx.aVRegs[24]);
1274 ADD_REG128(WHvArm64RegisterQ25, pVCpu->cpum.GstCtx.aVRegs[25]);
1275 ADD_REG128(WHvArm64RegisterQ26, pVCpu->cpum.GstCtx.aVRegs[26]);
1276 ADD_REG128(WHvArm64RegisterQ27, pVCpu->cpum.GstCtx.aVRegs[27]);
1277 ADD_REG128(WHvArm64RegisterQ28, pVCpu->cpum.GstCtx.aVRegs[28]);
1278 ADD_REG128(WHvArm64RegisterQ29, pVCpu->cpum.GstCtx.aVRegs[29]);
1279 ADD_REG128(WHvArm64RegisterQ30, pVCpu->cpum.GstCtx.aVRegs[30]);
1280 ADD_REG128(WHvArm64RegisterQ31, pVCpu->cpum.GstCtx.aVRegs[31]);
1281 }
1282
1283 if (fWhat & CPUMCTX_EXTRN_FPCR)
1284 ADD_REG64_RAW(WHvArm64RegisterFpcr, pVCpu->cpum.GstCtx.fpcr);
1285 if (fWhat & CPUMCTX_EXTRN_FPSR)
1286 ADD_REG64_RAW(WHvArm64RegisterFpsr, pVCpu->cpum.GstCtx.fpsr);
1287
1288 /* System registers. */
1289 if (fWhat & CPUMCTX_EXTRN_SYSREG_MISC)
1290 {
1291 ADD_SYSREG64(WHvArm64RegisterVbarEl1, pVCpu->cpum.GstCtx.VBar);
1292 ADD_SYSREG64(WHvArm64RegisterEsrEl1, pVCpu->cpum.GstCtx.Esr);
1293 ADD_SYSREG64(WHvArm64RegisterFarEl1, pVCpu->cpum.GstCtx.Far);
1294 ADD_SYSREG64(WHvArm64RegisterCntkctlEl1, pVCpu->cpum.GstCtx.CntKCtl);
1295 ADD_SYSREG64(WHvArm64RegisterContextidrEl1, pVCpu->cpum.GstCtx.ContextIdr);
1296 ADD_SYSREG64(WHvArm64RegisterCpacrEl1, pVCpu->cpum.GstCtx.Cpacr);
1297 ADD_SYSREG64(WHvArm64RegisterCsselrEl1, pVCpu->cpum.GstCtx.Csselr);
1298 ADD_SYSREG64(WHvArm64RegisterMairEl1, pVCpu->cpum.GstCtx.Mair);
1299 ADD_SYSREG64(WHvArm64RegisterParEl1, pVCpu->cpum.GstCtx.Par);
1300 ADD_SYSREG64(WHvArm64RegisterTpidrroEl0, pVCpu->cpum.GstCtx.TpIdrRoEl0);
1301 ADD_SYSREG64(WHvArm64RegisterTpidrEl0, pVCpu->cpum.GstCtx.aTpIdr[0]);
1302 ADD_SYSREG64(WHvArm64RegisterTpidrEl1, pVCpu->cpum.GstCtx.aTpIdr[1]);
1303 }
1304
1305 if (fWhat & CPUMCTX_EXTRN_SYSREG_DEBUG)
1306 {
1307 for (uint32_t i = 0; i < pVM->nem.s.cBreakpoints; i++)
1308 {
1309 ADD_SYSREG64((WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgbcr0El1 + i), pVCpu->cpum.GstCtx.aBp[i].Ctrl);
1310 ADD_SYSREG64((WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgbvr0El1 + i), pVCpu->cpum.GstCtx.aBp[i].Value);
1311 }
1312
1313 for (uint32_t i = 0; i < pVM->nem.s.cWatchpoints; i++)
1314 {
1315 ADD_SYSREG64((WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgwcr0El1 + i), pVCpu->cpum.GstCtx.aWp[i].Ctrl);
1316 ADD_SYSREG64((WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgwvr0El1 + i), pVCpu->cpum.GstCtx.aWp[i].Value);
1317 }
1318
1319 ADD_SYSREG64(WHvArm64RegisterMdscrEl1, pVCpu->cpum.GstCtx.Mdscr);
1320 }
1321
1322 if (fWhat & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS)
1323 {
1324 ADD_SYSREG64(WHvArm64RegisterApdAKeyHiEl1, pVCpu->cpum.GstCtx.Apda.High);
1325 ADD_SYSREG64(WHvArm64RegisterApdAKeyLoEl1, pVCpu->cpum.GstCtx.Apda.Low);
1326 ADD_SYSREG64(WHvArm64RegisterApdBKeyHiEl1, pVCpu->cpum.GstCtx.Apdb.High);
1327 ADD_SYSREG64(WHvArm64RegisterApdBKeyLoEl1, pVCpu->cpum.GstCtx.Apdb.Low);
1328 ADD_SYSREG64(WHvArm64RegisterApgAKeyHiEl1, pVCpu->cpum.GstCtx.Apga.High);
1329 ADD_SYSREG64(WHvArm64RegisterApgAKeyLoEl1, pVCpu->cpum.GstCtx.Apga.Low);
1330 ADD_SYSREG64(WHvArm64RegisterApiAKeyHiEl1, pVCpu->cpum.GstCtx.Apia.High);
1331 ADD_SYSREG64(WHvArm64RegisterApiAKeyLoEl1, pVCpu->cpum.GstCtx.Apia.Low);
1332 ADD_SYSREG64(WHvArm64RegisterApiBKeyHiEl1, pVCpu->cpum.GstCtx.Apib.High);
1333 ADD_SYSREG64(WHvArm64RegisterApiBKeyLoEl1, pVCpu->cpum.GstCtx.Apib.Low);
1334 }
1335
1336#undef ADD_REG64
1337#undef ADD_REG64_RAW
1338#undef ADD_REG128
1339
1340 /*
1341 * Set the registers.
1342 */
1343 Assert(iReg < RT_ELEMENTS(aValues));
1344 Assert(iReg < RT_ELEMENTS(aenmNames));
1345 HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues);
1346 if (SUCCEEDED(hrc))
1347 {
1348 pVCpu->cpum.GstCtx.fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_NEM;
1349 return VINF_SUCCESS;
1350 }
1351 AssertLogRelMsgFailed(("WHvSetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
1352 pVM->nem.s.hPartition, pVCpu->idCpu, iReg,
1353 hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
1354 return VERR_INTERNAL_ERROR;
1355}
1356
1357
1358NEM_TMPL_STATIC int nemHCWinCopyStateFromHyperV(PVMCC pVM, PVMCPUCC pVCpu, uint64_t fWhat)
1359{
1360 WHV_REGISTER_NAME aenmNames[256];
1361
1362 fWhat &= pVCpu->cpum.GstCtx.fExtrn;
1363 if (!fWhat)
1364 return VINF_SUCCESS;
1365
1366 uintptr_t iReg = 0;
1367
1368 /* GPRs */
1369 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1370 {
1371 if (fWhat & CPUMCTX_EXTRN_X0)
1372 aenmNames[iReg++] = WHvArm64RegisterX0;
1373 if (fWhat & CPUMCTX_EXTRN_X1)
1374 aenmNames[iReg++] = WHvArm64RegisterX1;
1375 if (fWhat & CPUMCTX_EXTRN_X2)
1376 aenmNames[iReg++] = WHvArm64RegisterX2;
1377 if (fWhat & CPUMCTX_EXTRN_X3)
1378 aenmNames[iReg++] = WHvArm64RegisterX3;
1379 if (fWhat & CPUMCTX_EXTRN_X4_X28)
1380 {
1381 aenmNames[iReg++] = WHvArm64RegisterX4;
1382 aenmNames[iReg++] = WHvArm64RegisterX5;
1383 aenmNames[iReg++] = WHvArm64RegisterX6;
1384 aenmNames[iReg++] = WHvArm64RegisterX7;
1385 aenmNames[iReg++] = WHvArm64RegisterX8;
1386 aenmNames[iReg++] = WHvArm64RegisterX9;
1387 aenmNames[iReg++] = WHvArm64RegisterX10;
1388 aenmNames[iReg++] = WHvArm64RegisterX11;
1389 aenmNames[iReg++] = WHvArm64RegisterX12;
1390 aenmNames[iReg++] = WHvArm64RegisterX13;
1391 aenmNames[iReg++] = WHvArm64RegisterX14;
1392 aenmNames[iReg++] = WHvArm64RegisterX15;
1393 aenmNames[iReg++] = WHvArm64RegisterX16;
1394 aenmNames[iReg++] = WHvArm64RegisterX17;
1395 aenmNames[iReg++] = WHvArm64RegisterX18;
1396 aenmNames[iReg++] = WHvArm64RegisterX19;
1397 aenmNames[iReg++] = WHvArm64RegisterX20;
1398 aenmNames[iReg++] = WHvArm64RegisterX21;
1399 aenmNames[iReg++] = WHvArm64RegisterX22;
1400 aenmNames[iReg++] = WHvArm64RegisterX23;
1401 aenmNames[iReg++] = WHvArm64RegisterX24;
1402 aenmNames[iReg++] = WHvArm64RegisterX25;
1403 aenmNames[iReg++] = WHvArm64RegisterX26;
1404 aenmNames[iReg++] = WHvArm64RegisterX27;
1405 aenmNames[iReg++] = WHvArm64RegisterX28;
1406 }
1407 if (fWhat & CPUMCTX_EXTRN_LR)
1408 aenmNames[iReg++] = WHvArm64RegisterLr;
1409 if (fWhat & CPUMCTX_EXTRN_FP)
1410 aenmNames[iReg++] = WHvArm64RegisterFp;
1411 }
1412
1413 /* PC & Flags */
1414 if (fWhat & CPUMCTX_EXTRN_PC)
1415 aenmNames[iReg++] = WHvArm64RegisterPc;
1416 if (fWhat & CPUMCTX_EXTRN_PSTATE)
1417 aenmNames[iReg++] = WHvArm64RegisterPstate;
1418 if (fWhat & CPUMCTX_EXTRN_SPSR)
1419 aenmNames[iReg++] = WHvArm64RegisterSpsrEl1;
1420 if (fWhat & CPUMCTX_EXTRN_ELR)
1421 aenmNames[iReg++] = WHvArm64RegisterElrEl1;
1422 if (fWhat & CPUMCTX_EXTRN_SP)
1423 {
1424 aenmNames[iReg++] = WHvArm64RegisterSpEl0;
1425 aenmNames[iReg++] = WHvArm64RegisterSpEl1;
1426 }
1427 if (fWhat & CPUMCTX_EXTRN_SCTLR_TCR_TTBR)
1428 {
1429 aenmNames[iReg++] = WHvArm64RegisterSctlrEl1;
1430 aenmNames[iReg++] = WHvArm64RegisterTcrEl1;
1431 aenmNames[iReg++] = WHvArm64RegisterTtbr0El1;
1432 aenmNames[iReg++] = WHvArm64RegisterTtbr1El1;
1433 }
1434
1435 /* Vector state. */
1436 if (fWhat & CPUMCTX_EXTRN_V0_V31)
1437 {
1438 aenmNames[iReg++] = WHvArm64RegisterQ0;
1439 aenmNames[iReg++] = WHvArm64RegisterQ1;
1440 aenmNames[iReg++] = WHvArm64RegisterQ2;
1441 aenmNames[iReg++] = WHvArm64RegisterQ3;
1442 aenmNames[iReg++] = WHvArm64RegisterQ4;
1443 aenmNames[iReg++] = WHvArm64RegisterQ5;
1444 aenmNames[iReg++] = WHvArm64RegisterQ6;
1445 aenmNames[iReg++] = WHvArm64RegisterQ7;
1446 aenmNames[iReg++] = WHvArm64RegisterQ8;
1447 aenmNames[iReg++] = WHvArm64RegisterQ9;
1448 aenmNames[iReg++] = WHvArm64RegisterQ10;
1449 aenmNames[iReg++] = WHvArm64RegisterQ11;
1450 aenmNames[iReg++] = WHvArm64RegisterQ12;
1451 aenmNames[iReg++] = WHvArm64RegisterQ13;
1452 aenmNames[iReg++] = WHvArm64RegisterQ14;
1453 aenmNames[iReg++] = WHvArm64RegisterQ15;
1454
1455 aenmNames[iReg++] = WHvArm64RegisterQ16;
1456 aenmNames[iReg++] = WHvArm64RegisterQ17;
1457 aenmNames[iReg++] = WHvArm64RegisterQ18;
1458 aenmNames[iReg++] = WHvArm64RegisterQ19;
1459 aenmNames[iReg++] = WHvArm64RegisterQ20;
1460 aenmNames[iReg++] = WHvArm64RegisterQ21;
1461 aenmNames[iReg++] = WHvArm64RegisterQ22;
1462 aenmNames[iReg++] = WHvArm64RegisterQ23;
1463 aenmNames[iReg++] = WHvArm64RegisterQ24;
1464 aenmNames[iReg++] = WHvArm64RegisterQ25;
1465 aenmNames[iReg++] = WHvArm64RegisterQ26;
1466 aenmNames[iReg++] = WHvArm64RegisterQ27;
1467 aenmNames[iReg++] = WHvArm64RegisterQ28;
1468 aenmNames[iReg++] = WHvArm64RegisterQ29;
1469 aenmNames[iReg++] = WHvArm64RegisterQ30;
1470 aenmNames[iReg++] = WHvArm64RegisterQ31;
1471 }
1472 if (fWhat & CPUMCTX_EXTRN_FPCR)
1473 aenmNames[iReg++] = WHvArm64RegisterFpcr;
1474 if (fWhat & CPUMCTX_EXTRN_FPSR)
1475 aenmNames[iReg++] = WHvArm64RegisterFpsr;
1476
1477 /* System registers. */
1478 if (fWhat & CPUMCTX_EXTRN_SYSREG_MISC)
1479 {
1480 aenmNames[iReg++] = WHvArm64RegisterVbarEl1;
1481 aenmNames[iReg++] = WHvArm64RegisterEsrEl1;
1482 aenmNames[iReg++] = WHvArm64RegisterFarEl1;
1483 aenmNames[iReg++] = WHvArm64RegisterCntkctlEl1;
1484 aenmNames[iReg++] = WHvArm64RegisterContextidrEl1;
1485 aenmNames[iReg++] = WHvArm64RegisterCpacrEl1;
1486 aenmNames[iReg++] = WHvArm64RegisterCsselrEl1;
1487 aenmNames[iReg++] = WHvArm64RegisterMairEl1;
1488 aenmNames[iReg++] = WHvArm64RegisterParEl1;
1489 aenmNames[iReg++] = WHvArm64RegisterTpidrroEl0;
1490 aenmNames[iReg++] = WHvArm64RegisterTpidrEl0;
1491 aenmNames[iReg++] = WHvArm64RegisterTpidrEl1;
1492 }
1493
1494 if (fWhat & CPUMCTX_EXTRN_SYSREG_DEBUG)
1495 {
1496 /* Hyper-V doesn't allow syncing debug break-/watchpoint registers which aren't there. */
1497 for (uint32_t i = 0; i < pVM->nem.s.cBreakpoints; i++)
1498 {
1499 aenmNames[iReg++] = (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgbcr0El1 + i);
1500 aenmNames[iReg++] = (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgbvr0El1 + i);
1501 }
1502
1503 for (uint32_t i = 0; i < pVM->nem.s.cWatchpoints; i++)
1504 {
1505 aenmNames[iReg++] = (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgwcr0El1 + i);
1506 aenmNames[iReg++] = (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgwvr0El1 + i);
1507 }
1508
1509 aenmNames[iReg++] = WHvArm64RegisterMdscrEl1;
1510 }
1511
1512 if (fWhat & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS)
1513 {
1514 aenmNames[iReg++] = WHvArm64RegisterApdAKeyHiEl1;
1515 aenmNames[iReg++] = WHvArm64RegisterApdAKeyLoEl1;
1516 aenmNames[iReg++] = WHvArm64RegisterApdBKeyHiEl1;
1517 aenmNames[iReg++] = WHvArm64RegisterApdBKeyLoEl1;
1518 aenmNames[iReg++] = WHvArm64RegisterApgAKeyHiEl1;
1519 aenmNames[iReg++] = WHvArm64RegisterApgAKeyLoEl1;
1520 aenmNames[iReg++] = WHvArm64RegisterApiAKeyHiEl1;
1521 aenmNames[iReg++] = WHvArm64RegisterApiAKeyLoEl1;
1522 aenmNames[iReg++] = WHvArm64RegisterApiBKeyHiEl1;
1523 aenmNames[iReg++] = WHvArm64RegisterApiBKeyLoEl1;
1524 }
1525
1526 size_t const cRegs = iReg;
1527 Assert(cRegs < RT_ELEMENTS(aenmNames));
1528
1529 /*
1530 * Get the registers.
1531 */
1532 WHV_REGISTER_VALUE aValues[256];
1533 RT_ZERO(aValues);
1534 Assert(RT_ELEMENTS(aValues) >= cRegs);
1535 Assert(RT_ELEMENTS(aenmNames) >= cRegs);
1536 HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, (uint32_t)cRegs, aValues);
1537 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1538 ("WHvGetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
1539 pVM->nem.s.hPartition, pVCpu->idCpu, cRegs, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1540 , VERR_NEM_GET_REGISTERS_FAILED);
1541
1542 iReg = 0;
1543#define GET_REG64(a_DstVar, a_enmName) do { \
1544 Assert(aenmNames[iReg] == (a_enmName)); \
1545 (a_DstVar).x = aValues[iReg].Reg64; \
1546 iReg++; \
1547 } while (0)
1548#define GET_REG64_RAW(a_DstVar, a_enmName) do { \
1549 Assert(aenmNames[iReg] == (a_enmName)); \
1550 (a_DstVar) = aValues[iReg].Reg64; \
1551 iReg++; \
1552 } while (0)
1553#define GET_SYSREG64(a_DstVar, a_enmName) do { \
1554 Assert(aenmNames[iReg] == (a_enmName)); \
1555 (a_DstVar).u64 = aValues[iReg].Reg64; \
1556 iReg++; \
1557 } while (0)
1558#define GET_REG128(a_DstVar, a_enmName) do { \
1559 Assert(aenmNames[iReg] == a_enmName); \
1560 (a_DstVar).au64[0] = aValues[iReg].Reg128.Low64; \
1561 (a_DstVar).au64[1] = aValues[iReg].Reg128.High64; \
1562 iReg++; \
1563 } while (0)
1564
1565 /* GPRs */
1566 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1567 {
1568 if (fWhat & CPUMCTX_EXTRN_X0)
1569 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[0], WHvArm64RegisterX0);
1570 if (fWhat & CPUMCTX_EXTRN_X1)
1571 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[1], WHvArm64RegisterX1);
1572 if (fWhat & CPUMCTX_EXTRN_X2)
1573 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[2], WHvArm64RegisterX2);
1574 if (fWhat & CPUMCTX_EXTRN_X3)
1575 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[3], WHvArm64RegisterX3);
1576 if (fWhat & CPUMCTX_EXTRN_X4_X28)
1577 {
1578 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[4], WHvArm64RegisterX4);
1579 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[5], WHvArm64RegisterX5);
1580 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[6], WHvArm64RegisterX6);
1581 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[7], WHvArm64RegisterX7);
1582 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[8], WHvArm64RegisterX8);
1583 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[9], WHvArm64RegisterX9);
1584 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[10], WHvArm64RegisterX10);
1585 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[11], WHvArm64RegisterX11);
1586 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[12], WHvArm64RegisterX12);
1587 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[13], WHvArm64RegisterX13);
1588 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[14], WHvArm64RegisterX14);
1589 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[15], WHvArm64RegisterX15);
1590 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[16], WHvArm64RegisterX16);
1591 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[17], WHvArm64RegisterX17);
1592 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[18], WHvArm64RegisterX18);
1593 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[19], WHvArm64RegisterX19);
1594 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[20], WHvArm64RegisterX20);
1595 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[21], WHvArm64RegisterX21);
1596 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[22], WHvArm64RegisterX22);
1597 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[23], WHvArm64RegisterX23);
1598 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[24], WHvArm64RegisterX24);
1599 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[25], WHvArm64RegisterX25);
1600 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[26], WHvArm64RegisterX26);
1601 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[27], WHvArm64RegisterX27);
1602 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[28], WHvArm64RegisterX28);
1603 }
1604 if (fWhat & CPUMCTX_EXTRN_LR)
1605 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[30], WHvArm64RegisterLr);
1606 if (fWhat & CPUMCTX_EXTRN_FP)
1607 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[29], WHvArm64RegisterFp);
1608 }
1609
1610 /* RIP & Flags */
1611 if (fWhat & CPUMCTX_EXTRN_PC)
1612 GET_REG64_RAW(pVCpu->cpum.GstCtx.Pc.u64, WHvArm64RegisterPc);
1613 if (fWhat & CPUMCTX_EXTRN_PSTATE)
1614 GET_REG64_RAW(pVCpu->cpum.GstCtx.fPState, WHvArm64RegisterPstate);
1615 if (fWhat & CPUMCTX_EXTRN_SPSR)
1616 GET_SYSREG64(pVCpu->cpum.GstCtx.Spsr, WHvArm64RegisterSpsrEl1);
1617 if (fWhat & CPUMCTX_EXTRN_ELR)
1618 GET_SYSREG64(pVCpu->cpum.GstCtx.Elr, WHvArm64RegisterElrEl1);
1619 if (fWhat & CPUMCTX_EXTRN_SP)
1620 {
1621 GET_SYSREG64(pVCpu->cpum.GstCtx.aSpReg[0], WHvArm64RegisterSpEl0);
1622 GET_SYSREG64(pVCpu->cpum.GstCtx.aSpReg[1], WHvArm64RegisterSpEl1);
1623 }
1624 if (fWhat & CPUMCTX_EXTRN_SCTLR_TCR_TTBR)
1625 {
1626 GET_SYSREG64(pVCpu->cpum.GstCtx.Sctlr, WHvArm64RegisterSctlrEl1);
1627 GET_SYSREG64(pVCpu->cpum.GstCtx.Tcr, WHvArm64RegisterTcrEl1);
1628 GET_SYSREG64(pVCpu->cpum.GstCtx.Ttbr0, WHvArm64RegisterTtbr0El1);
1629 GET_SYSREG64(pVCpu->cpum.GstCtx.Ttbr1, WHvArm64RegisterTtbr1El1);
1630 }
1631
1632 /* Vector state. */
1633 if (fWhat & CPUMCTX_EXTRN_V0_V31)
1634 {
1635 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[0], WHvArm64RegisterQ0);
1636 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[1], WHvArm64RegisterQ1);
1637 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[2], WHvArm64RegisterQ2);
1638 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[3], WHvArm64RegisterQ3);
1639 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[4], WHvArm64RegisterQ4);
1640 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[5], WHvArm64RegisterQ5);
1641 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[6], WHvArm64RegisterQ6);
1642 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[7], WHvArm64RegisterQ7);
1643 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[8], WHvArm64RegisterQ8);
1644 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[9], WHvArm64RegisterQ9);
1645 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[10], WHvArm64RegisterQ10);
1646 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[11], WHvArm64RegisterQ11);
1647 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[12], WHvArm64RegisterQ12);
1648 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[13], WHvArm64RegisterQ13);
1649 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[14], WHvArm64RegisterQ14);
1650 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[15], WHvArm64RegisterQ15);
1651
1652 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[16], WHvArm64RegisterQ16);
1653 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[17], WHvArm64RegisterQ17);
1654 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[18], WHvArm64RegisterQ18);
1655 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[19], WHvArm64RegisterQ19);
1656 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[20], WHvArm64RegisterQ20);
1657 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[21], WHvArm64RegisterQ21);
1658 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[22], WHvArm64RegisterQ22);
1659 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[23], WHvArm64RegisterQ23);
1660 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[24], WHvArm64RegisterQ24);
1661 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[25], WHvArm64RegisterQ25);
1662 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[26], WHvArm64RegisterQ26);
1663 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[27], WHvArm64RegisterQ27);
1664 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[28], WHvArm64RegisterQ28);
1665 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[29], WHvArm64RegisterQ29);
1666 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[30], WHvArm64RegisterQ30);
1667 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[31], WHvArm64RegisterQ31);
1668 }
1669 if (fWhat & CPUMCTX_EXTRN_FPCR)
1670 GET_REG64_RAW(pVCpu->cpum.GstCtx.fpcr, WHvArm64RegisterFpcr);
1671 if (fWhat & CPUMCTX_EXTRN_FPSR)
1672 GET_REG64_RAW(pVCpu->cpum.GstCtx.fpsr, WHvArm64RegisterFpsr);
1673
1674 /* System registers. */
1675 if (fWhat & CPUMCTX_EXTRN_SYSREG_MISC)
1676 {
1677 GET_SYSREG64(pVCpu->cpum.GstCtx.VBar, WHvArm64RegisterVbarEl1);
1678 GET_SYSREG64(pVCpu->cpum.GstCtx.Esr, WHvArm64RegisterEsrEl1);
1679 GET_SYSREG64(pVCpu->cpum.GstCtx.Far, WHvArm64RegisterFarEl1);
1680 GET_SYSREG64(pVCpu->cpum.GstCtx.CntKCtl, WHvArm64RegisterCntkctlEl1);
1681 GET_SYSREG64(pVCpu->cpum.GstCtx.ContextIdr, WHvArm64RegisterContextidrEl1);
1682 GET_SYSREG64(pVCpu->cpum.GstCtx.Cpacr, WHvArm64RegisterCpacrEl1);
1683 GET_SYSREG64(pVCpu->cpum.GstCtx.Csselr, WHvArm64RegisterCsselrEl1);
1684 GET_SYSREG64(pVCpu->cpum.GstCtx.Mair, WHvArm64RegisterMairEl1);
1685 GET_SYSREG64(pVCpu->cpum.GstCtx.Par, WHvArm64RegisterParEl1);
1686 GET_SYSREG64(pVCpu->cpum.GstCtx.TpIdrRoEl0, WHvArm64RegisterTpidrroEl0);
1687 GET_SYSREG64(pVCpu->cpum.GstCtx.aTpIdr[0], WHvArm64RegisterTpidrEl0);
1688 GET_SYSREG64(pVCpu->cpum.GstCtx.aTpIdr[1], WHvArm64RegisterTpidrEl1);
1689 }
1690
1691 if (fWhat & CPUMCTX_EXTRN_SYSREG_DEBUG)
1692 {
1693 for (uint32_t i = 0; i < pVM->nem.s.cBreakpoints; i++)
1694 {
1695 GET_SYSREG64(pVCpu->cpum.GstCtx.aBp[i].Ctrl, (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgbcr0El1 + i));
1696 GET_SYSREG64(pVCpu->cpum.GstCtx.aBp[i].Value, (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgbvr0El1 + i));
1697 }
1698
1699 for (uint32_t i = 0; i < pVM->nem.s.cWatchpoints; i++)
1700 {
1701 GET_SYSREG64(pVCpu->cpum.GstCtx.aWp[i].Ctrl, (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgwcr0El1 + i));
1702 GET_SYSREG64(pVCpu->cpum.GstCtx.aWp[i].Value, (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgwvr0El1 + i));
1703 }
1704
1705 GET_SYSREG64(pVCpu->cpum.GstCtx.Mdscr, WHvArm64RegisterMdscrEl1);
1706 }
1707
1708 if (fWhat & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS)
1709 {
1710 GET_SYSREG64(pVCpu->cpum.GstCtx.Apda.High, WHvArm64RegisterApdAKeyHiEl1);
1711 GET_SYSREG64(pVCpu->cpum.GstCtx.Apda.Low, WHvArm64RegisterApdAKeyLoEl1);
1712 GET_SYSREG64(pVCpu->cpum.GstCtx.Apdb.High, WHvArm64RegisterApdBKeyHiEl1);
1713 GET_SYSREG64(pVCpu->cpum.GstCtx.Apdb.Low, WHvArm64RegisterApdBKeyLoEl1);
1714 GET_SYSREG64(pVCpu->cpum.GstCtx.Apga.High, WHvArm64RegisterApgAKeyHiEl1);
1715 GET_SYSREG64(pVCpu->cpum.GstCtx.Apga.Low, WHvArm64RegisterApgAKeyLoEl1);
1716 GET_SYSREG64(pVCpu->cpum.GstCtx.Apia.High, WHvArm64RegisterApiAKeyHiEl1);
1717 GET_SYSREG64(pVCpu->cpum.GstCtx.Apia.Low, WHvArm64RegisterApiAKeyLoEl1);
1718 GET_SYSREG64(pVCpu->cpum.GstCtx.Apib.High, WHvArm64RegisterApiBKeyHiEl1);
1719 GET_SYSREG64(pVCpu->cpum.GstCtx.Apib.Low, WHvArm64RegisterApiBKeyLoEl1);
1720 }
1721
1722 /* Almost done, just update extrn flags. */
1723 pVCpu->cpum.GstCtx.fExtrn &= ~fWhat;
1724 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
1725 pVCpu->cpum.GstCtx.fExtrn = 0;
1726
1727 return VINF_SUCCESS;
1728}
1729
1730
1731/**
1732 * Interface for importing state on demand (used by IEM).
1733 *
1734 * @returns VBox status code.
1735 * @param pVCpu The cross context CPU structure.
1736 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
1737 */
1738VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
1739{
1740 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);
1741 return nemHCWinCopyStateFromHyperV(pVCpu->pVMR3, pVCpu, fWhat);
1742}
1743
1744
1745/**
1746 * Query the CPU tick counter and optionally the TSC_AUX MSR value.
1747 *
1748 * @returns VBox status code.
1749 * @param pVCpu The cross context CPU structure.
1750 * @param pcTicks Where to return the CPU tick count.
1751 * @param puAux Where to return the TSC_AUX register value.
1752 */
VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPUCC pVCpu, uint64_t *pcTicks, uint32_t *puAux)
{
    STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick);

    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
    AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);

#pragma message("NEMHCQueryCpuTick: Implement it!")
#if 0 /** @todo */
    /* NOTE(review): The disabled code below is the x86 implementation
       (WHvX64RegisterTsc/TscAux) kept for reference; an ARMv8 counter
       variant still needs to be written. */
    /* Call the official API. */
    WHV_REGISTER_NAME aenmNames[2] = { WHvX64RegisterTsc, WHvX64RegisterTscAux };
    WHV_REGISTER_VALUE aValues[2] = { { {0, 0} }, { {0, 0} } };
    Assert(RT_ELEMENTS(aenmNames) == RT_ELEMENTS(aValues));
    HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, 2, aValues);
    AssertLogRelMsgReturn(SUCCEEDED(hrc),
                          ("WHvGetVirtualProcessorRegisters(%p, %u,{tsc,tsc_aux},2,) -> %Rhrc (Last=%#x/%u)\n",
                           pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
                          , VERR_NEM_GET_REGISTERS_FAILED);
    *pcTicks = aValues[0].Reg64;
    if (puAux)
        *puAux = pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_TSC_AUX ? aValues[1].Reg64 : CPUMGetGuestTscAux(pVCpu);
#else
    /* NOTE(review): Not implemented yet - *pcTicks and *puAux are left
       untouched while VINF_SUCCESS is returned; callers currently see
       whatever was in the output buffers before the call. */
    RT_NOREF(pVCpu, pcTicks, puAux);
#endif
    return VINF_SUCCESS;
}
1780
1781
1782/**
1783 * Resumes CPU clock (TSC) on all virtual CPUs.
1784 *
1785 * This is called by TM when the VM is started, restored, resumed or similar.
1786 *
1787 * @returns VBox status code.
1788 * @param pVM The cross context VM structure.
1789 * @param pVCpu The cross context CPU structure of the calling EMT.
1790 * @param uPausedTscValue The TSC value at the time of pausing.
1791 */
VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uPausedTscValue)
{
    VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
    AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);

    /*
     * Call the official API to do the job.
     */
    if (pVM->cCpus > 1)
        RTThreadYield(); /* Try decrease the chance that we get rescheduled in the middle. */

#pragma message("NEMHCResumeCpuTickOnAll: Implement it!")
#if 0 /** @todo */
    /* NOTE(review): Disabled x86 (WHvX64RegisterTsc) implementation kept for
       reference; an ARMv8 counter variant still needs to be written. */
    /* Start with the first CPU. */
    WHV_REGISTER_NAME enmName = WHvX64RegisterTsc;
    WHV_REGISTER_VALUE Value = { {0, 0} };
    Value.Reg64 = uPausedTscValue;
    uint64_t const uFirstTsc = ASMReadTSC();
    HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, 0 /*iCpu*/, &enmName, 1, &Value);
    AssertLogRelMsgReturn(SUCCEEDED(hrc),
                          ("WHvSetVirtualProcessorRegisters(%p, 0,{tsc},2,%#RX64) -> %Rhrc (Last=%#x/%u)\n",
                           pVM->nem.s.hPartition, uPausedTscValue, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
                          , VERR_NEM_SET_TSC);

    /* Do the other CPUs, adjusting for elapsed TSC and keeping finger crossed
       that we don't introduce too much drift here. */
    for (VMCPUID iCpu = 1; iCpu < pVM->cCpus; iCpu++)
    {
        Assert(enmName == WHvX64RegisterTsc);
        const uint64_t offDelta = (ASMReadTSC() - uFirstTsc);
        Value.Reg64 = uPausedTscValue + offDelta;
        hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, iCpu, &enmName, 1, &Value);
        AssertLogRelMsgReturn(SUCCEEDED(hrc),
                              ("WHvSetVirtualProcessorRegisters(%p, 0,{tsc},2,%#RX64 + %#RX64) -> %Rhrc (Last=%#x/%u)\n",
                               pVM->nem.s.hPartition, iCpu, uPausedTscValue, offDelta, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
                              , VERR_NEM_SET_TSC);
    }
#else
    /* NOTE(review): Not implemented yet - uPausedTscValue is currently
       ignored and the call is a no-op besides the yield above. */
    RT_NOREF(uPausedTscValue);
#endif

    return VINF_SUCCESS;
}
1835
1836
1837#ifdef LOG_ENABLED
1838/**
1839 * Logs the current CPU state.
1840 */
1841static void nemR3WinLogState(PVMCC pVM, PVMCPUCC pVCpu)
1842{
1843 if (LogIs3Enabled())
1844 {
1845 char szRegs[4096];
1846 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
1847 "x0=%016VR{x0} x1=%016VR{x1} x2=%016VR{x2} x3=%016VR{x3}\n"
1848 "x4=%016VR{x4} x5=%016VR{x5} x6=%016VR{x6} x7=%016VR{x7}\n"
1849 "x8=%016VR{x8} x9=%016VR{x9} x10=%016VR{x10} x11=%016VR{x11}\n"
1850 "x12=%016VR{x12} x13=%016VR{x13} x14=%016VR{x14} x15=%016VR{x15}\n"
1851 "x16=%016VR{x16} x17=%016VR{x17} x18=%016VR{x18} x19=%016VR{x19}\n"
1852 "x20=%016VR{x20} x21=%016VR{x21} x22=%016VR{x22} x23=%016VR{x23}\n"
1853 "x24=%016VR{x24} x25=%016VR{x25} x26=%016VR{x26} x27=%016VR{x27}\n"
1854 "x28=%016VR{x28} x29=%016VR{x29} x30=%016VR{x30}\n"
1855 "pc=%016VR{pc} pstate=%016VR{pstate}\n"
1856 "sp_el0=%016VR{sp_el0} sp_el1=%016VR{sp_el1} elr_el1=%016VR{elr_el1}\n"
1857 "sctlr_el1=%016VR{sctlr_el1} tcr_el1=%016VR{tcr_el1}\n"
1858 "ttbr0_el1=%016VR{ttbr0_el1} ttbr1_el1=%016VR{ttbr1_el1}\n"
1859 "vbar_el1=%016VR{vbar_el1}\n"
1860 );
1861 char szInstr[256]; RT_ZERO(szInstr);
1862 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
1863 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
1864 szInstr, sizeof(szInstr), NULL);
1865 Log3(("%s%s\n", szRegs, szInstr));
1866 }
1867}
1868#endif /* LOG_ENABLED */
1869
1870
1871/**
1872 * Copies register state from the (common) exit context.
1873 *
1874 * ASSUMES no state copied yet.
1875 *
1876 * @param pVCpu The cross context per CPU structure.
1877 * @param pMsgHdr The common message header.
1878 */
1879DECLINLINE(void) nemR3WinCopyStateFromArmHeader(PVMCPUCC pVCpu, WHV_INTERCEPT_MESSAGE_HEADER const *pMsgHdr)
1880{
1881#ifdef LOG_ENABLED /* When state logging is enabled the state is synced completely upon VM exit. */
1882 if (!LogIs3Enabled())
1883#endif
1884 Assert( (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_PSTATE))
1885 == (CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_PSTATE));
1886
1887 pVCpu->cpum.GstCtx.Pc.u64 = pMsgHdr->Pc;
1888 pVCpu->cpum.GstCtx.fPState = pMsgHdr->Cpsr;
1889
1890 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_PSTATE);
1891}
1892
1893
1894/**
1895 * State to pass between nemHCWinHandleMemoryAccess / nemR3WinWHvHandleMemoryAccess
1896 * and nemHCWinHandleMemoryAccessPageCheckerCallback.
1897 */
1898typedef struct NEMHCWINHMACPCCSTATE
1899{
1900 /** Input: Write access. */
1901 bool fWriteAccess;
1902 /** Output: Set if we did something. */
1903 bool fDidSomething;
1904 /** Output: Set it we should resume. */
1905 bool fCanResume;
1906} NEMHCWINHMACPCCSTATE;
1907
1908/**
1909 * @callback_method_impl{FNPGMPHYSNEMCHECKPAGE,
1910 * Worker for nemR3WinHandleMemoryAccess; pvUser points to a
1911 * NEMHCWINHMACPCCSTATE structure. }
1912 */
1913NEM_TMPL_STATIC DECLCALLBACK(int)
1914nemHCWinHandleMemoryAccessPageCheckerCallback(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
1915{
1916 NEMHCWINHMACPCCSTATE *pState = (NEMHCWINHMACPCCSTATE *)pvUser;
1917 pState->fDidSomething = false;
1918 pState->fCanResume = false;
1919
1920 /* If A20 is disabled, we may need to make another query on the masked
1921 page to get the correct protection information. */
1922 uint8_t u2State = pInfo->u2NemState;
1923 RTGCPHYS GCPhysSrc = GCPhys;
1924
1925 /*
1926 * Consolidate current page state with actual page protection and access type.
1927 * We don't really consider downgrades here, as they shouldn't happen.
1928 */
1929 int rc;
1930 switch (u2State)
1931 {
1932 case NEM_WIN_PAGE_STATE_UNMAPPED:
1933 case NEM_WIN_PAGE_STATE_NOT_SET:
1934 if (pInfo->fNemProt == NEM_PAGE_PROT_NONE)
1935 {
1936 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #1\n", GCPhys));
1937 return VINF_SUCCESS;
1938 }
1939
1940 /* Don't bother remapping it if it's a write request to a non-writable page. */
1941 if ( pState->fWriteAccess
1942 && !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE))
1943 {
1944 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #1w\n", GCPhys));
1945 return VINF_SUCCESS;
1946 }
1947
1948 /* Map the page. */
1949 rc = nemHCNativeSetPhysPage(pVM,
1950 pVCpu,
1951 GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
1952 GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
1953 pInfo->fNemProt,
1954 &u2State,
1955 true /*fBackingState*/);
1956 pInfo->u2NemState = u2State;
1957 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - synced => %s + %Rrc\n",
1958 GCPhys, g_apszPageStates[u2State], rc));
1959 pState->fDidSomething = true;
1960 pState->fCanResume = true;
1961 return rc;
1962
1963 case NEM_WIN_PAGE_STATE_READABLE:
1964 if ( !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1965 && (pInfo->fNemProt & (NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE)))
1966 {
1967 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #2\n", GCPhys));
1968 return VINF_SUCCESS;
1969 }
1970
1971 break;
1972
1973 case NEM_WIN_PAGE_STATE_WRITABLE:
1974 if (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1975 {
1976 if (pInfo->u2OldNemState == NEM_WIN_PAGE_STATE_WRITABLE)
1977 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #3a\n", GCPhys));
1978 else
1979 {
1980 pState->fCanResume = true;
1981 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #3b (%s -> %s)\n",
1982 GCPhys, g_apszPageStates[pInfo->u2OldNemState], g_apszPageStates[u2State]));
1983 }
1984 return VINF_SUCCESS;
1985 }
1986 break;
1987
1988 default:
1989 AssertLogRelMsgFailedReturn(("u2State=%#x\n", u2State), VERR_NEM_IPE_4);
1990 }
1991
1992 /*
1993 * Unmap and restart the instruction.
1994 * If this fails, which it does every so often, just unmap everything for now.
1995 */
1996 /** @todo figure out whether we mess up the state or if it's WHv. */
1997 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
1998 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
1999 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
2000 if (SUCCEEDED(hrc))
2001 {
2002 pState->fDidSomething = true;
2003 pState->fCanResume = true;
2004 pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
2005 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
2006 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
2007 Log5(("NEM GPA unmapped/exit: %RGp (was %s, cMappedPages=%u)\n", GCPhys, g_apszPageStates[u2State], cMappedPages));
2008 return VINF_SUCCESS;
2009 }
2010 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
2011 LogRel(("nemHCWinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp %s hrc=%Rhrc (%#x)\n",
2012 GCPhys, g_apszPageStates[u2State], hrc, hrc));
2013 return VERR_NEM_UNMAP_PAGES_FAILED;
2014}
2015
2016
2017/**
2018 * Returns the byte size from the given access SAS value.
2019 *
2020 * @returns Number of bytes to transfer.
2021 * @param uSas The SAS value to convert.
2022 */
2023DECLINLINE(size_t) nemR3WinGetByteCountFromSas(uint8_t uSas)
2024{
2025 switch (uSas)
2026 {
2027 case ARMV8_EC_ISS_DATA_ABRT_SAS_BYTE: return sizeof(uint8_t);
2028 case ARMV8_EC_ISS_DATA_ABRT_SAS_HALFWORD: return sizeof(uint16_t);
2029 case ARMV8_EC_ISS_DATA_ABRT_SAS_WORD: return sizeof(uint32_t);
2030 case ARMV8_EC_ISS_DATA_ABRT_SAS_DWORD: return sizeof(uint64_t);
2031 default:
2032 AssertReleaseFailed();
2033 }
2034
2035 return 0;
2036}
2037
2038
2039/**
2040 * Sets the given general purpose register to the given value.
2041 *
2042 * @param pVCpu The cross context virtual CPU structure of the
2043 * calling EMT.
2044 * @param uReg The register index.
2045 * @param f64BitReg Flag whether to operate on a 64-bit or 32-bit register.
2046 * @param fSignExtend Flag whether to sign extend the value.
2047 * @param u64Val The value.
2048 */
2049DECLINLINE(void) nemR3WinSetGReg(PVMCPU pVCpu, uint8_t uReg, bool f64BitReg, bool fSignExtend, uint64_t u64Val)
2050{
2051 AssertReturnVoid(uReg < 31);
2052
2053 if (f64BitReg)
2054 pVCpu->cpum.GstCtx.aGRegs[uReg].x = fSignExtend ? (int64_t)u64Val : u64Val;
2055 else
2056 pVCpu->cpum.GstCtx.aGRegs[uReg].x = (uint64_t)(fSignExtend ? (int32_t)u64Val : (uint32_t)u64Val);
2057
2058 /* Mark the register as not extern anymore. */
2059 switch (uReg)
2060 {
2061 case 0:
2062 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X0;
2063 break;
2064 case 1:
2065 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X1;
2066 break;
2067 case 2:
2068 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X2;
2069 break;
2070 case 3:
2071 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X3;
2072 break;
2073 default:
2074 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_X4_X28));
2075 /** @todo We need to import all missing registers in order to clear this flag (or just set it in HV from here). */
2076 }
2077}
2078
2079
2080/**
2081 * Gets the given general purpose register and returns the value.
2082 *
2083 * @returns Value from the given register.
2084 * @param pVCpu The cross context virtual CPU structure of the
2085 * calling EMT.
2086 * @param uReg The register index.
2087 */
2088DECLINLINE(uint64_t) nemR3WinGetGReg(PVMCPU pVCpu, uint8_t uReg)
2089{
2090 AssertReturn(uReg <= ARMV8_A64_REG_XZR, 0);
2091
2092 if (uReg == ARMV8_A64_REG_XZR)
2093 return 0;
2094
2095 /** @todo Import the register if extern. */
2096 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_GPRS_MASK));
2097
2098 return pVCpu->cpum.GstCtx.aGRegs[uReg].x;
2099}
2100
2101
2102/**
2103 * Deals with memory access exits (WHvRunVpExitReasonMemoryAccess).
2104 *
2105 * @returns Strict VBox status code.
2106 * @param pVM The cross context VM structure.
2107 * @param pVCpu The cross context per CPU structure.
2108 * @param pExit The VM exit information to handle.
2109 * @sa nemHCWinHandleMessageMemory
2110 */
2111NEM_TMPL_STATIC VBOXSTRICTRC
2112nemR3WinHandleExitMemory(PVMCC pVM, PVMCPUCC pVCpu, MY_WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2113{
2114 uint64_t const uHostTsc = ASMReadTSC();
2115 Assert(pExit->MemoryAccess.Header.InterceptAccessType != 3);
2116
2117 /*
2118 * Ask PGM for information about the given GCPhys. We need to check if we're
2119 * out of sync first.
2120 */
2121 WHV_INTERCEPT_MESSAGE_HEADER const *pHdr = &pExit->MemoryAccess.Header;
2122 NEMHCWINHMACPCCSTATE State = { pExit->MemoryAccess.Header.InterceptAccessType == WHvMemoryAccessWrite, false, false };
2123 PGMPHYSNEMPAGEINFO Info;
2124 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pExit->MemoryAccess.Gpa, State.fWriteAccess, &Info,
2125 nemHCWinHandleMemoryAccessPageCheckerCallback, &State);
2126 if (RT_SUCCESS(rc))
2127 {
2128 if (Info.fNemProt & ( pExit->MemoryAccess.Header.InterceptAccessType == WHvMemoryAccessWrite
2129 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
2130 {
2131 if (State.fCanResume)
2132 {
2133 Log4(("MemExit/%u: %08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting (%s)\n",
2134 pVCpu->idCpu, pHdr->Pc,
2135 pExit->MemoryAccess.Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
2136 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
2137 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pExit->MemoryAccess.Header.InterceptAccessType]));
2138 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
2139 pHdr->Pc, uHostTsc);
2140 return VINF_SUCCESS;
2141 }
2142 }
2143 Log4(("MemExit/%u: %08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating (%s)\n",
2144 pVCpu->idCpu, pHdr->Pc,
2145 pExit->MemoryAccess.Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
2146 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
2147 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pExit->MemoryAccess.Header.InterceptAccessType]));
2148 }
2149 else
2150 Log4(("MemExit/%u: %08RX64: %RGp rc=%Rrc%s; emulating (%s)\n",
2151 pVCpu->idCpu, pHdr->Pc,
2152 pExit->MemoryAccess.Gpa, rc, State.fDidSomething ? " modified-backing" : "",
2153 g_apszHvInterceptAccessTypes[pExit->MemoryAccess.Header.InterceptAccessType]));
2154
2155 /*
2156 * Emulate the memory access, either access handler or special memory.
2157 */
2158 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2159 pExit->MemoryAccess.Header.InterceptAccessType == WHvMemoryAccessWrite
2160 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
2161 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
2162 pHdr->Pc, uHostTsc);
2163 nemR3WinCopyStateFromArmHeader(pVCpu, &pExit->MemoryAccess.Header);
2164 RT_NOREF_PV(pExitRec);
2165 rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
2166 AssertRCReturn(rc, rc);
2167
2168#ifdef LOG_ENABLED
2169 uint8_t const cbInstr = pExit->MemoryAccess.InstructionByteCount;
2170 RTGCPTR const GCPtrVa = pExit->MemoryAccess.Gva;
2171#endif
2172 RTGCPHYS const GCPhys = pExit->MemoryAccess.Gpa;
2173 uint64_t const uIss = pExit->MemoryAccess.Syndrome;
2174 bool fIsv = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_ISV);
2175 bool fL2Fault = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_S1PTW);
2176 bool fWrite = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_WNR);
2177 bool f64BitReg = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SF);
2178 bool fSignExtend = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SSE);
2179 uint8_t uReg = ARMV8_EC_ISS_DATA_ABRT_SRT_GET(uIss);
2180 uint8_t uAcc = ARMV8_EC_ISS_DATA_ABRT_SAS_GET(uIss);
2181 size_t cbAcc = nemR3WinGetByteCountFromSas(uAcc);
2182 LogFlowFunc(("fIsv=%RTbool fL2Fault=%RTbool fWrite=%RTbool f64BitReg=%RTbool fSignExtend=%RTbool uReg=%u uAcc=%u GCPtrDataAbrt=%RGv GCPhys=%RGp cbInstr=%u\n",
2183 fIsv, fL2Fault, fWrite, f64BitReg, fSignExtend, uReg, uAcc, GCPtrVa, GCPhys, cbInstr));
2184
2185 RT_NOREF(fL2Fault);
2186
2187 VBOXSTRICTRC rcStrict;
2188 if (fIsv)
2189 {
2190 EMHistoryAddExit(pVCpu,
2191 fWrite
2192 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
2193 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
2194 pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());
2195
2196 uint64_t u64Val = 0;
2197 if (fWrite)
2198 {
2199 u64Val = nemR3WinGetGReg(pVCpu, uReg);
2200 rcStrict = PGMPhysWrite(pVM, GCPhys, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
2201 Log4(("MmioExit/%u: %08RX64: WRITE %RGp LB %u, %.*Rhxs -> rcStrict=%Rrc\n",
2202 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhys, cbAcc, cbAcc,
2203 &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
2204 }
2205 else
2206 {
2207 rcStrict = PGMPhysRead(pVM, GCPhys, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
2208 Log4(("MmioExit/%u: %08RX64: READ %RGp LB %u -> %.*Rhxs rcStrict=%Rrc\n",
2209 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhys, cbAcc, cbAcc,
2210 &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
2211 if (rcStrict == VINF_SUCCESS)
2212 nemR3WinSetGReg(pVCpu, uReg, f64BitReg, fSignExtend, u64Val);
2213 }
2214 }
2215 else
2216 {
2217 /** @todo Our UEFI firmware accesses the flash region with the following instruction
2218 * when the NVRAM actually contains data:
2219 * ldrb w9, [x6, #-0x0001]!
2220 * This is too complicated for the hardware so the ISV bit is not set. Until there
2221 * is a proper IEM implementation we just handle this here for now to avoid annoying
2222 * users too much.
2223 */
2224 /* The following ASSUMES that the vCPU state is completely synced. */
2225
2226 /* Read instruction. */
2227 RTGCPTR GCPtrPage = pVCpu->cpum.GstCtx.Pc.u64 & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK;
2228 const void *pvPageR3 = NULL;
2229 PGMPAGEMAPLOCK PageMapLock;
2230
2231 rcStrict = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrPage, &pvPageR3, &PageMapLock);
2232 if (rcStrict == VINF_SUCCESS)
2233 {
2234 uint32_t u32Instr = *(uint32_t *)((uint8_t *)pvPageR3 + (pVCpu->cpum.GstCtx.Pc.u64 - GCPtrPage));
2235 PGMPhysReleasePageMappingLock(pVCpu->pVMR3, &PageMapLock);
2236
2237 DISSTATE Dis;
2238 rcStrict = DISInstrWithPrefetchedBytes((uintptr_t)pVCpu->cpum.GstCtx.Pc.u64, DISCPUMODE_ARMV8_A64, 0 /*fFilter - none */,
2239 &u32Instr, sizeof(u32Instr), NULL, NULL, &Dis, NULL);
2240 if (rcStrict == VINF_SUCCESS)
2241 {
2242 if ( Dis.pCurInstr->uOpcode == OP_ARMV8_A64_LDRB
2243 && Dis.aParams[0].armv8.enmType == kDisArmv8OpParmReg
2244 && Dis.aParams[0].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_32Bit
2245 && Dis.aParams[1].armv8.enmType == kDisArmv8OpParmAddrInGpr
2246 && Dis.aParams[1].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_64Bit
2247 && (Dis.aParams[1].fUse & DISUSE_PRE_INDEXED))
2248 {
2249 /* The fault address is already the final address. */
2250 uint8_t bVal = 0;
2251 rcStrict = PGMPhysRead(pVM, GCPhys, &bVal, 1, PGMACCESSORIGIN_HM);
2252 Log4(("MmioExit/%u: %08RX64: READ %#RGp LB %u -> %.*Rhxs rcStrict=%Rrc\n",
2253 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhys, sizeof(bVal), sizeof(bVal),
2254 &bVal, VBOXSTRICTRC_VAL(rcStrict) ));
2255 if (rcStrict == VINF_SUCCESS)
2256 {
2257 nemR3WinSetGReg(pVCpu, Dis.aParams[0].armv8.Op.Reg.idReg, false /*f64BitReg*/, false /*fSignExtend*/, bVal);
2258 /* Update the indexed register. */
2259 pVCpu->cpum.GstCtx.aGRegs[Dis.aParams[1].armv8.Op.Reg.idReg].x += Dis.aParams[1].armv8.u.offBase;
2260 }
2261 }
2262 /*
2263 * Seeing the following with the Windows 11/ARM TPM driver:
2264 * %fffff800e5342888 48 25 45 29 ldp w8, w9, [x10, #+0x0028]
2265 */
2266 else if ( Dis.pCurInstr->uOpcode == OP_ARMV8_A64_LDP
2267 && Dis.aParams[0].armv8.enmType == kDisArmv8OpParmReg
2268 && Dis.aParams[0].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_32Bit
2269 && Dis.aParams[1].armv8.enmType == kDisArmv8OpParmReg
2270 && Dis.aParams[1].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_32Bit
2271 && Dis.aParams[2].armv8.enmType == kDisArmv8OpParmAddrInGpr
2272 && Dis.aParams[2].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_64Bit)
2273 {
2274 /** @todo This is tricky to handle if the first register read returns something else than VINF_SUCCESS... */
2275 /* The fault address is already the final address. */
2276 uint32_t u32Val1 = 0;
2277 uint32_t u32Val2 = 0;
2278 rcStrict = PGMPhysRead(pVM, GCPhys, &u32Val1, sizeof(u32Val1), PGMACCESSORIGIN_HM);
2279 if (rcStrict == VINF_SUCCESS)
2280 rcStrict = PGMPhysRead(pVM, GCPhys + sizeof(uint32_t), &u32Val2, sizeof(u32Val2), PGMACCESSORIGIN_HM);
2281 Log4(("MmioExit/%u: %08RX64: READ %#RGp LB %u -> %.*Rhxs %.*Rhxs rcStrict=%Rrc\n",
2282 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhys, 2 * sizeof(uint32_t), sizeof(u32Val1),
2283 &u32Val1, sizeof(u32Val2), &u32Val2, VBOXSTRICTRC_VAL(rcStrict) ));
2284 if (rcStrict == VINF_SUCCESS)
2285 {
2286 nemR3WinSetGReg(pVCpu, Dis.aParams[0].armv8.Op.Reg.idReg, false /*f64BitReg*/, false /*fSignExtend*/, u32Val1);
2287 nemR3WinSetGReg(pVCpu, Dis.aParams[1].armv8.Op.Reg.idReg, false /*f64BitReg*/, false /*fSignExtend*/, u32Val2);
2288 }
2289 }
2290 else
2291 AssertFailedReturn(VERR_NOT_SUPPORTED);
2292 }
2293 }
2294 }
2295
2296 if (rcStrict == VINF_SUCCESS)
2297 pVCpu->cpum.GstCtx.Pc.u64 += sizeof(uint32_t); /** @todo Why is InstructionByteCount always 0? */
2298
2299 return rcStrict;
2300}
2301
2302
2303/**
2304 * Deals with memory access exits (WHvRunVpExitReasonMemoryAccess).
2305 *
2306 * @returns Strict VBox status code.
2307 * @param pVM The cross context VM structure.
2308 * @param pVCpu The cross context per CPU structure.
2309 * @param pExit The VM exit information to handle.
2310 * @sa nemHCWinHandleMessageMemory
2311 */
2312NEM_TMPL_STATIC VBOXSTRICTRC
2313nemR3WinHandleExitHypercall(PVMCC pVM, PVMCPUCC pVCpu, MY_WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2314{
2315 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2316
2317 /** @todo Raise exception to EL1 if PSCI not configured. */
2318 /** @todo Need a generic mechanism here to pass this to, GIM maybe?. */
2319 uint32_t uFunId = pExit->Hypercall.Immediate;
2320 bool fHvc64 = RT_BOOL(uFunId & ARM_SMCCC_FUNC_ID_64BIT); RT_NOREF(fHvc64);
2321 uint32_t uEntity = ARM_SMCCC_FUNC_ID_ENTITY_GET(uFunId);
2322 uint32_t uFunNum = ARM_SMCCC_FUNC_ID_NUM_GET(uFunId);
2323 if (uEntity == ARM_SMCCC_FUNC_ID_ENTITY_STD_SEC_SERVICE)
2324 {
2325 switch (uFunNum)
2326 {
2327 case ARM_PSCI_FUNC_ID_PSCI_VERSION:
2328 nemR3WinSetGReg(pVCpu, ARMV8_A64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_FUNC_ID_PSCI_VERSION_SET(1, 2));
2329 break;
2330 case ARM_PSCI_FUNC_ID_SYSTEM_OFF:
2331 rcStrict = VMR3PowerOff(pVM->pUVM);
2332 break;
2333 case ARM_PSCI_FUNC_ID_SYSTEM_RESET:
2334 case ARM_PSCI_FUNC_ID_SYSTEM_RESET2:
2335 {
2336 bool fHaltOnReset;
2337 int rc = CFGMR3QueryBool(CFGMR3GetChild(CFGMR3GetRoot(pVM), "PDM"), "HaltOnReset", &fHaltOnReset);
2338 if (RT_SUCCESS(rc) && fHaltOnReset)
2339 {
2340 Log(("nemHCLnxHandleExitHypercall: Halt On Reset!\n"));
2341 rcStrict = VINF_EM_HALT;
2342 }
2343 else
2344 {
2345 /** @todo pVM->pdm.s.fResetFlags = fFlags; */
2346 VM_FF_SET(pVM, VM_FF_RESET);
2347 rcStrict = VINF_EM_RESET;
2348 }
2349 break;
2350 }
2351 case ARM_PSCI_FUNC_ID_CPU_ON:
2352 {
2353 uint64_t u64TgtCpu = pExit->Hypercall.X[1];
2354 RTGCPHYS GCPhysExecAddr = pExit->Hypercall.X[2];
2355 uint64_t u64CtxId = pExit->Hypercall.X[3];
2356 VMMR3CpuOn(pVM, u64TgtCpu & 0xff, GCPhysExecAddr, u64CtxId);
2357 nemR3WinSetGReg(pVCpu, ARMV8_A64_REG_X0, true /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_STS_SUCCESS);
2358 break;
2359 }
2360 case ARM_PSCI_FUNC_ID_PSCI_FEATURES:
2361 {
2362 uint32_t u32FunNum = (uint32_t)pExit->Hypercall.X[1];
2363 switch (u32FunNum)
2364 {
2365 case ARM_PSCI_FUNC_ID_PSCI_VERSION:
2366 case ARM_PSCI_FUNC_ID_SYSTEM_OFF:
2367 case ARM_PSCI_FUNC_ID_SYSTEM_RESET:
2368 case ARM_PSCI_FUNC_ID_SYSTEM_RESET2:
2369 case ARM_PSCI_FUNC_ID_CPU_ON:
2370 nemR3WinSetGReg(pVCpu, ARMV8_A64_REG_X0,
2371 false /*f64BitReg*/, false /*fSignExtend*/,
2372 (uint64_t)ARM_PSCI_STS_SUCCESS);
2373 break;
2374 default:
2375 nemR3WinSetGReg(pVCpu, ARMV8_A64_REG_X0,
2376 false /*f64BitReg*/, false /*fSignExtend*/,
2377 (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
2378 }
2379 break;
2380 }
2381 default:
2382 nemR3WinSetGReg(pVCpu, ARMV8_A64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
2383 }
2384 }
2385 else
2386 nemR3WinSetGReg(pVCpu, ARMV8_A64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
2387
2388 /** @todo What to do if immediate is != 0? */
2389
2390 if (rcStrict == VINF_SUCCESS)
2391 pVCpu->cpum.GstCtx.Pc.u64 += sizeof(uint32_t);
2392
2393 return rcStrict;
2394}
2395
2396
2397/**
2398 * Deals with MSR access exits (WHvRunVpExitReasonUnrecoverableException).
2399 *
2400 * @returns Strict VBox status code.
2401 * @param pVM The cross context VM structure.
2402 * @param pVCpu The cross context per CPU structure.
2403 * @param pExit The VM exit information to handle.
2404 * @sa nemHCWinHandleMessageUnrecoverableException
2405 */
2406NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExitUnrecoverableException(PVMCC pVM, PVMCPUCC pVCpu, MY_WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2407{
2408#if 0
2409 /*
2410 * Just copy the state we've got and handle it in the loop for now.
2411 */
2412 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2413 Log(("TripleExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
2414 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags));
2415 RT_NOREF_PV(pVM);
2416 return VINF_EM_TRIPLE_FAULT;
2417#else
2418 /*
2419 * Let IEM decide whether this is really it.
2420 */
2421 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_UNRECOVERABLE_EXCEPTION),
2422 pExit->UnrecoverableException.Header.Pc, ASMReadTSC());
2423 nemR3WinCopyStateFromArmHeader(pVCpu, &pExit->UnrecoverableException.Header);
2424 AssertReleaseFailed();
2425 RT_NOREF_PV(pVM);
2426 return VINF_SUCCESS;
2427#endif
2428}
2429
2430
2431/**
2432 * Handles VM exits.
2433 *
2434 * @returns Strict VBox status code.
2435 * @param pVM The cross context VM structure.
2436 * @param pVCpu The cross context per CPU structure.
2437 * @param pExit The VM exit information to handle.
2438 * @sa nemHCWinHandleMessage
2439 */
2440NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExit(PVMCC pVM, PVMCPUCC pVCpu, MY_WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2441{
2442#ifdef LOG_ENABLED
2443 if (LogIs3Enabled())
2444 {
2445 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, CPUMCTX_EXTRN_ALL);
2446 AssertRCReturn(rc, rc);
2447
2448 nemR3WinLogState(pVM, pVCpu);
2449 }
2450#endif
2451
2452 switch (pExit->ExitReason)
2453 {
2454 case WHvRunVpExitReasonUnmappedGpa:
2455 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemUnmapped);
2456 return nemR3WinHandleExitMemory(pVM, pVCpu, pExit);
2457
2458 case WHvRunVpExitReasonCanceled:
2459 Log4(("CanceledExit/%u\n", pVCpu->idCpu));
2460 return VINF_SUCCESS;
2461
2462 case WHvRunVpExitReasonHypercall:
2463 return nemR3WinHandleExitHypercall(pVM, pVCpu, pExit);
2464
2465 case 0x8001000c: /* WHvRunVpExitReasonArm64Reset */
2466 {
2467 if (pExit->Arm64Reset.ResetType == WHV_ARM64_RESET_CONTEXT_TYPE_POWER_OFF)
2468 return VMR3PowerOff(pVM->pUVM);
2469 else if (pExit->Arm64Reset.ResetType == WHV_ARM64_RESET_CONTEXT_TYPE_RESET)
2470 {
2471 VM_FF_SET(pVM, VM_FF_RESET);
2472 return VINF_EM_RESET;
2473 }
2474 else
2475 AssertLogRelFailedReturn(VERR_NEM_IPE_3);
2476 }
2477
2478 case WHvRunVpExitReasonUnrecoverableException:
2479 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitUnrecoverable);
2480 return nemR3WinHandleExitUnrecoverableException(pVM, pVCpu, pExit);
2481
2482 case WHvRunVpExitReasonUnsupportedFeature:
2483 case WHvRunVpExitReasonInvalidVpRegisterValue:
2484 LogRel(("Unimplemented exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit));
2485 AssertLogRelMsgFailedReturn(("Unexpected exit on CPU #%u: %#x\n%.32Rhxd\n",
2486 pVCpu->idCpu, pExit->ExitReason, pExit), VERR_NEM_IPE_3);
2487
2488 /* Undesired exits: */
2489 case WHvRunVpExitReasonNone:
2490 default:
2491 LogRel(("Unknown exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit));
2492 AssertLogRelMsgFailedReturn(("Unknown exit on CPU #%u: %#x!\n", pVCpu->idCpu, pExit->ExitReason), VERR_NEM_IPE_3);
2493 }
2494}
2495
2496
2497VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu)
2498{
2499 LogFlow(("NEM/%u: %08RX64 pstate=%#08RX64 <=\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc, pVCpu->cpum.GstCtx.fPState));
2500#ifdef LOG_ENABLED
2501 if (LogIs3Enabled())
2502 nemR3WinLogState(pVM, pVCpu);
2503#endif
2504
2505 /*
2506 * Try switch to NEM runloop state.
2507 */
2508 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
2509 { /* likely */ }
2510 else
2511 {
2512 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
2513 LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
2514 return VINF_SUCCESS;
2515 }
2516
2517 /*
2518 * The run loop.
2519 *
2520 * Current approach to state updating to use the sledgehammer and sync
2521 * everything every time. This will be optimized later.
2522 */
2523 const bool fSingleStepping = DBGFIsStepping(pVCpu);
2524 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2525 for (unsigned iLoop = 0;; iLoop++)
2526 {
2527 /*
2528 * Poll timers and run for a bit.
2529 *
2530 * With the VID approach (ring-0 or ring-3) we can specify a timeout here,
2531 * so we take the time of the next timer event and uses that as a deadline.
2532 * The rounding heuristics are "tuned" so that rhel5 (1K timer) will boot fine.
2533 */
2534 /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
2535 * the whole polling job when timers have changed... */
2536 uint64_t offDeltaIgnored;
2537 uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
2538 if ( !VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
2539 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
2540 {
2541 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_WAIT, VMCPUSTATE_STARTED_EXEC_NEM))
2542 {
2543 /* Ensure that Hyper-V has the whole state. */
2544 int rc2 = nemHCWinCopyStateToHyperV(pVM, pVCpu);
2545 AssertRCReturn(rc2, rc2);
2546
2547#ifdef LOG_ENABLED
2548 if (LogIsFlowEnabled())
2549 {
2550 static const WHV_REGISTER_NAME s_aNames[2] = { WHvArm64RegisterPc, WHvArm64RegisterPstate };
2551 WHV_REGISTER_VALUE aRegs[RT_ELEMENTS(s_aNames)] = { { { {0, 0} } } };
2552 WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, s_aNames, RT_ELEMENTS(s_aNames), aRegs);
2553 LogFlow(("NEM/%u: Entry @ %08RX64 pstate=%#RX64\n", pVCpu->idCpu, aRegs[0].Reg64, aRegs[1].Reg64));
2554 }
2555#endif
2556
2557 MY_WHV_RUN_VP_EXIT_CONTEXT ExitReason = {0};
2558 TMNotifyStartOfExecution(pVM, pVCpu);
2559
2560 HRESULT hrc = WHvRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, &ExitReason, sizeof(ExitReason));
2561
2562 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
2563 TMNotifyEndOfExecution(pVM, pVCpu, ASMReadTSC());
2564#ifdef LOG_ENABLED
2565 if (LogIsFlowEnabled())
2566 {
2567 static const WHV_REGISTER_NAME s_aNames[2] = { WHvArm64RegisterPc, WHvArm64RegisterPstate };
2568 WHV_REGISTER_VALUE aRegs[RT_ELEMENTS(s_aNames)] = { { { {0, 0} } } };
2569 WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, s_aNames, RT_ELEMENTS(s_aNames), aRegs);
2570 LogFlow(("NEM/%u: Exit @ %08RX64 pstate=%#RX64 Reason=%#x\n",
2571 pVCpu->idCpu, aRegs[0].Reg64, aRegs[1].Reg64, ExitReason.ExitReason));
2572 }
2573#endif
2574 if (SUCCEEDED(hrc))
2575 {
2576 /*
2577 * Deal with the message.
2578 */
2579 rcStrict = nemR3WinHandleExit(pVM, pVCpu, &ExitReason);
2580 if (rcStrict == VINF_SUCCESS)
2581 { /* hopefully likely */ }
2582 else
2583 {
2584 LogFlow(("NEM/%u: breaking: nemR3WinHandleExit -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
2585 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
2586 break;
2587 }
2588 }
2589 else
2590 AssertLogRelMsgFailedReturn(("WHvRunVirtualProcessor failed for CPU #%u: %#x (%u)\n",
2591 pVCpu->idCpu, hrc, GetLastError()),
2592 VERR_NEM_IPE_0);
2593
2594 /*
2595 * If no relevant FFs are pending, loop.
2596 */
2597 if ( !VM_FF_IS_ANY_SET( pVM, !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
2598 && !VMCPU_FF_IS_ANY_SET(pVCpu, !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
2599 continue;
2600
2601 /** @todo Try handle pending flags, not just return to EM loops. Take care
2602 * not to set important RCs here unless we've handled a message. */
2603 LogFlow(("NEM/%u: breaking: pending FF (%#x / %#RX64)\n",
2604 pVCpu->idCpu, pVM->fGlobalForcedActions, (uint64_t)pVCpu->fLocalForcedActions));
2605 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPost);
2606 }
2607 else
2608 {
2609 LogFlow(("NEM/%u: breaking: canceled %d (pre exec)\n", pVCpu->idCpu, VMCPU_GET_STATE(pVCpu) ));
2610 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnCancel);
2611 }
2612 }
2613 else
2614 {
2615 LogFlow(("NEM/%u: breaking: pending FF (pre exec)\n", pVCpu->idCpu));
2616 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPre);
2617 }
2618 break;
2619 } /* the run loop */
2620
2621
2622 /*
2623 * If the CPU is running, make sure to stop it before we try sync back the
2624 * state and return to EM. We don't sync back the whole state if we can help it.
2625 */
2626 if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
2627 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
2628
2629 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL)
2630 {
2631 /* Try anticipate what we might need. */
2632 uint64_t fImport = IEM_CPUMCTX_EXTRN_MUST_MASK;
2633 if ( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
2634 || RT_FAILURE(rcStrict))
2635 fImport = CPUMCTX_EXTRN_ALL;
2636 else if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ))
2637 fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;
2638
2639 if (pVCpu->cpum.GstCtx.fExtrn & fImport)
2640 {
2641 int rc2 = nemHCWinCopyStateFromHyperV(pVM, pVCpu, fImport);
2642 if (RT_SUCCESS(rc2))
2643 pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
2644 else if (RT_SUCCESS(rcStrict))
2645 rcStrict = rc2;
2646 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
2647 pVCpu->cpum.GstCtx.fExtrn = 0;
2648 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
2649 }
2650 else
2651 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
2652 }
2653 else
2654 {
2655 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
2656 pVCpu->cpum.GstCtx.fExtrn = 0;
2657 }
2658
2659#if 0
2660 UINT32 cbWritten;
2661 WHV_ARM64_LOCAL_INTERRUPT_CONTROLLER_STATE IntrState;
2662 HRESULT hrc = WHvGetVirtualProcessorState(pVM->nem.s.hPartition, pVCpu->idCpu, WHvVirtualProcessorStateTypeInterruptControllerState2,
2663 &IntrState, sizeof(IntrState), &cbWritten);
2664 AssertLogRelMsgReturn(SUCCEEDED(hrc),
2665 ("WHvGetVirtualProcessorState(%p, %u,WHvVirtualProcessorStateTypeInterruptControllerState2,) -> %Rhrc (Last=%#x/%u)\n",
2666 pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
2667 , VERR_NEM_GET_REGISTERS_FAILED);
2668 LogFlowFunc(("IntrState: cbWritten=%u\n"));
2669 for (uint32_t i = 0; i < RT_ELEMENTS(IntrState.BankedInterruptState); i++)
2670 {
2671 WHV_ARM64_INTERRUPT_STATE *pState = &IntrState.BankedInterruptState[i];
2672 LogFlowFunc(("IntrState: Intr %u:\n"
2673 " Enabled=%RTbool\n"
2674 " EdgeTriggered=%RTbool\n"
2675 " Asserted=%RTbool\n"
2676 " SetPending=%RTbool\n"
2677 " Active=%RTbool\n"
2678 " Direct=%RTbool\n"
2679 " GicrIpriorityrConfigured=%u\n"
2680 " GicrIpriorityrActive=%u\n",
2681 i, pState->Enabled, pState->EdgeTriggered, pState->Asserted, pState->SetPending, pState->Active, pState->Direct,
2682 pState->GicrIpriorityrConfigured, pState->GicrIpriorityrActive));
2683 }
2684#endif
2685
2686 LogFlow(("NEM/%u: %08RX64 pstate=%#08RX64 => %Rrc\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64,
2687 pVCpu->cpum.GstCtx.fPState, VBOXSTRICTRC_VAL(rcStrict) ));
2688 return rcStrict;
2689}
2690
2691
2692VMMR3_INT_DECL(bool) NEMR3CanExecuteGuest(PVM pVM, PVMCPU pVCpu)
2693{
2694 Assert(VM_IS_NEM_ENABLED(pVM));
2695 RT_NOREF(pVM, pVCpu);
2696 return true;
2697}
2698
2699
2700bool nemR3NativeSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
2701{
2702 NOREF(pVM); NOREF(pVCpu); NOREF(fEnable);
2703 return false;
2704}
2705
2706
2707void nemR3NativeNotifyFF(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
2708{
2709 Log8(("nemR3NativeNotifyFF: canceling %u\n", pVCpu->idCpu));
2710 HRESULT hrc = WHvCancelRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, 0);
2711 AssertMsg(SUCCEEDED(hrc), ("WHvCancelRunVirtualProcessor -> hrc=%Rhrc\n", hrc));
2712 RT_NOREF_PV(hrc);
2713 RT_NOREF_PV(fFlags);
2714}
2715
2716
2717DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChanged(PVM pVM, bool fUseDebugLoop)
2718{
2719 RT_NOREF(pVM, fUseDebugLoop);
2720 return false;
2721}
2722
2723
2724DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu, bool fUseDebugLoop)
2725{
2726 RT_NOREF(pVM, pVCpu, fUseDebugLoop);
2727 return false;
2728}
2729
2730
2731DECLINLINE(int) nemR3NativeGCPhys2R3PtrReadOnly(PVM pVM, RTGCPHYS GCPhys, const void **ppv)
2732{
2733 PGMPAGEMAPLOCK Lock;
2734 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, ppv, &Lock);
2735 if (RT_SUCCESS(rc))
2736 PGMPhysReleasePageMappingLock(pVM, &Lock);
2737 return rc;
2738}
2739
2740
2741DECLINLINE(int) nemR3NativeGCPhys2R3PtrWriteable(PVM pVM, RTGCPHYS GCPhys, void **ppv)
2742{
2743 PGMPAGEMAPLOCK Lock;
2744 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, ppv, &Lock);
2745 if (RT_SUCCESS(rc))
2746 PGMPhysReleasePageMappingLock(pVM, &Lock);
2747 return rc;
2748}
2749
2750
2751VMMR3_INT_DECL(int) NEMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvR3,
2752 uint8_t *pu2State, uint32_t *puNemRange)
2753{
2754 Log5(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p pu2State=%p (%d) puNemRange=%p (%d)\n",
2755 GCPhys, cb, pvR3, pu2State, pu2State, puNemRange, *puNemRange));
2756
2757 *pu2State = UINT8_MAX;
2758 RT_NOREF(puNemRange);
2759
2760 if (pvR3)
2761 {
2762 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
2763 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvR3, GCPhys, cb,
2764 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagWrite | WHvMapGpaRangeFlagExecute);
2765 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
2766 if (SUCCEEDED(hrc))
2767 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
2768 else
2769 {
2770 LogRel(("NEMR3NotifyPhysRamRegister: GCPhys=%RGp LB %RGp pvR3=%p hrc=%Rhrc (%#x) Last=%#x/%u\n",
2771 GCPhys, cb, pvR3, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2772 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
2773 return VERR_NEM_MAP_PAGES_FAILED;
2774 }
2775 }
2776 return VINF_SUCCESS;
2777}
2778
2779
2780VMMR3_INT_DECL(bool) NEMR3IsMmio2DirtyPageTrackingSupported(PVM pVM)
2781{
2782 RT_NOREF(pVM);
2783 return g_pfnWHvQueryGpaRangeDirtyBitmap != NULL;
2784}
2785
2786
/**
 * Early notification that an MMIO/MMIO2 region is being mapped: unmaps any
 * RAM being replaced and, when an MMIO2 ring-3 mapping is supplied, maps it
 * into the Hyper-V partition (optionally with dirty-page tracking).
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      Start of the range.
 * @param   cb          Size of the range.
 * @param   fFlags      NEM_NOTIFY_PHYS_MMIO_EX_F_XXX.
 * @param   pvRam       The RAM being replaced, if any (logged only here).
 * @param   pvMmio2     Ring-3 mapping of the MMIO2 backing, optional.
 * @param   pu2State    Where to return the NEM page state for the range.
 * @param   puNemRange  NEM range cookie; unused here, logged only.
 */
VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
                                                  void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
{
    Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p (%d) puNemRange=%p (%#x)\n",
          GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, *pu2State, puNemRange, puNemRange ? *puNemRange : UINT32_MAX));
    RT_NOREF(puNemRange);

    /*
     * Unmap the RAM we're replacing.
     */
    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
    {
        STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRange, a);
        HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, cb);
        STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRange, a);
        if (SUCCEEDED(hrc))
        { /* likely */ }
        else if (pvMmio2)
            /* Non-fatal when MMIO2 will be mapped over it anyway. */
            LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> hrc=%Rhrc (%#x) Last=%#x/%u (ignored)\n",
                    GCPhys, cb, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
        else
        {
            LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> hrc=%Rhrc (%#x) Last=%#x/%u\n",
                    GCPhys, cb, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
            STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
            return VERR_NEM_UNMAP_PAGES_FAILED;
        }
    }

    /*
     * Map MMIO2 if any.
     */
    if (pvMmio2)
    {
        Assert(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2);
        WHV_MAP_GPA_RANGE_FLAGS fWHvFlags = WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagWrite | WHvMapGpaRangeFlagExecute;
        /* Only request dirty tracking when both asked for and supported by the host. */
        if ((fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES) && g_pfnWHvQueryGpaRangeDirtyBitmap)
            fWHvFlags |= WHvMapGpaRangeFlagTrackDirtyPages;
        STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
        HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvMmio2, GCPhys, cb, fWHvFlags);
        STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
        if (SUCCEEDED(hrc))
            *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
        else
        {
            LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x pvMmio2=%p fWHvFlags=%#x: Map -> hrc=%Rhrc (%#x) Last=%#x/%u\n",
                    GCPhys, cb, fFlags, pvMmio2, fWHvFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
            STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
            return VERR_NEM_MAP_PAGES_FAILED;
        }
    }
    else
    {
        Assert(!(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2));
        *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
    }
    RT_NOREF(pvRam);
    return VINF_SUCCESS;
}
2846
2847
2848VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
2849 void *pvRam, void *pvMmio2, uint32_t *puNemRange)
2850{
2851 RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, puNemRange);
2852 return VINF_SUCCESS;
2853}
2854
2855
2856VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExUnmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, void *pvRam,
2857 void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
2858{
2859 int rc = VINF_SUCCESS;
2860 Log5(("NEMR3NotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p uNemRange=%#x (%#x)\n",
2861 GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, puNemRange, *puNemRange));
2862
2863 /*
2864 * Unmap the MMIO2 pages.
2865 */
2866 /** @todo If we implement aliasing (MMIO2 page aliased into MMIO range),
2867 * we may have more stuff to unmap even in case of pure MMIO... */
2868 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2)
2869 {
2870 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRange, a);
2871 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, cb);
2872 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRange, a);
2873 if (FAILED(hrc))
2874 {
2875 LogRel2(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> hrc=%Rhrc (%#x) Last=%#x/%u (ignored)\n",
2876 GCPhys, cb, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2877 rc = VERR_NEM_UNMAP_PAGES_FAILED;
2878 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
2879 }
2880 }
2881
2882 /*
2883 * Restore the RAM we replaced.
2884 */
2885 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
2886 {
2887 AssertPtr(pvRam);
2888 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
2889 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvRam, GCPhys, cb,
2890 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagWrite | WHvMapGpaRangeFlagExecute);
2891 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
2892 if (SUCCEEDED(hrc))
2893 { /* likely */ }
2894 else
2895 {
2896 LogRel(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp pvMmio2=%p hrc=%Rhrc (%#x) Last=%#x/%u\n",
2897 GCPhys, cb, pvMmio2, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2898 rc = VERR_NEM_MAP_PAGES_FAILED;
2899 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
2900 }
2901 if (pu2State)
2902 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
2903 }
2904 /* Mark the pages as unmapped if relevant. */
2905 else if (pu2State)
2906 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
2907
2908 RT_NOREF(pvMmio2, puNemRange);
2909 return rc;
2910}
2911
2912
2913VMMR3_INT_DECL(int) NEMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t uNemRange,
2914 void *pvBitmap, size_t cbBitmap)
2915{
2916 Assert(VM_IS_NEM_ENABLED(pVM));
2917 AssertReturn(g_pfnWHvQueryGpaRangeDirtyBitmap, VERR_INTERNAL_ERROR_2);
2918 Assert(cbBitmap == (uint32_t)cbBitmap);
2919 RT_NOREF(uNemRange);
2920
2921 /* This is being profiled by PGM, see /PGM/Mmio2QueryAndResetDirtyBitmap. */
2922 HRESULT hrc = WHvQueryGpaRangeDirtyBitmap(pVM->nem.s.hPartition, GCPhys, cb, (UINT64 *)pvBitmap, (uint32_t)cbBitmap);
2923 if (SUCCEEDED(hrc))
2924 return VINF_SUCCESS;
2925
2926 AssertLogRelMsgFailed(("GCPhys=%RGp LB %RGp pvBitmap=%p LB %#zx hrc=%Rhrc (%#x) Last=%#x/%u\n",
2927 GCPhys, cb, pvBitmap, cbBitmap, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2928 return VERR_NEM_QUERY_DIRTY_BITMAP_FAILED;
2929}
2930
2931
/**
 * Early ROM registration notification.  Currently a no-op: the range is
 * mapped in the late notification instead (the eager per-page mapping below
 * was deliberately disabled, see the #if 0 comment).
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      Start of the ROM range.
 * @param   cb          Size of the ROM range.
 * @param   pvPages     Ring-3 mapping of the ROM pages.
 * @param   fFlags      NEM_NOTIFY_PHYS_ROM_F_XXX.
 * @param   pu2State    Where to return the NEM page state (set to UINT8_MAX).
 * @param   puNemRange  NEM range cookie (set to 0).
 */
VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages, uint32_t fFlags,
                                                    uint8_t *pu2State, uint32_t *puNemRange)
{
    Log5(("nemR3NativeNotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags));
    *pu2State = UINT8_MAX;
    *puNemRange = 0;

#if 0 /* Let's not do this after all.  We'll protection change notifications for each page and if not we'll map them lazily. */
    RTGCPHYS const cPages = cb >> X86_PAGE_SHIFT;
    for (RTGCPHYS iPage = 0; iPage < cPages; iPage++, GCPhys += X86_PAGE_SIZE)
    {
        const void *pvPage;
        int rc = nemR3NativeGCPhys2R3PtrReadOnly(pVM, GCPhys, &pvPage);
        if (RT_SUCCESS(rc))
        {
            HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, (void *)pvPage, GCPhys, X86_PAGE_SIZE,
                                         WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
            if (SUCCEEDED(hrc))
            { /* likely */ }
            else
            {
                LogRel(("nemR3NativeNotifyPhysRomRegisterEarly: GCPhys=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
                        GCPhys, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
                return VERR_NEM_INIT_FAILED;
            }
        }
        else
        {
            LogRel(("nemR3NativeNotifyPhysRomRegisterEarly: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));
            return rc;
        }
    }
    RT_NOREF_PV(fFlags);
#else
    RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags);
#endif
    return VINF_SUCCESS;
}
2970
2971
2972VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages,
2973 uint32_t fFlags, uint8_t *pu2State, uint32_t *puNemRange)
2974{
2975 Log5(("nemR3NativeNotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n",
2976 GCPhys, cb, pvPages, fFlags, pu2State, *pu2State, puNemRange, *puNemRange));
2977 *pu2State = UINT8_MAX;
2978
2979 /*
2980 * (Re-)map readonly.
2981 */
2982 AssertPtrReturn(pvPages, VERR_INVALID_POINTER);
2983 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
2984 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvPages, GCPhys, cb, WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
2985 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
2986 if (SUCCEEDED(hrc))
2987 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
2988 else
2989 {
2990 LogRel(("nemR3NativeNotifyPhysRomRegisterEarly: GCPhys=%RGp LB %RGp pvPages=%p fFlags=%#x hrc=%Rhrc (%#x) Last=%#x/%u\n",
2991 GCPhys, cb, pvPages, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2992 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
2993 return VERR_NEM_MAP_PAGES_FAILED;
2994 }
2995 RT_NOREF(fFlags, puNemRange);
2996 return VINF_SUCCESS;
2997}
2998
2999VMMR3_INT_DECL(void) NEMR3NotifySetA20(PVMCPU pVCpu, bool fEnabled)
3000{
3001 Log(("nemR3NativeNotifySetA20: fEnabled=%RTbool\n", fEnabled));
3002 Assert(VM_IS_NEM_ENABLED(pVCpu->CTX_SUFF(pVM)));
3003 RT_NOREF(pVCpu, fEnabled);
3004}
3005
3006
3007void nemHCNativeNotifyHandlerPhysicalRegister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
3008{
3009 Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
3010 NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb);
3011}
3012
3013
3014VMM_INT_DECL(void) NEMHCNotifyHandlerPhysicalDeregister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
3015 RTR3PTR pvMemR3, uint8_t *pu2State)
3016{
3017 Log5(("NEMHCNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d pvMemR3=%p pu2State=%p (%d)\n",
3018 GCPhys, cb, enmKind, pvMemR3, pu2State, *pu2State));
3019
3020 *pu2State = UINT8_MAX;
3021 if (pvMemR3)
3022 {
3023 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
3024 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvMemR3, GCPhys, cb,
3025 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute | WHvMapGpaRangeFlagWrite);
3026 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
3027 if (SUCCEEDED(hrc))
3028 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
3029 else
3030 AssertLogRelMsgFailed(("NEMHCNotifyHandlerPhysicalDeregister: WHvMapGpaRange(,%p,%RGp,%RGp,) -> %Rhrc\n",
3031 pvMemR3, GCPhys, cb, hrc));
3032 }
3033 RT_NOREF(enmKind);
3034}
3035
3036
3037void nemHCNativeNotifyHandlerPhysicalModify(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
3038 RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
3039{
3040 Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
3041 GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
3042 NOREF(pVM); NOREF(enmKind); NOREF(GCPhysOld); NOREF(GCPhysNew); NOREF(cb); NOREF(fRestoreAsRAM);
3043}
3044
3045
3046/**
3047 * Worker that maps pages into Hyper-V.
3048 *
3049 * This is used by the PGM physical page notifications as well as the memory
3050 * access VMEXIT handlers.
3051 *
3052 * @returns VBox status code.
3053 * @param pVM The cross context VM structure.
3054 * @param pVCpu The cross context virtual CPU structure of the
3055 * calling EMT.
3056 * @param GCPhysSrc The source page address.
3057 * @param GCPhysDst The hyper-V destination page. This may differ from
3058 * GCPhysSrc when A20 is disabled.
3059 * @param fPageProt NEM_PAGE_PROT_XXX.
3060 * @param pu2State Our page state (input/output).
3061 * @param fBackingChanged Set if the page backing is being changed.
3062 * @thread EMT(pVCpu)
3063 */
3064NEM_TMPL_STATIC int nemHCNativeSetPhysPage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
3065 uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged)
3066{
3067 /*
3068 * Looks like we need to unmap a page before we can change the backing
3069 * or even modify the protection. This is going to be *REALLY* efficient.
3070 * PGM lends us two bits to keep track of the state here.
3071 */
3072 RT_NOREF(pVCpu);
3073 uint8_t const u2OldState = *pu2State;
3074 uint8_t const u2NewState = fPageProt & NEM_PAGE_PROT_WRITE ? NEM_WIN_PAGE_STATE_WRITABLE
3075 : fPageProt & NEM_PAGE_PROT_READ ? NEM_WIN_PAGE_STATE_READABLE : NEM_WIN_PAGE_STATE_UNMAPPED;
3076 if ( fBackingChanged
3077 || u2NewState != u2OldState)
3078 {
3079 if (u2OldState > NEM_WIN_PAGE_STATE_UNMAPPED)
3080 {
3081 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
3082 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst, X86_PAGE_SIZE);
3083 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
3084 if (SUCCEEDED(hrc))
3085 {
3086 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
3087 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
3088 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
3089 if (u2NewState == NEM_WIN_PAGE_STATE_UNMAPPED)
3090 {
3091 Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n",
3092 GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
3093 return VINF_SUCCESS;
3094 }
3095 }
3096 else
3097 {
3098 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
3099 LogRel(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
3100 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
3101 return VERR_NEM_INIT_FAILED;
3102 }
3103 }
3104 }
3105
3106 /*
3107 * Writeable mapping?
3108 */
3109 if (fPageProt & NEM_PAGE_PROT_WRITE)
3110 {
3111 void *pvPage;
3112 int rc = nemR3NativeGCPhys2R3PtrWriteable(pVM, GCPhysSrc, &pvPage);
3113 if (RT_SUCCESS(rc))
3114 {
3115 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvPage, GCPhysDst, X86_PAGE_SIZE,
3116 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute | WHvMapGpaRangeFlagWrite);
3117 if (SUCCEEDED(hrc))
3118 {
3119 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
3120 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPage);
3121 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
3122 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
3123 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
3124 return VINF_SUCCESS;
3125 }
3126 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
3127 LogRel(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
3128 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
3129 return VERR_NEM_INIT_FAILED;
3130 }
3131 LogRel(("nemHCNativeSetPhysPage/writable: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
3132 return rc;
3133 }
3134
3135 if (fPageProt & NEM_PAGE_PROT_READ)
3136 {
3137 const void *pvPage;
3138 int rc = nemR3NativeGCPhys2R3PtrReadOnly(pVM, GCPhysSrc, &pvPage);
3139 if (RT_SUCCESS(rc))
3140 {
3141 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRangePage, a);
3142 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, (void *)pvPage, GCPhysDst, X86_PAGE_SIZE,
3143 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
3144 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRangePage, a);
3145 if (SUCCEEDED(hrc))
3146 {
3147 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
3148 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPage);
3149 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
3150 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
3151 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
3152 return VINF_SUCCESS;
3153 }
3154 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
3155 LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
3156 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
3157 return VERR_NEM_INIT_FAILED;
3158 }
3159 LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
3160 return rc;
3161 }
3162
3163 /* We already unmapped it above. */
3164 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
3165 return VINF_SUCCESS;
3166}
3167
3168
3169NEM_TMPL_STATIC int nemHCJustUnmapPageFromHyperV(PVMCC pVM, RTGCPHYS GCPhysDst, uint8_t *pu2State)
3170{
3171 if (*pu2State <= NEM_WIN_PAGE_STATE_UNMAPPED)
3172 {
3173 Log5(("nemHCJustUnmapPageFromHyperV: %RGp == unmapped\n", GCPhysDst));
3174 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
3175 return VINF_SUCCESS;
3176 }
3177
3178 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
3179 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, X86_PAGE_SIZE);
3180 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
3181 if (SUCCEEDED(hrc))
3182 {
3183 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
3184 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
3185 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
3186 Log5(("nemHCJustUnmapPageFromHyperV: %RGp => unmapped (total %u)\n", GCPhysDst, cMappedPages));
3187 return VINF_SUCCESS;
3188 }
3189 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
3190 LogRel(("nemHCJustUnmapPageFromHyperV(%RGp): failed! hrc=%Rhrc (%#x) Last=%#x/%u\n",
3191 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
3192 return VERR_NEM_IPE_6;
3193}
3194
3195
3196int nemHCNativeNotifyPhysPageAllocated(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
3197 PGMPAGETYPE enmType, uint8_t *pu2State)
3198{
3199 Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
3200 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
3201 RT_NOREF_PV(HCPhys); RT_NOREF_PV(enmType);
3202
3203 int rc;
3204 RT_NOREF_PV(fPageProt);
3205 rc = nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
3206 return rc;
3207}
3208
3209
3210VMM_INT_DECL(void) NEMHCNotifyPhysPageProtChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, RTR3PTR pvR3, uint32_t fPageProt,
3211 PGMPAGETYPE enmType, uint8_t *pu2State)
3212{
3213 Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
3214 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
3215 Assert(VM_IS_NEM_ENABLED(pVM));
3216 RT_NOREF(HCPhys, enmType, pvR3);
3217
3218 RT_NOREF_PV(fPageProt);
3219 nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
3220}
3221
3222
3223VMM_INT_DECL(void) NEMHCNotifyPhysPageChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
3224 RTR3PTR pvNewR3, uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
3225{
3226 Log5(("nemHCNativeNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp pvNewR3=%p fPageProt=%#x enmType=%d *pu2State=%d\n",
3227 GCPhys, HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType, *pu2State));
3228 Assert(VM_IS_NEM_ENABLED(pVM));
3229 RT_NOREF(HCPhysPrev, HCPhysNew, pvNewR3, enmType);
3230
3231 RT_NOREF_PV(fPageProt);
3232 nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
3233}
3234
3235
3236/**
3237 * Returns features supported by the NEM backend.
3238 *
3239 * @returns Flags of features supported by the native NEM backend.
3240 * @param pVM The cross context VM structure.
3241 */
3242VMM_INT_DECL(uint32_t) NEMHCGetFeatures(PVMCC pVM)
3243{
3244 RT_NOREF(pVM);
3245 /** @todo Is NEM_FEAT_F_FULL_GST_EXEC always true? */
3246 return NEM_FEAT_F_NESTED_PAGING | NEM_FEAT_F_FULL_GST_EXEC;
3247}
3248
3249
3250/** @page pg_nem_win_aarmv8 NEM/win - Native Execution Manager, Windows.
3251 *
3252 * Open questions:
3253 * - Why can't one read and write WHvArm64RegisterId*
3254 * - WHvArm64RegisterDbgbcr0El1 is not readable?
3255 * - Getting notified about system register reads/writes (GIC)?
3256 * - InstructionByteCount and InstructionBytes for unmapped GPA exit are zero...
3257 * - Handling of (vTimer) interrupts, how is WHvRequestInterrupt() supposed to be used?
3258 */
3259
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette