VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/NEMR3Native-win-armv8.cpp@ 108400

Last change on this file since 108400 was 108398, checked in by vboxsync, 2 months ago

VMM/GICR3Nem-win.cpp: Some preliminary saved state code in an attempt to get saved states working, the format is not final, bugref:10392

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 152.1 KB
Line 
1/* $Id: NEMR3Native-win-armv8.cpp 108398 2025-02-26 16:41:49Z vboxsync $ */
2/** @file
3 * NEM - Native execution manager, native ring-3 Windows backend.
4 *
5 * Log group 2: Exit logging.
6 * Log group 3: Log context on exit.
7 * Log group 5: Ring-3 memory management
8 * Log group 6: Ring-0 memory management
9 * Log group 12: API intercepts.
10 */
11
12/*
13 * Copyright (C) 2018-2024 Oracle and/or its affiliates.
14 *
15 * This file is part of VirtualBox base platform packages, as
16 * available from https://www.virtualbox.org.
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation, in version 3 of the
21 * License.
22 *
23 * This program is distributed in the hope that it will be useful, but
24 * WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
26 * General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, see <https://www.gnu.org/licenses>.
30 *
31 * SPDX-License-Identifier: GPL-3.0-only
32 */
33
34
35/*********************************************************************************************************************************
36* Header Files *
37*********************************************************************************************************************************/
38#define LOG_GROUP LOG_GROUP_NEM
39#define VMCPU_INCL_CPUM_GST_CTX
40#include <iprt/nt/nt-and-windows.h>
41#include <iprt/nt/hyperv.h>
42#include <WinHvPlatform.h>
43
44#ifndef _WIN32_WINNT_WIN10
45# error "Missing _WIN32_WINNT_WIN10"
46#endif
47#ifndef _WIN32_WINNT_WIN10_RS1 /* Missing define, causing trouble for us. */
48# define _WIN32_WINNT_WIN10_RS1 (_WIN32_WINNT_WIN10 + 1)
49#endif
50#include <sysinfoapi.h>
51#include <debugapi.h>
52#include <errhandlingapi.h>
53#include <fileapi.h>
54#include <winerror.h> /* no api header for this. */
55
56#include <VBox/dis.h>
57#include <VBox/vmm/nem.h>
58#include <VBox/vmm/iem.h>
59#include <VBox/vmm/em.h>
60#include <VBox/vmm/pdmapic.h>
61#include <VBox/vmm/pdm.h>
62#include <VBox/vmm/dbgftrace.h>
63#include "NEMInternal.h"
64#include <VBox/vmm/vmcc.h>
65
66#include <iprt/formats/arm-psci.h>
67
68#include <iprt/ldr.h>
69#include <iprt/path.h>
70#include <iprt/string.h>
71#include <iprt/system.h>
72#include <iprt/utf16.h>
73
74#ifndef NTDDI_WIN10_VB /* Present in W10 2004 SDK, quite possibly earlier. */
75HRESULT WINAPI WHvQueryGpaRangeDirtyBitmap(WHV_PARTITION_HANDLE, WHV_GUEST_PHYSICAL_ADDRESS, UINT64, UINT64 *, UINT32);
76# define WHvMapGpaRangeFlagTrackDirtyPages ((WHV_MAP_GPA_RANGE_FLAGS)0x00000008)
77#endif
78
79
80/*
81 * The following definitions appeared in build 27744 allow configuring the base address of the GICv3 controller,
82 * (there is no official SDK for this yet).
83 */
84/** @todo Better way of defining these which doesn't require casting later on when calling APIs. */
85#define WHV_PARTITION_PROPERTY_CODE_ARM64_IC_PARAMETERS UINT32_C(0x00001012)
86/** No GIC present. */
87#define WHV_ARM64_IC_EMULATION_MODE_NONE 0
88/** Hyper-V emulates a GICv3. */
89#define WHV_ARM64_IC_EMULATION_MODE_GICV3 1
90
/**
 * Configures the interrupt controller emulated by Hyper-V.
 *
 * Private mirror of the (undocumented) ARM64 IC parameters structure that
 * appeared in build 27744; there is no official SDK definition for it yet.
 * Layout must stay exactly 64 bytes (see AssertCompileSize below).
 */
typedef struct MY_WHV_ARM64_IC_PARAMETERS
{
    uint32_t u32EmulationMode;                  /**< WHV_ARM64_IC_EMULATION_MODE_XXX. */
    uint32_t u32Rsvd;                           /**< Reserved / explicit padding. */
    union
    {
        struct
        {
            RTGCPHYS GCPhysGicdBase;            /**< Base address of the GIC distributor (GICD) MMIO region. */
            RTGCPHYS GCPhysGitsTranslaterBase;  /**< Base address of the GIC ITS translater MMIO region. */
            uint32_t u32Rsvd;                   /**< Reserved / explicit padding. */
            uint32_t cLpiIntIdBits;             /**< Number of LPI INTID bits. */
            uint32_t u32PpiCntvOverflw;         /**< Absolute INTID of the virtual timer PPI (see nemR3WinGicCreate). */
            uint32_t u32PpiPmu;                 /**< INTID of the PMU PPI. */
            uint32_t au32Rsvd[6];               /**< Reserved / pads the structure out to 64 bytes. */
        } GicV3;
    } u;
} MY_WHV_ARM64_IC_PARAMETERS;
AssertCompileSize(MY_WHV_ARM64_IC_PARAMETERS, 64);
113
114
/**
 * The hypercall exit context.
 *
 * Private mirror of the hypercall intercept message layout used by newer
 * Hyper-V builds than the SDK we build against (hence the MY_ prefix).
 */
typedef struct MY_WHV_HYPERCALL_CONTEXT
{
    WHV_INTERCEPT_MESSAGE_HEADER Header;    /**< Common intercept message header. */
    uint16_t Immediate;                     /**< Hypercall immediate value; presumably from the HVC instruction — confirm against Hyper-V TLFS. */
    uint16_t u16Rsvd;                       /**< Reserved / padding. */
    uint32_t u32Rsvd;                       /**< Reserved / padding. */
    uint64_t X[18];                         /**< Guest GPRs X0..X17 at the time of the hypercall. */
} MY_WHV_HYPERCALL_CONTEXT;
typedef MY_WHV_HYPERCALL_CONTEXT *PMY_WHV_HYPERCALL_CONTEXT;
AssertCompileSize(MY_WHV_HYPERCALL_CONTEXT, 24 + 19 * sizeof(uint64_t));
128
129
/**
 * The ARM64 reset context.
 *
 * Exit context delivered when the guest requests a reset or power-off
 * (see WHV_ARM64_RESET_CONTEXT_TYPE_XXX below).
 */
typedef struct MY_WHV_ARM64_RESET_CONTEXT
{
    WHV_INTERCEPT_MESSAGE_HEADER Header;    /**< Common intercept message header. */
    uint32_t ResetType;                     /**< WHV_ARM64_RESET_CONTEXT_TYPE_XXX. */
    uint32_t u32Rsvd;                       /**< Reserved / padding. */
} MY_WHV_ARM64_RESET_CONTEXT;
typedef MY_WHV_ARM64_RESET_CONTEXT *PMY_WHV_ARM64_RESET_CONTEXT;
AssertCompileSize(MY_WHV_ARM64_RESET_CONTEXT, 24 + 2 * sizeof(uint32_t));
141
142
143#define WHV_ARM64_RESET_CONTEXT_TYPE_POWER_OFF 0
144#define WHV_ARM64_RESET_CONTEXT_TYPE_RESET 1
145
146
/**
 * The exit reason context for arm64, the size is different
 * from the default SDK we build against.
 *
 * Filled in by WHvRunVirtualProcessor; which union member is valid depends
 * on ExitReason.  Must be exactly 272 bytes (see AssertCompileSize below).
 */
typedef struct MY_WHV_RUN_VP_EXIT_CONTEXT
{
    WHV_RUN_VP_EXIT_REASON ExitReason;      /**< Why the virtual processor stopped running. */
    uint32_t u32Rsvd;                       /**< Reserved / padding. */
    uint64_t u64Rsvd;                       /**< Reserved / padding. */
    union
    {
        WHV_MEMORY_ACCESS_CONTEXT MemoryAccess;                     /**< Valid for memory access exits. */
        WHV_RUN_VP_CANCELED_CONTEXT CancelReason;                   /**< Valid when the run was canceled. */
        MY_WHV_HYPERCALL_CONTEXT Hypercall;                         /**< Valid for hypercall exits. */
        WHV_UNRECOVERABLE_EXCEPTION_CONTEXT UnrecoverableException; /**< Valid for unrecoverable exception exits. */
        MY_WHV_ARM64_RESET_CONTEXT Arm64Reset;                      /**< Valid for reset/power-off exits. */
        uint64_t au64Rsvd2[32];                                     /**< Pads the union to 256 bytes. */
    };
} MY_WHV_RUN_VP_EXIT_CONTEXT;
typedef MY_WHV_RUN_VP_EXIT_CONTEXT *PMY_WHV_RUN_VP_EXIT_CONTEXT;
AssertCompileSize(MY_WHV_RUN_VP_EXIT_CONTEXT, 272);
168
169#define My_WHvArm64RegisterGicrBaseGpa ((WHV_REGISTER_NAME)UINT32_C(0x00063000))
170
171
172/*********************************************************************************************************************************
173* Defined Constants And Macros *
174*********************************************************************************************************************************/
175
176
177/*********************************************************************************************************************************
178* Global Variables *
179*********************************************************************************************************************************/
/** @name APIs imported from WinHvPlatform.dll
 *
 * Resolved at runtime by nemR3WinInitProbeAndLoad() via the g_aImports table
 * below; all are NULL until that succeeds.
 * @{ */
static decltype(WHvGetCapability) * g_pfnWHvGetCapability;
static decltype(WHvCreatePartition) * g_pfnWHvCreatePartition;
static decltype(WHvSetupPartition) * g_pfnWHvSetupPartition;
static decltype(WHvDeletePartition) * g_pfnWHvDeletePartition;
static decltype(WHvGetPartitionProperty) * g_pfnWHvGetPartitionProperty;
static decltype(WHvSetPartitionProperty) * g_pfnWHvSetPartitionProperty;
static decltype(WHvMapGpaRange) * g_pfnWHvMapGpaRange;
static decltype(WHvUnmapGpaRange) * g_pfnWHvUnmapGpaRange;
static decltype(WHvTranslateGva) * g_pfnWHvTranslateGva;
static decltype(WHvQueryGpaRangeDirtyBitmap) * g_pfnWHvQueryGpaRangeDirtyBitmap;
static decltype(WHvCreateVirtualProcessor) * g_pfnWHvCreateVirtualProcessor;
static decltype(WHvDeleteVirtualProcessor) * g_pfnWHvDeleteVirtualProcessor;
static decltype(WHvRunVirtualProcessor) * g_pfnWHvRunVirtualProcessor;
static decltype(WHvCancelRunVirtualProcessor) * g_pfnWHvCancelRunVirtualProcessor;
static decltype(WHvGetVirtualProcessorRegisters) * g_pfnWHvGetVirtualProcessorRegisters;
static decltype(WHvSetVirtualProcessorRegisters) * g_pfnWHvSetVirtualProcessorRegisters;
static decltype(WHvSuspendPartitionTime) * g_pfnWHvSuspendPartitionTime;
static decltype(WHvResumePartitionTime) * g_pfnWHvResumePartitionTime;
/* NOTE(review): the next three are intentionally non-static — presumably
   referenced from another compilation unit (e.g. the NEM GIC code); confirm
   before making them static. */
decltype(WHvGetVirtualProcessorState) * g_pfnWHvGetVirtualProcessorState;
decltype(WHvSetVirtualProcessorState) * g_pfnWHvSetVirtualProcessorState;
decltype(WHvRequestInterrupt) * g_pfnWHvRequestInterrupt;
/** @} */

/** The Windows build number. */
static uint32_t g_uBuildNo = 17134;
207
208
209
/**
 * Import instructions.
 *
 * Processed by nemR3WinInitProbeAndLoad(); idxDll indexes the s_apszDllNames /
 * ahMods arrays there, so the two must stay in sync.  A failed non-optional
 * import fails NEM initialization.
 */
static const struct
{
    uint8_t idxDll; /**< 0 for WinHvPlatform.dll, 1 for vid.dll. */
    bool fOptional; /**< Set if import is optional. */
    PFNRT *ppfn; /**< The function pointer variable. */
    const char *pszName; /**< The function name. */
} g_aImports[] =
{
#define NEM_WIN_IMPORT(a_idxDll, a_fOptional, a_Name) { (a_idxDll), (a_fOptional), (PFNRT *)&RT_CONCAT(g_pfn,a_Name), #a_Name }
    NEM_WIN_IMPORT(0, false, WHvGetCapability),
    NEM_WIN_IMPORT(0, false, WHvCreatePartition),
    NEM_WIN_IMPORT(0, false, WHvSetupPartition),
    NEM_WIN_IMPORT(0, false, WHvDeletePartition),
    NEM_WIN_IMPORT(0, false, WHvGetPartitionProperty),
    NEM_WIN_IMPORT(0, false, WHvSetPartitionProperty),
    NEM_WIN_IMPORT(0, false, WHvMapGpaRange),
    NEM_WIN_IMPORT(0, false, WHvUnmapGpaRange),
    NEM_WIN_IMPORT(0, false, WHvTranslateGva),
    NEM_WIN_IMPORT(0, true,  WHvQueryGpaRangeDirtyBitmap), /* Not in pre-2004 SDK builds, hence optional. */
    NEM_WIN_IMPORT(0, false, WHvCreateVirtualProcessor),
    NEM_WIN_IMPORT(0, false, WHvDeleteVirtualProcessor),
    NEM_WIN_IMPORT(0, false, WHvRunVirtualProcessor),
    NEM_WIN_IMPORT(0, false, WHvCancelRunVirtualProcessor),
    NEM_WIN_IMPORT(0, false, WHvGetVirtualProcessorRegisters),
    NEM_WIN_IMPORT(0, false, WHvSetVirtualProcessorRegisters),
    NEM_WIN_IMPORT(0, false, WHvSuspendPartitionTime),
    NEM_WIN_IMPORT(0, false, WHvResumePartitionTime),
    NEM_WIN_IMPORT(0, false, WHvGetVirtualProcessorState),
    NEM_WIN_IMPORT(0, false, WHvSetVirtualProcessorState),
    NEM_WIN_IMPORT(0, false, WHvRequestInterrupt),
#undef NEM_WIN_IMPORT
};
245
246
247/*
248 * Let the preprocessor alias the APIs to import variables for better autocompletion.
249 */
250#ifndef IN_SLICKEDIT
251# define WHvGetCapability g_pfnWHvGetCapability
252# define WHvCreatePartition g_pfnWHvCreatePartition
253# define WHvSetupPartition g_pfnWHvSetupPartition
254# define WHvDeletePartition g_pfnWHvDeletePartition
255# define WHvGetPartitionProperty g_pfnWHvGetPartitionProperty
256# define WHvSetPartitionProperty g_pfnWHvSetPartitionProperty
257# define WHvMapGpaRange g_pfnWHvMapGpaRange
258# define WHvUnmapGpaRange g_pfnWHvUnmapGpaRange
259# define WHvTranslateGva g_pfnWHvTranslateGva
260# define WHvQueryGpaRangeDirtyBitmap g_pfnWHvQueryGpaRangeDirtyBitmap
261# define WHvCreateVirtualProcessor g_pfnWHvCreateVirtualProcessor
262# define WHvDeleteVirtualProcessor g_pfnWHvDeleteVirtualProcessor
263# define WHvRunVirtualProcessor g_pfnWHvRunVirtualProcessor
264# define WHvGetRunExitContextSize g_pfnWHvGetRunExitContextSize
265# define WHvCancelRunVirtualProcessor g_pfnWHvCancelRunVirtualProcessor
266# define WHvGetVirtualProcessorRegisters g_pfnWHvGetVirtualProcessorRegisters
267# define WHvSetVirtualProcessorRegisters g_pfnWHvSetVirtualProcessorRegisters
268# define WHvSuspendPartitionTime g_pfnWHvSuspendPartitionTime
269# define WHvResumePartitionTime g_pfnWHvResumePartitionTime
270# define WHvGetVirtualProcessorState g_pfnWHvGetVirtualProcessorState
271# define WHvSetVirtualProcessorState g_pfnWHvSetVirtualProcessorState
272# define WHvRequestInterrupt g_pfnWHvRequestInterrupt
273#endif
274
275#if 0 /* unused */
276/** WHV_MEMORY_ACCESS_TYPE names */
277static const char * const g_apszWHvMemAccesstypes[4] = { "read", "write", "exec", "!undefined!" };
278#endif
279/** NEM_WIN_PAGE_STATE_XXX names. */
280NEM_TMPL_STATIC const char * const g_apszPageStates[4] = { "not-set", "unmapped", "readable", "writable" };
281#ifdef LOG_ENABLED
282/** HV_INTERCEPT_ACCESS_TYPE names. */
283static const char * const g_apszHvInterceptAccessTypes[4] = { "read", "write", "exec", "!undefined!" };
284#endif
285
286
287/*********************************************************************************************************************************
288* Internal Functions *
289*********************************************************************************************************************************/
290DECLINLINE(int) nemR3NativeGCPhys2R3PtrReadOnly(PVM pVM, RTGCPHYS GCPhys, const void **ppv);
291DECLINLINE(int) nemR3NativeGCPhys2R3PtrWriteable(PVM pVM, RTGCPHYS GCPhys, void **ppv);
292
293NEM_TMPL_STATIC int nemHCNativeSetPhysPage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
294 uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged);
295
296/**
297 * Worker for nemR3NativeInit that probes and load the native API.
298 *
299 * @returns VBox status code.
300 * @param fForced Whether the HMForced flag is set and we should
301 * fail if we cannot initialize.
302 * @param pErrInfo Where to always return error info.
303 */
304static int nemR3WinInitProbeAndLoad(bool fForced, PRTERRINFO pErrInfo)
305{
306 /*
307 * Check that the DLL files we need are present, but without loading them.
308 * We'd like to avoid loading them unnecessarily.
309 */
310 WCHAR wszPath[MAX_PATH + 64];
311 UINT cwcPath = GetSystemDirectoryW(wszPath, MAX_PATH);
312 if (cwcPath >= MAX_PATH || cwcPath < 2)
313 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "GetSystemDirectoryW failed (%#x / %u)", cwcPath, GetLastError());
314
315 if (wszPath[cwcPath - 1] != '\\' || wszPath[cwcPath - 1] != '/')
316 wszPath[cwcPath++] = '\\';
317 RTUtf16CopyAscii(&wszPath[cwcPath], RT_ELEMENTS(wszPath) - cwcPath, "WinHvPlatform.dll");
318 if (GetFileAttributesW(wszPath) == INVALID_FILE_ATTRIBUTES)
319 return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE, "The native API dll was not found (%ls)", wszPath);
320
321 /*
322 * Check that we're in a VM and that the hypervisor identifies itself as Hyper-V.
323 */
324 /** @todo */
325
326 /** @todo would be great if we could recognize a root partition from the
327 * CPUID info, but I currently don't dare do that. */
328
329 /*
330 * Now try load the DLLs and resolve the APIs.
331 */
332 static const char * const s_apszDllNames[1] = { "WinHvPlatform.dll" };
333 RTLDRMOD ahMods[1] = { NIL_RTLDRMOD };
334 int rc = VINF_SUCCESS;
335 for (unsigned i = 0; i < RT_ELEMENTS(s_apszDllNames); i++)
336 {
337 int rc2 = RTLdrLoadSystem(s_apszDllNames[i], true /*fNoUnload*/, &ahMods[i]);
338 if (RT_FAILURE(rc2))
339 {
340 if (!RTErrInfoIsSet(pErrInfo))
341 RTErrInfoSetF(pErrInfo, rc2, "Failed to load API DLL: %s: %Rrc", s_apszDllNames[i], rc2);
342 else
343 RTErrInfoAddF(pErrInfo, rc2, "; %s: %Rrc", s_apszDllNames[i], rc2);
344 ahMods[i] = NIL_RTLDRMOD;
345 rc = VERR_NEM_INIT_FAILED;
346 }
347 }
348 if (RT_SUCCESS(rc))
349 {
350 for (unsigned i = 0; i < RT_ELEMENTS(g_aImports); i++)
351 {
352 int rc2 = RTLdrGetSymbol(ahMods[g_aImports[i].idxDll], g_aImports[i].pszName, (void **)g_aImports[i].ppfn);
353 if (RT_SUCCESS(rc2))
354 {
355 if (g_aImports[i].fOptional)
356 LogRel(("NEM: info: Found optional import %s!%s.\n",
357 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName));
358 }
359 else
360 {
361 *g_aImports[i].ppfn = NULL;
362
363 LogRel(("NEM: %s: Failed to import %s!%s: %Rrc",
364 g_aImports[i].fOptional ? "info" : fForced ? "fatal" : "error",
365 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName, rc2));
366 if (!g_aImports[i].fOptional)
367 {
368 if (RTErrInfoIsSet(pErrInfo))
369 RTErrInfoAddF(pErrInfo, rc2, ", %s!%s",
370 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName);
371 else
372 rc = RTErrInfoSetF(pErrInfo, rc2, "Failed to import: %s!%s",
373 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName);
374 Assert(RT_FAILURE(rc));
375 }
376 }
377 }
378 if (RT_SUCCESS(rc))
379 {
380 Assert(!RTErrInfoIsSet(pErrInfo));
381 }
382 }
383
384 for (unsigned i = 0; i < RT_ELEMENTS(ahMods); i++)
385 RTLdrClose(ahMods[i]);
386 return rc;
387}
388
389
390/**
391 * Wrapper for different WHvGetCapability signatures.
392 */
393DECLINLINE(HRESULT) WHvGetCapabilityWrapper(WHV_CAPABILITY_CODE enmCap, WHV_CAPABILITY *pOutput, uint32_t cbOutput)
394{
395 return g_pfnWHvGetCapability(enmCap, pOutput, cbOutput, NULL);
396}
397
398
399/**
400 * Worker for nemR3NativeInit that gets the hypervisor capabilities.
401 *
402 * @returns VBox status code.
403 * @param pVM The cross context VM structure.
404 * @param pErrInfo Where to always return error info.
405 */
406static int nemR3WinInitCheckCapabilities(PVM pVM, PRTERRINFO pErrInfo)
407{
408#define NEM_LOG_REL_CAP_EX(a_szField, a_szFmt, a_Value) LogRel(("NEM: %-38s= " a_szFmt "\n", a_szField, a_Value))
409#define NEM_LOG_REL_CAP_SUB_EX(a_szField, a_szFmt, a_Value) LogRel(("NEM: %36s: " a_szFmt "\n", a_szField, a_Value))
410#define NEM_LOG_REL_CAP_SUB(a_szField, a_Value) NEM_LOG_REL_CAP_SUB_EX(a_szField, "%d", a_Value)
411
412 /*
413 * Is the hypervisor present with the desired capability?
414 *
415 * In build 17083 this translates into:
416 * - CPUID[0x00000001].HVP is set
417 * - CPUID[0x40000000] == "Microsoft Hv"
418 * - CPUID[0x40000001].eax == "Hv#1"
419 * - CPUID[0x40000003].ebx[12] is set.
420 * - VidGetExoPartitionProperty(INVALID_HANDLE_VALUE, 0x60000, &Ignored) returns
421 * a non-zero value.
422 */
423 /**
424 * @todo Someone at Microsoft please explain weird API design:
425 * 1. Pointless CapabilityCode duplication int the output;
426 * 2. No output size.
427 */
428 WHV_CAPABILITY Caps;
429 RT_ZERO(Caps);
430 SetLastError(0);
431 HRESULT hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeHypervisorPresent, &Caps, sizeof(Caps));
432 DWORD rcWin = GetLastError();
433 if (FAILED(hrc))
434 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
435 "WHvGetCapability/WHvCapabilityCodeHypervisorPresent failed: %Rhrc (Last=%#x/%u)",
436 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
437 if (!Caps.HypervisorPresent)
438 {
439 if (!RTPathExists(RTPATH_NT_PASSTHRU_PREFIX "Device\\VidExo"))
440 return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE,
441 "WHvCapabilityCodeHypervisorPresent is FALSE! Make sure you have enabled the 'Windows Hypervisor Platform' feature.");
442 return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE, "WHvCapabilityCodeHypervisorPresent is FALSE! (%u)", rcWin);
443 }
444 LogRel(("NEM: WHvCapabilityCodeHypervisorPresent is TRUE, so this might work...\n"));
445
446
447 /*
448 * Check what extended VM exits are supported.
449 */
450 RT_ZERO(Caps);
451 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeExtendedVmExits, &Caps, sizeof(Caps));
452 if (FAILED(hrc))
453 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
454 "WHvGetCapability/WHvCapabilityCodeExtendedVmExits failed: %Rhrc (Last=%#x/%u)",
455 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
456 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeExtendedVmExits", "%'#018RX64", Caps.ExtendedVmExits.AsUINT64);
457 pVM->nem.s.fHypercallExit = RT_BOOL(Caps.ExtendedVmExits.HypercallExit);
458 pVM->nem.s.fGpaAccessFaultExit = RT_BOOL(Caps.ExtendedVmExits.GpaAccessFaultExit);
459 NEM_LOG_REL_CAP_SUB("fHypercallExit", pVM->nem.s.fHypercallExit);
460 NEM_LOG_REL_CAP_SUB("fGpaAccessFaultExit", pVM->nem.s.fGpaAccessFaultExit);
461 if (Caps.ExtendedVmExits.AsUINT64 & ~(uint64_t)7)
462 LogRel(("NEM: Warning! Unknown VM exit definitions: %#RX64\n", Caps.ExtendedVmExits.AsUINT64));
463 /** @todo RECHECK: WHV_EXTENDED_VM_EXITS typedef. */
464
465 /*
466 * Check features in case they end up defining any.
467 */
468 RT_ZERO(Caps);
469 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeFeatures, &Caps, sizeof(Caps));
470 if (FAILED(hrc))
471 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
472 "WHvGetCapability/WHvCapabilityCodeFeatures failed: %Rhrc (Last=%#x/%u)",
473 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
474 if (Caps.Features.AsUINT64 & ~(uint64_t)0)
475 LogRel(("NEM: Warning! Unknown feature definitions: %#RX64\n", Caps.Features.AsUINT64));
476 /** @todo RECHECK: WHV_CAPABILITY_FEATURES typedef. */
477
478 /*
479 * Check that the CPU vendor is supported.
480 */
481 RT_ZERO(Caps);
482 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeProcessorVendor, &Caps, sizeof(Caps));
483 if (FAILED(hrc))
484 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
485 "WHvGetCapability/WHvCapabilityCodeProcessorVendor failed: %Rhrc (Last=%#x/%u)",
486 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
487 switch (Caps.ProcessorVendor)
488 {
489 /** @todo RECHECK: WHV_PROCESSOR_VENDOR typedef. */
490 case WHvProcessorVendorArm:
491 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorVendor", "%d - ARM", Caps.ProcessorVendor);
492 pVM->nem.s.enmCpuVendor = CPUMCPUVENDOR_UNKNOWN;
493 break;
494 default:
495 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorVendor", "%d", Caps.ProcessorVendor);
496 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Unknown processor vendor: %d", Caps.ProcessorVendor);
497 }
498
499 /*
500 * CPU features, guessing these are virtual CPU features?
501 */
502 RT_ZERO(Caps);
503 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeProcessorFeatures, &Caps, sizeof(Caps));
504 if (FAILED(hrc))
505 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
506 "WHvGetCapability/WHvCapabilityCodeProcessorFeatures failed: %Rhrc (Last=%#x/%u)",
507 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
508 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorFeatures", "%'#018RX64", Caps.ProcessorFeatures.AsUINT64);
509#define NEM_LOG_REL_CPU_FEATURE(a_Field) NEM_LOG_REL_CAP_SUB(#a_Field, Caps.ProcessorFeatures.a_Field)
510 NEM_LOG_REL_CPU_FEATURE(Asid16);
511 NEM_LOG_REL_CPU_FEATURE(TGran16);
512 NEM_LOG_REL_CPU_FEATURE(TGran64);
513 NEM_LOG_REL_CPU_FEATURE(Haf);
514 NEM_LOG_REL_CPU_FEATURE(Hdbs);
515 NEM_LOG_REL_CPU_FEATURE(Pan);
516 NEM_LOG_REL_CPU_FEATURE(AtS1E1);
517 NEM_LOG_REL_CPU_FEATURE(Uao);
518 NEM_LOG_REL_CPU_FEATURE(El0Aarch32);
519 NEM_LOG_REL_CPU_FEATURE(Fp);
520 NEM_LOG_REL_CPU_FEATURE(FpHp);
521 NEM_LOG_REL_CPU_FEATURE(AdvSimd);
522 NEM_LOG_REL_CPU_FEATURE(AdvSimdHp);
523 NEM_LOG_REL_CPU_FEATURE(GicV3V4);
524 NEM_LOG_REL_CPU_FEATURE(GicV41);
525 NEM_LOG_REL_CPU_FEATURE(Ras);
526 NEM_LOG_REL_CPU_FEATURE(PmuV3);
527 NEM_LOG_REL_CPU_FEATURE(PmuV3ArmV81);
528 NEM_LOG_REL_CPU_FEATURE(PmuV3ArmV84);
529 NEM_LOG_REL_CPU_FEATURE(PmuV3ArmV85);
530 NEM_LOG_REL_CPU_FEATURE(Aes);
531 NEM_LOG_REL_CPU_FEATURE(PolyMul);
532 NEM_LOG_REL_CPU_FEATURE(Sha1);
533 NEM_LOG_REL_CPU_FEATURE(Sha256);
534 NEM_LOG_REL_CPU_FEATURE(Sha512);
535 NEM_LOG_REL_CPU_FEATURE(Crc32);
536 NEM_LOG_REL_CPU_FEATURE(Atomic);
537 NEM_LOG_REL_CPU_FEATURE(Rdm);
538 NEM_LOG_REL_CPU_FEATURE(Sha3);
539 NEM_LOG_REL_CPU_FEATURE(Sm3);
540 NEM_LOG_REL_CPU_FEATURE(Sm4);
541 NEM_LOG_REL_CPU_FEATURE(Dp);
542 NEM_LOG_REL_CPU_FEATURE(Fhm);
543 NEM_LOG_REL_CPU_FEATURE(DcCvap);
544 NEM_LOG_REL_CPU_FEATURE(DcCvadp);
545 NEM_LOG_REL_CPU_FEATURE(ApaBase);
546 NEM_LOG_REL_CPU_FEATURE(ApaEp);
547 NEM_LOG_REL_CPU_FEATURE(ApaEp2);
548 NEM_LOG_REL_CPU_FEATURE(ApaEp2Fp);
549 NEM_LOG_REL_CPU_FEATURE(ApaEp2Fpc);
550 NEM_LOG_REL_CPU_FEATURE(Jscvt);
551 NEM_LOG_REL_CPU_FEATURE(Fcma);
552 NEM_LOG_REL_CPU_FEATURE(RcpcV83);
553 NEM_LOG_REL_CPU_FEATURE(RcpcV84);
554 NEM_LOG_REL_CPU_FEATURE(Gpa);
555 NEM_LOG_REL_CPU_FEATURE(L1ipPipt);
556 NEM_LOG_REL_CPU_FEATURE(DzPermitted);
557
558#undef NEM_LOG_REL_CPU_FEATURE
559 if (Caps.ProcessorFeatures.AsUINT64 & (~(RT_BIT_64(47) - 1)))
560 LogRel(("NEM: Warning! Unknown CPU features: %#RX64\n", Caps.ProcessorFeatures.AsUINT64));
561 pVM->nem.s.uCpuFeatures.u64 = Caps.ProcessorFeatures.AsUINT64;
562 /** @todo RECHECK: WHV_PROCESSOR_FEATURES typedef. */
563
564 /*
565 * The cache line flush size.
566 */
567 RT_ZERO(Caps);
568 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeProcessorClFlushSize, &Caps, sizeof(Caps));
569 if (FAILED(hrc))
570 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
571 "WHvGetCapability/WHvCapabilityCodeProcessorClFlushSize failed: %Rhrc (Last=%#x/%u)",
572 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
573 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorClFlushSize", "2^%u", Caps.ProcessorClFlushSize);
574 if (Caps.ProcessorClFlushSize < 8 && Caps.ProcessorClFlushSize > 9)
575 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Unsupported cache line flush size: %u", Caps.ProcessorClFlushSize);
576 pVM->nem.s.cCacheLineFlushShift = Caps.ProcessorClFlushSize;
577
578 RT_ZERO(Caps);
579 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodePhysicalAddressWidth, &Caps, sizeof(Caps));
580 if (FAILED(hrc))
581 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
582 "WHvGetCapability/WHvCapabilityCodePhysicalAddressWidth failed: %Rhrc (Last=%#x/%u)",
583 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
584 NEM_LOG_REL_CAP_EX("WHvCapabilityCodePhysicalAddressWidth", "2^%u", Caps.PhysicalAddressWidth);
585 if (Caps.PhysicalAddressWidth < 32 && Caps.PhysicalAddressWidth > 52)
586 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Unsupported physical address width: %u", Caps.ProcessorClFlushSize);
587 pVM->nem.s.cPhysicalAddressWidth = Caps.PhysicalAddressWidth;
588
589
590 /*
591 * See if they've added more properties that we're not aware of.
592 */
593 /** @todo RECHECK: WHV_CAPABILITY_CODE typedef. */
594 if (!IsDebuggerPresent()) /* Too noisy when in debugger, so skip. */
595 {
596 static const struct
597 {
598 uint32_t iMin, iMax; } s_aUnknowns[] =
599 {
600 { 0x0004, 0x000f },
601 { 0x1003, 0x100f },
602 { 0x2000, 0x200f },
603 { 0x3000, 0x300f },
604 { 0x4000, 0x400f },
605 };
606 for (uint32_t j = 0; j < RT_ELEMENTS(s_aUnknowns); j++)
607 for (uint32_t i = s_aUnknowns[j].iMin; i <= s_aUnknowns[j].iMax; i++)
608 {
609 RT_ZERO(Caps);
610 hrc = WHvGetCapabilityWrapper((WHV_CAPABILITY_CODE)i, &Caps, sizeof(Caps));
611 if (SUCCEEDED(hrc))
612 LogRel(("NEM: Warning! Unknown capability %#x returning: %.*Rhxs\n", i, sizeof(Caps), &Caps));
613 }
614 }
615
616 /*
617 * For proper operation, we require CPUID exits.
618 */
619 /** @todo Any? */
620
621#undef NEM_LOG_REL_CAP_EX
622#undef NEM_LOG_REL_CAP_SUB_EX
623#undef NEM_LOG_REL_CAP_SUB
624 return VINF_SUCCESS;
625}
626
627
628/**
629 * Initializes the GIC controller emulation provided by Hyper-V.
630 *
631 * @returns VBox status code.
632 * @param pVM The cross context VM structure.
633 *
634 * @note Needs to be done early when setting up the partition so this has to live here and not in GICNem-win.cpp
635 */
636static int nemR3WinGicCreate(PVM pVM)
637{
638 PCFGMNODE pGicCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "Devices/gic-nem/0/Config");
639 AssertPtrReturn(pGicCfg, VERR_NEM_IPE_5);
640
641 /*
642 * Query the MMIO ranges.
643 */
644 RTGCPHYS GCPhysMmioBaseDist = 0;
645 int rc = CFGMR3QueryU64(pGicCfg, "DistributorMmioBase", &GCPhysMmioBaseDist);
646 if (RT_FAILURE(rc))
647 return VMSetError(pVM, rc, RT_SRC_POS,
648 "Configuration error: Failed to get the \"DistributorMmioBase\" value\n");
649
650 RTGCPHYS GCPhysMmioBaseReDist = 0;
651 rc = CFGMR3QueryU64(pGicCfg, "RedistributorMmioBase", &GCPhysMmioBaseReDist);
652 if (RT_FAILURE(rc))
653 return VMSetError(pVM, rc, RT_SRC_POS,
654 "Configuration error: Failed to get the \"RedistributorMmioBase\" value\n");
655
656 RTGCPHYS GCPhysMmioBaseIts = 0;
657 rc = CFGMR3QueryU64(pGicCfg, "ItsMmioBase", &GCPhysMmioBaseIts);
658 if (RT_FAILURE(rc))
659 return VMSetError(pVM, rc, RT_SRC_POS,
660 "Configuration error: Failed to get the \"ItsMmioBase\" value\n");
661
662 /*
663 * One can only set the GIC distributor base. The re-distributor regions for the individual
664 * vCPUs are configured when the vCPUs are created, so we need to save the base of the MMIO region.
665 */
666 pVM->nem.s.GCPhysMmioBaseReDist = GCPhysMmioBaseReDist;
667
668 WHV_PARTITION_HANDLE hPartition = pVM->nem.s.hPartition;
669
670 MY_WHV_ARM64_IC_PARAMETERS Property; RT_ZERO(Property);
671 Property.u32EmulationMode = WHV_ARM64_IC_EMULATION_MODE_GICV3;
672 Property.u.GicV3.GCPhysGicdBase = GCPhysMmioBaseDist;
673 Property.u.GicV3.GCPhysGitsTranslaterBase = GCPhysMmioBaseIts;
674 Property.u.GicV3.cLpiIntIdBits = 1; /** @todo LPIs are currently not supported with our device emulations. */
675 Property.u.GicV3.u32PpiCntvOverflw = pVM->nem.s.u32GicPpiVTimer + 16; /* Calculate the absolute timer INTID. */
676 Property.u.GicV3.u32PpiPmu = 23; /** @todo Configure dynamically (from SBSA, needs a PMU/NEM emulation just like with the GIC probably). */
677 HRESULT hrc = WHvSetPartitionProperty(hPartition, (WHV_PARTITION_PROPERTY_CODE)WHV_PARTITION_PROPERTY_CODE_ARM64_IC_PARAMETERS, &Property, sizeof(Property));
678 if (FAILED(hrc))
679 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
680 "Failed to set WHvPartitionPropertyCodeArm64IcParameters: %Rhrc (Last=%#x/%u)",
681 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
682
683 return rc;
684}
685
686
687/**
688 * Creates and sets up a Hyper-V (exo) partition.
689 *
690 * @returns VBox status code.
691 * @param pVM The cross context VM structure.
692 * @param pErrInfo Where to always return error info.
693 */
694static int nemR3WinInitCreatePartition(PVM pVM, PRTERRINFO pErrInfo)
695{
696 AssertReturn(!pVM->nem.s.hPartition, RTErrInfoSet(pErrInfo, VERR_WRONG_ORDER, "Wrong initalization order"));
697 AssertReturn(!pVM->nem.s.hPartitionDevice, RTErrInfoSet(pErrInfo, VERR_WRONG_ORDER, "Wrong initalization order"));
698
699 /*
700 * Create the partition.
701 */
702 WHV_PARTITION_HANDLE hPartition;
703 HRESULT hrc = WHvCreatePartition(&hPartition);
704 if (FAILED(hrc))
705 return RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED, "WHvCreatePartition failed with %Rhrc (Last=%#x/%u)",
706 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
707
708 int rc;
709
710 /*
711 * Set partition properties, most importantly the CPU count.
712 */
713 /**
714 * @todo Someone at Microsoft please explain another weird API:
715 * - Why this API doesn't take the WHV_PARTITION_PROPERTY_CODE value as an
716 * argument rather than as part of the struct. That is so weird if you've
717 * used any other NT or windows API, including WHvGetCapability().
718 * - Why use PVOID when WHV_PARTITION_PROPERTY is what's expected. We
719 * technically only need 9 bytes for setting/getting
720 * WHVPartitionPropertyCodeProcessorClFlushSize, but the API insists on 16. */
721 WHV_PARTITION_PROPERTY Property;
722 RT_ZERO(Property);
723 Property.ProcessorCount = pVM->cCpus;
724 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorCount, &Property, sizeof(Property));
725 if (SUCCEEDED(hrc))
726 {
727 RT_ZERO(Property);
728 Property.ExtendedVmExits.HypercallExit = pVM->nem.s.fHypercallExit;
729 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeExtendedVmExits, &Property, sizeof(Property));
730 if (SUCCEEDED(hrc))
731 {
732 /*
733 * We'll continue setup in nemR3NativeInitAfterCPUM.
734 */
735 pVM->nem.s.fCreatedEmts = false;
736 pVM->nem.s.hPartition = hPartition;
737 LogRel(("NEM: Created partition %p.\n", hPartition));
738 return VINF_SUCCESS;
739 }
740
741 rc = RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED,
742 "Failed setting WHvPartitionPropertyCodeExtendedVmExits to %'#RX64: %Rhrc",
743 Property.ExtendedVmExits.AsUINT64, hrc);
744 }
745 else
746 rc = RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED,
747 "Failed setting WHvPartitionPropertyCodeProcessorCount to %u: %Rhrc (Last=%#x/%u)",
748 pVM->cCpus, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
749 WHvDeletePartition(hPartition);
750
751 Assert(!pVM->nem.s.hPartitionDevice);
752 Assert(!pVM->nem.s.hPartition);
753 return rc;
754}
755
756
757static int nemR3NativeInitSetupVm(PVM pVM)
758{
759 WHV_PARTITION_HANDLE hPartition = pVM->nem.s.hPartition;
760 AssertReturn(hPartition != NULL, VERR_WRONG_ORDER);
761 AssertReturn(!pVM->nem.s.hPartitionDevice, VERR_WRONG_ORDER);
762 AssertReturn(!pVM->nem.s.fCreatedEmts, VERR_WRONG_ORDER);
763
764 /*
765 * Continue setting up the partition now that we've got most of the CPUID feature stuff.
766 */
767 WHV_PARTITION_PROPERTY Property;
768 HRESULT hrc;
769
770 /* Not sure if we really need to set the cache line flush size. */
771 RT_ZERO(Property);
772 Property.ProcessorClFlushSize = pVM->nem.s.cCacheLineFlushShift;
773 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorClFlushSize, &Property, sizeof(Property));
774 if (FAILED(hrc))
775 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
776 "Failed to set WHvPartitionPropertyCodeProcessorClFlushSize to %u: %Rhrc (Last=%#x/%u)",
777 pVM->nem.s.cCacheLineFlushShift, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
778
779 /*
780 * Sync CPU features with CPUM.
781 */
782 /** @todo sync CPU features with CPUM. */
783
784 /* Set the partition property. */
785 RT_ZERO(Property);
786 Property.ProcessorFeatures.AsUINT64 = pVM->nem.s.uCpuFeatures.u64;
787 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorFeatures, &Property, sizeof(Property));
788 if (FAILED(hrc))
789 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
790 "Failed to set WHvPartitionPropertyCodeProcessorFeatures to %'#RX64: %Rhrc (Last=%#x/%u)",
791 pVM->nem.s.uCpuFeatures.u64, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
792
793 /* Configure the GIC. */
794 int rc = nemR3WinGicCreate(pVM);
795 if (RT_FAILURE(rc))
796 return rc;
797
798 /*
799 * Set up the partition.
800 *
801 * Seems like this is where the partition is actually instantiated and we get
802 * a handle to it.
803 */
804 hrc = WHvSetupPartition(hPartition);
805 if (FAILED(hrc))
806 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
807 "Call to WHvSetupPartition failed: %Rhrc (Last=%#x/%u)",
808 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
809
810 /*
811 * Setup the EMTs.
812 */
813 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
814 {
815 hrc = WHvCreateVirtualProcessor(hPartition, idCpu, 0 /*fFlags*/);
816 if (FAILED(hrc))
817 {
818 NTSTATUS const rcNtLast = RTNtLastStatusValue();
819 DWORD const dwErrLast = RTNtLastErrorValue();
820 while (idCpu-- > 0)
821 {
822 HRESULT hrc2 = WHvDeleteVirtualProcessor(hPartition, idCpu);
823 AssertLogRelMsg(SUCCEEDED(hrc2), ("WHvDeleteVirtualProcessor(%p, %u) -> %Rhrc (Last=%#x/%u)\n",
824 hPartition, idCpu, hrc2, RTNtLastStatusValue(),
825 RTNtLastErrorValue()));
826 }
827 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
828 "Call to WHvCreateVirtualProcessor failed: %Rhrc (Last=%#x/%u)", hrc, rcNtLast, dwErrLast);
829 }
830
831 if (idCpu == 0)
832 {
833 /*
834 * Need to query the ID registers and populate CPUM,
835 * these are partition wide registers and need to be queried/set with WHV_ANY_VP.
836 */
837 CPUMARMV8IDREGS IdRegs; RT_ZERO(IdRegs);
838
839 WHV_REGISTER_NAME aenmNames[10];
840 WHV_REGISTER_VALUE aValues[10];
841 RT_ZERO(aValues);
842
843 aenmNames[0] = WHvArm64RegisterIdAa64Dfr0El1;
844 aenmNames[1] = WHvArm64RegisterIdAa64Dfr1El1;
845 aenmNames[2] = WHvArm64RegisterIdAa64Isar0El1;
846 aenmNames[3] = WHvArm64RegisterIdAa64Isar1El1;
847 aenmNames[4] = WHvArm64RegisterIdAa64Isar2El1;
848 aenmNames[5] = WHvArm64RegisterIdAa64Mmfr0El1;
849 aenmNames[6] = WHvArm64RegisterIdAa64Mmfr1El1;
850 aenmNames[7] = WHvArm64RegisterIdAa64Mmfr2El1;
851 aenmNames[8] = WHvArm64RegisterIdAa64Pfr0El1;
852 aenmNames[9] = WHvArm64RegisterIdAa64Pfr1El1;
853
854 hrc = WHvGetVirtualProcessorRegisters(hPartition, WHV_ANY_VP /*idCpu*/, aenmNames, RT_ELEMENTS(aenmNames), aValues);
855 AssertLogRelMsgReturn(SUCCEEDED(hrc),
856 ("WHvGetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
857 hPartition, WHV_ANY_VP, RT_ELEMENTS(aenmNames), hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
858 , VERR_NEM_GET_REGISTERS_FAILED);
859
860 IdRegs.u64RegIdAa64Pfr0El1 = aValues[8].Reg64;
861 IdRegs.u64RegIdAa64Pfr1El1 = aValues[9].Reg64;
862 IdRegs.u64RegIdAa64Dfr0El1 = aValues[0].Reg64;
863 IdRegs.u64RegIdAa64Dfr1El1 = aValues[1].Reg64;
864 IdRegs.u64RegIdAa64Isar0El1 = aValues[2].Reg64;
865 IdRegs.u64RegIdAa64Isar1El1 = aValues[3].Reg64;
866 IdRegs.u64RegIdAa64Isar2El1 = aValues[4].Reg64;
867 IdRegs.u64RegIdAa64Mmfr0El1 = aValues[5].Reg64;
868 IdRegs.u64RegIdAa64Mmfr1El1 = aValues[6].Reg64;
869 IdRegs.u64RegIdAa64Mmfr2El1 = aValues[7].Reg64;
870
871 rc = CPUMR3PopulateFeaturesByIdRegisters(pVM, &IdRegs);
872 if (RT_FAILURE(rc))
873 return rc;
874
875 /* Apply any overrides to the partition. */
876 PCCPUMARMV8IDREGS pIdRegsGst = NULL;
877 rc = CPUMR3QueryGuestIdRegs(pVM, &pIdRegsGst);
878 AssertRCReturn(rc, rc);
879
880 aValues[0].Reg64 = pIdRegsGst->u64RegIdAa64Dfr0El1;
881 aValues[1].Reg64 = pIdRegsGst->u64RegIdAa64Dfr1El1;
882 aValues[2].Reg64 = pIdRegsGst->u64RegIdAa64Isar0El1;
883 aValues[3].Reg64 = pIdRegsGst->u64RegIdAa64Isar1El1;
884 aValues[4].Reg64 = pIdRegsGst->u64RegIdAa64Isar2El1;
885 aValues[5].Reg64 = pIdRegsGst->u64RegIdAa64Mmfr0El1;
886 aValues[6].Reg64 = pIdRegsGst->u64RegIdAa64Mmfr1El1;
887 aValues[7].Reg64 = pIdRegsGst->u64RegIdAa64Mmfr2El1;
888 aValues[8].Reg64 = pIdRegsGst->u64RegIdAa64Pfr0El1;
889 aValues[9].Reg64 = pIdRegsGst->u64RegIdAa64Pfr1El1;
890
891 hrc = WHvSetVirtualProcessorRegisters(hPartition, WHV_ANY_VP /*idCpu*/, aenmNames, RT_ELEMENTS(aenmNames), aValues);
892 AssertLogRelMsgReturn(SUCCEEDED(hrc),
893 ("WHvGetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
894 hPartition, WHV_ANY_VP, RT_ELEMENTS(aenmNames), hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
895 , VERR_NEM_SET_REGISTERS_FAILED);
896
897 /* Save the amount of break-/watchpoints supported for syncing the guest register state later. */
898 pVM->nem.s.cBreakpoints = RT_BF_GET(pIdRegsGst->u64RegIdAa64Dfr0El1, ARMV8_ID_AA64DFR0_EL1_BRPS) + 1;
899 pVM->nem.s.cWatchpoints = RT_BF_GET(pIdRegsGst->u64RegIdAa64Dfr0El1, ARMV8_ID_AA64DFR0_EL1_WRPS) + 1;
900 }
901
902 /* Configure the GIC re-distributor region for the GIC. */
903 WHV_REGISTER_NAME enmName = My_WHvArm64RegisterGicrBaseGpa;
904 WHV_REGISTER_VALUE Value;
905 Value.Reg64 = pVM->nem.s.GCPhysMmioBaseReDist + idCpu * _128K;
906
907 hrc = WHvSetVirtualProcessorRegisters(hPartition, idCpu, &enmName, 1, &Value);
908 AssertLogRelMsgReturn(SUCCEEDED(hrc),
909 ("WHvSetVirtualProcessorRegisters(%p, %u, WHvArm64RegisterGicrBaseGpa,) -> %Rhrc (Last=%#x/%u)\n",
910 hPartition, idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
911 , VERR_NEM_SET_REGISTERS_FAILED);
912 }
913
914 pVM->nem.s.fCreatedEmts = true;
915
916 LogRel(("NEM: Successfully set up partition\n"));
917 return VINF_SUCCESS;
918}
919
920
921/**
922 * Try initialize the native API.
923 *
924 * This may only do part of the job, more can be done in
925 * nemR3NativeInitAfterCPUM() and nemR3NativeInitCompleted().
926 *
927 * @returns VBox status code.
928 * @param pVM The cross context VM structure.
929 * @param fFallback Whether we're in fallback mode or use-NEM mode. In
930 * the latter we'll fail if we cannot initialize.
931 * @param fForced Whether the HMForced flag is set and we should
932 * fail if we cannot initialize.
933 */
934int nemR3NativeInit(PVM pVM, bool fFallback, bool fForced)
935{
936 g_uBuildNo = RTSystemGetNtBuildNo();
937
938 /*
939 * Error state.
940 * The error message will be non-empty on failure and 'rc' will be set too.
941 */
942 RTERRINFOSTATIC ErrInfo;
943 PRTERRINFO pErrInfo = RTErrInfoInitStatic(&ErrInfo);
944 int rc = nemR3WinInitProbeAndLoad(fForced, pErrInfo);
945 if (RT_SUCCESS(rc))
946 {
947 /*
948 * Check the capabilties of the hypervisor, starting with whether it's present.
949 */
950 rc = nemR3WinInitCheckCapabilities(pVM, pErrInfo);
951 if (RT_SUCCESS(rc))
952 {
953 /*
954 * Create and initialize a partition.
955 */
956 rc = nemR3WinInitCreatePartition(pVM, pErrInfo);
957 if (RT_SUCCESS(rc))
958 {
959 rc = nemR3NativeInitSetupVm(pVM);
960 if (RT_SUCCESS(rc))
961 {
962 /*
963 * Set ourselves as the execution engine and make config adjustments.
964 */
965 VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_NATIVE_API);
966 Log(("NEM: Marked active!\n"));
967 PGMR3EnableNemMode(pVM);
968
969 /*
970 * Register release statistics
971 */
972 STAMR3Register(pVM, (void *)&pVM->nem.s.cMappedPages, STAMTYPE_U32, STAMVISIBILITY_ALWAYS,
973 "/NEM/PagesCurrentlyMapped", STAMUNIT_PAGES, "Number guest pages currently mapped by the VM");
974 STAMR3Register(pVM, (void *)&pVM->nem.s.StatMapPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
975 "/NEM/PagesMapCalls", STAMUNIT_PAGES, "Calls to WHvMapGpaRange/HvCallMapGpaPages");
976 STAMR3Register(pVM, (void *)&pVM->nem.s.StatMapPageFailed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
977 "/NEM/PagesMapFails", STAMUNIT_PAGES, "Calls to WHvMapGpaRange/HvCallMapGpaPages that failed");
978 STAMR3Register(pVM, (void *)&pVM->nem.s.StatUnmapPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
979 "/NEM/PagesUnmapCalls", STAMUNIT_PAGES, "Calls to WHvUnmapGpaRange/HvCallUnmapGpaPages");
980 STAMR3Register(pVM, (void *)&pVM->nem.s.StatUnmapPageFailed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
981 "/NEM/PagesUnmapFails", STAMUNIT_PAGES, "Calls to WHvUnmapGpaRange/HvCallUnmapGpaPages that failed");
982 STAMR3Register(pVM, &pVM->nem.s.StatProfMapGpaRange, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
983 "/NEM/PagesMapGpaRange", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvMapGpaRange for bigger stuff");
984 STAMR3Register(pVM, &pVM->nem.s.StatProfUnmapGpaRange, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
985 "/NEM/PagesUnmapGpaRange", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvUnmapGpaRange for bigger stuff");
986 STAMR3Register(pVM, &pVM->nem.s.StatProfMapGpaRangePage, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
987 "/NEM/PagesMapGpaRangePage", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvMapGpaRange for single pages");
988 STAMR3Register(pVM, &pVM->nem.s.StatProfUnmapGpaRangePage, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
989 "/NEM/PagesUnmapGpaRangePage", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvUnmapGpaRange for single pages");
990
991 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
992 {
993 PNEMCPU pNemCpu = &pVM->apCpusR3[idCpu]->nem.s;
994 STAMR3RegisterF(pVM, &pNemCpu->StatExitPortIo, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of port I/O exits", "/NEM/CPU%u/ExitPortIo", idCpu);
995 STAMR3RegisterF(pVM, &pNemCpu->StatExitMemUnmapped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of unmapped memory exits", "/NEM/CPU%u/ExitMemUnmapped", idCpu);
996 STAMR3RegisterF(pVM, &pNemCpu->StatExitMemIntercept, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of intercepted memory exits", "/NEM/CPU%u/ExitMemIntercept", idCpu);
997 STAMR3RegisterF(pVM, &pNemCpu->StatExitHalt, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of HLT exits", "/NEM/CPU%u/ExitHalt", idCpu);
998 STAMR3RegisterF(pVM, &pNemCpu->StatExitInterruptWindow, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of interrupt window exits", "/NEM/CPU%u/ExitInterruptWindow", idCpu);
999 STAMR3RegisterF(pVM, &pNemCpu->StatExitCpuId, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of CPUID exits", "/NEM/CPU%u/ExitCpuId", idCpu);
1000 STAMR3RegisterF(pVM, &pNemCpu->StatExitMsr, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of MSR access exits", "/NEM/CPU%u/ExitMsr", idCpu);
1001 STAMR3RegisterF(pVM, &pNemCpu->StatExitException, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of exception exits", "/NEM/CPU%u/ExitException", idCpu);
1002 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionBp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #BP exits", "/NEM/CPU%u/ExitExceptionBp", idCpu);
1003 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionDb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #DB exits", "/NEM/CPU%u/ExitExceptionDb", idCpu);
1004 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionGp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #GP exits", "/NEM/CPU%u/ExitExceptionGp", idCpu);
1005 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionGpMesa, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #GP exits from mesa driver", "/NEM/CPU%u/ExitExceptionGpMesa", idCpu);
1006 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionUd, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #UD exits", "/NEM/CPU%u/ExitExceptionUd", idCpu);
1007 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionUdHandled, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of handled #UD exits", "/NEM/CPU%u/ExitExceptionUdHandled", idCpu);
1008 STAMR3RegisterF(pVM, &pNemCpu->StatExitUnrecoverable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of unrecoverable exits", "/NEM/CPU%u/ExitUnrecoverable", idCpu);
1009 STAMR3RegisterF(pVM, &pNemCpu->StatGetMsgTimeout, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of get message timeouts/alerts", "/NEM/CPU%u/GetMsgTimeout", idCpu);
1010 STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuSuccess, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of successful CPU stops", "/NEM/CPU%u/StopCpuSuccess", idCpu);
1011 STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuPending, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of pending CPU stops", "/NEM/CPU%u/StopCpuPending", idCpu);
1012 STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuPendingAlerts,STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of pending CPU stop alerts", "/NEM/CPU%u/StopCpuPendingAlerts", idCpu);
1013 STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuPendingOdd, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of odd pending CPU stops (see code)", "/NEM/CPU%u/StopCpuPendingOdd", idCpu);
1014 STAMR3RegisterF(pVM, &pNemCpu->StatCancelChangedState, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of cancel changed state", "/NEM/CPU%u/CancelChangedState", idCpu);
1015 STAMR3RegisterF(pVM, &pNemCpu->StatCancelAlertedThread, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of cancel alerted EMT", "/NEM/CPU%u/CancelAlertedEMT", idCpu);
1016 STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnFFPre, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of pre execution FF breaks", "/NEM/CPU%u/BreakOnFFPre", idCpu);
1017 STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnFFPost, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of post execution FF breaks", "/NEM/CPU%u/BreakOnFFPost", idCpu);
1018 STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnCancel, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of cancel execution breaks", "/NEM/CPU%u/BreakOnCancel", idCpu);
1019 STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnStatus, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of status code breaks", "/NEM/CPU%u/BreakOnStatus", idCpu);
1020 STAMR3RegisterF(pVM, &pNemCpu->StatImportOnDemand, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of on-demand state imports", "/NEM/CPU%u/ImportOnDemand", idCpu);
1021 STAMR3RegisterF(pVM, &pNemCpu->StatImportOnReturn, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of state imports on loop return", "/NEM/CPU%u/ImportOnReturn", idCpu);
1022 STAMR3RegisterF(pVM, &pNemCpu->StatImportOnReturnSkipped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of skipped state imports on loop return", "/NEM/CPU%u/ImportOnReturnSkipped", idCpu);
1023 STAMR3RegisterF(pVM, &pNemCpu->StatQueryCpuTick, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TSC queries", "/NEM/CPU%u/QueryCpuTick", idCpu);
1024 }
1025
1026#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
1027 if (!SUPR3IsDriverless())
1028 {
1029 PUVM pUVM = pVM->pUVM;
1030 STAMR3RegisterRefresh(pUVM, &pVM->nem.s.R0Stats.cPagesAvailable, STAMTYPE_U64, STAMVISIBILITY_ALWAYS,
1031 STAMUNIT_PAGES, STAM_REFRESH_GRP_NEM, "Free pages available to the hypervisor",
1032 "/NEM/R0Stats/cPagesAvailable");
1033 STAMR3RegisterRefresh(pUVM, &pVM->nem.s.R0Stats.cPagesInUse, STAMTYPE_U64, STAMVISIBILITY_ALWAYS,
1034 STAMUNIT_PAGES, STAM_REFRESH_GRP_NEM, "Pages in use by hypervisor",
1035 "/NEM/R0Stats/cPagesInUse");
1036 }
1037#endif /* VBOX_WITH_R0_MODULES && !VBOX_WITH_MINIMAL_R0 */
1038 }
1039 }
1040 }
1041 }
1042
1043 /*
1044 * We only fail if in forced mode, otherwise just log the complaint and return.
1045 */
1046 Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API || RTErrInfoIsSet(pErrInfo));
1047 if ( (fForced || !fFallback)
1048 && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NATIVE_API)
1049 return VMSetError(pVM, RT_SUCCESS_NP(rc) ? VERR_NEM_NOT_AVAILABLE : rc, RT_SRC_POS, "%s", pErrInfo->pszMsg);
1050
1051 if (RTErrInfoIsSet(pErrInfo))
1052 LogRel(("NEM: Not available: %s\n", pErrInfo->pszMsg));
1053 return VINF_SUCCESS;
1054}
1055
1056
1057/**
1058 * This is called after CPUMR3Init is done.
1059 *
1060 * @returns VBox status code.
1061 * @param pVM The VM handle..
1062 */
1063int nemR3NativeInitAfterCPUM(PVM pVM)
1064{
1065 /*
1066 * Validate sanity.
1067 */
1068 AssertReturn(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API, VERR_WRONG_ORDER);
1069
1070 /** @todo */
1071
1072 /*
1073 * Any hyper-v statistics we can get at now? HvCallMapStatsPage isn't accessible any more.
1074 */
1075 /** @todo stats */
1076
1077 /*
1078 * Adjust features.
1079 *
1080 * Note! We've already disabled X2APIC and MONITOR/MWAIT via CFGM during
1081 * the first init call.
1082 */
1083
1084 return VINF_SUCCESS;
1085}
1086
1087
1088int nemR3NativeInitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
1089{
1090 //BOOL fRet = SetThreadPriority(GetCurrentThread(), 0);
1091 //AssertLogRel(fRet);
1092
1093 NOREF(pVM); NOREF(enmWhat);
1094 return VINF_SUCCESS;
1095}
1096
1097
1098int nemR3NativeTerm(PVM pVM)
1099{
1100 /*
1101 * Delete the partition.
1102 */
1103 WHV_PARTITION_HANDLE hPartition = pVM->nem.s.hPartition;
1104 pVM->nem.s.hPartition = NULL;
1105 pVM->nem.s.hPartitionDevice = NULL;
1106 if (hPartition != NULL)
1107 {
1108 VMCPUID idCpu = pVM->nem.s.fCreatedEmts ? pVM->cCpus : 0;
1109 LogRel(("NEM: Destroying partition %p with its %u VCpus...\n", hPartition, idCpu));
1110 while (idCpu-- > 0)
1111 {
1112 HRESULT hrc = WHvDeleteVirtualProcessor(hPartition, idCpu);
1113 AssertLogRelMsg(SUCCEEDED(hrc), ("WHvDeleteVirtualProcessor(%p, %u) -> %Rhrc (Last=%#x/%u)\n",
1114 hPartition, idCpu, hrc, RTNtLastStatusValue(),
1115 RTNtLastErrorValue()));
1116 }
1117 WHvDeletePartition(hPartition);
1118 }
1119 pVM->nem.s.fCreatedEmts = false;
1120 return VINF_SUCCESS;
1121}
1122
1123
1124/**
1125 * VM reset notification.
1126 *
1127 * @param pVM The cross context VM structure.
1128 */
1129void nemR3NativeReset(PVM pVM)
1130{
1131 RT_NOREF(pVM);
1132}
1133
1134
1135/**
1136 * Reset CPU due to INIT IPI or hot (un)plugging.
1137 *
1138 * @param pVCpu The cross context virtual CPU structure of the CPU being
1139 * reset.
1140 * @param fInitIpi Whether this is the INIT IPI or hot (un)plugging case.
1141 */
1142void nemR3NativeResetCpu(PVMCPU pVCpu, bool fInitIpi)
1143{
1144 RT_NOREF(pVCpu, fInitIpi);
1145}
1146
1147
1148NEM_TMPL_STATIC int nemHCWinCopyStateToHyperV(PVMCC pVM, PVMCPUCC pVCpu)
1149{
1150 WHV_REGISTER_NAME aenmNames[128];
1151 WHV_REGISTER_VALUE aValues[128];
1152
1153 uint64_t const fWhat = ~pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL;
1154 if (!fWhat)
1155 return VINF_SUCCESS;
1156 uintptr_t iReg = 0;
1157
1158#define ADD_REG64(a_enmName, a_uValue) do { \
1159 aenmNames[iReg] = (a_enmName); \
1160 aValues[iReg].Reg128.High64 = 0; \
1161 aValues[iReg].Reg64 = (a_uValue).x; \
1162 iReg++; \
1163 } while (0)
1164#define ADD_REG64_RAW(a_enmName, a_uValue) do { \
1165 aenmNames[iReg] = (a_enmName); \
1166 aValues[iReg].Reg128.High64 = 0; \
1167 aValues[iReg].Reg64 = (a_uValue); \
1168 iReg++; \
1169 } while (0)
1170#define ADD_SYSREG64(a_enmName, a_uValue) do { \
1171 aenmNames[iReg] = (a_enmName); \
1172 aValues[iReg].Reg128.High64 = 0; \
1173 aValues[iReg].Reg64 = (a_uValue).u64; \
1174 iReg++; \
1175 } while (0)
1176#define ADD_REG128(a_enmName, a_uValue) do { \
1177 aenmNames[iReg] = (a_enmName); \
1178 aValues[iReg].Reg128.Low64 = (a_uValue).au64[0]; \
1179 aValues[iReg].Reg128.High64 = (a_uValue).au64[1]; \
1180 iReg++; \
1181 } while (0)
1182
1183 /* GPRs */
1184 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1185 {
1186 if (fWhat & CPUMCTX_EXTRN_X0)
1187 ADD_REG64(WHvArm64RegisterX0, pVCpu->cpum.GstCtx.aGRegs[0]);
1188 if (fWhat & CPUMCTX_EXTRN_X1)
1189 ADD_REG64(WHvArm64RegisterX1, pVCpu->cpum.GstCtx.aGRegs[1]);
1190 if (fWhat & CPUMCTX_EXTRN_X2)
1191 ADD_REG64(WHvArm64RegisterX2, pVCpu->cpum.GstCtx.aGRegs[2]);
1192 if (fWhat & CPUMCTX_EXTRN_X3)
1193 ADD_REG64(WHvArm64RegisterX3, pVCpu->cpum.GstCtx.aGRegs[3]);
1194 if (fWhat & CPUMCTX_EXTRN_X4_X28)
1195 {
1196 ADD_REG64(WHvArm64RegisterX4, pVCpu->cpum.GstCtx.aGRegs[4]);
1197 ADD_REG64(WHvArm64RegisterX5, pVCpu->cpum.GstCtx.aGRegs[5]);
1198 ADD_REG64(WHvArm64RegisterX6, pVCpu->cpum.GstCtx.aGRegs[6]);
1199 ADD_REG64(WHvArm64RegisterX7, pVCpu->cpum.GstCtx.aGRegs[7]);
1200 ADD_REG64(WHvArm64RegisterX8, pVCpu->cpum.GstCtx.aGRegs[8]);
1201 ADD_REG64(WHvArm64RegisterX9, pVCpu->cpum.GstCtx.aGRegs[9]);
1202 ADD_REG64(WHvArm64RegisterX10, pVCpu->cpum.GstCtx.aGRegs[10]);
1203 ADD_REG64(WHvArm64RegisterX11, pVCpu->cpum.GstCtx.aGRegs[11]);
1204 ADD_REG64(WHvArm64RegisterX12, pVCpu->cpum.GstCtx.aGRegs[12]);
1205 ADD_REG64(WHvArm64RegisterX13, pVCpu->cpum.GstCtx.aGRegs[13]);
1206 ADD_REG64(WHvArm64RegisterX14, pVCpu->cpum.GstCtx.aGRegs[14]);
1207 ADD_REG64(WHvArm64RegisterX15, pVCpu->cpum.GstCtx.aGRegs[15]);
1208 ADD_REG64(WHvArm64RegisterX16, pVCpu->cpum.GstCtx.aGRegs[16]);
1209 ADD_REG64(WHvArm64RegisterX17, pVCpu->cpum.GstCtx.aGRegs[17]);
1210 ADD_REG64(WHvArm64RegisterX18, pVCpu->cpum.GstCtx.aGRegs[18]);
1211 ADD_REG64(WHvArm64RegisterX19, pVCpu->cpum.GstCtx.aGRegs[19]);
1212 ADD_REG64(WHvArm64RegisterX20, pVCpu->cpum.GstCtx.aGRegs[20]);
1213 ADD_REG64(WHvArm64RegisterX21, pVCpu->cpum.GstCtx.aGRegs[21]);
1214 ADD_REG64(WHvArm64RegisterX22, pVCpu->cpum.GstCtx.aGRegs[22]);
1215 ADD_REG64(WHvArm64RegisterX23, pVCpu->cpum.GstCtx.aGRegs[23]);
1216 ADD_REG64(WHvArm64RegisterX24, pVCpu->cpum.GstCtx.aGRegs[24]);
1217 ADD_REG64(WHvArm64RegisterX25, pVCpu->cpum.GstCtx.aGRegs[25]);
1218 ADD_REG64(WHvArm64RegisterX26, pVCpu->cpum.GstCtx.aGRegs[26]);
1219 ADD_REG64(WHvArm64RegisterX27, pVCpu->cpum.GstCtx.aGRegs[27]);
1220 ADD_REG64(WHvArm64RegisterX28, pVCpu->cpum.GstCtx.aGRegs[28]);
1221 }
1222 if (fWhat & CPUMCTX_EXTRN_LR)
1223 ADD_REG64(WHvArm64RegisterLr, pVCpu->cpum.GstCtx.aGRegs[30]);
1224 if (fWhat & CPUMCTX_EXTRN_FP)
1225 ADD_REG64(WHvArm64RegisterFp, pVCpu->cpum.GstCtx.aGRegs[29]);
1226 }
1227
1228 /* RIP & Flags */
1229 if (fWhat & CPUMCTX_EXTRN_PC)
1230 ADD_SYSREG64(WHvArm64RegisterPc, pVCpu->cpum.GstCtx.Pc);
1231 if (fWhat & CPUMCTX_EXTRN_PSTATE)
1232 ADD_REG64_RAW(WHvArm64RegisterPstate, pVCpu->cpum.GstCtx.fPState);
1233 if (fWhat & CPUMCTX_EXTRN_SPSR)
1234 ADD_SYSREG64(WHvArm64RegisterSpsrEl1, pVCpu->cpum.GstCtx.Spsr);
1235 if (fWhat & CPUMCTX_EXTRN_ELR)
1236 ADD_SYSREG64(WHvArm64RegisterElrEl1, pVCpu->cpum.GstCtx.Elr);
1237 if (fWhat & CPUMCTX_EXTRN_SP)
1238 {
1239 ADD_SYSREG64(WHvArm64RegisterSpEl0, pVCpu->cpum.GstCtx.aSpReg[0]);
1240 ADD_SYSREG64(WHvArm64RegisterSpEl1, pVCpu->cpum.GstCtx.aSpReg[1]);
1241 }
1242 if (fWhat & CPUMCTX_EXTRN_SCTLR_TCR_TTBR)
1243 {
1244 ADD_SYSREG64(WHvArm64RegisterSctlrEl1, pVCpu->cpum.GstCtx.Sctlr);
1245 ADD_SYSREG64(WHvArm64RegisterTcrEl1, pVCpu->cpum.GstCtx.Tcr);
1246 ADD_SYSREG64(WHvArm64RegisterTtbr0El1, pVCpu->cpum.GstCtx.Ttbr0);
1247 ADD_SYSREG64(WHvArm64RegisterTtbr1El1, pVCpu->cpum.GstCtx.Ttbr1);
1248 }
1249
1250 /* Vector state. */
1251 if (fWhat & CPUMCTX_EXTRN_V0_V31)
1252 {
1253 ADD_REG128(WHvArm64RegisterQ0, pVCpu->cpum.GstCtx.aVRegs[0]);
1254 ADD_REG128(WHvArm64RegisterQ1, pVCpu->cpum.GstCtx.aVRegs[1]);
1255 ADD_REG128(WHvArm64RegisterQ2, pVCpu->cpum.GstCtx.aVRegs[2]);
1256 ADD_REG128(WHvArm64RegisterQ3, pVCpu->cpum.GstCtx.aVRegs[3]);
1257 ADD_REG128(WHvArm64RegisterQ4, pVCpu->cpum.GstCtx.aVRegs[4]);
1258 ADD_REG128(WHvArm64RegisterQ5, pVCpu->cpum.GstCtx.aVRegs[5]);
1259 ADD_REG128(WHvArm64RegisterQ6, pVCpu->cpum.GstCtx.aVRegs[6]);
1260 ADD_REG128(WHvArm64RegisterQ7, pVCpu->cpum.GstCtx.aVRegs[7]);
1261 ADD_REG128(WHvArm64RegisterQ8, pVCpu->cpum.GstCtx.aVRegs[8]);
1262 ADD_REG128(WHvArm64RegisterQ9, pVCpu->cpum.GstCtx.aVRegs[9]);
1263 ADD_REG128(WHvArm64RegisterQ10, pVCpu->cpum.GstCtx.aVRegs[10]);
1264 ADD_REG128(WHvArm64RegisterQ11, pVCpu->cpum.GstCtx.aVRegs[11]);
1265 ADD_REG128(WHvArm64RegisterQ12, pVCpu->cpum.GstCtx.aVRegs[12]);
1266 ADD_REG128(WHvArm64RegisterQ13, pVCpu->cpum.GstCtx.aVRegs[13]);
1267 ADD_REG128(WHvArm64RegisterQ14, pVCpu->cpum.GstCtx.aVRegs[14]);
1268 ADD_REG128(WHvArm64RegisterQ15, pVCpu->cpum.GstCtx.aVRegs[15]);
1269 ADD_REG128(WHvArm64RegisterQ16, pVCpu->cpum.GstCtx.aVRegs[16]);
1270 ADD_REG128(WHvArm64RegisterQ17, pVCpu->cpum.GstCtx.aVRegs[17]);
1271 ADD_REG128(WHvArm64RegisterQ18, pVCpu->cpum.GstCtx.aVRegs[18]);
1272 ADD_REG128(WHvArm64RegisterQ19, pVCpu->cpum.GstCtx.aVRegs[19]);
1273 ADD_REG128(WHvArm64RegisterQ20, pVCpu->cpum.GstCtx.aVRegs[20]);
1274 ADD_REG128(WHvArm64RegisterQ21, pVCpu->cpum.GstCtx.aVRegs[21]);
1275 ADD_REG128(WHvArm64RegisterQ22, pVCpu->cpum.GstCtx.aVRegs[22]);
1276 ADD_REG128(WHvArm64RegisterQ23, pVCpu->cpum.GstCtx.aVRegs[23]);
1277 ADD_REG128(WHvArm64RegisterQ24, pVCpu->cpum.GstCtx.aVRegs[24]);
1278 ADD_REG128(WHvArm64RegisterQ25, pVCpu->cpum.GstCtx.aVRegs[25]);
1279 ADD_REG128(WHvArm64RegisterQ26, pVCpu->cpum.GstCtx.aVRegs[26]);
1280 ADD_REG128(WHvArm64RegisterQ27, pVCpu->cpum.GstCtx.aVRegs[27]);
1281 ADD_REG128(WHvArm64RegisterQ28, pVCpu->cpum.GstCtx.aVRegs[28]);
1282 ADD_REG128(WHvArm64RegisterQ29, pVCpu->cpum.GstCtx.aVRegs[29]);
1283 ADD_REG128(WHvArm64RegisterQ30, pVCpu->cpum.GstCtx.aVRegs[30]);
1284 ADD_REG128(WHvArm64RegisterQ31, pVCpu->cpum.GstCtx.aVRegs[31]);
1285 }
1286
1287 if (fWhat & CPUMCTX_EXTRN_FPCR)
1288 ADD_REG64_RAW(WHvArm64RegisterFpcr, pVCpu->cpum.GstCtx.fpcr);
1289 if (fWhat & CPUMCTX_EXTRN_FPSR)
1290 ADD_REG64_RAW(WHvArm64RegisterFpsr, pVCpu->cpum.GstCtx.fpsr);
1291
1292 /* System registers. */
1293 if (fWhat & CPUMCTX_EXTRN_SYSREG_MISC)
1294 {
1295 ADD_SYSREG64(WHvArm64RegisterVbarEl1, pVCpu->cpum.GstCtx.VBar);
1296 ADD_SYSREG64(WHvArm64RegisterEsrEl1, pVCpu->cpum.GstCtx.Esr);
1297 ADD_SYSREG64(WHvArm64RegisterFarEl1, pVCpu->cpum.GstCtx.Far);
1298 ADD_SYSREG64(WHvArm64RegisterCntkctlEl1, pVCpu->cpum.GstCtx.CntKCtl);
1299 ADD_SYSREG64(WHvArm64RegisterContextidrEl1, pVCpu->cpum.GstCtx.ContextIdr);
1300 ADD_SYSREG64(WHvArm64RegisterCpacrEl1, pVCpu->cpum.GstCtx.Cpacr);
1301 ADD_SYSREG64(WHvArm64RegisterCsselrEl1, pVCpu->cpum.GstCtx.Csselr);
1302 ADD_SYSREG64(WHvArm64RegisterMairEl1, pVCpu->cpum.GstCtx.Mair);
1303 ADD_SYSREG64(WHvArm64RegisterParEl1, pVCpu->cpum.GstCtx.Par);
1304 ADD_SYSREG64(WHvArm64RegisterTpidrroEl0, pVCpu->cpum.GstCtx.TpIdrRoEl0);
1305 ADD_SYSREG64(WHvArm64RegisterTpidrEl0, pVCpu->cpum.GstCtx.aTpIdr[0]);
1306 ADD_SYSREG64(WHvArm64RegisterTpidrEl1, pVCpu->cpum.GstCtx.aTpIdr[1]);
1307 }
1308
1309 if (fWhat & CPUMCTX_EXTRN_SYSREG_DEBUG)
1310 {
1311 for (uint32_t i = 0; i < pVM->nem.s.cBreakpoints; i++)
1312 {
1313 ADD_SYSREG64((WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgbcr0El1 + i), pVCpu->cpum.GstCtx.aBp[i].Ctrl);
1314 ADD_SYSREG64((WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgbvr0El1 + i), pVCpu->cpum.GstCtx.aBp[i].Value);
1315 }
1316
1317 for (uint32_t i = 0; i < pVM->nem.s.cWatchpoints; i++)
1318 {
1319 ADD_SYSREG64((WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgwcr0El1 + i), pVCpu->cpum.GstCtx.aWp[i].Ctrl);
1320 ADD_SYSREG64((WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgwvr0El1 + i), pVCpu->cpum.GstCtx.aWp[i].Value);
1321 }
1322
1323 ADD_SYSREG64(WHvArm64RegisterMdscrEl1, pVCpu->cpum.GstCtx.Mdscr);
1324 }
1325
1326 if (fWhat & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS)
1327 {
1328 ADD_SYSREG64(WHvArm64RegisterApdAKeyHiEl1, pVCpu->cpum.GstCtx.Apda.High);
1329 ADD_SYSREG64(WHvArm64RegisterApdAKeyLoEl1, pVCpu->cpum.GstCtx.Apda.Low);
1330 ADD_SYSREG64(WHvArm64RegisterApdBKeyHiEl1, pVCpu->cpum.GstCtx.Apdb.High);
1331 ADD_SYSREG64(WHvArm64RegisterApdBKeyLoEl1, pVCpu->cpum.GstCtx.Apdb.Low);
1332 ADD_SYSREG64(WHvArm64RegisterApgAKeyHiEl1, pVCpu->cpum.GstCtx.Apga.High);
1333 ADD_SYSREG64(WHvArm64RegisterApgAKeyLoEl1, pVCpu->cpum.GstCtx.Apga.Low);
1334 ADD_SYSREG64(WHvArm64RegisterApiAKeyHiEl1, pVCpu->cpum.GstCtx.Apia.High);
1335 ADD_SYSREG64(WHvArm64RegisterApiAKeyLoEl1, pVCpu->cpum.GstCtx.Apia.Low);
1336 ADD_SYSREG64(WHvArm64RegisterApiBKeyHiEl1, pVCpu->cpum.GstCtx.Apib.High);
1337 ADD_SYSREG64(WHvArm64RegisterApiBKeyLoEl1, pVCpu->cpum.GstCtx.Apib.Low);
1338 }
1339
1340#undef ADD_REG64
1341#undef ADD_REG64_RAW
1342#undef ADD_REG128
1343
1344 /*
1345 * Set the registers.
1346 */
1347 Assert(iReg < RT_ELEMENTS(aValues));
1348 Assert(iReg < RT_ELEMENTS(aenmNames));
1349 HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues);
1350 if (SUCCEEDED(hrc))
1351 {
1352 pVCpu->cpum.GstCtx.fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_NEM;
1353 return VINF_SUCCESS;
1354 }
1355 AssertLogRelMsgFailed(("WHvSetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
1356 pVM->nem.s.hPartition, pVCpu->idCpu, iReg,
1357 hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
1358 return VERR_INTERNAL_ERROR;
1359}
1360
1361
1362NEM_TMPL_STATIC int nemHCWinCopyStateFromHyperV(PVMCC pVM, PVMCPUCC pVCpu, uint64_t fWhat)
1363{
1364 WHV_REGISTER_NAME aenmNames[256];
1365
1366 fWhat &= pVCpu->cpum.GstCtx.fExtrn;
1367 if (!fWhat)
1368 return VINF_SUCCESS;
1369
1370 uintptr_t iReg = 0;
1371
1372 /* GPRs */
1373 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1374 {
1375 if (fWhat & CPUMCTX_EXTRN_X0)
1376 aenmNames[iReg++] = WHvArm64RegisterX0;
1377 if (fWhat & CPUMCTX_EXTRN_X1)
1378 aenmNames[iReg++] = WHvArm64RegisterX1;
1379 if (fWhat & CPUMCTX_EXTRN_X2)
1380 aenmNames[iReg++] = WHvArm64RegisterX2;
1381 if (fWhat & CPUMCTX_EXTRN_X3)
1382 aenmNames[iReg++] = WHvArm64RegisterX3;
1383 if (fWhat & CPUMCTX_EXTRN_X4_X28)
1384 {
1385 aenmNames[iReg++] = WHvArm64RegisterX4;
1386 aenmNames[iReg++] = WHvArm64RegisterX5;
1387 aenmNames[iReg++] = WHvArm64RegisterX6;
1388 aenmNames[iReg++] = WHvArm64RegisterX7;
1389 aenmNames[iReg++] = WHvArm64RegisterX8;
1390 aenmNames[iReg++] = WHvArm64RegisterX9;
1391 aenmNames[iReg++] = WHvArm64RegisterX10;
1392 aenmNames[iReg++] = WHvArm64RegisterX11;
1393 aenmNames[iReg++] = WHvArm64RegisterX12;
1394 aenmNames[iReg++] = WHvArm64RegisterX13;
1395 aenmNames[iReg++] = WHvArm64RegisterX14;
1396 aenmNames[iReg++] = WHvArm64RegisterX15;
1397 aenmNames[iReg++] = WHvArm64RegisterX16;
1398 aenmNames[iReg++] = WHvArm64RegisterX17;
1399 aenmNames[iReg++] = WHvArm64RegisterX18;
1400 aenmNames[iReg++] = WHvArm64RegisterX19;
1401 aenmNames[iReg++] = WHvArm64RegisterX20;
1402 aenmNames[iReg++] = WHvArm64RegisterX21;
1403 aenmNames[iReg++] = WHvArm64RegisterX22;
1404 aenmNames[iReg++] = WHvArm64RegisterX23;
1405 aenmNames[iReg++] = WHvArm64RegisterX24;
1406 aenmNames[iReg++] = WHvArm64RegisterX25;
1407 aenmNames[iReg++] = WHvArm64RegisterX26;
1408 aenmNames[iReg++] = WHvArm64RegisterX27;
1409 aenmNames[iReg++] = WHvArm64RegisterX28;
1410 }
1411 if (fWhat & CPUMCTX_EXTRN_LR)
1412 aenmNames[iReg++] = WHvArm64RegisterLr;
1413 if (fWhat & CPUMCTX_EXTRN_FP)
1414 aenmNames[iReg++] = WHvArm64RegisterFp;
1415 }
1416
1417 /* PC & Flags */
1418 if (fWhat & CPUMCTX_EXTRN_PC)
1419 aenmNames[iReg++] = WHvArm64RegisterPc;
1420 if (fWhat & CPUMCTX_EXTRN_PSTATE)
1421 aenmNames[iReg++] = WHvArm64RegisterPstate;
1422 if (fWhat & CPUMCTX_EXTRN_SPSR)
1423 aenmNames[iReg++] = WHvArm64RegisterSpsrEl1;
1424 if (fWhat & CPUMCTX_EXTRN_ELR)
1425 aenmNames[iReg++] = WHvArm64RegisterElrEl1;
1426 if (fWhat & CPUMCTX_EXTRN_SP)
1427 {
1428 aenmNames[iReg++] = WHvArm64RegisterSpEl0;
1429 aenmNames[iReg++] = WHvArm64RegisterSpEl1;
1430 }
1431 if (fWhat & CPUMCTX_EXTRN_SCTLR_TCR_TTBR)
1432 {
1433 aenmNames[iReg++] = WHvArm64RegisterSctlrEl1;
1434 aenmNames[iReg++] = WHvArm64RegisterTcrEl1;
1435 aenmNames[iReg++] = WHvArm64RegisterTtbr0El1;
1436 aenmNames[iReg++] = WHvArm64RegisterTtbr1El1;
1437 }
1438
1439 /* Vector state. */
1440 if (fWhat & CPUMCTX_EXTRN_V0_V31)
1441 {
1442 aenmNames[iReg++] = WHvArm64RegisterQ0;
1443 aenmNames[iReg++] = WHvArm64RegisterQ1;
1444 aenmNames[iReg++] = WHvArm64RegisterQ2;
1445 aenmNames[iReg++] = WHvArm64RegisterQ3;
1446 aenmNames[iReg++] = WHvArm64RegisterQ4;
1447 aenmNames[iReg++] = WHvArm64RegisterQ5;
1448 aenmNames[iReg++] = WHvArm64RegisterQ6;
1449 aenmNames[iReg++] = WHvArm64RegisterQ7;
1450 aenmNames[iReg++] = WHvArm64RegisterQ8;
1451 aenmNames[iReg++] = WHvArm64RegisterQ9;
1452 aenmNames[iReg++] = WHvArm64RegisterQ10;
1453 aenmNames[iReg++] = WHvArm64RegisterQ11;
1454 aenmNames[iReg++] = WHvArm64RegisterQ12;
1455 aenmNames[iReg++] = WHvArm64RegisterQ13;
1456 aenmNames[iReg++] = WHvArm64RegisterQ14;
1457 aenmNames[iReg++] = WHvArm64RegisterQ15;
1458
1459 aenmNames[iReg++] = WHvArm64RegisterQ16;
1460 aenmNames[iReg++] = WHvArm64RegisterQ17;
1461 aenmNames[iReg++] = WHvArm64RegisterQ18;
1462 aenmNames[iReg++] = WHvArm64RegisterQ19;
1463 aenmNames[iReg++] = WHvArm64RegisterQ20;
1464 aenmNames[iReg++] = WHvArm64RegisterQ21;
1465 aenmNames[iReg++] = WHvArm64RegisterQ22;
1466 aenmNames[iReg++] = WHvArm64RegisterQ23;
1467 aenmNames[iReg++] = WHvArm64RegisterQ24;
1468 aenmNames[iReg++] = WHvArm64RegisterQ25;
1469 aenmNames[iReg++] = WHvArm64RegisterQ26;
1470 aenmNames[iReg++] = WHvArm64RegisterQ27;
1471 aenmNames[iReg++] = WHvArm64RegisterQ28;
1472 aenmNames[iReg++] = WHvArm64RegisterQ29;
1473 aenmNames[iReg++] = WHvArm64RegisterQ30;
1474 aenmNames[iReg++] = WHvArm64RegisterQ31;
1475 }
1476 if (fWhat & CPUMCTX_EXTRN_FPCR)
1477 aenmNames[iReg++] = WHvArm64RegisterFpcr;
1478 if (fWhat & CPUMCTX_EXTRN_FPSR)
1479 aenmNames[iReg++] = WHvArm64RegisterFpsr;
1480
1481 /* System registers. */
1482 if (fWhat & CPUMCTX_EXTRN_SYSREG_MISC)
1483 {
1484 aenmNames[iReg++] = WHvArm64RegisterVbarEl1;
1485 aenmNames[iReg++] = WHvArm64RegisterEsrEl1;
1486 aenmNames[iReg++] = WHvArm64RegisterFarEl1;
1487 aenmNames[iReg++] = WHvArm64RegisterCntkctlEl1;
1488 aenmNames[iReg++] = WHvArm64RegisterContextidrEl1;
1489 aenmNames[iReg++] = WHvArm64RegisterCpacrEl1;
1490 aenmNames[iReg++] = WHvArm64RegisterCsselrEl1;
1491 aenmNames[iReg++] = WHvArm64RegisterMairEl1;
1492 aenmNames[iReg++] = WHvArm64RegisterParEl1;
1493 aenmNames[iReg++] = WHvArm64RegisterTpidrroEl0;
1494 aenmNames[iReg++] = WHvArm64RegisterTpidrEl0;
1495 aenmNames[iReg++] = WHvArm64RegisterTpidrEl1;
1496 }
1497
1498 if (fWhat & CPUMCTX_EXTRN_SYSREG_DEBUG)
1499 {
1500 /* Hyper-V doesn't allow syncing debug break-/watchpoint registers which aren't there. */
1501 for (uint32_t i = 0; i < pVM->nem.s.cBreakpoints; i++)
1502 {
1503 aenmNames[iReg++] = (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgbcr0El1 + i);
1504 aenmNames[iReg++] = (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgbvr0El1 + i);
1505 }
1506
1507 for (uint32_t i = 0; i < pVM->nem.s.cWatchpoints; i++)
1508 {
1509 aenmNames[iReg++] = (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgwcr0El1 + i);
1510 aenmNames[iReg++] = (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgwvr0El1 + i);
1511 }
1512
1513 aenmNames[iReg++] = WHvArm64RegisterMdscrEl1;
1514 }
1515
1516 if (fWhat & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS)
1517 {
1518 aenmNames[iReg++] = WHvArm64RegisterApdAKeyHiEl1;
1519 aenmNames[iReg++] = WHvArm64RegisterApdAKeyLoEl1;
1520 aenmNames[iReg++] = WHvArm64RegisterApdBKeyHiEl1;
1521 aenmNames[iReg++] = WHvArm64RegisterApdBKeyLoEl1;
1522 aenmNames[iReg++] = WHvArm64RegisterApgAKeyHiEl1;
1523 aenmNames[iReg++] = WHvArm64RegisterApgAKeyLoEl1;
1524 aenmNames[iReg++] = WHvArm64RegisterApiAKeyHiEl1;
1525 aenmNames[iReg++] = WHvArm64RegisterApiAKeyLoEl1;
1526 aenmNames[iReg++] = WHvArm64RegisterApiBKeyHiEl1;
1527 aenmNames[iReg++] = WHvArm64RegisterApiBKeyLoEl1;
1528 }
1529
1530 size_t const cRegs = iReg;
1531 Assert(cRegs < RT_ELEMENTS(aenmNames));
1532
1533 /*
1534 * Get the registers.
1535 */
1536 WHV_REGISTER_VALUE aValues[256];
1537 RT_ZERO(aValues);
1538 Assert(RT_ELEMENTS(aValues) >= cRegs);
1539 Assert(RT_ELEMENTS(aenmNames) >= cRegs);
1540 HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, (uint32_t)cRegs, aValues);
1541 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1542 ("WHvGetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
1543 pVM->nem.s.hPartition, pVCpu->idCpu, cRegs, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1544 , VERR_NEM_GET_REGISTERS_FAILED);
1545
1546 iReg = 0;
1547#define GET_REG64(a_DstVar, a_enmName) do { \
1548 Assert(aenmNames[iReg] == (a_enmName)); \
1549 (a_DstVar).x = aValues[iReg].Reg64; \
1550 iReg++; \
1551 } while (0)
1552#define GET_REG64_RAW(a_DstVar, a_enmName) do { \
1553 Assert(aenmNames[iReg] == (a_enmName)); \
1554 (a_DstVar) = aValues[iReg].Reg64; \
1555 iReg++; \
1556 } while (0)
1557#define GET_SYSREG64(a_DstVar, a_enmName) do { \
1558 Assert(aenmNames[iReg] == (a_enmName)); \
1559 (a_DstVar).u64 = aValues[iReg].Reg64; \
1560 iReg++; \
1561 } while (0)
1562#define GET_REG128(a_DstVar, a_enmName) do { \
1563 Assert(aenmNames[iReg] == a_enmName); \
1564 (a_DstVar).au64[0] = aValues[iReg].Reg128.Low64; \
1565 (a_DstVar).au64[1] = aValues[iReg].Reg128.High64; \
1566 iReg++; \
1567 } while (0)
1568
1569 /* GPRs */
1570 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1571 {
1572 if (fWhat & CPUMCTX_EXTRN_X0)
1573 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[0], WHvArm64RegisterX0);
1574 if (fWhat & CPUMCTX_EXTRN_X1)
1575 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[1], WHvArm64RegisterX1);
1576 if (fWhat & CPUMCTX_EXTRN_X2)
1577 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[2], WHvArm64RegisterX2);
1578 if (fWhat & CPUMCTX_EXTRN_X3)
1579 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[3], WHvArm64RegisterX3);
1580 if (fWhat & CPUMCTX_EXTRN_X4_X28)
1581 {
1582 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[4], WHvArm64RegisterX4);
1583 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[5], WHvArm64RegisterX5);
1584 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[6], WHvArm64RegisterX6);
1585 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[7], WHvArm64RegisterX7);
1586 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[8], WHvArm64RegisterX8);
1587 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[9], WHvArm64RegisterX9);
1588 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[10], WHvArm64RegisterX10);
1589 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[11], WHvArm64RegisterX11);
1590 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[12], WHvArm64RegisterX12);
1591 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[13], WHvArm64RegisterX13);
1592 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[14], WHvArm64RegisterX14);
1593 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[15], WHvArm64RegisterX15);
1594 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[16], WHvArm64RegisterX16);
1595 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[17], WHvArm64RegisterX17);
1596 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[18], WHvArm64RegisterX18);
1597 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[19], WHvArm64RegisterX19);
1598 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[20], WHvArm64RegisterX20);
1599 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[21], WHvArm64RegisterX21);
1600 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[22], WHvArm64RegisterX22);
1601 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[23], WHvArm64RegisterX23);
1602 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[24], WHvArm64RegisterX24);
1603 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[25], WHvArm64RegisterX25);
1604 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[26], WHvArm64RegisterX26);
1605 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[27], WHvArm64RegisterX27);
1606 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[28], WHvArm64RegisterX28);
1607 }
1608 if (fWhat & CPUMCTX_EXTRN_LR)
1609 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[30], WHvArm64RegisterLr);
1610 if (fWhat & CPUMCTX_EXTRN_FP)
1611 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[29], WHvArm64RegisterFp);
1612 }
1613
1614 /* RIP & Flags */
1615 if (fWhat & CPUMCTX_EXTRN_PC)
1616 GET_REG64_RAW(pVCpu->cpum.GstCtx.Pc.u64, WHvArm64RegisterPc);
1617 if (fWhat & CPUMCTX_EXTRN_PSTATE)
1618 GET_REG64_RAW(pVCpu->cpum.GstCtx.fPState, WHvArm64RegisterPstate);
1619 if (fWhat & CPUMCTX_EXTRN_SPSR)
1620 GET_SYSREG64(pVCpu->cpum.GstCtx.Spsr, WHvArm64RegisterSpsrEl1);
1621 if (fWhat & CPUMCTX_EXTRN_ELR)
1622 GET_SYSREG64(pVCpu->cpum.GstCtx.Elr, WHvArm64RegisterElrEl1);
1623 if (fWhat & CPUMCTX_EXTRN_SP)
1624 {
1625 GET_SYSREG64(pVCpu->cpum.GstCtx.aSpReg[0], WHvArm64RegisterSpEl0);
1626 GET_SYSREG64(pVCpu->cpum.GstCtx.aSpReg[1], WHvArm64RegisterSpEl1);
1627 }
1628 if (fWhat & CPUMCTX_EXTRN_SCTLR_TCR_TTBR)
1629 {
1630 GET_SYSREG64(pVCpu->cpum.GstCtx.Sctlr, WHvArm64RegisterSctlrEl1);
1631 GET_SYSREG64(pVCpu->cpum.GstCtx.Tcr, WHvArm64RegisterTcrEl1);
1632 GET_SYSREG64(pVCpu->cpum.GstCtx.Ttbr0, WHvArm64RegisterTtbr0El1);
1633 GET_SYSREG64(pVCpu->cpum.GstCtx.Ttbr1, WHvArm64RegisterTtbr1El1);
1634 }
1635
1636 /* Vector state. */
1637 if (fWhat & CPUMCTX_EXTRN_V0_V31)
1638 {
1639 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[0], WHvArm64RegisterQ0);
1640 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[1], WHvArm64RegisterQ1);
1641 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[2], WHvArm64RegisterQ2);
1642 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[3], WHvArm64RegisterQ3);
1643 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[4], WHvArm64RegisterQ4);
1644 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[5], WHvArm64RegisterQ5);
1645 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[6], WHvArm64RegisterQ6);
1646 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[7], WHvArm64RegisterQ7);
1647 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[8], WHvArm64RegisterQ8);
1648 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[9], WHvArm64RegisterQ9);
1649 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[10], WHvArm64RegisterQ10);
1650 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[11], WHvArm64RegisterQ11);
1651 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[12], WHvArm64RegisterQ12);
1652 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[13], WHvArm64RegisterQ13);
1653 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[14], WHvArm64RegisterQ14);
1654 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[15], WHvArm64RegisterQ15);
1655
1656 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[16], WHvArm64RegisterQ16);
1657 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[17], WHvArm64RegisterQ17);
1658 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[18], WHvArm64RegisterQ18);
1659 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[19], WHvArm64RegisterQ19);
1660 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[20], WHvArm64RegisterQ20);
1661 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[21], WHvArm64RegisterQ21);
1662 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[22], WHvArm64RegisterQ22);
1663 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[23], WHvArm64RegisterQ23);
1664 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[24], WHvArm64RegisterQ24);
1665 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[25], WHvArm64RegisterQ25);
1666 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[26], WHvArm64RegisterQ26);
1667 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[27], WHvArm64RegisterQ27);
1668 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[28], WHvArm64RegisterQ28);
1669 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[29], WHvArm64RegisterQ29);
1670 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[30], WHvArm64RegisterQ30);
1671 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[31], WHvArm64RegisterQ31);
1672 }
1673 if (fWhat & CPUMCTX_EXTRN_FPCR)
1674 GET_REG64_RAW(pVCpu->cpum.GstCtx.fpcr, WHvArm64RegisterFpcr);
1675 if (fWhat & CPUMCTX_EXTRN_FPSR)
1676 GET_REG64_RAW(pVCpu->cpum.GstCtx.fpsr, WHvArm64RegisterFpsr);
1677
1678 /* System registers. */
1679 if (fWhat & CPUMCTX_EXTRN_SYSREG_MISC)
1680 {
1681 GET_SYSREG64(pVCpu->cpum.GstCtx.VBar, WHvArm64RegisterVbarEl1);
1682 GET_SYSREG64(pVCpu->cpum.GstCtx.Esr, WHvArm64RegisterEsrEl1);
1683 GET_SYSREG64(pVCpu->cpum.GstCtx.Far, WHvArm64RegisterFarEl1);
1684 GET_SYSREG64(pVCpu->cpum.GstCtx.CntKCtl, WHvArm64RegisterCntkctlEl1);
1685 GET_SYSREG64(pVCpu->cpum.GstCtx.ContextIdr, WHvArm64RegisterContextidrEl1);
1686 GET_SYSREG64(pVCpu->cpum.GstCtx.Cpacr, WHvArm64RegisterCpacrEl1);
1687 GET_SYSREG64(pVCpu->cpum.GstCtx.Csselr, WHvArm64RegisterCsselrEl1);
1688 GET_SYSREG64(pVCpu->cpum.GstCtx.Mair, WHvArm64RegisterMairEl1);
1689 GET_SYSREG64(pVCpu->cpum.GstCtx.Par, WHvArm64RegisterParEl1);
1690 GET_SYSREG64(pVCpu->cpum.GstCtx.TpIdrRoEl0, WHvArm64RegisterTpidrroEl0);
1691 GET_SYSREG64(pVCpu->cpum.GstCtx.aTpIdr[0], WHvArm64RegisterTpidrEl0);
1692 GET_SYSREG64(pVCpu->cpum.GstCtx.aTpIdr[1], WHvArm64RegisterTpidrEl1);
1693 }
1694
1695 if (fWhat & CPUMCTX_EXTRN_SYSREG_DEBUG)
1696 {
1697 for (uint32_t i = 0; i < pVM->nem.s.cBreakpoints; i++)
1698 {
1699 GET_SYSREG64(pVCpu->cpum.GstCtx.aBp[i].Ctrl, (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgbcr0El1 + i));
1700 GET_SYSREG64(pVCpu->cpum.GstCtx.aBp[i].Value, (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgbvr0El1 + i));
1701 }
1702
1703 for (uint32_t i = 0; i < pVM->nem.s.cWatchpoints; i++)
1704 {
1705 GET_SYSREG64(pVCpu->cpum.GstCtx.aWp[i].Ctrl, (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgwcr0El1 + i));
1706 GET_SYSREG64(pVCpu->cpum.GstCtx.aWp[i].Value, (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgwvr0El1 + i));
1707 }
1708
1709 GET_SYSREG64(pVCpu->cpum.GstCtx.Mdscr, WHvArm64RegisterMdscrEl1);
1710 }
1711
1712 if (fWhat & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS)
1713 {
1714 GET_SYSREG64(pVCpu->cpum.GstCtx.Apda.High, WHvArm64RegisterApdAKeyHiEl1);
1715 GET_SYSREG64(pVCpu->cpum.GstCtx.Apda.Low, WHvArm64RegisterApdAKeyLoEl1);
1716 GET_SYSREG64(pVCpu->cpum.GstCtx.Apdb.High, WHvArm64RegisterApdBKeyHiEl1);
1717 GET_SYSREG64(pVCpu->cpum.GstCtx.Apdb.Low, WHvArm64RegisterApdBKeyLoEl1);
1718 GET_SYSREG64(pVCpu->cpum.GstCtx.Apga.High, WHvArm64RegisterApgAKeyHiEl1);
1719 GET_SYSREG64(pVCpu->cpum.GstCtx.Apga.Low, WHvArm64RegisterApgAKeyLoEl1);
1720 GET_SYSREG64(pVCpu->cpum.GstCtx.Apia.High, WHvArm64RegisterApiAKeyHiEl1);
1721 GET_SYSREG64(pVCpu->cpum.GstCtx.Apia.Low, WHvArm64RegisterApiAKeyLoEl1);
1722 GET_SYSREG64(pVCpu->cpum.GstCtx.Apib.High, WHvArm64RegisterApiBKeyHiEl1);
1723 GET_SYSREG64(pVCpu->cpum.GstCtx.Apib.Low, WHvArm64RegisterApiBKeyLoEl1);
1724 }
1725
1726 /* Almost done, just update extrn flags. */
1727 pVCpu->cpum.GstCtx.fExtrn &= ~fWhat;
1728 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
1729 pVCpu->cpum.GstCtx.fExtrn = 0;
1730
1731 return VINF_SUCCESS;
1732}
1733
1734
1735/**
1736 * Interface for importing state on demand (used by IEM).
1737 *
1738 * @returns VBox status code.
1739 * @param pVCpu The cross context CPU structure.
1740 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
1741 */
1742VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
1743{
1744 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);
1745 return nemHCWinCopyStateFromHyperV(pVCpu->pVMR3, pVCpu, fWhat);
1746}
1747
1748
1749/**
1750 * Query the CPU tick counter and optionally the TSC_AUX MSR value.
1751 *
1752 * @returns VBox status code.
1753 * @param pVCpu The cross context CPU structure.
1754 * @param pcTicks Where to return the CPU tick count.
1755 * @param puAux Where to return the TSC_AUX register value.
1756 */
1757VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPUCC pVCpu, uint64_t *pcTicks, uint32_t *puAux)
1758{
1759 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick);
1760
1761 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1762 VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
1763 AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);
1764
1765 /* Call the offical API. */
1766 WHV_REGISTER_NAME enmName = WHvArm64RegisterCntvctEl0;
1767 WHV_REGISTER_VALUE Value = { { {0, 0} } };
1768 HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, &enmName, 1, &Value);
1769 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1770 ("WHvGetVirtualProcessorRegisters(%p, %u,{CNTVCT_EL0},1,) -> %Rhrc (Last=%#x/%u)\n",
1771 pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1772 , VERR_NEM_GET_REGISTERS_FAILED);
1773 *pcTicks = Value.Reg64;
1774 if (puAux)
1775 *puAux =0;
1776
1777 return VINF_SUCCESS;
1778}
1779
1780
1781/**
1782 * Resumes CPU clock (TSC) on all virtual CPUs.
1783 *
1784 * This is called by TM when the VM is started, restored, resumed or similar.
1785 *
1786 * @returns VBox status code.
1787 * @param pVM The cross context VM structure.
1788 * @param pVCpu The cross context CPU structure of the calling EMT.
1789 * @param uPausedTscValue The TSC value at the time of pausing.
1790 */
1791VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uPausedTscValue)
1792{
1793 VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
1794 AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);
1795
1796 /*
1797 * Call the offical API to do the job.
1798 */
1799 /* Ensure time for the partition is suspended - it will be resumed as soon as a vCPU starts executing. */
1800 HRESULT hrc = WHvSuspendPartitionTime(pVM->nem.s.hPartition);
1801 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1802 ("WHvSuspendPartitionTime(%p) -> %Rhrc (Last=%#x/%u)\n",
1803 pVM->nem.s.hPartition, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1804 , VERR_NEM_SET_TSC);
1805
1806 /*
1807 * Now set the CNTVCT_EL0 register for each vCPU, Hyper-V will program the timer offset in
1808 * CNTVOFF_EL2 accordingly. ARM guarantees that CNTVCT_EL0 is synchronised across all CPUs,
1809 * as long as CNTVOFF_EL2 is the same everywhere. Lets just hope scheduling will not affect it
1810 * if the partition time is suspended.
1811 */
1812 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1813 {
1814 WHV_REGISTER_NAME enmName = WHvArm64RegisterCntvctEl0;
1815 WHV_REGISTER_VALUE Value;
1816 Value.Reg64 = uPausedTscValue;
1817 hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, idCpu, &enmName, 1, &Value);
1818 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1819 ("WHvSetVirtualProcessorRegisters(%p, 0,{CNTVCT_EL0},1,%#RX64) -> %Rhrc (Last=%#x/%u)\n",
1820 pVM->nem.s.hPartition, idCpu, uPausedTscValue, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1821 , VERR_NEM_SET_TSC);
1822
1823 /* Make sure the CNTV_CTL_EL0 and CNTV_CVAL_EL0 registers are up to date after resuming (saved state load). */
1824 PVMCPUCC pVCpuDst = pVM->apCpusR3[idCpu];
1825 pVCpuDst->nem.s.fSyncCntvRegs = true;
1826 }
1827
1828 return VINF_SUCCESS;
1829}
1830
1831
1832#ifdef LOG_ENABLED
1833/**
1834 * Logs the current CPU state.
1835 */
1836static void nemR3WinLogState(PVMCC pVM, PVMCPUCC pVCpu)
1837{
1838 if (LogIs3Enabled())
1839 {
1840 char szRegs[4096];
1841 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
1842 "x0=%016VR{x0} x1=%016VR{x1} x2=%016VR{x2} x3=%016VR{x3}\n"
1843 "x4=%016VR{x4} x5=%016VR{x5} x6=%016VR{x6} x7=%016VR{x7}\n"
1844 "x8=%016VR{x8} x9=%016VR{x9} x10=%016VR{x10} x11=%016VR{x11}\n"
1845 "x12=%016VR{x12} x13=%016VR{x13} x14=%016VR{x14} x15=%016VR{x15}\n"
1846 "x16=%016VR{x16} x17=%016VR{x17} x18=%016VR{x18} x19=%016VR{x19}\n"
1847 "x20=%016VR{x20} x21=%016VR{x21} x22=%016VR{x22} x23=%016VR{x23}\n"
1848 "x24=%016VR{x24} x25=%016VR{x25} x26=%016VR{x26} x27=%016VR{x27}\n"
1849 "x28=%016VR{x28} x29=%016VR{x29} x30=%016VR{x30}\n"
1850 "pc=%016VR{pc} pstate=%016VR{pstate}\n"
1851 "sp_el0=%016VR{sp_el0} sp_el1=%016VR{sp_el1} elr_el1=%016VR{elr_el1}\n"
1852 "sctlr_el1=%016VR{sctlr_el1} tcr_el1=%016VR{tcr_el1}\n"
1853 "ttbr0_el1=%016VR{ttbr0_el1} ttbr1_el1=%016VR{ttbr1_el1}\n"
1854 "vbar_el1=%016VR{vbar_el1}\n"
1855 );
1856 char szInstr[256]; RT_ZERO(szInstr);
1857 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
1858 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
1859 szInstr, sizeof(szInstr), NULL);
1860 Log3(("%s%s\n", szRegs, szInstr));
1861 }
1862}
1863#endif /* LOG_ENABLED */
1864
1865
1866/**
1867 * Copies register state from the (common) exit context.
1868 *
1869 * ASSUMES no state copied yet.
1870 *
1871 * @param pVCpu The cross context per CPU structure.
1872 * @param pMsgHdr The common message header.
1873 */
1874DECLINLINE(void) nemR3WinCopyStateFromArmHeader(PVMCPUCC pVCpu, WHV_INTERCEPT_MESSAGE_HEADER const *pMsgHdr)
1875{
1876#ifdef LOG_ENABLED /* When state logging is enabled the state is synced completely upon VM exit. */
1877 if (!LogIs3Enabled())
1878#endif
1879 Assert( (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_PSTATE))
1880 == (CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_PSTATE));
1881
1882 pVCpu->cpum.GstCtx.Pc.u64 = pMsgHdr->Pc;
1883 pVCpu->cpum.GstCtx.fPState = pMsgHdr->Cpsr;
1884
1885 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_PSTATE);
1886}
1887
1888
1889/**
1890 * State to pass between nemHCWinHandleMemoryAccess / nemR3WinWHvHandleMemoryAccess
1891 * and nemHCWinHandleMemoryAccessPageCheckerCallback.
1892 */
1893typedef struct NEMHCWINHMACPCCSTATE
1894{
1895 /** Input: Write access. */
1896 bool fWriteAccess;
1897 /** Output: Set if we did something. */
1898 bool fDidSomething;
1899 /** Output: Set it we should resume. */
1900 bool fCanResume;
1901} NEMHCWINHMACPCCSTATE;
1902
1903/**
1904 * @callback_method_impl{FNPGMPHYSNEMCHECKPAGE,
1905 * Worker for nemR3WinHandleMemoryAccess; pvUser points to a
1906 * NEMHCWINHMACPCCSTATE structure. }
1907 */
1908NEM_TMPL_STATIC DECLCALLBACK(int)
1909nemHCWinHandleMemoryAccessPageCheckerCallback(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
1910{
1911 NEMHCWINHMACPCCSTATE *pState = (NEMHCWINHMACPCCSTATE *)pvUser;
1912 pState->fDidSomething = false;
1913 pState->fCanResume = false;
1914
1915 /* If A20 is disabled, we may need to make another query on the masked
1916 page to get the correct protection information. */
1917 uint8_t u2State = pInfo->u2NemState;
1918 RTGCPHYS GCPhysSrc = GCPhys;
1919
1920 /*
1921 * Consolidate current page state with actual page protection and access type.
1922 * We don't really consider downgrades here, as they shouldn't happen.
1923 */
1924 int rc;
1925 switch (u2State)
1926 {
1927 case NEM_WIN_PAGE_STATE_UNMAPPED:
1928 case NEM_WIN_PAGE_STATE_NOT_SET:
1929 if (pInfo->fNemProt == NEM_PAGE_PROT_NONE)
1930 {
1931 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #1\n", GCPhys));
1932 return VINF_SUCCESS;
1933 }
1934
1935 /* Don't bother remapping it if it's a write request to a non-writable page. */
1936 if ( pState->fWriteAccess
1937 && !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE))
1938 {
1939 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #1w\n", GCPhys));
1940 return VINF_SUCCESS;
1941 }
1942
1943 /* Map the page. */
1944 rc = nemHCNativeSetPhysPage(pVM,
1945 pVCpu,
1946 GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
1947 GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
1948 pInfo->fNemProt,
1949 &u2State,
1950 true /*fBackingState*/);
1951 pInfo->u2NemState = u2State;
1952 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - synced => %s + %Rrc\n",
1953 GCPhys, g_apszPageStates[u2State], rc));
1954 pState->fDidSomething = true;
1955 pState->fCanResume = true;
1956 return rc;
1957
1958 case NEM_WIN_PAGE_STATE_READABLE:
1959 if ( !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1960 && (pInfo->fNemProt & (NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE)))
1961 {
1962 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #2\n", GCPhys));
1963 return VINF_SUCCESS;
1964 }
1965
1966 break;
1967
1968 case NEM_WIN_PAGE_STATE_WRITABLE:
1969 if (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1970 {
1971 if (pInfo->u2OldNemState == NEM_WIN_PAGE_STATE_WRITABLE)
1972 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #3a\n", GCPhys));
1973 else
1974 {
1975 pState->fCanResume = true;
1976 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #3b (%s -> %s)\n",
1977 GCPhys, g_apszPageStates[pInfo->u2OldNemState], g_apszPageStates[u2State]));
1978 }
1979 return VINF_SUCCESS;
1980 }
1981 break;
1982
1983 default:
1984 AssertLogRelMsgFailedReturn(("u2State=%#x\n", u2State), VERR_NEM_IPE_4);
1985 }
1986
1987 /*
1988 * Unmap and restart the instruction.
1989 * If this fails, which it does every so often, just unmap everything for now.
1990 */
1991 /** @todo figure out whether we mess up the state or if it's WHv. */
1992 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
1993 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
1994 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
1995 if (SUCCEEDED(hrc))
1996 {
1997 pState->fDidSomething = true;
1998 pState->fCanResume = true;
1999 pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
2000 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
2001 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
2002 Log5(("NEM GPA unmapped/exit: %RGp (was %s, cMappedPages=%u)\n", GCPhys, g_apszPageStates[u2State], cMappedPages));
2003 return VINF_SUCCESS;
2004 }
2005 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
2006 LogRel(("nemHCWinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp %s hrc=%Rhrc (%#x)\n",
2007 GCPhys, g_apszPageStates[u2State], hrc, hrc));
2008 return VERR_NEM_UNMAP_PAGES_FAILED;
2009}
2010
2011
2012/**
2013 * Returns the byte size from the given access SAS value.
2014 *
2015 * @returns Number of bytes to transfer.
2016 * @param uSas The SAS value to convert.
2017 */
2018DECLINLINE(size_t) nemR3WinGetByteCountFromSas(uint8_t uSas)
2019{
2020 switch (uSas)
2021 {
2022 case ARMV8_EC_ISS_DATA_ABRT_SAS_BYTE: return sizeof(uint8_t);
2023 case ARMV8_EC_ISS_DATA_ABRT_SAS_HALFWORD: return sizeof(uint16_t);
2024 case ARMV8_EC_ISS_DATA_ABRT_SAS_WORD: return sizeof(uint32_t);
2025 case ARMV8_EC_ISS_DATA_ABRT_SAS_DWORD: return sizeof(uint64_t);
2026 default:
2027 AssertReleaseFailed();
2028 }
2029
2030 return 0;
2031}
2032
2033
2034/**
2035 * Sets the given general purpose register to the given value.
2036 *
2037 * @param pVCpu The cross context virtual CPU structure of the
2038 * calling EMT.
2039 * @param uReg The register index.
2040 * @param f64BitReg Flag whether to operate on a 64-bit or 32-bit register.
2041 * @param fSignExtend Flag whether to sign extend the value.
2042 * @param u64Val The value.
2043 */
2044DECLINLINE(void) nemR3WinSetGReg(PVMCPU pVCpu, uint8_t uReg, bool f64BitReg, bool fSignExtend, uint64_t u64Val)
2045{
2046 AssertReturnVoid(uReg < 31);
2047
2048 if (f64BitReg)
2049 pVCpu->cpum.GstCtx.aGRegs[uReg].x = fSignExtend ? (int64_t)u64Val : u64Val;
2050 else
2051 pVCpu->cpum.GstCtx.aGRegs[uReg].x = (uint64_t)(fSignExtend ? (int32_t)u64Val : (uint32_t)u64Val);
2052
2053 /* Mark the register as not extern anymore. */
2054 switch (uReg)
2055 {
2056 case 0:
2057 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X0;
2058 break;
2059 case 1:
2060 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X1;
2061 break;
2062 case 2:
2063 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X2;
2064 break;
2065 case 3:
2066 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X3;
2067 break;
2068 default:
2069 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_X4_X28));
2070 /** @todo We need to import all missing registers in order to clear this flag (or just set it in HV from here). */
2071 }
2072}
2073
2074
2075/**
2076 * Gets the given general purpose register and returns the value.
2077 *
2078 * @returns Value from the given register.
2079 * @param pVCpu The cross context virtual CPU structure of the
2080 * calling EMT.
2081 * @param uReg The register index.
2082 */
2083DECLINLINE(uint64_t) nemR3WinGetGReg(PVMCPU pVCpu, uint8_t uReg)
2084{
2085 AssertReturn(uReg <= ARMV8_A64_REG_XZR, 0);
2086
2087 if (uReg == ARMV8_A64_REG_XZR)
2088 return 0;
2089
2090 /** @todo Import the register if extern. */
2091 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_GPRS_MASK));
2092
2093 return pVCpu->cpum.GstCtx.aGRegs[uReg].x;
2094}
2095
2096
2097/**
2098 * Deals with memory access exits (WHvRunVpExitReasonMemoryAccess).
2099 *
2100 * @returns Strict VBox status code.
2101 * @param pVM The cross context VM structure.
2102 * @param pVCpu The cross context per CPU structure.
2103 * @param pExit The VM exit information to handle.
2104 * @sa nemHCWinHandleMessageMemory
2105 */
2106NEM_TMPL_STATIC VBOXSTRICTRC
2107nemR3WinHandleExitMemory(PVMCC pVM, PVMCPUCC pVCpu, MY_WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2108{
2109 uint64_t const uHostTsc = ASMReadTSC();
2110 Assert(pExit->MemoryAccess.Header.InterceptAccessType != 3);
2111
2112 /*
2113 * Ask PGM for information about the given GCPhys. We need to check if we're
2114 * out of sync first.
2115 */
2116 WHV_INTERCEPT_MESSAGE_HEADER const *pHdr = &pExit->MemoryAccess.Header;
2117 NEMHCWINHMACPCCSTATE State = { pExit->MemoryAccess.Header.InterceptAccessType == WHvMemoryAccessWrite, false, false };
2118 PGMPHYSNEMPAGEINFO Info;
2119 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pExit->MemoryAccess.Gpa, State.fWriteAccess, &Info,
2120 nemHCWinHandleMemoryAccessPageCheckerCallback, &State);
2121 if (RT_SUCCESS(rc))
2122 {
2123 if (Info.fNemProt & ( pExit->MemoryAccess.Header.InterceptAccessType == WHvMemoryAccessWrite
2124 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
2125 {
2126 if (State.fCanResume)
2127 {
2128 Log4(("MemExit/%u: %08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting (%s)\n",
2129 pVCpu->idCpu, pHdr->Pc,
2130 pExit->MemoryAccess.Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
2131 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
2132 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pExit->MemoryAccess.Header.InterceptAccessType]));
2133 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
2134 pHdr->Pc, uHostTsc);
2135 return VINF_SUCCESS;
2136 }
2137 }
2138 Log4(("MemExit/%u: %08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating (%s)\n",
2139 pVCpu->idCpu, pHdr->Pc,
2140 pExit->MemoryAccess.Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
2141 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
2142 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pExit->MemoryAccess.Header.InterceptAccessType]));
2143 }
2144 else
2145 Log4(("MemExit/%u: %08RX64: %RGp rc=%Rrc%s; emulating (%s)\n",
2146 pVCpu->idCpu, pHdr->Pc,
2147 pExit->MemoryAccess.Gpa, rc, State.fDidSomething ? " modified-backing" : "",
2148 g_apszHvInterceptAccessTypes[pExit->MemoryAccess.Header.InterceptAccessType]));
2149
2150 /*
2151 * Emulate the memory access, either access handler or special memory.
2152 */
2153 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2154 pExit->MemoryAccess.Header.InterceptAccessType == WHvMemoryAccessWrite
2155 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
2156 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
2157 pHdr->Pc, uHostTsc);
2158 nemR3WinCopyStateFromArmHeader(pVCpu, &pExit->MemoryAccess.Header);
2159 RT_NOREF_PV(pExitRec);
2160 rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
2161 AssertRCReturn(rc, rc);
2162
2163#ifdef LOG_ENABLED
2164 uint8_t const cbInstr = pExit->MemoryAccess.InstructionByteCount;
2165 RTGCPTR const GCPtrVa = pExit->MemoryAccess.Gva;
2166#endif
2167 RTGCPHYS const GCPhys = pExit->MemoryAccess.Gpa;
2168 uint64_t const uIss = pExit->MemoryAccess.Syndrome;
2169 bool fIsv = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_ISV);
2170 bool fL2Fault = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_S1PTW);
2171 bool fWrite = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_WNR);
2172 bool f64BitReg = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SF);
2173 bool fSignExtend = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SSE);
2174 uint8_t uReg = ARMV8_EC_ISS_DATA_ABRT_SRT_GET(uIss);
2175 uint8_t uAcc = ARMV8_EC_ISS_DATA_ABRT_SAS_GET(uIss);
2176 size_t cbAcc = nemR3WinGetByteCountFromSas(uAcc);
2177 LogFlowFunc(("fIsv=%RTbool fL2Fault=%RTbool fWrite=%RTbool f64BitReg=%RTbool fSignExtend=%RTbool uReg=%u uAcc=%u GCPtrDataAbrt=%RGv GCPhys=%RGp cbInstr=%u\n",
2178 fIsv, fL2Fault, fWrite, f64BitReg, fSignExtend, uReg, uAcc, GCPtrVa, GCPhys, cbInstr));
2179
2180 RT_NOREF(fL2Fault);
2181
2182 VBOXSTRICTRC rcStrict;
2183 if (fIsv)
2184 {
2185 EMHistoryAddExit(pVCpu,
2186 fWrite
2187 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
2188 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
2189 pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());
2190
2191 uint64_t u64Val = 0;
2192 if (fWrite)
2193 {
2194 u64Val = nemR3WinGetGReg(pVCpu, uReg);
2195 rcStrict = PGMPhysWrite(pVM, GCPhys, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
2196 Log4(("MmioExit/%u: %08RX64: WRITE %RGp LB %u, %.*Rhxs -> rcStrict=%Rrc\n",
2197 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhys, cbAcc, cbAcc,
2198 &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
2199 }
2200 else
2201 {
2202 rcStrict = PGMPhysRead(pVM, GCPhys, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
2203 Log4(("MmioExit/%u: %08RX64: READ %RGp LB %u -> %.*Rhxs rcStrict=%Rrc\n",
2204 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhys, cbAcc, cbAcc,
2205 &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
2206 if (rcStrict == VINF_SUCCESS)
2207 nemR3WinSetGReg(pVCpu, uReg, f64BitReg, fSignExtend, u64Val);
2208 }
2209 }
2210 else
2211 {
2212 /** @todo Our UEFI firmware accesses the flash region with the following instruction
2213 * when the NVRAM actually contains data:
2214 * ldrb w9, [x6, #-0x0001]!
2215 * This is too complicated for the hardware so the ISV bit is not set. Until there
2216 * is a proper IEM implementation we just handle this here for now to avoid annoying
2217 * users too much.
2218 */
2219 /* The following ASSUMES that the vCPU state is completely synced. */
2220
2221 /* Read instruction. */
2222 RTGCPTR GCPtrPage = pVCpu->cpum.GstCtx.Pc.u64 & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK;
2223 const void *pvPageR3 = NULL;
2224 PGMPAGEMAPLOCK PageMapLock;
2225
2226 rcStrict = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrPage, &pvPageR3, &PageMapLock);
2227 if (rcStrict == VINF_SUCCESS)
2228 {
2229 uint32_t u32Instr = *(uint32_t *)((uint8_t *)pvPageR3 + (pVCpu->cpum.GstCtx.Pc.u64 - GCPtrPage));
2230 PGMPhysReleasePageMappingLock(pVCpu->pVMR3, &PageMapLock);
2231
2232 DISSTATE Dis;
2233 rcStrict = DISInstrWithPrefetchedBytes((uintptr_t)pVCpu->cpum.GstCtx.Pc.u64, DISCPUMODE_ARMV8_A64, 0 /*fFilter - none */,
2234 &u32Instr, sizeof(u32Instr), NULL, NULL, &Dis, NULL);
2235 if (rcStrict == VINF_SUCCESS)
2236 {
2237 if ( Dis.pCurInstr->uOpcode == OP_ARMV8_A64_LDRB
2238 && Dis.aParams[0].armv8.enmType == kDisArmv8OpParmReg
2239 && Dis.aParams[0].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_32Bit
2240 && Dis.aParams[1].armv8.enmType == kDisArmv8OpParmAddrInGpr
2241 && Dis.aParams[1].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_64Bit
2242 && (Dis.aParams[1].fUse & DISUSE_PRE_INDEXED))
2243 {
2244 /* The fault address is already the final address. */
2245 uint8_t bVal = 0;
2246 rcStrict = PGMPhysRead(pVM, GCPhys, &bVal, 1, PGMACCESSORIGIN_HM);
2247 Log4(("MmioExit/%u: %08RX64: READ %#RGp LB %u -> %.*Rhxs rcStrict=%Rrc\n",
2248 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhys, sizeof(bVal), sizeof(bVal),
2249 &bVal, VBOXSTRICTRC_VAL(rcStrict) ));
2250 if (rcStrict == VINF_SUCCESS)
2251 {
2252 nemR3WinSetGReg(pVCpu, Dis.aParams[0].armv8.Op.Reg.idReg, false /*f64BitReg*/, false /*fSignExtend*/, bVal);
2253 /* Update the indexed register. */
2254 pVCpu->cpum.GstCtx.aGRegs[Dis.aParams[1].armv8.Op.Reg.idReg].x += Dis.aParams[1].armv8.u.offBase;
2255 }
2256 }
2257 /*
2258 * Seeing the following with the Windows 11/ARM TPM driver:
2259 * %fffff800e5342888 48 25 45 29 ldp w8, w9, [x10, #+0x0028]
2260 */
2261 else if ( Dis.pCurInstr->uOpcode == OP_ARMV8_A64_LDP
2262 && Dis.aParams[0].armv8.enmType == kDisArmv8OpParmReg
2263 && Dis.aParams[0].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_32Bit
2264 && Dis.aParams[1].armv8.enmType == kDisArmv8OpParmReg
2265 && Dis.aParams[1].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_32Bit
2266 && Dis.aParams[2].armv8.enmType == kDisArmv8OpParmAddrInGpr
2267 && Dis.aParams[2].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_64Bit)
2268 {
2269 /** @todo This is tricky to handle if the first register read returns something else than VINF_SUCCESS... */
2270 /* The fault address is already the final address. */
2271 uint32_t u32Val1 = 0;
2272 uint32_t u32Val2 = 0;
2273 rcStrict = PGMPhysRead(pVM, GCPhys, &u32Val1, sizeof(u32Val1), PGMACCESSORIGIN_HM);
2274 if (rcStrict == VINF_SUCCESS)
2275 rcStrict = PGMPhysRead(pVM, GCPhys + sizeof(uint32_t), &u32Val2, sizeof(u32Val2), PGMACCESSORIGIN_HM);
2276 Log4(("MmioExit/%u: %08RX64: READ %#RGp LB %u -> %.*Rhxs %.*Rhxs rcStrict=%Rrc\n",
2277 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhys, 2 * sizeof(uint32_t), sizeof(u32Val1),
2278 &u32Val1, sizeof(u32Val2), &u32Val2, VBOXSTRICTRC_VAL(rcStrict) ));
2279 if (rcStrict == VINF_SUCCESS)
2280 {
2281 nemR3WinSetGReg(pVCpu, Dis.aParams[0].armv8.Op.Reg.idReg, false /*f64BitReg*/, false /*fSignExtend*/, u32Val1);
2282 nemR3WinSetGReg(pVCpu, Dis.aParams[1].armv8.Op.Reg.idReg, false /*f64BitReg*/, false /*fSignExtend*/, u32Val2);
2283 }
2284 }
2285 else
2286 AssertFailedReturn(VERR_NOT_SUPPORTED);
2287 }
2288 }
2289 }
2290
2291 if (rcStrict == VINF_SUCCESS)
2292 pVCpu->cpum.GstCtx.Pc.u64 += sizeof(uint32_t); /** @todo Why is InstructionByteCount always 0? */
2293
2294 return rcStrict;
2295}
2296
2297
/**
 * Deals with hypercall exits (WHvRunVpExitReasonHypercall).
 *
 * @returns Strict VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context per CPU structure.
 * @param   pExit           The VM exit information to handle.
 * @sa      nemHCWinHandleMessageHypercall
 */
2307NEM_TMPL_STATIC VBOXSTRICTRC
2308nemR3WinHandleExitHypercall(PVMCC pVM, PVMCPUCC pVCpu, MY_WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2309{
2310 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2311
2312 /** @todo Raise exception to EL1 if PSCI not configured. */
2313 /** @todo Need a generic mechanism here to pass this to, GIM maybe?. */
2314 uint32_t uFunId = pExit->Hypercall.Immediate;
2315 bool fHvc64 = RT_BOOL(uFunId & ARM_SMCCC_FUNC_ID_64BIT); RT_NOREF(fHvc64);
2316 uint32_t uEntity = ARM_SMCCC_FUNC_ID_ENTITY_GET(uFunId);
2317 uint32_t uFunNum = ARM_SMCCC_FUNC_ID_NUM_GET(uFunId);
2318 if (uEntity == ARM_SMCCC_FUNC_ID_ENTITY_STD_SEC_SERVICE)
2319 {
2320 switch (uFunNum)
2321 {
2322 case ARM_PSCI_FUNC_ID_PSCI_VERSION:
2323 nemR3WinSetGReg(pVCpu, ARMV8_A64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_FUNC_ID_PSCI_VERSION_SET(1, 2));
2324 break;
2325 case ARM_PSCI_FUNC_ID_SYSTEM_OFF:
2326 rcStrict = VMR3PowerOff(pVM->pUVM);
2327 break;
2328 case ARM_PSCI_FUNC_ID_SYSTEM_RESET:
2329 case ARM_PSCI_FUNC_ID_SYSTEM_RESET2:
2330 {
2331 bool fHaltOnReset;
2332 int rc = CFGMR3QueryBool(CFGMR3GetChild(CFGMR3GetRoot(pVM), "PDM"), "HaltOnReset", &fHaltOnReset);
2333 if (RT_SUCCESS(rc) && fHaltOnReset)
2334 {
2335 Log(("nemHCLnxHandleExitHypercall: Halt On Reset!\n"));
2336 rcStrict = VINF_EM_HALT;
2337 }
2338 else
2339 {
2340 /** @todo pVM->pdm.s.fResetFlags = fFlags; */
2341 VM_FF_SET(pVM, VM_FF_RESET);
2342 rcStrict = VINF_EM_RESET;
2343 }
2344 break;
2345 }
2346 case ARM_PSCI_FUNC_ID_CPU_ON:
2347 {
2348 uint64_t u64TgtCpu = pExit->Hypercall.X[1];
2349 RTGCPHYS GCPhysExecAddr = pExit->Hypercall.X[2];
2350 uint64_t u64CtxId = pExit->Hypercall.X[3];
2351 VMMR3CpuOn(pVM, u64TgtCpu & 0xff, GCPhysExecAddr, u64CtxId);
2352 nemR3WinSetGReg(pVCpu, ARMV8_A64_REG_X0, true /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_STS_SUCCESS);
2353 break;
2354 }
2355 case ARM_PSCI_FUNC_ID_PSCI_FEATURES:
2356 {
2357 uint32_t u32FunNum = (uint32_t)pExit->Hypercall.X[1];
2358 switch (u32FunNum)
2359 {
2360 case ARM_PSCI_FUNC_ID_PSCI_VERSION:
2361 case ARM_PSCI_FUNC_ID_SYSTEM_OFF:
2362 case ARM_PSCI_FUNC_ID_SYSTEM_RESET:
2363 case ARM_PSCI_FUNC_ID_SYSTEM_RESET2:
2364 case ARM_PSCI_FUNC_ID_CPU_ON:
2365 nemR3WinSetGReg(pVCpu, ARMV8_A64_REG_X0,
2366 false /*f64BitReg*/, false /*fSignExtend*/,
2367 (uint64_t)ARM_PSCI_STS_SUCCESS);
2368 break;
2369 default:
2370 nemR3WinSetGReg(pVCpu, ARMV8_A64_REG_X0,
2371 false /*f64BitReg*/, false /*fSignExtend*/,
2372 (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
2373 }
2374 break;
2375 }
2376 default:
2377 nemR3WinSetGReg(pVCpu, ARMV8_A64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
2378 }
2379 }
2380 else
2381 nemR3WinSetGReg(pVCpu, ARMV8_A64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
2382
2383 /** @todo What to do if immediate is != 0? */
2384
2385 if (rcStrict == VINF_SUCCESS)
2386 pVCpu->cpum.GstCtx.Pc.u64 += sizeof(uint32_t);
2387
2388 return rcStrict;
2389}
2390
2391
/**
 * Deals with unrecoverable exception exits (WHvRunVpExitReasonUnrecoverableException).
 *
 * @returns Strict VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context per CPU structure.
 * @param   pExit           The VM exit information to handle.
 * @sa      nemHCWinHandleMessageUnrecoverableException
 */
NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExitUnrecoverableException(PVMCC pVM, PVMCPUCC pVCpu, MY_WHV_RUN_VP_EXIT_CONTEXT const *pExit)
{
#if 0
    /* NOTE(review): dead alternative kept from the x86/AMD64 backend - it references
       x64 context fields (Cs, Rip, Rflags) that do not apply to ARMv8. */
    /*
     * Just copy the state we've got and handle it in the loop for now.
     */
    nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
    Log(("TripleExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
         pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags));
    RT_NOREF_PV(pVM);
    return VINF_EM_TRIPLE_FAULT;
#else
    /*
     * Let IEM decide whether this is really it.
     */
    /* Record the exit in the history and pull the minimal ARM header state. */
    EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_UNRECOVERABLE_EXCEPTION),
                     pExit->UnrecoverableException.Header.Pc, ASMReadTSC());
    nemR3WinCopyStateFromArmHeader(pVCpu, &pExit->UnrecoverableException.Header);
    /* No recovery path implemented yet: halt hard so the condition is noticed. */
    AssertReleaseFailed();
    RT_NOREF_PV(pVM);
    return VINF_SUCCESS;
#endif
}
2424
2425
2426/**
2427 * Handles VM exits.
2428 *
2429 * @returns Strict VBox status code.
2430 * @param pVM The cross context VM structure.
2431 * @param pVCpu The cross context per CPU structure.
2432 * @param pExit The VM exit information to handle.
2433 * @sa nemHCWinHandleMessage
2434 */
NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExit(PVMCC pVM, PVMCPUCC pVCpu, MY_WHV_RUN_VP_EXIT_CONTEXT const *pExit)
{
#ifdef LOG_ENABLED
    /* With level-3 logging, pull the complete guest state from Hyper-V and dump it. */
    if (LogIs3Enabled())
    {
        int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, CPUMCTX_EXTRN_ALL);
        AssertRCReturn(rc, rc);

        nemR3WinLogState(pVM, pVCpu);
    }
#endif

    /* Dispatch on the WHv exit reason; every case returns directly. */
    switch (pExit->ExitReason)
    {
        /* Access to a GPA without a mapping behind it (MMIO or unassigned memory). */
        case WHvRunVpExitReasonUnmappedGpa:
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemUnmapped);
            return nemR3WinHandleExitMemory(pVM, pVCpu, pExit);

        /* WHvCancelRunVirtualProcessor was called on this vCPU; return to the outer loop. */
        case WHvRunVpExitReasonCanceled:
            Log4(("CanceledExit/%u\n", pVCpu->idCpu));
            return VINF_SUCCESS;

        /* HVC instruction (PSCI/SMCCC services). */
        case WHvRunVpExitReasonHypercall:
            return nemR3WinHandleExitHypercall(pVM, pVCpu, pExit);

        /* Numeric value used because the SDK headers we build against may not define it yet. */
        case 0x8001000c: /* WHvRunVpExitReasonArm64Reset */
        {
            if (pExit->Arm64Reset.ResetType == WHV_ARM64_RESET_CONTEXT_TYPE_POWER_OFF)
                return VMR3PowerOff(pVM->pUVM);
            else if (pExit->Arm64Reset.ResetType == WHV_ARM64_RESET_CONTEXT_TYPE_RESET)
            {
                VM_FF_SET(pVM, VM_FF_RESET);
                return VINF_EM_RESET;
            }
            else
                AssertLogRelFailedReturn(VERR_NEM_IPE_3);
        }

        case WHvRunVpExitReasonUnrecoverableException:
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitUnrecoverable);
            return nemR3WinHandleExitUnrecoverableException(pVM, pVCpu, pExit);

        /* Exits we have no handler for: dump the raw exit context and fail loudly. */
        case WHvRunVpExitReasonUnsupportedFeature:
        case WHvRunVpExitReasonInvalidVpRegisterValue:
            LogRel(("Unimplemented exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit));
            AssertLogRelMsgFailedReturn(("Unexpected exit on CPU #%u: %#x\n%.32Rhxd\n",
                                         pVCpu->idCpu, pExit->ExitReason, pExit), VERR_NEM_IPE_3);

        /* Undesired exits: */
        case WHvRunVpExitReasonNone:
        default:
            LogRel(("Unknown exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit));
            AssertLogRelMsgFailedReturn(("Unknown exit on CPU #%u: %#x!\n", pVCpu->idCpu, pExit->ExitReason), VERR_NEM_IPE_3);
    }
}
2490
2491
2492VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu)
2493{
2494 LogFlow(("NEM/%u: %08RX64 pstate=%#08RX64 <=\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc, pVCpu->cpum.GstCtx.fPState));
2495#ifdef LOG_ENABLED
2496 if (LogIs3Enabled())
2497 nemR3WinLogState(pVM, pVCpu);
2498#endif
2499
2500 /*
2501 * Try switch to NEM runloop state.
2502 */
2503 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
2504 { /* likely */ }
2505 else
2506 {
2507 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
2508 LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
2509 return VINF_SUCCESS;
2510 }
2511
2512 if (pVCpu->nem.s.fSyncCntvRegs)
2513 {
2514 static const WHV_REGISTER_NAME s_aNames[2] = { WHvArm64RegisterCntvCtlEl0, WHvArm64RegisterCntvCvalEl0 };
2515 WHV_REGISTER_VALUE aRegs[RT_ELEMENTS(s_aNames)];
2516 aRegs[0].Reg64 = pVCpu->cpum.GstCtx.CntvCtlEl0;
2517 aRegs[1].Reg64 = pVCpu->cpum.GstCtx.CntvCValEl0;
2518
2519 HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, s_aNames, RT_ELEMENTS(s_aNames), aRegs);
2520 AssertLogRelMsgReturn(SUCCEEDED(hrc),
2521 ("WHvSetVirtualProcessorRegisters(%p, 0,{CNTV_CTL_EL0, CNTV_CVAL_EL0}, 2,) -> %Rhrc (Last=%#x/%u)\n",
2522 pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
2523 , VERR_NEM_IPE_9);
2524 pVCpu->nem.s.fSyncCntvRegs = false;
2525 }
2526
2527
2528 /*
2529 * The run loop.
2530 *
2531 * Current approach to state updating to use the sledgehammer and sync
2532 * everything every time. This will be optimized later.
2533 */
2534 const bool fSingleStepping = DBGFIsStepping(pVCpu);
2535 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2536 for (unsigned iLoop = 0;; iLoop++)
2537 {
2538 /*
2539 * Poll timers and run for a bit.
2540 *
2541 * With the VID approach (ring-0 or ring-3) we can specify a timeout here,
2542 * so we take the time of the next timer event and uses that as a deadline.
2543 * The rounding heuristics are "tuned" so that rhel5 (1K timer) will boot fine.
2544 */
2545 /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
2546 * the whole polling job when timers have changed... */
2547 uint64_t offDeltaIgnored;
2548 uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
2549 if ( !VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
2550 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
2551 {
2552 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_WAIT, VMCPUSTATE_STARTED_EXEC_NEM))
2553 {
2554 /* Ensure that Hyper-V has the whole state. */
2555 int rc2 = nemHCWinCopyStateToHyperV(pVM, pVCpu);
2556 AssertRCReturn(rc2, rc2);
2557
2558#ifdef LOG_ENABLED
2559 if (LogIsFlowEnabled())
2560 {
2561 static const WHV_REGISTER_NAME s_aNames[2] = { WHvArm64RegisterPc, WHvArm64RegisterPstate };
2562 WHV_REGISTER_VALUE aRegs[RT_ELEMENTS(s_aNames)] = { { { {0, 0} } } };
2563 WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, s_aNames, RT_ELEMENTS(s_aNames), aRegs);
2564 LogFlow(("NEM/%u: Entry @ %08RX64 pstate=%#RX64\n", pVCpu->idCpu, aRegs[0].Reg64, aRegs[1].Reg64));
2565 }
2566#endif
2567
2568 MY_WHV_RUN_VP_EXIT_CONTEXT ExitReason = {0};
2569 TMNotifyStartOfExecution(pVM, pVCpu);
2570
2571 HRESULT hrc = WHvRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, &ExitReason, sizeof(ExitReason));
2572
2573 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
2574 TMNotifyEndOfExecution(pVM, pVCpu, ASMReadTSC());
2575#ifdef LOG_ENABLED
2576 if (LogIsFlowEnabled())
2577 {
2578 static const WHV_REGISTER_NAME s_aNames[2] = { WHvArm64RegisterPc, WHvArm64RegisterPstate };
2579 WHV_REGISTER_VALUE aRegs[RT_ELEMENTS(s_aNames)] = { { { {0, 0} } } };
2580 WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, s_aNames, RT_ELEMENTS(s_aNames), aRegs);
2581 LogFlow(("NEM/%u: Exit @ %08RX64 pstate=%#RX64 Reason=%#x\n",
2582 pVCpu->idCpu, aRegs[0].Reg64, aRegs[1].Reg64, ExitReason.ExitReason));
2583 }
2584#endif
2585 if (SUCCEEDED(hrc))
2586 {
2587 /* Always sync the CNTV_CTL_EL0/CNTV_CVAL_EL0 registers, just like we do on macOS. */
2588 static const WHV_REGISTER_NAME s_aNames[2] = { WHvArm64RegisterCntvCtlEl0, WHvArm64RegisterCntvCvalEl0 };
2589 WHV_REGISTER_VALUE aRegs[RT_ELEMENTS(s_aNames)] = { { { {0, 0} } } };
2590 hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, s_aNames, RT_ELEMENTS(s_aNames), aRegs);
2591 AssertLogRelMsgReturn(SUCCEEDED(hrc),
2592 ("WHvGetVirtualProcessorRegisters(%p, 0,{CNTV_CTL_EL0, CNTV_CVAL_EL0}, 2,) -> %Rhrc (Last=%#x/%u)\n",
2593 pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
2594 , VERR_NEM_IPE_9);
2595
2596 pVCpu->cpum.GstCtx.CntvCtlEl0 = aRegs[0].Reg64;
2597 pVCpu->cpum.GstCtx.CntvCValEl0 = aRegs[1].Reg64;
2598
2599 /*
2600 * Deal with the message.
2601 */
2602 rcStrict = nemR3WinHandleExit(pVM, pVCpu, &ExitReason);
2603 if (rcStrict == VINF_SUCCESS)
2604 { /* hopefully likely */ }
2605 else
2606 {
2607 LogFlow(("NEM/%u: breaking: nemR3WinHandleExit -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
2608 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
2609 break;
2610 }
2611 }
2612 else
2613 AssertLogRelMsgFailedReturn(("WHvRunVirtualProcessor failed for CPU #%u: %#x (%u)\n",
2614 pVCpu->idCpu, hrc, GetLastError()),
2615 VERR_NEM_IPE_0);
2616
2617 /*
2618 * If no relevant FFs are pending, loop.
2619 */
2620 if ( !VM_FF_IS_ANY_SET( pVM, !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
2621 && !VMCPU_FF_IS_ANY_SET(pVCpu, !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
2622 continue;
2623
2624 /** @todo Try handle pending flags, not just return to EM loops. Take care
2625 * not to set important RCs here unless we've handled a message. */
2626 LogFlow(("NEM/%u: breaking: pending FF (%#x / %#RX64)\n",
2627 pVCpu->idCpu, pVM->fGlobalForcedActions, (uint64_t)pVCpu->fLocalForcedActions));
2628 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPost);
2629 }
2630 else
2631 {
2632 LogFlow(("NEM/%u: breaking: canceled %d (pre exec)\n", pVCpu->idCpu, VMCPU_GET_STATE(pVCpu) ));
2633 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnCancel);
2634 }
2635 }
2636 else
2637 {
2638 LogFlow(("NEM/%u: breaking: pending FF (pre exec)\n", pVCpu->idCpu));
2639 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPre);
2640 }
2641 break;
2642 } /* the run loop */
2643
2644
2645 /*
2646 * If the CPU is running, make sure to stop it before we try sync back the
2647 * state and return to EM. We don't sync back the whole state if we can help it.
2648 */
2649 if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
2650 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
2651
2652 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL)
2653 {
2654 /* Try anticipate what we might need. */
2655 uint64_t fImport = IEM_CPUMCTX_EXTRN_MUST_MASK;
2656 if ( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
2657 || RT_FAILURE(rcStrict))
2658 fImport = CPUMCTX_EXTRN_ALL;
2659 else if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ))
2660 fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;
2661
2662 if (pVCpu->cpum.GstCtx.fExtrn & fImport)
2663 {
2664 int rc2 = nemHCWinCopyStateFromHyperV(pVM, pVCpu, fImport);
2665 if (RT_SUCCESS(rc2))
2666 pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
2667 else if (RT_SUCCESS(rcStrict))
2668 rcStrict = rc2;
2669 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
2670 pVCpu->cpum.GstCtx.fExtrn = 0;
2671 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
2672 }
2673 else
2674 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
2675 }
2676 else
2677 {
2678 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
2679 pVCpu->cpum.GstCtx.fExtrn = 0;
2680 }
2681
2682#if 0
2683 UINT32 cbWritten;
2684 WHV_ARM64_LOCAL_INTERRUPT_CONTROLLER_STATE IntrState;
2685 HRESULT hrc = WHvGetVirtualProcessorState(pVM->nem.s.hPartition, pVCpu->idCpu, WHvVirtualProcessorStateTypeInterruptControllerState2,
2686 &IntrState, sizeof(IntrState), &cbWritten);
2687 AssertLogRelMsgReturn(SUCCEEDED(hrc),
2688 ("WHvGetVirtualProcessorState(%p, %u,WHvVirtualProcessorStateTypeInterruptControllerState2,) -> %Rhrc (Last=%#x/%u)\n",
2689 pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
2690 , VERR_NEM_GET_REGISTERS_FAILED);
2691 LogFlowFunc(("IntrState: cbWritten=%u\n"));
2692 for (uint32_t i = 0; i < RT_ELEMENTS(IntrState.BankedInterruptState); i++)
2693 {
2694 WHV_ARM64_INTERRUPT_STATE *pState = &IntrState.BankedInterruptState[i];
2695 LogFlowFunc(("IntrState: Intr %u:\n"
2696 " Enabled=%RTbool\n"
2697 " EdgeTriggered=%RTbool\n"
2698 " Asserted=%RTbool\n"
2699 " SetPending=%RTbool\n"
2700 " Active=%RTbool\n"
2701 " Direct=%RTbool\n"
2702 " GicrIpriorityrConfigured=%u\n"
2703 " GicrIpriorityrActive=%u\n",
2704 i, pState->Enabled, pState->EdgeTriggered, pState->Asserted, pState->SetPending, pState->Active, pState->Direct,
2705 pState->GicrIpriorityrConfigured, pState->GicrIpriorityrActive));
2706 }
2707#endif
2708
2709 LogFlow(("NEM/%u: %08RX64 pstate=%#08RX64 => %Rrc\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64,
2710 pVCpu->cpum.GstCtx.fPState, VBOXSTRICTRC_VAL(rcStrict) ));
2711 return rcStrict;
2712}
2713
2714
2715VMMR3_INT_DECL(bool) NEMR3CanExecuteGuest(PVM pVM, PVMCPU pVCpu)
2716{
2717 Assert(VM_IS_NEM_ENABLED(pVM));
2718 RT_NOREF(pVM, pVCpu);
2719 return true;
2720}
2721
2722
/**
 * Halt handler: switches the vCPU into the NEM run-loop state and runs the
 * virtual processor once so Hyper-V can park it until an event arrives,
 * handling at most one exit before returning to EM.
 *
 * @returns Strict VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context per CPU structure.
 */
VMMR3_INT_DECL(int) NEMR3Halt(PVM pVM, PVMCPU pVCpu)
{
    /*
     * Try switch to NEM runloop state.
     */
    if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_HALTED))
    { /* likely */ }
    else
    {
        VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
        LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
        return VINF_SUCCESS;
    }

    VBOXSTRICTRC rcStrict = VINF_SUCCESS;

    /* Ensure that Hyper-V has the whole state. */
    int rc2 = nemHCWinCopyStateToHyperV(pVM, pVCpu);
    AssertRCReturn(rc2, rc2);

    /* Only enter the hypervisor when no relevant forced actions are pending. */
    if (   !VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
        && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
    {
        if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_WAIT, VMCPUSTATE_STARTED_EXEC_NEM))
        {
            MY_WHV_RUN_VP_EXIT_CONTEXT ExitReason = {0};
            HRESULT hrc = WHvRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, &ExitReason, sizeof(ExitReason));
            VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
            /* NOTE(review): unlike nemR3NativeRunGC there is no matching
               TMNotifyStartOfExecution before the run call here -- confirm intentional. */
            TMNotifyEndOfExecution(pVM, pVCpu, ASMReadTSC());
#ifdef LOG_ENABLED
            LogFlow(("NEM/%u: Exit  @ @todo Reason=%#x\n", pVCpu->idCpu, ExitReason.ExitReason));
#endif
            if (SUCCEEDED(hrc))
            {
                /*
                 * Deal with the message.
                 */
                rcStrict = nemR3WinHandleExit(pVM, pVCpu, &ExitReason);
                LogFlow(("NEM/%u: breaking: nemHCWinHandleMessage -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
            }
            else
                AssertLogRelMsgFailedReturn(("WHvRunVirtualProcessor failed for CPU #%u: %#x (%u)\n",
                                             pVCpu->idCpu, hrc, GetLastError()),
                                            VERR_NEM_IPE_0);

            /** @todo Try handle pending flags, not just return to EM loops.  Take care
             *        not to set important RCs here unless we've handled a message. */
            LogFlow(("NEM/%u: breaking: pending FF (%#x / %#RX64)\n",
                     pVCpu->idCpu, pVM->fGlobalForcedActions, (uint64_t)pVCpu->fLocalForcedActions));
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPost);
        }
        else
        {
            LogFlow(("NEM/%u: breaking: canceled %d (pre exec)\n", pVCpu->idCpu, VMCPU_GET_STATE(pVCpu) ));
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnCancel);
        }
    }
    else
    {
        LogFlow(("NEM/%u: breaking: pending FF (pre exec)\n", pVCpu->idCpu));
        STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPre);
    }

    /* Drop back to the halted state before returning to EM. */
    if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HALTED, VMCPUSTATE_STARTED_EXEC_NEM))
        VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HALTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);

    return rcStrict;
}
2792
2793
2794bool nemR3NativeSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
2795{
2796 NOREF(pVM); NOREF(pVCpu); NOREF(fEnable);
2797 return false;
2798}
2799
2800
2801void nemR3NativeNotifyFF(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
2802{
2803 Log8(("nemR3NativeNotifyFF: canceling %u\n", pVCpu->idCpu));
2804 if (pVM->nem.s.fCreatedEmts)
2805 {
2806 HRESULT hrc = WHvCancelRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, 0);
2807 AssertMsg(SUCCEEDED(hrc), ("WHvCancelRunVirtualProcessor -> hrc=%Rhrc\n", hrc));
2808 RT_NOREF_PV(hrc);
2809 }
2810 RT_NOREF_PV(fFlags);
2811}
2812
2813
2814DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChanged(PVM pVM, bool fUseDebugLoop)
2815{
2816 RT_NOREF(pVM, fUseDebugLoop);
2817 return false;
2818}
2819
2820
2821DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu, bool fUseDebugLoop)
2822{
2823 RT_NOREF(pVM, pVCpu, fUseDebugLoop);
2824 return false;
2825}
2826
2827
2828DECLINLINE(int) nemR3NativeGCPhys2R3PtrReadOnly(PVM pVM, RTGCPHYS GCPhys, const void **ppv)
2829{
2830 PGMPAGEMAPLOCK Lock;
2831 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, ppv, &Lock);
2832 if (RT_SUCCESS(rc))
2833 PGMPhysReleasePageMappingLock(pVM, &Lock);
2834 return rc;
2835}
2836
2837
2838DECLINLINE(int) nemR3NativeGCPhys2R3PtrWriteable(PVM pVM, RTGCPHYS GCPhys, void **ppv)
2839{
2840 PGMPAGEMAPLOCK Lock;
2841 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, ppv, &Lock);
2842 if (RT_SUCCESS(rc))
2843 PGMPhysReleasePageMappingLock(pVM, &Lock);
2844 return rc;
2845}
2846
2847
2848VMMR3_INT_DECL(int) NEMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvR3,
2849 uint8_t *pu2State, uint32_t *puNemRange)
2850{
2851 Log5(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p pu2State=%p (%d) puNemRange=%p (%d)\n",
2852 GCPhys, cb, pvR3, pu2State, pu2State, puNemRange, *puNemRange));
2853
2854 *pu2State = UINT8_MAX;
2855 RT_NOREF(puNemRange);
2856
2857 if (pvR3)
2858 {
2859 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
2860 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvR3, GCPhys, cb,
2861 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagWrite | WHvMapGpaRangeFlagExecute);
2862 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
2863 if (SUCCEEDED(hrc))
2864 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
2865 else
2866 {
2867 LogRel(("NEMR3NotifyPhysRamRegister: GCPhys=%RGp LB %RGp pvR3=%p hrc=%Rhrc (%#x) Last=%#x/%u\n",
2868 GCPhys, cb, pvR3, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2869 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
2870 return VERR_NEM_MAP_PAGES_FAILED;
2871 }
2872 }
2873 return VINF_SUCCESS;
2874}
2875
2876
2877VMMR3_INT_DECL(bool) NEMR3IsMmio2DirtyPageTrackingSupported(PVM pVM)
2878{
2879 RT_NOREF(pVM);
2880 return g_pfnWHvQueryGpaRangeDirtyBitmap != NULL;
2881}
2882
2883
/**
 * Early notification of an MMIO/MMIO2 mapping: unmaps any RAM being replaced
 * and, if MMIO2 backing is supplied, maps it into the partition (optionally
 * with dirty-page tracking).
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      Start of the range.
 * @param   cb          Size of the range in bytes.
 * @param   fFlags      NEM_NOTIFY_PHYS_MMIO_EX_F_XXX.
 * @param   pvRam       RAM backing being replaced (unused here beyond logging).
 * @param   pvMmio2     MMIO2 backing to map, NULL for pure MMIO.
 * @param   pu2State    Per-range NEM page-state tracker (in/out).
 * @param   puNemRange  NEM range cookie (unused by this backend).
 */
VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
                                                  void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
{
    Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p (%d) puNemRange=%p (%#x)\n",
          GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, *pu2State, puNemRange, puNemRange ? *puNemRange : UINT32_MAX));
    RT_NOREF(puNemRange);

    /*
     * Unmap the RAM we're replacing.
     */
    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
    {
        STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRange, a);
        HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, cb);
        STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRange, a);
        if (SUCCEEDED(hrc))
        { /* likely */ }
        else if (pvMmio2)
            /* A failed unmap is non-fatal when MMIO2 is about to be mapped over it anyway. */
            LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> hrc=%Rhrc (%#x) Last=%#x/%u (ignored)\n",
                    GCPhys, cb, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
        else
        {
            LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> hrc=%Rhrc (%#x) Last=%#x/%u\n",
                    GCPhys, cb, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
            STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
            return VERR_NEM_UNMAP_PAGES_FAILED;
        }
    }

    /*
     * Map MMIO2 if any.
     */
    if (pvMmio2)
    {
        Assert(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2);
        WHV_MAP_GPA_RANGE_FLAGS fWHvFlags = WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagWrite | WHvMapGpaRangeFlagExecute;
        /* Dirty tracking is only requested when the (optional) query API is available. */
        if ((fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES) && g_pfnWHvQueryGpaRangeDirtyBitmap)
            fWHvFlags |= WHvMapGpaRangeFlagTrackDirtyPages;
        STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
        HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvMmio2, GCPhys, cb, fWHvFlags);
        STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
        if (SUCCEEDED(hrc))
            *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
        else
        {
            LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x pvMmio2=%p fWHvFlags=%#x: Map -> hrc=%Rhrc (%#x) Last=%#x/%u\n",
                    GCPhys, cb, fFlags, pvMmio2, fWHvFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
            STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
            return VERR_NEM_MAP_PAGES_FAILED;
        }
    }
    else
    {
        /* Pure MMIO: nothing mapped, exits will be taken on access. */
        Assert(!(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2));
        *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
    }
    RT_NOREF(pvRam);
    return VINF_SUCCESS;
}
2943
2944
2945VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
2946 void *pvRam, void *pvMmio2, uint32_t *puNemRange)
2947{
2948 RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, puNemRange);
2949 return VINF_SUCCESS;
2950}
2951
2952
/**
 * Called by PGM when an MMIO / MMIO2 range is unmapped from the guest.
 *
 * Unmaps the MMIO2 pages from the hypervisor partition and, when the range
 * replaced RAM, maps the original RAM pages back in with full access.
 *
 * @returns VBox status code (first failure wins, but both steps are attempted).
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The start of the range.
 * @param   cb          The size of the range.
 * @param   fFlags      NEM_NOTIFY_PHYS_MMIO_EX_F_XXX.
 * @param   pvRam       The RAM backing to restore; dereferenced only with
 *                      NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE set.
 * @param   pvMmio2     The MMIO2 backing (unused here, logging only).
 * @param   pu2State    Our page state (output, optional).
 * @param   puNemRange  The NEM range index (unused except for logging).
 *                      NOTE(review): dereferenced unconditionally by the Log5
 *                      statement below — callers must not pass NULL; confirm.
 */
VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExUnmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, void *pvRam,
                                               void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
{
    int rc = VINF_SUCCESS;
    Log5(("NEMR3NotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p uNemRange=%#x (%#x)\n",
          GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, puNemRange, *puNemRange));

    /*
     * Unmap the MMIO2 pages.
     */
    /** @todo If we implement aliasing (MMIO2 page aliased into MMIO range),
     *        we may have more stuff to unmap even in case of pure MMIO... */
    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2)
    {
        STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRange, a);
        HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, cb);
        STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRange, a);
        if (FAILED(hrc))
        {
            /* Failure is recorded but does not abort the RAM restore below. */
            LogRel2(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> hrc=%Rhrc (%#x) Last=%#x/%u (ignored)\n",
                     GCPhys, cb, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
            rc = VERR_NEM_UNMAP_PAGES_FAILED;
            STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
        }
    }

    /*
     * Restore the RAM we replaced.
     */
    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
    {
        AssertPtr(pvRam);
        STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
        HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvRam, GCPhys, cb,
                                     WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagWrite | WHvMapGpaRangeFlagExecute);
        STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
        if (SUCCEEDED(hrc))
        { /* likely */ }
        else
        {
            LogRel(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp pvMmio2=%p hrc=%Rhrc (%#x) Last=%#x/%u\n",
                    GCPhys, cb, pvMmio2, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
            rc = VERR_NEM_MAP_PAGES_FAILED;
            STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
        }
        /* State is set to writable even on mapping failure (matches the map attempt). */
        if (pu2State)
            *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
    }
    /* Mark the pages as unmapped if relevant. */
    else if (pu2State)
        *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;

    RT_NOREF(pvMmio2, puNemRange);
    return rc;
}
3008
3009
3010VMMR3_INT_DECL(int) NEMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t uNemRange,
3011 void *pvBitmap, size_t cbBitmap)
3012{
3013 Assert(VM_IS_NEM_ENABLED(pVM));
3014 AssertReturn(g_pfnWHvQueryGpaRangeDirtyBitmap, VERR_INTERNAL_ERROR_2);
3015 Assert(cbBitmap == (uint32_t)cbBitmap);
3016 RT_NOREF(uNemRange);
3017
3018 /* This is being profiled by PGM, see /PGM/Mmio2QueryAndResetDirtyBitmap. */
3019 HRESULT hrc = WHvQueryGpaRangeDirtyBitmap(pVM->nem.s.hPartition, GCPhys, cb, (UINT64 *)pvBitmap, (uint32_t)cbBitmap);
3020 if (SUCCEEDED(hrc))
3021 return VINF_SUCCESS;
3022
3023 AssertLogRelMsgFailed(("GCPhys=%RGp LB %RGp pvBitmap=%p LB %#zx hrc=%Rhrc (%#x) Last=%#x/%u\n",
3024 GCPhys, cb, pvBitmap, cbBitmap, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
3025 return VERR_NEM_QUERY_DIRTY_BITMAP_FAILED;
3026}
3027
3028
/**
 * Early notification of a ROM range registration - currently a no-op.
 *
 * The page state and NEM range index are reset; the actual read-only mapping
 * happens in NEMR3NotifyPhysRomRegisterLate.
 *
 * @returns VBox status code (always VINF_SUCCESS in the active code path).
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The start of the ROM range.
 * @param   cb          The size of the ROM range.
 * @param   pvPages     Pointer to the backing pages.
 * @param   fFlags      NEM_NOTIFY_PHYS_ROM_F_XXX.
 * @param   pu2State    Our page state (output, reset to UINT8_MAX).
 * @param   puNemRange  The NEM range index (output, reset to 0).
 */
VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages, uint32_t fFlags,
                                                    uint8_t *pu2State, uint32_t *puNemRange)
{
    Log5(("nemR3NativeNotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags));
    *pu2State = UINT8_MAX;
    *puNemRange = 0;

#if 0 /* Let's not do this after all.  We'll get protection change notifications for each page and if not we'll map them lazily. */
    /* NOTE(review): this dead code uses X86_PAGE_SHIFT/X86_PAGE_SIZE in an ARMv8
       backend — presumably copied from the x86 variant; verify before enabling. */
    RTGCPHYS const cPages = cb >> X86_PAGE_SHIFT;
    for (RTGCPHYS iPage = 0; iPage < cPages; iPage++, GCPhys += X86_PAGE_SIZE)
    {
        const void *pvPage;
        int rc = nemR3NativeGCPhys2R3PtrReadOnly(pVM, GCPhys, &pvPage);
        if (RT_SUCCESS(rc))
        {
            HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, (void *)pvPage, GCPhys, X86_PAGE_SIZE,
                                         WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
            if (SUCCEEDED(hrc))
            { /* likely */ }
            else
            {
                LogRel(("nemR3NativeNotifyPhysRomRegisterEarly: GCPhys=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
                        GCPhys, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
                return VERR_NEM_INIT_FAILED;
            }
        }
        else
        {
            LogRel(("nemR3NativeNotifyPhysRomRegisterEarly: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));
            return rc;
        }
    }
    RT_NOREF_PV(fFlags);
#else
    RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags);
#endif
    return VINF_SUCCESS;
}
3067
3068
3069VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages,
3070 uint32_t fFlags, uint8_t *pu2State, uint32_t *puNemRange)
3071{
3072 Log5(("nemR3NativeNotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n",
3073 GCPhys, cb, pvPages, fFlags, pu2State, *pu2State, puNemRange, *puNemRange));
3074 *pu2State = UINT8_MAX;
3075
3076 /*
3077 * (Re-)map readonly.
3078 */
3079 AssertPtrReturn(pvPages, VERR_INVALID_POINTER);
3080 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
3081 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvPages, GCPhys, cb, WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
3082 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
3083 if (SUCCEEDED(hrc))
3084 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
3085 else
3086 {
3087 LogRel(("nemR3NativeNotifyPhysRomRegisterEarly: GCPhys=%RGp LB %RGp pvPages=%p fFlags=%#x hrc=%Rhrc (%#x) Last=%#x/%u\n",
3088 GCPhys, cb, pvPages, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
3089 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
3090 return VERR_NEM_MAP_PAGES_FAILED;
3091 }
3092 RT_NOREF(fFlags, puNemRange);
3093 return VINF_SUCCESS;
3094}
3095
3096VMMR3_INT_DECL(void) NEMR3NotifySetA20(PVMCPU pVCpu, bool fEnabled)
3097{
3098 Log(("nemR3NativeNotifySetA20: fEnabled=%RTbool\n", fEnabled));
3099 Assert(VM_IS_NEM_ENABLED(pVCpu->CTX_SUFF(pVM)));
3100 RT_NOREF(pVCpu, fEnabled);
3101}
3102
3103
3104void nemHCNativeNotifyHandlerPhysicalRegister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
3105{
3106 Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
3107 NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb);
3108}
3109
3110
3111VMM_INT_DECL(void) NEMHCNotifyHandlerPhysicalDeregister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
3112 RTR3PTR pvMemR3, uint8_t *pu2State)
3113{
3114 Log5(("NEMHCNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d pvMemR3=%p pu2State=%p (%d)\n",
3115 GCPhys, cb, enmKind, pvMemR3, pu2State, *pu2State));
3116
3117 *pu2State = UINT8_MAX;
3118 if (pvMemR3)
3119 {
3120 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
3121 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvMemR3, GCPhys, cb,
3122 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute | WHvMapGpaRangeFlagWrite);
3123 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
3124 if (SUCCEEDED(hrc))
3125 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
3126 else
3127 AssertLogRelMsgFailed(("NEMHCNotifyHandlerPhysicalDeregister: WHvMapGpaRange(,%p,%RGp,%RGp,) -> %Rhrc\n",
3128 pvMemR3, GCPhys, cb, hrc));
3129 }
3130 RT_NOREF(enmKind);
3131}
3132
3133
3134void nemHCNativeNotifyHandlerPhysicalModify(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
3135 RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
3136{
3137 Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
3138 GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
3139 NOREF(pVM); NOREF(enmKind); NOREF(GCPhysOld); NOREF(GCPhysNew); NOREF(cb); NOREF(fRestoreAsRAM);
3140}
3141
3142
3143/**
3144 * Worker that maps pages into Hyper-V.
3145 *
3146 * This is used by the PGM physical page notifications as well as the memory
3147 * access VMEXIT handlers.
3148 *
3149 * @returns VBox status code.
3150 * @param pVM The cross context VM structure.
3151 * @param pVCpu The cross context virtual CPU structure of the
3152 * calling EMT.
3153 * @param GCPhysSrc The source page address.
3154 * @param GCPhysDst The hyper-V destination page. This may differ from
3155 * GCPhysSrc when A20 is disabled.
3156 * @param fPageProt NEM_PAGE_PROT_XXX.
3157 * @param pu2State Our page state (input/output).
3158 * @param fBackingChanged Set if the page backing is being changed.
3159 * @thread EMT(pVCpu)
3160 */
NEM_TMPL_STATIC int nemHCNativeSetPhysPage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
                                           uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged)
{
    /*
     * Looks like we need to unmap a page before we can change the backing
     * or even modify the protection.  This is going to be *REALLY* efficient.
     * PGM lends us two bits to keep track of the state here.
     */
    RT_NOREF(pVCpu);
    uint8_t const u2OldState = *pu2State;
    /* Desired state: writable beats readable beats unmapped. */
    uint8_t const u2NewState = fPageProt & NEM_PAGE_PROT_WRITE ? NEM_WIN_PAGE_STATE_WRITABLE
                             : fPageProt & NEM_PAGE_PROT_READ  ? NEM_WIN_PAGE_STATE_READABLE : NEM_WIN_PAGE_STATE_UNMAPPED;
    if (   fBackingChanged
        || u2NewState != u2OldState)
    {
        /* Unmap the current mapping first; if unmapped is also the target
           state we are done after a successful unmap. */
        if (u2OldState > NEM_WIN_PAGE_STATE_UNMAPPED)
        {
            STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
            HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst, X86_PAGE_SIZE);
            STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
            if (SUCCEEDED(hrc))
            {
                *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
                STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
                uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
                if (u2NewState == NEM_WIN_PAGE_STATE_UNMAPPED)
                {
                    Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n",
                          GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
                    return VINF_SUCCESS;
                }
            }
            else
            {
                STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
                LogRel(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
                        GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
                return VERR_NEM_INIT_FAILED;
            }
        }
    }

    /*
     * Writeable mapping?
     */
    if (fPageProt & NEM_PAGE_PROT_WRITE)
    {
        void *pvPage;
        int rc = nemR3NativeGCPhys2R3PtrWriteable(pVM, GCPhysSrc, &pvPage);
        if (RT_SUCCESS(rc))
        {
            /* Writable pages are mapped read+write+execute. */
            HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvPage, GCPhysDst, X86_PAGE_SIZE,
                                         WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute | WHvMapGpaRangeFlagWrite);
            if (SUCCEEDED(hrc))
            {
                *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
                STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPage);
                uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
                Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
                      GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
                return VINF_SUCCESS;
            }
            STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
            LogRel(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
                    GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
            return VERR_NEM_INIT_FAILED;
        }
        LogRel(("nemHCNativeSetPhysPage/writable: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
        return rc;
    }

    /* Read-only mapping (read+execute, no write flag). */
    if (fPageProt & NEM_PAGE_PROT_READ)
    {
        const void *pvPage;
        int rc = nemR3NativeGCPhys2R3PtrReadOnly(pVM, GCPhysSrc, &pvPage);
        if (RT_SUCCESS(rc))
        {
            STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRangePage, a);
            HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, (void *)pvPage, GCPhysDst, X86_PAGE_SIZE,
                                         WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
            STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRangePage, a);
            if (SUCCEEDED(hrc))
            {
                *pu2State = NEM_WIN_PAGE_STATE_READABLE;
                STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPage);
                uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
                Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
                      GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
                return VINF_SUCCESS;
            }
            STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
            LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
                    GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
            return VERR_NEM_INIT_FAILED;
        }
        LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
        return rc;
    }

    /* We already unmapped it above. */
    *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
    return VINF_SUCCESS;
}
3264
3265
3266NEM_TMPL_STATIC int nemHCJustUnmapPageFromHyperV(PVMCC pVM, RTGCPHYS GCPhysDst, uint8_t *pu2State)
3267{
3268 if (*pu2State <= NEM_WIN_PAGE_STATE_UNMAPPED)
3269 {
3270 Log5(("nemHCJustUnmapPageFromHyperV: %RGp == unmapped\n", GCPhysDst));
3271 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
3272 return VINF_SUCCESS;
3273 }
3274
3275 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
3276 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, X86_PAGE_SIZE);
3277 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
3278 if (SUCCEEDED(hrc))
3279 {
3280 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
3281 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
3282 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
3283 Log5(("nemHCJustUnmapPageFromHyperV: %RGp => unmapped (total %u)\n", GCPhysDst, cMappedPages));
3284 return VINF_SUCCESS;
3285 }
3286 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
3287 LogRel(("nemHCJustUnmapPageFromHyperV(%RGp): failed! hrc=%Rhrc (%#x) Last=%#x/%u\n",
3288 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
3289 return VERR_NEM_IPE_6;
3290}
3291
3292
3293int nemHCNativeNotifyPhysPageAllocated(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
3294 PGMPAGETYPE enmType, uint8_t *pu2State)
3295{
3296 Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
3297 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
3298 RT_NOREF_PV(HCPhys); RT_NOREF_PV(enmType);
3299
3300 int rc;
3301 RT_NOREF_PV(fPageProt);
3302 rc = nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
3303 return rc;
3304}
3305
3306
3307VMM_INT_DECL(void) NEMHCNotifyPhysPageProtChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, RTR3PTR pvR3, uint32_t fPageProt,
3308 PGMPAGETYPE enmType, uint8_t *pu2State)
3309{
3310 Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
3311 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
3312 Assert(VM_IS_NEM_ENABLED(pVM));
3313 RT_NOREF(HCPhys, enmType, pvR3);
3314
3315 RT_NOREF_PV(fPageProt);
3316 nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
3317}
3318
3319
3320VMM_INT_DECL(void) NEMHCNotifyPhysPageChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
3321 RTR3PTR pvNewR3, uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
3322{
3323 Log5(("nemHCNativeNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp pvNewR3=%p fPageProt=%#x enmType=%d *pu2State=%d\n",
3324 GCPhys, HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType, *pu2State));
3325 Assert(VM_IS_NEM_ENABLED(pVM));
3326 RT_NOREF(HCPhysPrev, HCPhysNew, pvNewR3, enmType);
3327
3328 RT_NOREF_PV(fPageProt);
3329 nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
3330}
3331
3332
/**
 * Returns features supported by the NEM backend.
 *
 * @returns Flags of features supported by the native NEM backend.
 * @param   pVM     The cross context VM structure (unused here).
 */
VMM_INT_DECL(uint32_t) NEMHCGetFeatures(PVMCC pVM)
{
    RT_NOREF(pVM);
    /** @todo Is NEM_FEAT_F_FULL_GST_EXEC always true? */
    return NEM_FEAT_F_NESTED_PAGING | NEM_FEAT_F_FULL_GST_EXEC;
}
3345
3346
3347/** @page pg_nem_win_aarmv8 NEM/win - Native Execution Manager, Windows.
3348 *
3349 * Open questions:
3350 * - InstructionByteCount and InstructionBytes for unmapped GPA exit are zero...
3351 */
3352
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette