VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/NEMR3Native-win-armv8.cpp@ 108472

Last change on this file since 108472 was 108411, checked in by vboxsync, 2 months ago

VMM/NEMR3Native-win-armv8.cpp: Drastically simplify the SMP hack for win.arm64 by just forcing the EMT out of the halt state and into the normal runloop where Hyper-V can manage the halting, bugref:10392

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 150.2 KB
Line 
1/* $Id: NEMR3Native-win-armv8.cpp 108411 2025-02-27 11:46:20Z vboxsync $ */
2/** @file
3 * NEM - Native execution manager, native ring-3 Windows backend.
4 *
5 * Log group 2: Exit logging.
6 * Log group 3: Log context on exit.
7 * Log group 5: Ring-3 memory management
8 * Log group 6: Ring-0 memory management
9 * Log group 12: API intercepts.
10 */
11
12/*
13 * Copyright (C) 2018-2024 Oracle and/or its affiliates.
14 *
15 * This file is part of VirtualBox base platform packages, as
16 * available from https://www.virtualbox.org.
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation, in version 3 of the
21 * License.
22 *
23 * This program is distributed in the hope that it will be useful, but
24 * WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
26 * General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, see <https://www.gnu.org/licenses>.
30 *
31 * SPDX-License-Identifier: GPL-3.0-only
32 */
33
34
35/*********************************************************************************************************************************
36* Header Files *
37*********************************************************************************************************************************/
38#define LOG_GROUP LOG_GROUP_NEM
39#define VMCPU_INCL_CPUM_GST_CTX
40#include <iprt/nt/nt-and-windows.h>
41#include <iprt/nt/hyperv.h>
42#include <WinHvPlatform.h>
43
44#ifndef _WIN32_WINNT_WIN10
45# error "Missing _WIN32_WINNT_WIN10"
46#endif
47#ifndef _WIN32_WINNT_WIN10_RS1 /* Missing define, causing trouble for us. */
48# define _WIN32_WINNT_WIN10_RS1 (_WIN32_WINNT_WIN10 + 1)
49#endif
50#include <sysinfoapi.h>
51#include <debugapi.h>
52#include <errhandlingapi.h>
53#include <fileapi.h>
54#include <winerror.h> /* no api header for this. */
55
56#include <VBox/dis.h>
57#include <VBox/vmm/nem.h>
58#include <VBox/vmm/iem.h>
59#include <VBox/vmm/em.h>
60#include <VBox/vmm/pdmapic.h>
61#include <VBox/vmm/pdm.h>
62#include <VBox/vmm/dbgftrace.h>
63#include "NEMInternal.h"
64#include <VBox/vmm/vmcc.h>
65
66#include <iprt/formats/arm-psci.h>
67
68#include <iprt/ldr.h>
69#include <iprt/path.h>
70#include <iprt/string.h>
71#include <iprt/system.h>
72#include <iprt/utf16.h>
73
74#ifndef NTDDI_WIN10_VB /* Present in W10 2004 SDK, quite possibly earlier. */
75HRESULT WINAPI WHvQueryGpaRangeDirtyBitmap(WHV_PARTITION_HANDLE, WHV_GUEST_PHYSICAL_ADDRESS, UINT64, UINT64 *, UINT32);
76# define WHvMapGpaRangeFlagTrackDirtyPages ((WHV_MAP_GPA_RANGE_FLAGS)0x00000008)
77#endif
78
79
80/*
81 * The following definitions appeared in build 27744 allow configuring the base address of the GICv3 controller,
82 * (there is no official SDK for this yet).
83 */
84/** @todo Better way of defining these which doesn't require casting later on when calling APIs. */
85#define WHV_PARTITION_PROPERTY_CODE_ARM64_IC_PARAMETERS UINT32_C(0x00001012)
86/** No GIC present. */
87#define WHV_ARM64_IC_EMULATION_MODE_NONE 0
88/** Hyper-V emulates a GICv3. */
89#define WHV_ARM64_IC_EMULATION_MODE_GICV3 1
90
/**
 * Configures the interrupt controller emulated by Hyper-V.
 *
 * Home-grown mirror of the (as yet undocumented) WHV_ARM64_IC_PARAMETERS
 * layout introduced in build 27744; passed to WHvSetPartitionProperty with
 * WHV_PARTITION_PROPERTY_CODE_ARM64_IC_PARAMETERS (see nemR3WinGicCreate).
 * Layout is pinned by the AssertCompileSize below - do not reorder fields.
 */
typedef struct MY_WHV_ARM64_IC_PARAMETERS
{
    uint32_t u32EmulationMode;  /**< WHV_ARM64_IC_EMULATION_MODE_XXX. */
    uint32_t u32Rsvd;           /**< Reserved / padding. */
    union
    {
        /** Parameters for WHV_ARM64_IC_EMULATION_MODE_GICV3. */
        struct
        {
            RTGCPHYS GCPhysGicdBase;            /**< Guest physical base address of the GIC distributor. */
            RTGCPHYS GCPhysGitsTranslaterBase;  /**< Guest physical base address of the GITS translator. */
            uint32_t u32Rsvd;                   /**< Reserved. */
            uint32_t cLpiIntIdBits;             /**< Number of LPI INTID bits (LPIs currently unused by our device emulations). */
            uint32_t u32PpiCntvOverflw;         /**< Absolute INTID of the virtual timer PPI (PPI number + 16, see nemR3WinGicCreate). */
            uint32_t u32PpiPmu;                 /**< Absolute INTID of the PMU PPI. */
            uint32_t au32Rsvd[6];               /**< Reserved. */
        } GicV3;
    } u;
} MY_WHV_ARM64_IC_PARAMETERS;
AssertCompileSize(MY_WHV_ARM64_IC_PARAMETERS, 64);
113
114
/**
 * The hypercall exit context.
 *
 * Home-grown definition for the WHvRunVpExitReasonHypercall exit on arm64,
 * since the SDK we build against lacks it.  Size pinned below.
 */
typedef struct MY_WHV_HYPERCALL_CONTEXT
{
    WHV_INTERCEPT_MESSAGE_HEADER Header;    /**< Common intercept message header. */
    uint16_t Immediate;                     /**< Presumably the immediate operand of the trapping HVC/SVC instruction - not verified here. */
    uint16_t u16Rsvd;                       /**< Reserved / padding. */
    uint32_t u32Rsvd;                       /**< Reserved / padding. */
    uint64_t X[18];                         /**< Guest general purpose registers x0..x17 at the time of the exit. */
} MY_WHV_HYPERCALL_CONTEXT;
typedef MY_WHV_HYPERCALL_CONTEXT *PMY_WHV_HYPERCALL_CONTEXT;
AssertCompileSize(MY_WHV_HYPERCALL_CONTEXT, 24 + 19 * sizeof(uint64_t));
128
129
/**
 * The ARM64 reset context.
 *
 * Home-grown definition for the reset/power-off exit on arm64, missing from
 * the SDK we build against.  Size pinned below.
 */
typedef struct MY_WHV_ARM64_RESET_CONTEXT
{
    WHV_INTERCEPT_MESSAGE_HEADER Header;    /**< Common intercept message header. */
    uint32_t ResetType;                     /**< WHV_ARM64_RESET_CONTEXT_TYPE_XXX. */
    uint32_t u32Rsvd;                       /**< Reserved / padding. */
} MY_WHV_ARM64_RESET_CONTEXT;
typedef MY_WHV_ARM64_RESET_CONTEXT *PMY_WHV_ARM64_RESET_CONTEXT;
AssertCompileSize(MY_WHV_ARM64_RESET_CONTEXT, 24 + 2 * sizeof(uint32_t));


/** Reset context type: guest requested power off. */
#define WHV_ARM64_RESET_CONTEXT_TYPE_POWER_OFF  0
/** Reset context type: guest requested a reset. */
#define WHV_ARM64_RESET_CONTEXT_TYPE_RESET      1
145
146
/**
 * The exit reason context for arm64, the size is different
 * from the default SDK we build against.
 *
 * Filled in by WHvRunVirtualProcessor; which union member is valid depends
 * on ExitReason.  Size pinned to 272 bytes below.
 */
typedef struct MY_WHV_RUN_VP_EXIT_CONTEXT
{
    WHV_RUN_VP_EXIT_REASON ExitReason;  /**< Why WHvRunVirtualProcessor returned. */
    uint32_t u32Rsvd;                   /**< Reserved / padding. */
    uint64_t u64Rsvd;                   /**< Reserved / padding. */
    union
    {
        WHV_MEMORY_ACCESS_CONTEXT           MemoryAccess;           /**< Guest physical/MMIO access exit. */
        WHV_RUN_VP_CANCELED_CONTEXT         CancelReason;           /**< WHvCancelRunVirtualProcessor exit. */
        MY_WHV_HYPERCALL_CONTEXT            Hypercall;              /**< Hypercall exit. */
        WHV_UNRECOVERABLE_EXCEPTION_CONTEXT UnrecoverableException; /**< Unrecoverable exception exit. */
        MY_WHV_ARM64_RESET_CONTEXT          Arm64Reset;             /**< Reset / power-off exit. */
        uint64_t                            au64Rsvd2[32];          /**< Pads the union to 256 bytes. */
    };
} MY_WHV_RUN_VP_EXIT_CONTEXT;
typedef MY_WHV_RUN_VP_EXIT_CONTEXT *PMY_WHV_RUN_VP_EXIT_CONTEXT;
AssertCompileSize(MY_WHV_RUN_VP_EXIT_CONTEXT, 272);
168
169#define My_WHvArm64RegisterGicrBaseGpa ((WHV_REGISTER_NAME)UINT32_C(0x00063000))
170
171
172/*********************************************************************************************************************************
173* Defined Constants And Macros *
174*********************************************************************************************************************************/
175
176
177/*********************************************************************************************************************************
178* Global Variables *
179*********************************************************************************************************************************/
180/** @name APIs imported from WinHvPlatform.dll
181 * @{ */
182static decltype(WHvGetCapability) * g_pfnWHvGetCapability;
183static decltype(WHvCreatePartition) * g_pfnWHvCreatePartition;
184static decltype(WHvSetupPartition) * g_pfnWHvSetupPartition;
185static decltype(WHvDeletePartition) * g_pfnWHvDeletePartition;
186static decltype(WHvGetPartitionProperty) * g_pfnWHvGetPartitionProperty;
187static decltype(WHvSetPartitionProperty) * g_pfnWHvSetPartitionProperty;
188static decltype(WHvMapGpaRange) * g_pfnWHvMapGpaRange;
189static decltype(WHvUnmapGpaRange) * g_pfnWHvUnmapGpaRange;
190static decltype(WHvTranslateGva) * g_pfnWHvTranslateGva;
191static decltype(WHvQueryGpaRangeDirtyBitmap) * g_pfnWHvQueryGpaRangeDirtyBitmap;
192static decltype(WHvCreateVirtualProcessor) * g_pfnWHvCreateVirtualProcessor;
193static decltype(WHvDeleteVirtualProcessor) * g_pfnWHvDeleteVirtualProcessor;
194static decltype(WHvRunVirtualProcessor) * g_pfnWHvRunVirtualProcessor;
195static decltype(WHvCancelRunVirtualProcessor) * g_pfnWHvCancelRunVirtualProcessor;
196static decltype(WHvGetVirtualProcessorRegisters) * g_pfnWHvGetVirtualProcessorRegisters;
197static decltype(WHvSetVirtualProcessorRegisters) * g_pfnWHvSetVirtualProcessorRegisters;
198static decltype(WHvSuspendPartitionTime) * g_pfnWHvSuspendPartitionTime;
199static decltype(WHvResumePartitionTime) * g_pfnWHvResumePartitionTime;
200decltype(WHvGetVirtualProcessorState) * g_pfnWHvGetVirtualProcessorState;
201decltype(WHvSetVirtualProcessorState) * g_pfnWHvSetVirtualProcessorState;
202decltype(WHvRequestInterrupt) * g_pfnWHvRequestInterrupt;
203/** @} */
204
205/** The Windows build number. */
206static uint32_t g_uBuildNo = 17134;
207
208
209
/**
 * Import instructions.
 *
 * Table driving the dynamic import resolution in nemR3WinInitProbeAndLoad():
 * each entry names an API to look up in one of the DLLs and the g_pfnXxx
 * variable to store the resolved address in.  Non-optional entries cause
 * initialization to fail when missing.
 */
static const struct
{
    uint8_t     idxDll;     /**< 0 for WinHvPlatform.dll, 1 for vid.dll. */
    bool        fOptional;  /**< Set if import is optional. */
    PFNRT      *ppfn;       /**< The function pointer variable. */
    const char *pszName;    /**< The function name. */
} g_aImports[] =
{
#define NEM_WIN_IMPORT(a_idxDll, a_fOptional, a_Name) { (a_idxDll), (a_fOptional), (PFNRT *)&RT_CONCAT(g_pfn,a_Name), #a_Name }
    NEM_WIN_IMPORT(0, false, WHvGetCapability),
    NEM_WIN_IMPORT(0, false, WHvCreatePartition),
    NEM_WIN_IMPORT(0, false, WHvSetupPartition),
    NEM_WIN_IMPORT(0, false, WHvDeletePartition),
    NEM_WIN_IMPORT(0, false, WHvGetPartitionProperty),
    NEM_WIN_IMPORT(0, false, WHvSetPartitionProperty),
    NEM_WIN_IMPORT(0, false, WHvMapGpaRange),
    NEM_WIN_IMPORT(0, false, WHvUnmapGpaRange),
    NEM_WIN_IMPORT(0, false, WHvTranslateGva),
    NEM_WIN_IMPORT(0, true,  WHvQueryGpaRangeDirtyBitmap), /* Optional: absent on older builds (pre W10 2004 SDK era). */
    NEM_WIN_IMPORT(0, false, WHvCreateVirtualProcessor),
    NEM_WIN_IMPORT(0, false, WHvDeleteVirtualProcessor),
    NEM_WIN_IMPORT(0, false, WHvRunVirtualProcessor),
    NEM_WIN_IMPORT(0, false, WHvCancelRunVirtualProcessor),
    NEM_WIN_IMPORT(0, false, WHvGetVirtualProcessorRegisters),
    NEM_WIN_IMPORT(0, false, WHvSetVirtualProcessorRegisters),
    NEM_WIN_IMPORT(0, false, WHvSuspendPartitionTime),
    NEM_WIN_IMPORT(0, false, WHvResumePartitionTime),
    NEM_WIN_IMPORT(0, false, WHvGetVirtualProcessorState),
    NEM_WIN_IMPORT(0, false, WHvSetVirtualProcessorState),
    NEM_WIN_IMPORT(0, false, WHvRequestInterrupt),
#undef NEM_WIN_IMPORT
};
245
246
247/*
248 * Let the preprocessor alias the APIs to import variables for better autocompletion.
249 */
250#ifndef IN_SLICKEDIT
251# define WHvGetCapability g_pfnWHvGetCapability
252# define WHvCreatePartition g_pfnWHvCreatePartition
253# define WHvSetupPartition g_pfnWHvSetupPartition
254# define WHvDeletePartition g_pfnWHvDeletePartition
255# define WHvGetPartitionProperty g_pfnWHvGetPartitionProperty
256# define WHvSetPartitionProperty g_pfnWHvSetPartitionProperty
257# define WHvMapGpaRange g_pfnWHvMapGpaRange
258# define WHvUnmapGpaRange g_pfnWHvUnmapGpaRange
259# define WHvTranslateGva g_pfnWHvTranslateGva
260# define WHvQueryGpaRangeDirtyBitmap g_pfnWHvQueryGpaRangeDirtyBitmap
261# define WHvCreateVirtualProcessor g_pfnWHvCreateVirtualProcessor
262# define WHvDeleteVirtualProcessor g_pfnWHvDeleteVirtualProcessor
263# define WHvRunVirtualProcessor g_pfnWHvRunVirtualProcessor
264# define WHvGetRunExitContextSize g_pfnWHvGetRunExitContextSize
265# define WHvCancelRunVirtualProcessor g_pfnWHvCancelRunVirtualProcessor
266# define WHvGetVirtualProcessorRegisters g_pfnWHvGetVirtualProcessorRegisters
267# define WHvSetVirtualProcessorRegisters g_pfnWHvSetVirtualProcessorRegisters
268# define WHvSuspendPartitionTime g_pfnWHvSuspendPartitionTime
269# define WHvResumePartitionTime g_pfnWHvResumePartitionTime
270# define WHvGetVirtualProcessorState g_pfnWHvGetVirtualProcessorState
271# define WHvSetVirtualProcessorState g_pfnWHvSetVirtualProcessorState
272# define WHvRequestInterrupt g_pfnWHvRequestInterrupt
273#endif
274
275#if 0 /* unused */
276/** WHV_MEMORY_ACCESS_TYPE names */
277static const char * const g_apszWHvMemAccesstypes[4] = { "read", "write", "exec", "!undefined!" };
278#endif
279/** NEM_WIN_PAGE_STATE_XXX names. */
280NEM_TMPL_STATIC const char * const g_apszPageStates[4] = { "not-set", "unmapped", "readable", "writable" };
281#ifdef LOG_ENABLED
282/** HV_INTERCEPT_ACCESS_TYPE names. */
283static const char * const g_apszHvInterceptAccessTypes[4] = { "read", "write", "exec", "!undefined!" };
284#endif
285
286
287/*********************************************************************************************************************************
288* Internal Functions *
289*********************************************************************************************************************************/
290DECLINLINE(int) nemR3NativeGCPhys2R3PtrReadOnly(PVM pVM, RTGCPHYS GCPhys, const void **ppv);
291DECLINLINE(int) nemR3NativeGCPhys2R3PtrWriteable(PVM pVM, RTGCPHYS GCPhys, void **ppv);
292
293NEM_TMPL_STATIC int nemHCNativeSetPhysPage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
294 uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged);
295
296/**
297 * Worker for nemR3NativeInit that probes and load the native API.
298 *
299 * @returns VBox status code.
300 * @param fForced Whether the HMForced flag is set and we should
301 * fail if we cannot initialize.
302 * @param pErrInfo Where to always return error info.
303 */
304static int nemR3WinInitProbeAndLoad(bool fForced, PRTERRINFO pErrInfo)
305{
306 /*
307 * Check that the DLL files we need are present, but without loading them.
308 * We'd like to avoid loading them unnecessarily.
309 */
310 WCHAR wszPath[MAX_PATH + 64];
311 UINT cwcPath = GetSystemDirectoryW(wszPath, MAX_PATH);
312 if (cwcPath >= MAX_PATH || cwcPath < 2)
313 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "GetSystemDirectoryW failed (%#x / %u)", cwcPath, GetLastError());
314
315 if (wszPath[cwcPath - 1] != '\\' || wszPath[cwcPath - 1] != '/')
316 wszPath[cwcPath++] = '\\';
317 RTUtf16CopyAscii(&wszPath[cwcPath], RT_ELEMENTS(wszPath) - cwcPath, "WinHvPlatform.dll");
318 if (GetFileAttributesW(wszPath) == INVALID_FILE_ATTRIBUTES)
319 return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE, "The native API dll was not found (%ls)", wszPath);
320
321 /*
322 * Check that we're in a VM and that the hypervisor identifies itself as Hyper-V.
323 */
324 /** @todo */
325
326 /** @todo would be great if we could recognize a root partition from the
327 * CPUID info, but I currently don't dare do that. */
328
329 /*
330 * Now try load the DLLs and resolve the APIs.
331 */
332 static const char * const s_apszDllNames[1] = { "WinHvPlatform.dll" };
333 RTLDRMOD ahMods[1] = { NIL_RTLDRMOD };
334 int rc = VINF_SUCCESS;
335 for (unsigned i = 0; i < RT_ELEMENTS(s_apszDllNames); i++)
336 {
337 int rc2 = RTLdrLoadSystem(s_apszDllNames[i], true /*fNoUnload*/, &ahMods[i]);
338 if (RT_FAILURE(rc2))
339 {
340 if (!RTErrInfoIsSet(pErrInfo))
341 RTErrInfoSetF(pErrInfo, rc2, "Failed to load API DLL: %s: %Rrc", s_apszDllNames[i], rc2);
342 else
343 RTErrInfoAddF(pErrInfo, rc2, "; %s: %Rrc", s_apszDllNames[i], rc2);
344 ahMods[i] = NIL_RTLDRMOD;
345 rc = VERR_NEM_INIT_FAILED;
346 }
347 }
348 if (RT_SUCCESS(rc))
349 {
350 for (unsigned i = 0; i < RT_ELEMENTS(g_aImports); i++)
351 {
352 int rc2 = RTLdrGetSymbol(ahMods[g_aImports[i].idxDll], g_aImports[i].pszName, (void **)g_aImports[i].ppfn);
353 if (RT_SUCCESS(rc2))
354 {
355 if (g_aImports[i].fOptional)
356 LogRel(("NEM: info: Found optional import %s!%s.\n",
357 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName));
358 }
359 else
360 {
361 *g_aImports[i].ppfn = NULL;
362
363 LogRel(("NEM: %s: Failed to import %s!%s: %Rrc",
364 g_aImports[i].fOptional ? "info" : fForced ? "fatal" : "error",
365 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName, rc2));
366 if (!g_aImports[i].fOptional)
367 {
368 if (RTErrInfoIsSet(pErrInfo))
369 RTErrInfoAddF(pErrInfo, rc2, ", %s!%s",
370 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName);
371 else
372 rc = RTErrInfoSetF(pErrInfo, rc2, "Failed to import: %s!%s",
373 s_apszDllNames[g_aImports[i].idxDll], g_aImports[i].pszName);
374 Assert(RT_FAILURE(rc));
375 }
376 }
377 }
378 if (RT_SUCCESS(rc))
379 {
380 Assert(!RTErrInfoIsSet(pErrInfo));
381 }
382 }
383
384 for (unsigned i = 0; i < RT_ELEMENTS(ahMods); i++)
385 RTLdrClose(ahMods[i]);
386 return rc;
387}
388
389
/**
 * Wrapper for different WHvGetCapability signatures.
 *
 * Hides the trailing optional output parameter (passed as NULL) so callers
 * only deal with the capability code, buffer and buffer size.
 */
DECLINLINE(HRESULT) WHvGetCapabilityWrapper(WHV_CAPABILITY_CODE enmCap, WHV_CAPABILITY *pOutput, uint32_t cbOutput)
{
    return g_pfnWHvGetCapability(enmCap, pOutput, cbOutput, NULL);
}
397
398
399/**
400 * Worker for nemR3NativeInit that gets the hypervisor capabilities.
401 *
402 * @returns VBox status code.
403 * @param pVM The cross context VM structure.
404 * @param pErrInfo Where to always return error info.
405 */
406static int nemR3WinInitCheckCapabilities(PVM pVM, PRTERRINFO pErrInfo)
407{
408#define NEM_LOG_REL_CAP_EX(a_szField, a_szFmt, a_Value) LogRel(("NEM: %-38s= " a_szFmt "\n", a_szField, a_Value))
409#define NEM_LOG_REL_CAP_SUB_EX(a_szField, a_szFmt, a_Value) LogRel(("NEM: %36s: " a_szFmt "\n", a_szField, a_Value))
410#define NEM_LOG_REL_CAP_SUB(a_szField, a_Value) NEM_LOG_REL_CAP_SUB_EX(a_szField, "%d", a_Value)
411
412 /*
413 * Is the hypervisor present with the desired capability?
414 *
415 * In build 17083 this translates into:
416 * - CPUID[0x00000001].HVP is set
417 * - CPUID[0x40000000] == "Microsoft Hv"
418 * - CPUID[0x40000001].eax == "Hv#1"
419 * - CPUID[0x40000003].ebx[12] is set.
420 * - VidGetExoPartitionProperty(INVALID_HANDLE_VALUE, 0x60000, &Ignored) returns
421 * a non-zero value.
422 */
423 /**
424 * @todo Someone at Microsoft please explain weird API design:
425 * 1. Pointless CapabilityCode duplication int the output;
426 * 2. No output size.
427 */
428 WHV_CAPABILITY Caps;
429 RT_ZERO(Caps);
430 SetLastError(0);
431 HRESULT hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeHypervisorPresent, &Caps, sizeof(Caps));
432 DWORD rcWin = GetLastError();
433 if (FAILED(hrc))
434 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
435 "WHvGetCapability/WHvCapabilityCodeHypervisorPresent failed: %Rhrc (Last=%#x/%u)",
436 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
437 if (!Caps.HypervisorPresent)
438 {
439 if (!RTPathExists(RTPATH_NT_PASSTHRU_PREFIX "Device\\VidExo"))
440 return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE,
441 "WHvCapabilityCodeHypervisorPresent is FALSE! Make sure you have enabled the 'Windows Hypervisor Platform' feature.");
442 return RTErrInfoSetF(pErrInfo, VERR_NEM_NOT_AVAILABLE, "WHvCapabilityCodeHypervisorPresent is FALSE! (%u)", rcWin);
443 }
444 LogRel(("NEM: WHvCapabilityCodeHypervisorPresent is TRUE, so this might work...\n"));
445
446
447 /*
448 * Check what extended VM exits are supported.
449 */
450 RT_ZERO(Caps);
451 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeExtendedVmExits, &Caps, sizeof(Caps));
452 if (FAILED(hrc))
453 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
454 "WHvGetCapability/WHvCapabilityCodeExtendedVmExits failed: %Rhrc (Last=%#x/%u)",
455 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
456 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeExtendedVmExits", "%'#018RX64", Caps.ExtendedVmExits.AsUINT64);
457 pVM->nem.s.fHypercallExit = RT_BOOL(Caps.ExtendedVmExits.HypercallExit);
458 pVM->nem.s.fGpaAccessFaultExit = RT_BOOL(Caps.ExtendedVmExits.GpaAccessFaultExit);
459 NEM_LOG_REL_CAP_SUB("fHypercallExit", pVM->nem.s.fHypercallExit);
460 NEM_LOG_REL_CAP_SUB("fGpaAccessFaultExit", pVM->nem.s.fGpaAccessFaultExit);
461 if (Caps.ExtendedVmExits.AsUINT64 & ~(uint64_t)7)
462 LogRel(("NEM: Warning! Unknown VM exit definitions: %#RX64\n", Caps.ExtendedVmExits.AsUINT64));
463 /** @todo RECHECK: WHV_EXTENDED_VM_EXITS typedef. */
464
465 /*
466 * Check features in case they end up defining any.
467 */
468 RT_ZERO(Caps);
469 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeFeatures, &Caps, sizeof(Caps));
470 if (FAILED(hrc))
471 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
472 "WHvGetCapability/WHvCapabilityCodeFeatures failed: %Rhrc (Last=%#x/%u)",
473 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
474 if (Caps.Features.AsUINT64 & ~(uint64_t)0)
475 LogRel(("NEM: Warning! Unknown feature definitions: %#RX64\n", Caps.Features.AsUINT64));
476 /** @todo RECHECK: WHV_CAPABILITY_FEATURES typedef. */
477
478 /*
479 * Check that the CPU vendor is supported.
480 */
481 RT_ZERO(Caps);
482 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeProcessorVendor, &Caps, sizeof(Caps));
483 if (FAILED(hrc))
484 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
485 "WHvGetCapability/WHvCapabilityCodeProcessorVendor failed: %Rhrc (Last=%#x/%u)",
486 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
487 switch (Caps.ProcessorVendor)
488 {
489 /** @todo RECHECK: WHV_PROCESSOR_VENDOR typedef. */
490 case WHvProcessorVendorArm:
491 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorVendor", "%d - ARM", Caps.ProcessorVendor);
492 pVM->nem.s.enmCpuVendor = CPUMCPUVENDOR_UNKNOWN;
493 break;
494 default:
495 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorVendor", "%d", Caps.ProcessorVendor);
496 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Unknown processor vendor: %d", Caps.ProcessorVendor);
497 }
498
499 /*
500 * CPU features, guessing these are virtual CPU features?
501 */
502 RT_ZERO(Caps);
503 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeProcessorFeatures, &Caps, sizeof(Caps));
504 if (FAILED(hrc))
505 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
506 "WHvGetCapability/WHvCapabilityCodeProcessorFeatures failed: %Rhrc (Last=%#x/%u)",
507 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
508 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorFeatures", "%'#018RX64", Caps.ProcessorFeatures.AsUINT64);
509#define NEM_LOG_REL_CPU_FEATURE(a_Field) NEM_LOG_REL_CAP_SUB(#a_Field, Caps.ProcessorFeatures.a_Field)
510 NEM_LOG_REL_CPU_FEATURE(Asid16);
511 NEM_LOG_REL_CPU_FEATURE(TGran16);
512 NEM_LOG_REL_CPU_FEATURE(TGran64);
513 NEM_LOG_REL_CPU_FEATURE(Haf);
514 NEM_LOG_REL_CPU_FEATURE(Hdbs);
515 NEM_LOG_REL_CPU_FEATURE(Pan);
516 NEM_LOG_REL_CPU_FEATURE(AtS1E1);
517 NEM_LOG_REL_CPU_FEATURE(Uao);
518 NEM_LOG_REL_CPU_FEATURE(El0Aarch32);
519 NEM_LOG_REL_CPU_FEATURE(Fp);
520 NEM_LOG_REL_CPU_FEATURE(FpHp);
521 NEM_LOG_REL_CPU_FEATURE(AdvSimd);
522 NEM_LOG_REL_CPU_FEATURE(AdvSimdHp);
523 NEM_LOG_REL_CPU_FEATURE(GicV3V4);
524 NEM_LOG_REL_CPU_FEATURE(GicV41);
525 NEM_LOG_REL_CPU_FEATURE(Ras);
526 NEM_LOG_REL_CPU_FEATURE(PmuV3);
527 NEM_LOG_REL_CPU_FEATURE(PmuV3ArmV81);
528 NEM_LOG_REL_CPU_FEATURE(PmuV3ArmV84);
529 NEM_LOG_REL_CPU_FEATURE(PmuV3ArmV85);
530 NEM_LOG_REL_CPU_FEATURE(Aes);
531 NEM_LOG_REL_CPU_FEATURE(PolyMul);
532 NEM_LOG_REL_CPU_FEATURE(Sha1);
533 NEM_LOG_REL_CPU_FEATURE(Sha256);
534 NEM_LOG_REL_CPU_FEATURE(Sha512);
535 NEM_LOG_REL_CPU_FEATURE(Crc32);
536 NEM_LOG_REL_CPU_FEATURE(Atomic);
537 NEM_LOG_REL_CPU_FEATURE(Rdm);
538 NEM_LOG_REL_CPU_FEATURE(Sha3);
539 NEM_LOG_REL_CPU_FEATURE(Sm3);
540 NEM_LOG_REL_CPU_FEATURE(Sm4);
541 NEM_LOG_REL_CPU_FEATURE(Dp);
542 NEM_LOG_REL_CPU_FEATURE(Fhm);
543 NEM_LOG_REL_CPU_FEATURE(DcCvap);
544 NEM_LOG_REL_CPU_FEATURE(DcCvadp);
545 NEM_LOG_REL_CPU_FEATURE(ApaBase);
546 NEM_LOG_REL_CPU_FEATURE(ApaEp);
547 NEM_LOG_REL_CPU_FEATURE(ApaEp2);
548 NEM_LOG_REL_CPU_FEATURE(ApaEp2Fp);
549 NEM_LOG_REL_CPU_FEATURE(ApaEp2Fpc);
550 NEM_LOG_REL_CPU_FEATURE(Jscvt);
551 NEM_LOG_REL_CPU_FEATURE(Fcma);
552 NEM_LOG_REL_CPU_FEATURE(RcpcV83);
553 NEM_LOG_REL_CPU_FEATURE(RcpcV84);
554 NEM_LOG_REL_CPU_FEATURE(Gpa);
555 NEM_LOG_REL_CPU_FEATURE(L1ipPipt);
556 NEM_LOG_REL_CPU_FEATURE(DzPermitted);
557
558#undef NEM_LOG_REL_CPU_FEATURE
559 if (Caps.ProcessorFeatures.AsUINT64 & (~(RT_BIT_64(47) - 1)))
560 LogRel(("NEM: Warning! Unknown CPU features: %#RX64\n", Caps.ProcessorFeatures.AsUINT64));
561 pVM->nem.s.uCpuFeatures.u64 = Caps.ProcessorFeatures.AsUINT64;
562 /** @todo RECHECK: WHV_PROCESSOR_FEATURES typedef. */
563
564 /*
565 * The cache line flush size.
566 */
567 RT_ZERO(Caps);
568 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodeProcessorClFlushSize, &Caps, sizeof(Caps));
569 if (FAILED(hrc))
570 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
571 "WHvGetCapability/WHvCapabilityCodeProcessorClFlushSize failed: %Rhrc (Last=%#x/%u)",
572 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
573 NEM_LOG_REL_CAP_EX("WHvCapabilityCodeProcessorClFlushSize", "2^%u", Caps.ProcessorClFlushSize);
574 if (Caps.ProcessorClFlushSize < 8 && Caps.ProcessorClFlushSize > 9)
575 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Unsupported cache line flush size: %u", Caps.ProcessorClFlushSize);
576 pVM->nem.s.cCacheLineFlushShift = Caps.ProcessorClFlushSize;
577
578 RT_ZERO(Caps);
579 hrc = WHvGetCapabilityWrapper(WHvCapabilityCodePhysicalAddressWidth, &Caps, sizeof(Caps));
580 if (FAILED(hrc))
581 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
582 "WHvGetCapability/WHvCapabilityCodePhysicalAddressWidth failed: %Rhrc (Last=%#x/%u)",
583 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
584 NEM_LOG_REL_CAP_EX("WHvCapabilityCodePhysicalAddressWidth", "2^%u", Caps.PhysicalAddressWidth);
585 if (Caps.PhysicalAddressWidth < 32 && Caps.PhysicalAddressWidth > 52)
586 return RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "Unsupported physical address width: %u", Caps.ProcessorClFlushSize);
587 pVM->nem.s.cPhysicalAddressWidth = Caps.PhysicalAddressWidth;
588
589
590 /*
591 * See if they've added more properties that we're not aware of.
592 */
593 /** @todo RECHECK: WHV_CAPABILITY_CODE typedef. */
594 if (!IsDebuggerPresent()) /* Too noisy when in debugger, so skip. */
595 {
596 static const struct
597 {
598 uint32_t iMin, iMax; } s_aUnknowns[] =
599 {
600 { 0x0004, 0x000f },
601 { 0x1003, 0x100f },
602 { 0x2000, 0x200f },
603 { 0x3000, 0x300f },
604 { 0x4000, 0x400f },
605 };
606 for (uint32_t j = 0; j < RT_ELEMENTS(s_aUnknowns); j++)
607 for (uint32_t i = s_aUnknowns[j].iMin; i <= s_aUnknowns[j].iMax; i++)
608 {
609 RT_ZERO(Caps);
610 hrc = WHvGetCapabilityWrapper((WHV_CAPABILITY_CODE)i, &Caps, sizeof(Caps));
611 if (SUCCEEDED(hrc))
612 LogRel(("NEM: Warning! Unknown capability %#x returning: %.*Rhxs\n", i, sizeof(Caps), &Caps));
613 }
614 }
615
616 /*
617 * For proper operation, we require CPUID exits.
618 */
619 /** @todo Any? */
620
621#undef NEM_LOG_REL_CAP_EX
622#undef NEM_LOG_REL_CAP_SUB_EX
623#undef NEM_LOG_REL_CAP_SUB
624 return VINF_SUCCESS;
625}
626
627
/**
 * Initializes the GIC controller emulation provided by Hyper-V.
 *
 * Reads the distributor/redistributor/ITS MMIO bases from the gic-nem device
 * configuration and programs the partition's GICv3 parameters.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 *
 * @note Needs to be done early when setting up the partition so this has to live here and not in GICNem-win.cpp
 */
static int nemR3WinGicCreate(PVM pVM)
{
    PCFGMNODE pGicCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "Devices/gic-nem/0/Config");
    AssertPtrReturn(pGicCfg, VERR_NEM_IPE_5);

    /*
     * Query the MMIO ranges.
     */
    RTGCPHYS GCPhysMmioBaseDist = 0;
    int rc = CFGMR3QueryU64(pGicCfg, "DistributorMmioBase", &GCPhysMmioBaseDist);
    if (RT_FAILURE(rc))
        return VMSetError(pVM, rc, RT_SRC_POS,
                          "Configuration error: Failed to get the \"DistributorMmioBase\" value\n");

    RTGCPHYS GCPhysMmioBaseReDist = 0;
    rc = CFGMR3QueryU64(pGicCfg, "RedistributorMmioBase", &GCPhysMmioBaseReDist);
    if (RT_FAILURE(rc))
        return VMSetError(pVM, rc, RT_SRC_POS,
                          "Configuration error: Failed to get the \"RedistributorMmioBase\" value\n");

    RTGCPHYS GCPhysMmioBaseIts = 0;
    rc = CFGMR3QueryU64(pGicCfg, "ItsMmioBase", &GCPhysMmioBaseIts);
    if (RT_FAILURE(rc))
        return VMSetError(pVM, rc, RT_SRC_POS,
                          "Configuration error: Failed to get the \"ItsMmioBase\" value\n");

    /*
     * One can only set the GIC distributor base. The re-distributor regions for the individual
     * vCPUs are configured when the vCPUs are created, so we need to save the base of the MMIO region.
     */
    pVM->nem.s.GCPhysMmioBaseReDist = GCPhysMmioBaseReDist;

    WHV_PARTITION_HANDLE hPartition = pVM->nem.s.hPartition;

    /* Fill in the undocumented GICv3 parameter block (see MY_WHV_ARM64_IC_PARAMETERS). */
    MY_WHV_ARM64_IC_PARAMETERS Property; RT_ZERO(Property);
    Property.u32EmulationMode = WHV_ARM64_IC_EMULATION_MODE_GICV3;
    Property.u.GicV3.GCPhysGicdBase           = GCPhysMmioBaseDist;
    Property.u.GicV3.GCPhysGitsTranslaterBase = GCPhysMmioBaseIts;
    Property.u.GicV3.cLpiIntIdBits            = 1; /** @todo LPIs are currently not supported with our device emulations. */
    Property.u.GicV3.u32PpiCntvOverflw        = pVM->nem.s.u32GicPpiVTimer + 16; /* Calculate the absolute timer INTID. */
    Property.u.GicV3.u32PpiPmu                = 23; /** @todo Configure dynamically (from SBSA, needs a PMU/NEM emulation just like with the GIC probably). */
    HRESULT hrc = WHvSetPartitionProperty(hPartition, (WHV_PARTITION_PROPERTY_CODE)WHV_PARTITION_PROPERTY_CODE_ARM64_IC_PARAMETERS, &Property, sizeof(Property));
    if (FAILED(hrc))
        return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
                          "Failed to set WHvPartitionPropertyCodeArm64IcParameters: %Rhrc (Last=%#x/%u)",
                          hrc, RTNtLastStatusValue(), RTNtLastErrorValue());

    /* rc is VINF_SUCCESS here (all CFGM queries succeeded). */
    return rc;
}
685
686
687/**
688 * Creates and sets up a Hyper-V (exo) partition.
689 *
690 * @returns VBox status code.
691 * @param pVM The cross context VM structure.
692 * @param pErrInfo Where to always return error info.
693 */
694static int nemR3WinInitCreatePartition(PVM pVM, PRTERRINFO pErrInfo)
695{
696 AssertReturn(!pVM->nem.s.hPartition, RTErrInfoSet(pErrInfo, VERR_WRONG_ORDER, "Wrong initalization order"));
697 AssertReturn(!pVM->nem.s.hPartitionDevice, RTErrInfoSet(pErrInfo, VERR_WRONG_ORDER, "Wrong initalization order"));
698
699 /*
700 * Create the partition.
701 */
702 WHV_PARTITION_HANDLE hPartition;
703 HRESULT hrc = WHvCreatePartition(&hPartition);
704 if (FAILED(hrc))
705 return RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED, "WHvCreatePartition failed with %Rhrc (Last=%#x/%u)",
706 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
707
708 int rc;
709
710 /*
711 * Set partition properties, most importantly the CPU count.
712 */
713 /**
714 * @todo Someone at Microsoft please explain another weird API:
715 * - Why this API doesn't take the WHV_PARTITION_PROPERTY_CODE value as an
716 * argument rather than as part of the struct. That is so weird if you've
717 * used any other NT or windows API, including WHvGetCapability().
718 * - Why use PVOID when WHV_PARTITION_PROPERTY is what's expected. We
719 * technically only need 9 bytes for setting/getting
720 * WHVPartitionPropertyCodeProcessorClFlushSize, but the API insists on 16. */
721 WHV_PARTITION_PROPERTY Property;
722 RT_ZERO(Property);
723 Property.ProcessorCount = pVM->cCpus;
724 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorCount, &Property, sizeof(Property));
725 if (SUCCEEDED(hrc))
726 {
727 RT_ZERO(Property);
728 Property.ExtendedVmExits.HypercallExit = pVM->nem.s.fHypercallExit;
729 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeExtendedVmExits, &Property, sizeof(Property));
730 if (SUCCEEDED(hrc))
731 {
732 /*
733 * We'll continue setup in nemR3NativeInitAfterCPUM.
734 */
735 pVM->nem.s.fCreatedEmts = false;
736 pVM->nem.s.hPartition = hPartition;
737 LogRel(("NEM: Created partition %p.\n", hPartition));
738 return VINF_SUCCESS;
739 }
740
741 rc = RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED,
742 "Failed setting WHvPartitionPropertyCodeExtendedVmExits to %'#RX64: %Rhrc",
743 Property.ExtendedVmExits.AsUINT64, hrc);
744 }
745 else
746 rc = RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED,
747 "Failed setting WHvPartitionPropertyCodeProcessorCount to %u: %Rhrc (Last=%#x/%u)",
748 pVM->cCpus, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
749 WHvDeletePartition(hPartition);
750
751 Assert(!pVM->nem.s.hPartitionDevice);
752 Assert(!pVM->nem.s.hPartition);
753 return rc;
754}
755
756
757static int nemR3NativeInitSetupVm(PVM pVM)
758{
759 WHV_PARTITION_HANDLE hPartition = pVM->nem.s.hPartition;
760 AssertReturn(hPartition != NULL, VERR_WRONG_ORDER);
761 AssertReturn(!pVM->nem.s.hPartitionDevice, VERR_WRONG_ORDER);
762 AssertReturn(!pVM->nem.s.fCreatedEmts, VERR_WRONG_ORDER);
763
764 /*
765 * Continue setting up the partition now that we've got most of the CPUID feature stuff.
766 */
767 WHV_PARTITION_PROPERTY Property;
768 HRESULT hrc;
769
770 /* Not sure if we really need to set the cache line flush size. */
771 RT_ZERO(Property);
772 Property.ProcessorClFlushSize = pVM->nem.s.cCacheLineFlushShift;
773 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorClFlushSize, &Property, sizeof(Property));
774 if (FAILED(hrc))
775 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
776 "Failed to set WHvPartitionPropertyCodeProcessorClFlushSize to %u: %Rhrc (Last=%#x/%u)",
777 pVM->nem.s.cCacheLineFlushShift, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
778
779 /*
780 * Sync CPU features with CPUM.
781 */
782 /** @todo sync CPU features with CPUM. */
783
784 /* Set the partition property. */
785 RT_ZERO(Property);
786 Property.ProcessorFeatures.AsUINT64 = pVM->nem.s.uCpuFeatures.u64;
787 hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorFeatures, &Property, sizeof(Property));
788 if (FAILED(hrc))
789 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
790 "Failed to set WHvPartitionPropertyCodeProcessorFeatures to %'#RX64: %Rhrc (Last=%#x/%u)",
791 pVM->nem.s.uCpuFeatures.u64, hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
792
793 /* Configure the GIC. */
794 int rc = nemR3WinGicCreate(pVM);
795 if (RT_FAILURE(rc))
796 return rc;
797
798 /*
799 * Set up the partition.
800 *
801 * Seems like this is where the partition is actually instantiated and we get
802 * a handle to it.
803 */
804 hrc = WHvSetupPartition(hPartition);
805 if (FAILED(hrc))
806 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
807 "Call to WHvSetupPartition failed: %Rhrc (Last=%#x/%u)",
808 hrc, RTNtLastStatusValue(), RTNtLastErrorValue());
809
810 /*
811 * Setup the EMTs.
812 */
813 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
814 {
815 hrc = WHvCreateVirtualProcessor(hPartition, idCpu, 0 /*fFlags*/);
816 if (FAILED(hrc))
817 {
818 NTSTATUS const rcNtLast = RTNtLastStatusValue();
819 DWORD const dwErrLast = RTNtLastErrorValue();
820 while (idCpu-- > 0)
821 {
822 HRESULT hrc2 = WHvDeleteVirtualProcessor(hPartition, idCpu);
823 AssertLogRelMsg(SUCCEEDED(hrc2), ("WHvDeleteVirtualProcessor(%p, %u) -> %Rhrc (Last=%#x/%u)\n",
824 hPartition, idCpu, hrc2, RTNtLastStatusValue(),
825 RTNtLastErrorValue()));
826 }
827 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
828 "Call to WHvCreateVirtualProcessor failed: %Rhrc (Last=%#x/%u)", hrc, rcNtLast, dwErrLast);
829 }
830
831 if (idCpu == 0)
832 {
833 /*
834 * Need to query the ID registers and populate CPUM,
835 * these are partition wide registers and need to be queried/set with WHV_ANY_VP.
836 */
837 CPUMARMV8IDREGS IdRegs; RT_ZERO(IdRegs);
838
839 WHV_REGISTER_NAME aenmNames[10];
840 WHV_REGISTER_VALUE aValues[10];
841 RT_ZERO(aValues);
842
843 aenmNames[0] = WHvArm64RegisterIdAa64Dfr0El1;
844 aenmNames[1] = WHvArm64RegisterIdAa64Dfr1El1;
845 aenmNames[2] = WHvArm64RegisterIdAa64Isar0El1;
846 aenmNames[3] = WHvArm64RegisterIdAa64Isar1El1;
847 aenmNames[4] = WHvArm64RegisterIdAa64Isar2El1;
848 aenmNames[5] = WHvArm64RegisterIdAa64Mmfr0El1;
849 aenmNames[6] = WHvArm64RegisterIdAa64Mmfr1El1;
850 aenmNames[7] = WHvArm64RegisterIdAa64Mmfr2El1;
851 aenmNames[8] = WHvArm64RegisterIdAa64Pfr0El1;
852 aenmNames[9] = WHvArm64RegisterIdAa64Pfr1El1;
853
854 hrc = WHvGetVirtualProcessorRegisters(hPartition, WHV_ANY_VP /*idCpu*/, aenmNames, RT_ELEMENTS(aenmNames), aValues);
855 AssertLogRelMsgReturn(SUCCEEDED(hrc),
856 ("WHvGetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
857 hPartition, WHV_ANY_VP, RT_ELEMENTS(aenmNames), hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
858 , VERR_NEM_GET_REGISTERS_FAILED);
859
860 IdRegs.u64RegIdAa64Pfr0El1 = aValues[8].Reg64;
861 IdRegs.u64RegIdAa64Pfr1El1 = aValues[9].Reg64;
862 IdRegs.u64RegIdAa64Dfr0El1 = aValues[0].Reg64;
863 IdRegs.u64RegIdAa64Dfr1El1 = aValues[1].Reg64;
864 IdRegs.u64RegIdAa64Isar0El1 = aValues[2].Reg64;
865 IdRegs.u64RegIdAa64Isar1El1 = aValues[3].Reg64;
866 IdRegs.u64RegIdAa64Isar2El1 = aValues[4].Reg64;
867 IdRegs.u64RegIdAa64Mmfr0El1 = aValues[5].Reg64;
868 IdRegs.u64RegIdAa64Mmfr1El1 = aValues[6].Reg64;
869 IdRegs.u64RegIdAa64Mmfr2El1 = aValues[7].Reg64;
870
871 rc = CPUMR3PopulateFeaturesByIdRegisters(pVM, &IdRegs);
872 if (RT_FAILURE(rc))
873 return rc;
874
875 /* Apply any overrides to the partition. */
876 PCCPUMARMV8IDREGS pIdRegsGst = NULL;
877 rc = CPUMR3QueryGuestIdRegs(pVM, &pIdRegsGst);
878 AssertRCReturn(rc, rc);
879
880 aValues[0].Reg64 = pIdRegsGst->u64RegIdAa64Dfr0El1;
881 aValues[1].Reg64 = pIdRegsGst->u64RegIdAa64Dfr1El1;
882 aValues[2].Reg64 = pIdRegsGst->u64RegIdAa64Isar0El1;
883 aValues[3].Reg64 = pIdRegsGst->u64RegIdAa64Isar1El1;
884 aValues[4].Reg64 = pIdRegsGst->u64RegIdAa64Isar2El1;
885 aValues[5].Reg64 = pIdRegsGst->u64RegIdAa64Mmfr0El1;
886 aValues[6].Reg64 = pIdRegsGst->u64RegIdAa64Mmfr1El1;
887 aValues[7].Reg64 = pIdRegsGst->u64RegIdAa64Mmfr2El1;
888 aValues[8].Reg64 = pIdRegsGst->u64RegIdAa64Pfr0El1;
889 aValues[9].Reg64 = pIdRegsGst->u64RegIdAa64Pfr1El1;
890
891 hrc = WHvSetVirtualProcessorRegisters(hPartition, WHV_ANY_VP /*idCpu*/, aenmNames, RT_ELEMENTS(aenmNames), aValues);
892 AssertLogRelMsgReturn(SUCCEEDED(hrc),
893 ("WHvGetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
894 hPartition, WHV_ANY_VP, RT_ELEMENTS(aenmNames), hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
895 , VERR_NEM_SET_REGISTERS_FAILED);
896
897 /* Save the amount of break-/watchpoints supported for syncing the guest register state later. */
898 pVM->nem.s.cBreakpoints = RT_BF_GET(pIdRegsGst->u64RegIdAa64Dfr0El1, ARMV8_ID_AA64DFR0_EL1_BRPS) + 1;
899 pVM->nem.s.cWatchpoints = RT_BF_GET(pIdRegsGst->u64RegIdAa64Dfr0El1, ARMV8_ID_AA64DFR0_EL1_WRPS) + 1;
900 }
901
902 /* Configure the GIC re-distributor region for the GIC. */
903 WHV_REGISTER_NAME enmName = My_WHvArm64RegisterGicrBaseGpa;
904 WHV_REGISTER_VALUE Value;
905 Value.Reg64 = pVM->nem.s.GCPhysMmioBaseReDist + idCpu * _128K;
906
907 hrc = WHvSetVirtualProcessorRegisters(hPartition, idCpu, &enmName, 1, &Value);
908 AssertLogRelMsgReturn(SUCCEEDED(hrc),
909 ("WHvSetVirtualProcessorRegisters(%p, %u, WHvArm64RegisterGicrBaseGpa,) -> %Rhrc (Last=%#x/%u)\n",
910 hPartition, idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
911 , VERR_NEM_SET_REGISTERS_FAILED);
912 }
913
914 pVM->nem.s.fCreatedEmts = true;
915
916 LogRel(("NEM: Successfully set up partition\n"));
917 return VINF_SUCCESS;
918}
919
920
921/**
922 * Try initialize the native API.
923 *
924 * This may only do part of the job, more can be done in
925 * nemR3NativeInitAfterCPUM() and nemR3NativeInitCompleted().
926 *
927 * @returns VBox status code.
928 * @param pVM The cross context VM structure.
929 * @param fFallback Whether we're in fallback mode or use-NEM mode. In
930 * the latter we'll fail if we cannot initialize.
931 * @param fForced Whether the HMForced flag is set and we should
932 * fail if we cannot initialize.
933 */
int nemR3NativeInit(PVM pVM, bool fFallback, bool fForced)
{
    g_uBuildNo = RTSystemGetNtBuildNo();

    /*
     * Error state.
     * The error message will be non-empty on failure and 'rc' will be set too.
     */
    RTERRINFOSTATIC ErrInfo;
    PRTERRINFO pErrInfo = RTErrInfoInitStatic(&ErrInfo);
    int rc = nemR3WinInitProbeAndLoad(fForced, pErrInfo);
    if (RT_SUCCESS(rc))
    {
        /*
         * Check the capabilities of the hypervisor, starting with whether it's present.
         */
        rc = nemR3WinInitCheckCapabilities(pVM, pErrInfo);
        if (RT_SUCCESS(rc))
        {
            /*
             * Create and initialize a partition.
             */
            rc = nemR3WinInitCreatePartition(pVM, pErrInfo);
            if (RT_SUCCESS(rc))
            {
                /* Finish the partition setup (properties, GIC, virtual processors). */
                rc = nemR3NativeInitSetupVm(pVM);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Set ourselves as the execution engine and make config adjustments.
                     */
                    VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_NATIVE_API);
                    Log(("NEM: Marked active!\n"));
                    PGMR3EnableNemMode(pVM);

                    /*
                     * Register release statistics
                     */
                    STAMR3Register(pVM, (void *)&pVM->nem.s.cMappedPages, STAMTYPE_U32, STAMVISIBILITY_ALWAYS,
                                   "/NEM/PagesCurrentlyMapped", STAMUNIT_PAGES, "Number guest pages currently mapped by the VM");
                    STAMR3Register(pVM, (void *)&pVM->nem.s.StatMapPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
                                   "/NEM/PagesMapCalls", STAMUNIT_PAGES, "Calls to WHvMapGpaRange/HvCallMapGpaPages");
                    STAMR3Register(pVM, (void *)&pVM->nem.s.StatMapPageFailed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
                                   "/NEM/PagesMapFails", STAMUNIT_PAGES, "Calls to WHvMapGpaRange/HvCallMapGpaPages that failed");
                    STAMR3Register(pVM, (void *)&pVM->nem.s.StatUnmapPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
                                   "/NEM/PagesUnmapCalls", STAMUNIT_PAGES, "Calls to WHvUnmapGpaRange/HvCallUnmapGpaPages");
                    STAMR3Register(pVM, (void *)&pVM->nem.s.StatUnmapPageFailed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS,
                                   "/NEM/PagesUnmapFails", STAMUNIT_PAGES, "Calls to WHvUnmapGpaRange/HvCallUnmapGpaPages that failed");
                    STAMR3Register(pVM, &pVM->nem.s.StatProfMapGpaRange, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
                                   "/NEM/PagesMapGpaRange", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvMapGpaRange for bigger stuff");
                    STAMR3Register(pVM, &pVM->nem.s.StatProfUnmapGpaRange, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
                                   "/NEM/PagesUnmapGpaRange", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvUnmapGpaRange for bigger stuff");
                    STAMR3Register(pVM, &pVM->nem.s.StatProfMapGpaRangePage, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
                                   "/NEM/PagesMapGpaRangePage", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvMapGpaRange for single pages");
                    STAMR3Register(pVM, &pVM->nem.s.StatProfUnmapGpaRangePage, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
                                   "/NEM/PagesUnmapGpaRangePage", STAMUNIT_TICKS_PER_CALL, "Profiling calls to WHvUnmapGpaRange for single pages");

                    /* Per-VCPU statistics (exit reasons, state import/export counters, ...). */
                    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
                    {
                        PNEMCPU pNemCpu = &pVM->apCpusR3[idCpu]->nem.s;
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitPortIo, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of port I/O exits", "/NEM/CPU%u/ExitPortIo", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitMemUnmapped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of unmapped memory exits", "/NEM/CPU%u/ExitMemUnmapped", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitMemIntercept, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of intercepted memory exits", "/NEM/CPU%u/ExitMemIntercept", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitHalt, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of HLT exits", "/NEM/CPU%u/ExitHalt", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitInterruptWindow, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of interrupt window exits", "/NEM/CPU%u/ExitInterruptWindow", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitCpuId, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of CPUID exits", "/NEM/CPU%u/ExitCpuId", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitMsr, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of MSR access exits", "/NEM/CPU%u/ExitMsr", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitException, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of exception exits", "/NEM/CPU%u/ExitException", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionBp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #BP exits", "/NEM/CPU%u/ExitExceptionBp", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionDb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #DB exits", "/NEM/CPU%u/ExitExceptionDb", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionGp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #GP exits", "/NEM/CPU%u/ExitExceptionGp", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionGpMesa, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #GP exits from mesa driver", "/NEM/CPU%u/ExitExceptionGpMesa", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionUd, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #UD exits", "/NEM/CPU%u/ExitExceptionUd", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionUdHandled, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of handled #UD exits", "/NEM/CPU%u/ExitExceptionUdHandled", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitUnrecoverable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of unrecoverable exits", "/NEM/CPU%u/ExitUnrecoverable", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatGetMsgTimeout, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of get message timeouts/alerts", "/NEM/CPU%u/GetMsgTimeout", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuSuccess, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of successful CPU stops", "/NEM/CPU%u/StopCpuSuccess", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuPending, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of pending CPU stops", "/NEM/CPU%u/StopCpuPending", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuPendingAlerts,STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of pending CPU stop alerts", "/NEM/CPU%u/StopCpuPendingAlerts", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuPendingOdd, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of odd pending CPU stops (see code)", "/NEM/CPU%u/StopCpuPendingOdd", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatCancelChangedState, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of cancel changed state", "/NEM/CPU%u/CancelChangedState", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatCancelAlertedThread, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of cancel alerted EMT", "/NEM/CPU%u/CancelAlertedEMT", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnFFPre, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of pre execution FF breaks", "/NEM/CPU%u/BreakOnFFPre", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnFFPost, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of post execution FF breaks", "/NEM/CPU%u/BreakOnFFPost", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnCancel, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of cancel execution breaks", "/NEM/CPU%u/BreakOnCancel", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnStatus, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of status code breaks", "/NEM/CPU%u/BreakOnStatus", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatImportOnDemand, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of on-demand state imports", "/NEM/CPU%u/ImportOnDemand", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatImportOnReturn, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of state imports on loop return", "/NEM/CPU%u/ImportOnReturn", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatImportOnReturnSkipped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of skipped state imports on loop return", "/NEM/CPU%u/ImportOnReturnSkipped", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatQueryCpuTick, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TSC queries", "/NEM/CPU%u/QueryCpuTick", idCpu);
                    }

#if defined(VBOX_WITH_R0_MODULES) && !defined(VBOX_WITH_MINIMAL_R0)
                    /* Ring-0 provided hypervisor page stats; only available with a real support driver. */
                    if (!SUPR3IsDriverless())
                    {
                        PUVM pUVM = pVM->pUVM;
                        STAMR3RegisterRefresh(pUVM, &pVM->nem.s.R0Stats.cPagesAvailable, STAMTYPE_U64, STAMVISIBILITY_ALWAYS,
                                              STAMUNIT_PAGES, STAM_REFRESH_GRP_NEM, "Free pages available to the hypervisor",
                                              "/NEM/R0Stats/cPagesAvailable");
                        STAMR3RegisterRefresh(pUVM, &pVM->nem.s.R0Stats.cPagesInUse, STAMTYPE_U64, STAMVISIBILITY_ALWAYS,
                                              STAMUNIT_PAGES, STAM_REFRESH_GRP_NEM, "Pages in use by hypervisor",
                                              "/NEM/R0Stats/cPagesInUse");
                    }
#endif /* VBOX_WITH_R0_MODULES && !VBOX_WITH_MINIMAL_R0 */
                }
            }
        }
    }

    /*
     * We only fail if in forced mode, otherwise just log the complaint and return.
     */
    /* Either NEM is now the engine, or the helpers above left an error message. */
    Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API || RTErrInfoIsSet(pErrInfo));
    if (   (fForced || !fFallback)
        && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NATIVE_API)
        return VMSetError(pVM, RT_SUCCESS_NP(rc) ? VERR_NEM_NOT_AVAILABLE : rc, RT_SRC_POS, "%s", pErrInfo->pszMsg);

    if (RTErrInfoIsSet(pErrInfo))
        LogRel(("NEM: Not available: %s\n", pErrInfo->pszMsg));
    return VINF_SUCCESS;
}
1055
1056
1057/**
1058 * This is called after CPUMR3Init is done.
1059 *
1060 * @returns VBox status code.
1061 * @param pVM The VM handle..
1062 */
1063int nemR3NativeInitAfterCPUM(PVM pVM)
1064{
1065 /*
1066 * Validate sanity.
1067 */
1068 AssertReturn(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API, VERR_WRONG_ORDER);
1069
1070 /** @todo */
1071
1072 /*
1073 * Any hyper-v statistics we can get at now? HvCallMapStatsPage isn't accessible any more.
1074 */
1075 /** @todo stats */
1076
1077 /*
1078 * Adjust features.
1079 *
1080 * Note! We've already disabled X2APIC and MONITOR/MWAIT via CFGM during
1081 * the first init call.
1082 */
1083
1084 return VINF_SUCCESS;
1085}
1086
1087
1088int nemR3NativeInitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
1089{
1090 //BOOL fRet = SetThreadPriority(GetCurrentThread(), 0);
1091 //AssertLogRel(fRet);
1092
1093 NOREF(pVM); NOREF(enmWhat);
1094 return VINF_SUCCESS;
1095}
1096
1097
1098int nemR3NativeTerm(PVM pVM)
1099{
1100 /*
1101 * Delete the partition.
1102 */
1103 WHV_PARTITION_HANDLE hPartition = pVM->nem.s.hPartition;
1104 pVM->nem.s.hPartition = NULL;
1105 pVM->nem.s.hPartitionDevice = NULL;
1106 if (hPartition != NULL)
1107 {
1108 VMCPUID idCpu = pVM->nem.s.fCreatedEmts ? pVM->cCpus : 0;
1109 LogRel(("NEM: Destroying partition %p with its %u VCpus...\n", hPartition, idCpu));
1110 while (idCpu-- > 0)
1111 {
1112 HRESULT hrc = WHvDeleteVirtualProcessor(hPartition, idCpu);
1113 AssertLogRelMsg(SUCCEEDED(hrc), ("WHvDeleteVirtualProcessor(%p, %u) -> %Rhrc (Last=%#x/%u)\n",
1114 hPartition, idCpu, hrc, RTNtLastStatusValue(),
1115 RTNtLastErrorValue()));
1116 }
1117 WHvDeletePartition(hPartition);
1118 }
1119 pVM->nem.s.fCreatedEmts = false;
1120 return VINF_SUCCESS;
1121}
1122
1123
1124/**
1125 * VM reset notification.
1126 *
1127 * @param pVM The cross context VM structure.
1128 */
1129void nemR3NativeReset(PVM pVM)
1130{
1131 RT_NOREF(pVM);
1132}
1133
1134
1135/**
1136 * Reset CPU due to INIT IPI or hot (un)plugging.
1137 *
1138 * @param pVCpu The cross context virtual CPU structure of the CPU being
1139 * reset.
1140 * @param fInitIpi Whether this is the INIT IPI or hot (un)plugging case.
1141 */
1142void nemR3NativeResetCpu(PVMCPU pVCpu, bool fInitIpi)
1143{
1144 RT_NOREF(pVCpu, fInitIpi);
1145}
1146
1147
1148NEM_TMPL_STATIC int nemHCWinCopyStateToHyperV(PVMCC pVM, PVMCPUCC pVCpu)
1149{
1150 WHV_REGISTER_NAME aenmNames[128];
1151 WHV_REGISTER_VALUE aValues[128];
1152
1153 uint64_t const fWhat = ~pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL;
1154 if (!fWhat)
1155 return VINF_SUCCESS;
1156 uintptr_t iReg = 0;
1157
1158#define ADD_REG64(a_enmName, a_uValue) do { \
1159 aenmNames[iReg] = (a_enmName); \
1160 aValues[iReg].Reg128.High64 = 0; \
1161 aValues[iReg].Reg64 = (a_uValue).x; \
1162 iReg++; \
1163 } while (0)
1164#define ADD_REG64_RAW(a_enmName, a_uValue) do { \
1165 aenmNames[iReg] = (a_enmName); \
1166 aValues[iReg].Reg128.High64 = 0; \
1167 aValues[iReg].Reg64 = (a_uValue); \
1168 iReg++; \
1169 } while (0)
1170#define ADD_SYSREG64(a_enmName, a_uValue) do { \
1171 aenmNames[iReg] = (a_enmName); \
1172 aValues[iReg].Reg128.High64 = 0; \
1173 aValues[iReg].Reg64 = (a_uValue).u64; \
1174 iReg++; \
1175 } while (0)
1176#define ADD_REG128(a_enmName, a_uValue) do { \
1177 aenmNames[iReg] = (a_enmName); \
1178 aValues[iReg].Reg128.Low64 = (a_uValue).au64[0]; \
1179 aValues[iReg].Reg128.High64 = (a_uValue).au64[1]; \
1180 iReg++; \
1181 } while (0)
1182
1183 /* GPRs */
1184 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1185 {
1186 if (fWhat & CPUMCTX_EXTRN_X0)
1187 ADD_REG64(WHvArm64RegisterX0, pVCpu->cpum.GstCtx.aGRegs[0]);
1188 if (fWhat & CPUMCTX_EXTRN_X1)
1189 ADD_REG64(WHvArm64RegisterX1, pVCpu->cpum.GstCtx.aGRegs[1]);
1190 if (fWhat & CPUMCTX_EXTRN_X2)
1191 ADD_REG64(WHvArm64RegisterX2, pVCpu->cpum.GstCtx.aGRegs[2]);
1192 if (fWhat & CPUMCTX_EXTRN_X3)
1193 ADD_REG64(WHvArm64RegisterX3, pVCpu->cpum.GstCtx.aGRegs[3]);
1194 if (fWhat & CPUMCTX_EXTRN_X4_X28)
1195 {
1196 ADD_REG64(WHvArm64RegisterX4, pVCpu->cpum.GstCtx.aGRegs[4]);
1197 ADD_REG64(WHvArm64RegisterX5, pVCpu->cpum.GstCtx.aGRegs[5]);
1198 ADD_REG64(WHvArm64RegisterX6, pVCpu->cpum.GstCtx.aGRegs[6]);
1199 ADD_REG64(WHvArm64RegisterX7, pVCpu->cpum.GstCtx.aGRegs[7]);
1200 ADD_REG64(WHvArm64RegisterX8, pVCpu->cpum.GstCtx.aGRegs[8]);
1201 ADD_REG64(WHvArm64RegisterX9, pVCpu->cpum.GstCtx.aGRegs[9]);
1202 ADD_REG64(WHvArm64RegisterX10, pVCpu->cpum.GstCtx.aGRegs[10]);
1203 ADD_REG64(WHvArm64RegisterX11, pVCpu->cpum.GstCtx.aGRegs[11]);
1204 ADD_REG64(WHvArm64RegisterX12, pVCpu->cpum.GstCtx.aGRegs[12]);
1205 ADD_REG64(WHvArm64RegisterX13, pVCpu->cpum.GstCtx.aGRegs[13]);
1206 ADD_REG64(WHvArm64RegisterX14, pVCpu->cpum.GstCtx.aGRegs[14]);
1207 ADD_REG64(WHvArm64RegisterX15, pVCpu->cpum.GstCtx.aGRegs[15]);
1208 ADD_REG64(WHvArm64RegisterX16, pVCpu->cpum.GstCtx.aGRegs[16]);
1209 ADD_REG64(WHvArm64RegisterX17, pVCpu->cpum.GstCtx.aGRegs[17]);
1210 ADD_REG64(WHvArm64RegisterX18, pVCpu->cpum.GstCtx.aGRegs[18]);
1211 ADD_REG64(WHvArm64RegisterX19, pVCpu->cpum.GstCtx.aGRegs[19]);
1212 ADD_REG64(WHvArm64RegisterX20, pVCpu->cpum.GstCtx.aGRegs[20]);
1213 ADD_REG64(WHvArm64RegisterX21, pVCpu->cpum.GstCtx.aGRegs[21]);
1214 ADD_REG64(WHvArm64RegisterX22, pVCpu->cpum.GstCtx.aGRegs[22]);
1215 ADD_REG64(WHvArm64RegisterX23, pVCpu->cpum.GstCtx.aGRegs[23]);
1216 ADD_REG64(WHvArm64RegisterX24, pVCpu->cpum.GstCtx.aGRegs[24]);
1217 ADD_REG64(WHvArm64RegisterX25, pVCpu->cpum.GstCtx.aGRegs[25]);
1218 ADD_REG64(WHvArm64RegisterX26, pVCpu->cpum.GstCtx.aGRegs[26]);
1219 ADD_REG64(WHvArm64RegisterX27, pVCpu->cpum.GstCtx.aGRegs[27]);
1220 ADD_REG64(WHvArm64RegisterX28, pVCpu->cpum.GstCtx.aGRegs[28]);
1221 }
1222 if (fWhat & CPUMCTX_EXTRN_LR)
1223 ADD_REG64(WHvArm64RegisterLr, pVCpu->cpum.GstCtx.aGRegs[30]);
1224 if (fWhat & CPUMCTX_EXTRN_FP)
1225 ADD_REG64(WHvArm64RegisterFp, pVCpu->cpum.GstCtx.aGRegs[29]);
1226 }
1227
1228 /* RIP & Flags */
1229 if (fWhat & CPUMCTX_EXTRN_PC)
1230 ADD_SYSREG64(WHvArm64RegisterPc, pVCpu->cpum.GstCtx.Pc);
1231 if (fWhat & CPUMCTX_EXTRN_PSTATE)
1232 ADD_REG64_RAW(WHvArm64RegisterPstate, pVCpu->cpum.GstCtx.fPState);
1233 if (fWhat & CPUMCTX_EXTRN_SPSR)
1234 ADD_SYSREG64(WHvArm64RegisterSpsrEl1, pVCpu->cpum.GstCtx.Spsr);
1235 if (fWhat & CPUMCTX_EXTRN_ELR)
1236 ADD_SYSREG64(WHvArm64RegisterElrEl1, pVCpu->cpum.GstCtx.Elr);
1237 if (fWhat & CPUMCTX_EXTRN_SP)
1238 {
1239 ADD_SYSREG64(WHvArm64RegisterSpEl0, pVCpu->cpum.GstCtx.aSpReg[0]);
1240 ADD_SYSREG64(WHvArm64RegisterSpEl1, pVCpu->cpum.GstCtx.aSpReg[1]);
1241 }
1242 if (fWhat & CPUMCTX_EXTRN_SCTLR_TCR_TTBR)
1243 {
1244 ADD_SYSREG64(WHvArm64RegisterSctlrEl1, pVCpu->cpum.GstCtx.Sctlr);
1245 ADD_SYSREG64(WHvArm64RegisterTcrEl1, pVCpu->cpum.GstCtx.Tcr);
1246 ADD_SYSREG64(WHvArm64RegisterTtbr0El1, pVCpu->cpum.GstCtx.Ttbr0);
1247 ADD_SYSREG64(WHvArm64RegisterTtbr1El1, pVCpu->cpum.GstCtx.Ttbr1);
1248 }
1249
1250 /* Vector state. */
1251 if (fWhat & CPUMCTX_EXTRN_V0_V31)
1252 {
1253 ADD_REG128(WHvArm64RegisterQ0, pVCpu->cpum.GstCtx.aVRegs[0]);
1254 ADD_REG128(WHvArm64RegisterQ1, pVCpu->cpum.GstCtx.aVRegs[1]);
1255 ADD_REG128(WHvArm64RegisterQ2, pVCpu->cpum.GstCtx.aVRegs[2]);
1256 ADD_REG128(WHvArm64RegisterQ3, pVCpu->cpum.GstCtx.aVRegs[3]);
1257 ADD_REG128(WHvArm64RegisterQ4, pVCpu->cpum.GstCtx.aVRegs[4]);
1258 ADD_REG128(WHvArm64RegisterQ5, pVCpu->cpum.GstCtx.aVRegs[5]);
1259 ADD_REG128(WHvArm64RegisterQ6, pVCpu->cpum.GstCtx.aVRegs[6]);
1260 ADD_REG128(WHvArm64RegisterQ7, pVCpu->cpum.GstCtx.aVRegs[7]);
1261 ADD_REG128(WHvArm64RegisterQ8, pVCpu->cpum.GstCtx.aVRegs[8]);
1262 ADD_REG128(WHvArm64RegisterQ9, pVCpu->cpum.GstCtx.aVRegs[9]);
1263 ADD_REG128(WHvArm64RegisterQ10, pVCpu->cpum.GstCtx.aVRegs[10]);
1264 ADD_REG128(WHvArm64RegisterQ11, pVCpu->cpum.GstCtx.aVRegs[11]);
1265 ADD_REG128(WHvArm64RegisterQ12, pVCpu->cpum.GstCtx.aVRegs[12]);
1266 ADD_REG128(WHvArm64RegisterQ13, pVCpu->cpum.GstCtx.aVRegs[13]);
1267 ADD_REG128(WHvArm64RegisterQ14, pVCpu->cpum.GstCtx.aVRegs[14]);
1268 ADD_REG128(WHvArm64RegisterQ15, pVCpu->cpum.GstCtx.aVRegs[15]);
1269 ADD_REG128(WHvArm64RegisterQ16, pVCpu->cpum.GstCtx.aVRegs[16]);
1270 ADD_REG128(WHvArm64RegisterQ17, pVCpu->cpum.GstCtx.aVRegs[17]);
1271 ADD_REG128(WHvArm64RegisterQ18, pVCpu->cpum.GstCtx.aVRegs[18]);
1272 ADD_REG128(WHvArm64RegisterQ19, pVCpu->cpum.GstCtx.aVRegs[19]);
1273 ADD_REG128(WHvArm64RegisterQ20, pVCpu->cpum.GstCtx.aVRegs[20]);
1274 ADD_REG128(WHvArm64RegisterQ21, pVCpu->cpum.GstCtx.aVRegs[21]);
1275 ADD_REG128(WHvArm64RegisterQ22, pVCpu->cpum.GstCtx.aVRegs[22]);
1276 ADD_REG128(WHvArm64RegisterQ23, pVCpu->cpum.GstCtx.aVRegs[23]);
1277 ADD_REG128(WHvArm64RegisterQ24, pVCpu->cpum.GstCtx.aVRegs[24]);
1278 ADD_REG128(WHvArm64RegisterQ25, pVCpu->cpum.GstCtx.aVRegs[25]);
1279 ADD_REG128(WHvArm64RegisterQ26, pVCpu->cpum.GstCtx.aVRegs[26]);
1280 ADD_REG128(WHvArm64RegisterQ27, pVCpu->cpum.GstCtx.aVRegs[27]);
1281 ADD_REG128(WHvArm64RegisterQ28, pVCpu->cpum.GstCtx.aVRegs[28]);
1282 ADD_REG128(WHvArm64RegisterQ29, pVCpu->cpum.GstCtx.aVRegs[29]);
1283 ADD_REG128(WHvArm64RegisterQ30, pVCpu->cpum.GstCtx.aVRegs[30]);
1284 ADD_REG128(WHvArm64RegisterQ31, pVCpu->cpum.GstCtx.aVRegs[31]);
1285 }
1286
1287 if (fWhat & CPUMCTX_EXTRN_FPCR)
1288 ADD_REG64_RAW(WHvArm64RegisterFpcr, pVCpu->cpum.GstCtx.fpcr);
1289 if (fWhat & CPUMCTX_EXTRN_FPSR)
1290 ADD_REG64_RAW(WHvArm64RegisterFpsr, pVCpu->cpum.GstCtx.fpsr);
1291
1292 /* System registers. */
1293 if (fWhat & CPUMCTX_EXTRN_SYSREG_MISC)
1294 {
1295 ADD_SYSREG64(WHvArm64RegisterVbarEl1, pVCpu->cpum.GstCtx.VBar);
1296 ADD_SYSREG64(WHvArm64RegisterEsrEl1, pVCpu->cpum.GstCtx.Esr);
1297 ADD_SYSREG64(WHvArm64RegisterFarEl1, pVCpu->cpum.GstCtx.Far);
1298 ADD_SYSREG64(WHvArm64RegisterCntkctlEl1, pVCpu->cpum.GstCtx.CntKCtl);
1299 ADD_SYSREG64(WHvArm64RegisterContextidrEl1, pVCpu->cpum.GstCtx.ContextIdr);
1300 ADD_SYSREG64(WHvArm64RegisterCpacrEl1, pVCpu->cpum.GstCtx.Cpacr);
1301 ADD_SYSREG64(WHvArm64RegisterCsselrEl1, pVCpu->cpum.GstCtx.Csselr);
1302 ADD_SYSREG64(WHvArm64RegisterMairEl1, pVCpu->cpum.GstCtx.Mair);
1303 ADD_SYSREG64(WHvArm64RegisterParEl1, pVCpu->cpum.GstCtx.Par);
1304 ADD_SYSREG64(WHvArm64RegisterTpidrroEl0, pVCpu->cpum.GstCtx.TpIdrRoEl0);
1305 ADD_SYSREG64(WHvArm64RegisterTpidrEl0, pVCpu->cpum.GstCtx.aTpIdr[0]);
1306 ADD_SYSREG64(WHvArm64RegisterTpidrEl1, pVCpu->cpum.GstCtx.aTpIdr[1]);
1307 }
1308
1309 if (fWhat & CPUMCTX_EXTRN_SYSREG_DEBUG)
1310 {
1311 for (uint32_t i = 0; i < pVM->nem.s.cBreakpoints; i++)
1312 {
1313 ADD_SYSREG64((WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgbcr0El1 + i), pVCpu->cpum.GstCtx.aBp[i].Ctrl);
1314 ADD_SYSREG64((WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgbvr0El1 + i), pVCpu->cpum.GstCtx.aBp[i].Value);
1315 }
1316
1317 for (uint32_t i = 0; i < pVM->nem.s.cWatchpoints; i++)
1318 {
1319 ADD_SYSREG64((WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgwcr0El1 + i), pVCpu->cpum.GstCtx.aWp[i].Ctrl);
1320 ADD_SYSREG64((WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgwvr0El1 + i), pVCpu->cpum.GstCtx.aWp[i].Value);
1321 }
1322
1323 ADD_SYSREG64(WHvArm64RegisterMdscrEl1, pVCpu->cpum.GstCtx.Mdscr);
1324 }
1325
1326 if (fWhat & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS)
1327 {
1328 ADD_SYSREG64(WHvArm64RegisterApdAKeyHiEl1, pVCpu->cpum.GstCtx.Apda.High);
1329 ADD_SYSREG64(WHvArm64RegisterApdAKeyLoEl1, pVCpu->cpum.GstCtx.Apda.Low);
1330 ADD_SYSREG64(WHvArm64RegisterApdBKeyHiEl1, pVCpu->cpum.GstCtx.Apdb.High);
1331 ADD_SYSREG64(WHvArm64RegisterApdBKeyLoEl1, pVCpu->cpum.GstCtx.Apdb.Low);
1332 ADD_SYSREG64(WHvArm64RegisterApgAKeyHiEl1, pVCpu->cpum.GstCtx.Apga.High);
1333 ADD_SYSREG64(WHvArm64RegisterApgAKeyLoEl1, pVCpu->cpum.GstCtx.Apga.Low);
1334 ADD_SYSREG64(WHvArm64RegisterApiAKeyHiEl1, pVCpu->cpum.GstCtx.Apia.High);
1335 ADD_SYSREG64(WHvArm64RegisterApiAKeyLoEl1, pVCpu->cpum.GstCtx.Apia.Low);
1336 ADD_SYSREG64(WHvArm64RegisterApiBKeyHiEl1, pVCpu->cpum.GstCtx.Apib.High);
1337 ADD_SYSREG64(WHvArm64RegisterApiBKeyLoEl1, pVCpu->cpum.GstCtx.Apib.Low);
1338 }
1339
1340#undef ADD_REG64
1341#undef ADD_REG64_RAW
1342#undef ADD_REG128
1343
1344 /*
1345 * Set the registers.
1346 */
1347 Assert(iReg < RT_ELEMENTS(aValues));
1348 Assert(iReg < RT_ELEMENTS(aenmNames));
1349 HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues);
1350 if (SUCCEEDED(hrc))
1351 {
1352 pVCpu->cpum.GstCtx.fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_NEM;
1353 return VINF_SUCCESS;
1354 }
1355 AssertLogRelMsgFailed(("WHvSetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
1356 pVM->nem.s.hPartition, pVCpu->idCpu, iReg,
1357 hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
1358 return VERR_INTERNAL_ERROR;
1359}
1360
1361
1362NEM_TMPL_STATIC int nemHCWinCopyStateFromHyperV(PVMCC pVM, PVMCPUCC pVCpu, uint64_t fWhat)
1363{
1364 WHV_REGISTER_NAME aenmNames[256];
1365
1366 fWhat &= pVCpu->cpum.GstCtx.fExtrn;
1367 if (!fWhat)
1368 return VINF_SUCCESS;
1369
1370 uintptr_t iReg = 0;
1371
1372 /* GPRs */
1373 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1374 {
1375 if (fWhat & CPUMCTX_EXTRN_X0)
1376 aenmNames[iReg++] = WHvArm64RegisterX0;
1377 if (fWhat & CPUMCTX_EXTRN_X1)
1378 aenmNames[iReg++] = WHvArm64RegisterX1;
1379 if (fWhat & CPUMCTX_EXTRN_X2)
1380 aenmNames[iReg++] = WHvArm64RegisterX2;
1381 if (fWhat & CPUMCTX_EXTRN_X3)
1382 aenmNames[iReg++] = WHvArm64RegisterX3;
1383 if (fWhat & CPUMCTX_EXTRN_X4_X28)
1384 {
1385 aenmNames[iReg++] = WHvArm64RegisterX4;
1386 aenmNames[iReg++] = WHvArm64RegisterX5;
1387 aenmNames[iReg++] = WHvArm64RegisterX6;
1388 aenmNames[iReg++] = WHvArm64RegisterX7;
1389 aenmNames[iReg++] = WHvArm64RegisterX8;
1390 aenmNames[iReg++] = WHvArm64RegisterX9;
1391 aenmNames[iReg++] = WHvArm64RegisterX10;
1392 aenmNames[iReg++] = WHvArm64RegisterX11;
1393 aenmNames[iReg++] = WHvArm64RegisterX12;
1394 aenmNames[iReg++] = WHvArm64RegisterX13;
1395 aenmNames[iReg++] = WHvArm64RegisterX14;
1396 aenmNames[iReg++] = WHvArm64RegisterX15;
1397 aenmNames[iReg++] = WHvArm64RegisterX16;
1398 aenmNames[iReg++] = WHvArm64RegisterX17;
1399 aenmNames[iReg++] = WHvArm64RegisterX18;
1400 aenmNames[iReg++] = WHvArm64RegisterX19;
1401 aenmNames[iReg++] = WHvArm64RegisterX20;
1402 aenmNames[iReg++] = WHvArm64RegisterX21;
1403 aenmNames[iReg++] = WHvArm64RegisterX22;
1404 aenmNames[iReg++] = WHvArm64RegisterX23;
1405 aenmNames[iReg++] = WHvArm64RegisterX24;
1406 aenmNames[iReg++] = WHvArm64RegisterX25;
1407 aenmNames[iReg++] = WHvArm64RegisterX26;
1408 aenmNames[iReg++] = WHvArm64RegisterX27;
1409 aenmNames[iReg++] = WHvArm64RegisterX28;
1410 }
1411 if (fWhat & CPUMCTX_EXTRN_LR)
1412 aenmNames[iReg++] = WHvArm64RegisterLr;
1413 if (fWhat & CPUMCTX_EXTRN_FP)
1414 aenmNames[iReg++] = WHvArm64RegisterFp;
1415 }
1416
1417 /* PC & Flags */
1418 if (fWhat & CPUMCTX_EXTRN_PC)
1419 aenmNames[iReg++] = WHvArm64RegisterPc;
1420 if (fWhat & CPUMCTX_EXTRN_PSTATE)
1421 aenmNames[iReg++] = WHvArm64RegisterPstate;
1422 if (fWhat & CPUMCTX_EXTRN_SPSR)
1423 aenmNames[iReg++] = WHvArm64RegisterSpsrEl1;
1424 if (fWhat & CPUMCTX_EXTRN_ELR)
1425 aenmNames[iReg++] = WHvArm64RegisterElrEl1;
1426 if (fWhat & CPUMCTX_EXTRN_SP)
1427 {
1428 aenmNames[iReg++] = WHvArm64RegisterSpEl0;
1429 aenmNames[iReg++] = WHvArm64RegisterSpEl1;
1430 }
1431 if (fWhat & CPUMCTX_EXTRN_SCTLR_TCR_TTBR)
1432 {
1433 aenmNames[iReg++] = WHvArm64RegisterSctlrEl1;
1434 aenmNames[iReg++] = WHvArm64RegisterTcrEl1;
1435 aenmNames[iReg++] = WHvArm64RegisterTtbr0El1;
1436 aenmNames[iReg++] = WHvArm64RegisterTtbr1El1;
1437 }
1438
1439 /* Vector state. */
1440 if (fWhat & CPUMCTX_EXTRN_V0_V31)
1441 {
1442 aenmNames[iReg++] = WHvArm64RegisterQ0;
1443 aenmNames[iReg++] = WHvArm64RegisterQ1;
1444 aenmNames[iReg++] = WHvArm64RegisterQ2;
1445 aenmNames[iReg++] = WHvArm64RegisterQ3;
1446 aenmNames[iReg++] = WHvArm64RegisterQ4;
1447 aenmNames[iReg++] = WHvArm64RegisterQ5;
1448 aenmNames[iReg++] = WHvArm64RegisterQ6;
1449 aenmNames[iReg++] = WHvArm64RegisterQ7;
1450 aenmNames[iReg++] = WHvArm64RegisterQ8;
1451 aenmNames[iReg++] = WHvArm64RegisterQ9;
1452 aenmNames[iReg++] = WHvArm64RegisterQ10;
1453 aenmNames[iReg++] = WHvArm64RegisterQ11;
1454 aenmNames[iReg++] = WHvArm64RegisterQ12;
1455 aenmNames[iReg++] = WHvArm64RegisterQ13;
1456 aenmNames[iReg++] = WHvArm64RegisterQ14;
1457 aenmNames[iReg++] = WHvArm64RegisterQ15;
1458
1459 aenmNames[iReg++] = WHvArm64RegisterQ16;
1460 aenmNames[iReg++] = WHvArm64RegisterQ17;
1461 aenmNames[iReg++] = WHvArm64RegisterQ18;
1462 aenmNames[iReg++] = WHvArm64RegisterQ19;
1463 aenmNames[iReg++] = WHvArm64RegisterQ20;
1464 aenmNames[iReg++] = WHvArm64RegisterQ21;
1465 aenmNames[iReg++] = WHvArm64RegisterQ22;
1466 aenmNames[iReg++] = WHvArm64RegisterQ23;
1467 aenmNames[iReg++] = WHvArm64RegisterQ24;
1468 aenmNames[iReg++] = WHvArm64RegisterQ25;
1469 aenmNames[iReg++] = WHvArm64RegisterQ26;
1470 aenmNames[iReg++] = WHvArm64RegisterQ27;
1471 aenmNames[iReg++] = WHvArm64RegisterQ28;
1472 aenmNames[iReg++] = WHvArm64RegisterQ29;
1473 aenmNames[iReg++] = WHvArm64RegisterQ30;
1474 aenmNames[iReg++] = WHvArm64RegisterQ31;
1475 }
1476 if (fWhat & CPUMCTX_EXTRN_FPCR)
1477 aenmNames[iReg++] = WHvArm64RegisterFpcr;
1478 if (fWhat & CPUMCTX_EXTRN_FPSR)
1479 aenmNames[iReg++] = WHvArm64RegisterFpsr;
1480
1481 /* System registers. */
1482 if (fWhat & CPUMCTX_EXTRN_SYSREG_MISC)
1483 {
1484 aenmNames[iReg++] = WHvArm64RegisterVbarEl1;
1485 aenmNames[iReg++] = WHvArm64RegisterEsrEl1;
1486 aenmNames[iReg++] = WHvArm64RegisterFarEl1;
1487 aenmNames[iReg++] = WHvArm64RegisterCntkctlEl1;
1488 aenmNames[iReg++] = WHvArm64RegisterContextidrEl1;
1489 aenmNames[iReg++] = WHvArm64RegisterCpacrEl1;
1490 aenmNames[iReg++] = WHvArm64RegisterCsselrEl1;
1491 aenmNames[iReg++] = WHvArm64RegisterMairEl1;
1492 aenmNames[iReg++] = WHvArm64RegisterParEl1;
1493 aenmNames[iReg++] = WHvArm64RegisterTpidrroEl0;
1494 aenmNames[iReg++] = WHvArm64RegisterTpidrEl0;
1495 aenmNames[iReg++] = WHvArm64RegisterTpidrEl1;
1496 }
1497
1498 if (fWhat & CPUMCTX_EXTRN_SYSREG_DEBUG)
1499 {
1500 /* Hyper-V doesn't allow syncing debug break-/watchpoint registers which aren't there. */
1501 for (uint32_t i = 0; i < pVM->nem.s.cBreakpoints; i++)
1502 {
1503 aenmNames[iReg++] = (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgbcr0El1 + i);
1504 aenmNames[iReg++] = (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgbvr0El1 + i);
1505 }
1506
1507 for (uint32_t i = 0; i < pVM->nem.s.cWatchpoints; i++)
1508 {
1509 aenmNames[iReg++] = (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgwcr0El1 + i);
1510 aenmNames[iReg++] = (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgwvr0El1 + i);
1511 }
1512
1513 aenmNames[iReg++] = WHvArm64RegisterMdscrEl1;
1514 }
1515
1516 if (fWhat & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS)
1517 {
1518 aenmNames[iReg++] = WHvArm64RegisterApdAKeyHiEl1;
1519 aenmNames[iReg++] = WHvArm64RegisterApdAKeyLoEl1;
1520 aenmNames[iReg++] = WHvArm64RegisterApdBKeyHiEl1;
1521 aenmNames[iReg++] = WHvArm64RegisterApdBKeyLoEl1;
1522 aenmNames[iReg++] = WHvArm64RegisterApgAKeyHiEl1;
1523 aenmNames[iReg++] = WHvArm64RegisterApgAKeyLoEl1;
1524 aenmNames[iReg++] = WHvArm64RegisterApiAKeyHiEl1;
1525 aenmNames[iReg++] = WHvArm64RegisterApiAKeyLoEl1;
1526 aenmNames[iReg++] = WHvArm64RegisterApiBKeyHiEl1;
1527 aenmNames[iReg++] = WHvArm64RegisterApiBKeyLoEl1;
1528 }
1529
1530 size_t const cRegs = iReg;
1531 Assert(cRegs < RT_ELEMENTS(aenmNames));
1532
1533 /*
1534 * Get the registers.
1535 */
1536 WHV_REGISTER_VALUE aValues[256];
1537 RT_ZERO(aValues);
1538 Assert(RT_ELEMENTS(aValues) >= cRegs);
1539 Assert(RT_ELEMENTS(aenmNames) >= cRegs);
1540 HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, (uint32_t)cRegs, aValues);
1541 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1542 ("WHvGetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
1543 pVM->nem.s.hPartition, pVCpu->idCpu, cRegs, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1544 , VERR_NEM_GET_REGISTERS_FAILED);
1545
1546 iReg = 0;
1547#define GET_REG64(a_DstVar, a_enmName) do { \
1548 Assert(aenmNames[iReg] == (a_enmName)); \
1549 (a_DstVar).x = aValues[iReg].Reg64; \
1550 iReg++; \
1551 } while (0)
1552#define GET_REG64_RAW(a_DstVar, a_enmName) do { \
1553 Assert(aenmNames[iReg] == (a_enmName)); \
1554 (a_DstVar) = aValues[iReg].Reg64; \
1555 iReg++; \
1556 } while (0)
1557#define GET_SYSREG64(a_DstVar, a_enmName) do { \
1558 Assert(aenmNames[iReg] == (a_enmName)); \
1559 (a_DstVar).u64 = aValues[iReg].Reg64; \
1560 iReg++; \
1561 } while (0)
1562#define GET_REG128(a_DstVar, a_enmName) do { \
1563 Assert(aenmNames[iReg] == a_enmName); \
1564 (a_DstVar).au64[0] = aValues[iReg].Reg128.Low64; \
1565 (a_DstVar).au64[1] = aValues[iReg].Reg128.High64; \
1566 iReg++; \
1567 } while (0)
1568
1569 /* GPRs */
1570 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1571 {
1572 if (fWhat & CPUMCTX_EXTRN_X0)
1573 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[0], WHvArm64RegisterX0);
1574 if (fWhat & CPUMCTX_EXTRN_X1)
1575 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[1], WHvArm64RegisterX1);
1576 if (fWhat & CPUMCTX_EXTRN_X2)
1577 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[2], WHvArm64RegisterX2);
1578 if (fWhat & CPUMCTX_EXTRN_X3)
1579 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[3], WHvArm64RegisterX3);
1580 if (fWhat & CPUMCTX_EXTRN_X4_X28)
1581 {
1582 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[4], WHvArm64RegisterX4);
1583 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[5], WHvArm64RegisterX5);
1584 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[6], WHvArm64RegisterX6);
1585 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[7], WHvArm64RegisterX7);
1586 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[8], WHvArm64RegisterX8);
1587 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[9], WHvArm64RegisterX9);
1588 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[10], WHvArm64RegisterX10);
1589 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[11], WHvArm64RegisterX11);
1590 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[12], WHvArm64RegisterX12);
1591 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[13], WHvArm64RegisterX13);
1592 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[14], WHvArm64RegisterX14);
1593 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[15], WHvArm64RegisterX15);
1594 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[16], WHvArm64RegisterX16);
1595 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[17], WHvArm64RegisterX17);
1596 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[18], WHvArm64RegisterX18);
1597 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[19], WHvArm64RegisterX19);
1598 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[20], WHvArm64RegisterX20);
1599 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[21], WHvArm64RegisterX21);
1600 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[22], WHvArm64RegisterX22);
1601 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[23], WHvArm64RegisterX23);
1602 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[24], WHvArm64RegisterX24);
1603 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[25], WHvArm64RegisterX25);
1604 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[26], WHvArm64RegisterX26);
1605 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[27], WHvArm64RegisterX27);
1606 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[28], WHvArm64RegisterX28);
1607 }
1608 if (fWhat & CPUMCTX_EXTRN_LR)
1609 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[30], WHvArm64RegisterLr);
1610 if (fWhat & CPUMCTX_EXTRN_FP)
1611 GET_REG64(pVCpu->cpum.GstCtx.aGRegs[29], WHvArm64RegisterFp);
1612 }
1613
1614 /* RIP & Flags */
1615 if (fWhat & CPUMCTX_EXTRN_PC)
1616 GET_REG64_RAW(pVCpu->cpum.GstCtx.Pc.u64, WHvArm64RegisterPc);
1617 if (fWhat & CPUMCTX_EXTRN_PSTATE)
1618 GET_REG64_RAW(pVCpu->cpum.GstCtx.fPState, WHvArm64RegisterPstate);
1619 if (fWhat & CPUMCTX_EXTRN_SPSR)
1620 GET_SYSREG64(pVCpu->cpum.GstCtx.Spsr, WHvArm64RegisterSpsrEl1);
1621 if (fWhat & CPUMCTX_EXTRN_ELR)
1622 GET_SYSREG64(pVCpu->cpum.GstCtx.Elr, WHvArm64RegisterElrEl1);
1623 if (fWhat & CPUMCTX_EXTRN_SP)
1624 {
1625 GET_SYSREG64(pVCpu->cpum.GstCtx.aSpReg[0], WHvArm64RegisterSpEl0);
1626 GET_SYSREG64(pVCpu->cpum.GstCtx.aSpReg[1], WHvArm64RegisterSpEl1);
1627 }
1628 if (fWhat & CPUMCTX_EXTRN_SCTLR_TCR_TTBR)
1629 {
1630 GET_SYSREG64(pVCpu->cpum.GstCtx.Sctlr, WHvArm64RegisterSctlrEl1);
1631 GET_SYSREG64(pVCpu->cpum.GstCtx.Tcr, WHvArm64RegisterTcrEl1);
1632 GET_SYSREG64(pVCpu->cpum.GstCtx.Ttbr0, WHvArm64RegisterTtbr0El1);
1633 GET_SYSREG64(pVCpu->cpum.GstCtx.Ttbr1, WHvArm64RegisterTtbr1El1);
1634 }
1635
1636 /* Vector state. */
1637 if (fWhat & CPUMCTX_EXTRN_V0_V31)
1638 {
1639 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[0], WHvArm64RegisterQ0);
1640 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[1], WHvArm64RegisterQ1);
1641 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[2], WHvArm64RegisterQ2);
1642 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[3], WHvArm64RegisterQ3);
1643 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[4], WHvArm64RegisterQ4);
1644 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[5], WHvArm64RegisterQ5);
1645 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[6], WHvArm64RegisterQ6);
1646 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[7], WHvArm64RegisterQ7);
1647 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[8], WHvArm64RegisterQ8);
1648 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[9], WHvArm64RegisterQ9);
1649 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[10], WHvArm64RegisterQ10);
1650 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[11], WHvArm64RegisterQ11);
1651 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[12], WHvArm64RegisterQ12);
1652 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[13], WHvArm64RegisterQ13);
1653 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[14], WHvArm64RegisterQ14);
1654 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[15], WHvArm64RegisterQ15);
1655
1656 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[16], WHvArm64RegisterQ16);
1657 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[17], WHvArm64RegisterQ17);
1658 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[18], WHvArm64RegisterQ18);
1659 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[19], WHvArm64RegisterQ19);
1660 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[20], WHvArm64RegisterQ20);
1661 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[21], WHvArm64RegisterQ21);
1662 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[22], WHvArm64RegisterQ22);
1663 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[23], WHvArm64RegisterQ23);
1664 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[24], WHvArm64RegisterQ24);
1665 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[25], WHvArm64RegisterQ25);
1666 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[26], WHvArm64RegisterQ26);
1667 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[27], WHvArm64RegisterQ27);
1668 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[28], WHvArm64RegisterQ28);
1669 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[29], WHvArm64RegisterQ29);
1670 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[30], WHvArm64RegisterQ30);
1671 GET_REG128(pVCpu->cpum.GstCtx.aVRegs[31], WHvArm64RegisterQ31);
1672 }
1673 if (fWhat & CPUMCTX_EXTRN_FPCR)
1674 GET_REG64_RAW(pVCpu->cpum.GstCtx.fpcr, WHvArm64RegisterFpcr);
1675 if (fWhat & CPUMCTX_EXTRN_FPSR)
1676 GET_REG64_RAW(pVCpu->cpum.GstCtx.fpsr, WHvArm64RegisterFpsr);
1677
1678 /* System registers. */
1679 if (fWhat & CPUMCTX_EXTRN_SYSREG_MISC)
1680 {
1681 GET_SYSREG64(pVCpu->cpum.GstCtx.VBar, WHvArm64RegisterVbarEl1);
1682 GET_SYSREG64(pVCpu->cpum.GstCtx.Esr, WHvArm64RegisterEsrEl1);
1683 GET_SYSREG64(pVCpu->cpum.GstCtx.Far, WHvArm64RegisterFarEl1);
1684 GET_SYSREG64(pVCpu->cpum.GstCtx.CntKCtl, WHvArm64RegisterCntkctlEl1);
1685 GET_SYSREG64(pVCpu->cpum.GstCtx.ContextIdr, WHvArm64RegisterContextidrEl1);
1686 GET_SYSREG64(pVCpu->cpum.GstCtx.Cpacr, WHvArm64RegisterCpacrEl1);
1687 GET_SYSREG64(pVCpu->cpum.GstCtx.Csselr, WHvArm64RegisterCsselrEl1);
1688 GET_SYSREG64(pVCpu->cpum.GstCtx.Mair, WHvArm64RegisterMairEl1);
1689 GET_SYSREG64(pVCpu->cpum.GstCtx.Par, WHvArm64RegisterParEl1);
1690 GET_SYSREG64(pVCpu->cpum.GstCtx.TpIdrRoEl0, WHvArm64RegisterTpidrroEl0);
1691 GET_SYSREG64(pVCpu->cpum.GstCtx.aTpIdr[0], WHvArm64RegisterTpidrEl0);
1692 GET_SYSREG64(pVCpu->cpum.GstCtx.aTpIdr[1], WHvArm64RegisterTpidrEl1);
1693 }
1694
1695 if (fWhat & CPUMCTX_EXTRN_SYSREG_DEBUG)
1696 {
1697 for (uint32_t i = 0; i < pVM->nem.s.cBreakpoints; i++)
1698 {
1699 GET_SYSREG64(pVCpu->cpum.GstCtx.aBp[i].Ctrl, (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgbcr0El1 + i));
1700 GET_SYSREG64(pVCpu->cpum.GstCtx.aBp[i].Value, (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgbvr0El1 + i));
1701 }
1702
1703 for (uint32_t i = 0; i < pVM->nem.s.cWatchpoints; i++)
1704 {
1705 GET_SYSREG64(pVCpu->cpum.GstCtx.aWp[i].Ctrl, (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgwcr0El1 + i));
1706 GET_SYSREG64(pVCpu->cpum.GstCtx.aWp[i].Value, (WHV_REGISTER_NAME)((uint32_t)WHvArm64RegisterDbgwvr0El1 + i));
1707 }
1708
1709 GET_SYSREG64(pVCpu->cpum.GstCtx.Mdscr, WHvArm64RegisterMdscrEl1);
1710 }
1711
1712 if (fWhat & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS)
1713 {
1714 GET_SYSREG64(pVCpu->cpum.GstCtx.Apda.High, WHvArm64RegisterApdAKeyHiEl1);
1715 GET_SYSREG64(pVCpu->cpum.GstCtx.Apda.Low, WHvArm64RegisterApdAKeyLoEl1);
1716 GET_SYSREG64(pVCpu->cpum.GstCtx.Apdb.High, WHvArm64RegisterApdBKeyHiEl1);
1717 GET_SYSREG64(pVCpu->cpum.GstCtx.Apdb.Low, WHvArm64RegisterApdBKeyLoEl1);
1718 GET_SYSREG64(pVCpu->cpum.GstCtx.Apga.High, WHvArm64RegisterApgAKeyHiEl1);
1719 GET_SYSREG64(pVCpu->cpum.GstCtx.Apga.Low, WHvArm64RegisterApgAKeyLoEl1);
1720 GET_SYSREG64(pVCpu->cpum.GstCtx.Apia.High, WHvArm64RegisterApiAKeyHiEl1);
1721 GET_SYSREG64(pVCpu->cpum.GstCtx.Apia.Low, WHvArm64RegisterApiAKeyLoEl1);
1722 GET_SYSREG64(pVCpu->cpum.GstCtx.Apib.High, WHvArm64RegisterApiBKeyHiEl1);
1723 GET_SYSREG64(pVCpu->cpum.GstCtx.Apib.Low, WHvArm64RegisterApiBKeyLoEl1);
1724 }
1725
1726 /* Almost done, just update extrn flags. */
1727 pVCpu->cpum.GstCtx.fExtrn &= ~fWhat;
1728 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
1729 pVCpu->cpum.GstCtx.fExtrn = 0;
1730
1731 return VINF_SUCCESS;
1732}
1733
1734
1735/**
1736 * Interface for importing state on demand (used by IEM).
1737 *
1738 * @returns VBox status code.
1739 * @param pVCpu The cross context CPU structure.
1740 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
1741 */
1742VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
1743{
1744 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);
1745 return nemHCWinCopyStateFromHyperV(pVCpu->pVMR3, pVCpu, fWhat);
1746}
1747
1748
1749/**
1750 * Query the CPU tick counter and optionally the TSC_AUX MSR value.
1751 *
1752 * @returns VBox status code.
1753 * @param pVCpu The cross context CPU structure.
1754 * @param pcTicks Where to return the CPU tick count.
1755 * @param puAux Where to return the TSC_AUX register value.
1756 */
1757VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPUCC pVCpu, uint64_t *pcTicks, uint32_t *puAux)
1758{
1759 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick);
1760
1761 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1762 VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
1763 AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);
1764
1765 /* Ensure time for the partition is suspended - it will be resumed as soon as a vCPU starts executing. */
1766 HRESULT hrc = WHvSuspendPartitionTime(pVM->nem.s.hPartition);
1767 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1768 ("WHvSuspendPartitionTime(%p) -> %Rhrc (Last=%#x/%u)\n",
1769 pVM->nem.s.hPartition, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1770 , VERR_NEM_GET_REGISTERS_FAILED);
1771
1772 /* Call the offical API. */
1773 WHV_REGISTER_NAME enmName = WHvArm64RegisterCntvctEl0;
1774 WHV_REGISTER_VALUE Value = { { {0, 0} } };
1775 hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, &enmName, 1, &Value);
1776 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1777 ("WHvGetVirtualProcessorRegisters(%p, %u,{CNTVCT_EL0},1,) -> %Rhrc (Last=%#x/%u)\n",
1778 pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1779 , VERR_NEM_GET_REGISTERS_FAILED);
1780 *pcTicks = Value.Reg64;
1781 LogFlow(("NEMHCQueryCpuTick: %#RX64 (host: %#RX64)\n", *pcTicks, ASMReadTSC()));
1782 if (puAux)
1783 *puAux =0;
1784
1785 return VINF_SUCCESS;
1786}
1787
1788
1789/**
1790 * Resumes CPU clock (TSC) on all virtual CPUs.
1791 *
1792 * This is called by TM when the VM is started, restored, resumed or similar.
1793 *
1794 * @returns VBox status code.
1795 * @param pVM The cross context VM structure.
1796 * @param pVCpu The cross context CPU structure of the calling EMT.
1797 * @param uPausedTscValue The TSC value at the time of pausing.
1798 */
1799VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uPausedTscValue)
1800{
1801 VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
1802 AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);
1803
1804 /*
1805 * Call the offical API to do the job.
1806 */
1807 LogFlow(("NEMHCResumeCpuTickOnAll: %#RX64 (host: %#RX64)\n", uPausedTscValue, ASMReadTSC()));
1808
1809 /*
1810 * Now set the CNTVCT_EL0 register for each vCPU, Hyper-V will program the timer offset in
1811 * CNTVOFF_EL2 accordingly. ARM guarantees that CNTVCT_EL0 is synchronised across all CPUs,
1812 * as long as CNTVOFF_EL2 is the same everywhere. Lets just hope scheduling will not affect it
1813 * if the partition time is suspended.
1814 */
1815 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1816 {
1817 WHV_REGISTER_NAME enmName = WHvArm64RegisterCntvctEl0;
1818 WHV_REGISTER_VALUE Value;
1819 Value.Reg64 = uPausedTscValue;
1820 HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, idCpu, &enmName, 1, &Value);
1821 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1822 ("WHvSetVirtualProcessorRegisters(%p, 0,{CNTVCT_EL0},1,%#RX64) -> %Rhrc (Last=%#x/%u)\n",
1823 pVM->nem.s.hPartition, idCpu, uPausedTscValue, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1824 , VERR_NEM_SET_TSC);
1825
1826 /* Make sure the CNTV_CTL_EL0 and CNTV_CVAL_EL0 registers are up to date after resuming (saved state load). */
1827 PVMCPUCC pVCpuDst = pVM->apCpusR3[idCpu];
1828 pVCpuDst->nem.s.fSyncCntvRegs = true;
1829 }
1830
1831 HRESULT hrc = WHvResumePartitionTime(pVM->nem.s.hPartition);
1832 AssertLogRelMsgReturn(SUCCEEDED(hrc),
1833 ("WHvResumePartitionTime(%p) -> %Rhrc (Last=%#x/%u)\n",
1834 pVM->nem.s.hPartition, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
1835 , VERR_NEM_SET_TSC);
1836
1837 return VINF_SUCCESS;
1838}
1839
1840
#ifdef LOG_ENABLED
/**
 * Logs the current vCPU register state (log group 3).
 *
 * The general purpose registers, PC/PSTATE and the most interesting EL1
 * system registers are formatted via DBGF, followed by a disassembly of the
 * current guest instruction.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context CPU structure of the vCPU to log.
 */
static void nemR3WinLogState(PVMCC pVM, PVMCPUCC pVCpu)
{
    if (LogIs3Enabled()) /* Skip all the formatting work unless group 3 is active. */
    {
        /* Format the register state into a stack buffer using DBGF register queries. */
        char szRegs[4096];
        DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
                        "x0=%016VR{x0} x1=%016VR{x1} x2=%016VR{x2} x3=%016VR{x3}\n"
                        "x4=%016VR{x4} x5=%016VR{x5} x6=%016VR{x6} x7=%016VR{x7}\n"
                        "x8=%016VR{x8} x9=%016VR{x9} x10=%016VR{x10} x11=%016VR{x11}\n"
                        "x12=%016VR{x12} x13=%016VR{x13} x14=%016VR{x14} x15=%016VR{x15}\n"
                        "x16=%016VR{x16} x17=%016VR{x17} x18=%016VR{x18} x19=%016VR{x19}\n"
                        "x20=%016VR{x20} x21=%016VR{x21} x22=%016VR{x22} x23=%016VR{x23}\n"
                        "x24=%016VR{x24} x25=%016VR{x25} x26=%016VR{x26} x27=%016VR{x27}\n"
                        "x28=%016VR{x28} x29=%016VR{x29} x30=%016VR{x30}\n"
                        "pc=%016VR{pc} pstate=%016VR{pstate}\n"
                        "sp_el0=%016VR{sp_el0} sp_el1=%016VR{sp_el1} elr_el1=%016VR{elr_el1}\n"
                        "sctlr_el1=%016VR{sctlr_el1} tcr_el1=%016VR{tcr_el1}\n"
                        "ttbr0_el1=%016VR{ttbr0_el1} ttbr1_el1=%016VR{ttbr1_el1}\n"
                        "vbar_el1=%016VR{vbar_el1}\n"
                        );
        /* Disassemble the instruction at the current guest PC. */
        char szInstr[256]; RT_ZERO(szInstr);
        DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
                           DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
                           szInstr, sizeof(szInstr), NULL);
        Log3(("%s%s\n", szRegs, szInstr));
    }
}
#endif /* LOG_ENABLED */
1873
1874
1875/**
1876 * Copies register state from the (common) exit context.
1877 *
1878 * ASSUMES no state copied yet.
1879 *
1880 * @param pVCpu The cross context per CPU structure.
1881 * @param pMsgHdr The common message header.
1882 */
1883DECLINLINE(void) nemR3WinCopyStateFromArmHeader(PVMCPUCC pVCpu, WHV_INTERCEPT_MESSAGE_HEADER const *pMsgHdr)
1884{
1885#ifdef LOG_ENABLED /* When state logging is enabled the state is synced completely upon VM exit. */
1886 if (!LogIs3Enabled())
1887#endif
1888 Assert( (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_PSTATE))
1889 == (CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_PSTATE));
1890
1891 pVCpu->cpum.GstCtx.Pc.u64 = pMsgHdr->Pc;
1892 pVCpu->cpum.GstCtx.fPState = pMsgHdr->Cpsr;
1893
1894 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_PSTATE);
1895}
1896
1897
1898/**
1899 * State to pass between nemHCWinHandleMemoryAccess / nemR3WinWHvHandleMemoryAccess
1900 * and nemHCWinHandleMemoryAccessPageCheckerCallback.
1901 */
1902typedef struct NEMHCWINHMACPCCSTATE
1903{
1904 /** Input: Write access. */
1905 bool fWriteAccess;
1906 /** Output: Set if we did something. */
1907 bool fDidSomething;
1908 /** Output: Set it we should resume. */
1909 bool fCanResume;
1910} NEMHCWINHMACPCCSTATE;
1911
1912/**
1913 * @callback_method_impl{FNPGMPHYSNEMCHECKPAGE,
1914 * Worker for nemR3WinHandleMemoryAccess; pvUser points to a
1915 * NEMHCWINHMACPCCSTATE structure. }
1916 */
1917NEM_TMPL_STATIC DECLCALLBACK(int)
1918nemHCWinHandleMemoryAccessPageCheckerCallback(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
1919{
1920 NEMHCWINHMACPCCSTATE *pState = (NEMHCWINHMACPCCSTATE *)pvUser;
1921 pState->fDidSomething = false;
1922 pState->fCanResume = false;
1923
1924 /* If A20 is disabled, we may need to make another query on the masked
1925 page to get the correct protection information. */
1926 uint8_t u2State = pInfo->u2NemState;
1927 RTGCPHYS GCPhysSrc = GCPhys;
1928
1929 /*
1930 * Consolidate current page state with actual page protection and access type.
1931 * We don't really consider downgrades here, as they shouldn't happen.
1932 */
1933 int rc;
1934 switch (u2State)
1935 {
1936 case NEM_WIN_PAGE_STATE_UNMAPPED:
1937 case NEM_WIN_PAGE_STATE_NOT_SET:
1938 if (pInfo->fNemProt == NEM_PAGE_PROT_NONE)
1939 {
1940 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #1\n", GCPhys));
1941 return VINF_SUCCESS;
1942 }
1943
1944 /* Don't bother remapping it if it's a write request to a non-writable page. */
1945 if ( pState->fWriteAccess
1946 && !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE))
1947 {
1948 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #1w\n", GCPhys));
1949 return VINF_SUCCESS;
1950 }
1951
1952 /* Map the page. */
1953 rc = nemHCNativeSetPhysPage(pVM,
1954 pVCpu,
1955 GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
1956 GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
1957 pInfo->fNemProt,
1958 &u2State,
1959 true /*fBackingState*/);
1960 pInfo->u2NemState = u2State;
1961 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - synced => %s + %Rrc\n",
1962 GCPhys, g_apszPageStates[u2State], rc));
1963 pState->fDidSomething = true;
1964 pState->fCanResume = true;
1965 return rc;
1966
1967 case NEM_WIN_PAGE_STATE_READABLE:
1968 if ( !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1969 && (pInfo->fNemProt & (NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE)))
1970 {
1971 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #2\n", GCPhys));
1972 return VINF_SUCCESS;
1973 }
1974
1975 break;
1976
1977 case NEM_WIN_PAGE_STATE_WRITABLE:
1978 if (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1979 {
1980 if (pInfo->u2OldNemState == NEM_WIN_PAGE_STATE_WRITABLE)
1981 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #3a\n", GCPhys));
1982 else
1983 {
1984 pState->fCanResume = true;
1985 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #3b (%s -> %s)\n",
1986 GCPhys, g_apszPageStates[pInfo->u2OldNemState], g_apszPageStates[u2State]));
1987 }
1988 return VINF_SUCCESS;
1989 }
1990 break;
1991
1992 default:
1993 AssertLogRelMsgFailedReturn(("u2State=%#x\n", u2State), VERR_NEM_IPE_4);
1994 }
1995
1996 /*
1997 * Unmap and restart the instruction.
1998 * If this fails, which it does every so often, just unmap everything for now.
1999 */
2000 /** @todo figure out whether we mess up the state or if it's WHv. */
2001 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
2002 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
2003 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
2004 if (SUCCEEDED(hrc))
2005 {
2006 pState->fDidSomething = true;
2007 pState->fCanResume = true;
2008 pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
2009 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
2010 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
2011 Log5(("NEM GPA unmapped/exit: %RGp (was %s, cMappedPages=%u)\n", GCPhys, g_apszPageStates[u2State], cMappedPages));
2012 return VINF_SUCCESS;
2013 }
2014 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
2015 LogRel(("nemHCWinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp %s hrc=%Rhrc (%#x)\n",
2016 GCPhys, g_apszPageStates[u2State], hrc, hrc));
2017 return VERR_NEM_UNMAP_PAGES_FAILED;
2018}
2019
2020
2021/**
2022 * Returns the byte size from the given access SAS value.
2023 *
2024 * @returns Number of bytes to transfer.
2025 * @param uSas The SAS value to convert.
2026 */
2027DECLINLINE(size_t) nemR3WinGetByteCountFromSas(uint8_t uSas)
2028{
2029 switch (uSas)
2030 {
2031 case ARMV8_EC_ISS_DATA_ABRT_SAS_BYTE: return sizeof(uint8_t);
2032 case ARMV8_EC_ISS_DATA_ABRT_SAS_HALFWORD: return sizeof(uint16_t);
2033 case ARMV8_EC_ISS_DATA_ABRT_SAS_WORD: return sizeof(uint32_t);
2034 case ARMV8_EC_ISS_DATA_ABRT_SAS_DWORD: return sizeof(uint64_t);
2035 default:
2036 AssertReleaseFailed();
2037 }
2038
2039 return 0;
2040}
2041
2042
2043/**
2044 * Sets the given general purpose register to the given value.
2045 *
2046 * @param pVCpu The cross context virtual CPU structure of the
2047 * calling EMT.
2048 * @param uReg The register index.
2049 * @param f64BitReg Flag whether to operate on a 64-bit or 32-bit register.
2050 * @param fSignExtend Flag whether to sign extend the value.
2051 * @param u64Val The value.
2052 */
2053DECLINLINE(void) nemR3WinSetGReg(PVMCPU pVCpu, uint8_t uReg, bool f64BitReg, bool fSignExtend, uint64_t u64Val)
2054{
2055 AssertReturnVoid(uReg < 31);
2056
2057 if (f64BitReg)
2058 pVCpu->cpum.GstCtx.aGRegs[uReg].x = fSignExtend ? (int64_t)u64Val : u64Val;
2059 else
2060 pVCpu->cpum.GstCtx.aGRegs[uReg].x = (uint64_t)(fSignExtend ? (int32_t)u64Val : (uint32_t)u64Val);
2061
2062 /* Mark the register as not extern anymore. */
2063 switch (uReg)
2064 {
2065 case 0:
2066 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X0;
2067 break;
2068 case 1:
2069 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X1;
2070 break;
2071 case 2:
2072 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X2;
2073 break;
2074 case 3:
2075 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X3;
2076 break;
2077 default:
2078 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_X4_X28));
2079 /** @todo We need to import all missing registers in order to clear this flag (or just set it in HV from here). */
2080 }
2081}
2082
2083
2084/**
2085 * Gets the given general purpose register and returns the value.
2086 *
2087 * @returns Value from the given register.
2088 * @param pVCpu The cross context virtual CPU structure of the
2089 * calling EMT.
2090 * @param uReg The register index.
2091 */
2092DECLINLINE(uint64_t) nemR3WinGetGReg(PVMCPU pVCpu, uint8_t uReg)
2093{
2094 AssertReturn(uReg <= ARMV8_A64_REG_XZR, 0);
2095
2096 if (uReg == ARMV8_A64_REG_XZR)
2097 return 0;
2098
2099 /** @todo Import the register if extern. */
2100 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_GPRS_MASK));
2101
2102 return pVCpu->cpum.GstCtx.aGRegs[uReg].x;
2103}
2104
2105
2106/**
2107 * Deals with memory access exits (WHvRunVpExitReasonMemoryAccess).
2108 *
2109 * @returns Strict VBox status code.
2110 * @param pVM The cross context VM structure.
2111 * @param pVCpu The cross context per CPU structure.
2112 * @param pExit The VM exit information to handle.
2113 * @sa nemHCWinHandleMessageMemory
2114 */
2115NEM_TMPL_STATIC VBOXSTRICTRC
2116nemR3WinHandleExitMemory(PVMCC pVM, PVMCPUCC pVCpu, MY_WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2117{
2118 uint64_t const uHostTsc = ASMReadTSC();
2119 Assert(pExit->MemoryAccess.Header.InterceptAccessType != 3);
2120
2121 /*
2122 * Ask PGM for information about the given GCPhys. We need to check if we're
2123 * out of sync first.
2124 */
2125 WHV_INTERCEPT_MESSAGE_HEADER const *pHdr = &pExit->MemoryAccess.Header;
2126 NEMHCWINHMACPCCSTATE State = { pExit->MemoryAccess.Header.InterceptAccessType == WHvMemoryAccessWrite, false, false };
2127 PGMPHYSNEMPAGEINFO Info;
2128 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pExit->MemoryAccess.Gpa, State.fWriteAccess, &Info,
2129 nemHCWinHandleMemoryAccessPageCheckerCallback, &State);
2130 if (RT_SUCCESS(rc))
2131 {
2132 if (Info.fNemProt & ( pExit->MemoryAccess.Header.InterceptAccessType == WHvMemoryAccessWrite
2133 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
2134 {
2135 if (State.fCanResume)
2136 {
2137 Log4(("MemExit/%u: %08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting (%s)\n",
2138 pVCpu->idCpu, pHdr->Pc,
2139 pExit->MemoryAccess.Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
2140 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
2141 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pExit->MemoryAccess.Header.InterceptAccessType]));
2142 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
2143 pHdr->Pc, uHostTsc);
2144 return VINF_SUCCESS;
2145 }
2146 }
2147 Log4(("MemExit/%u: %08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating (%s)\n",
2148 pVCpu->idCpu, pHdr->Pc,
2149 pExit->MemoryAccess.Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
2150 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
2151 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pExit->MemoryAccess.Header.InterceptAccessType]));
2152 }
2153 else
2154 Log4(("MemExit/%u: %08RX64: %RGp rc=%Rrc%s; emulating (%s)\n",
2155 pVCpu->idCpu, pHdr->Pc,
2156 pExit->MemoryAccess.Gpa, rc, State.fDidSomething ? " modified-backing" : "",
2157 g_apszHvInterceptAccessTypes[pExit->MemoryAccess.Header.InterceptAccessType]));
2158
2159 /*
2160 * Emulate the memory access, either access handler or special memory.
2161 */
2162 PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
2163 pExit->MemoryAccess.Header.InterceptAccessType == WHvMemoryAccessWrite
2164 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
2165 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
2166 pHdr->Pc, uHostTsc);
2167 nemR3WinCopyStateFromArmHeader(pVCpu, &pExit->MemoryAccess.Header);
2168 RT_NOREF_PV(pExitRec);
2169 rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
2170 AssertRCReturn(rc, rc);
2171
2172#ifdef LOG_ENABLED
2173 uint8_t const cbInstr = pExit->MemoryAccess.InstructionByteCount;
2174 RTGCPTR const GCPtrVa = pExit->MemoryAccess.Gva;
2175#endif
2176 RTGCPHYS const GCPhys = pExit->MemoryAccess.Gpa;
2177 uint64_t const uIss = pExit->MemoryAccess.Syndrome;
2178 bool fIsv = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_ISV);
2179 bool fL2Fault = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_S1PTW);
2180 bool fWrite = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_WNR);
2181 bool f64BitReg = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SF);
2182 bool fSignExtend = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SSE);
2183 uint8_t uReg = ARMV8_EC_ISS_DATA_ABRT_SRT_GET(uIss);
2184 uint8_t uAcc = ARMV8_EC_ISS_DATA_ABRT_SAS_GET(uIss);
2185 size_t cbAcc = nemR3WinGetByteCountFromSas(uAcc);
2186 LogFlowFunc(("fIsv=%RTbool fL2Fault=%RTbool fWrite=%RTbool f64BitReg=%RTbool fSignExtend=%RTbool uReg=%u uAcc=%u GCPtrDataAbrt=%RGv GCPhys=%RGp cbInstr=%u\n",
2187 fIsv, fL2Fault, fWrite, f64BitReg, fSignExtend, uReg, uAcc, GCPtrVa, GCPhys, cbInstr));
2188
2189 RT_NOREF(fL2Fault);
2190
2191 VBOXSTRICTRC rcStrict;
2192 if (fIsv)
2193 {
2194 EMHistoryAddExit(pVCpu,
2195 fWrite
2196 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
2197 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
2198 pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());
2199
2200 uint64_t u64Val = 0;
2201 if (fWrite)
2202 {
2203 u64Val = nemR3WinGetGReg(pVCpu, uReg);
2204 rcStrict = PGMPhysWrite(pVM, GCPhys, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
2205 Log4(("MmioExit/%u: %08RX64: WRITE %RGp LB %u, %.*Rhxs -> rcStrict=%Rrc\n",
2206 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhys, cbAcc, cbAcc,
2207 &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
2208 }
2209 else
2210 {
2211 rcStrict = PGMPhysRead(pVM, GCPhys, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
2212 Log4(("MmioExit/%u: %08RX64: READ %RGp LB %u -> %.*Rhxs rcStrict=%Rrc\n",
2213 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhys, cbAcc, cbAcc,
2214 &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
2215 if (rcStrict == VINF_SUCCESS)
2216 nemR3WinSetGReg(pVCpu, uReg, f64BitReg, fSignExtend, u64Val);
2217 }
2218 }
2219 else
2220 {
2221 /** @todo Our UEFI firmware accesses the flash region with the following instruction
2222 * when the NVRAM actually contains data:
2223 * ldrb w9, [x6, #-0x0001]!
2224 * This is too complicated for the hardware so the ISV bit is not set. Until there
2225 * is a proper IEM implementation we just handle this here for now to avoid annoying
2226 * users too much.
2227 */
2228 /* The following ASSUMES that the vCPU state is completely synced. */
2229
2230 /* Read instruction. */
2231 RTGCPTR GCPtrPage = pVCpu->cpum.GstCtx.Pc.u64 & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK;
2232 const void *pvPageR3 = NULL;
2233 PGMPAGEMAPLOCK PageMapLock;
2234
2235 rcStrict = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrPage, &pvPageR3, &PageMapLock);
2236 if (rcStrict == VINF_SUCCESS)
2237 {
2238 uint32_t u32Instr = *(uint32_t *)((uint8_t *)pvPageR3 + (pVCpu->cpum.GstCtx.Pc.u64 - GCPtrPage));
2239 PGMPhysReleasePageMappingLock(pVCpu->pVMR3, &PageMapLock);
2240
2241 DISSTATE Dis;
2242 rcStrict = DISInstrWithPrefetchedBytes((uintptr_t)pVCpu->cpum.GstCtx.Pc.u64, DISCPUMODE_ARMV8_A64, 0 /*fFilter - none */,
2243 &u32Instr, sizeof(u32Instr), NULL, NULL, &Dis, NULL);
2244 if (rcStrict == VINF_SUCCESS)
2245 {
2246 if ( Dis.pCurInstr->uOpcode == OP_ARMV8_A64_LDRB
2247 && Dis.aParams[0].armv8.enmType == kDisArmv8OpParmReg
2248 && Dis.aParams[0].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_32Bit
2249 && Dis.aParams[1].armv8.enmType == kDisArmv8OpParmAddrInGpr
2250 && Dis.aParams[1].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_64Bit
2251 && (Dis.aParams[1].fUse & DISUSE_PRE_INDEXED))
2252 {
2253 /* The fault address is already the final address. */
2254 uint8_t bVal = 0;
2255 rcStrict = PGMPhysRead(pVM, GCPhys, &bVal, 1, PGMACCESSORIGIN_HM);
2256 Log4(("MmioExit/%u: %08RX64: READ %#RGp LB %u -> %.*Rhxs rcStrict=%Rrc\n",
2257 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhys, sizeof(bVal), sizeof(bVal),
2258 &bVal, VBOXSTRICTRC_VAL(rcStrict) ));
2259 if (rcStrict == VINF_SUCCESS)
2260 {
2261 nemR3WinSetGReg(pVCpu, Dis.aParams[0].armv8.Op.Reg.idReg, false /*f64BitReg*/, false /*fSignExtend*/, bVal);
2262 /* Update the indexed register. */
2263 pVCpu->cpum.GstCtx.aGRegs[Dis.aParams[1].armv8.Op.Reg.idReg].x += Dis.aParams[1].armv8.u.offBase;
2264 }
2265 }
2266 /*
2267 * Seeing the following with the Windows 11/ARM TPM driver:
2268 * %fffff800e5342888 48 25 45 29 ldp w8, w9, [x10, #+0x0028]
2269 */
2270 else if ( Dis.pCurInstr->uOpcode == OP_ARMV8_A64_LDP
2271 && Dis.aParams[0].armv8.enmType == kDisArmv8OpParmReg
2272 && Dis.aParams[0].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_32Bit
2273 && Dis.aParams[1].armv8.enmType == kDisArmv8OpParmReg
2274 && Dis.aParams[1].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_32Bit
2275 && Dis.aParams[2].armv8.enmType == kDisArmv8OpParmAddrInGpr
2276 && Dis.aParams[2].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_64Bit)
2277 {
2278 /** @todo This is tricky to handle if the first register read returns something else than VINF_SUCCESS... */
2279 /* The fault address is already the final address. */
2280 uint32_t u32Val1 = 0;
2281 uint32_t u32Val2 = 0;
2282 rcStrict = PGMPhysRead(pVM, GCPhys, &u32Val1, sizeof(u32Val1), PGMACCESSORIGIN_HM);
2283 if (rcStrict == VINF_SUCCESS)
2284 rcStrict = PGMPhysRead(pVM, GCPhys + sizeof(uint32_t), &u32Val2, sizeof(u32Val2), PGMACCESSORIGIN_HM);
2285 Log4(("MmioExit/%u: %08RX64: READ %#RGp LB %u -> %.*Rhxs %.*Rhxs rcStrict=%Rrc\n",
2286 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhys, 2 * sizeof(uint32_t), sizeof(u32Val1),
2287 &u32Val1, sizeof(u32Val2), &u32Val2, VBOXSTRICTRC_VAL(rcStrict) ));
2288 if (rcStrict == VINF_SUCCESS)
2289 {
2290 nemR3WinSetGReg(pVCpu, Dis.aParams[0].armv8.Op.Reg.idReg, false /*f64BitReg*/, false /*fSignExtend*/, u32Val1);
2291 nemR3WinSetGReg(pVCpu, Dis.aParams[1].armv8.Op.Reg.idReg, false /*f64BitReg*/, false /*fSignExtend*/, u32Val2);
2292 }
2293 }
2294 else
2295 AssertFailedReturn(VERR_NOT_SUPPORTED);
2296 }
2297 }
2298 }
2299
2300 if (rcStrict == VINF_SUCCESS)
2301 pVCpu->cpum.GstCtx.Pc.u64 += sizeof(uint32_t); /** @todo Why is InstructionByteCount always 0? */
2302
2303 return rcStrict;
2304}
2305
2306
2307/**
2308 * Deals with memory access exits (WHvRunVpExitReasonMemoryAccess).
2309 *
2310 * @returns Strict VBox status code.
2311 * @param pVM The cross context VM structure.
2312 * @param pVCpu The cross context per CPU structure.
2313 * @param pExit The VM exit information to handle.
2314 * @sa nemHCWinHandleMessageMemory
2315 */
2316NEM_TMPL_STATIC VBOXSTRICTRC
2317nemR3WinHandleExitHypercall(PVMCC pVM, PVMCPUCC pVCpu, MY_WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2318{
2319 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2320
2321 /** @todo Raise exception to EL1 if PSCI not configured. */
2322 /** @todo Need a generic mechanism here to pass this to, GIM maybe?. */
2323 uint32_t uFunId = pExit->Hypercall.Immediate;
2324 bool fHvc64 = RT_BOOL(uFunId & ARM_SMCCC_FUNC_ID_64BIT); RT_NOREF(fHvc64);
2325 uint32_t uEntity = ARM_SMCCC_FUNC_ID_ENTITY_GET(uFunId);
2326 uint32_t uFunNum = ARM_SMCCC_FUNC_ID_NUM_GET(uFunId);
2327 if (uEntity == ARM_SMCCC_FUNC_ID_ENTITY_STD_SEC_SERVICE)
2328 {
2329 switch (uFunNum)
2330 {
2331 case ARM_PSCI_FUNC_ID_PSCI_VERSION:
2332 nemR3WinSetGReg(pVCpu, ARMV8_A64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_FUNC_ID_PSCI_VERSION_SET(1, 2));
2333 break;
2334 case ARM_PSCI_FUNC_ID_SYSTEM_OFF:
2335 rcStrict = VMR3PowerOff(pVM->pUVM);
2336 break;
2337 case ARM_PSCI_FUNC_ID_SYSTEM_RESET:
2338 case ARM_PSCI_FUNC_ID_SYSTEM_RESET2:
2339 {
2340 bool fHaltOnReset;
2341 int rc = CFGMR3QueryBool(CFGMR3GetChild(CFGMR3GetRoot(pVM), "PDM"), "HaltOnReset", &fHaltOnReset);
2342 if (RT_SUCCESS(rc) && fHaltOnReset)
2343 {
2344 Log(("nemHCLnxHandleExitHypercall: Halt On Reset!\n"));
2345 rcStrict = VINF_EM_HALT;
2346 }
2347 else
2348 {
2349 /** @todo pVM->pdm.s.fResetFlags = fFlags; */
2350 VM_FF_SET(pVM, VM_FF_RESET);
2351 rcStrict = VINF_EM_RESET;
2352 }
2353 break;
2354 }
2355 case ARM_PSCI_FUNC_ID_CPU_ON:
2356 {
2357 uint64_t u64TgtCpu = pExit->Hypercall.X[1];
2358 RTGCPHYS GCPhysExecAddr = pExit->Hypercall.X[2];
2359 uint64_t u64CtxId = pExit->Hypercall.X[3];
2360 VMMR3CpuOn(pVM, u64TgtCpu & 0xff, GCPhysExecAddr, u64CtxId);
2361 nemR3WinSetGReg(pVCpu, ARMV8_A64_REG_X0, true /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_STS_SUCCESS);
2362 break;
2363 }
2364 case ARM_PSCI_FUNC_ID_PSCI_FEATURES:
2365 {
2366 uint32_t u32FunNum = (uint32_t)pExit->Hypercall.X[1];
2367 switch (u32FunNum)
2368 {
2369 case ARM_PSCI_FUNC_ID_PSCI_VERSION:
2370 case ARM_PSCI_FUNC_ID_SYSTEM_OFF:
2371 case ARM_PSCI_FUNC_ID_SYSTEM_RESET:
2372 case ARM_PSCI_FUNC_ID_SYSTEM_RESET2:
2373 case ARM_PSCI_FUNC_ID_CPU_ON:
2374 nemR3WinSetGReg(pVCpu, ARMV8_A64_REG_X0,
2375 false /*f64BitReg*/, false /*fSignExtend*/,
2376 (uint64_t)ARM_PSCI_STS_SUCCESS);
2377 break;
2378 default:
2379 nemR3WinSetGReg(pVCpu, ARMV8_A64_REG_X0,
2380 false /*f64BitReg*/, false /*fSignExtend*/,
2381 (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
2382 }
2383 break;
2384 }
2385 default:
2386 nemR3WinSetGReg(pVCpu, ARMV8_A64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
2387 }
2388 }
2389 else
2390 nemR3WinSetGReg(pVCpu, ARMV8_A64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
2391
2392 /** @todo What to do if immediate is != 0? */
2393
2394 if (rcStrict == VINF_SUCCESS)
2395 pVCpu->cpum.GstCtx.Pc.u64 += sizeof(uint32_t);
2396
2397 return rcStrict;
2398}
2399
2400
2401/**
2402 * Deals with MSR access exits (WHvRunVpExitReasonUnrecoverableException).
2403 *
2404 * @returns Strict VBox status code.
2405 * @param pVM The cross context VM structure.
2406 * @param pVCpu The cross context per CPU structure.
2407 * @param pExit The VM exit information to handle.
2408 * @sa nemHCWinHandleMessageUnrecoverableException
2409 */
2410NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExitUnrecoverableException(PVMCC pVM, PVMCPUCC pVCpu, MY_WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2411{
2412#if 0
2413 /*
2414 * Just copy the state we've got and handle it in the loop for now.
2415 */
2416 nemR3WinCopyStateFromX64Header(pVCpu, &pExit->VpContext);
2417 Log(("TripleExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
2418 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags));
2419 RT_NOREF_PV(pVM);
2420 return VINF_EM_TRIPLE_FAULT;
2421#else
2422 /*
2423 * Let IEM decide whether this is really it.
2424 */
2425 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_UNRECOVERABLE_EXCEPTION),
2426 pExit->UnrecoverableException.Header.Pc, ASMReadTSC());
2427 nemR3WinCopyStateFromArmHeader(pVCpu, &pExit->UnrecoverableException.Header);
2428 AssertReleaseFailed();
2429 RT_NOREF_PV(pVM);
2430 return VINF_SUCCESS;
2431#endif
2432}
2433
2434
2435/**
2436 * Handles VM exits.
2437 *
2438 * @returns Strict VBox status code.
2439 * @param pVM The cross context VM structure.
2440 * @param pVCpu The cross context per CPU structure.
2441 * @param pExit The VM exit information to handle.
2442 * @sa nemHCWinHandleMessage
2443 */
2444NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExit(PVMCC pVM, PVMCPUCC pVCpu, MY_WHV_RUN_VP_EXIT_CONTEXT const *pExit)
2445{
2446#ifdef LOG_ENABLED
2447 if (LogIs3Enabled())
2448 {
2449 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, CPUMCTX_EXTRN_ALL);
2450 AssertRCReturn(rc, rc);
2451
2452 nemR3WinLogState(pVM, pVCpu);
2453 }
2454#endif
2455
2456 switch (pExit->ExitReason)
2457 {
2458 case WHvRunVpExitReasonUnmappedGpa:
2459 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemUnmapped);
2460 return nemR3WinHandleExitMemory(pVM, pVCpu, pExit);
2461
2462 case WHvRunVpExitReasonCanceled:
2463 Log4(("CanceledExit/%u\n", pVCpu->idCpu));
2464 return VINF_SUCCESS;
2465
2466 case WHvRunVpExitReasonHypercall:
2467 return nemR3WinHandleExitHypercall(pVM, pVCpu, pExit);
2468
2469 case 0x8001000c: /* WHvRunVpExitReasonArm64Reset */
2470 {
2471 if (pExit->Arm64Reset.ResetType == WHV_ARM64_RESET_CONTEXT_TYPE_POWER_OFF)
2472 return VMR3PowerOff(pVM->pUVM);
2473 else if (pExit->Arm64Reset.ResetType == WHV_ARM64_RESET_CONTEXT_TYPE_RESET)
2474 {
2475 VM_FF_SET(pVM, VM_FF_RESET);
2476 return VINF_EM_RESET;
2477 }
2478 else
2479 AssertLogRelFailedReturn(VERR_NEM_IPE_3);
2480 }
2481
2482 case WHvRunVpExitReasonUnrecoverableException:
2483 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitUnrecoverable);
2484 return nemR3WinHandleExitUnrecoverableException(pVM, pVCpu, pExit);
2485
2486 case WHvRunVpExitReasonUnsupportedFeature:
2487 case WHvRunVpExitReasonInvalidVpRegisterValue:
2488 LogRel(("Unimplemented exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit));
2489 AssertLogRelMsgFailedReturn(("Unexpected exit on CPU #%u: %#x\n%.32Rhxd\n",
2490 pVCpu->idCpu, pExit->ExitReason, pExit), VERR_NEM_IPE_3);
2491
2492 /* Undesired exits: */
2493 case WHvRunVpExitReasonNone:
2494 default:
2495 LogRel(("Unknown exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit));
2496 AssertLogRelMsgFailedReturn(("Unknown exit on CPU #%u: %#x!\n", pVCpu->idCpu, pExit->ExitReason), VERR_NEM_IPE_3);
2497 }
2498}
2499
2500
2501VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu)
2502{
2503 LogFlow(("NEM/%u: %08RX64 pstate=%#08RX64 <=\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc, pVCpu->cpum.GstCtx.fPState));
2504#ifdef LOG_ENABLED
2505 if (LogIs3Enabled())
2506 nemR3WinLogState(pVM, pVCpu);
2507#endif
2508
2509 /*
2510 * Try switch to NEM runloop state.
2511 */
2512 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
2513 { /* likely */ }
2514 else
2515 {
2516 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
2517 LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
2518 return VINF_SUCCESS;
2519 }
2520
2521 if (pVCpu->nem.s.fSyncCntvRegs)
2522 {
2523 static const WHV_REGISTER_NAME s_aNames[2] = { WHvArm64RegisterCntvCtlEl0, WHvArm64RegisterCntvCvalEl0 };
2524 WHV_REGISTER_VALUE aRegs[RT_ELEMENTS(s_aNames)];
2525 aRegs[0].Reg64 = pVCpu->cpum.GstCtx.CntvCtlEl0;
2526 aRegs[1].Reg64 = pVCpu->cpum.GstCtx.CntvCValEl0;
2527
2528 HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, s_aNames, RT_ELEMENTS(s_aNames), aRegs);
2529 AssertLogRelMsgReturn(SUCCEEDED(hrc),
2530 ("WHvSetVirtualProcessorRegisters(%p, 0,{CNTV_CTL_EL0, CNTV_CVAL_EL0}, 2,) -> %Rhrc (Last=%#x/%u)\n",
2531 pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
2532 , VERR_NEM_IPE_9);
2533 pVCpu->nem.s.fSyncCntvRegs = false;
2534 }
2535
2536
2537 /*
2538 * The run loop.
2539 *
2540 * Current approach to state updating to use the sledgehammer and sync
2541 * everything every time. This will be optimized later.
2542 */
2543 const bool fSingleStepping = DBGFIsStepping(pVCpu);
2544 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2545 for (unsigned iLoop = 0;; iLoop++)
2546 {
2547 /*
2548 * Poll timers and run for a bit.
2549 *
2550 * With the VID approach (ring-0 or ring-3) we can specify a timeout here,
2551 * so we take the time of the next timer event and uses that as a deadline.
2552 * The rounding heuristics are "tuned" so that rhel5 (1K timer) will boot fine.
2553 */
2554 /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
2555 * the whole polling job when timers have changed... */
2556 uint64_t offDeltaIgnored;
2557 uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
2558 if ( !VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
2559 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
2560 {
2561 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_WAIT, VMCPUSTATE_STARTED_EXEC_NEM))
2562 {
2563 /* Ensure that Hyper-V has the whole state. */
2564 int rc2 = nemHCWinCopyStateToHyperV(pVM, pVCpu);
2565 AssertRCReturn(rc2, rc2);
2566
2567#ifdef LOG_ENABLED
2568 if (LogIsFlowEnabled())
2569 {
2570 static const WHV_REGISTER_NAME s_aNames[2] = { WHvArm64RegisterPc, WHvArm64RegisterPstate };
2571 WHV_REGISTER_VALUE aRegs[RT_ELEMENTS(s_aNames)] = { { { {0, 0} } } };
2572 WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, s_aNames, RT_ELEMENTS(s_aNames), aRegs);
2573 LogFlow(("NEM/%u: Entry @ %08RX64 pstate=%#RX64\n", pVCpu->idCpu, aRegs[0].Reg64, aRegs[1].Reg64));
2574 }
2575#endif
2576
2577 MY_WHV_RUN_VP_EXIT_CONTEXT ExitReason = {0};
2578 TMNotifyStartOfExecution(pVM, pVCpu);
2579
2580 HRESULT hrc = WHvRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, &ExitReason, sizeof(ExitReason));
2581
2582 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
2583 TMNotifyEndOfExecution(pVM, pVCpu, ASMReadTSC());
2584#ifdef LOG_ENABLED
2585 if (LogIsFlowEnabled())
2586 {
2587 static const WHV_REGISTER_NAME s_aNames[2] = { WHvArm64RegisterPc, WHvArm64RegisterPstate };
2588 WHV_REGISTER_VALUE aRegs[RT_ELEMENTS(s_aNames)] = { { { {0, 0} } } };
2589 WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, s_aNames, RT_ELEMENTS(s_aNames), aRegs);
2590 LogFlow(("NEM/%u: Exit @ %08RX64 pstate=%#RX64 Reason=%#x\n",
2591 pVCpu->idCpu, aRegs[0].Reg64, aRegs[1].Reg64, ExitReason.ExitReason));
2592 }
2593#endif
2594 if (SUCCEEDED(hrc))
2595 {
2596 /* Always sync the CNTV_CTL_EL0/CNTV_CVAL_EL0 registers, just like we do on macOS. */
2597 static const WHV_REGISTER_NAME s_aNames[2] = { WHvArm64RegisterCntvCtlEl0, WHvArm64RegisterCntvCvalEl0 };
2598 WHV_REGISTER_VALUE aRegs[RT_ELEMENTS(s_aNames)] = { { { {0, 0} } } };
2599 hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, s_aNames, RT_ELEMENTS(s_aNames), aRegs);
2600 AssertLogRelMsgReturn(SUCCEEDED(hrc),
2601 ("WHvGetVirtualProcessorRegisters(%p, 0,{CNTV_CTL_EL0, CNTV_CVAL_EL0}, 2,) -> %Rhrc (Last=%#x/%u)\n",
2602 pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
2603 , VERR_NEM_IPE_9);
2604
2605 pVCpu->cpum.GstCtx.CntvCtlEl0 = aRegs[0].Reg64;
2606 pVCpu->cpum.GstCtx.CntvCValEl0 = aRegs[1].Reg64;
2607
2608 /*
2609 * Deal with the message.
2610 */
2611 rcStrict = nemR3WinHandleExit(pVM, pVCpu, &ExitReason);
2612 if (rcStrict == VINF_SUCCESS)
2613 { /* hopefully likely */ }
2614 else
2615 {
2616 LogFlow(("NEM/%u: breaking: nemR3WinHandleExit -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
2617 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
2618 break;
2619 }
2620 }
2621 else
2622 AssertLogRelMsgFailedReturn(("WHvRunVirtualProcessor failed for CPU #%u: %#x (%u)\n",
2623 pVCpu->idCpu, hrc, GetLastError()),
2624 VERR_NEM_IPE_0);
2625
2626 /*
2627 * If no relevant FFs are pending, loop.
2628 */
2629 if ( !VM_FF_IS_ANY_SET( pVM, !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
2630 && !VMCPU_FF_IS_ANY_SET(pVCpu, !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
2631 continue;
2632
2633 /** @todo Try handle pending flags, not just return to EM loops. Take care
2634 * not to set important RCs here unless we've handled a message. */
2635 LogFlow(("NEM/%u: breaking: pending FF (%#x / %#RX64)\n",
2636 pVCpu->idCpu, pVM->fGlobalForcedActions, (uint64_t)pVCpu->fLocalForcedActions));
2637 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPost);
2638 }
2639 else
2640 {
2641 LogFlow(("NEM/%u: breaking: canceled %d (pre exec)\n", pVCpu->idCpu, VMCPU_GET_STATE(pVCpu) ));
2642 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnCancel);
2643 }
2644 }
2645 else
2646 {
2647 LogFlow(("NEM/%u: breaking: pending FF (pre exec)\n", pVCpu->idCpu));
2648 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPre);
2649 }
2650 break;
2651 } /* the run loop */
2652
2653
2654 /*
2655 * If the CPU is running, make sure to stop it before we try sync back the
2656 * state and return to EM. We don't sync back the whole state if we can help it.
2657 */
2658 if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
2659 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
2660
2661 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL)
2662 {
2663 /* Try anticipate what we might need. */
2664 uint64_t fImport = IEM_CPUMCTX_EXTRN_MUST_MASK;
2665 if ( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
2666 || RT_FAILURE(rcStrict))
2667 fImport = CPUMCTX_EXTRN_ALL;
2668 else if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ))
2669 fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;
2670
2671 if (pVCpu->cpum.GstCtx.fExtrn & fImport)
2672 {
2673 int rc2 = nemHCWinCopyStateFromHyperV(pVM, pVCpu, fImport);
2674 if (RT_SUCCESS(rc2))
2675 pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
2676 else if (RT_SUCCESS(rcStrict))
2677 rcStrict = rc2;
2678 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
2679 pVCpu->cpum.GstCtx.fExtrn = 0;
2680 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
2681 }
2682 else
2683 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
2684 }
2685 else
2686 {
2687 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
2688 pVCpu->cpum.GstCtx.fExtrn = 0;
2689 }
2690
2691#if 0
2692 UINT32 cbWritten;
2693 WHV_ARM64_LOCAL_INTERRUPT_CONTROLLER_STATE IntrState;
2694 HRESULT hrc = WHvGetVirtualProcessorState(pVM->nem.s.hPartition, pVCpu->idCpu, WHvVirtualProcessorStateTypeInterruptControllerState2,
2695 &IntrState, sizeof(IntrState), &cbWritten);
2696 AssertLogRelMsgReturn(SUCCEEDED(hrc),
2697 ("WHvGetVirtualProcessorState(%p, %u,WHvVirtualProcessorStateTypeInterruptControllerState2,) -> %Rhrc (Last=%#x/%u)\n",
2698 pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
2699 , VERR_NEM_GET_REGISTERS_FAILED);
2700 LogFlowFunc(("IntrState: cbWritten=%u\n"));
2701 for (uint32_t i = 0; i < RT_ELEMENTS(IntrState.BankedInterruptState); i++)
2702 {
2703 WHV_ARM64_INTERRUPT_STATE *pState = &IntrState.BankedInterruptState[i];
2704 LogFlowFunc(("IntrState: Intr %u:\n"
2705 " Enabled=%RTbool\n"
2706 " EdgeTriggered=%RTbool\n"
2707 " Asserted=%RTbool\n"
2708 " SetPending=%RTbool\n"
2709 " Active=%RTbool\n"
2710 " Direct=%RTbool\n"
2711 " GicrIpriorityrConfigured=%u\n"
2712 " GicrIpriorityrActive=%u\n",
2713 i, pState->Enabled, pState->EdgeTriggered, pState->Asserted, pState->SetPending, pState->Active, pState->Direct,
2714 pState->GicrIpriorityrConfigured, pState->GicrIpriorityrActive));
2715 }
2716#endif
2717
2718 LogFlow(("NEM/%u: %08RX64 pstate=%#08RX64 => %Rrc\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64,
2719 pVCpu->cpum.GstCtx.fPState, VBOXSTRICTRC_VAL(rcStrict) ));
2720 return rcStrict;
2721}
2722
2723
2724VMMR3_INT_DECL(bool) NEMR3CanExecuteGuest(PVM pVM, PVMCPU pVCpu)
2725{
2726 Assert(VM_IS_NEM_ENABLED(pVM));
2727 RT_NOREF(pVM, pVCpu);
2728 return true;
2729}
2730
2731
/**
 * Halt notification, only invoked while the EMT is in the wait-for-SIPI state.
 *
 * @returns VINF_EM_RESCHEDULE so the EMT re-enters the normal run loop.
 * @param   pVM     The cross context VM structure (unused).
 * @param   pVCpu   The cross context per CPU structure.
 */
VMMR3_INT_DECL(int) NEMR3Halt(PVM pVM, PVMCPU pVCpu)
{
    Assert(EMGetState(pVCpu) == EMSTATE_WAIT_SIPI);

    /*
     * Force the vCPU to get out of the SIPI state and into the normal runloop
     * as Hyper-V doesn't cause VM exits for PSCI calls so we wouldn't notice
     * when the guest brings APs online.
     * Instead we force the EMT to run the vCPU through Hyper-V which manages the state.
     */
    RT_NOREF(pVM);
    EMSetState(pVCpu, EMSTATE_HALTED);
    return VINF_EM_RESCHEDULE;
}
2746
2747
2748bool nemR3NativeSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
2749{
2750 NOREF(pVM); NOREF(pVCpu); NOREF(fEnable);
2751 return false;
2752}
2753
2754
2755void nemR3NativeNotifyFF(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
2756{
2757 Log8(("nemR3NativeNotifyFF: canceling %u\n", pVCpu->idCpu));
2758 if (pVM->nem.s.fCreatedEmts)
2759 {
2760 HRESULT hrc = WHvCancelRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, 0);
2761 AssertMsg(SUCCEEDED(hrc), ("WHvCancelRunVirtualProcessor -> hrc=%Rhrc\n", hrc));
2762 RT_NOREF_PV(hrc);
2763 }
2764 RT_NOREF_PV(fFlags);
2765}
2766
2767
2768DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChanged(PVM pVM, bool fUseDebugLoop)
2769{
2770 RT_NOREF(pVM, fUseDebugLoop);
2771 return false;
2772}
2773
2774
2775DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu, bool fUseDebugLoop)
2776{
2777 RT_NOREF(pVM, pVCpu, fUseDebugLoop);
2778 return false;
2779}
2780
2781
2782DECLINLINE(int) nemR3NativeGCPhys2R3PtrReadOnly(PVM pVM, RTGCPHYS GCPhys, const void **ppv)
2783{
2784 PGMPAGEMAPLOCK Lock;
2785 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, ppv, &Lock);
2786 if (RT_SUCCESS(rc))
2787 PGMPhysReleasePageMappingLock(pVM, &Lock);
2788 return rc;
2789}
2790
2791
2792DECLINLINE(int) nemR3NativeGCPhys2R3PtrWriteable(PVM pVM, RTGCPHYS GCPhys, void **ppv)
2793{
2794 PGMPAGEMAPLOCK Lock;
2795 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, ppv, &Lock);
2796 if (RT_SUCCESS(rc))
2797 PGMPhysReleasePageMappingLock(pVM, &Lock);
2798 return rc;
2799}
2800
2801
2802VMMR3_INT_DECL(int) NEMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvR3,
2803 uint8_t *pu2State, uint32_t *puNemRange)
2804{
2805 Log5(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p pu2State=%p (%d) puNemRange=%p (%d)\n",
2806 GCPhys, cb, pvR3, pu2State, pu2State, puNemRange, *puNemRange));
2807
2808 *pu2State = UINT8_MAX;
2809 RT_NOREF(puNemRange);
2810
2811 if (pvR3)
2812 {
2813 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
2814 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvR3, GCPhys, cb,
2815 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagWrite | WHvMapGpaRangeFlagExecute);
2816 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
2817 if (SUCCEEDED(hrc))
2818 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
2819 else
2820 {
2821 LogRel(("NEMR3NotifyPhysRamRegister: GCPhys=%RGp LB %RGp pvR3=%p hrc=%Rhrc (%#x) Last=%#x/%u\n",
2822 GCPhys, cb, pvR3, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2823 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
2824 return VERR_NEM_MAP_PAGES_FAILED;
2825 }
2826 }
2827 return VINF_SUCCESS;
2828}
2829
2830
2831VMMR3_INT_DECL(bool) NEMR3IsMmio2DirtyPageTrackingSupported(PVM pVM)
2832{
2833 RT_NOREF(pVM);
2834 return g_pfnWHvQueryGpaRangeDirtyBitmap != NULL;
2835}
2836
2837
/**
 * Early notification that an MMIO/MMIO2 range is being mapped.
 *
 * Unmaps any RAM the range replaces, and when MMIO2 backing is supplied maps
 * it into the Hyper-V partition (with dirty page tracking if requested and
 * available).
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      Start of the range.
 * @param   cb          Size of the range.
 * @param   fFlags      NEM_NOTIFY_PHYS_MMIO_EX_F_XXX.
 * @param   pvRam       The RAM being replaced (logged only here).
 * @param   pvMmio2     Ring-3 mapping of the MMIO2 backing, NULL for pure MMIO.
 * @param   pu2State    Where to return the NEM page state for the range.
 * @param   puNemRange  Unused by this backend (only logged).
 */
VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
                                                  void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
{
    Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p (%d) puNemRange=%p (%#x)\n",
          GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, *pu2State, puNemRange, puNemRange ? *puNemRange : UINT32_MAX));
    RT_NOREF(puNemRange);

    /*
     * Unmap the RAM we're replacing.
     */
    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
    {
        STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRange, a);
        HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, cb);
        STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRange, a);
        if (SUCCEEDED(hrc))
        { /* likely */ }
        else if (pvMmio2)
            /* Non-fatal when MMIO2 follows: the map below overrides the range anyway. */
            LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> hrc=%Rhrc (%#x) Last=%#x/%u (ignored)\n",
                    GCPhys, cb, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
        else
        {
            LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> hrc=%Rhrc (%#x) Last=%#x/%u\n",
                    GCPhys, cb, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
            STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
            return VERR_NEM_UNMAP_PAGES_FAILED;
        }
    }

    /*
     * Map MMIO2 if any.
     */
    if (pvMmio2)
    {
        Assert(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2);
        WHV_MAP_GPA_RANGE_FLAGS fWHvFlags = WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagWrite | WHvMapGpaRangeFlagExecute;
        if ((fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES) && g_pfnWHvQueryGpaRangeDirtyBitmap)
            fWHvFlags |= WHvMapGpaRangeFlagTrackDirtyPages;
        STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
        HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvMmio2, GCPhys, cb, fWHvFlags);
        STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
        if (SUCCEEDED(hrc))
            *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
        else
        {
            LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x pvMmio2=%p fWHvFlags=%#x: Map -> hrc=%Rhrc (%#x) Last=%#x/%u\n",
                    GCPhys, cb, fFlags, pvMmio2, fWHvFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
            STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
            return VERR_NEM_MAP_PAGES_FAILED;
        }
    }
    else
    {
        /* Pure MMIO: nothing is mapped, accesses will exit to the device emulation. */
        Assert(!(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2));
        *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
    }
    RT_NOREF(pvRam);
    return VINF_SUCCESS;
}
2897
2898
2899VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
2900 void *pvRam, void *pvMmio2, uint32_t *puNemRange)
2901{
2902 RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, puNemRange);
2903 return VINF_SUCCESS;
2904}
2905
2906
2907VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExUnmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, void *pvRam,
2908 void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
2909{
2910 int rc = VINF_SUCCESS;
2911 Log5(("NEMR3NotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p uNemRange=%#x (%#x)\n",
2912 GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, puNemRange, *puNemRange));
2913
2914 /*
2915 * Unmap the MMIO2 pages.
2916 */
2917 /** @todo If we implement aliasing (MMIO2 page aliased into MMIO range),
2918 * we may have more stuff to unmap even in case of pure MMIO... */
2919 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2)
2920 {
2921 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRange, a);
2922 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, cb);
2923 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRange, a);
2924 if (FAILED(hrc))
2925 {
2926 LogRel2(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> hrc=%Rhrc (%#x) Last=%#x/%u (ignored)\n",
2927 GCPhys, cb, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2928 rc = VERR_NEM_UNMAP_PAGES_FAILED;
2929 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
2930 }
2931 }
2932
2933 /*
2934 * Restore the RAM we replaced.
2935 */
2936 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
2937 {
2938 AssertPtr(pvRam);
2939 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
2940 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvRam, GCPhys, cb,
2941 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagWrite | WHvMapGpaRangeFlagExecute);
2942 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
2943 if (SUCCEEDED(hrc))
2944 { /* likely */ }
2945 else
2946 {
2947 LogRel(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp pvMmio2=%p hrc=%Rhrc (%#x) Last=%#x/%u\n",
2948 GCPhys, cb, pvMmio2, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2949 rc = VERR_NEM_MAP_PAGES_FAILED;
2950 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
2951 }
2952 if (pu2State)
2953 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
2954 }
2955 /* Mark the pages as unmapped if relevant. */
2956 else if (pu2State)
2957 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
2958
2959 RT_NOREF(pvMmio2, puNemRange);
2960 return rc;
2961}
2962
2963
2964VMMR3_INT_DECL(int) NEMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t uNemRange,
2965 void *pvBitmap, size_t cbBitmap)
2966{
2967 Assert(VM_IS_NEM_ENABLED(pVM));
2968 AssertReturn(g_pfnWHvQueryGpaRangeDirtyBitmap, VERR_INTERNAL_ERROR_2);
2969 Assert(cbBitmap == (uint32_t)cbBitmap);
2970 RT_NOREF(uNemRange);
2971
2972 /* This is being profiled by PGM, see /PGM/Mmio2QueryAndResetDirtyBitmap. */
2973 HRESULT hrc = WHvQueryGpaRangeDirtyBitmap(pVM->nem.s.hPartition, GCPhys, cb, (UINT64 *)pvBitmap, (uint32_t)cbBitmap);
2974 if (SUCCEEDED(hrc))
2975 return VINF_SUCCESS;
2976
2977 AssertLogRelMsgFailed(("GCPhys=%RGp LB %RGp pvBitmap=%p LB %#zx hrc=%Rhrc (%#x) Last=%#x/%u\n",
2978 GCPhys, cb, pvBitmap, cbBitmap, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
2979 return VERR_NEM_QUERY_DIRTY_BITMAP_FAILED;
2980}
2981
2982
/**
 * Early notification that a ROM range is being registered.
 *
 * Nothing is mapped here; the state is initialized and the pages are left for
 * later handling (the disabled code below shows the previously considered
 * eager per-page mapping approach).
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      Start of the ROM range.
 * @param   cb          Size of the ROM range.
 * @param   pvPages     Pointer to the ROM page backing (unused here).
 * @param   fFlags      NEM_NOTIFY_PHYS_ROM_F_XXX (unused here).
 * @param   pu2State    Where to return the NEM page state for the range.
 * @param   puNemRange  Where to return the NEM range id, set to zero.
 */
VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages, uint32_t fFlags,
                                                    uint8_t *pu2State, uint32_t *puNemRange)
{
    Log5(("nemR3NativeNotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags));
    *pu2State = UINT8_MAX;
    *puNemRange = 0;

#if 0 /* Let's not do this after all.  We'll protection change notifications for each page and if not we'll map them lazily. */
    RTGCPHYS const cPages = cb >> X86_PAGE_SHIFT;
    for (RTGCPHYS iPage = 0; iPage < cPages; iPage++, GCPhys += X86_PAGE_SIZE)
    {
        const void *pvPage;
        int rc = nemR3NativeGCPhys2R3PtrReadOnly(pVM, GCPhys, &pvPage);
        if (RT_SUCCESS(rc))
        {
            HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, (void *)pvPage, GCPhys, X86_PAGE_SIZE,
                                         WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
            if (SUCCEEDED(hrc))
            { /* likely */ }
            else
            {
                LogRel(("nemR3NativeNotifyPhysRomRegisterEarly: GCPhys=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
                        GCPhys, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
                return VERR_NEM_INIT_FAILED;
            }
        }
        else
        {
            LogRel(("nemR3NativeNotifyPhysRomRegisterEarly: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));
            return rc;
        }
    }
    RT_NOREF_PV(fFlags);
#else
    RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags);
#endif
    return VINF_SUCCESS;
}
3021
3022
3023VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages,
3024 uint32_t fFlags, uint8_t *pu2State, uint32_t *puNemRange)
3025{
3026 Log5(("nemR3NativeNotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n",
3027 GCPhys, cb, pvPages, fFlags, pu2State, *pu2State, puNemRange, *puNemRange));
3028 *pu2State = UINT8_MAX;
3029
3030 /*
3031 * (Re-)map readonly.
3032 */
3033 AssertPtrReturn(pvPages, VERR_INVALID_POINTER);
3034 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
3035 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvPages, GCPhys, cb, WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
3036 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
3037 if (SUCCEEDED(hrc))
3038 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
3039 else
3040 {
3041 LogRel(("nemR3NativeNotifyPhysRomRegisterEarly: GCPhys=%RGp LB %RGp pvPages=%p fFlags=%#x hrc=%Rhrc (%#x) Last=%#x/%u\n",
3042 GCPhys, cb, pvPages, fFlags, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
3043 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
3044 return VERR_NEM_MAP_PAGES_FAILED;
3045 }
3046 RT_NOREF(fFlags, puNemRange);
3047 return VINF_SUCCESS;
3048}
3049
3050VMMR3_INT_DECL(void) NEMR3NotifySetA20(PVMCPU pVCpu, bool fEnabled)
3051{
3052 Log(("nemR3NativeNotifySetA20: fEnabled=%RTbool\n", fEnabled));
3053 Assert(VM_IS_NEM_ENABLED(pVCpu->CTX_SUFF(pVM)));
3054 RT_NOREF(pVCpu, fEnabled);
3055}
3056
3057
3058void nemHCNativeNotifyHandlerPhysicalRegister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
3059{
3060 Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
3061 NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb);
3062}
3063
3064
3065VMM_INT_DECL(void) NEMHCNotifyHandlerPhysicalDeregister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
3066 RTR3PTR pvMemR3, uint8_t *pu2State)
3067{
3068 Log5(("NEMHCNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d pvMemR3=%p pu2State=%p (%d)\n",
3069 GCPhys, cb, enmKind, pvMemR3, pu2State, *pu2State));
3070
3071 *pu2State = UINT8_MAX;
3072 if (pvMemR3)
3073 {
3074 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRange, a);
3075 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvMemR3, GCPhys, cb,
3076 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute | WHvMapGpaRangeFlagWrite);
3077 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRange, a);
3078 if (SUCCEEDED(hrc))
3079 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
3080 else
3081 AssertLogRelMsgFailed(("NEMHCNotifyHandlerPhysicalDeregister: WHvMapGpaRange(,%p,%RGp,%RGp,) -> %Rhrc\n",
3082 pvMemR3, GCPhys, cb, hrc));
3083 }
3084 RT_NOREF(enmKind);
3085}
3086
3087
3088void nemHCNativeNotifyHandlerPhysicalModify(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
3089 RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
3090{
3091 Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
3092 GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
3093 NOREF(pVM); NOREF(enmKind); NOREF(GCPhysOld); NOREF(GCPhysNew); NOREF(cb); NOREF(fRestoreAsRAM);
3094}
3095
3096
/**
 * Worker that maps pages into Hyper-V.
 *
 * This is used by the PGM physical page notifications as well as the memory
 * access VMEXIT handlers.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context virtual CPU structure of the
 *                          calling EMT.
 * @param   GCPhysSrc       The source page address.
 * @param   GCPhysDst       The hyper-V destination page.  This may differ from
 *                          GCPhysSrc when A20 is disabled.
 * @param   fPageProt       NEM_PAGE_PROT_XXX.
 * @param   pu2State        Our page state (input/output).
 * @param   fBackingChanged Set if the page backing is being changed.
 * @thread  EMT(pVCpu)
 */
NEM_TMPL_STATIC int nemHCNativeSetPhysPage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
                                           uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged)
{
    /*
     * Looks like we need to unmap a page before we can change the backing
     * or even modify the protection.  This is going to be *REALLY* efficient.
     * PGM lends us two bits to keep track of the state here.
     */
    RT_NOREF(pVCpu);
    /* Derive the target state from the requested protection: write implies
       writable, read-only implies readable, otherwise unmapped. */
    uint8_t const u2OldState = *pu2State;
    uint8_t const u2NewState = fPageProt & NEM_PAGE_PROT_WRITE ? NEM_WIN_PAGE_STATE_WRITABLE
                             : fPageProt & NEM_PAGE_PROT_READ  ? NEM_WIN_PAGE_STATE_READABLE : NEM_WIN_PAGE_STATE_UNMAPPED;
    if (   fBackingChanged
        || u2NewState != u2OldState)
    {
        /* Unmap the currently mapped page first; Hyper-V has no in-place
           protection-change API used here. */
        if (u2OldState > NEM_WIN_PAGE_STATE_UNMAPPED)
        {
            STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
            HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst, X86_PAGE_SIZE);
            STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
            if (SUCCEEDED(hrc))
            {
                *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
                STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
                uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
                if (u2NewState == NEM_WIN_PAGE_STATE_UNMAPPED)
                {
                    /* Target state is unmapped, so we're done. */
                    Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n",
                          GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
                    return VINF_SUCCESS;
                }
            }
            else
            {
                STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
                LogRel(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
                        GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
                return VERR_NEM_INIT_FAILED;
            }
        }
    }

    /*
     * Writeable mapping?
     */
    if (fPageProt & NEM_PAGE_PROT_WRITE)
    {
        void *pvPage;
        int rc = nemR3NativeGCPhys2R3PtrWriteable(pVM, GCPhysSrc, &pvPage);
        if (RT_SUCCESS(rc))
        {
            /* Map with full read/write/execute access. */
            HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvPage, GCPhysDst, X86_PAGE_SIZE,
                                         WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute | WHvMapGpaRangeFlagWrite);
            if (SUCCEEDED(hrc))
            {
                *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
                STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPage);
                uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
                Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
                      GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
                return VINF_SUCCESS;
            }
            STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
            LogRel(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
                    GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
            return VERR_NEM_INIT_FAILED;
        }
        LogRel(("nemHCNativeSetPhysPage/writable: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
        return rc;
    }

    /*
     * Read-only mapping?
     */
    if (fPageProt & NEM_PAGE_PROT_READ)
    {
        const void *pvPage;
        int rc = nemR3NativeGCPhys2R3PtrReadOnly(pVM, GCPhysSrc, &pvPage);
        if (RT_SUCCESS(rc))
        {
            /* Map read + execute only (no write flag). */
            STAM_REL_PROFILE_START(&pVM->nem.s.StatProfMapGpaRangePage, a);
            HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, (void *)pvPage, GCPhysDst, X86_PAGE_SIZE,
                                         WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
            STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfMapGpaRangePage, a);
            if (SUCCEEDED(hrc))
            {
                *pu2State = NEM_WIN_PAGE_STATE_READABLE;
                STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPage);
                uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
                Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
                      GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
                return VINF_SUCCESS;
            }
            STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
            LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
                    GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
            return VERR_NEM_INIT_FAILED;
        }
        LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
        return rc;
    }

    /* We already unmapped it above. */
    *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
    return VINF_SUCCESS;
}
3218
3219
3220NEM_TMPL_STATIC int nemHCJustUnmapPageFromHyperV(PVMCC pVM, RTGCPHYS GCPhysDst, uint8_t *pu2State)
3221{
3222 if (*pu2State <= NEM_WIN_PAGE_STATE_UNMAPPED)
3223 {
3224 Log5(("nemHCJustUnmapPageFromHyperV: %RGp == unmapped\n", GCPhysDst));
3225 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
3226 return VINF_SUCCESS;
3227 }
3228
3229 STAM_REL_PROFILE_START(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
3230 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, X86_PAGE_SIZE);
3231 STAM_REL_PROFILE_STOP(&pVM->nem.s.StatProfUnmapGpaRangePage, a);
3232 if (SUCCEEDED(hrc))
3233 {
3234 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
3235 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
3236 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
3237 Log5(("nemHCJustUnmapPageFromHyperV: %RGp => unmapped (total %u)\n", GCPhysDst, cMappedPages));
3238 return VINF_SUCCESS;
3239 }
3240 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
3241 LogRel(("nemHCJustUnmapPageFromHyperV(%RGp): failed! hrc=%Rhrc (%#x) Last=%#x/%u\n",
3242 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
3243 return VERR_NEM_IPE_6;
3244}
3245
3246
3247int nemHCNativeNotifyPhysPageAllocated(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
3248 PGMPAGETYPE enmType, uint8_t *pu2State)
3249{
3250 Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
3251 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
3252 RT_NOREF_PV(HCPhys); RT_NOREF_PV(enmType);
3253
3254 int rc;
3255 RT_NOREF_PV(fPageProt);
3256 rc = nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
3257 return rc;
3258}
3259
3260
3261VMM_INT_DECL(void) NEMHCNotifyPhysPageProtChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, RTR3PTR pvR3, uint32_t fPageProt,
3262 PGMPAGETYPE enmType, uint8_t *pu2State)
3263{
3264 Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
3265 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
3266 Assert(VM_IS_NEM_ENABLED(pVM));
3267 RT_NOREF(HCPhys, enmType, pvR3);
3268
3269 RT_NOREF_PV(fPageProt);
3270 nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
3271}
3272
3273
3274VMM_INT_DECL(void) NEMHCNotifyPhysPageChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
3275 RTR3PTR pvNewR3, uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
3276{
3277 Log5(("nemHCNativeNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp pvNewR3=%p fPageProt=%#x enmType=%d *pu2State=%d\n",
3278 GCPhys, HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType, *pu2State));
3279 Assert(VM_IS_NEM_ENABLED(pVM));
3280 RT_NOREF(HCPhysPrev, HCPhysNew, pvNewR3, enmType);
3281
3282 RT_NOREF_PV(fPageProt);
3283 nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
3284}
3285
3286
3287/**
3288 * Returns features supported by the NEM backend.
3289 *
3290 * @returns Flags of features supported by the native NEM backend.
3291 * @param pVM The cross context VM structure.
3292 */
3293VMM_INT_DECL(uint32_t) NEMHCGetFeatures(PVMCC pVM)
3294{
3295 RT_NOREF(pVM);
3296 /** @todo Is NEM_FEAT_F_FULL_GST_EXEC always true? */
3297 return NEM_FEAT_F_NESTED_PAGING | NEM_FEAT_F_FULL_GST_EXEC;
3298}
3299
3300
3301/** @page pg_nem_win_aarmv8 NEM/win - Native Execution Manager, Windows.
3302 *
3303 * Open questions:
3304 * - InstructionByteCount and InstructionBytes for unmapped GPA exit are zero...
3305 */
3306
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette