VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp@ 91694

Last change on this file since 91694 was 91694, checked in by vboxsync, 3 years ago

VMM/NEM: Made it build without NEM_WIN_USE_HYPERCALLS_FOR_PAGES again. #ifdef'ed a lot more based on NEM_WIN_USE_HYPERCALLS_FOR_PAGES (basically ring-0 ends up as a stub if it's not defined). [fixes] bugref:10118

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 137.4 KB
1/* $Id: NEMR0Native-win.cpp 91694 2021-10-12 14:12:37Z vboxsync $ */
2/** @file
3 * NEM - Native execution manager, native ring-0 Windows backend.
4 */
5
6/*
7 * Copyright (C) 2018-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_NEM
23#define VMCPU_INCL_CPUM_GST_CTX
24#include <iprt/nt/nt.h>
25#include <iprt/nt/hyperv.h>
26#include <iprt/nt/vid.h>
27#include <winerror.h>
28
29#include <VBox/vmm/nem.h>
30#include <VBox/vmm/iem.h>
31#include <VBox/vmm/em.h>
32#include <VBox/vmm/apic.h>
33#include <VBox/vmm/pdm.h>
34#include <VBox/vmm/dbgftrace.h>
35#include "NEMInternal.h"
36#include <VBox/vmm/gvm.h>
37#include <VBox/vmm/vmcc.h>
38#include <VBox/vmm/gvmm.h>
39#include <VBox/param.h>
40
41#include <iprt/ctype.h>
42#include <iprt/critsect.h>
43#include <iprt/dbg.h>
44#include <iprt/mem.h>
45#include <iprt/memobj.h>
46#include <iprt/string.h>
47#include <iprt/time.h>
48#define PIMAGE_NT_HEADERS32 PIMAGE_NT_HEADERS32_PECOFF
49#include <iprt/formats/pecoff.h>
50
51
52/* Assert compile context sanity. */
53#ifndef RT_OS_WINDOWS
54# error "Windows only file!"
55#endif
56#ifndef RT_ARCH_AMD64
57# error "AMD64 only file!"
58#endif
59
60
61/*********************************************************************************************************************************
62* Internal Functions *
63*********************************************************************************************************************************/
64typedef uint32_t DWORD; /* for winerror.h constants */
65
66
67/*********************************************************************************************************************************
68* Global Variables *
69*********************************************************************************************************************************/
70#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
71static uint64_t (*g_pfnHvlInvokeHypercall)(uint64_t uCallInfo, uint64_t HCPhysInput, uint64_t HCPhysOutput);
72
73/**
74 * WinHvr.sys!WinHvDepositMemory
75 *
76 * This API will try to allocate cPages on IdealNode and deposit them with the
77 * hypervisor for use with the given partition. The memory will be freed when
78 * VID.SYS calls WinHvWithdrawAllMemory when the partition is cleaned up.
79 *
80 * Apparently node numbers above 64 have a different meaning.
81 */
82static NTSTATUS (*g_pfnWinHvDepositMemory)(uintptr_t idPartition, size_t cPages, uintptr_t IdealNode, size_t *pcActuallyAdded);
83#endif
84
85RT_C_DECLS_BEGIN
86/**
87 * The WinHvGetPartitionProperty function we intercept in VID.SYS to get the
88 * Hyper-V partition ID.
89 *
90 * This is used from assembly.
91 */
92NTSTATUS WinHvGetPartitionProperty(uintptr_t idPartition, HV_PARTITION_PROPERTY_CODE enmProperty, PHV_PARTITION_PROPERTY puValue);
93decltype(WinHvGetPartitionProperty) *g_pfnWinHvGetPartitionProperty;
94RT_C_DECLS_END
95
96/** @name VID.SYS image details.
97 * @{ */
98#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
99static uint8_t *g_pbVidSys = NULL;
100static uintptr_t g_cbVidSys = 0;
101static PIMAGE_NT_HEADERS g_pVidSysHdrs = NULL;
102/** Pointer to the import thunk entry in VID.SYS for WinHvGetPartitionProperty if we found it. */
103static decltype(WinHvGetPartitionProperty) **g_ppfnVidSysWinHvGetPartitionProperty = NULL;
104
105/** Critical section protecting the WinHvGetPartitionProperty hacking. */
106static RTCRITSECT g_VidSysCritSect;
107#endif /* NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
108RT_C_DECLS_BEGIN
109/** The partition ID passed to WinHvGetPartitionProperty by VID.SYS. */
110HV_PARTITION_ID g_idVidSysFoundPartition = HV_PARTITION_ID_INVALID;
111/** The thread which is currently looking for a partition ID. */
112RTNATIVETHREAD g_hVidSysMatchThread = NIL_RTNATIVETHREAD;
113/** The property code we expect in WinHvGetPartitionProperty. */
114VID_PARTITION_PROPERTY_CODE g_enmVidSysMatchProperty = INT64_MAX;
115/* NEMR0NativeA-win.asm: */
116extern uint8_t g_abNemR0WinHvrWinHvGetPartitionProperty_OriginalProlog[64];
117RT_C_DECLS_END
118/** @} */
119
120
121
122/*********************************************************************************************************************************
123* Internal Functions *
124*********************************************************************************************************************************/
125NEM_TMPL_STATIC int nemR0WinMapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
126 uint32_t cPages, uint32_t fFlags);
127NEM_TMPL_STATIC int nemR0WinUnmapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys, uint32_t cPages);
128#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
129NEM_TMPL_STATIC int nemR0WinExportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx);
130NEM_TMPL_STATIC int nemR0WinImportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat, bool fCanUpdateCr3);
131NEM_TMPL_STATIC int nemR0WinQueryCpuTick(PGVM pGVM, PGVMCPU pGVCpu, uint64_t *pcTicks, uint32_t *pcAux);
132NEM_TMPL_STATIC int nemR0WinResumeCpuTickOnAll(PGVM pGVM, PGVMCPU pGVCpu, uint64_t uPausedTscValue);
133#endif
134DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, PGVMCPU pGVCpu, uint32_t uFunction, void *pvInput, uint32_t cbInput,
135 void *pvOutput, uint32_t cbOutput);
136
137/* NEMR0NativeA-win.asm: */
138DECLASM(NTSTATUS) nemR0VidSysWinHvGetPartitionProperty(uintptr_t idPartition, HV_PARTITION_PROPERTY_CODE enmProperty,
139 PHV_PARTITION_PROPERTY puValue);
140DECLASM(NTSTATUS) nemR0WinHvrWinHvGetPartitionProperty(uintptr_t idPartition, HV_PARTITION_PROPERTY_CODE enmProperty,
141 PHV_PARTITION_PROPERTY puValue);
142
143
144/*
145 * Instantiate the code we share with ring-3.
146 */
147#ifdef NEM_WIN_WITH_RING0_RUNLOOP
148# define NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
149#else
150# undef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
151#endif
152#include "../VMMAll/NEMAllNativeTemplate-win.cpp.h"
153
154
155/**
156 * Module initialization for NEM.
157 */
158VMMR0_INT_DECL(int) NEMR0Init(void)
159{
160#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
161 return RTCritSectInit(&g_VidSysCritSect);
162#else
163 return VINF_SUCCESS;
164#endif
165}
166
167
168/**
169 * Module termination for NEM.
170 */
171VMMR0_INT_DECL(void) NEMR0Term(void)
172{
173#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
174 RTCritSectDelete(&g_VidSysCritSect);
175#endif
176}
177
178#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
179
180/**
181 * Worker for NEMR0InitVM that allocates a hypercall page.
182 *
183 * @returns VBox status code.
184 * @param pHypercallData The hypercall data page to initialize.
185 */
186static int nemR0InitHypercallData(PNEMR0HYPERCALLDATA pHypercallData)
187{
188 int rc = RTR0MemObjAllocPage(&pHypercallData->hMemObj, PAGE_SIZE, false /*fExecutable*/);
189 if (RT_SUCCESS(rc))
190 {
191 pHypercallData->HCPhysPage = RTR0MemObjGetPagePhysAddr(pHypercallData->hMemObj, 0 /*iPage*/);
192 AssertStmt(pHypercallData->HCPhysPage != NIL_RTHCPHYS, rc = VERR_INTERNAL_ERROR_3);
193 pHypercallData->pbPage = (uint8_t *)RTR0MemObjAddress(pHypercallData->hMemObj);
194 AssertStmt(pHypercallData->pbPage, rc = VERR_INTERNAL_ERROR_3);
195 if (RT_SUCCESS(rc))
196 return VINF_SUCCESS;
197
198 /* bail out */
199 RTR0MemObjFree(pHypercallData->hMemObj, true /*fFreeMappings*/);
200 }
201 pHypercallData->hMemObj = NIL_RTR0MEMOBJ;
202 pHypercallData->HCPhysPage = NIL_RTHCPHYS;
203 pHypercallData->pbPage = NULL;
204 return rc;
205}
206
207
208/**
209 * Worker for NEMR0CleanupVM and NEMR0InitVM that cleans up a hypercall page.
210 *
211 * @param pHypercallData The hypercall data page to uninitialize.
212 */
213static void nemR0DeleteHypercallData(PNEMR0HYPERCALLDATA pHypercallData)
214{
215 /* Check pbPage here since it will be NULL when not initialized, whereas the
216 hMemObj can be either NIL_RTR0MEMOBJ or 0 (they aren't necessarily the same). */
217 if (pHypercallData->pbPage != NULL)
218 {
219 RTR0MemObjFree(pHypercallData->hMemObj, true /*fFreeMappings*/);
220 pHypercallData->pbPage = NULL;
221 }
222 pHypercallData->hMemObj = NIL_RTR0MEMOBJ;
223 pHypercallData->HCPhysPage = NIL_RTHCPHYS;
224}
225
226
227static int nemR0StrICmp(const char *psz1, const char *psz2)
228{
229 for (;;)
230 {
231 char ch1 = *psz1++;
232 char ch2 = *psz2++;
233 if ( ch1 != ch2
234 && RT_C_TO_LOWER(ch1) != RT_C_TO_LOWER(ch2))
235 return ch1 - ch2;
236 if (!ch1)
237 return 0;
238 }
239}
240
241
242/**
243 * Worker for nemR0PrepareForVidSysIntercept().
244 */
245static void nemR0PrepareForVidSysInterceptInner(void)
246{
247 uint32_t const cbImage = g_cbVidSys;
248 uint8_t * const pbImage = g_pbVidSys;
249 PIMAGE_NT_HEADERS const pNtHdrs = g_pVidSysHdrs;
250 uintptr_t const offEndNtHdrs = (uintptr_t)(pNtHdrs + 1) - (uintptr_t)pbImage;
251
252# define CHECK_LOG_RET(a_Expr, a_LogRel) do { \
253 if (RT_LIKELY(a_Expr)) { /* likely */ } \
254 else \
255 { \
256 LogRel(a_LogRel); \
257 return; \
258 } \
259 } while (0)
260
261 //__try
262 {
263 /*
264 * Get and validate the import directory entry.
265 */
266 CHECK_LOG_RET( pNtHdrs->OptionalHeader.NumberOfRvaAndSizes > IMAGE_DIRECTORY_ENTRY_IMPORT
267 || pNtHdrs->OptionalHeader.NumberOfRvaAndSizes <= IMAGE_NUMBEROF_DIRECTORY_ENTRIES * 4,
268 ("NEMR0: vid.sys: NumberOfRvaAndSizes is out of range: %#x\n", pNtHdrs->OptionalHeader.NumberOfRvaAndSizes));
269
270 IMAGE_DATA_DIRECTORY const ImportDir = pNtHdrs->OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_IMPORT];
271 CHECK_LOG_RET( ImportDir.Size >= sizeof(IMAGE_IMPORT_DESCRIPTOR)
272 && ImportDir.VirtualAddress >= offEndNtHdrs /* ASSUMES NT headers before imports */
273 && (uint64_t)ImportDir.VirtualAddress + ImportDir.Size <= cbImage,
274 ("NEMR0: vid.sys: Bad import directory entry: %#x LB %#x (cbImage=%#x, offEndNtHdrs=%#zx)\n",
275 ImportDir.VirtualAddress, ImportDir.Size, cbImage, offEndNtHdrs));
276
277 /*
278 * Walk the import descriptor table looking for WINHVR.SYS.
279 */
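 /* Rough sketch of the standard PE/COFF import layout being walked below (general
    PE format background, nothing defined by this file):
        IMAGE_IMPORT_DESCRIPTOR::Name                 - RVA of the imported DLL's name string.
        IMAGE_IMPORT_DESCRIPTOR::FirstThunk           - RVA of the IAT, patched by the loader.
        IMAGE_IMPORT_DESCRIPTOR::u.OriginalFirstThunk - RVA of the unpatched name/ordinal table.
    Each thunk entry either has IMAGE_ORDINAL_FLAG64 set or holds the RVA of an
    IMAGE_IMPORT_BY_NAME structure (a 2 byte hint followed by the symbol name), which
    is why the code below skips 2 bytes to reach the symbol string. */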
280 for (PIMAGE_IMPORT_DESCRIPTOR pImps = (PIMAGE_IMPORT_DESCRIPTOR)&pbImage[ImportDir.VirtualAddress];
281 pImps->Name != 0 && pImps->FirstThunk != 0;
282 pImps++)
283 {
284 CHECK_LOG_RET(pImps->Name < cbImage, ("NEMR0: vid.sys: Bad import directory entry name: %#x", pImps->Name));
285 const char *pszModName = (const char *)&pbImage[pImps->Name];
286 if (nemR0StrICmp(pszModName, "winhvr.sys"))
287 continue;
288 CHECK_LOG_RET(pImps->FirstThunk < cbImage && pImps->FirstThunk >= offEndNtHdrs,
289 ("NEMR0: vid.sys: Bad FirstThunk: %#x", pImps->FirstThunk));
290 CHECK_LOG_RET( pImps->u.OriginalFirstThunk == 0
291 || (pImps->u.OriginalFirstThunk >= offEndNtHdrs && pImps->u.OriginalFirstThunk < cbImage),
292 ("NEMR0: vid.sys: Bad OriginalFirstThunk: %#x", pImps->u.OriginalFirstThunk));
293
294 /*
295 * Walk the thunk table(s) looking for WinHvGetPartitionProperty.
296 */
297 uintptr_t *puFirstThunk = (uintptr_t *)&pbImage[pImps->FirstThunk]; /* update this. */
298 if ( pImps->u.OriginalFirstThunk != 0
299 && pImps->u.OriginalFirstThunk != pImps->FirstThunk)
300 {
301 uintptr_t const *puOrgThunk = (uintptr_t const *)&pbImage[pImps->u.OriginalFirstThunk]; /* read from this. */
302 uintptr_t cLeft = (cbImage - (RT_MAX(pImps->FirstThunk, pImps->u.OriginalFirstThunk)))
303 / sizeof(*puFirstThunk);
304 while (cLeft-- > 0 && *puOrgThunk != 0)
305 {
306 if (!(*puOrgThunk & IMAGE_ORDINAL_FLAG64))
307 {
308 CHECK_LOG_RET(*puOrgThunk >= offEndNtHdrs && *puOrgThunk < cbImage,
309 ("NEMR0: vid.sys: Bad thunk entry: %#x", *puOrgThunk));
310
311 const char *pszSymbol = (const char *)&pbImage[*puOrgThunk + 2];
312 if (strcmp(pszSymbol, "WinHvGetPartitionProperty") == 0)
313 g_ppfnVidSysWinHvGetPartitionProperty = (decltype(WinHvGetPartitionProperty) **)puFirstThunk;
314 }
315
316 puOrgThunk++;
317 puFirstThunk++;
318 }
319 }
320 else
321 {
322 /* No original thunk table, so scan the resolved symbols for a match
323 with the WinHvGetPartitionProperty address. */
324 uintptr_t const uNeedle = (uintptr_t)g_pfnWinHvGetPartitionProperty;
325 uintptr_t cLeft = (cbImage - pImps->FirstThunk) / sizeof(*puFirstThunk);
326 while (cLeft-- > 0 && *puFirstThunk != 0)
327 {
328 if (*puFirstThunk == uNeedle)
329 g_ppfnVidSysWinHvGetPartitionProperty = (decltype(WinHvGetPartitionProperty) **)puFirstThunk;
330 puFirstThunk++;
331 }
332 }
333 }
334
335 /* Report the findings: */
336 if (g_ppfnVidSysWinHvGetPartitionProperty)
337 LogRel(("NEMR0: vid.sys: Found WinHvGetPartitionProperty import thunk at %p (value %p vs %p)\n",
338 g_ppfnVidSysWinHvGetPartitionProperty,*g_ppfnVidSysWinHvGetPartitionProperty, g_pfnWinHvGetPartitionProperty));
339 else
340 LogRel(("NEMR0: vid.sys: Did not find WinHvGetPartitionProperty!\n"));
341 }
342 //__except(EXCEPTION_EXECUTE_HANDLER)
343 //{
344 // return;
345 //}
346# undef CHECK_LOG_RET
347}
348
349
350/**
351 * Worker for NEMR0InitVM that prepares for intercepting stuff in VID.SYS.
352 */
353static void nemR0PrepareForVidSysIntercept(RTDBGKRNLINFO hKrnlInfo)
354{
355 /*
356 * Resolve the symbols we need first.
357 */
358 int rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, "vid.sys", "__ImageBase", (void **)&g_pbVidSys);
359 if (RT_SUCCESS(rc))
360 {
361 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, "vid.sys", "__ImageSize", (void **)&g_cbVidSys);
362 if (RT_SUCCESS(rc))
363 {
364 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, "vid.sys", "__ImageNtHdrs", (void **)&g_pVidSysHdrs);
365 if (RT_SUCCESS(rc))
366 {
367 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, "winhvr.sys", "WinHvGetPartitionProperty",
368 (void **)&g_pfnWinHvGetPartitionProperty);
369 if (RT_SUCCESS(rc))
370 {
371 /*
372 * Now locate the import thunk entry for WinHvGetPartitionProperty in vid.sys.
373 */
374 nemR0PrepareForVidSysInterceptInner();
375 }
376 else
377 LogRel(("NEMR0: Failed to find winhvr.sys!WinHvGetPartitionProperty (%Rrc)\n", rc));
378 }
379 else
380 LogRel(("NEMR0: Failed to find vid.sys!__ImageNtHdrs (%Rrc)\n", rc));
381 }
382 else
383 LogRel(("NEMR0: Failed to find vid.sys!__ImageSize (%Rrc)\n", rc));
384 }
385 else
386 LogRel(("NEMR0: Failed to find vid.sys!__ImageBase (%Rrc)\n", rc));
387}
388
389#endif /* NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
390
391
392/**
393 * Called by NEMR3Init to make sure we've got what we need.
394 *
395 * @returns VBox status code.
396 * @param pGVM The ring-0 VM handle.
397 * @thread EMT(0)
398 */
399VMMR0_INT_DECL(int) NEMR0InitVM(PGVM pGVM)
400{
401 AssertCompile(sizeof(pGVM->nemr0.s) <= sizeof(pGVM->nemr0.padding));
402 AssertCompile(sizeof(pGVM->aCpus[0].nemr0.s) <= sizeof(pGVM->aCpus[0].nemr0.padding));
403
404 int rc = GVMMR0ValidateGVMandEMT(pGVM, 0);
405 AssertRCReturn(rc, rc);
406
407#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
408 /*
409 * We want to perform hypercalls here. The NT kernel started to expose a very low
410 * level interface for doing this, somewhere between build 14271 and 16299. Since
411 * we need build 17134 to get anywhere at all, the exact build is not relevant here.
412 *
413 * We also need to deposit memory to the hypervisor for use with the partition
414 * (page mapping structures and the like).
415 */
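 /* For reference, a minimal sketch of how the resolved worker gets used (see
    nemR0WinMapPages below): the hypercall code and repeat count are packed into the
    first argument and the input/output blocks are passed as host physical addresses,
    e.g.
        uint64_t uRet = g_pfnHvlInvokeHypercall(HvCallMapGpaPages | ((uint64_t)cPages << 32),
                                                HCPhysInputPage, 0);
    where HCPhysInputPage stands for the per-VCpu hypercall page allocated further
    down; the exact calling convention is the hypervisor's (TLFS), not ours. */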
416 RTDBGKRNLINFO hKrnlInfo;
417 rc = RTR0DbgKrnlInfoOpen(&hKrnlInfo, 0);
418 if (RT_SUCCESS(rc))
419 {
420 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, NULL, "HvlInvokeHypercall", (void **)&g_pfnHvlInvokeHypercall);
421 if (RT_FAILURE(rc))
422 rc = VERR_NEM_MISSING_KERNEL_API_1;
423 if (RT_SUCCESS(rc))
424 {
425 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, "winhvr.sys", "WinHvDepositMemory", (void **)&g_pfnWinHvDepositMemory);
426 if (RT_FAILURE(rc))
427 rc = rc == VERR_MODULE_NOT_FOUND ? VERR_NEM_MISSING_KERNEL_API_2 : VERR_NEM_MISSING_KERNEL_API_3;
428 }
429
430 /*
431 * Since late 2021 we may also need to do some nasty trickery with vid.sys to get
432 * the partition ID. So, get the necessary info while we have a hKrnlInfo instance.
433 */
434 if (RT_SUCCESS(rc))
435 nemR0PrepareForVidSysIntercept(hKrnlInfo);
436
437 RTR0DbgKrnlInfoRelease(hKrnlInfo);
438 if (RT_SUCCESS(rc))
439 {
440 /*
441 * Allocate a page for non-EMT threads to use for hypercalls (update
442 * statistics and such) and a critical section protecting it.
443 */
444 rc = RTCritSectInit(&pGVM->nemr0.s.HypercallDataCritSect);
445 if (RT_SUCCESS(rc))
446 {
447 rc = nemR0InitHypercallData(&pGVM->nemr0.s.HypercallData);
448 if (RT_SUCCESS(rc))
449 {
450 /*
451 * Allocate a page for each VCPU to place hypercall data on.
452 */
453 for (VMCPUID i = 0; i < pGVM->cCpus; i++)
454 {
455 rc = nemR0InitHypercallData(&pGVM->aCpus[i].nemr0.s.HypercallData);
456 if (RT_FAILURE(rc))
457 {
458 while (i-- > 0)
459 nemR0DeleteHypercallData(&pGVM->aCpus[i].nemr0.s.HypercallData);
460 break;
461 }
462 }
463 if (RT_SUCCESS(rc))
464 {
465 /*
466 * So far, so good.
467 */
468 return rc;
469 }
470
471 /*
472 * Bail out.
473 */
474 nemR0DeleteHypercallData(&pGVM->nemr0.s.HypercallData);
475 }
476 RTCritSectDelete(&pGVM->nemr0.s.HypercallDataCritSect);
477 }
478 }
479 }
480#endif /* NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
481
482 return rc;
483}
484
485#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
486
487/**
488 * Perform an I/O control operation on the partition handle (VID.SYS).
489 *
490 * @returns NT status code.
491 * @param pGVM The ring-0 VM structure.
492 * @param pGVCpu The global (ring-0) CPU structure of the calling EMT.
493 * @param uFunction The function to perform.
494 * @param pvInput The input buffer. This must point within the VM
495 * structure so we can easily convert to a ring-3
496 * pointer if necessary.
497 * @param cbInput The size of the input. @a pvInput must be NULL when
498 * zero.
499 * @param pvOutput The output buffer. This must also point within the
500 * VM structure for ring-3 pointer magic.
501 * @param cbOutput The size of the output. @a pvOutput must be NULL
502 * when zero.
503 * @thread EMT(pGVCpu)
504 */
505DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, PGVMCPU pGVCpu, uint32_t uFunction, void *pvInput, uint32_t cbInput,
506 void *pvOutput, uint32_t cbOutput)
507{
508# ifdef RT_STRICT
509 /*
510 * Input and output parameters are part of the VM CPU structure.
511 */
512 VMCPU_ASSERT_EMT(pGVCpu);
513 if (pvInput)
514 AssertReturn(((uintptr_t)pvInput + cbInput) - (uintptr_t)pGVCpu <= sizeof(*pGVCpu), VERR_INVALID_PARAMETER);
515 if (pvOutput)
516 AssertReturn(((uintptr_t)pvOutput + cbOutput) - (uintptr_t)pGVCpu <= sizeof(*pGVCpu), VERR_INVALID_PARAMETER);
517# endif
518
519 int32_t rcNt = STATUS_UNSUCCESSFUL;
520 int rc = SUPR0IoCtlPerform(pGVM->nemr0.s.pIoCtlCtx, uFunction,
521 pvInput,
522 pvInput ? (uintptr_t)pvInput + pGVCpu->nemr0.s.offRing3ConversionDelta : NIL_RTR3PTR,
523 cbInput,
524 pvOutput,
525 pvOutput ? (uintptr_t)pvOutput + pGVCpu->nemr0.s.offRing3ConversionDelta : NIL_RTR3PTR,
526 cbOutput,
527 &rcNt);
528 if (RT_SUCCESS(rc) || !NT_SUCCESS((NTSTATUS)rcNt))
529 return (NTSTATUS)rcNt;
530 return STATUS_UNSUCCESSFUL;
531}
532
533
534/**
535 * Here is something that we really do not wish to do, but find ourselves forced
536 * to do right now as we cannot rewrite the memory management of VBox 6.1 in time
537 * for Windows 11.
538 *
539 * @returns VBox status code.
540 * @param pGVM The ring-0 VM structure.
541 * @param pahMemObjs Array of 6 memory objects that the caller will release.
542 * ASSUMES that they are initialized to NIL.
543 */
544static int nemR0InitVMPart2DontWannaDoTheseUglyPartitionIdFallbacks(PGVM pGVM, PRTR0MEMOBJ pahMemObjs)
545{
546 /*
547 * Check preconditions:
548 */
549 if ( !g_ppfnVidSysWinHvGetPartitionProperty
550 || (uintptr_t)g_ppfnVidSysWinHvGetPartitionProperty & (sizeof(uintptr_t) - 1))
551 {
552 LogRel(("NEMR0: g_ppfnVidSysWinHvGetPartitionProperty is NULL or misaligned (%p), partition ID fallback not possible.\n",
553 g_ppfnVidSysWinHvGetPartitionProperty));
554 return VERR_NEM_INIT_FAILED;
555 }
556 if (!g_pfnWinHvGetPartitionProperty)
557 {
558 LogRel(("NEMR0: g_pfnWinHvGetPartitionProperty is NULL, partition ID fallback not possible.\n"));
559 return VERR_NEM_INIT_FAILED;
560 }
561 if (!pGVM->nem.s.IoCtlGetPartitionProperty.uFunction)
562 {
563 LogRel(("NEMR0: IoCtlGetPartitionProperty.uFunction is 0, partition ID fallback not possible.\n"));
564 return VERR_NEM_INIT_FAILED;
565 }
566
567 /*
568 * Create an alias for the thunk table entry because it's very likely to be read-only.
569 */
570 int rc = RTR0MemObjLockKernel(&pahMemObjs[0], g_ppfnVidSysWinHvGetPartitionProperty, sizeof(uintptr_t), RTMEM_PROT_READ);
571 if (RT_FAILURE(rc))
572 {
573 LogRel(("NEMR0: RTR0MemObjLockKernel failed on VID.SYS thunk table entry: %Rrc\n", rc));
574 return rc;
575 }
576
577 rc = RTR0MemObjEnterPhys(&pahMemObjs[1], RTR0MemObjGetPagePhysAddr(pahMemObjs[0], 0), PAGE_SIZE, RTMEM_CACHE_POLICY_DONT_CARE);
578 if (RT_FAILURE(rc))
579 {
580 LogRel(("NEMR0: RTR0MemObjEnterPhys failed on VID.SYS thunk table entry: %Rrc\n", rc));
581 return rc;
582 }
583
584 rc = RTR0MemObjMapKernel(&pahMemObjs[2], pahMemObjs[1], (void *)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
585 if (RT_FAILURE(rc))
586 {
587 LogRel(("NEMR0: RTR0MemObjMapKernel failed on VID.SYS thunk table entry: %Rrc\n", rc));
588 return rc;
589 }
590
591 decltype(WinHvGetPartitionProperty) **ppfnThunkAlias
592 = (decltype(WinHvGetPartitionProperty) **)( (uintptr_t)RTR0MemObjAddress(pahMemObjs[2])
593 | ((uintptr_t)g_ppfnVidSysWinHvGetPartitionProperty & PAGE_OFFSET_MASK));
594 LogRel(("NEMR0: ppfnThunkAlias=%p *ppfnThunkAlias=%p; original: %p & %p, phys %RHp\n", ppfnThunkAlias, *ppfnThunkAlias,
595 g_ppfnVidSysWinHvGetPartitionProperty, *g_ppfnVidSysWinHvGetPartitionProperty,
596 RTR0MemObjGetPagePhysAddr(pahMemObjs[0], 0) ));
597
598 /*
599 * Create an alias for the target code in WinHvr.sys as there is a very decent
600 * chance we have to patch it.
601 */
602 rc = RTR0MemObjLockKernel(&pahMemObjs[3], g_pfnWinHvGetPartitionProperty, sizeof(uintptr_t), RTMEM_PROT_READ);
603 if (RT_FAILURE(rc))
604 {
605 LogRel(("NEMR0: RTR0MemObjLockKernel failed on WinHvGetPartitionProperty (%p): %Rrc\n", g_pfnWinHvGetPartitionProperty, rc));
606 return rc;
607 }
608
609 rc = RTR0MemObjEnterPhys(&pahMemObjs[4], RTR0MemObjGetPagePhysAddr(pahMemObjs[3], 0), PAGE_SIZE, RTMEM_CACHE_POLICY_DONT_CARE);
610 if (RT_FAILURE(rc))
611 {
612 LogRel(("NEMR0: RTR0MemObjEnterPhys failed on WinHvGetPartitionProperty: %Rrc\n", rc));
613 return rc;
614 }
615
616 rc = RTR0MemObjMapKernel(&pahMemObjs[5], pahMemObjs[4], (void *)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
617 if (RT_FAILURE(rc))
618 {
619 LogRel(("NEMR0: RTR0MemObjMapKernel failed on WinHvGetPartitionProperty: %Rrc\n", rc));
620 return rc;
621 }
622
623 uint8_t *pbTargetAlias = (uint8_t *)( (uintptr_t)RTR0MemObjAddress(pahMemObjs[5])
624 | ((uintptr_t)g_pfnWinHvGetPartitionProperty & PAGE_OFFSET_MASK));
625 LogRel(("NEMR0: pbTargetAlias=%p %.16Rhxs; original: %p %.16Rhxs, phys %RHp\n", pbTargetAlias, pbTargetAlias,
626 g_pfnWinHvGetPartitionProperty, g_pfnWinHvGetPartitionProperty, RTR0MemObjGetPagePhysAddr(pahMemObjs[3], 0) ));
627
628 /*
629 * Analyse the target function's prologue to figure out how much we should copy
630 * when patching it. We repeat this every time because we don't want to get
631 * tripped up by someone else doing the same stuff as we're doing here.
632 * We need at least 12 bytes for the patch sequence (MOV RAX, QWORD; JMP RAX).
633 */
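 /* For reference, the 12 byte patch sequence assembled below is:
        48 B8 <imm64>   mov rax, imm64      ; absolute target address
        FF E0           jmp rax
    so offJmpBack must cover at least 12 bytes worth of whole instructions that can be
    copied verbatim into the trampoline prologue. */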
634 union
635 {
636 uint8_t ab[48]; /**< Must be equal to or smaller than g_abNemR0WinHvrWinHvGetPartitionProperty_OriginalProlog */
637 int64_t ai64[6];
638 } Org;
639 memcpy(Org.ab, g_pfnWinHvGetPartitionProperty, sizeof(Org)); /** @todo ASSUMES 48 valid bytes start at function... */
640
641 uint32_t offJmpBack = 0;
642 uint32_t const cbMinJmpPatch = 12;
643 DISSTATE Dis;
644 while (offJmpBack < cbMinJmpPatch && offJmpBack < sizeof(Org) - 16)
645 {
646 uint32_t cbInstr = 1;
647 rc = DISInstr(&Org.ab[offJmpBack], DISCPUMODE_64BIT, &Dis, &cbInstr);
648 if (RT_FAILURE(rc))
649 {
650 LogRel(("NEMR0: DISInstr failed %#x bytes into WinHvGetPartitionProperty: %Rrc (%.48Rhxs)\n",
651 offJmpBack, rc, Org.ab));
652 break;
653 }
654 if (Dis.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
655 {
656 LogRel(("NEMR0: Control flow instruction %#x bytes into WinHvGetPartitionProperty prologue: %.48Rhxs\n",
657 offJmpBack, Org.ab));
658 break;
659 }
660 if (Dis.ModRM.Bits.Mod == 0 && Dis.ModRM.Bits.Rm == 5 /* wrt RIP */)
661 {
662 LogRel(("NEMR0: RIP relative addressing %#x bytes into WinHvGetPartitionProperty prologue: %.48Rhxs\n",
663 offJmpBack, Org.ab));
664 break;
665 }
666 offJmpBack += cbInstr;
667 }
668
669 uintptr_t const cbLeftInPage = PAGE_SIZE - ((uintptr_t)g_pfnWinHvGetPartitionProperty & PAGE_OFFSET_MASK);
670 if (cbLeftInPage < 16 && offJmpBack >= cbMinJmpPatch)
671 {
672 LogRel(("NEMR0: WinHvGetPartitionProperty patching not possible do the page crossing: %p (%#zx)\n",
673 g_pfnWinHvGetPartitionProperty, cbLeftInPage));
674 offJmpBack = 0;
675 }
676 if (offJmpBack >= cbMinJmpPatch)
677 LogRel(("NEMR0: offJmpBack=%#x for WinHvGetPartitionProperty (%p: %.48Rhxs)\n",
678 offJmpBack, g_pfnWinHvGetPartitionProperty, Org.ab));
679 else
680 offJmpBack = 0;
681 rc = VINF_SUCCESS;
682
683 /*
684 * Now enter serialization lock and get on with it...
685 */
686 PVMCPUCC const pVCpu0 = &pGVM->aCpus[0];
687 NTSTATUS rcNt;
688 RTCritSectEnter(&g_VidSysCritSect);
689
690 /*
691 * First attempt, patching the import table entry.
692 */
693 g_idVidSysFoundPartition = HV_PARTITION_ID_INVALID;
694 g_hVidSysMatchThread = RTThreadNativeSelf();
695 g_enmVidSysMatchProperty = pVCpu0->nem.s.uIoCtlBuf.GetProp.enmProperty = HvPartitionPropertyProcessorVendor;
696 pVCpu0->nem.s.uIoCtlBuf.GetProp.uValue = 0;
697
698 void *pvOld = NULL;
699 if (ASMAtomicCmpXchgExPtr(ppfnThunkAlias, (void *)(uintptr_t)nemR0VidSysWinHvGetPartitionProperty,
700 (void *)(uintptr_t)g_pfnWinHvGetPartitionProperty, &pvOld))
701 {
702 LogRel(("NEMR0: after switch to %p: ppfnThunkAlias=%p *ppfnThunkAlias=%p; original: %p & %p\n",
703 nemR0VidSysWinHvGetPartitionProperty, ppfnThunkAlias, *ppfnThunkAlias,
704 g_ppfnVidSysWinHvGetPartitionProperty, *g_ppfnVidSysWinHvGetPartitionProperty));
705
706 rcNt = nemR0NtPerformIoControl(pGVM, pVCpu0, pGVM->nemr0.s.IoCtlGetPartitionProperty.uFunction,
707 &pVCpu0->nem.s.uIoCtlBuf.GetProp.enmProperty,
708 sizeof(pVCpu0->nem.s.uIoCtlBuf.GetProp.enmProperty),
709 &pVCpu0->nem.s.uIoCtlBuf.GetProp.uValue,
710 sizeof(pVCpu0->nem.s.uIoCtlBuf.GetProp.uValue));
711 ASMAtomicWritePtr(ppfnThunkAlias, (void *)(uintptr_t)g_pfnWinHvGetPartitionProperty);
712 HV_PARTITION_ID idHvPartition = g_idVidSysFoundPartition;
713
714 LogRel(("NEMR0: WinHvGetPartitionProperty trick #1 yielded: rcNt=%#x idHvPartition=%#RX64 uValue=%#RX64\n",
715 rcNt, idHvPartition, pVCpu0->nem.s.uIoCtlBuf.GetProp.uValue));
716 pGVM->nemr0.s.idHvPartition = idHvPartition;
717 }
718 else
719 {
720 LogRel(("NEMR0: Unexpected WinHvGetPartitionProperty pointer in VID.SYS: %p, expected %p\n",
721 pvOld, g_pfnWinHvGetPartitionProperty));
722 rc = VERR_NEM_INIT_FAILED;
723 }
724
725 /*
726 * If that didn't succeed, try patching the winhvr.sys code.
727 */
728 if ( pGVM->nemr0.s.idHvPartition == HV_PARTITION_ID_INVALID
729 && offJmpBack >= cbMinJmpPatch)
730 {
731 g_idVidSysFoundPartition = HV_PARTITION_ID_INVALID;
732 g_hVidSysMatchThread = RTThreadNativeSelf();
733 g_enmVidSysMatchProperty = pVCpu0->nem.s.uIoCtlBuf.GetProp.enmProperty = HvPartitionPropertyProcessorVendor;
734 pVCpu0->nem.s.uIoCtlBuf.GetProp.uValue = 0;
735
736 /*
737 * Prepare the hook area.
738 */
739 uint8_t *pbDst = g_abNemR0WinHvrWinHvGetPartitionProperty_OriginalProlog;
740 memcpy(pbDst, (uint8_t const *)(uintptr_t)g_pfnWinHvGetPartitionProperty, offJmpBack);
741 pbDst += offJmpBack;
742
743 *pbDst++ = 0x48; /* mov rax, imm64 */
744 *pbDst++ = 0xb8;
745 *(uint64_t *)pbDst = (uintptr_t)g_pfnWinHvGetPartitionProperty + offJmpBack;
746 pbDst += sizeof(uint64_t);
747 *pbDst++ = 0xff; /* jmp rax */
748 *pbDst++ = 0xe0;
749 *pbDst++ = 0xcc; /* int3 */
750
751 /*
752 * Patch the original. We use cmpxchg16b here to avoid concurrency problems
753 * (this also makes sure we don't trample over someone else doing similar
754 * patching at the same time).
755 */
756 union
757 {
758 uint8_t ab[16];
759 uint64_t au64[2];
760 } Patch;
761 memcpy(Patch.ab, Org.ab, sizeof(Patch));
762 pbDst = Patch.ab;
763 *pbDst++ = 0x48; /* mov rax, imm64 */
764 *pbDst++ = 0xb8;
765 *(uint64_t *)pbDst = (uintptr_t)nemR0WinHvrWinHvGetPartitionProperty;
766 pbDst += sizeof(uint64_t);
767 *pbDst++ = 0xff; /* jmp rax */
768 *pbDst++ = 0xe0;
769
770 int64_t ai64CmpCopy[2] = { Org.ai64[0], Org.ai64[1] }; /* paranoia */
771 if (_InterlockedCompareExchange128((__int64 volatile *)pbTargetAlias, Patch.au64[1], Patch.au64[0], ai64CmpCopy) != 0)
772 {
773 rcNt = nemR0NtPerformIoControl(pGVM, pVCpu0, pGVM->nemr0.s.IoCtlGetPartitionProperty.uFunction,
774 &pVCpu0->nem.s.uIoCtlBuf.GetProp.enmProperty,
775 sizeof(pVCpu0->nem.s.uIoCtlBuf.GetProp.enmProperty),
776 &pVCpu0->nem.s.uIoCtlBuf.GetProp.uValue,
777 sizeof(pVCpu0->nem.s.uIoCtlBuf.GetProp.uValue));
778
779 for (uint32_t cFailures = 0; cFailures < 10; cFailures++)
780 {
781 ai64CmpCopy[0] = Patch.au64[0]; /* paranoia */
782 ai64CmpCopy[1] = Patch.au64[1];
783 if (_InterlockedCompareExchange128((__int64 volatile *)pbTargetAlias, Org.ai64[1], Org.ai64[0], ai64CmpCopy) != 0)
784 {
785 if (cFailures > 0)
786 LogRel(("NEMR0: Succeeded on try #%u.\n", cFailures));
787 break;
788 }
789 LogRel(("NEMR0: Patch restore failure #%u: %.16Rhxs, expected %.16Rhxs\n",
790 cFailures + 1, &ai64CmpCopy[0], &Patch.au64[0]));
791 RTThreadSleep(1000);
792 }
793
794 HV_PARTITION_ID idHvPartition = g_idVidSysFoundPartition;
795 LogRel(("NEMR0: WinHvGetPartitionProperty trick #2 yielded: rcNt=%#x idHvPartition=%#RX64 uValue=%#RX64\n",
796 rcNt, idHvPartition, pVCpu0->nem.s.uIoCtlBuf.GetProp.uValue));
797 pGVM->nemr0.s.idHvPartition = idHvPartition;
798
799 }
800 else
801 {
802 LogRel(("NEMR0: Failed to install WinHvGetPartitionProperty patch: %.16Rhxs, expected %.16Rhxs\n",
803 &ai64CmpCopy[0], &Org.ai64[0]));
804 rc = VERR_NEM_INIT_FAILED;
805 }
806 }
807
808 RTCritSectLeave(&g_VidSysCritSect);
809
810 return rc;
811}
812
813#endif /* NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
814
815/**
816 * 2nd part of the initialization, after we've got a partition handle.
817 *
818 * @returns VBox status code.
819 * @param pGVM The ring-0 VM handle.
820 * @thread EMT(0)
821 */
822VMMR0_INT_DECL(int) NEMR0InitVMPart2(PGVM pGVM)
823{
824 int rc = GVMMR0ValidateGVMandEMT(pGVM, 0);
825 AssertRCReturn(rc, rc);
826 SUPR0Printf("NEMR0InitVMPart2\n"); LogRel(("2: NEMR0InitVMPart2\n"));
827#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
828# ifdef NEM_WIN_WITH_RING0_RUNLOOP
829 Assert(pGVM->nemr0.s.fMayUseRing0Runloop == false);
830# endif
831
832 /*
833 * Copy and validate the I/O control information from ring-3.
834 */
835 NEMWINIOCTL Copy = pGVM->nem.s.IoCtlGetHvPartitionId;
836 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);
837 AssertLogRelReturn(Copy.cbInput == 0, VERR_NEM_INIT_FAILED);
838 AssertLogRelReturn(Copy.cbOutput == sizeof(HV_PARTITION_ID), VERR_NEM_INIT_FAILED);
839 pGVM->nemr0.s.IoCtlGetHvPartitionId = Copy;
840
841 Copy = pGVM->nem.s.IoCtlGetPartitionProperty;
842 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);
843 AssertLogRelReturn(Copy.cbInput == sizeof(VID_PARTITION_PROPERTY_CODE), VERR_NEM_INIT_FAILED);
844 AssertLogRelReturn(Copy.cbOutput == sizeof(HV_PARTITION_PROPERTY), VERR_NEM_INIT_FAILED);
845 pGVM->nemr0.s.IoCtlGetPartitionProperty = Copy;
846
847# ifdef NEM_WIN_WITH_RING0_RUNLOOP
848 pGVM->nemr0.s.fMayUseRing0Runloop = pGVM->nem.s.fUseRing0Runloop;
849
850 Copy = pGVM->nem.s.IoCtlStartVirtualProcessor;
851 AssertLogRelStmt(Copy.uFunction != 0, rc = VERR_NEM_INIT_FAILED);
852 AssertLogRelStmt(Copy.cbInput == sizeof(HV_VP_INDEX), rc = VERR_NEM_INIT_FAILED);
853 AssertLogRelStmt(Copy.cbOutput == 0, rc = VERR_NEM_INIT_FAILED);
854 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlGetHvPartitionId.uFunction, rc = VERR_NEM_INIT_FAILED);
855 if (RT_SUCCESS(rc))
856 pGVM->nemr0.s.IoCtlStartVirtualProcessor = Copy;
857
858 Copy = pGVM->nem.s.IoCtlStopVirtualProcessor;
859 AssertLogRelStmt(Copy.uFunction != 0, rc = VERR_NEM_INIT_FAILED);
860 AssertLogRelStmt(Copy.cbInput == sizeof(HV_VP_INDEX), rc = VERR_NEM_INIT_FAILED);
861 AssertLogRelStmt(Copy.cbOutput == 0, rc = VERR_NEM_INIT_FAILED);
862 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlGetHvPartitionId.uFunction, rc = VERR_NEM_INIT_FAILED);
863 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlStartVirtualProcessor.uFunction, rc = VERR_NEM_INIT_FAILED);
864 if (RT_SUCCESS(rc))
865 pGVM->nemr0.s.IoCtlStopVirtualProcessor = Copy;
866
867 Copy = pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext;
868 AssertLogRelStmt(Copy.uFunction != 0, rc = VERR_NEM_INIT_FAILED);
869 AssertLogRelStmt( Copy.cbInput == sizeof(VID_IOCTL_INPUT_MESSAGE_SLOT_HANDLE_AND_GET_NEXT)
870 || Copy.cbInput == RT_OFFSETOF(VID_IOCTL_INPUT_MESSAGE_SLOT_HANDLE_AND_GET_NEXT, cMillies),
871 rc = VERR_NEM_INIT_FAILED);
872 AssertLogRelStmt(Copy.cbOutput == 0, rc = VERR_NEM_INIT_FAILED);
873 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlGetHvPartitionId.uFunction, rc = VERR_NEM_INIT_FAILED);
874 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlStartVirtualProcessor.uFunction, rc = VERR_NEM_INIT_FAILED);
875 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlStopVirtualProcessor.uFunction, rc = VERR_NEM_INIT_FAILED);
876 if (RT_SUCCESS(rc))
877 pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext = Copy;
878# endif
879
880 if ( RT_SUCCESS(rc)
881 || !pGVM->nem.s.fUseRing0Runloop)
882 {
883 /*
884 * Setup of an I/O control context for the partition handle for later use.
885 */
886 rc = SUPR0IoCtlSetupForHandle(pGVM->pSession, pGVM->nem.s.hPartitionDevice, 0, &pGVM->nemr0.s.pIoCtlCtx);
887 AssertLogRelRCReturn(rc, rc);
888 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
889 {
890 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
891 pGVCpu->nemr0.s.offRing3ConversionDelta = (uintptr_t)pGVM->aCpus[idCpu].pVCpuR3 - (uintptr_t)pGVCpu;
892 }
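        /* A small illustration of what the delta does: adding it to a ring-0 pointer
           within the VMCPU structure (say &pGVCpu->nem.s.uIoCtlBuf) yields the EMT's
           ring-3 address of the same field, which is what nemR0NtPerformIoControl
           hands to SUPR0IoCtlPerform as the ring-3 buffer address. */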
893
894 /*
895 * Get the partition ID.
896 */
897 PVMCPUCC pVCpu0 = &pGVM->aCpus[0];
898 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pVCpu0, pGVM->nemr0.s.IoCtlGetHvPartitionId.uFunction, NULL, 0,
899 &pVCpu0->nem.s.uIoCtlBuf.idPartition, sizeof(pVCpu0->nem.s.uIoCtlBuf.idPartition));
900# if 0
901 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("IoCtlGetHvPartitionId failed: %#x\n", rcNt), VERR_NEM_INIT_FAILED);
902 pGVM->nemr0.s.idHvPartition = pVCpu0->nem.s.uIoCtlBuf.idPartition;
903# else
904 /*
905 * Since 2021 (Win11) the above I/O control doesn't work on exo-partitions
906 * so we have to go to extremes to get at it. Sigh.
907 */
908 if ( !NT_SUCCESS(rcNt)
909 || pVCpu0->nem.s.uIoCtlBuf.idPartition == HV_PARTITION_ID_INVALID)
910 {
911 LogRel(("IoCtlGetHvPartitionId failed: r0=%#RX64, r3=%#RX64, rcNt=%#x\n",
912 pGVM->nemr0.s.idHvPartition, pGVM->nem.s.idHvPartition, rcNt));
913
914 RTR0MEMOBJ ahMemObjs[6]
915 = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ };
916 rc = nemR0InitVMPart2DontWannaDoTheseUglyPartitionIdFallbacks(pGVM, ahMemObjs);
917 size_t i = RT_ELEMENTS(ahMemObjs);
918 while (i-- > 0)
919 RTR0MemObjFree(ahMemObjs[i], false /*fFreeMappings*/);
920 }
921 if (pGVM->nem.s.idHvPartition == HV_PARTITION_ID_INVALID)
922 pGVM->nem.s.idHvPartition = pGVM->nemr0.s.idHvPartition;
923# endif
924 AssertLogRelMsgReturn(pGVM->nemr0.s.idHvPartition == pGVM->nem.s.idHvPartition,
925 ("idHvPartition mismatch: r0=%#RX64, r3=%#RX64\n", pGVM->nemr0.s.idHvPartition, pGVM->nem.s.idHvPartition),
926 VERR_NEM_INIT_FAILED);
927 if (RT_SUCCESS(rc) && pGVM->nemr0.s.idHvPartition == HV_PARTITION_ID_INVALID)
928 rc = VERR_NEM_INIT_FAILED;
929 }
930#endif /* NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
931
932 return rc;
933}
934
935
936/**
937 * Cleanup the NEM parts of the VM in ring-0.
938 *
939 * This is always called and must deal with the state regardless of whether
940 * NEMR0InitVM() was called or not. So, take care here.
941 *
942 * @param pGVM The ring-0 VM handle.
943 */
944VMMR0_INT_DECL(void) NEMR0CleanupVM(PGVM pGVM)
945{
946#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
947 pGVM->nemr0.s.idHvPartition = HV_PARTITION_ID_INVALID;
948
949 /* Clean up I/O control context. */
950 if (pGVM->nemr0.s.pIoCtlCtx)
951 {
952 int rc = SUPR0IoCtlCleanup(pGVM->nemr0.s.pIoCtlCtx);
953 AssertRC(rc);
954 pGVM->nemr0.s.pIoCtlCtx = NULL;
955 }
956
957 /* Free the hypercall pages. */
958 VMCPUID i = pGVM->cCpus;
959 while (i-- > 0)
960 nemR0DeleteHypercallData(&pGVM->aCpus[i].nemr0.s.HypercallData);
961
962 /* The non-EMT one too. */
963 if (RTCritSectIsInitialized(&pGVM->nemr0.s.HypercallDataCritSect))
964 RTCritSectDelete(&pGVM->nemr0.s.HypercallDataCritSect);
965 nemR0DeleteHypercallData(&pGVM->nemr0.s.HypercallData);
966#else
967 RT_NOREF(pGVM);
968#endif
969}
970
971
972#if 0 /* for debugging GPA unmapping. */
973static int nemR3WinDummyReadGpa(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys)
974{
975 PHV_INPUT_READ_GPA pIn = (PHV_INPUT_READ_GPA)pGVCpu->nemr0.s.pbHypercallData;
976 PHV_OUTPUT_READ_GPA pOut = (PHV_OUTPUT_READ_GPA)(pIn + 1);
977 pIn->PartitionId = pGVM->nemr0.s.idHvPartition;
978 pIn->VpIndex = pGVCpu->idCpu;
979 pIn->ByteCount = 0x10;
980 pIn->BaseGpa = GCPhys;
981 pIn->ControlFlags.AsUINT64 = 0;
982 pIn->ControlFlags.CacheType = HvCacheTypeX64WriteCombining;
983 memset(pOut, 0xfe, sizeof(*pOut));
984 uint64_t volatile uResult = g_pfnHvlInvokeHypercall(HvCallReadGpa, pGVCpu->nemr0.s.HCPhysHypercallData,
985 pGVCpu->nemr0.s.HCPhysHypercallData + sizeof(*pIn));
986 LogRel(("nemR3WinDummyReadGpa: %RGp -> %#RX64; code=%u rsvd=%u abData=%.16Rhxs\n",
987 GCPhys, uResult, pOut->AccessResult.ResultCode, pOut->AccessResult.Reserved, pOut->Data));
988 __debugbreak();
989
990 return uResult != 0 ? VERR_READ_ERROR : VINF_SUCCESS;
991}
992#endif
993
994
995#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
996/**
997 * Worker for NEMR0MapPages and others.
998 */
999NEM_TMPL_STATIC int nemR0WinMapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
1000 uint32_t cPages, uint32_t fFlags)
1001{
1002 /*
1003 * Validate.
1004 */
1005 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
1006
1007 AssertReturn(cPages > 0, VERR_OUT_OF_RANGE);
1008 AssertReturn(cPages <= NEM_MAX_MAP_PAGES, VERR_OUT_OF_RANGE);
1009 AssertReturn(!(fFlags & ~(HV_MAP_GPA_MAYBE_ACCESS_MASK & ~HV_MAP_GPA_DUNNO_ACCESS)), VERR_INVALID_FLAGS);
1010 AssertMsgReturn(!(GCPhysDst & X86_PAGE_OFFSET_MASK), ("GCPhysDst=%RGp\n", GCPhysDst), VERR_OUT_OF_RANGE);
1011 AssertReturn(GCPhysDst < _1E, VERR_OUT_OF_RANGE);
1012 if (GCPhysSrc != GCPhysDst)
1013 {
1014 AssertMsgReturn(!(GCPhysSrc & X86_PAGE_OFFSET_MASK), ("GCPhysSrc=%RGp\n", GCPhysSrc), VERR_OUT_OF_RANGE);
1015 AssertReturn(GCPhysSrc < _1E, VERR_OUT_OF_RANGE);
1016 }
1017
1018 /*
1019 * Compose and make the hypercall.
1020 * Ring-3 is not allowed to fill in the host physical addresses of the call.
1021 */
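    /* Rough sketch of the hypercall control word used below (per the Hyper-V TLFS):
       bits 0..15 carry the call code (HvCallMapGpaPages), bits 32..43 the rep count,
       and the result value reports the completed rep count in the same position,
       hence the uResult == ((uint64_t)cPages << 32) success check. */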
1022 for (uint32_t iTries = 0;; iTries++)
1023 {
1024 RTGCPHYS GCPhysSrcTmp = GCPhysSrc;
1025 HV_INPUT_MAP_GPA_PAGES *pMapPages = (HV_INPUT_MAP_GPA_PAGES *)pGVCpu->nemr0.s.HypercallData.pbPage;
1026 AssertPtrReturn(pMapPages, VERR_INTERNAL_ERROR_3);
1027 pMapPages->TargetPartitionId = pGVM->nemr0.s.idHvPartition;
1028 pMapPages->TargetGpaBase = GCPhysDst >> X86_PAGE_SHIFT;
1029 pMapPages->MapFlags = fFlags;
1030 pMapPages->u32ExplicitPadding = 0;
1031
1032 for (uint32_t iPage = 0; iPage < cPages; iPage++, GCPhysSrcTmp += X86_PAGE_SIZE)
1033 {
1034 RTHCPHYS HCPhys = NIL_RTGCPHYS;
1035 int rc = PGMPhysGCPhys2HCPhys(pGVM, GCPhysSrcTmp, &HCPhys);
1036 AssertRCReturn(rc, rc);
1037 pMapPages->PageList[iPage] = HCPhys >> X86_PAGE_SHIFT;
1038 }
1039
1040 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallMapGpaPages | ((uint64_t)cPages << 32),
1041 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0);
1042 Log6(("NEMR0MapPages: %RGp/%RGp L %u prot %#x -> %#RX64\n",
1043 GCPhysDst, GCPhysSrcTmp - cPages * X86_PAGE_SIZE, cPages, fFlags, uResult));
1044 if (uResult == ((uint64_t)cPages << 32))
1045 return VINF_SUCCESS;
1046
1047 /*
1048 * If the partition is out of memory, try donating another 512 pages to
1049 * it (2MB). VID.SYS does multiples of 512 pages, nothing smaller.
1050 */
1051 if ( uResult != HV_STATUS_INSUFFICIENT_MEMORY
1052 || iTries > 16
1053 || g_pfnWinHvDepositMemory == NULL)
1054 {
1055 LogRel(("g_pfnHvlInvokeHypercall/MapGpaPages -> %#RX64\n", uResult));
1056 return VERR_NEM_MAP_PAGES_FAILED;
1057 }
1058
1059 size_t cPagesAdded = 0;
1060 NTSTATUS rcNt = g_pfnWinHvDepositMemory(pGVM->nemr0.s.idHvPartition, 512, 0, &cPagesAdded);
1061 if (!cPagesAdded)
1062 {
1063 LogRel(("g_pfnWinHvDepositMemory -> %#x / %#RX64\n", rcNt, uResult));
1064 return VERR_NEM_MAP_PAGES_FAILED;
1065 }
1066 }
1067}
1068#endif /* NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
1069
1070
1071/**
1072 * Maps pages into the guest physical address space.
1073 *
1074 * Generally the caller will be under the PGM lock already, so no extra effort
1075 * is needed to make sure all changes happen under it.
1076 *
1077 * @returns VBox status code.
1078 * @param pGVM The ring-0 VM handle.
1079 * @param idCpu The calling EMT. Necessary for getting the
1080 * hypercall page and arguments.
1081 * @thread EMT(idCpu)
1082 */
1083VMMR0_INT_DECL(int) NEMR0MapPages(PGVM pGVM, VMCPUID idCpu)
1084{
1085#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1086 /*
1087 * Unpack the call.
1088 */
1089 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
1090 if (RT_SUCCESS(rc))
1091 {
1092 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1093
1094 RTGCPHYS const GCPhysSrc = pGVCpu->nem.s.Hypercall.MapPages.GCPhysSrc;
1095 RTGCPHYS const GCPhysDst = pGVCpu->nem.s.Hypercall.MapPages.GCPhysDst;
1096 uint32_t const cPages = pGVCpu->nem.s.Hypercall.MapPages.cPages;
1097 HV_MAP_GPA_FLAGS const fFlags = pGVCpu->nem.s.Hypercall.MapPages.fFlags;
1098
1099 /*
1100 * Do the work.
1101 */
1102 rc = nemR0WinMapPages(pGVM, pGVCpu, GCPhysSrc, GCPhysDst, cPages, fFlags);
1103 }
1104 return rc;
1105#else
1106 RT_NOREF(pGVM, idCpu);
1107 return VERR_NOT_IMPLEMENTED;
1108#endif
1109}
1110
1111
1112#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1113/**
1114 * Worker for NEMR0UnmapPages and others.
1115 */
1116NEM_TMPL_STATIC int nemR0WinUnmapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys, uint32_t cPages)
1117{
1118 /*
1119 * Validate input.
1120 */
1121 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
1122
1123 AssertReturn(cPages > 0, VERR_OUT_OF_RANGE);
1124 AssertReturn(cPages <= NEM_MAX_UNMAP_PAGES, VERR_OUT_OF_RANGE);
1125 AssertMsgReturn(!(GCPhys & X86_PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_OUT_OF_RANGE);
1126 AssertReturn(GCPhys < _1E, VERR_OUT_OF_RANGE);
1127
1128 /*
1129 * Compose and make the hypercall.
1130 */
1131 HV_INPUT_UNMAP_GPA_PAGES *pUnmapPages = (HV_INPUT_UNMAP_GPA_PAGES *)pGVCpu->nemr0.s.HypercallData.pbPage;
1132 AssertPtrReturn(pUnmapPages, VERR_INTERNAL_ERROR_3);
1133 pUnmapPages->TargetPartitionId = pGVM->nemr0.s.idHvPartition;
1134 pUnmapPages->TargetGpaBase = GCPhys >> X86_PAGE_SHIFT;
1135 pUnmapPages->fFlags = 0;
1136
1137 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallUnmapGpaPages | ((uint64_t)cPages << 32),
1138 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0);
1139 Log6(("NEMR0UnmapPages: %RGp L %u -> %#RX64\n", GCPhys, cPages, uResult));
1140 if (uResult == ((uint64_t)cPages << 32))
1141 {
1142# if 1 /* Do we need to do this? Hopefully not... */
1143 uint64_t volatile uR = g_pfnHvlInvokeHypercall(HvCallUncommitGpaPages | ((uint64_t)cPages << 32),
1144 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0);
1145 AssertMsg(uR == ((uint64_t)cPages << 32), ("uR=%#RX64\n", uR)); NOREF(uR);
1146# endif
1147 return VINF_SUCCESS;
1148 }
1149
1150 LogRel(("g_pfnHvlInvokeHypercall/UnmapGpaPages -> %#RX64\n", uResult));
1151 return VERR_NEM_UNMAP_PAGES_FAILED;
1152}
1153#endif /* NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
1154
1155
1156/**
1157 * Unmaps pages from the guest physical address space.
1158 *
1159 * Generally the caller will be under the PGM lock already, so no extra effort
1160 * is needed to make sure all changes happen under it.
1161 *
1162 * @returns VBox status code.
1163 * @param pGVM The ring-0 VM handle.
1164 * @param idCpu The calling EMT. Necessary for getting the
1165 * hypercall page and arguments.
1166 * @thread EMT(idCpu)
1167 */
1168VMMR0_INT_DECL(int) NEMR0UnmapPages(PGVM pGVM, VMCPUID idCpu)
1169{
1170#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1171 /*
1172 * Unpack the call.
1173 */
1174 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
1175 if (RT_SUCCESS(rc))
1176 {
1177 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1178
1179 RTGCPHYS const GCPhys = pGVCpu->nem.s.Hypercall.UnmapPages.GCPhys;
1180 uint32_t const cPages = pGVCpu->nem.s.Hypercall.UnmapPages.cPages;
1181
1182 /*
1183 * Do the work.
1184 */
1185 rc = nemR0WinUnmapPages(pGVM, pGVCpu, GCPhys, cPages);
1186 }
1187 return rc;
1188#else
1189 RT_NOREF(pGVM, idCpu);
1190 return VERR_NOT_IMPLEMENTED;
1191#endif
1192}
1193
1194
1195#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
1196/**
1197 * Worker for NEMR0ExportState.
1198 *
1199 * Intention is to use it internally later.
1200 *
1201 * @returns VBox status code.
1202 * @param pGVM The ring-0 VM handle.
1203 * @param pGVCpu The ring-0 VCPU handle.
1204 * @param pCtx The CPU context structure to export from.
1205 */
1206NEM_TMPL_STATIC int nemR0WinExportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx)
1207{
1208 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
1209 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
1210 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
1211
1212 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
1213 pInput->VpIndex = pGVCpu->idCpu;
1214 pInput->RsvdZ = 0;
1215
1216 uint64_t const fWhat = ~pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK);
1217 if ( !fWhat
1218 && pGVCpu->nem.s.fCurrentInterruptWindows == pGVCpu->nem.s.fDesiredInterruptWindows)
1219 return VINF_SUCCESS;
1220 uintptr_t iReg = 0;
1221
1222 /* GPRs */
1223 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1224 {
1225 if (fWhat & CPUMCTX_EXTRN_RAX)
1226 {
1227 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1228 pInput->Elements[iReg].Name = HvX64RegisterRax;
1229 pInput->Elements[iReg].Value.Reg64 = pCtx->rax;
1230 iReg++;
1231 }
1232 if (fWhat & CPUMCTX_EXTRN_RCX)
1233 {
1234 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1235 pInput->Elements[iReg].Name = HvX64RegisterRcx;
1236 pInput->Elements[iReg].Value.Reg64 = pCtx->rcx;
1237 iReg++;
1238 }
1239 if (fWhat & CPUMCTX_EXTRN_RDX)
1240 {
1241 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1242 pInput->Elements[iReg].Name = HvX64RegisterRdx;
1243 pInput->Elements[iReg].Value.Reg64 = pCtx->rdx;
1244 iReg++;
1245 }
1246 if (fWhat & CPUMCTX_EXTRN_RBX)
1247 {
1248 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1249 pInput->Elements[iReg].Name = HvX64RegisterRbx;
1250 pInput->Elements[iReg].Value.Reg64 = pCtx->rbx;
1251 iReg++;
1252 }
1253 if (fWhat & CPUMCTX_EXTRN_RSP)
1254 {
1255 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1256 pInput->Elements[iReg].Name = HvX64RegisterRsp;
1257 pInput->Elements[iReg].Value.Reg64 = pCtx->rsp;
1258 iReg++;
1259 }
1260 if (fWhat & CPUMCTX_EXTRN_RBP)
1261 {
1262 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1263 pInput->Elements[iReg].Name = HvX64RegisterRbp;
1264 pInput->Elements[iReg].Value.Reg64 = pCtx->rbp;
1265 iReg++;
1266 }
1267 if (fWhat & CPUMCTX_EXTRN_RSI)
1268 {
1269 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1270 pInput->Elements[iReg].Name = HvX64RegisterRsi;
1271 pInput->Elements[iReg].Value.Reg64 = pCtx->rsi;
1272 iReg++;
1273 }
1274 if (fWhat & CPUMCTX_EXTRN_RDI)
1275 {
1276 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1277 pInput->Elements[iReg].Name = HvX64RegisterRdi;
1278 pInput->Elements[iReg].Value.Reg64 = pCtx->rdi;
1279 iReg++;
1280 }
1281 if (fWhat & CPUMCTX_EXTRN_R8_R15)
1282 {
1283 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1284 pInput->Elements[iReg].Name = HvX64RegisterR8;
1285 pInput->Elements[iReg].Value.Reg64 = pCtx->r8;
1286 iReg++;
1287 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1288 pInput->Elements[iReg].Name = HvX64RegisterR9;
1289 pInput->Elements[iReg].Value.Reg64 = pCtx->r9;
1290 iReg++;
1291 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1292 pInput->Elements[iReg].Name = HvX64RegisterR10;
1293 pInput->Elements[iReg].Value.Reg64 = pCtx->r10;
1294 iReg++;
1295 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1296 pInput->Elements[iReg].Name = HvX64RegisterR11;
1297 pInput->Elements[iReg].Value.Reg64 = pCtx->r11;
1298 iReg++;
1299 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1300 pInput->Elements[iReg].Name = HvX64RegisterR12;
1301 pInput->Elements[iReg].Value.Reg64 = pCtx->r12;
1302 iReg++;
1303 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1304 pInput->Elements[iReg].Name = HvX64RegisterR13;
1305 pInput->Elements[iReg].Value.Reg64 = pCtx->r13;
1306 iReg++;
1307 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1308 pInput->Elements[iReg].Name = HvX64RegisterR14;
1309 pInput->Elements[iReg].Value.Reg64 = pCtx->r14;
1310 iReg++;
1311 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1312 pInput->Elements[iReg].Name = HvX64RegisterR15;
1313 pInput->Elements[iReg].Value.Reg64 = pCtx->r15;
1314 iReg++;
1315 }
1316 }
1317
1318 /* RIP & Flags */
1319 if (fWhat & CPUMCTX_EXTRN_RIP)
1320 {
1321 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1322 pInput->Elements[iReg].Name = HvX64RegisterRip;
1323 pInput->Elements[iReg].Value.Reg64 = pCtx->rip;
1324 iReg++;
1325 }
1326 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
1327 {
1328 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1329 pInput->Elements[iReg].Name = HvX64RegisterRflags;
1330 pInput->Elements[iReg].Value.Reg64 = pCtx->rflags.u;
1331 iReg++;
1332 }
1333
1334 /* Segments */
1335# define COPY_OUT_SEG(a_idx, a_enmName, a_SReg) \
1336 do { \
1337 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[a_idx]); \
1338 pInput->Elements[a_idx].Name = a_enmName; \
1339 pInput->Elements[a_idx].Value.Segment.Base = (a_SReg).u64Base; \
1340 pInput->Elements[a_idx].Value.Segment.Limit = (a_SReg).u32Limit; \
1341 pInput->Elements[a_idx].Value.Segment.Selector = (a_SReg).Sel; \
1342 pInput->Elements[a_idx].Value.Segment.Attributes = (a_SReg).Attr.u; \
1343 } while (0)
1344 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
1345 {
1346 if (fWhat & CPUMCTX_EXTRN_CS)
1347 {
1348 COPY_OUT_SEG(iReg, HvX64RegisterCs, pCtx->cs);
1349 iReg++;
1350 }
1351 if (fWhat & CPUMCTX_EXTRN_ES)
1352 {
1353 COPY_OUT_SEG(iReg, HvX64RegisterEs, pCtx->es);
1354 iReg++;
1355 }
1356 if (fWhat & CPUMCTX_EXTRN_SS)
1357 {
1358 COPY_OUT_SEG(iReg, HvX64RegisterSs, pCtx->ss);
1359 iReg++;
1360 }
1361 if (fWhat & CPUMCTX_EXTRN_DS)
1362 {
1363 COPY_OUT_SEG(iReg, HvX64RegisterDs, pCtx->ds);
1364 iReg++;
1365 }
1366 if (fWhat & CPUMCTX_EXTRN_FS)
1367 {
1368 COPY_OUT_SEG(iReg, HvX64RegisterFs, pCtx->fs);
1369 iReg++;
1370 }
1371 if (fWhat & CPUMCTX_EXTRN_GS)
1372 {
1373 COPY_OUT_SEG(iReg, HvX64RegisterGs, pCtx->gs);
1374 iReg++;
1375 }
1376 }
1377
1378 /* Descriptor tables & task segment. */
1379 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
1380 {
1381 if (fWhat & CPUMCTX_EXTRN_LDTR)
1382 {
1383 COPY_OUT_SEG(iReg, HvX64RegisterLdtr, pCtx->ldtr);
1384 iReg++;
1385 }
1386 if (fWhat & CPUMCTX_EXTRN_TR)
1387 {
1388 COPY_OUT_SEG(iReg, HvX64RegisterTr, pCtx->tr);
1389 iReg++;
1390 }
1391
1392 if (fWhat & CPUMCTX_EXTRN_IDTR)
1393 {
1394 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1395 pInput->Elements[iReg].Value.Table.Pad[0] = 0;
1396 pInput->Elements[iReg].Value.Table.Pad[1] = 0;
1397 pInput->Elements[iReg].Value.Table.Pad[2] = 0;
1398 pInput->Elements[iReg].Name = HvX64RegisterIdtr;
1399 pInput->Elements[iReg].Value.Table.Limit = pCtx->idtr.cbIdt;
1400 pInput->Elements[iReg].Value.Table.Base = pCtx->idtr.pIdt;
1401 iReg++;
1402 }
1403 if (fWhat & CPUMCTX_EXTRN_GDTR)
1404 {
1405 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1406 pInput->Elements[iReg].Value.Table.Pad[0] = 0;
1407 pInput->Elements[iReg].Value.Table.Pad[1] = 0;
1408 pInput->Elements[iReg].Value.Table.Pad[2] = 0;
1409 pInput->Elements[iReg].Name = HvX64RegisterGdtr;
1410 pInput->Elements[iReg].Value.Table.Limit = pCtx->gdtr.cbGdt;
1411 pInput->Elements[iReg].Value.Table.Base = pCtx->gdtr.pGdt;
1412 iReg++;
1413 }
1414 }
1415
1416 /* Control registers. */
1417 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
1418 {
1419 if (fWhat & CPUMCTX_EXTRN_CR0)
1420 {
1421 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1422 pInput->Elements[iReg].Name = HvX64RegisterCr0;
1423 pInput->Elements[iReg].Value.Reg64 = pCtx->cr0;
1424 iReg++;
1425 }
1426 if (fWhat & CPUMCTX_EXTRN_CR2)
1427 {
1428 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1429 pInput->Elements[iReg].Name = HvX64RegisterCr2;
1430 pInput->Elements[iReg].Value.Reg64 = pCtx->cr2;
1431 iReg++;
1432 }
1433 if (fWhat & CPUMCTX_EXTRN_CR3)
1434 {
1435 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1436 pInput->Elements[iReg].Name = HvX64RegisterCr3;
1437 pInput->Elements[iReg].Value.Reg64 = pCtx->cr3;
1438 iReg++;
1439 }
1440 if (fWhat & CPUMCTX_EXTRN_CR4)
1441 {
1442 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1443 pInput->Elements[iReg].Name = HvX64RegisterCr4;
1444 pInput->Elements[iReg].Value.Reg64 = pCtx->cr4;
1445 iReg++;
1446 }
1447 }
1448 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
1449 {
1450 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1451 pInput->Elements[iReg].Name = HvX64RegisterCr8;
1452 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestCR8(pGVCpu);
1453 iReg++;
1454 }
1455
1456 /** @todo does HvX64RegisterXfem mean XCR0? What about the related MSR. */
1457
1458 /* Debug registers. */
1459/** @todo fixme. Figure out what the hyper-v version of KVM_SET_GUEST_DEBUG would be. */
1460 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
1461 {
1462 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1463 pInput->Elements[iReg].Name = HvX64RegisterDr0;
1464 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR0(pGVCpu);
1465 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[0];
1466 iReg++;
1467 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1468 pInput->Elements[iReg].Name = HvX64RegisterDr1;
1469 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR1(pGVCpu);
1470 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[1];
1471 iReg++;
1472 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1473 pInput->Elements[iReg].Name = HvX64RegisterDr2;
1474 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR2(pGVCpu);
1475 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[2];
1476 iReg++;
1477 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1478 pInput->Elements[iReg].Name = HvX64RegisterDr3;
1479 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR3(pGVCpu);
1480 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[3];
1481 iReg++;
1482 }
1483 if (fWhat & CPUMCTX_EXTRN_DR6)
1484 {
1485 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1486 pInput->Elements[iReg].Name = HvX64RegisterDr6;
1487 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR6(pGVCpu);
1488 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[6];
1489 iReg++;
1490 }
1491 if (fWhat & CPUMCTX_EXTRN_DR7)
1492 {
1493 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1494 pInput->Elements[iReg].Name = HvX64RegisterDr7;
1495 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR7(pGVCpu);
1496 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[7];
1497 iReg++;
1498 }
1499
1500 /* Floating point state. */
1501 if (fWhat & CPUMCTX_EXTRN_X87)
1502 {
1503 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1504 pInput->Elements[iReg].Name = HvX64RegisterFpMmx0;
1505 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->XState.x87.aRegs[0].au64[0];
1506 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->XState.x87.aRegs[0].au64[1];
1507 iReg++;
1508 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1509 pInput->Elements[iReg].Name = HvX64RegisterFpMmx1;
1510 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->XState.x87.aRegs[1].au64[0];
1511 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->XState.x87.aRegs[1].au64[1];
1512 iReg++;
1513 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1514 pInput->Elements[iReg].Name = HvX64RegisterFpMmx2;
1515 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->XState.x87.aRegs[2].au64[0];
1516 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->XState.x87.aRegs[2].au64[1];
1517 iReg++;
1518 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1519 pInput->Elements[iReg].Name = HvX64RegisterFpMmx3;
1520 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->XState.x87.aRegs[3].au64[0];
1521 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->XState.x87.aRegs[3].au64[1];
1522 iReg++;
1523 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1524 pInput->Elements[iReg].Name = HvX64RegisterFpMmx4;
1525 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->XState.x87.aRegs[4].au64[0];
1526 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->XState.x87.aRegs[4].au64[1];
1527 iReg++;
1528 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1529 pInput->Elements[iReg].Name = HvX64RegisterFpMmx5;
1530 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->XState.x87.aRegs[5].au64[0];
1531 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->XState.x87.aRegs[5].au64[1];
1532 iReg++;
1533 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1534 pInput->Elements[iReg].Name = HvX64RegisterFpMmx6;
1535 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->XState.x87.aRegs[6].au64[0];
1536 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->XState.x87.aRegs[6].au64[1];
1537 iReg++;
1538 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1539 pInput->Elements[iReg].Name = HvX64RegisterFpMmx7;
1540 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->XState.x87.aRegs[7].au64[0];
1541 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->XState.x87.aRegs[7].au64[1];
1542 iReg++;
1543
1544 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1545 pInput->Elements[iReg].Name = HvX64RegisterFpControlStatus;
1546 pInput->Elements[iReg].Value.FpControlStatus.FpControl = pCtx->XState.x87.FCW;
1547 pInput->Elements[iReg].Value.FpControlStatus.FpStatus = pCtx->XState.x87.FSW;
1548 pInput->Elements[iReg].Value.FpControlStatus.FpTag = pCtx->XState.x87.FTW;
1549 pInput->Elements[iReg].Value.FpControlStatus.Reserved = pCtx->XState.x87.FTW >> 8;
1550 pInput->Elements[iReg].Value.FpControlStatus.LastFpOp = pCtx->XState.x87.FOP;
1551 pInput->Elements[iReg].Value.FpControlStatus.LastFpRip = (pCtx->XState.x87.FPUIP)
1552 | ((uint64_t)pCtx->XState.x87.CS << 32)
1553 | ((uint64_t)pCtx->XState.x87.Rsrvd1 << 48);
1554 iReg++;
1555/** @todo we've got trouble if we try to write just SSE w/o X87. */
1556 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1557 pInput->Elements[iReg].Name = HvX64RegisterXmmControlStatus;
1558 pInput->Elements[iReg].Value.XmmControlStatus.LastFpRdp = (pCtx->XState.x87.FPUDP)
1559 | ((uint64_t)pCtx->XState.x87.DS << 32)
1560 | ((uint64_t)pCtx->XState.x87.Rsrvd2 << 48);
1561 pInput->Elements[iReg].Value.XmmControlStatus.XmmStatusControl = pCtx->XState.x87.MXCSR;
1562 pInput->Elements[iReg].Value.XmmControlStatus.XmmStatusControlMask = pCtx->XState.x87.MXCSR_MASK; /** @todo ??? (Isn't this an output field?) */
1563 iReg++;
1564 }
1565
1566 /* Vector state. */
1567 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
1568 {
1569 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1570 pInput->Elements[iReg].Name = HvX64RegisterXmm0;
1571 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[0].uXmm.s.Lo;
1572 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[0].uXmm.s.Hi;
1573 iReg++;
1574 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1575 pInput->Elements[iReg].Name = HvX64RegisterXmm1;
1576 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[1].uXmm.s.Lo;
1577 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[1].uXmm.s.Hi;
1578 iReg++;
1579 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1580 pInput->Elements[iReg].Name = HvX64RegisterXmm2;
1581 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[2].uXmm.s.Lo;
1582 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[2].uXmm.s.Hi;
1583 iReg++;
1584 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1585 pInput->Elements[iReg].Name = HvX64RegisterXmm3;
1586 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[3].uXmm.s.Lo;
1587 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[3].uXmm.s.Hi;
1588 iReg++;
1589 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1590 pInput->Elements[iReg].Name = HvX64RegisterXmm4;
1591 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[4].uXmm.s.Lo;
1592 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[4].uXmm.s.Hi;
1593 iReg++;
1594 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1595 pInput->Elements[iReg].Name = HvX64RegisterXmm5;
1596 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[5].uXmm.s.Lo;
1597 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[5].uXmm.s.Hi;
1598 iReg++;
1599 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1600 pInput->Elements[iReg].Name = HvX64RegisterXmm6;
1601 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[6].uXmm.s.Lo;
1602 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[6].uXmm.s.Hi;
1603 iReg++;
1604 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1605 pInput->Elements[iReg].Name = HvX64RegisterXmm7;
1606 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[7].uXmm.s.Lo;
1607 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[7].uXmm.s.Hi;
1608 iReg++;
1609 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1610 pInput->Elements[iReg].Name = HvX64RegisterXmm8;
1611 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[8].uXmm.s.Lo;
1612 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[8].uXmm.s.Hi;
1613 iReg++;
1614 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1615 pInput->Elements[iReg].Name = HvX64RegisterXmm9;
1616 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[9].uXmm.s.Lo;
1617 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[9].uXmm.s.Hi;
1618 iReg++;
1619 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1620 pInput->Elements[iReg].Name = HvX64RegisterXmm10;
1621 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[10].uXmm.s.Lo;
1622 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[10].uXmm.s.Hi;
1623 iReg++;
1624 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1625 pInput->Elements[iReg].Name = HvX64RegisterXmm11;
1626 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[11].uXmm.s.Lo;
1627 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[11].uXmm.s.Hi;
1628 iReg++;
1629 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1630 pInput->Elements[iReg].Name = HvX64RegisterXmm12;
1631 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[12].uXmm.s.Lo;
1632 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[12].uXmm.s.Hi;
1633 iReg++;
1634 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1635 pInput->Elements[iReg].Name = HvX64RegisterXmm13;
1636 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[13].uXmm.s.Lo;
1637 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[13].uXmm.s.Hi;
1638 iReg++;
1639 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1640 pInput->Elements[iReg].Name = HvX64RegisterXmm14;
1641 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[14].uXmm.s.Lo;
1642 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[14].uXmm.s.Hi;
1643 iReg++;
1644 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1645 pInput->Elements[iReg].Name = HvX64RegisterXmm15;
1646 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[15].uXmm.s.Lo;
1647 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[15].uXmm.s.Hi;
1648 iReg++;
1649 }
1650
1651 /* MSRs */
1652 // HvX64RegisterTsc - don't touch
1653 if (fWhat & CPUMCTX_EXTRN_EFER)
1654 {
1655 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1656 pInput->Elements[iReg].Name = HvX64RegisterEfer;
1657 pInput->Elements[iReg].Value.Reg64 = pCtx->msrEFER;
1658 iReg++;
1659 }
1660 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1661 {
1662 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1663 pInput->Elements[iReg].Name = HvX64RegisterKernelGsBase;
1664 pInput->Elements[iReg].Value.Reg64 = pCtx->msrKERNELGSBASE;
1665 iReg++;
1666 }
1667 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1668 {
1669 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1670 pInput->Elements[iReg].Name = HvX64RegisterSysenterCs;
1671 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.cs;
1672 iReg++;
1673 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1674 pInput->Elements[iReg].Name = HvX64RegisterSysenterEip;
1675 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.eip;
1676 iReg++;
1677 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1678 pInput->Elements[iReg].Name = HvX64RegisterSysenterEsp;
1679 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.esp;
1680 iReg++;
1681 }
1682 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1683 {
1684 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1685 pInput->Elements[iReg].Name = HvX64RegisterStar;
1686 pInput->Elements[iReg].Value.Reg64 = pCtx->msrSTAR;
1687 iReg++;
1688 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1689 pInput->Elements[iReg].Name = HvX64RegisterLstar;
1690 pInput->Elements[iReg].Value.Reg64 = pCtx->msrLSTAR;
1691 iReg++;
1692 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1693 pInput->Elements[iReg].Name = HvX64RegisterCstar;
1694 pInput->Elements[iReg].Value.Reg64 = pCtx->msrCSTAR;
1695 iReg++;
1696 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1697 pInput->Elements[iReg].Name = HvX64RegisterSfmask;
1698 pInput->Elements[iReg].Value.Reg64 = pCtx->msrSFMASK;
1699 iReg++;
1700 }
1701 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1702 {
1703 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1704 pInput->Elements[iReg].Name = HvX64RegisterApicBase;
1705 pInput->Elements[iReg].Value.Reg64 = APICGetBaseMsrNoCheck(pGVCpu);
1706 iReg++;
1707 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1708 pInput->Elements[iReg].Name = HvX64RegisterPat;
1709 pInput->Elements[iReg].Value.Reg64 = pCtx->msrPAT;
1710 iReg++;
1711# if 0 /** @todo HvX64RegisterMtrrCap is read only? Seems it's not even readable. */
1712 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1713 pInput->Elements[iReg].Name = HvX64RegisterMtrrCap;
1714 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestIa32MtrrCap(pGVCpu);
1715 iReg++;
1716# endif
1717
1718 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pGVCpu);
1719
1720 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1721 pInput->Elements[iReg].Name = HvX64RegisterMtrrDefType;
1722 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrDefType;
1723 iReg++;
1724
1725        /** @todo we don't keep state for HvX64RegisterMtrrPhysBaseX and HvX64RegisterMtrrPhysMaskX */
1726
1727 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1728 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix64k00000;
1729 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix64K_00000;
1730 iReg++;
1731 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1732 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix16k80000;
1733 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix16K_80000;
1734 iReg++;
1735 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1736 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix16kA0000;
1737 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix16K_A0000;
1738 iReg++;
1739 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1740 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kC0000;
1741 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_C0000;
1742 iReg++;
1743 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1744 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kC8000;
1745 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_C8000;
1746 iReg++;
1747 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1748 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kD0000;
1749 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_D0000;
1750 iReg++;
1751 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1752 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kD8000;
1753 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_D8000;
1754 iReg++;
1755 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1756 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kE0000;
1757 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_E0000;
1758 iReg++;
1759 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1760 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kE8000;
1761 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_E8000;
1762 iReg++;
1763 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1764 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kF0000;
1765 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_F0000;
1766 iReg++;
1767 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1768 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kF8000;
1769 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_F8000;
1770 iReg++;
1771 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1772 pInput->Elements[iReg].Name = HvX64RegisterTscAux;
1773 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.TscAux;
1774 iReg++;
1775
1776# if 0 /** @todo Why can't we write these on Intel systems? Not that we really care... */
1777 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM);
1778 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
1779 {
1780 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1781 pInput->Elements[iReg].Name = HvX64RegisterIa32MiscEnable;
1782 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MiscEnable;
1783 iReg++;
1784 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1785 pInput->Elements[iReg].Name = HvX64RegisterIa32FeatureControl;
1786 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestIa32FeatureControl(pGVCpu);
1787 iReg++;
1788 }
1789# endif
1790 }
1791
1792 /* event injection (clear it). */
1793 if (fWhat & CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)
1794 {
1795 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1796 pInput->Elements[iReg].Name = HvRegisterPendingInterruption;
1797 pInput->Elements[iReg].Value.Reg64 = 0;
1798 iReg++;
1799 }
1800
1801 /* Interruptibility state. This can get a little complicated since we get
1802 half of the state via HV_X64_VP_EXECUTION_STATE. */
1803 if ( (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
1804 == (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI) )
1805 {
1806 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1807 pInput->Elements[iReg].Name = HvRegisterInterruptState;
1808 pInput->Elements[iReg].Value.Reg64 = 0;
1809 if ( VMCPU_FF_IS_SET(pGVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1810 && EMGetInhibitInterruptsPC(pGVCpu) == pCtx->rip)
1811 pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1;
1812 if (VMCPU_FF_IS_SET(pGVCpu, VMCPU_FF_BLOCK_NMIS))
1813 pInput->Elements[iReg].Value.InterruptState.NmiMasked = 1;
1814 iReg++;
1815 }
1816 else if (fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT)
1817 {
1818 if ( pGVCpu->nem.s.fLastInterruptShadow
1819 || ( VMCPU_FF_IS_SET(pGVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1820 && EMGetInhibitInterruptsPC(pGVCpu) == pCtx->rip))
1821 {
1822 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1823 pInput->Elements[iReg].Name = HvRegisterInterruptState;
1824 pInput->Elements[iReg].Value.Reg64 = 0;
1825 if ( VMCPU_FF_IS_SET(pGVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1826 && EMGetInhibitInterruptsPC(pGVCpu) == pCtx->rip)
1827 pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1;
1828 /** @todo Retrieve NMI state, currently assuming it's zero. (yes this may happen on I/O) */
1829 //if (VMCPU_FF_IS_ANY_SET(pGVCpu, VMCPU_FF_BLOCK_NMIS))
1830 // pInput->Elements[iReg].Value.InterruptState.NmiMasked = 1;
1831 iReg++;
1832 }
1833 }
1834 else
1835 Assert(!(fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI));
1836
1837 /* Interrupt windows. Always set if active as Hyper-V seems to be forgetful. */
1838 uint8_t const fDesiredIntWin = pGVCpu->nem.s.fDesiredInterruptWindows;
1839 if ( fDesiredIntWin
1840 || pGVCpu->nem.s.fCurrentInterruptWindows != fDesiredIntWin)
1841 {
1842 pGVCpu->nem.s.fCurrentInterruptWindows = pGVCpu->nem.s.fDesiredInterruptWindows;
1843 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1844 pInput->Elements[iReg].Name = HvX64RegisterDeliverabilityNotifications;
1845 pInput->Elements[iReg].Value.DeliverabilityNotifications.AsUINT64 = fDesiredIntWin;
1846 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.NmiNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_NMI));
1847 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.InterruptNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_REGULAR));
1848 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.InterruptPriority == (fDesiredIntWin & NEM_WIN_INTW_F_PRIO_MASK) >> NEM_WIN_INTW_F_PRIO_SHIFT);
1849 iReg++;
1850 }
1851
1852 /// @todo HvRegisterPendingEvent0
1853 /// @todo HvRegisterPendingEvent1
1854
1855 /*
1856 * Set the registers.
1857 */
1858 Assert((uintptr_t)&pInput->Elements[iReg] - (uintptr_t)pGVCpu->nemr0.s.HypercallData.pbPage < PAGE_SIZE); /* max is 127 */
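    /* Note: the 127 cap presumably comes from squeezing everything into the single 4 KB
       hypercall page: after the fixed HV_INPUT_SET_VP_REGISTERS header each HV_REGISTER_ASSOC
       element occupies 32 bytes, leaving room for on the order of (4096 - 16) / 32, i.e. 127
       whole entries. */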
1859
1860 /*
1861 * Make the hypercall.
1862 */
1863 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, iReg),
1864 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0 /*GCPhysOutput*/);
1865 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(iReg),
1866 ("uResult=%RX64 iRegs=%#x\n", uResult, iReg),
1867 VERR_NEM_SET_REGISTERS_FAILED);
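    /* Note on the convention used above (inferred from the macro usage here rather than stated
       authoritatively): HV_MAKE_CALL_INFO() packs the hypercall code together with the rep
       count, and a fully successful rep hypercall reports HV_STATUS_SUCCESS plus the number of
       completed reps, which is what HV_MAKE_CALL_REP_RET(iReg) encodes for the comparison. */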
1868 //LogFlow(("nemR0WinExportState: uResult=%#RX64 iReg=%zu fWhat=%#018RX64 fExtrn=%#018RX64 -> %#018RX64\n", uResult, iReg, fWhat, pCtx->fExtrn,
1869 // pCtx->fExtrn | CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM ));
1870 pCtx->fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM;
1871 return VINF_SUCCESS;
1872}
1873#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
1874
1875
1876/**
1877 * Export the state to the native API (out of CPUMCTX).
1878 *
1879 * @returns VBox status code
1880 * @param pGVM The ring-0 VM handle.
1881 * @param idCpu The calling EMT. Necessary for getting the
1882 * hypercall page and arguments.
1883 */
1884VMMR0_INT_DECL(int) NEMR0ExportState(PGVM pGVM, VMCPUID idCpu)
1885{
1886#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
1887 /*
1888 * Validate the call.
1889 */
1890 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
1891 if (RT_SUCCESS(rc))
1892 {
1893 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1894 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
1895
1896 /*
1897 * Call worker.
1898 */
1899 rc = nemR0WinExportState(pGVM, pGVCpu, &pGVCpu->cpum.GstCtx);
1900 }
1901 return rc;
1902#else
1903 RT_NOREF(pGVM, idCpu);
1904 return VERR_NOT_IMPLEMENTED;
1905#endif
1906}
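
#if 0 /* Illustrative sketch only (not compiled): how a ring-0 caller on the EMT might push the
         current guest context out to Hyper-V using the API above. The helper name is made up
         for the example; real callers reach this through the NEM ring-3/ring-0 plumbing. */
static int nemR0DemoExportGuestState(PGVM pGVM, VMCPUID idCpu)
{
    /* No fWhat parameter here: the worker decides what to write out from the guest context's
       fExtrn tracking and marks everything as external again once the hypercall succeeds. */
    return NEMR0ExportState(pGVM, idCpu);
}
#endif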
1907
1908
1909#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
1910/**
1911 * Worker for NEMR0ImportState.
1912 *
1913 * The intention is to also use this internally later.
1914 *
1915 * @returns VBox status code.
1916 * @param pGVM The ring-0 VM handle.
1917 * @param pGVCpu The ring-0 VCPU handle.
1918 * @param pCtx The CPU context structure to import into.
1919 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
1920 * @param fCanUpdateCr3 Whether it's safe to update CR3 or not.
1921 */
1922NEM_TMPL_STATIC int nemR0WinImportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat, bool fCanUpdateCr3)
1923{
1924 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
1925 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
1926 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
1927 Assert(pCtx == &pGVCpu->cpum.GstCtx);
1928
1929 fWhat &= pCtx->fExtrn;
1930
1931 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
1932 pInput->VpIndex = pGVCpu->idCpu;
1933 pInput->fFlags = 0;
1934
1935 /* GPRs */
1936 uintptr_t iReg = 0;
1937 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1938 {
1939 if (fWhat & CPUMCTX_EXTRN_RAX)
1940 pInput->Names[iReg++] = HvX64RegisterRax;
1941 if (fWhat & CPUMCTX_EXTRN_RCX)
1942 pInput->Names[iReg++] = HvX64RegisterRcx;
1943 if (fWhat & CPUMCTX_EXTRN_RDX)
1944 pInput->Names[iReg++] = HvX64RegisterRdx;
1945 if (fWhat & CPUMCTX_EXTRN_RBX)
1946 pInput->Names[iReg++] = HvX64RegisterRbx;
1947 if (fWhat & CPUMCTX_EXTRN_RSP)
1948 pInput->Names[iReg++] = HvX64RegisterRsp;
1949 if (fWhat & CPUMCTX_EXTRN_RBP)
1950 pInput->Names[iReg++] = HvX64RegisterRbp;
1951 if (fWhat & CPUMCTX_EXTRN_RSI)
1952 pInput->Names[iReg++] = HvX64RegisterRsi;
1953 if (fWhat & CPUMCTX_EXTRN_RDI)
1954 pInput->Names[iReg++] = HvX64RegisterRdi;
1955 if (fWhat & CPUMCTX_EXTRN_R8_R15)
1956 {
1957 pInput->Names[iReg++] = HvX64RegisterR8;
1958 pInput->Names[iReg++] = HvX64RegisterR9;
1959 pInput->Names[iReg++] = HvX64RegisterR10;
1960 pInput->Names[iReg++] = HvX64RegisterR11;
1961 pInput->Names[iReg++] = HvX64RegisterR12;
1962 pInput->Names[iReg++] = HvX64RegisterR13;
1963 pInput->Names[iReg++] = HvX64RegisterR14;
1964 pInput->Names[iReg++] = HvX64RegisterR15;
1965 }
1966 }
1967
1968 /* RIP & Flags */
1969 if (fWhat & CPUMCTX_EXTRN_RIP)
1970 pInput->Names[iReg++] = HvX64RegisterRip;
1971 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
1972 pInput->Names[iReg++] = HvX64RegisterRflags;
1973
1974 /* Segments */
1975 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
1976 {
1977 if (fWhat & CPUMCTX_EXTRN_CS)
1978 pInput->Names[iReg++] = HvX64RegisterCs;
1979 if (fWhat & CPUMCTX_EXTRN_ES)
1980 pInput->Names[iReg++] = HvX64RegisterEs;
1981 if (fWhat & CPUMCTX_EXTRN_SS)
1982 pInput->Names[iReg++] = HvX64RegisterSs;
1983 if (fWhat & CPUMCTX_EXTRN_DS)
1984 pInput->Names[iReg++] = HvX64RegisterDs;
1985 if (fWhat & CPUMCTX_EXTRN_FS)
1986 pInput->Names[iReg++] = HvX64RegisterFs;
1987 if (fWhat & CPUMCTX_EXTRN_GS)
1988 pInput->Names[iReg++] = HvX64RegisterGs;
1989 }
1990
1991 /* Descriptor tables and the task segment. */
1992 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
1993 {
1994 if (fWhat & CPUMCTX_EXTRN_LDTR)
1995 pInput->Names[iReg++] = HvX64RegisterLdtr;
1996 if (fWhat & CPUMCTX_EXTRN_TR)
1997 pInput->Names[iReg++] = HvX64RegisterTr;
1998 if (fWhat & CPUMCTX_EXTRN_IDTR)
1999 pInput->Names[iReg++] = HvX64RegisterIdtr;
2000 if (fWhat & CPUMCTX_EXTRN_GDTR)
2001 pInput->Names[iReg++] = HvX64RegisterGdtr;
2002 }
2003
2004 /* Control registers. */
2005 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
2006 {
2007 if (fWhat & CPUMCTX_EXTRN_CR0)
2008 pInput->Names[iReg++] = HvX64RegisterCr0;
2009 if (fWhat & CPUMCTX_EXTRN_CR2)
2010 pInput->Names[iReg++] = HvX64RegisterCr2;
2011 if (fWhat & CPUMCTX_EXTRN_CR3)
2012 pInput->Names[iReg++] = HvX64RegisterCr3;
2013 if (fWhat & CPUMCTX_EXTRN_CR4)
2014 pInput->Names[iReg++] = HvX64RegisterCr4;
2015 }
2016 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
2017 pInput->Names[iReg++] = HvX64RegisterCr8;
2018
2019 /* Debug registers. */
2020 if (fWhat & CPUMCTX_EXTRN_DR7)
2021 pInput->Names[iReg++] = HvX64RegisterDr7;
2022 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
2023 {
2024 if (!(fWhat & CPUMCTX_EXTRN_DR7) && (pCtx->fExtrn & CPUMCTX_EXTRN_DR7))
2025 {
2026 fWhat |= CPUMCTX_EXTRN_DR7;
2027 pInput->Names[iReg++] = HvX64RegisterDr7;
2028 }
2029 pInput->Names[iReg++] = HvX64RegisterDr0;
2030 pInput->Names[iReg++] = HvX64RegisterDr1;
2031 pInput->Names[iReg++] = HvX64RegisterDr2;
2032 pInput->Names[iReg++] = HvX64RegisterDr3;
2033 }
2034 if (fWhat & CPUMCTX_EXTRN_DR6)
2035 pInput->Names[iReg++] = HvX64RegisterDr6;
2036
2037 /* Floating point state. */
2038 if (fWhat & CPUMCTX_EXTRN_X87)
2039 {
2040 pInput->Names[iReg++] = HvX64RegisterFpMmx0;
2041 pInput->Names[iReg++] = HvX64RegisterFpMmx1;
2042 pInput->Names[iReg++] = HvX64RegisterFpMmx2;
2043 pInput->Names[iReg++] = HvX64RegisterFpMmx3;
2044 pInput->Names[iReg++] = HvX64RegisterFpMmx4;
2045 pInput->Names[iReg++] = HvX64RegisterFpMmx5;
2046 pInput->Names[iReg++] = HvX64RegisterFpMmx6;
2047 pInput->Names[iReg++] = HvX64RegisterFpMmx7;
2048 pInput->Names[iReg++] = HvX64RegisterFpControlStatus;
2049 }
2050 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
2051 pInput->Names[iReg++] = HvX64RegisterXmmControlStatus;
2052
2053 /* Vector state. */
2054 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
2055 {
2056 pInput->Names[iReg++] = HvX64RegisterXmm0;
2057 pInput->Names[iReg++] = HvX64RegisterXmm1;
2058 pInput->Names[iReg++] = HvX64RegisterXmm2;
2059 pInput->Names[iReg++] = HvX64RegisterXmm3;
2060 pInput->Names[iReg++] = HvX64RegisterXmm4;
2061 pInput->Names[iReg++] = HvX64RegisterXmm5;
2062 pInput->Names[iReg++] = HvX64RegisterXmm6;
2063 pInput->Names[iReg++] = HvX64RegisterXmm7;
2064 pInput->Names[iReg++] = HvX64RegisterXmm8;
2065 pInput->Names[iReg++] = HvX64RegisterXmm9;
2066 pInput->Names[iReg++] = HvX64RegisterXmm10;
2067 pInput->Names[iReg++] = HvX64RegisterXmm11;
2068 pInput->Names[iReg++] = HvX64RegisterXmm12;
2069 pInput->Names[iReg++] = HvX64RegisterXmm13;
2070 pInput->Names[iReg++] = HvX64RegisterXmm14;
2071 pInput->Names[iReg++] = HvX64RegisterXmm15;
2072 }
2073
2074 /* MSRs */
2075 // HvX64RegisterTsc - don't touch
2076 if (fWhat & CPUMCTX_EXTRN_EFER)
2077 pInput->Names[iReg++] = HvX64RegisterEfer;
2078 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
2079 pInput->Names[iReg++] = HvX64RegisterKernelGsBase;
2080 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
2081 {
2082 pInput->Names[iReg++] = HvX64RegisterSysenterCs;
2083 pInput->Names[iReg++] = HvX64RegisterSysenterEip;
2084 pInput->Names[iReg++] = HvX64RegisterSysenterEsp;
2085 }
2086 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
2087 {
2088 pInput->Names[iReg++] = HvX64RegisterStar;
2089 pInput->Names[iReg++] = HvX64RegisterLstar;
2090 pInput->Names[iReg++] = HvX64RegisterCstar;
2091 pInput->Names[iReg++] = HvX64RegisterSfmask;
2092 }
2093
2094# ifdef LOG_ENABLED
2095 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM);
2096# endif
2097 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
2098 {
2099 pInput->Names[iReg++] = HvX64RegisterApicBase; /// @todo APIC BASE
2100 pInput->Names[iReg++] = HvX64RegisterPat;
2101# if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
2102 pInput->Names[iReg++] = HvX64RegisterMtrrCap;
2103# endif
2104 pInput->Names[iReg++] = HvX64RegisterMtrrDefType;
2105 pInput->Names[iReg++] = HvX64RegisterMtrrFix64k00000;
2106 pInput->Names[iReg++] = HvX64RegisterMtrrFix16k80000;
2107 pInput->Names[iReg++] = HvX64RegisterMtrrFix16kA0000;
2108 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kC0000;
2109 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kC8000;
2110 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kD0000;
2111 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kD8000;
2112 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kE0000;
2113 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kE8000;
2114 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kF0000;
2115 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kF8000;
2116 pInput->Names[iReg++] = HvX64RegisterTscAux;
2117# if 0 /** @todo why can't we read HvX64RegisterIa32MiscEnable? */
2118 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
2119 pInput->Names[iReg++] = HvX64RegisterIa32MiscEnable;
2120# endif
2121# ifdef LOG_ENABLED
2122 if (enmCpuVendor != CPUMCPUVENDOR_AMD && enmCpuVendor != CPUMCPUVENDOR_HYGON)
2123 pInput->Names[iReg++] = HvX64RegisterIa32FeatureControl;
2124# endif
2125 }
2126
2127 /* Interruptibility. */
2128 if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
2129 {
2130 pInput->Names[iReg++] = HvRegisterInterruptState;
2131 pInput->Names[iReg++] = HvX64RegisterRip;
2132 }
2133
2134 /* event injection */
2135 pInput->Names[iReg++] = HvRegisterPendingInterruption;
2136 pInput->Names[iReg++] = HvRegisterPendingEvent0;
2137 pInput->Names[iReg++] = HvRegisterPendingEvent1;
2138 size_t const cRegs = iReg;
2139 size_t const cbInput = RT_ALIGN_Z(RT_UOFFSETOF_DYN(HV_INPUT_GET_VP_REGISTERS, Names[cRegs]), 32);
2140
2141 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
2142 Assert((uintptr_t)&paValues[cRegs] - (uintptr_t)pGVCpu->nemr0.s.HypercallData.pbPage < PAGE_SIZE); /* (max is around 168 registers) */
2143 RT_BZERO(paValues, cRegs * sizeof(paValues[0]));
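    /* Layout note: the register name array (hypercall input) sits at the start of the per-VCPU
       hypercall page and the HV_REGISTER_VALUE output array follows at the 32-byte aligned
       offset cbInput, so the same physical page serves as both input (HCPhysPage) and output
       (HCPhysPage + cbInput) for the call below. */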
2144
2145 /*
2146 * Make the hypercall.
2147 */
2148 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, cRegs),
2149 pGVCpu->nemr0.s.HypercallData.HCPhysPage,
2150 pGVCpu->nemr0.s.HypercallData.HCPhysPage + cbInput);
2151 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(cRegs),
2152 ("uResult=%RX64 cRegs=%#x\n", uResult, cRegs),
2153 VERR_NEM_GET_REGISTERS_FAILED);
2154 //LogFlow(("nemR0WinImportState: uResult=%#RX64 iReg=%zu fWhat=%#018RX64 fExtr=%#018RX64\n", uResult, cRegs, fWhat, pCtx->fExtrn));
2155
2156 /*
2157 * Copy information to the CPUM context.
2158 */
2159 iReg = 0;
2160
2161 /* GPRs */
2162 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
2163 {
2164 if (fWhat & CPUMCTX_EXTRN_RAX)
2165 {
2166 Assert(pInput->Names[iReg] == HvX64RegisterRax);
2167 pCtx->rax = paValues[iReg++].Reg64;
2168 }
2169 if (fWhat & CPUMCTX_EXTRN_RCX)
2170 {
2171 Assert(pInput->Names[iReg] == HvX64RegisterRcx);
2172 pCtx->rcx = paValues[iReg++].Reg64;
2173 }
2174 if (fWhat & CPUMCTX_EXTRN_RDX)
2175 {
2176 Assert(pInput->Names[iReg] == HvX64RegisterRdx);
2177 pCtx->rdx = paValues[iReg++].Reg64;
2178 }
2179 if (fWhat & CPUMCTX_EXTRN_RBX)
2180 {
2181 Assert(pInput->Names[iReg] == HvX64RegisterRbx);
2182 pCtx->rbx = paValues[iReg++].Reg64;
2183 }
2184 if (fWhat & CPUMCTX_EXTRN_RSP)
2185 {
2186 Assert(pInput->Names[iReg] == HvX64RegisterRsp);
2187 pCtx->rsp = paValues[iReg++].Reg64;
2188 }
2189 if (fWhat & CPUMCTX_EXTRN_RBP)
2190 {
2191 Assert(pInput->Names[iReg] == HvX64RegisterRbp);
2192 pCtx->rbp = paValues[iReg++].Reg64;
2193 }
2194 if (fWhat & CPUMCTX_EXTRN_RSI)
2195 {
2196 Assert(pInput->Names[iReg] == HvX64RegisterRsi);
2197 pCtx->rsi = paValues[iReg++].Reg64;
2198 }
2199 if (fWhat & CPUMCTX_EXTRN_RDI)
2200 {
2201 Assert(pInput->Names[iReg] == HvX64RegisterRdi);
2202 pCtx->rdi = paValues[iReg++].Reg64;
2203 }
2204 if (fWhat & CPUMCTX_EXTRN_R8_R15)
2205 {
2206 Assert(pInput->Names[iReg] == HvX64RegisterR8);
2207 Assert(pInput->Names[iReg + 7] == HvX64RegisterR15);
2208 pCtx->r8 = paValues[iReg++].Reg64;
2209 pCtx->r9 = paValues[iReg++].Reg64;
2210 pCtx->r10 = paValues[iReg++].Reg64;
2211 pCtx->r11 = paValues[iReg++].Reg64;
2212 pCtx->r12 = paValues[iReg++].Reg64;
2213 pCtx->r13 = paValues[iReg++].Reg64;
2214 pCtx->r14 = paValues[iReg++].Reg64;
2215 pCtx->r15 = paValues[iReg++].Reg64;
2216 }
2217 }
2218
2219 /* RIP & Flags */
2220 if (fWhat & CPUMCTX_EXTRN_RIP)
2221 {
2222 Assert(pInput->Names[iReg] == HvX64RegisterRip);
2223 pCtx->rip = paValues[iReg++].Reg64;
2224 }
2225 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
2226 {
2227 Assert(pInput->Names[iReg] == HvX64RegisterRflags);
2228 pCtx->rflags.u = paValues[iReg++].Reg64;
2229 }
2230
2231 /* Segments */
2232# define COPY_BACK_SEG(a_idx, a_enmName, a_SReg) \
2233 do { \
2234 Assert(pInput->Names[a_idx] == a_enmName); \
2235 (a_SReg).u64Base = paValues[a_idx].Segment.Base; \
2236 (a_SReg).u32Limit = paValues[a_idx].Segment.Limit; \
2237 (a_SReg).ValidSel = (a_SReg).Sel = paValues[a_idx].Segment.Selector; \
2238 (a_SReg).Attr.u = paValues[a_idx].Segment.Attributes; \
2239 (a_SReg).fFlags = CPUMSELREG_FLAGS_VALID; \
2240 } while (0)
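    /* (COPY_BACK_SEG copies base/limit/selector/attributes from the Hyper-V segment value into
       the given CPUM selector register and marks its hidden parts as valid.) */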
2241 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
2242 {
2243 if (fWhat & CPUMCTX_EXTRN_CS)
2244 {
2245 COPY_BACK_SEG(iReg, HvX64RegisterCs, pCtx->cs);
2246 iReg++;
2247 }
2248 if (fWhat & CPUMCTX_EXTRN_ES)
2249 {
2250 COPY_BACK_SEG(iReg, HvX64RegisterEs, pCtx->es);
2251 iReg++;
2252 }
2253 if (fWhat & CPUMCTX_EXTRN_SS)
2254 {
2255 COPY_BACK_SEG(iReg, HvX64RegisterSs, pCtx->ss);
2256 iReg++;
2257 }
2258 if (fWhat & CPUMCTX_EXTRN_DS)
2259 {
2260 COPY_BACK_SEG(iReg, HvX64RegisterDs, pCtx->ds);
2261 iReg++;
2262 }
2263 if (fWhat & CPUMCTX_EXTRN_FS)
2264 {
2265 COPY_BACK_SEG(iReg, HvX64RegisterFs, pCtx->fs);
2266 iReg++;
2267 }
2268 if (fWhat & CPUMCTX_EXTRN_GS)
2269 {
2270 COPY_BACK_SEG(iReg, HvX64RegisterGs, pCtx->gs);
2271 iReg++;
2272 }
2273 }
2274 /* Descriptor tables and the task segment. */
2275 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
2276 {
2277 if (fWhat & CPUMCTX_EXTRN_LDTR)
2278 {
2279 COPY_BACK_SEG(iReg, HvX64RegisterLdtr, pCtx->ldtr);
2280 iReg++;
2281 }
2282 if (fWhat & CPUMCTX_EXTRN_TR)
2283 {
2284            /* AMD-V is happy to load TR in the AVAIL state, whereas Intel insists on BUSY. To
2285               avoid triggering sanity assertions around the code, always fix this up. */
2286 COPY_BACK_SEG(iReg, HvX64RegisterTr, pCtx->tr);
2287 switch (pCtx->tr.Attr.n.u4Type)
2288 {
2289 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2290 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2291 break;
2292 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
2293 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2294 break;
2295 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
2296 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;
2297 break;
2298 }
2299 iReg++;
2300 }
2301 if (fWhat & CPUMCTX_EXTRN_IDTR)
2302 {
2303 Assert(pInput->Names[iReg] == HvX64RegisterIdtr);
2304 pCtx->idtr.cbIdt = paValues[iReg].Table.Limit;
2305 pCtx->idtr.pIdt = paValues[iReg].Table.Base;
2306 iReg++;
2307 }
2308 if (fWhat & CPUMCTX_EXTRN_GDTR)
2309 {
2310 Assert(pInput->Names[iReg] == HvX64RegisterGdtr);
2311 pCtx->gdtr.cbGdt = paValues[iReg].Table.Limit;
2312 pCtx->gdtr.pGdt = paValues[iReg].Table.Base;
2313 iReg++;
2314 }
2315 }
2316
2317 /* Control registers. */
2318 bool fMaybeChangedMode = false;
2319 bool fUpdateCr3 = false;
2320 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
2321 {
2322 if (fWhat & CPUMCTX_EXTRN_CR0)
2323 {
2324 Assert(pInput->Names[iReg] == HvX64RegisterCr0);
2325 if (pCtx->cr0 != paValues[iReg].Reg64)
2326 {
2327 CPUMSetGuestCR0(pGVCpu, paValues[iReg].Reg64);
2328 fMaybeChangedMode = true;
2329 }
2330 iReg++;
2331 }
2332 if (fWhat & CPUMCTX_EXTRN_CR2)
2333 {
2334 Assert(pInput->Names[iReg] == HvX64RegisterCr2);
2335 pCtx->cr2 = paValues[iReg].Reg64;
2336 iReg++;
2337 }
2338 if (fWhat & CPUMCTX_EXTRN_CR3)
2339 {
2340 Assert(pInput->Names[iReg] == HvX64RegisterCr3);
2341 if (pCtx->cr3 != paValues[iReg].Reg64)
2342 {
2343 CPUMSetGuestCR3(pGVCpu, paValues[iReg].Reg64);
2344 fUpdateCr3 = true;
2345 }
2346 iReg++;
2347 }
2348 if (fWhat & CPUMCTX_EXTRN_CR4)
2349 {
2350 Assert(pInput->Names[iReg] == HvX64RegisterCr4);
2351 if (pCtx->cr4 != paValues[iReg].Reg64)
2352 {
2353 CPUMSetGuestCR4(pGVCpu, paValues[iReg].Reg64);
2354 fMaybeChangedMode = true;
2355 }
2356 iReg++;
2357 }
2358 }
2359 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
2360 {
2361 Assert(pInput->Names[iReg] == HvX64RegisterCr8);
2362 APICSetTpr(pGVCpu, (uint8_t)paValues[iReg].Reg64 << 4);
2363 iReg++;
2364 }
2365
2366 /* Debug registers. */
2367 if (fWhat & CPUMCTX_EXTRN_DR7)
2368 {
2369 Assert(pInput->Names[iReg] == HvX64RegisterDr7);
2370 if (pCtx->dr[7] != paValues[iReg].Reg64)
2371 CPUMSetGuestDR7(pGVCpu, paValues[iReg].Reg64);
2372 pCtx->fExtrn &= ~CPUMCTX_EXTRN_DR7; /* Hack alert! Avoids asserting when processing CPUMCTX_EXTRN_DR0_DR3. */
2373 iReg++;
2374 }
2375 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
2376 {
2377 Assert(pInput->Names[iReg] == HvX64RegisterDr0);
2378 Assert(pInput->Names[iReg+3] == HvX64RegisterDr3);
2379 if (pCtx->dr[0] != paValues[iReg].Reg64)
2380 CPUMSetGuestDR0(pGVCpu, paValues[iReg].Reg64);
2381 iReg++;
2382 if (pCtx->dr[1] != paValues[iReg].Reg64)
2383 CPUMSetGuestDR1(pGVCpu, paValues[iReg].Reg64);
2384 iReg++;
2385 if (pCtx->dr[2] != paValues[iReg].Reg64)
2386 CPUMSetGuestDR2(pGVCpu, paValues[iReg].Reg64);
2387 iReg++;
2388 if (pCtx->dr[3] != paValues[iReg].Reg64)
2389 CPUMSetGuestDR3(pGVCpu, paValues[iReg].Reg64);
2390 iReg++;
2391 }
2392 if (fWhat & CPUMCTX_EXTRN_DR6)
2393 {
2394 Assert(pInput->Names[iReg] == HvX64RegisterDr6);
2395 if (pCtx->dr[6] != paValues[iReg].Reg64)
2396 CPUMSetGuestDR6(pGVCpu, paValues[iReg].Reg64);
2397 iReg++;
2398 }
2399
2400 /* Floating point state. */
2401 if (fWhat & CPUMCTX_EXTRN_X87)
2402 {
2403 Assert(pInput->Names[iReg] == HvX64RegisterFpMmx0);
2404 Assert(pInput->Names[iReg + 7] == HvX64RegisterFpMmx7);
2405 pCtx->XState.x87.aRegs[0].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
2406 pCtx->XState.x87.aRegs[0].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
2407 iReg++;
2408 pCtx->XState.x87.aRegs[1].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
2409 pCtx->XState.x87.aRegs[1].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
2410 iReg++;
2411 pCtx->XState.x87.aRegs[2].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
2412 pCtx->XState.x87.aRegs[2].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
2413 iReg++;
2414 pCtx->XState.x87.aRegs[3].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
2415 pCtx->XState.x87.aRegs[3].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
2416 iReg++;
2417 pCtx->XState.x87.aRegs[4].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
2418 pCtx->XState.x87.aRegs[4].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
2419 iReg++;
2420 pCtx->XState.x87.aRegs[5].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
2421 pCtx->XState.x87.aRegs[5].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
2422 iReg++;
2423 pCtx->XState.x87.aRegs[6].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
2424 pCtx->XState.x87.aRegs[6].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
2425 iReg++;
2426 pCtx->XState.x87.aRegs[7].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
2427 pCtx->XState.x87.aRegs[7].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
2428 iReg++;
2429
2430 Assert(pInput->Names[iReg] == HvX64RegisterFpControlStatus);
2431 pCtx->XState.x87.FCW = paValues[iReg].FpControlStatus.FpControl;
2432 pCtx->XState.x87.FSW = paValues[iReg].FpControlStatus.FpStatus;
2433 pCtx->XState.x87.FTW = paValues[iReg].FpControlStatus.FpTag
2434 /*| (paValues[iReg].FpControlStatus.Reserved << 8)*/;
2435 pCtx->XState.x87.FOP = paValues[iReg].FpControlStatus.LastFpOp;
2436 pCtx->XState.x87.FPUIP = (uint32_t)paValues[iReg].FpControlStatus.LastFpRip;
2437 pCtx->XState.x87.CS = (uint16_t)(paValues[iReg].FpControlStatus.LastFpRip >> 32);
2438 pCtx->XState.x87.Rsrvd1 = (uint16_t)(paValues[iReg].FpControlStatus.LastFpRip >> 48);
2439 iReg++;
2440 }
2441
2442 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
2443 {
2444 Assert(pInput->Names[iReg] == HvX64RegisterXmmControlStatus);
2445 if (fWhat & CPUMCTX_EXTRN_X87)
2446 {
2447 pCtx->XState.x87.FPUDP = (uint32_t)paValues[iReg].XmmControlStatus.LastFpRdp;
2448 pCtx->XState.x87.DS = (uint16_t)(paValues[iReg].XmmControlStatus.LastFpRdp >> 32);
2449 pCtx->XState.x87.Rsrvd2 = (uint16_t)(paValues[iReg].XmmControlStatus.LastFpRdp >> 48);
2450 }
2451 pCtx->XState.x87.MXCSR = paValues[iReg].XmmControlStatus.XmmStatusControl;
2452 pCtx->XState.x87.MXCSR_MASK = paValues[iReg].XmmControlStatus.XmmStatusControlMask; /** @todo ??? (Isn't this an output field?) */
2453 iReg++;
2454 }
2455
2456 /* Vector state. */
2457 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
2458 {
2459 Assert(pInput->Names[iReg] == HvX64RegisterXmm0);
2460 Assert(pInput->Names[iReg+15] == HvX64RegisterXmm15);
2461 pCtx->XState.x87.aXMM[0].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2462 pCtx->XState.x87.aXMM[0].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2463 iReg++;
2464 pCtx->XState.x87.aXMM[1].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2465 pCtx->XState.x87.aXMM[1].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2466 iReg++;
2467 pCtx->XState.x87.aXMM[2].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2468 pCtx->XState.x87.aXMM[2].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2469 iReg++;
2470 pCtx->XState.x87.aXMM[3].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2471 pCtx->XState.x87.aXMM[3].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2472 iReg++;
2473 pCtx->XState.x87.aXMM[4].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2474 pCtx->XState.x87.aXMM[4].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2475 iReg++;
2476 pCtx->XState.x87.aXMM[5].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2477 pCtx->XState.x87.aXMM[5].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2478 iReg++;
2479 pCtx->XState.x87.aXMM[6].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2480 pCtx->XState.x87.aXMM[6].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2481 iReg++;
2482 pCtx->XState.x87.aXMM[7].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2483 pCtx->XState.x87.aXMM[7].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2484 iReg++;
2485 pCtx->XState.x87.aXMM[8].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2486 pCtx->XState.x87.aXMM[8].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2487 iReg++;
2488 pCtx->XState.x87.aXMM[9].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2489 pCtx->XState.x87.aXMM[9].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2490 iReg++;
2491 pCtx->XState.x87.aXMM[10].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2492 pCtx->XState.x87.aXMM[10].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2493 iReg++;
2494 pCtx->XState.x87.aXMM[11].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2495 pCtx->XState.x87.aXMM[11].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2496 iReg++;
2497 pCtx->XState.x87.aXMM[12].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2498 pCtx->XState.x87.aXMM[12].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2499 iReg++;
2500 pCtx->XState.x87.aXMM[13].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2501 pCtx->XState.x87.aXMM[13].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2502 iReg++;
2503 pCtx->XState.x87.aXMM[14].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2504 pCtx->XState.x87.aXMM[14].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2505 iReg++;
2506 pCtx->XState.x87.aXMM[15].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2507 pCtx->XState.x87.aXMM[15].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2508 iReg++;
2509 }
2510
2511
2512 /* MSRs */
2513 // HvX64RegisterTsc - don't touch
2514 if (fWhat & CPUMCTX_EXTRN_EFER)
2515 {
2516 Assert(pInput->Names[iReg] == HvX64RegisterEfer);
2517 if (paValues[iReg].Reg64 != pCtx->msrEFER)
2518 {
2519 Log7(("NEM/%u: MSR EFER changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrEFER, paValues[iReg].Reg64));
2520 if ((paValues[iReg].Reg64 ^ pCtx->msrEFER) & MSR_K6_EFER_NXE)
2521 PGMNotifyNxeChanged(pGVCpu, RT_BOOL(paValues[iReg].Reg64 & MSR_K6_EFER_NXE));
2522 pCtx->msrEFER = paValues[iReg].Reg64;
2523 fMaybeChangedMode = true;
2524 }
2525 iReg++;
2526 }
2527 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
2528 {
2529 Assert(pInput->Names[iReg] == HvX64RegisterKernelGsBase);
2530 if (pCtx->msrKERNELGSBASE != paValues[iReg].Reg64)
2531 Log7(("NEM/%u: MSR KERNELGSBASE changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrKERNELGSBASE, paValues[iReg].Reg64));
2532 pCtx->msrKERNELGSBASE = paValues[iReg].Reg64;
2533 iReg++;
2534 }
2535 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
2536 {
2537 Assert(pInput->Names[iReg] == HvX64RegisterSysenterCs);
2538 if (pCtx->SysEnter.cs != paValues[iReg].Reg64)
2539 Log7(("NEM/%u: MSR SYSENTER.CS changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->SysEnter.cs, paValues[iReg].Reg64));
2540 pCtx->SysEnter.cs = paValues[iReg].Reg64;
2541 iReg++;
2542
2543 Assert(pInput->Names[iReg] == HvX64RegisterSysenterEip);
2544 if (pCtx->SysEnter.eip != paValues[iReg].Reg64)
2545 Log7(("NEM/%u: MSR SYSENTER.EIP changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->SysEnter.eip, paValues[iReg].Reg64));
2546 pCtx->SysEnter.eip = paValues[iReg].Reg64;
2547 iReg++;
2548
2549 Assert(pInput->Names[iReg] == HvX64RegisterSysenterEsp);
2550 if (pCtx->SysEnter.esp != paValues[iReg].Reg64)
2551 Log7(("NEM/%u: MSR SYSENTER.ESP changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->SysEnter.esp, paValues[iReg].Reg64));
2552 pCtx->SysEnter.esp = paValues[iReg].Reg64;
2553 iReg++;
2554 }
2555 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
2556 {
2557 Assert(pInput->Names[iReg] == HvX64RegisterStar);
2558 if (pCtx->msrSTAR != paValues[iReg].Reg64)
2559 Log7(("NEM/%u: MSR STAR changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrSTAR, paValues[iReg].Reg64));
2560 pCtx->msrSTAR = paValues[iReg].Reg64;
2561 iReg++;
2562
2563 Assert(pInput->Names[iReg] == HvX64RegisterLstar);
2564 if (pCtx->msrLSTAR != paValues[iReg].Reg64)
2565 Log7(("NEM/%u: MSR LSTAR changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrLSTAR, paValues[iReg].Reg64));
2566 pCtx->msrLSTAR = paValues[iReg].Reg64;
2567 iReg++;
2568
2569 Assert(pInput->Names[iReg] == HvX64RegisterCstar);
2570 if (pCtx->msrCSTAR != paValues[iReg].Reg64)
2571 Log7(("NEM/%u: MSR CSTAR changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrCSTAR, paValues[iReg].Reg64));
2572 pCtx->msrCSTAR = paValues[iReg].Reg64;
2573 iReg++;
2574
2575 Assert(pInput->Names[iReg] == HvX64RegisterSfmask);
2576 if (pCtx->msrSFMASK != paValues[iReg].Reg64)
2577 Log7(("NEM/%u: MSR SFMASK changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrSFMASK, paValues[iReg].Reg64));
2578 pCtx->msrSFMASK = paValues[iReg].Reg64;
2579 iReg++;
2580 }
2581 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
2582 {
2583 Assert(pInput->Names[iReg] == HvX64RegisterApicBase);
2584 const uint64_t uOldBase = APICGetBaseMsrNoCheck(pGVCpu);
2585 if (paValues[iReg].Reg64 != uOldBase)
2586 {
2587 Log7(("NEM/%u: MSR APICBase changed %RX64 -> %RX64 (%RX64)\n",
2588 pGVCpu->idCpu, uOldBase, paValues[iReg].Reg64, paValues[iReg].Reg64 ^ uOldBase));
2589 int rc2 = APICSetBaseMsr(pGVCpu, paValues[iReg].Reg64);
2590 AssertLogRelMsg(rc2 == VINF_SUCCESS, ("rc2=%Rrc [%#RX64]\n", rc2, paValues[iReg].Reg64));
2591 }
2592 iReg++;
2593
2594 Assert(pInput->Names[iReg] == HvX64RegisterPat);
2595 if (pCtx->msrPAT != paValues[iReg].Reg64)
2596 Log7(("NEM/%u: MSR PAT changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrPAT, paValues[iReg].Reg64));
2597 pCtx->msrPAT = paValues[iReg].Reg64;
2598 iReg++;
2599
2600# if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
2601 Assert(pInput->Names[iReg] == HvX64RegisterMtrrCap);
2602 if (paValues[iReg].Reg64 != CPUMGetGuestIa32MtrrCap(pGVCpu))
2603 Log7(("NEM/%u: MSR MTRR_CAP changed %RX64 -> %RX64 (!!)\n", pGVCpu->idCpu, CPUMGetGuestIa32MtrrCap(pGVCpu), paValues[iReg].Reg64));
2604 iReg++;
2605# endif
2606
2607 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pGVCpu);
2608 Assert(pInput->Names[iReg] == HvX64RegisterMtrrDefType);
2609 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrDefType )
2610 Log7(("NEM/%u: MSR MTRR_DEF_TYPE changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrDefType, paValues[iReg].Reg64));
2611 pCtxMsrs->msr.MtrrDefType = paValues[iReg].Reg64;
2612 iReg++;
2613
2614        /** @todo we don't keep state for HvX64RegisterMtrrPhysBaseX and HvX64RegisterMtrrPhysMaskX */
2615
2616 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix64k00000);
2617 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix64K_00000 )
2618            Log7(("NEM/%u: MSR MTRR_FIX64K_00000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix64K_00000, paValues[iReg].Reg64));
2619 pCtxMsrs->msr.MtrrFix64K_00000 = paValues[iReg].Reg64;
2620 iReg++;
2621
2622 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix16k80000);
2623 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix16K_80000 )
2624 Log7(("NEM/%u: MSR MTRR_FIX16K_80000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_80000, paValues[iReg].Reg64));
2625 pCtxMsrs->msr.MtrrFix16K_80000 = paValues[iReg].Reg64;
2626 iReg++;
2627
2628 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix16kA0000);
2629 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix16K_A0000 )
2630 Log7(("NEM/%u: MSR MTRR_FIX16K_A0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_A0000, paValues[iReg].Reg64));
2631 pCtxMsrs->msr.MtrrFix16K_A0000 = paValues[iReg].Reg64;
2632 iReg++;
2633
2634 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kC0000);
2635 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_C0000 )
2636            Log7(("NEM/%u: MSR MTRR_FIX4K_C0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C0000, paValues[iReg].Reg64));
2637 pCtxMsrs->msr.MtrrFix4K_C0000 = paValues[iReg].Reg64;
2638 iReg++;
2639
2640 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kC8000);
2641 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_C8000 )
2642            Log7(("NEM/%u: MSR MTRR_FIX4K_C8000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C8000, paValues[iReg].Reg64));
2643 pCtxMsrs->msr.MtrrFix4K_C8000 = paValues[iReg].Reg64;
2644 iReg++;
2645
2646 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kD0000);
2647 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_D0000 )
2648            Log7(("NEM/%u: MSR MTRR_FIX4K_D0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D0000, paValues[iReg].Reg64));
2649 pCtxMsrs->msr.MtrrFix4K_D0000 = paValues[iReg].Reg64;
2650 iReg++;
2651
2652 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kD8000);
2653 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_D8000 )
2654            Log7(("NEM/%u: MSR MTRR_FIX4K_D8000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D8000, paValues[iReg].Reg64));
2655 pCtxMsrs->msr.MtrrFix4K_D8000 = paValues[iReg].Reg64;
2656 iReg++;
2657
2658 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kE0000);
2659 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_E0000 )
2660            Log7(("NEM/%u: MSR MTRR_FIX4K_E0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E0000, paValues[iReg].Reg64));
2661 pCtxMsrs->msr.MtrrFix4K_E0000 = paValues[iReg].Reg64;
2662 iReg++;
2663
2664 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kE8000);
2665 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_E8000 )
2666            Log7(("NEM/%u: MSR MTRR_FIX4K_E8000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E8000, paValues[iReg].Reg64));
2667 pCtxMsrs->msr.MtrrFix4K_E8000 = paValues[iReg].Reg64;
2668 iReg++;
2669
2670 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kF0000);
2671 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_F0000 )
2672            Log7(("NEM/%u: MSR MTRR_FIX4K_F0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F0000, paValues[iReg].Reg64));
2673 pCtxMsrs->msr.MtrrFix4K_F0000 = paValues[iReg].Reg64;
2674 iReg++;
2675
2676 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kF8000);
2677 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_F8000 )
2678            Log7(("NEM/%u: MSR MTRR_FIX4K_F8000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F8000, paValues[iReg].Reg64));
2679 pCtxMsrs->msr.MtrrFix4K_F8000 = paValues[iReg].Reg64;
2680 iReg++;
2681
2682 Assert(pInput->Names[iReg] == HvX64RegisterTscAux);
2683 if (paValues[iReg].Reg64 != pCtxMsrs->msr.TscAux )
2684 Log7(("NEM/%u: MSR TSC_AUX changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.TscAux, paValues[iReg].Reg64));
2685 pCtxMsrs->msr.TscAux = paValues[iReg].Reg64;
2686 iReg++;
2687
2688# if 0 /** @todo why can't we even read HvX64RegisterIa32MiscEnable? */
2689 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
2690 {
2691 Assert(pInput->Names[iReg] == HvX64RegisterIa32MiscEnable);
2692 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MiscEnable)
2693 Log7(("NEM/%u: MSR MISC_ENABLE changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MiscEnable, paValues[iReg].Reg64));
2694 pCtxMsrs->msr.MiscEnable = paValues[iReg].Reg64;
2695 iReg++;
2696 }
2697# endif
2698# ifdef LOG_ENABLED
2699 if (enmCpuVendor != CPUMCPUVENDOR_AMD && enmCpuVendor != CPUMCPUVENDOR_HYGON)
2700 {
2701 Assert(pInput->Names[iReg] == HvX64RegisterIa32FeatureControl);
2702            uint64_t const uFeatCtrl = CPUMGetGuestIa32FeatCtrl(pGVCpu);
2703 if (paValues[iReg].Reg64 != uFeatCtrl)
2704 Log7(("NEM/%u: MSR FEATURE_CONTROL changed %RX64 -> %RX64 (!!)\n", pGVCpu->idCpu, uFeatCtrl, paValues[iReg].Reg64));
2705 iReg++;
2706 }
2707# endif
2708 }
2709
2710 /* Interruptibility. */
2711 if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
2712 {
2713 Assert(pInput->Names[iReg] == HvRegisterInterruptState);
2714 Assert(pInput->Names[iReg + 1] == HvX64RegisterRip);
2715
2716 if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
2717 {
2718 pGVCpu->nem.s.fLastInterruptShadow = paValues[iReg].InterruptState.InterruptShadow;
2719 if (paValues[iReg].InterruptState.InterruptShadow)
2720 EMSetInhibitInterruptsPC(pGVCpu, paValues[iReg + 1].Reg64);
2721 else
2722 VMCPU_FF_CLEAR(pGVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2723 }
2724
2725 if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
2726 {
2727 if (paValues[iReg].InterruptState.NmiMasked)
2728 VMCPU_FF_SET(pGVCpu, VMCPU_FF_BLOCK_NMIS);
2729 else
2730 VMCPU_FF_CLEAR(pGVCpu, VMCPU_FF_BLOCK_NMIS);
2731 }
2732
2733 fWhat |= CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI;
2734 iReg += 2;
2735 }
2736
2737 /* Event injection. */
2738 /// @todo HvRegisterPendingInterruption
2739 Assert(pInput->Names[iReg] == HvRegisterPendingInterruption);
2740 if (paValues[iReg].PendingInterruption.InterruptionPending)
2741 {
2742 Log7(("PendingInterruption: type=%u vector=%#x errcd=%RTbool/%#x instr-len=%u nested=%u\n",
2743 paValues[iReg].PendingInterruption.InterruptionType, paValues[iReg].PendingInterruption.InterruptionVector,
2744 paValues[iReg].PendingInterruption.DeliverErrorCode, paValues[iReg].PendingInterruption.ErrorCode,
2745 paValues[iReg].PendingInterruption.InstructionLength, paValues[iReg].PendingInterruption.NestedEvent));
2746 AssertMsg((paValues[iReg].PendingInterruption.AsUINT64 & UINT64_C(0xfc00)) == 0,
2747 ("%#RX64\n", paValues[iReg].PendingInterruption.AsUINT64));
2748 }
2749
2750 /// @todo HvRegisterPendingEvent0
2751 /// @todo HvRegisterPendingEvent1
2752
2753 /* Almost done, just update extrn flags and maybe change PGM mode. */
2754 pCtx->fExtrn &= ~fWhat;
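    /* Once nothing from CPUMCTX_EXTRN_ALL and nothing NEM-specific besides the event-injection
       bit remains external, drop the whole mask, keeper bit included: */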
2755 if (!(pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT))))
2756 pCtx->fExtrn = 0;
2757
2758 /* Typical. */
2759 if (!fMaybeChangedMode && !fUpdateCr3)
2760 return VINF_SUCCESS;
2761
2762 /*
2763 * Slow.
2764 */
2765 int rc = VINF_SUCCESS;
2766 if (fMaybeChangedMode)
2767 {
2768 rc = PGMChangeMode(pGVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
2769 AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_1);
2770 }
2771
2772 if (fUpdateCr3)
2773 {
2774 if (fCanUpdateCr3)
2775 {
2776 LogFlow(("nemR0WinImportState: -> PGMUpdateCR3!\n"));
2777 rc = PGMUpdateCR3(pGVCpu, pCtx->cr3, false /*fPdpesMapped*/);
2778 if (rc == VINF_SUCCESS)
2779 { /* likely */ }
2780 else
2781 AssertMsgFailedReturn(("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_2);
2782 }
2783 else
2784 {
2785 LogFlow(("nemR0WinImportState: -> VERR_NEM_FLUSH_TLB!\n"));
2786 rc = VERR_NEM_FLUSH_TLB; /* Calling PGMFlushTLB w/o long jump setup doesn't work, ring-3 does it. */
2787 }
2788 }
2789
2790 return rc;
2791}
2792#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
2793
2794
2795/**
2796 * Import the state from the native API (back to CPUMCTX).
2797 *
2798 * @returns VBox status code
2799 * @param pGVM The ring-0 VM handle.
2800 * @param idCpu The calling EMT. Necessary for getting the
2801 * hypercall page and arguments.
2802 * @param fWhat What to import, CPUMCTX_EXTRN_XXX. Set
2803 *                      CPUMCTX_EXTRN_ALL for everything.
2804 */
2805VMMR0_INT_DECL(int) NEMR0ImportState(PGVM pGVM, VMCPUID idCpu, uint64_t fWhat)
2806{
2807#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2808 /*
2809 * Validate the call.
2810 */
2811 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
2812 if (RT_SUCCESS(rc))
2813 {
2814 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2815 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
2816
2817 /*
2818 * Call worker.
2819 */
2820 rc = nemR0WinImportState(pGVM, pGVCpu, &pGVCpu->cpum.GstCtx, fWhat, false /*fCanUpdateCr3*/);
2821 }
2822 return rc;
2823#else
2824 RT_NOREF(pGVM, idCpu, fWhat);
2825 return VERR_NOT_IMPLEMENTED;
2826#endif
2827}
2828
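/*
 * Ring-3 usage sketch for NEMR0ImportState (illustrative only; the dispatch helper and
 * operation name below are assumptions, see VMMR0.cpp for the actual dispatching):
 *      rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_IMPORT_STATE, CPUMCTX_EXTRN_ALL, NULL);
 */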
2829
2830#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2831/**
2832 * Worker for NEMR0QueryCpuTick and the ring-0 NEMHCQueryCpuTick.
2833 *
2834 * @returns VBox status code.
2835 * @param pGVM The ring-0 VM handle.
2836 * @param pGVCpu The ring-0 VCPU handle.
2837 * @param pcTicks Where to return the current CPU tick count.
2838 * @param pcAux Where to return the hyper-V TSC_AUX value. Optional.
2839 */
2840NEM_TMPL_STATIC int nemR0WinQueryCpuTick(PGVM pGVM, PGVMCPU pGVCpu, uint64_t *pcTicks, uint32_t *pcAux)
2841{
2842 /*
2843 * Hypercall parameters.
2844 */
2845 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
2846 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2847 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
2848
2849 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
2850 pInput->VpIndex = pGVCpu->idCpu;
2851 pInput->fFlags = 0;
2852 pInput->Names[0] = HvX64RegisterTsc;
2853 pInput->Names[1] = HvX64RegisterTscAux;
2854
2855 size_t const cbInput = RT_ALIGN_Z(RT_UOFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[2]), 32);
2856 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
2857 RT_BZERO(paValues, sizeof(paValues[0]) * 2);
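    /* Input and output share the single pre-allocated hypercall page: the two output
       register values start right after the 32-byte aligned input block, which is why
       HCPhysPage + cbInput is passed as the output GPA below. */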
2858
2859 /*
2860 * Make the hypercall.
2861 */
2862 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, 2),
2863 pGVCpu->nemr0.s.HypercallData.HCPhysPage,
2864 pGVCpu->nemr0.s.HypercallData.HCPhysPage + cbInput);
2865 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(2), ("uResult=%RX64 cRegs=%#x\n", uResult, 2),
2866 VERR_NEM_GET_REGISTERS_FAILED);
2867
2868 /*
2869 * Get results.
2870 */
2871 *pcTicks = paValues[0].Reg64;
2872 if (pcAux)
2873        *pcAux = paValues[1].Reg32; /* Names[1] is HvX64RegisterTscAux, so its value is in paValues[1]. */
2874 return VINF_SUCCESS;
2875}
2876#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
2877
2878
2879/**
2880 * Queries the TSC and TSC_AUX values, putting the results in the calling VCPU's
2881 * nem.s.Hypercall.QueryCpuTick area.
2881 *
2882 * @returns VBox status code
2883 * @param pGVM The ring-0 VM handle.
2884 * @param idCpu The calling EMT. Necessary for getting the
2885 * hypercall page and arguments.
2886 */
2887VMMR0_INT_DECL(int) NEMR0QueryCpuTick(PGVM pGVM, VMCPUID idCpu)
2888{
2889#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2890 /*
2891 * Validate the call.
2892 */
2893 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
2894 if (RT_SUCCESS(rc))
2895 {
2896 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2897 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
2898
2899 /*
2900 * Call worker.
2901 */
2902 pGVCpu->nem.s.Hypercall.QueryCpuTick.cTicks = 0;
2903 pGVCpu->nem.s.Hypercall.QueryCpuTick.uAux = 0;
2904 rc = nemR0WinQueryCpuTick(pGVM, pGVCpu, &pGVCpu->nem.s.Hypercall.QueryCpuTick.cTicks,
2905 &pGVCpu->nem.s.Hypercall.QueryCpuTick.uAux);
2906 }
2907 return rc;
2908#else
2909 RT_NOREF(pGVM, idCpu);
2910 return VERR_NOT_IMPLEMENTED;
2911#endif
2912}
2913
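/*
 * Ring-3 usage sketch for NEMR0QueryCpuTick (illustrative only; the dispatch helper and
 * operation name are assumptions, the result fields are the ones filled in above):
 *      rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_QUERY_CPU_TICK, 0, NULL);
 *      if (RT_SUCCESS(rc))
 *      {
 *          uint64_t cTicks = pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks;
 *          uint32_t uAux   = pVCpu->nem.s.Hypercall.QueryCpuTick.uAux;
 *      }
 */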
2914
2915#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2916/**
2917 * Worker for NEMR0ResumeCpuTickOnAll and the ring-0 NEMHCResumeCpuTickOnAll.
2918 *
2919 * @returns VBox status code.
2920 * @param pGVM The ring-0 VM handle.
2921 * @param pGVCpu The ring-0 VCPU handle.
2922 * @param uPausedTscValue The TSC value at the time of pausing.
2923 */
2924NEM_TMPL_STATIC int nemR0WinResumeCpuTickOnAll(PGVM pGVM, PGVMCPU pGVCpu, uint64_t uPausedTscValue)
2925{
2926 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
2927
2928 /*
2929 * Set up the hypercall parameters.
2930 */
2931 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
2932 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2933
2934 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
2935 pInput->VpIndex = 0;
2936 pInput->RsvdZ = 0;
2937 pInput->Elements[0].Name = HvX64RegisterTsc;
2938 pInput->Elements[0].Pad0 = 0;
2939 pInput->Elements[0].Pad1 = 0;
2940 pInput->Elements[0].Value.Reg128.High64 = 0;
2941 pInput->Elements[0].Value.Reg64 = uPausedTscValue;
2942
2943 /*
2944 * Disable interrupts and do the first virtual CPU.
2945 */
2946 RTCCINTREG const fSavedFlags = ASMIntDisableFlags();
2947 uint64_t const uFirstTsc = ASMReadTSC();
2948 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
2949 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0 /* no output */);
2950 AssertLogRelMsgReturnStmt(uResult == HV_MAKE_CALL_REP_RET(1), ("uResult=%RX64 uTsc=%#RX64\n", uResult, uPausedTscValue),
2951 ASMSetFlags(fSavedFlags), VERR_NEM_SET_TSC);
2952
2953 /*
2954     * Do the secondary processors, adjusting for elapsed TSC and keeping fingers crossed
2955 * that we don't introduce too much drift here.
2956 */
2957 for (VMCPUID iCpu = 1; iCpu < pGVM->cCpus; iCpu++)
2958 {
2959 Assert(pInput->PartitionId == pGVM->nemr0.s.idHvPartition);
2960 Assert(pInput->RsvdZ == 0);
2961 Assert(pInput->Elements[0].Name == HvX64RegisterTsc);
2962 Assert(pInput->Elements[0].Pad0 == 0);
2963 Assert(pInput->Elements[0].Pad1 == 0);
2964 Assert(pInput->Elements[0].Value.Reg128.High64 == 0);
2965
2966 pInput->VpIndex = iCpu;
2967 const uint64_t offDelta = (ASMReadTSC() - uFirstTsc);
2968 pInput->Elements[0].Value.Reg64 = uPausedTscValue + offDelta;
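        /* Compensating with the host TSC ticks elapsed since VP 0 was set keeps the guest
           TSCs of the remaining VPs roughly in step with VP 0, instead of resetting each
           of them to the (by now stale) uPausedTscValue. */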
2969
2970 uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
2971 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0 /* no output */);
2972 AssertLogRelMsgReturnStmt(uResult == HV_MAKE_CALL_REP_RET(1),
2973 ("uResult=%RX64 uTsc=%#RX64 + %#RX64\n", uResult, uPausedTscValue, offDelta),
2974 ASMSetFlags(fSavedFlags), VERR_NEM_SET_TSC);
2975 }
2976
2977 /*
2978 * Done.
2979 */
2980 ASMSetFlags(fSavedFlags);
2981 return VINF_SUCCESS;
2982}
2983#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
2984
2985
2986/**
2987 * Sets the TSC register to @a uPausedTscValue on all CPUs.
2988 *
2989 * @returns VBox status code
2990 * @param pGVM The ring-0 VM handle.
2991 * @param idCpu The calling EMT. Necessary for getting the
2992 * hypercall page and arguments.
2993 * @param uPausedTscValue The TSC value at the time of pausing.
2994 */
2995VMMR0_INT_DECL(int) NEMR0ResumeCpuTickOnAll(PGVM pGVM, VMCPUID idCpu, uint64_t uPausedTscValue)
2996{
2997#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2998 /*
2999 * Validate the call.
3000 */
3001 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
3002 if (RT_SUCCESS(rc))
3003 {
3004 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
3005 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
3006
3007 /*
3008 * Call worker.
3009 */
3010 pGVCpu->nem.s.Hypercall.QueryCpuTick.cTicks = 0;
3011 pGVCpu->nem.s.Hypercall.QueryCpuTick.uAux = 0;
3012 rc = nemR0WinResumeCpuTickOnAll(pGVM, pGVCpu, uPausedTscValue);
3013 }
3014 return rc;
3015#else
3016 RT_NOREF(pGVM, idCpu, uPausedTscValue);
3017 return VERR_NOT_IMPLEMENTED;
3018#endif
3019}
3020
3021
3022VMMR0_INT_DECL(VBOXSTRICTRC) NEMR0RunGuestCode(PGVM pGVM, VMCPUID idCpu)
3023{
3024#ifdef NEM_WIN_WITH_RING0_RUNLOOP
3025 if (pGVM->nemr0.s.fMayUseRing0Runloop)
3026 return nemHCWinRunGC(pGVM, &pGVM->aCpus[idCpu]);
3027 return VERR_NEM_RING3_ONLY;
3028#else
3029 RT_NOREF(pGVM, idCpu);
3030 return VERR_NOT_IMPLEMENTED;
3031#endif
3032}
3033
3034
3035/**
3036 * Updates statistics in the VM structure.
3037 *
3038 * @returns VBox status code.
3039 * @param pGVM The ring-0 VM handle.
3040 * @param idCpu The calling EMT, or NIL. Necessary for getting the hypercall
3041 * page and arguments.
3042 */
3043VMMR0_INT_DECL(int) NEMR0UpdateStatistics(PGVM pGVM, VMCPUID idCpu)
3044{
3045#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
3046 /*
3047 * Validate the call.
3048 */
3049 int rc;
3050 if (idCpu == NIL_VMCPUID)
3051 rc = GVMMR0ValidateGVM(pGVM);
3052 else
3053 rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
3054 if (RT_SUCCESS(rc))
3055 {
3056 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
3057
3058 PNEMR0HYPERCALLDATA pHypercallData = idCpu != NIL_VMCPUID
3059 ? &pGVM->aCpus[idCpu].nemr0.s.HypercallData
3060 : &pGVM->nemr0.s.HypercallData;
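        /* An EMT caller uses its own per-VCPU hypercall page; when called without an EMT
           (idCpu == NIL_VMCPUID) the VM-wide page is used and must be serialized via
           HypercallDataCritSect below. */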
3061 if ( RT_VALID_PTR(pHypercallData->pbPage)
3062 && pHypercallData->HCPhysPage != NIL_RTHCPHYS)
3063 {
3064 if (idCpu == NIL_VMCPUID)
3065 rc = RTCritSectEnter(&pGVM->nemr0.s.HypercallDataCritSect);
3066 if (RT_SUCCESS(rc))
3067 {
3068 /*
3069 * Query the memory statistics for the partition.
3070 */
3071 HV_INPUT_GET_MEMORY_BALANCE *pInput = (HV_INPUT_GET_MEMORY_BALANCE *)pHypercallData->pbPage;
3072 pInput->TargetPartitionId = pGVM->nemr0.s.idHvPartition;
3073 pInput->ProximityDomainInfo.Flags.ProximityPreferred = 0;
3074 pInput->ProximityDomainInfo.Flags.ProxyimityInfoValid = 0;
3075 pInput->ProximityDomainInfo.Flags.Reserved = 0;
3076 pInput->ProximityDomainInfo.Id = 0;
3077
3078 HV_OUTPUT_GET_MEMORY_BALANCE *pOutput = (HV_OUTPUT_GET_MEMORY_BALANCE *)(pInput + 1);
3079 RT_ZERO(*pOutput);
3080
3081 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallGetMemoryBalance,
3082 pHypercallData->HCPhysPage,
3083 pHypercallData->HCPhysPage + sizeof(*pInput));
3084 if (uResult == HV_STATUS_SUCCESS)
3085 {
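                    /* The counts are in hypervisor page units (presumably 4 KiB pages) and are
                       simply cached in the shared NEM state for the ring-3 statistics code to
                       pick up. */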
3086 pGVM->nem.s.R0Stats.cPagesAvailable = pOutput->PagesAvailable;
3087 pGVM->nem.s.R0Stats.cPagesInUse = pOutput->PagesInUse;
3088 rc = VINF_SUCCESS;
3089 }
3090 else
3091 {
3092 LogRel(("HvCallGetMemoryBalance -> %#RX64 (%#RX64 %#RX64)!!\n",
3093 uResult, pOutput->PagesAvailable, pOutput->PagesInUse));
3094 rc = VERR_NEM_IPE_0;
3095 }
3096
3097 if (idCpu == NIL_VMCPUID)
3098 RTCritSectLeave(&pGVM->nemr0.s.HypercallDataCritSect);
3099 }
3100 }
3101 else
3102 rc = VERR_WRONG_ORDER;
3103 }
3104 return rc;
3105#else
3106 RT_NOREF(pGVM, idCpu);
3107 return VINF_SUCCESS;
3108#endif
3109}
3110
3111
3112/**
3113 * Debug only interface for poking around and exploring Hyper-V stuff.
3114 *
3115 * @param pGVM The ring-0 VM handle.
3116 * @param idCpu The calling EMT.
3117 * @param   u64Arg      What to do: 0 = query VP register, 1 = query partition property, 2 = set VP register.
3118 */
3119VMMR0_INT_DECL(int) NEMR0DoExperiment(PGVM pGVM, VMCPUID idCpu, uint64_t u64Arg)
3120{
3121#if defined(DEBUG_bird) && defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES)
3122 /*
3123 * Resolve CPU structures.
3124 */
3125 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
3126 if (RT_SUCCESS(rc))
3127 {
3128 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
3129
3130 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
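        /* The register/property to access and any value to write are passed in from ring-3
           via pGVCpu->nem.s.Hypercall.Experiment (uItem, uLoValue, uHiValue); the status and
           any result are returned through the same structure. */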
3131 if (u64Arg == 0)
3132 {
3133 /*
3134 * Query register.
3135 */
3136 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
3137 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
3138
3139 size_t const cbInput = RT_ALIGN_Z(RT_UOFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[1]), 32);
3140 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
3141 RT_BZERO(paValues, sizeof(paValues[0]) * 1);
3142
3143 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
3144 pInput->VpIndex = pGVCpu->idCpu;
3145 pInput->fFlags = 0;
3146 pInput->Names[0] = (HV_REGISTER_NAME)pGVCpu->nem.s.Hypercall.Experiment.uItem;
3147
3148 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, 1),
3149 pGVCpu->nemr0.s.HypercallData.HCPhysPage,
3150 pGVCpu->nemr0.s.HypercallData.HCPhysPage + cbInput);
3151 pGVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_MAKE_CALL_REP_RET(1);
3152 pGVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;
3153 pGVCpu->nem.s.Hypercall.Experiment.uLoValue = paValues[0].Reg128.Low64;
3154 pGVCpu->nem.s.Hypercall.Experiment.uHiValue = paValues[0].Reg128.High64;
3155 rc = VINF_SUCCESS;
3156 }
3157 else if (u64Arg == 1)
3158 {
3159 /*
3160 * Query partition property.
3161 */
3162 HV_INPUT_GET_PARTITION_PROPERTY *pInput = (HV_INPUT_GET_PARTITION_PROPERTY *)pGVCpu->nemr0.s.HypercallData.pbPage;
3163 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
3164
3165 size_t const cbInput = RT_ALIGN_Z(sizeof(*pInput), 32);
3166 HV_OUTPUT_GET_PARTITION_PROPERTY *pOutput = (HV_OUTPUT_GET_PARTITION_PROPERTY *)((uint8_t *)pInput + cbInput);
3167 pOutput->PropertyValue = 0;
3168
3169 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
3170 pInput->PropertyCode = (HV_PARTITION_PROPERTY_CODE)pGVCpu->nem.s.Hypercall.Experiment.uItem;
3171 pInput->uPadding = 0;
3172
3173 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallGetPartitionProperty,
3174 pGVCpu->nemr0.s.HypercallData.HCPhysPage,
3175 pGVCpu->nemr0.s.HypercallData.HCPhysPage + cbInput);
3176 pGVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_STATUS_SUCCESS;
3177 pGVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;
3178 pGVCpu->nem.s.Hypercall.Experiment.uLoValue = pOutput->PropertyValue;
3179 pGVCpu->nem.s.Hypercall.Experiment.uHiValue = 0;
3180 rc = VINF_SUCCESS;
3181 }
3182 else if (u64Arg == 2)
3183 {
3184 /*
3185 * Set register.
3186 */
3187 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
3188 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
3189 RT_BZERO(pInput, RT_UOFFSETOF(HV_INPUT_SET_VP_REGISTERS, Elements[1]));
3190
3191 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
3192 pInput->VpIndex = pGVCpu->idCpu;
3193 pInput->RsvdZ = 0;
3194 pInput->Elements[0].Name = (HV_REGISTER_NAME)pGVCpu->nem.s.Hypercall.Experiment.uItem;
3195 pInput->Elements[0].Value.Reg128.High64 = pGVCpu->nem.s.Hypercall.Experiment.uHiValue;
3196 pInput->Elements[0].Value.Reg128.Low64 = pGVCpu->nem.s.Hypercall.Experiment.uLoValue;
3197
3198 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
3199 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0);
3200 pGVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_MAKE_CALL_REP_RET(1);
3201 pGVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;
3202 rc = VINF_SUCCESS;
3203 }
3204 else
3205 rc = VERR_INVALID_FUNCTION;
3206 }
3207 return rc;
3208#else /* !DEBUG_bird || !NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
3209 RT_NOREF(pGVM, idCpu, u64Arg);
3210 return VERR_NOT_SUPPORTED;
3211#endif /* !DEBUG_bird || !NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
3212}
3213