VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp@ 91692

Last change on this file since 91692 was 91692, checked in by vboxsync, 3 years ago

VMM/NEM: Made it build without NEM_WIN_USE_HYPERCALLS_FOR_PAGES again. #ifdef'ed a lot more based on NEM_WIN_USE_HYPERCALLS_FOR_PAGES (basically ring-0 ends up as a stub if it's not defined). bugref:10118

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 137.2 KB
1/* $Id: NEMR0Native-win.cpp 91692 2021-10-12 13:31:34Z vboxsync $ */
2/** @file
3 * NEM - Native execution manager, native ring-0 Windows backend.
4 */
5
6/*
7 * Copyright (C) 2018-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_NEM
23#define VMCPU_INCL_CPUM_GST_CTX
24#include <iprt/nt/nt.h>
25#include <iprt/nt/hyperv.h>
26#include <iprt/nt/vid.h>
27#include <winerror.h>
28
29#include <VBox/vmm/nem.h>
30#include <VBox/vmm/iem.h>
31#include <VBox/vmm/em.h>
32#include <VBox/vmm/apic.h>
33#include <VBox/vmm/pdm.h>
34#include <VBox/vmm/dbgftrace.h>
35#include "NEMInternal.h"
36#include <VBox/vmm/gvm.h>
37#include <VBox/vmm/vmcc.h>
38#include <VBox/vmm/gvmm.h>
39#include <VBox/param.h>
40
41#include <iprt/ctype.h>
42#include <iprt/critsect.h>
43#include <iprt/dbg.h>
44#include <iprt/mem.h>
45#include <iprt/memobj.h>
46#include <iprt/string.h>
47#include <iprt/time.h>
48#define PIMAGE_NT_HEADERS32 PIMAGE_NT_HEADERS32_PECOFF
49#include <iprt/formats/pecoff.h>
50
51
52/* Assert compile context sanity. */
53#ifndef RT_OS_WINDOWS
54# error "Windows only file!"
55#endif
56#ifndef RT_ARCH_AMD64
57# error "AMD64 only file!"
58#endif
59
60
61/*********************************************************************************************************************************
62* Internal Functions *
63*********************************************************************************************************************************/
64typedef uint32_t DWORD; /* for winerror.h constants */
65
66
67/*********************************************************************************************************************************
68* Global Variables *
69*********************************************************************************************************************************/
70#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
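/**
 * The NT kernel's HvlInvokeHypercall export - resolved in NEMR0InitVM() below.
 *
 * As far as we can tell this follows the Hyper-V TLFS hypercall interface:
 * uCallInfo is the hypercall input value (call code in the low 16 bits, rep
 * count shifted into bits 43:32 for rep hypercalls - see the
 * "| ((uint64_t)cPages << 32)" composition in nemR0WinMapPages), while
 * HCPhysInput and HCPhysOutput are the host physical addresses of the input
 * and output pages.  The return value is the hypercall result value (status
 * in the low 16 bits, reps completed in bits 43:32).
 */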
71static uint64_t (*g_pfnHvlInvokeHypercall)(uint64_t uCallInfo, uint64_t HCPhysInput, uint64_t HCPhysOutput);
72
73/**
74 * WinHvr.sys!WinHvDepositMemory
75 *
76 * This API will try to allocate cPages on IdealNode and deposit them to the
77 * hypervisor for use with the given partition. The memory will be freed when
78 * VID.SYS calls WinHvWithdrawAllMemory when the partition is cleaned up.
79 *
80 * Apparently node numbers above 64 have a different meaning.
81 */
82static NTSTATUS (*g_pfnWinHvDepositMemory)(uintptr_t idPartition, size_t cPages, uintptr_t IdealNode, size_t *pcActuallyAdded);
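/* Used by nemR0WinMapPages() below to top up the partition 512 pages (2MB) at
   a time whenever HvCallMapGpaPages fails with HV_STATUS_INSUFFICIENT_MEMORY. */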
83
84RT_C_DECLS_BEGIN
85/**
86 * The WinHvGetPartitionProperty function we intercept in VID.SYS to get the
87 * Hyper-V partition ID.
88 *
89 * This is used from assembly.
90 */
91NTSTATUS WinHvGetPartitionProperty(uintptr_t idPartition, HV_PARTITION_PROPERTY_CODE enmProperty, PHV_PARTITION_PROPERTY puValue);
92decltype(WinHvGetPartitionProperty) *g_pfnWinHvGetPartitionProperty;
93RT_C_DECLS_END
94#endif
95
96/** @name VID.SYS image details.
97 * @{ */
98#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
99static uint8_t *g_pbVidSys = NULL;
100static uintptr_t g_cbVidSys = 0;
101static PIMAGE_NT_HEADERS g_pVidSysHdrs = NULL;
102/** Pointer to the import thunk entry in VID.SYS for WinHvGetPartitionProperty if we found it. */
103static decltype(WinHvGetPartitionProperty) **g_ppfnVidSysWinHvGetPartitionProperty = NULL;
104
105/** Critical section protecting the WinHvGetPartitionProperty hacking. */
106static RTCRITSECT g_VidSysCritSect;
107#endif /* NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
108RT_C_DECLS_BEGIN
109/** The partition ID passed to WinHvGetPartitionProperty by VID.SYS. */
110HV_PARTITION_ID g_idVidSysFoundPartition = HV_PARTITION_ID_INVALID;
111/** The thread which is currently looking for a partition ID. */
112RTNATIVETHREAD g_hVidSysMatchThread = NIL_RTNATIVETHREAD;
113/** The property code we expect in WinHvGetPartitionProperty. */
114VID_PARTITION_PROPERTY_CODE g_enmVidSysMatchProperty = INT64_MAX;
115/* NEMR0NativeA-win.asm: */
116extern uint8_t g_abNemR0WinHvrWinHvGetPartitionProperty_OriginalProlog[64];
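/* The assembly buffer above receives the instruction bytes copied out of the
   original WinHvGetPartitionProperty prologue followed by a "mov rax, imm64 /
   jmp rax" trampoline back to the unpatched remainder; see the hook
   preparation in nemR0InitVMPart2DontWannaDoTheseUglyPartitionIdFallbacks(). */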
117RT_C_DECLS_END
118/** @} */
119
120
121
122/*********************************************************************************************************************************
123* Internal Functions *
124*********************************************************************************************************************************/
125NEM_TMPL_STATIC int nemR0WinMapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
126 uint32_t cPages, uint32_t fFlags);
127NEM_TMPL_STATIC int nemR0WinUnmapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys, uint32_t cPages);
128#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
129NEM_TMPL_STATIC int nemR0WinExportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx);
130NEM_TMPL_STATIC int nemR0WinImportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat, bool fCanUpdateCr3);
131NEM_TMPL_STATIC int nemR0WinQueryCpuTick(PGVM pGVM, PGVMCPU pGVCpu, uint64_t *pcTicks, uint32_t *pcAux);
132NEM_TMPL_STATIC int nemR0WinResumeCpuTickOnAll(PGVM pGVM, PGVMCPU pGVCpu, uint64_t uPausedTscValue);
133#endif
134DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, PGVMCPU pGVCpu, uint32_t uFunction, void *pvInput, uint32_t cbInput,
135 void *pvOutput, uint32_t cbOutput);
136
137/* NEMR0NativeA-win.asm: */
138DECLASM(NTSTATUS) nemR0VidSysWinHvGetPartitionProperty(uintptr_t idPartition, HV_PARTITION_PROPERTY_CODE enmProperty,
139 PHV_PARTITION_PROPERTY puValue);
140DECLASM(NTSTATUS) nemR0WinHvrWinHvGetPartitionProperty(uintptr_t idPartition, HV_PARTITION_PROPERTY_CODE enmProperty,
141 PHV_PARTITION_PROPERTY puValue);
142
143
144/*
145 * Instantiate the code we share with ring-0.
146 */
147#ifdef NEM_WIN_WITH_RING0_RUNLOOP
148# define NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
149#else
150# undef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
151#endif
152#include "../VMMAll/NEMAllNativeTemplate-win.cpp.h"
153
154
155/**
156 * Module initialization for NEM.
157 */
158VMMR0_INT_DECL(int) NEMR0Init(void)
159{
160 return RTCritSectInit(&g_VidSysCritSect);
161}
162
163
164/**
165 * Module termination for NEM.
166 */
167VMMR0_INT_DECL(void) NEMR0Term(void)
168{
169 RTCritSectDelete(&g_VidSysCritSect);
170}
171
172
173/**
174 * Worker for NEMR0InitVM that allocates a hypercall page.
175 *
176 * @returns VBox status code.
177 * @param pHypercallData The hypercall data page to initialize.
178 */
179static int nemR0InitHypercallData(PNEMR0HYPERCALLDATA pHypercallData)
180{
181 int rc = RTR0MemObjAllocPage(&pHypercallData->hMemObj, PAGE_SIZE, false /*fExecutable*/);
182 if (RT_SUCCESS(rc))
183 {
184 pHypercallData->HCPhysPage = RTR0MemObjGetPagePhysAddr(pHypercallData->hMemObj, 0 /*iPage*/);
185 AssertStmt(pHypercallData->HCPhysPage != NIL_RTHCPHYS, rc = VERR_INTERNAL_ERROR_3);
186 pHypercallData->pbPage = (uint8_t *)RTR0MemObjAddress(pHypercallData->hMemObj);
187 AssertStmt(pHypercallData->pbPage, rc = VERR_INTERNAL_ERROR_3);
188 if (RT_SUCCESS(rc))
189 return VINF_SUCCESS;
190
191 /* bail out */
192 RTR0MemObjFree(pHypercallData->hMemObj, true /*fFreeMappings*/);
193 }
194 pHypercallData->hMemObj = NIL_RTR0MEMOBJ;
195 pHypercallData->HCPhysPage = NIL_RTHCPHYS;
196 pHypercallData->pbPage = NULL;
197 return rc;
198}
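/* Note: the page allocated above serves as the hypercall input/output area;
   callers build structures like HV_INPUT_MAP_GPA_PAGES at pbPage and pass
   HCPhysPage to g_pfnHvlInvokeHypercall (see nemR0WinMapPages below). */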
199
200
201/**
202 * Worker for NEMR0CleanupVM and NEMR0InitVM that cleans up a hypercall page.
203 *
204 * @param pHypercallData The hypercall data page to uninitialize.
205 */
206static void nemR0DeleteHypercallData(PNEMR0HYPERCALLDATA pHypercallData)
207{
208 /* Check pbPage here since it will be NULL when not initialized, whereas the
209 hMemObj can be either NIL_RTR0MEMOBJ or 0 (they aren't necessarily the same). */
210 if (pHypercallData->pbPage != NULL)
211 {
212 RTR0MemObjFree(pHypercallData->hMemObj, true /*fFreeMappings*/);
213 pHypercallData->pbPage = NULL;
214 }
215 pHypercallData->hMemObj = NIL_RTR0MEMOBJ;
216 pHypercallData->HCPhysPage = NIL_RTHCPHYS;
217}
218
219
220static int nemR0StrICmp(const char *psz1, const char *psz2)
221{
222 for (;;)
223 {
224 char ch1 = *psz1++;
225 char ch2 = *psz2++;
226 if ( ch1 != ch2
227 && RT_C_TO_LOWER(ch1) != RT_C_TO_LOWER(ch2))
228 return ch1 - ch2;
229 if (!ch1)
230 return 0;
231 }
232}
233
234
235/**
236 * Worker for nemR0PrepareForVidSysIntercept().
237 */
238static void nemR0PrepareForVidSysInterceptInner(void)
239{
240 uint32_t const cbImage = g_cbVidSys;
241 uint8_t * const pbImage = g_pbVidSys;
242 PIMAGE_NT_HEADERS const pNtHdrs = g_pVidSysHdrs;
243 uintptr_t const offEndNtHdrs = (uintptr_t)(pNtHdrs + 1) - (uintptr_t)pbImage;
244
245#define CHECK_LOG_RET(a_Expr, a_LogRel) do { \
246 if (RT_LIKELY(a_Expr)) { /* likely */ } \
247 else \
248 { \
249 LogRel(a_LogRel); \
250 return; \
251 } \
252 } while (0)
253
254 //__try
255 {
256 /*
257 * Get and validate the import directory entry.
258 */
259 CHECK_LOG_RET( pNtHdrs->OptionalHeader.NumberOfRvaAndSizes > IMAGE_DIRECTORY_ENTRY_IMPORT
260 || pNtHdrs->OptionalHeader.NumberOfRvaAndSizes <= IMAGE_NUMBEROF_DIRECTORY_ENTRIES * 4,
261 ("NEMR0: vid.sys: NumberOfRvaAndSizes is out of range: %#x\n", pNtHdrs->OptionalHeader.NumberOfRvaAndSizes));
262
263 IMAGE_DATA_DIRECTORY const ImportDir = pNtHdrs->OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_IMPORT];
264 CHECK_LOG_RET( ImportDir.Size >= sizeof(IMAGE_IMPORT_DESCRIPTOR)
265 && ImportDir.VirtualAddress >= offEndNtHdrs /* ASSUMES NT headers before imports */
266 && (uint64_t)ImportDir.VirtualAddress + ImportDir.Size <= cbImage,
267 ("NEMR0: vid.sys: Bad import directory entry: %#x LB %#x (cbImage=%#x, offEndNtHdrs=%#zx)\n",
268 ImportDir.VirtualAddress, ImportDir.Size, cbImage, offEndNtHdrs));
269
270 /*
271 * Walk the import descriptor table looking for WINHVR.SYS.
272 */
273 for (PIMAGE_IMPORT_DESCRIPTOR pImps = (PIMAGE_IMPORT_DESCRIPTOR)&pbImage[ImportDir.VirtualAddress];
274 pImps->Name != 0 && pImps->FirstThunk != 0;
275 pImps++)
276 {
277 CHECK_LOG_RET(pImps->Name < cbImage, ("NEMR0: vid.sys: Bad import directory entry name: %#x", pImps->Name));
278 const char *pszModName = (const char *)&pbImage[pImps->Name];
279 if (nemR0StrICmp(pszModName, "winhvr.sys"))
280 continue;
281 CHECK_LOG_RET(pImps->FirstThunk < cbImage && pImps->FirstThunk >= offEndNtHdrs,
282 ("NEMR0: vid.sys: Bad FirstThunk: %#x", pImps->FirstThunk));
283 CHECK_LOG_RET( pImps->u.OriginalFirstThunk == 0
284 || (pImps->u.OriginalFirstThunk >= offEndNtHdrs && pImps->u.OriginalFirstThunk < cbImage),
285 ("NEMR0: vid.sys: Bad OriginalFirstThunk: %#x", pImps->u.OriginalFirstThunk));
286
287 /*
288 * Walk the thunks table(s) looking for WinHvGetPartitionProperty.
289 */
290 uintptr_t *puFirstThunk = (uintptr_t *)&pbImage[pImps->FirstThunk]; /* update this. */
291 if ( pImps->u.OriginalFirstThunk != 0
292 && pImps->u.OriginalFirstThunk != pImps->FirstThunk)
293 {
294 uintptr_t const *puOrgThunk = (uintptr_t const *)&pbImage[pImps->u.OriginalFirstThunk]; /* read from this. */
295 uintptr_t cLeft = (cbImage - (RT_MAX(pImps->FirstThunk, pImps->u.OriginalFirstThunk)))
296 / sizeof(*puFirstThunk);
297 while (cLeft-- > 0 && *puOrgThunk != 0)
298 {
299 if (!(*puOrgThunk & IMAGE_ORDINAL_FLAG64))
300 {
301 CHECK_LOG_RET(*puOrgThunk >= offEndNtHdrs && *puOrgThunk < cbImage,
302 ("NEMR0: vid.sys: Bad thunk entry: %#x", *puOrgThunk));
303
304 const char *pszSymbol = (const char *)&pbImage[*puOrgThunk + 2];
305 if (strcmp(pszSymbol, "WinHvGetPartitionProperty") == 0)
306 g_ppfnVidSysWinHvGetPartitionProperty = (decltype(WinHvGetPartitionProperty) **)puFirstThunk;
307 }
308
309 puOrgThunk++;
310 puFirstThunk++;
311 }
312 }
313 else
314 {
315 /* No original thunk table, so scan the resolved symbols for a match
316 with the WinHvGetPartitionProperty address. */
317 uintptr_t const uNeedle = (uintptr_t)g_pfnWinHvGetPartitionProperty;
318 uintptr_t cLeft = (cbImage - pImps->FirstThunk) / sizeof(*puFirstThunk);
319 while (cLeft-- > 0 && *puFirstThunk != 0)
320 {
321 if (*puFirstThunk == uNeedle)
322 g_ppfnVidSysWinHvGetPartitionProperty = (decltype(WinHvGetPartitionProperty) **)puFirstThunk;
323 puFirstThunk++;
324 }
325 }
326 }
327
328 /* Report the findings: */
329 if (g_ppfnVidSysWinHvGetPartitionProperty)
330 LogRel(("NEMR0: vid.sys: Found WinHvGetPartitionProperty import thunk at %p (value %p vs %p)\n",
331 g_ppfnVidSysWinHvGetPartitionProperty, *g_ppfnVidSysWinHvGetPartitionProperty, g_pfnWinHvGetPartitionProperty));
332 else
333 LogRel(("NEMR0: vid.sys: Did not find WinHvGetPartitionProperty!\n"));
334 }
335 //__except(EXCEPTION_EXECUTE_HANDLER)
336 //{
337 // return;
338 //}
339#undef CHECK_LOG_RET
340}
341
342
343/**
344 * Worker for NEMR0InitVM that prepares for intercepting stuff in VID.SYS.
345 */
346static void nemR0PrepareForVidSysIntercept(RTDBGKRNLINFO hKrnlInfo)
347{
348 /*
349 * Resolve the symbols we need first.
350 */
351 int rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, "vid.sys", "__ImageBase", (void **)&g_pbVidSys);
352 if (RT_SUCCESS(rc))
353 {
354 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, "vid.sys", "__ImageSize", (void **)&g_cbVidSys);
355 if (RT_SUCCESS(rc))
356 {
357 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, "vid.sys", "__ImageNtHdrs", (void **)&g_pVidSysHdrs);
358 if (RT_SUCCESS(rc))
359 {
360 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, "winhvr.sys", "WinHvGetPartitionProperty",
361 (void **)&g_pfnWinHvGetPartitionProperty);
362 if (RT_SUCCESS(rc))
363 {
364 /*
365 * Now locate the import thunk entry for WinHvGetPartitionProperty in vid.sys.
366 */
367 nemR0PrepareForVidSysInterceptInner();
368 }
369 else
370 LogRel(("NEMR0: Failed to find winhvr.sys!WinHvGetPartitionProperty (%Rrc)\n", rc));
371 }
372 else
373 LogRel(("NEMR0: Failed to find vid.sys!__ImageNtHdrs (%Rrc)\n", rc));
374 }
375 else
376 LogRel(("NEMR0: Failed to find vid.sys!__ImageSize (%Rrc)\n", rc));
377 }
378 else
379 LogRel(("NEMR0: Failed to find vid.sys!__ImageBase (%Rrc)\n", rc));
380}
381
382
383/**
384 * Called by NEMR3Init to make sure we've got what we need.
385 *
386 * @returns VBox status code.
387 * @param pGVM The ring-0 VM handle.
388 * @thread EMT(0)
389 */
390VMMR0_INT_DECL(int) NEMR0InitVM(PGVM pGVM)
391{
392 AssertCompile(sizeof(pGVM->nemr0.s) <= sizeof(pGVM->nemr0.padding));
393 AssertCompile(sizeof(pGVM->aCpus[0].nemr0.s) <= sizeof(pGVM->aCpus[0].nemr0.padding));
394
395 int rc = GVMMR0ValidateGVMandEMT(pGVM, 0);
396 AssertRCReturn(rc, rc);
397
398#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
399 /*
400 * We want to perform hypercalls here. The NT kernel started to expose a very low
401 * level interface for doing this somewhere between builds 14271 and 16299. Since
402 * we need build 17134 to get anywhere at all, the exact build is not relevant here.
403 *
404 * We also need to deposit memory to the hypervisor for use with the partition
405 * (page mapping structures and such).
406 */
407 RTDBGKRNLINFO hKrnlInfo;
408 rc = RTR0DbgKrnlInfoOpen(&hKrnlInfo, 0);
409 if (RT_SUCCESS(rc))
410 {
411 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, NULL, "HvlInvokeHypercall", (void **)&g_pfnHvlInvokeHypercall);
412 if (RT_FAILURE(rc))
413 rc = VERR_NEM_MISSING_KERNEL_API_1;
414 if (RT_SUCCESS(rc))
415 {
416 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, "winhvr.sys", "WinHvDepositMemory", (void **)&g_pfnWinHvDepositMemory);
417 if (RT_FAILURE(rc))
418 rc = rc == VERR_MODULE_NOT_FOUND ? VERR_NEM_MISSING_KERNEL_API_2 : VERR_NEM_MISSING_KERNEL_API_3;
419 }
420
421 /*
422 * Since late 2021 we may also need to do some nasty trickery with vid.sys to get
423 * the partition ID. So, get the necessary info while we have a hKrnlInfo instance.
424 */
425 if (RT_SUCCESS(rc))
426 nemR0PrepareForVidSysIntercept(hKrnlInfo);
427
428 RTR0DbgKrnlInfoRelease(hKrnlInfo);
429 if (RT_SUCCESS(rc))
430 {
431 /*
432 * Allocate a page for non-EMT threads to use for hypercalls (update
433 * statistics and such) and a critical section protecting it.
434 */
435 rc = RTCritSectInit(&pGVM->nemr0.s.HypercallDataCritSect);
436 if (RT_SUCCESS(rc))
437 {
438 rc = nemR0InitHypercallData(&pGVM->nemr0.s.HypercallData);
439 if (RT_SUCCESS(rc))
440 {
441 /*
442 * Allocate a page for each VCPU to place hypercall data on.
443 */
444 for (VMCPUID i = 0; i < pGVM->cCpus; i++)
445 {
446 rc = nemR0InitHypercallData(&pGVM->aCpus[i].nemr0.s.HypercallData);
447 if (RT_FAILURE(rc))
448 {
449 while (i-- > 0)
450 nemR0DeleteHypercallData(&pGVM->aCpus[i].nemr0.s.HypercallData);
451 break;
452 }
453 }
454 if (RT_SUCCESS(rc))
455 {
456 /*
457 * So far, so good.
458 */
459 return rc;
460 }
461
462 /*
463 * Bail out.
464 */
465 nemR0DeleteHypercallData(&pGVM->nemr0.s.HypercallData);
466 }
467 RTCritSectDelete(&pGVM->nemr0.s.HypercallDataCritSect);
468 }
469 }
470 }
471#endif /* NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
472
473 return rc;
474}
475
476#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
477
478/**
479 * Perform an I/O control operation on the partition handle (VID.SYS).
480 *
481 * @returns NT status code.
482 * @param pGVM The ring-0 VM structure.
483 * @param pGVCpu The global (ring-0) CPU structure of the calling EMT.
484 * @param uFunction The function to perform.
485 * @param pvInput The input buffer. This must point within the VM
486 * structure so we can easily convert to a ring-3
487 * pointer if necessary.
488 * @param cbInput The size of the input. @a pvInput must be NULL when
489 * zero.
490 * @param pvOutput The output buffer. This must also point within the
491 * VM structure for ring-3 pointer magic.
492 * @param cbOutput The size of the output. @a pvOutput must be NULL
493 * when zero.
494 * @thread EMT(pGVCpu)
495 */
496DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, PGVMCPU pGVCpu, uint32_t uFunction, void *pvInput, uint32_t cbInput,
497 void *pvOutput, uint32_t cbOutput)
498{
499# ifdef RT_STRICT
500 /*
501 * Input and output parameters are part of the VM CPU structure.
502 */
503 VMCPU_ASSERT_EMT(pGVCpu);
504 if (pvInput)
505 AssertReturn(((uintptr_t)pvInput + cbInput) - (uintptr_t)pGVCpu <= sizeof(*pGVCpu), VERR_INVALID_PARAMETER);
506 if (pvOutput)
507 AssertReturn(((uintptr_t)pvOutput + cbOutput) - (uintptr_t)pGVCpu <= sizeof(*pGVCpu), VERR_INVALID_PARAMETER);
508# endif
509
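    /* The ring-3 buffer addresses passed to SUPR0IoCtlPerform below are derived
       from the ring-0 ones by adding offRing3ConversionDelta, which
       NEMR0InitVMPart2() computes as pVCpuR3 - pGVCpu for each vCPU; this works
       because the buffers are asserted above to lie within the VMCPU structure. */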
510 int32_t rcNt = STATUS_UNSUCCESSFUL;
511 int rc = SUPR0IoCtlPerform(pGVM->nemr0.s.pIoCtlCtx, uFunction,
512 pvInput,
513 pvInput ? (uintptr_t)pvInput + pGVCpu->nemr0.s.offRing3ConversionDelta : NIL_RTR3PTR,
514 cbInput,
515 pvOutput,
516 pvOutput ? (uintptr_t)pvOutput + pGVCpu->nemr0.s.offRing3ConversionDelta : NIL_RTR3PTR,
517 cbOutput,
518 &rcNt);
519 if (RT_SUCCESS(rc) || !NT_SUCCESS((NTSTATUS)rcNt))
520 return (NTSTATUS)rcNt;
521 return STATUS_UNSUCCESSFUL;
522}
523
524
525/**
526 * Here is something that we really do not wish to do, but find ourselves forced
527 * to do right now as we cannot rewrite the memory management of VBox 6.1 in
528 * time for Windows 11.
529 *
530 * @returns VBox status code.
531 * @param pGVM The ring-0 VM structure.
532 * @param pahMemObjs Array of 6 memory objects that the caller will release.
533 * ASSUMES that they are initialized to NIL.
534 */
535static int nemR0InitVMPart2DontWannaDoTheseUglyPartitionIdFallbacks(PGVM pGVM, PRTR0MEMOBJ pahMemObjs)
536{
537 /*
538 * Check preconditions:
539 */
540 if ( !g_ppfnVidSysWinHvGetPartitionProperty
541 || (uintptr_t)g_ppfnVidSysWinHvGetPartitionProperty & (sizeof(uintptr_t) - 1))
542 {
543 LogRel(("NEMR0: g_ppfnVidSysWinHvGetPartitionProperty is NULL or misaligned (%p), partition ID fallback not possible.\n",
544 g_ppfnVidSysWinHvGetPartitionProperty));
545 return VERR_NEM_INIT_FAILED;
546 }
547 if (!g_pfnWinHvGetPartitionProperty)
548 {
549 LogRel(("NEMR0: g_pfnWinHvGetPartitionProperty is NULL, partition ID fallback not possible.\n"));
550 return VERR_NEM_INIT_FAILED;
551 }
552 if (!pGVM->nem.s.IoCtlGetPartitionProperty.uFunction)
553 {
554 LogRel(("NEMR0: IoCtlGetPartitionProperty.uFunction is 0, partition ID fallback not possible.\n"));
555 return VERR_NEM_INIT_FAILED;
556 }
557
558 /*
559 * Create an alias for the thunk table entry because it's very likely to be read-only.
560 */
561 int rc = RTR0MemObjLockKernel(&pahMemObjs[0], g_ppfnVidSysWinHvGetPartitionProperty, sizeof(uintptr_t), RTMEM_PROT_READ);
562 if (RT_FAILURE(rc))
563 {
564 LogRel(("NEMR0: RTR0MemObjLockKernel failed on VID.SYS thunk table entry: %Rrc\n", rc));
565 return rc;
566 }
567
568 rc = RTR0MemObjEnterPhys(&pahMemObjs[1], RTR0MemObjGetPagePhysAddr(pahMemObjs[0], 0), PAGE_SIZE, RTMEM_CACHE_POLICY_DONT_CARE);
569 if (RT_FAILURE(rc))
570 {
571 LogRel(("NEMR0: RTR0MemObjEnterPhys failed on VID.SYS thunk table entry: %Rrc\n", rc));
572 return rc;
573 }
574
575 rc = RTR0MemObjMapKernel(&pahMemObjs[2], pahMemObjs[1], (void *)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
576 if (RT_FAILURE(rc))
577 {
578 LogRel(("NEMR0: RTR0MemObjMapKernel failed on VID.SYS thunk table entry: %Rrc\n", rc));
579 return rc;
580 }
581
582 decltype(WinHvGetPartitionProperty) **ppfnThunkAlias
583 = (decltype(WinHvGetPartitionProperty) **)( (uintptr_t)RTR0MemObjAddress(pahMemObjs[2])
584 | ((uintptr_t)g_ppfnVidSysWinHvGetPartitionProperty & PAGE_OFFSET_MASK));
585 LogRel(("NEMR0: ppfnThunkAlias=%p *ppfnThunkAlias=%p; original: %p & %p, phys %RHp\n", ppfnThunkAlias, *ppfnThunkAlias,
586 g_ppfnVidSysWinHvGetPartitionProperty, *g_ppfnVidSysWinHvGetPartitionProperty,
587 RTR0MemObjGetPagePhysAddr(pahMemObjs[0], 0) ));
588
589 /*
590 * Create an alias for the target code in WinHvr.sys as there is a very decent
591 * chance we have to patch it.
592 */
593 rc = RTR0MemObjLockKernel(&pahMemObjs[3], g_pfnWinHvGetPartitionProperty, sizeof(uintptr_t), RTMEM_PROT_READ);
594 if (RT_FAILURE(rc))
595 {
596 LogRel(("NEMR0: RTR0MemObjLockKernel failed on WinHvGetPartitionProperty (%p): %Rrc\n", g_pfnWinHvGetPartitionProperty, rc));
597 return rc;
598 }
599
600 rc = RTR0MemObjEnterPhys(&pahMemObjs[4], RTR0MemObjGetPagePhysAddr(pahMemObjs[3], 0), PAGE_SIZE, RTMEM_CACHE_POLICY_DONT_CARE);
601 if (RT_FAILURE(rc))
602 {
603 LogRel(("NEMR0: RTR0MemObjEnterPhys failed on WinHvGetPartitionProperty: %Rrc\n", rc));
604 return rc;
605 }
606
607 rc = RTR0MemObjMapKernel(&pahMemObjs[5], pahMemObjs[4], (void *)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
608 if (RT_FAILURE(rc))
609 {
610 LogRel(("NEMR0: RTR0MemObjMapKernel failed on WinHvGetPartitionProperty: %Rrc\n", rc));
611 return rc;
612 }
613
614 uint8_t *pbTargetAlias = (uint8_t *)( (uintptr_t)RTR0MemObjAddress(pahMemObjs[5])
615 | ((uintptr_t)g_pfnWinHvGetPartitionProperty & PAGE_OFFSET_MASK));
616 LogRel(("NEMR0: pbTargetAlias=%p %.16Rhxs; original: %p %.16Rhxs, phys %RHp\n", pbTargetAlias, pbTargetAlias,
617 g_pfnWinHvGetPartitionProperty, g_pfnWinHvGetPartitionProperty, RTR0MemObjGetPagePhysAddr(pahMemObjs[3], 0) ));
618
619 /*
620 * Analyse the target function's prologue to figure out how much we should copy
621 * when patching it. We repeat this every time because we don't want to get
622 * tripped up by someone else doing the same stuff as we're doing here.
623 * We need at least 12 bytes for the patch sequence (MOV RAX, QWORD; JMP RAX).
624 */
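    /* For reference, the patch sequence assembled further down is
           48 B8 <imm64>   mov rax, imm64
           FF E0           jmp rax
       i.e. exactly 12 bytes, which is where cbMinJmpPatch comes from. */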
625 union
626 {
627 uint8_t ab[48]; /**< Must be equal to or smaller than g_abNemR0WinHvrWinHvGetPartitionProperty_OriginalProlog */
628 int64_t ai64[6];
629 } Org;
630 memcpy(Org.ab, g_pfnWinHvGetPartitionProperty, sizeof(Org)); /** @todo ASSUMES 48 valid bytes start at function... */
631
632 uint32_t offJmpBack = 0;
633 uint32_t const cbMinJmpPatch = 12;
634 DISSTATE Dis;
635 while (offJmpBack < cbMinJmpPatch && offJmpBack < sizeof(Org) - 16)
636 {
637 uint32_t cbInstr = 1;
638 rc = DISInstr(&Org.ab[offJmpBack], DISCPUMODE_64BIT, &Dis, &cbInstr);
639 if (RT_FAILURE(rc))
640 {
641 LogRel(("NEMR0: DISInstr failed %#x bytes into WinHvGetPartitionProperty: %Rrc (%.48Rhxs)\n",
642 offJmpBack, rc, Org.ab));
643 break;
644 }
645 if (Dis.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
646 {
647 LogRel(("NEMR0: Control flow instruction %#x bytes into WinHvGetPartitionProperty prologue: %.48Rhxs\n",
648 offJmpBack, Org.ab));
649 break;
650 }
651 if (Dis.ModRM.Bits.Mod == 0 && Dis.ModRM.Bits.Rm == 5 /* wrt RIP */)
652 {
653 LogRel(("NEMR0: RIP relative addressing %#x bytes into WinHvGetPartitionProperty prologue: %.48Rhxs\n",
654 offJmpBack, Org.ab));
655 break;
656 }
657 offJmpBack += cbInstr;
658 }
659
660 uintptr_t const cbLeftInPage = PAGE_SIZE - ((uintptr_t)g_pfnWinHvGetPartitionProperty & PAGE_OFFSET_MASK);
661 if (cbLeftInPage < 16 && offJmpBack >= cbMinJmpPatch)
662 {
663 LogRel(("NEMR0: WinHvGetPartitionProperty patching not possible do the page crossing: %p (%#zx)\n",
664 g_pfnWinHvGetPartitionProperty, cbLeftInPage));
665 offJmpBack = 0;
666 }
667 if (offJmpBack >= cbMinJmpPatch)
668 LogRel(("NEMR0: offJmpBack=%#x for WinHvGetPartitionProperty (%p: %.48Rhxs)\n",
669 offJmpBack, g_pfnWinHvGetPartitionProperty, Org.ab));
670 else
671 offJmpBack = 0;
672 rc = VINF_SUCCESS;
673
674 /*
675 * Now enter serialization lock and get on with it...
676 */
677 PVMCPUCC const pVCpu0 = &pGVM->aCpus[0];
678 NTSTATUS rcNt;
679 RTCritSectEnter(&g_VidSysCritSect);
680
681 /*
682 * First attempt, patching the import table entry.
683 */
684 g_idVidSysFoundPartition = HV_PARTITION_ID_INVALID;
685 g_hVidSysMatchThread = RTThreadNativeSelf();
686 g_enmVidSysMatchProperty = pVCpu0->nem.s.uIoCtlBuf.GetProp.enmProperty = HvPartitionPropertyProcessorVendor;
687 pVCpu0->nem.s.uIoCtlBuf.GetProp.uValue = 0;
688
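    /* With the import thunk redirected to nemR0VidSysWinHvGetPartitionProperty
       (assembly), the VID.SYS I/O control below ends up in our stub, which is
       expected to capture the partition ID VID.SYS passes along in
       g_idVidSysFoundPartition when the calling thread and property code match
       the g_hVidSysMatchThread / g_enmVidSysMatchProperty values set above. */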
689 void *pvOld = NULL;
690 if (ASMAtomicCmpXchgExPtr(ppfnThunkAlias, (void *)(uintptr_t)nemR0VidSysWinHvGetPartitionProperty,
691 (void *)(uintptr_t)g_pfnWinHvGetPartitionProperty, &pvOld))
692 {
693 LogRel(("NEMR0: after switch to %p: ppfnThunkAlias=%p *ppfnThunkAlias=%p; original: %p & %p\n",
694 nemR0VidSysWinHvGetPartitionProperty, ppfnThunkAlias, *ppfnThunkAlias,
695 g_ppfnVidSysWinHvGetPartitionProperty, *g_ppfnVidSysWinHvGetPartitionProperty));
696
697 rcNt = nemR0NtPerformIoControl(pGVM, pVCpu0, pGVM->nemr0.s.IoCtlGetPartitionProperty.uFunction,
698 &pVCpu0->nem.s.uIoCtlBuf.GetProp.enmProperty,
699 sizeof(pVCpu0->nem.s.uIoCtlBuf.GetProp.enmProperty),
700 &pVCpu0->nem.s.uIoCtlBuf.GetProp.uValue,
701 sizeof(pVCpu0->nem.s.uIoCtlBuf.GetProp.uValue));
702 ASMAtomicWritePtr(ppfnThunkAlias, (void *)(uintptr_t)g_pfnWinHvGetPartitionProperty);
703 HV_PARTITION_ID idHvPartition = g_idVidSysFoundPartition;
704
705 LogRel(("NEMR0: WinHvGetPartitionProperty trick #1 yielded: rcNt=%#x idHvPartition=%#RX64 uValue=%#RX64\n",
706 rcNt, idHvPartition, pVCpu0->nem.s.uIoCtlBuf.GetProp.uValue));
707 pGVM->nemr0.s.idHvPartition = idHvPartition;
708 }
709 else
710 {
711 LogRel(("NEMR0: Unexpected WinHvGetPartitionProperty pointer in VID.SYS: %p, expected %p\n",
712 pvOld, g_pfnWinHvGetPartitionProperty));
713 rc = VERR_NEM_INIT_FAILED;
714 }
715
716 /*
717 * If that didn't succeed, try patching the winhvr.sys code.
718 */
719 if ( pGVM->nemr0.s.idHvPartition == HV_PARTITION_ID_INVALID
720 && offJmpBack >= cbMinJmpPatch)
721 {
722 g_idVidSysFoundPartition = HV_PARTITION_ID_INVALID;
723 g_hVidSysMatchThread = RTThreadNativeSelf();
724 g_enmVidSysMatchProperty = pVCpu0->nem.s.uIoCtlBuf.GetProp.enmProperty = HvPartitionPropertyProcessorVendor;
725 pVCpu0->nem.s.uIoCtlBuf.GetProp.uValue = 0;
726
727 /*
728 * Prepare the hook area.
729 */
730 uint8_t *pbDst = g_abNemR0WinHvrWinHvGetPartitionProperty_OriginalProlog;
731 memcpy(pbDst, (uint8_t const *)(uintptr_t)g_pfnWinHvGetPartitionProperty, offJmpBack);
732 pbDst += offJmpBack;
733
734 *pbDst++ = 0x48; /* mov rax, imm64 */
735 *pbDst++ = 0xb8;
736 *(uint64_t *)pbDst = (uintptr_t)g_pfnWinHvGetPartitionProperty + offJmpBack;
737 pbDst += sizeof(uint64_t);
738 *pbDst++ = 0xff; /* jmp rax */
739 *pbDst++ = 0xe0;
740 *pbDst++ = 0xcc; /* int3 */
741
742 /*
743 * Patch the original. We use cmpxchg16b here to avoid concurrency problems
744 * (this also makes sure we don't trample over someone else doing similar
745 * patching at the same time).
746 */
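    /* Note that the patch itself is only 12 bytes, but cmpxchg16b always
       exchanges 16 bytes; the remaining 4 bytes are therefore seeded from the
       original prologue (Org) and written back unchanged. */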
747 union
748 {
749 uint8_t ab[16];
750 uint64_t au64[2];
751 } Patch;
752 memcpy(Patch.ab, Org.ab, sizeof(Patch));
753 pbDst = Patch.ab;
754 *pbDst++ = 0x48; /* mov rax, imm64 */
755 *pbDst++ = 0xb8;
756 *(uint64_t *)pbDst = (uintptr_t)nemR0WinHvrWinHvGetPartitionProperty;
757 pbDst += sizeof(uint64_t);
758 *pbDst++ = 0xff; /* jmp rax */
759 *pbDst++ = 0xe0;
760
761 int64_t ai64CmpCopy[2] = { Org.ai64[0], Org.ai64[1] }; /* paranoia */
762 if (_InterlockedCompareExchange128((__int64 volatile *)pbTargetAlias, Patch.au64[1], Patch.au64[0], ai64CmpCopy) != 0)
763 {
764 rcNt = nemR0NtPerformIoControl(pGVM, pVCpu0, pGVM->nemr0.s.IoCtlGetPartitionProperty.uFunction,
765 &pVCpu0->nem.s.uIoCtlBuf.GetProp.enmProperty,
766 sizeof(pVCpu0->nem.s.uIoCtlBuf.GetProp.enmProperty),
767 &pVCpu0->nem.s.uIoCtlBuf.GetProp.uValue,
768 sizeof(pVCpu0->nem.s.uIoCtlBuf.GetProp.uValue));
769
770 for (uint32_t cFailures = 0; cFailures < 10; cFailures++)
771 {
772 ai64CmpCopy[0] = Patch.au64[0]; /* paranoia */
773 ai64CmpCopy[1] = Patch.au64[1];
774 if (_InterlockedCompareExchange128((__int64 volatile *)pbTargetAlias, Org.ai64[1], Org.ai64[0], ai64CmpCopy) != 0)
775 {
776 if (cFailures > 0)
777 LogRel(("NEMR0: Succeeded on try #%u.\n", cFailures));
778 break;
779 }
780 LogRel(("NEMR0: Patch restore failure #%u: %.16Rhxs, expected %.16Rhxs\n",
781 cFailures + 1, &ai64CmpCopy[0], &Patch.au64[0]));
782 RTThreadSleep(1000);
783 }
784
785 HV_PARTITION_ID idHvPartition = g_idVidSysFoundPartition;
786 LogRel(("NEMR0: WinHvGetPartitionProperty trick #2 yielded: rcNt=%#x idHvPartition=%#RX64 uValue=%#RX64\n",
787 rcNt, idHvPartition, pVCpu0->nem.s.uIoCtlBuf.GetProp.uValue));
788 pGVM->nemr0.s.idHvPartition = idHvPartition;
789
790 }
791 else
792 {
793 LogRel(("NEMR0: Failed to install WinHvGetPartitionProperty patch: %.16Rhxs, expected %.16Rhxs\n",
794 &ai64CmpCopy[0], &Org.ai64[0]));
795 rc = VERR_NEM_INIT_FAILED;
796 }
797 }
798
799 RTCritSectLeave(&g_VidSysCritSect);
800
801 return rc;
802}
803
804#endif /* NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
805
806/**
807 * 2nd part of the initialization, after we've got a partition handle.
808 *
809 * @returns VBox status code.
810 * @param pGVM The ring-0 VM handle.
811 * @thread EMT(0)
812 */
813VMMR0_INT_DECL(int) NEMR0InitVMPart2(PGVM pGVM)
814{
815 int rc = GVMMR0ValidateGVMandEMT(pGVM, 0);
816 AssertRCReturn(rc, rc);
817 SUPR0Printf("NEMR0InitVMPart2\n"); LogRel(("2: NEMR0InitVMPart2\n"));
818#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
819# ifdef NEM_WIN_WITH_RING0_RUNLOOP
820 Assert(pGVM->nemr0.s.fMayUseRing0Runloop == false);
821# endif
822
823 /*
824 * Copy and validate the I/O control information from ring-3.
825 */
826 NEMWINIOCTL Copy = pGVM->nem.s.IoCtlGetHvPartitionId;
827 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);
828 AssertLogRelReturn(Copy.cbInput == 0, VERR_NEM_INIT_FAILED);
829 AssertLogRelReturn(Copy.cbOutput == sizeof(HV_PARTITION_ID), VERR_NEM_INIT_FAILED);
830 pGVM->nemr0.s.IoCtlGetHvPartitionId = Copy;
831
832 Copy = pGVM->nem.s.IoCtlGetPartitionProperty;
833 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);
834 AssertLogRelReturn(Copy.cbInput == sizeof(VID_PARTITION_PROPERTY_CODE), VERR_NEM_INIT_FAILED);
835 AssertLogRelReturn(Copy.cbOutput == sizeof(HV_PARTITION_PROPERTY), VERR_NEM_INIT_FAILED);
836 pGVM->nemr0.s.IoCtlGetPartitionProperty = Copy;
837
838# ifdef NEM_WIN_WITH_RING0_RUNLOOP
839 pGVM->nemr0.s.fMayUseRing0Runloop = pGVM->nem.s.fUseRing0Runloop;
840
841 Copy = pGVM->nem.s.IoCtlStartVirtualProcessor;
842 AssertLogRelStmt(Copy.uFunction != 0, rc = VERR_NEM_INIT_FAILED);
843 AssertLogRelStmt(Copy.cbInput == sizeof(HV_VP_INDEX), rc = VERR_NEM_INIT_FAILED);
844 AssertLogRelStmt(Copy.cbOutput == 0, rc = VERR_NEM_INIT_FAILED);
845 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlGetHvPartitionId.uFunction, rc = VERR_NEM_INIT_FAILED);
846 if (RT_SUCCESS(rc))
847 pGVM->nemr0.s.IoCtlStartVirtualProcessor = Copy;
848
849 Copy = pGVM->nem.s.IoCtlStopVirtualProcessor;
850 AssertLogRelStmt(Copy.uFunction != 0, rc = VERR_NEM_INIT_FAILED);
851 AssertLogRelStmt(Copy.cbInput == sizeof(HV_VP_INDEX), rc = VERR_NEM_INIT_FAILED);
852 AssertLogRelStmt(Copy.cbOutput == 0, rc = VERR_NEM_INIT_FAILED);
853 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlGetHvPartitionId.uFunction, rc = VERR_NEM_INIT_FAILED);
854 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlStartVirtualProcessor.uFunction, rc = VERR_NEM_INIT_FAILED);
855 if (RT_SUCCESS(rc))
856 pGVM->nemr0.s.IoCtlStopVirtualProcessor = Copy;
857
858 Copy = pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext;
859 AssertLogRelStmt(Copy.uFunction != 0, rc = VERR_NEM_INIT_FAILED);
860 AssertLogRelStmt( Copy.cbInput == sizeof(VID_IOCTL_INPUT_MESSAGE_SLOT_HANDLE_AND_GET_NEXT)
861 || Copy.cbInput == RT_OFFSETOF(VID_IOCTL_INPUT_MESSAGE_SLOT_HANDLE_AND_GET_NEXT, cMillies),
862 rc = VERR_NEM_INIT_FAILED);
863 AssertLogRelStmt(Copy.cbOutput == 0, rc = VERR_NEM_INIT_FAILED);
864 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlGetHvPartitionId.uFunction, rc = VERR_NEM_INIT_FAILED);
865 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlStartVirtualProcessor.uFunction, rc = VERR_NEM_INIT_FAILED);
866 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlStopVirtualProcessor.uFunction, rc = VERR_NEM_INIT_FAILED);
867 if (RT_SUCCESS(rc))
868 pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext = Copy;
869# endif
870
871 if ( RT_SUCCESS(rc)
872 || !pGVM->nem.s.fUseRing0Runloop)
873 {
874 /*
875 * Setup of an I/O control context for the partition handle for later use.
876 */
877 rc = SUPR0IoCtlSetupForHandle(pGVM->pSession, pGVM->nem.s.hPartitionDevice, 0, &pGVM->nemr0.s.pIoCtlCtx);
878 AssertLogRelRCReturn(rc, rc);
879 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
880 {
881 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
882 pGVCpu->nemr0.s.offRing3ConversionDelta = (uintptr_t)pGVM->aCpus[idCpu].pVCpuR3 - (uintptr_t)pGVCpu;
883 }
884
885 /*
886 * Get the partition ID.
887 */
888 PVMCPUCC pVCpu0 = &pGVM->aCpus[0];
889 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pVCpu0, pGVM->nemr0.s.IoCtlGetHvPartitionId.uFunction, NULL, 0,
890 &pVCpu0->nem.s.uIoCtlBuf.idPartition, sizeof(pVCpu0->nem.s.uIoCtlBuf.idPartition));
891# if 0
892 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("IoCtlGetHvPartitionId failed: %#x\n", rcNt), VERR_NEM_INIT_FAILED);
893 pGVM->nemr0.s.idHvPartition = pVCpu0->nem.s.uIoCtlBuf.idPartition;
894# else
895 /*
896 * Since 2021 (Win11) the above I/O control doesn't work on exo-partitions
897 * so we have to go to extremes to get at it. Sigh.
898 */
899 if ( !NT_SUCCESS(rcNt)
900 || pVCpu0->nem.s.uIoCtlBuf.idPartition == HV_PARTITION_ID_INVALID)
901 {
902 LogRel(("IoCtlGetHvPartitionId failed: r0=%#RX64, r3=%#RX64, rcNt=%#x\n",
903 pGVM->nemr0.s.idHvPartition, pGVM->nem.s.idHvPartition, rcNt));
904
905 RTR0MEMOBJ ahMemObjs[6]
906 = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ };
907 rc = nemR0InitVMPart2DontWannaDoTheseUglyPartitionIdFallbacks(pGVM, ahMemObjs);
908 size_t i = RT_ELEMENTS(ahMemObjs);
909 while (i-- > 0)
910 RTR0MemObjFree(ahMemObjs[i], false /*fFreeMappings*/);
911 }
912 if (pGVM->nem.s.idHvPartition == HV_PARTITION_ID_INVALID)
913 pGVM->nem.s.idHvPartition = pGVM->nemr0.s.idHvPartition;
914# endif
915 AssertLogRelMsgReturn(pGVM->nemr0.s.idHvPartition == pGVM->nem.s.idHvPartition,
916 ("idHvPartition mismatch: r0=%#RX64, r3=%#RX64\n", pGVM->nemr0.s.idHvPartition, pGVM->nem.s.idHvPartition),
917 VERR_NEM_INIT_FAILED);
918 if (RT_SUCCESS(rc) && pGVM->nemr0.s.idHvPartition == HV_PARTITION_ID_INVALID)
919 rc = VERR_NEM_INIT_FAILED;
920 }
921#endif /* NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
922
923 return rc;
924}
925
926
927/**
928 * Cleanup the NEM parts of the VM in ring-0.
929 *
930 * This is always called and must deal with the state regardless of whether
931 * NEMR0InitVM() was called or not. So, take care here.
932 *
933 * @param pGVM The ring-0 VM handle.
934 */
935VMMR0_INT_DECL(void) NEMR0CleanupVM(PGVM pGVM)
936{
937#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
938 pGVM->nemr0.s.idHvPartition = HV_PARTITION_ID_INVALID;
939
940 /* Clean up I/O control context. */
941 if (pGVM->nemr0.s.pIoCtlCtx)
942 {
943 int rc = SUPR0IoCtlCleanup(pGVM->nemr0.s.pIoCtlCtx);
944 AssertRC(rc);
945 pGVM->nemr0.s.pIoCtlCtx = NULL;
946 }
947
948 /* Free the hypercall pages. */
949 VMCPUID i = pGVM->cCpus;
950 while (i-- > 0)
951 nemR0DeleteHypercallData(&pGVM->aCpus[i].nemr0.s.HypercallData);
952
953 /* The non-EMT one too. */
954 if (RTCritSectIsInitialized(&pGVM->nemr0.s.HypercallDataCritSect))
955 RTCritSectDelete(&pGVM->nemr0.s.HypercallDataCritSect);
956 nemR0DeleteHypercallData(&pGVM->nemr0.s.HypercallData);
957#else
958 RT_NOREF(pGVM);
959#endif
960}
961
962
963#if 0 /* for debugging GPA unmapping. */
964static int nemR3WinDummyReadGpa(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys)
965{
966 PHV_INPUT_READ_GPA pIn = (PHV_INPUT_READ_GPA)pGVCpu->nemr0.s.pbHypercallData;
967 PHV_OUTPUT_READ_GPA pOut = (PHV_OUTPUT_READ_GPA)(pIn + 1);
968 pIn->PartitionId = pGVM->nemr0.s.idHvPartition;
969 pIn->VpIndex = pGVCpu->idCpu;
970 pIn->ByteCount = 0x10;
971 pIn->BaseGpa = GCPhys;
972 pIn->ControlFlags.AsUINT64 = 0;
973 pIn->ControlFlags.CacheType = HvCacheTypeX64WriteCombining;
974 memset(pOut, 0xfe, sizeof(*pOut));
975 uint64_t volatile uResult = g_pfnHvlInvokeHypercall(HvCallReadGpa, pGVCpu->nemr0.s.HCPhysHypercallData,
976 pGVCpu->nemr0.s.HCPhysHypercallData + sizeof(*pIn));
977 LogRel(("nemR3WinDummyReadGpa: %RGp -> %#RX64; code=%u rsvd=%u abData=%.16Rhxs\n",
978 GCPhys, uResult, pOut->AccessResult.ResultCode, pOut->AccessResult.Reserved, pOut->Data));
979 __debugbreak();
980
981 return uResult != 0 ? VERR_READ_ERROR : VINF_SUCCESS;
982}
983#endif
984
985
986#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
987/**
988 * Worker for NEMR0MapPages and others.
989 */
990NEM_TMPL_STATIC int nemR0WinMapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
991 uint32_t cPages, uint32_t fFlags)
992{
993 /*
994 * Validate.
995 */
996 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
997
998 AssertReturn(cPages > 0, VERR_OUT_OF_RANGE);
999 AssertReturn(cPages <= NEM_MAX_MAP_PAGES, VERR_OUT_OF_RANGE);
1000 AssertReturn(!(fFlags & ~(HV_MAP_GPA_MAYBE_ACCESS_MASK & ~HV_MAP_GPA_DUNNO_ACCESS)), VERR_INVALID_FLAGS);
1001 AssertMsgReturn(!(GCPhysDst & X86_PAGE_OFFSET_MASK), ("GCPhysDst=%RGp\n", GCPhysDst), VERR_OUT_OF_RANGE);
1002 AssertReturn(GCPhysDst < _1E, VERR_OUT_OF_RANGE);
1003 if (GCPhysSrc != GCPhysDst)
1004 {
1005 AssertMsgReturn(!(GCPhysSrc & X86_PAGE_OFFSET_MASK), ("GCPhysSrc=%RGp\n", GCPhysSrc), VERR_OUT_OF_RANGE);
1006 AssertReturn(GCPhysSrc < _1E, VERR_OUT_OF_RANGE);
1007 }
1008
1009 /*
1010 * Compose and make the hypercall.
1011 * Ring-3 is not allowed to fill in the host physical addresses of the call.
1012 */
1013 for (uint32_t iTries = 0;; iTries++)
1014 {
1015 RTGCPHYS GCPhysSrcTmp = GCPhysSrc;
1016 HV_INPUT_MAP_GPA_PAGES *pMapPages = (HV_INPUT_MAP_GPA_PAGES *)pGVCpu->nemr0.s.HypercallData.pbPage;
1017 AssertPtrReturn(pMapPages, VERR_INTERNAL_ERROR_3);
1018 pMapPages->TargetPartitionId = pGVM->nemr0.s.idHvPartition;
1019 pMapPages->TargetGpaBase = GCPhysDst >> X86_PAGE_SHIFT;
1020 pMapPages->MapFlags = fFlags;
1021 pMapPages->u32ExplicitPadding = 0;
1022
1023 for (uint32_t iPage = 0; iPage < cPages; iPage++, GCPhysSrcTmp += X86_PAGE_SIZE)
1024 {
1025 RTHCPHYS HCPhys = NIL_RTGCPHYS;
1026 int rc = PGMPhysGCPhys2HCPhys(pGVM, GCPhysSrcTmp, &HCPhys);
1027 AssertRCReturn(rc, rc);
1028 pMapPages->PageList[iPage] = HCPhys >> X86_PAGE_SHIFT;
1029 }
1030
1031 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallMapGpaPages | ((uint64_t)cPages << 32),
1032 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0);
1033 Log6(("NEMR0MapPages: %RGp/%RGp L %u prot %#x -> %#RX64\n",
1034 GCPhysDst, GCPhysSrcTmp - cPages * X86_PAGE_SIZE, cPages, fFlags, uResult));
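        /* A fully successful rep hypercall is expected to return status 0 in
           the low 16 bits and the number of completed reps (== cPages) in
           bits 43:32, which is what the check below tests for. */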
1035 if (uResult == ((uint64_t)cPages << 32))
1036 return VINF_SUCCESS;
1037
1038 /*
1039 * If the partition is out of memory, try to donate another 512 pages to
1040 * it (2MB). VID.SYS does multiples of 512 pages, nothing smaller.
1041 */
1042 if ( uResult != HV_STATUS_INSUFFICIENT_MEMORY
1043 || iTries > 16
1044 || g_pfnWinHvDepositMemory == NULL)
1045 {
1046 LogRel(("g_pfnHvlInvokeHypercall/MapGpaPages -> %#RX64\n", uResult));
1047 return VERR_NEM_MAP_PAGES_FAILED;
1048 }
1049
1050 size_t cPagesAdded = 0;
1051 NTSTATUS rcNt = g_pfnWinHvDepositMemory(pGVM->nemr0.s.idHvPartition, 512, 0, &cPagesAdded);
1052 if (!cPagesAdded)
1053 {
1054 LogRel(("g_pfnWinHvDepositMemory -> %#x / %#RX64\n", rcNt, uResult));
1055 return VERR_NEM_MAP_PAGES_FAILED;
1056 }
1057 }
1058}
1059#endif /* NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
1060
1061
1062/**
1063 * Maps pages into the guest physical address space.
1064 *
1065 * Generally the caller will be under the PGM lock already, so no extra effort
1066 * is needed to make sure all changes happen under it.
1067 *
1068 * @returns VBox status code.
1069 * @param pGVM The ring-0 VM handle.
1070 * @param idCpu The calling EMT. Necessary for getting the
1071 * hypercall page and arguments.
1072 * @thread EMT(idCpu)
1073 */
1074VMMR0_INT_DECL(int) NEMR0MapPages(PGVM pGVM, VMCPUID idCpu)
1075{
1076#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1077 /*
1078 * Unpack the call.
1079 */
1080 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
1081 if (RT_SUCCESS(rc))
1082 {
1083 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1084
1085 RTGCPHYS const GCPhysSrc = pGVCpu->nem.s.Hypercall.MapPages.GCPhysSrc;
1086 RTGCPHYS const GCPhysDst = pGVCpu->nem.s.Hypercall.MapPages.GCPhysDst;
1087 uint32_t const cPages = pGVCpu->nem.s.Hypercall.MapPages.cPages;
1088 HV_MAP_GPA_FLAGS const fFlags = pGVCpu->nem.s.Hypercall.MapPages.fFlags;
1089
1090 /*
1091 * Do the work.
1092 */
1093 rc = nemR0WinMapPages(pGVM, pGVCpu, GCPhysSrc, GCPhysDst, cPages, fFlags);
1094 }
1095 return rc;
1096#else
1097 RT_NOREF(pGVM, idCpu);
1098 return VERR_NOT_IMPLEMENTED;
1099#endif
1100}
1101
1102
1103#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1104/**
1105 * Worker for NEMR0UnmapPages and others.
1106 */
1107NEM_TMPL_STATIC int nemR0WinUnmapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys, uint32_t cPages)
1108{
1109 /*
1110 * Validate input.
1111 */
1112 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
1113
1114 AssertReturn(cPages > 0, VERR_OUT_OF_RANGE);
1115 AssertReturn(cPages <= NEM_MAX_UNMAP_PAGES, VERR_OUT_OF_RANGE);
1116 AssertMsgReturn(!(GCPhys & X86_PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_OUT_OF_RANGE);
1117 AssertReturn(GCPhys < _1E, VERR_OUT_OF_RANGE);
1118
1119 /*
1120 * Compose and make the hypercall.
1121 */
1122 HV_INPUT_UNMAP_GPA_PAGES *pUnmapPages = (HV_INPUT_UNMAP_GPA_PAGES *)pGVCpu->nemr0.s.HypercallData.pbPage;
1123 AssertPtrReturn(pUnmapPages, VERR_INTERNAL_ERROR_3);
1124 pUnmapPages->TargetPartitionId = pGVM->nemr0.s.idHvPartition;
1125 pUnmapPages->TargetGpaBase = GCPhys >> X86_PAGE_SHIFT;
1126 pUnmapPages->fFlags = 0;
1127
1128 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallUnmapGpaPages | ((uint64_t)cPages << 32),
1129 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0);
1130 Log6(("NEMR0UnmapPages: %RGp L %u -> %#RX64\n", GCPhys, cPages, uResult));
1131 if (uResult == ((uint64_t)cPages << 32))
1132 {
1133# if 1 /* Do we need to do this? Hopefully not... */
1134 uint64_t volatile uR = g_pfnHvlInvokeHypercall(HvCallUncommitGpaPages | ((uint64_t)cPages << 32),
1135 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0);
1136 AssertMsg(uR == ((uint64_t)cPages << 32), ("uR=%#RX64\n", uR)); NOREF(uR);
1137# endif
1138 return VINF_SUCCESS;
1139 }
1140
1141 LogRel(("g_pfnHvlInvokeHypercall/UnmapGpaPages -> %#RX64\n", uResult));
1142 return VERR_NEM_UNMAP_PAGES_FAILED;
1143}
1144#endif /* NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
1145
1146
1147/**
1148 * Unmaps pages from the guest physical address space.
1149 *
1150 * Generally the caller will be under the PGM lock already, so no extra effort
1151 * is needed to make sure all changes happen under it.
1152 *
1153 * @returns VBox status code.
1154 * @param pGVM The ring-0 VM handle.
1155 * @param idCpu The calling EMT. Necessary for getting the
1156 * hypercall page and arguments.
1157 * @thread EMT(idCpu)
1158 */
1159VMMR0_INT_DECL(int) NEMR0UnmapPages(PGVM pGVM, VMCPUID idCpu)
1160{
1161#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1162 /*
1163 * Unpack the call.
1164 */
1165 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
1166 if (RT_SUCCESS(rc))
1167 {
1168 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1169
1170 RTGCPHYS const GCPhys = pGVCpu->nem.s.Hypercall.UnmapPages.GCPhys;
1171 uint32_t const cPages = pGVCpu->nem.s.Hypercall.UnmapPages.cPages;
1172
1173 /*
1174 * Do the work.
1175 */
1176 rc = nemR0WinUnmapPages(pGVM, pGVCpu, GCPhys, cPages);
1177 }
1178 return rc;
1179#else
1180 RT_NOREF(pGVM, idCpu);
1181 return VERR_NOT_IMPLEMENTED;
1182#endif
1183}
1184
1185
1186#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
1187/**
1188 * Worker for NEMR0ExportState.
1189 *
1190 * The intention is to use it internally later.
1191 *
1192 * @returns VBox status code.
1193 * @param pGVM The ring-0 VM handle.
1194 * @param pGVCpu The ring-0 VCPU handle.
1195 * @param pCtx The CPU context structure to export from.
1196 */
1197NEM_TMPL_STATIC int nemR0WinExportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx)
1198{
1199 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
1200 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
1201 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
1202
1203 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
1204 pInput->VpIndex = pGVCpu->idCpu;
1205 pInput->RsvdZ = 0;
1206
1207 uint64_t const fWhat = ~pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK);
1208 if ( !fWhat
1209 && pGVCpu->nem.s.fCurrentInterruptWindows == pGVCpu->nem.s.fDesiredInterruptWindows)
1210 return VINF_SUCCESS;
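    /* The input page is laid out as the HV_INPUT_SET_VP_REGISTERS header filled
       in above followed by an array of register association elements; iReg
       counts how many elements get filled in below and presumably ends up as
       the rep count when the HvCallSetVpRegisters hypercall is finally made. */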
1211 uintptr_t iReg = 0;
1212
1213 /* GPRs */
1214 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1215 {
1216 if (fWhat & CPUMCTX_EXTRN_RAX)
1217 {
1218 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1219 pInput->Elements[iReg].Name = HvX64RegisterRax;
1220 pInput->Elements[iReg].Value.Reg64 = pCtx->rax;
1221 iReg++;
1222 }
1223 if (fWhat & CPUMCTX_EXTRN_RCX)
1224 {
1225 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1226 pInput->Elements[iReg].Name = HvX64RegisterRcx;
1227 pInput->Elements[iReg].Value.Reg64 = pCtx->rcx;
1228 iReg++;
1229 }
1230 if (fWhat & CPUMCTX_EXTRN_RDX)
1231 {
1232 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1233 pInput->Elements[iReg].Name = HvX64RegisterRdx;
1234 pInput->Elements[iReg].Value.Reg64 = pCtx->rdx;
1235 iReg++;
1236 }
1237 if (fWhat & CPUMCTX_EXTRN_RBX)
1238 {
1239 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1240 pInput->Elements[iReg].Name = HvX64RegisterRbx;
1241 pInput->Elements[iReg].Value.Reg64 = pCtx->rbx;
1242 iReg++;
1243 }
1244 if (fWhat & CPUMCTX_EXTRN_RSP)
1245 {
1246 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1247 pInput->Elements[iReg].Name = HvX64RegisterRsp;
1248 pInput->Elements[iReg].Value.Reg64 = pCtx->rsp;
1249 iReg++;
1250 }
1251 if (fWhat & CPUMCTX_EXTRN_RBP)
1252 {
1253 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1254 pInput->Elements[iReg].Name = HvX64RegisterRbp;
1255 pInput->Elements[iReg].Value.Reg64 = pCtx->rbp;
1256 iReg++;
1257 }
1258 if (fWhat & CPUMCTX_EXTRN_RSI)
1259 {
1260 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1261 pInput->Elements[iReg].Name = HvX64RegisterRsi;
1262 pInput->Elements[iReg].Value.Reg64 = pCtx->rsi;
1263 iReg++;
1264 }
1265 if (fWhat & CPUMCTX_EXTRN_RDI)
1266 {
1267 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1268 pInput->Elements[iReg].Name = HvX64RegisterRdi;
1269 pInput->Elements[iReg].Value.Reg64 = pCtx->rdi;
1270 iReg++;
1271 }
1272 if (fWhat & CPUMCTX_EXTRN_R8_R15)
1273 {
1274 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1275 pInput->Elements[iReg].Name = HvX64RegisterR8;
1276 pInput->Elements[iReg].Value.Reg64 = pCtx->r8;
1277 iReg++;
1278 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1279 pInput->Elements[iReg].Name = HvX64RegisterR9;
1280 pInput->Elements[iReg].Value.Reg64 = pCtx->r9;
1281 iReg++;
1282 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1283 pInput->Elements[iReg].Name = HvX64RegisterR10;
1284 pInput->Elements[iReg].Value.Reg64 = pCtx->r10;
1285 iReg++;
1286 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1287 pInput->Elements[iReg].Name = HvX64RegisterR11;
1288 pInput->Elements[iReg].Value.Reg64 = pCtx->r11;
1289 iReg++;
1290 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1291 pInput->Elements[iReg].Name = HvX64RegisterR12;
1292 pInput->Elements[iReg].Value.Reg64 = pCtx->r12;
1293 iReg++;
1294 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1295 pInput->Elements[iReg].Name = HvX64RegisterR13;
1296 pInput->Elements[iReg].Value.Reg64 = pCtx->r13;
1297 iReg++;
1298 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1299 pInput->Elements[iReg].Name = HvX64RegisterR14;
1300 pInput->Elements[iReg].Value.Reg64 = pCtx->r14;
1301 iReg++;
1302 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1303 pInput->Elements[iReg].Name = HvX64RegisterR15;
1304 pInput->Elements[iReg].Value.Reg64 = pCtx->r15;
1305 iReg++;
1306 }
1307 }
1308
1309 /* RIP & Flags */
1310 if (fWhat & CPUMCTX_EXTRN_RIP)
1311 {
1312 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1313 pInput->Elements[iReg].Name = HvX64RegisterRip;
1314 pInput->Elements[iReg].Value.Reg64 = pCtx->rip;
1315 iReg++;
1316 }
1317 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
1318 {
1319 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1320 pInput->Elements[iReg].Name = HvX64RegisterRflags;
1321 pInput->Elements[iReg].Value.Reg64 = pCtx->rflags.u;
1322 iReg++;
1323 }
1324
1325 /* Segments */
1326# define COPY_OUT_SEG(a_idx, a_enmName, a_SReg) \
1327 do { \
1328 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[a_idx]); \
1329 pInput->Elements[a_idx].Name = a_enmName; \
1330 pInput->Elements[a_idx].Value.Segment.Base = (a_SReg).u64Base; \
1331 pInput->Elements[a_idx].Value.Segment.Limit = (a_SReg).u32Limit; \
1332 pInput->Elements[a_idx].Value.Segment.Selector = (a_SReg).Sel; \
1333 pInput->Elements[a_idx].Value.Segment.Attributes = (a_SReg).Attr.u; \
1334 } while (0)
1335 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
1336 {
1337 if (fWhat & CPUMCTX_EXTRN_CS)
1338 {
1339 COPY_OUT_SEG(iReg, HvX64RegisterCs, pCtx->cs);
1340 iReg++;
1341 }
1342 if (fWhat & CPUMCTX_EXTRN_ES)
1343 {
1344 COPY_OUT_SEG(iReg, HvX64RegisterEs, pCtx->es);
1345 iReg++;
1346 }
1347 if (fWhat & CPUMCTX_EXTRN_SS)
1348 {
1349 COPY_OUT_SEG(iReg, HvX64RegisterSs, pCtx->ss);
1350 iReg++;
1351 }
1352 if (fWhat & CPUMCTX_EXTRN_DS)
1353 {
1354 COPY_OUT_SEG(iReg, HvX64RegisterDs, pCtx->ds);
1355 iReg++;
1356 }
1357 if (fWhat & CPUMCTX_EXTRN_FS)
1358 {
1359 COPY_OUT_SEG(iReg, HvX64RegisterFs, pCtx->fs);
1360 iReg++;
1361 }
1362 if (fWhat & CPUMCTX_EXTRN_GS)
1363 {
1364 COPY_OUT_SEG(iReg, HvX64RegisterGs, pCtx->gs);
1365 iReg++;
1366 }
1367 }
1368
1369 /* Descriptor tables & task segment. */
1370 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
1371 {
1372 if (fWhat & CPUMCTX_EXTRN_LDTR)
1373 {
1374 COPY_OUT_SEG(iReg, HvX64RegisterLdtr, pCtx->ldtr);
1375 iReg++;
1376 }
1377 if (fWhat & CPUMCTX_EXTRN_TR)
1378 {
1379 COPY_OUT_SEG(iReg, HvX64RegisterTr, pCtx->tr);
1380 iReg++;
1381 }
1382
1383 if (fWhat & CPUMCTX_EXTRN_IDTR)
1384 {
1385 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1386 pInput->Elements[iReg].Value.Table.Pad[0] = 0;
1387 pInput->Elements[iReg].Value.Table.Pad[1] = 0;
1388 pInput->Elements[iReg].Value.Table.Pad[2] = 0;
1389 pInput->Elements[iReg].Name = HvX64RegisterIdtr;
1390 pInput->Elements[iReg].Value.Table.Limit = pCtx->idtr.cbIdt;
1391 pInput->Elements[iReg].Value.Table.Base = pCtx->idtr.pIdt;
1392 iReg++;
1393 }
1394 if (fWhat & CPUMCTX_EXTRN_GDTR)
1395 {
1396 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1397 pInput->Elements[iReg].Value.Table.Pad[0] = 0;
1398 pInput->Elements[iReg].Value.Table.Pad[1] = 0;
1399 pInput->Elements[iReg].Value.Table.Pad[2] = 0;
1400 pInput->Elements[iReg].Name = HvX64RegisterGdtr;
1401 pInput->Elements[iReg].Value.Table.Limit = pCtx->gdtr.cbGdt;
1402 pInput->Elements[iReg].Value.Table.Base = pCtx->gdtr.pGdt;
1403 iReg++;
1404 }
1405 }
1406
1407 /* Control registers. */
1408 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
1409 {
1410 if (fWhat & CPUMCTX_EXTRN_CR0)
1411 {
1412 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1413 pInput->Elements[iReg].Name = HvX64RegisterCr0;
1414 pInput->Elements[iReg].Value.Reg64 = pCtx->cr0;
1415 iReg++;
1416 }
1417 if (fWhat & CPUMCTX_EXTRN_CR2)
1418 {
1419 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1420 pInput->Elements[iReg].Name = HvX64RegisterCr2;
1421 pInput->Elements[iReg].Value.Reg64 = pCtx->cr2;
1422 iReg++;
1423 }
1424 if (fWhat & CPUMCTX_EXTRN_CR3)
1425 {
1426 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1427 pInput->Elements[iReg].Name = HvX64RegisterCr3;
1428 pInput->Elements[iReg].Value.Reg64 = pCtx->cr3;
1429 iReg++;
1430 }
1431 if (fWhat & CPUMCTX_EXTRN_CR4)
1432 {
1433 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1434 pInput->Elements[iReg].Name = HvX64RegisterCr4;
1435 pInput->Elements[iReg].Value.Reg64 = pCtx->cr4;
1436 iReg++;
1437 }
1438 }
1439 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
1440 {
1441 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1442 pInput->Elements[iReg].Name = HvX64RegisterCr8;
1443 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestCR8(pGVCpu);
1444 iReg++;
1445 }
1446
1447 /** @todo does HvX64RegisterXfem mean XCR0? What about the related MSR. */
1448
1449 /* Debug registers. */
1450/** @todo fixme. Figure out what the hyper-v version of KVM_SET_GUEST_DEBUG would be. */
1451 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
1452 {
1453 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1454 pInput->Elements[iReg].Name = HvX64RegisterDr0;
1455 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR0(pGVCpu);
1456 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[0];
1457 iReg++;
1458 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1459 pInput->Elements[iReg].Name = HvX64RegisterDr1;
1460 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR1(pGVCpu);
1461 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[1];
1462 iReg++;
1463 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1464 pInput->Elements[iReg].Name = HvX64RegisterDr2;
1465 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR2(pGVCpu);
1466 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[2];
1467 iReg++;
1468 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1469 pInput->Elements[iReg].Name = HvX64RegisterDr3;
1470 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR3(pGVCpu);
1471 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[3];
1472 iReg++;
1473 }
1474 if (fWhat & CPUMCTX_EXTRN_DR6)
1475 {
1476 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1477 pInput->Elements[iReg].Name = HvX64RegisterDr6;
1478 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR6(pGVCpu);
1479 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[6];
1480 iReg++;
1481 }
1482 if (fWhat & CPUMCTX_EXTRN_DR7)
1483 {
1484 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1485 pInput->Elements[iReg].Name = HvX64RegisterDr7;
1486 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR7(pGVCpu);
1487 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[7];
1488 iReg++;
1489 }
1490
1491 /* Floating point state. */
1492 if (fWhat & CPUMCTX_EXTRN_X87)
1493 {
1494 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1495 pInput->Elements[iReg].Name = HvX64RegisterFpMmx0;
1496 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->XState.x87.aRegs[0].au64[0];
1497 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->XState.x87.aRegs[0].au64[1];
1498 iReg++;
1499 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1500 pInput->Elements[iReg].Name = HvX64RegisterFpMmx1;
1501 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->XState.x87.aRegs[1].au64[0];
1502 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->XState.x87.aRegs[1].au64[1];
1503 iReg++;
1504 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1505 pInput->Elements[iReg].Name = HvX64RegisterFpMmx2;
1506 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->XState.x87.aRegs[2].au64[0];
1507 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->XState.x87.aRegs[2].au64[1];
1508 iReg++;
1509 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1510 pInput->Elements[iReg].Name = HvX64RegisterFpMmx3;
1511 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->XState.x87.aRegs[3].au64[0];
1512 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->XState.x87.aRegs[3].au64[1];
1513 iReg++;
1514 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1515 pInput->Elements[iReg].Name = HvX64RegisterFpMmx4;
1516 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->XState.x87.aRegs[4].au64[0];
1517 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->XState.x87.aRegs[4].au64[1];
1518 iReg++;
1519 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1520 pInput->Elements[iReg].Name = HvX64RegisterFpMmx5;
1521 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->XState.x87.aRegs[5].au64[0];
1522 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->XState.x87.aRegs[5].au64[1];
1523 iReg++;
1524 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1525 pInput->Elements[iReg].Name = HvX64RegisterFpMmx6;
1526 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->XState.x87.aRegs[6].au64[0];
1527 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->XState.x87.aRegs[6].au64[1];
1528 iReg++;
1529 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1530 pInput->Elements[iReg].Name = HvX64RegisterFpMmx7;
1531 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->XState.x87.aRegs[7].au64[0];
1532 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->XState.x87.aRegs[7].au64[1];
1533 iReg++;
1534
1535 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1536 pInput->Elements[iReg].Name = HvX64RegisterFpControlStatus;
1537 pInput->Elements[iReg].Value.FpControlStatus.FpControl = pCtx->XState.x87.FCW;
1538 pInput->Elements[iReg].Value.FpControlStatus.FpStatus = pCtx->XState.x87.FSW;
1539 pInput->Elements[iReg].Value.FpControlStatus.FpTag = pCtx->XState.x87.FTW;
1540 pInput->Elements[iReg].Value.FpControlStatus.Reserved = pCtx->XState.x87.FTW >> 8;
1541 pInput->Elements[iReg].Value.FpControlStatus.LastFpOp = pCtx->XState.x87.FOP;
1542 pInput->Elements[iReg].Value.FpControlStatus.LastFpRip = (pCtx->XState.x87.FPUIP)
1543 | ((uint64_t)pCtx->XState.x87.CS << 32)
1544 | ((uint64_t)pCtx->XState.x87.Rsrvd1 << 48);
1545 iReg++;
1546/** @todo We've got trouble if we try to write just SSE w/o X87. */
1547 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1548 pInput->Elements[iReg].Name = HvX64RegisterXmmControlStatus;
1549 pInput->Elements[iReg].Value.XmmControlStatus.LastFpRdp = (pCtx->XState.x87.FPUDP)
1550 | ((uint64_t)pCtx->XState.x87.DS << 32)
1551 | ((uint64_t)pCtx->XState.x87.Rsrvd2 << 48);
1552 pInput->Elements[iReg].Value.XmmControlStatus.XmmStatusControl = pCtx->XState.x87.MXCSR;
1553 pInput->Elements[iReg].Value.XmmControlStatus.XmmStatusControlMask = pCtx->XState.x87.MXCSR_MASK; /** @todo ??? (Isn't this an output field?) */
1554 iReg++;
1555 }
1556
1557 /* Vector state. */
1558 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
1559 {
1560 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1561 pInput->Elements[iReg].Name = HvX64RegisterXmm0;
1562 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[0].uXmm.s.Lo;
1563 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[0].uXmm.s.Hi;
1564 iReg++;
1565 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1566 pInput->Elements[iReg].Name = HvX64RegisterXmm1;
1567 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[1].uXmm.s.Lo;
1568 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[1].uXmm.s.Hi;
1569 iReg++;
1570 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1571 pInput->Elements[iReg].Name = HvX64RegisterXmm2;
1572 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[2].uXmm.s.Lo;
1573 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[2].uXmm.s.Hi;
1574 iReg++;
1575 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1576 pInput->Elements[iReg].Name = HvX64RegisterXmm3;
1577 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[3].uXmm.s.Lo;
1578 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[3].uXmm.s.Hi;
1579 iReg++;
1580 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1581 pInput->Elements[iReg].Name = HvX64RegisterXmm4;
1582 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[4].uXmm.s.Lo;
1583 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[4].uXmm.s.Hi;
1584 iReg++;
1585 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1586 pInput->Elements[iReg].Name = HvX64RegisterXmm5;
1587 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[5].uXmm.s.Lo;
1588 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[5].uXmm.s.Hi;
1589 iReg++;
1590 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1591 pInput->Elements[iReg].Name = HvX64RegisterXmm6;
1592 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[6].uXmm.s.Lo;
1593 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[6].uXmm.s.Hi;
1594 iReg++;
1595 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1596 pInput->Elements[iReg].Name = HvX64RegisterXmm7;
1597 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[7].uXmm.s.Lo;
1598 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[7].uXmm.s.Hi;
1599 iReg++;
1600 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1601 pInput->Elements[iReg].Name = HvX64RegisterXmm8;
1602 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[8].uXmm.s.Lo;
1603 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[8].uXmm.s.Hi;
1604 iReg++;
1605 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1606 pInput->Elements[iReg].Name = HvX64RegisterXmm9;
1607 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[9].uXmm.s.Lo;
1608 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[9].uXmm.s.Hi;
1609 iReg++;
1610 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1611 pInput->Elements[iReg].Name = HvX64RegisterXmm10;
1612 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[10].uXmm.s.Lo;
1613 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[10].uXmm.s.Hi;
1614 iReg++;
1615 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1616 pInput->Elements[iReg].Name = HvX64RegisterXmm11;
1617 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[11].uXmm.s.Lo;
1618 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[11].uXmm.s.Hi;
1619 iReg++;
1620 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1621 pInput->Elements[iReg].Name = HvX64RegisterXmm12;
1622 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[12].uXmm.s.Lo;
1623 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[12].uXmm.s.Hi;
1624 iReg++;
1625 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1626 pInput->Elements[iReg].Name = HvX64RegisterXmm13;
1627 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[13].uXmm.s.Lo;
1628 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[13].uXmm.s.Hi;
1629 iReg++;
1630 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1631 pInput->Elements[iReg].Name = HvX64RegisterXmm14;
1632 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[14].uXmm.s.Lo;
1633 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[14].uXmm.s.Hi;
1634 iReg++;
1635 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1636 pInput->Elements[iReg].Name = HvX64RegisterXmm15;
1637 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[15].uXmm.s.Lo;
1638 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[15].uXmm.s.Hi;
1639 iReg++;
1640 }
1641
1642 /* MSRs */
1643 // HvX64RegisterTsc - don't touch
1644 if (fWhat & CPUMCTX_EXTRN_EFER)
1645 {
1646 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1647 pInput->Elements[iReg].Name = HvX64RegisterEfer;
1648 pInput->Elements[iReg].Value.Reg64 = pCtx->msrEFER;
1649 iReg++;
1650 }
1651 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1652 {
1653 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1654 pInput->Elements[iReg].Name = HvX64RegisterKernelGsBase;
1655 pInput->Elements[iReg].Value.Reg64 = pCtx->msrKERNELGSBASE;
1656 iReg++;
1657 }
1658 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1659 {
1660 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1661 pInput->Elements[iReg].Name = HvX64RegisterSysenterCs;
1662 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.cs;
1663 iReg++;
1664 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1665 pInput->Elements[iReg].Name = HvX64RegisterSysenterEip;
1666 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.eip;
1667 iReg++;
1668 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1669 pInput->Elements[iReg].Name = HvX64RegisterSysenterEsp;
1670 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.esp;
1671 iReg++;
1672 }
1673 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1674 {
1675 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1676 pInput->Elements[iReg].Name = HvX64RegisterStar;
1677 pInput->Elements[iReg].Value.Reg64 = pCtx->msrSTAR;
1678 iReg++;
1679 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1680 pInput->Elements[iReg].Name = HvX64RegisterLstar;
1681 pInput->Elements[iReg].Value.Reg64 = pCtx->msrLSTAR;
1682 iReg++;
1683 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1684 pInput->Elements[iReg].Name = HvX64RegisterCstar;
1685 pInput->Elements[iReg].Value.Reg64 = pCtx->msrCSTAR;
1686 iReg++;
1687 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1688 pInput->Elements[iReg].Name = HvX64RegisterSfmask;
1689 pInput->Elements[iReg].Value.Reg64 = pCtx->msrSFMASK;
1690 iReg++;
1691 }
1692 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1693 {
1694 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1695 pInput->Elements[iReg].Name = HvX64RegisterApicBase;
1696 pInput->Elements[iReg].Value.Reg64 = APICGetBaseMsrNoCheck(pGVCpu);
1697 iReg++;
1698 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1699 pInput->Elements[iReg].Name = HvX64RegisterPat;
1700 pInput->Elements[iReg].Value.Reg64 = pCtx->msrPAT;
1701 iReg++;
1702# if 0 /** @todo HvX64RegisterMtrrCap is read only? Seems it's not even readable. */
1703 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1704 pInput->Elements[iReg].Name = HvX64RegisterMtrrCap;
1705 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestIa32MtrrCap(pGVCpu);
1706 iReg++;
1707# endif
1708
1709 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pGVCpu);
1710
1711 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1712 pInput->Elements[iReg].Name = HvX64RegisterMtrrDefType;
1713 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrDefType;
1714 iReg++;
1715
1716 /** @todo we don't keep state for HvX64RegisterMtrrPhysBaseX and HvX64RegisterMtrrPhysMaskX. */
1717
1718 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1719 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix64k00000;
1720 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix64K_00000;
1721 iReg++;
1722 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1723 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix16k80000;
1724 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix16K_80000;
1725 iReg++;
1726 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1727 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix16kA0000;
1728 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix16K_A0000;
1729 iReg++;
1730 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1731 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kC0000;
1732 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_C0000;
1733 iReg++;
1734 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1735 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kC8000;
1736 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_C8000;
1737 iReg++;
1738 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1739 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kD0000;
1740 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_D0000;
1741 iReg++;
1742 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1743 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kD8000;
1744 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_D8000;
1745 iReg++;
1746 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1747 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kE0000;
1748 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_E0000;
1749 iReg++;
1750 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1751 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kE8000;
1752 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_E8000;
1753 iReg++;
1754 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1755 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kF0000;
1756 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_F0000;
1757 iReg++;
1758 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1759 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kF8000;
1760 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_F8000;
1761 iReg++;
1762 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1763 pInput->Elements[iReg].Name = HvX64RegisterTscAux;
1764 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.TscAux;
1765 iReg++;
1766
1767# if 0 /** @todo Why can't we write these on Intel systems? Not that we really care... */
1768 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM);
1769 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
1770 {
1771 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1772 pInput->Elements[iReg].Name = HvX64RegisterIa32MiscEnable;
1773 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MiscEnable;
1774 iReg++;
1775 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1776 pInput->Elements[iReg].Name = HvX64RegisterIa32FeatureControl;
1777 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestIa32FeatureControl(pGVCpu);
1778 iReg++;
1779 }
1780# endif
1781 }
1782
1783 /* Event injection (clear it). */
1784 if (fWhat & CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)
1785 {
1786 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1787 pInput->Elements[iReg].Name = HvRegisterPendingInterruption;
1788 pInput->Elements[iReg].Value.Reg64 = 0;
1789 iReg++;
1790 }
1791
1792 /* Interruptibility state. This can get a little complicated since we get
1793 half of the state via HV_X64_VP_EXECUTION_STATE. */
1794 if ( (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
1795 == (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI) )
1796 {
1797 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1798 pInput->Elements[iReg].Name = HvRegisterInterruptState;
1799 pInput->Elements[iReg].Value.Reg64 = 0;
1800 if ( VMCPU_FF_IS_SET(pGVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1801 && EMGetInhibitInterruptsPC(pGVCpu) == pCtx->rip)
1802 pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1;
1803 if (VMCPU_FF_IS_SET(pGVCpu, VMCPU_FF_BLOCK_NMIS))
1804 pInput->Elements[iReg].Value.InterruptState.NmiMasked = 1;
1805 iReg++;
1806 }
1807 else if (fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT)
1808 {
1809 if ( pGVCpu->nem.s.fLastInterruptShadow
1810 || ( VMCPU_FF_IS_SET(pGVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1811 && EMGetInhibitInterruptsPC(pGVCpu) == pCtx->rip))
1812 {
1813 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1814 pInput->Elements[iReg].Name = HvRegisterInterruptState;
1815 pInput->Elements[iReg].Value.Reg64 = 0;
1816 if ( VMCPU_FF_IS_SET(pGVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1817 && EMGetInhibitInterruptsPC(pGVCpu) == pCtx->rip)
1818 pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1;
1819 /** @todo Retrieve NMI state, currently assuming it's zero. (yes this may happen on I/O) */
1820 //if (VMCPU_FF_IS_ANY_SET(pGVCpu, VMCPU_FF_BLOCK_NMIS))
1821 // pInput->Elements[iReg].Value.InterruptState.NmiMasked = 1;
1822 iReg++;
1823 }
1824 }
1825 else
1826 Assert(!(fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI));
1827
1828 /* Interrupt windows. Always set if active as Hyper-V seems to be forgetful. */
1829 uint8_t const fDesiredIntWin = pGVCpu->nem.s.fDesiredInterruptWindows;
1830 if ( fDesiredIntWin
1831 || pGVCpu->nem.s.fCurrentInterruptWindows != fDesiredIntWin)
1832 {
1833 pGVCpu->nem.s.fCurrentInterruptWindows = pGVCpu->nem.s.fDesiredInterruptWindows;
1834 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1835 pInput->Elements[iReg].Name = HvX64RegisterDeliverabilityNotifications;
1836 pInput->Elements[iReg].Value.DeliverabilityNotifications.AsUINT64 = fDesiredIntWin;
1837 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.NmiNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_NMI));
1838 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.InterruptNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_REGULAR));
1839 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.InterruptPriority == (fDesiredIntWin & NEM_WIN_INTW_F_PRIO_MASK) >> NEM_WIN_INTW_F_PRIO_SHIFT);
1840 iReg++;
1841 }
1842
1843 /// @todo HvRegisterPendingEvent0
1844 /// @todo HvRegisterPendingEvent1
1845
1846 /*
1847 * Set the registers.
1848 */
1849 Assert((uintptr_t)&pInput->Elements[iReg] - (uintptr_t)pGVCpu->nemr0.s.HypercallData.pbPage < PAGE_SIZE); /* max is 127 */
1850
1851 /*
1852 * Make the hypercall.
1853 */
1854 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, iReg),
1855 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0 /*GCPhysOutput*/);
1856 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(iReg),
1857 ("uResult=%RX64 iRegs=%#x\n", uResult, iReg),
1858 VERR_NEM_SET_REGISTERS_FAILED);
1859 //LogFlow(("nemR0WinExportState: uResult=%#RX64 iReg=%zu fWhat=%#018RX64 fExtrn=%#018RX64 -> %#018RX64\n", uResult, iReg, fWhat, pCtx->fExtrn,
1860 // pCtx->fExtrn | CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM ));
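/* Everything was just pushed to Hyper-V, so flag the whole context as externally kept until the next import. */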
1861 pCtx->fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM;
1862 return VINF_SUCCESS;
1863}
1864#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
1865
1866
1867/**
1868 * Export the state to the native API (out of CPUMCTX).
1869 *
1870 * @returns VBox status code
1871 * @param pGVM The ring-0 VM handle.
1872 * @param idCpu The calling EMT. Necessary for getting the
1873 * hypercall page and arguments.
1874 */
1875VMMR0_INT_DECL(int) NEMR0ExportState(PGVM pGVM, VMCPUID idCpu)
1876{
1877#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
1878 /*
1879 * Validate the call.
1880 */
1881 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
1882 if (RT_SUCCESS(rc))
1883 {
1884 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1885 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
1886
1887 /*
1888 * Call worker.
1889 */
1890 rc = nemR0WinExportState(pGVM, pGVCpu, &pGVCpu->cpum.GstCtx);
1891 }
1892 return rc;
1893#else
1894 RT_NOREF(pGVM, idCpu);
1895 return VERR_NOT_IMPLEMENTED;
1896#endif
1897}
1898
1899
1900#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
1901/**
1902 * Worker for NEMR0ImportState.
1903 *
1904 * The intention is to use it internally later.
1905 *
1906 * @returns VBox status code.
1907 * @param pGVM The ring-0 VM handle.
1908 * @param pGVCpu The ring-0 VCPU handle.
1909 * @param pCtx The CPU context structure to import into.
1910 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
1911 * @param fCanUpdateCr3 Whether it's safe to update CR3 or not.
1912 */
1913NEM_TMPL_STATIC int nemR0WinImportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat, bool fCanUpdateCr3)
1914{
1915 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
1916 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
1917 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
1918 Assert(pCtx == &pGVCpu->cpum.GstCtx);
1919
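/* Only fetch state that is still marked as externally kept; everything else is already valid in CPUMCTX. */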
1920 fWhat &= pCtx->fExtrn;
1921
1922 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
1923 pInput->VpIndex = pGVCpu->idCpu;
1924 pInput->fFlags = 0;
1925
1926 /* GPRs */
1927 uintptr_t iReg = 0;
1928 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1929 {
1930 if (fWhat & CPUMCTX_EXTRN_RAX)
1931 pInput->Names[iReg++] = HvX64RegisterRax;
1932 if (fWhat & CPUMCTX_EXTRN_RCX)
1933 pInput->Names[iReg++] = HvX64RegisterRcx;
1934 if (fWhat & CPUMCTX_EXTRN_RDX)
1935 pInput->Names[iReg++] = HvX64RegisterRdx;
1936 if (fWhat & CPUMCTX_EXTRN_RBX)
1937 pInput->Names[iReg++] = HvX64RegisterRbx;
1938 if (fWhat & CPUMCTX_EXTRN_RSP)
1939 pInput->Names[iReg++] = HvX64RegisterRsp;
1940 if (fWhat & CPUMCTX_EXTRN_RBP)
1941 pInput->Names[iReg++] = HvX64RegisterRbp;
1942 if (fWhat & CPUMCTX_EXTRN_RSI)
1943 pInput->Names[iReg++] = HvX64RegisterRsi;
1944 if (fWhat & CPUMCTX_EXTRN_RDI)
1945 pInput->Names[iReg++] = HvX64RegisterRdi;
1946 if (fWhat & CPUMCTX_EXTRN_R8_R15)
1947 {
1948 pInput->Names[iReg++] = HvX64RegisterR8;
1949 pInput->Names[iReg++] = HvX64RegisterR9;
1950 pInput->Names[iReg++] = HvX64RegisterR10;
1951 pInput->Names[iReg++] = HvX64RegisterR11;
1952 pInput->Names[iReg++] = HvX64RegisterR12;
1953 pInput->Names[iReg++] = HvX64RegisterR13;
1954 pInput->Names[iReg++] = HvX64RegisterR14;
1955 pInput->Names[iReg++] = HvX64RegisterR15;
1956 }
1957 }
1958
1959 /* RIP & Flags */
1960 if (fWhat & CPUMCTX_EXTRN_RIP)
1961 pInput->Names[iReg++] = HvX64RegisterRip;
1962 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
1963 pInput->Names[iReg++] = HvX64RegisterRflags;
1964
1965 /* Segments */
1966 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
1967 {
1968 if (fWhat & CPUMCTX_EXTRN_CS)
1969 pInput->Names[iReg++] = HvX64RegisterCs;
1970 if (fWhat & CPUMCTX_EXTRN_ES)
1971 pInput->Names[iReg++] = HvX64RegisterEs;
1972 if (fWhat & CPUMCTX_EXTRN_SS)
1973 pInput->Names[iReg++] = HvX64RegisterSs;
1974 if (fWhat & CPUMCTX_EXTRN_DS)
1975 pInput->Names[iReg++] = HvX64RegisterDs;
1976 if (fWhat & CPUMCTX_EXTRN_FS)
1977 pInput->Names[iReg++] = HvX64RegisterFs;
1978 if (fWhat & CPUMCTX_EXTRN_GS)
1979 pInput->Names[iReg++] = HvX64RegisterGs;
1980 }
1981
1982 /* Descriptor tables and the task segment. */
1983 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
1984 {
1985 if (fWhat & CPUMCTX_EXTRN_LDTR)
1986 pInput->Names[iReg++] = HvX64RegisterLdtr;
1987 if (fWhat & CPUMCTX_EXTRN_TR)
1988 pInput->Names[iReg++] = HvX64RegisterTr;
1989 if (fWhat & CPUMCTX_EXTRN_IDTR)
1990 pInput->Names[iReg++] = HvX64RegisterIdtr;
1991 if (fWhat & CPUMCTX_EXTRN_GDTR)
1992 pInput->Names[iReg++] = HvX64RegisterGdtr;
1993 }
1994
1995 /* Control registers. */
1996 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
1997 {
1998 if (fWhat & CPUMCTX_EXTRN_CR0)
1999 pInput->Names[iReg++] = HvX64RegisterCr0;
2000 if (fWhat & CPUMCTX_EXTRN_CR2)
2001 pInput->Names[iReg++] = HvX64RegisterCr2;
2002 if (fWhat & CPUMCTX_EXTRN_CR3)
2003 pInput->Names[iReg++] = HvX64RegisterCr3;
2004 if (fWhat & CPUMCTX_EXTRN_CR4)
2005 pInput->Names[iReg++] = HvX64RegisterCr4;
2006 }
2007 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
2008 pInput->Names[iReg++] = HvX64RegisterCr8;
2009
2010 /* Debug registers. */
2011 if (fWhat & CPUMCTX_EXTRN_DR7)
2012 pInput->Names[iReg++] = HvX64RegisterDr7;
2013 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
2014 {
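/* The DR0..DR3 copy-back below asserts on DR7 being up to date (see the hack alert there), so fetch DR7 as well if it is still external. */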
2015 if (!(fWhat & CPUMCTX_EXTRN_DR7) && (pCtx->fExtrn & CPUMCTX_EXTRN_DR7))
2016 {
2017 fWhat |= CPUMCTX_EXTRN_DR7;
2018 pInput->Names[iReg++] = HvX64RegisterDr7;
2019 }
2020 pInput->Names[iReg++] = HvX64RegisterDr0;
2021 pInput->Names[iReg++] = HvX64RegisterDr1;
2022 pInput->Names[iReg++] = HvX64RegisterDr2;
2023 pInput->Names[iReg++] = HvX64RegisterDr3;
2024 }
2025 if (fWhat & CPUMCTX_EXTRN_DR6)
2026 pInput->Names[iReg++] = HvX64RegisterDr6;
2027
2028 /* Floating point state. */
2029 if (fWhat & CPUMCTX_EXTRN_X87)
2030 {
2031 pInput->Names[iReg++] = HvX64RegisterFpMmx0;
2032 pInput->Names[iReg++] = HvX64RegisterFpMmx1;
2033 pInput->Names[iReg++] = HvX64RegisterFpMmx2;
2034 pInput->Names[iReg++] = HvX64RegisterFpMmx3;
2035 pInput->Names[iReg++] = HvX64RegisterFpMmx4;
2036 pInput->Names[iReg++] = HvX64RegisterFpMmx5;
2037 pInput->Names[iReg++] = HvX64RegisterFpMmx6;
2038 pInput->Names[iReg++] = HvX64RegisterFpMmx7;
2039 pInput->Names[iReg++] = HvX64RegisterFpControlStatus;
2040 }
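/* HvX64RegisterXmmControlStatus carries MXCSR as well as the last x87 data pointer, so it is needed for both the x87 and the SSE state. */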
2041 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
2042 pInput->Names[iReg++] = HvX64RegisterXmmControlStatus;
2043
2044 /* Vector state. */
2045 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
2046 {
2047 pInput->Names[iReg++] = HvX64RegisterXmm0;
2048 pInput->Names[iReg++] = HvX64RegisterXmm1;
2049 pInput->Names[iReg++] = HvX64RegisterXmm2;
2050 pInput->Names[iReg++] = HvX64RegisterXmm3;
2051 pInput->Names[iReg++] = HvX64RegisterXmm4;
2052 pInput->Names[iReg++] = HvX64RegisterXmm5;
2053 pInput->Names[iReg++] = HvX64RegisterXmm6;
2054 pInput->Names[iReg++] = HvX64RegisterXmm7;
2055 pInput->Names[iReg++] = HvX64RegisterXmm8;
2056 pInput->Names[iReg++] = HvX64RegisterXmm9;
2057 pInput->Names[iReg++] = HvX64RegisterXmm10;
2058 pInput->Names[iReg++] = HvX64RegisterXmm11;
2059 pInput->Names[iReg++] = HvX64RegisterXmm12;
2060 pInput->Names[iReg++] = HvX64RegisterXmm13;
2061 pInput->Names[iReg++] = HvX64RegisterXmm14;
2062 pInput->Names[iReg++] = HvX64RegisterXmm15;
2063 }
2064
2065 /* MSRs */
2066 // HvX64RegisterTsc - don't touch
2067 if (fWhat & CPUMCTX_EXTRN_EFER)
2068 pInput->Names[iReg++] = HvX64RegisterEfer;
2069 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
2070 pInput->Names[iReg++] = HvX64RegisterKernelGsBase;
2071 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
2072 {
2073 pInput->Names[iReg++] = HvX64RegisterSysenterCs;
2074 pInput->Names[iReg++] = HvX64RegisterSysenterEip;
2075 pInput->Names[iReg++] = HvX64RegisterSysenterEsp;
2076 }
2077 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
2078 {
2079 pInput->Names[iReg++] = HvX64RegisterStar;
2080 pInput->Names[iReg++] = HvX64RegisterLstar;
2081 pInput->Names[iReg++] = HvX64RegisterCstar;
2082 pInput->Names[iReg++] = HvX64RegisterSfmask;
2083 }
2084
2085# ifdef LOG_ENABLED
2086 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM);
2087# endif
2088 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
2089 {
2090 pInput->Names[iReg++] = HvX64RegisterApicBase; /// @todo APIC BASE
2091 pInput->Names[iReg++] = HvX64RegisterPat;
2092# if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
2093 pInput->Names[iReg++] = HvX64RegisterMtrrCap;
2094# endif
2095 pInput->Names[iReg++] = HvX64RegisterMtrrDefType;
2096 pInput->Names[iReg++] = HvX64RegisterMtrrFix64k00000;
2097 pInput->Names[iReg++] = HvX64RegisterMtrrFix16k80000;
2098 pInput->Names[iReg++] = HvX64RegisterMtrrFix16kA0000;
2099 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kC0000;
2100 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kC8000;
2101 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kD0000;
2102 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kD8000;
2103 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kE0000;
2104 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kE8000;
2105 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kF0000;
2106 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kF8000;
2107 pInput->Names[iReg++] = HvX64RegisterTscAux;
2108# if 0 /** @todo why can't we read HvX64RegisterIa32MiscEnable? */
2109 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
2110 pInput->Names[iReg++] = HvX64RegisterIa32MiscEnable;
2111# endif
2112# ifdef LOG_ENABLED
2113 if (enmCpuVendor != CPUMCPUVENDOR_AMD && enmCpuVendor != CPUMCPUVENDOR_HYGON)
2114 pInput->Names[iReg++] = HvX64RegisterIa32FeatureControl;
2115# endif
2116 }
2117
2118 /* Interruptibility. */
2119 if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
2120 {
2121 pInput->Names[iReg++] = HvRegisterInterruptState;
2122 pInput->Names[iReg++] = HvX64RegisterRip;
2123 }
2124
2125 /* Event injection. */
2126 pInput->Names[iReg++] = HvRegisterPendingInterruption;
2127 pInput->Names[iReg++] = HvRegisterPendingEvent0;
2128 pInput->Names[iReg++] = HvRegisterPendingEvent1;
2129 size_t const cRegs = iReg;
2130 size_t const cbInput = RT_ALIGN_Z(RT_UOFFSETOF_DYN(HV_INPUT_GET_VP_REGISTERS, Names[cRegs]), 32);
2131
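/* The hypercall output (one HV_REGISTER_VALUE per requested name) lands on the same page, right after the 32-byte aligned input block. */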
2132 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
2133 Assert((uintptr_t)&paValues[cRegs] - (uintptr_t)pGVCpu->nemr0.s.HypercallData.pbPage < PAGE_SIZE); /* (max is around 168 registers) */
2134 RT_BZERO(paValues, cRegs * sizeof(paValues[0]));
2135
2136 /*
2137 * Make the hypercall.
2138 */
2139 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, cRegs),
2140 pGVCpu->nemr0.s.HypercallData.HCPhysPage,
2141 pGVCpu->nemr0.s.HypercallData.HCPhysPage + cbInput);
2142 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(cRegs),
2143 ("uResult=%RX64 cRegs=%#x\n", uResult, cRegs),
2144 VERR_NEM_GET_REGISTERS_FAILED);
2145 //LogFlow(("nemR0WinImportState: uResult=%#RX64 iReg=%zu fWhat=%#018RX64 fExtr=%#018RX64\n", uResult, cRegs, fWhat, pCtx->fExtrn));
2146
2147 /*
2148 * Copy information to the CPUM context.
2149 */
2150 iReg = 0;
2151
2152 /* GPRs */
2153 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
2154 {
2155 if (fWhat & CPUMCTX_EXTRN_RAX)
2156 {
2157 Assert(pInput->Names[iReg] == HvX64RegisterRax);
2158 pCtx->rax = paValues[iReg++].Reg64;
2159 }
2160 if (fWhat & CPUMCTX_EXTRN_RCX)
2161 {
2162 Assert(pInput->Names[iReg] == HvX64RegisterRcx);
2163 pCtx->rcx = paValues[iReg++].Reg64;
2164 }
2165 if (fWhat & CPUMCTX_EXTRN_RDX)
2166 {
2167 Assert(pInput->Names[iReg] == HvX64RegisterRdx);
2168 pCtx->rdx = paValues[iReg++].Reg64;
2169 }
2170 if (fWhat & CPUMCTX_EXTRN_RBX)
2171 {
2172 Assert(pInput->Names[iReg] == HvX64RegisterRbx);
2173 pCtx->rbx = paValues[iReg++].Reg64;
2174 }
2175 if (fWhat & CPUMCTX_EXTRN_RSP)
2176 {
2177 Assert(pInput->Names[iReg] == HvX64RegisterRsp);
2178 pCtx->rsp = paValues[iReg++].Reg64;
2179 }
2180 if (fWhat & CPUMCTX_EXTRN_RBP)
2181 {
2182 Assert(pInput->Names[iReg] == HvX64RegisterRbp);
2183 pCtx->rbp = paValues[iReg++].Reg64;
2184 }
2185 if (fWhat & CPUMCTX_EXTRN_RSI)
2186 {
2187 Assert(pInput->Names[iReg] == HvX64RegisterRsi);
2188 pCtx->rsi = paValues[iReg++].Reg64;
2189 }
2190 if (fWhat & CPUMCTX_EXTRN_RDI)
2191 {
2192 Assert(pInput->Names[iReg] == HvX64RegisterRdi);
2193 pCtx->rdi = paValues[iReg++].Reg64;
2194 }
2195 if (fWhat & CPUMCTX_EXTRN_R8_R15)
2196 {
2197 Assert(pInput->Names[iReg] == HvX64RegisterR8);
2198 Assert(pInput->Names[iReg + 7] == HvX64RegisterR15);
2199 pCtx->r8 = paValues[iReg++].Reg64;
2200 pCtx->r9 = paValues[iReg++].Reg64;
2201 pCtx->r10 = paValues[iReg++].Reg64;
2202 pCtx->r11 = paValues[iReg++].Reg64;
2203 pCtx->r12 = paValues[iReg++].Reg64;
2204 pCtx->r13 = paValues[iReg++].Reg64;
2205 pCtx->r14 = paValues[iReg++].Reg64;
2206 pCtx->r15 = paValues[iReg++].Reg64;
2207 }
2208 }
2209
2210 /* RIP & Flags */
2211 if (fWhat & CPUMCTX_EXTRN_RIP)
2212 {
2213 Assert(pInput->Names[iReg] == HvX64RegisterRip);
2214 pCtx->rip = paValues[iReg++].Reg64;
2215 }
2216 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
2217 {
2218 Assert(pInput->Names[iReg] == HvX64RegisterRflags);
2219 pCtx->rflags.u = paValues[iReg++].Reg64;
2220 }
2221
2222 /* Segments */
2223# define COPY_BACK_SEG(a_idx, a_enmName, a_SReg) \
2224 do { \
2225 Assert(pInput->Names[a_idx] == a_enmName); \
2226 (a_SReg).u64Base = paValues[a_idx].Segment.Base; \
2227 (a_SReg).u32Limit = paValues[a_idx].Segment.Limit; \
2228 (a_SReg).ValidSel = (a_SReg).Sel = paValues[a_idx].Segment.Selector; \
2229 (a_SReg).Attr.u = paValues[a_idx].Segment.Attributes; \
2230 (a_SReg).fFlags = CPUMSELREG_FLAGS_VALID; \
2231 } while (0)
2232 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
2233 {
2234 if (fWhat & CPUMCTX_EXTRN_CS)
2235 {
2236 COPY_BACK_SEG(iReg, HvX64RegisterCs, pCtx->cs);
2237 iReg++;
2238 }
2239 if (fWhat & CPUMCTX_EXTRN_ES)
2240 {
2241 COPY_BACK_SEG(iReg, HvX64RegisterEs, pCtx->es);
2242 iReg++;
2243 }
2244 if (fWhat & CPUMCTX_EXTRN_SS)
2245 {
2246 COPY_BACK_SEG(iReg, HvX64RegisterSs, pCtx->ss);
2247 iReg++;
2248 }
2249 if (fWhat & CPUMCTX_EXTRN_DS)
2250 {
2251 COPY_BACK_SEG(iReg, HvX64RegisterDs, pCtx->ds);
2252 iReg++;
2253 }
2254 if (fWhat & CPUMCTX_EXTRN_FS)
2255 {
2256 COPY_BACK_SEG(iReg, HvX64RegisterFs, pCtx->fs);
2257 iReg++;
2258 }
2259 if (fWhat & CPUMCTX_EXTRN_GS)
2260 {
2261 COPY_BACK_SEG(iReg, HvX64RegisterGs, pCtx->gs);
2262 iReg++;
2263 }
2264 }
2265 /* Descriptor tables and the task segment. */
2266 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
2267 {
2268 if (fWhat & CPUMCTX_EXTRN_LDTR)
2269 {
2270 COPY_BACK_SEG(iReg, HvX64RegisterLdtr, pCtx->ldtr);
2271 iReg++;
2272 }
2273 if (fWhat & CPUMCTX_EXTRN_TR)
2274 {
2275 /* AMD-V likes loading TR in the AVAIL state, whereas Intel insists on BUSY. So,
2276 to avoid triggering sanity assertions around the code, always fix this up. */
2277 COPY_BACK_SEG(iReg, HvX64RegisterTr, pCtx->tr);
2278 switch (pCtx->tr.Attr.n.u4Type)
2279 {
2280 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2281 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2282 break;
2283 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
2284 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2285 break;
2286 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
2287 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;
2288 break;
2289 }
2290 iReg++;
2291 }
2292 if (fWhat & CPUMCTX_EXTRN_IDTR)
2293 {
2294 Assert(pInput->Names[iReg] == HvX64RegisterIdtr);
2295 pCtx->idtr.cbIdt = paValues[iReg].Table.Limit;
2296 pCtx->idtr.pIdt = paValues[iReg].Table.Base;
2297 iReg++;
2298 }
2299 if (fWhat & CPUMCTX_EXTRN_GDTR)
2300 {
2301 Assert(pInput->Names[iReg] == HvX64RegisterGdtr);
2302 pCtx->gdtr.cbGdt = paValues[iReg].Table.Limit;
2303 pCtx->gdtr.pGdt = paValues[iReg].Table.Base;
2304 iReg++;
2305 }
2306 }
2307
2308 /* Control registers. */
2309 bool fMaybeChangedMode = false;
2310 bool fUpdateCr3 = false;
2311 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
2312 {
2313 if (fWhat & CPUMCTX_EXTRN_CR0)
2314 {
2315 Assert(pInput->Names[iReg] == HvX64RegisterCr0);
2316 if (pCtx->cr0 != paValues[iReg].Reg64)
2317 {
2318 CPUMSetGuestCR0(pGVCpu, paValues[iReg].Reg64);
2319 fMaybeChangedMode = true;
2320 }
2321 iReg++;
2322 }
2323 if (fWhat & CPUMCTX_EXTRN_CR2)
2324 {
2325 Assert(pInput->Names[iReg] == HvX64RegisterCr2);
2326 pCtx->cr2 = paValues[iReg].Reg64;
2327 iReg++;
2328 }
2329 if (fWhat & CPUMCTX_EXTRN_CR3)
2330 {
2331 Assert(pInput->Names[iReg] == HvX64RegisterCr3);
2332 if (pCtx->cr3 != paValues[iReg].Reg64)
2333 {
2334 CPUMSetGuestCR3(pGVCpu, paValues[iReg].Reg64);
2335 fUpdateCr3 = true;
2336 }
2337 iReg++;
2338 }
2339 if (fWhat & CPUMCTX_EXTRN_CR4)
2340 {
2341 Assert(pInput->Names[iReg] == HvX64RegisterCr4);
2342 if (pCtx->cr4 != paValues[iReg].Reg64)
2343 {
2344 CPUMSetGuestCR4(pGVCpu, paValues[iReg].Reg64);
2345 fMaybeChangedMode = true;
2346 }
2347 iReg++;
2348 }
2349 }
2350 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
2351 {
2352 Assert(pInput->Names[iReg] == HvX64RegisterCr8);
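/* CR8 holds TPR bits 7:4, so shift it up to get the full 8-bit APIC TPR value. */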
2353 APICSetTpr(pGVCpu, (uint8_t)paValues[iReg].Reg64 << 4);
2354 iReg++;
2355 }
2356
2357 /* Debug registers. */
2358 if (fWhat & CPUMCTX_EXTRN_DR7)
2359 {
2360 Assert(pInput->Names[iReg] == HvX64RegisterDr7);
2361 if (pCtx->dr[7] != paValues[iReg].Reg64)
2362 CPUMSetGuestDR7(pGVCpu, paValues[iReg].Reg64);
2363 pCtx->fExtrn &= ~CPUMCTX_EXTRN_DR7; /* Hack alert! Avoids asserting when processing CPUMCTX_EXTRN_DR0_DR3. */
2364 iReg++;
2365 }
2366 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
2367 {
2368 Assert(pInput->Names[iReg] == HvX64RegisterDr0);
2369 Assert(pInput->Names[iReg+3] == HvX64RegisterDr3);
2370 if (pCtx->dr[0] != paValues[iReg].Reg64)
2371 CPUMSetGuestDR0(pGVCpu, paValues[iReg].Reg64);
2372 iReg++;
2373 if (pCtx->dr[1] != paValues[iReg].Reg64)
2374 CPUMSetGuestDR1(pGVCpu, paValues[iReg].Reg64);
2375 iReg++;
2376 if (pCtx->dr[2] != paValues[iReg].Reg64)
2377 CPUMSetGuestDR2(pGVCpu, paValues[iReg].Reg64);
2378 iReg++;
2379 if (pCtx->dr[3] != paValues[iReg].Reg64)
2380 CPUMSetGuestDR3(pGVCpu, paValues[iReg].Reg64);
2381 iReg++;
2382 }
2383 if (fWhat & CPUMCTX_EXTRN_DR6)
2384 {
2385 Assert(pInput->Names[iReg] == HvX64RegisterDr6);
2386 if (pCtx->dr[6] != paValues[iReg].Reg64)
2387 CPUMSetGuestDR6(pGVCpu, paValues[iReg].Reg64);
2388 iReg++;
2389 }
2390
2391 /* Floating point state. */
2392 if (fWhat & CPUMCTX_EXTRN_X87)
2393 {
2394 Assert(pInput->Names[iReg] == HvX64RegisterFpMmx0);
2395 Assert(pInput->Names[iReg + 7] == HvX64RegisterFpMmx7);
2396 pCtx->XState.x87.aRegs[0].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
2397 pCtx->XState.x87.aRegs[0].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
2398 iReg++;
2399 pCtx->XState.x87.aRegs[1].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
2400 pCtx->XState.x87.aRegs[1].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
2401 iReg++;
2402 pCtx->XState.x87.aRegs[2].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
2403 pCtx->XState.x87.aRegs[2].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
2404 iReg++;
2405 pCtx->XState.x87.aRegs[3].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
2406 pCtx->XState.x87.aRegs[3].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
2407 iReg++;
2408 pCtx->XState.x87.aRegs[4].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
2409 pCtx->XState.x87.aRegs[4].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
2410 iReg++;
2411 pCtx->XState.x87.aRegs[5].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
2412 pCtx->XState.x87.aRegs[5].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
2413 iReg++;
2414 pCtx->XState.x87.aRegs[6].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
2415 pCtx->XState.x87.aRegs[6].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
2416 iReg++;
2417 pCtx->XState.x87.aRegs[7].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
2418 pCtx->XState.x87.aRegs[7].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
2419 iReg++;
2420
2421 Assert(pInput->Names[iReg] == HvX64RegisterFpControlStatus);
2422 pCtx->XState.x87.FCW = paValues[iReg].FpControlStatus.FpControl;
2423 pCtx->XState.x87.FSW = paValues[iReg].FpControlStatus.FpStatus;
2424 pCtx->XState.x87.FTW = paValues[iReg].FpControlStatus.FpTag
2425 /*| (paValues[iReg].FpControlStatus.Reserved << 8)*/;
2426 pCtx->XState.x87.FOP = paValues[iReg].FpControlStatus.LastFpOp;
2427 pCtx->XState.x87.FPUIP = (uint32_t)paValues[iReg].FpControlStatus.LastFpRip;
2428 pCtx->XState.x87.CS = (uint16_t)(paValues[iReg].FpControlStatus.LastFpRip >> 32);
2429 pCtx->XState.x87.Rsrvd1 = (uint16_t)(paValues[iReg].FpControlStatus.LastFpRip >> 48);
2430 iReg++;
2431 }
2432
2433 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
2434 {
2435 Assert(pInput->Names[iReg] == HvX64RegisterXmmControlStatus);
2436 if (fWhat & CPUMCTX_EXTRN_X87)
2437 {
2438 pCtx->XState.x87.FPUDP = (uint32_t)paValues[iReg].XmmControlStatus.LastFpRdp;
2439 pCtx->XState.x87.DS = (uint16_t)(paValues[iReg].XmmControlStatus.LastFpRdp >> 32);
2440 pCtx->XState.x87.Rsrvd2 = (uint16_t)(paValues[iReg].XmmControlStatus.LastFpRdp >> 48);
2441 }
2442 pCtx->XState.x87.MXCSR = paValues[iReg].XmmControlStatus.XmmStatusControl;
2443 pCtx->XState.x87.MXCSR_MASK = paValues[iReg].XmmControlStatus.XmmStatusControlMask; /** @todo ??? (Isn't this an output field?) */
2444 iReg++;
2445 }
2446
2447 /* Vector state. */
2448 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
2449 {
2450 Assert(pInput->Names[iReg] == HvX64RegisterXmm0);
2451 Assert(pInput->Names[iReg+15] == HvX64RegisterXmm15);
2452 pCtx->XState.x87.aXMM[0].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2453 pCtx->XState.x87.aXMM[0].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2454 iReg++;
2455 pCtx->XState.x87.aXMM[1].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2456 pCtx->XState.x87.aXMM[1].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2457 iReg++;
2458 pCtx->XState.x87.aXMM[2].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2459 pCtx->XState.x87.aXMM[2].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2460 iReg++;
2461 pCtx->XState.x87.aXMM[3].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2462 pCtx->XState.x87.aXMM[3].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2463 iReg++;
2464 pCtx->XState.x87.aXMM[4].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2465 pCtx->XState.x87.aXMM[4].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2466 iReg++;
2467 pCtx->XState.x87.aXMM[5].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2468 pCtx->XState.x87.aXMM[5].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2469 iReg++;
2470 pCtx->XState.x87.aXMM[6].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2471 pCtx->XState.x87.aXMM[6].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2472 iReg++;
2473 pCtx->XState.x87.aXMM[7].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2474 pCtx->XState.x87.aXMM[7].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2475 iReg++;
2476 pCtx->XState.x87.aXMM[8].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2477 pCtx->XState.x87.aXMM[8].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2478 iReg++;
2479 pCtx->XState.x87.aXMM[9].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2480 pCtx->XState.x87.aXMM[9].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2481 iReg++;
2482 pCtx->XState.x87.aXMM[10].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2483 pCtx->XState.x87.aXMM[10].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2484 iReg++;
2485 pCtx->XState.x87.aXMM[11].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2486 pCtx->XState.x87.aXMM[11].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2487 iReg++;
2488 pCtx->XState.x87.aXMM[12].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2489 pCtx->XState.x87.aXMM[12].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2490 iReg++;
2491 pCtx->XState.x87.aXMM[13].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2492 pCtx->XState.x87.aXMM[13].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2493 iReg++;
2494 pCtx->XState.x87.aXMM[14].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2495 pCtx->XState.x87.aXMM[14].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2496 iReg++;
2497 pCtx->XState.x87.aXMM[15].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2498 pCtx->XState.x87.aXMM[15].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2499 iReg++;
2500 }
2501
2502
2503 /* MSRs */
2504 // HvX64RegisterTsc - don't touch
2505 if (fWhat & CPUMCTX_EXTRN_EFER)
2506 {
2507 Assert(pInput->Names[iReg] == HvX64RegisterEfer);
2508 if (paValues[iReg].Reg64 != pCtx->msrEFER)
2509 {
2510 Log7(("NEM/%u: MSR EFER changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrEFER, paValues[iReg].Reg64));
2511 if ((paValues[iReg].Reg64 ^ pCtx->msrEFER) & MSR_K6_EFER_NXE)
2512 PGMNotifyNxeChanged(pGVCpu, RT_BOOL(paValues[iReg].Reg64 & MSR_K6_EFER_NXE));
2513 pCtx->msrEFER = paValues[iReg].Reg64;
2514 fMaybeChangedMode = true;
2515 }
2516 iReg++;
2517 }
2518 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
2519 {
2520 Assert(pInput->Names[iReg] == HvX64RegisterKernelGsBase);
2521 if (pCtx->msrKERNELGSBASE != paValues[iReg].Reg64)
2522 Log7(("NEM/%u: MSR KERNELGSBASE changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrKERNELGSBASE, paValues[iReg].Reg64));
2523 pCtx->msrKERNELGSBASE = paValues[iReg].Reg64;
2524 iReg++;
2525 }
2526 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
2527 {
2528 Assert(pInput->Names[iReg] == HvX64RegisterSysenterCs);
2529 if (pCtx->SysEnter.cs != paValues[iReg].Reg64)
2530 Log7(("NEM/%u: MSR SYSENTER.CS changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->SysEnter.cs, paValues[iReg].Reg64));
2531 pCtx->SysEnter.cs = paValues[iReg].Reg64;
2532 iReg++;
2533
2534 Assert(pInput->Names[iReg] == HvX64RegisterSysenterEip);
2535 if (pCtx->SysEnter.eip != paValues[iReg].Reg64)
2536 Log7(("NEM/%u: MSR SYSENTER.EIP changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->SysEnter.eip, paValues[iReg].Reg64));
2537 pCtx->SysEnter.eip = paValues[iReg].Reg64;
2538 iReg++;
2539
2540 Assert(pInput->Names[iReg] == HvX64RegisterSysenterEsp);
2541 if (pCtx->SysEnter.esp != paValues[iReg].Reg64)
2542 Log7(("NEM/%u: MSR SYSENTER.ESP changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->SysEnter.esp, paValues[iReg].Reg64));
2543 pCtx->SysEnter.esp = paValues[iReg].Reg64;
2544 iReg++;
2545 }
2546 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
2547 {
2548 Assert(pInput->Names[iReg] == HvX64RegisterStar);
2549 if (pCtx->msrSTAR != paValues[iReg].Reg64)
2550 Log7(("NEM/%u: MSR STAR changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrSTAR, paValues[iReg].Reg64));
2551 pCtx->msrSTAR = paValues[iReg].Reg64;
2552 iReg++;
2553
2554 Assert(pInput->Names[iReg] == HvX64RegisterLstar);
2555 if (pCtx->msrLSTAR != paValues[iReg].Reg64)
2556 Log7(("NEM/%u: MSR LSTAR changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrLSTAR, paValues[iReg].Reg64));
2557 pCtx->msrLSTAR = paValues[iReg].Reg64;
2558 iReg++;
2559
2560 Assert(pInput->Names[iReg] == HvX64RegisterCstar);
2561 if (pCtx->msrCSTAR != paValues[iReg].Reg64)
2562 Log7(("NEM/%u: MSR CSTAR changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrCSTAR, paValues[iReg].Reg64));
2563 pCtx->msrCSTAR = paValues[iReg].Reg64;
2564 iReg++;
2565
2566 Assert(pInput->Names[iReg] == HvX64RegisterSfmask);
2567 if (pCtx->msrSFMASK != paValues[iReg].Reg64)
2568 Log7(("NEM/%u: MSR SFMASK changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrSFMASK, paValues[iReg].Reg64));
2569 pCtx->msrSFMASK = paValues[iReg].Reg64;
2570 iReg++;
2571 }
2572 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
2573 {
2574 Assert(pInput->Names[iReg] == HvX64RegisterApicBase);
2575 const uint64_t uOldBase = APICGetBaseMsrNoCheck(pGVCpu);
2576 if (paValues[iReg].Reg64 != uOldBase)
2577 {
2578 Log7(("NEM/%u: MSR APICBase changed %RX64 -> %RX64 (%RX64)\n",
2579 pGVCpu->idCpu, uOldBase, paValues[iReg].Reg64, paValues[iReg].Reg64 ^ uOldBase));
2580 int rc2 = APICSetBaseMsr(pGVCpu, paValues[iReg].Reg64);
2581 AssertLogRelMsg(rc2 == VINF_SUCCESS, ("rc2=%Rrc [%#RX64]\n", rc2, paValues[iReg].Reg64));
2582 }
2583 iReg++;
2584
2585 Assert(pInput->Names[iReg] == HvX64RegisterPat);
2586 if (pCtx->msrPAT != paValues[iReg].Reg64)
2587 Log7(("NEM/%u: MSR PAT changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrPAT, paValues[iReg].Reg64));
2588 pCtx->msrPAT = paValues[iReg].Reg64;
2589 iReg++;
2590
2591# if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
2592 Assert(pInput->Names[iReg] == HvX64RegisterMtrrCap);
2593 if (paValues[iReg].Reg64 != CPUMGetGuestIa32MtrrCap(pGVCpu))
2594 Log7(("NEM/%u: MSR MTRR_CAP changed %RX64 -> %RX64 (!!)\n", pGVCpu->idCpu, CPUMGetGuestIa32MtrrCap(pGVCpu), paValues[iReg].Reg64));
2595 iReg++;
2596# endif
2597
2598 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pGVCpu);
2599 Assert(pInput->Names[iReg] == HvX64RegisterMtrrDefType);
2600 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrDefType )
2601 Log7(("NEM/%u: MSR MTRR_DEF_TYPE changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrDefType, paValues[iReg].Reg64));
2602 pCtxMsrs->msr.MtrrDefType = paValues[iReg].Reg64;
2603 iReg++;
2604
2605 /** @todo we don't keep state for HvX64RegisterMtrrPhysBaseX and HvX64RegisterMtrrPhysMaskX. */
2606
2607 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix64k00000);
2608 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix64K_00000 )
2609 Log7(("NEM/%u: MSR MTRR_FIX64K_00000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix64K_00000, paValues[iReg].Reg64));
2610 pCtxMsrs->msr.MtrrFix64K_00000 = paValues[iReg].Reg64;
2611 iReg++;
2612
2613 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix16k80000);
2614 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix16K_80000 )
2615 Log7(("NEM/%u: MSR MTRR_FIX16K_80000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_80000, paValues[iReg].Reg64));
2616 pCtxMsrs->msr.MtrrFix16K_80000 = paValues[iReg].Reg64;
2617 iReg++;
2618
2619 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix16kA0000);
2620 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix16K_A0000 )
2621 Log7(("NEM/%u: MSR MTRR_FIX16K_A0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_A0000, paValues[iReg].Reg64));
2622 pCtxMsrs->msr.MtrrFix16K_A0000 = paValues[iReg].Reg64;
2623 iReg++;
2624
2625 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kC0000);
2626 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_C0000 )
2627 Log7(("NEM/%u: MSR MTRR_FIX4K_C0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C0000, paValues[iReg].Reg64));
2628 pCtxMsrs->msr.MtrrFix4K_C0000 = paValues[iReg].Reg64;
2629 iReg++;
2630
2631 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kC8000);
2632 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_C8000 )
2633 Log7(("NEM/%u: MSR MTRR_FIX4K_C8000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C8000, paValues[iReg].Reg64));
2634 pCtxMsrs->msr.MtrrFix4K_C8000 = paValues[iReg].Reg64;
2635 iReg++;
2636
2637 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kD0000);
2638 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_D0000 )
2639 Log7(("NEM/%u: MSR MTRR_FIX4K_D0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D0000, paValues[iReg].Reg64));
2640 pCtxMsrs->msr.MtrrFix4K_D0000 = paValues[iReg].Reg64;
2641 iReg++;
2642
2643 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kD8000);
2644 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_D8000 )
2645 Log7(("NEM/%u: MSR MTRR_FIX4K_D8000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D8000, paValues[iReg].Reg64));
2646 pCtxMsrs->msr.MtrrFix4K_D8000 = paValues[iReg].Reg64;
2647 iReg++;
2648
2649 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kE0000);
2650 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_E0000 )
2651 Log7(("NEM/%u: MSR MTRR_FIX4K_E0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E0000, paValues[iReg].Reg64));
2652 pCtxMsrs->msr.MtrrFix4K_E0000 = paValues[iReg].Reg64;
2653 iReg++;
2654
2655 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kE8000);
2656 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_E8000 )
2657 Log7(("NEM/%u: MSR MTRR_FIX4K_E8000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E8000, paValues[iReg].Reg64));
2658 pCtxMsrs->msr.MtrrFix4K_E8000 = paValues[iReg].Reg64;
2659 iReg++;
2660
2661 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kF0000);
2662 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_F0000 )
2663 Log7(("NEM/%u: MSR MTRR_FIX4K_F0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F0000, paValues[iReg].Reg64));
2664 pCtxMsrs->msr.MtrrFix4K_F0000 = paValues[iReg].Reg64;
2665 iReg++;
2666
2667 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kF8000);
2668 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_F8000 )
2669 Log7(("NEM/%u: MSR MTRR_FIX4K_F8000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F8000, paValues[iReg].Reg64));
2670 pCtxMsrs->msr.MtrrFix4K_F8000 = paValues[iReg].Reg64;
2671 iReg++;
2672
2673 Assert(pInput->Names[iReg] == HvX64RegisterTscAux);
2674 if (paValues[iReg].Reg64 != pCtxMsrs->msr.TscAux )
2675 Log7(("NEM/%u: MSR TSC_AUX changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.TscAux, paValues[iReg].Reg64));
2676 pCtxMsrs->msr.TscAux = paValues[iReg].Reg64;
2677 iReg++;
2678
2679# if 0 /** @todo why can't we even read HvX64RegisterIa32MiscEnable? */
2680 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
2681 {
2682 Assert(pInput->Names[iReg] == HvX64RegisterIa32MiscEnable);
2683 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MiscEnable)
2684 Log7(("NEM/%u: MSR MISC_ENABLE changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MiscEnable, paValues[iReg].Reg64));
2685 pCtxMsrs->msr.MiscEnable = paValues[iReg].Reg64;
2686 iReg++;
2687 }
2688# endif
2689# ifdef LOG_ENABLED
2690 if (enmCpuVendor != CPUMCPUVENDOR_AMD && enmCpuVendor != CPUMCPUVENDOR_HYGON)
2691 {
2692 Assert(pInput->Names[iReg] == HvX64RegisterIa32FeatureControl);
2693 uint64_t const uFeatCtrl = CPUMGetGuestIa32FeatCtrl(pGVCpu);
2694 if (paValues[iReg].Reg64 != uFeatCtrl)
2695 Log7(("NEM/%u: MSR FEATURE_CONTROL changed %RX64 -> %RX64 (!!)\n", pGVCpu->idCpu, uFeatCtrl, paValues[iReg].Reg64));
2696 iReg++;
2697 }
2698# endif
2699 }
2700
2701 /* Interruptibility. */
2702 if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
2703 {
2704 Assert(pInput->Names[iReg] == HvRegisterInterruptState);
2705 Assert(pInput->Names[iReg + 1] == HvX64RegisterRip);
2706
2707 if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
2708 {
2709 pGVCpu->nem.s.fLastInterruptShadow = paValues[iReg].InterruptState.InterruptShadow;
2710 if (paValues[iReg].InterruptState.InterruptShadow)
2711 EMSetInhibitInterruptsPC(pGVCpu, paValues[iReg + 1].Reg64);
2712 else
2713 VMCPU_FF_CLEAR(pGVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2714 }
2715
2716 if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
2717 {
2718 if (paValues[iReg].InterruptState.NmiMasked)
2719 VMCPU_FF_SET(pGVCpu, VMCPU_FF_BLOCK_NMIS);
2720 else
2721 VMCPU_FF_CLEAR(pGVCpu, VMCPU_FF_BLOCK_NMIS);
2722 }
2723
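/* Both inhibit bits were fetched above, so mark both as imported even if only one was requested. */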
2724 fWhat |= CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI;
2725 iReg += 2;
2726 }
2727
2728 /* Event injection. */
2729 /// @todo HvRegisterPendingInterruption
2730 Assert(pInput->Names[iReg] == HvRegisterPendingInterruption);
2731 if (paValues[iReg].PendingInterruption.InterruptionPending)
2732 {
2733 Log7(("PendingInterruption: type=%u vector=%#x errcd=%RTbool/%#x instr-len=%u nested=%u\n",
2734 paValues[iReg].PendingInterruption.InterruptionType, paValues[iReg].PendingInterruption.InterruptionVector,
2735 paValues[iReg].PendingInterruption.DeliverErrorCode, paValues[iReg].PendingInterruption.ErrorCode,
2736 paValues[iReg].PendingInterruption.InstructionLength, paValues[iReg].PendingInterruption.NestedEvent));
2737 AssertMsg((paValues[iReg].PendingInterruption.AsUINT64 & UINT64_C(0xfc00)) == 0,
2738 ("%#RX64\n", paValues[iReg].PendingInterruption.AsUINT64));
2739 }
2740
2741 /// @todo HvRegisterPendingEvent0
2742 /// @todo HvRegisterPendingEvent1
2743
2744 /* Almost done, just update extrn flags and maybe change PGM mode. */
2745 pCtx->fExtrn &= ~fWhat;
2746 if (!(pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT))))
2747 pCtx->fExtrn = 0;
2748
2749 /* Typical. */
2750 if (!fMaybeChangedMode && !fUpdateCr3)
2751 return VINF_SUCCESS;
2752
2753 /*
2754 * Slow.
2755 */
2756 int rc = VINF_SUCCESS;
2757 if (fMaybeChangedMode)
2758 {
2759 rc = PGMChangeMode(pGVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
2760 AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_1);
2761 }
2762
2763 if (fUpdateCr3)
2764 {
2765 if (fCanUpdateCr3)
2766 {
2767 LogFlow(("nemR0WinImportState: -> PGMUpdateCR3!\n"));
2768 rc = PGMUpdateCR3(pGVCpu, pCtx->cr3, false /*fPdpesMapped*/);
2769 if (rc == VINF_SUCCESS)
2770 { /* likely */ }
2771 else
2772 AssertMsgFailedReturn(("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_2);
2773 }
2774 else
2775 {
2776 LogFlow(("nemR0WinImportState: -> VERR_NEM_FLUSH_TLB!\n"));
2777 rc = VERR_NEM_FLUSH_TLB; /* Calling PGMFlushTLB w/o long jump setup doesn't work, ring-3 does it. */
2778 }
2779 }
2780
2781 return rc;
2782}
2783#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
2784
2785
2786/**
2787 * Import the state from the native API (back to CPUMCTX).
2788 *
2789 * @returns VBox status code
2790 * @param pGVM The ring-0 VM handle.
2791 * @param idCpu The calling EMT. Necessary for getting the
2792 * hypercall page and arguments.
2793 * @param fWhat What to import, CPUMCTX_EXTRN_XXX. Set
2794 * CPUMCTX_EXTRN_ALL for everything.
2795 */
2796VMMR0_INT_DECL(int) NEMR0ImportState(PGVM pGVM, VMCPUID idCpu, uint64_t fWhat)
2797{
2798#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2799 /*
2800 * Validate the call.
2801 */
2802 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
2803 if (RT_SUCCESS(rc))
2804 {
2805 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2806 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
2807
2808 /*
2809 * Call worker.
2810 */
2811 rc = nemR0WinImportState(pGVM, pGVCpu, &pGVCpu->cpum.GstCtx, fWhat, false /*fCanUpdateCr3*/);
2812 }
2813 return rc;
2814#else
2815 RT_NOREF(pGVM, idCpu, fWhat);
2816 return VERR_NOT_IMPLEMENTED;
2817#endif
2818}
2819
2820
2821#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2822/**
2823 * Worker for NEMR0QueryCpuTick and the ring-0 NEMHCQueryCpuTick.
2824 *
2825 * @returns VBox status code.
2826 * @param pGVM The ring-0 VM handle.
2827 * @param pGVCpu The ring-0 VCPU handle.
2828 * @param pcTicks Where to return the current CPU tick count.
2829 * @param pcAux Where to return the Hyper-V TSC_AUX value. Optional.
2830 */
2831NEM_TMPL_STATIC int nemR0WinQueryCpuTick(PGVM pGVM, PGVMCPU pGVCpu, uint64_t *pcTicks, uint32_t *pcAux)
2832{
2833 /*
2834 * Hypercall parameters.
2835 */
2836 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
2837 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2838 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
2839
2840 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
2841 pInput->VpIndex = pGVCpu->idCpu;
2842 pInput->fFlags = 0;
2843 pInput->Names[0] = HvX64RegisterTsc;
2844 pInput->Names[1] = HvX64RegisterTscAux;
2845
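    /* The input block is placed at the start of the pre-allocated hypercall page and
       the output values follow at a 32-byte aligned offset in the same page; the two
       physical addresses passed to the hypercall below point at these two areas. */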
2846 size_t const cbInput = RT_ALIGN_Z(RT_UOFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[2]), 32);
2847 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
2848 RT_BZERO(paValues, sizeof(paValues[0]) * 2);
2849
2850 /*
2851 * Make the hypercall.
2852 */
2853 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, 2),
2854 pGVCpu->nemr0.s.HypercallData.HCPhysPage,
2855 pGVCpu->nemr0.s.HypercallData.HCPhysPage + cbInput);
2856 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(2), ("uResult=%RX64 cRegs=%#x\n", uResult, 2),
2857 VERR_NEM_GET_REGISTERS_FAILED);
2858
2859 /*
2860 * Get results.
2861 */
2862 *pcTicks = paValues[0].Reg64;
2863 if (pcAux)
2864 *pcAux = paValues[1].Reg32; /* TSC_AUX is the second register requested above (HvX64RegisterTscAux). */
2865 return VINF_SUCCESS;
2866}
2867#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
2868
2869
2870/**
2871 * Queries the TSC and TSC_AUX values, putting the results in the calling vCPU's nem.s.Hypercall.QueryCpuTick fields for the caller to pick up.
2872 *
2873 * @returns VBox status code
2874 * @param pGVM The ring-0 VM handle.
2875 * @param idCpu The calling EMT. Necessary for getting the
2876 * hypercall page and arguments.
2877 */
2878VMMR0_INT_DECL(int) NEMR0QueryCpuTick(PGVM pGVM, VMCPUID idCpu)
2879{
2880#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2881 /*
2882 * Validate the call.
2883 */
2884 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
2885 if (RT_SUCCESS(rc))
2886 {
2887 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2888 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
2889
2890 /*
2891 * Call worker.
2892 */
2893 pGVCpu->nem.s.Hypercall.QueryCpuTick.cTicks = 0;
2894 pGVCpu->nem.s.Hypercall.QueryCpuTick.uAux = 0;
2895 rc = nemR0WinQueryCpuTick(pGVM, pGVCpu, &pGVCpu->nem.s.Hypercall.QueryCpuTick.cTicks,
2896 &pGVCpu->nem.s.Hypercall.QueryCpuTick.uAux);
2897 }
2898 return rc;
2899#else
2900 RT_NOREF(pGVM, idCpu);
2901 return VERR_NOT_IMPLEMENTED;
2902#endif
2903}
2904
2905
2906#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2907/**
2908 * Worker for NEMR0ResumeCpuTickOnAll and the ring-0 NEMHCResumeCpuTickOnAll.
2909 *
2910 * @returns VBox status code.
2911 * @param pGVM The ring-0 VM handle.
2912 * @param pGVCpu The ring-0 VCPU handle.
2913 * @param uPausedTscValue The TSC value at the time of pausing.
2914 */
2915NEM_TMPL_STATIC int nemR0WinResumeCpuTickOnAll(PGVM pGVM, PGVMCPU pGVCpu, uint64_t uPausedTscValue)
2916{
2917 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
2918
2919 /*
2920 * Set up the hypercall parameters.
2921 */
2922 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
2923 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2924
2925 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
2926 pInput->VpIndex = 0;
2927 pInput->RsvdZ = 0;
2928 pInput->Elements[0].Name = HvX64RegisterTsc;
2929 pInput->Elements[0].Pad0 = 0;
2930 pInput->Elements[0].Pad1 = 0;
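    /* Reg64 aliases the low half of Reg128 in the HV_REGISTER_VALUE union, which is
       why the high half is zeroed explicitly before setting the 64-bit TSC value. */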
2931 pInput->Elements[0].Value.Reg128.High64 = 0;
2932 pInput->Elements[0].Value.Reg64 = uPausedTscValue;
2933
2934 /*
2935 * Disable interrupts and do the first virtual CPU.
2936 */
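    /* Keeping interrupts disabled here is presumably meant to keep the reference TSC
       read and the per-vCPU updates below as close together as possible, so that the
       computed deltas stay accurate. */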
2937 RTCCINTREG const fSavedFlags = ASMIntDisableFlags();
2938 uint64_t const uFirstTsc = ASMReadTSC();
2939 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
2940 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0 /* no output */);
2941 AssertLogRelMsgReturnStmt(uResult == HV_MAKE_CALL_REP_RET(1), ("uResult=%RX64 uTsc=%#RX64\n", uResult, uPausedTscValue),
2942 ASMSetFlags(fSavedFlags), VERR_NEM_SET_TSC);
2943
2944 /*
2945 * Do the secondary processors, adjusting for the elapsed TSC and keeping our fingers crossed
2946 * that we don't introduce too much drift here.
2947 */
2948 for (VMCPUID iCpu = 1; iCpu < pGVM->cCpus; iCpu++)
2949 {
2950 Assert(pInput->PartitionId == pGVM->nemr0.s.idHvPartition);
2951 Assert(pInput->RsvdZ == 0);
2952 Assert(pInput->Elements[0].Name == HvX64RegisterTsc);
2953 Assert(pInput->Elements[0].Pad0 == 0);
2954 Assert(pInput->Elements[0].Pad1 == 0);
2955 Assert(pInput->Elements[0].Value.Reg128.High64 == 0);
2956
2957 pInput->VpIndex = iCpu;
2958 const uint64_t offDelta = (ASMReadTSC() - uFirstTsc);
2959 pInput->Elements[0].Value.Reg64 = uPausedTscValue + offDelta;
2960
2961 uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
2962 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0 /* no output */);
2963 AssertLogRelMsgReturnStmt(uResult == HV_MAKE_CALL_REP_RET(1),
2964 ("uResult=%RX64 uTsc=%#RX64 + %#RX64\n", uResult, uPausedTscValue, offDelta),
2965 ASMSetFlags(fSavedFlags), VERR_NEM_SET_TSC);
2966 }
2967
2968 /*
2969 * Done.
2970 */
2971 ASMSetFlags(fSavedFlags);
2972 return VINF_SUCCESS;
2973}
2974#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
2975
2976
2977/**
2978 * Sets the TSC register on all virtual CPUs, using @a uPausedTscValue as the base value and compensating for the host TSC ticks that elapse between the individual updates.
2979 *
2980 * @returns VBox status code
2981 * @param pGVM The ring-0 VM handle.
2982 * @param idCpu The calling EMT. Necessary for getting the
2983 * hypercall page and arguments.
2984 * @param uPausedTscValue The TSC value at the time of pausing.
2985 */
2986VMMR0_INT_DECL(int) NEMR0ResumeCpuTickOnAll(PGVM pGVM, VMCPUID idCpu, uint64_t uPausedTscValue)
2987{
2988#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2989 /*
2990 * Validate the call.
2991 */
2992 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
2993 if (RT_SUCCESS(rc))
2994 {
2995 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2996 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
2997
2998 /*
2999 * Call worker.
3000 */
3001 pGVCpu->nem.s.Hypercall.QueryCpuTick.cTicks = 0;
3002 pGVCpu->nem.s.Hypercall.QueryCpuTick.uAux = 0;
3003 rc = nemR0WinResumeCpuTickOnAll(pGVM, pGVCpu, uPausedTscValue);
3004 }
3005 return rc;
3006#else
3007 RT_NOREF(pGVM, idCpu, uPausedTscValue);
3008 return VERR_NOT_IMPLEMENTED;
3009#endif
3010}
3011
3012
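/**
 * Runs guest code via the ring-0 runloop, provided it is enabled for this VM.
 *
 * @returns Strict VBox status code; VERR_NEM_RING3_ONLY when the ring-0 runloop
 *          may not be used.
 * @param   pGVM            The ring-0 VM handle.
 * @param   idCpu           The calling EMT.
 */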
3013VMMR0_INT_DECL(VBOXSTRICTRC) NEMR0RunGuestCode(PGVM pGVM, VMCPUID idCpu)
3014{
3015#ifdef NEM_WIN_WITH_RING0_RUNLOOP
3016 if (pGVM->nemr0.s.fMayUseRing0Runloop)
3017 return nemHCWinRunGC(pGVM, &pGVM->aCpus[idCpu]);
3018 return VERR_NEM_RING3_ONLY;
3019#else
3020 RT_NOREF(pGVM, idCpu);
3021 return VERR_NOT_IMPLEMENTED;
3022#endif
3023}
3024
3025
3026/**
3027 * Updates statistics in the VM structure.
3028 *
3029 * @returns VBox status code.
3030 * @param pGVM The ring-0 VM handle.
3031 * @param idCpu The calling EMT, or NIL. Necessary for getting the hypercall
3032 * page and arguments.
3033 */
3034VMMR0_INT_DECL(int) NEMR0UpdateStatistics(PGVM pGVM, VMCPUID idCpu)
3035{
3036#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
3037 /*
3038 * Validate the call.
3039 */
3040 int rc;
3041 if (idCpu == NIL_VMCPUID)
3042 rc = GVMMR0ValidateGVM(pGVM);
3043 else
3044 rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
3045 if (RT_SUCCESS(rc))
3046 {
3047 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
3048
3049 PNEMR0HYPERCALLDATA pHypercallData = idCpu != NIL_VMCPUID
3050 ? &pGVM->aCpus[idCpu].nemr0.s.HypercallData
3051 : &pGVM->nemr0.s.HypercallData;
3052 if ( RT_VALID_PTR(pHypercallData->pbPage)
3053 && pHypercallData->HCPhysPage != NIL_RTHCPHYS)
3054 {
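            /* The VM-wide hypercall page may be used concurrently, so access to it is
               serialized; the per-vCPU page is only used by its own EMT and needs no lock. */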
3055 if (idCpu == NIL_VMCPUID)
3056 rc = RTCritSectEnter(&pGVM->nemr0.s.HypercallDataCritSect);
3057 if (RT_SUCCESS(rc))
3058 {
3059 /*
3060 * Query the memory statistics for the partition.
3061 */
3062 HV_INPUT_GET_MEMORY_BALANCE *pInput = (HV_INPUT_GET_MEMORY_BALANCE *)pHypercallData->pbPage;
3063 pInput->TargetPartitionId = pGVM->nemr0.s.idHvPartition;
3064 pInput->ProximityDomainInfo.Flags.ProximityPreferred = 0;
3065 pInput->ProximityDomainInfo.Flags.ProxyimityInfoValid = 0;
3066 pInput->ProximityDomainInfo.Flags.Reserved = 0;
3067 pInput->ProximityDomainInfo.Id = 0;
3068
3069 HV_OUTPUT_GET_MEMORY_BALANCE *pOutput = (HV_OUTPUT_GET_MEMORY_BALANCE *)(pInput + 1);
3070 RT_ZERO(*pOutput);
3071
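            /* HvCallGetMemoryBalance is a simple (non-repeating) hypercall, so the raw call
               code serves as the call info and the output area simply follows the input in
               the same hypercall page. */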
3072 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallGetMemoryBalance,
3073 pHypercallData->HCPhysPage,
3074 pHypercallData->HCPhysPage + sizeof(*pInput));
3075 if (uResult == HV_STATUS_SUCCESS)
3076 {
3077 pGVM->nem.s.R0Stats.cPagesAvailable = pOutput->PagesAvailable;
3078 pGVM->nem.s.R0Stats.cPagesInUse = pOutput->PagesInUse;
3079 rc = VINF_SUCCESS;
3080 }
3081 else
3082 {
3083 LogRel(("HvCallGetMemoryBalance -> %#RX64 (%#RX64 %#RX64)!!\n",
3084 uResult, pOutput->PagesAvailable, pOutput->PagesInUse));
3085 rc = VERR_NEM_IPE_0;
3086 }
3087
3088 if (idCpu == NIL_VMCPUID)
3089 RTCritSectLeave(&pGVM->nemr0.s.HypercallDataCritSect);
3090 }
3091 }
3092 else
3093 rc = VERR_WRONG_ORDER;
3094 }
3095 return rc;
3096#else
3097 RT_NOREF(pGVM, idCpu);
3098 return VINF_SUCCESS;
3099#endif
3100}
3101
3102
3103/**
3104 * Debug-only interface for poking around in and exploring Hyper-V stuff.
3105 *
3106 * @param pGVM The ring-0 VM handle.
3107 * @param idCpu The calling EMT.
3108 * @param u64Arg What to do: 0 = query VP register, 1 = query partition property, 2 = set VP register.
3109 */
3110VMMR0_INT_DECL(int) NEMR0DoExperiment(PGVM pGVM, VMCPUID idCpu, uint64_t u64Arg)
3111{
3112#if defined(DEBUG_bird) && defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES)
3113 /*
3114 * Resolve CPU structures.
3115 */
3116 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
3117 if (RT_SUCCESS(rc))
3118 {
3119 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
3120
3121 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
3122 if (u64Arg == 0)
3123 {
3124 /*
3125 * Query register.
3126 */
3127 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
3128 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
3129
3130 size_t const cbInput = RT_ALIGN_Z(RT_UOFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[1]), 32);
3131 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
3132 RT_BZERO(paValues, sizeof(paValues[0]) * 1);
3133
3134 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
3135 pInput->VpIndex = pGVCpu->idCpu;
3136 pInput->fFlags = 0;
3137 pInput->Names[0] = (HV_REGISTER_NAME)pGVCpu->nem.s.Hypercall.Experiment.uItem;
3138
3139 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, 1),
3140 pGVCpu->nemr0.s.HypercallData.HCPhysPage,
3141 pGVCpu->nemr0.s.HypercallData.HCPhysPage + cbInput);
3142 pGVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_MAKE_CALL_REP_RET(1);
3143 pGVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;
3144 pGVCpu->nem.s.Hypercall.Experiment.uLoValue = paValues[0].Reg128.Low64;
3145 pGVCpu->nem.s.Hypercall.Experiment.uHiValue = paValues[0].Reg128.High64;
3146 rc = VINF_SUCCESS;
3147 }
3148 else if (u64Arg == 1)
3149 {
3150 /*
3151 * Query partition property.
3152 */
3153 HV_INPUT_GET_PARTITION_PROPERTY *pInput = (HV_INPUT_GET_PARTITION_PROPERTY *)pGVCpu->nemr0.s.HypercallData.pbPage;
3154 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
3155
3156 size_t const cbInput = RT_ALIGN_Z(sizeof(*pInput), 32);
3157 HV_OUTPUT_GET_PARTITION_PROPERTY *pOutput = (HV_OUTPUT_GET_PARTITION_PROPERTY *)((uint8_t *)pInput + cbInput);
3158 pOutput->PropertyValue = 0;
3159
3160 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
3161 pInput->PropertyCode = (HV_PARTITION_PROPERTY_CODE)pGVCpu->nem.s.Hypercall.Experiment.uItem;
3162 pInput->uPadding = 0;
3163
3164 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallGetPartitionProperty,
3165 pGVCpu->nemr0.s.HypercallData.HCPhysPage,
3166 pGVCpu->nemr0.s.HypercallData.HCPhysPage + cbInput);
3167 pGVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_STATUS_SUCCESS;
3168 pGVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;
3169 pGVCpu->nem.s.Hypercall.Experiment.uLoValue = pOutput->PropertyValue;
3170 pGVCpu->nem.s.Hypercall.Experiment.uHiValue = 0;
3171 rc = VINF_SUCCESS;
3172 }
3173 else if (u64Arg == 2)
3174 {
3175 /*
3176 * Set register.
3177 */
3178 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
3179 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
3180 RT_BZERO(pInput, RT_UOFFSETOF(HV_INPUT_SET_VP_REGISTERS, Elements[1]));
3181
3182 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
3183 pInput->VpIndex = pGVCpu->idCpu;
3184 pInput->RsvdZ = 0;
3185 pInput->Elements[0].Name = (HV_REGISTER_NAME)pGVCpu->nem.s.Hypercall.Experiment.uItem;
3186 pInput->Elements[0].Value.Reg128.High64 = pGVCpu->nem.s.Hypercall.Experiment.uHiValue;
3187 pInput->Elements[0].Value.Reg128.Low64 = pGVCpu->nem.s.Hypercall.Experiment.uLoValue;
3188
3189 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
3190 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0);
3191 pGVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_MAKE_CALL_REP_RET(1);
3192 pGVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;
3193 rc = VINF_SUCCESS;
3194 }
3195 else
3196 rc = VERR_INVALID_FUNCTION;
3197 }
3198 return rc;
3199#else /* !DEBUG_bird */
3200 RT_NOREF(pGVM, idCpu, u64Arg);
3201 return VERR_NOT_SUPPORTED;
3202#endif /* !DEBUG_bird */
3203}
3204