VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp@ 91688

Last change on this file since 91688 was 91688, checked in by vboxsync, 3 years ago

VMM/NEM: Added some more #ifdef'ing to reduce the amount of ioctl probing to what we actually need. bugref:10118

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 136.1 KB
Line 
1/* $Id: NEMR0Native-win.cpp 91688 2021-10-12 12:21:10Z vboxsync $ */
2/** @file
3 * NEM - Native execution manager, native ring-0 Windows backend.
4 */
5
6/*
7 * Copyright (C) 2018-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_NEM
23#define VMCPU_INCL_CPUM_GST_CTX
24#include <iprt/nt/nt.h>
25#include <iprt/nt/hyperv.h>
26#include <iprt/nt/vid.h>
27#include <winerror.h>
28
29#include <VBox/vmm/nem.h>
30#include <VBox/vmm/iem.h>
31#include <VBox/vmm/em.h>
32#include <VBox/vmm/apic.h>
33#include <VBox/vmm/pdm.h>
34#include <VBox/vmm/dbgftrace.h>
35#include "NEMInternal.h"
36#include <VBox/vmm/gvm.h>
37#include <VBox/vmm/vmcc.h>
38#include <VBox/vmm/gvmm.h>
39#include <VBox/param.h>
40
41#include <iprt/ctype.h>
42#include <iprt/critsect.h>
43#include <iprt/dbg.h>
44#include <iprt/mem.h>
45#include <iprt/memobj.h>
46#include <iprt/string.h>
47#include <iprt/time.h>
48#define PIMAGE_NT_HEADERS32 PIMAGE_NT_HEADERS32_PECOFF
49#include <iprt/formats/pecoff.h>
50
51
52/* Assert compile context sanity. */
53#ifndef RT_OS_WINDOWS
54# error "Windows only file!"
55#endif
56#ifndef RT_ARCH_AMD64
57# error "AMD64 only file!"
58#endif
59
60
61/*********************************************************************************************************************************
62* Internal Functions *
63*********************************************************************************************************************************/
64typedef uint32_t DWORD; /* for winerror.h constants */
65
66
67/*********************************************************************************************************************************
68* Global Variables *
69*********************************************************************************************************************************/
70static uint64_t (*g_pfnHvlInvokeHypercall)(uint64_t uCallInfo, uint64_t HCPhysInput, uint64_t HCPhysOutput);
71
72/**
73 * WinHvr.sys!WinHvDepositMemory
74 *
75 * This API will try allocates cPages on IdealNode and deposit it to the
76 * hypervisor for use with the given partition. The memory will be freed when
77 * VID.SYS calls WinHvWithdrawAllMemory when the partition is cleanedup.
78 *
79 * Apparently node numbers above 64 has a different meaning.
80 */
81static NTSTATUS (*g_pfnWinHvDepositMemory)(uintptr_t idPartition, size_t cPages, uintptr_t IdealNode, size_t *pcActuallyAdded);
82
83RT_C_DECLS_BEGIN
84/**
85 * The WinHvGetPartitionProperty function we intercept in VID.SYS to get the
86 * Hyper-V partition ID.
87 *
88 * This is used from assembly.
89 */
90NTSTATUS WinHvGetPartitionProperty(uintptr_t idPartition, HV_PARTITION_PROPERTY_CODE enmProperty, PHV_PARTITION_PROPERTY puValue);
91decltype(WinHvGetPartitionProperty) *g_pfnWinHvGetPartitionProperty;
92RT_C_DECLS_END
93
94/** @name VID.SYS image details.
95 * @{ */
96static uint8_t *g_pbVidSys = NULL;
97static uintptr_t g_cbVidSys = 0;
98static PIMAGE_NT_HEADERS g_pVidSysHdrs = NULL;
99/** Pointer to the import thunk entry in VID.SYS for WinHvGetPartitionProperty if we found it. */
100static decltype(WinHvGetPartitionProperty) **g_ppfnVidSysWinHvGetPartitionProperty = NULL;
101
102/** Critical section protecting the WinHvGetPartitionProperty hacking. */
103static RTCRITSECT g_VidSysCritSect;
104RT_C_DECLS_BEGIN
105/** The partition ID passed to WinHvGetPartitionProperty by VID.SYS. */
106HV_PARTITION_ID g_idVidSysFoundPartition = HV_PARTITION_ID_INVALID;
107/** The thread which is currently looking for a partition ID. */
108RTNATIVETHREAD g_hVidSysMatchThread = NIL_RTNATIVETHREAD;
109/** The property code we expect in WinHvGetPartitionProperty. */
110VID_PARTITION_PROPERTY_CODE g_enmVidSysMatchProperty = INT64_MAX;
111/* NEMR0NativeA-win.asm: */
112extern uint8_t g_abNemR0WinHvrWinHvGetPartitionProperty_OriginalProlog[64];
113RT_C_DECLS_END
114/** @} */
115
116
117
118/*********************************************************************************************************************************
119* Internal Functions *
120*********************************************************************************************************************************/
121NEM_TMPL_STATIC int nemR0WinMapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
122 uint32_t cPages, uint32_t fFlags);
123NEM_TMPL_STATIC int nemR0WinUnmapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys, uint32_t cPages);
124#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
125NEM_TMPL_STATIC int nemR0WinExportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx);
126NEM_TMPL_STATIC int nemR0WinImportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat, bool fCanUpdateCr3);
127NEM_TMPL_STATIC int nemR0WinQueryCpuTick(PGVM pGVM, PGVMCPU pGVCpu, uint64_t *pcTicks, uint32_t *pcAux);
128NEM_TMPL_STATIC int nemR0WinResumeCpuTickOnAll(PGVM pGVM, PGVMCPU pGVCpu, uint64_t uPausedTscValue);
129#endif
130DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, PGVMCPU pGVCpu, uint32_t uFunction, void *pvInput, uint32_t cbInput,
131 void *pvOutput, uint32_t cbOutput);
132
133/* NEMR0NativeA-win.asm: */
134DECLASM(NTSTATUS) nemR0VidSysWinHvGetPartitionProperty(uintptr_t idPartition, HV_PARTITION_PROPERTY_CODE enmProperty,
135 PHV_PARTITION_PROPERTY puValue);
136DECLASM(NTSTATUS) nemR0WinHvrWinHvGetPartitionProperty(uintptr_t idPartition, HV_PARTITION_PROPERTY_CODE enmProperty,
137 PHV_PARTITION_PROPERTY puValue);
138
139
140/*
 * Instantiate the code we share with ring-0.
142 */
143#ifdef NEM_WIN_WITH_RING0_RUNLOOP
144# define NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
145#else
146# undef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
147#endif
148#include "../VMMAll/NEMAllNativeTemplate-win.cpp.h"
149
150
/**
 * Module initialization for NEM.
 *
 * Initializes g_VidSysCritSect, the critical section protecting the
 * WinHvGetPartitionProperty hacking done during VM initialization.
 *
 * @returns VBox status code (from RTCritSectInit).
 */
VMMR0_INT_DECL(int) NEMR0Init(void)
{
    return RTCritSectInit(&g_VidSysCritSect);
}
158
159
/**
 * Module termination for NEM.
 *
 * Counterpart to NEMR0Init; deletes the g_VidSysCritSect critical section.
 */
VMMR0_INT_DECL(void) NEMR0Term(void)
{
    RTCritSectDelete(&g_VidSysCritSect);
}
167
168
/**
 * Worker for NEMR0InitVM that allocates a hypercall page.
 *
 * Records both the ring-0 address and the physical address of the page, as
 * hypercall input/output buffers are passed to the hypervisor by physical
 * address.
 *
 * @returns VBox status code.
 * @param   pHypercallData  The hypercall data page to initialize.  On failure
 *                          all members are reset to their 'unallocated'
 *                          values (NIL/NULL).
 */
static int nemR0InitHypercallData(PNEMR0HYPERCALLDATA pHypercallData)
{
    int rc = RTR0MemObjAllocPage(&pHypercallData->hMemObj, PAGE_SIZE, false /*fExecutable*/);
    if (RT_SUCCESS(rc))
    {
        /* Resolve and cache the physical address now; hypercalls need it. */
        pHypercallData->HCPhysPage = RTR0MemObjGetPagePhysAddr(pHypercallData->hMemObj, 0 /*iPage*/);
        AssertStmt(pHypercallData->HCPhysPage != NIL_RTHCPHYS, rc = VERR_INTERNAL_ERROR_3);
        pHypercallData->pbPage = (uint8_t *)RTR0MemObjAddress(pHypercallData->hMemObj);
        AssertStmt(pHypercallData->pbPage, rc = VERR_INTERNAL_ERROR_3);
        if (RT_SUCCESS(rc))
            return VINF_SUCCESS;

        /* bail out */
        RTR0MemObjFree(pHypercallData->hMemObj, true /*fFreeMappings*/);
    }
    /* Leave the structure in a well-defined unallocated state on failure. */
    pHypercallData->hMemObj = NIL_RTR0MEMOBJ;
    pHypercallData->HCPhysPage = NIL_RTHCPHYS;
    pHypercallData->pbPage = NULL;
    return rc;
}
195
196
197/**
198 * Worker for NEMR0CleanupVM and NEMR0InitVM that cleans up a hypercall page.
199 *
200 * @param pHypercallData The hypercall data page to uninitialize.
201 */
202static void nemR0DeleteHypercallData(PNEMR0HYPERCALLDATA pHypercallData)
203{
204 /* Check pbPage here since it's NULL, whereas the hMemObj can be either
205 NIL_RTR0MEMOBJ or 0 (they aren't necessarily the same). */
206 if (pHypercallData->pbPage != NULL)
207 {
208 RTR0MemObjFree(pHypercallData->hMemObj, true /*fFreeMappings*/);
209 pHypercallData->pbPage = NULL;
210 }
211 pHypercallData->hMemObj = NIL_RTR0MEMOBJ;
212 pHypercallData->HCPhysPage = NIL_RTHCPHYS;
213}
214
215
/**
 * Simple locale-independent case-insensitive string compare.
 *
 * @returns 0 when the strings are equal ignoring ASCII case, otherwise the
 *          difference between the first pair of characters found to differ
 *          (compared with their original case).
 * @param   psz1    The first zero-terminated string.
 * @param   psz2    The second zero-terminated string.
 */
static int nemR0StrICmp(const char *psz1, const char *psz2)
{
    for (;;)
    {
        char const ch1 = *psz1++;
        char const ch2 = *psz2++;
        if (ch1 != ch2)
        {
            /* Fold A-Z to a-z before concluding they really differ
               (open-coded ASCII fold equivalent to RT_C_TO_LOWER,
               deliberately locale independent). */
            char const chFolded1 = ch1 >= 'A' && ch1 <= 'Z' ? ch1 + ('a' - 'A') : ch1;
            char const chFolded2 = ch2 >= 'A' && ch2 <= 'Z' ? ch2 + ('a' - 'A') : ch2;
            if (chFolded1 != chFolded2)
                return ch1 - ch2;
        }
        else if (ch1 == '\0')
            return 0;
    }
}
229
230
231/**
232 * Worker for nemR0PrepareForVidSysIntercept().
233 */
234static void nemR0PrepareForVidSysInterceptInner(void)
235{
236 uint32_t const cbImage = g_cbVidSys;
237 uint8_t * const pbImage = g_pbVidSys;
238 PIMAGE_NT_HEADERS const pNtHdrs = g_pVidSysHdrs;
239 uintptr_t const offEndNtHdrs = (uintptr_t)(pNtHdrs + 1) - (uintptr_t)pbImage;
240
241#define CHECK_LOG_RET(a_Expr, a_LogRel) do { \
242 if (RT_LIKELY(a_Expr)) { /* likely */ } \
243 else \
244 { \
245 LogRel(a_LogRel); \
246 return; \
247 } \
248 } while (0)
249
250 //__try
251 {
252 /*
253 * Get and validate the import directory entry.
254 */
255 CHECK_LOG_RET( pNtHdrs->OptionalHeader.NumberOfRvaAndSizes > IMAGE_DIRECTORY_ENTRY_IMPORT
256 || pNtHdrs->OptionalHeader.NumberOfRvaAndSizes <= IMAGE_NUMBEROF_DIRECTORY_ENTRIES * 4,
257 ("NEMR0: vid.sys: NumberOfRvaAndSizes is out of range: %#x\n", pNtHdrs->OptionalHeader.NumberOfRvaAndSizes));
258
259 IMAGE_DATA_DIRECTORY const ImportDir = pNtHdrs->OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_IMPORT];
260 CHECK_LOG_RET( ImportDir.Size >= sizeof(IMAGE_IMPORT_DESCRIPTOR)
261 && ImportDir.VirtualAddress >= offEndNtHdrs /* ASSUMES NT headers before imports */
262 && (uint64_t)ImportDir.VirtualAddress + ImportDir.Size <= cbImage,
263 ("NEMR0: vid.sys: Bad import directory entry: %#x LB %#x (cbImage=%#x, offEndNtHdrs=%#zx)\n",
264 ImportDir.VirtualAddress, ImportDir.Size, cbImage, offEndNtHdrs));
265
266 /*
267 * Walk the import descriptor table looking for NTDLL.DLL.
268 */
269 for (PIMAGE_IMPORT_DESCRIPTOR pImps = (PIMAGE_IMPORT_DESCRIPTOR)&pbImage[ImportDir.VirtualAddress];
270 pImps->Name != 0 && pImps->FirstThunk != 0;
271 pImps++)
272 {
273 CHECK_LOG_RET(pImps->Name < cbImage, ("NEMR0: vid.sys: Bad import directory entry name: %#x", pImps->Name));
274 const char *pszModName = (const char *)&pbImage[pImps->Name];
275 if (nemR0StrICmp(pszModName, "winhvr.sys"))
276 continue;
277 CHECK_LOG_RET(pImps->FirstThunk < cbImage && pImps->FirstThunk >= offEndNtHdrs,
278 ("NEMR0: vid.sys: Bad FirstThunk: %#x", pImps->FirstThunk));
279 CHECK_LOG_RET( pImps->u.OriginalFirstThunk == 0
280 || (pImps->u.OriginalFirstThunk >= offEndNtHdrs && pImps->u.OriginalFirstThunk < cbImage),
281 ("NEMR0: vid.sys: Bad OriginalFirstThunk: %#x", pImps->u.OriginalFirstThunk));
282
283 /*
284 * Walk the thunks table(s) looking for WinHvGetPartitionProperty.
285 */
286 uintptr_t *puFirstThunk = (uintptr_t *)&pbImage[pImps->FirstThunk]; /* update this. */
287 if ( pImps->u.OriginalFirstThunk != 0
288 && pImps->u.OriginalFirstThunk != pImps->FirstThunk)
289 {
290 uintptr_t const *puOrgThunk = (uintptr_t const *)&pbImage[pImps->u.OriginalFirstThunk]; /* read from this. */
291 uintptr_t cLeft = (cbImage - (RT_MAX(pImps->FirstThunk, pImps->u.OriginalFirstThunk)))
292 / sizeof(*puFirstThunk);
293 while (cLeft-- > 0 && *puOrgThunk != 0)
294 {
295 if (!(*puOrgThunk & IMAGE_ORDINAL_FLAG64))
296 {
297 CHECK_LOG_RET(*puOrgThunk >= offEndNtHdrs && *puOrgThunk < cbImage,
298 ("NEMR0: vid.sys: Bad thunk entry: %#x", *puOrgThunk));
299
300 const char *pszSymbol = (const char *)&pbImage[*puOrgThunk + 2];
301 if (strcmp(pszSymbol, "WinHvGetPartitionProperty") == 0)
302 g_ppfnVidSysWinHvGetPartitionProperty = (decltype(WinHvGetPartitionProperty) **)puFirstThunk;
303 }
304
305 puOrgThunk++;
306 puFirstThunk++;
307 }
308 }
309 else
310 {
311 /* No original thunk table, so scan the resolved symbols for a match
312 with the WinHvGetPartitionProperty address. */
313 uintptr_t const uNeedle = (uintptr_t)g_pfnWinHvGetPartitionProperty;
314 uintptr_t cLeft = (cbImage - pImps->FirstThunk) / sizeof(*puFirstThunk);
315 while (cLeft-- > 0 && *puFirstThunk != 0)
316 {
317 if (*puFirstThunk == uNeedle)
318 g_ppfnVidSysWinHvGetPartitionProperty = (decltype(WinHvGetPartitionProperty) **)puFirstThunk;
319 puFirstThunk++;
320 }
321 }
322 }
323
324 /* Report the findings: */
325 if (g_ppfnVidSysWinHvGetPartitionProperty)
326 LogRel(("NEMR0: vid.sys: Found WinHvGetPartitionProperty import thunk at %p (value %p vs %p)\n",
327 g_ppfnVidSysWinHvGetPartitionProperty,*g_ppfnVidSysWinHvGetPartitionProperty, g_pfnWinHvGetPartitionProperty));
328 else
329 LogRel(("NEMR0: vid.sys: Did not find WinHvGetPartitionProperty!\n"));
330 }
331 //__except(EXCEPTION_EXECUTE_HANDLER)
332 //{
333 // return;
334 //}
335#undef CHECK_LOG_RET
336}
337
338
339/**
340 * Worker for NEMR0InitVM that prepares for intercepting stuff in VID.SYS.
341 */
342static void nemR0PrepareForVidSysIntercept(RTDBGKRNLINFO hKrnlInfo)
343{
344 /*
345 * Resolve the symbols we need first.
346 */
347 int rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, "vid.sys", "__ImageBase", (void **)&g_pbVidSys);
348 if (RT_SUCCESS(rc))
349 {
350 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, "vid.sys", "__ImageSize", (void **)&g_cbVidSys);
351 if (RT_SUCCESS(rc))
352 {
353 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, "vid.sys", "__ImageNtHdrs", (void **)&g_pVidSysHdrs);
354 if (RT_SUCCESS(rc))
355 {
356 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, "winhvr.sys", "WinHvGetPartitionProperty",
357 (void **)&g_pfnWinHvGetPartitionProperty);
358 if (RT_SUCCESS(rc))
359 {
360 /*
361 * Now locate the import thunk entry for WinHvGetPartitionProperty in vid.sys.
362 */
363 nemR0PrepareForVidSysInterceptInner();
364 }
365 else
366 LogRel(("NEMR0: Failed to find winhvr.sys!WinHvGetPartitionProperty (%Rrc)\n", rc));
367 }
368 else
369 LogRel(("NEMR0: Failed to find vid.sys!__ImageNtHdrs (%Rrc)\n", rc));
370 }
371 else
372 LogRel(("NEMR0: Failed to find vid.sys!__ImageSize (%Rrc)\n", rc));
373 }
374 else
375 LogRel(("NEMR0: Failed to find vid.sys!__ImageBase (%Rrc)\n", rc));
376}
377
378
/**
 * Called by NEMR3Init to make sure we've got what we need.
 *
 * Resolves the kernel/winhvr.sys symbols needed for hypercalls and memory
 * depositing, prepares the VID.SYS intercept info, and allocates hypercall
 * pages (one shared non-EMT page plus one per VCPU).
 *
 * @returns VBox status code.
 * @param   pGVM    The ring-0 VM handle.
 * @thread  EMT(0)
 */
VMMR0_INT_DECL(int) NEMR0InitVM(PGVM pGVM)
{
    AssertCompile(sizeof(pGVM->nemr0.s) <= sizeof(pGVM->nemr0.padding));
    AssertCompile(sizeof(pGVM->aCpus[0].nemr0.s) <= sizeof(pGVM->aCpus[0].nemr0.padding));

    int rc = GVMMR0ValidateGVMandEMT(pGVM, 0);
    AssertRCReturn(rc, rc);

    /*
     * We want to perform hypercalls here.  The NT kernel started to expose a very low
     * level interface to do this thru somewhere between build 14271 and 16299.  Since
     * we need build 17134 to get anywhere at all, the exact build is not relevant here.
     *
     * We also need to deposit memory to the hypervisor for use with partition (page
     * mapping structures, stuff).
     */
    RTDBGKRNLINFO hKrnlInfo;
    rc = RTR0DbgKrnlInfoOpen(&hKrnlInfo, 0);
    if (RT_SUCCESS(rc))
    {
        /* NULL module name means the NT kernel itself. */
        rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, NULL, "HvlInvokeHypercall", (void **)&g_pfnHvlInvokeHypercall);
        if (RT_FAILURE(rc))
            rc = VERR_NEM_MISSING_KERNEL_API_1;
        if (RT_SUCCESS(rc))
        {
            rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, "winhvr.sys", "WinHvDepositMemory", (void **)&g_pfnWinHvDepositMemory);
            if (RT_FAILURE(rc))
                /* Distinguish 'winhvr.sys not loaded' from 'symbol not exported'. */
                rc = rc == VERR_MODULE_NOT_FOUND ? VERR_NEM_MISSING_KERNEL_API_2 : VERR_NEM_MISSING_KERNEL_API_3;
        }

        /*
         * Since late 2021 we may also need to do some nasty trickery with vid.sys to get
         * the partition ID.  So, get the necessary info while we have a hKrnlInfo instance.
         */
        if (RT_SUCCESS(rc))
            nemR0PrepareForVidSysIntercept(hKrnlInfo);

        RTR0DbgKrnlInfoRelease(hKrnlInfo);
        if (RT_SUCCESS(rc))
        {
            /*
             * Allocate a page for non-EMT threads to use for hypercalls (update
             * statistics and such) and a critical section protecting it.
             */
            rc = RTCritSectInit(&pGVM->nemr0.s.HypercallDataCritSect);
            if (RT_SUCCESS(rc))
            {
                rc = nemR0InitHypercallData(&pGVM->nemr0.s.HypercallData);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Allocate a page for each VCPU to place hypercall data on.
                     */
                    for (VMCPUID i = 0; i < pGVM->cCpus; i++)
                    {
                        rc = nemR0InitHypercallData(&pGVM->aCpus[i].nemr0.s.HypercallData);
                        if (RT_FAILURE(rc))
                        {
                            /* Unwind the pages already allocated for preceding VCPUs. */
                            while (i-- > 0)
                                nemR0DeleteHypercallData(&pGVM->aCpus[i].nemr0.s.HypercallData);
                            break;
                        }
                    }
                    if (RT_SUCCESS(rc))
                    {
                        /*
                         * So far, so good.
                         */
                        return rc;
                    }

                    /*
                     * Bail out.
                     */
                    nemR0DeleteHypercallData(&pGVM->nemr0.s.HypercallData);
                }
                RTCritSectDelete(&pGVM->nemr0.s.HypercallDataCritSect);
            }
        }
    }

    return rc;
}
469
470
/**
 * Perform an I/O control operation on the partition handle (VID.SYS).
 *
 * @returns NT status code.
 * @param   pGVM        The ring-0 VM structure.
 * @param   pGVCpu      The global (ring-0) CPU structure of the calling EMT.
 * @param   uFunction   The function to perform.
 * @param   pvInput     The input buffer.  This must point within the VM
 *                      structure so we can easily convert to a ring-3
 *                      pointer if necessary.
 * @param   cbInput     The size of the input.  @a pvInput must be NULL when
 *                      zero.
 * @param   pvOutput    The output buffer.  This must also point within the
 *                      VM structure for ring-3 pointer magic.
 * @param   cbOutput    The size of the output.  @a pvOutput must be NULL
 *                      when zero.
 * @thread  EMT(pGVCpu)
 */
DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, PGVMCPU pGVCpu, uint32_t uFunction, void *pvInput, uint32_t cbInput,
                                             void *pvOutput, uint32_t cbOutput)
{
#ifdef RT_STRICT
    /*
     * Input and output parameters are part of the VM CPU structure.
     * NOTE(review): on assertion failure this returns a VBox status code
     * (VERR_INVALID_PARAMETER) from a function declared to return NTSTATUS
     * (strict builds only) - confirm callers treat that as a failure.
     */
    VMCPU_ASSERT_EMT(pGVCpu);
    if (pvInput)
        AssertReturn(((uintptr_t)pvInput + cbInput) - (uintptr_t)pGVCpu <= sizeof(*pGVCpu), VERR_INVALID_PARAMETER);
    if (pvOutput)
        AssertReturn(((uintptr_t)pvOutput + cbOutput) - (uintptr_t)pGVCpu <= sizeof(*pGVCpu), VERR_INVALID_PARAMETER);
#endif

    int32_t rcNt = STATUS_UNSUCCESSFUL;
    /* The ring-3 addresses are derived from the ring-0 ones by adding the
       per-CPU conversion delta (offRing3ConversionDelta). */
    int rc = SUPR0IoCtlPerform(pGVM->nemr0.s.pIoCtlCtx, uFunction,
                               pvInput,
                               pvInput ? (uintptr_t)pvInput + pGVCpu->nemr0.s.offRing3ConversionDelta : NIL_RTR3PTR,
                               cbInput,
                               pvOutput,
                               pvOutput ? (uintptr_t)pvOutput + pGVCpu->nemr0.s.offRing3ConversionDelta : NIL_RTR3PTR,
                               cbOutput,
                               &rcNt);
    /* Trust rcNt when the ioctl was actually performed, or when it already
       indicates an NT failure; if SUPR0IoCtlPerform failed while rcNt still
       claims success, report a generic failure so success is never faked. */
    if (RT_SUCCESS(rc) || !NT_SUCCESS((NTSTATUS)rcNt))
        return (NTSTATUS)rcNt;
    return STATUS_UNSUCCESSFUL;
}
516
517
/**
 * Here is something that we really do not wish to do, but find ourselves
 * forced to do right now as we cannot rewrite the memory management of
 * VBox 6.1 in time for Windows 11.
 *
 * Obtains the Hyper-V partition ID by intercepting VID.SYS's call to
 * winhvr.sys!WinHvGetPartitionProperty: first by atomically swapping the
 * import thunk entry in VID.SYS (trick #1), and if that does not yield an
 * ID, by patching the prologue of WinHvGetPartitionProperty itself with a
 * jump to our interceptor (trick #2).  The found ID is stored in
 * pGVM->nemr0.s.idHvPartition.
 *
 * @returns VBox status code.
 * @param   pGVM        The ring-0 VM structure.
 * @param   pahMemObjs  Array of 6 memory objects that the caller will release.
 *                      ASSUMES that they are initialized to NIL.
 */
static int nemR0InitVMPart2DontWannaDoTheseUglyPartitionIdFallbacks(PGVM pGVM, PRTR0MEMOBJ pahMemObjs)
{
    /*
     * Check preconditions:
     */
    if (   !g_ppfnVidSysWinHvGetPartitionProperty
        || (uintptr_t)g_ppfnVidSysWinHvGetPartitionProperty & (sizeof(uintptr_t) - 1))
    {
        LogRel(("NEMR0: g_ppfnVidSysWinHvGetPartitionProperty is NULL or misaligned (%p), partition ID fallback not possible.\n",
                g_ppfnVidSysWinHvGetPartitionProperty));
        return VERR_NEM_INIT_FAILED;
    }
    if (!g_pfnWinHvGetPartitionProperty)
    {
        LogRel(("NEMR0: g_pfnWinHvGetPartitionProperty is NULL, partition ID fallback not possible.\n"));
        return VERR_NEM_INIT_FAILED;
    }
    /* NOTE(review): this precondition checks nem.s.IoCtlGetPartitionProperty
       while the actual ioctls below use nemr0.s.IoCtlGetPartitionProperty -
       presumably both are populated by NEMR0InitVMPart2 before we get here;
       confirm. */
    if (!pGVM->nem.s.IoCtlGetPartitionProperty.uFunction)
    {
        LogRel(("NEMR0: IoCtlGetPartitionProperty.uFunction is 0, partition ID fallback not possible.\n"));
        return VERR_NEM_INIT_FAILED;
    }

    /*
     * Create a writable alias for the thunk table entry because it is very
     * likely to be mapped read-only: lock it, enter its physical page, and
     * map that page read+write elsewhere in the kernel address space.
     */
    int rc = RTR0MemObjLockKernel(&pahMemObjs[0], g_ppfnVidSysWinHvGetPartitionProperty, sizeof(uintptr_t), RTMEM_PROT_READ);
    if (RT_FAILURE(rc))
    {
        LogRel(("NEMR0: RTR0MemObjLockKernel failed on VID.SYS thunk table entry: %Rrc\n", rc));
        return rc;
    }

    rc = RTR0MemObjEnterPhys(&pahMemObjs[1], RTR0MemObjGetPagePhysAddr(pahMemObjs[0], 0), PAGE_SIZE, RTMEM_CACHE_POLICY_DONT_CARE);
    if (RT_FAILURE(rc))
    {
        LogRel(("NEMR0: RTR0MemObjEnterPhys failed on VID.SYS thunk table entry: %Rrc\n", rc));
        return rc;
    }

    rc = RTR0MemObjMapKernel(&pahMemObjs[2], pahMemObjs[1], (void *)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
    if (RT_FAILURE(rc))
    {
        LogRel(("NEMR0: RTR0MemObjMapKernel failed on VID.SYS thunk table entry: %Rrc\n", rc));
        return rc;
    }

    /* Combine the aliased page address with the in-page offset of the entry. */
    decltype(WinHvGetPartitionProperty) **ppfnThunkAlias
        = (decltype(WinHvGetPartitionProperty) **)(  (uintptr_t)RTR0MemObjAddress(pahMemObjs[2])
                                                   | ((uintptr_t)g_ppfnVidSysWinHvGetPartitionProperty & PAGE_OFFSET_MASK));
    LogRel(("NEMR0: ppfnThunkAlias=%p *ppfnThunkAlias=%p; original: %p & %p, phys %RHp\n", ppfnThunkAlias, *ppfnThunkAlias,
            g_ppfnVidSysWinHvGetPartitionProperty, *g_ppfnVidSysWinHvGetPartitionProperty,
            RTR0MemObjGetPagePhysAddr(pahMemObjs[0], 0) ));

    /*
     * Create an alias for the target code in WinHvr.sys as there is a very decent
     * chance we have to patch it.
     */
    rc = RTR0MemObjLockKernel(&pahMemObjs[3], g_pfnWinHvGetPartitionProperty, sizeof(uintptr_t), RTMEM_PROT_READ);
    if (RT_FAILURE(rc))
    {
        LogRel(("NEMR0: RTR0MemObjLockKernel failed on WinHvGetPartitionProperty (%p): %Rrc\n", g_pfnWinHvGetPartitionProperty, rc));
        return rc;
    }

    rc = RTR0MemObjEnterPhys(&pahMemObjs[4], RTR0MemObjGetPagePhysAddr(pahMemObjs[3], 0), PAGE_SIZE, RTMEM_CACHE_POLICY_DONT_CARE);
    if (RT_FAILURE(rc))
    {
        LogRel(("NEMR0: RTR0MemObjEnterPhys failed on WinHvGetPartitionProperty: %Rrc\n", rc));
        return rc;
    }

    rc = RTR0MemObjMapKernel(&pahMemObjs[5], pahMemObjs[4], (void *)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
    if (RT_FAILURE(rc))
    {
        LogRel(("NEMR0: RTR0MemObjMapKernel failed on WinHvGetPartitionProperty: %Rrc\n", rc));
        return rc;
    }

    uint8_t *pbTargetAlias = (uint8_t *)(  (uintptr_t)RTR0MemObjAddress(pahMemObjs[5])
                                         | ((uintptr_t)g_pfnWinHvGetPartitionProperty & PAGE_OFFSET_MASK));
    LogRel(("NEMR0: pbTargetAlias=%p %.16Rhxs; original: %p %.16Rhxs, phys %RHp\n", pbTargetAlias, pbTargetAlias,
            g_pfnWinHvGetPartitionProperty, g_pfnWinHvGetPartitionProperty, RTR0MemObjGetPagePhysAddr(pahMemObjs[3], 0) ));

    /*
     * Analyse the target function's prologue to figure out how much we should copy
     * when patching it.  We repeat this every time because we don't want to get
     * tripped up by someone else doing the same stuff as we're doing here.
     * We need at least 12 bytes for the patch sequence (MOV RAX, QWORD; JMP RAX).
     */
    union
    {
        uint8_t ab[48]; /**< Must be equal or smaller than g_abNemR0WinHvrWinHvGetPartitionProperty_OriginalProlog */
        int64_t ai64[6];
    } Org;
    memcpy(Org.ab, g_pfnWinHvGetPartitionProperty, sizeof(Org)); /** @todo ASSUMES 48 valid bytes start at function... */

    uint32_t offJmpBack = 0;
    uint32_t const cbMinJmpPatch = 12;
    DISSTATE Dis;
    /* Disassemble forward until at least cbMinJmpPatch bytes are covered by
       instructions that are safe to relocate (no control flow, no
       RIP-relative operands). */
    while (offJmpBack < cbMinJmpPatch && offJmpBack < sizeof(Org) - 16)
    {
        uint32_t cbInstr = 1;
        rc = DISInstr(&Org.ab[offJmpBack], DISCPUMODE_64BIT, &Dis, &cbInstr);
        if (RT_FAILURE(rc))
        {
            LogRel(("NEMR0: DISInstr failed %#x bytes into WinHvGetPartitionProperty: %Rrc (%.48Rhxs)\n",
                    offJmpBack, rc, Org.ab));
            break;
        }
        if (Dis.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
        {
            LogRel(("NEMR0: Control flow instruction %#x bytes into WinHvGetPartitionProperty prologue: %.48Rhxs\n",
                    offJmpBack, Org.ab));
            break;
        }
        if (Dis.ModRM.Bits.Mod == 0 && Dis.ModRM.Bits.Rm == 5 /* wrt RIP */)
        {
            LogRel(("NEMR0: RIP relative addressing %#x bytes into WinHvGetPartitionProperty prologue: %.48Rhxs\n",
                    offJmpBack, Org.ab));
            break;
        }
        offJmpBack += cbInstr;
    }

    /* The 16 byte cmpxchg16b target must not cross the boundary of the single
       page we aliased above. */
    uintptr_t const cbLeftInPage = PAGE_SIZE - ((uintptr_t)g_pfnWinHvGetPartitionProperty & PAGE_OFFSET_MASK);
    if (cbLeftInPage < 16 && offJmpBack >= cbMinJmpPatch)
    {
        LogRel(("NEMR0: WinHvGetPartitionProperty patching not possible do the page crossing: %p (%#zx)\n",
                g_pfnWinHvGetPartitionProperty, cbLeftInPage));
        offJmpBack = 0;
    }
    if (offJmpBack >= cbMinJmpPatch)
        LogRel(("NEMR0: offJmpBack=%#x for WinHvGetPartitionProperty (%p: %.48Rhxs)\n",
                offJmpBack, g_pfnWinHvGetPartitionProperty, Org.ab));
    else
        offJmpBack = 0; /* Prologue analysis failed; trick #2 is disabled below. */
    rc = VINF_SUCCESS;

    /*
     * Now enter serialization lock and get on with it...
     */
    PVMCPUCC const pVCpu0 = &pGVM->aCpus[0];
    NTSTATUS rcNt;
    RTCritSectEnter(&g_VidSysCritSect);

    /*
     * First attempt, patching the import table entry.
     *
     * The match globals tell our interceptor (assembly) which thread/property
     * combination to record the partition ID for.
     */
    g_idVidSysFoundPartition = HV_PARTITION_ID_INVALID;
    g_hVidSysMatchThread = RTThreadNativeSelf();
    g_enmVidSysMatchProperty = pVCpu0->nem.s.uIoCtlBuf.GetProp.enmProperty = HvPartitionPropertyProcessorVendor;
    pVCpu0->nem.s.uIoCtlBuf.GetProp.uValue = 0;

    void *pvOld = NULL;
    if (ASMAtomicCmpXchgExPtr(ppfnThunkAlias, (void *)(uintptr_t)nemR0VidSysWinHvGetPartitionProperty,
                              (void *)(uintptr_t)g_pfnWinHvGetPartitionProperty, &pvOld))
    {
        LogRel(("NEMR0: after switch to %p: ppfnThunkAlias=%p *ppfnThunkAlias=%p; original: %p & %p\n",
                nemR0VidSysWinHvGetPartitionProperty, ppfnThunkAlias, *ppfnThunkAlias,
                g_ppfnVidSysWinHvGetPartitionProperty, *g_ppfnVidSysWinHvGetPartitionProperty));

        rcNt = nemR0NtPerformIoControl(pGVM, pVCpu0, pGVM->nemr0.s.IoCtlGetPartitionProperty.uFunction,
                                       &pVCpu0->nem.s.uIoCtlBuf.GetProp.enmProperty,
                                       sizeof(pVCpu0->nem.s.uIoCtlBuf.GetProp.enmProperty),
                                       &pVCpu0->nem.s.uIoCtlBuf.GetProp.uValue,
                                       sizeof(pVCpu0->nem.s.uIoCtlBuf.GetProp.uValue));
        /* Restore the thunk entry before examining the result. */
        ASMAtomicWritePtr(ppfnThunkAlias, (void *)(uintptr_t)g_pfnWinHvGetPartitionProperty);
        HV_PARTITION_ID idHvPartition = g_idVidSysFoundPartition;

        LogRel(("NEMR0: WinHvGetPartitionProperty trick #1 yielded: rcNt=%#x idHvPartition=%#RX64 uValue=%#RX64\n",
                rcNt, idHvPartition, pVCpu0->nem.s.uIoCtlBuf.GetProp.uValue));
        pGVM->nemr0.s.idHvPartition = idHvPartition;
    }
    else
    {
        LogRel(("NEMR0: Unexpected WinHvGetPartitionProperty pointer in VID.SYS: %p, expected %p\n",
                pvOld, g_pfnWinHvGetPartitionProperty));
        rc = VERR_NEM_INIT_FAILED;
    }

    /*
     * If that didn't succeed, try patching the winhvr.sys code.
     */
    if (   pGVM->nemr0.s.idHvPartition == HV_PARTITION_ID_INVALID
        && offJmpBack >= cbMinJmpPatch)
    {
        g_idVidSysFoundPartition = HV_PARTITION_ID_INVALID;
        g_hVidSysMatchThread = RTThreadNativeSelf();
        g_enmVidSysMatchProperty = pVCpu0->nem.s.uIoCtlBuf.GetProp.enmProperty = HvPartitionPropertyProcessorVendor;
        pVCpu0->nem.s.uIoCtlBuf.GetProp.uValue = 0;

        /*
         * Prepare the hook area: a copy of the original prologue followed by
         * a MOV RAX,imm64 / JMP RAX back into the remainder of the original
         * function.
         */
        uint8_t *pbDst = g_abNemR0WinHvrWinHvGetPartitionProperty_OriginalProlog;
        memcpy(pbDst, (uint8_t const *)(uintptr_t)g_pfnWinHvGetPartitionProperty, offJmpBack);
        pbDst += offJmpBack;

        *pbDst++ = 0x48; /* mov rax, imm64 */
        *pbDst++ = 0xb8;
        *(uint64_t *)pbDst = (uintptr_t)g_pfnWinHvGetPartitionProperty + offJmpBack;
        pbDst += sizeof(uint64_t);
        *pbDst++ = 0xff; /* jmp rax */
        *pbDst++ = 0xe0;
        *pbDst++ = 0xcc; /* int3 */

        /*
         * Patch the original.  We use cmpxchg16b here to avoid concurrency problems
         * (this also makes sure we don't trample over someone else doing similar
         * patching at the same time).
         */
        union
        {
            uint8_t ab[16];
            uint64_t au64[2];
        } Patch;
        memcpy(Patch.ab, Org.ab, sizeof(Patch));
        pbDst = Patch.ab;
        *pbDst++ = 0x48; /* mov rax, imm64 */
        *pbDst++ = 0xb8;
        *(uint64_t *)pbDst = (uintptr_t)nemR0WinHvrWinHvGetPartitionProperty;
        pbDst += sizeof(uint64_t);
        *pbDst++ = 0xff; /* jmp rax */
        *pbDst++ = 0xe0;

        int64_t ai64CmpCopy[2] = { Org.ai64[0], Org.ai64[1] }; /* paranoia */
        /* _InterlockedCompareExchange128 returns non-zero on successful exchange. */
        if (_InterlockedCompareExchange128((__int64 volatile *)pbTargetAlias, Patch.au64[1], Patch.au64[0], ai64CmpCopy) != 0)
        {
            rcNt = nemR0NtPerformIoControl(pGVM, pVCpu0, pGVM->nemr0.s.IoCtlGetPartitionProperty.uFunction,
                                           &pVCpu0->nem.s.uIoCtlBuf.GetProp.enmProperty,
                                           sizeof(pVCpu0->nem.s.uIoCtlBuf.GetProp.enmProperty),
                                           &pVCpu0->nem.s.uIoCtlBuf.GetProp.uValue,
                                           sizeof(pVCpu0->nem.s.uIoCtlBuf.GetProp.uValue));

            /* Restore the original bytes, retrying a few times in case
               someone else modified them in the meantime. */
            for (uint32_t cFailures = 0; cFailures < 10; cFailures++)
            {
                ai64CmpCopy[0] = Patch.au64[0]; /* paranoia */
                ai64CmpCopy[1] = Patch.au64[1];
                if (_InterlockedCompareExchange128((__int64 volatile *)pbTargetAlias, Org.ai64[1], Org.ai64[0], ai64CmpCopy) != 0)
                {
                    if (cFailures > 0)
                        LogRel(("NEMR0: Succeeded on try #%u.\n", cFailures));
                    break;
                }
                LogRel(("NEMR0: Patch restore failure #%u: %.16Rhxs, expected %.16Rhxs\n",
                        cFailures + 1, &ai64CmpCopy[0], &Patch.au64[0]));
                RTThreadSleep(1000);
            }

            HV_PARTITION_ID idHvPartition = g_idVidSysFoundPartition;
            LogRel(("NEMR0: WinHvGetPartitionProperty trick #2 yielded: rcNt=%#x idHvPartition=%#RX64 uValue=%#RX64\n",
                    rcNt, idHvPartition, pVCpu0->nem.s.uIoCtlBuf.GetProp.uValue));
            pGVM->nemr0.s.idHvPartition = idHvPartition;

        }
        else
        {
            LogRel(("NEMR0: Failed to install WinHvGetPartitionProperty patch: %.16Rhxs, expected %.16Rhxs\n",
                    &ai64CmpCopy[0], &Org.ai64[0]));
            rc = VERR_NEM_INIT_FAILED;
        }
    }

    RTCritSectLeave(&g_VidSysCritSect);

    return rc;
}
796
797
798/**
799 * 2nd part of the initialization, after we've got a partition handle.
800 *
801 * @returns VBox status code.
802 * @param pGVM The ring-0 VM handle.
803 * @thread EMT(0)
804 */
805VMMR0_INT_DECL(int) NEMR0InitVMPart2(PGVM pGVM)
806{
807 int rc = GVMMR0ValidateGVMandEMT(pGVM, 0);
808 AssertRCReturn(rc, rc);
809 SUPR0Printf("NEMR0InitVMPart2\n"); LogRel(("2: NEMR0InitVMPart2\n"));
810#ifdef NEM_WIN_WITH_RING0_RUNLOOP
811 Assert(pGVM->nemr0.s.fMayUseRing0Runloop == false);
812#endif
813
814 /*
815 * Copy and validate the I/O control information from ring-3.
816 */
817 NEMWINIOCTL Copy = pGVM->nem.s.IoCtlGetHvPartitionId;
818 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);
819 AssertLogRelReturn(Copy.cbInput == 0, VERR_NEM_INIT_FAILED);
820 AssertLogRelReturn(Copy.cbOutput == sizeof(HV_PARTITION_ID), VERR_NEM_INIT_FAILED);
821 pGVM->nemr0.s.IoCtlGetHvPartitionId = Copy;
822
823 Copy = pGVM->nem.s.IoCtlGetPartitionProperty;
824 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);
825 AssertLogRelReturn(Copy.cbInput == sizeof(VID_PARTITION_PROPERTY_CODE), VERR_NEM_INIT_FAILED);
826 AssertLogRelReturn(Copy.cbOutput == sizeof(HV_PARTITION_PROPERTY), VERR_NEM_INIT_FAILED);
827 pGVM->nemr0.s.IoCtlGetPartitionProperty = Copy;
828
829#ifdef NEM_WIN_WITH_RING0_RUNLOOP
830 pGVM->nemr0.s.fMayUseRing0Runloop = pGVM->nem.s.fUseRing0Runloop;
831
832 Copy = pGVM->nem.s.IoCtlStartVirtualProcessor;
833 AssertLogRelStmt(Copy.uFunction != 0, rc = VERR_NEM_INIT_FAILED);
834 AssertLogRelStmt(Copy.cbInput == sizeof(HV_VP_INDEX), rc = VERR_NEM_INIT_FAILED);
835 AssertLogRelStmt(Copy.cbOutput == 0, rc = VERR_NEM_INIT_FAILED);
836 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlGetHvPartitionId.uFunction, rc = VERR_NEM_INIT_FAILED);
837 if (RT_SUCCESS(rc))
838 pGVM->nemr0.s.IoCtlStartVirtualProcessor = Copy;
839
840 Copy = pGVM->nem.s.IoCtlStopVirtualProcessor;
841 AssertLogRelStmt(Copy.uFunction != 0, rc = VERR_NEM_INIT_FAILED);
842 AssertLogRelStmt(Copy.cbInput == sizeof(HV_VP_INDEX), rc = VERR_NEM_INIT_FAILED);
843 AssertLogRelStmt(Copy.cbOutput == 0, rc = VERR_NEM_INIT_FAILED);
844 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlGetHvPartitionId.uFunction, rc = VERR_NEM_INIT_FAILED);
845 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlStartVirtualProcessor.uFunction, rc = VERR_NEM_INIT_FAILED);
846 if (RT_SUCCESS(rc))
847 pGVM->nemr0.s.IoCtlStopVirtualProcessor = Copy;
848
849 Copy = pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext;
850 AssertLogRelStmt(Copy.uFunction != 0, rc = VERR_NEM_INIT_FAILED);
851 AssertLogRelStmt( Copy.cbInput == sizeof(VID_IOCTL_INPUT_MESSAGE_SLOT_HANDLE_AND_GET_NEXT)
852 || Copy.cbInput == RT_OFFSETOF(VID_IOCTL_INPUT_MESSAGE_SLOT_HANDLE_AND_GET_NEXT, cMillies),
853 rc = VERR_NEM_INIT_FAILED);
854 AssertLogRelStmt(Copy.cbOutput == 0, VERR_NEM_INIT_FAILED);
855 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlGetHvPartitionId.uFunction, rc = VERR_NEM_INIT_FAILED);
856 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlStartVirtualProcessor.uFunction, rc = VERR_NEM_INIT_FAILED);
857 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlStopVirtualProcessor.uFunction, rc = VERR_NEM_INIT_FAILED);
858 if (RT_SUCCESS(rc))
859 pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext = Copy;
860#endif
861
862 if ( RT_SUCCESS(rc)
863 || !pGVM->nem.s.fUseRing0Runloop)
864 {
865 /*
866 * Setup of an I/O control context for the partition handle for later use.
867 */
868 rc = SUPR0IoCtlSetupForHandle(pGVM->pSession, pGVM->nem.s.hPartitionDevice, 0, &pGVM->nemr0.s.pIoCtlCtx);
869 AssertLogRelRCReturn(rc, rc);
870 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
871 {
872 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
873 pGVCpu->nemr0.s.offRing3ConversionDelta = (uintptr_t)pGVM->aCpus[idCpu].pVCpuR3 - (uintptr_t)pGVCpu;
874 }
875
876 /*
877 * Get the partition ID.
878 */
879 PVMCPUCC pVCpu0 = &pGVM->aCpus[0];
880 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pVCpu0, pGVM->nemr0.s.IoCtlGetHvPartitionId.uFunction, NULL, 0,
881 &pVCpu0->nem.s.uIoCtlBuf.idPartition, sizeof(pVCpu0->nem.s.uIoCtlBuf.idPartition));
882#if 0
883 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("IoCtlGetHvPartitionId failed: %#x\n", rcNt), VERR_NEM_INIT_FAILED);
884 pGVM->nemr0.s.idHvPartition = pVCpu0->nem.s.uIoCtlBuf.idPartition;
885#else
886 /*
887 * Since 2021 (Win11) the above I/O control doesn't work on exo-partitions
888 * so we have to go to extremes to get at it. Sigh.
889 */
890 if ( !NT_SUCCESS(rcNt)
891 || pVCpu0->nem.s.uIoCtlBuf.idPartition == HV_PARTITION_ID_INVALID)
892 {
893 LogRel(("IoCtlGetHvPartitionId failed: r0=%#RX64, r3=%#RX64, rcNt=%#x\n",
894 pGVM->nemr0.s.idHvPartition, pGVM->nem.s.idHvPartition, rcNt));
895
896 RTR0MEMOBJ ahMemObjs[6]
897 = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ };
898 rc = nemR0InitVMPart2DontWannaDoTheseUglyPartitionIdFallbacks(pGVM, ahMemObjs);
899 size_t i = RT_ELEMENTS(ahMemObjs);
900 while (i-- > 0)
901 RTR0MemObjFree(ahMemObjs[i], false /*fFreeMappings*/);
902 }
903 if (pGVM->nem.s.idHvPartition == HV_PARTITION_ID_INVALID)
904 pGVM->nem.s.idHvPartition = pGVM->nemr0.s.idHvPartition;
905#endif
906 AssertLogRelMsgReturn(pGVM->nemr0.s.idHvPartition == pGVM->nem.s.idHvPartition,
907 ("idHvPartition mismatch: r0=%#RX64, r3=%#RX64\n", pGVM->nemr0.s.idHvPartition, pGVM->nem.s.idHvPartition),
908 VERR_NEM_INIT_FAILED);
909 if (RT_SUCCESS(rc) && pGVM->nemr0.s.idHvPartition == HV_PARTITION_ID_INVALID)
910 rc = VERR_NEM_INIT_FAILED;
911 }
912
913 return rc;
914}
915
916
917/**
918 * Cleanup the NEM parts of the VM in ring-0.
919 *
920 * This is always called and must deal the state regardless of whether
921 * NEMR0InitVM() was called or not. So, take care here.
922 *
923 * @param pGVM The ring-0 VM handle.
924 */
925VMMR0_INT_DECL(void) NEMR0CleanupVM(PGVM pGVM)
926{
927 pGVM->nemr0.s.idHvPartition = HV_PARTITION_ID_INVALID;
928
929 /* Clean up I/O control context. */
930 if (pGVM->nemr0.s.pIoCtlCtx)
931 {
932 int rc = SUPR0IoCtlCleanup(pGVM->nemr0.s.pIoCtlCtx);
933 AssertRC(rc);
934 pGVM->nemr0.s.pIoCtlCtx = NULL;
935 }
936
937 /* Free the hypercall pages. */
938 VMCPUID i = pGVM->cCpus;
939 while (i-- > 0)
940 nemR0DeleteHypercallData(&pGVM->aCpus[i].nemr0.s.HypercallData);
941
942 /* The non-EMT one too. */
943 if (RTCritSectIsInitialized(&pGVM->nemr0.s.HypercallDataCritSect))
944 RTCritSectDelete(&pGVM->nemr0.s.HypercallDataCritSect);
945 nemR0DeleteHypercallData(&pGVM->nemr0.s.HypercallData);
946}
947
948
#if 0 /* for debugging GPA unmapping. */
/**
 * Debug-only helper: reads 16 bytes of guest physical memory via the
 * HvCallReadGpa hypercall, logs the raw result, and breaks into the debugger.
 *
 * Dead code by default (inside \#if 0); only compiled when manually enabled.
 *
 * @returns VINF_SUCCESS on a zero hypercall status, VERR_READ_ERROR otherwise.
 * @param   pGVM    The ring-0 VM handle (supplies the partition ID).
 * @param   pGVCpu  The ring-0 VCPU handle (supplies the hypercall page).
 * @param   GCPhys  The guest physical address to read from.
 */
static int nemR3WinDummyReadGpa(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys)
{
    /* Input goes at the start of the hypercall page, output right after it. */
    PHV_INPUT_READ_GPA  pIn  = (PHV_INPUT_READ_GPA)pGVCpu->nemr0.s.pbHypercallData;
    PHV_OUTPUT_READ_GPA pOut = (PHV_OUTPUT_READ_GPA)(pIn + 1);
    pIn->PartitionId            = pGVM->nemr0.s.idHvPartition;
    pIn->VpIndex                = pGVCpu->idCpu;
    pIn->ByteCount              = 0x10;
    pIn->BaseGpa                = GCPhys;
    pIn->ControlFlags.AsUINT64  = 0;
    pIn->ControlFlags.CacheType = HvCacheTypeX64WriteCombining;
    /* Pre-fill the output with a recognizable pattern so untouched bytes stand out. */
    memset(pOut, 0xfe, sizeof(*pOut));
    uint64_t volatile uResult = g_pfnHvlInvokeHypercall(HvCallReadGpa, pGVCpu->nemr0.s.HCPhysHypercallData,
                                                        pGVCpu->nemr0.s.HCPhysHypercallData + sizeof(*pIn));
    LogRel(("nemR3WinDummyReadGpa: %RGp -> %#RX64; code=%u rsvd=%u abData=%.16Rhxs\n",
            GCPhys, uResult, pOut->AccessResult.ResultCode, pOut->AccessResult.Reserved, pOut->Data));
    __debugbreak();

    return uResult != 0 ? VERR_READ_ERROR : VINF_SUCCESS;
}
#endif
970
971
972/**
973 * Worker for NEMR0MapPages and others.
974 */
975NEM_TMPL_STATIC int nemR0WinMapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
976 uint32_t cPages, uint32_t fFlags)
977{
978 /*
979 * Validate.
980 */
981 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
982
983 AssertReturn(cPages > 0, VERR_OUT_OF_RANGE);
984 AssertReturn(cPages <= NEM_MAX_MAP_PAGES, VERR_OUT_OF_RANGE);
985 AssertReturn(!(fFlags & ~(HV_MAP_GPA_MAYBE_ACCESS_MASK & ~HV_MAP_GPA_DUNNO_ACCESS)), VERR_INVALID_FLAGS);
986 AssertMsgReturn(!(GCPhysDst & X86_PAGE_OFFSET_MASK), ("GCPhysDst=%RGp\n", GCPhysDst), VERR_OUT_OF_RANGE);
987 AssertReturn(GCPhysDst < _1E, VERR_OUT_OF_RANGE);
988 if (GCPhysSrc != GCPhysDst)
989 {
990 AssertMsgReturn(!(GCPhysSrc & X86_PAGE_OFFSET_MASK), ("GCPhysSrc=%RGp\n", GCPhysSrc), VERR_OUT_OF_RANGE);
991 AssertReturn(GCPhysSrc < _1E, VERR_OUT_OF_RANGE);
992 }
993
994 /*
995 * Compose and make the hypercall.
996 * Ring-3 is not allowed to fill in the host physical addresses of the call.
997 */
998 for (uint32_t iTries = 0;; iTries++)
999 {
1000 RTGCPHYS GCPhysSrcTmp = GCPhysSrc;
1001 HV_INPUT_MAP_GPA_PAGES *pMapPages = (HV_INPUT_MAP_GPA_PAGES *)pGVCpu->nemr0.s.HypercallData.pbPage;
1002 AssertPtrReturn(pMapPages, VERR_INTERNAL_ERROR_3);
1003 pMapPages->TargetPartitionId = pGVM->nemr0.s.idHvPartition;
1004 pMapPages->TargetGpaBase = GCPhysDst >> X86_PAGE_SHIFT;
1005 pMapPages->MapFlags = fFlags;
1006 pMapPages->u32ExplicitPadding = 0;
1007
1008 for (uint32_t iPage = 0; iPage < cPages; iPage++, GCPhysSrcTmp += X86_PAGE_SIZE)
1009 {
1010 RTHCPHYS HCPhys = NIL_RTGCPHYS;
1011 int rc = PGMPhysGCPhys2HCPhys(pGVM, GCPhysSrcTmp, &HCPhys);
1012 AssertRCReturn(rc, rc);
1013 pMapPages->PageList[iPage] = HCPhys >> X86_PAGE_SHIFT;
1014 }
1015
1016 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallMapGpaPages | ((uint64_t)cPages << 32),
1017 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0);
1018 Log6(("NEMR0MapPages: %RGp/%RGp L %u prot %#x -> %#RX64\n",
1019 GCPhysDst, GCPhysSrcTmp - cPages * X86_PAGE_SIZE, cPages, fFlags, uResult));
1020 if (uResult == ((uint64_t)cPages << 32))
1021 return VINF_SUCCESS;
1022
1023 /*
1024 * If the partition is out of memory, try donate another 512 pages to
1025 * it (2MB). VID.SYS does multiples of 512 pages, nothing smaller.
1026 */
1027 if ( uResult != HV_STATUS_INSUFFICIENT_MEMORY
1028 || iTries > 16
1029 || g_pfnWinHvDepositMemory == NULL)
1030 {
1031 LogRel(("g_pfnHvlInvokeHypercall/MapGpaPages -> %#RX64\n", uResult));
1032 return VERR_NEM_MAP_PAGES_FAILED;
1033 }
1034
1035 size_t cPagesAdded = 0;
1036 NTSTATUS rcNt = g_pfnWinHvDepositMemory(pGVM->nemr0.s.idHvPartition, 512, 0, &cPagesAdded);
1037 if (!cPagesAdded)
1038 {
1039 LogRel(("g_pfnWinHvDepositMemory -> %#x / %#RX64\n", rcNt, uResult));
1040 return VERR_NEM_MAP_PAGES_FAILED;
1041 }
1042 }
1043}
1044
1045
1046/**
1047 * Maps pages into the guest physical address space.
1048 *
1049 * Generally the caller will be under the PGM lock already, so no extra effort
1050 * is needed to make sure all changes happens under it.
1051 *
1052 * @returns VBox status code.
1053 * @param pGVM The ring-0 VM handle.
1054 * @param idCpu The calling EMT. Necessary for getting the
1055 * hypercall page and arguments.
1056 * @thread EMT(idCpu)
1057 */
1058VMMR0_INT_DECL(int) NEMR0MapPages(PGVM pGVM, VMCPUID idCpu)
1059{
1060 /*
1061 * Unpack the call.
1062 */
1063 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
1064 if (RT_SUCCESS(rc))
1065 {
1066 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1067
1068 RTGCPHYS const GCPhysSrc = pGVCpu->nem.s.Hypercall.MapPages.GCPhysSrc;
1069 RTGCPHYS const GCPhysDst = pGVCpu->nem.s.Hypercall.MapPages.GCPhysDst;
1070 uint32_t const cPages = pGVCpu->nem.s.Hypercall.MapPages.cPages;
1071 HV_MAP_GPA_FLAGS const fFlags = pGVCpu->nem.s.Hypercall.MapPages.fFlags;
1072
1073 /*
1074 * Do the work.
1075 */
1076 rc = nemR0WinMapPages(pGVM, pGVCpu, GCPhysSrc, GCPhysDst, cPages, fFlags);
1077 }
1078 return rc;
1079}
1080
1081
1082/**
1083 * Worker for NEMR0UnmapPages and others.
1084 */
1085NEM_TMPL_STATIC int nemR0WinUnmapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys, uint32_t cPages)
1086{
1087 /*
1088 * Validate input.
1089 */
1090 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
1091
1092 AssertReturn(cPages > 0, VERR_OUT_OF_RANGE);
1093 AssertReturn(cPages <= NEM_MAX_UNMAP_PAGES, VERR_OUT_OF_RANGE);
1094 AssertMsgReturn(!(GCPhys & X86_PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_OUT_OF_RANGE);
1095 AssertReturn(GCPhys < _1E, VERR_OUT_OF_RANGE);
1096
1097 /*
1098 * Compose and make the hypercall.
1099 */
1100 HV_INPUT_UNMAP_GPA_PAGES *pUnmapPages = (HV_INPUT_UNMAP_GPA_PAGES *)pGVCpu->nemr0.s.HypercallData.pbPage;
1101 AssertPtrReturn(pUnmapPages, VERR_INTERNAL_ERROR_3);
1102 pUnmapPages->TargetPartitionId = pGVM->nemr0.s.idHvPartition;
1103 pUnmapPages->TargetGpaBase = GCPhys >> X86_PAGE_SHIFT;
1104 pUnmapPages->fFlags = 0;
1105
1106 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallUnmapGpaPages | ((uint64_t)cPages << 32),
1107 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0);
1108 Log6(("NEMR0UnmapPages: %RGp L %u -> %#RX64\n", GCPhys, cPages, uResult));
1109 if (uResult == ((uint64_t)cPages << 32))
1110 {
1111#if 1 /* Do we need to do this? Hopefully not... */
1112 uint64_t volatile uR = g_pfnHvlInvokeHypercall(HvCallUncommitGpaPages | ((uint64_t)cPages << 32),
1113 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0);
1114 AssertMsg(uR == ((uint64_t)cPages << 32), ("uR=%#RX64\n", uR)); NOREF(uR);
1115#endif
1116 return VINF_SUCCESS;
1117 }
1118
1119 LogRel(("g_pfnHvlInvokeHypercall/UnmapGpaPages -> %#RX64\n", uResult));
1120 return VERR_NEM_UNMAP_PAGES_FAILED;
1121}
1122
1123
1124/**
1125 * Unmaps pages from the guest physical address space.
1126 *
1127 * Generally the caller will be under the PGM lock already, so no extra effort
1128 * is needed to make sure all changes happens under it.
1129 *
1130 * @returns VBox status code.
1131 * @param pGVM The ring-0 VM handle.
1132 * @param idCpu The calling EMT. Necessary for getting the
1133 * hypercall page and arguments.
1134 * @thread EMT(idCpu)
1135 */
1136VMMR0_INT_DECL(int) NEMR0UnmapPages(PGVM pGVM, VMCPUID idCpu)
1137{
1138 /*
1139 * Unpack the call.
1140 */
1141 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
1142 if (RT_SUCCESS(rc))
1143 {
1144 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1145
1146 RTGCPHYS const GCPhys = pGVCpu->nem.s.Hypercall.UnmapPages.GCPhys;
1147 uint32_t const cPages = pGVCpu->nem.s.Hypercall.UnmapPages.cPages;
1148
1149 /*
1150 * Do the work.
1151 */
1152 rc = nemR0WinUnmapPages(pGVM, pGVCpu, GCPhys, cPages);
1153 }
1154 return rc;
1155}
1156
1157
1158#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
1159/**
1160 * Worker for NEMR0ExportState.
1161 *
1162 * Intention is to use it internally later.
1163 *
1164 * @returns VBox status code.
1165 * @param pGVM The ring-0 VM handle.
1166 * @param pGVCpu The ring-0 VCPU handle.
1167 * @param pCtx The CPU context structure to import into.
1168 */
1169NEM_TMPL_STATIC int nemR0WinExportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx)
1170{
1171 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
1172 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
1173 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
1174
1175 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
1176 pInput->VpIndex = pGVCpu->idCpu;
1177 pInput->RsvdZ = 0;
1178
1179 uint64_t const fWhat = ~pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK);
1180 if ( !fWhat
1181 && pGVCpu->nem.s.fCurrentInterruptWindows == pGVCpu->nem.s.fDesiredInterruptWindows)
1182 return VINF_SUCCESS;
1183 uintptr_t iReg = 0;
1184
1185 /* GPRs */
1186 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1187 {
1188 if (fWhat & CPUMCTX_EXTRN_RAX)
1189 {
1190 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1191 pInput->Elements[iReg].Name = HvX64RegisterRax;
1192 pInput->Elements[iReg].Value.Reg64 = pCtx->rax;
1193 iReg++;
1194 }
1195 if (fWhat & CPUMCTX_EXTRN_RCX)
1196 {
1197 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1198 pInput->Elements[iReg].Name = HvX64RegisterRcx;
1199 pInput->Elements[iReg].Value.Reg64 = pCtx->rcx;
1200 iReg++;
1201 }
1202 if (fWhat & CPUMCTX_EXTRN_RDX)
1203 {
1204 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1205 pInput->Elements[iReg].Name = HvX64RegisterRdx;
1206 pInput->Elements[iReg].Value.Reg64 = pCtx->rdx;
1207 iReg++;
1208 }
1209 if (fWhat & CPUMCTX_EXTRN_RBX)
1210 {
1211 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1212 pInput->Elements[iReg].Name = HvX64RegisterRbx;
1213 pInput->Elements[iReg].Value.Reg64 = pCtx->rbx;
1214 iReg++;
1215 }
1216 if (fWhat & CPUMCTX_EXTRN_RSP)
1217 {
1218 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1219 pInput->Elements[iReg].Name = HvX64RegisterRsp;
1220 pInput->Elements[iReg].Value.Reg64 = pCtx->rsp;
1221 iReg++;
1222 }
1223 if (fWhat & CPUMCTX_EXTRN_RBP)
1224 {
1225 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1226 pInput->Elements[iReg].Name = HvX64RegisterRbp;
1227 pInput->Elements[iReg].Value.Reg64 = pCtx->rbp;
1228 iReg++;
1229 }
1230 if (fWhat & CPUMCTX_EXTRN_RSI)
1231 {
1232 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1233 pInput->Elements[iReg].Name = HvX64RegisterRsi;
1234 pInput->Elements[iReg].Value.Reg64 = pCtx->rsi;
1235 iReg++;
1236 }
1237 if (fWhat & CPUMCTX_EXTRN_RDI)
1238 {
1239 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1240 pInput->Elements[iReg].Name = HvX64RegisterRdi;
1241 pInput->Elements[iReg].Value.Reg64 = pCtx->rdi;
1242 iReg++;
1243 }
1244 if (fWhat & CPUMCTX_EXTRN_R8_R15)
1245 {
1246 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1247 pInput->Elements[iReg].Name = HvX64RegisterR8;
1248 pInput->Elements[iReg].Value.Reg64 = pCtx->r8;
1249 iReg++;
1250 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1251 pInput->Elements[iReg].Name = HvX64RegisterR9;
1252 pInput->Elements[iReg].Value.Reg64 = pCtx->r9;
1253 iReg++;
1254 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1255 pInput->Elements[iReg].Name = HvX64RegisterR10;
1256 pInput->Elements[iReg].Value.Reg64 = pCtx->r10;
1257 iReg++;
1258 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1259 pInput->Elements[iReg].Name = HvX64RegisterR11;
1260 pInput->Elements[iReg].Value.Reg64 = pCtx->r11;
1261 iReg++;
1262 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1263 pInput->Elements[iReg].Name = HvX64RegisterR12;
1264 pInput->Elements[iReg].Value.Reg64 = pCtx->r12;
1265 iReg++;
1266 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1267 pInput->Elements[iReg].Name = HvX64RegisterR13;
1268 pInput->Elements[iReg].Value.Reg64 = pCtx->r13;
1269 iReg++;
1270 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1271 pInput->Elements[iReg].Name = HvX64RegisterR14;
1272 pInput->Elements[iReg].Value.Reg64 = pCtx->r14;
1273 iReg++;
1274 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1275 pInput->Elements[iReg].Name = HvX64RegisterR15;
1276 pInput->Elements[iReg].Value.Reg64 = pCtx->r15;
1277 iReg++;
1278 }
1279 }
1280
1281 /* RIP & Flags */
1282 if (fWhat & CPUMCTX_EXTRN_RIP)
1283 {
1284 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1285 pInput->Elements[iReg].Name = HvX64RegisterRip;
1286 pInput->Elements[iReg].Value.Reg64 = pCtx->rip;
1287 iReg++;
1288 }
1289 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
1290 {
1291 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1292 pInput->Elements[iReg].Name = HvX64RegisterRflags;
1293 pInput->Elements[iReg].Value.Reg64 = pCtx->rflags.u;
1294 iReg++;
1295 }
1296
1297 /* Segments */
1298# define COPY_OUT_SEG(a_idx, a_enmName, a_SReg) \
1299 do { \
1300 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[a_idx]); \
1301 pInput->Elements[a_idx].Name = a_enmName; \
1302 pInput->Elements[a_idx].Value.Segment.Base = (a_SReg).u64Base; \
1303 pInput->Elements[a_idx].Value.Segment.Limit = (a_SReg).u32Limit; \
1304 pInput->Elements[a_idx].Value.Segment.Selector = (a_SReg).Sel; \
1305 pInput->Elements[a_idx].Value.Segment.Attributes = (a_SReg).Attr.u; \
1306 } while (0)
1307 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
1308 {
1309 if (fWhat & CPUMCTX_EXTRN_CS)
1310 {
1311 COPY_OUT_SEG(iReg, HvX64RegisterCs, pCtx->cs);
1312 iReg++;
1313 }
1314 if (fWhat & CPUMCTX_EXTRN_ES)
1315 {
1316 COPY_OUT_SEG(iReg, HvX64RegisterEs, pCtx->es);
1317 iReg++;
1318 }
1319 if (fWhat & CPUMCTX_EXTRN_SS)
1320 {
1321 COPY_OUT_SEG(iReg, HvX64RegisterSs, pCtx->ss);
1322 iReg++;
1323 }
1324 if (fWhat & CPUMCTX_EXTRN_DS)
1325 {
1326 COPY_OUT_SEG(iReg, HvX64RegisterDs, pCtx->ds);
1327 iReg++;
1328 }
1329 if (fWhat & CPUMCTX_EXTRN_FS)
1330 {
1331 COPY_OUT_SEG(iReg, HvX64RegisterFs, pCtx->fs);
1332 iReg++;
1333 }
1334 if (fWhat & CPUMCTX_EXTRN_GS)
1335 {
1336 COPY_OUT_SEG(iReg, HvX64RegisterGs, pCtx->gs);
1337 iReg++;
1338 }
1339 }
1340
1341 /* Descriptor tables & task segment. */
1342 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
1343 {
1344 if (fWhat & CPUMCTX_EXTRN_LDTR)
1345 {
1346 COPY_OUT_SEG(iReg, HvX64RegisterLdtr, pCtx->ldtr);
1347 iReg++;
1348 }
1349 if (fWhat & CPUMCTX_EXTRN_TR)
1350 {
1351 COPY_OUT_SEG(iReg, HvX64RegisterTr, pCtx->tr);
1352 iReg++;
1353 }
1354
1355 if (fWhat & CPUMCTX_EXTRN_IDTR)
1356 {
1357 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1358 pInput->Elements[iReg].Value.Table.Pad[0] = 0;
1359 pInput->Elements[iReg].Value.Table.Pad[1] = 0;
1360 pInput->Elements[iReg].Value.Table.Pad[2] = 0;
1361 pInput->Elements[iReg].Name = HvX64RegisterIdtr;
1362 pInput->Elements[iReg].Value.Table.Limit = pCtx->idtr.cbIdt;
1363 pInput->Elements[iReg].Value.Table.Base = pCtx->idtr.pIdt;
1364 iReg++;
1365 }
1366 if (fWhat & CPUMCTX_EXTRN_GDTR)
1367 {
1368 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1369 pInput->Elements[iReg].Value.Table.Pad[0] = 0;
1370 pInput->Elements[iReg].Value.Table.Pad[1] = 0;
1371 pInput->Elements[iReg].Value.Table.Pad[2] = 0;
1372 pInput->Elements[iReg].Name = HvX64RegisterGdtr;
1373 pInput->Elements[iReg].Value.Table.Limit = pCtx->gdtr.cbGdt;
1374 pInput->Elements[iReg].Value.Table.Base = pCtx->gdtr.pGdt;
1375 iReg++;
1376 }
1377 }
1378
1379 /* Control registers. */
1380 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
1381 {
1382 if (fWhat & CPUMCTX_EXTRN_CR0)
1383 {
1384 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1385 pInput->Elements[iReg].Name = HvX64RegisterCr0;
1386 pInput->Elements[iReg].Value.Reg64 = pCtx->cr0;
1387 iReg++;
1388 }
1389 if (fWhat & CPUMCTX_EXTRN_CR2)
1390 {
1391 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1392 pInput->Elements[iReg].Name = HvX64RegisterCr2;
1393 pInput->Elements[iReg].Value.Reg64 = pCtx->cr2;
1394 iReg++;
1395 }
1396 if (fWhat & CPUMCTX_EXTRN_CR3)
1397 {
1398 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1399 pInput->Elements[iReg].Name = HvX64RegisterCr3;
1400 pInput->Elements[iReg].Value.Reg64 = pCtx->cr3;
1401 iReg++;
1402 }
1403 if (fWhat & CPUMCTX_EXTRN_CR4)
1404 {
1405 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1406 pInput->Elements[iReg].Name = HvX64RegisterCr4;
1407 pInput->Elements[iReg].Value.Reg64 = pCtx->cr4;
1408 iReg++;
1409 }
1410 }
1411 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
1412 {
1413 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1414 pInput->Elements[iReg].Name = HvX64RegisterCr8;
1415 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestCR8(pGVCpu);
1416 iReg++;
1417 }
1418
1419 /** @todo does HvX64RegisterXfem mean XCR0? What about the related MSR. */
1420
1421 /* Debug registers. */
1422/** @todo fixme. Figure out what the hyper-v version of KVM_SET_GUEST_DEBUG would be. */
1423 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
1424 {
1425 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1426 pInput->Elements[iReg].Name = HvX64RegisterDr0;
1427 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR0(pGVCpu);
1428 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[0];
1429 iReg++;
1430 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1431 pInput->Elements[iReg].Name = HvX64RegisterDr1;
1432 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR1(pGVCpu);
1433 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[1];
1434 iReg++;
1435 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1436 pInput->Elements[iReg].Name = HvX64RegisterDr2;
1437 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR2(pGVCpu);
1438 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[2];
1439 iReg++;
1440 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1441 pInput->Elements[iReg].Name = HvX64RegisterDr3;
1442 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR3(pGVCpu);
1443 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[3];
1444 iReg++;
1445 }
1446 if (fWhat & CPUMCTX_EXTRN_DR6)
1447 {
1448 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1449 pInput->Elements[iReg].Name = HvX64RegisterDr6;
1450 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR6(pGVCpu);
1451 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[6];
1452 iReg++;
1453 }
1454 if (fWhat & CPUMCTX_EXTRN_DR7)
1455 {
1456 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1457 pInput->Elements[iReg].Name = HvX64RegisterDr7;
1458 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR7(pGVCpu);
1459 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[7];
1460 iReg++;
1461 }
1462
1463 /* Floating point state. */
1464 if (fWhat & CPUMCTX_EXTRN_X87)
1465 {
1466 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1467 pInput->Elements[iReg].Name = HvX64RegisterFpMmx0;
1468 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->XState.x87.aRegs[0].au64[0];
1469 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->XState.x87.aRegs[0].au64[1];
1470 iReg++;
1471 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1472 pInput->Elements[iReg].Name = HvX64RegisterFpMmx1;
1473 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->XState.x87.aRegs[1].au64[0];
1474 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->XState.x87.aRegs[1].au64[1];
1475 iReg++;
1476 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1477 pInput->Elements[iReg].Name = HvX64RegisterFpMmx2;
1478 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->XState.x87.aRegs[2].au64[0];
1479 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->XState.x87.aRegs[2].au64[1];
1480 iReg++;
1481 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1482 pInput->Elements[iReg].Name = HvX64RegisterFpMmx3;
1483 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->XState.x87.aRegs[3].au64[0];
1484 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->XState.x87.aRegs[3].au64[1];
1485 iReg++;
1486 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1487 pInput->Elements[iReg].Name = HvX64RegisterFpMmx4;
1488 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->XState.x87.aRegs[4].au64[0];
1489 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->XState.x87.aRegs[4].au64[1];
1490 iReg++;
1491 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1492 pInput->Elements[iReg].Name = HvX64RegisterFpMmx5;
1493 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->XState.x87.aRegs[5].au64[0];
1494 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->XState.x87.aRegs[5].au64[1];
1495 iReg++;
1496 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1497 pInput->Elements[iReg].Name = HvX64RegisterFpMmx6;
1498 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->XState.x87.aRegs[6].au64[0];
1499 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->XState.x87.aRegs[6].au64[1];
1500 iReg++;
1501 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1502 pInput->Elements[iReg].Name = HvX64RegisterFpMmx7;
1503 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->XState.x87.aRegs[7].au64[0];
1504 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->XState.x87.aRegs[7].au64[1];
1505 iReg++;
1506
1507 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1508 pInput->Elements[iReg].Name = HvX64RegisterFpControlStatus;
1509 pInput->Elements[iReg].Value.FpControlStatus.FpControl = pCtx->XState.x87.FCW;
1510 pInput->Elements[iReg].Value.FpControlStatus.FpStatus = pCtx->XState.x87.FSW;
1511 pInput->Elements[iReg].Value.FpControlStatus.FpTag = pCtx->XState.x87.FTW;
1512 pInput->Elements[iReg].Value.FpControlStatus.Reserved = pCtx->XState.x87.FTW >> 8;
1513 pInput->Elements[iReg].Value.FpControlStatus.LastFpOp = pCtx->XState.x87.FOP;
1514 pInput->Elements[iReg].Value.FpControlStatus.LastFpRip = (pCtx->XState.x87.FPUIP)
1515 | ((uint64_t)pCtx->XState.x87.CS << 32)
1516 | ((uint64_t)pCtx->XState.x87.Rsrvd1 << 48);
1517 iReg++;
1518/** @todo we've got trouble if if we try write just SSE w/o X87. */
1519 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1520 pInput->Elements[iReg].Name = HvX64RegisterXmmControlStatus;
1521 pInput->Elements[iReg].Value.XmmControlStatus.LastFpRdp = (pCtx->XState.x87.FPUDP)
1522 | ((uint64_t)pCtx->XState.x87.DS << 32)
1523 | ((uint64_t)pCtx->XState.x87.Rsrvd2 << 48);
1524 pInput->Elements[iReg].Value.XmmControlStatus.XmmStatusControl = pCtx->XState.x87.MXCSR;
1525 pInput->Elements[iReg].Value.XmmControlStatus.XmmStatusControlMask = pCtx->XState.x87.MXCSR_MASK; /** @todo ??? (Isn't this an output field?) */
1526 iReg++;
1527 }
1528
1529 /* Vector state. */
1530 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
1531 {
1532 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1533 pInput->Elements[iReg].Name = HvX64RegisterXmm0;
1534 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[0].uXmm.s.Lo;
1535 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[0].uXmm.s.Hi;
1536 iReg++;
1537 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1538 pInput->Elements[iReg].Name = HvX64RegisterXmm1;
1539 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[1].uXmm.s.Lo;
1540 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[1].uXmm.s.Hi;
1541 iReg++;
1542 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1543 pInput->Elements[iReg].Name = HvX64RegisterXmm2;
1544 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[2].uXmm.s.Lo;
1545 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[2].uXmm.s.Hi;
1546 iReg++;
1547 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1548 pInput->Elements[iReg].Name = HvX64RegisterXmm3;
1549 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[3].uXmm.s.Lo;
1550 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[3].uXmm.s.Hi;
1551 iReg++;
1552 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1553 pInput->Elements[iReg].Name = HvX64RegisterXmm4;
1554 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[4].uXmm.s.Lo;
1555 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[4].uXmm.s.Hi;
1556 iReg++;
1557 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1558 pInput->Elements[iReg].Name = HvX64RegisterXmm5;
1559 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[5].uXmm.s.Lo;
1560 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[5].uXmm.s.Hi;
1561 iReg++;
1562 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1563 pInput->Elements[iReg].Name = HvX64RegisterXmm6;
1564 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[6].uXmm.s.Lo;
1565 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[6].uXmm.s.Hi;
1566 iReg++;
1567 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1568 pInput->Elements[iReg].Name = HvX64RegisterXmm7;
1569 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[7].uXmm.s.Lo;
1570 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[7].uXmm.s.Hi;
1571 iReg++;
1572 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1573 pInput->Elements[iReg].Name = HvX64RegisterXmm8;
1574 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[8].uXmm.s.Lo;
1575 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[8].uXmm.s.Hi;
1576 iReg++;
1577 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1578 pInput->Elements[iReg].Name = HvX64RegisterXmm9;
1579 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[9].uXmm.s.Lo;
1580 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[9].uXmm.s.Hi;
1581 iReg++;
1582 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1583 pInput->Elements[iReg].Name = HvX64RegisterXmm10;
1584 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[10].uXmm.s.Lo;
1585 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[10].uXmm.s.Hi;
1586 iReg++;
1587 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1588 pInput->Elements[iReg].Name = HvX64RegisterXmm11;
1589 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[11].uXmm.s.Lo;
1590 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[11].uXmm.s.Hi;
1591 iReg++;
1592 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1593 pInput->Elements[iReg].Name = HvX64RegisterXmm12;
1594 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[12].uXmm.s.Lo;
1595 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[12].uXmm.s.Hi;
1596 iReg++;
1597 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1598 pInput->Elements[iReg].Name = HvX64RegisterXmm13;
1599 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[13].uXmm.s.Lo;
1600 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[13].uXmm.s.Hi;
1601 iReg++;
1602 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1603 pInput->Elements[iReg].Name = HvX64RegisterXmm14;
1604 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[14].uXmm.s.Lo;
1605 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[14].uXmm.s.Hi;
1606 iReg++;
1607 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1608 pInput->Elements[iReg].Name = HvX64RegisterXmm15;
1609 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[15].uXmm.s.Lo;
1610 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[15].uXmm.s.Hi;
1611 iReg++;
1612 }
1613
1614 /* MSRs */
1615 // HvX64RegisterTsc - don't touch
1616 if (fWhat & CPUMCTX_EXTRN_EFER)
1617 {
1618 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1619 pInput->Elements[iReg].Name = HvX64RegisterEfer;
1620 pInput->Elements[iReg].Value.Reg64 = pCtx->msrEFER;
1621 iReg++;
1622 }
1623 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1624 {
1625 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1626 pInput->Elements[iReg].Name = HvX64RegisterKernelGsBase;
1627 pInput->Elements[iReg].Value.Reg64 = pCtx->msrKERNELGSBASE;
1628 iReg++;
1629 }
1630 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1631 {
1632 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1633 pInput->Elements[iReg].Name = HvX64RegisterSysenterCs;
1634 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.cs;
1635 iReg++;
1636 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1637 pInput->Elements[iReg].Name = HvX64RegisterSysenterEip;
1638 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.eip;
1639 iReg++;
1640 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1641 pInput->Elements[iReg].Name = HvX64RegisterSysenterEsp;
1642 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.esp;
1643 iReg++;
1644 }
1645 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1646 {
1647 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1648 pInput->Elements[iReg].Name = HvX64RegisterStar;
1649 pInput->Elements[iReg].Value.Reg64 = pCtx->msrSTAR;
1650 iReg++;
1651 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1652 pInput->Elements[iReg].Name = HvX64RegisterLstar;
1653 pInput->Elements[iReg].Value.Reg64 = pCtx->msrLSTAR;
1654 iReg++;
1655 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1656 pInput->Elements[iReg].Name = HvX64RegisterCstar;
1657 pInput->Elements[iReg].Value.Reg64 = pCtx->msrCSTAR;
1658 iReg++;
1659 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1660 pInput->Elements[iReg].Name = HvX64RegisterSfmask;
1661 pInput->Elements[iReg].Value.Reg64 = pCtx->msrSFMASK;
1662 iReg++;
1663 }
1664 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1665 {
1666 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1667 pInput->Elements[iReg].Name = HvX64RegisterApicBase;
1668 pInput->Elements[iReg].Value.Reg64 = APICGetBaseMsrNoCheck(pGVCpu);
1669 iReg++;
1670 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1671 pInput->Elements[iReg].Name = HvX64RegisterPat;
1672 pInput->Elements[iReg].Value.Reg64 = pCtx->msrPAT;
1673 iReg++;
1674# if 0 /** @todo HvX64RegisterMtrrCap is read only? Seems it's not even readable. */
1675 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1676 pInput->Elements[iReg].Name = HvX64RegisterMtrrCap;
1677 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestIa32MtrrCap(pGVCpu);
1678 iReg++;
1679# endif
1680
1681 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pGVCpu);
1682
1683 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1684 pInput->Elements[iReg].Name = HvX64RegisterMtrrDefType;
1685 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrDefType;
1686 iReg++;
1687
1688 /** @todo we dont keep state for HvX64RegisterMtrrPhysBaseX and HvX64RegisterMtrrPhysMaskX */
1689
1690 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1691 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix64k00000;
1692 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix64K_00000;
1693 iReg++;
1694 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1695 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix16k80000;
1696 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix16K_80000;
1697 iReg++;
1698 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1699 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix16kA0000;
1700 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix16K_A0000;
1701 iReg++;
1702 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1703 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kC0000;
1704 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_C0000;
1705 iReg++;
1706 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1707 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kC8000;
1708 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_C8000;
1709 iReg++;
1710 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1711 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kD0000;
1712 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_D0000;
1713 iReg++;
1714 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1715 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kD8000;
1716 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_D8000;
1717 iReg++;
1718 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1719 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kE0000;
1720 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_E0000;
1721 iReg++;
1722 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1723 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kE8000;
1724 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_E8000;
1725 iReg++;
1726 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1727 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kF0000;
1728 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_F0000;
1729 iReg++;
1730 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1731 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kF8000;
1732 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_F8000;
1733 iReg++;
1734 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1735 pInput->Elements[iReg].Name = HvX64RegisterTscAux;
1736 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.TscAux;
1737 iReg++;
1738
1739# if 0 /** @todo Why can't we write these on Intel systems? Not that we really care... */
1740 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM);
1741 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
1742 {
1743 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1744 pInput->Elements[iReg].Name = HvX64RegisterIa32MiscEnable;
1745 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MiscEnable;
1746 iReg++;
1747 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1748 pInput->Elements[iReg].Name = HvX64RegisterIa32FeatureControl;
1749 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestIa32FeatureControl(pGVCpu);
1750 iReg++;
1751 }
1752# endif
1753 }
1754
1755 /* event injection (clear it). */
1756 if (fWhat & CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)
1757 {
1758 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1759 pInput->Elements[iReg].Name = HvRegisterPendingInterruption;
1760 pInput->Elements[iReg].Value.Reg64 = 0;
1761 iReg++;
1762 }
1763
1764 /* Interruptibility state. This can get a little complicated since we get
1765 half of the state via HV_X64_VP_EXECUTION_STATE. */
1766 if ( (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
1767 == (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI) )
1768 {
1769 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1770 pInput->Elements[iReg].Name = HvRegisterInterruptState;
1771 pInput->Elements[iReg].Value.Reg64 = 0;
1772 if ( VMCPU_FF_IS_SET(pGVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1773 && EMGetInhibitInterruptsPC(pGVCpu) == pCtx->rip)
1774 pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1;
1775 if (VMCPU_FF_IS_SET(pGVCpu, VMCPU_FF_BLOCK_NMIS))
1776 pInput->Elements[iReg].Value.InterruptState.NmiMasked = 1;
1777 iReg++;
1778 }
1779 else if (fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT)
1780 {
1781 if ( pGVCpu->nem.s.fLastInterruptShadow
1782 || ( VMCPU_FF_IS_SET(pGVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1783 && EMGetInhibitInterruptsPC(pGVCpu) == pCtx->rip))
1784 {
1785 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1786 pInput->Elements[iReg].Name = HvRegisterInterruptState;
1787 pInput->Elements[iReg].Value.Reg64 = 0;
1788 if ( VMCPU_FF_IS_SET(pGVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1789 && EMGetInhibitInterruptsPC(pGVCpu) == pCtx->rip)
1790 pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1;
1791 /** @todo Retrieve NMI state, currently assuming it's zero. (yes this may happen on I/O) */
1792 //if (VMCPU_FF_IS_ANY_SET(pGVCpu, VMCPU_FF_BLOCK_NMIS))
1793 // pInput->Elements[iReg].Value.InterruptState.NmiMasked = 1;
1794 iReg++;
1795 }
1796 }
1797 else
1798 Assert(!(fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI));
1799
1800 /* Interrupt windows. Always set if active as Hyper-V seems to be forgetful. */
1801 uint8_t const fDesiredIntWin = pGVCpu->nem.s.fDesiredInterruptWindows;
1802 if ( fDesiredIntWin
1803 || pGVCpu->nem.s.fCurrentInterruptWindows != fDesiredIntWin)
1804 {
1805 pGVCpu->nem.s.fCurrentInterruptWindows = pGVCpu->nem.s.fDesiredInterruptWindows;
1806 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1807 pInput->Elements[iReg].Name = HvX64RegisterDeliverabilityNotifications;
1808 pInput->Elements[iReg].Value.DeliverabilityNotifications.AsUINT64 = fDesiredIntWin;
1809 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.NmiNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_NMI));
1810 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.InterruptNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_REGULAR));
1811 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.InterruptPriority == (fDesiredIntWin & NEM_WIN_INTW_F_PRIO_MASK) >> NEM_WIN_INTW_F_PRIO_SHIFT);
1812 iReg++;
1813 }
1814
1815 /// @todo HvRegisterPendingEvent0
1816 /// @todo HvRegisterPendingEvent1
1817
1818 /*
1819 * Set the registers.
1820 */
1821 Assert((uintptr_t)&pInput->Elements[iReg] - (uintptr_t)pGVCpu->nemr0.s.HypercallData.pbPage < PAGE_SIZE); /* max is 127 */
1822
1823 /*
1824 * Make the hypercall.
1825 */
1826 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, iReg),
1827 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0 /*GCPhysOutput*/);
1828 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(iReg),
1829 ("uResult=%RX64 iRegs=%#x\n", uResult, iReg),
1830 VERR_NEM_SET_REGISTERS_FAILED);
1831 //LogFlow(("nemR0WinExportState: uResult=%#RX64 iReg=%zu fWhat=%#018RX64 fExtrn=%#018RX64 -> %#018RX64\n", uResult, iReg, fWhat, pCtx->fExtrn,
1832 // pCtx->fExtrn | CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM ));
1833 pCtx->fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM;
1834 return VINF_SUCCESS;
1835}
1836#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
1837
1838
1839/**
1840 * Export the state to the native API (out of CPUMCTX).
1841 *
1842 * @returns VBox status code
1843 * @param pGVM The ring-0 VM handle.
1844 * @param idCpu The calling EMT. Necessary for getting the
1845 * hypercall page and arguments.
1846 */
1847VMMR0_INT_DECL(int) NEMR0ExportState(PGVM pGVM, VMCPUID idCpu)
1848{
1849#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
1850 /*
1851 * Validate the call.
1852 */
1853 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
1854 if (RT_SUCCESS(rc))
1855 {
1856 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1857 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
1858
1859 /*
1860 * Call worker.
1861 */
1862 rc = nemR0WinExportState(pGVM, pGVCpu, &pGVCpu->cpum.GstCtx);
1863 }
1864 return rc;
1865#else
1866 RT_NOREF(pGVM, idCpu);
1867 return VERR_NOT_IMPLEMENTED;
1868#endif
1869}
1870
1871
1872#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
1873/**
1874 * Worker for NEMR0ImportState.
1875 *
1876 * Intention is to use it internally later.
1877 *
1878 * @returns VBox status code.
1879 * @param pGVM The ring-0 VM handle.
1880 * @param pGVCpu The ring-0 VCPU handle.
1881 * @param pCtx The CPU context structure to import into.
1882 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
1883 * @param fCanUpdateCr3 Whether it's safe to update CR3 or not.
1884 */
1885NEM_TMPL_STATIC int nemR0WinImportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat, bool fCanUpdateCr3)
1886{
1887 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
1888 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
1889 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
1890 Assert(pCtx == &pGVCpu->cpum.GstCtx);
1891
1892 fWhat &= pCtx->fExtrn;
1893
1894 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
1895 pInput->VpIndex = pGVCpu->idCpu;
1896 pInput->fFlags = 0;
1897
1898 /* GPRs */
1899 uintptr_t iReg = 0;
1900 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1901 {
1902 if (fWhat & CPUMCTX_EXTRN_RAX)
1903 pInput->Names[iReg++] = HvX64RegisterRax;
1904 if (fWhat & CPUMCTX_EXTRN_RCX)
1905 pInput->Names[iReg++] = HvX64RegisterRcx;
1906 if (fWhat & CPUMCTX_EXTRN_RDX)
1907 pInput->Names[iReg++] = HvX64RegisterRdx;
1908 if (fWhat & CPUMCTX_EXTRN_RBX)
1909 pInput->Names[iReg++] = HvX64RegisterRbx;
1910 if (fWhat & CPUMCTX_EXTRN_RSP)
1911 pInput->Names[iReg++] = HvX64RegisterRsp;
1912 if (fWhat & CPUMCTX_EXTRN_RBP)
1913 pInput->Names[iReg++] = HvX64RegisterRbp;
1914 if (fWhat & CPUMCTX_EXTRN_RSI)
1915 pInput->Names[iReg++] = HvX64RegisterRsi;
1916 if (fWhat & CPUMCTX_EXTRN_RDI)
1917 pInput->Names[iReg++] = HvX64RegisterRdi;
1918 if (fWhat & CPUMCTX_EXTRN_R8_R15)
1919 {
1920 pInput->Names[iReg++] = HvX64RegisterR8;
1921 pInput->Names[iReg++] = HvX64RegisterR9;
1922 pInput->Names[iReg++] = HvX64RegisterR10;
1923 pInput->Names[iReg++] = HvX64RegisterR11;
1924 pInput->Names[iReg++] = HvX64RegisterR12;
1925 pInput->Names[iReg++] = HvX64RegisterR13;
1926 pInput->Names[iReg++] = HvX64RegisterR14;
1927 pInput->Names[iReg++] = HvX64RegisterR15;
1928 }
1929 }
1930
1931 /* RIP & Flags */
1932 if (fWhat & CPUMCTX_EXTRN_RIP)
1933 pInput->Names[iReg++] = HvX64RegisterRip;
1934 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
1935 pInput->Names[iReg++] = HvX64RegisterRflags;
1936
1937 /* Segments */
1938 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
1939 {
1940 if (fWhat & CPUMCTX_EXTRN_CS)
1941 pInput->Names[iReg++] = HvX64RegisterCs;
1942 if (fWhat & CPUMCTX_EXTRN_ES)
1943 pInput->Names[iReg++] = HvX64RegisterEs;
1944 if (fWhat & CPUMCTX_EXTRN_SS)
1945 pInput->Names[iReg++] = HvX64RegisterSs;
1946 if (fWhat & CPUMCTX_EXTRN_DS)
1947 pInput->Names[iReg++] = HvX64RegisterDs;
1948 if (fWhat & CPUMCTX_EXTRN_FS)
1949 pInput->Names[iReg++] = HvX64RegisterFs;
1950 if (fWhat & CPUMCTX_EXTRN_GS)
1951 pInput->Names[iReg++] = HvX64RegisterGs;
1952 }
1953
1954 /* Descriptor tables and the task segment. */
1955 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
1956 {
1957 if (fWhat & CPUMCTX_EXTRN_LDTR)
1958 pInput->Names[iReg++] = HvX64RegisterLdtr;
1959 if (fWhat & CPUMCTX_EXTRN_TR)
1960 pInput->Names[iReg++] = HvX64RegisterTr;
1961 if (fWhat & CPUMCTX_EXTRN_IDTR)
1962 pInput->Names[iReg++] = HvX64RegisterIdtr;
1963 if (fWhat & CPUMCTX_EXTRN_GDTR)
1964 pInput->Names[iReg++] = HvX64RegisterGdtr;
1965 }
1966
1967 /* Control registers. */
1968 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
1969 {
1970 if (fWhat & CPUMCTX_EXTRN_CR0)
1971 pInput->Names[iReg++] = HvX64RegisterCr0;
1972 if (fWhat & CPUMCTX_EXTRN_CR2)
1973 pInput->Names[iReg++] = HvX64RegisterCr2;
1974 if (fWhat & CPUMCTX_EXTRN_CR3)
1975 pInput->Names[iReg++] = HvX64RegisterCr3;
1976 if (fWhat & CPUMCTX_EXTRN_CR4)
1977 pInput->Names[iReg++] = HvX64RegisterCr4;
1978 }
1979 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
1980 pInput->Names[iReg++] = HvX64RegisterCr8;
1981
1982 /* Debug registers. */
1983 if (fWhat & CPUMCTX_EXTRN_DR7)
1984 pInput->Names[iReg++] = HvX64RegisterDr7;
1985 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
1986 {
1987 if (!(fWhat & CPUMCTX_EXTRN_DR7) && (pCtx->fExtrn & CPUMCTX_EXTRN_DR7))
1988 {
1989 fWhat |= CPUMCTX_EXTRN_DR7;
1990 pInput->Names[iReg++] = HvX64RegisterDr7;
1991 }
1992 pInput->Names[iReg++] = HvX64RegisterDr0;
1993 pInput->Names[iReg++] = HvX64RegisterDr1;
1994 pInput->Names[iReg++] = HvX64RegisterDr2;
1995 pInput->Names[iReg++] = HvX64RegisterDr3;
1996 }
1997 if (fWhat & CPUMCTX_EXTRN_DR6)
1998 pInput->Names[iReg++] = HvX64RegisterDr6;
1999
2000 /* Floating point state. */
2001 if (fWhat & CPUMCTX_EXTRN_X87)
2002 {
2003 pInput->Names[iReg++] = HvX64RegisterFpMmx0;
2004 pInput->Names[iReg++] = HvX64RegisterFpMmx1;
2005 pInput->Names[iReg++] = HvX64RegisterFpMmx2;
2006 pInput->Names[iReg++] = HvX64RegisterFpMmx3;
2007 pInput->Names[iReg++] = HvX64RegisterFpMmx4;
2008 pInput->Names[iReg++] = HvX64RegisterFpMmx5;
2009 pInput->Names[iReg++] = HvX64RegisterFpMmx6;
2010 pInput->Names[iReg++] = HvX64RegisterFpMmx7;
2011 pInput->Names[iReg++] = HvX64RegisterFpControlStatus;
2012 }
2013 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
2014 pInput->Names[iReg++] = HvX64RegisterXmmControlStatus;
2015
2016 /* Vector state. */
2017 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
2018 {
2019 pInput->Names[iReg++] = HvX64RegisterXmm0;
2020 pInput->Names[iReg++] = HvX64RegisterXmm1;
2021 pInput->Names[iReg++] = HvX64RegisterXmm2;
2022 pInput->Names[iReg++] = HvX64RegisterXmm3;
2023 pInput->Names[iReg++] = HvX64RegisterXmm4;
2024 pInput->Names[iReg++] = HvX64RegisterXmm5;
2025 pInput->Names[iReg++] = HvX64RegisterXmm6;
2026 pInput->Names[iReg++] = HvX64RegisterXmm7;
2027 pInput->Names[iReg++] = HvX64RegisterXmm8;
2028 pInput->Names[iReg++] = HvX64RegisterXmm9;
2029 pInput->Names[iReg++] = HvX64RegisterXmm10;
2030 pInput->Names[iReg++] = HvX64RegisterXmm11;
2031 pInput->Names[iReg++] = HvX64RegisterXmm12;
2032 pInput->Names[iReg++] = HvX64RegisterXmm13;
2033 pInput->Names[iReg++] = HvX64RegisterXmm14;
2034 pInput->Names[iReg++] = HvX64RegisterXmm15;
2035 }
2036
2037 /* MSRs */
2038 // HvX64RegisterTsc - don't touch
2039 if (fWhat & CPUMCTX_EXTRN_EFER)
2040 pInput->Names[iReg++] = HvX64RegisterEfer;
2041 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
2042 pInput->Names[iReg++] = HvX64RegisterKernelGsBase;
2043 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
2044 {
2045 pInput->Names[iReg++] = HvX64RegisterSysenterCs;
2046 pInput->Names[iReg++] = HvX64RegisterSysenterEip;
2047 pInput->Names[iReg++] = HvX64RegisterSysenterEsp;
2048 }
2049 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
2050 {
2051 pInput->Names[iReg++] = HvX64RegisterStar;
2052 pInput->Names[iReg++] = HvX64RegisterLstar;
2053 pInput->Names[iReg++] = HvX64RegisterCstar;
2054 pInput->Names[iReg++] = HvX64RegisterSfmask;
2055 }
2056
2057# ifdef LOG_ENABLED
2058 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM);
2059# endif
2060 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
2061 {
2062 pInput->Names[iReg++] = HvX64RegisterApicBase; /// @todo APIC BASE
2063 pInput->Names[iReg++] = HvX64RegisterPat;
2064# if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
2065 pInput->Names[iReg++] = HvX64RegisterMtrrCap;
2066# endif
2067 pInput->Names[iReg++] = HvX64RegisterMtrrDefType;
2068 pInput->Names[iReg++] = HvX64RegisterMtrrFix64k00000;
2069 pInput->Names[iReg++] = HvX64RegisterMtrrFix16k80000;
2070 pInput->Names[iReg++] = HvX64RegisterMtrrFix16kA0000;
2071 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kC0000;
2072 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kC8000;
2073 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kD0000;
2074 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kD8000;
2075 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kE0000;
2076 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kE8000;
2077 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kF0000;
2078 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kF8000;
2079 pInput->Names[iReg++] = HvX64RegisterTscAux;
2080# if 0 /** @todo why can't we read HvX64RegisterIa32MiscEnable? */
2081 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
2082 pInput->Names[iReg++] = HvX64RegisterIa32MiscEnable;
2083# endif
2084# ifdef LOG_ENABLED
2085 if (enmCpuVendor != CPUMCPUVENDOR_AMD && enmCpuVendor != CPUMCPUVENDOR_HYGON)
2086 pInput->Names[iReg++] = HvX64RegisterIa32FeatureControl;
2087# endif
2088 }
2089
2090 /* Interruptibility. */
2091 if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
2092 {
2093 pInput->Names[iReg++] = HvRegisterInterruptState;
2094 pInput->Names[iReg++] = HvX64RegisterRip;
2095 }
2096
2097 /* event injection */
2098 pInput->Names[iReg++] = HvRegisterPendingInterruption;
2099 pInput->Names[iReg++] = HvRegisterPendingEvent0;
2100 pInput->Names[iReg++] = HvRegisterPendingEvent1;
2101 size_t const cRegs = iReg;
2102 size_t const cbInput = RT_ALIGN_Z(RT_UOFFSETOF_DYN(HV_INPUT_GET_VP_REGISTERS, Names[cRegs]), 32);
2103
2104 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
2105 Assert((uintptr_t)&paValues[cRegs] - (uintptr_t)pGVCpu->nemr0.s.HypercallData.pbPage < PAGE_SIZE); /* (max is around 168 registers) */
2106 RT_BZERO(paValues, cRegs * sizeof(paValues[0]));
2107
2108 /*
2109 * Make the hypercall.
2110 */
2111 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, cRegs),
2112 pGVCpu->nemr0.s.HypercallData.HCPhysPage,
2113 pGVCpu->nemr0.s.HypercallData.HCPhysPage + cbInput);
2114 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(cRegs),
2115 ("uResult=%RX64 cRegs=%#x\n", uResult, cRegs),
2116 VERR_NEM_GET_REGISTERS_FAILED);
2117 //LogFlow(("nemR0WinImportState: uResult=%#RX64 iReg=%zu fWhat=%#018RX64 fExtr=%#018RX64\n", uResult, cRegs, fWhat, pCtx->fExtrn));
2118
2119 /*
2120 * Copy information to the CPUM context.
2121 */
2122 iReg = 0;
2123
2124 /* GPRs */
2125 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
2126 {
2127 if (fWhat & CPUMCTX_EXTRN_RAX)
2128 {
2129 Assert(pInput->Names[iReg] == HvX64RegisterRax);
2130 pCtx->rax = paValues[iReg++].Reg64;
2131 }
2132 if (fWhat & CPUMCTX_EXTRN_RCX)
2133 {
2134 Assert(pInput->Names[iReg] == HvX64RegisterRcx);
2135 pCtx->rcx = paValues[iReg++].Reg64;
2136 }
2137 if (fWhat & CPUMCTX_EXTRN_RDX)
2138 {
2139 Assert(pInput->Names[iReg] == HvX64RegisterRdx);
2140 pCtx->rdx = paValues[iReg++].Reg64;
2141 }
2142 if (fWhat & CPUMCTX_EXTRN_RBX)
2143 {
2144 Assert(pInput->Names[iReg] == HvX64RegisterRbx);
2145 pCtx->rbx = paValues[iReg++].Reg64;
2146 }
2147 if (fWhat & CPUMCTX_EXTRN_RSP)
2148 {
2149 Assert(pInput->Names[iReg] == HvX64RegisterRsp);
2150 pCtx->rsp = paValues[iReg++].Reg64;
2151 }
2152 if (fWhat & CPUMCTX_EXTRN_RBP)
2153 {
2154 Assert(pInput->Names[iReg] == HvX64RegisterRbp);
2155 pCtx->rbp = paValues[iReg++].Reg64;
2156 }
2157 if (fWhat & CPUMCTX_EXTRN_RSI)
2158 {
2159 Assert(pInput->Names[iReg] == HvX64RegisterRsi);
2160 pCtx->rsi = paValues[iReg++].Reg64;
2161 }
2162 if (fWhat & CPUMCTX_EXTRN_RDI)
2163 {
2164 Assert(pInput->Names[iReg] == HvX64RegisterRdi);
2165 pCtx->rdi = paValues[iReg++].Reg64;
2166 }
2167 if (fWhat & CPUMCTX_EXTRN_R8_R15)
2168 {
2169 Assert(pInput->Names[iReg] == HvX64RegisterR8);
2170 Assert(pInput->Names[iReg + 7] == HvX64RegisterR15);
2171 pCtx->r8 = paValues[iReg++].Reg64;
2172 pCtx->r9 = paValues[iReg++].Reg64;
2173 pCtx->r10 = paValues[iReg++].Reg64;
2174 pCtx->r11 = paValues[iReg++].Reg64;
2175 pCtx->r12 = paValues[iReg++].Reg64;
2176 pCtx->r13 = paValues[iReg++].Reg64;
2177 pCtx->r14 = paValues[iReg++].Reg64;
2178 pCtx->r15 = paValues[iReg++].Reg64;
2179 }
2180 }
2181
2182 /* RIP & Flags */
2183 if (fWhat & CPUMCTX_EXTRN_RIP)
2184 {
2185 Assert(pInput->Names[iReg] == HvX64RegisterRip);
2186 pCtx->rip = paValues[iReg++].Reg64;
2187 }
2188 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
2189 {
2190 Assert(pInput->Names[iReg] == HvX64RegisterRflags);
2191 pCtx->rflags.u = paValues[iReg++].Reg64;
2192 }
2193
2194 /* Segments */
2195# define COPY_BACK_SEG(a_idx, a_enmName, a_SReg) \
2196 do { \
2197 Assert(pInput->Names[a_idx] == a_enmName); \
2198 (a_SReg).u64Base = paValues[a_idx].Segment.Base; \
2199 (a_SReg).u32Limit = paValues[a_idx].Segment.Limit; \
2200 (a_SReg).ValidSel = (a_SReg).Sel = paValues[a_idx].Segment.Selector; \
2201 (a_SReg).Attr.u = paValues[a_idx].Segment.Attributes; \
2202 (a_SReg).fFlags = CPUMSELREG_FLAGS_VALID; \
2203 } while (0)
2204 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
2205 {
2206 if (fWhat & CPUMCTX_EXTRN_CS)
2207 {
2208 COPY_BACK_SEG(iReg, HvX64RegisterCs, pCtx->cs);
2209 iReg++;
2210 }
2211 if (fWhat & CPUMCTX_EXTRN_ES)
2212 {
2213 COPY_BACK_SEG(iReg, HvX64RegisterEs, pCtx->es);
2214 iReg++;
2215 }
2216 if (fWhat & CPUMCTX_EXTRN_SS)
2217 {
2218 COPY_BACK_SEG(iReg, HvX64RegisterSs, pCtx->ss);
2219 iReg++;
2220 }
2221 if (fWhat & CPUMCTX_EXTRN_DS)
2222 {
2223 COPY_BACK_SEG(iReg, HvX64RegisterDs, pCtx->ds);
2224 iReg++;
2225 }
2226 if (fWhat & CPUMCTX_EXTRN_FS)
2227 {
2228 COPY_BACK_SEG(iReg, HvX64RegisterFs, pCtx->fs);
2229 iReg++;
2230 }
2231 if (fWhat & CPUMCTX_EXTRN_GS)
2232 {
2233 COPY_BACK_SEG(iReg, HvX64RegisterGs, pCtx->gs);
2234 iReg++;
2235 }
2236 }
2237 /* Descriptor tables and the task segment. */
2238 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
2239 {
2240 if (fWhat & CPUMCTX_EXTRN_LDTR)
2241 {
2242 COPY_BACK_SEG(iReg, HvX64RegisterLdtr, pCtx->ldtr);
2243 iReg++;
2244 }
2245 if (fWhat & CPUMCTX_EXTRN_TR)
2246 {
2247 /* AMD-V likes loading TR with in AVAIL state, whereas intel insists on BUSY. So,
2248 avoid to trigger sanity assertions around the code, always fix this. */
2249 COPY_BACK_SEG(iReg, HvX64RegisterTr, pCtx->tr);
2250 switch (pCtx->tr.Attr.n.u4Type)
2251 {
2252 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2253 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2254 break;
2255 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
2256 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2257 break;
2258 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
2259 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;
2260 break;
2261 }
2262 iReg++;
2263 }
2264 if (fWhat & CPUMCTX_EXTRN_IDTR)
2265 {
2266 Assert(pInput->Names[iReg] == HvX64RegisterIdtr);
2267 pCtx->idtr.cbIdt = paValues[iReg].Table.Limit;
2268 pCtx->idtr.pIdt = paValues[iReg].Table.Base;
2269 iReg++;
2270 }
2271 if (fWhat & CPUMCTX_EXTRN_GDTR)
2272 {
2273 Assert(pInput->Names[iReg] == HvX64RegisterGdtr);
2274 pCtx->gdtr.cbGdt = paValues[iReg].Table.Limit;
2275 pCtx->gdtr.pGdt = paValues[iReg].Table.Base;
2276 iReg++;
2277 }
2278 }
2279
2280 /* Control registers. */
2281 bool fMaybeChangedMode = false;
2282 bool fUpdateCr3 = false;
2283 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
2284 {
2285 if (fWhat & CPUMCTX_EXTRN_CR0)
2286 {
2287 Assert(pInput->Names[iReg] == HvX64RegisterCr0);
2288 if (pCtx->cr0 != paValues[iReg].Reg64)
2289 {
2290 CPUMSetGuestCR0(pGVCpu, paValues[iReg].Reg64);
2291 fMaybeChangedMode = true;
2292 }
2293 iReg++;
2294 }
2295 if (fWhat & CPUMCTX_EXTRN_CR2)
2296 {
2297 Assert(pInput->Names[iReg] == HvX64RegisterCr2);
2298 pCtx->cr2 = paValues[iReg].Reg64;
2299 iReg++;
2300 }
2301 if (fWhat & CPUMCTX_EXTRN_CR3)
2302 {
2303 Assert(pInput->Names[iReg] == HvX64RegisterCr3);
2304 if (pCtx->cr3 != paValues[iReg].Reg64)
2305 {
2306 CPUMSetGuestCR3(pGVCpu, paValues[iReg].Reg64);
2307 fUpdateCr3 = true;
2308 }
2309 iReg++;
2310 }
2311 if (fWhat & CPUMCTX_EXTRN_CR4)
2312 {
2313 Assert(pInput->Names[iReg] == HvX64RegisterCr4);
2314 if (pCtx->cr4 != paValues[iReg].Reg64)
2315 {
2316 CPUMSetGuestCR4(pGVCpu, paValues[iReg].Reg64);
2317 fMaybeChangedMode = true;
2318 }
2319 iReg++;
2320 }
2321 }
2322 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
2323 {
2324 Assert(pInput->Names[iReg] == HvX64RegisterCr8);
2325 APICSetTpr(pGVCpu, (uint8_t)paValues[iReg].Reg64 << 4);
2326 iReg++;
2327 }
2328
2329 /* Debug registers. */
2330 if (fWhat & CPUMCTX_EXTRN_DR7)
2331 {
2332 Assert(pInput->Names[iReg] == HvX64RegisterDr7);
2333 if (pCtx->dr[7] != paValues[iReg].Reg64)
2334 CPUMSetGuestDR7(pGVCpu, paValues[iReg].Reg64);
2335 pCtx->fExtrn &= ~CPUMCTX_EXTRN_DR7; /* Hack alert! Avoids asserting when processing CPUMCTX_EXTRN_DR0_DR3. */
2336 iReg++;
2337 }
2338 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
2339 {
2340 Assert(pInput->Names[iReg] == HvX64RegisterDr0);
2341 Assert(pInput->Names[iReg+3] == HvX64RegisterDr3);
2342 if (pCtx->dr[0] != paValues[iReg].Reg64)
2343 CPUMSetGuestDR0(pGVCpu, paValues[iReg].Reg64);
2344 iReg++;
2345 if (pCtx->dr[1] != paValues[iReg].Reg64)
2346 CPUMSetGuestDR1(pGVCpu, paValues[iReg].Reg64);
2347 iReg++;
2348 if (pCtx->dr[2] != paValues[iReg].Reg64)
2349 CPUMSetGuestDR2(pGVCpu, paValues[iReg].Reg64);
2350 iReg++;
2351 if (pCtx->dr[3] != paValues[iReg].Reg64)
2352 CPUMSetGuestDR3(pGVCpu, paValues[iReg].Reg64);
2353 iReg++;
2354 }
2355 if (fWhat & CPUMCTX_EXTRN_DR6)
2356 {
2357 Assert(pInput->Names[iReg] == HvX64RegisterDr6);
2358 if (pCtx->dr[6] != paValues[iReg].Reg64)
2359 CPUMSetGuestDR6(pGVCpu, paValues[iReg].Reg64);
2360 iReg++;
2361 }
2362
2363 /* Floating point state. */
2364 if (fWhat & CPUMCTX_EXTRN_X87)
2365 {
2366 Assert(pInput->Names[iReg] == HvX64RegisterFpMmx0);
2367 Assert(pInput->Names[iReg + 7] == HvX64RegisterFpMmx7);
2368 pCtx->XState.x87.aRegs[0].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
2369 pCtx->XState.x87.aRegs[0].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
2370 iReg++;
2371 pCtx->XState.x87.aRegs[1].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
2372 pCtx->XState.x87.aRegs[1].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
2373 iReg++;
2374 pCtx->XState.x87.aRegs[2].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
2375 pCtx->XState.x87.aRegs[2].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
2376 iReg++;
2377 pCtx->XState.x87.aRegs[3].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
2378 pCtx->XState.x87.aRegs[3].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
2379 iReg++;
2380 pCtx->XState.x87.aRegs[4].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
2381 pCtx->XState.x87.aRegs[4].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
2382 iReg++;
2383 pCtx->XState.x87.aRegs[5].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
2384 pCtx->XState.x87.aRegs[5].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
2385 iReg++;
2386 pCtx->XState.x87.aRegs[6].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
2387 pCtx->XState.x87.aRegs[6].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
2388 iReg++;
2389 pCtx->XState.x87.aRegs[7].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
2390 pCtx->XState.x87.aRegs[7].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
2391 iReg++;
2392
2393 Assert(pInput->Names[iReg] == HvX64RegisterFpControlStatus);
2394 pCtx->XState.x87.FCW = paValues[iReg].FpControlStatus.FpControl;
2395 pCtx->XState.x87.FSW = paValues[iReg].FpControlStatus.FpStatus;
2396 pCtx->XState.x87.FTW = paValues[iReg].FpControlStatus.FpTag
2397 /*| (paValues[iReg].FpControlStatus.Reserved << 8)*/;
2398 pCtx->XState.x87.FOP = paValues[iReg].FpControlStatus.LastFpOp;
2399 pCtx->XState.x87.FPUIP = (uint32_t)paValues[iReg].FpControlStatus.LastFpRip;
2400 pCtx->XState.x87.CS = (uint16_t)(paValues[iReg].FpControlStatus.LastFpRip >> 32);
2401 pCtx->XState.x87.Rsrvd1 = (uint16_t)(paValues[iReg].FpControlStatus.LastFpRip >> 48);
2402 iReg++;
2403 }
2404
2405 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
2406 {
2407 Assert(pInput->Names[iReg] == HvX64RegisterXmmControlStatus);
2408 if (fWhat & CPUMCTX_EXTRN_X87)
2409 {
2410 pCtx->XState.x87.FPUDP = (uint32_t)paValues[iReg].XmmControlStatus.LastFpRdp;
2411 pCtx->XState.x87.DS = (uint16_t)(paValues[iReg].XmmControlStatus.LastFpRdp >> 32);
2412 pCtx->XState.x87.Rsrvd2 = (uint16_t)(paValues[iReg].XmmControlStatus.LastFpRdp >> 48);
2413 }
2414 pCtx->XState.x87.MXCSR = paValues[iReg].XmmControlStatus.XmmStatusControl;
2415 pCtx->XState.x87.MXCSR_MASK = paValues[iReg].XmmControlStatus.XmmStatusControlMask; /** @todo ??? (Isn't this an output field?) */
2416 iReg++;
2417 }
2418
2419 /* Vector state. */
2420 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
2421 {
2422 Assert(pInput->Names[iReg] == HvX64RegisterXmm0);
2423 Assert(pInput->Names[iReg+15] == HvX64RegisterXmm15);
2424 pCtx->XState.x87.aXMM[0].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2425 pCtx->XState.x87.aXMM[0].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2426 iReg++;
2427 pCtx->XState.x87.aXMM[1].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2428 pCtx->XState.x87.aXMM[1].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2429 iReg++;
2430 pCtx->XState.x87.aXMM[2].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2431 pCtx->XState.x87.aXMM[2].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2432 iReg++;
2433 pCtx->XState.x87.aXMM[3].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2434 pCtx->XState.x87.aXMM[3].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2435 iReg++;
2436 pCtx->XState.x87.aXMM[4].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2437 pCtx->XState.x87.aXMM[4].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2438 iReg++;
2439 pCtx->XState.x87.aXMM[5].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2440 pCtx->XState.x87.aXMM[5].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2441 iReg++;
2442 pCtx->XState.x87.aXMM[6].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2443 pCtx->XState.x87.aXMM[6].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2444 iReg++;
2445 pCtx->XState.x87.aXMM[7].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2446 pCtx->XState.x87.aXMM[7].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2447 iReg++;
2448 pCtx->XState.x87.aXMM[8].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2449 pCtx->XState.x87.aXMM[8].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2450 iReg++;
2451 pCtx->XState.x87.aXMM[9].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2452 pCtx->XState.x87.aXMM[9].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2453 iReg++;
2454 pCtx->XState.x87.aXMM[10].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2455 pCtx->XState.x87.aXMM[10].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2456 iReg++;
2457 pCtx->XState.x87.aXMM[11].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2458 pCtx->XState.x87.aXMM[11].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2459 iReg++;
2460 pCtx->XState.x87.aXMM[12].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2461 pCtx->XState.x87.aXMM[12].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2462 iReg++;
2463 pCtx->XState.x87.aXMM[13].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2464 pCtx->XState.x87.aXMM[13].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2465 iReg++;
2466 pCtx->XState.x87.aXMM[14].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2467 pCtx->XState.x87.aXMM[14].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2468 iReg++;
2469 pCtx->XState.x87.aXMM[15].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
2470 pCtx->XState.x87.aXMM[15].uXmm.s.Hi = paValues[iReg].Reg128.High64;
2471 iReg++;
2472 }
2473
2474
2475 /* MSRs */
2476 // HvX64RegisterTsc - don't touch
2477 if (fWhat & CPUMCTX_EXTRN_EFER)
2478 {
2479 Assert(pInput->Names[iReg] == HvX64RegisterEfer);
2480 if (paValues[iReg].Reg64 != pCtx->msrEFER)
2481 {
2482 Log7(("NEM/%u: MSR EFER changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrEFER, paValues[iReg].Reg64));
2483 if ((paValues[iReg].Reg64 ^ pCtx->msrEFER) & MSR_K6_EFER_NXE)
2484 PGMNotifyNxeChanged(pGVCpu, RT_BOOL(paValues[iReg].Reg64 & MSR_K6_EFER_NXE));
2485 pCtx->msrEFER = paValues[iReg].Reg64;
2486 fMaybeChangedMode = true;
2487 }
2488 iReg++;
2489 }
2490 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
2491 {
2492 Assert(pInput->Names[iReg] == HvX64RegisterKernelGsBase);
2493 if (pCtx->msrKERNELGSBASE != paValues[iReg].Reg64)
2494 Log7(("NEM/%u: MSR KERNELGSBASE changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrKERNELGSBASE, paValues[iReg].Reg64));
2495 pCtx->msrKERNELGSBASE = paValues[iReg].Reg64;
2496 iReg++;
2497 }
2498 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
2499 {
2500 Assert(pInput->Names[iReg] == HvX64RegisterSysenterCs);
2501 if (pCtx->SysEnter.cs != paValues[iReg].Reg64)
2502 Log7(("NEM/%u: MSR SYSENTER.CS changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->SysEnter.cs, paValues[iReg].Reg64));
2503 pCtx->SysEnter.cs = paValues[iReg].Reg64;
2504 iReg++;
2505
2506 Assert(pInput->Names[iReg] == HvX64RegisterSysenterEip);
2507 if (pCtx->SysEnter.eip != paValues[iReg].Reg64)
2508 Log7(("NEM/%u: MSR SYSENTER.EIP changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->SysEnter.eip, paValues[iReg].Reg64));
2509 pCtx->SysEnter.eip = paValues[iReg].Reg64;
2510 iReg++;
2511
2512 Assert(pInput->Names[iReg] == HvX64RegisterSysenterEsp);
2513 if (pCtx->SysEnter.esp != paValues[iReg].Reg64)
2514 Log7(("NEM/%u: MSR SYSENTER.ESP changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->SysEnter.esp, paValues[iReg].Reg64));
2515 pCtx->SysEnter.esp = paValues[iReg].Reg64;
2516 iReg++;
2517 }
2518 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
2519 {
2520 Assert(pInput->Names[iReg] == HvX64RegisterStar);
2521 if (pCtx->msrSTAR != paValues[iReg].Reg64)
2522 Log7(("NEM/%u: MSR STAR changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrSTAR, paValues[iReg].Reg64));
2523 pCtx->msrSTAR = paValues[iReg].Reg64;
2524 iReg++;
2525
2526 Assert(pInput->Names[iReg] == HvX64RegisterLstar);
2527 if (pCtx->msrLSTAR != paValues[iReg].Reg64)
2528 Log7(("NEM/%u: MSR LSTAR changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrLSTAR, paValues[iReg].Reg64));
2529 pCtx->msrLSTAR = paValues[iReg].Reg64;
2530 iReg++;
2531
2532 Assert(pInput->Names[iReg] == HvX64RegisterCstar);
2533 if (pCtx->msrCSTAR != paValues[iReg].Reg64)
2534 Log7(("NEM/%u: MSR CSTAR changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrCSTAR, paValues[iReg].Reg64));
2535 pCtx->msrCSTAR = paValues[iReg].Reg64;
2536 iReg++;
2537
2538 Assert(pInput->Names[iReg] == HvX64RegisterSfmask);
2539 if (pCtx->msrSFMASK != paValues[iReg].Reg64)
2540 Log7(("NEM/%u: MSR SFMASK changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrSFMASK, paValues[iReg].Reg64));
2541 pCtx->msrSFMASK = paValues[iReg].Reg64;
2542 iReg++;
2543 }
2544 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
2545 {
2546 Assert(pInput->Names[iReg] == HvX64RegisterApicBase);
2547 const uint64_t uOldBase = APICGetBaseMsrNoCheck(pGVCpu);
2548 if (paValues[iReg].Reg64 != uOldBase)
2549 {
2550 Log7(("NEM/%u: MSR APICBase changed %RX64 -> %RX64 (%RX64)\n",
2551 pGVCpu->idCpu, uOldBase, paValues[iReg].Reg64, paValues[iReg].Reg64 ^ uOldBase));
2552 int rc2 = APICSetBaseMsr(pGVCpu, paValues[iReg].Reg64);
2553 AssertLogRelMsg(rc2 == VINF_SUCCESS, ("rc2=%Rrc [%#RX64]\n", rc2, paValues[iReg].Reg64));
2554 }
2555 iReg++;
2556
2557 Assert(pInput->Names[iReg] == HvX64RegisterPat);
2558 if (pCtx->msrPAT != paValues[iReg].Reg64)
2559 Log7(("NEM/%u: MSR PAT changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrPAT, paValues[iReg].Reg64));
2560 pCtx->msrPAT = paValues[iReg].Reg64;
2561 iReg++;
2562
2563# if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
2564 Assert(pInput->Names[iReg] == HvX64RegisterMtrrCap);
2565 if (paValues[iReg].Reg64 != CPUMGetGuestIa32MtrrCap(pGVCpu))
2566 Log7(("NEM/%u: MSR MTRR_CAP changed %RX64 -> %RX64 (!!)\n", pGVCpu->idCpu, CPUMGetGuestIa32MtrrCap(pGVCpu), paValues[iReg].Reg64));
2567 iReg++;
2568# endif
2569
2570 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pGVCpu);
2571 Assert(pInput->Names[iReg] == HvX64RegisterMtrrDefType);
2572 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrDefType )
2573 Log7(("NEM/%u: MSR MTRR_DEF_TYPE changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrDefType, paValues[iReg].Reg64));
2574 pCtxMsrs->msr.MtrrDefType = paValues[iReg].Reg64;
2575 iReg++;
2576
2577 /** @todo we dont keep state for HvX64RegisterMtrrPhysBaseX and HvX64RegisterMtrrPhysMaskX */
2578
2579 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix64k00000);
2580 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix64K_00000 )
2581 Log7(("NEM/%u: MSR MTRR_FIX16K_00000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix64K_00000, paValues[iReg].Reg64));
2582 pCtxMsrs->msr.MtrrFix64K_00000 = paValues[iReg].Reg64;
2583 iReg++;
2584
2585 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix16k80000);
2586 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix16K_80000 )
2587 Log7(("NEM/%u: MSR MTRR_FIX16K_80000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_80000, paValues[iReg].Reg64));
2588 pCtxMsrs->msr.MtrrFix16K_80000 = paValues[iReg].Reg64;
2589 iReg++;
2590
2591 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix16kA0000);
2592 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix16K_A0000 )
2593 Log7(("NEM/%u: MSR MTRR_FIX16K_A0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_A0000, paValues[iReg].Reg64));
2594 pCtxMsrs->msr.MtrrFix16K_A0000 = paValues[iReg].Reg64;
2595 iReg++;
2596
2597 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kC0000);
2598 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_C0000 )
2599 Log7(("NEM/%u: MSR MTRR_FIX16K_C0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C0000, paValues[iReg].Reg64));
2600 pCtxMsrs->msr.MtrrFix4K_C0000 = paValues[iReg].Reg64;
2601 iReg++;
2602
2603 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kC8000);
2604 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_C8000 )
2605 Log7(("NEM/%u: MSR MTRR_FIX16K_C8000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C8000, paValues[iReg].Reg64));
2606 pCtxMsrs->msr.MtrrFix4K_C8000 = paValues[iReg].Reg64;
2607 iReg++;
2608
2609 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kD0000);
2610 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_D0000 )
2611 Log7(("NEM/%u: MSR MTRR_FIX16K_D0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D0000, paValues[iReg].Reg64));
2612 pCtxMsrs->msr.MtrrFix4K_D0000 = paValues[iReg].Reg64;
2613 iReg++;
2614
2615 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kD8000);
2616 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_D8000 )
2617 Log7(("NEM/%u: MSR MTRR_FIX16K_D8000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D8000, paValues[iReg].Reg64));
2618 pCtxMsrs->msr.MtrrFix4K_D8000 = paValues[iReg].Reg64;
2619 iReg++;
2620
2621 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kE0000);
2622 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_E0000 )
2623 Log7(("NEM/%u: MSR MTRR_FIX16K_E0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E0000, paValues[iReg].Reg64));
2624 pCtxMsrs->msr.MtrrFix4K_E0000 = paValues[iReg].Reg64;
2625 iReg++;
2626
2627 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kE8000);
2628 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_E8000 )
2629 Log7(("NEM/%u: MSR MTRR_FIX16K_E8000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E8000, paValues[iReg].Reg64));
2630 pCtxMsrs->msr.MtrrFix4K_E8000 = paValues[iReg].Reg64;
2631 iReg++;
2632
2633 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kF0000);
2634 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_F0000 )
2635 Log7(("NEM/%u: MSR MTRR_FIX16K_F0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F0000, paValues[iReg].Reg64));
2636 pCtxMsrs->msr.MtrrFix4K_F0000 = paValues[iReg].Reg64;
2637 iReg++;
2638
2639 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kF8000);
2640 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_F8000 )
2641 Log7(("NEM/%u: MSR MTRR_FIX16K_F8000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F8000, paValues[iReg].Reg64));
2642 pCtxMsrs->msr.MtrrFix4K_F8000 = paValues[iReg].Reg64;
2643 iReg++;
2644
2645 Assert(pInput->Names[iReg] == HvX64RegisterTscAux);
2646 if (paValues[iReg].Reg64 != pCtxMsrs->msr.TscAux )
2647 Log7(("NEM/%u: MSR TSC_AUX changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.TscAux, paValues[iReg].Reg64));
2648 pCtxMsrs->msr.TscAux = paValues[iReg].Reg64;
2649 iReg++;
2650
2651# if 0 /** @todo why can't we even read HvX64RegisterIa32MiscEnable? */
2652 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
2653 {
2654 Assert(pInput->Names[iReg] == HvX64RegisterIa32MiscEnable);
2655 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MiscEnable)
2656 Log7(("NEM/%u: MSR MISC_ENABLE changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MiscEnable, paValues[iReg].Reg64));
2657 pCtxMsrs->msr.MiscEnable = paValues[iReg].Reg64;
2658 iReg++;
2659 }
2660# endif
2661# ifdef LOG_ENABLED
2662 if (enmCpuVendor != CPUMCPUVENDOR_AMD && enmCpuVendor != CPUMCPUVENDOR_HYGON)
2663 {
2664 Assert(pInput->Names[iReg] == HvX64RegisterIa32FeatureControl);
2665 uint64_t const uFeatCtrl = CPUMGetGuestIa32FeatCtrl(pVCpu);
2666 if (paValues[iReg].Reg64 != uFeatCtrl)
2667 Log7(("NEM/%u: MSR FEATURE_CONTROL changed %RX64 -> %RX64 (!!)\n", pGVCpu->idCpu, uFeatCtrl, paValues[iReg].Reg64));
2668 iReg++;
2669 }
2670# endif
2671 }
2672
2673 /* Interruptibility. */
2674 if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
2675 {
2676 Assert(pInput->Names[iReg] == HvRegisterInterruptState);
2677 Assert(pInput->Names[iReg + 1] == HvX64RegisterRip);
2678
2679 if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
2680 {
2681 pGVCpu->nem.s.fLastInterruptShadow = paValues[iReg].InterruptState.InterruptShadow;
2682 if (paValues[iReg].InterruptState.InterruptShadow)
2683 EMSetInhibitInterruptsPC(pGVCpu, paValues[iReg + 1].Reg64);
2684 else
2685 VMCPU_FF_CLEAR(pGVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2686 }
2687
2688 if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
2689 {
2690 if (paValues[iReg].InterruptState.NmiMasked)
2691 VMCPU_FF_SET(pGVCpu, VMCPU_FF_BLOCK_NMIS);
2692 else
2693 VMCPU_FF_CLEAR(pGVCpu, VMCPU_FF_BLOCK_NMIS);
2694 }
2695
2696 fWhat |= CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI;
2697 iReg += 2;
2698 }
2699
2700 /* Event injection. */
2701 /// @todo HvRegisterPendingInterruption
2702 Assert(pInput->Names[iReg] == HvRegisterPendingInterruption);
2703 if (paValues[iReg].PendingInterruption.InterruptionPending)
2704 {
2705 Log7(("PendingInterruption: type=%u vector=%#x errcd=%RTbool/%#x instr-len=%u nested=%u\n",
2706 paValues[iReg].PendingInterruption.InterruptionType, paValues[iReg].PendingInterruption.InterruptionVector,
2707 paValues[iReg].PendingInterruption.DeliverErrorCode, paValues[iReg].PendingInterruption.ErrorCode,
2708 paValues[iReg].PendingInterruption.InstructionLength, paValues[iReg].PendingInterruption.NestedEvent));
2709 AssertMsg((paValues[iReg].PendingInterruption.AsUINT64 & UINT64_C(0xfc00)) == 0,
2710 ("%#RX64\n", paValues[iReg].PendingInterruption.AsUINT64));
2711 }
2712
2713 /// @todo HvRegisterPendingEvent0
2714 /// @todo HvRegisterPendingEvent1
2715
2716 /* Almost done, just update extrn flags and maybe change PGM mode. */
2717 pCtx->fExtrn &= ~fWhat;
2718 if (!(pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT))))
2719 pCtx->fExtrn = 0;
2720
2721 /* Typical. */
2722 if (!fMaybeChangedMode && !fUpdateCr3)
2723 return VINF_SUCCESS;
2724
2725 /*
2726 * Slow.
2727 */
2728 int rc = VINF_SUCCESS;
2729 if (fMaybeChangedMode)
2730 {
2731 rc = PGMChangeMode(pGVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
2732 AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_1);
2733 }
2734
2735 if (fUpdateCr3)
2736 {
2737 if (fCanUpdateCr3)
2738 {
2739 LogFlow(("nemR0WinImportState: -> PGMUpdateCR3!\n"));
2740 rc = PGMUpdateCR3(pGVCpu, pCtx->cr3, false /*fPdpesMapped*/);
2741 if (rc == VINF_SUCCESS)
2742 { /* likely */ }
2743 else
2744 AssertMsgFailedReturn(("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_2);
2745 }
2746 else
2747 {
2748 LogFlow(("nemR0WinImportState: -> VERR_NEM_FLUSH_TLB!\n"));
2749 rc = VERR_NEM_FLUSH_TLB; /* Calling PGMFlushTLB w/o long jump setup doesn't work, ring-3 does it. */
2750 }
2751 }
2752
2753 return rc;
2754}
2755#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
2756
2757
2758/**
2759 * Import the state from the native API (back to CPUMCTX).
2760 *
2761 * @returns VBox status code
2762 * @param pGVM The ring-0 VM handle.
2763 * @param idCpu The calling EMT. Necessary for getting the
2764 * hypercall page and arguments.
2765 * @param fWhat What to import, CPUMCTX_EXTRN_XXX. Set
2766 * CPUMCTX_EXTERN_ALL for everything.
2767 */
2768VMMR0_INT_DECL(int) NEMR0ImportState(PGVM pGVM, VMCPUID idCpu, uint64_t fWhat)
2769{
2770#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2771 /*
2772 * Validate the call.
2773 */
2774 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
2775 if (RT_SUCCESS(rc))
2776 {
2777 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2778 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
2779
2780 /*
2781 * Call worker.
2782 */
2783 rc = nemR0WinImportState(pGVM, pGVCpu, &pGVCpu->cpum.GstCtx, fWhat, false /*fCanUpdateCr3*/);
2784 }
2785 return rc;
2786#else
2787 RT_NOREF(pGVM, idCpu, fWhat);
2788 return VERR_NOT_IMPLEMENTED;
2789#endif
2790}
2791
2792
2793#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2794/**
2795 * Worker for NEMR0QueryCpuTick and the ring-0 NEMHCQueryCpuTick.
2796 *
2797 * @returns VBox status code.
2798 * @param pGVM The ring-0 VM handle.
2799 * @param pGVCpu The ring-0 VCPU handle.
2800 * @param pcTicks Where to return the current CPU tick count.
2801 * @param pcAux Where to return the hyper-V TSC_AUX value. Optional.
2802 */
2803NEM_TMPL_STATIC int nemR0WinQueryCpuTick(PGVM pGVM, PGVMCPU pGVCpu, uint64_t *pcTicks, uint32_t *pcAux)
2804{
2805 /*
2806 * Hypercall parameters.
2807 */
2808 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
2809 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2810 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
2811
2812 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
2813 pInput->VpIndex = pGVCpu->idCpu;
2814 pInput->fFlags = 0;
2815 pInput->Names[0] = HvX64RegisterTsc;
2816 pInput->Names[1] = HvX64RegisterTscAux;
2817
2818 size_t const cbInput = RT_ALIGN_Z(RT_UOFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[2]), 32);
2819 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
2820 RT_BZERO(paValues, sizeof(paValues[0]) * 2);
2821
2822 /*
2823 * Make the hypercall.
2824 */
2825 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, 2),
2826 pGVCpu->nemr0.s.HypercallData.HCPhysPage,
2827 pGVCpu->nemr0.s.HypercallData.HCPhysPage + cbInput);
2828 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(2), ("uResult=%RX64 cRegs=%#x\n", uResult, 2),
2829 VERR_NEM_GET_REGISTERS_FAILED);
2830
2831 /*
2832 * Get results.
2833 */
2834 *pcTicks = paValues[0].Reg64;
2835 if (pcAux)
2836 *pcAux = paValues[0].Reg32;
2837 return VINF_SUCCESS;
2838}
2839#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
2840
2841
2842/**
2843 * Queries the TSC and TSC_AUX values, putting the results in .
2844 *
2845 * @returns VBox status code
2846 * @param pGVM The ring-0 VM handle.
2847 * @param idCpu The calling EMT. Necessary for getting the
2848 * hypercall page and arguments.
2849 */
2850VMMR0_INT_DECL(int) NEMR0QueryCpuTick(PGVM pGVM, VMCPUID idCpu)
2851{
2852#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2853 /*
2854 * Validate the call.
2855 */
2856 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
2857 if (RT_SUCCESS(rc))
2858 {
2859 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2860 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
2861
2862 /*
2863 * Call worker.
2864 */
2865 pGVCpu->nem.s.Hypercall.QueryCpuTick.cTicks = 0;
2866 pGVCpu->nem.s.Hypercall.QueryCpuTick.uAux = 0;
2867 rc = nemR0WinQueryCpuTick(pGVM, pGVCpu, &pGVCpu->nem.s.Hypercall.QueryCpuTick.cTicks,
2868 &pGVCpu->nem.s.Hypercall.QueryCpuTick.uAux);
2869 }
2870 return rc;
2871#else
2872 RT_NOREF(pGVM, idCpu);
2873 return VERR_NOT_IMPLEMENTED;
2874#endif
2875}
2876
2877
2878#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
/**
 * Worker for NEMR0ResumeCpuTickOnAll and the ring-0 NEMHCResumeCpuTickOnAll.
 *
 * Writes the paused TSC value to every virtual CPU of the partition via
 * HvCallSetVpRegisters, compensating each secondary vCPU for the host TSC
 * ticks that elapsed since the first vCPU was written.
 *
 * @returns VBox status code.
 * @param   pGVM            The ring-0 VM handle.
 * @param   pGVCpu          The ring-0 VCPU handle (supplies the hypercall page).
 * @param   uPausedTscValue The TSC value at the time of pausing.
 */
NEM_TMPL_STATIC int nemR0WinResumeCpuTickOnAll(PGVM pGVM, PGVMCPU pGVCpu, uint64_t uPausedTscValue)
{
    AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);

    /*
     * Set up the hypercall parameters.  The same input block is reused for
     * every vCPU below; only VpIndex and the TSC value change per iteration.
     */
    HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
    AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);

    pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
    pInput->VpIndex = 0;
    pInput->RsvdZ = 0;
    pInput->Elements[0].Name = HvX64RegisterTsc;
    pInput->Elements[0].Pad0 = 0;
    pInput->Elements[0].Pad1 = 0;
    pInput->Elements[0].Value.Reg128.High64 = 0;
    pInput->Elements[0].Value.Reg64 = uPausedTscValue;

    /*
     * Disable interrupts and do the first virtual CPU.
     * Keeping interrupts off means the deltas measured via ASMReadTSC() below
     * aren't inflated by interrupt handling on this host CPU.
     */
    RTCCINTREG const fSavedFlags = ASMIntDisableFlags();
    uint64_t const uFirstTsc = ASMReadTSC();
    uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
                                               pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0 /* no output */);
    AssertLogRelMsgReturnStmt(uResult == HV_MAKE_CALL_REP_RET(1), ("uResult=%RX64 uTsc=%#RX64\n", uResult, uPausedTscValue),
                              ASMSetFlags(fSavedFlags), VERR_NEM_SET_TSC);

    /*
     * Do secondary processors, adjusting for elapsed TSC and keeping finger crossed
     * that we don't introduce too much drift here.
     */
    for (VMCPUID iCpu = 1; iCpu < pGVM->cCpus; iCpu++)
    {
        /* The hypervisor shouldn't have modified the input block; sanity check it. */
        Assert(pInput->PartitionId == pGVM->nemr0.s.idHvPartition);
        Assert(pInput->RsvdZ == 0);
        Assert(pInput->Elements[0].Name == HvX64RegisterTsc);
        Assert(pInput->Elements[0].Pad0 == 0);
        Assert(pInput->Elements[0].Pad1 == 0);
        Assert(pInput->Elements[0].Value.Reg128.High64 == 0);

        /* Add the host TSC ticks elapsed since the first vCPU was written so all
           vCPUs end up (approximately) in sync. */
        pInput->VpIndex = iCpu;
        const uint64_t offDelta = (ASMReadTSC() - uFirstTsc);
        pInput->Elements[0].Value.Reg64 = uPausedTscValue + offDelta;

        uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
                                          pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0 /* no output */);
        AssertLogRelMsgReturnStmt(uResult == HV_MAKE_CALL_REP_RET(1),
                                  ("uResult=%RX64 uTsc=%#RX64 + %#RX64\n", uResult, uPausedTscValue, offDelta),
                                  ASMSetFlags(fSavedFlags), VERR_NEM_SET_TSC);
    }

    /*
     * Done.  Restore the caller's interrupt flag state.
     */
    ASMSetFlags(fSavedFlags);
    return VINF_SUCCESS;
}
2946#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
2947
2948
2949/**
2950 * Sets the TSC register to @a uPausedTscValue on all CPUs.
2951 *
2952 * @returns VBox status code
2953 * @param pGVM The ring-0 VM handle.
2954 * @param idCpu The calling EMT. Necessary for getting the
2955 * hypercall page and arguments.
2956 * @param uPausedTscValue The TSC value at the time of pausing.
2957 */
2958VMMR0_INT_DECL(int) NEMR0ResumeCpuTickOnAll(PGVM pGVM, VMCPUID idCpu, uint64_t uPausedTscValue)
2959{
2960#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2961 /*
2962 * Validate the call.
2963 */
2964 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
2965 if (RT_SUCCESS(rc))
2966 {
2967 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2968 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
2969
2970 /*
2971 * Call worker.
2972 */
2973 pGVCpu->nem.s.Hypercall.QueryCpuTick.cTicks = 0;
2974 pGVCpu->nem.s.Hypercall.QueryCpuTick.uAux = 0;
2975 rc = nemR0WinResumeCpuTickOnAll(pGVM, pGVCpu, uPausedTscValue);
2976 }
2977 return rc;
2978#else
2979 RT_NOREF(pGVM, idCpu, uPausedTscValue);
2980 return VERR_NOT_IMPLEMENTED;
2981#endif
2982}
2983
2984
2985VMMR0_INT_DECL(VBOXSTRICTRC) NEMR0RunGuestCode(PGVM pGVM, VMCPUID idCpu)
2986{
2987#ifdef NEM_WIN_WITH_RING0_RUNLOOP
2988 if (pGVM->nemr0.s.fMayUseRing0Runloop)
2989 return nemHCWinRunGC(pGVM, &pGVM->aCpus[idCpu]);
2990 return VERR_NEM_RING3_ONLY;
2991#else
2992 RT_NOREF(pGVM, idCpu);
2993 return VERR_NOT_IMPLEMENTED;
2994#endif
2995}
2996
2997
/**
 * Updates statistics in the VM structure.
 *
 * Queries the partition's memory balance (pages available / in use) from
 * Hyper-V and stores the result in pGVM->nem.s.R0Stats.
 *
 * @returns VBox status code.
 * @param   pGVM    The ring-0 VM handle.
 * @param   idCpu   The calling EMT, or NIL.  Necessary for getting the hypercall
 *                  page and arguments.
 */
VMMR0_INT_DECL(int) NEMR0UpdateStatistics(PGVM pGVM, VMCPUID idCpu)
{
    /*
     * Validate the call.
     */
    int rc;
    if (idCpu == NIL_VMCPUID)
        rc = GVMMR0ValidateGVM(pGVM);
    else
        rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
    if (RT_SUCCESS(rc))
    {
        AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);

        /* Use the per-EMT hypercall page when called on an EMT, otherwise the
           shared VM-wide page (which needs serialization, see below). */
        PNEMR0HYPERCALLDATA pHypercallData = idCpu != NIL_VMCPUID
                                           ? &pGVM->aCpus[idCpu].nemr0.s.HypercallData
                                           : &pGVM->nemr0.s.HypercallData;
        if (   RT_VALID_PTR(pHypercallData->pbPage)
            && pHypercallData->HCPhysPage != NIL_RTHCPHYS)
        {
            /* The VM-wide hypercall page is shared between callers, so take the critsect. */
            if (idCpu == NIL_VMCPUID)
                rc = RTCritSectEnter(&pGVM->nemr0.s.HypercallDataCritSect);
            if (RT_SUCCESS(rc))
            {
                /*
                 * Query the memory statistics for the partition.
                 */
                HV_INPUT_GET_MEMORY_BALANCE *pInput = (HV_INPUT_GET_MEMORY_BALANCE *)pHypercallData->pbPage;
                pInput->TargetPartitionId = pGVM->nemr0.s.idHvPartition;
                pInput->ProximityDomainInfo.Flags.ProximityPreferred = 0;
                pInput->ProximityDomainInfo.Flags.ProxyimityInfoValid = 0;
                pInput->ProximityDomainInfo.Flags.Reserved = 0;
                pInput->ProximityDomainInfo.Id = 0;

                /* The output area follows the input on the same page. */
                HV_OUTPUT_GET_MEMORY_BALANCE *pOutput = (HV_OUTPUT_GET_MEMORY_BALANCE *)(pInput + 1);
                RT_ZERO(*pOutput);

                uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallGetMemoryBalance,
                                                           pHypercallData->HCPhysPage,
                                                           pHypercallData->HCPhysPage + sizeof(*pInput));
                if (uResult == HV_STATUS_SUCCESS)
                {
                    pGVM->nem.s.R0Stats.cPagesAvailable = pOutput->PagesAvailable;
                    pGVM->nem.s.R0Stats.cPagesInUse = pOutput->PagesInUse;
                    rc = VINF_SUCCESS;
                }
                else
                {
                    LogRel(("HvCallGetMemoryBalance -> %#RX64 (%#RX64 %#RX64)!!\n",
                            uResult, pOutput->PagesAvailable, pOutput->PagesInUse));
                    rc = VERR_NEM_IPE_0;
                }

                if (idCpu == NIL_VMCPUID)
                    RTCritSectLeave(&pGVM->nemr0.s.HypercallDataCritSect);
            }
        }
        else
            rc = VERR_WRONG_ORDER; /* Hypercall pages not set up yet. */
    }
    return rc;
}
3068
3069
3070#if 1 && defined(DEBUG_bird)
/**
 * Debug only interface for poking around and exploring Hyper-V stuff.
 *
 * The item to query or set is passed in via
 * pGVCpu->nem.s.Hypercall.Experiment.uItem, and the status/values are
 * returned in the same Experiment structure.
 *
 * @returns VBox status code.
 * @param   pGVM        The ring-0 VM handle.
 * @param   idCpu       The calling EMT.
 * @param   u64Arg      What to query.  0 == registers, 1 == partition
 *                      property, 2 == set register.
 */
VMMR0_INT_DECL(int) NEMR0DoExperiment(PGVM pGVM, VMCPUID idCpu, uint64_t u64Arg)
{
    /*
     * Resolve CPU structures.
     */
    int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
    if (RT_SUCCESS(rc))
    {
        AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);

        PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
        if (u64Arg == 0)
        {
            /*
             * Query register.
             */
            HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
            AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);

            /* The output value follows the 32-byte aligned input on the same page. */
            size_t const cbInput = RT_ALIGN_Z(RT_UOFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[1]), 32);
            HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
            RT_BZERO(paValues, sizeof(paValues[0]) * 1);

            pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
            pInput->VpIndex = pGVCpu->idCpu;
            pInput->fFlags = 0;
            pInput->Names[0] = (HV_REGISTER_NAME)pGVCpu->nem.s.Hypercall.Experiment.uItem;

            uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, 1),
                                                       pGVCpu->nemr0.s.HypercallData.HCPhysPage,
                                                       pGVCpu->nemr0.s.HypercallData.HCPhysPage + cbInput);
            pGVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_MAKE_CALL_REP_RET(1);
            pGVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;
            pGVCpu->nem.s.Hypercall.Experiment.uLoValue = paValues[0].Reg128.Low64;
            pGVCpu->nem.s.Hypercall.Experiment.uHiValue = paValues[0].Reg128.High64;
            rc = VINF_SUCCESS;
        }
        else if (u64Arg == 1)
        {
            /*
             * Query partition property.
             */
            HV_INPUT_GET_PARTITION_PROPERTY *pInput = (HV_INPUT_GET_PARTITION_PROPERTY *)pGVCpu->nemr0.s.HypercallData.pbPage;
            AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);

            /* The output value follows the 32-byte aligned input on the same page. */
            size_t const cbInput = RT_ALIGN_Z(sizeof(*pInput), 32);
            HV_OUTPUT_GET_PARTITION_PROPERTY *pOutput = (HV_OUTPUT_GET_PARTITION_PROPERTY *)((uint8_t *)pInput + cbInput);
            pOutput->PropertyValue = 0;

            pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
            pInput->PropertyCode = (HV_PARTITION_PROPERTY_CODE)pGVCpu->nem.s.Hypercall.Experiment.uItem;
            pInput->uPadding = 0;

            uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallGetPartitionProperty,
                                                       pGVCpu->nemr0.s.HypercallData.HCPhysPage,
                                                       pGVCpu->nemr0.s.HypercallData.HCPhysPage + cbInput);
            pGVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_STATUS_SUCCESS;
            pGVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;
            pGVCpu->nem.s.Hypercall.Experiment.uLoValue = pOutput->PropertyValue;
            pGVCpu->nem.s.Hypercall.Experiment.uHiValue = 0; /* Property values are 64-bit only. */
            rc = VINF_SUCCESS;
        }
        else if (u64Arg == 2)
        {
            /*
             * Set register.
             */
            HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
            AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
            RT_BZERO(pInput, RT_UOFFSETOF(HV_INPUT_SET_VP_REGISTERS, Elements[1]));

            pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
            pInput->VpIndex = pGVCpu->idCpu;
            pInput->RsvdZ = 0;
            pInput->Elements[0].Name = (HV_REGISTER_NAME)pGVCpu->nem.s.Hypercall.Experiment.uItem;
            pInput->Elements[0].Value.Reg128.High64 = pGVCpu->nem.s.Hypercall.Experiment.uHiValue;
            pInput->Elements[0].Value.Reg128.Low64 = pGVCpu->nem.s.Hypercall.Experiment.uLoValue;

            uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
                                                       pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0);
            pGVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_MAKE_CALL_REP_RET(1);
            pGVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;
            rc = VINF_SUCCESS;
        }
        else
            rc = VERR_INVALID_FUNCTION;
    }
    return rc;
}
3167#endif /* DEBUG_bird */
3168
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette