VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp@84948

Last change on this file since 84948 was 83474, checked in by vboxsync, 5 years ago

VMM/NEM: Added more VERR_NEM_MISSING_KERNEL_API status codes so we can better tell what goes wrong.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 112.9 KB
1/* $Id: NEMR0Native-win.cpp 83474 2020-03-28 16:48:23Z vboxsync $ */
2/** @file
3 * NEM - Native execution manager, native ring-0 Windows backend.
4 */
5
6/*
7 * Copyright (C) 2018-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_NEM
23#define VMCPU_INCL_CPUM_GST_CTX
24#include <iprt/nt/nt.h>
25#include <iprt/nt/hyperv.h>
26#include <iprt/nt/vid.h>
27#include <winerror.h>
28
29#include <VBox/vmm/nem.h>
30#include <VBox/vmm/iem.h>
31#include <VBox/vmm/em.h>
32#include <VBox/vmm/apic.h>
33#include <VBox/vmm/pdm.h>
34#include <VBox/vmm/dbgftrace.h>
35#include "NEMInternal.h"
36#include <VBox/vmm/gvm.h>
37#include <VBox/vmm/vmcc.h>
38#include <VBox/vmm/gvmm.h>
39#include <VBox/param.h>
40
41#include <iprt/dbg.h>
42#include <iprt/memobj.h>
43#include <iprt/string.h>
44#include <iprt/time.h>
45
46
47/* Assert compile context sanity. */
48#ifndef RT_OS_WINDOWS
49# error "Windows only file!"
50#endif
51#ifndef RT_ARCH_AMD64
52# error "AMD64 only file!"
53#endif
54
55
56/*********************************************************************************************************************************
57* Internal Functions *
58*********************************************************************************************************************************/
59typedef uint32_t DWORD; /* for winerror.h constants */
60
61
62/*********************************************************************************************************************************
63* Global Variables *
64*********************************************************************************************************************************/
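/**
 * ntoskrnl!HvlInvokeHypercall (resolved at runtime in NEMR0InitVM).
 *
 * Note: uCallInfo combines the hypercall code with rep-count information; see
 * HV_MAKE_CALL_INFO and the explicit "| ((uint64_t)cPages << 32)" constructions
 * further down.  The precise bit layout is assumed from the Hyper-V TLFS rather
 * than from anything in this file.
 */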
65static uint64_t (*g_pfnHvlInvokeHypercall)(uint64_t uCallInfo, uint64_t HCPhysInput, uint64_t HCPhysOutput);
66
67/**
68 * WinHvr.sys!WinHvDepositMemory
69 *
70 * This API will try to allocate cPages on IdealNode and deposit them with the
71 * hypervisor for use with the given partition. The memory will be freed when
72 * VID.SYS calls WinHvWithdrawAllMemory as the partition is cleaned up.
73 *
74 * Apparently node numbers above 64 have a different meaning.
75 */
76static NTSTATUS (*g_pfnWinHvDepositMemory)(uintptr_t idPartition, size_t cPages, uintptr_t IdealNode, size_t *pcActuallyAdded);
77
78
79/*********************************************************************************************************************************
80* Internal Functions *
81*********************************************************************************************************************************/
82NEM_TMPL_STATIC int nemR0WinMapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
83 uint32_t cPages, uint32_t fFlags);
84NEM_TMPL_STATIC int nemR0WinUnmapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys, uint32_t cPages);
85#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
86NEM_TMPL_STATIC int nemR0WinExportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx);
87NEM_TMPL_STATIC int nemR0WinImportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat, bool fCanUpdateCr3);
88NEM_TMPL_STATIC int nemR0WinQueryCpuTick(PGVM pGVM, PGVMCPU pGVCpu, uint64_t *pcTicks, uint32_t *pcAux);
89NEM_TMPL_STATIC int nemR0WinResumeCpuTickOnAll(PGVM pGVM, PGVMCPU pGVCpu, uint64_t uPausedTscValue);
90#endif
91DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, PGVMCPU pGVCpu, uint32_t uFunction, void *pvInput, uint32_t cbInput,
92 void *pvOutput, uint32_t cbOutput);
93
94
95/*
96 * Instantiate the code we share with ring-3.
97 */
98#ifdef NEM_WIN_WITH_RING0_RUNLOOP
99# define NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
100#else
101# undef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
102#endif
103#include "../VMMAll/NEMAllNativeTemplate-win.cpp.h"
104
105
106
107/**
108 * Worker for NEMR0InitVM that allocates a hypercall page.
109 *
110 * @returns VBox status code.
111 * @param pHypercallData The hypercall data page to initialize.
112 */
113static int nemR0InitHypercallData(PNEMR0HYPERCALLDATA pHypercallData)
114{
115 int rc = RTR0MemObjAllocPage(&pHypercallData->hMemObj, PAGE_SIZE, false /*fExecutable*/);
116 if (RT_SUCCESS(rc))
117 {
118 pHypercallData->HCPhysPage = RTR0MemObjGetPagePhysAddr(pHypercallData->hMemObj, 0 /*iPage*/);
119 AssertStmt(pHypercallData->HCPhysPage != NIL_RTHCPHYS, rc = VERR_INTERNAL_ERROR_3);
120 pHypercallData->pbPage = (uint8_t *)RTR0MemObjAddress(pHypercallData->hMemObj);
121 AssertStmt(pHypercallData->pbPage, rc = VERR_INTERNAL_ERROR_3);
122 if (RT_SUCCESS(rc))
123 return VINF_SUCCESS;
124
125 /* bail out */
126 RTR0MemObjFree(pHypercallData->hMemObj, true /*fFreeMappings*/);
127 }
128 pHypercallData->hMemObj = NIL_RTR0MEMOBJ;
129 pHypercallData->HCPhysPage = NIL_RTHCPHYS;
130 pHypercallData->pbPage = NULL;
131 return rc;
132}
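
/*
 * Note on usage: the page allocated above serves as the physically contiguous
 * input block for hypercalls.  Callers cast HypercallData.pbPage to the relevant
 * HV_INPUT_XXX structure, fill it in, and pass HypercallData.HCPhysPage as the
 * input GPA to g_pfnHvlInvokeHypercall - see nemR0WinMapPages and
 * nemR0WinUnmapPages below for the pattern.
 */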
133
134/**
135 * Worker for NEMR0CleanupVM and NEMR0InitVM that cleans up a hypercall page.
136 *
137 * @param pHypercallData The hypercall data page to uninitialize.
138 */
139static void nemR0DeleteHypercallData(PNEMR0HYPERCALLDATA pHypercallData)
140{
141 /* Check pbPage here since it's NULL when not allocated, whereas the hMemObj
142 can be either NIL_RTR0MEMOBJ or 0 (they aren't necessarily the same). */
143 if (pHypercallData->pbPage != NULL)
144 {
145 RTR0MemObjFree(pHypercallData->hMemObj, true /*fFreeMappings*/);
146 pHypercallData->pbPage = NULL;
147 }
148 pHypercallData->hMemObj = NIL_RTR0MEMOBJ;
149 pHypercallData->HCPhysPage = NIL_RTHCPHYS;
150}
151
152
153/**
154 * Called by NEMR3Init to make sure we've got what we need.
155 *
156 * @returns VBox status code.
157 * @param pGVM The ring-0 VM handle.
158 * @thread EMT(0)
159 */
160VMMR0_INT_DECL(int) NEMR0InitVM(PGVM pGVM)
161{
162 AssertCompile(sizeof(pGVM->nemr0.s) <= sizeof(pGVM->nemr0.padding));
163 AssertCompile(sizeof(pGVM->aCpus[0].nemr0.s) <= sizeof(pGVM->aCpus[0].nemr0.padding));
164
165 int rc = GVMMR0ValidateGVMandEMT(pGVM, 0);
166 AssertRCReturn(rc, rc);
167
168 /*
169 * We want to perform hypercalls here. The NT kernel started to expose a very low
170 * level interface for this somewhere between builds 14271 and 16299. Since we
171 * need build 17134 to get anywhere at all, the exact build is not relevant here.
172 *
173 * We also need to deposit memory with the hypervisor for use with the partition
174 * (page mapping structures and the like).
175 */
176 RTDBGKRNLINFO hKrnlInfo;
177 rc = RTR0DbgKrnlInfoOpen(&hKrnlInfo, 0);
178 if (RT_SUCCESS(rc))
179 {
180 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, NULL, "HvlInvokeHypercall", (void **)&g_pfnHvlInvokeHypercall);
181 if (RT_FAILURE(rc))
182 rc = VERR_NEM_MISSING_KERNEL_API_1;
183 if (RT_SUCCESS(rc))
184 {
185 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, "winhvr.sys", "WinHvDepositMemory", (void **)&g_pfnWinHvDepositMemory);
186 if (RT_FAILURE(rc))
187 rc = rc == VERR_MODULE_NOT_FOUND ? VERR_NEM_MISSING_KERNEL_API_2 : VERR_NEM_MISSING_KERNEL_API_3;
188 }
189 RTR0DbgKrnlInfoRelease(hKrnlInfo);
190 if (RT_SUCCESS(rc))
191 {
192 /*
193 * Allocate a page for non-EMT threads to use for hypercalls (update
194 * statistics and such) and a critical section protecting it.
195 */
196 rc = RTCritSectInit(&pGVM->nemr0.s.HypercallDataCritSect);
197 if (RT_SUCCESS(rc))
198 {
199 rc = nemR0InitHypercallData(&pGVM->nemr0.s.HypercallData);
200 if (RT_SUCCESS(rc))
201 {
202 /*
203 * Allocate a page for each VCPU to place hypercall data on.
204 */
205 for (VMCPUID i = 0; i < pGVM->cCpus; i++)
206 {
207 rc = nemR0InitHypercallData(&pGVM->aCpus[i].nemr0.s.HypercallData);
208 if (RT_FAILURE(rc))
209 {
210 while (i-- > 0)
211 nemR0DeleteHypercallData(&pGVM->aCpus[i].nemr0.s.HypercallData);
212 break;
213 }
214 }
215 if (RT_SUCCESS(rc))
216 {
217 /*
218 * So far, so good.
219 */
220 return rc;
221 }
222
223 /*
224 * Bail out.
225 */
226 nemR0DeleteHypercallData(&pGVM->nemr0.s.HypercallData);
227 }
228 RTCritSectDelete(&pGVM->nemr0.s.HypercallDataCritSect);
229 }
230 }
231 }
232
233 return rc;
234}
235
236
237/**
238 * Perform an I/O control operation on the partition handle (VID.SYS).
239 *
240 * @returns NT status code.
241 * @param pGVM The ring-0 VM structure.
242 * @param pGVCpu The global (ring-0) CPU structure of the calling EMT.
243 * @param uFunction The function to perform.
244 * @param pvInput The input buffer. This must point within the VM
245 * structure so we can easily convert to a ring-3
246 * pointer if necessary.
247 * @param cbInput The size of the input. @a pvInput must be NULL when
248 * zero.
249 * @param pvOutput The output buffer. This must also point within the
250 * VM structure for ring-3 pointer magic.
251 * @param cbOutput The size of the output. @a pvOutput must be NULL
252 * when zero.
253 * @thread EMT(pGVCpu)
254 */
255DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, PGVMCPU pGVCpu, uint32_t uFunction, void *pvInput, uint32_t cbInput,
256 void *pvOutput, uint32_t cbOutput)
257{
258#ifdef RT_STRICT
259 /*
260 * Input and output parameters are part of the VM CPU structure.
261 */
262 VMCPU_ASSERT_EMT(pGVCpu);
263 if (pvInput)
264 AssertReturn(((uintptr_t)pvInput + cbInput) - (uintptr_t)pGVCpu <= sizeof(*pGVCpu), VERR_INVALID_PARAMETER);
265 if (pvOutput)
266 AssertReturn(((uintptr_t)pvOutput + cbOutput) - (uintptr_t)pGVCpu <= sizeof(*pGVCpu), VERR_INVALID_PARAMETER);
267#endif
268
269 int32_t rcNt = STATUS_UNSUCCESSFUL;
270 int rc = SUPR0IoCtlPerform(pGVM->nemr0.s.pIoCtlCtx, uFunction,
271 pvInput,
272 pvInput ? (uintptr_t)pvInput + pGVCpu->nemr0.s.offRing3ConversionDelta : NIL_RTR3PTR,
273 cbInput,
274 pvOutput,
275 pvOutput ? (uintptr_t)pvOutput + pGVCpu->nemr0.s.offRing3ConversionDelta : NIL_RTR3PTR,
276 cbOutput,
277 &rcNt);
278 if (RT_SUCCESS(rc) || !NT_SUCCESS((NTSTATUS)rcNt))
279 return (NTSTATUS)rcNt;
280 return STATUS_UNSUCCESSFUL;
281}
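
/*
 * Note: the ring-3 addresses handed to SUPR0IoCtlPerform above are derived by
 * adding nemr0.s.offRing3ConversionDelta to the ring-0 buffer pointer.  The delta
 * is computed in NEMR0InitVMPart2 as the ring-3 VMCPU address minus the ring-0
 * one, which is why the buffers are required to live inside the VMCPU structure
 * (it is mapped into both contexts).
 */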
282
283
284/**
285 * 2nd part of the initialization, after we've got a partition handle.
286 *
287 * @returns VBox status code.
288 * @param pGVM The ring-0 VM handle.
289 * @thread EMT(0)
290 */
291VMMR0_INT_DECL(int) NEMR0InitVMPart2(PGVM pGVM)
292{
293 int rc = GVMMR0ValidateGVMandEMT(pGVM, 0);
294 AssertRCReturn(rc, rc);
295 SUPR0Printf("NEMR0InitVMPart2\n"); LogRel(("2: NEMR0InitVMPart2\n"));
296 Assert(pGVM->nemr0.s.fMayUseRing0Runloop == false);
297
298 /*
299 * Copy and validate the I/O control information from ring-3.
300 */
301 NEMWINIOCTL Copy = pGVM->nem.s.IoCtlGetHvPartitionId;
302 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);
303 AssertLogRelReturn(Copy.cbInput == 0, VERR_NEM_INIT_FAILED);
304 AssertLogRelReturn(Copy.cbOutput == sizeof(HV_PARTITION_ID), VERR_NEM_INIT_FAILED);
305 pGVM->nemr0.s.IoCtlGetHvPartitionId = Copy;
306
307 pGVM->nemr0.s.fMayUseRing0Runloop = pGVM->nem.s.fUseRing0Runloop;
308
309 Copy = pGVM->nem.s.IoCtlStartVirtualProcessor;
310 AssertLogRelStmt(Copy.uFunction != 0, rc = VERR_NEM_INIT_FAILED);
311 AssertLogRelStmt(Copy.cbInput == sizeof(HV_VP_INDEX), rc = VERR_NEM_INIT_FAILED);
312 AssertLogRelStmt(Copy.cbOutput == 0, rc = VERR_NEM_INIT_FAILED);
313 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlGetHvPartitionId.uFunction, rc = VERR_NEM_INIT_FAILED);
314 if (RT_SUCCESS(rc))
315 pGVM->nemr0.s.IoCtlStartVirtualProcessor = Copy;
316
317 Copy = pGVM->nem.s.IoCtlStopVirtualProcessor;
318 AssertLogRelStmt(Copy.uFunction != 0, rc = VERR_NEM_INIT_FAILED);
319 AssertLogRelStmt(Copy.cbInput == sizeof(HV_VP_INDEX), rc = VERR_NEM_INIT_FAILED);
320 AssertLogRelStmt(Copy.cbOutput == 0, rc = VERR_NEM_INIT_FAILED);
321 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlGetHvPartitionId.uFunction, rc = VERR_NEM_INIT_FAILED);
322 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlStartVirtualProcessor.uFunction, rc = VERR_NEM_INIT_FAILED);
323 if (RT_SUCCESS(rc))
324 pGVM->nemr0.s.IoCtlStopVirtualProcessor = Copy;
325
326 Copy = pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext;
327 AssertLogRelStmt(Copy.uFunction != 0, rc = VERR_NEM_INIT_FAILED);
328 AssertLogRelStmt( Copy.cbInput == sizeof(VID_IOCTL_INPUT_MESSAGE_SLOT_HANDLE_AND_GET_NEXT)
329 || Copy.cbInput == RT_OFFSETOF(VID_IOCTL_INPUT_MESSAGE_SLOT_HANDLE_AND_GET_NEXT, cMillies),
330 rc = VERR_NEM_INIT_FAILED);
331 AssertLogRelStmt(Copy.cbOutput == 0, rc = VERR_NEM_INIT_FAILED);
332 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlGetHvPartitionId.uFunction, rc = VERR_NEM_INIT_FAILED);
333 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlStartVirtualProcessor.uFunction, rc = VERR_NEM_INIT_FAILED);
334 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlStopVirtualProcessor.uFunction, rc = VERR_NEM_INIT_FAILED);
335 if (RT_SUCCESS(rc))
336 pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext = Copy;
337
338 if ( RT_SUCCESS(rc)
339 || !pGVM->nem.s.fUseRing0Runloop)
340 {
341 /*
342 * Setup of an I/O control context for the partition handle for later use.
343 */
344 rc = SUPR0IoCtlSetupForHandle(pGVM->pSession, pGVM->nem.s.hPartitionDevice, 0, &pGVM->nemr0.s.pIoCtlCtx);
345 AssertLogRelRCReturn(rc, rc);
346 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
347 {
348 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
349 pGVCpu->nemr0.s.offRing3ConversionDelta = (uintptr_t)pGVM->aCpus[idCpu].pVCpuR3 - (uintptr_t)pGVCpu;
350 }
351
352 /*
353 * Get the partition ID.
354 */
355 PVMCPUCC pVCpu0 = &pGVM->aCpus[0];
356 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pVCpu0, pGVM->nemr0.s.IoCtlGetHvPartitionId.uFunction, NULL, 0,
357 &pVCpu0->nem.s.uIoCtlBuf.idPartition, sizeof(pVCpu0->nem.s.uIoCtlBuf.idPartition));
358 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("IoCtlGetHvPartitionId failed: %#x\n", rcNt), VERR_NEM_INIT_FAILED);
359 pGVM->nemr0.s.idHvPartition = pVCpu0->nem.s.uIoCtlBuf.idPartition;
360 AssertLogRelMsgReturn(pGVM->nemr0.s.idHvPartition == pGVM->nem.s.idHvPartition,
361 ("idHvPartition mismatch: r0=%#RX64, r3=%#RX64\n", pGVM->nemr0.s.idHvPartition, pGVM->nem.s.idHvPartition),
362 VERR_NEM_INIT_FAILED);
363 }
364
365 return rc;
366}
367
368
369/**
370 * Cleanup the NEM parts of the VM in ring-0.
371 *
372 * This is always called and must deal with the state regardless of whether
373 * NEMR0InitVM() was called or not. So, take care here.
374 *
375 * @param pGVM The ring-0 VM handle.
376 */
377VMMR0_INT_DECL(void) NEMR0CleanupVM(PGVM pGVM)
378{
379 pGVM->nemr0.s.idHvPartition = HV_PARTITION_ID_INVALID;
380
381 /* Clean up I/O control context. */
382 if (pGVM->nemr0.s.pIoCtlCtx)
383 {
384 int rc = SUPR0IoCtlCleanup(pGVM->nemr0.s.pIoCtlCtx);
385 AssertRC(rc);
386 pGVM->nemr0.s.pIoCtlCtx = NULL;
387 }
388
389 /* Free the hypercall pages. */
390 VMCPUID i = pGVM->cCpus;
391 while (i-- > 0)
392 nemR0DeleteHypercallData(&pGVM->aCpus[i].nemr0.s.HypercallData);
393
394 /* The non-EMT one too. */
395 if (RTCritSectIsInitialized(&pGVM->nemr0.s.HypercallDataCritSect))
396 RTCritSectDelete(&pGVM->nemr0.s.HypercallDataCritSect);
397 nemR0DeleteHypercallData(&pGVM->nemr0.s.HypercallData);
398}
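
/*
 * Note: the cleanup above is safe even if NEMR0InitVM never ran or only ran
 * partially: nemR0DeleteHypercallData only frees when pbPage is set, and the
 * critical section is only deleted after checking that it was initialized.
 */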
399
400
401#if 0 /* for debugging GPA unmapping. */
402static int nemR3WinDummyReadGpa(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys)
403{
404 PHV_INPUT_READ_GPA pIn = (PHV_INPUT_READ_GPA)pGVCpu->nemr0.s.pbHypercallData;
405 PHV_OUTPUT_READ_GPA pOut = (PHV_OUTPUT_READ_GPA)(pIn + 1);
406 pIn->PartitionId = pGVM->nemr0.s.idHvPartition;
407 pIn->VpIndex = pGVCpu->idCpu;
408 pIn->ByteCount = 0x10;
409 pIn->BaseGpa = GCPhys;
410 pIn->ControlFlags.AsUINT64 = 0;
411 pIn->ControlFlags.CacheType = HvCacheTypeX64WriteCombining;
412 memset(pOut, 0xfe, sizeof(*pOut));
413 uint64_t volatile uResult = g_pfnHvlInvokeHypercall(HvCallReadGpa, pGVCpu->nemr0.s.HCPhysHypercallData,
414 pGVCpu->nemr0.s.HCPhysHypercallData + sizeof(*pIn));
415 LogRel(("nemR3WinDummyReadGpa: %RGp -> %#RX64; code=%u rsvd=%u abData=%.16Rhxs\n",
416 GCPhys, uResult, pOut->AccessResult.ResultCode, pOut->AccessResult.Reserved, pOut->Data));
417 __debugbreak();
418
419 return uResult != 0 ? VERR_READ_ERROR : VINF_SUCCESS;
420}
421#endif
422
423
424/**
425 * Worker for NEMR0MapPages and others.
426 */
427NEM_TMPL_STATIC int nemR0WinMapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
428 uint32_t cPages, uint32_t fFlags)
429{
430 /*
431 * Validate.
432 */
433 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
434
435 AssertReturn(cPages > 0, VERR_OUT_OF_RANGE);
436 AssertReturn(cPages <= NEM_MAX_MAP_PAGES, VERR_OUT_OF_RANGE);
437 AssertReturn(!(fFlags & ~(HV_MAP_GPA_MAYBE_ACCESS_MASK & ~HV_MAP_GPA_DUNNO_ACCESS)), VERR_INVALID_FLAGS);
438 AssertMsgReturn(!(GCPhysDst & X86_PAGE_OFFSET_MASK), ("GCPhysDst=%RGp\n", GCPhysDst), VERR_OUT_OF_RANGE);
439 AssertReturn(GCPhysDst < _1E, VERR_OUT_OF_RANGE);
440 if (GCPhysSrc != GCPhysDst)
441 {
442 AssertMsgReturn(!(GCPhysSrc & X86_PAGE_OFFSET_MASK), ("GCPhysSrc=%RGp\n", GCPhysSrc), VERR_OUT_OF_RANGE);
443 AssertReturn(GCPhysSrc < _1E, VERR_OUT_OF_RANGE);
444 }
445
446 /*
447 * Compose and make the hypercall.
448 * Ring-3 is not allowed to fill in the host physical addresses of the call.
449 */
450 for (uint32_t iTries = 0;; iTries++)
451 {
452 HV_INPUT_MAP_GPA_PAGES *pMapPages = (HV_INPUT_MAP_GPA_PAGES *)pGVCpu->nemr0.s.HypercallData.pbPage;
453 AssertPtrReturn(pMapPages, VERR_INTERNAL_ERROR_3);
454 pMapPages->TargetPartitionId = pGVM->nemr0.s.idHvPartition;
455 pMapPages->TargetGpaBase = GCPhysDst >> X86_PAGE_SHIFT;
456 pMapPages->MapFlags = fFlags;
457 pMapPages->u32ExplicitPadding = 0;
458 for (uint32_t iPage = 0; iPage < cPages; iPage++, GCPhysSrc += X86_PAGE_SIZE)
459 {
460 RTHCPHYS HCPhys = NIL_RTGCPHYS;
461 int rc = PGMPhysGCPhys2HCPhys(pGVM, GCPhysSrc, &HCPhys);
462 AssertRCReturn(rc, rc);
463 pMapPages->PageList[iPage] = HCPhys >> X86_PAGE_SHIFT;
464 }
465
466 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallMapGpaPages | ((uint64_t)cPages << 32),
467 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0);
468 Log6(("NEMR0MapPages: %RGp/%RGp L %u prot %#x -> %#RX64\n",
469 GCPhysDst, GCPhysSrc - cPages * X86_PAGE_SIZE, cPages, fFlags, uResult));
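        /* A rep hypercall returns the number of completed repetitions in the upper
           half of the result and an HV_STATUS code in the low 16 bits, so equality
           with cPages << 32 means every page was mapped with HV_STATUS_SUCCESS
           (layout assumed from the Hyper-V TLFS). */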
470 if (uResult == ((uint64_t)cPages << 32))
471 return VINF_SUCCESS;
472
473 /*
474 * If the partition is out of memory, try to donate another 512 pages to
475 * it (2MB). VID.SYS does multiples of 512 pages, nothing smaller.
476 */
477 if ( uResult != HV_STATUS_INSUFFICIENT_MEMORY
478 || iTries > 16
479 || g_pfnWinHvDepositMemory == NULL)
480 {
481 LogRel(("g_pfnHvlInvokeHypercall/MapGpaPages -> %#RX64\n", uResult));
482 return VERR_NEM_MAP_PAGES_FAILED;
483 }
484
485 size_t cPagesAdded = 0;
486 NTSTATUS rcNt = g_pfnWinHvDepositMemory(pGVM->nemr0.s.idHvPartition, 512, 0, &cPagesAdded);
487 if (!cPagesAdded)
488 {
489 LogRel(("g_pfnWinHvDepositMemory -> %#x / %#RX64\n", rcNt, uResult));
490 return VERR_NEM_MAP_PAGES_FAILED;
491 }
492 }
493}
494
495
496/**
497 * Maps pages into the guest physical address space.
498 *
499 * Generally the caller will be under the PGM lock already, so no extra effort
500 * is needed to make sure all changes happen under it.
501 *
502 * @returns VBox status code.
503 * @param pGVM The ring-0 VM handle.
504 * @param idCpu The calling EMT. Necessary for getting the
505 * hypercall page and arguments.
506 * @thread EMT(idCpu)
507 */
508VMMR0_INT_DECL(int) NEMR0MapPages(PGVM pGVM, VMCPUID idCpu)
509{
510 /*
511 * Unpack the call.
512 */
513 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
514 if (RT_SUCCESS(rc))
515 {
516 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
517
518 RTGCPHYS const GCPhysSrc = pGVCpu->nem.s.Hypercall.MapPages.GCPhysSrc;
519 RTGCPHYS const GCPhysDst = pGVCpu->nem.s.Hypercall.MapPages.GCPhysDst;
520 uint32_t const cPages = pGVCpu->nem.s.Hypercall.MapPages.cPages;
521 HV_MAP_GPA_FLAGS const fFlags = pGVCpu->nem.s.Hypercall.MapPages.fFlags;
522
523 /*
524 * Do the work.
525 */
526 rc = nemR0WinMapPages(pGVM, pGVCpu, GCPhysSrc, GCPhysDst, cPages, fFlags);
527 }
528 return rc;
529}
530
531
532/**
533 * Worker for NEMR0UnmapPages and others.
534 */
535NEM_TMPL_STATIC int nemR0WinUnmapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys, uint32_t cPages)
536{
537 /*
538 * Validate input.
539 */
540 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
541
542 AssertReturn(cPages > 0, VERR_OUT_OF_RANGE);
543 AssertReturn(cPages <= NEM_MAX_UNMAP_PAGES, VERR_OUT_OF_RANGE);
544 AssertMsgReturn(!(GCPhys & X86_PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_OUT_OF_RANGE);
545 AssertReturn(GCPhys < _1E, VERR_OUT_OF_RANGE);
546
547 /*
548 * Compose and make the hypercall.
549 */
550 HV_INPUT_UNMAP_GPA_PAGES *pUnmapPages = (HV_INPUT_UNMAP_GPA_PAGES *)pGVCpu->nemr0.s.HypercallData.pbPage;
551 AssertPtrReturn(pUnmapPages, VERR_INTERNAL_ERROR_3);
552 pUnmapPages->TargetPartitionId = pGVM->nemr0.s.idHvPartition;
553 pUnmapPages->TargetGpaBase = GCPhys >> X86_PAGE_SHIFT;
554 pUnmapPages->fFlags = 0;
555
556 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallUnmapGpaPages | ((uint64_t)cPages << 32),
557 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0);
558 Log6(("NEMR0UnmapPages: %RGp L %u -> %#RX64\n", GCPhys, cPages, uResult));
559 if (uResult == ((uint64_t)cPages << 32))
560 {
561#if 1 /* Do we need to do this? Hopefully not... */
562 uint64_t volatile uR = g_pfnHvlInvokeHypercall(HvCallUncommitGpaPages | ((uint64_t)cPages << 32),
563 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0);
564 AssertMsg(uR == ((uint64_t)cPages << 32), ("uR=%#RX64\n", uR)); NOREF(uR);
565#endif
566 return VINF_SUCCESS;
567 }
568
569 LogRel(("g_pfnHvlInvokeHypercall/UnmapGpaPages -> %#RX64\n", uResult));
570 return VERR_NEM_UNMAP_PAGES_FAILED;
571}
572
573
574/**
575 * Unmaps pages from the guest physical address space.
576 *
577 * Generally the caller will be under the PGM lock already, so no extra effort
578 * is needed to make sure all changes happen under it.
579 *
580 * @returns VBox status code.
581 * @param pGVM The ring-0 VM handle.
582 * @param idCpu The calling EMT. Necessary for getting the
583 * hypercall page and arguments.
584 * @thread EMT(idCpu)
585 */
586VMMR0_INT_DECL(int) NEMR0UnmapPages(PGVM pGVM, VMCPUID idCpu)
587{
588 /*
589 * Unpack the call.
590 */
591 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
592 if (RT_SUCCESS(rc))
593 {
594 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
595
596 RTGCPHYS const GCPhys = pGVCpu->nem.s.Hypercall.UnmapPages.GCPhys;
597 uint32_t const cPages = pGVCpu->nem.s.Hypercall.UnmapPages.cPages;
598
599 /*
600 * Do the work.
601 */
602 rc = nemR0WinUnmapPages(pGVM, pGVCpu, GCPhys, cPages);
603 }
604 return rc;
605}
606
607
608#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
609/**
610 * Worker for NEMR0ExportState.
611 *
612 * Intention is to use it internally later.
613 *
614 * @returns VBox status code.
615 * @param pGVM The ring-0 VM handle.
616 * @param pGVCpu The ring-0 VCPU handle.
617 * @param pCtx The CPU context structure to export from.
618 */
619NEM_TMPL_STATIC int nemR0WinExportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx)
620{
621 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
622 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
623 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
624
625 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
626 pInput->VpIndex = pGVCpu->idCpu;
627 pInput->RsvdZ = 0;
628
629 uint64_t const fWhat = ~pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK);
630 if ( !fWhat
631 && pGVCpu->nem.s.fCurrentInterruptWindows == pGVCpu->nem.s.fDesiredInterruptWindows)
632 return VINF_SUCCESS;
633 uintptr_t iReg = 0;
634
635 /* GPRs */
636 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
637 {
638 if (fWhat & CPUMCTX_EXTRN_RAX)
639 {
640 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
641 pInput->Elements[iReg].Name = HvX64RegisterRax;
642 pInput->Elements[iReg].Value.Reg64 = pCtx->rax;
643 iReg++;
644 }
645 if (fWhat & CPUMCTX_EXTRN_RCX)
646 {
647 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
648 pInput->Elements[iReg].Name = HvX64RegisterRcx;
649 pInput->Elements[iReg].Value.Reg64 = pCtx->rcx;
650 iReg++;
651 }
652 if (fWhat & CPUMCTX_EXTRN_RDX)
653 {
654 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
655 pInput->Elements[iReg].Name = HvX64RegisterRdx;
656 pInput->Elements[iReg].Value.Reg64 = pCtx->rdx;
657 iReg++;
658 }
659 if (fWhat & CPUMCTX_EXTRN_RBX)
660 {
661 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
662 pInput->Elements[iReg].Name = HvX64RegisterRbx;
663 pInput->Elements[iReg].Value.Reg64 = pCtx->rbx;
664 iReg++;
665 }
666 if (fWhat & CPUMCTX_EXTRN_RSP)
667 {
668 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
669 pInput->Elements[iReg].Name = HvX64RegisterRsp;
670 pInput->Elements[iReg].Value.Reg64 = pCtx->rsp;
671 iReg++;
672 }
673 if (fWhat & CPUMCTX_EXTRN_RBP)
674 {
675 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
676 pInput->Elements[iReg].Name = HvX64RegisterRbp;
677 pInput->Elements[iReg].Value.Reg64 = pCtx->rbp;
678 iReg++;
679 }
680 if (fWhat & CPUMCTX_EXTRN_RSI)
681 {
682 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
683 pInput->Elements[iReg].Name = HvX64RegisterRsi;
684 pInput->Elements[iReg].Value.Reg64 = pCtx->rsi;
685 iReg++;
686 }
687 if (fWhat & CPUMCTX_EXTRN_RDI)
688 {
689 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
690 pInput->Elements[iReg].Name = HvX64RegisterRdi;
691 pInput->Elements[iReg].Value.Reg64 = pCtx->rdi;
692 iReg++;
693 }
694 if (fWhat & CPUMCTX_EXTRN_R8_R15)
695 {
696 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
697 pInput->Elements[iReg].Name = HvX64RegisterR8;
698 pInput->Elements[iReg].Value.Reg64 = pCtx->r8;
699 iReg++;
700 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
701 pInput->Elements[iReg].Name = HvX64RegisterR9;
702 pInput->Elements[iReg].Value.Reg64 = pCtx->r9;
703 iReg++;
704 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
705 pInput->Elements[iReg].Name = HvX64RegisterR10;
706 pInput->Elements[iReg].Value.Reg64 = pCtx->r10;
707 iReg++;
708 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
709 pInput->Elements[iReg].Name = HvX64RegisterR11;
710 pInput->Elements[iReg].Value.Reg64 = pCtx->r11;
711 iReg++;
712 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
713 pInput->Elements[iReg].Name = HvX64RegisterR12;
714 pInput->Elements[iReg].Value.Reg64 = pCtx->r12;
715 iReg++;
716 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
717 pInput->Elements[iReg].Name = HvX64RegisterR13;
718 pInput->Elements[iReg].Value.Reg64 = pCtx->r13;
719 iReg++;
720 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
721 pInput->Elements[iReg].Name = HvX64RegisterR14;
722 pInput->Elements[iReg].Value.Reg64 = pCtx->r14;
723 iReg++;
724 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
725 pInput->Elements[iReg].Name = HvX64RegisterR15;
726 pInput->Elements[iReg].Value.Reg64 = pCtx->r15;
727 iReg++;
728 }
729 }
730
731 /* RIP & Flags */
732 if (fWhat & CPUMCTX_EXTRN_RIP)
733 {
734 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
735 pInput->Elements[iReg].Name = HvX64RegisterRip;
736 pInput->Elements[iReg].Value.Reg64 = pCtx->rip;
737 iReg++;
738 }
739 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
740 {
741 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
742 pInput->Elements[iReg].Name = HvX64RegisterRflags;
743 pInput->Elements[iReg].Value.Reg64 = pCtx->rflags.u;
744 iReg++;
745 }
746
747 /* Segments */
748# define COPY_OUT_SEG(a_idx, a_enmName, a_SReg) \
749 do { \
750 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[a_idx]); \
751 pInput->Elements[a_idx].Name = a_enmName; \
752 pInput->Elements[a_idx].Value.Segment.Base = (a_SReg).u64Base; \
753 pInput->Elements[a_idx].Value.Segment.Limit = (a_SReg).u32Limit; \
754 pInput->Elements[a_idx].Value.Segment.Selector = (a_SReg).Sel; \
755 pInput->Elements[a_idx].Value.Segment.Attributes = (a_SReg).Attr.u; \
756 } while (0)
757 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
758 {
759 if (fWhat & CPUMCTX_EXTRN_CS)
760 {
761 COPY_OUT_SEG(iReg, HvX64RegisterCs, pCtx->cs);
762 iReg++;
763 }
764 if (fWhat & CPUMCTX_EXTRN_ES)
765 {
766 COPY_OUT_SEG(iReg, HvX64RegisterEs, pCtx->es);
767 iReg++;
768 }
769 if (fWhat & CPUMCTX_EXTRN_SS)
770 {
771 COPY_OUT_SEG(iReg, HvX64RegisterSs, pCtx->ss);
772 iReg++;
773 }
774 if (fWhat & CPUMCTX_EXTRN_DS)
775 {
776 COPY_OUT_SEG(iReg, HvX64RegisterDs, pCtx->ds);
777 iReg++;
778 }
779 if (fWhat & CPUMCTX_EXTRN_FS)
780 {
781 COPY_OUT_SEG(iReg, HvX64RegisterFs, pCtx->fs);
782 iReg++;
783 }
784 if (fWhat & CPUMCTX_EXTRN_GS)
785 {
786 COPY_OUT_SEG(iReg, HvX64RegisterGs, pCtx->gs);
787 iReg++;
788 }
789 }
790
791 /* Descriptor tables & task segment. */
792 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
793 {
794 if (fWhat & CPUMCTX_EXTRN_LDTR)
795 {
796 COPY_OUT_SEG(iReg, HvX64RegisterLdtr, pCtx->ldtr);
797 iReg++;
798 }
799 if (fWhat & CPUMCTX_EXTRN_TR)
800 {
801 COPY_OUT_SEG(iReg, HvX64RegisterTr, pCtx->tr);
802 iReg++;
803 }
804
805 if (fWhat & CPUMCTX_EXTRN_IDTR)
806 {
807 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
808 pInput->Elements[iReg].Value.Table.Pad[0] = 0;
809 pInput->Elements[iReg].Value.Table.Pad[1] = 0;
810 pInput->Elements[iReg].Value.Table.Pad[2] = 0;
811 pInput->Elements[iReg].Name = HvX64RegisterIdtr;
812 pInput->Elements[iReg].Value.Table.Limit = pCtx->idtr.cbIdt;
813 pInput->Elements[iReg].Value.Table.Base = pCtx->idtr.pIdt;
814 iReg++;
815 }
816 if (fWhat & CPUMCTX_EXTRN_GDTR)
817 {
818 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
819 pInput->Elements[iReg].Value.Table.Pad[0] = 0;
820 pInput->Elements[iReg].Value.Table.Pad[1] = 0;
821 pInput->Elements[iReg].Value.Table.Pad[2] = 0;
822 pInput->Elements[iReg].Name = HvX64RegisterGdtr;
823 pInput->Elements[iReg].Value.Table.Limit = pCtx->gdtr.cbGdt;
824 pInput->Elements[iReg].Value.Table.Base = pCtx->gdtr.pGdt;
825 iReg++;
826 }
827 }
828
829 /* Control registers. */
830 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
831 {
832 if (fWhat & CPUMCTX_EXTRN_CR0)
833 {
834 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
835 pInput->Elements[iReg].Name = HvX64RegisterCr0;
836 pInput->Elements[iReg].Value.Reg64 = pCtx->cr0;
837 iReg++;
838 }
839 if (fWhat & CPUMCTX_EXTRN_CR2)
840 {
841 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
842 pInput->Elements[iReg].Name = HvX64RegisterCr2;
843 pInput->Elements[iReg].Value.Reg64 = pCtx->cr2;
844 iReg++;
845 }
846 if (fWhat & CPUMCTX_EXTRN_CR3)
847 {
848 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
849 pInput->Elements[iReg].Name = HvX64RegisterCr3;
850 pInput->Elements[iReg].Value.Reg64 = pCtx->cr3;
851 iReg++;
852 }
853 if (fWhat & CPUMCTX_EXTRN_CR4)
854 {
855 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
856 pInput->Elements[iReg].Name = HvX64RegisterCr4;
857 pInput->Elements[iReg].Value.Reg64 = pCtx->cr4;
858 iReg++;
859 }
860 }
861 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
862 {
863 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
864 pInput->Elements[iReg].Name = HvX64RegisterCr8;
865 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestCR8(pGVCpu);
866 iReg++;
867 }
868
869 /** @todo does HvX64RegisterXfem mean XCR0? What about the related MSR. */
870
871 /* Debug registers. */
872/** @todo fixme. Figure out what the hyper-v version of KVM_SET_GUEST_DEBUG would be. */
873 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
874 {
875 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
876 pInput->Elements[iReg].Name = HvX64RegisterDr0;
877 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR0(pGVCpu);
878 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[0];
879 iReg++;
880 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
881 pInput->Elements[iReg].Name = HvX64RegisterDr1;
882 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR1(pGVCpu);
883 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[1];
884 iReg++;
885 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
886 pInput->Elements[iReg].Name = HvX64RegisterDr2;
887 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR2(pGVCpu);
888 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[2];
889 iReg++;
890 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
891 pInput->Elements[iReg].Name = HvX64RegisterDr3;
892 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR3(pGVCpu);
893 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[3];
894 iReg++;
895 }
896 if (fWhat & CPUMCTX_EXTRN_DR6)
897 {
898 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
899 pInput->Elements[iReg].Name = HvX64RegisterDr6;
900 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR6(pGVCpu);
901 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[6];
902 iReg++;
903 }
904 if (fWhat & CPUMCTX_EXTRN_DR7)
905 {
906 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
907 pInput->Elements[iReg].Name = HvX64RegisterDr7;
908 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR7(pGVCpu);
909 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[7];
910 iReg++;
911 }
912
913 /* Floating point state. */
914 if (fWhat & CPUMCTX_EXTRN_X87)
915 {
916 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
917 pInput->Elements[iReg].Name = HvX64RegisterFpMmx0;
918 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[0].au64[0];
919 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[0].au64[1];
920 iReg++;
921 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
922 pInput->Elements[iReg].Name = HvX64RegisterFpMmx1;
923 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[1].au64[0];
924 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[1].au64[1];
925 iReg++;
926 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
927 pInput->Elements[iReg].Name = HvX64RegisterFpMmx2;
928 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[2].au64[0];
929 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[2].au64[1];
930 iReg++;
931 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
932 pInput->Elements[iReg].Name = HvX64RegisterFpMmx3;
933 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[3].au64[0];
934 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[3].au64[1];
935 iReg++;
936 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
937 pInput->Elements[iReg].Name = HvX64RegisterFpMmx4;
938 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[4].au64[0];
939 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[4].au64[1];
940 iReg++;
941 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
942 pInput->Elements[iReg].Name = HvX64RegisterFpMmx5;
943 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[5].au64[0];
944 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[5].au64[1];
945 iReg++;
946 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
947 pInput->Elements[iReg].Name = HvX64RegisterFpMmx6;
948 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[6].au64[0];
949 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[6].au64[1];
950 iReg++;
951 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
952 pInput->Elements[iReg].Name = HvX64RegisterFpMmx7;
953 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[7].au64[0];
954 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[7].au64[1];
955 iReg++;
956
957 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
958 pInput->Elements[iReg].Name = HvX64RegisterFpControlStatus;
959 pInput->Elements[iReg].Value.FpControlStatus.FpControl = pCtx->pXStateR0->x87.FCW;
960 pInput->Elements[iReg].Value.FpControlStatus.FpStatus = pCtx->pXStateR0->x87.FSW;
961 pInput->Elements[iReg].Value.FpControlStatus.FpTag = pCtx->pXStateR0->x87.FTW;
962 pInput->Elements[iReg].Value.FpControlStatus.Reserved = pCtx->pXStateR0->x87.FTW >> 8;
963 pInput->Elements[iReg].Value.FpControlStatus.LastFpOp = pCtx->pXStateR0->x87.FOP;
964 pInput->Elements[iReg].Value.FpControlStatus.LastFpRip = (pCtx->pXStateR0->x87.FPUIP)
965 | ((uint64_t)pCtx->pXStateR0->x87.CS << 32)
966 | ((uint64_t)pCtx->pXStateR0->x87.Rsrvd1 << 48);
967 iReg++;
968/** @todo we've got trouble if we try to write just SSE w/o X87. */
969 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
970 pInput->Elements[iReg].Name = HvX64RegisterXmmControlStatus;
971 pInput->Elements[iReg].Value.XmmControlStatus.LastFpRdp = (pCtx->pXStateR0->x87.FPUDP)
972 | ((uint64_t)pCtx->pXStateR0->x87.DS << 32)
973 | ((uint64_t)pCtx->pXStateR0->x87.Rsrvd2 << 48);
974 pInput->Elements[iReg].Value.XmmControlStatus.XmmStatusControl = pCtx->pXStateR0->x87.MXCSR;
975 pInput->Elements[iReg].Value.XmmControlStatus.XmmStatusControlMask = pCtx->pXStateR0->x87.MXCSR_MASK; /** @todo ??? (Isn't this an output field?) */
976 iReg++;
977 }
978
979 /* Vector state. */
980 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
981 {
982 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
983 pInput->Elements[iReg].Name = HvX64RegisterXmm0;
984 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Lo;
985 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Hi;
986 iReg++;
987 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
988 pInput->Elements[iReg].Name = HvX64RegisterXmm1;
989 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Lo;
990 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Hi;
991 iReg++;
992 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
993 pInput->Elements[iReg].Name = HvX64RegisterXmm2;
994 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Lo;
995 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Hi;
996 iReg++;
997 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
998 pInput->Elements[iReg].Name = HvX64RegisterXmm3;
999 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Lo;
1000 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Hi;
1001 iReg++;
1002 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1003 pInput->Elements[iReg].Name = HvX64RegisterXmm4;
1004 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Lo;
1005 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Hi;
1006 iReg++;
1007 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1008 pInput->Elements[iReg].Name = HvX64RegisterXmm5;
1009 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Lo;
1010 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Hi;
1011 iReg++;
1012 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1013 pInput->Elements[iReg].Name = HvX64RegisterXmm6;
1014 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Lo;
1015 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Hi;
1016 iReg++;
1017 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1018 pInput->Elements[iReg].Name = HvX64RegisterXmm7;
1019 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Lo;
1020 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Hi;
1021 iReg++;
1022 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1023 pInput->Elements[iReg].Name = HvX64RegisterXmm8;
1024 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Lo;
1025 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Hi;
1026 iReg++;
1027 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1028 pInput->Elements[iReg].Name = HvX64RegisterXmm9;
1029 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Lo;
1030 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Hi;
1031 iReg++;
1032 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1033 pInput->Elements[iReg].Name = HvX64RegisterXmm10;
1034 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Lo;
1035 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Hi;
1036 iReg++;
1037 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1038 pInput->Elements[iReg].Name = HvX64RegisterXmm11;
1039 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Lo;
1040 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Hi;
1041 iReg++;
1042 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1043 pInput->Elements[iReg].Name = HvX64RegisterXmm12;
1044 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Lo;
1045 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Hi;
1046 iReg++;
1047 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1048 pInput->Elements[iReg].Name = HvX64RegisterXmm13;
1049 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Lo;
1050 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Hi;
1051 iReg++;
1052 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1053 pInput->Elements[iReg].Name = HvX64RegisterXmm14;
1054 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Lo;
1055 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Hi;
1056 iReg++;
1057 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1058 pInput->Elements[iReg].Name = HvX64RegisterXmm15;
1059 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Lo;
1060 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Hi;
1061 iReg++;
1062 }
1063
1064 /* MSRs */
1065 // HvX64RegisterTsc - don't touch
1066 if (fWhat & CPUMCTX_EXTRN_EFER)
1067 {
1068 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1069 pInput->Elements[iReg].Name = HvX64RegisterEfer;
1070 pInput->Elements[iReg].Value.Reg64 = pCtx->msrEFER;
1071 iReg++;
1072 }
1073 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1074 {
1075 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1076 pInput->Elements[iReg].Name = HvX64RegisterKernelGsBase;
1077 pInput->Elements[iReg].Value.Reg64 = pCtx->msrKERNELGSBASE;
1078 iReg++;
1079 }
1080 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1081 {
1082 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1083 pInput->Elements[iReg].Name = HvX64RegisterSysenterCs;
1084 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.cs;
1085 iReg++;
1086 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1087 pInput->Elements[iReg].Name = HvX64RegisterSysenterEip;
1088 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.eip;
1089 iReg++;
1090 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1091 pInput->Elements[iReg].Name = HvX64RegisterSysenterEsp;
1092 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.esp;
1093 iReg++;
1094 }
1095 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1096 {
1097 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1098 pInput->Elements[iReg].Name = HvX64RegisterStar;
1099 pInput->Elements[iReg].Value.Reg64 = pCtx->msrSTAR;
1100 iReg++;
1101 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1102 pInput->Elements[iReg].Name = HvX64RegisterLstar;
1103 pInput->Elements[iReg].Value.Reg64 = pCtx->msrLSTAR;
1104 iReg++;
1105 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1106 pInput->Elements[iReg].Name = HvX64RegisterCstar;
1107 pInput->Elements[iReg].Value.Reg64 = pCtx->msrCSTAR;
1108 iReg++;
1109 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1110 pInput->Elements[iReg].Name = HvX64RegisterSfmask;
1111 pInput->Elements[iReg].Value.Reg64 = pCtx->msrSFMASK;
1112 iReg++;
1113 }
1114 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1115 {
1116 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1117 pInput->Elements[iReg].Name = HvX64RegisterApicBase;
1118 pInput->Elements[iReg].Value.Reg64 = APICGetBaseMsrNoCheck(pGVCpu);
1119 iReg++;
1120 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1121 pInput->Elements[iReg].Name = HvX64RegisterPat;
1122 pInput->Elements[iReg].Value.Reg64 = pCtx->msrPAT;
1123 iReg++;
1124# if 0 /** @todo HvX64RegisterMtrrCap is read only? Seems it's not even readable. */
1125 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1126 pInput->Elements[iReg].Name = HvX64RegisterMtrrCap;
1127 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestIa32MtrrCap(pGVCpu);
1128 iReg++;
1129# endif
1130
1131 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pGVCpu);
1132
1133 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1134 pInput->Elements[iReg].Name = HvX64RegisterMtrrDefType;
1135 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrDefType;
1136 iReg++;
1137
1138 /** @todo we don't keep state for HvX64RegisterMtrrPhysBaseX and HvX64RegisterMtrrPhysMaskX */
1139
1140 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1141 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix64k00000;
1142 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix64K_00000;
1143 iReg++;
1144 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1145 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix16k80000;
1146 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix16K_80000;
1147 iReg++;
1148 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1149 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix16kA0000;
1150 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix16K_A0000;
1151 iReg++;
1152 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1153 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kC0000;
1154 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_C0000;
1155 iReg++;
1156 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1157 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kC8000;
1158 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_C8000;
1159 iReg++;
1160 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1161 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kD0000;
1162 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_D0000;
1163 iReg++;
1164 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1165 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kD8000;
1166 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_D8000;
1167 iReg++;
1168 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1169 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kE0000;
1170 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_E0000;
1171 iReg++;
1172 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1173 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kE8000;
1174 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_E8000;
1175 iReg++;
1176 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1177 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kF0000;
1178 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_F0000;
1179 iReg++;
1180 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1181 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kF8000;
1182 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_F8000;
1183 iReg++;
1184 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1185 pInput->Elements[iReg].Name = HvX64RegisterTscAux;
1186 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.TscAux;
1187 iReg++;
1188
1189# if 0 /** @todo Why can't we write these on Intel systems? Not that we really care... */
1190 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM);
1191 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
1192 {
1193 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1194 pInput->Elements[iReg].Name = HvX64RegisterIa32MiscEnable;
1195 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MiscEnable;
1196 iReg++;
1197 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1198 pInput->Elements[iReg].Name = HvX64RegisterIa32FeatureControl;
1199 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestIa32FeatureControl(pGVCpu);
1200 iReg++;
1201 }
1202# endif
1203 }
1204
1205 /* event injection (clear it). */
1206 if (fWhat & CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)
1207 {
1208 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1209 pInput->Elements[iReg].Name = HvRegisterPendingInterruption;
1210 pInput->Elements[iReg].Value.Reg64 = 0;
1211 iReg++;
1212 }
1213
1214 /* Interruptibility state. This can get a little complicated since we get
1215 half of the state via HV_X64_VP_EXECUTION_STATE. */
1216 if ( (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
1217 == (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI) )
1218 {
1219 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1220 pInput->Elements[iReg].Name = HvRegisterInterruptState;
1221 pInput->Elements[iReg].Value.Reg64 = 0;
1222 if ( VMCPU_FF_IS_SET(pGVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1223 && EMGetInhibitInterruptsPC(pGVCpu) == pCtx->rip)
1224 pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1;
1225 if (VMCPU_FF_IS_SET(pGVCpu, VMCPU_FF_BLOCK_NMIS))
1226 pInput->Elements[iReg].Value.InterruptState.NmiMasked = 1;
1227 iReg++;
1228 }
1229 else if (fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT)
1230 {
1231 if ( pGVCpu->nem.s.fLastInterruptShadow
1232 || ( VMCPU_FF_IS_SET(pGVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1233 && EMGetInhibitInterruptsPC(pGVCpu) == pCtx->rip))
1234 {
1235 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1236 pInput->Elements[iReg].Name = HvRegisterInterruptState;
1237 pInput->Elements[iReg].Value.Reg64 = 0;
1238 if ( VMCPU_FF_IS_SET(pGVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1239 && EMGetInhibitInterruptsPC(pGVCpu) == pCtx->rip)
1240 pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1;
1241 /** @todo Retrieve NMI state, currently assuming it's zero. (yes this may happen on I/O) */
1242 //if (VMCPU_FF_IS_ANY_SET(pGVCpu, VMCPU_FF_BLOCK_NMIS))
1243 // pInput->Elements[iReg].Value.InterruptState.NmiMasked = 1;
1244 iReg++;
1245 }
1246 }
1247 else
1248 Assert(!(fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI));
1249
1250 /* Interrupt windows. Always set if active as Hyper-V seems to be forgetful. */
1251 uint8_t const fDesiredIntWin = pGVCpu->nem.s.fDesiredInterruptWindows;
1252 if ( fDesiredIntWin
1253 || pGVCpu->nem.s.fCurrentInterruptWindows != fDesiredIntWin)
1254 {
1255 pGVCpu->nem.s.fCurrentInterruptWindows = pGVCpu->nem.s.fDesiredInterruptWindows;
1256 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1257 pInput->Elements[iReg].Name = HvX64RegisterDeliverabilityNotifications;
1258 pInput->Elements[iReg].Value.DeliverabilityNotifications.AsUINT64 = fDesiredIntWin;
1259 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.NmiNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_NMI));
1260 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.InterruptNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_REGULAR));
1261 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.InterruptPriority == (fDesiredIntWin & NEM_WIN_INTW_F_PRIO_MASK) >> NEM_WIN_INTW_F_PRIO_SHIFT);
1262 iReg++;
1263 }
1264
1265 /// @todo HvRegisterPendingEvent0
1266 /// @todo HvRegisterPendingEvent1
1267
1268 /*
1269 * Set the registers.
1270 */
1271 Assert((uintptr_t)&pInput->Elements[iReg] - (uintptr_t)pGVCpu->nemr0.s.HypercallData.pbPage < PAGE_SIZE); /* max is 127 */
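    /* The 127 figure appears to follow from the page-sized input buffer: subtracting
       the fixed HV_INPUT_SET_VP_REGISTERS header from PAGE_SIZE and dividing by the
       size of an element gives 127 when the element is 32 bytes - both sizes are
       assumptions taken from the Hyper-V headers, not stated here. */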
1272
1273 /*
1274 * Make the hypercall.
1275 */
1276 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, iReg),
1277 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0 /*GCPhysOutput*/);
1278 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(iReg),
1279 ("uResult=%RX64 iRegs=%#x\n", uResult, iReg),
1280 VERR_NEM_SET_REGISTERS_FAILED);
1281 //LogFlow(("nemR0WinExportState: uResult=%#RX64 iReg=%zu fWhat=%#018RX64 fExtrn=%#018RX64 -> %#018RX64\n", uResult, iReg, fWhat, pCtx->fExtrn,
1282 // pCtx->fExtrn | CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM ));
1283 pCtx->fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM;
1284 return VINF_SUCCESS;
1285}
1286#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
1287
1288
1289/**
1290 * Export the state to the native API (out of CPUMCTX).
1291 *
1292 * @returns VBox status code
1293 * @param pGVM The ring-0 VM handle.
1294 * @param idCpu The calling EMT. Necessary for getting the
1295 * hypercall page and arguments.
1296 */
1297VMMR0_INT_DECL(int) NEMR0ExportState(PGVM pGVM, VMCPUID idCpu)
1298{
1299#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
1300 /*
1301 * Validate the call.
1302 */
1303 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
1304 if (RT_SUCCESS(rc))
1305 {
1306 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1307 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
1308
1309 /*
1310 * Call worker.
1311 */
1312 rc = nemR0WinExportState(pGVM, pGVCpu, &pGVCpu->cpum.GstCtx);
1313 }
1314 return rc;
1315#else
1316 RT_NOREF(pGVM, idCpu);
1317 return VERR_NOT_IMPLEMENTED;
1318#endif
1319}
1320
1321
1322#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
1323/**
1324 * Worker for NEMR0ImportState.
1325 *
1326 * Intention is to use it internally later.
1327 *
1328 * @returns VBox status code.
1329 * @param pGVM The ring-0 VM handle.
1330 * @param pGVCpu The ring-0 VCPU handle.
1331 * @param pCtx The CPU context structure to import into.
1332 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
1333 * @param fCanUpdateCr3 Whether it's safe to update CR3 or not.
1334 */
1335NEM_TMPL_STATIC int nemR0WinImportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat, bool fCanUpdateCr3)
1336{
1337 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
1338 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
1339 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
1340 Assert(pCtx == &pGVCpu->cpum.GstCtx);
1341
1342 fWhat &= pCtx->fExtrn;
1343
1344 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
1345 pInput->VpIndex = pGVCpu->idCpu;
1346 pInput->fFlags = 0;
1347
1348 /* GPRs */
1349 uintptr_t iReg = 0;
1350 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1351 {
1352 if (fWhat & CPUMCTX_EXTRN_RAX)
1353 pInput->Names[iReg++] = HvX64RegisterRax;
1354 if (fWhat & CPUMCTX_EXTRN_RCX)
1355 pInput->Names[iReg++] = HvX64RegisterRcx;
1356 if (fWhat & CPUMCTX_EXTRN_RDX)
1357 pInput->Names[iReg++] = HvX64RegisterRdx;
1358 if (fWhat & CPUMCTX_EXTRN_RBX)
1359 pInput->Names[iReg++] = HvX64RegisterRbx;
1360 if (fWhat & CPUMCTX_EXTRN_RSP)
1361 pInput->Names[iReg++] = HvX64RegisterRsp;
1362 if (fWhat & CPUMCTX_EXTRN_RBP)
1363 pInput->Names[iReg++] = HvX64RegisterRbp;
1364 if (fWhat & CPUMCTX_EXTRN_RSI)
1365 pInput->Names[iReg++] = HvX64RegisterRsi;
1366 if (fWhat & CPUMCTX_EXTRN_RDI)
1367 pInput->Names[iReg++] = HvX64RegisterRdi;
1368 if (fWhat & CPUMCTX_EXTRN_R8_R15)
1369 {
1370 pInput->Names[iReg++] = HvX64RegisterR8;
1371 pInput->Names[iReg++] = HvX64RegisterR9;
1372 pInput->Names[iReg++] = HvX64RegisterR10;
1373 pInput->Names[iReg++] = HvX64RegisterR11;
1374 pInput->Names[iReg++] = HvX64RegisterR12;
1375 pInput->Names[iReg++] = HvX64RegisterR13;
1376 pInput->Names[iReg++] = HvX64RegisterR14;
1377 pInput->Names[iReg++] = HvX64RegisterR15;
1378 }
1379 }
1380
1381 /* RIP & Flags */
1382 if (fWhat & CPUMCTX_EXTRN_RIP)
1383 pInput->Names[iReg++] = HvX64RegisterRip;
1384 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
1385 pInput->Names[iReg++] = HvX64RegisterRflags;
1386
1387 /* Segments */
1388 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
1389 {
1390 if (fWhat & CPUMCTX_EXTRN_CS)
1391 pInput->Names[iReg++] = HvX64RegisterCs;
1392 if (fWhat & CPUMCTX_EXTRN_ES)
1393 pInput->Names[iReg++] = HvX64RegisterEs;
1394 if (fWhat & CPUMCTX_EXTRN_SS)
1395 pInput->Names[iReg++] = HvX64RegisterSs;
1396 if (fWhat & CPUMCTX_EXTRN_DS)
1397 pInput->Names[iReg++] = HvX64RegisterDs;
1398 if (fWhat & CPUMCTX_EXTRN_FS)
1399 pInput->Names[iReg++] = HvX64RegisterFs;
1400 if (fWhat & CPUMCTX_EXTRN_GS)
1401 pInput->Names[iReg++] = HvX64RegisterGs;
1402 }
1403
1404 /* Descriptor tables and the task segment. */
1405 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
1406 {
1407 if (fWhat & CPUMCTX_EXTRN_LDTR)
1408 pInput->Names[iReg++] = HvX64RegisterLdtr;
1409 if (fWhat & CPUMCTX_EXTRN_TR)
1410 pInput->Names[iReg++] = HvX64RegisterTr;
1411 if (fWhat & CPUMCTX_EXTRN_IDTR)
1412 pInput->Names[iReg++] = HvX64RegisterIdtr;
1413 if (fWhat & CPUMCTX_EXTRN_GDTR)
1414 pInput->Names[iReg++] = HvX64RegisterGdtr;
1415 }
1416
1417 /* Control registers. */
1418 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
1419 {
1420 if (fWhat & CPUMCTX_EXTRN_CR0)
1421 pInput->Names[iReg++] = HvX64RegisterCr0;
1422 if (fWhat & CPUMCTX_EXTRN_CR2)
1423 pInput->Names[iReg++] = HvX64RegisterCr2;
1424 if (fWhat & CPUMCTX_EXTRN_CR3)
1425 pInput->Names[iReg++] = HvX64RegisterCr3;
1426 if (fWhat & CPUMCTX_EXTRN_CR4)
1427 pInput->Names[iReg++] = HvX64RegisterCr4;
1428 }
1429 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
1430 pInput->Names[iReg++] = HvX64RegisterCr8;
1431
1432 /* Debug registers. */
1433 if (fWhat & CPUMCTX_EXTRN_DR7)
1434 pInput->Names[iReg++] = HvX64RegisterDr7;
1435 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
1436 {
1437 if (!(fWhat & CPUMCTX_EXTRN_DR7) && (pCtx->fExtrn & CPUMCTX_EXTRN_DR7))
1438 {
1439 fWhat |= CPUMCTX_EXTRN_DR7;
1440 pInput->Names[iReg++] = HvX64RegisterDr7;
1441 }
1442 pInput->Names[iReg++] = HvX64RegisterDr0;
1443 pInput->Names[iReg++] = HvX64RegisterDr1;
1444 pInput->Names[iReg++] = HvX64RegisterDr2;
1445 pInput->Names[iReg++] = HvX64RegisterDr3;
1446 }
1447 if (fWhat & CPUMCTX_EXTRN_DR6)
1448 pInput->Names[iReg++] = HvX64RegisterDr6;
1449
1450 /* Floating point state. */
1451 if (fWhat & CPUMCTX_EXTRN_X87)
1452 {
1453 pInput->Names[iReg++] = HvX64RegisterFpMmx0;
1454 pInput->Names[iReg++] = HvX64RegisterFpMmx1;
1455 pInput->Names[iReg++] = HvX64RegisterFpMmx2;
1456 pInput->Names[iReg++] = HvX64RegisterFpMmx3;
1457 pInput->Names[iReg++] = HvX64RegisterFpMmx4;
1458 pInput->Names[iReg++] = HvX64RegisterFpMmx5;
1459 pInput->Names[iReg++] = HvX64RegisterFpMmx6;
1460 pInput->Names[iReg++] = HvX64RegisterFpMmx7;
1461 pInput->Names[iReg++] = HvX64RegisterFpControlStatus;
1462 }
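    /* The XMM control/status register carries MXCSR as well as the remaining x87 pointer
       state, so it is needed for both the x87 and the SSE/AVX cases. */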
1463 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
1464 pInput->Names[iReg++] = HvX64RegisterXmmControlStatus;
1465
1466 /* Vector state. */
1467 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
1468 {
1469 pInput->Names[iReg++] = HvX64RegisterXmm0;
1470 pInput->Names[iReg++] = HvX64RegisterXmm1;
1471 pInput->Names[iReg++] = HvX64RegisterXmm2;
1472 pInput->Names[iReg++] = HvX64RegisterXmm3;
1473 pInput->Names[iReg++] = HvX64RegisterXmm4;
1474 pInput->Names[iReg++] = HvX64RegisterXmm5;
1475 pInput->Names[iReg++] = HvX64RegisterXmm6;
1476 pInput->Names[iReg++] = HvX64RegisterXmm7;
1477 pInput->Names[iReg++] = HvX64RegisterXmm8;
1478 pInput->Names[iReg++] = HvX64RegisterXmm9;
1479 pInput->Names[iReg++] = HvX64RegisterXmm10;
1480 pInput->Names[iReg++] = HvX64RegisterXmm11;
1481 pInput->Names[iReg++] = HvX64RegisterXmm12;
1482 pInput->Names[iReg++] = HvX64RegisterXmm13;
1483 pInput->Names[iReg++] = HvX64RegisterXmm14;
1484 pInput->Names[iReg++] = HvX64RegisterXmm15;
1485 }
1486
1487 /* MSRs */
1488 // HvX64RegisterTsc - don't touch
1489 if (fWhat & CPUMCTX_EXTRN_EFER)
1490 pInput->Names[iReg++] = HvX64RegisterEfer;
1491 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1492 pInput->Names[iReg++] = HvX64RegisterKernelGsBase;
1493 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1494 {
1495 pInput->Names[iReg++] = HvX64RegisterSysenterCs;
1496 pInput->Names[iReg++] = HvX64RegisterSysenterEip;
1497 pInput->Names[iReg++] = HvX64RegisterSysenterEsp;
1498 }
1499 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1500 {
1501 pInput->Names[iReg++] = HvX64RegisterStar;
1502 pInput->Names[iReg++] = HvX64RegisterLstar;
1503 pInput->Names[iReg++] = HvX64RegisterCstar;
1504 pInput->Names[iReg++] = HvX64RegisterSfmask;
1505 }
1506
1507# ifdef LOG_ENABLED
1508 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM);
1509# endif
1510 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1511 {
1512 pInput->Names[iReg++] = HvX64RegisterApicBase; /// @todo APIC BASE
1513 pInput->Names[iReg++] = HvX64RegisterPat;
1514# if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
1515 pInput->Names[iReg++] = HvX64RegisterMtrrCap;
1516# endif
1517 pInput->Names[iReg++] = HvX64RegisterMtrrDefType;
1518 pInput->Names[iReg++] = HvX64RegisterMtrrFix64k00000;
1519 pInput->Names[iReg++] = HvX64RegisterMtrrFix16k80000;
1520 pInput->Names[iReg++] = HvX64RegisterMtrrFix16kA0000;
1521 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kC0000;
1522 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kC8000;
1523 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kD0000;
1524 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kD8000;
1525 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kE0000;
1526 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kE8000;
1527 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kF0000;
1528 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kF8000;
1529 pInput->Names[iReg++] = HvX64RegisterTscAux;
1530# if 0 /** @todo why can't we read HvX64RegisterIa32MiscEnable? */
1531 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
1532 pInput->Names[iReg++] = HvX64RegisterIa32MiscEnable;
1533# endif
1534# ifdef LOG_ENABLED
1535 if (enmCpuVendor != CPUMCPUVENDOR_AMD && enmCpuVendor != CPUMCPUVENDOR_HYGON)
1536 pInput->Names[iReg++] = HvX64RegisterIa32FeatureControl;
1537# endif
1538 }
1539
1540 /* Interruptibility. */
1541 if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
1542 {
1543 pInput->Names[iReg++] = HvRegisterInterruptState;
1544 pInput->Names[iReg++] = HvX64RegisterRip;
1545 }
1546
1547 /* event injection */
1548 pInput->Names[iReg++] = HvRegisterPendingInterruption;
1549 pInput->Names[iReg++] = HvRegisterPendingEvent0;
1550 pInput->Names[iReg++] = HvRegisterPendingEvent1;
1551 size_t const cRegs = iReg;
1552 size_t const cbInput = RT_ALIGN_Z(RT_UOFFSETOF_DYN(HV_INPUT_GET_VP_REGISTERS, Names[cRegs]), 32);
1553
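    /* The register values are returned in the same hypercall page, immediately after the
       32-byte aligned input block. */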
1554 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
1555 Assert((uintptr_t)&paValues[cRegs] - (uintptr_t)pGVCpu->nemr0.s.HypercallData.pbPage < PAGE_SIZE); /* (max is around 168 registers) */
1556 RT_BZERO(paValues, cRegs * sizeof(paValues[0]));
1557
1558 /*
1559 * Make the hypercall.
1560 */
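    /* HvCallGetVpRegisters is a repeating hypercall: the expected return value combines a
       successful status with the number of completed register reads in its upper bits,
       which is what HV_MAKE_CALL_REP_RET(cRegs) encodes. */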
1561 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, cRegs),
1562 pGVCpu->nemr0.s.HypercallData.HCPhysPage,
1563 pGVCpu->nemr0.s.HypercallData.HCPhysPage + cbInput);
1564 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(cRegs),
1565 ("uResult=%RX64 cRegs=%#x\n", uResult, cRegs),
1566 VERR_NEM_GET_REGISTERS_FAILED);
1567 //LogFlow(("nemR0WinImportState: uResult=%#RX64 iReg=%zu fWhat=%#018RX64 fExtr=%#018RX64\n", uResult, cRegs, fWhat, pCtx->fExtrn));
1568
1569 /*
1570 * Copy information to the CPUM context.
1571 */
1572 iReg = 0;
1573
1574 /* GPRs */
1575 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1576 {
1577 if (fWhat & CPUMCTX_EXTRN_RAX)
1578 {
1579 Assert(pInput->Names[iReg] == HvX64RegisterRax);
1580 pCtx->rax = paValues[iReg++].Reg64;
1581 }
1582 if (fWhat & CPUMCTX_EXTRN_RCX)
1583 {
1584 Assert(pInput->Names[iReg] == HvX64RegisterRcx);
1585 pCtx->rcx = paValues[iReg++].Reg64;
1586 }
1587 if (fWhat & CPUMCTX_EXTRN_RDX)
1588 {
1589 Assert(pInput->Names[iReg] == HvX64RegisterRdx);
1590 pCtx->rdx = paValues[iReg++].Reg64;
1591 }
1592 if (fWhat & CPUMCTX_EXTRN_RBX)
1593 {
1594 Assert(pInput->Names[iReg] == HvX64RegisterRbx);
1595 pCtx->rbx = paValues[iReg++].Reg64;
1596 }
1597 if (fWhat & CPUMCTX_EXTRN_RSP)
1598 {
1599 Assert(pInput->Names[iReg] == HvX64RegisterRsp);
1600 pCtx->rsp = paValues[iReg++].Reg64;
1601 }
1602 if (fWhat & CPUMCTX_EXTRN_RBP)
1603 {
1604 Assert(pInput->Names[iReg] == HvX64RegisterRbp);
1605 pCtx->rbp = paValues[iReg++].Reg64;
1606 }
1607 if (fWhat & CPUMCTX_EXTRN_RSI)
1608 {
1609 Assert(pInput->Names[iReg] == HvX64RegisterRsi);
1610 pCtx->rsi = paValues[iReg++].Reg64;
1611 }
1612 if (fWhat & CPUMCTX_EXTRN_RDI)
1613 {
1614 Assert(pInput->Names[iReg] == HvX64RegisterRdi);
1615 pCtx->rdi = paValues[iReg++].Reg64;
1616 }
1617 if (fWhat & CPUMCTX_EXTRN_R8_R15)
1618 {
1619 Assert(pInput->Names[iReg] == HvX64RegisterR8);
1620 Assert(pInput->Names[iReg + 7] == HvX64RegisterR15);
1621 pCtx->r8 = paValues[iReg++].Reg64;
1622 pCtx->r9 = paValues[iReg++].Reg64;
1623 pCtx->r10 = paValues[iReg++].Reg64;
1624 pCtx->r11 = paValues[iReg++].Reg64;
1625 pCtx->r12 = paValues[iReg++].Reg64;
1626 pCtx->r13 = paValues[iReg++].Reg64;
1627 pCtx->r14 = paValues[iReg++].Reg64;
1628 pCtx->r15 = paValues[iReg++].Reg64;
1629 }
1630 }
1631
1632 /* RIP & Flags */
1633 if (fWhat & CPUMCTX_EXTRN_RIP)
1634 {
1635 Assert(pInput->Names[iReg] == HvX64RegisterRip);
1636 pCtx->rip = paValues[iReg++].Reg64;
1637 }
1638 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
1639 {
1640 Assert(pInput->Names[iReg] == HvX64RegisterRflags);
1641 pCtx->rflags.u = paValues[iReg++].Reg64;
1642 }
1643
1644 /* Segments */
1645# define COPY_BACK_SEG(a_idx, a_enmName, a_SReg) \
1646 do { \
1647 Assert(pInput->Names[a_idx] == a_enmName); \
1648 (a_SReg).u64Base = paValues[a_idx].Segment.Base; \
1649 (a_SReg).u32Limit = paValues[a_idx].Segment.Limit; \
1650 (a_SReg).ValidSel = (a_SReg).Sel = paValues[a_idx].Segment.Selector; \
1651 (a_SReg).Attr.u = paValues[a_idx].Segment.Attributes; \
1652 (a_SReg).fFlags = CPUMSELREG_FLAGS_VALID; \
1653 } while (0)
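/* COPY_BACK_SEG pulls in the full hidden segment state (base, limit, selector and
   attributes) and marks the CPUM selector register as valid. */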
1654 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
1655 {
1656 if (fWhat & CPUMCTX_EXTRN_CS)
1657 {
1658 COPY_BACK_SEG(iReg, HvX64RegisterCs, pCtx->cs);
1659 iReg++;
1660 }
1661 if (fWhat & CPUMCTX_EXTRN_ES)
1662 {
1663 COPY_BACK_SEG(iReg, HvX64RegisterEs, pCtx->es);
1664 iReg++;
1665 }
1666 if (fWhat & CPUMCTX_EXTRN_SS)
1667 {
1668 COPY_BACK_SEG(iReg, HvX64RegisterSs, pCtx->ss);
1669 iReg++;
1670 }
1671 if (fWhat & CPUMCTX_EXTRN_DS)
1672 {
1673 COPY_BACK_SEG(iReg, HvX64RegisterDs, pCtx->ds);
1674 iReg++;
1675 }
1676 if (fWhat & CPUMCTX_EXTRN_FS)
1677 {
1678 COPY_BACK_SEG(iReg, HvX64RegisterFs, pCtx->fs);
1679 iReg++;
1680 }
1681 if (fWhat & CPUMCTX_EXTRN_GS)
1682 {
1683 COPY_BACK_SEG(iReg, HvX64RegisterGs, pCtx->gs);
1684 iReg++;
1685 }
1686 }
1687 /* Descriptor tables and the task segment. */
1688 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
1689 {
1690 if (fWhat & CPUMCTX_EXTRN_LDTR)
1691 {
1692 COPY_BACK_SEG(iReg, HvX64RegisterLdtr, pCtx->ldtr);
1693 iReg++;
1694 }
1695 if (fWhat & CPUMCTX_EXTRN_TR)
1696 {
1697 /* AMD-V likes loading TR in the AVAIL state, whereas Intel insists on BUSY. So, to
1698 avoid triggering sanity assertions elsewhere in the code, always fix this up. */
1699 COPY_BACK_SEG(iReg, HvX64RegisterTr, pCtx->tr);
1700 switch (pCtx->tr.Attr.n.u4Type)
1701 {
1702 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1703 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1704 break;
1705 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
1706 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
1707 break;
1708 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
1709 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;
1710 break;
1711 }
1712 iReg++;
1713 }
1714 if (fWhat & CPUMCTX_EXTRN_IDTR)
1715 {
1716 Assert(pInput->Names[iReg] == HvX64RegisterIdtr);
1717 pCtx->idtr.cbIdt = paValues[iReg].Table.Limit;
1718 pCtx->idtr.pIdt = paValues[iReg].Table.Base;
1719 iReg++;
1720 }
1721 if (fWhat & CPUMCTX_EXTRN_GDTR)
1722 {
1723 Assert(pInput->Names[iReg] == HvX64RegisterGdtr);
1724 pCtx->gdtr.cbGdt = paValues[iReg].Table.Limit;
1725 pCtx->gdtr.pGdt = paValues[iReg].Table.Base;
1726 iReg++;
1727 }
1728 }
1729
1730 /* Control registers. */
1731 bool fMaybeChangedMode = false;
1732 bool fUpdateCr3 = false;
1733 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
1734 {
1735 if (fWhat & CPUMCTX_EXTRN_CR0)
1736 {
1737 Assert(pInput->Names[iReg] == HvX64RegisterCr0);
1738 if (pCtx->cr0 != paValues[iReg].Reg64)
1739 {
1740 CPUMSetGuestCR0(pGVCpu, paValues[iReg].Reg64);
1741 fMaybeChangedMode = true;
1742 }
1743 iReg++;
1744 }
1745 if (fWhat & CPUMCTX_EXTRN_CR2)
1746 {
1747 Assert(pInput->Names[iReg] == HvX64RegisterCr2);
1748 pCtx->cr2 = paValues[iReg].Reg64;
1749 iReg++;
1750 }
1751 if (fWhat & CPUMCTX_EXTRN_CR3)
1752 {
1753 Assert(pInput->Names[iReg] == HvX64RegisterCr3);
1754 if (pCtx->cr3 != paValues[iReg].Reg64)
1755 {
1756 CPUMSetGuestCR3(pGVCpu, paValues[iReg].Reg64);
1757 fUpdateCr3 = true;
1758 }
1759 iReg++;
1760 }
1761 if (fWhat & CPUMCTX_EXTRN_CR4)
1762 {
1763 Assert(pInput->Names[iReg] == HvX64RegisterCr4);
1764 if (pCtx->cr4 != paValues[iReg].Reg64)
1765 {
1766 CPUMSetGuestCR4(pGVCpu, paValues[iReg].Reg64);
1767 fMaybeChangedMode = true;
1768 }
1769 iReg++;
1770 }
1771 }
1772 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
1773 {
1774 Assert(pInput->Names[iReg] == HvX64RegisterCr8);
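        /* CR8 holds bits 7:4 of the TPR, so shift it back up before handing it to the virtual APIC. */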
1775 APICSetTpr(pGVCpu, (uint8_t)paValues[iReg].Reg64 << 4);
1776 iReg++;
1777 }
1778
1779 /* Debug registers. */
1780 if (fWhat & CPUMCTX_EXTRN_DR7)
1781 {
1782 Assert(pInput->Names[iReg] == HvX64RegisterDr7);
1783 if (pCtx->dr[7] != paValues[iReg].Reg64)
1784 CPUMSetGuestDR7(pGVCpu, paValues[iReg].Reg64);
1785 pCtx->fExtrn &= ~CPUMCTX_EXTRN_DR7; /* Hack alert! Avoids asserting when processing CPUMCTX_EXTRN_DR0_DR3. */
1786 iReg++;
1787 }
1788 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
1789 {
1790 Assert(pInput->Names[iReg] == HvX64RegisterDr0);
1791 Assert(pInput->Names[iReg+3] == HvX64RegisterDr3);
1792 if (pCtx->dr[0] != paValues[iReg].Reg64)
1793 CPUMSetGuestDR0(pGVCpu, paValues[iReg].Reg64);
1794 iReg++;
1795 if (pCtx->dr[1] != paValues[iReg].Reg64)
1796 CPUMSetGuestDR1(pGVCpu, paValues[iReg].Reg64);
1797 iReg++;
1798 if (pCtx->dr[2] != paValues[iReg].Reg64)
1799 CPUMSetGuestDR2(pGVCpu, paValues[iReg].Reg64);
1800 iReg++;
1801 if (pCtx->dr[3] != paValues[iReg].Reg64)
1802 CPUMSetGuestDR3(pGVCpu, paValues[iReg].Reg64);
1803 iReg++;
1804 }
1805 if (fWhat & CPUMCTX_EXTRN_DR6)
1806 {
1807 Assert(pInput->Names[iReg] == HvX64RegisterDr6);
1808 if (pCtx->dr[6] != paValues[iReg].Reg64)
1809 CPUMSetGuestDR6(pGVCpu, paValues[iReg].Reg64);
1810 iReg++;
1811 }
1812
1813 /* Floating point state. */
1814 if (fWhat & CPUMCTX_EXTRN_X87)
1815 {
1816 Assert(pInput->Names[iReg] == HvX64RegisterFpMmx0);
1817 Assert(pInput->Names[iReg + 7] == HvX64RegisterFpMmx7);
1818 pCtx->pXStateR0->x87.aRegs[0].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1819 pCtx->pXStateR0->x87.aRegs[0].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1820 iReg++;
1821 pCtx->pXStateR0->x87.aRegs[1].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1822 pCtx->pXStateR0->x87.aRegs[1].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1823 iReg++;
1824 pCtx->pXStateR0->x87.aRegs[2].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1825 pCtx->pXStateR0->x87.aRegs[2].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1826 iReg++;
1827 pCtx->pXStateR0->x87.aRegs[3].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1828 pCtx->pXStateR0->x87.aRegs[3].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1829 iReg++;
1830 pCtx->pXStateR0->x87.aRegs[4].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1831 pCtx->pXStateR0->x87.aRegs[4].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1832 iReg++;
1833 pCtx->pXStateR0->x87.aRegs[5].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1834 pCtx->pXStateR0->x87.aRegs[5].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1835 iReg++;
1836 pCtx->pXStateR0->x87.aRegs[6].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1837 pCtx->pXStateR0->x87.aRegs[6].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1838 iReg++;
1839 pCtx->pXStateR0->x87.aRegs[7].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1840 pCtx->pXStateR0->x87.aRegs[7].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1841 iReg++;
1842
1843 Assert(pInput->Names[iReg] == HvX64RegisterFpControlStatus);
1844 pCtx->pXStateR0->x87.FCW = paValues[iReg].FpControlStatus.FpControl;
1845 pCtx->pXStateR0->x87.FSW = paValues[iReg].FpControlStatus.FpStatus;
1846 pCtx->pXStateR0->x87.FTW = paValues[iReg].FpControlStatus.FpTag
1847 /*| (paValues[iReg].FpControlStatus.Reserved << 8)*/;
1848 pCtx->pXStateR0->x87.FOP = paValues[iReg].FpControlStatus.LastFpOp;
1849 pCtx->pXStateR0->x87.FPUIP = (uint32_t)paValues[iReg].FpControlStatus.LastFpRip;
1850 pCtx->pXStateR0->x87.CS = (uint16_t)(paValues[iReg].FpControlStatus.LastFpRip >> 32);
1851 pCtx->pXStateR0->x87.Rsrvd1 = (uint16_t)(paValues[iReg].FpControlStatus.LastFpRip >> 48);
1852 iReg++;
1853 }
1854
1855 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
1856 {
1857 Assert(pInput->Names[iReg] == HvX64RegisterXmmControlStatus);
1858 if (fWhat & CPUMCTX_EXTRN_X87)
1859 {
1860 pCtx->pXStateR0->x87.FPUDP = (uint32_t)paValues[iReg].XmmControlStatus.LastFpRdp;
1861 pCtx->pXStateR0->x87.DS = (uint16_t)(paValues[iReg].XmmControlStatus.LastFpRdp >> 32);
1862 pCtx->pXStateR0->x87.Rsrvd2 = (uint16_t)(paValues[iReg].XmmControlStatus.LastFpRdp >> 48);
1863 }
1864 pCtx->pXStateR0->x87.MXCSR = paValues[iReg].XmmControlStatus.XmmStatusControl;
1865 pCtx->pXStateR0->x87.MXCSR_MASK = paValues[iReg].XmmControlStatus.XmmStatusControlMask; /** @todo ??? (Isn't this an output field?) */
1866 iReg++;
1867 }
1868
1869 /* Vector state. */
1870 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
1871 {
1872 Assert(pInput->Names[iReg] == HvX64RegisterXmm0);
1873 Assert(pInput->Names[iReg+15] == HvX64RegisterXmm15);
1874 pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1875 pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1876 iReg++;
1877 pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1878 pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1879 iReg++;
1880 pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1881 pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1882 iReg++;
1883 pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1884 pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1885 iReg++;
1886 pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1887 pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1888 iReg++;
1889 pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1890 pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1891 iReg++;
1892 pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1893 pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1894 iReg++;
1895 pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1896 pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1897 iReg++;
1898 pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1899 pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1900 iReg++;
1901 pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1902 pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1903 iReg++;
1904 pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1905 pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1906 iReg++;
1907 pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1908 pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1909 iReg++;
1910 pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1911 pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1912 iReg++;
1913 pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1914 pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1915 iReg++;
1916 pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1917 pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1918 iReg++;
1919 pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1920 pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1921 iReg++;
1922 }
1923
1924
1925 /* MSRs */
1926 // HvX64RegisterTsc - don't touch
1927 if (fWhat & CPUMCTX_EXTRN_EFER)
1928 {
1929 Assert(pInput->Names[iReg] == HvX64RegisterEfer);
1930 if (paValues[iReg].Reg64 != pCtx->msrEFER)
1931 {
1932 Log7(("NEM/%u: MSR EFER changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrEFER, paValues[iReg].Reg64));
1933 if ((paValues[iReg].Reg64 ^ pCtx->msrEFER) & MSR_K6_EFER_NXE)
1934 PGMNotifyNxeChanged(pGVCpu, RT_BOOL(paValues[iReg].Reg64 & MSR_K6_EFER_NXE));
1935 pCtx->msrEFER = paValues[iReg].Reg64;
1936 fMaybeChangedMode = true;
1937 }
1938 iReg++;
1939 }
1940 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1941 {
1942 Assert(pInput->Names[iReg] == HvX64RegisterKernelGsBase);
1943 if (pCtx->msrKERNELGSBASE != paValues[iReg].Reg64)
1944 Log7(("NEM/%u: MSR KERNELGSBASE changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrKERNELGSBASE, paValues[iReg].Reg64));
1945 pCtx->msrKERNELGSBASE = paValues[iReg].Reg64;
1946 iReg++;
1947 }
1948 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1949 {
1950 Assert(pInput->Names[iReg] == HvX64RegisterSysenterCs);
1951 if (pCtx->SysEnter.cs != paValues[iReg].Reg64)
1952 Log7(("NEM/%u: MSR SYSENTER.CS changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->SysEnter.cs, paValues[iReg].Reg64));
1953 pCtx->SysEnter.cs = paValues[iReg].Reg64;
1954 iReg++;
1955
1956 Assert(pInput->Names[iReg] == HvX64RegisterSysenterEip);
1957 if (pCtx->SysEnter.eip != paValues[iReg].Reg64)
1958 Log7(("NEM/%u: MSR SYSENTER.EIP changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->SysEnter.eip, paValues[iReg].Reg64));
1959 pCtx->SysEnter.eip = paValues[iReg].Reg64;
1960 iReg++;
1961
1962 Assert(pInput->Names[iReg] == HvX64RegisterSysenterEsp);
1963 if (pCtx->SysEnter.esp != paValues[iReg].Reg64)
1964 Log7(("NEM/%u: MSR SYSENTER.ESP changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->SysEnter.esp, paValues[iReg].Reg64));
1965 pCtx->SysEnter.esp = paValues[iReg].Reg64;
1966 iReg++;
1967 }
1968 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1969 {
1970 Assert(pInput->Names[iReg] == HvX64RegisterStar);
1971 if (pCtx->msrSTAR != paValues[iReg].Reg64)
1972 Log7(("NEM/%u: MSR STAR changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrSTAR, paValues[iReg].Reg64));
1973 pCtx->msrSTAR = paValues[iReg].Reg64;
1974 iReg++;
1975
1976 Assert(pInput->Names[iReg] == HvX64RegisterLstar);
1977 if (pCtx->msrLSTAR != paValues[iReg].Reg64)
1978 Log7(("NEM/%u: MSR LSTAR changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrLSTAR, paValues[iReg].Reg64));
1979 pCtx->msrLSTAR = paValues[iReg].Reg64;
1980 iReg++;
1981
1982 Assert(pInput->Names[iReg] == HvX64RegisterCstar);
1983 if (pCtx->msrCSTAR != paValues[iReg].Reg64)
1984 Log7(("NEM/%u: MSR CSTAR changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrCSTAR, paValues[iReg].Reg64));
1985 pCtx->msrCSTAR = paValues[iReg].Reg64;
1986 iReg++;
1987
1988 Assert(pInput->Names[iReg] == HvX64RegisterSfmask);
1989 if (pCtx->msrSFMASK != paValues[iReg].Reg64)
1990 Log7(("NEM/%u: MSR SFMASK changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrSFMASK, paValues[iReg].Reg64));
1991 pCtx->msrSFMASK = paValues[iReg].Reg64;
1992 iReg++;
1993 }
1994 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1995 {
1996 Assert(pInput->Names[iReg] == HvX64RegisterApicBase);
1997 const uint64_t uOldBase = APICGetBaseMsrNoCheck(pGVCpu);
1998 if (paValues[iReg].Reg64 != uOldBase)
1999 {
2000 Log7(("NEM/%u: MSR APICBase changed %RX64 -> %RX64 (%RX64)\n",
2001 pGVCpu->idCpu, uOldBase, paValues[iReg].Reg64, paValues[iReg].Reg64 ^ uOldBase));
2002 int rc2 = APICSetBaseMsr(pGVCpu, paValues[iReg].Reg64);
2003 AssertLogRelMsg(rc2 == VINF_SUCCESS, ("rc2=%Rrc [%#RX64]\n", rc2, paValues[iReg].Reg64));
2004 }
2005 iReg++;
2006
2007 Assert(pInput->Names[iReg] == HvX64RegisterPat);
2008 if (pCtx->msrPAT != paValues[iReg].Reg64)
2009 Log7(("NEM/%u: MSR PAT changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrPAT, paValues[iReg].Reg64));
2010 pCtx->msrPAT = paValues[iReg].Reg64;
2011 iReg++;
2012
2013# if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
2014 Assert(pInput->Names[iReg] == HvX64RegisterMtrrCap);
2015 if (paValues[iReg].Reg64 != CPUMGetGuestIa32MtrrCap(pGVCpu))
2016 Log7(("NEM/%u: MSR MTRR_CAP changed %RX64 -> %RX64 (!!)\n", pGVCpu->idCpu, CPUMGetGuestIa32MtrrCap(pGVCpu), paValues[iReg].Reg64));
2017 iReg++;
2018# endif
2019
2020 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pGVCpu);
2021 Assert(pInput->Names[iReg] == HvX64RegisterMtrrDefType);
2022 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrDefType )
2023 Log7(("NEM/%u: MSR MTRR_DEF_TYPE changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrDefType, paValues[iReg].Reg64));
2024 pCtxMsrs->msr.MtrrDefType = paValues[iReg].Reg64;
2025 iReg++;
2026
2027 /** @todo we don't keep state for HvX64RegisterMtrrPhysBaseX and HvX64RegisterMtrrPhysMaskX */
2028
2029 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix64k00000);
2030 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix64K_00000 )
2031 Log7(("NEM/%u: MSR MTRR_FIX64K_00000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix64K_00000, paValues[iReg].Reg64));
2032 pCtxMsrs->msr.MtrrFix64K_00000 = paValues[iReg].Reg64;
2033 iReg++;
2034
2035 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix16k80000);
2036 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix16K_80000 )
2037 Log7(("NEM/%u: MSR MTRR_FIX16K_80000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_80000, paValues[iReg].Reg64));
2038 pCtxMsrs->msr.MtrrFix16K_80000 = paValues[iReg].Reg64;
2039 iReg++;
2040
2041 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix16kA0000);
2042 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix16K_A0000 )
2043 Log7(("NEM/%u: MSR MTRR_FIX16K_A0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_A0000, paValues[iReg].Reg64));
2044 pCtxMsrs->msr.MtrrFix16K_A0000 = paValues[iReg].Reg64;
2045 iReg++;
2046
2047 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kC0000);
2048 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_C0000 )
2049 Log7(("NEM/%u: MSR MTRR_FIX4K_C0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C0000, paValues[iReg].Reg64));
2050 pCtxMsrs->msr.MtrrFix4K_C0000 = paValues[iReg].Reg64;
2051 iReg++;
2052
2053 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kC8000);
2054 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_C8000 )
2055 Log7(("NEM/%u: MSR MTRR_FIX4K_C8000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C8000, paValues[iReg].Reg64));
2056 pCtxMsrs->msr.MtrrFix4K_C8000 = paValues[iReg].Reg64;
2057 iReg++;
2058
2059 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kD0000);
2060 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_D0000 )
2061 Log7(("NEM/%u: MSR MTRR_FIX4K_D0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D0000, paValues[iReg].Reg64));
2062 pCtxMsrs->msr.MtrrFix4K_D0000 = paValues[iReg].Reg64;
2063 iReg++;
2064
2065 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kD8000);
2066 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_D8000 )
2067 Log7(("NEM/%u: MSR MTRR_FIX4K_D8000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D8000, paValues[iReg].Reg64));
2068 pCtxMsrs->msr.MtrrFix4K_D8000 = paValues[iReg].Reg64;
2069 iReg++;
2070
2071 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kE0000);
2072 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_E0000 )
2073 Log7(("NEM/%u: MSR MTRR_FIX4K_E0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E0000, paValues[iReg].Reg64));
2074 pCtxMsrs->msr.MtrrFix4K_E0000 = paValues[iReg].Reg64;
2075 iReg++;
2076
2077 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kE8000);
2078 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_E8000 )
2079 Log7(("NEM/%u: MSR MTRR_FIX4K_E8000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E8000, paValues[iReg].Reg64));
2080 pCtxMsrs->msr.MtrrFix4K_E8000 = paValues[iReg].Reg64;
2081 iReg++;
2082
2083 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kF0000);
2084 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_F0000 )
2085 Log7(("NEM/%u: MSR MTRR_FIX4K_F0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F0000, paValues[iReg].Reg64));
2086 pCtxMsrs->msr.MtrrFix4K_F0000 = paValues[iReg].Reg64;
2087 iReg++;
2088
2089 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kF8000);
2090 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_F8000 )
2091 Log7(("NEM/%u: MSR MTRR_FIX4K_F8000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F8000, paValues[iReg].Reg64));
2092 pCtxMsrs->msr.MtrrFix4K_F8000 = paValues[iReg].Reg64;
2093 iReg++;
2094
2095 Assert(pInput->Names[iReg] == HvX64RegisterTscAux);
2096 if (paValues[iReg].Reg64 != pCtxMsrs->msr.TscAux )
2097 Log7(("NEM/%u: MSR TSC_AUX changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.TscAux, paValues[iReg].Reg64));
2098 pCtxMsrs->msr.TscAux = paValues[iReg].Reg64;
2099 iReg++;
2100
2101# if 0 /** @todo why can't we even read HvX64RegisterIa32MiscEnable? */
2102 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
2103 {
2104 Assert(pInput->Names[iReg] == HvX64RegisterIa32MiscEnable);
2105 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MiscEnable)
2106 Log7(("NEM/%u: MSR MISC_ENABLE changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MiscEnable, paValues[iReg].Reg64));
2107 pCtxMsrs->msr.MiscEnable = paValues[iReg].Reg64;
2108 iReg++;
2109 }
2110# endif
2111# ifdef LOG_ENABLED
2112 if (enmCpuVendor != CPUMCPUVENDOR_AMD && enmCpuVendor != CPUMCPUVENDOR_HYGON)
2113 {
2114 Assert(pInput->Names[iReg] == HvX64RegisterIa32FeatureControl);
2115 if (paValues[iReg].Reg64 != pCtx->hwvirt.vmx.Msrs.u64FeatCtrl)
2116 Log7(("NEM/%u: MSR FEATURE_CONTROL changed %RX64 -> %RX64 (!!)\n", pGVCpu->idCpu, pCtx->hwvirt.vmx.Msrs.u64FeatCtrl, paValues[iReg].Reg64));
2117 iReg++;
2118 }
2119# endif
2120 }
2121
2122 /* Interruptibility. */
2123 if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
2124 {
2125 Assert(pInput->Names[iReg] == HvRegisterInterruptState);
2126 Assert(pInput->Names[iReg + 1] == HvX64RegisterRip);
2127
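        /* RIP was requested together with the interrupt state so that an active interrupt
           shadow can be tied to the exact instruction it was captured at. */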
2128 if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
2129 {
2130 pGVCpu->nem.s.fLastInterruptShadow = paValues[iReg].InterruptState.InterruptShadow;
2131 if (paValues[iReg].InterruptState.InterruptShadow)
2132 EMSetInhibitInterruptsPC(pGVCpu, paValues[iReg + 1].Reg64);
2133 else
2134 VMCPU_FF_CLEAR(pGVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2135 }
2136
2137 if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
2138 {
2139 if (paValues[iReg].InterruptState.NmiMasked)
2140 VMCPU_FF_SET(pGVCpu, VMCPU_FF_BLOCK_NMIS);
2141 else
2142 VMCPU_FF_CLEAR(pGVCpu, VMCPU_FF_BLOCK_NMIS);
2143 }
2144
2145 fWhat |= CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI;
2146 iReg += 2;
2147 }
2148
2149 /* Event injection. */
2150 /// @todo HvRegisterPendingInterruption
2151 Assert(pInput->Names[iReg] == HvRegisterPendingInterruption);
2152 if (paValues[iReg].PendingInterruption.InterruptionPending)
2153 {
2154 Log7(("PendingInterruption: type=%u vector=%#x errcd=%RTbool/%#x instr-len=%u nested=%u\n",
2155 paValues[iReg].PendingInterruption.InterruptionType, paValues[iReg].PendingInterruption.InterruptionVector,
2156 paValues[iReg].PendingInterruption.DeliverErrorCode, paValues[iReg].PendingInterruption.ErrorCode,
2157 paValues[iReg].PendingInterruption.InstructionLength, paValues[iReg].PendingInterruption.NestedEvent));
2158 AssertMsg((paValues[iReg].PendingInterruption.AsUINT64 & UINT64_C(0xfc00)) == 0,
2159 ("%#RX64\n", paValues[iReg].PendingInterruption.AsUINT64));
2160 }
2161
2162 /// @todo HvRegisterPendingEvent0
2163 /// @todo HvRegisterPendingEvent1
2164
2165 /* Almost done, just update extrn flags and maybe change PGM mode. */
2166 pCtx->fExtrn &= ~fWhat;
2167 if (!(pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT))))
2168 pCtx->fExtrn = 0;
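    /* Once nothing besides the event injection bit remains external, the whole mask
       (including the keeper bits) is dropped so CPUM owns the context again. */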
2169
2170 /* Typical. */
2171 if (!fMaybeChangedMode && !fUpdateCr3)
2172 return VINF_SUCCESS;
2173
2174 /*
2175 * Slow.
2176 */
2177 int rc = VINF_SUCCESS;
2178 if (fMaybeChangedMode)
2179 {
2180 rc = PGMChangeMode(pGVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
2181 AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_1);
2182 }
2183
2184 if (fUpdateCr3)
2185 {
2186 if (fCanUpdateCr3)
2187 {
2188 LogFlow(("nemR0WinImportState: -> PGMUpdateCR3!\n"));
2189 rc = PGMUpdateCR3(pGVCpu, pCtx->cr3);
2190 AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_2);
2191 }
2192 else
2193 {
2194 LogFlow(("nemR0WinImportState: -> VERR_NEM_FLUSH_TLB!\n"));
2195 rc = VERR_NEM_FLUSH_TLB; /* Calling PGMFlushTLB w/o long jump setup doesn't work, ring-3 does it. */
2196 }
2197 }
2198
2199 return rc;
2200}
2201#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
2202
2203
2204/**
2205 * Import the state from the native API (back to CPUMCTX).
2206 *
2207 * @returns VBox status code
2208 * @param pGVM The ring-0 VM handle.
2209 * @param idCpu The calling EMT. Necessary for getting the
2210 * hypercall page and arguments.
2211 * @param fWhat What to import, CPUMCTX_EXTRN_XXX. Set
2212 * CPUMCTX_EXTRN_ALL for everything.
2213 */
2214VMMR0_INT_DECL(int) NEMR0ImportState(PGVM pGVM, VMCPUID idCpu, uint64_t fWhat)
2215{
2216#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2217 /*
2218 * Validate the call.
2219 */
2220 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
2221 if (RT_SUCCESS(rc))
2222 {
2223 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2224 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
2225
2226 /*
2227 * Call worker.
2228 */
2229 rc = nemR0WinImportState(pGVM, pGVCpu, &pGVCpu->cpum.GstCtx, fWhat, false /*fCanUpdateCr3*/);
2230 }
2231 return rc;
2232#else
2233 RT_NOREF(pGVM, idCpu, fWhat);
2234 return VERR_NOT_IMPLEMENTED;
2235#endif
2236}
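
/*
 * Illustrative sketch (not part of the build): ring-3 normally reaches NEMR0ImportState
 * through the VMMR0 dispatcher rather than calling it directly.  Something along these
 * lines, where the exact operation code and call helper are assumptions for illustration:
 *
 *     // int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_IMPORT_STATE,
 *     //                         CPUMCTX_EXTRN_ALL, NULL);
 *
 * The real plumbing lives in the ring-3 NEM code and VMMR0.cpp.
 */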
2237
2238
2239#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2240/**
2241 * Worker for NEMR0QueryCpuTick and the ring-0 NEMHCQueryCpuTick.
2242 *
2243 * @returns VBox status code.
2244 * @param pGVM The ring-0 VM handle.
2245 * @param pGVCpu The ring-0 VCPU handle.
2246 * @param pcTicks Where to return the current CPU tick count.
2247 * @param pcAux Where to return the hyper-V TSC_AUX value. Optional.
2248 */
2249NEM_TMPL_STATIC int nemR0WinQueryCpuTick(PGVM pGVM, PGVMCPU pGVCpu, uint64_t *pcTicks, uint32_t *pcAux)
2250{
2251 /*
2252 * Hypercall parameters.
2253 */
2254 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
2255 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2256 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
2257
2258 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
2259 pInput->VpIndex = pGVCpu->idCpu;
2260 pInput->fFlags = 0;
2261 pInput->Names[0] = HvX64RegisterTsc;
2262 pInput->Names[1] = HvX64RegisterTscAux;
2263
2264 size_t const cbInput = RT_ALIGN_Z(RT_UOFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[2]), 32);
2265 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
2266 RT_BZERO(paValues, sizeof(paValues[0]) * 2);
2267
2268 /*
2269 * Make the hypercall.
2270 */
2271 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, 2),
2272 pGVCpu->nemr0.s.HypercallData.HCPhysPage,
2273 pGVCpu->nemr0.s.HypercallData.HCPhysPage + cbInput);
2274 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(2), ("uResult=%RX64 cRegs=%#x\n", uResult, 2),
2275 VERR_NEM_GET_REGISTERS_FAILED);
2276
2277 /*
2278 * Get results.
2279 */
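    /* paValues[0] corresponds to Names[0] (the TSC) and paValues[1] to Names[1] (TSC_AUX). */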
2280 *pcTicks = paValues[0].Reg64;
2281 if (pcAux)
2282 *pcAux = paValues[1].Reg32; /* TSC_AUX was requested as the second register (Names[1]). */
2283 return VINF_SUCCESS;
2284}
2285#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
2286
2287
2288/**
2289 * Queries the TSC and TSC_AUX values, putting the results in nem.s.Hypercall.QueryCpuTick.
2290 *
2291 * @returns VBox status code
2292 * @param pGVM The ring-0 VM handle.
2293 * @param idCpu The calling EMT. Necessary for getting the
2294 * hypercall page and arguments.
2295 */
2296VMMR0_INT_DECL(int) NEMR0QueryCpuTick(PGVM pGVM, VMCPUID idCpu)
2297{
2298#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2299 /*
2300 * Validate the call.
2301 */
2302 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
2303 if (RT_SUCCESS(rc))
2304 {
2305 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2306 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
2307
2308 /*
2309 * Call worker.
2310 */
2311 pGVCpu->nem.s.Hypercall.QueryCpuTick.cTicks = 0;
2312 pGVCpu->nem.s.Hypercall.QueryCpuTick.uAux = 0;
2313 rc = nemR0WinQueryCpuTick(pGVM, pGVCpu, &pGVCpu->nem.s.Hypercall.QueryCpuTick.cTicks,
2314 &pGVCpu->nem.s.Hypercall.QueryCpuTick.uAux);
2315 }
2316 return rc;
2317#else
2318 RT_NOREF(pGVM, idCpu);
2319 return VERR_NOT_IMPLEMENTED;
2320#endif
2321}
2322
2323
2324#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2325/**
2326 * Worker for NEMR0ResumeCpuTickOnAll and the ring-0 NEMHCResumeCpuTickOnAll.
2327 *
2328 * @returns VBox status code.
2329 * @param pGVM The ring-0 VM handle.
2330 * @param pGVCpu The ring-0 VCPU handle.
2331 * @param uPausedTscValue The TSC value at the time of pausing.
2332 */
2333NEM_TMPL_STATIC int nemR0WinResumeCpuTickOnAll(PGVM pGVM, PGVMCPU pGVCpu, uint64_t uPausedTscValue)
2334{
2335 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
2336
2337 /*
2338 * Set up the hypercall parameters.
2339 */
2340 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
2341 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2342
2343 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
2344 pInput->VpIndex = 0;
2345 pInput->RsvdZ = 0;
2346 pInput->Elements[0].Name = HvX64RegisterTsc;
2347 pInput->Elements[0].Pad0 = 0;
2348 pInput->Elements[0].Pad1 = 0;
2349 pInput->Elements[0].Value.Reg128.High64 = 0;
2350 pInput->Elements[0].Value.Reg64 = uPausedTscValue;
2351
2352 /*
2353 * Disable interrupts and do the first virtual CPU.
2354 */
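    /* Interrupts are disabled so nothing can preempt us between reading the host TSC and
       issuing the hypercalls, keeping the per-vCPU adjustments below as small as possible. */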
2355 RTCCINTREG const fSavedFlags = ASMIntDisableFlags();
2356 uint64_t const uFirstTsc = ASMReadTSC();
2357 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
2358 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0 /* no output */);
2359 AssertLogRelMsgReturnStmt(uResult == HV_MAKE_CALL_REP_RET(1), ("uResult=%RX64 uTsc=%#RX64\n", uResult, uPausedTscValue),
2360 ASMSetFlags(fSavedFlags), VERR_NEM_SET_TSC);
2361
2362 /*
2363 * Do the secondary processors, adjusting for the elapsed TSC and keeping our fingers
2364 * crossed that we don't introduce too much drift here.
2365 */
2366 for (VMCPUID iCpu = 1; iCpu < pGVM->cCpus; iCpu++)
2367 {
2368 Assert(pInput->PartitionId == pGVM->nemr0.s.idHvPartition);
2369 Assert(pInput->RsvdZ == 0);
2370 Assert(pInput->Elements[0].Name == HvX64RegisterTsc);
2371 Assert(pInput->Elements[0].Pad0 == 0);
2372 Assert(pInput->Elements[0].Pad1 == 0);
2373 Assert(pInput->Elements[0].Value.Reg128.High64 == 0);
2374
2375 pInput->VpIndex = iCpu;
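        /* Compensate for the host TSC ticks spent since the first vCPU was written, so that
           all vCPUs resume with approximately the same view of time. */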
2376 const uint64_t offDelta = (ASMReadTSC() - uFirstTsc);
2377 pInput->Elements[0].Value.Reg64 = uPausedTscValue + offDelta;
2378
2379 uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
2380 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0 /* no output */);
2381 AssertLogRelMsgReturnStmt(uResult == HV_MAKE_CALL_REP_RET(1),
2382 ("uResult=%RX64 uTsc=%#RX64 + %#RX64\n", uResult, uPausedTscValue, offDelta),
2383 ASMSetFlags(fSavedFlags), VERR_NEM_SET_TSC);
2384 }
2385
2386 /*
2387 * Done.
2388 */
2389 ASMSetFlags(fSavedFlags);
2390 return VINF_SUCCESS;
2391}
2392#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
2393
2394
2395/**
2396 * Sets the TSC register to @a uPausedTscValue on all CPUs.
2397 *
2398 * @returns VBox status code
2399 * @param pGVM The ring-0 VM handle.
2400 * @param idCpu The calling EMT. Necessary for getting the
2401 * hypercall page and arguments.
2402 * @param uPausedTscValue The TSC value at the time of pausing.
2403 */
2404VMMR0_INT_DECL(int) NEMR0ResumeCpuTickOnAll(PGVM pGVM, VMCPUID idCpu, uint64_t uPausedTscValue)
2405{
2406#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2407 /*
2408 * Validate the call.
2409 */
2410 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
2411 if (RT_SUCCESS(rc))
2412 {
2413 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2414 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
2415
2416 /*
2417 * Call worker.
2418 */
2419 pGVCpu->nem.s.Hypercall.QueryCpuTick.cTicks = 0;
2420 pGVCpu->nem.s.Hypercall.QueryCpuTick.uAux = 0;
2421 rc = nemR0WinResumeCpuTickOnAll(pGVM, pGVCpu, uPausedTscValue);
2422 }
2423 return rc;
2424#else
2425 RT_NOREF(pGVM, idCpu, uPausedTscValue);
2426 return VERR_NOT_IMPLEMENTED;
2427#endif
2428}
2429
2430
2431VMMR0_INT_DECL(VBOXSTRICTRC) NEMR0RunGuestCode(PGVM pGVM, VMCPUID idCpu)
2432{
2433#ifdef NEM_WIN_WITH_RING0_RUNLOOP
2434 if (pGVM->nemr0.s.fMayUseRing0Runloop)
2435 return nemHCWinRunGC(pGVM, &pGVM->aCpus[idCpu]);
2436 return VERR_NEM_RING3_ONLY;
2437#else
2438 RT_NOREF(pGVM, idCpu);
2439 return VERR_NOT_IMPLEMENTED;
2440#endif
2441}
2442
2443
2444/**
2445 * Updates statistics in the VM structure.
2446 *
2447 * @returns VBox status code.
2448 * @param pGVM The ring-0 VM handle.
2449 * @param idCpu The calling EMT, or NIL. Necessary for getting the hypercall
2450 * page and arguments.
2451 */
2452VMMR0_INT_DECL(int) NEMR0UpdateStatistics(PGVM pGVM, VMCPUID idCpu)
2453{
2454 /*
2455 * Validate the call.
2456 */
2457 int rc;
2458 if (idCpu == NIL_VMCPUID)
2459 rc = GVMMR0ValidateGVM(pGVM);
2460 else
2461 rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
2462 if (RT_SUCCESS(rc))
2463 {
2464 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
2465
2466 PNEMR0HYPERCALLDATA pHypercallData = idCpu != NIL_VMCPUID
2467 ? &pGVM->aCpus[idCpu].nemr0.s.HypercallData
2468 : &pGVM->nemr0.s.HypercallData;
2469 if ( RT_VALID_PTR(pHypercallData->pbPage)
2470 && pHypercallData->HCPhysPage != NIL_RTHCPHYS)
2471 {
2472 if (idCpu == NIL_VMCPUID)
2473 rc = RTCritSectEnter(&pGVM->nemr0.s.HypercallDataCritSect);
2474 if (RT_SUCCESS(rc))
2475 {
2476 /*
2477 * Query the memory statistics for the partition.
2478 */
2479 HV_INPUT_GET_MEMORY_BALANCE *pInput = (HV_INPUT_GET_MEMORY_BALANCE *)pHypercallData->pbPage;
2480 pInput->TargetPartitionId = pGVM->nemr0.s.idHvPartition;
2481 pInput->ProximityDomainInfo.Flags.ProximityPreferred = 0;
2482 pInput->ProximityDomainInfo.Flags.ProxyimityInfoValid = 0;
2483 pInput->ProximityDomainInfo.Flags.Reserved = 0;
2484 pInput->ProximityDomainInfo.Id = 0;
2485
2486 HV_OUTPUT_GET_MEMORY_BALANCE *pOutput = (HV_OUTPUT_GET_MEMORY_BALANCE *)(pInput + 1);
2487 RT_ZERO(*pOutput);
2488
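                /* HvCallGetMemoryBalance is a simple (non-repeating) hypercall, so plain
                   HV_STATUS_SUCCESS is the expected result here. */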
2489 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallGetMemoryBalance,
2490 pHypercallData->HCPhysPage,
2491 pHypercallData->HCPhysPage + sizeof(*pInput));
2492 if (uResult == HV_STATUS_SUCCESS)
2493 {
2494 pGVM->nem.s.R0Stats.cPagesAvailable = pOutput->PagesAvailable;
2495 pGVM->nem.s.R0Stats.cPagesInUse = pOutput->PagesInUse;
2496 rc = VINF_SUCCESS;
2497 }
2498 else
2499 {
2500 LogRel(("HvCallGetMemoryBalance -> %#RX64 (%#RX64 %#RX64)!!\n",
2501 uResult, pOutput->PagesAvailable, pOutput->PagesInUse));
2502 rc = VERR_NEM_IPE_0;
2503 }
2504
2505 if (idCpu == NIL_VMCPUID)
2506 RTCritSectLeave(&pGVM->nemr0.s.HypercallDataCritSect);
2507 }
2508 }
2509 else
2510 rc = VERR_WRONG_ORDER;
2511 }
2512 return rc;
2513}
2514
2515
2516#if 1 && defined(DEBUG_bird)
2517/**
2518 * Debug-only interface for poking around and exploring Hyper-V stuff.
2519 *
2520 * @param pGVM The ring-0 VM handle.
2521 * @param idCpu The calling EMT.
2522 * @param u64Arg What to query. 0 == registers.
2523 */
2524VMMR0_INT_DECL(int) NEMR0DoExperiment(PGVM pGVM, VMCPUID idCpu, uint64_t u64Arg)
2525{
2526 /*
2527 * Resolve CPU structures.
2528 */
2529 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
2530 if (RT_SUCCESS(rc))
2531 {
2532 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
2533
2534 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2535 if (u64Arg == 0)
2536 {
2537 /*
2538 * Query register.
2539 */
2540 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
2541 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2542
2543 size_t const cbInput = RT_ALIGN_Z(RT_UOFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[1]), 32);
2544 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
2545 RT_BZERO(paValues, sizeof(paValues[0]) * 1);
2546
2547 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
2548 pInput->VpIndex = pGVCpu->idCpu;
2549 pInput->fFlags = 0;
2550 pInput->Names[0] = (HV_REGISTER_NAME)pGVCpu->nem.s.Hypercall.Experiment.uItem;
2551
2552 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, 1),
2553 pGVCpu->nemr0.s.HypercallData.HCPhysPage,
2554 pGVCpu->nemr0.s.HypercallData.HCPhysPage + cbInput);
2555 pGVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_MAKE_CALL_REP_RET(1);
2556 pGVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;
2557 pGVCpu->nem.s.Hypercall.Experiment.uLoValue = paValues[0].Reg128.Low64;
2558 pGVCpu->nem.s.Hypercall.Experiment.uHiValue = paValues[0].Reg128.High64;
2559 rc = VINF_SUCCESS;
2560 }
2561 else if (u64Arg == 1)
2562 {
2563 /*
2564 * Query partition property.
2565 */
2566 HV_INPUT_GET_PARTITION_PROPERTY *pInput = (HV_INPUT_GET_PARTITION_PROPERTY *)pGVCpu->nemr0.s.HypercallData.pbPage;
2567 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2568
2569 size_t const cbInput = RT_ALIGN_Z(sizeof(*pInput), 32);
2570 HV_OUTPUT_GET_PARTITION_PROPERTY *pOutput = (HV_OUTPUT_GET_PARTITION_PROPERTY *)((uint8_t *)pInput + cbInput);
2571 pOutput->PropertyValue = 0;
2572
2573 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
2574 pInput->PropertyCode = (HV_PARTITION_PROPERTY_CODE)pGVCpu->nem.s.Hypercall.Experiment.uItem;
2575 pInput->uPadding = 0;
2576
2577 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallGetPartitionProperty,
2578 pGVCpu->nemr0.s.HypercallData.HCPhysPage,
2579 pGVCpu->nemr0.s.HypercallData.HCPhysPage + cbInput);
2580 pGVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_STATUS_SUCCESS;
2581 pGVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;
2582 pGVCpu->nem.s.Hypercall.Experiment.uLoValue = pOutput->PropertyValue;
2583 pGVCpu->nem.s.Hypercall.Experiment.uHiValue = 0;
2584 rc = VINF_SUCCESS;
2585 }
2586 else if (u64Arg == 2)
2587 {
2588 /*
2589 * Set register.
2590 */
2591 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
2592 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2593 RT_BZERO(pInput, RT_UOFFSETOF(HV_INPUT_SET_VP_REGISTERS, Elements[1]));
2594
2595 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
2596 pInput->VpIndex = pGVCpu->idCpu;
2597 pInput->RsvdZ = 0;
2598 pInput->Elements[0].Name = (HV_REGISTER_NAME)pGVCpu->nem.s.Hypercall.Experiment.uItem;
2599 pInput->Elements[0].Value.Reg128.High64 = pGVCpu->nem.s.Hypercall.Experiment.uHiValue;
2600 pInput->Elements[0].Value.Reg128.Low64 = pGVCpu->nem.s.Hypercall.Experiment.uLoValue;
2601
2602 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
2603 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0);
2604 pGVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_MAKE_CALL_REP_RET(1);
2605 pGVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;
2606 rc = VINF_SUCCESS;
2607 }
2608 else
2609 rc = VERR_INVALID_FUNCTION;
2610 }
2611 return rc;
2612}
2613#endif /* DEBUG_bird */
2614