source: vbox/trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp @ 91323

Last change on this file since 91323 was 91323, checked in by vboxsync, 3 years ago

VMM: bugref:10106 Fixed IA32_FEATURE_CONTROL MSR reported to the guest.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 112.5 KB
1/* $Id: NEMR0Native-win.cpp 91323 2021-09-22 10:04:56Z vboxsync $ */
2/** @file
3 * NEM - Native execution manager, native ring-0 Windows backend.
4 */
5
6/*
7 * Copyright (C) 2018-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_NEM
23#define VMCPU_INCL_CPUM_GST_CTX
24#include <iprt/nt/nt.h>
25#include <iprt/nt/hyperv.h>
26#include <iprt/nt/vid.h>
27#include <winerror.h>
28
29#include <VBox/vmm/nem.h>
30#include <VBox/vmm/iem.h>
31#include <VBox/vmm/em.h>
32#include <VBox/vmm/apic.h>
33#include <VBox/vmm/pdm.h>
34#include <VBox/vmm/dbgftrace.h>
35#include "NEMInternal.h"
36#include <VBox/vmm/gvm.h>
37#include <VBox/vmm/vmcc.h>
38#include <VBox/vmm/gvmm.h>
39#include <VBox/param.h>
40
41#include <iprt/dbg.h>
42#include <iprt/memobj.h>
43#include <iprt/string.h>
44#include <iprt/time.h>
45
46
47/* Assert compile context sanity. */
48#ifndef RT_OS_WINDOWS
49# error "Windows only file!"
50#endif
51#ifndef RT_ARCH_AMD64
52# error "AMD64 only file!"
53#endif
54
55
56/*********************************************************************************************************************************
57* Defined Constants And Macros *
58*********************************************************************************************************************************/
59typedef uint32_t DWORD; /* for winerror.h constants */
60
61
62/*********************************************************************************************************************************
63* Global Variables *
64*********************************************************************************************************************************/
65static uint64_t (*g_pfnHvlInvokeHypercall)(uint64_t uCallInfo, uint64_t HCPhysInput, uint64_t HCPhysOutput);
66
67/**
68 * WinHvr.sys!WinHvDepositMemory
69 *
70 * This API will try to allocate cPages on IdealNode and deposit them to the
71 * hypervisor for use with the given partition. The memory will be freed when
72 * VID.SYS calls WinHvWithdrawAllMemory as the partition is cleaned up.
73 *
74 * Apparently node numbers above 64 have a different meaning.
75 */
76static NTSTATUS (*g_pfnWinHvDepositMemory)(uintptr_t idPartition, size_t cPages, uintptr_t IdealNode, size_t *pcActuallyAdded);
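/*
 * Illustrative sketch (not part of the original file): how the kernel import
 * above is resolved and how the deposit routine is typically driven.  It
 * condenses what NEMR0InitVM and the retry path in nemR0WinMapPages below
 * actually do; the function name nemR0SketchDepositMemory and the simplified
 * error handling are hypothetical.
 */
#if 0
static int nemR0SketchDepositMemory(PGVM pGVM)
{
    /* Resolve the import from winhvr.sys, as NEMR0InitVM does. */
    RTDBGKRNLINFO hKrnlInfo;
    int rc = RTR0DbgKrnlInfoOpen(&hKrnlInfo, 0);
    if (RT_SUCCESS(rc))
    {
        rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, "winhvr.sys", "WinHvDepositMemory",
                                        (void **)&g_pfnWinHvDepositMemory);
        RTR0DbgKrnlInfoRelease(hKrnlInfo);
    }
    if (RT_SUCCESS(rc))
    {
        /* Donate 512 pages (2 MB, the VID.SYS granularity) from node 0 to the partition. */
        size_t   cPagesAdded = 0;
        NTSTATUS rcNt = g_pfnWinHvDepositMemory(pGVM->nemr0.s.idHvPartition, 512 /*cPages*/,
                                                0 /*IdealNode*/, &cPagesAdded);
        if (!NT_SUCCESS(rcNt) || cPagesAdded == 0)
            rc = VERR_NEM_MAP_PAGES_FAILED;
    }
    return rc;
}
#endif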
77
78
79/*********************************************************************************************************************************
80* Internal Functions *
81*********************************************************************************************************************************/
82NEM_TMPL_STATIC int nemR0WinMapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
83 uint32_t cPages, uint32_t fFlags);
84NEM_TMPL_STATIC int nemR0WinUnmapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys, uint32_t cPages);
85#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
86NEM_TMPL_STATIC int nemR0WinExportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx);
87NEM_TMPL_STATIC int nemR0WinImportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat, bool fCanUpdateCr3);
88NEM_TMPL_STATIC int nemR0WinQueryCpuTick(PGVM pGVM, PGVMCPU pGVCpu, uint64_t *pcTicks, uint32_t *pcAux);
89NEM_TMPL_STATIC int nemR0WinResumeCpuTickOnAll(PGVM pGVM, PGVMCPU pGVCpu, uint64_t uPausedTscValue);
90#endif
91DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, PGVMCPU pGVCpu, uint32_t uFunction, void *pvInput, uint32_t cbInput,
92 void *pvOutput, uint32_t cbOutput);
93
94
95/*
96 * Instantiate the code we share with ring-3.
97 */
98#ifdef NEM_WIN_WITH_RING0_RUNLOOP
99# define NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
100#else
101# undef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
102#endif
103#include "../VMMAll/NEMAllNativeTemplate-win.cpp.h"
104
105
106
107/**
108 * Worker for NEMR0InitVM that allocates a hypercall page.
109 *
110 * @returns VBox status code.
111 * @param pHypercallData The hypercall data page to initialize.
112 */
113static int nemR0InitHypercallData(PNEMR0HYPERCALLDATA pHypercallData)
114{
115 int rc = RTR0MemObjAllocPage(&pHypercallData->hMemObj, PAGE_SIZE, false /*fExecutable*/);
116 if (RT_SUCCESS(rc))
117 {
118 pHypercallData->HCPhysPage = RTR0MemObjGetPagePhysAddr(pHypercallData->hMemObj, 0 /*iPage*/);
119 AssertStmt(pHypercallData->HCPhysPage != NIL_RTHCPHYS, rc = VERR_INTERNAL_ERROR_3);
120 pHypercallData->pbPage = (uint8_t *)RTR0MemObjAddress(pHypercallData->hMemObj);
121 AssertStmt(pHypercallData->pbPage, rc = VERR_INTERNAL_ERROR_3);
122 if (RT_SUCCESS(rc))
123 return VINF_SUCCESS;
124
125 /* bail out */
126 RTR0MemObjFree(pHypercallData->hMemObj, true /*fFreeMappings*/);
127 }
128 pHypercallData->hMemObj = NIL_RTR0MEMOBJ;
129 pHypercallData->HCPhysPage = NIL_RTHCPHYS;
130 pHypercallData->pbPage = NULL;
131 return rc;
132}
133
134/**
135 * Worker for NEMR0CleanupVM and NEMR0InitVM that cleans up a hypercall page.
136 *
137 * @param pHypercallData The hypercall data page to uninitialize.
138 */
139static void nemR0DeleteHypercallData(PNEMR0HYPERCALLDATA pHypercallData)
140{
141 /* Check pbPage here since it is always NULL when not allocated, whereas the
142 hMemObj can be either NIL_RTR0MEMOBJ or 0 (they aren't necessarily the same). */
143 if (pHypercallData->pbPage != NULL)
144 {
145 RTR0MemObjFree(pHypercallData->hMemObj, true /*fFreeMappings*/);
146 pHypercallData->pbPage = NULL;
147 }
148 pHypercallData->hMemObj = NIL_RTR0MEMOBJ;
149 pHypercallData->HCPhysPage = NIL_RTHCPHYS;
150}
151
152
153/**
154 * Called by NEMR3Init to make sure we've got what we need.
155 *
156 * @returns VBox status code.
157 * @param pGVM The ring-0 VM handle.
158 * @thread EMT(0)
159 */
160VMMR0_INT_DECL(int) NEMR0InitVM(PGVM pGVM)
161{
162 AssertCompile(sizeof(pGVM->nemr0.s) <= sizeof(pGVM->nemr0.padding));
163 AssertCompile(sizeof(pGVM->aCpus[0].nemr0.s) <= sizeof(pGVM->aCpus[0].nemr0.padding));
164
165 int rc = GVMMR0ValidateGVMandEMT(pGVM, 0);
166 AssertRCReturn(rc, rc);
167
168 /*
169 * We want to perform hypercalls here. The NT kernel started to expose a very
170 * low-level interface for doing this somewhere between builds 14271 and 16299.
171 * Since we need build 17134 to get anywhere at all, the exact build is not relevant here.
172 *
173 * We also need to deposit memory to the hypervisor for use with the partition
174 * (page mapping structures and the like).
175 */
176 RTDBGKRNLINFO hKrnlInfo;
177 rc = RTR0DbgKrnlInfoOpen(&hKrnlInfo, 0);
178 if (RT_SUCCESS(rc))
179 {
180 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, NULL, "HvlInvokeHypercall", (void **)&g_pfnHvlInvokeHypercall);
181 if (RT_FAILURE(rc))
182 rc = VERR_NEM_MISSING_KERNEL_API_1;
183 if (RT_SUCCESS(rc))
184 {
185 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, "winhvr.sys", "WinHvDepositMemory", (void **)&g_pfnWinHvDepositMemory);
186 if (RT_FAILURE(rc))
187 rc = rc == VERR_MODULE_NOT_FOUND ? VERR_NEM_MISSING_KERNEL_API_2 : VERR_NEM_MISSING_KERNEL_API_3;
188 }
189 RTR0DbgKrnlInfoRelease(hKrnlInfo);
190 if (RT_SUCCESS(rc))
191 {
192 /*
193 * Allocate a page for non-EMT threads to use for hypercalls (update
194 * statistics and such) and a critical section protecting it.
195 */
196 rc = RTCritSectInit(&pGVM->nemr0.s.HypercallDataCritSect);
197 if (RT_SUCCESS(rc))
198 {
199 rc = nemR0InitHypercallData(&pGVM->nemr0.s.HypercallData);
200 if (RT_SUCCESS(rc))
201 {
202 /*
203 * Allocate a page for each VCPU to place hypercall data on.
204 */
205 for (VMCPUID i = 0; i < pGVM->cCpus; i++)
206 {
207 rc = nemR0InitHypercallData(&pGVM->aCpus[i].nemr0.s.HypercallData);
208 if (RT_FAILURE(rc))
209 {
210 while (i-- > 0)
211 nemR0DeleteHypercallData(&pGVM->aCpus[i].nemr0.s.HypercallData);
212 break;
213 }
214 }
215 if (RT_SUCCESS(rc))
216 {
217 /*
218 * So far, so good.
219 */
220 return rc;
221 }
222
223 /*
224 * Bail out.
225 */
226 nemR0DeleteHypercallData(&pGVM->nemr0.s.HypercallData);
227 }
228 RTCritSectDelete(&pGVM->nemr0.s.HypercallDataCritSect);
229 }
230 }
231 }
232
233 return rc;
234}
235
236
237/**
238 * Perform an I/O control operation on the partition handle (VID.SYS).
239 *
240 * @returns NT status code.
241 * @param pGVM The ring-0 VM structure.
242 * @param pGVCpu The global (ring-0) CPU structure of the calling EMT.
243 * @param uFunction The function to perform.
244 * @param pvInput The input buffer. This must point within the VM
245 * structure so we can easily convert to a ring-3
246 * pointer if necessary.
247 * @param cbInput The size of the input. @a pvInput must be NULL when
248 * zero.
249 * @param pvOutput The output buffer. This must also point within the
250 * VM structure for ring-3 pointer magic.
251 * @param cbOutput The size of the output. @a pvOutput must be NULL
252 * when zero.
253 * @thread EMT(pGVCpu)
254 */
255DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, PGVMCPU pGVCpu, uint32_t uFunction, void *pvInput, uint32_t cbInput,
256 void *pvOutput, uint32_t cbOutput)
257{
258#ifdef RT_STRICT
259 /*
260 * Input and output parameters are part of the VM CPU structure.
261 */
262 VMCPU_ASSERT_EMT(pGVCpu);
263 if (pvInput)
264 AssertReturn(((uintptr_t)pvInput + cbInput) - (uintptr_t)pGVCpu <= sizeof(*pGVCpu), VERR_INVALID_PARAMETER);
265 if (pvOutput)
266 AssertReturn(((uintptr_t)pvOutput + cbOutput) - (uintptr_t)pGVCpu <= sizeof(*pGVCpu), VERR_INVALID_PARAMETER);
267#endif
268
269 int32_t rcNt = STATUS_UNSUCCESSFUL;
270 int rc = SUPR0IoCtlPerform(pGVM->nemr0.s.pIoCtlCtx, uFunction,
271 pvInput,
272 pvInput ? (uintptr_t)pvInput + pGVCpu->nemr0.s.offRing3ConversionDelta : NIL_RTR3PTR,
273 cbInput,
274 pvOutput,
275 pvOutput ? (uintptr_t)pvOutput + pGVCpu->nemr0.s.offRing3ConversionDelta : NIL_RTR3PTR,
276 cbOutput,
277 &rcNt);
278 if (RT_SUCCESS(rc) || !NT_SUCCESS((NTSTATUS)rcNt))
279 return (NTSTATUS)rcNt;
280 return STATUS_UNSUCCESSFUL;
281}
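/*
 * Illustrative sketch (not part of the original file): where the ring-3
 * aliases passed to SUPR0IoCtlPerform above come from.  NEMR0InitVMPart2
 * below computes offRing3ConversionDelta once per VCPU, and the RT_STRICT
 * assertions in nemR0NtPerformIoControl exist precisely because this simple
 * pointer arithmetic is only valid for buffers living inside the VMCPU
 * structure.  The helper name nemR0SketchRing3Alias is hypothetical.
 */
#if 0
static void nemR0SketchRing3Alias(PGVM pGVM, PGVMCPU pGVCpu)
{
    /* Delta between the ring-3 and ring-0 mappings of the same VMCPU structure. */
    pGVCpu->nemr0.s.offRing3ConversionDelta = (uintptr_t)pGVM->aCpus[pGVCpu->idCpu].pVCpuR3
                                            - (uintptr_t)pGVCpu;

    /* Any buffer inside the VMCPU structure can then be aliased into ring-3: */
    void            *pvInputR0 = &pGVCpu->nem.s.uIoCtlBuf.idPartition;
    uintptr_t const  uInputR3  = (uintptr_t)pvInputR0 + pGVCpu->nemr0.s.offRing3ConversionDelta;
    RT_NOREF(uInputR3);
}
#endif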
282
283
284/**
285 * 2nd part of the initialization, after we've got a partition handle.
286 *
287 * @returns VBox status code.
288 * @param pGVM The ring-0 VM handle.
289 * @thread EMT(0)
290 */
291VMMR0_INT_DECL(int) NEMR0InitVMPart2(PGVM pGVM)
292{
293 int rc = GVMMR0ValidateGVMandEMT(pGVM, 0);
294 AssertRCReturn(rc, rc);
295 SUPR0Printf("NEMR0InitVMPart2\n"); LogRel(("2: NEMR0InitVMPart2\n"));
296 Assert(pGVM->nemr0.s.fMayUseRing0Runloop == false);
297
298 /*
299 * Copy and validate the I/O control information from ring-3.
300 */
301 NEMWINIOCTL Copy = pGVM->nem.s.IoCtlGetHvPartitionId;
302 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);
303 AssertLogRelReturn(Copy.cbInput == 0, VERR_NEM_INIT_FAILED);
304 AssertLogRelReturn(Copy.cbOutput == sizeof(HV_PARTITION_ID), VERR_NEM_INIT_FAILED);
305 pGVM->nemr0.s.IoCtlGetHvPartitionId = Copy;
306
307 pGVM->nemr0.s.fMayUseRing0Runloop = pGVM->nem.s.fUseRing0Runloop;
308
309 Copy = pGVM->nem.s.IoCtlStartVirtualProcessor;
310 AssertLogRelStmt(Copy.uFunction != 0, rc = VERR_NEM_INIT_FAILED);
311 AssertLogRelStmt(Copy.cbInput == sizeof(HV_VP_INDEX), rc = VERR_NEM_INIT_FAILED);
312 AssertLogRelStmt(Copy.cbOutput == 0, rc = VERR_NEM_INIT_FAILED);
313 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlGetHvPartitionId.uFunction, rc = VERR_NEM_INIT_FAILED);
314 if (RT_SUCCESS(rc))
315 pGVM->nemr0.s.IoCtlStartVirtualProcessor = Copy;
316
317 Copy = pGVM->nem.s.IoCtlStopVirtualProcessor;
318 AssertLogRelStmt(Copy.uFunction != 0, rc = VERR_NEM_INIT_FAILED);
319 AssertLogRelStmt(Copy.cbInput == sizeof(HV_VP_INDEX), rc = VERR_NEM_INIT_FAILED);
320 AssertLogRelStmt(Copy.cbOutput == 0, rc = VERR_NEM_INIT_FAILED);
321 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlGetHvPartitionId.uFunction, rc = VERR_NEM_INIT_FAILED);
322 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlStartVirtualProcessor.uFunction, rc = VERR_NEM_INIT_FAILED);
323 if (RT_SUCCESS(rc))
324 pGVM->nemr0.s.IoCtlStopVirtualProcessor = Copy;
325
326 Copy = pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext;
327 AssertLogRelStmt(Copy.uFunction != 0, rc = VERR_NEM_INIT_FAILED);
328 AssertLogRelStmt( Copy.cbInput == sizeof(VID_IOCTL_INPUT_MESSAGE_SLOT_HANDLE_AND_GET_NEXT)
329 || Copy.cbInput == RT_OFFSETOF(VID_IOCTL_INPUT_MESSAGE_SLOT_HANDLE_AND_GET_NEXT, cMillies),
330 rc = VERR_NEM_INIT_FAILED);
331 AssertLogRelStmt(Copy.cbOutput == 0, rc = VERR_NEM_INIT_FAILED);
332 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlGetHvPartitionId.uFunction, rc = VERR_NEM_INIT_FAILED);
333 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlStartVirtualProcessor.uFunction, rc = VERR_NEM_INIT_FAILED);
334 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlStopVirtualProcessor.uFunction, rc = VERR_NEM_INIT_FAILED);
335 if (RT_SUCCESS(rc))
336 pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext = Copy;
337
338 if ( RT_SUCCESS(rc)
339 || !pGVM->nem.s.fUseRing0Runloop)
340 {
341 /*
342 * Setup of an I/O control context for the partition handle for later use.
343 */
344 rc = SUPR0IoCtlSetupForHandle(pGVM->pSession, pGVM->nem.s.hPartitionDevice, 0, &pGVM->nemr0.s.pIoCtlCtx);
345 AssertLogRelRCReturn(rc, rc);
346 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
347 {
348 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
349 pGVCpu->nemr0.s.offRing3ConversionDelta = (uintptr_t)pGVM->aCpus[idCpu].pVCpuR3 - (uintptr_t)pGVCpu;
350 }
351
352 /*
353 * Get the partition ID.
354 */
355 PVMCPUCC pVCpu0 = &pGVM->aCpus[0];
356 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pVCpu0, pGVM->nemr0.s.IoCtlGetHvPartitionId.uFunction, NULL, 0,
357 &pVCpu0->nem.s.uIoCtlBuf.idPartition, sizeof(pVCpu0->nem.s.uIoCtlBuf.idPartition));
358 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("IoCtlGetHvPartitionId failed: %#x\n", rcNt), VERR_NEM_INIT_FAILED);
359 pGVM->nemr0.s.idHvPartition = pVCpu0->nem.s.uIoCtlBuf.idPartition;
360 AssertLogRelMsgReturn(pGVM->nemr0.s.idHvPartition == pGVM->nem.s.idHvPartition,
361 ("idHvPartition mismatch: r0=%#RX64, r3=%#RX64\n", pGVM->nemr0.s.idHvPartition, pGVM->nem.s.idHvPartition),
362 VERR_NEM_INIT_FAILED);
363 }
364
365 return rc;
366}
367
368
369/**
370 * Cleanup the NEM parts of the VM in ring-0.
371 *
372 * This is always called and must deal with the state regardless of whether
373 * NEMR0InitVM() was called or not. So, take care here.
374 *
375 * @param pGVM The ring-0 VM handle.
376 */
377VMMR0_INT_DECL(void) NEMR0CleanupVM(PGVM pGVM)
378{
379 pGVM->nemr0.s.idHvPartition = HV_PARTITION_ID_INVALID;
380
381 /* Clean up I/O control context. */
382 if (pGVM->nemr0.s.pIoCtlCtx)
383 {
384 int rc = SUPR0IoCtlCleanup(pGVM->nemr0.s.pIoCtlCtx);
385 AssertRC(rc);
386 pGVM->nemr0.s.pIoCtlCtx = NULL;
387 }
388
389 /* Free the hypercall pages. */
390 VMCPUID i = pGVM->cCpus;
391 while (i-- > 0)
392 nemR0DeleteHypercallData(&pGVM->aCpus[i].nemr0.s.HypercallData);
393
394 /* The non-EMT one too. */
395 if (RTCritSectIsInitialized(&pGVM->nemr0.s.HypercallDataCritSect))
396 RTCritSectDelete(&pGVM->nemr0.s.HypercallDataCritSect);
397 nemR0DeleteHypercallData(&pGVM->nemr0.s.HypercallData);
398}
399
400
401#if 0 /* for debugging GPA unmapping. */
402static int nemR3WinDummyReadGpa(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys)
403{
404 PHV_INPUT_READ_GPA pIn = (PHV_INPUT_READ_GPA)pGVCpu->nemr0.s.pbHypercallData;
405 PHV_OUTPUT_READ_GPA pOut = (PHV_OUTPUT_READ_GPA)(pIn + 1);
406 pIn->PartitionId = pGVM->nemr0.s.idHvPartition;
407 pIn->VpIndex = pGVCpu->idCpu;
408 pIn->ByteCount = 0x10;
409 pIn->BaseGpa = GCPhys;
410 pIn->ControlFlags.AsUINT64 = 0;
411 pIn->ControlFlags.CacheType = HvCacheTypeX64WriteCombining;
412 memset(pOut, 0xfe, sizeof(*pOut));
413 uint64_t volatile uResult = g_pfnHvlInvokeHypercall(HvCallReadGpa, pGVCpu->nemr0.s.HCPhysHypercallData,
414 pGVCpu->nemr0.s.HCPhysHypercallData + sizeof(*pIn));
415 LogRel(("nemR3WinDummyReadGpa: %RGp -> %#RX64; code=%u rsvd=%u abData=%.16Rhxs\n",
416 GCPhys, uResult, pOut->AccessResult.ResultCode, pOut->AccessResult.Reserved, pOut->Data));
417 __debugbreak();
418
419 return uResult != 0 ? VERR_READ_ERROR : VINF_SUCCESS;
420}
421#endif
422
423
424/**
425 * Worker for NEMR0MapPages and others.
426 */
427NEM_TMPL_STATIC int nemR0WinMapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
428 uint32_t cPages, uint32_t fFlags)
429{
430 /*
431 * Validate.
432 */
433 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
434
435 AssertReturn(cPages > 0, VERR_OUT_OF_RANGE);
436 AssertReturn(cPages <= NEM_MAX_MAP_PAGES, VERR_OUT_OF_RANGE);
437 AssertReturn(!(fFlags & ~(HV_MAP_GPA_MAYBE_ACCESS_MASK & ~HV_MAP_GPA_DUNNO_ACCESS)), VERR_INVALID_FLAGS);
438 AssertMsgReturn(!(GCPhysDst & X86_PAGE_OFFSET_MASK), ("GCPhysDst=%RGp\n", GCPhysDst), VERR_OUT_OF_RANGE);
439 AssertReturn(GCPhysDst < _1E, VERR_OUT_OF_RANGE);
440 if (GCPhysSrc != GCPhysDst)
441 {
442 AssertMsgReturn(!(GCPhysSrc & X86_PAGE_OFFSET_MASK), ("GCPhysSrc=%RGp\n", GCPhysSrc), VERR_OUT_OF_RANGE);
443 AssertReturn(GCPhysSrc < _1E, VERR_OUT_OF_RANGE);
444 }
445
446 /*
447 * Compose and make the hypercall.
448 * Ring-3 is not allowed to fill in the host physical addresses of the call.
449 */
450 for (uint32_t iTries = 0;; iTries++)
451 {
452 RTGCPHYS GCPhysSrcTmp = GCPhysSrc;
453 HV_INPUT_MAP_GPA_PAGES *pMapPages = (HV_INPUT_MAP_GPA_PAGES *)pGVCpu->nemr0.s.HypercallData.pbPage;
454 AssertPtrReturn(pMapPages, VERR_INTERNAL_ERROR_3);
455 pMapPages->TargetPartitionId = pGVM->nemr0.s.idHvPartition;
456 pMapPages->TargetGpaBase = GCPhysDst >> X86_PAGE_SHIFT;
457 pMapPages->MapFlags = fFlags;
458 pMapPages->u32ExplicitPadding = 0;
459
460 for (uint32_t iPage = 0; iPage < cPages; iPage++, GCPhysSrcTmp += X86_PAGE_SIZE)
461 {
462 RTHCPHYS HCPhys = NIL_RTGCPHYS;
463 int rc = PGMPhysGCPhys2HCPhys(pGVM, GCPhysSrcTmp, &HCPhys);
464 AssertRCReturn(rc, rc);
465 pMapPages->PageList[iPage] = HCPhys >> X86_PAGE_SHIFT;
466 }
467
468 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallMapGpaPages | ((uint64_t)cPages << 32),
469 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0);
470 Log6(("NEMR0MapPages: %RGp/%RGp L %u prot %#x -> %#RX64\n",
471 GCPhysDst, GCPhysSrcTmp - cPages * X86_PAGE_SIZE, cPages, fFlags, uResult));
472 if (uResult == ((uint64_t)cPages << 32))
473 return VINF_SUCCESS;
474
475 /*
476 * If the partition is out of memory, try to donate another 512 pages
477 * (2 MB) to it. VID.SYS works in multiples of 512 pages, nothing smaller.
478 */
479 if ( uResult != HV_STATUS_INSUFFICIENT_MEMORY
480 || iTries > 16
481 || g_pfnWinHvDepositMemory == NULL)
482 {
483 LogRel(("g_pfnHvlInvokeHypercall/MapGpaPages -> %#RX64\n", uResult));
484 return VERR_NEM_MAP_PAGES_FAILED;
485 }
486
487 size_t cPagesAdded = 0;
488 NTSTATUS rcNt = g_pfnWinHvDepositMemory(pGVM->nemr0.s.idHvPartition, 512, 0, &cPagesAdded);
489 if (!cPagesAdded)
490 {
491 LogRel(("g_pfnWinHvDepositMemory -> %#x / %#RX64\n", rcNt, uResult));
492 return VERR_NEM_MAP_PAGES_FAILED;
493 }
494 }
495}
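/*
 * Illustrative sketch (not part of the original file): the rep-hypercall
 * encoding used above.  The call code occupies the low bits of the call info
 * and the repetition count bits 32..43, so 'HvCallMapGpaPages | ((uint64_t)cPages << 32)'
 * requests cPages repetitions and a result of '(uint64_t)cPages << 32' means
 * they all completed.  The HV_MAKE_CALL_INFO / HV_MAKE_CALL_REP_RET macros
 * used further down express the same thing; the NEM_SKETCH_* names here are
 * hypothetical and the bit layout follows the Hyper-V TLFS.
 */
#if 0
# define NEM_SKETCH_CALL_INFO(a_enmCall, a_cReps)  ((uint64_t)(a_enmCall) | ((uint64_t)(a_cReps) << 32))
# define NEM_SKETCH_REPS_DONE(a_uResult)           ((uint32_t)((a_uResult) >> 32))
/* e.g. NEM_SKETCH_REPS_DONE(uResult) == cPages <=> the whole batch was mapped. */
#endif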
496
497
498/**
499 * Maps pages into the guest physical address space.
500 *
501 * Generally the caller will be holding the PGM lock already, so no extra effort
502 * is needed to make sure all changes happen under it.
503 *
504 * @returns VBox status code.
505 * @param pGVM The ring-0 VM handle.
506 * @param idCpu The calling EMT. Necessary for getting the
507 * hypercall page and arguments.
508 * @thread EMT(idCpu)
509 */
510VMMR0_INT_DECL(int) NEMR0MapPages(PGVM pGVM, VMCPUID idCpu)
511{
512 /*
513 * Unpack the call.
514 */
515 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
516 if (RT_SUCCESS(rc))
517 {
518 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
519
520 RTGCPHYS const GCPhysSrc = pGVCpu->nem.s.Hypercall.MapPages.GCPhysSrc;
521 RTGCPHYS const GCPhysDst = pGVCpu->nem.s.Hypercall.MapPages.GCPhysDst;
522 uint32_t const cPages = pGVCpu->nem.s.Hypercall.MapPages.cPages;
523 HV_MAP_GPA_FLAGS const fFlags = pGVCpu->nem.s.Hypercall.MapPages.fFlags;
524
525 /*
526 * Do the work.
527 */
528 rc = nemR0WinMapPages(pGVM, pGVCpu, GCPhysSrc, GCPhysDst, cPages, fFlags);
529 }
530 return rc;
531}
532
533
534/**
535 * Worker for NEMR0UnmapPages and others.
536 */
537NEM_TMPL_STATIC int nemR0WinUnmapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys, uint32_t cPages)
538{
539 /*
540 * Validate input.
541 */
542 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
543
544 AssertReturn(cPages > 0, VERR_OUT_OF_RANGE);
545 AssertReturn(cPages <= NEM_MAX_UNMAP_PAGES, VERR_OUT_OF_RANGE);
546 AssertMsgReturn(!(GCPhys & X86_PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_OUT_OF_RANGE);
547 AssertReturn(GCPhys < _1E, VERR_OUT_OF_RANGE);
548
549 /*
550 * Compose and make the hypercall.
551 */
552 HV_INPUT_UNMAP_GPA_PAGES *pUnmapPages = (HV_INPUT_UNMAP_GPA_PAGES *)pGVCpu->nemr0.s.HypercallData.pbPage;
553 AssertPtrReturn(pUnmapPages, VERR_INTERNAL_ERROR_3);
554 pUnmapPages->TargetPartitionId = pGVM->nemr0.s.idHvPartition;
555 pUnmapPages->TargetGpaBase = GCPhys >> X86_PAGE_SHIFT;
556 pUnmapPages->fFlags = 0;
557
558 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallUnmapGpaPages | ((uint64_t)cPages << 32),
559 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0);
560 Log6(("NEMR0UnmapPages: %RGp L %u -> %#RX64\n", GCPhys, cPages, uResult));
561 if (uResult == ((uint64_t)cPages << 32))
562 {
563#if 1 /* Do we need to do this? Hopefully not... */
564 uint64_t volatile uR = g_pfnHvlInvokeHypercall(HvCallUncommitGpaPages | ((uint64_t)cPages << 32),
565 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0);
566 AssertMsg(uR == ((uint64_t)cPages << 32), ("uR=%#RX64\n", uR)); NOREF(uR);
567#endif
568 return VINF_SUCCESS;
569 }
570
571 LogRel(("g_pfnHvlInvokeHypercall/UnmapGpaPages -> %#RX64\n", uResult));
572 return VERR_NEM_UNMAP_PAGES_FAILED;
573}
574
575
576/**
577 * Unmaps pages from the guest physical address space.
578 *
579 * Generally the caller will be holding the PGM lock already, so no extra effort
580 * is needed to make sure all changes happen under it.
581 *
582 * @returns VBox status code.
583 * @param pGVM The ring-0 VM handle.
584 * @param idCpu The calling EMT. Necessary for getting the
585 * hypercall page and arguments.
586 * @thread EMT(idCpu)
587 */
588VMMR0_INT_DECL(int) NEMR0UnmapPages(PGVM pGVM, VMCPUID idCpu)
589{
590 /*
591 * Unpack the call.
592 */
593 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
594 if (RT_SUCCESS(rc))
595 {
596 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
597
598 RTGCPHYS const GCPhys = pGVCpu->nem.s.Hypercall.UnmapPages.GCPhys;
599 uint32_t const cPages = pGVCpu->nem.s.Hypercall.UnmapPages.cPages;
600
601 /*
602 * Do the work.
603 */
604 rc = nemR0WinUnmapPages(pGVM, pGVCpu, GCPhys, cPages);
605 }
606 return rc;
607}
608
609
610#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
611/**
612 * Worker for NEMR0ExportState.
613 *
614 * Intention is to use it internally later.
615 *
616 * @returns VBox status code.
617 * @param pGVM The ring-0 VM handle.
618 * @param pGVCpu The ring-0 VCPU handle.
619 * @param pCtx The CPU context structure to export from.
620 */
621NEM_TMPL_STATIC int nemR0WinExportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx)
622{
623 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
624 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
625 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
626
627 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
628 pInput->VpIndex = pGVCpu->idCpu;
629 pInput->RsvdZ = 0;
630
631 uint64_t const fWhat = ~pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK);
632 if ( !fWhat
633 && pGVCpu->nem.s.fCurrentInterruptWindows == pGVCpu->nem.s.fDesiredInterruptWindows)
634 return VINF_SUCCESS;
635 uintptr_t iReg = 0;
636
637 /* GPRs */
638 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
639 {
640 if (fWhat & CPUMCTX_EXTRN_RAX)
641 {
642 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
643 pInput->Elements[iReg].Name = HvX64RegisterRax;
644 pInput->Elements[iReg].Value.Reg64 = pCtx->rax;
645 iReg++;
646 }
647 if (fWhat & CPUMCTX_EXTRN_RCX)
648 {
649 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
650 pInput->Elements[iReg].Name = HvX64RegisterRcx;
651 pInput->Elements[iReg].Value.Reg64 = pCtx->rcx;
652 iReg++;
653 }
654 if (fWhat & CPUMCTX_EXTRN_RDX)
655 {
656 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
657 pInput->Elements[iReg].Name = HvX64RegisterRdx;
658 pInput->Elements[iReg].Value.Reg64 = pCtx->rdx;
659 iReg++;
660 }
661 if (fWhat & CPUMCTX_EXTRN_RBX)
662 {
663 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
664 pInput->Elements[iReg].Name = HvX64RegisterRbx;
665 pInput->Elements[iReg].Value.Reg64 = pCtx->rbx;
666 iReg++;
667 }
668 if (fWhat & CPUMCTX_EXTRN_RSP)
669 {
670 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
671 pInput->Elements[iReg].Name = HvX64RegisterRsp;
672 pInput->Elements[iReg].Value.Reg64 = pCtx->rsp;
673 iReg++;
674 }
675 if (fWhat & CPUMCTX_EXTRN_RBP)
676 {
677 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
678 pInput->Elements[iReg].Name = HvX64RegisterRbp;
679 pInput->Elements[iReg].Value.Reg64 = pCtx->rbp;
680 iReg++;
681 }
682 if (fWhat & CPUMCTX_EXTRN_RSI)
683 {
684 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
685 pInput->Elements[iReg].Name = HvX64RegisterRsi;
686 pInput->Elements[iReg].Value.Reg64 = pCtx->rsi;
687 iReg++;
688 }
689 if (fWhat & CPUMCTX_EXTRN_RDI)
690 {
691 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
692 pInput->Elements[iReg].Name = HvX64RegisterRdi;
693 pInput->Elements[iReg].Value.Reg64 = pCtx->rdi;
694 iReg++;
695 }
696 if (fWhat & CPUMCTX_EXTRN_R8_R15)
697 {
698 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
699 pInput->Elements[iReg].Name = HvX64RegisterR8;
700 pInput->Elements[iReg].Value.Reg64 = pCtx->r8;
701 iReg++;
702 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
703 pInput->Elements[iReg].Name = HvX64RegisterR9;
704 pInput->Elements[iReg].Value.Reg64 = pCtx->r9;
705 iReg++;
706 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
707 pInput->Elements[iReg].Name = HvX64RegisterR10;
708 pInput->Elements[iReg].Value.Reg64 = pCtx->r10;
709 iReg++;
710 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
711 pInput->Elements[iReg].Name = HvX64RegisterR11;
712 pInput->Elements[iReg].Value.Reg64 = pCtx->r11;
713 iReg++;
714 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
715 pInput->Elements[iReg].Name = HvX64RegisterR12;
716 pInput->Elements[iReg].Value.Reg64 = pCtx->r12;
717 iReg++;
718 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
719 pInput->Elements[iReg].Name = HvX64RegisterR13;
720 pInput->Elements[iReg].Value.Reg64 = pCtx->r13;
721 iReg++;
722 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
723 pInput->Elements[iReg].Name = HvX64RegisterR14;
724 pInput->Elements[iReg].Value.Reg64 = pCtx->r14;
725 iReg++;
726 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
727 pInput->Elements[iReg].Name = HvX64RegisterR15;
728 pInput->Elements[iReg].Value.Reg64 = pCtx->r15;
729 iReg++;
730 }
731 }
732
733 /* RIP & Flags */
734 if (fWhat & CPUMCTX_EXTRN_RIP)
735 {
736 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
737 pInput->Elements[iReg].Name = HvX64RegisterRip;
738 pInput->Elements[iReg].Value.Reg64 = pCtx->rip;
739 iReg++;
740 }
741 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
742 {
743 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
744 pInput->Elements[iReg].Name = HvX64RegisterRflags;
745 pInput->Elements[iReg].Value.Reg64 = pCtx->rflags.u;
746 iReg++;
747 }
748
749 /* Segments */
750# define COPY_OUT_SEG(a_idx, a_enmName, a_SReg) \
751 do { \
752 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[a_idx]); \
753 pInput->Elements[a_idx].Name = a_enmName; \
754 pInput->Elements[a_idx].Value.Segment.Base = (a_SReg).u64Base; \
755 pInput->Elements[a_idx].Value.Segment.Limit = (a_SReg).u32Limit; \
756 pInput->Elements[a_idx].Value.Segment.Selector = (a_SReg).Sel; \
757 pInput->Elements[a_idx].Value.Segment.Attributes = (a_SReg).Attr.u; \
758 } while (0)
759 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
760 {
761 if (fWhat & CPUMCTX_EXTRN_CS)
762 {
763 COPY_OUT_SEG(iReg, HvX64RegisterCs, pCtx->cs);
764 iReg++;
765 }
766 if (fWhat & CPUMCTX_EXTRN_ES)
767 {
768 COPY_OUT_SEG(iReg, HvX64RegisterEs, pCtx->es);
769 iReg++;
770 }
771 if (fWhat & CPUMCTX_EXTRN_SS)
772 {
773 COPY_OUT_SEG(iReg, HvX64RegisterSs, pCtx->ss);
774 iReg++;
775 }
776 if (fWhat & CPUMCTX_EXTRN_DS)
777 {
778 COPY_OUT_SEG(iReg, HvX64RegisterDs, pCtx->ds);
779 iReg++;
780 }
781 if (fWhat & CPUMCTX_EXTRN_FS)
782 {
783 COPY_OUT_SEG(iReg, HvX64RegisterFs, pCtx->fs);
784 iReg++;
785 }
786 if (fWhat & CPUMCTX_EXTRN_GS)
787 {
788 COPY_OUT_SEG(iReg, HvX64RegisterGs, pCtx->gs);
789 iReg++;
790 }
791 }
792
793 /* Descriptor tables & task segment. */
794 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
795 {
796 if (fWhat & CPUMCTX_EXTRN_LDTR)
797 {
798 COPY_OUT_SEG(iReg, HvX64RegisterLdtr, pCtx->ldtr);
799 iReg++;
800 }
801 if (fWhat & CPUMCTX_EXTRN_TR)
802 {
803 COPY_OUT_SEG(iReg, HvX64RegisterTr, pCtx->tr);
804 iReg++;
805 }
806
807 if (fWhat & CPUMCTX_EXTRN_IDTR)
808 {
809 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
810 pInput->Elements[iReg].Value.Table.Pad[0] = 0;
811 pInput->Elements[iReg].Value.Table.Pad[1] = 0;
812 pInput->Elements[iReg].Value.Table.Pad[2] = 0;
813 pInput->Elements[iReg].Name = HvX64RegisterIdtr;
814 pInput->Elements[iReg].Value.Table.Limit = pCtx->idtr.cbIdt;
815 pInput->Elements[iReg].Value.Table.Base = pCtx->idtr.pIdt;
816 iReg++;
817 }
818 if (fWhat & CPUMCTX_EXTRN_GDTR)
819 {
820 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
821 pInput->Elements[iReg].Value.Table.Pad[0] = 0;
822 pInput->Elements[iReg].Value.Table.Pad[1] = 0;
823 pInput->Elements[iReg].Value.Table.Pad[2] = 0;
824 pInput->Elements[iReg].Name = HvX64RegisterGdtr;
825 pInput->Elements[iReg].Value.Table.Limit = pCtx->gdtr.cbGdt;
826 pInput->Elements[iReg].Value.Table.Base = pCtx->gdtr.pGdt;
827 iReg++;
828 }
829 }
830
831 /* Control registers. */
832 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
833 {
834 if (fWhat & CPUMCTX_EXTRN_CR0)
835 {
836 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
837 pInput->Elements[iReg].Name = HvX64RegisterCr0;
838 pInput->Elements[iReg].Value.Reg64 = pCtx->cr0;
839 iReg++;
840 }
841 if (fWhat & CPUMCTX_EXTRN_CR2)
842 {
843 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
844 pInput->Elements[iReg].Name = HvX64RegisterCr2;
845 pInput->Elements[iReg].Value.Reg64 = pCtx->cr2;
846 iReg++;
847 }
848 if (fWhat & CPUMCTX_EXTRN_CR3)
849 {
850 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
851 pInput->Elements[iReg].Name = HvX64RegisterCr3;
852 pInput->Elements[iReg].Value.Reg64 = pCtx->cr3;
853 iReg++;
854 }
855 if (fWhat & CPUMCTX_EXTRN_CR4)
856 {
857 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
858 pInput->Elements[iReg].Name = HvX64RegisterCr4;
859 pInput->Elements[iReg].Value.Reg64 = pCtx->cr4;
860 iReg++;
861 }
862 }
863 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
864 {
865 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
866 pInput->Elements[iReg].Name = HvX64RegisterCr8;
867 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestCR8(pGVCpu);
868 iReg++;
869 }
870
871 /** @todo does HvX64RegisterXfem mean XCR0? What about the related MSR? */
872
873 /* Debug registers. */
874/** @todo fixme. Figure out what the Hyper-V version of KVM_SET_GUEST_DEBUG would be. */
875 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
876 {
877 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
878 pInput->Elements[iReg].Name = HvX64RegisterDr0;
879 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR0(pGVCpu);
880 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[0];
881 iReg++;
882 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
883 pInput->Elements[iReg].Name = HvX64RegisterDr1;
884 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR1(pGVCpu);
885 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[1];
886 iReg++;
887 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
888 pInput->Elements[iReg].Name = HvX64RegisterDr2;
889 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR2(pGVCpu);
890 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[2];
891 iReg++;
892 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
893 pInput->Elements[iReg].Name = HvX64RegisterDr3;
894 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR3(pGVCpu);
895 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[3];
896 iReg++;
897 }
898 if (fWhat & CPUMCTX_EXTRN_DR6)
899 {
900 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
901 pInput->Elements[iReg].Name = HvX64RegisterDr6;
902 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR6(pGVCpu);
903 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[6];
904 iReg++;
905 }
906 if (fWhat & CPUMCTX_EXTRN_DR7)
907 {
908 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
909 pInput->Elements[iReg].Name = HvX64RegisterDr7;
910 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR7(pGVCpu);
911 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[7];
912 iReg++;
913 }
914
915 /* Floating point state. */
916 if (fWhat & CPUMCTX_EXTRN_X87)
917 {
918 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
919 pInput->Elements[iReg].Name = HvX64RegisterFpMmx0;
920 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->XState.x87.aRegs[0].au64[0];
921 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->XState.x87.aRegs[0].au64[1];
922 iReg++;
923 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
924 pInput->Elements[iReg].Name = HvX64RegisterFpMmx1;
925 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->XState.x87.aRegs[1].au64[0];
926 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->XState.x87.aRegs[1].au64[1];
927 iReg++;
928 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
929 pInput->Elements[iReg].Name = HvX64RegisterFpMmx2;
930 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->XState.x87.aRegs[2].au64[0];
931 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->XState.x87.aRegs[2].au64[1];
932 iReg++;
933 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
934 pInput->Elements[iReg].Name = HvX64RegisterFpMmx3;
935 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->XState.x87.aRegs[3].au64[0];
936 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->XState.x87.aRegs[3].au64[1];
937 iReg++;
938 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
939 pInput->Elements[iReg].Name = HvX64RegisterFpMmx4;
940 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->XState.x87.aRegs[4].au64[0];
941 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->XState.x87.aRegs[4].au64[1];
942 iReg++;
943 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
944 pInput->Elements[iReg].Name = HvX64RegisterFpMmx5;
945 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->XState.x87.aRegs[5].au64[0];
946 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->XState.x87.aRegs[5].au64[1];
947 iReg++;
948 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
949 pInput->Elements[iReg].Name = HvX64RegisterFpMmx6;
950 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->XState.x87.aRegs[6].au64[0];
951 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->XState.x87.aRegs[6].au64[1];
952 iReg++;
953 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
954 pInput->Elements[iReg].Name = HvX64RegisterFpMmx7;
955 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->XState.x87.aRegs[7].au64[0];
956 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->XState.x87.aRegs[7].au64[1];
957 iReg++;
958
959 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
960 pInput->Elements[iReg].Name = HvX64RegisterFpControlStatus;
961 pInput->Elements[iReg].Value.FpControlStatus.FpControl = pCtx->XState.x87.FCW;
962 pInput->Elements[iReg].Value.FpControlStatus.FpStatus = pCtx->XState.x87.FSW;
963 pInput->Elements[iReg].Value.FpControlStatus.FpTag = pCtx->XState.x87.FTW;
964 pInput->Elements[iReg].Value.FpControlStatus.Reserved = pCtx->XState.x87.FTW >> 8;
965 pInput->Elements[iReg].Value.FpControlStatus.LastFpOp = pCtx->XState.x87.FOP;
966 pInput->Elements[iReg].Value.FpControlStatus.LastFpRip = (pCtx->XState.x87.FPUIP)
967 | ((uint64_t)pCtx->XState.x87.CS << 32)
968 | ((uint64_t)pCtx->XState.x87.Rsrvd1 << 48);
969 iReg++;
970/** @todo we've got trouble if we try to write just SSE w/o X87. */
971 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
972 pInput->Elements[iReg].Name = HvX64RegisterXmmControlStatus;
973 pInput->Elements[iReg].Value.XmmControlStatus.LastFpRdp = (pCtx->XState.x87.FPUDP)
974 | ((uint64_t)pCtx->XState.x87.DS << 32)
975 | ((uint64_t)pCtx->XState.x87.Rsrvd2 << 48);
976 pInput->Elements[iReg].Value.XmmControlStatus.XmmStatusControl = pCtx->XState.x87.MXCSR;
977 pInput->Elements[iReg].Value.XmmControlStatus.XmmStatusControlMask = pCtx->XState.x87.MXCSR_MASK; /** @todo ??? (Isn't this an output field?) */
978 iReg++;
979 }
980
981 /* Vector state. */
982 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
983 {
984 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
985 pInput->Elements[iReg].Name = HvX64RegisterXmm0;
986 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[0].uXmm.s.Lo;
987 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[0].uXmm.s.Hi;
988 iReg++;
989 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
990 pInput->Elements[iReg].Name = HvX64RegisterXmm1;
991 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[1].uXmm.s.Lo;
992 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[1].uXmm.s.Hi;
993 iReg++;
994 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
995 pInput->Elements[iReg].Name = HvX64RegisterXmm2;
996 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[2].uXmm.s.Lo;
997 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[2].uXmm.s.Hi;
998 iReg++;
999 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1000 pInput->Elements[iReg].Name = HvX64RegisterXmm3;
1001 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[3].uXmm.s.Lo;
1002 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[3].uXmm.s.Hi;
1003 iReg++;
1004 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1005 pInput->Elements[iReg].Name = HvX64RegisterXmm4;
1006 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[4].uXmm.s.Lo;
1007 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[4].uXmm.s.Hi;
1008 iReg++;
1009 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1010 pInput->Elements[iReg].Name = HvX64RegisterXmm5;
1011 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[5].uXmm.s.Lo;
1012 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[5].uXmm.s.Hi;
1013 iReg++;
1014 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1015 pInput->Elements[iReg].Name = HvX64RegisterXmm6;
1016 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[6].uXmm.s.Lo;
1017 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[6].uXmm.s.Hi;
1018 iReg++;
1019 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1020 pInput->Elements[iReg].Name = HvX64RegisterXmm7;
1021 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[7].uXmm.s.Lo;
1022 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[7].uXmm.s.Hi;
1023 iReg++;
1024 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1025 pInput->Elements[iReg].Name = HvX64RegisterXmm8;
1026 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[8].uXmm.s.Lo;
1027 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[8].uXmm.s.Hi;
1028 iReg++;
1029 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1030 pInput->Elements[iReg].Name = HvX64RegisterXmm9;
1031 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[9].uXmm.s.Lo;
1032 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[9].uXmm.s.Hi;
1033 iReg++;
1034 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1035 pInput->Elements[iReg].Name = HvX64RegisterXmm10;
1036 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[10].uXmm.s.Lo;
1037 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[10].uXmm.s.Hi;
1038 iReg++;
1039 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1040 pInput->Elements[iReg].Name = HvX64RegisterXmm11;
1041 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[11].uXmm.s.Lo;
1042 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[11].uXmm.s.Hi;
1043 iReg++;
1044 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1045 pInput->Elements[iReg].Name = HvX64RegisterXmm12;
1046 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[12].uXmm.s.Lo;
1047 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[12].uXmm.s.Hi;
1048 iReg++;
1049 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1050 pInput->Elements[iReg].Name = HvX64RegisterXmm13;
1051 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[13].uXmm.s.Lo;
1052 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[13].uXmm.s.Hi;
1053 iReg++;
1054 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1055 pInput->Elements[iReg].Name = HvX64RegisterXmm14;
1056 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[14].uXmm.s.Lo;
1057 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[14].uXmm.s.Hi;
1058 iReg++;
1059 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1060 pInput->Elements[iReg].Name = HvX64RegisterXmm15;
1061 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->XState.x87.aXMM[15].uXmm.s.Lo;
1062 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->XState.x87.aXMM[15].uXmm.s.Hi;
1063 iReg++;
1064 }
1065
1066 /* MSRs */
1067 // HvX64RegisterTsc - don't touch
1068 if (fWhat & CPUMCTX_EXTRN_EFER)
1069 {
1070 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1071 pInput->Elements[iReg].Name = HvX64RegisterEfer;
1072 pInput->Elements[iReg].Value.Reg64 = pCtx->msrEFER;
1073 iReg++;
1074 }
1075 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1076 {
1077 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1078 pInput->Elements[iReg].Name = HvX64RegisterKernelGsBase;
1079 pInput->Elements[iReg].Value.Reg64 = pCtx->msrKERNELGSBASE;
1080 iReg++;
1081 }
1082 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1083 {
1084 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1085 pInput->Elements[iReg].Name = HvX64RegisterSysenterCs;
1086 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.cs;
1087 iReg++;
1088 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1089 pInput->Elements[iReg].Name = HvX64RegisterSysenterEip;
1090 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.eip;
1091 iReg++;
1092 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1093 pInput->Elements[iReg].Name = HvX64RegisterSysenterEsp;
1094 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.esp;
1095 iReg++;
1096 }
1097 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1098 {
1099 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1100 pInput->Elements[iReg].Name = HvX64RegisterStar;
1101 pInput->Elements[iReg].Value.Reg64 = pCtx->msrSTAR;
1102 iReg++;
1103 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1104 pInput->Elements[iReg].Name = HvX64RegisterLstar;
1105 pInput->Elements[iReg].Value.Reg64 = pCtx->msrLSTAR;
1106 iReg++;
1107 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1108 pInput->Elements[iReg].Name = HvX64RegisterCstar;
1109 pInput->Elements[iReg].Value.Reg64 = pCtx->msrCSTAR;
1110 iReg++;
1111 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1112 pInput->Elements[iReg].Name = HvX64RegisterSfmask;
1113 pInput->Elements[iReg].Value.Reg64 = pCtx->msrSFMASK;
1114 iReg++;
1115 }
1116 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1117 {
1118 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1119 pInput->Elements[iReg].Name = HvX64RegisterApicBase;
1120 pInput->Elements[iReg].Value.Reg64 = APICGetBaseMsrNoCheck(pGVCpu);
1121 iReg++;
1122 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1123 pInput->Elements[iReg].Name = HvX64RegisterPat;
1124 pInput->Elements[iReg].Value.Reg64 = pCtx->msrPAT;
1125 iReg++;
1126# if 0 /** @todo HvX64RegisterMtrrCap is read only? Seems it's not even readable. */
1127 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1128 pInput->Elements[iReg].Name = HvX64RegisterMtrrCap;
1129 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestIa32MtrrCap(pGVCpu);
1130 iReg++;
1131# endif
1132
1133 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pGVCpu);
1134
1135 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1136 pInput->Elements[iReg].Name = HvX64RegisterMtrrDefType;
1137 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrDefType;
1138 iReg++;
1139
1140 /** @todo we don't keep state for HvX64RegisterMtrrPhysBaseX and HvX64RegisterMtrrPhysMaskX */
1141
1142 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1143 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix64k00000;
1144 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix64K_00000;
1145 iReg++;
1146 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1147 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix16k80000;
1148 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix16K_80000;
1149 iReg++;
1150 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1151 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix16kA0000;
1152 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix16K_A0000;
1153 iReg++;
1154 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1155 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kC0000;
1156 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_C0000;
1157 iReg++;
1158 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1159 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kC8000;
1160 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_C8000;
1161 iReg++;
1162 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1163 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kD0000;
1164 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_D0000;
1165 iReg++;
1166 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1167 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kD8000;
1168 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_D8000;
1169 iReg++;
1170 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1171 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kE0000;
1172 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_E0000;
1173 iReg++;
1174 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1175 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kE8000;
1176 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_E8000;
1177 iReg++;
1178 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1179 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kF0000;
1180 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_F0000;
1181 iReg++;
1182 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1183 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kF8000;
1184 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_F8000;
1185 iReg++;
1186 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1187 pInput->Elements[iReg].Name = HvX64RegisterTscAux;
1188 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.TscAux;
1189 iReg++;
1190
1191# if 0 /** @todo Why can't we write these on Intel systems? Not that we really care... */
1192 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM);
1193 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
1194 {
1195 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1196 pInput->Elements[iReg].Name = HvX64RegisterIa32MiscEnable;
1197 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MiscEnable;
1198 iReg++;
1199 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1200 pInput->Elements[iReg].Name = HvX64RegisterIa32FeatureControl;
1201 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestIa32FeatureControl(pGVCpu);
1202 iReg++;
1203 }
1204# endif
1205 }
1206
1207 /* Event injection (clear it). */
1208 if (fWhat & CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)
1209 {
1210 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1211 pInput->Elements[iReg].Name = HvRegisterPendingInterruption;
1212 pInput->Elements[iReg].Value.Reg64 = 0;
1213 iReg++;
1214 }
1215
1216 /* Interruptibility state. This can get a little complicated since we get
1217 half of the state via HV_X64_VP_EXECUTION_STATE. */
1218 if ( (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
1219 == (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI) )
1220 {
1221 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1222 pInput->Elements[iReg].Name = HvRegisterInterruptState;
1223 pInput->Elements[iReg].Value.Reg64 = 0;
1224 if ( VMCPU_FF_IS_SET(pGVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1225 && EMGetInhibitInterruptsPC(pGVCpu) == pCtx->rip)
1226 pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1;
1227 if (VMCPU_FF_IS_SET(pGVCpu, VMCPU_FF_BLOCK_NMIS))
1228 pInput->Elements[iReg].Value.InterruptState.NmiMasked = 1;
1229 iReg++;
1230 }
1231 else if (fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT)
1232 {
1233 if ( pGVCpu->nem.s.fLastInterruptShadow
1234 || ( VMCPU_FF_IS_SET(pGVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1235 && EMGetInhibitInterruptsPC(pGVCpu) == pCtx->rip))
1236 {
1237 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1238 pInput->Elements[iReg].Name = HvRegisterInterruptState;
1239 pInput->Elements[iReg].Value.Reg64 = 0;
1240 if ( VMCPU_FF_IS_SET(pGVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1241 && EMGetInhibitInterruptsPC(pGVCpu) == pCtx->rip)
1242 pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1;
1243 /** @todo Retrieve NMI state, currently assuming it's zero. (yes this may happen on I/O) */
1244 //if (VMCPU_FF_IS_ANY_SET(pGVCpu, VMCPU_FF_BLOCK_NMIS))
1245 // pInput->Elements[iReg].Value.InterruptState.NmiMasked = 1;
1246 iReg++;
1247 }
1248 }
1249 else
1250 Assert(!(fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI));
1251
1252 /* Interrupt windows. Always set if active as Hyper-V seems to be forgetful. */
1253 uint8_t const fDesiredIntWin = pGVCpu->nem.s.fDesiredInterruptWindows;
1254 if ( fDesiredIntWin
1255 || pGVCpu->nem.s.fCurrentInterruptWindows != fDesiredIntWin)
1256 {
1257 pGVCpu->nem.s.fCurrentInterruptWindows = pGVCpu->nem.s.fDesiredInterruptWindows;
1258 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1259 pInput->Elements[iReg].Name = HvX64RegisterDeliverabilityNotifications;
1260 pInput->Elements[iReg].Value.DeliverabilityNotifications.AsUINT64 = fDesiredIntWin;
1261 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.NmiNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_NMI));
1262 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.InterruptNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_REGULAR));
1263 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.InterruptPriority == (fDesiredIntWin & NEM_WIN_INTW_F_PRIO_MASK) >> NEM_WIN_INTW_F_PRIO_SHIFT);
1264 iReg++;
1265 }
1266
1267 /// @todo HvRegisterPendingEvent0
1268 /// @todo HvRegisterPendingEvent1
1269
1270 /*
1271 * Set the registers.
1272 */
1273 Assert((uintptr_t)&pInput->Elements[iReg] - (uintptr_t)pGVCpu->nemr0.s.HypercallData.pbPage < PAGE_SIZE); /* max is 127 */
1274
1275 /*
1276 * Make the hypercall.
1277 */
1278 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, iReg),
1279 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0 /*GCPhysOutput*/);
1280 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(iReg),
1281 ("uResult=%RX64 iRegs=%#x\n", uResult, iReg),
1282 VERR_NEM_SET_REGISTERS_FAILED);
1283 //LogFlow(("nemR0WinExportState: uResult=%#RX64 iReg=%zu fWhat=%#018RX64 fExtrn=%#018RX64 -> %#018RX64\n", uResult, iReg, fWhat, pCtx->fExtrn,
1284 // pCtx->fExtrn | CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM ));
1285 pCtx->fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM;
1286 return VINF_SUCCESS;
1287}
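/*
 * Illustrative sketch (not part of the original file): the smallest possible
 * HvCallSetVpRegisters invocation, mirroring the pattern nemR0WinExportState
 * uses above: a fixed header followed by HV_REGISTER_ASSOC elements on the
 * per-VCPU hypercall page (at most 127 fit, as the assertion above notes).
 * The function name nemR0SketchSetOneRegister is hypothetical.
 */
# if 0
static int nemR0SketchSetOneRegister(PGVM pGVM, PGVMCPU pGVCpu, uint64_t uNewRip)
{
    HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
    pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
    pInput->VpIndex     = pGVCpu->idCpu;
    pInput->RsvdZ       = 0;

    HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[0]);
    pInput->Elements[0].Name        = HvX64RegisterRip;
    pInput->Elements[0].Value.Reg64 = uNewRip;

    uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
                                               pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0 /*GCPhysOutput*/);
    return uResult == HV_MAKE_CALL_REP_RET(1) ? VINF_SUCCESS : VERR_NEM_SET_REGISTERS_FAILED;
}
# endif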
1288#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
1289
1290
1291/**
1292 * Export the state to the native API (out of CPUMCTX).
1293 *
1294 * @returns VBox status code
1295 * @param pGVM The ring-0 VM handle.
1296 * @param idCpu The calling EMT. Necessary for getting the
1297 * hypercall page and arguments.
1298 */
1299VMMR0_INT_DECL(int) NEMR0ExportState(PGVM pGVM, VMCPUID idCpu)
1300{
1301#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
1302 /*
1303 * Validate the call.
1304 */
1305 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
1306 if (RT_SUCCESS(rc))
1307 {
1308 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1309 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
1310
1311 /*
1312 * Call worker.
1313 */
1314 rc = nemR0WinExportState(pGVM, pGVCpu, &pGVCpu->cpum.GstCtx);
1315 }
1316 return rc;
1317#else
1318 RT_NOREF(pGVM, idCpu);
1319 return VERR_NOT_IMPLEMENTED;
1320#endif
1321}
1322
1323
1324#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
1325/**
1326 * Worker for NEMR0ImportState.
1327 *
1328 * Intention is to use it internally later.
1329 *
1330 * @returns VBox status code.
1331 * @param pGVM The ring-0 VM handle.
1332 * @param pGVCpu The ring-0 VCPU handle.
1333 * @param pCtx The CPU context structure to import into.
1334 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
1335 * @param fCanUpdateCr3 Whether it's safe to update CR3 or not.
1336 */
1337NEM_TMPL_STATIC int nemR0WinImportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat, bool fCanUpdateCr3)
1338{
1339 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
1340 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
1341 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
1342 Assert(pCtx == &pGVCpu->cpum.GstCtx);
1343
1344 fWhat &= pCtx->fExtrn;
1345
1346 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
1347 pInput->VpIndex = pGVCpu->idCpu;
1348 pInput->fFlags = 0;
1349
1350 /* GPRs */
1351 uintptr_t iReg = 0;
1352 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1353 {
1354 if (fWhat & CPUMCTX_EXTRN_RAX)
1355 pInput->Names[iReg++] = HvX64RegisterRax;
1356 if (fWhat & CPUMCTX_EXTRN_RCX)
1357 pInput->Names[iReg++] = HvX64RegisterRcx;
1358 if (fWhat & CPUMCTX_EXTRN_RDX)
1359 pInput->Names[iReg++] = HvX64RegisterRdx;
1360 if (fWhat & CPUMCTX_EXTRN_RBX)
1361 pInput->Names[iReg++] = HvX64RegisterRbx;
1362 if (fWhat & CPUMCTX_EXTRN_RSP)
1363 pInput->Names[iReg++] = HvX64RegisterRsp;
1364 if (fWhat & CPUMCTX_EXTRN_RBP)
1365 pInput->Names[iReg++] = HvX64RegisterRbp;
1366 if (fWhat & CPUMCTX_EXTRN_RSI)
1367 pInput->Names[iReg++] = HvX64RegisterRsi;
1368 if (fWhat & CPUMCTX_EXTRN_RDI)
1369 pInput->Names[iReg++] = HvX64RegisterRdi;
1370 if (fWhat & CPUMCTX_EXTRN_R8_R15)
1371 {
1372 pInput->Names[iReg++] = HvX64RegisterR8;
1373 pInput->Names[iReg++] = HvX64RegisterR9;
1374 pInput->Names[iReg++] = HvX64RegisterR10;
1375 pInput->Names[iReg++] = HvX64RegisterR11;
1376 pInput->Names[iReg++] = HvX64RegisterR12;
1377 pInput->Names[iReg++] = HvX64RegisterR13;
1378 pInput->Names[iReg++] = HvX64RegisterR14;
1379 pInput->Names[iReg++] = HvX64RegisterR15;
1380 }
1381 }
1382
1383 /* RIP & Flags */
1384 if (fWhat & CPUMCTX_EXTRN_RIP)
1385 pInput->Names[iReg++] = HvX64RegisterRip;
1386 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
1387 pInput->Names[iReg++] = HvX64RegisterRflags;
1388
1389 /* Segments */
1390 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
1391 {
1392 if (fWhat & CPUMCTX_EXTRN_CS)
1393 pInput->Names[iReg++] = HvX64RegisterCs;
1394 if (fWhat & CPUMCTX_EXTRN_ES)
1395 pInput->Names[iReg++] = HvX64RegisterEs;
1396 if (fWhat & CPUMCTX_EXTRN_SS)
1397 pInput->Names[iReg++] = HvX64RegisterSs;
1398 if (fWhat & CPUMCTX_EXTRN_DS)
1399 pInput->Names[iReg++] = HvX64RegisterDs;
1400 if (fWhat & CPUMCTX_EXTRN_FS)
1401 pInput->Names[iReg++] = HvX64RegisterFs;
1402 if (fWhat & CPUMCTX_EXTRN_GS)
1403 pInput->Names[iReg++] = HvX64RegisterGs;
1404 }
1405
1406 /* Descriptor tables and the task segment. */
1407 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
1408 {
1409 if (fWhat & CPUMCTX_EXTRN_LDTR)
1410 pInput->Names[iReg++] = HvX64RegisterLdtr;
1411 if (fWhat & CPUMCTX_EXTRN_TR)
1412 pInput->Names[iReg++] = HvX64RegisterTr;
1413 if (fWhat & CPUMCTX_EXTRN_IDTR)
1414 pInput->Names[iReg++] = HvX64RegisterIdtr;
1415 if (fWhat & CPUMCTX_EXTRN_GDTR)
1416 pInput->Names[iReg++] = HvX64RegisterGdtr;
1417 }
1418
1419 /* Control registers. */
1420 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
1421 {
1422 if (fWhat & CPUMCTX_EXTRN_CR0)
1423 pInput->Names[iReg++] = HvX64RegisterCr0;
1424 if (fWhat & CPUMCTX_EXTRN_CR2)
1425 pInput->Names[iReg++] = HvX64RegisterCr2;
1426 if (fWhat & CPUMCTX_EXTRN_CR3)
1427 pInput->Names[iReg++] = HvX64RegisterCr3;
1428 if (fWhat & CPUMCTX_EXTRN_CR4)
1429 pInput->Names[iReg++] = HvX64RegisterCr4;
1430 }
1431 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
1432 pInput->Names[iReg++] = HvX64RegisterCr8;
1433
1434 /* Debug registers. */
1435 if (fWhat & CPUMCTX_EXTRN_DR7)
1436 pInput->Names[iReg++] = HvX64RegisterDr7;
1437 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
1438 {
1439 if (!(fWhat & CPUMCTX_EXTRN_DR7) && (pCtx->fExtrn & CPUMCTX_EXTRN_DR7))
1440 {
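 /* When DR0..DR3 are requested while DR7 is still external, DR7 is fetched as
    well so it can be imported first; see the hack-alert note in the copy-back
    code below. */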
1441 fWhat |= CPUMCTX_EXTRN_DR7;
1442 pInput->Names[iReg++] = HvX64RegisterDr7;
1443 }
1444 pInput->Names[iReg++] = HvX64RegisterDr0;
1445 pInput->Names[iReg++] = HvX64RegisterDr1;
1446 pInput->Names[iReg++] = HvX64RegisterDr2;
1447 pInput->Names[iReg++] = HvX64RegisterDr3;
1448 }
1449 if (fWhat & CPUMCTX_EXTRN_DR6)
1450 pInput->Names[iReg++] = HvX64RegisterDr6;
1451
1452 /* Floating point state. */
1453 if (fWhat & CPUMCTX_EXTRN_X87)
1454 {
1455 pInput->Names[iReg++] = HvX64RegisterFpMmx0;
1456 pInput->Names[iReg++] = HvX64RegisterFpMmx1;
1457 pInput->Names[iReg++] = HvX64RegisterFpMmx2;
1458 pInput->Names[iReg++] = HvX64RegisterFpMmx3;
1459 pInput->Names[iReg++] = HvX64RegisterFpMmx4;
1460 pInput->Names[iReg++] = HvX64RegisterFpMmx5;
1461 pInput->Names[iReg++] = HvX64RegisterFpMmx6;
1462 pInput->Names[iReg++] = HvX64RegisterFpMmx7;
1463 pInput->Names[iReg++] = HvX64RegisterFpControlStatus;
1464 }
1465 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
1466 pInput->Names[iReg++] = HvX64RegisterXmmControlStatus;
1467
1468 /* Vector state. */
1469 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
1470 {
1471 pInput->Names[iReg++] = HvX64RegisterXmm0;
1472 pInput->Names[iReg++] = HvX64RegisterXmm1;
1473 pInput->Names[iReg++] = HvX64RegisterXmm2;
1474 pInput->Names[iReg++] = HvX64RegisterXmm3;
1475 pInput->Names[iReg++] = HvX64RegisterXmm4;
1476 pInput->Names[iReg++] = HvX64RegisterXmm5;
1477 pInput->Names[iReg++] = HvX64RegisterXmm6;
1478 pInput->Names[iReg++] = HvX64RegisterXmm7;
1479 pInput->Names[iReg++] = HvX64RegisterXmm8;
1480 pInput->Names[iReg++] = HvX64RegisterXmm9;
1481 pInput->Names[iReg++] = HvX64RegisterXmm10;
1482 pInput->Names[iReg++] = HvX64RegisterXmm11;
1483 pInput->Names[iReg++] = HvX64RegisterXmm12;
1484 pInput->Names[iReg++] = HvX64RegisterXmm13;
1485 pInput->Names[iReg++] = HvX64RegisterXmm14;
1486 pInput->Names[iReg++] = HvX64RegisterXmm15;
1487 }
1488
1489 /* MSRs */
1490 // HvX64RegisterTsc - don't touch
1491 if (fWhat & CPUMCTX_EXTRN_EFER)
1492 pInput->Names[iReg++] = HvX64RegisterEfer;
1493 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1494 pInput->Names[iReg++] = HvX64RegisterKernelGsBase;
1495 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1496 {
1497 pInput->Names[iReg++] = HvX64RegisterSysenterCs;
1498 pInput->Names[iReg++] = HvX64RegisterSysenterEip;
1499 pInput->Names[iReg++] = HvX64RegisterSysenterEsp;
1500 }
1501 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1502 {
1503 pInput->Names[iReg++] = HvX64RegisterStar;
1504 pInput->Names[iReg++] = HvX64RegisterLstar;
1505 pInput->Names[iReg++] = HvX64RegisterCstar;
1506 pInput->Names[iReg++] = HvX64RegisterSfmask;
1507 }
1508
1509# ifdef LOG_ENABLED
1510 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM);
1511# endif
1512 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1513 {
1514 pInput->Names[iReg++] = HvX64RegisterApicBase; /// @todo APIC BASE
1515 pInput->Names[iReg++] = HvX64RegisterPat;
1516# if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
1517 pInput->Names[iReg++] = HvX64RegisterMtrrCap;
1518# endif
1519 pInput->Names[iReg++] = HvX64RegisterMtrrDefType;
1520 pInput->Names[iReg++] = HvX64RegisterMtrrFix64k00000;
1521 pInput->Names[iReg++] = HvX64RegisterMtrrFix16k80000;
1522 pInput->Names[iReg++] = HvX64RegisterMtrrFix16kA0000;
1523 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kC0000;
1524 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kC8000;
1525 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kD0000;
1526 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kD8000;
1527 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kE0000;
1528 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kE8000;
1529 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kF0000;
1530 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kF8000;
1531 pInput->Names[iReg++] = HvX64RegisterTscAux;
1532# if 0 /** @todo why can't we read HvX64RegisterIa32MiscEnable? */
1533 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
1534 pInput->Names[iReg++] = HvX64RegisterIa32MiscEnable;
1535# endif
1536# ifdef LOG_ENABLED
1537 if (enmCpuVendor != CPUMCPUVENDOR_AMD && enmCpuVendor != CPUMCPUVENDOR_HYGON)
1538 pInput->Names[iReg++] = HvX64RegisterIa32FeatureControl;
1539# endif
1540 }
1541
1542 /* Interruptibility. */
1543 if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
1544 {
1545 pInput->Names[iReg++] = HvRegisterInterruptState;
1546 pInput->Names[iReg++] = HvX64RegisterRip;
1547 }
1548
1549 /* event injection */
1550 pInput->Names[iReg++] = HvRegisterPendingInterruption;
1551 pInput->Names[iReg++] = HvRegisterPendingEvent0;
1552 pInput->Names[iReg++] = HvRegisterPendingEvent1;
1553 size_t const cRegs = iReg;
1554 size_t const cbInput = RT_ALIGN_Z(RT_UOFFSETOF_DYN(HV_INPUT_GET_VP_REGISTERS, Names[cRegs]), 32);
1555
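 /* The register values are returned in the same per-VCPU hypercall page,
    immediately after the 32-byte aligned input block; the hypercall below is
    given the physical addresses of both the input and the output area
    (HCPhysPage and HCPhysPage + cbInput). */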
1556 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
1557 Assert((uintptr_t)&paValues[cRegs] - (uintptr_t)pGVCpu->nemr0.s.HypercallData.pbPage < PAGE_SIZE); /* (max is around 168 registers) */
1558 RT_BZERO(paValues, cRegs * sizeof(paValues[0]));
1559
1560 /*
1561 * Make the hypercall.
1562 */
1563 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, cRegs),
1564 pGVCpu->nemr0.s.HypercallData.HCPhysPage,
1565 pGVCpu->nemr0.s.HypercallData.HCPhysPage + cbInput);
1566 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(cRegs),
1567 ("uResult=%RX64 cRegs=%#x\n", uResult, cRegs),
1568 VERR_NEM_GET_REGISTERS_FAILED);
1569 //LogFlow(("nemR0WinImportState: uResult=%#RX64 iReg=%zu fWhat=%#018RX64 fExtr=%#018RX64\n", uResult, cRegs, fWhat, pCtx->fExtrn));
1570
1571 /*
1572 * Copy information to the CPUM context.
1573 */
1574 iReg = 0;
1575
1576 /* GPRs */
1577 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1578 {
1579 if (fWhat & CPUMCTX_EXTRN_RAX)
1580 {
1581 Assert(pInput->Names[iReg] == HvX64RegisterRax);
1582 pCtx->rax = paValues[iReg++].Reg64;
1583 }
1584 if (fWhat & CPUMCTX_EXTRN_RCX)
1585 {
1586 Assert(pInput->Names[iReg] == HvX64RegisterRcx);
1587 pCtx->rcx = paValues[iReg++].Reg64;
1588 }
1589 if (fWhat & CPUMCTX_EXTRN_RDX)
1590 {
1591 Assert(pInput->Names[iReg] == HvX64RegisterRdx);
1592 pCtx->rdx = paValues[iReg++].Reg64;
1593 }
1594 if (fWhat & CPUMCTX_EXTRN_RBX)
1595 {
1596 Assert(pInput->Names[iReg] == HvX64RegisterRbx);
1597 pCtx->rbx = paValues[iReg++].Reg64;
1598 }
1599 if (fWhat & CPUMCTX_EXTRN_RSP)
1600 {
1601 Assert(pInput->Names[iReg] == HvX64RegisterRsp);
1602 pCtx->rsp = paValues[iReg++].Reg64;
1603 }
1604 if (fWhat & CPUMCTX_EXTRN_RBP)
1605 {
1606 Assert(pInput->Names[iReg] == HvX64RegisterRbp);
1607 pCtx->rbp = paValues[iReg++].Reg64;
1608 }
1609 if (fWhat & CPUMCTX_EXTRN_RSI)
1610 {
1611 Assert(pInput->Names[iReg] == HvX64RegisterRsi);
1612 pCtx->rsi = paValues[iReg++].Reg64;
1613 }
1614 if (fWhat & CPUMCTX_EXTRN_RDI)
1615 {
1616 Assert(pInput->Names[iReg] == HvX64RegisterRdi);
1617 pCtx->rdi = paValues[iReg++].Reg64;
1618 }
1619 if (fWhat & CPUMCTX_EXTRN_R8_R15)
1620 {
1621 Assert(pInput->Names[iReg] == HvX64RegisterR8);
1622 Assert(pInput->Names[iReg + 7] == HvX64RegisterR15);
1623 pCtx->r8 = paValues[iReg++].Reg64;
1624 pCtx->r9 = paValues[iReg++].Reg64;
1625 pCtx->r10 = paValues[iReg++].Reg64;
1626 pCtx->r11 = paValues[iReg++].Reg64;
1627 pCtx->r12 = paValues[iReg++].Reg64;
1628 pCtx->r13 = paValues[iReg++].Reg64;
1629 pCtx->r14 = paValues[iReg++].Reg64;
1630 pCtx->r15 = paValues[iReg++].Reg64;
1631 }
1632 }
1633
1634 /* RIP & Flags */
1635 if (fWhat & CPUMCTX_EXTRN_RIP)
1636 {
1637 Assert(pInput->Names[iReg] == HvX64RegisterRip);
1638 pCtx->rip = paValues[iReg++].Reg64;
1639 }
1640 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
1641 {
1642 Assert(pInput->Names[iReg] == HvX64RegisterRflags);
1643 pCtx->rflags.u = paValues[iReg++].Reg64;
1644 }
1645
1646 /* Segments */
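 /* Helper for copying one hypervisor segment register (selector, base, limit
    and attributes) into a CPUMSELREG and marking it valid. */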
1647# define COPY_BACK_SEG(a_idx, a_enmName, a_SReg) \
1648 do { \
1649 Assert(pInput->Names[a_idx] == a_enmName); \
1650 (a_SReg).u64Base = paValues[a_idx].Segment.Base; \
1651 (a_SReg).u32Limit = paValues[a_idx].Segment.Limit; \
1652 (a_SReg).ValidSel = (a_SReg).Sel = paValues[a_idx].Segment.Selector; \
1653 (a_SReg).Attr.u = paValues[a_idx].Segment.Attributes; \
1654 (a_SReg).fFlags = CPUMSELREG_FLAGS_VALID; \
1655 } while (0)
1656 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
1657 {
1658 if (fWhat & CPUMCTX_EXTRN_CS)
1659 {
1660 COPY_BACK_SEG(iReg, HvX64RegisterCs, pCtx->cs);
1661 iReg++;
1662 }
1663 if (fWhat & CPUMCTX_EXTRN_ES)
1664 {
1665 COPY_BACK_SEG(iReg, HvX64RegisterEs, pCtx->es);
1666 iReg++;
1667 }
1668 if (fWhat & CPUMCTX_EXTRN_SS)
1669 {
1670 COPY_BACK_SEG(iReg, HvX64RegisterSs, pCtx->ss);
1671 iReg++;
1672 }
1673 if (fWhat & CPUMCTX_EXTRN_DS)
1674 {
1675 COPY_BACK_SEG(iReg, HvX64RegisterDs, pCtx->ds);
1676 iReg++;
1677 }
1678 if (fWhat & CPUMCTX_EXTRN_FS)
1679 {
1680 COPY_BACK_SEG(iReg, HvX64RegisterFs, pCtx->fs);
1681 iReg++;
1682 }
1683 if (fWhat & CPUMCTX_EXTRN_GS)
1684 {
1685 COPY_BACK_SEG(iReg, HvX64RegisterGs, pCtx->gs);
1686 iReg++;
1687 }
1688 }
1689 /* Descriptor tables and the task segment. */
1690 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
1691 {
1692 if (fWhat & CPUMCTX_EXTRN_LDTR)
1693 {
1694 COPY_BACK_SEG(iReg, HvX64RegisterLdtr, pCtx->ldtr);
1695 iReg++;
1696 }
1697 if (fWhat & CPUMCTX_EXTRN_TR)
1698 {
1699 /* AMD-V likes loading TR in the AVAIL state, whereas Intel insists on BUSY. So,
1700 to avoid triggering sanity assertions around the code, always fix this up. */
1701 COPY_BACK_SEG(iReg, HvX64RegisterTr, pCtx->tr);
1702 switch (pCtx->tr.Attr.n.u4Type)
1703 {
1704 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1705 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1706 break;
1707 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
1708 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
1709 break;
1710 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
1711 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;
1712 break;
1713 }
1714 iReg++;
1715 }
1716 if (fWhat & CPUMCTX_EXTRN_IDTR)
1717 {
1718 Assert(pInput->Names[iReg] == HvX64RegisterIdtr);
1719 pCtx->idtr.cbIdt = paValues[iReg].Table.Limit;
1720 pCtx->idtr.pIdt = paValues[iReg].Table.Base;
1721 iReg++;
1722 }
1723 if (fWhat & CPUMCTX_EXTRN_GDTR)
1724 {
1725 Assert(pInput->Names[iReg] == HvX64RegisterGdtr);
1726 pCtx->gdtr.cbGdt = paValues[iReg].Table.Limit;
1727 pCtx->gdtr.pGdt = paValues[iReg].Table.Base;
1728 iReg++;
1729 }
1730 }
1731
1732 /* Control registers. */
1733 bool fMaybeChangedMode = false;
1734 bool fUpdateCr3 = false;
1735 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
1736 {
1737 if (fWhat & CPUMCTX_EXTRN_CR0)
1738 {
1739 Assert(pInput->Names[iReg] == HvX64RegisterCr0);
1740 if (pCtx->cr0 != paValues[iReg].Reg64)
1741 {
1742 CPUMSetGuestCR0(pGVCpu, paValues[iReg].Reg64);
1743 fMaybeChangedMode = true;
1744 }
1745 iReg++;
1746 }
1747 if (fWhat & CPUMCTX_EXTRN_CR2)
1748 {
1749 Assert(pInput->Names[iReg] == HvX64RegisterCr2);
1750 pCtx->cr2 = paValues[iReg].Reg64;
1751 iReg++;
1752 }
1753 if (fWhat & CPUMCTX_EXTRN_CR3)
1754 {
1755 Assert(pInput->Names[iReg] == HvX64RegisterCr3);
1756 if (pCtx->cr3 != paValues[iReg].Reg64)
1757 {
1758 CPUMSetGuestCR3(pGVCpu, paValues[iReg].Reg64);
1759 fUpdateCr3 = true;
1760 }
1761 iReg++;
1762 }
1763 if (fWhat & CPUMCTX_EXTRN_CR4)
1764 {
1765 Assert(pInput->Names[iReg] == HvX64RegisterCr4);
1766 if (pCtx->cr4 != paValues[iReg].Reg64)
1767 {
1768 CPUMSetGuestCR4(pGVCpu, paValues[iReg].Reg64);
1769 fMaybeChangedMode = true;
1770 }
1771 iReg++;
1772 }
1773 }
1774 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
1775 {
1776 Assert(pInput->Names[iReg] == HvX64RegisterCr8);
1777 APICSetTpr(pGVCpu, (uint8_t)paValues[iReg].Reg64 << 4);
1778 iReg++;
1779 }
1780
1781 /* Debug registers. */
1782 if (fWhat & CPUMCTX_EXTRN_DR7)
1783 {
1784 Assert(pInput->Names[iReg] == HvX64RegisterDr7);
1785 if (pCtx->dr[7] != paValues[iReg].Reg64)
1786 CPUMSetGuestDR7(pGVCpu, paValues[iReg].Reg64);
1787 pCtx->fExtrn &= ~CPUMCTX_EXTRN_DR7; /* Hack alert! Avoids asserting when processing CPUMCTX_EXTRN_DR0_DR3. */
1788 iReg++;
1789 }
1790 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
1791 {
1792 Assert(pInput->Names[iReg] == HvX64RegisterDr0);
1793 Assert(pInput->Names[iReg+3] == HvX64RegisterDr3);
1794 if (pCtx->dr[0] != paValues[iReg].Reg64)
1795 CPUMSetGuestDR0(pGVCpu, paValues[iReg].Reg64);
1796 iReg++;
1797 if (pCtx->dr[1] != paValues[iReg].Reg64)
1798 CPUMSetGuestDR1(pGVCpu, paValues[iReg].Reg64);
1799 iReg++;
1800 if (pCtx->dr[2] != paValues[iReg].Reg64)
1801 CPUMSetGuestDR2(pGVCpu, paValues[iReg].Reg64);
1802 iReg++;
1803 if (pCtx->dr[3] != paValues[iReg].Reg64)
1804 CPUMSetGuestDR3(pGVCpu, paValues[iReg].Reg64);
1805 iReg++;
1806 }
1807 if (fWhat & CPUMCTX_EXTRN_DR6)
1808 {
1809 Assert(pInput->Names[iReg] == HvX64RegisterDr6);
1810 if (pCtx->dr[6] != paValues[iReg].Reg64)
1811 CPUMSetGuestDR6(pGVCpu, paValues[iReg].Reg64);
1812 iReg++;
1813 }
1814
1815 /* Floating point state. */
1816 if (fWhat & CPUMCTX_EXTRN_X87)
1817 {
1818 Assert(pInput->Names[iReg] == HvX64RegisterFpMmx0);
1819 Assert(pInput->Names[iReg + 7] == HvX64RegisterFpMmx7);
1820 pCtx->XState.x87.aRegs[0].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1821 pCtx->XState.x87.aRegs[0].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1822 iReg++;
1823 pCtx->XState.x87.aRegs[1].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1824 pCtx->XState.x87.aRegs[1].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1825 iReg++;
1826 pCtx->XState.x87.aRegs[2].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1827 pCtx->XState.x87.aRegs[2].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1828 iReg++;
1829 pCtx->XState.x87.aRegs[3].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1830 pCtx->XState.x87.aRegs[3].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1831 iReg++;
1832 pCtx->XState.x87.aRegs[4].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1833 pCtx->XState.x87.aRegs[4].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1834 iReg++;
1835 pCtx->XState.x87.aRegs[5].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1836 pCtx->XState.x87.aRegs[5].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1837 iReg++;
1838 pCtx->XState.x87.aRegs[6].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1839 pCtx->XState.x87.aRegs[6].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1840 iReg++;
1841 pCtx->XState.x87.aRegs[7].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1842 pCtx->XState.x87.aRegs[7].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1843 iReg++;
1844
1845 Assert(pInput->Names[iReg] == HvX64RegisterFpControlStatus);
1846 pCtx->XState.x87.FCW = paValues[iReg].FpControlStatus.FpControl;
1847 pCtx->XState.x87.FSW = paValues[iReg].FpControlStatus.FpStatus;
1848 pCtx->XState.x87.FTW = paValues[iReg].FpControlStatus.FpTag
1849 /*| (paValues[iReg].FpControlStatus.Reserved << 8)*/;
1850 pCtx->XState.x87.FOP = paValues[iReg].FpControlStatus.LastFpOp;
1851 pCtx->XState.x87.FPUIP = (uint32_t)paValues[iReg].FpControlStatus.LastFpRip;
1852 pCtx->XState.x87.CS = (uint16_t)(paValues[iReg].FpControlStatus.LastFpRip >> 32);
1853 pCtx->XState.x87.Rsrvd1 = (uint16_t)(paValues[iReg].FpControlStatus.LastFpRip >> 48);
1854 iReg++;
1855 }
1856
1857 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
1858 {
1859 Assert(pInput->Names[iReg] == HvX64RegisterXmmControlStatus);
1860 if (fWhat & CPUMCTX_EXTRN_X87)
1861 {
1862 pCtx->XState.x87.FPUDP = (uint32_t)paValues[iReg].XmmControlStatus.LastFpRdp;
1863 pCtx->XState.x87.DS = (uint16_t)(paValues[iReg].XmmControlStatus.LastFpRdp >> 32);
1864 pCtx->XState.x87.Rsrvd2 = (uint16_t)(paValues[iReg].XmmControlStatus.LastFpRdp >> 48);
1865 }
1866 pCtx->XState.x87.MXCSR = paValues[iReg].XmmControlStatus.XmmStatusControl;
1867 pCtx->XState.x87.MXCSR_MASK = paValues[iReg].XmmControlStatus.XmmStatusControlMask; /** @todo ??? (Isn't this an output field?) */
1868 iReg++;
1869 }
1870
1871 /* Vector state. */
1872 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
1873 {
1874 Assert(pInput->Names[iReg] == HvX64RegisterXmm0);
1875 Assert(pInput->Names[iReg+15] == HvX64RegisterXmm15);
1876 pCtx->XState.x87.aXMM[0].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1877 pCtx->XState.x87.aXMM[0].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1878 iReg++;
1879 pCtx->XState.x87.aXMM[1].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1880 pCtx->XState.x87.aXMM[1].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1881 iReg++;
1882 pCtx->XState.x87.aXMM[2].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1883 pCtx->XState.x87.aXMM[2].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1884 iReg++;
1885 pCtx->XState.x87.aXMM[3].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1886 pCtx->XState.x87.aXMM[3].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1887 iReg++;
1888 pCtx->XState.x87.aXMM[4].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1889 pCtx->XState.x87.aXMM[4].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1890 iReg++;
1891 pCtx->XState.x87.aXMM[5].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1892 pCtx->XState.x87.aXMM[5].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1893 iReg++;
1894 pCtx->XState.x87.aXMM[6].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1895 pCtx->XState.x87.aXMM[6].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1896 iReg++;
1897 pCtx->XState.x87.aXMM[7].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1898 pCtx->XState.x87.aXMM[7].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1899 iReg++;
1900 pCtx->XState.x87.aXMM[8].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1901 pCtx->XState.x87.aXMM[8].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1902 iReg++;
1903 pCtx->XState.x87.aXMM[9].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1904 pCtx->XState.x87.aXMM[9].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1905 iReg++;
1906 pCtx->XState.x87.aXMM[10].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1907 pCtx->XState.x87.aXMM[10].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1908 iReg++;
1909 pCtx->XState.x87.aXMM[11].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1910 pCtx->XState.x87.aXMM[11].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1911 iReg++;
1912 pCtx->XState.x87.aXMM[12].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1913 pCtx->XState.x87.aXMM[12].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1914 iReg++;
1915 pCtx->XState.x87.aXMM[13].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1916 pCtx->XState.x87.aXMM[13].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1917 iReg++;
1918 pCtx->XState.x87.aXMM[14].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1919 pCtx->XState.x87.aXMM[14].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1920 iReg++;
1921 pCtx->XState.x87.aXMM[15].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1922 pCtx->XState.x87.aXMM[15].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1923 iReg++;
1924 }
1925
1926
1927 /* MSRs */
1928 // HvX64RegisterTsc - don't touch
1929 if (fWhat & CPUMCTX_EXTRN_EFER)
1930 {
1931 Assert(pInput->Names[iReg] == HvX64RegisterEfer);
1932 if (paValues[iReg].Reg64 != pCtx->msrEFER)
1933 {
1934 Log7(("NEM/%u: MSR EFER changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrEFER, paValues[iReg].Reg64));
1935 if ((paValues[iReg].Reg64 ^ pCtx->msrEFER) & MSR_K6_EFER_NXE)
1936 PGMNotifyNxeChanged(pGVCpu, RT_BOOL(paValues[iReg].Reg64 & MSR_K6_EFER_NXE));
1937 pCtx->msrEFER = paValues[iReg].Reg64;
1938 fMaybeChangedMode = true;
1939 }
1940 iReg++;
1941 }
1942 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1943 {
1944 Assert(pInput->Names[iReg] == HvX64RegisterKernelGsBase);
1945 if (pCtx->msrKERNELGSBASE != paValues[iReg].Reg64)
1946 Log7(("NEM/%u: MSR KERNELGSBASE changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrKERNELGSBASE, paValues[iReg].Reg64));
1947 pCtx->msrKERNELGSBASE = paValues[iReg].Reg64;
1948 iReg++;
1949 }
1950 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1951 {
1952 Assert(pInput->Names[iReg] == HvX64RegisterSysenterCs);
1953 if (pCtx->SysEnter.cs != paValues[iReg].Reg64)
1954 Log7(("NEM/%u: MSR SYSENTER.CS changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->SysEnter.cs, paValues[iReg].Reg64));
1955 pCtx->SysEnter.cs = paValues[iReg].Reg64;
1956 iReg++;
1957
1958 Assert(pInput->Names[iReg] == HvX64RegisterSysenterEip);
1959 if (pCtx->SysEnter.eip != paValues[iReg].Reg64)
1960 Log7(("NEM/%u: MSR SYSENTER.EIP changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->SysEnter.eip, paValues[iReg].Reg64));
1961 pCtx->SysEnter.eip = paValues[iReg].Reg64;
1962 iReg++;
1963
1964 Assert(pInput->Names[iReg] == HvX64RegisterSysenterEsp);
1965 if (pCtx->SysEnter.esp != paValues[iReg].Reg64)
1966 Log7(("NEM/%u: MSR SYSENTER.ESP changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->SysEnter.esp, paValues[iReg].Reg64));
1967 pCtx->SysEnter.esp = paValues[iReg].Reg64;
1968 iReg++;
1969 }
1970 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1971 {
1972 Assert(pInput->Names[iReg] == HvX64RegisterStar);
1973 if (pCtx->msrSTAR != paValues[iReg].Reg64)
1974 Log7(("NEM/%u: MSR STAR changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrSTAR, paValues[iReg].Reg64));
1975 pCtx->msrSTAR = paValues[iReg].Reg64;
1976 iReg++;
1977
1978 Assert(pInput->Names[iReg] == HvX64RegisterLstar);
1979 if (pCtx->msrLSTAR != paValues[iReg].Reg64)
1980 Log7(("NEM/%u: MSR LSTAR changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrLSTAR, paValues[iReg].Reg64));
1981 pCtx->msrLSTAR = paValues[iReg].Reg64;
1982 iReg++;
1983
1984 Assert(pInput->Names[iReg] == HvX64RegisterCstar);
1985 if (pCtx->msrCSTAR != paValues[iReg].Reg64)
1986 Log7(("NEM/%u: MSR CSTAR changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrCSTAR, paValues[iReg].Reg64));
1987 pCtx->msrCSTAR = paValues[iReg].Reg64;
1988 iReg++;
1989
1990 Assert(pInput->Names[iReg] == HvX64RegisterSfmask);
1991 if (pCtx->msrSFMASK != paValues[iReg].Reg64)
1992 Log7(("NEM/%u: MSR SFMASK changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrSFMASK, paValues[iReg].Reg64));
1993 pCtx->msrSFMASK = paValues[iReg].Reg64;
1994 iReg++;
1995 }
1996 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1997 {
1998 Assert(pInput->Names[iReg] == HvX64RegisterApicBase);
1999 const uint64_t uOldBase = APICGetBaseMsrNoCheck(pGVCpu);
2000 if (paValues[iReg].Reg64 != uOldBase)
2001 {
2002 Log7(("NEM/%u: MSR APICBase changed %RX64 -> %RX64 (%RX64)\n",
2003 pGVCpu->idCpu, uOldBase, paValues[iReg].Reg64, paValues[iReg].Reg64 ^ uOldBase));
2004 int rc2 = APICSetBaseMsr(pGVCpu, paValues[iReg].Reg64);
2005 AssertLogRelMsg(rc2 == VINF_SUCCESS, ("rc2=%Rrc [%#RX64]\n", rc2, paValues[iReg].Reg64));
2006 }
2007 iReg++;
2008
2009 Assert(pInput->Names[iReg] == HvX64RegisterPat);
2010 if (pCtx->msrPAT != paValues[iReg].Reg64)
2011 Log7(("NEM/%u: MSR PAT changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrPAT, paValues[iReg].Reg64));
2012 pCtx->msrPAT = paValues[iReg].Reg64;
2013 iReg++;
2014
2015# if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
2016 Assert(pInput->Names[iReg] == HvX64RegisterMtrrCap);
2017 if (paValues[iReg].Reg64 != CPUMGetGuestIa32MtrrCap(pGVCpu))
2018 Log7(("NEM/%u: MSR MTRR_CAP changed %RX64 -> %RX64 (!!)\n", pGVCpu->idCpu, CPUMGetGuestIa32MtrrCap(pGVCpu), paValues[iReg].Reg64));
2019 iReg++;
2020# endif
2021
2022 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pGVCpu);
2023 Assert(pInput->Names[iReg] == HvX64RegisterMtrrDefType);
2024 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrDefType )
2025 Log7(("NEM/%u: MSR MTRR_DEF_TYPE changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrDefType, paValues[iReg].Reg64));
2026 pCtxMsrs->msr.MtrrDefType = paValues[iReg].Reg64;
2027 iReg++;
2028
2029 /** @todo we don't keep state for HvX64RegisterMtrrPhysBaseX and HvX64RegisterMtrrPhysMaskX */
2030
2031 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix64k00000);
2032 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix64K_00000 )
2033 Log7(("NEM/%u: MSR MTRR_FIX64K_00000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix64K_00000, paValues[iReg].Reg64));
2034 pCtxMsrs->msr.MtrrFix64K_00000 = paValues[iReg].Reg64;
2035 iReg++;
2036
2037 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix16k80000);
2038 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix16K_80000 )
2039 Log7(("NEM/%u: MSR MTRR_FIX16K_80000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_80000, paValues[iReg].Reg64));
2040 pCtxMsrs->msr.MtrrFix16K_80000 = paValues[iReg].Reg64;
2041 iReg++;
2042
2043 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix16kA0000);
2044 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix16K_A0000 )
2045 Log7(("NEM/%u: MSR MTRR_FIX16K_A0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_A0000, paValues[iReg].Reg64));
2046 pCtxMsrs->msr.MtrrFix16K_A0000 = paValues[iReg].Reg64;
2047 iReg++;
2048
2049 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kC0000);
2050 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_C0000 )
2051 Log7(("NEM/%u: MSR MTRR_FIX4K_C0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C0000, paValues[iReg].Reg64));
2052 pCtxMsrs->msr.MtrrFix4K_C0000 = paValues[iReg].Reg64;
2053 iReg++;
2054
2055 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kC8000);
2056 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_C8000 )
2057 Log7(("NEM/%u: MSR MTRR_FIX4K_C8000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C8000, paValues[iReg].Reg64));
2058 pCtxMsrs->msr.MtrrFix4K_C8000 = paValues[iReg].Reg64;
2059 iReg++;
2060
2061 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kD0000);
2062 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_D0000 )
2063 Log7(("NEM/%u: MSR MTRR_FIX4K_D0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D0000, paValues[iReg].Reg64));
2064 pCtxMsrs->msr.MtrrFix4K_D0000 = paValues[iReg].Reg64;
2065 iReg++;
2066
2067 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kD8000);
2068 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_D8000 )
2069 Log7(("NEM/%u: MSR MTRR_FIX4K_D8000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D8000, paValues[iReg].Reg64));
2070 pCtxMsrs->msr.MtrrFix4K_D8000 = paValues[iReg].Reg64;
2071 iReg++;
2072
2073 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kE0000);
2074 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_E0000 )
2075 Log7(("NEM/%u: MSR MTRR_FIX4K_E0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E0000, paValues[iReg].Reg64));
2076 pCtxMsrs->msr.MtrrFix4K_E0000 = paValues[iReg].Reg64;
2077 iReg++;
2078
2079 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kE8000);
2080 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_E8000 )
2081 Log7(("NEM/%u: MSR MTRR_FIX4K_E8000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E8000, paValues[iReg].Reg64));
2082 pCtxMsrs->msr.MtrrFix4K_E8000 = paValues[iReg].Reg64;
2083 iReg++;
2084
2085 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kF0000);
2086 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_F0000 )
2087 Log7(("NEM/%u: MSR MTRR_FIX4K_F0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F0000, paValues[iReg].Reg64));
2088 pCtxMsrs->msr.MtrrFix4K_F0000 = paValues[iReg].Reg64;
2089 iReg++;
2090
2091 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kF8000);
2092 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_F8000 )
2093 Log7(("NEM/%u: MSR MTRR_FIX4K_F8000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F8000, paValues[iReg].Reg64));
2094 pCtxMsrs->msr.MtrrFix4K_F8000 = paValues[iReg].Reg64;
2095 iReg++;
2096
2097 Assert(pInput->Names[iReg] == HvX64RegisterTscAux);
2098 if (paValues[iReg].Reg64 != pCtxMsrs->msr.TscAux )
2099 Log7(("NEM/%u: MSR TSC_AUX changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.TscAux, paValues[iReg].Reg64));
2100 pCtxMsrs->msr.TscAux = paValues[iReg].Reg64;
2101 iReg++;
2102
2103# if 0 /** @todo why can't we even read HvX64RegisterIa32MiscEnable? */
2104 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
2105 {
2106 Assert(pInput->Names[iReg] == HvX64RegisterIa32MiscEnable);
2107 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MiscEnable)
2108 Log7(("NEM/%u: MSR MISC_ENABLE changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MiscEnable, paValues[iReg].Reg64));
2109 pCtxMsrs->msr.MiscEnable = paValues[iReg].Reg64;
2110 iReg++;
2111 }
2112# endif
2113# ifdef LOG_ENABLED
2114 if (enmCpuVendor != CPUMCPUVENDOR_AMD && enmCpuVendor != CPUMCPUVENDOR_HYGON)
2115 {
2116 Assert(pInput->Names[iReg] == HvX64RegisterIa32FeatureControl);
2117 uint64_t const uFeatCtrl = CPUMGetGuestIa32FeatCtrl(pGVCpu);
2118 if (paValues[iReg].Reg64 != uFeatCtrl)
2119 Log7(("NEM/%u: MSR FEATURE_CONTROL changed %RX64 -> %RX64 (!!)\n", pGVCpu->idCpu, uFeatCtrl, paValues[iReg].Reg64));
2120 iReg++;
2121 }
2122# endif
2123 }
2124
2125 /* Interruptibility. */
2126 if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
2127 {
2128 Assert(pInput->Names[iReg] == HvRegisterInterruptState);
2129 Assert(pInput->Names[iReg + 1] == HvX64RegisterRip);
2130
2131 if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
2132 {
2133 pGVCpu->nem.s.fLastInterruptShadow = paValues[iReg].InterruptState.InterruptShadow;
2134 if (paValues[iReg].InterruptState.InterruptShadow)
2135 EMSetInhibitInterruptsPC(pGVCpu, paValues[iReg + 1].Reg64);
2136 else
2137 VMCPU_FF_CLEAR(pGVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2138 }
2139
2140 if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
2141 {
2142 if (paValues[iReg].InterruptState.NmiMasked)
2143 VMCPU_FF_SET(pGVCpu, VMCPU_FF_BLOCK_NMIS);
2144 else
2145 VMCPU_FF_CLEAR(pGVCpu, VMCPU_FF_BLOCK_NMIS);
2146 }
2147
2148 fWhat |= CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI;
2149 iReg += 2;
2150 }
2151
2152 /* Event injection. */
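 /* These registers are currently only fetched for logging and sanity checks;
    nothing is copied back into CPUMCTX yet (see the @todo entries below). */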
2153 /// @todo HvRegisterPendingInterruption
2154 Assert(pInput->Names[iReg] == HvRegisterPendingInterruption);
2155 if (paValues[iReg].PendingInterruption.InterruptionPending)
2156 {
2157 Log7(("PendingInterruption: type=%u vector=%#x errcd=%RTbool/%#x instr-len=%u nested=%u\n",
2158 paValues[iReg].PendingInterruption.InterruptionType, paValues[iReg].PendingInterruption.InterruptionVector,
2159 paValues[iReg].PendingInterruption.DeliverErrorCode, paValues[iReg].PendingInterruption.ErrorCode,
2160 paValues[iReg].PendingInterruption.InstructionLength, paValues[iReg].PendingInterruption.NestedEvent));
2161 AssertMsg((paValues[iReg].PendingInterruption.AsUINT64 & UINT64_C(0xfc00)) == 0,
2162 ("%#RX64\n", paValues[iReg].PendingInterruption.AsUINT64));
2163 }
2164
2165 /// @todo HvRegisterPendingEvent0
2166 /// @todo HvRegisterPendingEvent1
2167
2168 /* Almost done, just update extrn flags and maybe change PGM mode. */
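 /* Note: if only the NEM keeper bit and/or the event-injection bit remain
    external after clearing fWhat, fExtrn is zeroed completely. */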
2169 pCtx->fExtrn &= ~fWhat;
2170 if (!(pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT))))
2171 pCtx->fExtrn = 0;
2172
2173 /* Typical. */
2174 if (!fMaybeChangedMode && !fUpdateCr3)
2175 return VINF_SUCCESS;
2176
2177 /*
2178 * Slow.
2179 */
2180 int rc = VINF_SUCCESS;
2181 if (fMaybeChangedMode)
2182 {
2183 rc = PGMChangeMode(pGVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
2184 AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_1);
2185 }
2186
2187 if (fUpdateCr3)
2188 {
2189 if (fCanUpdateCr3)
2190 {
2191 LogFlow(("nemR0WinImportState: -> PGMUpdateCR3!\n"));
2192 rc = PGMUpdateCR3(pGVCpu, pCtx->cr3);
2193 AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_2);
2194 }
2195 else
2196 {
2197 LogFlow(("nemR0WinImportState: -> VERR_NEM_FLUSH_TLB!\n"));
2198 rc = VERR_NEM_FLUSH_TLB; /* Calling PGMFlushTLB w/o long jump setup doesn't work, ring-3 does it. */
2199 }
2200 }
2201
2202 return rc;
2203}
2204#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
2205
2206
2207/**
2208 * Import the state from the native API (back to CPUMCTX).
2209 *
2210 * @returns VBox status code
2211 * @param pGVM The ring-0 VM handle.
2212 * @param idCpu The calling EMT. Necessary for getting the
2213 * hypercall page and arguments.
2214 * @param fWhat What to import, CPUMCTX_EXTRN_XXX. Set
2215 * CPUMCTX_EXTERN_ALL for everything.
2216 */
2217VMMR0_INT_DECL(int) NEMR0ImportState(PGVM pGVM, VMCPUID idCpu, uint64_t fWhat)
2218{
2219#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2220 /*
2221 * Validate the call.
2222 */
2223 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
2224 if (RT_SUCCESS(rc))
2225 {
2226 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2227 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
2228
2229 /*
2230 * Call worker.
2231 */
2232 rc = nemR0WinImportState(pGVM, pGVCpu, &pGVCpu->cpum.GstCtx, fWhat, false /*fCanUpdateCr3*/);
2233 }
2234 return rc;
2235#else
2236 RT_NOREF(pGVM, idCpu, fWhat);
2237 return VERR_NOT_IMPLEMENTED;
2238#endif
2239}
2240
2241
2242#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2243/**
2244 * Worker for NEMR0QueryCpuTick and the ring-0 NEMHCQueryCpuTick.
2245 *
2246 * @returns VBox status code.
2247 * @param pGVM The ring-0 VM handle.
2248 * @param pGVCpu The ring-0 VCPU handle.
2249 * @param pcTicks Where to return the current CPU tick count.
2250 * @param pcAux Where to return the Hyper-V TSC_AUX value. Optional.
2251 */
2252NEM_TMPL_STATIC int nemR0WinQueryCpuTick(PGVM pGVM, PGVMCPU pGVCpu, uint64_t *pcTicks, uint32_t *pcAux)
2253{
2254 /*
2255 * Hypercall parameters.
2256 */
2257 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
2258 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2259 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
2260
2261 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
2262 pInput->VpIndex = pGVCpu->idCpu;
2263 pInput->fFlags = 0;
2264 pInput->Names[0] = HvX64RegisterTsc;
2265 pInput->Names[1] = HvX64RegisterTscAux;
2266
2267 size_t const cbInput = RT_ALIGN_Z(RT_UOFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[2]), 32);
2268 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
2269 RT_BZERO(paValues, sizeof(paValues[0]) * 2);
2270
2271 /*
2272 * Make the hypercall.
2273 */
2274 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, 2),
2275 pGVCpu->nemr0.s.HypercallData.HCPhysPage,
2276 pGVCpu->nemr0.s.HypercallData.HCPhysPage + cbInput);
2277 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(2), ("uResult=%RX64 cRegs=%#x\n", uResult, 2),
2278 VERR_NEM_GET_REGISTERS_FAILED);
2279
2280 /*
2281 * Get results.
2282 */
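 /* The values are returned in the same order as the Names[] array above:
    index 0 holds the TSC and index 1 holds TSC_AUX. */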
2283 *pcTicks = paValues[0].Reg64;
2284 if (pcAux)
2285 *pcAux = paValues[1].Reg32;
2286 return VINF_SUCCESS;
2287}
2288#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
2289
2290
2291/**
2292 * Queries the TSC and TSC_AUX values, putting the results in the VCPU's hypercall result area (nem.s.Hypercall.QueryCpuTick).
2293 *
2294 * @returns VBox status code
2295 * @param pGVM The ring-0 VM handle.
2296 * @param idCpu The calling EMT. Necessary for getting the
2297 * hypercall page and arguments.
2298 */
2299VMMR0_INT_DECL(int) NEMR0QueryCpuTick(PGVM pGVM, VMCPUID idCpu)
2300{
2301#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2302 /*
2303 * Validate the call.
2304 */
2305 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
2306 if (RT_SUCCESS(rc))
2307 {
2308 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2309 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
2310
2311 /*
2312 * Call worker.
2313 */
2314 pGVCpu->nem.s.Hypercall.QueryCpuTick.cTicks = 0;
2315 pGVCpu->nem.s.Hypercall.QueryCpuTick.uAux = 0;
2316 rc = nemR0WinQueryCpuTick(pGVM, pGVCpu, &pGVCpu->nem.s.Hypercall.QueryCpuTick.cTicks,
2317 &pGVCpu->nem.s.Hypercall.QueryCpuTick.uAux);
2318 }
2319 return rc;
2320#else
2321 RT_NOREF(pGVM, idCpu);
2322 return VERR_NOT_IMPLEMENTED;
2323#endif
2324}
2325
2326
2327#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2328/**
2329 * Worker for NEMR0ResumeCpuTickOnAll and the ring-0 NEMHCResumeCpuTickOnAll.
2330 *
2331 * @returns VBox status code.
2332 * @param pGVM The ring-0 VM handle.
2333 * @param pGVCpu The ring-0 VCPU handle.
2334 * @param uPausedTscValue The TSC value at the time of pausing.
2335 */
2336NEM_TMPL_STATIC int nemR0WinResumeCpuTickOnAll(PGVM pGVM, PGVMCPU pGVCpu, uint64_t uPausedTscValue)
2337{
2338 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
2339
2340 /*
2341 * Set up the hypercall parameters.
2342 */
2343 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
2344 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2345
2346 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
2347 pInput->VpIndex = 0;
2348 pInput->RsvdZ = 0;
2349 pInput->Elements[0].Name = HvX64RegisterTsc;
2350 pInput->Elements[0].Pad0 = 0;
2351 pInput->Elements[0].Pad1 = 0;
2352 pInput->Elements[0].Value.Reg128.High64 = 0;
2353 pInput->Elements[0].Value.Reg64 = uPausedTscValue;
2354
2355 /*
2356 * Disable interrupts and do the first virtual CPU.
2357 */
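 /* Interrupts are kept disabled across the hypercalls below to keep the window
    short and to avoid being rescheduled, which could skew the TSC deltas
    measured here. */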
2358 RTCCINTREG const fSavedFlags = ASMIntDisableFlags();
2359 uint64_t const uFirstTsc = ASMReadTSC();
2360 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
2361 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0 /* no output */);
2362 AssertLogRelMsgReturnStmt(uResult == HV_MAKE_CALL_REP_RET(1), ("uResult=%RX64 uTsc=%#RX64\n", uResult, uPausedTscValue),
2363 ASMSetFlags(fSavedFlags), VERR_NEM_SET_TSC);
2364
2365 /*
2366 * Do secondary processors, adjusting for elapsed TSC and keeping fingers crossed
2367 * that we don't introduce too much drift here.
2368 */
2369 for (VMCPUID iCpu = 1; iCpu < pGVM->cCpus; iCpu++)
2370 {
2371 Assert(pInput->PartitionId == pGVM->nemr0.s.idHvPartition);
2372 Assert(pInput->RsvdZ == 0);
2373 Assert(pInput->Elements[0].Name == HvX64RegisterTsc);
2374 Assert(pInput->Elements[0].Pad0 == 0);
2375 Assert(pInput->Elements[0].Pad1 == 0);
2376 Assert(pInput->Elements[0].Value.Reg128.High64 == 0);
2377
2378 pInput->VpIndex = iCpu;
2379 const uint64_t offDelta = (ASMReadTSC() - uFirstTsc);
2380 pInput->Elements[0].Value.Reg64 = uPausedTscValue + offDelta;
2381
2382 uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
2383 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0 /* no output */);
2384 AssertLogRelMsgReturnStmt(uResult == HV_MAKE_CALL_REP_RET(1),
2385 ("uResult=%RX64 uTsc=%#RX64 + %#RX64\n", uResult, uPausedTscValue, offDelta),
2386 ASMSetFlags(fSavedFlags), VERR_NEM_SET_TSC);
2387 }
2388
2389 /*
2390 * Done.
2391 */
2392 ASMSetFlags(fSavedFlags);
2393 return VINF_SUCCESS;
2394}
2395#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
2396
2397
2398/**
2399 * Sets the TSC register to @a uPausedTscValue on all CPUs.
2400 *
2401 * @returns VBox status code
2402 * @param pGVM The ring-0 VM handle.
2403 * @param idCpu The calling EMT. Necessary for getting the
2404 * hypercall page and arguments.
2405 * @param uPausedTscValue The TSC value at the time of pausing.
2406 */
2407VMMR0_INT_DECL(int) NEMR0ResumeCpuTickOnAll(PGVM pGVM, VMCPUID idCpu, uint64_t uPausedTscValue)
2408{
2409#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2410 /*
2411 * Validate the call.
2412 */
2413 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
2414 if (RT_SUCCESS(rc))
2415 {
2416 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2417 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
2418
2419 /*
2420 * Call worker.
2421 */
2422 pGVCpu->nem.s.Hypercall.QueryCpuTick.cTicks = 0;
2423 pGVCpu->nem.s.Hypercall.QueryCpuTick.uAux = 0;
2424 rc = nemR0WinResumeCpuTickOnAll(pGVM, pGVCpu, uPausedTscValue);
2425 }
2426 return rc;
2427#else
2428 RT_NOREF(pGVM, idCpu, uPausedTscValue);
2429 return VERR_NOT_IMPLEMENTED;
2430#endif
2431}
2432
2433
2434VMMR0_INT_DECL(VBOXSTRICTRC) NEMR0RunGuestCode(PGVM pGVM, VMCPUID idCpu)
2435{
2436#ifdef NEM_WIN_WITH_RING0_RUNLOOP
2437 if (pGVM->nemr0.s.fMayUseRing0Runloop)
2438 return nemHCWinRunGC(pGVM, &pGVM->aCpus[idCpu]);
2439 return VERR_NEM_RING3_ONLY;
2440#else
2441 RT_NOREF(pGVM, idCpu);
2442 return VERR_NOT_IMPLEMENTED;
2443#endif
2444}
2445
2446
2447/**
2448 * Updates statistics in the VM structure.
2449 *
2450 * @returns VBox status code.
2451 * @param pGVM The ring-0 VM handle.
2452 * @param idCpu The calling EMT, or NIL. Necessary for getting the hypercall
2453 * page and arguments.
2454 */
2455VMMR0_INT_DECL(int) NEMR0UpdateStatistics(PGVM pGVM, VMCPUID idCpu)
2456{
2457 /*
2458 * Validate the call.
2459 */
2460 int rc;
2461 if (idCpu == NIL_VMCPUID)
2462 rc = GVMMR0ValidateGVM(pGVM);
2463 else
2464 rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
2465 if (RT_SUCCESS(rc))
2466 {
2467 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
2468
2469 PNEMR0HYPERCALLDATA pHypercallData = idCpu != NIL_VMCPUID
2470 ? &pGVM->aCpus[idCpu].nemr0.s.HypercallData
2471 : &pGVM->nemr0.s.HypercallData;
2472 if ( RT_VALID_PTR(pHypercallData->pbPage)
2473 && pHypercallData->HCPhysPage != NIL_RTHCPHYS)
2474 {
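 /* The VM-wide hypercall page is shared, so access to it is serialized with
    the crit sect below; the per-VCPU page is only ever used by its own EMT. */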
2475 if (idCpu == NIL_VMCPUID)
2476 rc = RTCritSectEnter(&pGVM->nemr0.s.HypercallDataCritSect);
2477 if (RT_SUCCESS(rc))
2478 {
2479 /*
2480 * Query the memory statistics for the partition.
2481 */
2482 HV_INPUT_GET_MEMORY_BALANCE *pInput = (HV_INPUT_GET_MEMORY_BALANCE *)pHypercallData->pbPage;
2483 pInput->TargetPartitionId = pGVM->nemr0.s.idHvPartition;
2484 pInput->ProximityDomainInfo.Flags.ProximityPreferred = 0;
2485 pInput->ProximityDomainInfo.Flags.ProxyimityInfoValid = 0;
2486 pInput->ProximityDomainInfo.Flags.Reserved = 0;
2487 pInput->ProximityDomainInfo.Id = 0;
2488
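 /* The output is placed directly after the input structure in the same
    hypercall page. */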
2489 HV_OUTPUT_GET_MEMORY_BALANCE *pOutput = (HV_OUTPUT_GET_MEMORY_BALANCE *)(pInput + 1);
2490 RT_ZERO(*pOutput);
2491
2492 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallGetMemoryBalance,
2493 pHypercallData->HCPhysPage,
2494 pHypercallData->HCPhysPage + sizeof(*pInput));
2495 if (uResult == HV_STATUS_SUCCESS)
2496 {
2497 pGVM->nem.s.R0Stats.cPagesAvailable = pOutput->PagesAvailable;
2498 pGVM->nem.s.R0Stats.cPagesInUse = pOutput->PagesInUse;
2499 rc = VINF_SUCCESS;
2500 }
2501 else
2502 {
2503 LogRel(("HvCallGetMemoryBalance -> %#RX64 (%#RX64 %#RX64)!!\n",
2504 uResult, pOutput->PagesAvailable, pOutput->PagesInUse));
2505 rc = VERR_NEM_IPE_0;
2506 }
2507
2508 if (idCpu == NIL_VMCPUID)
2509 RTCritSectLeave(&pGVM->nemr0.s.HypercallDataCritSect);
2510 }
2511 }
2512 else
2513 rc = VERR_WRONG_ORDER;
2514 }
2515 return rc;
2516}
2517
2518
2519#if 1 && defined(DEBUG_bird)
2520/**
2521 * Debug only interface for poking around and exploring Hyper-V stuff.
2522 *
2523 * @param pGVM The ring-0 VM handle.
2524 * @param idCpu The calling EMT.
2525 * @param u64Arg What to do: 0 == query register, 1 == query partition property, 2 == set register.
2526 */
2527VMMR0_INT_DECL(int) NEMR0DoExperiment(PGVM pGVM, VMCPUID idCpu, uint64_t u64Arg)
2528{
2529 /*
2530 * Resolve CPU structures.
2531 */
2532 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
2533 if (RT_SUCCESS(rc))
2534 {
2535 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API_1);
2536
2537 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2538 if (u64Arg == 0)
2539 {
2540 /*
2541 * Query register.
2542 */
2543 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
2544 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2545
2546 size_t const cbInput = RT_ALIGN_Z(RT_UOFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[1]), 32);
2547 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
2548 RT_BZERO(paValues, sizeof(paValues[0]) * 1);
2549
2550 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
2551 pInput->VpIndex = pGVCpu->idCpu;
2552 pInput->fFlags = 0;
2553 pInput->Names[0] = (HV_REGISTER_NAME)pGVCpu->nem.s.Hypercall.Experiment.uItem;
2554
2555 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, 1),
2556 pGVCpu->nemr0.s.HypercallData.HCPhysPage,
2557 pGVCpu->nemr0.s.HypercallData.HCPhysPage + cbInput);
2558 pGVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_MAKE_CALL_REP_RET(1);
2559 pGVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;
2560 pGVCpu->nem.s.Hypercall.Experiment.uLoValue = paValues[0].Reg128.Low64;
2561 pGVCpu->nem.s.Hypercall.Experiment.uHiValue = paValues[0].Reg128.High64;
2562 rc = VINF_SUCCESS;
2563 }
2564 else if (u64Arg == 1)
2565 {
2566 /*
2567 * Query partition property.
2568 */
2569 HV_INPUT_GET_PARTITION_PROPERTY *pInput = (HV_INPUT_GET_PARTITION_PROPERTY *)pGVCpu->nemr0.s.HypercallData.pbPage;
2570 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2571
2572 size_t const cbInput = RT_ALIGN_Z(sizeof(*pInput), 32);
2573 HV_OUTPUT_GET_PARTITION_PROPERTY *pOutput = (HV_OUTPUT_GET_PARTITION_PROPERTY *)((uint8_t *)pInput + cbInput);
2574 pOutput->PropertyValue = 0;
2575
2576 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
2577 pInput->PropertyCode = (HV_PARTITION_PROPERTY_CODE)pGVCpu->nem.s.Hypercall.Experiment.uItem;
2578 pInput->uPadding = 0;
2579
2580 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallGetPartitionProperty,
2581 pGVCpu->nemr0.s.HypercallData.HCPhysPage,
2582 pGVCpu->nemr0.s.HypercallData.HCPhysPage + cbInput);
2583 pGVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_STATUS_SUCCESS;
2584 pGVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;
2585 pGVCpu->nem.s.Hypercall.Experiment.uLoValue = pOutput->PropertyValue;
2586 pGVCpu->nem.s.Hypercall.Experiment.uHiValue = 0;
2587 rc = VINF_SUCCESS;
2588 }
2589 else if (u64Arg == 2)
2590 {
2591 /*
2592 * Set register.
2593 */
2594 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
2595 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2596 RT_BZERO(pInput, RT_UOFFSETOF(HV_INPUT_SET_VP_REGISTERS, Elements[1]));
2597
2598 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
2599 pInput->VpIndex = pGVCpu->idCpu;
2600 pInput->RsvdZ = 0;
2601 pInput->Elements[0].Name = (HV_REGISTER_NAME)pGVCpu->nem.s.Hypercall.Experiment.uItem;
2602 pInput->Elements[0].Value.Reg128.High64 = pGVCpu->nem.s.Hypercall.Experiment.uHiValue;
2603 pInput->Elements[0].Value.Reg128.Low64 = pGVCpu->nem.s.Hypercall.Experiment.uLoValue;
2604
2605 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
2606 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0);
2607 pGVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_MAKE_CALL_REP_RET(1);
2608 pGVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;
2609 rc = VINF_SUCCESS;
2610 }
2611 else
2612 rc = VERR_INVALID_FUNCTION;
2613 }
2614 return rc;
2615}
2616#endif /* DEBUG_bird */
2617