VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp @ 81605

Last change on this file since 81605 was 81605, checked in by vboxsync, 5 years ago

VMM (and related changes): Add support for Hygon Dhyana CPUs. Modified and improved contribution by Hongyong Zang submitted under MIT license. Thank you!

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 112.7 KB
1/* $Id: NEMR0Native-win.cpp 81605 2019-10-31 14:29:46Z vboxsync $ */
2/** @file
3 * NEM - Native execution manager, native ring-0 Windows backend.
4 */
5
6/*
7 * Copyright (C) 2018-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_NEM
23#define VMCPU_INCL_CPUM_GST_CTX
24#include <iprt/nt/nt.h>
25#include <iprt/nt/hyperv.h>
26#include <iprt/nt/vid.h>
27#include <winerror.h>
28
29#include <VBox/vmm/nem.h>
30#include <VBox/vmm/iem.h>
31#include <VBox/vmm/em.h>
32#include <VBox/vmm/apic.h>
33#include <VBox/vmm/pdm.h>
34#include <VBox/vmm/dbgftrace.h>
35#include "NEMInternal.h"
36#include <VBox/vmm/gvm.h>
37#include <VBox/vmm/vmcc.h>
38#include <VBox/vmm/gvmm.h>
39#include <VBox/param.h>
40
41#include <iprt/dbg.h>
42#include <iprt/memobj.h>
43#include <iprt/string.h>
44#include <iprt/time.h>
45
46
47/* Assert compile context sanity. */
48#ifndef RT_OS_WINDOWS
49# error "Windows only file!"
50#endif
51#ifndef RT_ARCH_AMD64
52# error "AMD64 only file!"
53#endif
54
55
56/*********************************************************************************************************************************
57* Internal Functions *
58*********************************************************************************************************************************/
59typedef uint32_t DWORD; /* for winerror.h constants */
60
61
62/*********************************************************************************************************************************
63* Global Variables *
64*********************************************************************************************************************************/
65static uint64_t (*g_pfnHvlInvokeHypercall)(uint64_t uCallInfo, uint64_t HCPhysInput, uint64_t HCPhysOutput);
66
67/**
68 * WinHvr.sys!WinHvDepositMemory
69 *
70 * This API will try to allocate cPages on IdealNode and deposit them to the
71 * hypervisor for use with the given partition. The memory will be freed when
72 * VID.SYS calls WinHvWithdrawAllMemory when the partition is cleaned up.
73 *
74 * Apparently node numbers above 64 have a different meaning.
75 */
76static NTSTATUS (*g_pfnWinHvDepositMemory)(uintptr_t idPartition, size_t cPages, uintptr_t IdealNode, size_t *pcActuallyAdded);
77
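#if 0 /* Illustrative sketch only: roughly how a deposit call is expected to look. The 512-page
         (2MB) chunk size is an assumption taken from the retry logic in nemR0WinMapPages below,
         which notes that VID.SYS works in multiples of 512 pages. */
static NTSTATUS nemR0WinDepositExampleSketch(PGVM pGVM)
{
    size_t   cPagesAdded = 0;
    NTSTATUS rcNt = g_pfnWinHvDepositMemory(pGVM->nemr0.s.idHvPartition, 512 /*cPages*/,
                                            0 /*IdealNode*/, &cPagesAdded);
    LogRel(("WinHvDepositMemory -> %#x (%u pages added)\n", rcNt, (unsigned)cPagesAdded));
    return rcNt;
}
#endif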
78
79/*********************************************************************************************************************************
80* Internal Functions *
81*********************************************************************************************************************************/
82NEM_TMPL_STATIC int nemR0WinMapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
83 uint32_t cPages, uint32_t fFlags);
84NEM_TMPL_STATIC int nemR0WinUnmapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys, uint32_t cPages);
85#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
86NEM_TMPL_STATIC int nemR0WinExportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx);
87NEM_TMPL_STATIC int nemR0WinImportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat, bool fCanUpdateCr3);
88NEM_TMPL_STATIC int nemR0WinQueryCpuTick(PGVM pGVM, PGVMCPU pGVCpu, uint64_t *pcTicks, uint32_t *pcAux);
89NEM_TMPL_STATIC int nemR0WinResumeCpuTickOnAll(PGVM pGVM, PGVMCPU pGVCpu, uint64_t uPausedTscValue);
90#endif
91DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, PGVMCPU pGVCpu, uint32_t uFunction, void *pvInput, uint32_t cbInput,
92 void *pvOutput, uint32_t cbOutput);
93
94
95/*
96 * Instantiate the code we share with ring-3.
97 */
98#ifdef NEM_WIN_WITH_RING0_RUNLOOP
99# define NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
100#else
101# undef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
102#endif
103#include "../VMMAll/NEMAllNativeTemplate-win.cpp.h"
104
105
106
107/**
108 * Worker for NEMR0InitVM that allocates a hypercall page.
109 *
110 * @returns VBox status code.
111 * @param pHypercallData The hypercall data page to initialize.
112 */
113static int nemR0InitHypercallData(PNEMR0HYPERCALLDATA pHypercallData)
114{
115 int rc = RTR0MemObjAllocPage(&pHypercallData->hMemObj, PAGE_SIZE, false /*fExecutable*/);
116 if (RT_SUCCESS(rc))
117 {
118 pHypercallData->HCPhysPage = RTR0MemObjGetPagePhysAddr(pHypercallData->hMemObj, 0 /*iPage*/);
119 AssertStmt(pHypercallData->HCPhysPage != NIL_RTHCPHYS, rc = VERR_INTERNAL_ERROR_3);
120 pHypercallData->pbPage = (uint8_t *)RTR0MemObjAddress(pHypercallData->hMemObj);
121 AssertStmt(pHypercallData->pbPage, rc = VERR_INTERNAL_ERROR_3);
122 if (RT_SUCCESS(rc))
123 return VINF_SUCCESS;
124
125 /* bail out */
126 RTR0MemObjFree(pHypercallData->hMemObj, true /*fFreeMappings*/);
127 }
128 pHypercallData->hMemObj = NIL_RTR0MEMOBJ;
129 pHypercallData->HCPhysPage = NIL_RTHCPHYS;
130 pHypercallData->pbPage = NULL;
131 return rc;
132}
133
134/**
135 * Worker for NEMR0CleanupVM and NEMR0InitVM that cleans up a hypercall page.
136 *
137 * @param pHypercallData The hypercall data page to uninitialize.
138 */
139static void nemR0DeleteHypercallData(PNEMR0HYPERCALLDATA pHypercallData)
140{
141 /* Check pbPage here since it's always NULL when not initialized, whereas
142 hMemObj can be either NIL_RTR0MEMOBJ or 0 (and those aren't necessarily the same). */
143 if (pHypercallData->pbPage != NULL)
144 {
145 RTR0MemObjFree(pHypercallData->hMemObj, true /*fFreeMappings*/);
146 pHypercallData->pbPage = NULL;
147 }
148 pHypercallData->hMemObj = NIL_RTR0MEMOBJ;
149 pHypercallData->HCPhysPage = NIL_RTHCPHYS;
150}
151
152
153/**
154 * Called by NEMR3Init to make sure we've got what we need.
155 *
156 * @returns VBox status code.
157 * @param pGVM The ring-0 VM handle.
158 * @thread EMT(0)
159 */
160VMMR0_INT_DECL(int) NEMR0InitVM(PGVM pGVM)
161{
162 AssertCompile(sizeof(pGVM->nemr0.s) <= sizeof(pGVM->nemr0.padding));
163 AssertCompile(sizeof(pGVM->aCpus[0].nemr0.s) <= sizeof(pGVM->aCpus[0].nemr0.padding));
164
165 int rc = GVMMR0ValidateGVMandEMT(pGVM, 0);
166 AssertRCReturn(rc, rc);
167
168 /*
169 * We want to perform hypercalls here. The NT kernel started to expose a very low
170 * level interface for doing this somewhere between builds 14271 and 16299. Since
171 * we need build 17134 to get anywhere at all, the exact build is not relevant here.
172 *
173 * We also need to deposit memory to the hypervisor for use with the partition
174 * (page mapping structures and the like).
175 */
176 RTDBGKRNLINFO hKrnlInfo;
177 rc = RTR0DbgKrnlInfoOpen(&hKrnlInfo, 0);
178 if (RT_SUCCESS(rc))
179 {
180 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, NULL, "HvlInvokeHypercall", (void **)&g_pfnHvlInvokeHypercall);
181 if (RT_SUCCESS(rc))
182 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, "winhvr.sys", "WinHvDepositMemory", (void **)&g_pfnWinHvDepositMemory);
183 RTR0DbgKrnlInfoRelease(hKrnlInfo);
184 if (RT_SUCCESS(rc))
185 {
186 /*
187 * Allocate a page for non-EMT threads to use for hypercalls (update
188 * statistics and such) and a critical section protecting it.
189 */
190 rc = RTCritSectInit(&pGVM->nemr0.s.HypercallDataCritSect);
191 if (RT_SUCCESS(rc))
192 {
193 rc = nemR0InitHypercallData(&pGVM->nemr0.s.HypercallData);
194 if (RT_SUCCESS(rc))
195 {
196 /*
197 * Allocate a page for each VCPU to place hypercall data on.
198 */
199 for (VMCPUID i = 0; i < pGVM->cCpus; i++)
200 {
201 rc = nemR0InitHypercallData(&pGVM->aCpus[i].nemr0.s.HypercallData);
202 if (RT_FAILURE(rc))
203 {
204 while (i-- > 0)
205 nemR0DeleteHypercallData(&pGVM->aCpus[i].nemr0.s.HypercallData);
206 break;
207 }
208 }
209 if (RT_SUCCESS(rc))
210 {
211 /*
212 * So far, so good.
213 */
214 return rc;
215 }
216
217 /*
218 * Bail out.
219 */
220 nemR0DeleteHypercallData(&pGVM->nemr0.s.HypercallData);
221 }
222 RTCritSectDelete(&pGVM->nemr0.s.HypercallDataCritSect);
223 }
224 }
225 else
226 rc = VERR_NEM_MISSING_KERNEL_API;
227 }
228
229 return rc;
230}
231
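#if 0 /* Illustrative sketch only: how a non-EMT thread is expected to use the shared hypercall
         page allocated above. The hypercall itself is elided; the point is the critical section
         pairing around pGVM->nemr0.s.HypercallData. */
static int nemR0WinNonEmtHypercallExampleSketch(PGVM pGVM)
{
    int rc = RTCritSectEnter(&pGVM->nemr0.s.HypercallDataCritSect);
    if (RT_SUCCESS(rc))
    {
        void * const    pvInput = pGVM->nemr0.s.HypercallData.pbPage;
        RTHCPHYS const  HCPhys  = pGVM->nemr0.s.HypercallData.HCPhysPage;
        /* ... fill in pvInput and call g_pfnHvlInvokeHypercall(uCallInfo, HCPhys, 0) here ... */
        RT_NOREF(pvInput, HCPhys);
        RTCritSectLeave(&pGVM->nemr0.s.HypercallDataCritSect);
    }
    return rc;
}
#endif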
232
233/**
234 * Perform an I/O control operation on the partition handle (VID.SYS).
235 *
236 * @returns NT status code.
237 * @param pGVM The ring-0 VM structure.
238 * @param pGVCpu The global (ring-0) CPU structure of the calling EMT.
239 * @param uFunction The function to perform.
240 * @param pvInput The input buffer. This must point within the VM
241 * structure so we can easily convert to a ring-3
242 * pointer if necessary.
243 * @param cbInput The size of the input. @a pvInput must be NULL when
244 * zero.
245 * @param pvOutput The output buffer. This must also point within the
246 * VM structure for ring-3 pointer magic.
247 * @param cbOutput The size of the output. @a pvOutput must be NULL
248 * when zero.
249 * @thread EMT(pGVCpu)
250 */
251DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, PGVMCPU pGVCpu, uint32_t uFunction, void *pvInput, uint32_t cbInput,
252 void *pvOutput, uint32_t cbOutput)
253{
254#ifdef RT_STRICT
255 /*
256 * Input and output parameters are part of the VM CPU structure.
257 */
258 VMCPU_ASSERT_EMT(pGVCpu);
259 if (pvInput)
260 AssertReturn(((uintptr_t)pvInput + cbInput) - (uintptr_t)pGVCpu <= sizeof(*pGVCpu), VERR_INVALID_PARAMETER);
261 if (pvOutput)
262 AssertReturn(((uintptr_t)pvOutput + cbOutput) - (uintptr_t)pGVCpu <= sizeof(*pGVCpu), VERR_INVALID_PARAMETER);
263#endif
264
265 int32_t rcNt = STATUS_UNSUCCESSFUL;
266 int rc = SUPR0IoCtlPerform(pGVM->nemr0.s.pIoCtlCtx, uFunction,
267 pvInput,
268 pvInput ? (uintptr_t)pvInput + pGVCpu->nemr0.s.offRing3ConversionDelta : NIL_RTR3PTR,
269 cbInput,
270 pvOutput,
271 pvOutput ? (uintptr_t)pvOutput + pGVCpu->nemr0.s.offRing3ConversionDelta : NIL_RTR3PTR,
272 cbOutput,
273 &rcNt);
274 if (RT_SUCCESS(rc) || !NT_SUCCESS((NTSTATUS)rcNt))
275 return (NTSTATUS)rcNt;
276 return STATUS_UNSUCCESSFUL;
277}
278
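#if 0 /* Illustrative sketch only: the ring-0 to ring-3 address translation performed above. The
         per-VCPU delta is set up in NEMR0InitVMPart2 as the difference between the ring-3 and
         ring-0 mappings of the same VMCPU structure, so any pointer into the ring-0 VMCPU can be
         handed to VID.SYS as a ring-3 address. */
DECLINLINE(RTR3PTR) nemR0WinR0ToR3ExampleSketch(PGVMCPU pGVCpu, void *pvR0PtrIntoVCpu)
{
    Assert((uintptr_t)pvR0PtrIntoVCpu - (uintptr_t)pGVCpu < sizeof(*pGVCpu));
    return (RTR3PTR)((uintptr_t)pvR0PtrIntoVCpu + pGVCpu->nemr0.s.offRing3ConversionDelta);
}
#endif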
279
280/**
281 * 2nd part of the initialization, after we've got a partition handle.
282 *
283 * @returns VBox status code.
284 * @param pGVM The ring-0 VM handle.
285 * @thread EMT(0)
286 */
287VMMR0_INT_DECL(int) NEMR0InitVMPart2(PGVM pGVM)
288{
289 int rc = GVMMR0ValidateGVMandEMT(pGVM, 0);
290 AssertRCReturn(rc, rc);
291 SUPR0Printf("NEMR0InitVMPart2\n"); LogRel(("2: NEMR0InitVMPart2\n"));
292 Assert(pGVM->nemr0.s.fMayUseRing0Runloop == false);
293
294 /*
295 * Copy and validate the I/O control information from ring-3.
296 */
297 NEMWINIOCTL Copy = pGVM->nem.s.IoCtlGetHvPartitionId;
298 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);
299 AssertLogRelReturn(Copy.cbInput == 0, VERR_NEM_INIT_FAILED);
300 AssertLogRelReturn(Copy.cbOutput == sizeof(HV_PARTITION_ID), VERR_NEM_INIT_FAILED);
301 pGVM->nemr0.s.IoCtlGetHvPartitionId = Copy;
302
303 pGVM->nemr0.s.fMayUseRing0Runloop = pGVM->nem.s.fUseRing0Runloop;
304
305 Copy = pGVM->nem.s.IoCtlStartVirtualProcessor;
306 AssertLogRelStmt(Copy.uFunction != 0, rc = VERR_NEM_INIT_FAILED);
307 AssertLogRelStmt(Copy.cbInput == sizeof(HV_VP_INDEX), rc = VERR_NEM_INIT_FAILED);
308 AssertLogRelStmt(Copy.cbOutput == 0, rc = VERR_NEM_INIT_FAILED);
309 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlGetHvPartitionId.uFunction, rc = VERR_NEM_INIT_FAILED);
310 if (RT_SUCCESS(rc))
311 pGVM->nemr0.s.IoCtlStartVirtualProcessor = Copy;
312
313 Copy = pGVM->nem.s.IoCtlStopVirtualProcessor;
314 AssertLogRelStmt(Copy.uFunction != 0, rc = VERR_NEM_INIT_FAILED);
315 AssertLogRelStmt(Copy.cbInput == sizeof(HV_VP_INDEX), rc = VERR_NEM_INIT_FAILED);
316 AssertLogRelStmt(Copy.cbOutput == 0, rc = VERR_NEM_INIT_FAILED);
317 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlGetHvPartitionId.uFunction, rc = VERR_NEM_INIT_FAILED);
318 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlStartVirtualProcessor.uFunction, rc = VERR_NEM_INIT_FAILED);
319 if (RT_SUCCESS(rc))
320 pGVM->nemr0.s.IoCtlStopVirtualProcessor = Copy;
321
322 Copy = pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext;
323 AssertLogRelStmt(Copy.uFunction != 0, rc = VERR_NEM_INIT_FAILED);
324 AssertLogRelStmt( Copy.cbInput == sizeof(VID_IOCTL_INPUT_MESSAGE_SLOT_HANDLE_AND_GET_NEXT)
325 || Copy.cbInput == RT_OFFSETOF(VID_IOCTL_INPUT_MESSAGE_SLOT_HANDLE_AND_GET_NEXT, cMillies),
326 rc = VERR_NEM_INIT_FAILED);
327 AssertLogRelStmt(Copy.cbOutput == 0, rc = VERR_NEM_INIT_FAILED);
328 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlGetHvPartitionId.uFunction, rc = VERR_NEM_INIT_FAILED);
329 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlStartVirtualProcessor.uFunction, rc = VERR_NEM_INIT_FAILED);
330 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlStopVirtualProcessor.uFunction, rc = VERR_NEM_INIT_FAILED);
331 if (RT_SUCCESS(rc))
332 pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext = Copy;
333
334 if ( RT_SUCCESS(rc)
335 || !pGVM->nem.s.fUseRing0Runloop)
336 {
337 /*
338 * Setup of an I/O control context for the partition handle for later use.
339 */
340 rc = SUPR0IoCtlSetupForHandle(pGVM->pSession, pGVM->nem.s.hPartitionDevice, 0, &pGVM->nemr0.s.pIoCtlCtx);
341 AssertLogRelRCReturn(rc, rc);
342 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
343 {
344 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
345 pGVCpu->nemr0.s.offRing3ConversionDelta = (uintptr_t)pGVM->aCpus[idCpu].pVCpuR3 - (uintptr_t)pGVCpu;
346 }
347
348 /*
349 * Get the partition ID.
350 */
351 PVMCPUCC pVCpu0 = &pGVM->aCpus[0];
352 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pVCpu0, pGVM->nemr0.s.IoCtlGetHvPartitionId.uFunction, NULL, 0,
353 &pVCpu0->nem.s.uIoCtlBuf.idPartition, sizeof(pVCpu0->nem.s.uIoCtlBuf.idPartition));
354 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("IoCtlGetHvPartitionId failed: %#x\n", rcNt), VERR_NEM_INIT_FAILED);
355 pGVM->nemr0.s.idHvPartition = pVCpu0->nem.s.uIoCtlBuf.idPartition;
356 AssertLogRelMsgReturn(pGVM->nemr0.s.idHvPartition == pGVM->nem.s.idHvPartition,
357 ("idHvPartition mismatch: r0=%#RX64, r3=%#RX64\n", pGVM->nemr0.s.idHvPartition, pGVM->nem.s.idHvPartition),
358 VERR_NEM_INIT_FAILED);
359 }
360
361 return rc;
362}
363
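/* Note on ordering: NEMR0InitVM() runs first (from NEMR3Init) to resolve the kernel APIs and
   allocate the hypercall pages; NEMR0InitVMPart2() is only invoked once ring-3 has obtained a
   partition handle and filled in pGVM->nem.s.hPartitionDevice and the IoCtl* function numbers
   that are validated and copied above. */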
364
365/**
366 * Cleanup the NEM parts of the VM in ring-0.
367 *
368 * This is always called and must deal with the state regardless of whether
369 * NEMR0InitVM() was called or not. So, take care here.
370 *
371 * @param pGVM The ring-0 VM handle.
372 */
373VMMR0_INT_DECL(void) NEMR0CleanupVM(PGVM pGVM)
374{
375 pGVM->nemr0.s.idHvPartition = HV_PARTITION_ID_INVALID;
376
377 /* Clean up I/O control context. */
378 if (pGVM->nemr0.s.pIoCtlCtx)
379 {
380 int rc = SUPR0IoCtlCleanup(pGVM->nemr0.s.pIoCtlCtx);
381 AssertRC(rc);
382 pGVM->nemr0.s.pIoCtlCtx = NULL;
383 }
384
385 /* Free the hypercall pages. */
386 VMCPUID i = pGVM->cCpus;
387 while (i-- > 0)
388 nemR0DeleteHypercallData(&pGVM->aCpus[i].nemr0.s.HypercallData);
389
390 /* The non-EMT one too. */
391 if (RTCritSectIsInitialized(&pGVM->nemr0.s.HypercallDataCritSect))
392 RTCritSectDelete(&pGVM->nemr0.s.HypercallDataCritSect);
393 nemR0DeleteHypercallData(&pGVM->nemr0.s.HypercallData);
394}
395
396
397#if 0 /* for debugging GPA unmapping. */
398static int nemR3WinDummyReadGpa(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys)
399{
400 PHV_INPUT_READ_GPA pIn = (PHV_INPUT_READ_GPA)pGVCpu->nemr0.s.pbHypercallData;
401 PHV_OUTPUT_READ_GPA pOut = (PHV_OUTPUT_READ_GPA)(pIn + 1);
402 pIn->PartitionId = pGVM->nemr0.s.idHvPartition;
403 pIn->VpIndex = pGVCpu->idCpu;
404 pIn->ByteCount = 0x10;
405 pIn->BaseGpa = GCPhys;
406 pIn->ControlFlags.AsUINT64 = 0;
407 pIn->ControlFlags.CacheType = HvCacheTypeX64WriteCombining;
408 memset(pOut, 0xfe, sizeof(*pOut));
409 uint64_t volatile uResult = g_pfnHvlInvokeHypercall(HvCallReadGpa, pGVCpu->nemr0.s.HCPhysHypercallData,
410 pGVCpu->nemr0.s.HCPhysHypercallData + sizeof(*pIn));
411 LogRel(("nemR3WinDummyReadGpa: %RGp -> %#RX64; code=%u rsvd=%u abData=%.16Rhxs\n",
412 GCPhys, uResult, pOut->AccessResult.ResultCode, pOut->AccessResult.Reserved, pOut->Data));
413 __debugbreak();
414
415 return uResult != 0 ? VERR_READ_ERROR : VINF_SUCCESS;
416}
417#endif
418
419
420/**
421 * Worker for NEMR0MapPages and others.
422 */
423NEM_TMPL_STATIC int nemR0WinMapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
424 uint32_t cPages, uint32_t fFlags)
425{
426 /*
427 * Validate.
428 */
429 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
430
431 AssertReturn(cPages > 0, VERR_OUT_OF_RANGE);
432 AssertReturn(cPages <= NEM_MAX_MAP_PAGES, VERR_OUT_OF_RANGE);
433 AssertReturn(!(fFlags & ~(HV_MAP_GPA_MAYBE_ACCESS_MASK & ~HV_MAP_GPA_DUNNO_ACCESS)), VERR_INVALID_FLAGS);
434 AssertMsgReturn(!(GCPhysDst & X86_PAGE_OFFSET_MASK), ("GCPhysDst=%RGp\n", GCPhysDst), VERR_OUT_OF_RANGE);
435 AssertReturn(GCPhysDst < _1E, VERR_OUT_OF_RANGE);
436 if (GCPhysSrc != GCPhysDst)
437 {
438 AssertMsgReturn(!(GCPhysSrc & X86_PAGE_OFFSET_MASK), ("GCPhysSrc=%RGp\n", GCPhysSrc), VERR_OUT_OF_RANGE);
439 AssertReturn(GCPhysSrc < _1E, VERR_OUT_OF_RANGE);
440 }
441
442 /*
443 * Compose and make the hypercall.
444 * Ring-3 is not allowed to fill in the host physical addresses of the call.
445 */
446 for (uint32_t iTries = 0;; iTries++)
447 {
448 HV_INPUT_MAP_GPA_PAGES *pMapPages = (HV_INPUT_MAP_GPA_PAGES *)pGVCpu->nemr0.s.HypercallData.pbPage;
449 AssertPtrReturn(pMapPages, VERR_INTERNAL_ERROR_3);
450 pMapPages->TargetPartitionId = pGVM->nemr0.s.idHvPartition;
451 pMapPages->TargetGpaBase = GCPhysDst >> X86_PAGE_SHIFT;
452 pMapPages->MapFlags = fFlags;
453 pMapPages->u32ExplicitPadding = 0;
454 for (uint32_t iPage = 0; iPage < cPages; iPage++, GCPhysSrc += X86_PAGE_SIZE)
455 {
456 RTHCPHYS HCPhys = NIL_RTGCPHYS;
457 int rc = PGMPhysGCPhys2HCPhys(pGVM, GCPhysSrc, &HCPhys);
458 AssertRCReturn(rc, rc);
459 pMapPages->PageList[iPage] = HCPhys >> X86_PAGE_SHIFT;
460 }
461
462 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallMapGpaPages | ((uint64_t)cPages << 32),
463 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0);
464 Log6(("NEMR0MapPages: %RGp/%RGp L %u prot %#x -> %#RX64\n",
465 GCPhysDst, GCPhysSrc - cPages * X86_PAGE_SIZE, cPages, fFlags, uResult));
466 if (uResult == ((uint64_t)cPages << 32))
467 return VINF_SUCCESS;
468
469 /*
470 * If the partition is out of memory, try to donate another 512 pages to
471 * it (2MB). VID.SYS does multiples of 512 pages, nothing smaller.
472 */
473 if ( uResult != HV_STATUS_INSUFFICIENT_MEMORY
474 || iTries > 16
475 || g_pfnWinHvDepositMemory == NULL)
476 {
477 LogRel(("g_pfnHvlInvokeHypercall/MapGpaPages -> %#RX64\n", uResult));
478 return VERR_NEM_MAP_PAGES_FAILED;
479 }
480
481 size_t cPagesAdded = 0;
482 NTSTATUS rcNt = g_pfnWinHvDepositMemory(pGVM->nemr0.s.idHvPartition, 512, 0, &cPagesAdded);
483 if (!cPagesAdded)
484 {
485 LogRel(("g_pfnWinHvDepositMemory -> %#x / %#RX64\n", rcNt, uResult));
486 return VERR_NEM_MAP_PAGES_FAILED;
487 }
488 }
489}
490
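#if 0 /* Illustrative sketch only: the rep-hypercall encoding used above. The repeat count is
         packed in at bit 32 of the call info and the number of completed reps comes back at the
         same position in the result, with the status in the low bits. These helpers mirror the
         pattern also used via HV_MAKE_CALL_INFO / HV_MAKE_CALL_REP_RET further down. */
DECLINLINE(uint64_t) nemR0WinMakeRepCallInfoSketch(uint16_t uCallCode, uint32_t cReps)
{
    return (uint64_t)uCallCode | ((uint64_t)cReps << 32);
}

DECLINLINE(bool) nemR0WinRepCallFullySucceededSketch(uint64_t uResult, uint32_t cReps)
{
    /* Same test as above: status must be HV_STATUS_SUCCESS (0) and all reps must have completed. */
    return uResult == ((uint64_t)cReps << 32);
}
#endif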
491
492/**
493 * Maps pages into the guest physical address space.
494 *
495 * Generally the caller will be under the PGM lock already, so no extra effort
496 * is needed to make sure all changes happen under it.
497 *
498 * @returns VBox status code.
499 * @param pGVM The ring-0 VM handle.
500 * @param idCpu The calling EMT. Necessary for getting the
501 * hypercall page and arguments.
502 * @thread EMT(idCpu)
503 */
504VMMR0_INT_DECL(int) NEMR0MapPages(PGVM pGVM, VMCPUID idCpu)
505{
506 /*
507 * Unpack the call.
508 */
509 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
510 if (RT_SUCCESS(rc))
511 {
512 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
513
514 RTGCPHYS const GCPhysSrc = pGVCpu->nem.s.Hypercall.MapPages.GCPhysSrc;
515 RTGCPHYS const GCPhysDst = pGVCpu->nem.s.Hypercall.MapPages.GCPhysDst;
516 uint32_t const cPages = pGVCpu->nem.s.Hypercall.MapPages.cPages;
517 HV_MAP_GPA_FLAGS const fFlags = pGVCpu->nem.s.Hypercall.MapPages.fFlags;
518
519 /*
520 * Do the work.
521 */
522 rc = nemR0WinMapPages(pGVM, pGVCpu, GCPhysSrc, GCPhysDst, cPages, fFlags);
523 }
524 return rc;
525}
526
527
528/**
529 * Worker for NEMR0UnmapPages and others.
530 */
531NEM_TMPL_STATIC int nemR0WinUnmapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys, uint32_t cPages)
532{
533 /*
534 * Validate input.
535 */
536 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
537
538 AssertReturn(cPages > 0, VERR_OUT_OF_RANGE);
539 AssertReturn(cPages <= NEM_MAX_UNMAP_PAGES, VERR_OUT_OF_RANGE);
540 AssertMsgReturn(!(GCPhys & X86_PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_OUT_OF_RANGE);
541 AssertReturn(GCPhys < _1E, VERR_OUT_OF_RANGE);
542
543 /*
544 * Compose and make the hypercall.
545 */
546 HV_INPUT_UNMAP_GPA_PAGES *pUnmapPages = (HV_INPUT_UNMAP_GPA_PAGES *)pGVCpu->nemr0.s.HypercallData.pbPage;
547 AssertPtrReturn(pUnmapPages, VERR_INTERNAL_ERROR_3);
548 pUnmapPages->TargetPartitionId = pGVM->nemr0.s.idHvPartition;
549 pUnmapPages->TargetGpaBase = GCPhys >> X86_PAGE_SHIFT;
550 pUnmapPages->fFlags = 0;
551
552 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallUnmapGpaPages | ((uint64_t)cPages << 32),
553 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0);
554 Log6(("NEMR0UnmapPages: %RGp L %u -> %#RX64\n", GCPhys, cPages, uResult));
555 if (uResult == ((uint64_t)cPages << 32))
556 {
557#if 1 /* Do we need to do this? Hopefully not... */
558 uint64_t volatile uR = g_pfnHvlInvokeHypercall(HvCallUncommitGpaPages | ((uint64_t)cPages << 32),
559 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0);
560 AssertMsg(uR == ((uint64_t)cPages << 32), ("uR=%#RX64\n", uR)); NOREF(uR);
561#endif
562 return VINF_SUCCESS;
563 }
564
565 LogRel(("g_pfnHvlInvokeHypercall/UnmapGpaPages -> %#RX64\n", uResult));
566 return VERR_NEM_UNMAP_PAGES_FAILED;
567}
568
569
570/**
571 * Unmaps pages from the guest physical address space.
572 *
573 * Generally the caller will be under the PGM lock already, so no extra effort
574 * is needed to make sure all changes happen under it.
575 *
576 * @returns VBox status code.
577 * @param pGVM The ring-0 VM handle.
578 * @param idCpu The calling EMT. Necessary for getting the
579 * hypercall page and arguments.
580 * @thread EMT(idCpu)
581 */
582VMMR0_INT_DECL(int) NEMR0UnmapPages(PGVM pGVM, VMCPUID idCpu)
583{
584 /*
585 * Unpack the call.
586 */
587 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
588 if (RT_SUCCESS(rc))
589 {
590 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
591
592 RTGCPHYS const GCPhys = pGVCpu->nem.s.Hypercall.UnmapPages.GCPhys;
593 uint32_t const cPages = pGVCpu->nem.s.Hypercall.UnmapPages.cPages;
594
595 /*
596 * Do the work.
597 */
598 rc = nemR0WinUnmapPages(pGVM, pGVCpu, GCPhys, cPages);
599 }
600 return rc;
601}
602
603
604#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
605/**
606 * Worker for NEMR0ExportState.
607 *
608 * The intention is to use it internally later.
609 *
610 * @returns VBox status code.
611 * @param pGVM The ring-0 VM handle.
612 * @param pGVCpu The ring-0 VCPU handle.
613 * @param pCtx The CPU context structure to export from.
614 */
615NEM_TMPL_STATIC int nemR0WinExportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx)
616{
617 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
618 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
619 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
620
621 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
622 pInput->VpIndex = pGVCpu->idCpu;
623 pInput->RsvdZ = 0;
624
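    /* Bits set in pCtx->fExtrn mark state that currently lives with the hypervisor rather than in
       CPUMCTX, so the complement is what is valid locally and thus what needs exporting; on
       success everything is flagged as external again at the bottom of this function. */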
625 uint64_t const fWhat = ~pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK);
626 if ( !fWhat
627 && pGVCpu->nem.s.fCurrentInterruptWindows == pGVCpu->nem.s.fDesiredInterruptWindows)
628 return VINF_SUCCESS;
629 uintptr_t iReg = 0;
630
631 /* GPRs */
632 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
633 {
634 if (fWhat & CPUMCTX_EXTRN_RAX)
635 {
636 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
637 pInput->Elements[iReg].Name = HvX64RegisterRax;
638 pInput->Elements[iReg].Value.Reg64 = pCtx->rax;
639 iReg++;
640 }
641 if (fWhat & CPUMCTX_EXTRN_RCX)
642 {
643 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
644 pInput->Elements[iReg].Name = HvX64RegisterRcx;
645 pInput->Elements[iReg].Value.Reg64 = pCtx->rcx;
646 iReg++;
647 }
648 if (fWhat & CPUMCTX_EXTRN_RDX)
649 {
650 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
651 pInput->Elements[iReg].Name = HvX64RegisterRdx;
652 pInput->Elements[iReg].Value.Reg64 = pCtx->rdx;
653 iReg++;
654 }
655 if (fWhat & CPUMCTX_EXTRN_RBX)
656 {
657 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
658 pInput->Elements[iReg].Name = HvX64RegisterRbx;
659 pInput->Elements[iReg].Value.Reg64 = pCtx->rbx;
660 iReg++;
661 }
662 if (fWhat & CPUMCTX_EXTRN_RSP)
663 {
664 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
665 pInput->Elements[iReg].Name = HvX64RegisterRsp;
666 pInput->Elements[iReg].Value.Reg64 = pCtx->rsp;
667 iReg++;
668 }
669 if (fWhat & CPUMCTX_EXTRN_RBP)
670 {
671 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
672 pInput->Elements[iReg].Name = HvX64RegisterRbp;
673 pInput->Elements[iReg].Value.Reg64 = pCtx->rbp;
674 iReg++;
675 }
676 if (fWhat & CPUMCTX_EXTRN_RSI)
677 {
678 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
679 pInput->Elements[iReg].Name = HvX64RegisterRsi;
680 pInput->Elements[iReg].Value.Reg64 = pCtx->rsi;
681 iReg++;
682 }
683 if (fWhat & CPUMCTX_EXTRN_RDI)
684 {
685 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
686 pInput->Elements[iReg].Name = HvX64RegisterRdi;
687 pInput->Elements[iReg].Value.Reg64 = pCtx->rdi;
688 iReg++;
689 }
690 if (fWhat & CPUMCTX_EXTRN_R8_R15)
691 {
692 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
693 pInput->Elements[iReg].Name = HvX64RegisterR8;
694 pInput->Elements[iReg].Value.Reg64 = pCtx->r8;
695 iReg++;
696 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
697 pInput->Elements[iReg].Name = HvX64RegisterR9;
698 pInput->Elements[iReg].Value.Reg64 = pCtx->r9;
699 iReg++;
700 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
701 pInput->Elements[iReg].Name = HvX64RegisterR10;
702 pInput->Elements[iReg].Value.Reg64 = pCtx->r10;
703 iReg++;
704 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
705 pInput->Elements[iReg].Name = HvX64RegisterR11;
706 pInput->Elements[iReg].Value.Reg64 = pCtx->r11;
707 iReg++;
708 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
709 pInput->Elements[iReg].Name = HvX64RegisterR12;
710 pInput->Elements[iReg].Value.Reg64 = pCtx->r12;
711 iReg++;
712 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
713 pInput->Elements[iReg].Name = HvX64RegisterR13;
714 pInput->Elements[iReg].Value.Reg64 = pCtx->r13;
715 iReg++;
716 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
717 pInput->Elements[iReg].Name = HvX64RegisterR14;
718 pInput->Elements[iReg].Value.Reg64 = pCtx->r14;
719 iReg++;
720 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
721 pInput->Elements[iReg].Name = HvX64RegisterR15;
722 pInput->Elements[iReg].Value.Reg64 = pCtx->r15;
723 iReg++;
724 }
725 }
726
727 /* RIP & Flags */
728 if (fWhat & CPUMCTX_EXTRN_RIP)
729 {
730 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
731 pInput->Elements[iReg].Name = HvX64RegisterRip;
732 pInput->Elements[iReg].Value.Reg64 = pCtx->rip;
733 iReg++;
734 }
735 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
736 {
737 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
738 pInput->Elements[iReg].Name = HvX64RegisterRflags;
739 pInput->Elements[iReg].Value.Reg64 = pCtx->rflags.u;
740 iReg++;
741 }
742
743 /* Segments */
744# define COPY_OUT_SEG(a_idx, a_enmName, a_SReg) \
745 do { \
746 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[a_idx]); \
747 pInput->Elements[a_idx].Name = a_enmName; \
748 pInput->Elements[a_idx].Value.Segment.Base = (a_SReg).u64Base; \
749 pInput->Elements[a_idx].Value.Segment.Limit = (a_SReg).u32Limit; \
750 pInput->Elements[a_idx].Value.Segment.Selector = (a_SReg).Sel; \
751 pInput->Elements[a_idx].Value.Segment.Attributes = (a_SReg).Attr.u; \
752 } while (0)
753 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
754 {
755 if (fWhat & CPUMCTX_EXTRN_CS)
756 {
757 COPY_OUT_SEG(iReg, HvX64RegisterCs, pCtx->cs);
758 iReg++;
759 }
760 if (fWhat & CPUMCTX_EXTRN_ES)
761 {
762 COPY_OUT_SEG(iReg, HvX64RegisterEs, pCtx->es);
763 iReg++;
764 }
765 if (fWhat & CPUMCTX_EXTRN_SS)
766 {
767 COPY_OUT_SEG(iReg, HvX64RegisterSs, pCtx->ss);
768 iReg++;
769 }
770 if (fWhat & CPUMCTX_EXTRN_DS)
771 {
772 COPY_OUT_SEG(iReg, HvX64RegisterDs, pCtx->ds);
773 iReg++;
774 }
775 if (fWhat & CPUMCTX_EXTRN_FS)
776 {
777 COPY_OUT_SEG(iReg, HvX64RegisterFs, pCtx->fs);
778 iReg++;
779 }
780 if (fWhat & CPUMCTX_EXTRN_GS)
781 {
782 COPY_OUT_SEG(iReg, HvX64RegisterGs, pCtx->gs);
783 iReg++;
784 }
785 }
786
787 /* Descriptor tables & task segment. */
788 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
789 {
790 if (fWhat & CPUMCTX_EXTRN_LDTR)
791 {
792 COPY_OUT_SEG(iReg, HvX64RegisterLdtr, pCtx->ldtr);
793 iReg++;
794 }
795 if (fWhat & CPUMCTX_EXTRN_TR)
796 {
797 COPY_OUT_SEG(iReg, HvX64RegisterTr, pCtx->tr);
798 iReg++;
799 }
800
801 if (fWhat & CPUMCTX_EXTRN_IDTR)
802 {
803 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
804 pInput->Elements[iReg].Value.Table.Pad[0] = 0;
805 pInput->Elements[iReg].Value.Table.Pad[1] = 0;
806 pInput->Elements[iReg].Value.Table.Pad[2] = 0;
807 pInput->Elements[iReg].Name = HvX64RegisterIdtr;
808 pInput->Elements[iReg].Value.Table.Limit = pCtx->idtr.cbIdt;
809 pInput->Elements[iReg].Value.Table.Base = pCtx->idtr.pIdt;
810 iReg++;
811 }
812 if (fWhat & CPUMCTX_EXTRN_GDTR)
813 {
814 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
815 pInput->Elements[iReg].Value.Table.Pad[0] = 0;
816 pInput->Elements[iReg].Value.Table.Pad[1] = 0;
817 pInput->Elements[iReg].Value.Table.Pad[2] = 0;
818 pInput->Elements[iReg].Name = HvX64RegisterGdtr;
819 pInput->Elements[iReg].Value.Table.Limit = pCtx->gdtr.cbGdt;
820 pInput->Elements[iReg].Value.Table.Base = pCtx->gdtr.pGdt;
821 iReg++;
822 }
823 }
824
825 /* Control registers. */
826 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
827 {
828 if (fWhat & CPUMCTX_EXTRN_CR0)
829 {
830 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
831 pInput->Elements[iReg].Name = HvX64RegisterCr0;
832 pInput->Elements[iReg].Value.Reg64 = pCtx->cr0;
833 iReg++;
834 }
835 if (fWhat & CPUMCTX_EXTRN_CR2)
836 {
837 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
838 pInput->Elements[iReg].Name = HvX64RegisterCr2;
839 pInput->Elements[iReg].Value.Reg64 = pCtx->cr2;
840 iReg++;
841 }
842 if (fWhat & CPUMCTX_EXTRN_CR3)
843 {
844 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
845 pInput->Elements[iReg].Name = HvX64RegisterCr3;
846 pInput->Elements[iReg].Value.Reg64 = pCtx->cr3;
847 iReg++;
848 }
849 if (fWhat & CPUMCTX_EXTRN_CR4)
850 {
851 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
852 pInput->Elements[iReg].Name = HvX64RegisterCr4;
853 pInput->Elements[iReg].Value.Reg64 = pCtx->cr4;
854 iReg++;
855 }
856 }
857 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
858 {
859 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
860 pInput->Elements[iReg].Name = HvX64RegisterCr8;
861 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestCR8(pGVCpu);
862 iReg++;
863 }
864
865 /** @todo does HvX64RegisterXfem mean XCR0? What about the related MSR? */
866
867 /* Debug registers. */
868/** @todo fixme. Figure out what the hyper-v version of KVM_SET_GUEST_DEBUG would be. */
869 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
870 {
871 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
872 pInput->Elements[iReg].Name = HvX64RegisterDr0;
873 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR0(pGVCpu);
874 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[0];
875 iReg++;
876 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
877 pInput->Elements[iReg].Name = HvX64RegisterDr1;
878 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR1(pGVCpu);
879 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[1];
880 iReg++;
881 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
882 pInput->Elements[iReg].Name = HvX64RegisterDr2;
883 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR2(pGVCpu);
884 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[2];
885 iReg++;
886 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
887 pInput->Elements[iReg].Name = HvX64RegisterDr3;
888 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR3(pGVCpu);
889 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[3];
890 iReg++;
891 }
892 if (fWhat & CPUMCTX_EXTRN_DR6)
893 {
894 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
895 pInput->Elements[iReg].Name = HvX64RegisterDr6;
896 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR6(pGVCpu);
897 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[6];
898 iReg++;
899 }
900 if (fWhat & CPUMCTX_EXTRN_DR7)
901 {
902 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
903 pInput->Elements[iReg].Name = HvX64RegisterDr7;
904 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR7(pGVCpu);
905 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[7];
906 iReg++;
907 }
908
909 /* Floating point state. */
910 if (fWhat & CPUMCTX_EXTRN_X87)
911 {
912 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
913 pInput->Elements[iReg].Name = HvX64RegisterFpMmx0;
914 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[0].au64[0];
915 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[0].au64[1];
916 iReg++;
917 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
918 pInput->Elements[iReg].Name = HvX64RegisterFpMmx1;
919 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[1].au64[0];
920 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[1].au64[1];
921 iReg++;
922 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
923 pInput->Elements[iReg].Name = HvX64RegisterFpMmx2;
924 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[2].au64[0];
925 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[2].au64[1];
926 iReg++;
927 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
928 pInput->Elements[iReg].Name = HvX64RegisterFpMmx3;
929 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[3].au64[0];
930 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[3].au64[1];
931 iReg++;
932 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
933 pInput->Elements[iReg].Name = HvX64RegisterFpMmx4;
934 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[4].au64[0];
935 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[4].au64[1];
936 iReg++;
937 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
938 pInput->Elements[iReg].Name = HvX64RegisterFpMmx5;
939 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[5].au64[0];
940 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[5].au64[1];
941 iReg++;
942 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
943 pInput->Elements[iReg].Name = HvX64RegisterFpMmx6;
944 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[6].au64[0];
945 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[6].au64[1];
946 iReg++;
947 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
948 pInput->Elements[iReg].Name = HvX64RegisterFpMmx7;
949 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[7].au64[0];
950 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[7].au64[1];
951 iReg++;
952
953 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
954 pInput->Elements[iReg].Name = HvX64RegisterFpControlStatus;
955 pInput->Elements[iReg].Value.FpControlStatus.FpControl = pCtx->pXStateR0->x87.FCW;
956 pInput->Elements[iReg].Value.FpControlStatus.FpStatus = pCtx->pXStateR0->x87.FSW;
957 pInput->Elements[iReg].Value.FpControlStatus.FpTag = pCtx->pXStateR0->x87.FTW;
958 pInput->Elements[iReg].Value.FpControlStatus.Reserved = pCtx->pXStateR0->x87.FTW >> 8;
959 pInput->Elements[iReg].Value.FpControlStatus.LastFpOp = pCtx->pXStateR0->x87.FOP;
960 pInput->Elements[iReg].Value.FpControlStatus.LastFpRip = (pCtx->pXStateR0->x87.FPUIP)
961 | ((uint64_t)pCtx->pXStateR0->x87.CS << 32)
962 | ((uint64_t)pCtx->pXStateR0->x87.Rsrvd1 << 48);
963 iReg++;
964/** @todo we've got trouble if we try to write just SSE w/o X87. */
965 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
966 pInput->Elements[iReg].Name = HvX64RegisterXmmControlStatus;
967 pInput->Elements[iReg].Value.XmmControlStatus.LastFpRdp = (pCtx->pXStateR0->x87.FPUDP)
968 | ((uint64_t)pCtx->pXStateR0->x87.DS << 32)
969 | ((uint64_t)pCtx->pXStateR0->x87.Rsrvd2 << 48);
970 pInput->Elements[iReg].Value.XmmControlStatus.XmmStatusControl = pCtx->pXStateR0->x87.MXCSR;
971 pInput->Elements[iReg].Value.XmmControlStatus.XmmStatusControlMask = pCtx->pXStateR0->x87.MXCSR_MASK; /** @todo ??? (Isn't this an output field?) */
972 iReg++;
973 }
974
975 /* Vector state. */
976 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
977 {
978 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
979 pInput->Elements[iReg].Name = HvX64RegisterXmm0;
980 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Lo;
981 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Hi;
982 iReg++;
983 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
984 pInput->Elements[iReg].Name = HvX64RegisterXmm1;
985 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Lo;
986 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Hi;
987 iReg++;
988 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
989 pInput->Elements[iReg].Name = HvX64RegisterXmm2;
990 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Lo;
991 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Hi;
992 iReg++;
993 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
994 pInput->Elements[iReg].Name = HvX64RegisterXmm3;
995 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Lo;
996 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Hi;
997 iReg++;
998 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
999 pInput->Elements[iReg].Name = HvX64RegisterXmm4;
1000 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Lo;
1001 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Hi;
1002 iReg++;
1003 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1004 pInput->Elements[iReg].Name = HvX64RegisterXmm5;
1005 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Lo;
1006 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Hi;
1007 iReg++;
1008 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1009 pInput->Elements[iReg].Name = HvX64RegisterXmm6;
1010 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Lo;
1011 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Hi;
1012 iReg++;
1013 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1014 pInput->Elements[iReg].Name = HvX64RegisterXmm7;
1015 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Lo;
1016 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Hi;
1017 iReg++;
1018 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1019 pInput->Elements[iReg].Name = HvX64RegisterXmm8;
1020 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Lo;
1021 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Hi;
1022 iReg++;
1023 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1024 pInput->Elements[iReg].Name = HvX64RegisterXmm9;
1025 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Lo;
1026 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Hi;
1027 iReg++;
1028 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1029 pInput->Elements[iReg].Name = HvX64RegisterXmm10;
1030 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Lo;
1031 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Hi;
1032 iReg++;
1033 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1034 pInput->Elements[iReg].Name = HvX64RegisterXmm11;
1035 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Lo;
1036 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Hi;
1037 iReg++;
1038 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1039 pInput->Elements[iReg].Name = HvX64RegisterXmm12;
1040 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Lo;
1041 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Hi;
1042 iReg++;
1043 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1044 pInput->Elements[iReg].Name = HvX64RegisterXmm13;
1045 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Lo;
1046 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Hi;
1047 iReg++;
1048 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1049 pInput->Elements[iReg].Name = HvX64RegisterXmm14;
1050 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Lo;
1051 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Hi;
1052 iReg++;
1053 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1054 pInput->Elements[iReg].Name = HvX64RegisterXmm15;
1055 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Lo;
1056 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Hi;
1057 iReg++;
1058 }
1059
1060 /* MSRs */
1061 // HvX64RegisterTsc - don't touch
1062 if (fWhat & CPUMCTX_EXTRN_EFER)
1063 {
1064 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1065 pInput->Elements[iReg].Name = HvX64RegisterEfer;
1066 pInput->Elements[iReg].Value.Reg64 = pCtx->msrEFER;
1067 iReg++;
1068 }
1069 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1070 {
1071 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1072 pInput->Elements[iReg].Name = HvX64RegisterKernelGsBase;
1073 pInput->Elements[iReg].Value.Reg64 = pCtx->msrKERNELGSBASE;
1074 iReg++;
1075 }
1076 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1077 {
1078 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1079 pInput->Elements[iReg].Name = HvX64RegisterSysenterCs;
1080 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.cs;
1081 iReg++;
1082 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1083 pInput->Elements[iReg].Name = HvX64RegisterSysenterEip;
1084 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.eip;
1085 iReg++;
1086 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1087 pInput->Elements[iReg].Name = HvX64RegisterSysenterEsp;
1088 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.esp;
1089 iReg++;
1090 }
1091 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1092 {
1093 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1094 pInput->Elements[iReg].Name = HvX64RegisterStar;
1095 pInput->Elements[iReg].Value.Reg64 = pCtx->msrSTAR;
1096 iReg++;
1097 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1098 pInput->Elements[iReg].Name = HvX64RegisterLstar;
1099 pInput->Elements[iReg].Value.Reg64 = pCtx->msrLSTAR;
1100 iReg++;
1101 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1102 pInput->Elements[iReg].Name = HvX64RegisterCstar;
1103 pInput->Elements[iReg].Value.Reg64 = pCtx->msrCSTAR;
1104 iReg++;
1105 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1106 pInput->Elements[iReg].Name = HvX64RegisterSfmask;
1107 pInput->Elements[iReg].Value.Reg64 = pCtx->msrSFMASK;
1108 iReg++;
1109 }
1110 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1111 {
1112 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1113 pInput->Elements[iReg].Name = HvX64RegisterApicBase;
1114 pInput->Elements[iReg].Value.Reg64 = APICGetBaseMsrNoCheck(pGVCpu);
1115 iReg++;
1116 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1117 pInput->Elements[iReg].Name = HvX64RegisterPat;
1118 pInput->Elements[iReg].Value.Reg64 = pCtx->msrPAT;
1119 iReg++;
1120# if 0 /** @todo HvX64RegisterMtrrCap is read only? Seems it's not even readable. */
1121 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1122 pInput->Elements[iReg].Name = HvX64RegisterMtrrCap;
1123 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestIa32MtrrCap(pGVCpu);
1124 iReg++;
1125# endif
1126
1127 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pGVCpu);
1128
1129 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1130 pInput->Elements[iReg].Name = HvX64RegisterMtrrDefType;
1131 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrDefType;
1132 iReg++;
1133
1134 /** @todo we don't keep state for HvX64RegisterMtrrPhysBaseX and HvX64RegisterMtrrPhysMaskX. */
1135
1136 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1137 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix64k00000;
1138 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix64K_00000;
1139 iReg++;
1140 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1141 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix16k80000;
1142 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix16K_80000;
1143 iReg++;
1144 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1145 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix16kA0000;
1146 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix16K_A0000;
1147 iReg++;
1148 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1149 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kC0000;
1150 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_C0000;
1151 iReg++;
1152 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1153 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kC8000;
1154 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_C8000;
1155 iReg++;
1156 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1157 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kD0000;
1158 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_D0000;
1159 iReg++;
1160 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1161 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kD8000;
1162 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_D8000;
1163 iReg++;
1164 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1165 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kE0000;
1166 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_E0000;
1167 iReg++;
1168 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1169 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kE8000;
1170 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_E8000;
1171 iReg++;
1172 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1173 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kF0000;
1174 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_F0000;
1175 iReg++;
1176 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1177 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kF8000;
1178 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_F8000;
1179 iReg++;
1180 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1181 pInput->Elements[iReg].Name = HvX64RegisterTscAux;
1182 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.TscAux;
1183 iReg++;
1184
1185# if 0 /** @todo Why can't we write these on Intel systems? Not that we really care... */
1186 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM);
1187 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
1188 {
1189 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1190 pInput->Elements[iReg].Name = HvX64RegisterIa32MiscEnable;
1191 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MiscEnable;
1192 iReg++;
1193 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1194 pInput->Elements[iReg].Name = HvX64RegisterIa32FeatureControl;
1195 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestIa32FeatureControl(pGVCpu);
1196 iReg++;
1197 }
1198# endif
1199 }
1200
1201 /* Event injection (clear it). */
1202 if (fWhat & CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)
1203 {
1204 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1205 pInput->Elements[iReg].Name = HvRegisterPendingInterruption;
1206 pInput->Elements[iReg].Value.Reg64 = 0;
1207 iReg++;
1208 }
1209
1210 /* Interruptibility state. This can get a little complicated since we get
1211 half of the state via HV_X64_VP_EXECUTION_STATE. */
1212 if ( (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
1213 == (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI) )
1214 {
1215 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1216 pInput->Elements[iReg].Name = HvRegisterInterruptState;
1217 pInput->Elements[iReg].Value.Reg64 = 0;
1218 if ( VMCPU_FF_IS_SET(pGVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1219 && EMGetInhibitInterruptsPC(pGVCpu) == pCtx->rip)
1220 pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1;
1221 if (VMCPU_FF_IS_SET(pGVCpu, VMCPU_FF_BLOCK_NMIS))
1222 pInput->Elements[iReg].Value.InterruptState.NmiMasked = 1;
1223 iReg++;
1224 }
1225 else if (fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT)
1226 {
1227 if ( pGVCpu->nem.s.fLastInterruptShadow
1228 || ( VMCPU_FF_IS_SET(pGVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1229 && EMGetInhibitInterruptsPC(pGVCpu) == pCtx->rip))
1230 {
1231 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1232 pInput->Elements[iReg].Name = HvRegisterInterruptState;
1233 pInput->Elements[iReg].Value.Reg64 = 0;
1234 if ( VMCPU_FF_IS_SET(pGVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1235 && EMGetInhibitInterruptsPC(pGVCpu) == pCtx->rip)
1236 pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1;
1237 /** @todo Retrieve NMI state, currently assuming it's zero. (yes this may happen on I/O) */
1238 //if (VMCPU_FF_IS_ANY_SET(pGVCpu, VMCPU_FF_BLOCK_NMIS))
1239 // pInput->Elements[iReg].Value.InterruptState.NmiMasked = 1;
1240 iReg++;
1241 }
1242 }
1243 else
1244 Assert(!(fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI));
1245
1246 /* Interrupt windows. Always set if active as Hyper-V seems to be forgetful. */
1247 uint8_t const fDesiredIntWin = pGVCpu->nem.s.fDesiredInterruptWindows;
1248 if ( fDesiredIntWin
1249 || pGVCpu->nem.s.fCurrentInterruptWindows != fDesiredIntWin)
1250 {
1251 pGVCpu->nem.s.fCurrentInterruptWindows = pGVCpu->nem.s.fDesiredInterruptWindows;
1252 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1253 pInput->Elements[iReg].Name = HvX64RegisterDeliverabilityNotifications;
1254 pInput->Elements[iReg].Value.DeliverabilityNotifications.AsUINT64 = fDesiredIntWin;
1255 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.NmiNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_NMI));
1256 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.InterruptNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_REGULAR));
1257 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.InterruptPriority == (fDesiredIntWin & NEM_WIN_INTW_F_PRIO_MASK) >> NEM_WIN_INTW_F_PRIO_SHIFT);
1258 iReg++;
1259 }
1260
1261 /// @todo HvRegisterPendingEvent0
1262 /// @todo HvRegisterPendingEvent1
1263
1264 /*
1265 * Set the registers.
1266 */
1267 Assert((uintptr_t)&pInput->Elements[iReg] - (uintptr_t)pGVCpu->nemr0.s.HypercallData.pbPage < PAGE_SIZE); /* max is 127 */
1268
1269 /*
1270 * Make the hypercall.
1271 */
1272 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, iReg),
1273 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0 /*GCPhysOutput*/);
1274 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(iReg),
1275 ("uResult=%RX64 iRegs=%#x\n", uResult, iReg),
1276 VERR_NEM_SET_REGISTERS_FAILED);
1277 //LogFlow(("nemR0WinExportState: uResult=%#RX64 iReg=%zu fWhat=%#018RX64 fExtrn=%#018RX64 -> %#018RX64\n", uResult, iReg, fWhat, pCtx->fExtrn,
1278 // pCtx->fExtrn | CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM ));
1279 pCtx->fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM;
1280 return VINF_SUCCESS;
1281}
1282#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
1283
1284
1285/**
1286 * Export the state to the native API (out of CPUMCTX).
1287 *
1288 * @returns VBox status code.
1289 * @param pGVM The ring-0 VM handle.
1290 * @param idCpu The calling EMT. Necessary for getting the
1291 * hypercall page and arguments.
1292 */
1293VMMR0_INT_DECL(int) NEMR0ExportState(PGVM pGVM, VMCPUID idCpu)
1294{
1295#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
1296 /*
1297 * Validate the call.
1298 */
1299 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
1300 if (RT_SUCCESS(rc))
1301 {
1302 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1303 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
1304
1305 /*
1306 * Call worker.
1307 */
1308 rc = nemR0WinExportState(pGVM, pGVCpu, &pGVCpu->cpum.GstCtx);
1309 }
1310 return rc;
1311#else
1312 RT_NOREF(pGVM, idCpu);
1313 return VERR_NOT_IMPLEMENTED;
1314#endif
1315}
1316
1317
1318#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
1319/**
1320 * Worker for NEMR0ImportState.
1321 *
1322 * The intention is to use it internally later.
1323 *
1324 * @returns VBox status code.
1325 * @param pGVM The ring-0 VM handle.
1326 * @param pGVCpu The ring-0 VCPU handle.
1327 * @param pCtx The CPU context structure to import into.
1328 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
1329 * @param fCanUpdateCr3 Whether it's safe to update CR3 or not.
1330 */
1331NEM_TMPL_STATIC int nemR0WinImportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat, bool fCanUpdateCr3)
1332{
1333 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
1334 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
1335 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
1336 Assert(pCtx == &pGVCpu->cpum.GstCtx);
1337
1338 fWhat &= pCtx->fExtrn;
1339
1340 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
1341 pInput->VpIndex = pGVCpu->idCpu;
1342 pInput->fFlags = 0;
1343
1344 /* GPRs */
1345 uintptr_t iReg = 0;
1346 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1347 {
1348 if (fWhat & CPUMCTX_EXTRN_RAX)
1349 pInput->Names[iReg++] = HvX64RegisterRax;
1350 if (fWhat & CPUMCTX_EXTRN_RCX)
1351 pInput->Names[iReg++] = HvX64RegisterRcx;
1352 if (fWhat & CPUMCTX_EXTRN_RDX)
1353 pInput->Names[iReg++] = HvX64RegisterRdx;
1354 if (fWhat & CPUMCTX_EXTRN_RBX)
1355 pInput->Names[iReg++] = HvX64RegisterRbx;
1356 if (fWhat & CPUMCTX_EXTRN_RSP)
1357 pInput->Names[iReg++] = HvX64RegisterRsp;
1358 if (fWhat & CPUMCTX_EXTRN_RBP)
1359 pInput->Names[iReg++] = HvX64RegisterRbp;
1360 if (fWhat & CPUMCTX_EXTRN_RSI)
1361 pInput->Names[iReg++] = HvX64RegisterRsi;
1362 if (fWhat & CPUMCTX_EXTRN_RDI)
1363 pInput->Names[iReg++] = HvX64RegisterRdi;
1364 if (fWhat & CPUMCTX_EXTRN_R8_R15)
1365 {
1366 pInput->Names[iReg++] = HvX64RegisterR8;
1367 pInput->Names[iReg++] = HvX64RegisterR9;
1368 pInput->Names[iReg++] = HvX64RegisterR10;
1369 pInput->Names[iReg++] = HvX64RegisterR11;
1370 pInput->Names[iReg++] = HvX64RegisterR12;
1371 pInput->Names[iReg++] = HvX64RegisterR13;
1372 pInput->Names[iReg++] = HvX64RegisterR14;
1373 pInput->Names[iReg++] = HvX64RegisterR15;
1374 }
1375 }
1376
1377 /* RIP & Flags */
1378 if (fWhat & CPUMCTX_EXTRN_RIP)
1379 pInput->Names[iReg++] = HvX64RegisterRip;
1380 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
1381 pInput->Names[iReg++] = HvX64RegisterRflags;
1382
1383 /* Segments */
1384 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
1385 {
1386 if (fWhat & CPUMCTX_EXTRN_CS)
1387 pInput->Names[iReg++] = HvX64RegisterCs;
1388 if (fWhat & CPUMCTX_EXTRN_ES)
1389 pInput->Names[iReg++] = HvX64RegisterEs;
1390 if (fWhat & CPUMCTX_EXTRN_SS)
1391 pInput->Names[iReg++] = HvX64RegisterSs;
1392 if (fWhat & CPUMCTX_EXTRN_DS)
1393 pInput->Names[iReg++] = HvX64RegisterDs;
1394 if (fWhat & CPUMCTX_EXTRN_FS)
1395 pInput->Names[iReg++] = HvX64RegisterFs;
1396 if (fWhat & CPUMCTX_EXTRN_GS)
1397 pInput->Names[iReg++] = HvX64RegisterGs;
1398 }
1399
1400 /* Descriptor tables and the task segment. */
1401 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
1402 {
1403 if (fWhat & CPUMCTX_EXTRN_LDTR)
1404 pInput->Names[iReg++] = HvX64RegisterLdtr;
1405 if (fWhat & CPUMCTX_EXTRN_TR)
1406 pInput->Names[iReg++] = HvX64RegisterTr;
1407 if (fWhat & CPUMCTX_EXTRN_IDTR)
1408 pInput->Names[iReg++] = HvX64RegisterIdtr;
1409 if (fWhat & CPUMCTX_EXTRN_GDTR)
1410 pInput->Names[iReg++] = HvX64RegisterGdtr;
1411 }
1412
1413 /* Control registers. */
1414 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
1415 {
1416 if (fWhat & CPUMCTX_EXTRN_CR0)
1417 pInput->Names[iReg++] = HvX64RegisterCr0;
1418 if (fWhat & CPUMCTX_EXTRN_CR2)
1419 pInput->Names[iReg++] = HvX64RegisterCr2;
1420 if (fWhat & CPUMCTX_EXTRN_CR3)
1421 pInput->Names[iReg++] = HvX64RegisterCr3;
1422 if (fWhat & CPUMCTX_EXTRN_CR4)
1423 pInput->Names[iReg++] = HvX64RegisterCr4;
1424 }
1425 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
1426 pInput->Names[iReg++] = HvX64RegisterCr8;
1427
1428 /* Debug registers. */
1429 if (fWhat & CPUMCTX_EXTRN_DR7)
1430 pInput->Names[iReg++] = HvX64RegisterDr7;
1431 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
1432 {
1433 if (!(fWhat & CPUMCTX_EXTRN_DR7) && (pCtx->fExtrn & CPUMCTX_EXTRN_DR7))
1434 {
1435 fWhat |= CPUMCTX_EXTRN_DR7;
1436 pInput->Names[iReg++] = HvX64RegisterDr7;
1437 }
1438 pInput->Names[iReg++] = HvX64RegisterDr0;
1439 pInput->Names[iReg++] = HvX64RegisterDr1;
1440 pInput->Names[iReg++] = HvX64RegisterDr2;
1441 pInput->Names[iReg++] = HvX64RegisterDr3;
1442 }
1443 if (fWhat & CPUMCTX_EXTRN_DR6)
1444 pInput->Names[iReg++] = HvX64RegisterDr6;
1445
1446 /* Floating point state. */
1447 if (fWhat & CPUMCTX_EXTRN_X87)
1448 {
1449 pInput->Names[iReg++] = HvX64RegisterFpMmx0;
1450 pInput->Names[iReg++] = HvX64RegisterFpMmx1;
1451 pInput->Names[iReg++] = HvX64RegisterFpMmx2;
1452 pInput->Names[iReg++] = HvX64RegisterFpMmx3;
1453 pInput->Names[iReg++] = HvX64RegisterFpMmx4;
1454 pInput->Names[iReg++] = HvX64RegisterFpMmx5;
1455 pInput->Names[iReg++] = HvX64RegisterFpMmx6;
1456 pInput->Names[iReg++] = HvX64RegisterFpMmx7;
1457 pInput->Names[iReg++] = HvX64RegisterFpControlStatus;
1458 }
1459 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
1460 pInput->Names[iReg++] = HvX64RegisterXmmControlStatus;
1461
1462 /* Vector state. */
1463 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
1464 {
1465 pInput->Names[iReg++] = HvX64RegisterXmm0;
1466 pInput->Names[iReg++] = HvX64RegisterXmm1;
1467 pInput->Names[iReg++] = HvX64RegisterXmm2;
1468 pInput->Names[iReg++] = HvX64RegisterXmm3;
1469 pInput->Names[iReg++] = HvX64RegisterXmm4;
1470 pInput->Names[iReg++] = HvX64RegisterXmm5;
1471 pInput->Names[iReg++] = HvX64RegisterXmm6;
1472 pInput->Names[iReg++] = HvX64RegisterXmm7;
1473 pInput->Names[iReg++] = HvX64RegisterXmm8;
1474 pInput->Names[iReg++] = HvX64RegisterXmm9;
1475 pInput->Names[iReg++] = HvX64RegisterXmm10;
1476 pInput->Names[iReg++] = HvX64RegisterXmm11;
1477 pInput->Names[iReg++] = HvX64RegisterXmm12;
1478 pInput->Names[iReg++] = HvX64RegisterXmm13;
1479 pInput->Names[iReg++] = HvX64RegisterXmm14;
1480 pInput->Names[iReg++] = HvX64RegisterXmm15;
1481 }
1482
1483 /* MSRs */
1484 // HvX64RegisterTsc - don't touch
1485 if (fWhat & CPUMCTX_EXTRN_EFER)
1486 pInput->Names[iReg++] = HvX64RegisterEfer;
1487 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1488 pInput->Names[iReg++] = HvX64RegisterKernelGsBase;
1489 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1490 {
1491 pInput->Names[iReg++] = HvX64RegisterSysenterCs;
1492 pInput->Names[iReg++] = HvX64RegisterSysenterEip;
1493 pInput->Names[iReg++] = HvX64RegisterSysenterEsp;
1494 }
1495 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1496 {
1497 pInput->Names[iReg++] = HvX64RegisterStar;
1498 pInput->Names[iReg++] = HvX64RegisterLstar;
1499 pInput->Names[iReg++] = HvX64RegisterCstar;
1500 pInput->Names[iReg++] = HvX64RegisterSfmask;
1501 }
1502
1503# ifdef LOG_ENABLED
1504 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM);
1505# endif
1506 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1507 {
1508 pInput->Names[iReg++] = HvX64RegisterApicBase; /// @todo APIC BASE
1509 pInput->Names[iReg++] = HvX64RegisterPat;
1510# if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
1511 pInput->Names[iReg++] = HvX64RegisterMtrrCap;
1512# endif
1513 pInput->Names[iReg++] = HvX64RegisterMtrrDefType;
1514 pInput->Names[iReg++] = HvX64RegisterMtrrFix64k00000;
1515 pInput->Names[iReg++] = HvX64RegisterMtrrFix16k80000;
1516 pInput->Names[iReg++] = HvX64RegisterMtrrFix16kA0000;
1517 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kC0000;
1518 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kC8000;
1519 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kD0000;
1520 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kD8000;
1521 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kE0000;
1522 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kE8000;
1523 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kF0000;
1524 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kF8000;
1525 pInput->Names[iReg++] = HvX64RegisterTscAux;
1526# if 0 /** @todo why can't we read HvX64RegisterIa32MiscEnable? */
1527 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
1528 pInput->Names[iReg++] = HvX64RegisterIa32MiscEnable;
1529# endif
1530# ifdef LOG_ENABLED
1531 if (enmCpuVendor != CPUMCPUVENDOR_AMD && enmCpuVendor != CPUMCPUVENDOR_HYGON)
1532 pInput->Names[iReg++] = HvX64RegisterIa32FeatureControl;
1533# endif
1534 }
1535
1536 /* Interruptibility. */
1537 if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
1538 {
1539 pInput->Names[iReg++] = HvRegisterInterruptState;
1540 pInput->Names[iReg++] = HvX64RegisterRip;
1541 }
1542
1543 /* event injection */
1544 pInput->Names[iReg++] = HvRegisterPendingInterruption;
1545 pInput->Names[iReg++] = HvRegisterPendingEvent0;
1546 pInput->Names[iReg++] = HvRegisterPendingEvent1;
1547 size_t const cRegs = iReg;
1548 size_t const cbInput = RT_ALIGN_Z(RT_UOFFSETOF_DYN(HV_INPUT_GET_VP_REGISTERS, Names[cRegs]), 32);
1549
1550 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
1551 Assert((uintptr_t)&paValues[cRegs] - (uintptr_t)pGVCpu->nemr0.s.HypercallData.pbPage < PAGE_SIZE); /* (max is around 168 registers) */
1552 RT_BZERO(paValues, cRegs * sizeof(paValues[0]));
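    /* Note: the request header and the Names[] array occupy the start of the per-VCPU
       hypercall page, and the HV_REGISTER_VALUE output array follows immediately after
       the 32-byte aligned input, so the single page (HCPhysPage / HCPhysPage + cbInput)
       carries both the request and the reply of the HvCallGetVpRegisters call below. */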
1553
1554 /*
1555 * Make the hypercall.
1556 */
1557 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, cRegs),
1558 pGVCpu->nemr0.s.HypercallData.HCPhysPage,
1559 pGVCpu->nemr0.s.HypercallData.HCPhysPage + cbInput);
1560 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(cRegs),
1561 ("uResult=%RX64 cRegs=%#x\n", uResult, cRegs),
1562 VERR_NEM_GET_REGISTERS_FAILED);
1563 //LogFlow(("nemR0WinImportState: uResult=%#RX64 iReg=%zu fWhat=%#018RX64 fExtr=%#018RX64\n", uResult, cRegs, fWhat, pCtx->fExtrn));
1564
1565 /*
1566 * Copy information to the CPUM context.
1567 */
1568 iReg = 0;
1569
1570 /* GPRs */
1571 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1572 {
1573 if (fWhat & CPUMCTX_EXTRN_RAX)
1574 {
1575 Assert(pInput->Names[iReg] == HvX64RegisterRax);
1576 pCtx->rax = paValues[iReg++].Reg64;
1577 }
1578 if (fWhat & CPUMCTX_EXTRN_RCX)
1579 {
1580 Assert(pInput->Names[iReg] == HvX64RegisterRcx);
1581 pCtx->rcx = paValues[iReg++].Reg64;
1582 }
1583 if (fWhat & CPUMCTX_EXTRN_RDX)
1584 {
1585 Assert(pInput->Names[iReg] == HvX64RegisterRdx);
1586 pCtx->rdx = paValues[iReg++].Reg64;
1587 }
1588 if (fWhat & CPUMCTX_EXTRN_RBX)
1589 {
1590 Assert(pInput->Names[iReg] == HvX64RegisterRbx);
1591 pCtx->rbx = paValues[iReg++].Reg64;
1592 }
1593 if (fWhat & CPUMCTX_EXTRN_RSP)
1594 {
1595 Assert(pInput->Names[iReg] == HvX64RegisterRsp);
1596 pCtx->rsp = paValues[iReg++].Reg64;
1597 }
1598 if (fWhat & CPUMCTX_EXTRN_RBP)
1599 {
1600 Assert(pInput->Names[iReg] == HvX64RegisterRbp);
1601 pCtx->rbp = paValues[iReg++].Reg64;
1602 }
1603 if (fWhat & CPUMCTX_EXTRN_RSI)
1604 {
1605 Assert(pInput->Names[iReg] == HvX64RegisterRsi);
1606 pCtx->rsi = paValues[iReg++].Reg64;
1607 }
1608 if (fWhat & CPUMCTX_EXTRN_RDI)
1609 {
1610 Assert(pInput->Names[iReg] == HvX64RegisterRdi);
1611 pCtx->rdi = paValues[iReg++].Reg64;
1612 }
1613 if (fWhat & CPUMCTX_EXTRN_R8_R15)
1614 {
1615 Assert(pInput->Names[iReg] == HvX64RegisterR8);
1616 Assert(pInput->Names[iReg + 7] == HvX64RegisterR15);
1617 pCtx->r8 = paValues[iReg++].Reg64;
1618 pCtx->r9 = paValues[iReg++].Reg64;
1619 pCtx->r10 = paValues[iReg++].Reg64;
1620 pCtx->r11 = paValues[iReg++].Reg64;
1621 pCtx->r12 = paValues[iReg++].Reg64;
1622 pCtx->r13 = paValues[iReg++].Reg64;
1623 pCtx->r14 = paValues[iReg++].Reg64;
1624 pCtx->r15 = paValues[iReg++].Reg64;
1625 }
1626 }
1627
1628 /* RIP & Flags */
1629 if (fWhat & CPUMCTX_EXTRN_RIP)
1630 {
1631 Assert(pInput->Names[iReg] == HvX64RegisterRip);
1632 pCtx->rip = paValues[iReg++].Reg64;
1633 }
1634 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
1635 {
1636 Assert(pInput->Names[iReg] == HvX64RegisterRflags);
1637 pCtx->rflags.u = paValues[iReg++].Reg64;
1638 }
1639
1640 /* Segments */
1641# define COPY_BACK_SEG(a_idx, a_enmName, a_SReg) \
1642 do { \
1643 Assert(pInput->Names[a_idx] == a_enmName); \
1644 (a_SReg).u64Base = paValues[a_idx].Segment.Base; \
1645 (a_SReg).u32Limit = paValues[a_idx].Segment.Limit; \
1646 (a_SReg).ValidSel = (a_SReg).Sel = paValues[a_idx].Segment.Selector; \
1647 (a_SReg).Attr.u = paValues[a_idx].Segment.Attributes; \
1648 (a_SReg).fFlags = CPUMSELREG_FLAGS_VALID; \
1649 } while (0)
1650 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
1651 {
1652 if (fWhat & CPUMCTX_EXTRN_CS)
1653 {
1654 COPY_BACK_SEG(iReg, HvX64RegisterCs, pCtx->cs);
1655 iReg++;
1656 }
1657 if (fWhat & CPUMCTX_EXTRN_ES)
1658 {
1659 COPY_BACK_SEG(iReg, HvX64RegisterEs, pCtx->es);
1660 iReg++;
1661 }
1662 if (fWhat & CPUMCTX_EXTRN_SS)
1663 {
1664 COPY_BACK_SEG(iReg, HvX64RegisterSs, pCtx->ss);
1665 iReg++;
1666 }
1667 if (fWhat & CPUMCTX_EXTRN_DS)
1668 {
1669 COPY_BACK_SEG(iReg, HvX64RegisterDs, pCtx->ds);
1670 iReg++;
1671 }
1672 if (fWhat & CPUMCTX_EXTRN_FS)
1673 {
1674 COPY_BACK_SEG(iReg, HvX64RegisterFs, pCtx->fs);
1675 iReg++;
1676 }
1677 if (fWhat & CPUMCTX_EXTRN_GS)
1678 {
1679 COPY_BACK_SEG(iReg, HvX64RegisterGs, pCtx->gs);
1680 iReg++;
1681 }
1682 }
1683 /* Descriptor tables and the task segment. */
1684 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
1685 {
1686 if (fWhat & CPUMCTX_EXTRN_LDTR)
1687 {
1688 COPY_BACK_SEG(iReg, HvX64RegisterLdtr, pCtx->ldtr);
1689 iReg++;
1690 }
1691 if (fWhat & CPUMCTX_EXTRN_TR)
1692 {
1693 /* AMD-V likes loading TR in the AVAIL state, whereas Intel insists on BUSY.  So,
1694 to avoid triggering sanity assertions around the code, always fix this up here. */
1695 COPY_BACK_SEG(iReg, HvX64RegisterTr, pCtx->tr);
1696 switch (pCtx->tr.Attr.n.u4Type)
1697 {
1698 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1699 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1700 break;
1701 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
1702 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
1703 break;
1704 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
1705 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;
1706 break;
1707 }
1708 iReg++;
1709 }
1710 if (fWhat & CPUMCTX_EXTRN_IDTR)
1711 {
1712 Assert(pInput->Names[iReg] == HvX64RegisterIdtr);
1713 pCtx->idtr.cbIdt = paValues[iReg].Table.Limit;
1714 pCtx->idtr.pIdt = paValues[iReg].Table.Base;
1715 iReg++;
1716 }
1717 if (fWhat & CPUMCTX_EXTRN_GDTR)
1718 {
1719 Assert(pInput->Names[iReg] == HvX64RegisterGdtr);
1720 pCtx->gdtr.cbGdt = paValues[iReg].Table.Limit;
1721 pCtx->gdtr.pGdt = paValues[iReg].Table.Base;
1722 iReg++;
1723 }
1724 }
1725
1726 /* Control registers. */
1727 bool fMaybeChangedMode = false;
1728 bool fUpdateCr3 = false;
1729 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
1730 {
1731 if (fWhat & CPUMCTX_EXTRN_CR0)
1732 {
1733 Assert(pInput->Names[iReg] == HvX64RegisterCr0);
1734 if (pCtx->cr0 != paValues[iReg].Reg64)
1735 {
1736 CPUMSetGuestCR0(pGVCpu, paValues[iReg].Reg64);
1737 fMaybeChangedMode = true;
1738 }
1739 iReg++;
1740 }
1741 if (fWhat & CPUMCTX_EXTRN_CR2)
1742 {
1743 Assert(pInput->Names[iReg] == HvX64RegisterCr2);
1744 pCtx->cr2 = paValues[iReg].Reg64;
1745 iReg++;
1746 }
1747 if (fWhat & CPUMCTX_EXTRN_CR3)
1748 {
1749 Assert(pInput->Names[iReg] == HvX64RegisterCr3);
1750 if (pCtx->cr3 != paValues[iReg].Reg64)
1751 {
1752 CPUMSetGuestCR3(pGVCpu, paValues[iReg].Reg64);
1753 fUpdateCr3 = true;
1754 }
1755 iReg++;
1756 }
1757 if (fWhat & CPUMCTX_EXTRN_CR4)
1758 {
1759 Assert(pInput->Names[iReg] == HvX64RegisterCr4);
1760 if (pCtx->cr4 != paValues[iReg].Reg64)
1761 {
1762 CPUMSetGuestCR4(pGVCpu, paValues[iReg].Reg64);
1763 fMaybeChangedMode = true;
1764 }
1765 iReg++;
1766 }
1767 }
1768 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
1769 {
1770 Assert(pInput->Names[iReg] == HvX64RegisterCr8);
1771 APICSetTpr(pGVCpu, (uint8_t)paValues[iReg].Reg64 << 4);
1772 iReg++;
1773 }
1774
1775 /* Debug registers. */
1776 if (fWhat & CPUMCTX_EXTRN_DR7)
1777 {
1778 Assert(pInput->Names[iReg] == HvX64RegisterDr7);
1779 if (pCtx->dr[7] != paValues[iReg].Reg64)
1780 CPUMSetGuestDR7(pGVCpu, paValues[iReg].Reg64);
1781 pCtx->fExtrn &= ~CPUMCTX_EXTRN_DR7; /* Hack alert! Avoids asserting when processing CPUMCTX_EXTRN_DR0_DR3. */
1782 iReg++;
1783 }
1784 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
1785 {
1786 Assert(pInput->Names[iReg] == HvX64RegisterDr0);
1787 Assert(pInput->Names[iReg+3] == HvX64RegisterDr3);
1788 if (pCtx->dr[0] != paValues[iReg].Reg64)
1789 CPUMSetGuestDR0(pGVCpu, paValues[iReg].Reg64);
1790 iReg++;
1791 if (pCtx->dr[1] != paValues[iReg].Reg64)
1792 CPUMSetGuestDR1(pGVCpu, paValues[iReg].Reg64);
1793 iReg++;
1794 if (pCtx->dr[2] != paValues[iReg].Reg64)
1795 CPUMSetGuestDR2(pGVCpu, paValues[iReg].Reg64);
1796 iReg++;
1797 if (pCtx->dr[3] != paValues[iReg].Reg64)
1798 CPUMSetGuestDR3(pGVCpu, paValues[iReg].Reg64);
1799 iReg++;
1800 }
1801 if (fWhat & CPUMCTX_EXTRN_DR6)
1802 {
1803 Assert(pInput->Names[iReg] == HvX64RegisterDr6);
1804 if (pCtx->dr[6] != paValues[iReg].Reg64)
1805 CPUMSetGuestDR6(pGVCpu, paValues[iReg].Reg64);
1806 iReg++;
1807 }
1808
1809 /* Floating point state. */
1810 if (fWhat & CPUMCTX_EXTRN_X87)
1811 {
1812 Assert(pInput->Names[iReg] == HvX64RegisterFpMmx0);
1813 Assert(pInput->Names[iReg + 7] == HvX64RegisterFpMmx7);
1814 pCtx->pXStateR0->x87.aRegs[0].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1815 pCtx->pXStateR0->x87.aRegs[0].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1816 iReg++;
1817 pCtx->pXStateR0->x87.aRegs[1].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1818 pCtx->pXStateR0->x87.aRegs[1].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1819 iReg++;
1820 pCtx->pXStateR0->x87.aRegs[2].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1821 pCtx->pXStateR0->x87.aRegs[2].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1822 iReg++;
1823 pCtx->pXStateR0->x87.aRegs[3].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1824 pCtx->pXStateR0->x87.aRegs[3].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1825 iReg++;
1826 pCtx->pXStateR0->x87.aRegs[4].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1827 pCtx->pXStateR0->x87.aRegs[4].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1828 iReg++;
1829 pCtx->pXStateR0->x87.aRegs[5].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1830 pCtx->pXStateR0->x87.aRegs[5].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1831 iReg++;
1832 pCtx->pXStateR0->x87.aRegs[6].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1833 pCtx->pXStateR0->x87.aRegs[6].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1834 iReg++;
1835 pCtx->pXStateR0->x87.aRegs[7].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1836 pCtx->pXStateR0->x87.aRegs[7].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1837 iReg++;
1838
1839 Assert(pInput->Names[iReg] == HvX64RegisterFpControlStatus);
1840 pCtx->pXStateR0->x87.FCW = paValues[iReg].FpControlStatus.FpControl;
1841 pCtx->pXStateR0->x87.FSW = paValues[iReg].FpControlStatus.FpStatus;
1842 pCtx->pXStateR0->x87.FTW = paValues[iReg].FpControlStatus.FpTag
1843 /*| (paValues[iReg].FpControlStatus.Reserved << 8)*/;
1844 pCtx->pXStateR0->x87.FOP = paValues[iReg].FpControlStatus.LastFpOp;
1845 pCtx->pXStateR0->x87.FPUIP = (uint32_t)paValues[iReg].FpControlStatus.LastFpRip;
1846 pCtx->pXStateR0->x87.CS = (uint16_t)(paValues[iReg].FpControlStatus.LastFpRip >> 32);
1847 pCtx->pXStateR0->x87.Rsrvd1 = (uint16_t)(paValues[iReg].FpControlStatus.LastFpRip >> 48);
1848 iReg++;
1849 }
1850
1851 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
1852 {
1853 Assert(pInput->Names[iReg] == HvX64RegisterXmmControlStatus);
1854 if (fWhat & CPUMCTX_EXTRN_X87)
1855 {
1856 pCtx->pXStateR0->x87.FPUDP = (uint32_t)paValues[iReg].XmmControlStatus.LastFpRdp;
1857 pCtx->pXStateR0->x87.DS = (uint16_t)(paValues[iReg].XmmControlStatus.LastFpRdp >> 32);
1858 pCtx->pXStateR0->x87.Rsrvd2 = (uint16_t)(paValues[iReg].XmmControlStatus.LastFpRdp >> 48);
1859 }
1860 pCtx->pXStateR0->x87.MXCSR = paValues[iReg].XmmControlStatus.XmmStatusControl;
1861 pCtx->pXStateR0->x87.MXCSR_MASK = paValues[iReg].XmmControlStatus.XmmStatusControlMask; /** @todo ??? (Isn't this an output field?) */
1862 iReg++;
1863 }
1864
1865 /* Vector state. */
1866 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
1867 {
1868 Assert(pInput->Names[iReg] == HvX64RegisterXmm0);
1869 Assert(pInput->Names[iReg+15] == HvX64RegisterXmm15);
1870 pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1871 pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1872 iReg++;
1873 pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1874 pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1875 iReg++;
1876 pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1877 pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1878 iReg++;
1879 pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1880 pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1881 iReg++;
1882 pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1883 pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1884 iReg++;
1885 pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1886 pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1887 iReg++;
1888 pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1889 pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1890 iReg++;
1891 pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1892 pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1893 iReg++;
1894 pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1895 pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1896 iReg++;
1897 pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1898 pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1899 iReg++;
1900 pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1901 pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1902 iReg++;
1903 pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1904 pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1905 iReg++;
1906 pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1907 pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1908 iReg++;
1909 pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1910 pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1911 iReg++;
1912 pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1913 pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1914 iReg++;
1915 pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1916 pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1917 iReg++;
1918 }
1919
1920
1921 /* MSRs */
1922 // HvX64RegisterTsc - don't touch
1923 if (fWhat & CPUMCTX_EXTRN_EFER)
1924 {
1925 Assert(pInput->Names[iReg] == HvX64RegisterEfer);
1926 if (paValues[iReg].Reg64 != pCtx->msrEFER)
1927 {
1928 Log7(("NEM/%u: MSR EFER changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrEFER, paValues[iReg].Reg64));
1929 if ((paValues[iReg].Reg64 ^ pCtx->msrEFER) & MSR_K6_EFER_NXE)
1930 PGMNotifyNxeChanged(pGVCpu, RT_BOOL(paValues[iReg].Reg64 & MSR_K6_EFER_NXE));
1931 pCtx->msrEFER = paValues[iReg].Reg64;
1932 fMaybeChangedMode = true;
1933 }
1934 iReg++;
1935 }
1936 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1937 {
1938 Assert(pInput->Names[iReg] == HvX64RegisterKernelGsBase);
1939 if (pCtx->msrKERNELGSBASE != paValues[iReg].Reg64)
1940 Log7(("NEM/%u: MSR KERNELGSBASE changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrKERNELGSBASE, paValues[iReg].Reg64));
1941 pCtx->msrKERNELGSBASE = paValues[iReg].Reg64;
1942 iReg++;
1943 }
1944 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1945 {
1946 Assert(pInput->Names[iReg] == HvX64RegisterSysenterCs);
1947 if (pCtx->SysEnter.cs != paValues[iReg].Reg64)
1948 Log7(("NEM/%u: MSR SYSENTER.CS changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->SysEnter.cs, paValues[iReg].Reg64));
1949 pCtx->SysEnter.cs = paValues[iReg].Reg64;
1950 iReg++;
1951
1952 Assert(pInput->Names[iReg] == HvX64RegisterSysenterEip);
1953 if (pCtx->SysEnter.eip != paValues[iReg].Reg64)
1954 Log7(("NEM/%u: MSR SYSENTER.EIP changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->SysEnter.eip, paValues[iReg].Reg64));
1955 pCtx->SysEnter.eip = paValues[iReg].Reg64;
1956 iReg++;
1957
1958 Assert(pInput->Names[iReg] == HvX64RegisterSysenterEsp);
1959 if (pCtx->SysEnter.esp != paValues[iReg].Reg64)
1960 Log7(("NEM/%u: MSR SYSENTER.ESP changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->SysEnter.esp, paValues[iReg].Reg64));
1961 pCtx->SysEnter.esp = paValues[iReg].Reg64;
1962 iReg++;
1963 }
1964 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1965 {
1966 Assert(pInput->Names[iReg] == HvX64RegisterStar);
1967 if (pCtx->msrSTAR != paValues[iReg].Reg64)
1968 Log7(("NEM/%u: MSR STAR changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrSTAR, paValues[iReg].Reg64));
1969 pCtx->msrSTAR = paValues[iReg].Reg64;
1970 iReg++;
1971
1972 Assert(pInput->Names[iReg] == HvX64RegisterLstar);
1973 if (pCtx->msrLSTAR != paValues[iReg].Reg64)
1974 Log7(("NEM/%u: MSR LSTAR changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrLSTAR, paValues[iReg].Reg64));
1975 pCtx->msrLSTAR = paValues[iReg].Reg64;
1976 iReg++;
1977
1978 Assert(pInput->Names[iReg] == HvX64RegisterCstar);
1979 if (pCtx->msrCSTAR != paValues[iReg].Reg64)
1980 Log7(("NEM/%u: MSR CSTAR changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrCSTAR, paValues[iReg].Reg64));
1981 pCtx->msrCSTAR = paValues[iReg].Reg64;
1982 iReg++;
1983
1984 Assert(pInput->Names[iReg] == HvX64RegisterSfmask);
1985 if (pCtx->msrSFMASK != paValues[iReg].Reg64)
1986 Log7(("NEM/%u: MSR SFMASK changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrSFMASK, paValues[iReg].Reg64));
1987 pCtx->msrSFMASK = paValues[iReg].Reg64;
1988 iReg++;
1989 }
1990 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1991 {
1992 Assert(pInput->Names[iReg] == HvX64RegisterApicBase);
1993 const uint64_t uOldBase = APICGetBaseMsrNoCheck(pGVCpu);
1994 if (paValues[iReg].Reg64 != uOldBase)
1995 {
1996 Log7(("NEM/%u: MSR APICBase changed %RX64 -> %RX64 (%RX64)\n",
1997 pGVCpu->idCpu, uOldBase, paValues[iReg].Reg64, paValues[iReg].Reg64 ^ uOldBase));
1998 int rc2 = APICSetBaseMsr(pGVCpu, paValues[iReg].Reg64);
1999 AssertLogRelMsg(rc2 == VINF_SUCCESS, ("rc2=%Rrc [%#RX64]\n", rc2, paValues[iReg].Reg64));
2000 }
2001 iReg++;
2002
2003 Assert(pInput->Names[iReg] == HvX64RegisterPat);
2004 if (pCtx->msrPAT != paValues[iReg].Reg64)
2005 Log7(("NEM/%u: MSR PAT changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtx->msrPAT, paValues[iReg].Reg64));
2006 pCtx->msrPAT = paValues[iReg].Reg64;
2007 iReg++;
2008
2009# if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
2010 Assert(pInput->Names[iReg] == HvX64RegisterMtrrCap);
2011 if (paValues[iReg].Reg64 != CPUMGetGuestIa32MtrrCap(pGVCpu))
2012 Log7(("NEM/%u: MSR MTRR_CAP changed %RX64 -> %RX64 (!!)\n", pGVCpu->idCpu, CPUMGetGuestIa32MtrrCap(pGVCpu), paValues[iReg].Reg64));
2013 iReg++;
2014# endif
2015
2016 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pGVCpu);
2017 Assert(pInput->Names[iReg] == HvX64RegisterMtrrDefType);
2018 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrDefType )
2019 Log7(("NEM/%u: MSR MTRR_DEF_TYPE changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrDefType, paValues[iReg].Reg64));
2020 pCtxMsrs->msr.MtrrDefType = paValues[iReg].Reg64;
2021 iReg++;
2022
2023 /** @todo we don't keep state for HvX64RegisterMtrrPhysBaseX and HvX64RegisterMtrrPhysMaskX */
2024
2025 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix64k00000);
2026 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix64K_00000 )
2027 Log7(("NEM/%u: MSR MTRR_FIX64K_00000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix64K_00000, paValues[iReg].Reg64));
2028 pCtxMsrs->msr.MtrrFix64K_00000 = paValues[iReg].Reg64;
2029 iReg++;
2030
2031 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix16k80000);
2032 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix16K_80000 )
2033 Log7(("NEM/%u: MSR MTRR_FIX16K_80000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_80000, paValues[iReg].Reg64));
2034 pCtxMsrs->msr.MtrrFix16K_80000 = paValues[iReg].Reg64;
2035 iReg++;
2036
2037 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix16kA0000);
2038 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix16K_A0000 )
2039 Log7(("NEM/%u: MSR MTRR_FIX16K_A0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_A0000, paValues[iReg].Reg64));
2040 pCtxMsrs->msr.MtrrFix16K_A0000 = paValues[iReg].Reg64;
2041 iReg++;
2042
2043 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kC0000);
2044 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_C0000 )
2045 Log7(("NEM/%u: MSR MTRR_FIX4K_C0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C0000, paValues[iReg].Reg64));
2046 pCtxMsrs->msr.MtrrFix4K_C0000 = paValues[iReg].Reg64;
2047 iReg++;
2048
2049 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kC8000);
2050 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_C8000 )
2051 Log7(("NEM/%u: MSR MTRR_FIX4K_C8000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C8000, paValues[iReg].Reg64));
2052 pCtxMsrs->msr.MtrrFix4K_C8000 = paValues[iReg].Reg64;
2053 iReg++;
2054
2055 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kD0000);
2056 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_D0000 )
2057 Log7(("NEM/%u: MSR MTRR_FIX4K_D0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D0000, paValues[iReg].Reg64));
2058 pCtxMsrs->msr.MtrrFix4K_D0000 = paValues[iReg].Reg64;
2059 iReg++;
2060
2061 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kD8000);
2062 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_D8000 )
2063 Log7(("NEM/%u: MSR MTRR_FIX4K_D8000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D8000, paValues[iReg].Reg64));
2064 pCtxMsrs->msr.MtrrFix4K_D8000 = paValues[iReg].Reg64;
2065 iReg++;
2066
2067 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kE0000);
2068 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_E0000 )
2069 Log7(("NEM/%u: MSR MTRR_FIX4K_E0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E0000, paValues[iReg].Reg64));
2070 pCtxMsrs->msr.MtrrFix4K_E0000 = paValues[iReg].Reg64;
2071 iReg++;
2072
2073 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kE8000);
2074 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_E8000 )
2075 Log7(("NEM/%u: MSR MTRR_FIX4K_E8000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E8000, paValues[iReg].Reg64));
2076 pCtxMsrs->msr.MtrrFix4K_E8000 = paValues[iReg].Reg64;
2077 iReg++;
2078
2079 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kF0000);
2080 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_F0000 )
2081 Log7(("NEM/%u: MSR MTRR_FIX4K_F0000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F0000, paValues[iReg].Reg64));
2082 pCtxMsrs->msr.MtrrFix4K_F0000 = paValues[iReg].Reg64;
2083 iReg++;
2084
2085 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kF8000);
2086 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_F8000 )
2087 Log7(("NEM/%u: MSR MTRR_FIX4K_F8000 changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F8000, paValues[iReg].Reg64));
2088 pCtxMsrs->msr.MtrrFix4K_F8000 = paValues[iReg].Reg64;
2089 iReg++;
2090
2091 Assert(pInput->Names[iReg] == HvX64RegisterTscAux);
2092 if (paValues[iReg].Reg64 != pCtxMsrs->msr.TscAux )
2093 Log7(("NEM/%u: MSR TSC_AUX changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.TscAux, paValues[iReg].Reg64));
2094 pCtxMsrs->msr.TscAux = paValues[iReg].Reg64;
2095 iReg++;
2096
2097# if 0 /** @todo why can't we even read HvX64RegisterIa32MiscEnable? */
2098 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
2099 {
2100 Assert(pInput->Names[iReg] == HvX64RegisterIa32MiscEnable);
2101 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MiscEnable)
2102 Log7(("NEM/%u: MSR MISC_ENABLE changed %RX64 -> %RX64\n", pGVCpu->idCpu, pCtxMsrs->msr.MiscEnable, paValues[iReg].Reg64));
2103 pCtxMsrs->msr.MiscEnable = paValues[iReg].Reg64;
2104 iReg++;
2105 }
2106# endif
2107# ifdef LOG_ENABLED
2108 if (enmCpuVendor != CPUMCPUVENDOR_AMD && enmCpuVendor != CPUMCPUVENDOR_HYGON)
2109 {
2110 Assert(pInput->Names[iReg] == HvX64RegisterIa32FeatureControl);
2111 if (paValues[iReg].Reg64 != pCtx->hwvirt.vmx.Msrs.u64FeatCtrl)
2112 Log7(("NEM/%u: MSR FEATURE_CONTROL changed %RX64 -> %RX64 (!!)\n", pGVCpu->idCpu, pCtx->hwvirt.vmx.Msrs.u64FeatCtrl, paValues[iReg].Reg64));
2113 iReg++;
2114 }
2115# endif
2116 }
2117
2118 /* Interruptibility. */
2119 if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
2120 {
2121 Assert(pInput->Names[iReg] == HvRegisterInterruptState);
2122 Assert(pInput->Names[iReg + 1] == HvX64RegisterRip);
2123
2124 if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
2125 {
2126 pGVCpu->nem.s.fLastInterruptShadow = paValues[iReg].InterruptState.InterruptShadow;
2127 if (paValues[iReg].InterruptState.InterruptShadow)
2128 EMSetInhibitInterruptsPC(pGVCpu, paValues[iReg + 1].Reg64);
2129 else
2130 VMCPU_FF_CLEAR(pGVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2131 }
2132
2133 if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
2134 {
2135 if (paValues[iReg].InterruptState.NmiMasked)
2136 VMCPU_FF_SET(pGVCpu, VMCPU_FF_BLOCK_NMIS);
2137 else
2138 VMCPU_FF_CLEAR(pGVCpu, VMCPU_FF_BLOCK_NMIS);
2139 }
2140
2141 fWhat |= CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI;
2142 iReg += 2;
2143 }
2144
2145 /* Event injection. */
2146 /// @todo HvRegisterPendingInterruption
2147 Assert(pInput->Names[iReg] == HvRegisterPendingInterruption);
2148 if (paValues[iReg].PendingInterruption.InterruptionPending)
2149 {
2150 Log7(("PendingInterruption: type=%u vector=%#x errcd=%RTbool/%#x instr-len=%u nested=%u\n",
2151 paValues[iReg].PendingInterruption.InterruptionType, paValues[iReg].PendingInterruption.InterruptionVector,
2152 paValues[iReg].PendingInterruption.DeliverErrorCode, paValues[iReg].PendingInterruption.ErrorCode,
2153 paValues[iReg].PendingInterruption.InstructionLength, paValues[iReg].PendingInterruption.NestedEvent));
2154 AssertMsg((paValues[iReg].PendingInterruption.AsUINT64 & UINT64_C(0xfc00)) == 0,
2155 ("%#RX64\n", paValues[iReg].PendingInterruption.AsUINT64));
2156 }
2157
2158 /// @todo HvRegisterPendingEvent0
2159 /// @todo HvRegisterPendingEvent1
2160
2161 /* Almost done, just update extrn flags and maybe change PGM mode. */
2162 pCtx->fExtrn &= ~fWhat;
2163 if (!(pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT))))
2164 pCtx->fExtrn = 0;
2165
2166 /* Typical. */
2167 if (!fMaybeChangedMode && !fUpdateCr3)
2168 return VINF_SUCCESS;
2169
2170 /*
2171 * Slow.
2172 */
2173 int rc = VINF_SUCCESS;
2174 if (fMaybeChangedMode)
2175 {
2176 rc = PGMChangeMode(pGVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
2177 AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_1);
2178 }
2179
2180 if (fUpdateCr3)
2181 {
2182 if (fCanUpdateCr3)
2183 {
2184 LogFlow(("nemR0WinImportState: -> PGMUpdateCR3!\n"));
2185 rc = PGMUpdateCR3(pGVCpu, pCtx->cr3);
2186 AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_2);
2187 }
2188 else
2189 {
2190 LogFlow(("nemR0WinImportState: -> VERR_NEM_FLUSH_TLB!\n"));
2191 rc = VERR_NEM_FLUSH_TLB; /* Calling PGMFlushTLB w/o long jump setup doesn't work, ring-3 does it. */
2192 }
2193 }
2194
2195 return rc;
2196}
2197#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
2198
2199
2200/**
2201 * Import the state from the native API (back to CPUMCTX).
2202 *
2203 * @returns VBox status code
2204 * @param pGVM The ring-0 VM handle.
2205 * @param idCpu The calling EMT. Necessary for getting the
2206 * hypercall page and arguments.
2207 * @param fWhat What to import, CPUMCTX_EXTRN_XXX. Set
2208 * CPUMCTX_EXTRN_ALL for everything.
2209 */
2210VMMR0_INT_DECL(int) NEMR0ImportState(PGVM pGVM, VMCPUID idCpu, uint64_t fWhat)
2211{
2212#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2213 /*
2214 * Validate the call.
2215 */
2216 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
2217 if (RT_SUCCESS(rc))
2218 {
2219 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2220 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2221
2222 /*
2223 * Call worker.
2224 */
2225 rc = nemR0WinImportState(pGVM, pGVCpu, &pGVCpu->cpum.GstCtx, fWhat, false /*fCanUpdateCr3*/);
2226 }
2227 return rc;
2228#else
2229 RT_NOREF(pGVM, idCpu, fWhat);
2230 return VERR_NOT_IMPLEMENTED;
2231#endif
2232}
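/*
 * Illustrative usage sketch (not part of the original file): pulling a subset of
 * the guest state back into CPUMCTX on the EMT.  pGVM and idCpu are assumed to be
 * supplied by the caller.
 */
#if 0
    int rcImport = NEMR0ImportState(pGVM, idCpu,
                                    CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
    AssertRC(rcImport);
#endif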
2233
2234
2235#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2236/**
2237 * Worker for NEMR0QueryCpuTick and the ring-0 NEMHCQueryCpuTick.
2238 *
2239 * @returns VBox status code.
2240 * @param pGVM The ring-0 VM handle.
2241 * @param pGVCpu The ring-0 VCPU handle.
2242 * @param pcTicks Where to return the current CPU tick count.
2243 * @param pcAux Where to return the hyper-V TSC_AUX value. Optional.
2244 */
2245NEM_TMPL_STATIC int nemR0WinQueryCpuTick(PGVM pGVM, PGVMCPU pGVCpu, uint64_t *pcTicks, uint32_t *pcAux)
2246{
2247 /*
2248 * Hypercall parameters.
2249 */
2250 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
2251 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2252 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2253
2254 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
2255 pInput->VpIndex = pGVCpu->idCpu;
2256 pInput->fFlags = 0;
2257 pInput->Names[0] = HvX64RegisterTsc;
2258 pInput->Names[1] = HvX64RegisterTscAux;
2259
2260 size_t const cbInput = RT_ALIGN_Z(RT_UOFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[2]), 32);
2261 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
2262 RT_BZERO(paValues, sizeof(paValues[0]) * 2);
2263
2264 /*
2265 * Make the hypercall.
2266 */
2267 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, 2),
2268 pGVCpu->nemr0.s.HypercallData.HCPhysPage,
2269 pGVCpu->nemr0.s.HypercallData.HCPhysPage + cbInput);
2270 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(2), ("uResult=%RX64 cRegs=%#x\n", uResult, 2),
2271 VERR_NEM_GET_REGISTERS_FAILED);
2272
2273 /*
2274 * Get results.
2275 */
2276 *pcTicks = paValues[0].Reg64;
2277 if (pcAux)
2278 *pcAux = paValues[1].Reg32; /* TSC_AUX was requested as the second register (Names[1]). */
2279 return VINF_SUCCESS;
2280}
2281#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
2282
2283
2284/**
2285 * Queries the TSC and TSC_AUX values, putting the results in the VCPU's hypercall result area (nem.s.Hypercall.QueryCpuTick).
2286 *
2287 * @returns VBox status code
2288 * @param pGVM The ring-0 VM handle.
2289 * @param idCpu The calling EMT. Necessary for getting the
2290 * hypercall page and arguments.
2291 */
2292VMMR0_INT_DECL(int) NEMR0QueryCpuTick(PGVM pGVM, VMCPUID idCpu)
2293{
2294#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2295 /*
2296 * Validate the call.
2297 */
2298 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
2299 if (RT_SUCCESS(rc))
2300 {
2301 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2302 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2303
2304 /*
2305 * Call worker.
2306 */
2307 pGVCpu->nem.s.Hypercall.QueryCpuTick.cTicks = 0;
2308 pGVCpu->nem.s.Hypercall.QueryCpuTick.uAux = 0;
2309 rc = nemR0WinQueryCpuTick(pGVM, pGVCpu, &pGVCpu->nem.s.Hypercall.QueryCpuTick.cTicks,
2310 &pGVCpu->nem.s.Hypercall.QueryCpuTick.uAux);
2311 }
2312 return rc;
2313#else
2314 RT_NOREF(pGVM, idCpu);
2315 return VERR_NOT_IMPLEMENTED;
2316#endif
2317}
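/*
 * Illustrative usage sketch (not part of the original file): the TSC values are
 * returned via the VCPU's hypercall result area.  pGVM, idCpu and pGVCpu are
 * assumed to be supplied by the caller.
 */
#if 0
    int rcTick = NEMR0QueryCpuTick(pGVM, idCpu);
    if (RT_SUCCESS(rcTick))
        Log(("TSC=%#RX64 TSC_AUX=%#x\n",
             pGVCpu->nem.s.Hypercall.QueryCpuTick.cTicks, pGVCpu->nem.s.Hypercall.QueryCpuTick.uAux));
#endif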
2318
2319
2320#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2321/**
2322 * Worker for NEMR0ResumeCpuTickOnAll and the ring-0 NEMHCResumeCpuTickOnAll.
2323 *
2324 * @returns VBox status code.
2325 * @param pGVM The ring-0 VM handle.
2326 * @param pGVCpu The ring-0 VCPU handle.
2327 * @param uPausedTscValue The TSC value at the time of pausing.
2328 */
2329NEM_TMPL_STATIC int nemR0WinResumeCpuTickOnAll(PGVM pGVM, PGVMCPU pGVCpu, uint64_t uPausedTscValue)
2330{
2331 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2332
2333 /*
2334 * Set up the hypercall parameters.
2335 */
2336 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
2337 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2338
2339 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
2340 pInput->VpIndex = 0;
2341 pInput->RsvdZ = 0;
2342 pInput->Elements[0].Name = HvX64RegisterTsc;
2343 pInput->Elements[0].Pad0 = 0;
2344 pInput->Elements[0].Pad1 = 0;
2345 pInput->Elements[0].Value.Reg128.High64 = 0;
2346 pInput->Elements[0].Value.Reg64 = uPausedTscValue;
2347
2348 /*
2349 * Disable interrupts and do the first virtual CPU.
2350 */
2351 RTCCINTREG const fSavedFlags = ASMIntDisableFlags();
2352 uint64_t const uFirstTsc = ASMReadTSC();
2353 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
2354 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0 /* no output */);
2355 AssertLogRelMsgReturnStmt(uResult == HV_MAKE_CALL_REP_RET(1), ("uResult=%RX64 uTsc=%#RX64\n", uResult, uPausedTscValue),
2356 ASMSetFlags(fSavedFlags), VERR_NEM_SET_TSC);
2357
2358 /*
2359 * Do secondary processors, adjusting for elapsed TSC and keeping fingers crossed
2360 * that we don't introduce too much drift here.
2361 */
2362 for (VMCPUID iCpu = 1; iCpu < pGVM->cCpus; iCpu++)
2363 {
2364 Assert(pInput->PartitionId == pGVM->nemr0.s.idHvPartition);
2365 Assert(pInput->RsvdZ == 0);
2366 Assert(pInput->Elements[0].Name == HvX64RegisterTsc);
2367 Assert(pInput->Elements[0].Pad0 == 0);
2368 Assert(pInput->Elements[0].Pad1 == 0);
2369 Assert(pInput->Elements[0].Value.Reg128.High64 == 0);
2370
2371 pInput->VpIndex = iCpu;
2372 const uint64_t offDelta = (ASMReadTSC() - uFirstTsc);
2373 pInput->Elements[0].Value.Reg64 = uPausedTscValue + offDelta;
2374
2375 uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
2376 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0 /* no output */);
2377 AssertLogRelMsgReturnStmt(uResult == HV_MAKE_CALL_REP_RET(1),
2378 ("uResult=%RX64 uTsc=%#RX64 + %#RX64\n", uResult, uPausedTscValue, offDelta),
2379 ASMSetFlags(fSavedFlags), VERR_NEM_SET_TSC);
2380 }
2381
2382 /*
2383 * Done.
2384 */
2385 ASMSetFlags(fSavedFlags);
2386 return VINF_SUCCESS;
2387}
2388#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
2389
2390
2391/**
2392 * Sets the TSC register to @a uPausedTscValue on all CPUs.
2393 *
2394 * @returns VBox status code
2395 * @param pGVM The ring-0 VM handle.
2396 * @param idCpu The calling EMT. Necessary for getting the
2397 * hypercall page and arguments.
2398 * @param uPausedTscValue The TSC value at the time of pausing.
2399 */
2400VMMR0_INT_DECL(int) NEMR0ResumeCpuTickOnAll(PGVM pGVM, VMCPUID idCpu, uint64_t uPausedTscValue)
2401{
2402#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2403 /*
2404 * Validate the call.
2405 */
2406 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
2407 if (RT_SUCCESS(rc))
2408 {
2409 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2410 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2411
2412 /*
2413 * Call worker.
2414 */
2415 pGVCpu->nem.s.Hypercall.QueryCpuTick.cTicks = 0;
2416 pGVCpu->nem.s.Hypercall.QueryCpuTick.uAux = 0;
2417 rc = nemR0WinResumeCpuTickOnAll(pGVM, pGVCpu, uPausedTscValue);
2418 }
2419 return rc;
2420#else
2421 RT_NOREF(pGVM, idCpu, uPausedTscValue);
2422 return VERR_NOT_IMPLEMENTED;
2423#endif
2424}
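/*
 * Illustrative usage sketch (not part of the original file): restoring the TSC on
 * all vCPUs after a pause.  uPausedTscValue is a hypothetical value saved by the
 * caller when pausing; pGVM and idCpu come from the surrounding context.
 */
#if 0
    int rcTsc = NEMR0ResumeCpuTickOnAll(pGVM, idCpu, uPausedTscValue);
    AssertRC(rcTsc);
#endif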
2425
2426
2427VMMR0_INT_DECL(VBOXSTRICTRC) NEMR0RunGuestCode(PGVM pGVM, VMCPUID idCpu)
2428{
2429#ifdef NEM_WIN_WITH_RING0_RUNLOOP
2430 if (pGVM->nemr0.s.fMayUseRing0Runloop)
2431 return nemHCWinRunGC(pGVM, &pGVM->aCpus[idCpu]);
2432 return VERR_NEM_RING3_ONLY;
2433#else
2434 RT_NOREF(pGVM, idCpu);
2435 return VERR_NOT_IMPLEMENTED;
2436#endif
2437}
2438
2439
2440/**
2441 * Updates statistics in the VM structure.
2442 *
2443 * @returns VBox status code.
2444 * @param pGVM The ring-0 VM handle.
2445 * @param idCpu The calling EMT, or NIL. Necessary for getting the hypercall
2446 * page and arguments.
2447 */
2448VMMR0_INT_DECL(int) NEMR0UpdateStatistics(PGVM pGVM, VMCPUID idCpu)
2449{
2450 /*
2451 * Validate the call.
2452 */
2453 int rc;
2454 if (idCpu == NIL_VMCPUID)
2455 rc = GVMMR0ValidateGVM(pGVM);
2456 else
2457 rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
2458 if (RT_SUCCESS(rc))
2459 {
2460 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2461
2462 PNEMR0HYPERCALLDATA pHypercallData = idCpu != NIL_VMCPUID
2463 ? &pGVM->aCpus[idCpu].nemr0.s.HypercallData
2464 : &pGVM->nemr0.s.HypercallData;
2465 if ( RT_VALID_PTR(pHypercallData->pbPage)
2466 && pHypercallData->HCPhysPage != NIL_RTHCPHYS)
2467 {
2468 if (idCpu == NIL_VMCPUID)
2469 rc = RTCritSectEnter(&pGVM->nemr0.s.HypercallDataCritSect);
2470 if (RT_SUCCESS(rc))
2471 {
2472 /*
2473 * Query the memory statistics for the partition.
2474 */
2475 HV_INPUT_GET_MEMORY_BALANCE *pInput = (HV_INPUT_GET_MEMORY_BALANCE *)pHypercallData->pbPage;
2476 pInput->TargetPartitionId = pGVM->nemr0.s.idHvPartition;
2477 pInput->ProximityDomainInfo.Flags.ProximityPreferred = 0;
2478 pInput->ProximityDomainInfo.Flags.ProxyimityInfoValid = 0;
2479 pInput->ProximityDomainInfo.Flags.Reserved = 0;
2480 pInput->ProximityDomainInfo.Id = 0;
2481
2482 HV_OUTPUT_GET_MEMORY_BALANCE *pOutput = (HV_OUTPUT_GET_MEMORY_BALANCE *)(pInput + 1);
2483 RT_ZERO(*pOutput);
2484
2485 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallGetMemoryBalance,
2486 pHypercallData->HCPhysPage,
2487 pHypercallData->HCPhysPage + sizeof(*pInput));
2488 if (uResult == HV_STATUS_SUCCESS)
2489 {
2490 pGVM->nem.s.R0Stats.cPagesAvailable = pOutput->PagesAvailable;
2491 pGVM->nem.s.R0Stats.cPagesInUse = pOutput->PagesInUse;
2492 rc = VINF_SUCCESS;
2493 }
2494 else
2495 {
2496 LogRel(("HvCallGetMemoryBalance -> %#RX64 (%#RX64 %#RX64)!!\n",
2497 uResult, pOutput->PagesAvailable, pOutput->PagesInUse));
2498 rc = VERR_NEM_IPE_0;
2499 }
2500
2501 if (idCpu == NIL_VMCPUID)
2502 RTCritSectLeave(&pGVM->nemr0.s.HypercallDataCritSect);
2503 }
2504 }
2505 else
2506 rc = VERR_WRONG_ORDER;
2507 }
2508 return rc;
2509}
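/*
 * Illustrative usage sketch (not part of the original file): refreshing and then
 * reading the partition memory statistics.  Passing NIL_VMCPUID selects the
 * VM-wide hypercall page; pGVM is assumed to be supplied by the caller.
 */
#if 0
    int rcStats = NEMR0UpdateStatistics(pGVM, NIL_VMCPUID);
    if (RT_SUCCESS(rcStats))
        LogRel(("NEM: Hyper-V partition pages: available=%#RX64 in-use=%#RX64\n",
                pGVM->nem.s.R0Stats.cPagesAvailable, pGVM->nem.s.R0Stats.cPagesInUse));
#endif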
2510
2511
2512#if 1 && defined(DEBUG_bird)
2513/**
2514 * Debug only interface for poking around and exploring Hyper-V stuff.
2515 *
2516 * @param pGVM The ring-0 VM handle.
2517 * @param idCpu The calling EMT.
2518 * @param u64Arg What to query. 0 == registers.
2519 */
2520VMMR0_INT_DECL(int) NEMR0DoExperiment(PGVM pGVM, VMCPUID idCpu, uint64_t u64Arg)
2521{
2522 /*
2523 * Resolve CPU structures.
2524 */
2525 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
2526 if (RT_SUCCESS(rc))
2527 {
2528 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2529
2530 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2531 if (u64Arg == 0)
2532 {
2533 /*
2534 * Query register.
2535 */
2536 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
2537 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2538
2539 size_t const cbInput = RT_ALIGN_Z(RT_UOFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[1]), 32);
2540 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
2541 RT_BZERO(paValues, sizeof(paValues[0]) * 1);
2542
2543 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
2544 pInput->VpIndex = pGVCpu->idCpu;
2545 pInput->fFlags = 0;
2546 pInput->Names[0] = (HV_REGISTER_NAME)pGVCpu->nem.s.Hypercall.Experiment.uItem;
2547
2548 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, 1),
2549 pGVCpu->nemr0.s.HypercallData.HCPhysPage,
2550 pGVCpu->nemr0.s.HypercallData.HCPhysPage + cbInput);
2551 pGVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_MAKE_CALL_REP_RET(1);
2552 pGVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;
2553 pGVCpu->nem.s.Hypercall.Experiment.uLoValue = paValues[0].Reg128.Low64;
2554 pGVCpu->nem.s.Hypercall.Experiment.uHiValue = paValues[0].Reg128.High64;
2555 rc = VINF_SUCCESS;
2556 }
2557 else if (u64Arg == 1)
2558 {
2559 /*
2560 * Query partition property.
2561 */
2562 HV_INPUT_GET_PARTITION_PROPERTY *pInput = (HV_INPUT_GET_PARTITION_PROPERTY *)pGVCpu->nemr0.s.HypercallData.pbPage;
2563 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2564
2565 size_t const cbInput = RT_ALIGN_Z(sizeof(*pInput), 32);
2566 HV_OUTPUT_GET_PARTITION_PROPERTY *pOutput = (HV_OUTPUT_GET_PARTITION_PROPERTY *)((uint8_t *)pInput + cbInput);
2567 pOutput->PropertyValue = 0;
2568
2569 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
2570 pInput->PropertyCode = (HV_PARTITION_PROPERTY_CODE)pGVCpu->nem.s.Hypercall.Experiment.uItem;
2571 pInput->uPadding = 0;
2572
2573 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallGetPartitionProperty,
2574 pGVCpu->nemr0.s.HypercallData.HCPhysPage,
2575 pGVCpu->nemr0.s.HypercallData.HCPhysPage + cbInput);
2576 pGVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_STATUS_SUCCESS;
2577 pGVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;
2578 pGVCpu->nem.s.Hypercall.Experiment.uLoValue = pOutput->PropertyValue;
2579 pGVCpu->nem.s.Hypercall.Experiment.uHiValue = 0;
2580 rc = VINF_SUCCESS;
2581 }
2582 else if (u64Arg == 2)
2583 {
2584 /*
2585 * Set register.
2586 */
2587 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
2588 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2589 RT_BZERO(pInput, RT_UOFFSETOF(HV_INPUT_SET_VP_REGISTERS, Elements[1]));
2590
2591 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
2592 pInput->VpIndex = pGVCpu->idCpu;
2593 pInput->RsvdZ = 0;
2594 pInput->Elements[0].Name = (HV_REGISTER_NAME)pGVCpu->nem.s.Hypercall.Experiment.uItem;
2595 pInput->Elements[0].Value.Reg128.High64 = pGVCpu->nem.s.Hypercall.Experiment.uHiValue;
2596 pInput->Elements[0].Value.Reg128.Low64 = pGVCpu->nem.s.Hypercall.Experiment.uLoValue;
2597
2598 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
2599 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0);
2600 pGVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_MAKE_CALL_REP_RET(1);
2601 pGVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;
2602 rc = VINF_SUCCESS;
2603 }
2604 else
2605 rc = VERR_INVALID_FUNCTION;
2606 }
2607 return rc;
2608}
2609#endif /* DEBUG_bird */
2610