VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp @ 75649

Last change on this file since 75649 was 74789, checked in by vboxsync, 6 years ago

vm.h,VMM,REM: s/VMCPU_FF_IS_PENDING/VMCPU_FF_IS_ANY_SET/g to emphasize the plurality of the flags argument and encourage using VMCPU_FF_IS_SET. bugref:9180

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 113.4 KB
1/* $Id: NEMR0Native-win.cpp 74789 2018-10-12 10:34:32Z vboxsync $ */
2/** @file
3 * NEM - Native execution manager, native ring-0 Windows backend.
4 */
5
6/*
7 * Copyright (C) 2018 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_NEM
23#define VMCPU_INCL_CPUM_GST_CTX
24#include <iprt/nt/nt.h>
25#include <iprt/nt/hyperv.h>
26#include <iprt/nt/vid.h>
27#include <winerror.h>
28
29#include <VBox/vmm/nem.h>
30#include <VBox/vmm/iem.h>
31#include <VBox/vmm/em.h>
32#include <VBox/vmm/apic.h>
33#include <VBox/vmm/pdm.h>
34#include <VBox/vmm/dbgftrace.h>
35#include "NEMInternal.h"
36#include <VBox/vmm/gvm.h>
37#include <VBox/vmm/vm.h>
38#include <VBox/vmm/gvmm.h>
39#include <VBox/param.h>
40
41#include <iprt/dbg.h>
42#include <iprt/memobj.h>
43#include <iprt/string.h>
44#include <iprt/time.h>
45
46
47/* Assert compile context sanity. */
48#ifndef RT_OS_WINDOWS
49# error "Windows only file!"
50#endif
51#ifndef RT_ARCH_AMD64
52# error "AMD64 only file!"
53#endif
54
55
56/*********************************************************************************************************************************
57* Internal Functions *
58*********************************************************************************************************************************/
59typedef uint32_t DWORD; /* for winerror.h constants */
60
61
62/*********************************************************************************************************************************
63* Global Variables *
64*********************************************************************************************************************************/
65static uint64_t (*g_pfnHvlInvokeHypercall)(uint64_t uCallInfo, uint64_t HCPhysInput, uint64_t HCPhysOutput);
66
67/**
68 * WinHvr.sys!WinHvDepositMemory
69 *
70 * This API will try to allocate cPages on IdealNode and deposit them to the
71 * hypervisor for use with the given partition. The memory will be freed when
72 * VID.SYS calls WinHvWithdrawAllMemory as the partition is cleaned up.
73 *
74 * Apparently node numbers above 64 have a different meaning.
75 */
76static NTSTATUS (*g_pfnWinHvDepositMemory)(uintptr_t idPartition, size_t cPages, uintptr_t IdealNode, size_t *pcActuallyAdded);
77
78
79/*********************************************************************************************************************************
80* Internal Functions *
81*********************************************************************************************************************************/
82NEM_TMPL_STATIC int nemR0WinMapPages(PGVM pGVM, PVM pVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
83 uint32_t cPages, uint32_t fFlags);
84NEM_TMPL_STATIC int nemR0WinUnmapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys, uint32_t cPages);
85#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
86NEM_TMPL_STATIC int nemR0WinExportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx);
87NEM_TMPL_STATIC int nemR0WinImportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat, bool fCanUpdateCr3);
88NEM_TMPL_STATIC int nemR0WinQueryCpuTick(PGVM pGVM, PGVMCPU pGVCpu, uint64_t *pcTicks, uint32_t *pcAux);
89NEM_TMPL_STATIC int nemR0WinResumeCpuTickOnAll(PGVM pGVM, PGVMCPU pGVCpu, uint64_t uPausedTscValue);
90#endif
91DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, uint32_t uFunction, void *pvInput, uint32_t cbInput,
92 void *pvOutput, uint32_t cbOutput);
93
94
95/*
96 * Instantiate the code we share with ring-3.
97 */
98#ifdef NEM_WIN_WITH_RING0_RUNLOOP
99# define NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
100#else
101# undef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
102#endif
103#include "../VMMAll/NEMAllNativeTemplate-win.cpp.h"
104
105
106
107/**
108 * Worker for NEMR0InitVM that allocates a hypercall page.
109 *
110 * @returns VBox status code.
111 * @param pHypercallData The hypercall data page to initialize.
112 */
113static int nemR0InitHypercallData(PNEMR0HYPERCALLDATA pHypercallData)
114{
115 int rc = RTR0MemObjAllocPage(&pHypercallData->hMemObj, PAGE_SIZE, false /*fExecutable*/);
116 if (RT_SUCCESS(rc))
117 {
118 pHypercallData->HCPhysPage = RTR0MemObjGetPagePhysAddr(pHypercallData->hMemObj, 0 /*iPage*/);
119 AssertStmt(pHypercallData->HCPhysPage != NIL_RTHCPHYS, rc = VERR_INTERNAL_ERROR_3);
120 pHypercallData->pbPage = (uint8_t *)RTR0MemObjAddress(pHypercallData->hMemObj);
121 AssertStmt(pHypercallData->pbPage, rc = VERR_INTERNAL_ERROR_3);
122 if (RT_SUCCESS(rc))
123 return VINF_SUCCESS;
124
125 /* bail out */
126 RTR0MemObjFree(pHypercallData->hMemObj, true /*fFreeMappings*/);
127 }
128 pHypercallData->hMemObj = NIL_RTR0MEMOBJ;
129 pHypercallData->HCPhysPage = NIL_RTHCPHYS;
130 pHypercallData->pbPage = NULL;
131 return rc;
132}
133
134/**
135 * Worker for NEMR0CleanupVM and NEMR0InitVM that cleans up a hypercall page.
136 *
137 * @param pHypercallData The hypercall data page to uninitialize.
138 */
139static void nemR0DeleteHypercallData(PNEMR0HYPERCALLDATA pHypercallData)
140{
141 /* Check pbPage here since it's NULL when not initialized, whereas hMemObj can be
142 either NIL_RTR0MEMOBJ or 0 (and those aren't necessarily the same value). */
143 if (pHypercallData->pbPage != NULL)
144 {
145 RTR0MemObjFree(pHypercallData->hMemObj, true /*fFreeMappings*/);
146 pHypercallData->pbPage = NULL;
147 }
148 pHypercallData->hMemObj = NIL_RTR0MEMOBJ;
149 pHypercallData->HCPhysPage = NIL_RTHCPHYS;
150}
151
152
153/**
154 * Called by NEMR3Init to make sure we've got what we need.
155 *
156 * @returns VBox status code.
157 * @param pGVM The ring-0 VM handle.
158 * @param pVM The cross context VM handle.
159 * @thread EMT(0)
160 */
161VMMR0_INT_DECL(int) NEMR0InitVM(PGVM pGVM, PVM pVM)
162{
163 AssertCompile(sizeof(pGVM->nem.s) <= sizeof(pGVM->nem.padding));
164 AssertCompile(sizeof(pGVM->aCpus[0].nem.s) <= sizeof(pGVM->aCpus[0].nem.padding));
165
166 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, 0);
167 AssertRCReturn(rc, rc);
168
169 /*
170 * We want to perform hypercalls here. The NT kernel started to expose a very
171 * low-level interface for this somewhere between builds 14271 and 16299. Since
172 * we need build 17134 to get anywhere at all, the exact build is not relevant here.
173 *
174 * We also need to deposit memory to the hypervisor for use with the partition
175 * (page mapping structures and the like).
176 */
177 RTDBGKRNLINFO hKrnlInfo;
178 rc = RTR0DbgKrnlInfoOpen(&hKrnlInfo, 0);
179 if (RT_SUCCESS(rc))
180 {
181 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, NULL, "HvlInvokeHypercall", (void **)&g_pfnHvlInvokeHypercall);
182 if (RT_SUCCESS(rc))
183 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, "winhvr.sys", "WinHvDepositMemory", (void **)&g_pfnWinHvDepositMemory);
184 RTR0DbgKrnlInfoRelease(hKrnlInfo);
185 if (RT_SUCCESS(rc))
186 {
187 /*
188 * Allocate a page for non-EMT threads to use for hypercalls (update
189 * statistics and such) and a critical section protecting it.
190 */
191 rc = RTCritSectInit(&pGVM->nem.s.HypercallDataCritSect);
192 if (RT_SUCCESS(rc))
193 {
194 rc = nemR0InitHypercallData(&pGVM->nem.s.HypercallData);
195 if (RT_SUCCESS(rc))
196 {
197 /*
198 * Allocate a page for each VCPU to place hypercall data on.
199 */
200 for (VMCPUID i = 0; i < pGVM->cCpus; i++)
201 {
202 rc = nemR0InitHypercallData(&pGVM->aCpus[i].nem.s.HypercallData);
203 if (RT_FAILURE(rc))
204 {
205 while (i-- > 0)
206 nemR0DeleteHypercallData(&pGVM->aCpus[i].nem.s.HypercallData);
207 break;
208 }
209 }
210 if (RT_SUCCESS(rc))
211 {
212 /*
213 * So far, so good.
214 */
215 return rc;
216 }
217
218 /*
219 * Bail out.
220 */
221 nemR0DeleteHypercallData(&pGVM->nem.s.HypercallData);
222 }
223 RTCritSectDelete(&pGVM->nem.s.HypercallDataCritSect);
224 }
225 }
226 else
227 rc = VERR_NEM_MISSING_KERNEL_API;
228 }
229
230 RT_NOREF(pVM);
231 return rc;
232}
233
234
235/**
236 * Perform an I/O control operation on the partition handle (VID.SYS).
237 *
238 * @returns NT status code.
239 * @param pGVM The ring-0 VM structure.
240 * @param uFunction The function to perform.
241 * @param pvInput The input buffer. This must point within the VM
242 * structure so we can easily convert to a ring-3
243 * pointer if necessary.
244 * @param cbInput The size of the input. @a pvInput must be NULL when
245 * zero.
246 * @param pvOutput The output buffer. This must also point within the
247 * VM structure for ring-3 pointer magic.
248 * @param cbOutput The size of the output. @a pvOutput must be NULL
249 * when zero.
250 */
251DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, uint32_t uFunction, void *pvInput, uint32_t cbInput,
252 void *pvOutput, uint32_t cbOutput)
253{
254#ifdef RT_STRICT
255 /*
256 * Input and output parameters must lie within the VM structure (including its VMCPU array).
257 */
258 PVM pVM = pGVM->pVM;
259 size_t const cbVM = RT_UOFFSETOF_DYN(VM, aCpus[pGVM->cCpus]);
260 if (pvInput)
261 AssertReturn(((uintptr_t)pvInput + cbInput) - (uintptr_t)pVM <= cbVM, VERR_INVALID_PARAMETER);
262 if (pvOutput)
263 AssertReturn(((uintptr_t)pvOutput + cbOutput) - (uintptr_t)pVM <= cbVM, VERR_INVALID_PARAMETER);
264#endif
265
266 int32_t rcNt = STATUS_UNSUCCESSFUL;
267 int rc = SUPR0IoCtlPerform(pGVM->nem.s.pIoCtlCtx, uFunction,
268 pvInput,
269 pvInput ? (uintptr_t)pvInput + pGVM->nem.s.offRing3ConversionDelta : NIL_RTR3PTR,
270 cbInput,
271 pvOutput,
272 pvOutput ? (uintptr_t)pvOutput + pGVM->nem.s.offRing3ConversionDelta : NIL_RTR3PTR,
273 cbOutput,
274 &rcNt);
275 if (RT_SUCCESS(rc) || !NT_SUCCESS((NTSTATUS)rcNt))
276 return (NTSTATUS)rcNt;
277 return STATUS_UNSUCCESSFUL;
278}
279
280
281/**
282 * 2nd part of the initialization, after we've got a partition handle.
283 *
284 * @returns VBox status code.
285 * @param pGVM The ring-0 VM handle.
286 * @param pVM The cross context VM handle.
287 * @thread EMT(0)
288 */
289VMMR0_INT_DECL(int) NEMR0InitVMPart2(PGVM pGVM, PVM pVM)
290{
291 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, 0);
292 AssertRCReturn(rc, rc);
293 SUPR0Printf("NEMR0InitVMPart2\n"); LogRel(("2: NEMR0InitVMPart2\n"));
294 Assert(pGVM->nem.s.fMayUseRing0Runloop == false);
295
296 /*
297 * Copy and validate the I/O control information from ring-3.
298 */
299 NEMWINIOCTL Copy = pVM->nem.s.IoCtlGetHvPartitionId;
300 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);
301 AssertLogRelReturn(Copy.cbInput == 0, VERR_NEM_INIT_FAILED);
302 AssertLogRelReturn(Copy.cbOutput == sizeof(HV_PARTITION_ID), VERR_NEM_INIT_FAILED);
303 pGVM->nem.s.IoCtlGetHvPartitionId = Copy;
304
305 pGVM->nem.s.fMayUseRing0Runloop = pVM->nem.s.fUseRing0Runloop;
306
307 Copy = pVM->nem.s.IoCtlStartVirtualProcessor;
308 AssertLogRelStmt(Copy.uFunction != 0, rc = VERR_NEM_INIT_FAILED);
309 AssertLogRelStmt(Copy.cbInput == sizeof(HV_VP_INDEX), rc = VERR_NEM_INIT_FAILED);
310 AssertLogRelStmt(Copy.cbOutput == 0, rc = VERR_NEM_INIT_FAILED);
311 AssertLogRelStmt(Copy.uFunction != pGVM->nem.s.IoCtlGetHvPartitionId.uFunction, rc = VERR_NEM_INIT_FAILED);
312 if (RT_SUCCESS(rc))
313 pGVM->nem.s.IoCtlStartVirtualProcessor = Copy;
314
315 Copy = pVM->nem.s.IoCtlStopVirtualProcessor;
316 AssertLogRelStmt(Copy.uFunction != 0, rc = VERR_NEM_INIT_FAILED);
317 AssertLogRelStmt(Copy.cbInput == sizeof(HV_VP_INDEX), rc = VERR_NEM_INIT_FAILED);
318 AssertLogRelStmt(Copy.cbOutput == 0, rc = VERR_NEM_INIT_FAILED);
319 AssertLogRelStmt(Copy.uFunction != pGVM->nem.s.IoCtlGetHvPartitionId.uFunction, rc = VERR_NEM_INIT_FAILED);
320 AssertLogRelStmt(Copy.uFunction != pGVM->nem.s.IoCtlStartVirtualProcessor.uFunction, rc = VERR_NEM_INIT_FAILED);
321 if (RT_SUCCESS(rc))
322 pGVM->nem.s.IoCtlStopVirtualProcessor = Copy;
323
324 Copy = pVM->nem.s.IoCtlMessageSlotHandleAndGetNext;
325 AssertLogRelStmt(Copy.uFunction != 0, rc = VERR_NEM_INIT_FAILED);
326 AssertLogRelStmt( Copy.cbInput == sizeof(VID_IOCTL_INPUT_MESSAGE_SLOT_HANDLE_AND_GET_NEXT)
327 || Copy.cbInput == RT_OFFSETOF(VID_IOCTL_INPUT_MESSAGE_SLOT_HANDLE_AND_GET_NEXT, cMillies),
328 rc = VERR_NEM_INIT_FAILED);
329 AssertLogRelStmt(Copy.cbOutput == 0, rc = VERR_NEM_INIT_FAILED);
330 AssertLogRelStmt(Copy.uFunction != pGVM->nem.s.IoCtlGetHvPartitionId.uFunction, rc = VERR_NEM_INIT_FAILED);
331 AssertLogRelStmt(Copy.uFunction != pGVM->nem.s.IoCtlStartVirtualProcessor.uFunction, rc = VERR_NEM_INIT_FAILED);
332 AssertLogRelStmt(Copy.uFunction != pGVM->nem.s.IoCtlStopVirtualProcessor.uFunction, rc = VERR_NEM_INIT_FAILED);
333 if (RT_SUCCESS(rc))
334 pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext = Copy;
335
336 if ( RT_SUCCESS(rc)
337 || !pVM->nem.s.fUseRing0Runloop)
338 {
339 /*
340 * Set up an I/O control context for the partition handle for later use.
341 */
342 rc = SUPR0IoCtlSetupForHandle(pGVM->pSession, pVM->nem.s.hPartitionDevice, 0, &pGVM->nem.s.pIoCtlCtx);
343 AssertLogRelRCReturn(rc, rc);
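/* Note: the delta below turns a ring-0 address inside the VM structure into its
   ring-3 counterpart; nemR0NtPerformIoControl adds it to the buffer pointers it
   hands to SUPR0IoCtlPerform, which wants both ring-0 and ring-3 addresses. */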
344 pGVM->nem.s.offRing3ConversionDelta = (uintptr_t)pVM->pVMR3 - (uintptr_t)pGVM->pVM;
345
346 /*
347 * Get the partition ID.
348 */
349 PVMCPU pVCpu = &pGVM->pVM->aCpus[0];
350 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlGetHvPartitionId.uFunction, NULL, 0,
351 &pVCpu->nem.s.uIoCtlBuf.idPartition, sizeof(pVCpu->nem.s.uIoCtlBuf.idPartition));
352 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("IoCtlGetHvPartitionId failed: %#x\n", rcNt), VERR_NEM_INIT_FAILED);
353 pGVM->nem.s.idHvPartition = pVCpu->nem.s.uIoCtlBuf.idPartition;
354 AssertLogRelMsgReturn(pGVM->nem.s.idHvPartition == pVM->nem.s.idHvPartition,
355 ("idHvPartition mismatch: r0=%#RX64, r3=%#RX64\n", pGVM->nem.s.idHvPartition, pVM->nem.s.idHvPartition),
356 VERR_NEM_INIT_FAILED);
357 }
358
359 return rc;
360}
361
362
363/**
364 * Clean up the NEM parts of the VM in ring-0.
365 *
366 * This is always called and must deal with the state regardless of whether
367 * NEMR0InitVM() was called or not. So, take care here.
368 *
369 * @param pGVM The ring-0 VM handle.
370 */
371VMMR0_INT_DECL(void) NEMR0CleanupVM(PGVM pGVM)
372{
373 pGVM->nem.s.idHvPartition = HV_PARTITION_ID_INVALID;
374
375 /* Clean up I/O control context. */
376 if (pGVM->nem.s.pIoCtlCtx)
377 {
378 int rc = SUPR0IoCtlCleanup(pGVM->nem.s.pIoCtlCtx);
379 AssertRC(rc);
380 pGVM->nem.s.pIoCtlCtx = NULL;
381 }
382
383 /* Free the hypercall pages. */
384 VMCPUID i = pGVM->cCpus;
385 while (i-- > 0)
386 nemR0DeleteHypercallData(&pGVM->aCpus[i].nem.s.HypercallData);
387
388 /* The non-EMT one too. */
389 if (RTCritSectIsInitialized(&pGVM->nem.s.HypercallDataCritSect))
390 RTCritSectDelete(&pGVM->nem.s.HypercallDataCritSect);
391 nemR0DeleteHypercallData(&pGVM->nem.s.HypercallData);
392}
393
394
395#if 0 /* for debugging GPA unmapping. */
396static int nemR3WinDummyReadGpa(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys)
397{
398 PHV_INPUT_READ_GPA pIn = (PHV_INPUT_READ_GPA)pGVCpu->nem.s.pbHypercallData;
399 PHV_OUTPUT_READ_GPA pOut = (PHV_OUTPUT_READ_GPA)(pIn + 1);
400 pIn->PartitionId = pGVM->nem.s.idHvPartition;
401 pIn->VpIndex = pGVCpu->idCpu;
402 pIn->ByteCount = 0x10;
403 pIn->BaseGpa = GCPhys;
404 pIn->ControlFlags.AsUINT64 = 0;
405 pIn->ControlFlags.CacheType = HvCacheTypeX64WriteCombining;
406 memset(pOut, 0xfe, sizeof(*pOut));
407 uint64_t volatile uResult = g_pfnHvlInvokeHypercall(HvCallReadGpa, pGVCpu->nem.s.HCPhysHypercallData,
408 pGVCpu->nem.s.HCPhysHypercallData + sizeof(*pIn));
409 LogRel(("nemR3WinDummyReadGpa: %RGp -> %#RX64; code=%u rsvd=%u abData=%.16Rhxs\n",
410 GCPhys, uResult, pOut->AccessResult.ResultCode, pOut->AccessResult.Reserved, pOut->Data));
411 __debugbreak();
412
413 return uResult != 0 ? VERR_READ_ERROR : VINF_SUCCESS;
414}
415#endif
416
417
418/**
419 * Worker for NEMR0MapPages and others.
420 */
421NEM_TMPL_STATIC int nemR0WinMapPages(PGVM pGVM, PVM pVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
422 uint32_t cPages, uint32_t fFlags)
423{
424 /*
425 * Validate.
426 */
427 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
428
429 AssertReturn(cPages > 0, VERR_OUT_OF_RANGE);
430 AssertReturn(cPages <= NEM_MAX_MAP_PAGES, VERR_OUT_OF_RANGE);
431 AssertReturn(!(fFlags & ~(HV_MAP_GPA_MAYBE_ACCESS_MASK & ~HV_MAP_GPA_DUNNO_ACCESS)), VERR_INVALID_FLAGS);
432 AssertMsgReturn(!(GCPhysDst & X86_PAGE_OFFSET_MASK), ("GCPhysDst=%RGp\n", GCPhysDst), VERR_OUT_OF_RANGE);
433 AssertReturn(GCPhysDst < _1E, VERR_OUT_OF_RANGE);
434 if (GCPhysSrc != GCPhysDst)
435 {
436 AssertMsgReturn(!(GCPhysSrc & X86_PAGE_OFFSET_MASK), ("GCPhysSrc=%RGp\n", GCPhysSrc), VERR_OUT_OF_RANGE);
437 AssertReturn(GCPhysSrc < _1E, VERR_OUT_OF_RANGE);
438 }
439
440 /*
441 * Compose and make the hypercall.
442 * Ring-3 is not allowed to fill in the host physical addresses of the call.
443 */
444 for (uint32_t iTries = 0;; iTries++)
445 {
446 HV_INPUT_MAP_GPA_PAGES *pMapPages = (HV_INPUT_MAP_GPA_PAGES *)pGVCpu->nem.s.HypercallData.pbPage;
447 AssertPtrReturn(pMapPages, VERR_INTERNAL_ERROR_3);
448 pMapPages->TargetPartitionId = pGVM->nem.s.idHvPartition;
449 pMapPages->TargetGpaBase = GCPhysDst >> X86_PAGE_SHIFT;
450 pMapPages->MapFlags = fFlags;
451 pMapPages->u32ExplicitPadding = 0;
452 for (uint32_t iPage = 0; iPage < cPages; iPage++, GCPhysSrc += X86_PAGE_SIZE)
453 {
454 RTHCPHYS HCPhys = NIL_RTHCPHYS;
455 int rc = PGMPhysGCPhys2HCPhys(pVM, GCPhysSrc, &HCPhys);
456 AssertRCReturn(rc, rc);
457 pMapPages->PageList[iPage] = HCPhys >> X86_PAGE_SHIFT;
458 }
459
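/* This is a rep hypercall: the call code sits in bits 15:0 of the call info and the
   repetition count in bits 43:32; on success the result value carries the number of
   completed reps in the same upper bits, which is what the check below relies on. */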
460 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallMapGpaPages | ((uint64_t)cPages << 32),
461 pGVCpu->nem.s.HypercallData.HCPhysPage, 0);
462 Log6(("NEMR0MapPages: %RGp/%RGp L %u prot %#x -> %#RX64\n",
463 GCPhysDst, GCPhysSrc - cPages * X86_PAGE_SIZE, cPages, fFlags, uResult));
464 if (uResult == ((uint64_t)cPages << 32))
465 return VINF_SUCCESS;
466
467 /*
468 * If the partition is out of memory, try to donate another 512 pages to
469 * it (2MB). VID.SYS does multiples of 512 pages, nothing smaller.
470 */
471 if ( uResult != HV_STATUS_INSUFFICIENT_MEMORY
472 || iTries > 16
473 || g_pfnWinHvDepositMemory == NULL)
474 {
475 LogRel(("g_pfnHvlInvokeHypercall/MapGpaPages -> %#RX64\n", uResult));
476 return VERR_NEM_MAP_PAGES_FAILED;
477 }
478
479 size_t cPagesAdded = 0;
480 NTSTATUS rcNt = g_pfnWinHvDepositMemory(pGVM->nem.s.idHvPartition, 512, 0, &cPagesAdded);
481 if (!cPagesAdded)
482 {
483 LogRel(("g_pfnWinHvDepositMemory -> %#x / %#RX64\n", rcNt, uResult));
484 return VERR_NEM_MAP_PAGES_FAILED;
485 }
486 }
487}
488
489
490/**
491 * Maps pages into the guest physical address space.
492 *
493 * Generally the caller will be under the PGM lock already, so no extra effort
494 * is needed to make sure all changes happen under it.
495 *
496 * @returns VBox status code.
497 * @param pGVM The ring-0 VM handle.
498 * @param pVM The cross context VM handle.
499 * @param idCpu The calling EMT. Necessary for getting the
500 * hypercall page and arguments.
501 * @thread EMT(idCpu)
502 */
503VMMR0_INT_DECL(int) NEMR0MapPages(PGVM pGVM, PVM pVM, VMCPUID idCpu)
504{
505 /*
506 * Unpack the call.
507 */
508 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
509 if (RT_SUCCESS(rc))
510 {
511 PVMCPU pVCpu = &pVM->aCpus[idCpu];
512 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
513
514 RTGCPHYS const GCPhysSrc = pVCpu->nem.s.Hypercall.MapPages.GCPhysSrc;
515 RTGCPHYS const GCPhysDst = pVCpu->nem.s.Hypercall.MapPages.GCPhysDst;
516 uint32_t const cPages = pVCpu->nem.s.Hypercall.MapPages.cPages;
517 HV_MAP_GPA_FLAGS const fFlags = pVCpu->nem.s.Hypercall.MapPages.fFlags;
518
519 /*
520 * Do the work.
521 */
522 rc = nemR0WinMapPages(pGVM, pVM, pGVCpu, GCPhysSrc, GCPhysDst, cPages, fFlags);
523 }
524 return rc;
525}
526
527
528/**
529 * Worker for NEMR0UnmapPages and others.
530 */
531NEM_TMPL_STATIC int nemR0WinUnmapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys, uint32_t cPages)
532{
533 /*
534 * Validate input.
535 */
536 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
537
538 AssertReturn(cPages > 0, VERR_OUT_OF_RANGE);
539 AssertReturn(cPages <= NEM_MAX_UNMAP_PAGES, VERR_OUT_OF_RANGE);
540 AssertMsgReturn(!(GCPhys & X86_PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_OUT_OF_RANGE);
541 AssertReturn(GCPhys < _1E, VERR_OUT_OF_RANGE);
542
543 /*
544 * Compose and make the hypercall.
545 */
546 HV_INPUT_UNMAP_GPA_PAGES *pUnmapPages = (HV_INPUT_UNMAP_GPA_PAGES *)pGVCpu->nem.s.HypercallData.pbPage;
547 AssertPtrReturn(pUnmapPages, VERR_INTERNAL_ERROR_3);
548 pUnmapPages->TargetPartitionId = pGVM->nem.s.idHvPartition;
549 pUnmapPages->TargetGpaBase = GCPhys >> X86_PAGE_SHIFT;
550 pUnmapPages->fFlags = 0;
551
552 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallUnmapGpaPages | ((uint64_t)cPages << 32),
553 pGVCpu->nem.s.HypercallData.HCPhysPage, 0);
554 Log6(("NEMR0UnmapPages: %RGp L %u -> %#RX64\n", GCPhys, cPages, uResult));
555 if (uResult == ((uint64_t)cPages << 32))
556 {
557#if 1 /* Do we need to do this? Hopefully not... */
558 uint64_t volatile uR = g_pfnHvlInvokeHypercall(HvCallUncommitGpaPages | ((uint64_t)cPages << 32),
559 pGVCpu->nem.s.HypercallData.HCPhysPage, 0);
560 AssertMsg(uR == ((uint64_t)cPages << 32), ("uR=%#RX64\n", uR)); NOREF(uR);
561#endif
562 return VINF_SUCCESS;
563 }
564
565 LogRel(("g_pfnHvlInvokeHypercall/UnmapGpaPages -> %#RX64\n", uResult));
566 return VERR_NEM_UNMAP_PAGES_FAILED;
567}
568
569
570/**
571 * Unmaps pages from the guest physical address space.
572 *
573 * Generally the caller will be under the PGM lock already, so no extra effort
574 * is needed to make sure all changes happen under it.
575 *
576 * @returns VBox status code.
577 * @param pGVM The ring-0 VM handle.
578 * @param pVM The cross context VM handle.
579 * @param idCpu The calling EMT. Necessary for getting the
580 * hypercall page and arguments.
581 * @thread EMT(idCpu)
582 */
583VMMR0_INT_DECL(int) NEMR0UnmapPages(PGVM pGVM, PVM pVM, VMCPUID idCpu)
584{
585 /*
586 * Unpack the call.
587 */
588 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
589 if (RT_SUCCESS(rc))
590 {
591 PVMCPU pVCpu = &pVM->aCpus[idCpu];
592 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
593
594 RTGCPHYS const GCPhys = pVCpu->nem.s.Hypercall.UnmapPages.GCPhys;
595 uint32_t const cPages = pVCpu->nem.s.Hypercall.UnmapPages.cPages;
596
597 /*
598 * Do the work.
599 */
600 rc = nemR0WinUnmapPages(pGVM, pGVCpu, GCPhys, cPages);
601 }
602 return rc;
603}
604
605
606#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
607/**
608 * Worker for NEMR0ExportState.
609 *
610 * The intention is to use it internally later.
611 *
612 * @returns VBox status code.
613 * @param pGVM The ring-0 VM handle.
614 * @param pGVCpu The ring-0 VCPU handle.
615 * @param pCtx The CPU context structure to export from.
616 */
617NEM_TMPL_STATIC int nemR0WinExportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx)
618{
619 PVMCPU pVCpu = &pGVM->pVM->aCpus[pGVCpu->idCpu];
620 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
621 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
622 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
623
624 pInput->PartitionId = pGVM->nem.s.idHvPartition;
625 pInput->VpIndex = pGVCpu->idCpu;
626 pInput->RsvdZ = 0;
627
628 uint64_t const fWhat = ~pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK);
629 if ( !fWhat
630 && pVCpu->nem.s.fCurrentInterruptWindows == pVCpu->nem.s.fDesiredInterruptWindows)
631 return VINF_SUCCESS;
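/* Each register to set is appended below as an HV_REGISTER_ASSOC element following the
   fixed header above; iReg counts how many we add, and the single hypercall page caps
   us at roughly 127 elements (see the assertion just before the hypercall). */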
632 uintptr_t iReg = 0;
633
634 /* GPRs */
635 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
636 {
637 if (fWhat & CPUMCTX_EXTRN_RAX)
638 {
639 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
640 pInput->Elements[iReg].Name = HvX64RegisterRax;
641 pInput->Elements[iReg].Value.Reg64 = pCtx->rax;
642 iReg++;
643 }
644 if (fWhat & CPUMCTX_EXTRN_RCX)
645 {
646 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
647 pInput->Elements[iReg].Name = HvX64RegisterRcx;
648 pInput->Elements[iReg].Value.Reg64 = pCtx->rcx;
649 iReg++;
650 }
651 if (fWhat & CPUMCTX_EXTRN_RDX)
652 {
653 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
654 pInput->Elements[iReg].Name = HvX64RegisterRdx;
655 pInput->Elements[iReg].Value.Reg64 = pCtx->rdx;
656 iReg++;
657 }
658 if (fWhat & CPUMCTX_EXTRN_RBX)
659 {
660 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
661 pInput->Elements[iReg].Name = HvX64RegisterRbx;
662 pInput->Elements[iReg].Value.Reg64 = pCtx->rbx;
663 iReg++;
664 }
665 if (fWhat & CPUMCTX_EXTRN_RSP)
666 {
667 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
668 pInput->Elements[iReg].Name = HvX64RegisterRsp;
669 pInput->Elements[iReg].Value.Reg64 = pCtx->rsp;
670 iReg++;
671 }
672 if (fWhat & CPUMCTX_EXTRN_RBP)
673 {
674 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
675 pInput->Elements[iReg].Name = HvX64RegisterRbp;
676 pInput->Elements[iReg].Value.Reg64 = pCtx->rbp;
677 iReg++;
678 }
679 if (fWhat & CPUMCTX_EXTRN_RSI)
680 {
681 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
682 pInput->Elements[iReg].Name = HvX64RegisterRsi;
683 pInput->Elements[iReg].Value.Reg64 = pCtx->rsi;
684 iReg++;
685 }
686 if (fWhat & CPUMCTX_EXTRN_RDI)
687 {
688 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
689 pInput->Elements[iReg].Name = HvX64RegisterRdi;
690 pInput->Elements[iReg].Value.Reg64 = pCtx->rdi;
691 iReg++;
692 }
693 if (fWhat & CPUMCTX_EXTRN_R8_R15)
694 {
695 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
696 pInput->Elements[iReg].Name = HvX64RegisterR8;
697 pInput->Elements[iReg].Value.Reg64 = pCtx->r8;
698 iReg++;
699 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
700 pInput->Elements[iReg].Name = HvX64RegisterR9;
701 pInput->Elements[iReg].Value.Reg64 = pCtx->r9;
702 iReg++;
703 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
704 pInput->Elements[iReg].Name = HvX64RegisterR10;
705 pInput->Elements[iReg].Value.Reg64 = pCtx->r10;
706 iReg++;
707 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
708 pInput->Elements[iReg].Name = HvX64RegisterR11;
709 pInput->Elements[iReg].Value.Reg64 = pCtx->r11;
710 iReg++;
711 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
712 pInput->Elements[iReg].Name = HvX64RegisterR12;
713 pInput->Elements[iReg].Value.Reg64 = pCtx->r12;
714 iReg++;
715 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
716 pInput->Elements[iReg].Name = HvX64RegisterR13;
717 pInput->Elements[iReg].Value.Reg64 = pCtx->r13;
718 iReg++;
719 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
720 pInput->Elements[iReg].Name = HvX64RegisterR14;
721 pInput->Elements[iReg].Value.Reg64 = pCtx->r14;
722 iReg++;
723 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
724 pInput->Elements[iReg].Name = HvX64RegisterR15;
725 pInput->Elements[iReg].Value.Reg64 = pCtx->r15;
726 iReg++;
727 }
728 }
729
730 /* RIP & Flags */
731 if (fWhat & CPUMCTX_EXTRN_RIP)
732 {
733 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
734 pInput->Elements[iReg].Name = HvX64RegisterRip;
735 pInput->Elements[iReg].Value.Reg64 = pCtx->rip;
736 iReg++;
737 }
738 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
739 {
740 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
741 pInput->Elements[iReg].Name = HvX64RegisterRflags;
742 pInput->Elements[iReg].Value.Reg64 = pCtx->rflags.u;
743 iReg++;
744 }
745
746 /* Segments */
747# define COPY_OUT_SEG(a_idx, a_enmName, a_SReg) \
748 do { \
749 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[a_idx]); \
750 pInput->Elements[a_idx].Name = a_enmName; \
751 pInput->Elements[a_idx].Value.Segment.Base = (a_SReg).u64Base; \
752 pInput->Elements[a_idx].Value.Segment.Limit = (a_SReg).u32Limit; \
753 pInput->Elements[a_idx].Value.Segment.Selector = (a_SReg).Sel; \
754 pInput->Elements[a_idx].Value.Segment.Attributes = (a_SReg).Attr.u; \
755 } while (0)
756 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
757 {
758 if (fWhat & CPUMCTX_EXTRN_CS)
759 {
760 COPY_OUT_SEG(iReg, HvX64RegisterCs, pCtx->cs);
761 iReg++;
762 }
763 if (fWhat & CPUMCTX_EXTRN_ES)
764 {
765 COPY_OUT_SEG(iReg, HvX64RegisterEs, pCtx->es);
766 iReg++;
767 }
768 if (fWhat & CPUMCTX_EXTRN_SS)
769 {
770 COPY_OUT_SEG(iReg, HvX64RegisterSs, pCtx->ss);
771 iReg++;
772 }
773 if (fWhat & CPUMCTX_EXTRN_DS)
774 {
775 COPY_OUT_SEG(iReg, HvX64RegisterDs, pCtx->ds);
776 iReg++;
777 }
778 if (fWhat & CPUMCTX_EXTRN_FS)
779 {
780 COPY_OUT_SEG(iReg, HvX64RegisterFs, pCtx->fs);
781 iReg++;
782 }
783 if (fWhat & CPUMCTX_EXTRN_GS)
784 {
785 COPY_OUT_SEG(iReg, HvX64RegisterGs, pCtx->gs);
786 iReg++;
787 }
788 }
789
790 /* Descriptor tables & task segment. */
791 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
792 {
793 if (fWhat & CPUMCTX_EXTRN_LDTR)
794 {
795 COPY_OUT_SEG(iReg, HvX64RegisterLdtr, pCtx->ldtr);
796 iReg++;
797 }
798 if (fWhat & CPUMCTX_EXTRN_TR)
799 {
800 COPY_OUT_SEG(iReg, HvX64RegisterTr, pCtx->tr);
801 iReg++;
802 }
803
804 if (fWhat & CPUMCTX_EXTRN_IDTR)
805 {
806 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
807 pInput->Elements[iReg].Value.Table.Pad[0] = 0;
808 pInput->Elements[iReg].Value.Table.Pad[1] = 0;
809 pInput->Elements[iReg].Value.Table.Pad[2] = 0;
810 pInput->Elements[iReg].Name = HvX64RegisterIdtr;
811 pInput->Elements[iReg].Value.Table.Limit = pCtx->idtr.cbIdt;
812 pInput->Elements[iReg].Value.Table.Base = pCtx->idtr.pIdt;
813 iReg++;
814 }
815 if (fWhat & CPUMCTX_EXTRN_GDTR)
816 {
817 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
818 pInput->Elements[iReg].Value.Table.Pad[0] = 0;
819 pInput->Elements[iReg].Value.Table.Pad[1] = 0;
820 pInput->Elements[iReg].Value.Table.Pad[2] = 0;
821 pInput->Elements[iReg].Name = HvX64RegisterGdtr;
822 pInput->Elements[iReg].Value.Table.Limit = pCtx->gdtr.cbGdt;
823 pInput->Elements[iReg].Value.Table.Base = pCtx->gdtr.pGdt;
824 iReg++;
825 }
826 }
827
828 /* Control registers. */
829 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
830 {
831 if (fWhat & CPUMCTX_EXTRN_CR0)
832 {
833 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
834 pInput->Elements[iReg].Name = HvX64RegisterCr0;
835 pInput->Elements[iReg].Value.Reg64 = pCtx->cr0;
836 iReg++;
837 }
838 if (fWhat & CPUMCTX_EXTRN_CR2)
839 {
840 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
841 pInput->Elements[iReg].Name = HvX64RegisterCr2;
842 pInput->Elements[iReg].Value.Reg64 = pCtx->cr2;
843 iReg++;
844 }
845 if (fWhat & CPUMCTX_EXTRN_CR3)
846 {
847 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
848 pInput->Elements[iReg].Name = HvX64RegisterCr3;
849 pInput->Elements[iReg].Value.Reg64 = pCtx->cr3;
850 iReg++;
851 }
852 if (fWhat & CPUMCTX_EXTRN_CR4)
853 {
854 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
855 pInput->Elements[iReg].Name = HvX64RegisterCr4;
856 pInput->Elements[iReg].Value.Reg64 = pCtx->cr4;
857 iReg++;
858 }
859 }
860 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
861 {
862 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
863 pInput->Elements[iReg].Name = HvX64RegisterCr8;
864 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestCR8(pVCpu);
865 iReg++;
866 }
867
868 /** @todo does HvX64RegisterXfem mean XCR0? What about the related MSR. */
869
870 /* Debug registers. */
871/** @todo fixme. Figure out what the hyper-v version of KVM_SET_GUEST_DEBUG would be. */
872 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
873 {
874 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
875 pInput->Elements[iReg].Name = HvX64RegisterDr0;
876 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR0(pVCpu);
877 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[0];
878 iReg++;
879 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
880 pInput->Elements[iReg].Name = HvX64RegisterDr1;
881 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR1(pVCpu);
882 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[1];
883 iReg++;
884 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
885 pInput->Elements[iReg].Name = HvX64RegisterDr2;
886 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR2(pVCpu);
887 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[2];
888 iReg++;
889 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
890 pInput->Elements[iReg].Name = HvX64RegisterDr3;
891 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR3(pVCpu);
892 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[3];
893 iReg++;
894 }
895 if (fWhat & CPUMCTX_EXTRN_DR6)
896 {
897 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
898 pInput->Elements[iReg].Name = HvX64RegisterDr6;
899 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR6(pVCpu);
900 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[6];
901 iReg++;
902 }
903 if (fWhat & CPUMCTX_EXTRN_DR7)
904 {
905 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
906 pInput->Elements[iReg].Name = HvX64RegisterDr7;
907 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR7(pVCpu);
908 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[7];
909 iReg++;
910 }
911
912 /* Floating point state. */
913 if (fWhat & CPUMCTX_EXTRN_X87)
914 {
915 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
916 pInput->Elements[iReg].Name = HvX64RegisterFpMmx0;
917 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[0].au64[0];
918 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[0].au64[1];
919 iReg++;
920 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
921 pInput->Elements[iReg].Name = HvX64RegisterFpMmx1;
922 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[1].au64[0];
923 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[1].au64[1];
924 iReg++;
925 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
926 pInput->Elements[iReg].Name = HvX64RegisterFpMmx2;
927 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[2].au64[0];
928 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[2].au64[1];
929 iReg++;
930 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
931 pInput->Elements[iReg].Name = HvX64RegisterFpMmx3;
932 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[3].au64[0];
933 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[3].au64[1];
934 iReg++;
935 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
936 pInput->Elements[iReg].Name = HvX64RegisterFpMmx4;
937 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[4].au64[0];
938 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[4].au64[1];
939 iReg++;
940 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
941 pInput->Elements[iReg].Name = HvX64RegisterFpMmx5;
942 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[5].au64[0];
943 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[5].au64[1];
944 iReg++;
945 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
946 pInput->Elements[iReg].Name = HvX64RegisterFpMmx6;
947 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[6].au64[0];
948 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[6].au64[1];
949 iReg++;
950 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
951 pInput->Elements[iReg].Name = HvX64RegisterFpMmx7;
952 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[7].au64[0];
953 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[7].au64[1];
954 iReg++;
955
956 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
957 pInput->Elements[iReg].Name = HvX64RegisterFpControlStatus;
958 pInput->Elements[iReg].Value.FpControlStatus.FpControl = pCtx->pXStateR0->x87.FCW;
959 pInput->Elements[iReg].Value.FpControlStatus.FpStatus = pCtx->pXStateR0->x87.FSW;
960 pInput->Elements[iReg].Value.FpControlStatus.FpTag = pCtx->pXStateR0->x87.FTW;
961 pInput->Elements[iReg].Value.FpControlStatus.Reserved = pCtx->pXStateR0->x87.FTW >> 8;
962 pInput->Elements[iReg].Value.FpControlStatus.LastFpOp = pCtx->pXStateR0->x87.FOP;
963 pInput->Elements[iReg].Value.FpControlStatus.LastFpRip = (pCtx->pXStateR0->x87.FPUIP)
964 | ((uint64_t)pCtx->pXStateR0->x87.CS << 32)
965 | ((uint64_t)pCtx->pXStateR0->x87.Rsrvd1 << 48);
966 iReg++;
967/** @todo we've got trouble if we try to write just SSE w/o X87. */
968 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
969 pInput->Elements[iReg].Name = HvX64RegisterXmmControlStatus;
970 pInput->Elements[iReg].Value.XmmControlStatus.LastFpRdp = (pCtx->pXStateR0->x87.FPUDP)
971 | ((uint64_t)pCtx->pXStateR0->x87.DS << 32)
972 | ((uint64_t)pCtx->pXStateR0->x87.Rsrvd2 << 48);
973 pInput->Elements[iReg].Value.XmmControlStatus.XmmStatusControl = pCtx->pXStateR0->x87.MXCSR;
974 pInput->Elements[iReg].Value.XmmControlStatus.XmmStatusControlMask = pCtx->pXStateR0->x87.MXCSR_MASK; /** @todo ??? (Isn't this an output field?) */
975 iReg++;
976 }
977
978 /* Vector state. */
979 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
980 {
981 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
982 pInput->Elements[iReg].Name = HvX64RegisterXmm0;
983 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Lo;
984 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Hi;
985 iReg++;
986 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
987 pInput->Elements[iReg].Name = HvX64RegisterXmm1;
988 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Lo;
989 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Hi;
990 iReg++;
991 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
992 pInput->Elements[iReg].Name = HvX64RegisterXmm2;
993 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Lo;
994 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Hi;
995 iReg++;
996 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
997 pInput->Elements[iReg].Name = HvX64RegisterXmm3;
998 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Lo;
999 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Hi;
1000 iReg++;
1001 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1002 pInput->Elements[iReg].Name = HvX64RegisterXmm4;
1003 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Lo;
1004 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Hi;
1005 iReg++;
1006 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1007 pInput->Elements[iReg].Name = HvX64RegisterXmm5;
1008 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Lo;
1009 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Hi;
1010 iReg++;
1011 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1012 pInput->Elements[iReg].Name = HvX64RegisterXmm6;
1013 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Lo;
1014 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Hi;
1015 iReg++;
1016 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1017 pInput->Elements[iReg].Name = HvX64RegisterXmm7;
1018 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Lo;
1019 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Hi;
1020 iReg++;
1021 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1022 pInput->Elements[iReg].Name = HvX64RegisterXmm8;
1023 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Lo;
1024 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Hi;
1025 iReg++;
1026 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1027 pInput->Elements[iReg].Name = HvX64RegisterXmm9;
1028 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Lo;
1029 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Hi;
1030 iReg++;
1031 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1032 pInput->Elements[iReg].Name = HvX64RegisterXmm10;
1033 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Lo;
1034 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Hi;
1035 iReg++;
1036 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1037 pInput->Elements[iReg].Name = HvX64RegisterXmm11;
1038 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Lo;
1039 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Hi;
1040 iReg++;
1041 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1042 pInput->Elements[iReg].Name = HvX64RegisterXmm12;
1043 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Lo;
1044 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Hi;
1045 iReg++;
1046 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1047 pInput->Elements[iReg].Name = HvX64RegisterXmm13;
1048 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Lo;
1049 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Hi;
1050 iReg++;
1051 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1052 pInput->Elements[iReg].Name = HvX64RegisterXmm14;
1053 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Lo;
1054 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Hi;
1055 iReg++;
1056 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1057 pInput->Elements[iReg].Name = HvX64RegisterXmm15;
1058 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Lo;
1059 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Hi;
1060 iReg++;
1061 }
1062
1063 /* MSRs */
1064 // HvX64RegisterTsc - don't touch
1065 if (fWhat & CPUMCTX_EXTRN_EFER)
1066 {
1067 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1068 pInput->Elements[iReg].Name = HvX64RegisterEfer;
1069 pInput->Elements[iReg].Value.Reg64 = pCtx->msrEFER;
1070 iReg++;
1071 }
1072 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1073 {
1074 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1075 pInput->Elements[iReg].Name = HvX64RegisterKernelGsBase;
1076 pInput->Elements[iReg].Value.Reg64 = pCtx->msrKERNELGSBASE;
1077 iReg++;
1078 }
1079 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1080 {
1081 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1082 pInput->Elements[iReg].Name = HvX64RegisterSysenterCs;
1083 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.cs;
1084 iReg++;
1085 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1086 pInput->Elements[iReg].Name = HvX64RegisterSysenterEip;
1087 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.eip;
1088 iReg++;
1089 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1090 pInput->Elements[iReg].Name = HvX64RegisterSysenterEsp;
1091 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.esp;
1092 iReg++;
1093 }
1094 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1095 {
1096 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1097 pInput->Elements[iReg].Name = HvX64RegisterStar;
1098 pInput->Elements[iReg].Value.Reg64 = pCtx->msrSTAR;
1099 iReg++;
1100 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1101 pInput->Elements[iReg].Name = HvX64RegisterLstar;
1102 pInput->Elements[iReg].Value.Reg64 = pCtx->msrLSTAR;
1103 iReg++;
1104 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1105 pInput->Elements[iReg].Name = HvX64RegisterCstar;
1106 pInput->Elements[iReg].Value.Reg64 = pCtx->msrCSTAR;
1107 iReg++;
1108 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1109 pInput->Elements[iReg].Name = HvX64RegisterSfmask;
1110 pInput->Elements[iReg].Value.Reg64 = pCtx->msrSFMASK;
1111 iReg++;
1112 }
1113 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1114 {
1115 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1116 pInput->Elements[iReg].Name = HvX64RegisterApicBase;
1117 pInput->Elements[iReg].Value.Reg64 = APICGetBaseMsrNoCheck(pVCpu);
1118 iReg++;
1119 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1120 pInput->Elements[iReg].Name = HvX64RegisterPat;
1121 pInput->Elements[iReg].Value.Reg64 = pCtx->msrPAT;
1122 iReg++;
1123# if 0 /** @todo HvX64RegisterMtrrCap is read only? Seems it's not even readable. */
1124 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1125 pInput->Elements[iReg].Name = HvX64RegisterMtrrCap;
1126 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestIa32MtrrCap(pVCpu);
1127 iReg++;
1128# endif
1129
1130 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
1131
1132 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1133 pInput->Elements[iReg].Name = HvX64RegisterMtrrDefType;
1134 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrDefType;
1135 iReg++;
1136
1137 /** @todo we don't keep state for HvX64RegisterMtrrPhysBaseX and HvX64RegisterMtrrPhysMaskX */
1138
1139 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1140 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix64k00000;
1141 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix64K_00000;
1142 iReg++;
1143 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1144 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix16k80000;
1145 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix16K_80000;
1146 iReg++;
1147 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1148 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix16kA0000;
1149 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix16K_A0000;
1150 iReg++;
1151 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1152 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kC0000;
1153 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_C0000;
1154 iReg++;
1155 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1156 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kC8000;
1157 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_C8000;
1158 iReg++;
1159 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1160 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kD0000;
1161 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_D0000;
1162 iReg++;
1163 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1164 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kD8000;
1165 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_D8000;
1166 iReg++;
1167 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1168 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kE0000;
1169 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_E0000;
1170 iReg++;
1171 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1172 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kE8000;
1173 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_E8000;
1174 iReg++;
1175 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1176 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kF0000;
1177 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_F0000;
1178 iReg++;
1179 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1180 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kF8000;
1181 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_F8000;
1182 iReg++;
1183 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1184 pInput->Elements[iReg].Name = HvX64RegisterTscAux;
1185 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.TscAux;
1186 iReg++;
1187
1188# if 0 /** @todo Why can't we write these on Intel systems? Not that we really care... */
1189 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM->pVM);
1190 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
1191 {
1192 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1193 pInput->Elements[iReg].Name = HvX64RegisterIa32MiscEnable;
1194 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MiscEnable;
1195 iReg++;
1196 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1197 pInput->Elements[iReg].Name = HvX64RegisterIa32FeatureControl;
1198 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestIa32FeatureControl(pVCpu);
1199 iReg++;
1200 }
1201# endif
1202 }
1203
1204 /* event injection (clear it). */
1205 if (fWhat & CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)
1206 {
1207 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1208 pInput->Elements[iReg].Name = HvRegisterPendingInterruption;
1209 pInput->Elements[iReg].Value.Reg64 = 0;
1210 iReg++;
1211 }
1212
1213 /* Interruptibility state. This can get a little complicated since we get
1214 half of the state via HV_X64_VP_EXECUTION_STATE. */
1215 if ( (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
1216 == (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI) )
1217 {
1218 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1219 pInput->Elements[iReg].Name = HvRegisterInterruptState;
1220 pInput->Elements[iReg].Value.Reg64 = 0;
1221 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1222 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
1223 pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1;
1224 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
1225 pInput->Elements[iReg].Value.InterruptState.NmiMasked = 1;
1226 iReg++;
1227 }
1228 else if (fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT)
1229 {
1230 if ( pVCpu->nem.s.fLastInterruptShadow
1231 || ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1232 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip))
1233 {
1234 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1235 pInput->Elements[iReg].Name = HvRegisterInterruptState;
1236 pInput->Elements[iReg].Value.Reg64 = 0;
1237 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1238 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
1239 pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1;
1240 /** @todo Retrieve NMI state, currently assuming it's zero. (yes this may happen on I/O) */
1241 //if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
1242 // pInput->Elements[iReg].Value.InterruptState.NmiMasked = 1;
1243 iReg++;
1244 }
1245 }
1246 else
1247 Assert(!(fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI));
1248
1249 /* Interrupt windows. Always set if active as Hyper-V seems to be forgetful. */
1250 uint8_t const fDesiredIntWin = pVCpu->nem.s.fDesiredInterruptWindows;
1251 if ( fDesiredIntWin
1252 || pVCpu->nem.s.fCurrentInterruptWindows != fDesiredIntWin)
1253 {
1254 pVCpu->nem.s.fCurrentInterruptWindows = pVCpu->nem.s.fDesiredInterruptWindows;
1255 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1256 pInput->Elements[iReg].Name = HvX64RegisterDeliverabilityNotifications;
1257 pInput->Elements[iReg].Value.DeliverabilityNotifications.AsUINT64 = fDesiredIntWin;
1258 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.NmiNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_NMI));
1259 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.InterruptNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_REGULAR));
1260 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.InterruptPriority == (fDesiredIntWin & NEM_WIN_INTW_F_PRIO_MASK) >> NEM_WIN_INTW_F_PRIO_SHIFT);
1261 iReg++;
1262 }
1263
1264 /// @todo HvRegisterPendingEvent0
1265 /// @todo HvRegisterPendingEvent1
1266
1267 /*
1268 * Set the registers.
1269 */
1270 Assert((uintptr_t)&pInput->Elements[iReg] - (uintptr_t)pGVCpu->nem.s.HypercallData.pbPage < PAGE_SIZE); /* max is 127 */
1271
1272 /*
1273 * Make the hypercall.
1274 */
1275 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, iReg),
1276 pGVCpu->nem.s.HypercallData.HCPhysPage, 0 /*GCPhysOutput*/);
1277 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(iReg),
1278 ("uResult=%RX64 iRegs=%#x\n", uResult, iReg),
1279 VERR_NEM_SET_REGISTERS_FAILED);
1280 //LogFlow(("nemR0WinExportState: uResult=%#RX64 iReg=%zu fWhat=%#018RX64 fExtrn=%#018RX64 -> %#018RX64\n", uResult, iReg, fWhat, pCtx->fExtrn,
1281 // pCtx->fExtrn | CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM ));
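/* Everything has been handed to Hyper-V, so mark the entire context as externally
   kept again; it must be re-imported before anyone reads it from CPUMCTX. */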
1282 pCtx->fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM;
1283 return VINF_SUCCESS;
1284}
1285#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
1286
1287
1288/**
1289 * Export the state to the native API (out of CPUMCTX).
1290 *
1291 * @returns VBox status code
1292 * @param pGVM The ring-0 VM handle.
1293 * @param pVM The cross context VM handle.
1294 * @param idCpu The calling EMT. Necessary for getting the
1295 * hypercall page and arguments.
1296 */
1297VMMR0_INT_DECL(int) NEMR0ExportState(PGVM pGVM, PVM pVM, VMCPUID idCpu)
1298{
1299#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
1300 /*
1301 * Validate the call.
1302 */
1303 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
1304 if (RT_SUCCESS(rc))
1305 {
1306 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1307 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1308 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
1309
1310 /*
1311 * Call worker.
1312 */
1313 rc = nemR0WinExportState(pGVM, pGVCpu, &pVCpu->cpum.GstCtx);
1314 }
1315 return rc;
1316#else
1317 RT_NOREF(pGVM, pVM, idCpu);
1318 return VERR_NOT_IMPLEMENTED;
1319#endif
1320}
1321
1322
1323#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
1324/**
1325 * Worker for NEMR0ImportState.
1326 *
1327 * The intention is to use it internally later.
1328 *
1329 * @returns VBox status code.
1330 * @param pGVM The ring-0 VM handle.
1331 * @param pGVCpu The ring-0 VCPU handle.
1332 * @param pCtx The CPU context structure to import into.
1333 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
1334 * @param fCanUpdateCr3 Whether it's safe to update CR3 or not.
1335 */
1336NEM_TMPL_STATIC int nemR0WinImportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat, bool fCanUpdateCr3)
1337{
1338 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
1339 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
1340 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
1341 Assert(pCtx == &pGVCpu->pVCpu->cpum.GstCtx);
1342
1343 fWhat &= pCtx->fExtrn;
1344
1345 pInput->PartitionId = pGVM->nem.s.idHvPartition;
1346 pInput->VpIndex = pGVCpu->idCpu;
1347 pInput->fFlags = 0;
1348
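/* For HvCallGetVpRegisters the input is just the header above followed by an array of
   register names; the corresponding values come back through the hypercall output
   buffer when the call is made further down. */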
1349 /* GPRs */
1350 uintptr_t iReg = 0;
1351 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1352 {
1353 if (fWhat & CPUMCTX_EXTRN_RAX)
1354 pInput->Names[iReg++] = HvX64RegisterRax;
1355 if (fWhat & CPUMCTX_EXTRN_RCX)
1356 pInput->Names[iReg++] = HvX64RegisterRcx;
1357 if (fWhat & CPUMCTX_EXTRN_RDX)
1358 pInput->Names[iReg++] = HvX64RegisterRdx;
1359 if (fWhat & CPUMCTX_EXTRN_RBX)
1360 pInput->Names[iReg++] = HvX64RegisterRbx;
1361 if (fWhat & CPUMCTX_EXTRN_RSP)
1362 pInput->Names[iReg++] = HvX64RegisterRsp;
1363 if (fWhat & CPUMCTX_EXTRN_RBP)
1364 pInput->Names[iReg++] = HvX64RegisterRbp;
1365 if (fWhat & CPUMCTX_EXTRN_RSI)
1366 pInput->Names[iReg++] = HvX64RegisterRsi;
1367 if (fWhat & CPUMCTX_EXTRN_RDI)
1368 pInput->Names[iReg++] = HvX64RegisterRdi;
1369 if (fWhat & CPUMCTX_EXTRN_R8_R15)
1370 {
1371 pInput->Names[iReg++] = HvX64RegisterR8;
1372 pInput->Names[iReg++] = HvX64RegisterR9;
1373 pInput->Names[iReg++] = HvX64RegisterR10;
1374 pInput->Names[iReg++] = HvX64RegisterR11;
1375 pInput->Names[iReg++] = HvX64RegisterR12;
1376 pInput->Names[iReg++] = HvX64RegisterR13;
1377 pInput->Names[iReg++] = HvX64RegisterR14;
1378 pInput->Names[iReg++] = HvX64RegisterR15;
1379 }
1380 }
1381
1382 /* RIP & Flags */
1383 if (fWhat & CPUMCTX_EXTRN_RIP)
1384 pInput->Names[iReg++] = HvX64RegisterRip;
1385 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
1386 pInput->Names[iReg++] = HvX64RegisterRflags;
1387
1388 /* Segments */
1389 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
1390 {
1391 if (fWhat & CPUMCTX_EXTRN_CS)
1392 pInput->Names[iReg++] = HvX64RegisterCs;
1393 if (fWhat & CPUMCTX_EXTRN_ES)
1394 pInput->Names[iReg++] = HvX64RegisterEs;
1395 if (fWhat & CPUMCTX_EXTRN_SS)
1396 pInput->Names[iReg++] = HvX64RegisterSs;
1397 if (fWhat & CPUMCTX_EXTRN_DS)
1398 pInput->Names[iReg++] = HvX64RegisterDs;
1399 if (fWhat & CPUMCTX_EXTRN_FS)
1400 pInput->Names[iReg++] = HvX64RegisterFs;
1401 if (fWhat & CPUMCTX_EXTRN_GS)
1402 pInput->Names[iReg++] = HvX64RegisterGs;
1403 }
1404
1405 /* Descriptor tables and the task segment. */
1406 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
1407 {
1408 if (fWhat & CPUMCTX_EXTRN_LDTR)
1409 pInput->Names[iReg++] = HvX64RegisterLdtr;
1410 if (fWhat & CPUMCTX_EXTRN_TR)
1411 pInput->Names[iReg++] = HvX64RegisterTr;
1412 if (fWhat & CPUMCTX_EXTRN_IDTR)
1413 pInput->Names[iReg++] = HvX64RegisterIdtr;
1414 if (fWhat & CPUMCTX_EXTRN_GDTR)
1415 pInput->Names[iReg++] = HvX64RegisterGdtr;
1416 }
1417
1418 /* Control registers. */
1419 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
1420 {
1421 if (fWhat & CPUMCTX_EXTRN_CR0)
1422 pInput->Names[iReg++] = HvX64RegisterCr0;
1423 if (fWhat & CPUMCTX_EXTRN_CR2)
1424 pInput->Names[iReg++] = HvX64RegisterCr2;
1425 if (fWhat & CPUMCTX_EXTRN_CR3)
1426 pInput->Names[iReg++] = HvX64RegisterCr3;
1427 if (fWhat & CPUMCTX_EXTRN_CR4)
1428 pInput->Names[iReg++] = HvX64RegisterCr4;
1429 }
1430 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
1431 pInput->Names[iReg++] = HvX64RegisterCr8;
1432
1433 /* Debug registers. */
1434 if (fWhat & CPUMCTX_EXTRN_DR7)
1435 pInput->Names[iReg++] = HvX64RegisterDr7;
1436 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
1437 {
1438 if (!(fWhat & CPUMCTX_EXTRN_DR7) && (pCtx->fExtrn & CPUMCTX_EXTRN_DR7))
1439 {
1440 fWhat |= CPUMCTX_EXTRN_DR7;
1441 pInput->Names[iReg++] = HvX64RegisterDr7;
1442 }
1443 pInput->Names[iReg++] = HvX64RegisterDr0;
1444 pInput->Names[iReg++] = HvX64RegisterDr1;
1445 pInput->Names[iReg++] = HvX64RegisterDr2;
1446 pInput->Names[iReg++] = HvX64RegisterDr3;
1447 }
1448 if (fWhat & CPUMCTX_EXTRN_DR6)
1449 pInput->Names[iReg++] = HvX64RegisterDr6;
1450
1451 /* Floating point state. */
1452 if (fWhat & CPUMCTX_EXTRN_X87)
1453 {
1454 pInput->Names[iReg++] = HvX64RegisterFpMmx0;
1455 pInput->Names[iReg++] = HvX64RegisterFpMmx1;
1456 pInput->Names[iReg++] = HvX64RegisterFpMmx2;
1457 pInput->Names[iReg++] = HvX64RegisterFpMmx3;
1458 pInput->Names[iReg++] = HvX64RegisterFpMmx4;
1459 pInput->Names[iReg++] = HvX64RegisterFpMmx5;
1460 pInput->Names[iReg++] = HvX64RegisterFpMmx6;
1461 pInput->Names[iReg++] = HvX64RegisterFpMmx7;
1462 pInput->Names[iReg++] = HvX64RegisterFpControlStatus;
1463 }
1464 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
1465 pInput->Names[iReg++] = HvX64RegisterXmmControlStatus;
1466
1467 /* Vector state. */
1468 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
1469 {
1470 pInput->Names[iReg++] = HvX64RegisterXmm0;
1471 pInput->Names[iReg++] = HvX64RegisterXmm1;
1472 pInput->Names[iReg++] = HvX64RegisterXmm2;
1473 pInput->Names[iReg++] = HvX64RegisterXmm3;
1474 pInput->Names[iReg++] = HvX64RegisterXmm4;
1475 pInput->Names[iReg++] = HvX64RegisterXmm5;
1476 pInput->Names[iReg++] = HvX64RegisterXmm6;
1477 pInput->Names[iReg++] = HvX64RegisterXmm7;
1478 pInput->Names[iReg++] = HvX64RegisterXmm8;
1479 pInput->Names[iReg++] = HvX64RegisterXmm9;
1480 pInput->Names[iReg++] = HvX64RegisterXmm10;
1481 pInput->Names[iReg++] = HvX64RegisterXmm11;
1482 pInput->Names[iReg++] = HvX64RegisterXmm12;
1483 pInput->Names[iReg++] = HvX64RegisterXmm13;
1484 pInput->Names[iReg++] = HvX64RegisterXmm14;
1485 pInput->Names[iReg++] = HvX64RegisterXmm15;
1486 }
1487
1488 /* MSRs */
1489 // HvX64RegisterTsc - don't touch
1490 if (fWhat & CPUMCTX_EXTRN_EFER)
1491 pInput->Names[iReg++] = HvX64RegisterEfer;
1492 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1493 pInput->Names[iReg++] = HvX64RegisterKernelGsBase;
1494 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1495 {
1496 pInput->Names[iReg++] = HvX64RegisterSysenterCs;
1497 pInput->Names[iReg++] = HvX64RegisterSysenterEip;
1498 pInput->Names[iReg++] = HvX64RegisterSysenterEsp;
1499 }
1500 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1501 {
1502 pInput->Names[iReg++] = HvX64RegisterStar;
1503 pInput->Names[iReg++] = HvX64RegisterLstar;
1504 pInput->Names[iReg++] = HvX64RegisterCstar;
1505 pInput->Names[iReg++] = HvX64RegisterSfmask;
1506 }
1507
1508# ifdef LOG_ENABLED
1509 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM->pVM);
1510# endif
1511 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1512 {
1513 pInput->Names[iReg++] = HvX64RegisterApicBase; /// @todo APIC BASE
1514 pInput->Names[iReg++] = HvX64RegisterPat;
1515# if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
1516 pInput->Names[iReg++] = HvX64RegisterMtrrCap;
1517# endif
1518 pInput->Names[iReg++] = HvX64RegisterMtrrDefType;
1519 pInput->Names[iReg++] = HvX64RegisterMtrrFix64k00000;
1520 pInput->Names[iReg++] = HvX64RegisterMtrrFix16k80000;
1521 pInput->Names[iReg++] = HvX64RegisterMtrrFix16kA0000;
1522 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kC0000;
1523 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kC8000;
1524 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kD0000;
1525 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kD8000;
1526 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kE0000;
1527 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kE8000;
1528 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kF0000;
1529 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kF8000;
1530 pInput->Names[iReg++] = HvX64RegisterTscAux;
1531# if 0 /** @todo why can't we read HvX64RegisterIa32MiscEnable? */
1532 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
1533 pInput->Names[iReg++] = HvX64RegisterIa32MiscEnable;
1534# endif
1535# ifdef LOG_ENABLED
1536 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
1537 pInput->Names[iReg++] = HvX64RegisterIa32FeatureControl;
1538# endif
1539 }
1540
1541 /* Interruptibility. */
1542 if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
1543 {
1544 pInput->Names[iReg++] = HvRegisterInterruptState;
1545 pInput->Names[iReg++] = HvX64RegisterRip;
1546 }
1547
1548 /* event injection */
1549 pInput->Names[iReg++] = HvRegisterPendingInterruption;
1550 pInput->Names[iReg++] = HvRegisterPendingEvent0;
1551 pInput->Names[iReg++] = HvRegisterPendingEvent1;
1552 size_t const cRegs = iReg;
1553 size_t const cbInput = RT_ALIGN_Z(RT_UOFFSETOF_DYN(HV_INPUT_GET_VP_REGISTERS, Names[cRegs]), 32);
1554
1555 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
1556 Assert((uintptr_t)&paValues[cRegs] - (uintptr_t)pGVCpu->nem.s.HypercallData.pbPage < PAGE_SIZE); /* (max is around 168 registers) */
1557 RT_BZERO(paValues, cRegs * sizeof(paValues[0]));
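    /*
     * Note: input and output share the same hypercall page.  The request header and
     * its Names[] array occupy the first cbInput bytes (rounded up to a 32 byte
     * boundary above), and the hypervisor writes the HV_REGISTER_VALUE results
     * immediately after them, which is why the output GPA passed below is
     * HCPhysPage + cbInput.
     */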
1558
1559 /*
1560 * Make the hypercall.
1561 */
1562 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, cRegs),
1563 pGVCpu->nem.s.HypercallData.HCPhysPage,
1564 pGVCpu->nem.s.HypercallData.HCPhysPage + cbInput);
1565 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(cRegs),
1566 ("uResult=%RX64 cRegs=%#x\n", uResult, cRegs),
1567 VERR_NEM_GET_REGISTERS_FAILED);
1568 //LogFlow(("nemR0WinImportState: uResult=%#RX64 iReg=%zu fWhat=%#018RX64 fExtr=%#018RX64\n", uResult, cRegs, fWhat, pCtx->fExtrn));
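    /*
     * Note on the assertion above: HV_MAKE_CALL_INFO() packs the repetition count
     * for this rep hypercall alongside the call code, and HV_MAKE_CALL_REP_RET(cRegs)
     * is the corresponding success value with all cRegs repetitions reported as
     * completed, so any other result means the hypervisor did not return every
     * requested register.
     */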
1569
1570 /*
1571 * Copy information to the CPUM context.
1572 */
1573 PVMCPU pVCpu = &pGVM->pVM->aCpus[pGVCpu->idCpu];
1574 iReg = 0;
1575
1576 /* GPRs */
1577 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1578 {
1579 if (fWhat & CPUMCTX_EXTRN_RAX)
1580 {
1581 Assert(pInput->Names[iReg] == HvX64RegisterRax);
1582 pCtx->rax = paValues[iReg++].Reg64;
1583 }
1584 if (fWhat & CPUMCTX_EXTRN_RCX)
1585 {
1586 Assert(pInput->Names[iReg] == HvX64RegisterRcx);
1587 pCtx->rcx = paValues[iReg++].Reg64;
1588 }
1589 if (fWhat & CPUMCTX_EXTRN_RDX)
1590 {
1591 Assert(pInput->Names[iReg] == HvX64RegisterRdx);
1592 pCtx->rdx = paValues[iReg++].Reg64;
1593 }
1594 if (fWhat & CPUMCTX_EXTRN_RBX)
1595 {
1596 Assert(pInput->Names[iReg] == HvX64RegisterRbx);
1597 pCtx->rbx = paValues[iReg++].Reg64;
1598 }
1599 if (fWhat & CPUMCTX_EXTRN_RSP)
1600 {
1601 Assert(pInput->Names[iReg] == HvX64RegisterRsp);
1602 pCtx->rsp = paValues[iReg++].Reg64;
1603 }
1604 if (fWhat & CPUMCTX_EXTRN_RBP)
1605 {
1606 Assert(pInput->Names[iReg] == HvX64RegisterRbp);
1607 pCtx->rbp = paValues[iReg++].Reg64;
1608 }
1609 if (fWhat & CPUMCTX_EXTRN_RSI)
1610 {
1611 Assert(pInput->Names[iReg] == HvX64RegisterRsi);
1612 pCtx->rsi = paValues[iReg++].Reg64;
1613 }
1614 if (fWhat & CPUMCTX_EXTRN_RDI)
1615 {
1616 Assert(pInput->Names[iReg] == HvX64RegisterRdi);
1617 pCtx->rdi = paValues[iReg++].Reg64;
1618 }
1619 if (fWhat & CPUMCTX_EXTRN_R8_R15)
1620 {
1621 Assert(pInput->Names[iReg] == HvX64RegisterR8);
1622 Assert(pInput->Names[iReg + 7] == HvX64RegisterR15);
1623 pCtx->r8 = paValues[iReg++].Reg64;
1624 pCtx->r9 = paValues[iReg++].Reg64;
1625 pCtx->r10 = paValues[iReg++].Reg64;
1626 pCtx->r11 = paValues[iReg++].Reg64;
1627 pCtx->r12 = paValues[iReg++].Reg64;
1628 pCtx->r13 = paValues[iReg++].Reg64;
1629 pCtx->r14 = paValues[iReg++].Reg64;
1630 pCtx->r15 = paValues[iReg++].Reg64;
1631 }
1632 }
1633
1634 /* RIP & Flags */
1635 if (fWhat & CPUMCTX_EXTRN_RIP)
1636 {
1637 Assert(pInput->Names[iReg] == HvX64RegisterRip);
1638 pCtx->rip = paValues[iReg++].Reg64;
1639 }
1640 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
1641 {
1642 Assert(pInput->Names[iReg] == HvX64RegisterRflags);
1643 pCtx->rflags.u = paValues[iReg++].Reg64;
1644 }
1645
1646 /* Segments */
1647# define COPY_BACK_SEG(a_idx, a_enmName, a_SReg) \
1648 do { \
1649 Assert(pInput->Names[a_idx] == a_enmName); \
1650 (a_SReg).u64Base = paValues[a_idx].Segment.Base; \
1651 (a_SReg).u32Limit = paValues[a_idx].Segment.Limit; \
1652 (a_SReg).ValidSel = (a_SReg).Sel = paValues[a_idx].Segment.Selector; \
1653 (a_SReg).Attr.u = paValues[a_idx].Segment.Attributes; \
1654 (a_SReg).fFlags = CPUMSELREG_FLAGS_VALID; \
1655 } while (0)
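    /* Note: the macro above copies the Hyper-V segment descriptor verbatim, filling in
       both Sel and ValidSel and marking the hidden register parts as valid, since the
       values come straight from the hypervisor. */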
1656 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
1657 {
1658 if (fWhat & CPUMCTX_EXTRN_CS)
1659 {
1660 COPY_BACK_SEG(iReg, HvX64RegisterCs, pCtx->cs);
1661 iReg++;
1662 }
1663 if (fWhat & CPUMCTX_EXTRN_ES)
1664 {
1665 COPY_BACK_SEG(iReg, HvX64RegisterEs, pCtx->es);
1666 iReg++;
1667 }
1668 if (fWhat & CPUMCTX_EXTRN_SS)
1669 {
1670 COPY_BACK_SEG(iReg, HvX64RegisterSs, pCtx->ss);
1671 iReg++;
1672 }
1673 if (fWhat & CPUMCTX_EXTRN_DS)
1674 {
1675 COPY_BACK_SEG(iReg, HvX64RegisterDs, pCtx->ds);
1676 iReg++;
1677 }
1678 if (fWhat & CPUMCTX_EXTRN_FS)
1679 {
1680 COPY_BACK_SEG(iReg, HvX64RegisterFs, pCtx->fs);
1681 iReg++;
1682 }
1683 if (fWhat & CPUMCTX_EXTRN_GS)
1684 {
1685 COPY_BACK_SEG(iReg, HvX64RegisterGs, pCtx->gs);
1686 iReg++;
1687 }
1688 }
1689 /* Descriptor tables and the task segment. */
1690 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
1691 {
1692 if (fWhat & CPUMCTX_EXTRN_LDTR)
1693 {
1694 COPY_BACK_SEG(iReg, HvX64RegisterLdtr, pCtx->ldtr);
1695 iReg++;
1696 }
1697 if (fWhat & CPUMCTX_EXTRN_TR)
1698 {
1699            /* AMD-V likes loading TR while it is in the AVAIL state, whereas Intel insists on BUSY.  So,
1700               to avoid triggering sanity assertions elsewhere in the code, always fix this up. */
1701 COPY_BACK_SEG(iReg, HvX64RegisterTr, pCtx->tr);
1702 switch (pCtx->tr.Attr.n.u4Type)
1703 {
1704 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1705 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1706 break;
1707 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
1708 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
1709 break;
1710 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
1711 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;
1712 break;
1713 }
1714 iReg++;
1715 }
1716 if (fWhat & CPUMCTX_EXTRN_IDTR)
1717 {
1718 Assert(pInput->Names[iReg] == HvX64RegisterIdtr);
1719 pCtx->idtr.cbIdt = paValues[iReg].Table.Limit;
1720 pCtx->idtr.pIdt = paValues[iReg].Table.Base;
1721 iReg++;
1722 }
1723 if (fWhat & CPUMCTX_EXTRN_GDTR)
1724 {
1725 Assert(pInput->Names[iReg] == HvX64RegisterGdtr);
1726 pCtx->gdtr.cbGdt = paValues[iReg].Table.Limit;
1727 pCtx->gdtr.pGdt = paValues[iReg].Table.Base;
1728 iReg++;
1729 }
1730 }
1731
1732 /* Control registers. */
1733 bool fMaybeChangedMode = false;
1734 bool fUpdateCr3 = false;
1735 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
1736 {
1737 if (fWhat & CPUMCTX_EXTRN_CR0)
1738 {
1739 Assert(pInput->Names[iReg] == HvX64RegisterCr0);
1740 if (pCtx->cr0 != paValues[iReg].Reg64)
1741 {
1742 CPUMSetGuestCR0(pVCpu, paValues[iReg].Reg64);
1743 fMaybeChangedMode = true;
1744 }
1745 iReg++;
1746 }
1747 if (fWhat & CPUMCTX_EXTRN_CR2)
1748 {
1749 Assert(pInput->Names[iReg] == HvX64RegisterCr2);
1750 pCtx->cr2 = paValues[iReg].Reg64;
1751 iReg++;
1752 }
1753 if (fWhat & CPUMCTX_EXTRN_CR3)
1754 {
1755 Assert(pInput->Names[iReg] == HvX64RegisterCr3);
1756 if (pCtx->cr3 != paValues[iReg].Reg64)
1757 {
1758 CPUMSetGuestCR3(pVCpu, paValues[iReg].Reg64);
1759 fUpdateCr3 = true;
1760 }
1761 iReg++;
1762 }
1763 if (fWhat & CPUMCTX_EXTRN_CR4)
1764 {
1765 Assert(pInput->Names[iReg] == HvX64RegisterCr4);
1766 if (pCtx->cr4 != paValues[iReg].Reg64)
1767 {
1768 CPUMSetGuestCR4(pVCpu, paValues[iReg].Reg64);
1769 fMaybeChangedMode = true;
1770 }
1771 iReg++;
1772 }
1773 }
1774 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
1775 {
1776 Assert(pInput->Names[iReg] == HvX64RegisterCr8);
1777 APICSetTpr(pVCpu, (uint8_t)paValues[iReg].Reg64 << 4);
1778 iReg++;
1779 }
1780
1781 /* Debug registers. */
1782 if (fWhat & CPUMCTX_EXTRN_DR7)
1783 {
1784 Assert(pInput->Names[iReg] == HvX64RegisterDr7);
1785 if (pCtx->dr[7] != paValues[iReg].Reg64)
1786 CPUMSetGuestDR7(pVCpu, paValues[iReg].Reg64);
1787 pCtx->fExtrn &= ~CPUMCTX_EXTRN_DR7; /* Hack alert! Avoids asserting when processing CPUMCTX_EXTRN_DR0_DR3. */
1788 iReg++;
1789 }
1790 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
1791 {
1792 Assert(pInput->Names[iReg] == HvX64RegisterDr0);
1793 Assert(pInput->Names[iReg+3] == HvX64RegisterDr3);
1794 if (pCtx->dr[0] != paValues[iReg].Reg64)
1795 CPUMSetGuestDR0(pVCpu, paValues[iReg].Reg64);
1796 iReg++;
1797 if (pCtx->dr[1] != paValues[iReg].Reg64)
1798 CPUMSetGuestDR1(pVCpu, paValues[iReg].Reg64);
1799 iReg++;
1800 if (pCtx->dr[2] != paValues[iReg].Reg64)
1801 CPUMSetGuestDR2(pVCpu, paValues[iReg].Reg64);
1802 iReg++;
1803 if (pCtx->dr[3] != paValues[iReg].Reg64)
1804 CPUMSetGuestDR3(pVCpu, paValues[iReg].Reg64);
1805 iReg++;
1806 }
1807 if (fWhat & CPUMCTX_EXTRN_DR6)
1808 {
1809 Assert(pInput->Names[iReg] == HvX64RegisterDr6);
1810 if (pCtx->dr[6] != paValues[iReg].Reg64)
1811 CPUMSetGuestDR6(pVCpu, paValues[iReg].Reg64);
1812 iReg++;
1813 }
1814
1815 /* Floating point state. */
1816 if (fWhat & CPUMCTX_EXTRN_X87)
1817 {
1818 Assert(pInput->Names[iReg] == HvX64RegisterFpMmx0);
1819 Assert(pInput->Names[iReg + 7] == HvX64RegisterFpMmx7);
1820 pCtx->pXStateR0->x87.aRegs[0].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1821 pCtx->pXStateR0->x87.aRegs[0].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1822 iReg++;
1823 pCtx->pXStateR0->x87.aRegs[1].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1824 pCtx->pXStateR0->x87.aRegs[1].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1825 iReg++;
1826 pCtx->pXStateR0->x87.aRegs[2].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1827 pCtx->pXStateR0->x87.aRegs[2].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1828 iReg++;
1829 pCtx->pXStateR0->x87.aRegs[3].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1830 pCtx->pXStateR0->x87.aRegs[3].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1831 iReg++;
1832 pCtx->pXStateR0->x87.aRegs[4].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1833 pCtx->pXStateR0->x87.aRegs[4].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1834 iReg++;
1835 pCtx->pXStateR0->x87.aRegs[5].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1836 pCtx->pXStateR0->x87.aRegs[5].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1837 iReg++;
1838 pCtx->pXStateR0->x87.aRegs[6].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1839 pCtx->pXStateR0->x87.aRegs[6].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1840 iReg++;
1841 pCtx->pXStateR0->x87.aRegs[7].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1842 pCtx->pXStateR0->x87.aRegs[7].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1843 iReg++;
1844
1845 Assert(pInput->Names[iReg] == HvX64RegisterFpControlStatus);
1846 pCtx->pXStateR0->x87.FCW = paValues[iReg].FpControlStatus.FpControl;
1847 pCtx->pXStateR0->x87.FSW = paValues[iReg].FpControlStatus.FpStatus;
1848 pCtx->pXStateR0->x87.FTW = paValues[iReg].FpControlStatus.FpTag
1849 /*| (paValues[iReg].FpControlStatus.Reserved << 8)*/;
1850 pCtx->pXStateR0->x87.FOP = paValues[iReg].FpControlStatus.LastFpOp;
1851 pCtx->pXStateR0->x87.FPUIP = (uint32_t)paValues[iReg].FpControlStatus.LastFpRip;
1852 pCtx->pXStateR0->x87.CS = (uint16_t)(paValues[iReg].FpControlStatus.LastFpRip >> 32);
1853 pCtx->pXStateR0->x87.Rsrvd1 = (uint16_t)(paValues[iReg].FpControlStatus.LastFpRip >> 48);
1854 iReg++;
1855 }
1856
1857 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
1858 {
1859 Assert(pInput->Names[iReg] == HvX64RegisterXmmControlStatus);
1860 if (fWhat & CPUMCTX_EXTRN_X87)
1861 {
1862 pCtx->pXStateR0->x87.FPUDP = (uint32_t)paValues[iReg].XmmControlStatus.LastFpRdp;
1863 pCtx->pXStateR0->x87.DS = (uint16_t)(paValues[iReg].XmmControlStatus.LastFpRdp >> 32);
1864 pCtx->pXStateR0->x87.Rsrvd2 = (uint16_t)(paValues[iReg].XmmControlStatus.LastFpRdp >> 48);
1865 }
1866 pCtx->pXStateR0->x87.MXCSR = paValues[iReg].XmmControlStatus.XmmStatusControl;
1867 pCtx->pXStateR0->x87.MXCSR_MASK = paValues[iReg].XmmControlStatus.XmmStatusControlMask; /** @todo ??? (Isn't this an output field?) */
1868 iReg++;
1869 }
1870
1871 /* Vector state. */
1872 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
1873 {
1874 Assert(pInput->Names[iReg] == HvX64RegisterXmm0);
1875 Assert(pInput->Names[iReg+15] == HvX64RegisterXmm15);
1876 pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1877 pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1878 iReg++;
1879 pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1880 pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1881 iReg++;
1882 pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1883 pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1884 iReg++;
1885 pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1886 pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1887 iReg++;
1888 pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1889 pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1890 iReg++;
1891 pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1892 pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1893 iReg++;
1894 pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1895 pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1896 iReg++;
1897 pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1898 pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1899 iReg++;
1900 pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1901 pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1902 iReg++;
1903 pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1904 pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1905 iReg++;
1906 pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1907 pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1908 iReg++;
1909 pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1910 pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1911 iReg++;
1912 pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1913 pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1914 iReg++;
1915 pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1916 pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1917 iReg++;
1918 pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1919 pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1920 iReg++;
1921 pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1922 pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1923 iReg++;
1924 }
1925
1926
1927 /* MSRs */
1928 // HvX64RegisterTsc - don't touch
1929 if (fWhat & CPUMCTX_EXTRN_EFER)
1930 {
1931 Assert(pInput->Names[iReg] == HvX64RegisterEfer);
1932 if (paValues[iReg].Reg64 != pCtx->msrEFER)
1933 {
1934 Log7(("NEM/%u: MSR EFER changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrEFER, paValues[iReg].Reg64));
1935 if ((paValues[iReg].Reg64 ^ pCtx->msrEFER) & MSR_K6_EFER_NXE)
1936 PGMNotifyNxeChanged(pVCpu, RT_BOOL(paValues[iReg].Reg64 & MSR_K6_EFER_NXE));
1937 pCtx->msrEFER = paValues[iReg].Reg64;
1938 fMaybeChangedMode = true;
1939 }
1940 iReg++;
1941 }
1942 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1943 {
1944 Assert(pInput->Names[iReg] == HvX64RegisterKernelGsBase);
1945 if (pCtx->msrKERNELGSBASE != paValues[iReg].Reg64)
1946 Log7(("NEM/%u: MSR KERNELGSBASE changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrKERNELGSBASE, paValues[iReg].Reg64));
1947 pCtx->msrKERNELGSBASE = paValues[iReg].Reg64;
1948 iReg++;
1949 }
1950 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1951 {
1952 Assert(pInput->Names[iReg] == HvX64RegisterSysenterCs);
1953 if (pCtx->SysEnter.cs != paValues[iReg].Reg64)
1954 Log7(("NEM/%u: MSR SYSENTER.CS changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->SysEnter.cs, paValues[iReg].Reg64));
1955 pCtx->SysEnter.cs = paValues[iReg].Reg64;
1956 iReg++;
1957
1958 Assert(pInput->Names[iReg] == HvX64RegisterSysenterEip);
1959 if (pCtx->SysEnter.eip != paValues[iReg].Reg64)
1960 Log7(("NEM/%u: MSR SYSENTER.EIP changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->SysEnter.eip, paValues[iReg].Reg64));
1961 pCtx->SysEnter.eip = paValues[iReg].Reg64;
1962 iReg++;
1963
1964 Assert(pInput->Names[iReg] == HvX64RegisterSysenterEsp);
1965 if (pCtx->SysEnter.esp != paValues[iReg].Reg64)
1966 Log7(("NEM/%u: MSR SYSENTER.ESP changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->SysEnter.esp, paValues[iReg].Reg64));
1967 pCtx->SysEnter.esp = paValues[iReg].Reg64;
1968 iReg++;
1969 }
1970 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1971 {
1972 Assert(pInput->Names[iReg] == HvX64RegisterStar);
1973 if (pCtx->msrSTAR != paValues[iReg].Reg64)
1974 Log7(("NEM/%u: MSR STAR changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrSTAR, paValues[iReg].Reg64));
1975 pCtx->msrSTAR = paValues[iReg].Reg64;
1976 iReg++;
1977
1978 Assert(pInput->Names[iReg] == HvX64RegisterLstar);
1979 if (pCtx->msrLSTAR != paValues[iReg].Reg64)
1980 Log7(("NEM/%u: MSR LSTAR changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrLSTAR, paValues[iReg].Reg64));
1981 pCtx->msrLSTAR = paValues[iReg].Reg64;
1982 iReg++;
1983
1984 Assert(pInput->Names[iReg] == HvX64RegisterCstar);
1985 if (pCtx->msrCSTAR != paValues[iReg].Reg64)
1986 Log7(("NEM/%u: MSR CSTAR changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrCSTAR, paValues[iReg].Reg64));
1987 pCtx->msrCSTAR = paValues[iReg].Reg64;
1988 iReg++;
1989
1990 Assert(pInput->Names[iReg] == HvX64RegisterSfmask);
1991 if (pCtx->msrSFMASK != paValues[iReg].Reg64)
1992 Log7(("NEM/%u: MSR SFMASK changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrSFMASK, paValues[iReg].Reg64));
1993 pCtx->msrSFMASK = paValues[iReg].Reg64;
1994 iReg++;
1995 }
1996 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1997 {
1998 Assert(pInput->Names[iReg] == HvX64RegisterApicBase);
1999 const uint64_t uOldBase = APICGetBaseMsrNoCheck(pVCpu);
2000 if (paValues[iReg].Reg64 != uOldBase)
2001 {
2002 Log7(("NEM/%u: MSR APICBase changed %RX64 -> %RX64 (%RX64)\n",
2003 pVCpu->idCpu, uOldBase, paValues[iReg].Reg64, paValues[iReg].Reg64 ^ uOldBase));
2004 int rc2 = APICSetBaseMsr(pVCpu, paValues[iReg].Reg64);
2005 AssertLogRelMsg(rc2 == VINF_SUCCESS, ("rc2=%Rrc [%#RX64]\n", rc2, paValues[iReg].Reg64));
2006 }
2007 iReg++;
2008
2009 Assert(pInput->Names[iReg] == HvX64RegisterPat);
2010 if (pCtx->msrPAT != paValues[iReg].Reg64)
2011 Log7(("NEM/%u: MSR PAT changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrPAT, paValues[iReg].Reg64));
2012 pCtx->msrPAT = paValues[iReg].Reg64;
2013 iReg++;
2014
2015# if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
2016 Assert(pInput->Names[iReg] == HvX64RegisterMtrrCap);
2017 if (paValues[iReg].Reg64 != CPUMGetGuestIa32MtrrCap(pVCpu))
2018 Log7(("NEM/%u: MSR MTRR_CAP changed %RX64 -> %RX64 (!!)\n", pVCpu->idCpu, CPUMGetGuestIa32MtrrCap(pVCpu), paValues[iReg].Reg64));
2019 iReg++;
2020# endif
2021
2022 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
2023 Assert(pInput->Names[iReg] == HvX64RegisterMtrrDefType);
2024 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrDefType )
2025 Log7(("NEM/%u: MSR MTRR_DEF_TYPE changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrDefType, paValues[iReg].Reg64));
2026 pCtxMsrs->msr.MtrrDefType = paValues[iReg].Reg64;
2027 iReg++;
2028
2029                 /** @todo we don't keep state for HvX64RegisterMtrrPhysBaseX and HvX64RegisterMtrrPhysMaskX */
2030
2031 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix64k00000);
2032 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix64K_00000 )
2033                 Log7(("NEM/%u: MSR MTRR_FIX64K_00000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix64K_00000, paValues[iReg].Reg64));
2034 pCtxMsrs->msr.MtrrFix64K_00000 = paValues[iReg].Reg64;
2035 iReg++;
2036
2037 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix16k80000);
2038 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix16K_80000 )
2039 Log7(("NEM/%u: MSR MTRR_FIX16K_80000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_80000, paValues[iReg].Reg64));
2040 pCtxMsrs->msr.MtrrFix16K_80000 = paValues[iReg].Reg64;
2041 iReg++;
2042
2043 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix16kA0000);
2044 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix16K_A0000 )
2045 Log7(("NEM/%u: MSR MTRR_FIX16K_A0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_A0000, paValues[iReg].Reg64));
2046 pCtxMsrs->msr.MtrrFix16K_A0000 = paValues[iReg].Reg64;
2047 iReg++;
2048
2049 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kC0000);
2050 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_C0000 )
2051                 Log7(("NEM/%u: MSR MTRR_FIX4K_C0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C0000, paValues[iReg].Reg64));
2052 pCtxMsrs->msr.MtrrFix4K_C0000 = paValues[iReg].Reg64;
2053 iReg++;
2054
2055 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kC8000);
2056 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_C8000 )
2057                 Log7(("NEM/%u: MSR MTRR_FIX4K_C8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C8000, paValues[iReg].Reg64));
2058 pCtxMsrs->msr.MtrrFix4K_C8000 = paValues[iReg].Reg64;
2059 iReg++;
2060
2061 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kD0000);
2062 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_D0000 )
2063                 Log7(("NEM/%u: MSR MTRR_FIX4K_D0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D0000, paValues[iReg].Reg64));
2064 pCtxMsrs->msr.MtrrFix4K_D0000 = paValues[iReg].Reg64;
2065 iReg++;
2066
2067 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kD8000);
2068 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_D8000 )
2069                 Log7(("NEM/%u: MSR MTRR_FIX4K_D8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D8000, paValues[iReg].Reg64));
2070 pCtxMsrs->msr.MtrrFix4K_D8000 = paValues[iReg].Reg64;
2071 iReg++;
2072
2073 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kE0000);
2074 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_E0000 )
2075                 Log7(("NEM/%u: MSR MTRR_FIX4K_E0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E0000, paValues[iReg].Reg64));
2076 pCtxMsrs->msr.MtrrFix4K_E0000 = paValues[iReg].Reg64;
2077 iReg++;
2078
2079 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kE8000);
2080 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_E8000 )
2081                 Log7(("NEM/%u: MSR MTRR_FIX4K_E8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E8000, paValues[iReg].Reg64));
2082 pCtxMsrs->msr.MtrrFix4K_E8000 = paValues[iReg].Reg64;
2083 iReg++;
2084
2085 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kF0000);
2086 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_F0000 )
2087                 Log7(("NEM/%u: MSR MTRR_FIX4K_F0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F0000, paValues[iReg].Reg64));
2088 pCtxMsrs->msr.MtrrFix4K_F0000 = paValues[iReg].Reg64;
2089 iReg++;
2090
2091 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kF8000);
2092 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_F8000 )
2093                 Log7(("NEM/%u: MSR MTRR_FIX4K_F8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F8000, paValues[iReg].Reg64));
2094 pCtxMsrs->msr.MtrrFix4K_F8000 = paValues[iReg].Reg64;
2095 iReg++;
2096
2097 Assert(pInput->Names[iReg] == HvX64RegisterTscAux);
2098 if (paValues[iReg].Reg64 != pCtxMsrs->msr.TscAux )
2099 Log7(("NEM/%u: MSR TSC_AUX changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.TscAux, paValues[iReg].Reg64));
2100 pCtxMsrs->msr.TscAux = paValues[iReg].Reg64;
2101 iReg++;
2102
2103# if 0 /** @todo why can't we even read HvX64RegisterIa32MiscEnable? */
2104 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
2105 {
2106 Assert(pInput->Names[iReg] == HvX64RegisterIa32MiscEnable);
2107 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MiscEnable)
2108 Log7(("NEM/%u: MSR MISC_ENABLE changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MiscEnable, paValues[iReg].Reg64));
2109 pCtxMsrs->msr.MiscEnable = paValues[iReg].Reg64;
2110 iReg++;
2111 }
2112# endif
2113# ifdef LOG_ENABLED
2114 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
2115 {
2116 Assert(pInput->Names[iReg] == HvX64RegisterIa32FeatureControl);
2117 if (paValues[iReg].Reg64 != CPUMGetGuestIa32FeatureControl(pVCpu))
2118 Log7(("NEM/%u: MSR FEATURE_CONTROL changed %RX64 -> %RX64 (!!)\n", pVCpu->idCpu, CPUMGetGuestIa32FeatureControl(pVCpu), paValues[iReg].Reg64));
2119 iReg++;
2120 }
2121# endif
2122 }
2123
2124 /* Interruptibility. */
2125 if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
2126 {
2127 Assert(pInput->Names[iReg] == HvRegisterInterruptState);
2128 Assert(pInput->Names[iReg + 1] == HvX64RegisterRip);
2129
2130 if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
2131 {
2132 pVCpu->nem.s.fLastInterruptShadow = paValues[iReg].InterruptState.InterruptShadow;
2133 if (paValues[iReg].InterruptState.InterruptShadow)
2134 EMSetInhibitInterruptsPC(pVCpu, paValues[iReg + 1].Reg64);
2135 else
2136 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2137 }
2138
2139 if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
2140 {
2141 if (paValues[iReg].InterruptState.NmiMasked)
2142 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
2143 else
2144 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
2145 }
2146
2147 fWhat |= CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI;
2148 iReg += 2;
2149 }
2150
2151 /* Event injection. */
2152 /// @todo HvRegisterPendingInterruption
2153 Assert(pInput->Names[iReg] == HvRegisterPendingInterruption);
2154 if (paValues[iReg].PendingInterruption.InterruptionPending)
2155 {
2156 Log7(("PendingInterruption: type=%u vector=%#x errcd=%RTbool/%#x instr-len=%u nested=%u\n",
2157 paValues[iReg].PendingInterruption.InterruptionType, paValues[iReg].PendingInterruption.InterruptionVector,
2158 paValues[iReg].PendingInterruption.DeliverErrorCode, paValues[iReg].PendingInterruption.ErrorCode,
2159 paValues[iReg].PendingInterruption.InstructionLength, paValues[iReg].PendingInterruption.NestedEvent));
2160 AssertMsg((paValues[iReg].PendingInterruption.AsUINT64 & UINT64_C(0xfc00)) == 0,
2161 ("%#RX64\n", paValues[iReg].PendingInterruption.AsUINT64));
2162 }
2163
2164 /// @todo HvRegisterPendingEvent0
2165 /// @todo HvRegisterPendingEvent1
2166
2167 /* Almost done, just update extrn flags and maybe change PGM mode. */
2168 pCtx->fExtrn &= ~fWhat;
2169 if (!(pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT))))
2170 pCtx->fExtrn = 0;
2171
2172 /* Typical. */
2173 if (!fMaybeChangedMode && !fUpdateCr3)
2174 return VINF_SUCCESS;
2175
2176 /*
2177 * Slow.
2178 */
2179 int rc = VINF_SUCCESS;
2180 if (fMaybeChangedMode)
2181 {
2182 rc = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
2183 AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_1);
2184 }
2185
2186 if (fUpdateCr3)
2187 {
2188 if (fCanUpdateCr3)
2189 {
2190 LogFlow(("nemR0WinImportState: -> PGMUpdateCR3!\n"));
2191 rc = PGMUpdateCR3(pVCpu, pCtx->cr3);
2192 AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_2);
2193 }
2194 else
2195 {
2196 LogFlow(("nemR0WinImportState: -> VERR_NEM_FLUSH_TLB!\n"));
2197 rc = VERR_NEM_FLUSH_TLB; /* Calling PGMFlushTLB w/o long jump setup doesn't work, ring-3 does it. */
2198 }
2199 }
2200
2201 return rc;
2202}
2203#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
2204
2205
2206/**
2207 * Import the state from the native API (back to CPUMCTX).
2208 *
2209 * @returns VBox status code
2210 * @param pGVM The ring-0 VM handle.
2211 * @param pVM The cross context VM handle.
2212 * @param idCpu The calling EMT. Necessary for getting the
2213 * hypercall page and arguments.
2214 * @param fWhat What to import, CPUMCTX_EXTRN_XXX. Set
2215 *                      CPUMCTX_EXTRN_ALL for everything.
2216 */
2217VMMR0_INT_DECL(int) NEMR0ImportState(PGVM pGVM, PVM pVM, VMCPUID idCpu, uint64_t fWhat)
2218{
2219#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2220 /*
2221 * Validate the call.
2222 */
2223 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
2224 if (RT_SUCCESS(rc))
2225 {
2226 PVMCPU pVCpu = &pVM->aCpus[idCpu];
2227 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2228 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2229
2230 /*
2231 * Call worker.
2232 */
2233 rc = nemR0WinImportState(pGVM, pGVCpu, &pVCpu->cpum.GstCtx, fWhat, false /*fCanUpdateCr3*/);
2234 }
2235 return rc;
2236#else
2237 RT_NOREF(pGVM, pVM, idCpu, fWhat);
2238 return VERR_NOT_IMPLEMENTED;
2239#endif
2240}
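/*
 * Illustrative sketch: how the export/import pair above is typically driven from
 * a ring-0 run loop, with the CPUMCTX_EXTRN_XXX bits left in pCtx->fExtrn deciding
 * what still lives in the hypervisor.  Sketch only; the run-loop variables
 * (pGVM, pGVCpu, pVCpu) are assumed to be in scope.
 */
#if 0
    /* Before execution: flush dirty CPUMCTX state to the Hyper-V partition. */
    int rcState = nemR0WinExportState(pGVM, pGVCpu, &pVCpu->cpum.GstCtx);
    AssertRCReturn(rcState, rcState);

    /* ... run the guest and take an exit ... */

    /* After the exit: pull back only what the exit handler needs, e.g. RIP and RFLAGS. */
    rcState = nemR0WinImportState(pGVM, pGVCpu, &pVCpu->cpum.GstCtx,
                                  CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS,
                                  false /*fCanUpdateCr3*/);
    AssertRCReturn(rcState, rcState);
#endif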
2241
2242
2243#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2244/**
2245 * Worker for NEMR0QueryCpuTick and the ring-0 NEMHCQueryCpuTick.
2246 *
2247 * @returns VBox status code.
2248 * @param pGVM The ring-0 VM handle.
2249 * @param pGVCpu The ring-0 VCPU handle.
2250 * @param pcTicks Where to return the current CPU tick count.
2251 * @param   pcAux       Where to return the Hyper-V TSC_AUX value.  Optional.
2252 */
2253NEM_TMPL_STATIC int nemR0WinQueryCpuTick(PGVM pGVM, PGVMCPU pGVCpu, uint64_t *pcTicks, uint32_t *pcAux)
2254{
2255 /*
2256 * Hypercall parameters.
2257 */
2258 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
2259 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2260 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2261
2262 pInput->PartitionId = pGVM->nem.s.idHvPartition;
2263 pInput->VpIndex = pGVCpu->idCpu;
2264 pInput->fFlags = 0;
2265 pInput->Names[0] = HvX64RegisterTsc;
2266 pInput->Names[1] = HvX64RegisterTscAux;
2267
2268 size_t const cbInput = RT_ALIGN_Z(RT_UOFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[2]), 32);
2269 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
2270 RT_BZERO(paValues, sizeof(paValues[0]) * 2);
2271
2272 /*
2273 * Make the hypercall.
2274 */
2275 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, 2),
2276 pGVCpu->nem.s.HypercallData.HCPhysPage,
2277 pGVCpu->nem.s.HypercallData.HCPhysPage + cbInput);
2278 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(2), ("uResult=%RX64 cRegs=%#x\n", uResult, 2),
2279 VERR_NEM_GET_REGISTERS_FAILED);
2280
2281 /*
2282 * Get results.
2283 */
2284 *pcTicks = paValues[0].Reg64;
2285 if (pcAux)
2286         *pcAux = paValues[1].Reg32;
2287 return VINF_SUCCESS;
2288}
2289#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
2290
2291
2292/**
2293 * Queries the TSC and TSC_AUX values, putting the results in the per-VCPU Hypercall.QueryCpuTick members.
2294 *
2295 * @returns VBox status code
2296 * @param pGVM The ring-0 VM handle.
2297 * @param pVM The cross context VM handle.
2298 * @param idCpu The calling EMT. Necessary for getting the
2299 * hypercall page and arguments.
2300 */
2301VMMR0_INT_DECL(int) NEMR0QueryCpuTick(PGVM pGVM, PVM pVM, VMCPUID idCpu)
2302{
2303#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2304 /*
2305 * Validate the call.
2306 */
2307 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
2308 if (RT_SUCCESS(rc))
2309 {
2310 PVMCPU pVCpu = &pVM->aCpus[idCpu];
2311 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2312 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2313
2314 /*
2315 * Call worker.
2316 */
2317 pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks = 0;
2318 pVCpu->nem.s.Hypercall.QueryCpuTick.uAux = 0;
2319 rc = nemR0WinQueryCpuTick(pGVM, pGVCpu, &pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks,
2320 &pVCpu->nem.s.Hypercall.QueryCpuTick.uAux);
2321 }
2322 return rc;
2323#else
2324 RT_NOREF(pGVM, pVM, idCpu);
2325 return VERR_NOT_IMPLEMENTED;
2326#endif
2327}
2328
2329
2330#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2331/**
2332 * Worker for NEMR0ResumeCpuTickOnAll and the ring-0 NEMHCResumeCpuTickOnAll.
2333 *
2334 * @returns VBox status code.
2335 * @param pGVM The ring-0 VM handle.
2336 * @param pGVCpu The ring-0 VCPU handle.
2337 * @param uPausedTscValue The TSC value at the time of pausing.
2338 */
2339NEM_TMPL_STATIC int nemR0WinResumeCpuTickOnAll(PGVM pGVM, PGVMCPU pGVCpu, uint64_t uPausedTscValue)
2340{
2341 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2342
2343 /*
2344 * Set up the hypercall parameters.
2345 */
2346 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
2347 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2348
2349 pInput->PartitionId = pGVM->nem.s.idHvPartition;
2350 pInput->VpIndex = 0;
2351 pInput->RsvdZ = 0;
2352 pInput->Elements[0].Name = HvX64RegisterTsc;
2353 pInput->Elements[0].Pad0 = 0;
2354 pInput->Elements[0].Pad1 = 0;
2355 pInput->Elements[0].Value.Reg128.High64 = 0;
2356 pInput->Elements[0].Value.Reg64 = uPausedTscValue;
2357
2358 /*
2359 * Disable interrupts and do the first virtual CPU.
2360 */
2361 RTCCINTREG const fSavedFlags = ASMIntDisableFlags();
2362 uint64_t const uFirstTsc = ASMReadTSC();
2363 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
2364 pGVCpu->nem.s.HypercallData.HCPhysPage, 0 /* no output */);
2365 AssertLogRelMsgReturnStmt(uResult == HV_MAKE_CALL_REP_RET(1), ("uResult=%RX64 uTsc=%#RX64\n", uResult, uPausedTscValue),
2366 ASMSetFlags(fSavedFlags), VERR_NEM_SET_TSC);
2367
2368 /*
2369     * Do the secondary processors, adjusting for elapsed TSC and keeping our fingers crossed
2370 * that we don't introduce too much drift here.
2371 */
2372 for (VMCPUID iCpu = 1; iCpu < pGVM->cCpus; iCpu++)
2373 {
2374 Assert(pInput->PartitionId == pGVM->nem.s.idHvPartition);
2375 Assert(pInput->RsvdZ == 0);
2376 Assert(pInput->Elements[0].Name == HvX64RegisterTsc);
2377 Assert(pInput->Elements[0].Pad0 == 0);
2378 Assert(pInput->Elements[0].Pad1 == 0);
2379 Assert(pInput->Elements[0].Value.Reg128.High64 == 0);
2380
2381 pInput->VpIndex = iCpu;
2382 const uint64_t offDelta = (ASMReadTSC() - uFirstTsc);
2383 pInput->Elements[0].Value.Reg64 = uPausedTscValue + offDelta;
2384
2385 uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
2386 pGVCpu->nem.s.HypercallData.HCPhysPage, 0 /* no output */);
2387 AssertLogRelMsgReturnStmt(uResult == HV_MAKE_CALL_REP_RET(1),
2388 ("uResult=%RX64 uTsc=%#RX64 + %#RX64\n", uResult, uPausedTscValue, offDelta),
2389 ASMSetFlags(fSavedFlags), VERR_NEM_SET_TSC);
2390 }
2391
2392 /*
2393 * Done.
2394 */
2395 ASMSetFlags(fSavedFlags);
2396 return VINF_SUCCESS;
2397}
2398#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
2399
2400
2401/**
2402 * Sets the TSC register to @a uPausedTscValue on all CPUs.
2403 *
2404 * @returns VBox status code
2405 * @param pGVM The ring-0 VM handle.
2406 * @param pVM The cross context VM handle.
2407 * @param idCpu The calling EMT. Necessary for getting the
2408 * hypercall page and arguments.
2409 * @param uPausedTscValue The TSC value at the time of pausing.
2410 */
2411VMMR0_INT_DECL(int) NEMR0ResumeCpuTickOnAll(PGVM pGVM, PVM pVM, VMCPUID idCpu, uint64_t uPausedTscValue)
2412{
2413#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2414 /*
2415 * Validate the call.
2416 */
2417 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
2418 if (RT_SUCCESS(rc))
2419 {
2420 PVMCPU pVCpu = &pVM->aCpus[idCpu];
2421 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2422 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2423
2424 /*
2425 * Call worker.
2426 */
2427 pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks = 0;
2428 pVCpu->nem.s.Hypercall.QueryCpuTick.uAux = 0;
2429 rc = nemR0WinResumeCpuTickOnAll(pGVM, pGVCpu, uPausedTscValue);
2430 }
2431 return rc;
2432#else
2433 RT_NOREF(pGVM, pVM, idCpu, uPausedTscValue);
2434 return VERR_NOT_IMPLEMENTED;
2435#endif
2436}
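/*
 * Illustrative pairing of the two TSC services (sketch only; variable names are
 * invented and in practice these entry points are typically reached through the
 * ring-0 request dispatcher rather than called directly): the TSC is sampled with
 * NEMR0QueryCpuTick at pause time, and the sampled value is handed back through
 * NEMR0ResumeCpuTickOnAll at resume time so that all vCPUs restart consistently.
 */
#if 0
    /* At pause time, on the EMT of vCPU 0: */
    int rcTsc = NEMR0QueryCpuTick(pGVM, pVM, 0 /*idCpu*/);
    uint64_t const uPausedTsc = RT_SUCCESS(rcTsc)
                              ? pVM->aCpus[0].nem.s.Hypercall.QueryCpuTick.cTicks
                              : 0;

    /* ... the VM remains paused ... */

    /* At resume time, again on the EMT of vCPU 0: */
    rcTsc = NEMR0ResumeCpuTickOnAll(pGVM, pVM, 0 /*idCpu*/, uPausedTsc);
#endif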
2437
2438
2439VMMR0_INT_DECL(VBOXSTRICTRC) NEMR0RunGuestCode(PGVM pGVM, VMCPUID idCpu)
2440{
2441#ifdef NEM_WIN_WITH_RING0_RUNLOOP
2442 if (pGVM->nem.s.fMayUseRing0Runloop)
2443 {
2444 PVM pVM = pGVM->pVM;
2445 return nemHCWinRunGC(pVM, &pVM->aCpus[idCpu], pGVM, &pGVM->aCpus[idCpu]);
2446 }
2447 return VERR_NEM_RING3_ONLY;
2448#else
2449 RT_NOREF(pGVM, idCpu);
2450 return VERR_NOT_IMPLEMENTED;
2451#endif
2452}
2453
2454
2455/**
2456 * Updates statistics in the VM structure.
2457 *
2458 * @returns VBox status code.
2459 * @param pGVM The ring-0 VM handle.
2460 * @param pVM The cross context VM handle.
2461 * @param idCpu The calling EMT, or NIL. Necessary for getting the hypercall
2462 * page and arguments.
2463 */
2464VMMR0_INT_DECL(int) NEMR0UpdateStatistics(PGVM pGVM, PVM pVM, VMCPUID idCpu)
2465{
2466 /*
2467 * Validate the call.
2468 */
2469 int rc;
2470 if (idCpu == NIL_VMCPUID)
2471 rc = GVMMR0ValidateGVMandVM(pGVM, pVM);
2472 else
2473 rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
2474 if (RT_SUCCESS(rc))
2475 {
2476 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2477
2478 PNEMR0HYPERCALLDATA pHypercallData = idCpu != NIL_VMCPUID
2479 ? &pGVM->aCpus[idCpu].nem.s.HypercallData
2480 : &pGVM->nem.s.HypercallData;
2481 if ( RT_VALID_PTR(pHypercallData->pbPage)
2482 && pHypercallData->HCPhysPage != NIL_RTHCPHYS)
2483 {
2484 if (idCpu == NIL_VMCPUID)
2485 rc = RTCritSectEnter(&pGVM->nem.s.HypercallDataCritSect);
2486 if (RT_SUCCESS(rc))
2487 {
2488 /*
2489 * Query the memory statistics for the partition.
2490 */
2491 HV_INPUT_GET_MEMORY_BALANCE *pInput = (HV_INPUT_GET_MEMORY_BALANCE *)pHypercallData->pbPage;
2492 pInput->TargetPartitionId = pGVM->nem.s.idHvPartition;
2493 pInput->ProximityDomainInfo.Flags.ProximityPreferred = 0;
2494 pInput->ProximityDomainInfo.Flags.ProxyimityInfoValid = 0;
2495 pInput->ProximityDomainInfo.Flags.Reserved = 0;
2496 pInput->ProximityDomainInfo.Id = 0;
2497
2498 HV_OUTPUT_GET_MEMORY_BALANCE *pOutput = (HV_OUTPUT_GET_MEMORY_BALANCE *)(pInput + 1);
2499 RT_ZERO(*pOutput);
2500
2501 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallGetMemoryBalance,
2502 pHypercallData->HCPhysPage,
2503 pHypercallData->HCPhysPage + sizeof(*pInput));
2504 if (uResult == HV_STATUS_SUCCESS)
2505 {
2506 pVM->nem.s.R0Stats.cPagesAvailable = pOutput->PagesAvailable;
2507 pVM->nem.s.R0Stats.cPagesInUse = pOutput->PagesInUse;
2508 rc = VINF_SUCCESS;
2509 }
2510 else
2511 {
2512 LogRel(("HvCallGetMemoryBalance -> %#RX64 (%#RX64 %#RX64)!!\n",
2513 uResult, pOutput->PagesAvailable, pOutput->PagesInUse));
2514 rc = VERR_NEM_IPE_0;
2515 }
2516
2517 if (idCpu == NIL_VMCPUID)
2518 RTCritSectLeave(&pGVM->nem.s.HypercallDataCritSect);
2519 }
2520 }
2521 else
2522 rc = VERR_WRONG_ORDER;
2523 }
2524 return rc;
2525}
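/*
 * Illustrative use of NEMR0UpdateStatistics (sketch only): after a successful
 * call the refreshed counters can be read straight out of the shared VM
 * structure, since the function stores them in pVM->nem.s.R0Stats above.
 */
#if 0
    if (RT_SUCCESS(NEMR0UpdateStatistics(pGVM, pVM, NIL_VMCPUID)))
        LogRel(("NEM: Hyper-V memory balance: %RU64 pages available, %RU64 pages in use\n",
                pVM->nem.s.R0Stats.cPagesAvailable, pVM->nem.s.R0Stats.cPagesInUse));
#endif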
2526
2527
2528#if 1 && defined(DEBUG_bird)
2529/**
2530 * Debug only interface for poking around and exploring Hyper-V stuff.
2531 *
2532 * @param pGVM The ring-0 VM handle.
2533 * @param pVM The cross context VM handle.
2534 * @param idCpu The calling EMT.
2535 * @param   u64Arg      What to do: 0 = query VP register, 1 = query partition property, 2 = set VP register.
2536 */
2537VMMR0_INT_DECL(int) NEMR0DoExperiment(PGVM pGVM, PVM pVM, VMCPUID idCpu, uint64_t u64Arg)
2538{
2539 /*
2540 * Resolve CPU structures.
2541 */
2542 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
2543 if (RT_SUCCESS(rc))
2544 {
2545 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2546
2547 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2548 PVMCPU pVCpu = &pVM->aCpus[idCpu];
2549 if (u64Arg == 0)
2550 {
2551 /*
2552 * Query register.
2553 */
2554 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
2555 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2556
2557 size_t const cbInput = RT_ALIGN_Z(RT_UOFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[1]), 32);
2558 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
2559 RT_BZERO(paValues, sizeof(paValues[0]) * 1);
2560
2561 pInput->PartitionId = pGVM->nem.s.idHvPartition;
2562 pInput->VpIndex = pGVCpu->idCpu;
2563 pInput->fFlags = 0;
2564 pInput->Names[0] = (HV_REGISTER_NAME)pVCpu->nem.s.Hypercall.Experiment.uItem;
2565
2566 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, 1),
2567 pGVCpu->nem.s.HypercallData.HCPhysPage,
2568 pGVCpu->nem.s.HypercallData.HCPhysPage + cbInput);
2569 pVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_MAKE_CALL_REP_RET(1);
2570 pVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;
2571 pVCpu->nem.s.Hypercall.Experiment.uLoValue = paValues[0].Reg128.Low64;
2572 pVCpu->nem.s.Hypercall.Experiment.uHiValue = paValues[0].Reg128.High64;
2573 rc = VINF_SUCCESS;
2574 }
2575 else if (u64Arg == 1)
2576 {
2577 /*
2578 * Query partition property.
2579 */
2580 HV_INPUT_GET_PARTITION_PROPERTY *pInput = (HV_INPUT_GET_PARTITION_PROPERTY *)pGVCpu->nem.s.HypercallData.pbPage;
2581 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2582
2583 size_t const cbInput = RT_ALIGN_Z(sizeof(*pInput), 32);
2584 HV_OUTPUT_GET_PARTITION_PROPERTY *pOutput = (HV_OUTPUT_GET_PARTITION_PROPERTY *)((uint8_t *)pInput + cbInput);
2585 pOutput->PropertyValue = 0;
2586
2587 pInput->PartitionId = pGVM->nem.s.idHvPartition;
2588 pInput->PropertyCode = (HV_PARTITION_PROPERTY_CODE)pVCpu->nem.s.Hypercall.Experiment.uItem;
2589 pInput->uPadding = 0;
2590
2591 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallGetPartitionProperty,
2592 pGVCpu->nem.s.HypercallData.HCPhysPage,
2593 pGVCpu->nem.s.HypercallData.HCPhysPage + cbInput);
2594 pVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_STATUS_SUCCESS;
2595 pVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;
2596 pVCpu->nem.s.Hypercall.Experiment.uLoValue = pOutput->PropertyValue;
2597 pVCpu->nem.s.Hypercall.Experiment.uHiValue = 0;
2598 rc = VINF_SUCCESS;
2599 }
2600 else if (u64Arg == 2)
2601 {
2602 /*
2603 * Set register.
2604 */
2605 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
2606 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2607 RT_BZERO(pInput, RT_UOFFSETOF(HV_INPUT_SET_VP_REGISTERS, Elements[1]));
2608
2609 pInput->PartitionId = pGVM->nem.s.idHvPartition;
2610 pInput->VpIndex = pGVCpu->idCpu;
2611 pInput->RsvdZ = 0;
2612 pInput->Elements[0].Name = (HV_REGISTER_NAME)pVCpu->nem.s.Hypercall.Experiment.uItem;
2613 pInput->Elements[0].Value.Reg128.High64 = pVCpu->nem.s.Hypercall.Experiment.uHiValue;
2614 pInput->Elements[0].Value.Reg128.Low64 = pVCpu->nem.s.Hypercall.Experiment.uLoValue;
2615
2616 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
2617 pGVCpu->nem.s.HypercallData.HCPhysPage, 0);
2618 pVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_MAKE_CALL_REP_RET(1);
2619 pVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;
2620 rc = VINF_SUCCESS;
2621 }
2622 else
2623 rc = VERR_INVALID_FUNCTION;
2624 }
2625 return rc;
2626}
2627#endif /* DEBUG_bird */
2628