source: vbox/trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp@ 72552

Last change on this file since 72552 was 72546, checked in by vboxsync, 7 years ago

NEM/win,TM: Setting TSC on TM start/restore/resume/etc as best we can. bugref:9044

1/* $Id: NEMR0Native-win.cpp 72546 2018-06-13 15:45:39Z vboxsync $ */
2/** @file
3 * NEM - Native execution manager, native ring-0 Windows backend.
4 */
5
6/*
7 * Copyright (C) 2018 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_NEM
23#define VMCPU_INCL_CPUM_GST_CTX
24#include <iprt/nt/nt.h>
25#include <iprt/nt/hyperv.h>
26#include <iprt/nt/vid.h>
27#include <winerror.h>
28
29#include <VBox/vmm/nem.h>
30#include <VBox/vmm/iem.h>
31#include <VBox/vmm/em.h>
32#include <VBox/vmm/apic.h>
33#include <VBox/vmm/pdm.h>
34#include "NEMInternal.h"
35#include <VBox/vmm/gvm.h>
36#include <VBox/vmm/vm.h>
37#include <VBox/vmm/gvmm.h>
38#include <VBox/param.h>
39
40#include <iprt/dbg.h>
41#include <iprt/memobj.h>
42#include <iprt/string.h>
43
44
45/* Assert compile context sanity. */
46#ifndef RT_OS_WINDOWS
47# error "Windows only file!"
48#endif
49#ifndef RT_ARCH_AMD64
50# error "AMD64 only file!"
51#endif
52
53
54/*********************************************************************************************************************************
55* Internal Functions *
56*********************************************************************************************************************************/
57typedef uint32_t DWORD; /* for winerror.h constants */
58
59
60/*********************************************************************************************************************************
61* Global Variables *
62*********************************************************************************************************************************/
63static uint64_t (*g_pfnHvlInvokeHypercall)(uint64_t uCallInfo, uint64_t HCPhysInput, uint64_t HCPhysOutput);
64
65/**
66 * WinHvr.sys!WinHvDepositMemory
67 *
68 * This API tries to allocate cPages on IdealNode and deposit them to the
69 * hypervisor for use with the given partition. The memory is freed when
70 * VID.SYS calls WinHvWithdrawAllMemory during partition cleanup.
71 *
72 * Apparently node numbers above 64 have a different meaning.
73 */
74static NTSTATUS (*g_pfnWinHvDepositMemory)(uintptr_t idPartition, size_t cPages, uintptr_t IdealNode, size_t *pcActuallyAdded);
75
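/* Illustrative sketch, not part of the original source: once the symbol has been
   resolved by NEMR0InitVM below, a deposit through this pointer is assumed to look
   roughly like the call nemR0WinMapPages makes when the partition runs dry.  The
   helper name is made up for illustration; VID.SYS works in multiples of 512 pages. */
#if 0
static int nemR0ExampleDepositMemory(PGVM pGVM)
{
    size_t   cPagesAdded = 0;
    NTSTATUS rcNt = g_pfnWinHvDepositMemory(pGVM->nem.s.idHvPartition, 512 /*cPages*/, 0 /*IdealNode*/, &cPagesAdded);
    return NT_SUCCESS(rcNt) && cPagesAdded > 0 ? VINF_SUCCESS : VERR_NO_MEMORY;
}
#endif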
76
77/*********************************************************************************************************************************
78* Internal Functions *
79*********************************************************************************************************************************/
80NEM_TMPL_STATIC int nemR0WinMapPages(PGVM pGVM, PVM pVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
81 uint32_t cPages, uint32_t fFlags);
82NEM_TMPL_STATIC int nemR0WinUnmapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys, uint32_t cPages);
83NEM_TMPL_STATIC int nemR0WinExportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx);
84NEM_TMPL_STATIC int nemR0WinImportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat);
85NEM_TMPL_STATIC int nemR0WinQueryCpuTick(PGVM pGVM, PGVMCPU pGVCpu, uint64_t *pcTicks, uint32_t *pcAux);
86NEM_TMPL_STATIC int nemR0WinResumeCpuTickOnAll(PGVM pGVM, PGVMCPU pGVCpu, uint64_t uPausedTscValue);
87DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, uint32_t uFunction, void *pvInput, uint32_t cbInput,
88 void *pvOutput, uint32_t cbOutput);
89
90
91/*
92 * Instantiate the code we share with ring-3.
93 */
94#include "../VMMAll/NEMAllNativeTemplate-win.cpp.h"
95
96/**
97 * Worker for NEMR0InitVM that allocates a hypercall page.
98 *
99 * @returns VBox status code.
100 * @param pHypercallData The hypercall data page to initialize.
101 */
102static int nemR0InitHypercallData(PNEMR0HYPERCALLDATA pHypercallData)
103{
104 int rc = RTR0MemObjAllocPage(&pHypercallData->hMemObj, PAGE_SIZE, false /*fExecutable*/);
105 if (RT_SUCCESS(rc))
106 {
107 pHypercallData->HCPhysPage = RTR0MemObjGetPagePhysAddr(pHypercallData->hMemObj, 0 /*iPage*/);
108 AssertStmt(pHypercallData->HCPhysPage != NIL_RTHCPHYS, rc = VERR_INTERNAL_ERROR_3);
109 pHypercallData->pbPage = (uint8_t *)RTR0MemObjAddress(pHypercallData->hMemObj);
110 AssertStmt(pHypercallData->pbPage, rc = VERR_INTERNAL_ERROR_3);
111 if (RT_SUCCESS(rc))
112 return VINF_SUCCESS;
113
114 /* bail out */
115 RTR0MemObjFree(pHypercallData->hMemObj, true /*fFreeMappings*/);
116 }
117 pHypercallData->hMemObj = NIL_RTR0MEMOBJ;
118 pHypercallData->HCPhysPage = NIL_RTHCPHYS;
119 pHypercallData->pbPage = NULL;
120 return rc;
121}
122
123/**
124 * Worker for NEMR0CleanupVM and NEMR0InitVM that cleans up a hypercall page.
125 *
126 * @param pHypercallData The hypercall data page to uninitialize.
127 */
128static void nemR0DeleteHypercallData(PNEMR0HYPERCALLDATA pHypercallData)
129{
130    /* Check pbPage here since it's NULL when not initialized, whereas hMemObj can
131       be either NIL_RTR0MEMOBJ or 0 (they aren't necessarily the same). */
132 if (pHypercallData->pbPage != NULL)
133 {
134 RTR0MemObjFree(pHypercallData->hMemObj, true /*fFreeMappings*/);
135 pHypercallData->pbPage = NULL;
136 }
137 pHypercallData->hMemObj = NIL_RTR0MEMOBJ;
138 pHypercallData->HCPhysPage = NIL_RTHCPHYS;
139}
140
141
142/**
143 * Called by NEMR3Init to make sure we've got what we need.
144 *
145 * @returns VBox status code.
146 * @param pGVM The ring-0 VM handle.
147 * @param pVM The cross context VM handle.
148 * @thread EMT(0)
149 */
150VMMR0_INT_DECL(int) NEMR0InitVM(PGVM pGVM, PVM pVM)
151{
152 AssertCompile(sizeof(pGVM->nem.s) <= sizeof(pGVM->nem.padding));
153 AssertCompile(sizeof(pGVM->aCpus[0].nem.s) <= sizeof(pGVM->aCpus[0].nem.padding));
154
155 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, 0);
156 AssertRCReturn(rc, rc);
157
158 /*
159     * We want to perform hypercalls here.  The NT kernel started to expose a very low
160     * level interface for doing this somewhere between builds 14271 and 16299.  Since
161     * we need build 17134 to get anywhere at all, the exact build is not relevant here.
162     *
163     * We also need to deposit memory with the hypervisor for use by the partition
164     * (page mapping structures and the like).
165 */
166 RTDBGKRNLINFO hKrnlInfo;
167 rc = RTR0DbgKrnlInfoOpen(&hKrnlInfo, 0);
168 if (RT_SUCCESS(rc))
169 {
170 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, NULL, "HvlInvokeHypercall", (void **)&g_pfnHvlInvokeHypercall);
171 if (RT_SUCCESS(rc))
172 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, "winhvr.sys", "WinHvDepositMemory", (void **)&g_pfnWinHvDepositMemory);
173 RTR0DbgKrnlInfoRelease(hKrnlInfo);
174 if (RT_SUCCESS(rc))
175 {
176 /*
177 * Allocate a page for non-EMT threads to use for hypercalls (update
178 * statistics and such) and a critical section protecting it.
179 */
180 rc = RTCritSectInit(&pGVM->nem.s.HypercallDataCritSect);
181 if (RT_SUCCESS(rc))
182 {
183 rc = nemR0InitHypercallData(&pGVM->nem.s.HypercallData);
184 if (RT_SUCCESS(rc))
185 {
186 /*
187 * Allocate a page for each VCPU to place hypercall data on.
188 */
189 for (VMCPUID i = 0; i < pGVM->cCpus; i++)
190 {
191 rc = nemR0InitHypercallData(&pGVM->aCpus[i].nem.s.HypercallData);
192 if (RT_FAILURE(rc))
193 {
194 while (i-- > 0)
195 nemR0DeleteHypercallData(&pGVM->aCpus[i].nem.s.HypercallData);
196 break;
197 }
198 }
199 if (RT_SUCCESS(rc))
200 {
201 /*
202 * So far, so good.
203 */
204 return rc;
205 }
206
207 /*
208 * Bail out.
209 */
210 nemR0DeleteHypercallData(&pGVM->nem.s.HypercallData);
211 }
212 RTCritSectDelete(&pGVM->nem.s.HypercallDataCritSect);
213 }
214 }
215 else
216 rc = VERR_NEM_MISSING_KERNEL_API;
217 }
218
219 RT_NOREF(pVM);
220 return rc;
221}
222
223
224/**
225 * Perform an I/O control operation on the partition handle (VID.SYS).
226 *
227 * @returns NT status code.
228 * @param pGVM The ring-0 VM structure.
229 * @param uFunction The function to perform.
230 * @param pvInput The input buffer. This must point within the VM
231 * structure so we can easily convert to a ring-3
232 * pointer if necessary.
233 * @param cbInput The size of the input. @a pvInput must be NULL when
234 * zero.
235 * @param pvOutput The output buffer. This must also point within the
236 * VM structure for ring-3 pointer magic.
237 * @param cbOutput The size of the output. @a pvOutput must be NULL
238 * when zero.
239 */
240DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, uint32_t uFunction, void *pvInput, uint32_t cbInput,
241 void *pvOutput, uint32_t cbOutput)
242{
243#ifdef RT_STRICT
244 /*
245 * Input and output parameters are part of the VM CPU structure.
246 */
247 PVM pVM = pGVM->pVM;
248 size_t const cbVM = RT_UOFFSETOF(VM, aCpus[pGVM->cCpus]);
249 if (pvInput)
250 AssertReturn(((uintptr_t)pvInput + cbInput) - (uintptr_t)pVM <= cbVM, VERR_INVALID_PARAMETER);
251 if (pvOutput)
252 AssertReturn(((uintptr_t)pvOutput + cbOutput) - (uintptr_t)pVM <= cbVM, VERR_INVALID_PARAMETER);
253#endif
254
255 int32_t rcNt = STATUS_UNSUCCESSFUL;
256 int rc = SUPR0IoCtlPerform(pGVM->nem.s.pIoCtlCtx, uFunction,
257 pvInput,
258 pvInput ? (uintptr_t)pvInput + pGVM->nem.s.offRing3ConversionDelta : NIL_RTR3PTR,
259 cbInput,
260 pvOutput,
261 pvOutput ? (uintptr_t)pvOutput + pGVM->nem.s.offRing3ConversionDelta : NIL_RTR3PTR,
262 cbOutput,
263 &rcNt);
264 if (RT_SUCCESS(rc) || !NT_SUCCESS((NTSTATUS)rcNt))
265 return (NTSTATUS)rcNt;
266 return STATUS_UNSUCCESSFUL;
267}
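
/* Illustrative sketch, not part of the original source: the ring-3 addresses passed
   to SUPR0IoCtlPerform above are derived from the ring-0 buffer addresses by adding
   offRing3ConversionDelta, which NEMR0InitVMPart2 computes as
   (uintptr_t)pVM->pVMR3 - (uintptr_t)pGVM->pVM.  Because the buffers are required to
   live inside the VM structure, the same delta translates them into the ring-3
   mapping of that structure.  The helper name is made up for illustration. */
#if 0
DECLINLINE(RTR3PTR) nemR0ExampleR0ToR3(PGVM pGVM, void *pvR0)
{
    return pvR0 ? (RTR3PTR)((uintptr_t)pvR0 + pGVM->nem.s.offRing3ConversionDelta) : NIL_RTR3PTR;
}
#endif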
268
269
270/**
271 * 2nd part of the initialization, after we've got a partition handle.
272 *
273 * @returns VBox status code.
274 * @param pGVM The ring-0 VM handle.
275 * @param pVM The cross context VM handle.
276 * @thread EMT(0)
277 */
278VMMR0_INT_DECL(int) NEMR0InitVMPart2(PGVM pGVM, PVM pVM)
279{
280 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, 0);
281 AssertRCReturn(rc, rc);
282 SUPR0Printf("NEMR0InitVMPart2\n"); LogRel(("2: NEMR0InitVMPart2\n"));
283
284 /*
285 * Copy and validate the I/O control information from ring-3.
286 */
287 NEMWINIOCTL Copy = pVM->nem.s.IoCtlGetHvPartitionId;
288 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);
289 AssertLogRelReturn(Copy.cbInput == 0, VERR_NEM_INIT_FAILED);
290 AssertLogRelReturn(Copy.cbOutput == sizeof(HV_PARTITION_ID), VERR_NEM_INIT_FAILED);
291 pGVM->nem.s.IoCtlGetHvPartitionId = Copy;
292
293 Copy = pVM->nem.s.IoCtlStartVirtualProcessor;
294 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);
295 AssertLogRelReturn(Copy.cbInput == sizeof(HV_VP_INDEX), VERR_NEM_INIT_FAILED);
296 AssertLogRelReturn(Copy.cbOutput == 0, VERR_NEM_INIT_FAILED);
297 AssertLogRelReturn(Copy.uFunction != pGVM->nem.s.IoCtlGetHvPartitionId.uFunction, VERR_NEM_INIT_FAILED);
298 pGVM->nem.s.IoCtlStartVirtualProcessor = Copy;
299
300 Copy = pVM->nem.s.IoCtlStopVirtualProcessor;
301 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);
302 AssertLogRelReturn(Copy.cbInput == sizeof(HV_VP_INDEX), VERR_NEM_INIT_FAILED);
303 AssertLogRelReturn(Copy.cbOutput == 0, VERR_NEM_INIT_FAILED);
304 AssertLogRelReturn(Copy.uFunction != pGVM->nem.s.IoCtlGetHvPartitionId.uFunction, VERR_NEM_INIT_FAILED);
305 AssertLogRelReturn(Copy.uFunction != pGVM->nem.s.IoCtlStartVirtualProcessor.uFunction, VERR_NEM_INIT_FAILED);
306 pGVM->nem.s.IoCtlStopVirtualProcessor = Copy;
307
308 Copy = pVM->nem.s.IoCtlMessageSlotHandleAndGetNext;
309 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);
310 AssertLogRelReturn(Copy.cbInput == sizeof(VID_IOCTL_INPUT_MESSAGE_SLOT_HANDLE_AND_GET_NEXT), VERR_NEM_INIT_FAILED);
311 AssertLogRelReturn(Copy.cbOutput == 0, VERR_NEM_INIT_FAILED);
312 AssertLogRelReturn(Copy.uFunction != pGVM->nem.s.IoCtlGetHvPartitionId.uFunction, VERR_NEM_INIT_FAILED);
313 AssertLogRelReturn(Copy.uFunction != pGVM->nem.s.IoCtlStartVirtualProcessor.uFunction, VERR_NEM_INIT_FAILED);
314 AssertLogRelReturn(Copy.uFunction != pGVM->nem.s.IoCtlStopVirtualProcessor.uFunction, VERR_NEM_INIT_FAILED);
315 pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext = Copy;
316
317 /*
318 * Setup of an I/O control context for the partition handle for later use.
319 */
320 rc = SUPR0IoCtlSetupForHandle(pGVM->pSession, pVM->nem.s.hPartitionDevice, 0, &pGVM->nem.s.pIoCtlCtx);
321 AssertLogRelRCReturn(rc, rc);
322 pGVM->nem.s.offRing3ConversionDelta = (uintptr_t)pVM->pVMR3 - (uintptr_t)pGVM->pVM;
323
324 /*
325 * Get the partition ID.
326 */
327 PVMCPU pVCpu = &pGVM->pVM->aCpus[0];
328 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlGetHvPartitionId.uFunction, NULL, 0,
329 &pVCpu->nem.s.uIoCtlBuf.idPartition, sizeof(pVCpu->nem.s.uIoCtlBuf.idPartition));
330 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("IoCtlGetHvPartitionId failed: %#x\n", rcNt), VERR_NEM_INIT_FAILED);
331 pGVM->nem.s.idHvPartition = pVCpu->nem.s.uIoCtlBuf.idPartition;
332 AssertLogRelMsgReturn(pGVM->nem.s.idHvPartition == pVM->nem.s.idHvPartition,
333 ("idHvPartition mismatch: r0=%#RX64, r3=%#RX64\n", pGVM->nem.s.idHvPartition, pVM->nem.s.idHvPartition),
334 VERR_NEM_INIT_FAILED);
335
336 return rc;
337}
338
339
340/**
341 * Cleanup the NEM parts of the VM in ring-0.
342 *
343 * This is always called and must deal with the state regardless of whether
344 * NEMR0InitVM() was called or not. So, take care here.
345 *
346 * @param pGVM The ring-0 VM handle.
347 */
348VMMR0_INT_DECL(void) NEMR0CleanupVM(PGVM pGVM)
349{
350 pGVM->nem.s.idHvPartition = HV_PARTITION_ID_INVALID;
351
352 /* Clean up I/O control context. */
353 if (pGVM->nem.s.pIoCtlCtx)
354 {
355 int rc = SUPR0IoCtlCleanup(pGVM->nem.s.pIoCtlCtx);
356 AssertRC(rc);
357 pGVM->nem.s.pIoCtlCtx = NULL;
358 }
359
360 /* Free the hypercall pages. */
361 VMCPUID i = pGVM->cCpus;
362 while (i-- > 0)
363 nemR0DeleteHypercallData(&pGVM->aCpus[i].nem.s.HypercallData);
364
365 /* The non-EMT one too. */
366 if (RTCritSectIsInitialized(&pGVM->nem.s.HypercallDataCritSect))
367 RTCritSectDelete(&pGVM->nem.s.HypercallDataCritSect);
368 nemR0DeleteHypercallData(&pGVM->nem.s.HypercallData);
369}
370
371
372#if 0 /* for debugging GPA unmapping. */
373static int nemR3WinDummyReadGpa(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys)
374{
375 PHV_INPUT_READ_GPA pIn = (PHV_INPUT_READ_GPA)pGVCpu->nem.s.pbHypercallData;
376 PHV_OUTPUT_READ_GPA pOut = (PHV_OUTPUT_READ_GPA)(pIn + 1);
377 pIn->PartitionId = pGVM->nem.s.idHvPartition;
378 pIn->VpIndex = pGVCpu->idCpu;
379 pIn->ByteCount = 0x10;
380 pIn->BaseGpa = GCPhys;
381 pIn->ControlFlags.AsUINT64 = 0;
382 pIn->ControlFlags.CacheType = HvCacheTypeX64WriteCombining;
383 memset(pOut, 0xfe, sizeof(*pOut));
384 uint64_t volatile uResult = g_pfnHvlInvokeHypercall(HvCallReadGpa, pGVCpu->nem.s.HCPhysHypercallData,
385 pGVCpu->nem.s.HCPhysHypercallData + sizeof(*pIn));
386 LogRel(("nemR3WinDummyReadGpa: %RGp -> %#RX64; code=%u rsvd=%u abData=%.16Rhxs\n",
387 GCPhys, uResult, pOut->AccessResult.ResultCode, pOut->AccessResult.Reserved, pOut->Data));
388 __debugbreak();
389
390 return uResult != 0 ? VERR_READ_ERROR : VINF_SUCCESS;
391}
392#endif
393
394
395/**
396 * Worker for NEMR0MapPages and others.
397 */
398NEM_TMPL_STATIC int nemR0WinMapPages(PGVM pGVM, PVM pVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
399 uint32_t cPages, uint32_t fFlags)
400{
401 /*
402 * Validate.
403 */
404 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
405
406 AssertReturn(cPages > 0, VERR_OUT_OF_RANGE);
407 AssertReturn(cPages <= NEM_MAX_MAP_PAGES, VERR_OUT_OF_RANGE);
408 AssertReturn(!(fFlags & ~(HV_MAP_GPA_MAYBE_ACCESS_MASK & ~HV_MAP_GPA_DUNNO_ACCESS)), VERR_INVALID_FLAGS);
409 AssertMsgReturn(!(GCPhysDst & X86_PAGE_OFFSET_MASK), ("GCPhysDst=%RGp\n", GCPhysDst), VERR_OUT_OF_RANGE);
410 AssertReturn(GCPhysDst < _1E, VERR_OUT_OF_RANGE);
411 if (GCPhysSrc != GCPhysDst)
412 {
413 AssertMsgReturn(!(GCPhysSrc & X86_PAGE_OFFSET_MASK), ("GCPhysSrc=%RGp\n", GCPhysSrc), VERR_OUT_OF_RANGE);
414 AssertReturn(GCPhysSrc < _1E, VERR_OUT_OF_RANGE);
415 }
416
417 /*
418 * Compose and make the hypercall.
419 * Ring-3 is not allowed to fill in the host physical addresses of the call.
420 */
421 for (uint32_t iTries = 0;; iTries++)
422 {
423 HV_INPUT_MAP_GPA_PAGES *pMapPages = (HV_INPUT_MAP_GPA_PAGES *)pGVCpu->nem.s.HypercallData.pbPage;
424 AssertPtrReturn(pMapPages, VERR_INTERNAL_ERROR_3);
425 pMapPages->TargetPartitionId = pGVM->nem.s.idHvPartition;
426 pMapPages->TargetGpaBase = GCPhysDst >> X86_PAGE_SHIFT;
427 pMapPages->MapFlags = fFlags;
428 pMapPages->u32ExplicitPadding = 0;
429 for (uint32_t iPage = 0; iPage < cPages; iPage++, GCPhysSrc += X86_PAGE_SIZE)
430 {
431 RTHCPHYS HCPhys = NIL_RTGCPHYS;
432 int rc = PGMPhysGCPhys2HCPhys(pVM, GCPhysSrc, &HCPhys);
433 AssertRCReturn(rc, rc);
434 pMapPages->PageList[iPage] = HCPhys >> X86_PAGE_SHIFT;
435 }
436
437 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallMapGpaPages | ((uint64_t)cPages << 32),
438 pGVCpu->nem.s.HypercallData.HCPhysPage, 0);
439 Log6(("NEMR0MapPages: %RGp/%RGp L %u prot %#x -> %#RX64\n",
440 GCPhysDst, GCPhysSrc - cPages * X86_PAGE_SIZE, cPages, fFlags, uResult));
441 if (uResult == ((uint64_t)cPages << 32))
442 return VINF_SUCCESS;
443
444 /*
445         * If the partition is out of memory, try donating another 512 pages (2 MB)
446         * to it.  VID.SYS works in multiples of 512 pages; nothing smaller.
447 */
448 if ( uResult != HV_STATUS_INSUFFICIENT_MEMORY
449 || iTries > 16
450 || g_pfnWinHvDepositMemory == NULL)
451 {
452 LogRel(("g_pfnHvlInvokeHypercall/MapGpaPages -> %#RX64\n", uResult));
453 return VERR_NEM_MAP_PAGES_FAILED;
454 }
455
456 size_t cPagesAdded = 0;
457 NTSTATUS rcNt = g_pfnWinHvDepositMemory(pGVM->nem.s.idHvPartition, 512, 0, &cPagesAdded);
458 if (!cPagesAdded)
459 {
460 LogRel(("g_pfnWinHvDepositMemory -> %#x / %#RX64\n", rcNt, uResult));
461 return VERR_NEM_MAP_PAGES_FAILED;
462 }
463 }
464}
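
/* Illustrative sketch, not part of the original source: the call-info value handed to
   g_pfnHvlInvokeHypercall above is assumed to pack the hypercall code into the low
   16 bits and the repetition count into bits 32 and up, and a fully successful rep
   hypercall is assumed to echo the rep count back in the same bit position with a
   zero status in the low bits - hence the comparison against (uint64_t)cPages << 32. */
#if 0
DECLINLINE(bool) nemR0ExampleIsRepHypercallSuccess(uint64_t uResult, uint32_t cReps)
{
    return uResult == ((uint64_t)cReps << 32); /* status == 0 && reps completed == cReps */
}
#endif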
465
466
467/**
468 * Maps pages into the guest physical address space.
469 *
470 * Generally the caller will be under the PGM lock already, so no extra effort
471 * is needed to make sure all changes happen under it.
472 *
473 * @returns VBox status code.
474 * @param pGVM The ring-0 VM handle.
475 * @param pVM The cross context VM handle.
476 * @param idCpu The calling EMT. Necessary for getting the
477 * hypercall page and arguments.
478 * @thread EMT(idCpu)
479 */
480VMMR0_INT_DECL(int) NEMR0MapPages(PGVM pGVM, PVM pVM, VMCPUID idCpu)
481{
482 /*
483 * Unpack the call.
484 */
485 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
486 if (RT_SUCCESS(rc))
487 {
488 PVMCPU pVCpu = &pVM->aCpus[idCpu];
489 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
490
491 RTGCPHYS const GCPhysSrc = pVCpu->nem.s.Hypercall.MapPages.GCPhysSrc;
492 RTGCPHYS const GCPhysDst = pVCpu->nem.s.Hypercall.MapPages.GCPhysDst;
493 uint32_t const cPages = pVCpu->nem.s.Hypercall.MapPages.cPages;
494 HV_MAP_GPA_FLAGS const fFlags = pVCpu->nem.s.Hypercall.MapPages.fFlags;
495
496 /*
497 * Do the work.
498 */
499 rc = nemR0WinMapPages(pGVM, pVM, pGVCpu, GCPhysSrc, GCPhysDst, cPages, fFlags);
500 }
501 return rc;
502}
503
504
505/**
506 * Worker for NEMR0UnmapPages and others.
507 */
508NEM_TMPL_STATIC int nemR0WinUnmapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys, uint32_t cPages)
509{
510 /*
511 * Validate input.
512 */
513 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
514
515 AssertReturn(cPages > 0, VERR_OUT_OF_RANGE);
516 AssertReturn(cPages <= NEM_MAX_UNMAP_PAGES, VERR_OUT_OF_RANGE);
517 AssertMsgReturn(!(GCPhys & X86_PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_OUT_OF_RANGE);
518 AssertReturn(GCPhys < _1E, VERR_OUT_OF_RANGE);
519
520 /*
521 * Compose and make the hypercall.
522 */
523 HV_INPUT_UNMAP_GPA_PAGES *pUnmapPages = (HV_INPUT_UNMAP_GPA_PAGES *)pGVCpu->nem.s.HypercallData.pbPage;
524 AssertPtrReturn(pUnmapPages, VERR_INTERNAL_ERROR_3);
525 pUnmapPages->TargetPartitionId = pGVM->nem.s.idHvPartition;
526 pUnmapPages->TargetGpaBase = GCPhys >> X86_PAGE_SHIFT;
527 pUnmapPages->fFlags = 0;
528
529 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallUnmapGpaPages | ((uint64_t)cPages << 32),
530 pGVCpu->nem.s.HypercallData.HCPhysPage, 0);
531 Log6(("NEMR0UnmapPages: %RGp L %u -> %#RX64\n", GCPhys, cPages, uResult));
532 if (uResult == ((uint64_t)cPages << 32))
533 {
534#if 1 /* Do we need to do this? Hopefully not... */
535 uint64_t volatile uR = g_pfnHvlInvokeHypercall(HvCallUncommitGpaPages | ((uint64_t)cPages << 32),
536 pGVCpu->nem.s.HypercallData.HCPhysPage, 0);
537 AssertMsg(uR == ((uint64_t)cPages << 32), ("uR=%#RX64\n", uR)); NOREF(uR);
538#endif
539 return VINF_SUCCESS;
540 }
541
542 LogRel(("g_pfnHvlInvokeHypercall/UnmapGpaPages -> %#RX64\n", uResult));
543 return VERR_NEM_UNMAP_PAGES_FAILED;
544}
545
546
547/**
548 * Unmaps pages from the guest physical address space.
549 *
550 * Generally the caller will be under the PGM lock already, so no extra effort
551 * is needed to make sure all changes happen under it.
552 *
553 * @returns VBox status code.
554 * @param pGVM The ring-0 VM handle.
555 * @param pVM The cross context VM handle.
556 * @param idCpu The calling EMT. Necessary for getting the
557 * hypercall page and arguments.
558 * @thread EMT(idCpu)
559 */
560VMMR0_INT_DECL(int) NEMR0UnmapPages(PGVM pGVM, PVM pVM, VMCPUID idCpu)
561{
562 /*
563 * Unpack the call.
564 */
565 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
566 if (RT_SUCCESS(rc))
567 {
568 PVMCPU pVCpu = &pVM->aCpus[idCpu];
569 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
570
571 RTGCPHYS const GCPhys = pVCpu->nem.s.Hypercall.UnmapPages.GCPhys;
572 uint32_t const cPages = pVCpu->nem.s.Hypercall.UnmapPages.cPages;
573
574 /*
575 * Do the work.
576 */
577 rc = nemR0WinUnmapPages(pGVM, pGVCpu, GCPhys, cPages);
578 }
579 return rc;
580}
581
582
583/**
584 * Worker for NEMR0ExportState.
585 *
586 * Intention is to use it internally later.
587 *
588 * @returns VBox status code.
589 * @param pGVM The ring-0 VM handle.
590 * @param pGVCpu The ring-0 VCPU handle.
591 * @param   pCtx    The CPU context structure to export from.
592 */
593NEM_TMPL_STATIC int nemR0WinExportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx)
594{
595 PVMCPU pVCpu = &pGVM->pVM->aCpus[pGVCpu->idCpu];
596 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
597 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
598 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
599
600 pInput->PartitionId = pGVM->nem.s.idHvPartition;
601 pInput->VpIndex = pGVCpu->idCpu;
602 pInput->RsvdZ = 0;
603
604 uint64_t const fWhat = ~pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK);
605 if ( !fWhat
606 && pVCpu->nem.s.fCurrentInterruptWindows == pVCpu->nem.s.fDesiredInterruptWindows)
607 return VINF_SUCCESS;
608 uintptr_t iReg = 0;
609
610 /* GPRs */
611 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
612 {
613 if (fWhat & CPUMCTX_EXTRN_RAX)
614 {
615 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
616 pInput->Elements[iReg].Name = HvX64RegisterRax;
617 pInput->Elements[iReg].Value.Reg64 = pCtx->rax;
618 iReg++;
619 }
620 if (fWhat & CPUMCTX_EXTRN_RCX)
621 {
622 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
623 pInput->Elements[iReg].Name = HvX64RegisterRcx;
624 pInput->Elements[iReg].Value.Reg64 = pCtx->rcx;
625 iReg++;
626 }
627 if (fWhat & CPUMCTX_EXTRN_RDX)
628 {
629 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
630 pInput->Elements[iReg].Name = HvX64RegisterRdx;
631 pInput->Elements[iReg].Value.Reg64 = pCtx->rdx;
632 iReg++;
633 }
634 if (fWhat & CPUMCTX_EXTRN_RBX)
635 {
636 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
637 pInput->Elements[iReg].Name = HvX64RegisterRbx;
638 pInput->Elements[iReg].Value.Reg64 = pCtx->rbx;
639 iReg++;
640 }
641 if (fWhat & CPUMCTX_EXTRN_RSP)
642 {
643 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
644 pInput->Elements[iReg].Name = HvX64RegisterRsp;
645 pInput->Elements[iReg].Value.Reg64 = pCtx->rsp;
646 iReg++;
647 }
648 if (fWhat & CPUMCTX_EXTRN_RBP)
649 {
650 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
651 pInput->Elements[iReg].Name = HvX64RegisterRbp;
652 pInput->Elements[iReg].Value.Reg64 = pCtx->rbp;
653 iReg++;
654 }
655 if (fWhat & CPUMCTX_EXTRN_RSI)
656 {
657 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
658 pInput->Elements[iReg].Name = HvX64RegisterRsi;
659 pInput->Elements[iReg].Value.Reg64 = pCtx->rsi;
660 iReg++;
661 }
662 if (fWhat & CPUMCTX_EXTRN_RDI)
663 {
664 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
665 pInput->Elements[iReg].Name = HvX64RegisterRdi;
666 pInput->Elements[iReg].Value.Reg64 = pCtx->rdi;
667 iReg++;
668 }
669 if (fWhat & CPUMCTX_EXTRN_R8_R15)
670 {
671 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
672 pInput->Elements[iReg].Name = HvX64RegisterR8;
673 pInput->Elements[iReg].Value.Reg64 = pCtx->r8;
674 iReg++;
675 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
676 pInput->Elements[iReg].Name = HvX64RegisterR9;
677 pInput->Elements[iReg].Value.Reg64 = pCtx->r9;
678 iReg++;
679 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
680 pInput->Elements[iReg].Name = HvX64RegisterR10;
681 pInput->Elements[iReg].Value.Reg64 = pCtx->r10;
682 iReg++;
683 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
684 pInput->Elements[iReg].Name = HvX64RegisterR11;
685 pInput->Elements[iReg].Value.Reg64 = pCtx->r11;
686 iReg++;
687 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
688 pInput->Elements[iReg].Name = HvX64RegisterR12;
689 pInput->Elements[iReg].Value.Reg64 = pCtx->r12;
690 iReg++;
691 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
692 pInput->Elements[iReg].Name = HvX64RegisterR13;
693 pInput->Elements[iReg].Value.Reg64 = pCtx->r13;
694 iReg++;
695 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
696 pInput->Elements[iReg].Name = HvX64RegisterR14;
697 pInput->Elements[iReg].Value.Reg64 = pCtx->r14;
698 iReg++;
699 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
700 pInput->Elements[iReg].Name = HvX64RegisterR15;
701 pInput->Elements[iReg].Value.Reg64 = pCtx->r15;
702 iReg++;
703 }
704 }
705
706 /* RIP & Flags */
707 if (fWhat & CPUMCTX_EXTRN_RIP)
708 {
709 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
710 pInput->Elements[iReg].Name = HvX64RegisterRip;
711 pInput->Elements[iReg].Value.Reg64 = pCtx->rip;
712 iReg++;
713 }
714 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
715 {
716 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
717 pInput->Elements[iReg].Name = HvX64RegisterRflags;
718 pInput->Elements[iReg].Value.Reg64 = pCtx->rflags.u;
719 iReg++;
720 }
721
722 /* Segments */
723#define COPY_OUT_SEG(a_idx, a_enmName, a_SReg) \
724 do { \
725 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[a_idx]); \
726 pInput->Elements[a_idx].Name = a_enmName; \
727 pInput->Elements[a_idx].Value.Segment.Base = (a_SReg).u64Base; \
728 pInput->Elements[a_idx].Value.Segment.Limit = (a_SReg).u32Limit; \
729 pInput->Elements[a_idx].Value.Segment.Selector = (a_SReg).Sel; \
730 pInput->Elements[a_idx].Value.Segment.Attributes = (a_SReg).Attr.u; \
731 } while (0)
732 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
733 {
734 if (fWhat & CPUMCTX_EXTRN_CS)
735 {
736 COPY_OUT_SEG(iReg, HvX64RegisterCs, pCtx->cs);
737 iReg++;
738 }
739 if (fWhat & CPUMCTX_EXTRN_ES)
740 {
741 COPY_OUT_SEG(iReg, HvX64RegisterEs, pCtx->es);
742 iReg++;
743 }
744 if (fWhat & CPUMCTX_EXTRN_SS)
745 {
746 COPY_OUT_SEG(iReg, HvX64RegisterSs, pCtx->ss);
747 iReg++;
748 }
749 if (fWhat & CPUMCTX_EXTRN_DS)
750 {
751 COPY_OUT_SEG(iReg, HvX64RegisterDs, pCtx->ds);
752 iReg++;
753 }
754 if (fWhat & CPUMCTX_EXTRN_FS)
755 {
756 COPY_OUT_SEG(iReg, HvX64RegisterFs, pCtx->fs);
757 iReg++;
758 }
759 if (fWhat & CPUMCTX_EXTRN_GS)
760 {
761 COPY_OUT_SEG(iReg, HvX64RegisterGs, pCtx->gs);
762 iReg++;
763 }
764 }
765
766 /* Descriptor tables & task segment. */
767 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
768 {
769 if (fWhat & CPUMCTX_EXTRN_LDTR)
770 {
771 COPY_OUT_SEG(iReg, HvX64RegisterLdtr, pCtx->ldtr);
772 iReg++;
773 }
774 if (fWhat & CPUMCTX_EXTRN_TR)
775 {
776 COPY_OUT_SEG(iReg, HvX64RegisterTr, pCtx->tr);
777 iReg++;
778 }
779
780 if (fWhat & CPUMCTX_EXTRN_IDTR)
781 {
782 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
783 pInput->Elements[iReg].Value.Table.Pad[0] = 0;
784 pInput->Elements[iReg].Value.Table.Pad[1] = 0;
785 pInput->Elements[iReg].Value.Table.Pad[2] = 0;
786 pInput->Elements[iReg].Name = HvX64RegisterIdtr;
787 pInput->Elements[iReg].Value.Table.Limit = pCtx->idtr.cbIdt;
788 pInput->Elements[iReg].Value.Table.Base = pCtx->idtr.pIdt;
789 iReg++;
790 }
791 if (fWhat & CPUMCTX_EXTRN_GDTR)
792 {
793 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
794 pInput->Elements[iReg].Value.Table.Pad[0] = 0;
795 pInput->Elements[iReg].Value.Table.Pad[1] = 0;
796 pInput->Elements[iReg].Value.Table.Pad[2] = 0;
797 pInput->Elements[iReg].Name = HvX64RegisterGdtr;
798 pInput->Elements[iReg].Value.Table.Limit = pCtx->gdtr.cbGdt;
799 pInput->Elements[iReg].Value.Table.Base = pCtx->gdtr.pGdt;
800 iReg++;
801 }
802 }
803
804 /* Control registers. */
805 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
806 {
807 if (fWhat & CPUMCTX_EXTRN_CR0)
808 {
809 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
810 pInput->Elements[iReg].Name = HvX64RegisterCr0;
811 pInput->Elements[iReg].Value.Reg64 = pCtx->cr0;
812 iReg++;
813 }
814 if (fWhat & CPUMCTX_EXTRN_CR2)
815 {
816 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
817 pInput->Elements[iReg].Name = HvX64RegisterCr2;
818 pInput->Elements[iReg].Value.Reg64 = pCtx->cr2;
819 iReg++;
820 }
821 if (fWhat & CPUMCTX_EXTRN_CR3)
822 {
823 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
824 pInput->Elements[iReg].Name = HvX64RegisterCr3;
825 pInput->Elements[iReg].Value.Reg64 = pCtx->cr3;
826 iReg++;
827 }
828 if (fWhat & CPUMCTX_EXTRN_CR4)
829 {
830 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
831 pInput->Elements[iReg].Name = HvX64RegisterCr4;
832 pInput->Elements[iReg].Value.Reg64 = pCtx->cr4;
833 iReg++;
834 }
835 }
836 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
837 {
838 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
839 pInput->Elements[iReg].Name = HvX64RegisterCr8;
840 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestCR8(pVCpu);
841 iReg++;
842 }
843
844 /** @todo does HvX64RegisterXfem mean XCR0? What about the related MSR. */
845
846 /* Debug registers. */
847/** @todo fixme. Figure out what the hyper-v version of KVM_SET_GUEST_DEBUG would be. */
848 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
849 {
850 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
851 pInput->Elements[iReg].Name = HvX64RegisterDr0;
852 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR0(pVCpu);
853 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[0];
854 iReg++;
855 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
856 pInput->Elements[iReg].Name = HvX64RegisterDr1;
857 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR1(pVCpu);
858 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[1];
859 iReg++;
860 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
861 pInput->Elements[iReg].Name = HvX64RegisterDr2;
862 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR2(pVCpu);
863 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[2];
864 iReg++;
865 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
866 pInput->Elements[iReg].Name = HvX64RegisterDr3;
867 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR3(pVCpu);
868 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[3];
869 iReg++;
870 }
871 if (fWhat & CPUMCTX_EXTRN_DR6)
872 {
873 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
874 pInput->Elements[iReg].Name = HvX64RegisterDr6;
875 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR6(pVCpu);
876 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[6];
877 iReg++;
878 }
879 if (fWhat & CPUMCTX_EXTRN_DR7)
880 {
881 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
882 pInput->Elements[iReg].Name = HvX64RegisterDr7;
883 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR7(pVCpu);
884 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[7];
885 iReg++;
886 }
887
888 /* Floating point state. */
889 if (fWhat & CPUMCTX_EXTRN_X87)
890 {
891 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
892 pInput->Elements[iReg].Name = HvX64RegisterFpMmx0;
893 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[0].au64[0];
894 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[0].au64[1];
895 iReg++;
896 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
897 pInput->Elements[iReg].Name = HvX64RegisterFpMmx1;
898 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[1].au64[0];
899 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[1].au64[1];
900 iReg++;
901 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
902 pInput->Elements[iReg].Name = HvX64RegisterFpMmx2;
903 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[2].au64[0];
904 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[2].au64[1];
905 iReg++;
906 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
907 pInput->Elements[iReg].Name = HvX64RegisterFpMmx3;
908 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[3].au64[0];
909 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[3].au64[1];
910 iReg++;
911 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
912 pInput->Elements[iReg].Name = HvX64RegisterFpMmx4;
913 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[4].au64[0];
914 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[4].au64[1];
915 iReg++;
916 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
917 pInput->Elements[iReg].Name = HvX64RegisterFpMmx5;
918 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[5].au64[0];
919 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[5].au64[1];
920 iReg++;
921 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
922 pInput->Elements[iReg].Name = HvX64RegisterFpMmx6;
923 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[6].au64[0];
924 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[6].au64[1];
925 iReg++;
926 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
927 pInput->Elements[iReg].Name = HvX64RegisterFpMmx7;
928 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[7].au64[0];
929 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[7].au64[1];
930 iReg++;
931
932 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
933 pInput->Elements[iReg].Name = HvX64RegisterFpControlStatus;
934 pInput->Elements[iReg].Value.FpControlStatus.FpControl = pCtx->pXStateR0->x87.FCW;
935 pInput->Elements[iReg].Value.FpControlStatus.FpStatus = pCtx->pXStateR0->x87.FSW;
936 pInput->Elements[iReg].Value.FpControlStatus.FpTag = pCtx->pXStateR0->x87.FTW;
937 pInput->Elements[iReg].Value.FpControlStatus.Reserved = pCtx->pXStateR0->x87.FTW >> 8;
938 pInput->Elements[iReg].Value.FpControlStatus.LastFpOp = pCtx->pXStateR0->x87.FOP;
939 pInput->Elements[iReg].Value.FpControlStatus.LastFpRip = (pCtx->pXStateR0->x87.FPUIP)
940 | ((uint64_t)pCtx->pXStateR0->x87.CS << 32)
941 | ((uint64_t)pCtx->pXStateR0->x87.Rsrvd1 << 48);
942 iReg++;
943/** @todo we've got trouble if we try to write just SSE w/o X87. */
944 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
945 pInput->Elements[iReg].Name = HvX64RegisterXmmControlStatus;
946 pInput->Elements[iReg].Value.XmmControlStatus.LastFpRdp = (pCtx->pXStateR0->x87.FPUDP)
947 | ((uint64_t)pCtx->pXStateR0->x87.DS << 32)
948 | ((uint64_t)pCtx->pXStateR0->x87.Rsrvd2 << 48);
949 pInput->Elements[iReg].Value.XmmControlStatus.XmmStatusControl = pCtx->pXStateR0->x87.MXCSR;
950 pInput->Elements[iReg].Value.XmmControlStatus.XmmStatusControlMask = pCtx->pXStateR0->x87.MXCSR_MASK; /** @todo ??? (Isn't this an output field?) */
951 iReg++;
952 }
953
954 /* Vector state. */
955 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
956 {
957 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
958 pInput->Elements[iReg].Name = HvX64RegisterXmm0;
959 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Lo;
960 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Hi;
961 iReg++;
962 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
963 pInput->Elements[iReg].Name = HvX64RegisterXmm1;
964 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Lo;
965 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Hi;
966 iReg++;
967 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
968 pInput->Elements[iReg].Name = HvX64RegisterXmm2;
969 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Lo;
970 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Hi;
971 iReg++;
972 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
973 pInput->Elements[iReg].Name = HvX64RegisterXmm3;
974 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Lo;
975 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Hi;
976 iReg++;
977 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
978 pInput->Elements[iReg].Name = HvX64RegisterXmm4;
979 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Lo;
980 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Hi;
981 iReg++;
982 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
983 pInput->Elements[iReg].Name = HvX64RegisterXmm5;
984 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Lo;
985 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Hi;
986 iReg++;
987 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
988 pInput->Elements[iReg].Name = HvX64RegisterXmm6;
989 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Lo;
990 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Hi;
991 iReg++;
992 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
993 pInput->Elements[iReg].Name = HvX64RegisterXmm7;
994 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Lo;
995 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Hi;
996 iReg++;
997 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
998 pInput->Elements[iReg].Name = HvX64RegisterXmm8;
999 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Lo;
1000 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Hi;
1001 iReg++;
1002 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1003 pInput->Elements[iReg].Name = HvX64RegisterXmm9;
1004 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Lo;
1005 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Hi;
1006 iReg++;
1007 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1008 pInput->Elements[iReg].Name = HvX64RegisterXmm10;
1009 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Lo;
1010 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Hi;
1011 iReg++;
1012 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1013 pInput->Elements[iReg].Name = HvX64RegisterXmm11;
1014 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Lo;
1015 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Hi;
1016 iReg++;
1017 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1018 pInput->Elements[iReg].Name = HvX64RegisterXmm12;
1019 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Lo;
1020 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Hi;
1021 iReg++;
1022 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1023 pInput->Elements[iReg].Name = HvX64RegisterXmm13;
1024 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Lo;
1025 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Hi;
1026 iReg++;
1027 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1028 pInput->Elements[iReg].Name = HvX64RegisterXmm14;
1029 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Lo;
1030 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Hi;
1031 iReg++;
1032 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1033 pInput->Elements[iReg].Name = HvX64RegisterXmm15;
1034 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Lo;
1035 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Hi;
1036 iReg++;
1037 }
1038
1039 /* MSRs */
1040 // HvX64RegisterTsc - don't touch
1041 if (fWhat & CPUMCTX_EXTRN_EFER)
1042 {
1043 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1044 pInput->Elements[iReg].Name = HvX64RegisterEfer;
1045 pInput->Elements[iReg].Value.Reg64 = pCtx->msrEFER;
1046 iReg++;
1047 }
1048 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1049 {
1050 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1051 pInput->Elements[iReg].Name = HvX64RegisterKernelGsBase;
1052 pInput->Elements[iReg].Value.Reg64 = pCtx->msrKERNELGSBASE;
1053 iReg++;
1054 }
1055 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1056 {
1057 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1058 pInput->Elements[iReg].Name = HvX64RegisterSysenterCs;
1059 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.cs;
1060 iReg++;
1061 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1062 pInput->Elements[iReg].Name = HvX64RegisterSysenterEip;
1063 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.eip;
1064 iReg++;
1065 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1066 pInput->Elements[iReg].Name = HvX64RegisterSysenterEsp;
1067 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.esp;
1068 iReg++;
1069 }
1070 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1071 {
1072 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1073 pInput->Elements[iReg].Name = HvX64RegisterStar;
1074 pInput->Elements[iReg].Value.Reg64 = pCtx->msrSTAR;
1075 iReg++;
1076 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1077 pInput->Elements[iReg].Name = HvX64RegisterLstar;
1078 pInput->Elements[iReg].Value.Reg64 = pCtx->msrLSTAR;
1079 iReg++;
1080 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1081 pInput->Elements[iReg].Name = HvX64RegisterCstar;
1082 pInput->Elements[iReg].Value.Reg64 = pCtx->msrCSTAR;
1083 iReg++;
1084 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1085 pInput->Elements[iReg].Name = HvX64RegisterSfmask;
1086 pInput->Elements[iReg].Value.Reg64 = pCtx->msrSFMASK;
1087 iReg++;
1088 }
1089 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1090 {
1091 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1092 pInput->Elements[iReg].Name = HvX64RegisterApicBase;
1093 pInput->Elements[iReg].Value.Reg64 = APICGetBaseMsrNoCheck(pVCpu);
1094 iReg++;
1095 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1096 pInput->Elements[iReg].Name = HvX64RegisterPat;
1097 pInput->Elements[iReg].Value.Reg64 = pCtx->msrPAT;
1098 iReg++;
1099#if 0 /** @todo HvX64RegisterMtrrCap is read only? Seems it's not even readable. */
1100 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1101 pInput->Elements[iReg].Name = HvX64RegisterMtrrCap;
1102 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestIa32MtrrCap(pVCpu);
1103 iReg++;
1104#endif
1105
1106 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
1107
1108 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1109 pInput->Elements[iReg].Name = HvX64RegisterMtrrDefType;
1110 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrDefType;
1111 iReg++;
1112
1113    /** @todo we don't keep state for HvX64RegisterMtrrPhysBaseX and HvX64RegisterMtrrPhysMaskX */
1114
1115 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1116 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix64k00000;
1117 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix64K_00000;
1118 iReg++;
1119 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1120 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix16k80000;
1121 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix16K_80000;
1122 iReg++;
1123 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1124 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix16kA0000;
1125 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix16K_A0000;
1126 iReg++;
1127 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1128 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kC0000;
1129 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_C0000;
1130 iReg++;
1131 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1132 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kC8000;
1133 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_C8000;
1134 iReg++;
1135 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1136 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kD0000;
1137 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_D0000;
1138 iReg++;
1139 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1140 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kD8000;
1141 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_D8000;
1142 iReg++;
1143 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1144 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kE0000;
1145 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_E0000;
1146 iReg++;
1147 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1148 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kE8000;
1149 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_E8000;
1150 iReg++;
1151 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1152 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kF0000;
1153 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_F0000;
1154 iReg++;
1155 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1156 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kF8000;
1157 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_F8000;
1158 iReg++;
1159 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1160 pInput->Elements[iReg].Name = HvX64RegisterTscAux;
1161 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.TscAux;
1162 iReg++;
1163
1164#if 0 /** @todo Why can't we write these on Intel systems? Not that we really care... */
1165 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM->pVM);
1166 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
1167 {
1168 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1169 pInput->Elements[iReg].Name = HvX64RegisterIa32MiscEnable;
1170 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MiscEnable;
1171 iReg++;
1172 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1173 pInput->Elements[iReg].Name = HvX64RegisterIa32FeatureControl;
1174 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestIa32FeatureControl(pVCpu);
1175 iReg++;
1176 }
1177#endif
1178 }
1179
1180 /* event injection (clear it). */
1181 if (fWhat & CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)
1182 {
1183 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1184 pInput->Elements[iReg].Name = HvRegisterPendingInterruption;
1185 pInput->Elements[iReg].Value.Reg64 = 0;
1186 iReg++;
1187 }
1188
1189 /* Interruptibility state. This can get a little complicated since we get
1190 half of the state via HV_X64_VP_EXECUTION_STATE. */
1191 if ( (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
1192 == (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI) )
1193 {
1194 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1195 pInput->Elements[iReg].Name = HvRegisterInterruptState;
1196 pInput->Elements[iReg].Value.Reg64 = 0;
1197 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1198 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
1199 pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1;
1200 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
1201 pInput->Elements[iReg].Value.InterruptState.NmiMasked = 1;
1202 iReg++;
1203 }
1204 else if (fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT)
1205 {
1206 if ( pVCpu->nem.s.fLastInterruptShadow
1207 || ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1208 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip))
1209 {
1210 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1211 pInput->Elements[iReg].Name = HvRegisterInterruptState;
1212 pInput->Elements[iReg].Value.Reg64 = 0;
1213 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1214 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
1215 pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1;
1216 /** @todo Retrieve NMI state, currently assuming it's zero. (yes this may happen on I/O) */
1217 //if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
1218 // pInput->Elements[iReg].Value.InterruptState.NmiMasked = 1;
1219 iReg++;
1220 }
1221 }
1222 else
1223 Assert(!(fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI));
1224
1225 /* Interrupt windows. Always set if active as Hyper-V seems to be forgetful. */
1226 uint8_t const fDesiredIntWin = pVCpu->nem.s.fDesiredInterruptWindows;
1227 if ( fDesiredIntWin
1228 || pVCpu->nem.s.fCurrentInterruptWindows != fDesiredIntWin)
1229 {
1230 pVCpu->nem.s.fCurrentInterruptWindows = pVCpu->nem.s.fDesiredInterruptWindows;
1231 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1232 pInput->Elements[iReg].Name = HvX64RegisterDeliverabilityNotifications;
1233 pInput->Elements[iReg].Value.DeliverabilityNotifications.AsUINT64 = fDesiredIntWin;
1234 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.NmiNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_NMI));
1235 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.InterruptNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_REGULAR));
1236 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.InterruptPriority == (fDesiredIntWin & NEM_WIN_INTW_F_PRIO_MASK) >> NEM_WIN_INTW_F_PRIO_SHIFT);
1237 iReg++;
1238 }
1239
1240 /// @todo HvRegisterPendingEvent0
1241 /// @todo HvRegisterPendingEvent1
1242
1243 /*
1244 * Set the registers.
1245 */
1246 Assert((uintptr_t)&pInput->Elements[iReg] - (uintptr_t)pGVCpu->nem.s.HypercallData.pbPage < PAGE_SIZE); /* max is 127 */
1247
1248 /*
1249 * Make the hypercall.
1250 */
1251 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, iReg),
1252 pGVCpu->nem.s.HypercallData.HCPhysPage, 0 /*GCPhysOutput*/);
1253 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(iReg),
1254 ("uResult=%RX64 iRegs=%#x\n", uResult, iReg),
1255 VERR_NEM_SET_REGISTERS_FAILED);
1256 //LogFlow(("nemR0WinExportState: uResult=%#RX64 iReg=%zu fWhat=%#018RX64 fExtrn=%#018RX64 -> %#018RX64\n", uResult, iReg, fWhat, pCtx->fExtrn,
1257 // pCtx->fExtrn | CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM ));
1258 pCtx->fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM;
1259 return VINF_SUCCESS;
1260}
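
/* Illustrative sketch, not part of the original source: HV_MAKE_CALL_INFO and
   HV_MAKE_CALL_REP_RET are assumed to follow the same packing as the explicit
   '| ((uint64_t)cPages << 32)' construction used by the map/unmap workers above,
   i.e. hypercall code in the low 16 bits and rep count in bits 32 and up.  Under
   that assumption they could be spelled out roughly as follows (example names only): */
#if 0
# define EXAMPLE_MAKE_CALL_INFO(a_enmCall, a_cReps)  ( (uint64_t)(a_enmCall) | ((uint64_t)(a_cReps) << 32) )
# define EXAMPLE_MAKE_CALL_REP_RET(a_cReps)          ( (uint64_t)(a_cReps) << 32 )
#endif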
1261
1262
1263/**
1264 * Export the state to the native API (out of CPUMCTX).
1265 *
1266 * @returns VBox status code
1267 * @param pGVM The ring-0 VM handle.
1268 * @param pVM The cross context VM handle.
1269 * @param idCpu The calling EMT. Necessary for getting the
1270 * hypercall page and arguments.
1271 */
1272VMMR0_INT_DECL(int) NEMR0ExportState(PGVM pGVM, PVM pVM, VMCPUID idCpu)
1273{
1274 /*
1275 * Validate the call.
1276 */
1277 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
1278 if (RT_SUCCESS(rc))
1279 {
1280 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1281 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1282 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
1283
1284 /*
1285 * Call worker.
1286 */
1287 rc = nemR0WinExportState(pGVM, pGVCpu, CPUMQueryGuestCtxPtr(pVCpu));
1288 }
1289 return rc;
1290}
1291
1292
1293/**
1294 * Worker for NEMR0ImportState.
1295 *
1296 * Intention is to use it internally later.
1297 *
1298 * @returns VBox status code.
1299 * @param pGVM The ring-0 VM handle.
1300 * @param pGVCpu The ring-0 VCPU handle.
1301 * @param pCtx The CPU context structure to import into.
1302 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
1303 */
1304NEM_TMPL_STATIC int nemR0WinImportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat)
1305{
1306 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
1307 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
1308 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
1309
1310 fWhat &= pCtx->fExtrn;
1311
1312 pInput->PartitionId = pGVM->nem.s.idHvPartition;
1313 pInput->VpIndex = pGVCpu->idCpu;
1314 pInput->fFlags = 0;
1315
1316 /* GPRs */
1317 uintptr_t iReg = 0;
1318 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1319 {
1320 if (fWhat & CPUMCTX_EXTRN_RAX)
1321 pInput->Names[iReg++] = HvX64RegisterRax;
1322 if (fWhat & CPUMCTX_EXTRN_RCX)
1323 pInput->Names[iReg++] = HvX64RegisterRcx;
1324 if (fWhat & CPUMCTX_EXTRN_RDX)
1325 pInput->Names[iReg++] = HvX64RegisterRdx;
1326 if (fWhat & CPUMCTX_EXTRN_RBX)
1327 pInput->Names[iReg++] = HvX64RegisterRbx;
1328 if (fWhat & CPUMCTX_EXTRN_RSP)
1329 pInput->Names[iReg++] = HvX64RegisterRsp;
1330 if (fWhat & CPUMCTX_EXTRN_RBP)
1331 pInput->Names[iReg++] = HvX64RegisterRbp;
1332 if (fWhat & CPUMCTX_EXTRN_RSI)
1333 pInput->Names[iReg++] = HvX64RegisterRsi;
1334 if (fWhat & CPUMCTX_EXTRN_RDI)
1335 pInput->Names[iReg++] = HvX64RegisterRdi;
1336 if (fWhat & CPUMCTX_EXTRN_R8_R15)
1337 {
1338 pInput->Names[iReg++] = HvX64RegisterR8;
1339 pInput->Names[iReg++] = HvX64RegisterR9;
1340 pInput->Names[iReg++] = HvX64RegisterR10;
1341 pInput->Names[iReg++] = HvX64RegisterR11;
1342 pInput->Names[iReg++] = HvX64RegisterR12;
1343 pInput->Names[iReg++] = HvX64RegisterR13;
1344 pInput->Names[iReg++] = HvX64RegisterR14;
1345 pInput->Names[iReg++] = HvX64RegisterR15;
1346 }
1347 }
1348
1349 /* RIP & Flags */
1350 if (fWhat & CPUMCTX_EXTRN_RIP)
1351 pInput->Names[iReg++] = HvX64RegisterRip;
1352 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
1353 pInput->Names[iReg++] = HvX64RegisterRflags;
1354
1355 /* Segments */
1356 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
1357 {
1358 if (fWhat & CPUMCTX_EXTRN_CS)
1359 pInput->Names[iReg++] = HvX64RegisterCs;
1360 if (fWhat & CPUMCTX_EXTRN_ES)
1361 pInput->Names[iReg++] = HvX64RegisterEs;
1362 if (fWhat & CPUMCTX_EXTRN_SS)
1363 pInput->Names[iReg++] = HvX64RegisterSs;
1364 if (fWhat & CPUMCTX_EXTRN_DS)
1365 pInput->Names[iReg++] = HvX64RegisterDs;
1366 if (fWhat & CPUMCTX_EXTRN_FS)
1367 pInput->Names[iReg++] = HvX64RegisterFs;
1368 if (fWhat & CPUMCTX_EXTRN_GS)
1369 pInput->Names[iReg++] = HvX64RegisterGs;
1370 }
1371
1372 /* Descriptor tables and the task segment. */
1373 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
1374 {
1375 if (fWhat & CPUMCTX_EXTRN_LDTR)
1376 pInput->Names[iReg++] = HvX64RegisterLdtr;
1377 if (fWhat & CPUMCTX_EXTRN_TR)
1378 pInput->Names[iReg++] = HvX64RegisterTr;
1379 if (fWhat & CPUMCTX_EXTRN_IDTR)
1380 pInput->Names[iReg++] = HvX64RegisterIdtr;
1381 if (fWhat & CPUMCTX_EXTRN_GDTR)
1382 pInput->Names[iReg++] = HvX64RegisterGdtr;
1383 }
1384
1385 /* Control registers. */
1386 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
1387 {
1388 if (fWhat & CPUMCTX_EXTRN_CR0)
1389 pInput->Names[iReg++] = HvX64RegisterCr0;
1390 if (fWhat & CPUMCTX_EXTRN_CR2)
1391 pInput->Names[iReg++] = HvX64RegisterCr2;
1392 if (fWhat & CPUMCTX_EXTRN_CR3)
1393 pInput->Names[iReg++] = HvX64RegisterCr3;
1394 if (fWhat & CPUMCTX_EXTRN_CR4)
1395 pInput->Names[iReg++] = HvX64RegisterCr4;
1396 }
1397 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
1398 pInput->Names[iReg++] = HvX64RegisterCr8;
1399
1400 /* Debug registers. */
1401 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
1402 {
1403 pInput->Names[iReg++] = HvX64RegisterDr0;
1404 pInput->Names[iReg++] = HvX64RegisterDr1;
1405 pInput->Names[iReg++] = HvX64RegisterDr2;
1406 pInput->Names[iReg++] = HvX64RegisterDr3;
1407 }
1408 if (fWhat & CPUMCTX_EXTRN_DR6)
1409 pInput->Names[iReg++] = HvX64RegisterDr6;
1410 if (fWhat & CPUMCTX_EXTRN_DR7)
1411 pInput->Names[iReg++] = HvX64RegisterDr7;
1412
1413 /* Floating point state. */
1414 if (fWhat & CPUMCTX_EXTRN_X87)
1415 {
1416 pInput->Names[iReg++] = HvX64RegisterFpMmx0;
1417 pInput->Names[iReg++] = HvX64RegisterFpMmx1;
1418 pInput->Names[iReg++] = HvX64RegisterFpMmx2;
1419 pInput->Names[iReg++] = HvX64RegisterFpMmx3;
1420 pInput->Names[iReg++] = HvX64RegisterFpMmx4;
1421 pInput->Names[iReg++] = HvX64RegisterFpMmx5;
1422 pInput->Names[iReg++] = HvX64RegisterFpMmx6;
1423 pInput->Names[iReg++] = HvX64RegisterFpMmx7;
1424 pInput->Names[iReg++] = HvX64RegisterFpControlStatus;
1425 }
1426 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
1427 pInput->Names[iReg++] = HvX64RegisterXmmControlStatus;
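            /* Note: HvX64RegisterXmmControlStatus carries both the SSE MXCSR/MXCSR_MASK and the x87
               last data pointer (FPUDP/DS), so it is requested whenever either state group is wanted;
               the copy-back code further down splits the fields accordingly. */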
1428
1429 /* Vector state. */
1430 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
1431 {
1432 pInput->Names[iReg++] = HvX64RegisterXmm0;
1433 pInput->Names[iReg++] = HvX64RegisterXmm1;
1434 pInput->Names[iReg++] = HvX64RegisterXmm2;
1435 pInput->Names[iReg++] = HvX64RegisterXmm3;
1436 pInput->Names[iReg++] = HvX64RegisterXmm4;
1437 pInput->Names[iReg++] = HvX64RegisterXmm5;
1438 pInput->Names[iReg++] = HvX64RegisterXmm6;
1439 pInput->Names[iReg++] = HvX64RegisterXmm7;
1440 pInput->Names[iReg++] = HvX64RegisterXmm8;
1441 pInput->Names[iReg++] = HvX64RegisterXmm9;
1442 pInput->Names[iReg++] = HvX64RegisterXmm10;
1443 pInput->Names[iReg++] = HvX64RegisterXmm11;
1444 pInput->Names[iReg++] = HvX64RegisterXmm12;
1445 pInput->Names[iReg++] = HvX64RegisterXmm13;
1446 pInput->Names[iReg++] = HvX64RegisterXmm14;
1447 pInput->Names[iReg++] = HvX64RegisterXmm15;
1448 }
1449
1450 /* MSRs */
1451 // HvX64RegisterTsc - don't touch
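    /* The TSC is managed separately via nemR0WinQueryCpuTick and nemR0WinResumeCpuTickOnAll below,
       so it is deliberately not fetched here. */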
1452 if (fWhat & CPUMCTX_EXTRN_EFER)
1453 pInput->Names[iReg++] = HvX64RegisterEfer;
1454 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1455 pInput->Names[iReg++] = HvX64RegisterKernelGsBase;
1456 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1457 {
1458 pInput->Names[iReg++] = HvX64RegisterSysenterCs;
1459 pInput->Names[iReg++] = HvX64RegisterSysenterEip;
1460 pInput->Names[iReg++] = HvX64RegisterSysenterEsp;
1461 }
1462 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1463 {
1464 pInput->Names[iReg++] = HvX64RegisterStar;
1465 pInput->Names[iReg++] = HvX64RegisterLstar;
1466 pInput->Names[iReg++] = HvX64RegisterCstar;
1467 pInput->Names[iReg++] = HvX64RegisterSfmask;
1468 }
1469
1470#ifdef LOG_ENABLED
1471 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM->pVM);
1472#endif
1473 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1474 {
1475 pInput->Names[iReg++] = HvX64RegisterApicBase; /// @todo APIC BASE
1476 pInput->Names[iReg++] = HvX64RegisterPat;
1477#if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
1478 pInput->Names[iReg++] = HvX64RegisterMtrrCap;
1479#endif
1480 pInput->Names[iReg++] = HvX64RegisterMtrrDefType;
1481 pInput->Names[iReg++] = HvX64RegisterMtrrFix64k00000;
1482 pInput->Names[iReg++] = HvX64RegisterMtrrFix16k80000;
1483 pInput->Names[iReg++] = HvX64RegisterMtrrFix16kA0000;
1484 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kC0000;
1485 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kC8000;
1486 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kD0000;
1487 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kD8000;
1488 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kE0000;
1489 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kE8000;
1490 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kF0000;
1491 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kF8000;
1492 pInput->Names[iReg++] = HvX64RegisterTscAux;
1493#if 0 /** @todo why can't we read HvX64RegisterIa32MiscEnable? */
1494 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
1495 pInput->Names[iReg++] = HvX64RegisterIa32MiscEnable;
1496#endif
1497#ifdef LOG_ENABLED
1498 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
1499 pInput->Names[iReg++] = HvX64RegisterIa32FeatureControl;
1500#endif
1501 }
1502
1503 /* Interruptibility. */
1504 if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
1505 {
1506 pInput->Names[iReg++] = HvRegisterInterruptState;
1507 pInput->Names[iReg++] = HvX64RegisterRip;
1508 }
1509
1510 /* event injection */
1511 pInput->Names[iReg++] = HvRegisterPendingInterruption;
1512 pInput->Names[iReg++] = HvRegisterPendingEvent0;
1513 pInput->Names[iReg++] = HvRegisterPendingEvent1;
1514 size_t const cRegs = iReg;
1515 size_t const cbInput = RT_ALIGN_Z(RT_OFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[cRegs]), 32);
1516
1517 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
1518 Assert((uintptr_t)&paValues[cRegs] - (uintptr_t)pGVCpu->nem.s.HypercallData.pbPage < PAGE_SIZE); /* (max is around 168 registers) */
1519 RT_BZERO(paValues, cRegs * sizeof(paValues[0]));
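    /* Layout of the per-vCPU hypercall page: the GET_VP_REGISTERS input (header plus register name
       array) sits at the start, and the 32-byte aligned output area for the register values follows
       it; both halves are handed to the hypervisor by host-physical address below. */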
1520
1521 /*
1522 * Make the hypercall.
1523 */
1524 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, cRegs),
1525 pGVCpu->nem.s.HypercallData.HCPhysPage,
1526 pGVCpu->nem.s.HypercallData.HCPhysPage + cbInput);
1527 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(cRegs),
1528 ("uResult=%RX64 cRegs=%#x\n", uResult, cRegs),
1529 VERR_NEM_GET_REGISTERS_FAILED);
1530 //LogFlow(("nemR0WinImportState: uResult=%#RX64 iReg=%zu fWhat=%#018RX64 fExtr=%#018RX64\n", uResult, cRegs, fWhat, pCtx->fExtrn));
1531
1532 /*
1533 * Copy information to the CPUM context.
1534 */
1535 PVMCPU pVCpu = &pGVM->pVM->aCpus[pGVCpu->idCpu];
1536 iReg = 0;
1537
1538 /* GPRs */
1539 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1540 {
1541 if (fWhat & CPUMCTX_EXTRN_RAX)
1542 {
1543 Assert(pInput->Names[iReg] == HvX64RegisterRax);
1544 pCtx->rax = paValues[iReg++].Reg64;
1545 }
1546 if (fWhat & CPUMCTX_EXTRN_RCX)
1547 {
1548 Assert(pInput->Names[iReg] == HvX64RegisterRcx);
1549 pCtx->rcx = paValues[iReg++].Reg64;
1550 }
1551 if (fWhat & CPUMCTX_EXTRN_RDX)
1552 {
1553 Assert(pInput->Names[iReg] == HvX64RegisterRdx);
1554 pCtx->rdx = paValues[iReg++].Reg64;
1555 }
1556 if (fWhat & CPUMCTX_EXTRN_RBX)
1557 {
1558 Assert(pInput->Names[iReg] == HvX64RegisterRbx);
1559 pCtx->rbx = paValues[iReg++].Reg64;
1560 }
1561 if (fWhat & CPUMCTX_EXTRN_RSP)
1562 {
1563 Assert(pInput->Names[iReg] == HvX64RegisterRsp);
1564 pCtx->rsp = paValues[iReg++].Reg64;
1565 }
1566 if (fWhat & CPUMCTX_EXTRN_RBP)
1567 {
1568 Assert(pInput->Names[iReg] == HvX64RegisterRbp);
1569 pCtx->rbp = paValues[iReg++].Reg64;
1570 }
1571 if (fWhat & CPUMCTX_EXTRN_RSI)
1572 {
1573 Assert(pInput->Names[iReg] == HvX64RegisterRsi);
1574 pCtx->rsi = paValues[iReg++].Reg64;
1575 }
1576 if (fWhat & CPUMCTX_EXTRN_RDI)
1577 {
1578 Assert(pInput->Names[iReg] == HvX64RegisterRdi);
1579 pCtx->rdi = paValues[iReg++].Reg64;
1580 }
1581 if (fWhat & CPUMCTX_EXTRN_R8_R15)
1582 {
1583 Assert(pInput->Names[iReg] == HvX64RegisterR8);
1584 Assert(pInput->Names[iReg + 7] == HvX64RegisterR15);
1585 pCtx->r8 = paValues[iReg++].Reg64;
1586 pCtx->r9 = paValues[iReg++].Reg64;
1587 pCtx->r10 = paValues[iReg++].Reg64;
1588 pCtx->r11 = paValues[iReg++].Reg64;
1589 pCtx->r12 = paValues[iReg++].Reg64;
1590 pCtx->r13 = paValues[iReg++].Reg64;
1591 pCtx->r14 = paValues[iReg++].Reg64;
1592 pCtx->r15 = paValues[iReg++].Reg64;
1593 }
1594 }
1595
1596 /* RIP & Flags */
1597 if (fWhat & CPUMCTX_EXTRN_RIP)
1598 {
1599 Assert(pInput->Names[iReg] == HvX64RegisterRip);
1600 pCtx->rip = paValues[iReg++].Reg64;
1601 }
1602 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
1603 {
1604 Assert(pInput->Names[iReg] == HvX64RegisterRflags);
1605 pCtx->rflags.u = paValues[iReg++].Reg64;
1606 }
1607
1608 /* Segments */
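/* Helper: copies one Hyper-V segment register value (base, limit, selector, attributes) into the
   corresponding CPUMSELREG and marks the hidden parts as valid. */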
1609#define COPY_BACK_SEG(a_idx, a_enmName, a_SReg) \
1610 do { \
1611 Assert(pInput->Names[a_idx] == a_enmName); \
1612 (a_SReg).u64Base = paValues[a_idx].Segment.Base; \
1613 (a_SReg).u32Limit = paValues[a_idx].Segment.Limit; \
1614 (a_SReg).ValidSel = (a_SReg).Sel = paValues[a_idx].Segment.Selector; \
1615 (a_SReg).Attr.u = paValues[a_idx].Segment.Attributes; \
1616 (a_SReg).fFlags = CPUMSELREG_FLAGS_VALID; \
1617 } while (0)
1618 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
1619 {
1620 if (fWhat & CPUMCTX_EXTRN_CS)
1621 {
1622 COPY_BACK_SEG(iReg, HvX64RegisterCs, pCtx->cs);
1623 iReg++;
1624 }
1625 if (fWhat & CPUMCTX_EXTRN_ES)
1626 {
1627 COPY_BACK_SEG(iReg, HvX64RegisterEs, pCtx->es);
1628 iReg++;
1629 }
1630 if (fWhat & CPUMCTX_EXTRN_SS)
1631 {
1632 COPY_BACK_SEG(iReg, HvX64RegisterSs, pCtx->ss);
1633 iReg++;
1634 }
1635 if (fWhat & CPUMCTX_EXTRN_DS)
1636 {
1637 COPY_BACK_SEG(iReg, HvX64RegisterDs, pCtx->ds);
1638 iReg++;
1639 }
1640 if (fWhat & CPUMCTX_EXTRN_FS)
1641 {
1642 COPY_BACK_SEG(iReg, HvX64RegisterFs, pCtx->fs);
1643 iReg++;
1644 }
1645 if (fWhat & CPUMCTX_EXTRN_GS)
1646 {
1647 COPY_BACK_SEG(iReg, HvX64RegisterGs, pCtx->gs);
1648 iReg++;
1649 }
1650 }
1651 /* Descriptor tables and the task segment. */
1652 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
1653 {
1654 if (fWhat & CPUMCTX_EXTRN_LDTR)
1655 {
1656 COPY_BACK_SEG(iReg, HvX64RegisterLdtr, pCtx->ldtr);
1657 iReg++;
1658 }
1659 if (fWhat & CPUMCTX_EXTRN_TR)
1660 {
1661            /* AMD-V likes loading TR in the AVAIL state, whereas Intel insists on BUSY.  So, to
1662               avoid triggering sanity assertions around the code, always fix this up. */
1663 COPY_BACK_SEG(iReg, HvX64RegisterTr, pCtx->tr);
1664 switch (pCtx->tr.Attr.n.u4Type)
1665 {
1666 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1667 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1668 break;
1669 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
1670 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
1671 break;
1672 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
1673 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;
1674 break;
1675 }
1676 iReg++;
1677 }
1678 if (fWhat & CPUMCTX_EXTRN_IDTR)
1679 {
1680 Assert(pInput->Names[iReg] == HvX64RegisterIdtr);
1681 pCtx->idtr.cbIdt = paValues[iReg].Table.Limit;
1682 pCtx->idtr.pIdt = paValues[iReg].Table.Base;
1683 iReg++;
1684 }
1685 if (fWhat & CPUMCTX_EXTRN_GDTR)
1686 {
1687 Assert(pInput->Names[iReg] == HvX64RegisterGdtr);
1688 pCtx->gdtr.cbGdt = paValues[iReg].Table.Limit;
1689 pCtx->gdtr.pGdt = paValues[iReg].Table.Base;
1690 iReg++;
1691 }
1692 }
1693
1694 /* Control registers. */
1695 bool fMaybeChangedMode = false;
1696 bool fFlushTlb = false;
1697 bool fFlushGlobalTlb = false;
1698 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
1699 {
1700 if (fWhat & CPUMCTX_EXTRN_CR0)
1701 {
1702 Assert(pInput->Names[iReg] == HvX64RegisterCr0);
1703 if (pCtx->cr0 != paValues[iReg].Reg64)
1704 {
1705 CPUMSetGuestCR0(pVCpu, paValues[iReg].Reg64);
1706 fMaybeChangedMode = true;
1707 fFlushTlb = fFlushGlobalTlb = true; /// @todo fix this
1708 }
1709 iReg++;
1710 }
1711 if (fWhat & CPUMCTX_EXTRN_CR2)
1712 {
1713 Assert(pInput->Names[iReg] == HvX64RegisterCr2);
1714 pCtx->cr2 = paValues[iReg].Reg64;
1715 iReg++;
1716 }
1717 if (fWhat & CPUMCTX_EXTRN_CR3)
1718 {
1719 Assert(pInput->Names[iReg] == HvX64RegisterCr3);
1720 if (pCtx->cr3 != paValues[iReg].Reg64)
1721 {
1722 CPUMSetGuestCR3(pVCpu, paValues[iReg].Reg64);
1723 fFlushTlb = true;
1724 }
1725 iReg++;
1726 }
1727 if (fWhat & CPUMCTX_EXTRN_CR4)
1728 {
1729 Assert(pInput->Names[iReg] == HvX64RegisterCr4);
1730 if (pCtx->cr4 != paValues[iReg].Reg64)
1731 {
1732 CPUMSetGuestCR4(pVCpu, paValues[iReg].Reg64);
1733 fMaybeChangedMode = true;
1734 fFlushTlb = fFlushGlobalTlb = true; /// @todo fix this
1735 }
1736 iReg++;
1737 }
1738 }
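    /* Hyper-V reports CR8 as the upper four bits of the task priority (TPR[7:4]), so shift it left
       by four to recover the 8-bit TPR value APICSetTpr expects. */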
1739 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
1740 {
1741 Assert(pInput->Names[iReg] == HvX64RegisterCr8);
1742 APICSetTpr(pVCpu, (uint8_t)paValues[iReg].Reg64 << 4);
1743 iReg++;
1744 }
1745
1746 /* Debug registers. */
1747/** @todo fixme */
1748 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
1749 {
1750 Assert(pInput->Names[iReg] == HvX64RegisterDr0);
1751 Assert(pInput->Names[iReg+3] == HvX64RegisterDr3);
1752 if (pCtx->dr[0] != paValues[iReg].Reg64)
1753 CPUMSetGuestDR0(pVCpu, paValues[iReg].Reg64);
1754 iReg++;
1755 if (pCtx->dr[1] != paValues[iReg].Reg64)
1756 CPUMSetGuestDR1(pVCpu, paValues[iReg].Reg64);
1757 iReg++;
1758 if (pCtx->dr[2] != paValues[iReg].Reg64)
1759 CPUMSetGuestDR2(pVCpu, paValues[iReg].Reg64);
1760 iReg++;
1761 if (pCtx->dr[3] != paValues[iReg].Reg64)
1762 CPUMSetGuestDR3(pVCpu, paValues[iReg].Reg64);
1763 iReg++;
1764 }
1765 if (fWhat & CPUMCTX_EXTRN_DR6)
1766 {
1767 Assert(pInput->Names[iReg] == HvX64RegisterDr6);
1768 if (pCtx->dr[6] != paValues[iReg].Reg64)
1769 CPUMSetGuestDR6(pVCpu, paValues[iReg].Reg64);
1770 iReg++;
1771 }
1772 if (fWhat & CPUMCTX_EXTRN_DR7)
1773 {
1774 Assert(pInput->Names[iReg] == HvX64RegisterDr7);
1775 if (pCtx->dr[7] != paValues[iReg].Reg64)
1776 CPUMSetGuestDR7(pVCpu, paValues[iReg].Reg64);
1777 iReg++;
1778 }
1779
1780 /* Floating point state. */
1781 if (fWhat & CPUMCTX_EXTRN_X87)
1782 {
1783 Assert(pInput->Names[iReg] == HvX64RegisterFpMmx0);
1784 Assert(pInput->Names[iReg + 7] == HvX64RegisterFpMmx7);
1785 pCtx->pXStateR0->x87.aRegs[0].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1786 pCtx->pXStateR0->x87.aRegs[0].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1787 iReg++;
1788 pCtx->pXStateR0->x87.aRegs[1].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1789 pCtx->pXStateR0->x87.aRegs[1].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1790 iReg++;
1791 pCtx->pXStateR0->x87.aRegs[2].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1792 pCtx->pXStateR0->x87.aRegs[2].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1793 iReg++;
1794 pCtx->pXStateR0->x87.aRegs[3].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1795 pCtx->pXStateR0->x87.aRegs[3].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1796 iReg++;
1797 pCtx->pXStateR0->x87.aRegs[4].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1798 pCtx->pXStateR0->x87.aRegs[4].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1799 iReg++;
1800 pCtx->pXStateR0->x87.aRegs[5].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1801 pCtx->pXStateR0->x87.aRegs[5].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1802 iReg++;
1803 pCtx->pXStateR0->x87.aRegs[6].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1804 pCtx->pXStateR0->x87.aRegs[6].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1805 iReg++;
1806 pCtx->pXStateR0->x87.aRegs[7].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1807 pCtx->pXStateR0->x87.aRegs[7].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1808 iReg++;
1809
1810 Assert(pInput->Names[iReg] == HvX64RegisterFpControlStatus);
1811 pCtx->pXStateR0->x87.FCW = paValues[iReg].FpControlStatus.FpControl;
1812 pCtx->pXStateR0->x87.FSW = paValues[iReg].FpControlStatus.FpStatus;
1813 pCtx->pXStateR0->x87.FTW = paValues[iReg].FpControlStatus.FpTag
1814 /*| (paValues[iReg].FpControlStatus.Reserved << 8)*/;
1815 pCtx->pXStateR0->x87.FOP = paValues[iReg].FpControlStatus.LastFpOp;
1816 pCtx->pXStateR0->x87.FPUIP = (uint32_t)paValues[iReg].FpControlStatus.LastFpRip;
1817 pCtx->pXStateR0->x87.CS = (uint16_t)(paValues[iReg].FpControlStatus.LastFpRip >> 32);
1818 pCtx->pXStateR0->x87.Rsrvd1 = (uint16_t)(paValues[iReg].FpControlStatus.LastFpRip >> 48);
1819 iReg++;
1820 }
1821
1822 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
1823 {
1824 Assert(pInput->Names[iReg] == HvX64RegisterXmmControlStatus);
1825 if (fWhat & CPUMCTX_EXTRN_X87)
1826 {
1827 pCtx->pXStateR0->x87.FPUDP = (uint32_t)paValues[iReg].XmmControlStatus.LastFpRdp;
1828 pCtx->pXStateR0->x87.DS = (uint16_t)(paValues[iReg].XmmControlStatus.LastFpRdp >> 32);
1829 pCtx->pXStateR0->x87.Rsrvd2 = (uint16_t)(paValues[iReg].XmmControlStatus.LastFpRdp >> 48);
1830 }
1831 pCtx->pXStateR0->x87.MXCSR = paValues[iReg].XmmControlStatus.XmmStatusControl;
1832 pCtx->pXStateR0->x87.MXCSR_MASK = paValues[iReg].XmmControlStatus.XmmStatusControlMask; /** @todo ??? (Isn't this an output field?) */
1833 iReg++;
1834 }
1835
1836 /* Vector state. */
1837 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
1838 {
1839 Assert(pInput->Names[iReg] == HvX64RegisterXmm0);
1840 Assert(pInput->Names[iReg+15] == HvX64RegisterXmm15);
1841 pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1842 pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1843 iReg++;
1844 pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1845 pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1846 iReg++;
1847 pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1848 pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1849 iReg++;
1850 pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1851 pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1852 iReg++;
1853 pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1854 pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1855 iReg++;
1856 pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1857 pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1858 iReg++;
1859 pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1860 pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1861 iReg++;
1862 pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1863 pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1864 iReg++;
1865 pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1866 pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1867 iReg++;
1868 pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1869 pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1870 iReg++;
1871 pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1872 pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1873 iReg++;
1874 pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1875 pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1876 iReg++;
1877 pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1878 pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1879 iReg++;
1880 pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1881 pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1882 iReg++;
1883 pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1884 pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1885 iReg++;
1886 pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1887 pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1888 iReg++;
1889 }
1890
1891
1892 /* MSRs */
1893 // HvX64RegisterTsc - don't touch
1894 if (fWhat & CPUMCTX_EXTRN_EFER)
1895 {
1896 Assert(pInput->Names[iReg] == HvX64RegisterEfer);
1897 if (paValues[iReg].Reg64 != pCtx->msrEFER)
1898 {
1899 Log7(("NEM/%u: MSR EFER changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrEFER, paValues[iReg].Reg64));
1900 if ((paValues[iReg].Reg64 ^ pCtx->msrEFER) & MSR_K6_EFER_NXE)
1901 PGMNotifyNxeChanged(pVCpu, RT_BOOL(paValues[iReg].Reg64 & MSR_K6_EFER_NXE));
1902 pCtx->msrEFER = paValues[iReg].Reg64;
1903 fMaybeChangedMode = true;
1904 }
1905 iReg++;
1906 }
1907 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1908 {
1909 Assert(pInput->Names[iReg] == HvX64RegisterKernelGsBase);
1910 if (pCtx->msrKERNELGSBASE != paValues[iReg].Reg64)
1911 Log7(("NEM/%u: MSR KERNELGSBASE changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrKERNELGSBASE, paValues[iReg].Reg64));
1912 pCtx->msrKERNELGSBASE = paValues[iReg].Reg64;
1913 iReg++;
1914 }
1915 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1916 {
1917 Assert(pInput->Names[iReg] == HvX64RegisterSysenterCs);
1918 if (pCtx->SysEnter.cs != paValues[iReg].Reg64)
1919 Log7(("NEM/%u: MSR SYSENTER.CS changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->SysEnter.cs, paValues[iReg].Reg64));
1920 pCtx->SysEnter.cs = paValues[iReg].Reg64;
1921 iReg++;
1922
1923 Assert(pInput->Names[iReg] == HvX64RegisterSysenterEip);
1924 if (pCtx->SysEnter.eip != paValues[iReg].Reg64)
1925 Log7(("NEM/%u: MSR SYSENTER.EIP changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->SysEnter.eip, paValues[iReg].Reg64));
1926 pCtx->SysEnter.eip = paValues[iReg].Reg64;
1927 iReg++;
1928
1929 Assert(pInput->Names[iReg] == HvX64RegisterSysenterEsp);
1930 if (pCtx->SysEnter.esp != paValues[iReg].Reg64)
1931 Log7(("NEM/%u: MSR SYSENTER.ESP changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->SysEnter.esp, paValues[iReg].Reg64));
1932 pCtx->SysEnter.esp = paValues[iReg].Reg64;
1933 iReg++;
1934 }
1935 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1936 {
1937 Assert(pInput->Names[iReg] == HvX64RegisterStar);
1938 if (pCtx->msrSTAR != paValues[iReg].Reg64)
1939 Log7(("NEM/%u: MSR STAR changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrSTAR, paValues[iReg].Reg64));
1940 pCtx->msrSTAR = paValues[iReg].Reg64;
1941 iReg++;
1942
1943 Assert(pInput->Names[iReg] == HvX64RegisterLstar);
1944 if (pCtx->msrLSTAR != paValues[iReg].Reg64)
1945 Log7(("NEM/%u: MSR LSTAR changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrLSTAR, paValues[iReg].Reg64));
1946 pCtx->msrLSTAR = paValues[iReg].Reg64;
1947 iReg++;
1948
1949 Assert(pInput->Names[iReg] == HvX64RegisterCstar);
1950 if (pCtx->msrCSTAR != paValues[iReg].Reg64)
1951 Log7(("NEM/%u: MSR CSTAR changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrCSTAR, paValues[iReg].Reg64));
1952 pCtx->msrCSTAR = paValues[iReg].Reg64;
1953 iReg++;
1954
1955 Assert(pInput->Names[iReg] == HvX64RegisterSfmask);
1956 if (pCtx->msrSFMASK != paValues[iReg].Reg64)
1957 Log7(("NEM/%u: MSR SFMASK changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrSFMASK, paValues[iReg].Reg64));
1958 pCtx->msrSFMASK = paValues[iReg].Reg64;
1959 iReg++;
1960 }
1961 bool fUpdateApicBase = false;
1962 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1963 {
1964 Assert(pInput->Names[iReg] == HvX64RegisterApicBase);
1965 const uint64_t uOldBase = APICGetBaseMsrNoCheck(pVCpu);
1966 if (paValues[iReg].Reg64 != uOldBase)
1967 {
1968 Log7(("NEM/%u: MSR APICBase changed %RX64 -> %RX64 (%RX64)\n",
1969 pVCpu->idCpu, uOldBase, paValues[iReg].Reg64, paValues[iReg].Reg64 ^ uOldBase));
1970 VBOXSTRICTRC rc2 = APICSetBaseMsr(pVCpu, paValues[iReg].Reg64);
1971 if (rc2 == VINF_CPUM_R3_MSR_WRITE)
1972 {
1973 pVCpu->nem.s.uPendingApicBase = paValues[iReg].Reg64;
1974 fUpdateApicBase = true;
1975 }
1976 else
1977 AssertLogRelMsg(rc2 == VINF_SUCCESS, ("rc2=%Rrc [%#RX64]\n", VBOXSTRICTRC_VAL(rc2), paValues[iReg].Reg64));
1978 }
1979 iReg++;
1980
1981 Assert(pInput->Names[iReg] == HvX64RegisterPat);
1982 if (pCtx->msrPAT != paValues[iReg].Reg64)
1983 Log7(("NEM/%u: MSR PAT changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrPAT, paValues[iReg].Reg64));
1984 pCtx->msrPAT = paValues[iReg].Reg64;
1985 iReg++;
1986
1987#if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
1988 Assert(pInput->Names[iReg] == HvX64RegisterMtrrCap);
1989 if (paValues[iReg].Reg64 != CPUMGetGuestIa32MtrrCap(pVCpu))
1990 Log7(("NEM/%u: MSR MTRR_CAP changed %RX64 -> %RX64 (!!)\n", pVCpu->idCpu, CPUMGetGuestIa32MtrrCap(pVCpu), paValues[iReg].Reg64));
1991 iReg++;
1992#endif
1993
1994 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
1995 Assert(pInput->Names[iReg] == HvX64RegisterMtrrDefType);
1996 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrDefType )
1997 Log7(("NEM/%u: MSR MTRR_DEF_TYPE changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrDefType, paValues[iReg].Reg64));
1998 pCtxMsrs->msr.MtrrDefType = paValues[iReg].Reg64;
1999 iReg++;
2000
2001         /** @todo we don't keep state for HvX64RegisterMtrrPhysBaseX and HvX64RegisterMtrrPhysMaskX */
2002
2003 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix64k00000);
2004 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix64K_00000 )
2005             Log7(("NEM/%u: MSR MTRR_FIX64K_00000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix64K_00000, paValues[iReg].Reg64));
2006 pCtxMsrs->msr.MtrrFix64K_00000 = paValues[iReg].Reg64;
2007 iReg++;
2008
2009 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix16k80000);
2010 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix16K_80000 )
2011 Log7(("NEM/%u: MSR MTRR_FIX16K_80000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_80000, paValues[iReg].Reg64));
2012 pCtxMsrs->msr.MtrrFix16K_80000 = paValues[iReg].Reg64;
2013 iReg++;
2014
2015 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix16kA0000);
2016 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix16K_A0000 )
2017 Log7(("NEM/%u: MSR MTRR_FIX16K_A0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_A0000, paValues[iReg].Reg64));
2018 pCtxMsrs->msr.MtrrFix16K_A0000 = paValues[iReg].Reg64;
2019 iReg++;
2020
2021 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kC0000);
2022 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_C0000 )
2023             Log7(("NEM/%u: MSR MTRR_FIX4K_C0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C0000, paValues[iReg].Reg64));
2024 pCtxMsrs->msr.MtrrFix4K_C0000 = paValues[iReg].Reg64;
2025 iReg++;
2026
2027 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kC8000);
2028 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_C8000 )
2029             Log7(("NEM/%u: MSR MTRR_FIX4K_C8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C8000, paValues[iReg].Reg64));
2030 pCtxMsrs->msr.MtrrFix4K_C8000 = paValues[iReg].Reg64;
2031 iReg++;
2032
2033 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kD0000);
2034 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_D0000 )
2035             Log7(("NEM/%u: MSR MTRR_FIX4K_D0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D0000, paValues[iReg].Reg64));
2036 pCtxMsrs->msr.MtrrFix4K_D0000 = paValues[iReg].Reg64;
2037 iReg++;
2038
2039 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kD8000);
2040 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_D8000 )
2041             Log7(("NEM/%u: MSR MTRR_FIX4K_D8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D8000, paValues[iReg].Reg64));
2042 pCtxMsrs->msr.MtrrFix4K_D8000 = paValues[iReg].Reg64;
2043 iReg++;
2044
2045 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kE0000);
2046 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_E0000 )
2047             Log7(("NEM/%u: MSR MTRR_FIX4K_E0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E0000, paValues[iReg].Reg64));
2048 pCtxMsrs->msr.MtrrFix4K_E0000 = paValues[iReg].Reg64;
2049 iReg++;
2050
2051 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kE8000);
2052 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_E8000 )
2053             Log7(("NEM/%u: MSR MTRR_FIX4K_E8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E8000, paValues[iReg].Reg64));
2054 pCtxMsrs->msr.MtrrFix4K_E8000 = paValues[iReg].Reg64;
2055 iReg++;
2056
2057 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kF0000);
2058 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_F0000 )
2059             Log7(("NEM/%u: MSR MTRR_FIX4K_F0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F0000, paValues[iReg].Reg64));
2060 pCtxMsrs->msr.MtrrFix4K_F0000 = paValues[iReg].Reg64;
2061 iReg++;
2062
2063 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kF8000);
2064 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_F8000 )
2065             Log7(("NEM/%u: MSR MTRR_FIX4K_F8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F8000, paValues[iReg].Reg64));
2066 pCtxMsrs->msr.MtrrFix4K_F8000 = paValues[iReg].Reg64;
2067 iReg++;
2068
2069 Assert(pInput->Names[iReg] == HvX64RegisterTscAux);
2070 if (paValues[iReg].Reg64 != pCtxMsrs->msr.TscAux )
2071 Log7(("NEM/%u: MSR TSC_AUX changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.TscAux, paValues[iReg].Reg64));
2072 pCtxMsrs->msr.TscAux = paValues[iReg].Reg64;
2073 iReg++;
2074
2075#if 0 /** @todo why can't we even read HvX64RegisterIa32MiscEnable? */
2076 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
2077 {
2078 Assert(pInput->Names[iReg] == HvX64RegisterIa32MiscEnable);
2079 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MiscEnable)
2080 Log7(("NEM/%u: MSR MISC_ENABLE changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MiscEnable, paValues[iReg].Reg64));
2081 pCtxMsrs->msr.MiscEnable = paValues[iReg].Reg64;
2082 iReg++;
2083 }
2084#endif
2085#ifdef LOG_ENABLED
2086 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
2087 {
2088 Assert(pInput->Names[iReg] == HvX64RegisterIa32FeatureControl);
2089 if (paValues[iReg].Reg64 != CPUMGetGuestIa32FeatureControl(pVCpu))
2090 Log7(("NEM/%u: MSR FEATURE_CONTROL changed %RX64 -> %RX64 (!!)\n", pVCpu->idCpu, CPUMGetGuestIa32FeatureControl(pVCpu), paValues[iReg].Reg64));
2091 iReg++;
2092 }
2093#endif
2094 }
2095
2096 /* Interruptibility. */
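    /* HvRegisterInterruptState provides the interrupt shadow and NMI masking bits; the RIP fetched
       alongside it is needed because VBox records interrupt inhibition as "inhibited until after
       the instruction at this RIP" (EMSetInhibitInterruptsPC). */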
2097 if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
2098 {
2099 Assert(pInput->Names[iReg] == HvRegisterInterruptState);
2100 Assert(pInput->Names[iReg + 1] == HvX64RegisterRip);
2101
2102 if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
2103 {
2104 pVCpu->nem.s.fLastInterruptShadow = paValues[iReg].InterruptState.InterruptShadow;
2105 if (paValues[iReg].InterruptState.InterruptShadow)
2106 EMSetInhibitInterruptsPC(pVCpu, paValues[iReg + 1].Reg64);
2107 else
2108 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2109 }
2110
2111 if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
2112 {
2113 if (paValues[iReg].InterruptState.NmiMasked)
2114 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
2115 else
2116 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
2117 }
2118
2119 fWhat |= CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI;
2120 iReg += 2;
2121 }
2122
2123 /* Event injection. */
2124 /// @todo HvRegisterPendingInterruption
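    /* For now the pending interruption/event registers are only sanity checked and logged; they are
       not yet imported into the CPUM context (see the todos above and below). */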
2125 Assert(pInput->Names[iReg] == HvRegisterPendingInterruption);
2126 if (paValues[iReg].PendingInterruption.InterruptionPending)
2127 {
2128 Log7(("PendingInterruption: type=%u vector=%#x errcd=%RTbool/%#x instr-len=%u nested=%u\n",
2129 paValues[iReg].PendingInterruption.InterruptionType, paValues[iReg].PendingInterruption.InterruptionVector,
2130 paValues[iReg].PendingInterruption.DeliverErrorCode, paValues[iReg].PendingInterruption.ErrorCode,
2131 paValues[iReg].PendingInterruption.InstructionLength, paValues[iReg].PendingInterruption.NestedEvent));
2132 AssertMsg((paValues[iReg].PendingInterruption.AsUINT64 & UINT64_C(0xfc00)) == 0,
2133 ("%#RX64\n", paValues[iReg].PendingInterruption.AsUINT64));
2134 }
2135
2136 /// @todo HvRegisterPendingEvent0
2137 /// @todo HvRegisterPendingEvent1
2138
2139 /* Almost done, just update extrn flags and maybe change PGM mode. */
2140 pCtx->fExtrn &= ~fWhat;
2141 if (!(pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT))))
2142 pCtx->fExtrn = 0;
2143
2144 /* Typical. */
2145 if (!fMaybeChangedMode && !fFlushTlb && !fUpdateApicBase)
2146 return VINF_SUCCESS;
2147
2148 /*
2149 * Slow.
2150 */
2151 int rc = VINF_SUCCESS;
2152 if (fMaybeChangedMode)
2153 {
2154 rc = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
2155 if (rc == VINF_PGM_CHANGE_MODE)
2156 {
2157 LogFlow(("nemR0WinImportState: -> VERR_NEM_CHANGE_PGM_MODE!\n"));
2158 return VERR_NEM_CHANGE_PGM_MODE;
2159 }
2160 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc));
2161 }
2162
2163 if (fFlushTlb)
2164 {
2165 LogFlow(("nemR0WinImportState: -> VERR_NEM_FLUSH_TLB!\n"));
2166 rc = VERR_NEM_FLUSH_TLB; /* Calling PGMFlushTLB w/o long jump setup doesn't work, ring-3 does it. */
2167 }
2168
2169 if (fUpdateApicBase && rc == VINF_SUCCESS)
2170 {
2171 LogFlow(("nemR0WinImportState: -> VERR_NEM_UPDATE_APIC_BASE!\n"));
2172 rc = VERR_NEM_UPDATE_APIC_BASE;
2173 }
2174
2175 return rc;
2176}
2177
2178
2179/**
2180 * Import the state from the native API (back to CPUMCTX).
2181 *
2182 * @returns VBox status code
2183 * @param pGVM The ring-0 VM handle.
2184 * @param pVM The cross context VM handle.
2185 * @param idCpu The calling EMT. Necessary for getting the
2186 * hypercall page and arguments.
2187 * @param fWhat What to import, CPUMCTX_EXTRN_XXX. Set
2188 *                  CPUMCTX_EXTRN_ALL for everything.
2189 */
2190VMMR0_INT_DECL(int) NEMR0ImportState(PGVM pGVM, PVM pVM, VMCPUID idCpu, uint64_t fWhat)
2191{
2192 /*
2193 * Validate the call.
2194 */
2195 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
2196 if (RT_SUCCESS(rc))
2197 {
2198 PVMCPU pVCpu = &pVM->aCpus[idCpu];
2199 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2200 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2201
2202 /*
2203 * Call worker.
2204 */
2205 rc = nemR0WinImportState(pGVM, pGVCpu, CPUMQueryGuestCtxPtr(pVCpu), fWhat);
2206 }
2207 return rc;
2208}
2209
2210
2211/**
2212 * Worker for NEMR0QueryCpuTick and the ring-0 NEMHCQueryCpuTick.
2213 *
2214 * @returns VBox status code.
2215 * @param pGVM The ring-0 VM handle.
2216 * @param pGVCpu The ring-0 VCPU handle.
2217 * @param pcTicks Where to return the current CPU tick count.
2218 * @param pcAux Where to return the hyper-V TSC_AUX value. Optional.
2219 */
2220NEM_TMPL_STATIC int nemR0WinQueryCpuTick(PGVM pGVM, PGVMCPU pGVCpu, uint64_t *pcTicks, uint32_t *pcAux)
2221{
2222 /*
2223 * Hypercall parameters.
2224 */
2225 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
2226 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2227 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2228
2229 pInput->PartitionId = pGVM->nem.s.idHvPartition;
2230 pInput->VpIndex = pGVCpu->idCpu;
2231 pInput->fFlags = 0;
2232 pInput->Names[0] = HvX64RegisterTsc;
2233 pInput->Names[1] = HvX64RegisterTscAux;
2234
2235 size_t const cbInput = RT_ALIGN_Z(RT_OFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[2]), 32);
2236 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
2237 RT_BZERO(paValues, sizeof(paValues[0]) * 2);
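    /* A rep hypercall returns HV_STATUS_SUCCESS together with the number of completed repetitions;
       HV_MAKE_CALL_REP_RET(2) constructs that expected value for the check below. */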
2238
2239 /*
2240 * Make the hypercall.
2241 */
2242 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, 2),
2243 pGVCpu->nem.s.HypercallData.HCPhysPage,
2244 pGVCpu->nem.s.HypercallData.HCPhysPage + cbInput);
2245 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(2), ("uResult=%RX64 cRegs=%#x\n", uResult, 2),
2246 VERR_NEM_GET_REGISTERS_FAILED);
2247
2248 /*
2249 * Get results.
2250 */
2251 *pcTicks = paValues[0].Reg64;
2252 if (pcAux)
2253         *pcAux = paValues[1].Reg32;
2254 return VINF_SUCCESS;
2255}
2256
2257
2258/**
2259 * Queries the TSC and TSC_AUX values, putting the results in the calling vCPU's NEM hypercall area.
2260 *
2261 * @returns VBox status code
2262 * @param pGVM The ring-0 VM handle.
2263 * @param pVM The cross context VM handle.
2264 * @param idCpu The calling EMT. Necessary for getting the
2265 * hypercall page and arguments.
2266 */
2267VMMR0_INT_DECL(int) NEMR0QueryCpuTick(PGVM pGVM, PVM pVM, VMCPUID idCpu)
2268{
2269 /*
2270 * Validate the call.
2271 */
2272 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
2273 if (RT_SUCCESS(rc))
2274 {
2275 PVMCPU pVCpu = &pVM->aCpus[idCpu];
2276 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2277 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2278
2279 /*
2280 * Call worker.
2281 */
2282 pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks = 0;
2283 pVCpu->nem.s.Hypercall.QueryCpuTick.uAux = 0;
2284 rc = nemR0WinQueryCpuTick(pGVM, pGVCpu, &pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks,
2285 &pVCpu->nem.s.Hypercall.QueryCpuTick.uAux);
2286 }
2287 return rc;
2288}
2289
2290
2291/**
2292 * Worker for NEMR0ResumeCpuTickOnAll and the ring-0 NEMHCResumeCpuTickOnAll.
2293 *
2294 * @returns VBox status code.
2295 * @param pGVM The ring-0 VM handle.
2296 * @param pGVCpu The ring-0 VCPU handle.
2297 * @param uPausedTscValue The TSC value at the time of pausing.
2298 */
2299NEM_TMPL_STATIC int nemR0WinResumeCpuTickOnAll(PGVM pGVM, PGVMCPU pGVCpu, uint64_t uPausedTscValue)
2300{
2301 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2302
2303 /*
2304 * Set up the hypercall parameters.
2305 */
2306 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
2307 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2308
2309 pInput->PartitionId = pGVM->nem.s.idHvPartition;
2310 pInput->VpIndex = 0;
2311 pInput->RsvdZ = 0;
2312 pInput->Elements[0].Name = HvX64RegisterTsc;
2313 pInput->Elements[0].Pad0 = 0;
2314 pInput->Elements[0].Pad1 = 0;
2315 pInput->Elements[0].Value.Reg128.High64 = 0;
2316 pInput->Elements[0].Value.Reg64 = uPausedTscValue;
2317
2318 /*
2319 * Disable interrupts and do the first virtual CPU.
2320 */
2321 RTCCINTREG const fSavedFlags = ASMIntDisableFlags();
2322 uint64_t const uFirstTsc = ASMReadTSC();
2323 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
2324 pGVCpu->nem.s.HypercallData.HCPhysPage, 0 /* no output */);
2325 AssertLogRelMsgReturnStmt(uResult == HV_MAKE_CALL_REP_RET(1), ("uResult=%RX64 uTsc=%#RX64\n", uResult, uPausedTscValue),
2326 ASMSetFlags(fSavedFlags), VERR_NEM_SET_TSC);
2327
2328 /*
2329 * Do secondary processors, adjusting for elapsed TSC and keeping fingers crossed
2330 * that we don't introduce too much drift here.
2331 */
2332 for (VMCPUID iCpu = 1; iCpu < pGVM->cCpus; iCpu++)
2333 {
2334 Assert(pInput->PartitionId == pGVM->nem.s.idHvPartition);
2335 Assert(pInput->RsvdZ == 0);
2336 Assert(pInput->Elements[0].Name == HvX64RegisterTsc);
2337 Assert(pInput->Elements[0].Pad0 == 0);
2338 Assert(pInput->Elements[0].Pad1 == 0);
2339 Assert(pInput->Elements[0].Value.Reg128.High64 == 0);
2340
2341 pInput->VpIndex = iCpu;
2342 const uint64_t offDelta = (ASMReadTSC() - uFirstTsc);
2343 pInput->Elements[0].Value.Reg64 = uPausedTscValue + offDelta;
2344
2345 uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
2346 pGVCpu->nem.s.HypercallData.HCPhysPage, 0 /* no output */);
2347 AssertLogRelMsgReturnStmt(uResult == HV_MAKE_CALL_REP_RET(1),
2348 ("uResult=%RX64 uTsc=%#RX64 + %#RX64\n", uResult, uPausedTscValue, offDelta),
2349 ASMSetFlags(fSavedFlags), VERR_NEM_SET_TSC);
2350 }
2351
2352 /*
2353 * Done.
2354 */
2355 ASMSetFlags(fSavedFlags);
2356 return VINF_SUCCESS;
2357}
2358
2359
2360/**
2361 * Sets the TSC register to @a uPausedTscValue on all CPUs.
2362 *
2363 * @returns VBox status code
2364 * @param pGVM The ring-0 VM handle.
2365 * @param pVM The cross context VM handle.
2366 * @param idCpu The calling EMT. Necessary for getting the
2367 * hypercall page and arguments.
2368 * @param uPausedTscValue The TSC value at the time of pausing.
2369 */
2370VMMR0_INT_DECL(int) NEMR0ResumeCpuTickOnAll(PGVM pGVM, PVM pVM, VMCPUID idCpu, uint64_t uPausedTscValue)
2371{
2372 /*
2373 * Validate the call.
2374 */
2375 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
2376 if (RT_SUCCESS(rc))
2377 {
2378 PVMCPU pVCpu = &pVM->aCpus[idCpu];
2379 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2380 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2381
2382 /*
2383 * Call worker.
2384 */
2385 pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks = 0;
2386 pVCpu->nem.s.Hypercall.QueryCpuTick.uAux = 0;
2387 rc = nemR0WinResumeCpuTickOnAll(pGVM, pGVCpu, uPausedTscValue);
2388 }
2389 return rc;
2390}
2391
2392
2393VMMR0_INT_DECL(VBOXSTRICTRC) NEMR0RunGuestCode(PGVM pGVM, VMCPUID idCpu)
2394{
2395#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
2396 PVM pVM = pGVM->pVM;
2397 return nemHCWinRunGC(pVM, &pVM->aCpus[idCpu], pGVM, &pGVM->aCpus[idCpu]);
2398#else
2399 RT_NOREF(pGVM, idCpu);
2400 return VERR_NOT_IMPLEMENTED;
2401#endif
2402}
2403
2404
2405/**
2406 * Updates statistics in the VM structure.
2407 *
2408 * @returns VBox status code.
2409 * @param pGVM The ring-0 VM handle.
2410 * @param pVM The cross context VM handle.
2411 * @param idCpu The calling EMT, or NIL. Necessary for getting the hypercall
2412 * page and arguments.
2413 */
2414VMMR0_INT_DECL(int) NEMR0UpdateStatistics(PGVM pGVM, PVM pVM, VMCPUID idCpu)
2415{
2416 /*
2417 * Validate the call.
2418 */
2419 int rc;
2420 if (idCpu == NIL_VMCPUID)
2421 rc = GVMMR0ValidateGVMandVM(pGVM, pVM);
2422 else
2423 rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
2424 if (RT_SUCCESS(rc))
2425 {
2426 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2427
2428 PNEMR0HYPERCALLDATA pHypercallData = idCpu != NIL_VMCPUID
2429 ? &pGVM->aCpus[idCpu].nem.s.HypercallData
2430 : &pGVM->nem.s.HypercallData;
2431 if ( RT_VALID_PTR(pHypercallData->pbPage)
2432 && pHypercallData->HCPhysPage != NIL_RTHCPHYS)
2433 {
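            /* The VM-wide hypercall page (idCpu == NIL_VMCPUID) can be reached from any caller, so it
               is protected by a critical section; the per-vCPU pages are only used by their owning EMT
               and need no locking. */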
2434 if (idCpu == NIL_VMCPUID)
2435 rc = RTCritSectEnter(&pGVM->nem.s.HypercallDataCritSect);
2436 if (RT_SUCCESS(rc))
2437 {
2438 /*
2439 * Query the memory statistics for the partition.
2440 */
2441 HV_INPUT_GET_MEMORY_BALANCE *pInput = (HV_INPUT_GET_MEMORY_BALANCE *)pHypercallData->pbPage;
2442 pInput->TargetPartitionId = pGVM->nem.s.idHvPartition;
2443 pInput->ProximityDomainInfo.Flags.ProximityPreferred = 0;
2444 pInput->ProximityDomainInfo.Flags.ProxyimityInfoValid = 0;
2445 pInput->ProximityDomainInfo.Flags.Reserved = 0;
2446 pInput->ProximityDomainInfo.Id = 0;
2447
2448 HV_OUTPUT_GET_MEMORY_BALANCE *pOutput = (HV_OUTPUT_GET_MEMORY_BALANCE *)(pInput + 1);
2449 RT_ZERO(*pOutput);
2450
2451 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallGetMemoryBalance,
2452 pHypercallData->HCPhysPage,
2453 pHypercallData->HCPhysPage + sizeof(*pInput));
2454 if (uResult == HV_STATUS_SUCCESS)
2455 {
2456 pVM->nem.s.R0Stats.cPagesAvailable = pOutput->PagesAvailable;
2457 pVM->nem.s.R0Stats.cPagesInUse = pOutput->PagesInUse;
2458 rc = VINF_SUCCESS;
2459 }
2460 else
2461 {
2462 LogRel(("HvCallGetMemoryBalance -> %#RX64 (%#RX64 %#RX64)!!\n",
2463 uResult, pOutput->PagesAvailable, pOutput->PagesInUse));
2464 rc = VERR_NEM_IPE_0;
2465 }
2466
2467 if (idCpu == NIL_VMCPUID)
2468 RTCritSectLeave(&pGVM->nem.s.HypercallDataCritSect);
2469 }
2470 }
2471 else
2472 rc = VERR_WRONG_ORDER;
2473 }
2474 return rc;
2475}
2476
2477
2478#if 1 && defined(DEBUG_bird)
2479/**
2480 * Debug only interface for poking around and exploring Hyper-V stuff.
2481 *
2482 * @param pGVM The ring-0 VM handle.
2483 * @param pVM The cross context VM handle.
2484 * @param idCpu The calling EMT.
2485 * @param u64Arg What to query. 0 == registers.
2486 */
2487VMMR0_INT_DECL(int) NEMR0DoExperiment(PGVM pGVM, PVM pVM, VMCPUID idCpu, uint64_t u64Arg)
2488{
2489 /*
2490 * Resolve CPU structures.
2491 */
2492 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
2493 if (RT_SUCCESS(rc))
2494 {
2495 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2496
2497 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2498 PVMCPU pVCpu = &pVM->aCpus[idCpu];
2499 if (u64Arg == 0)
2500 {
2501 /*
2502 * Query register.
2503 */
2504 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
2505 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2506
2507 size_t const cbInput = RT_ALIGN_Z(RT_OFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[1]), 32);
2508 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
2509 RT_BZERO(paValues, sizeof(paValues[0]) * 1);
2510
2511 pInput->PartitionId = pGVM->nem.s.idHvPartition;
2512 pInput->VpIndex = pGVCpu->idCpu;
2513 pInput->fFlags = 0;
2514 pInput->Names[0] = (HV_REGISTER_NAME)pVCpu->nem.s.Hypercall.Experiment.uItem;
2515
2516 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, 1),
2517 pGVCpu->nem.s.HypercallData.HCPhysPage,
2518 pGVCpu->nem.s.HypercallData.HCPhysPage + cbInput);
2519 pVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_MAKE_CALL_REP_RET(1);
2520 pVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;
2521 pVCpu->nem.s.Hypercall.Experiment.uLoValue = paValues[0].Reg128.Low64;
2522 pVCpu->nem.s.Hypercall.Experiment.uHiValue = paValues[0].Reg128.High64;
2523 rc = VINF_SUCCESS;
2524 }
2525 else if (u64Arg == 1)
2526 {
2527 /*
2528 * Query partition property.
2529 */
2530 HV_INPUT_GET_PARTITION_PROPERTY *pInput = (HV_INPUT_GET_PARTITION_PROPERTY *)pGVCpu->nem.s.HypercallData.pbPage;
2531 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2532
2533 size_t const cbInput = RT_ALIGN_Z(sizeof(*pInput), 32);
2534 HV_OUTPUT_GET_PARTITION_PROPERTY *pOutput = (HV_OUTPUT_GET_PARTITION_PROPERTY *)((uint8_t *)pInput + cbInput);
2535 pOutput->PropertyValue = 0;
2536
2537 pInput->PartitionId = pGVM->nem.s.idHvPartition;
2538 pInput->PropertyCode = (HV_PARTITION_PROPERTY_CODE)pVCpu->nem.s.Hypercall.Experiment.uItem;
2539 pInput->uPadding = 0;
2540
2541 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallGetPartitionProperty,
2542 pGVCpu->nem.s.HypercallData.HCPhysPage,
2543 pGVCpu->nem.s.HypercallData.HCPhysPage + cbInput);
2544 pVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_STATUS_SUCCESS;
2545 pVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;
2546 pVCpu->nem.s.Hypercall.Experiment.uLoValue = pOutput->PropertyValue;
2547 pVCpu->nem.s.Hypercall.Experiment.uHiValue = 0;
2548 rc = VINF_SUCCESS;
2549 }
2550 else if (u64Arg == 2)
2551 {
2552 /*
2553 * Set register.
2554 */
2555 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
2556 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2557 RT_BZERO(pInput, RT_OFFSETOF(HV_INPUT_SET_VP_REGISTERS, Elements[1]));
2558
2559 pInput->PartitionId = pGVM->nem.s.idHvPartition;
2560 pInput->VpIndex = pGVCpu->idCpu;
2561 pInput->RsvdZ = 0;
2562 pInput->Elements[0].Name = (HV_REGISTER_NAME)pVCpu->nem.s.Hypercall.Experiment.uItem;
2563 pInput->Elements[0].Value.Reg128.High64 = pVCpu->nem.s.Hypercall.Experiment.uHiValue;
2564 pInput->Elements[0].Value.Reg128.Low64 = pVCpu->nem.s.Hypercall.Experiment.uLoValue;
2565
2566 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
2567 pGVCpu->nem.s.HypercallData.HCPhysPage, 0);
2568 pVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_MAKE_CALL_REP_RET(1);
2569 pVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;
2570 rc = VINF_SUCCESS;
2571 }
2572 else
2573 rc = VERR_INVALID_FUNCTION;
2574 }
2575 return rc;
2576}
2577#endif /* DEBUG_bird */
2578