VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp@72963

Last change on this file since 72963 was 72924, checked in by vboxsync, 6 years ago

NEM/win: Make it possible to select between ring-0 runloop hypercalls+VID.SYS and ring-3 runloop using WHv API via CFGM setting. bugref:9044
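
The runloop choice is made during ring-3 NEM init from a CFGM value, before the partition is brought up. A minimal sketch of how such a flag would be queried (the node, the key name "UseRing0Runloop" and the default are assumptions for illustration, not confirmed by this page):

    /* Hypothetical ring-3 query of the runloop selection flag under the NEM CFGM node. */
    bool fUseRing0Runloop = true;
    int rcCfg = CFGMR3QueryBoolDef(pCfgNem, "UseRing0Runloop", &fUseRing0Runloop, true /* assumed default */);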

1/* $Id: NEMR0Native-win.cpp 72924 2018-07-05 16:14:26Z vboxsync $ */
2/** @file
3 * NEM - Native execution manager, native ring-0 Windows backend.
4 */
5
6/*
7 * Copyright (C) 2018 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_NEM
23#define VMCPU_INCL_CPUM_GST_CTX
24#include <iprt/nt/nt.h>
25#include <iprt/nt/hyperv.h>
26#include <iprt/nt/vid.h>
27#include <winerror.h>
28
29#include <VBox/vmm/nem.h>
30#include <VBox/vmm/iem.h>
31#include <VBox/vmm/em.h>
32#include <VBox/vmm/apic.h>
33#include <VBox/vmm/pdm.h>
34#include <VBox/vmm/dbgftrace.h>
35#include "NEMInternal.h"
36#include <VBox/vmm/gvm.h>
37#include <VBox/vmm/vm.h>
38#include <VBox/vmm/gvmm.h>
39#include <VBox/param.h>
40
41#include <iprt/dbg.h>
42#include <iprt/memobj.h>
43#include <iprt/string.h>
44
45
46/* Assert compile context sanity. */
47#ifndef RT_OS_WINDOWS
48# error "Windows only file!"
49#endif
50#ifndef RT_ARCH_AMD64
51# error "AMD64 only file!"
52#endif
53
54
55/*********************************************************************************************************************************
56* Internal Functions *
57*********************************************************************************************************************************/
58typedef uint32_t DWORD; /* for winerror.h constants */
59
60
61/*********************************************************************************************************************************
62* Global Variables *
63*********************************************************************************************************************************/
64static uint64_t (*g_pfnHvlInvokeHypercall)(uint64_t uCallInfo, uint64_t HCPhysInput, uint64_t HCPhysOutput);
65
66/**
67 * WinHvr.sys!WinHvDepositMemory
68 *
69 * This API will try to allocate cPages on IdealNode and deposit them to the
70 * hypervisor for use with the given partition. The memory will be freed when
71 * VID.SYS calls WinHvWithdrawAllMemory as the partition is cleaned up.
72 *
73 * Apparently node numbers above 64 have a different meaning.
74 */
75static NTSTATUS (*g_pfnWinHvDepositMemory)(uintptr_t idPartition, size_t cPages, uintptr_t IdealNode, size_t *pcActuallyAdded);
76
77
78/*********************************************************************************************************************************
79* Internal Functions *
80*********************************************************************************************************************************/
81NEM_TMPL_STATIC int nemR0WinMapPages(PGVM pGVM, PVM pVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
82 uint32_t cPages, uint32_t fFlags);
83NEM_TMPL_STATIC int nemR0WinUnmapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys, uint32_t cPages);
84#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
85NEM_TMPL_STATIC int nemR0WinExportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx);
86NEM_TMPL_STATIC int nemR0WinImportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat);
87NEM_TMPL_STATIC int nemR0WinQueryCpuTick(PGVM pGVM, PGVMCPU pGVCpu, uint64_t *pcTicks, uint32_t *pcAux);
88NEM_TMPL_STATIC int nemR0WinResumeCpuTickOnAll(PGVM pGVM, PGVMCPU pGVCpu, uint64_t uPausedTscValue);
89#endif
90DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, uint32_t uFunction, void *pvInput, uint32_t cbInput,
91 void *pvOutput, uint32_t cbOutput);
92
93
94/*
95 * Instantiate the code we share with ring-3.
96 */
97#ifdef NEM_WIN_WITH_RING0_RUNLOOP
98# define NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
99#else
100# undef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
101#endif
102#include "../VMMAll/NEMAllNativeTemplate-win.cpp.h"
103
104
105
106/**
107 * Worker for NEMR0InitVM that allocates a hypercall page.
108 *
109 * @returns VBox status code.
110 * @param pHypercallData The hypercall data page to initialize.
111 */
112static int nemR0InitHypercallData(PNEMR0HYPERCALLDATA pHypercallData)
113{
114 int rc = RTR0MemObjAllocPage(&pHypercallData->hMemObj, PAGE_SIZE, false /*fExecutable*/);
115 if (RT_SUCCESS(rc))
116 {
117 pHypercallData->HCPhysPage = RTR0MemObjGetPagePhysAddr(pHypercallData->hMemObj, 0 /*iPage*/);
118 AssertStmt(pHypercallData->HCPhysPage != NIL_RTHCPHYS, rc = VERR_INTERNAL_ERROR_3);
119 pHypercallData->pbPage = (uint8_t *)RTR0MemObjAddress(pHypercallData->hMemObj);
120 AssertStmt(pHypercallData->pbPage, rc = VERR_INTERNAL_ERROR_3);
121 if (RT_SUCCESS(rc))
122 return VINF_SUCCESS;
123
124 /* bail out */
125 RTR0MemObjFree(pHypercallData->hMemObj, true /*fFreeMappings*/);
126 }
127 pHypercallData->hMemObj = NIL_RTR0MEMOBJ;
128 pHypercallData->HCPhysPage = NIL_RTHCPHYS;
129 pHypercallData->pbPage = NULL;
130 return rc;
131}
132
133/**
134 * Worker for NEMR0CleanupVM and NEMR0InitVM that cleans up a hypercall page.
135 *
136 * @param pHypercallData The hypercall data page to uninitialize.
137 */
138static void nemR0DeleteHypercallData(PNEMR0HYPERCALLDATA pHypercallData)
139{
140 /* Check pbPage here since it's NULL when not in use, whereas hMemObj can be
141 either NIL_RTR0MEMOBJ or 0 (and those aren't necessarily the same value). */
142 if (pHypercallData->pbPage != NULL)
143 {
144 RTR0MemObjFree(pHypercallData->hMemObj, true /*fFreeMappings*/);
145 pHypercallData->pbPage = NULL;
146 }
147 pHypercallData->hMemObj = NIL_RTR0MEMOBJ;
148 pHypercallData->HCPhysPage = NIL_RTHCPHYS;
149}
150
151
152/**
153 * Called by NEMR3Init to make sure we've got what we need.
154 *
155 * @returns VBox status code.
156 * @param pGVM The ring-0 VM handle.
157 * @param pVM The cross context VM handle.
158 * @thread EMT(0)
159 */
160VMMR0_INT_DECL(int) NEMR0InitVM(PGVM pGVM, PVM pVM)
161{
162 AssertCompile(sizeof(pGVM->nem.s) <= sizeof(pGVM->nem.padding));
163 AssertCompile(sizeof(pGVM->aCpus[0].nem.s) <= sizeof(pGVM->aCpus[0].nem.padding));
164
165 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, 0);
166 AssertRCReturn(rc, rc);
167
168 /*
169 * We want to perform hypercalls here. The NT kernel started to expose a very low
170 * level interface for this somewhere between build 14271 and 16299. Since we
171 * need build 17134 to get anywhere at all, the exact build is not relevant here.
172 *
173 * We also need to deposit memory to the hypervisor for use with the partition
174 * (page mapping structures and the like).
175 */
176 RTDBGKRNLINFO hKrnlInfo;
177 rc = RTR0DbgKrnlInfoOpen(&hKrnlInfo, 0);
178 if (RT_SUCCESS(rc))
179 {
180 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, NULL, "HvlInvokeHypercall", (void **)&g_pfnHvlInvokeHypercall);
181 if (RT_SUCCESS(rc))
182 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, "winhvr.sys", "WinHvDepositMemory", (void **)&g_pfnWinHvDepositMemory);
183 RTR0DbgKrnlInfoRelease(hKrnlInfo);
184 if (RT_SUCCESS(rc))
185 {
186 /*
187 * Allocate a page for non-EMT threads to use for hypercalls (update
188 * statistics and such) and a critical section protecting it.
189 */
190 rc = RTCritSectInit(&pGVM->nem.s.HypercallDataCritSect);
191 if (RT_SUCCESS(rc))
192 {
193 rc = nemR0InitHypercallData(&pGVM->nem.s.HypercallData);
194 if (RT_SUCCESS(rc))
195 {
196 /*
197 * Allocate a page for each VCPU to place hypercall data on.
198 */
199 for (VMCPUID i = 0; i < pGVM->cCpus; i++)
200 {
201 rc = nemR0InitHypercallData(&pGVM->aCpus[i].nem.s.HypercallData);
202 if (RT_FAILURE(rc))
203 {
204 while (i-- > 0)
205 nemR0DeleteHypercallData(&pGVM->aCpus[i].nem.s.HypercallData);
206 break;
207 }
208 }
209 if (RT_SUCCESS(rc))
210 {
211 /*
212 * So far, so good.
213 */
214 return rc;
215 }
216
217 /*
218 * Bail out.
219 */
220 nemR0DeleteHypercallData(&pGVM->nem.s.HypercallData);
221 }
222 RTCritSectDelete(&pGVM->nem.s.HypercallDataCritSect);
223 }
224 }
225 else
226 rc = VERR_NEM_MISSING_KERNEL_API;
227 }
228
229 RT_NOREF(pVM);
230 return rc;
231}
232
233
234/**
235 * Perform an I/O control operation on the partition handle (VID.SYS).
236 *
237 * @returns NT status code.
238 * @param pGVM The ring-0 VM structure.
239 * @param uFunction The function to perform.
240 * @param pvInput The input buffer. This must point within the VM
241 * structure so we can easily convert to a ring-3
242 * pointer if necessary.
243 * @param cbInput The size of the input. @a pvInput must be NULL when
244 * zero.
245 * @param pvOutput The output buffer. This must also point within the
246 * VM structure for ring-3 pointer magic.
247 * @param cbOutput The size of the output. @a pvOutput must be NULL
248 * when zero.
249 */
250DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, uint32_t uFunction, void *pvInput, uint32_t cbInput,
251 void *pvOutput, uint32_t cbOutput)
252{
253#ifdef RT_STRICT
254 /*
255 * Input and output parameters are part of the VM CPU structure.
256 */
257 PVM pVM = pGVM->pVM;
258 size_t const cbVM = RT_UOFFSETOF(VM, aCpus[pGVM->cCpus]);
259 if (pvInput)
260 AssertReturn(((uintptr_t)pvInput + cbInput) - (uintptr_t)pVM <= cbVM, VERR_INVALID_PARAMETER);
261 if (pvOutput)
262 AssertReturn(((uintptr_t)pvOutput + cbOutput) - (uintptr_t)pVM <= cbVM, VERR_INVALID_PARAMETER);
263#endif
264
265 int32_t rcNt = STATUS_UNSUCCESSFUL;
266 int rc = SUPR0IoCtlPerform(pGVM->nem.s.pIoCtlCtx, uFunction,
267 pvInput,
268 pvInput ? (uintptr_t)pvInput + pGVM->nem.s.offRing3ConversionDelta : NIL_RTR3PTR,
269 cbInput,
270 pvOutput,
271 pvOutput ? (uintptr_t)pvOutput + pGVM->nem.s.offRing3ConversionDelta : NIL_RTR3PTR,
272 cbOutput,
273 &rcNt);
274 if (RT_SUCCESS(rc) || !NT_SUCCESS((NTSTATUS)rcNt))
275 return (NTSTATUS)rcNt;
276 return STATUS_UNSUCCESSFUL;
277}
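/*
 * Note on the ring-3 pointer conversion used above: NEMR0InitVMPart2 computes
 *     offRing3ConversionDelta = (uintptr_t)pVM->pVMR3 - (uintptr_t)pGVM->pVM;
 * so for any pointer p inside the ring-0 mapping of the VM structure the matching
 * ring-3 address is (uintptr_t)p + offRing3ConversionDelta. This is why both the
 * input and output buffers are required to lie within the VM structure (see the
 * RT_STRICT checks above).
 */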
278
279
280/**
281 * 2nd part of the initialization, after we've got a partition handle.
282 *
283 * @returns VBox status code.
284 * @param pGVM The ring-0 VM handle.
285 * @param pVM The cross context VM handle.
286 * @thread EMT(0)
287 */
288VMMR0_INT_DECL(int) NEMR0InitVMPart2(PGVM pGVM, PVM pVM)
289{
290 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, 0);
291 AssertRCReturn(rc, rc);
292 SUPR0Printf("NEMR0InitVMPart2\n"); LogRel(("2: NEMR0InitVMPart2\n"));
293
294 /*
295 * Copy and validate the I/O control information from ring-3.
296 */
297 NEMWINIOCTL Copy = pVM->nem.s.IoCtlGetHvPartitionId;
298 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);
299 AssertLogRelReturn(Copy.cbInput == 0, VERR_NEM_INIT_FAILED);
300 AssertLogRelReturn(Copy.cbOutput == sizeof(HV_PARTITION_ID), VERR_NEM_INIT_FAILED);
301 pGVM->nem.s.IoCtlGetHvPartitionId = Copy;
302
303 Copy = pVM->nem.s.IoCtlStartVirtualProcessor;
304 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);
305 AssertLogRelReturn(Copy.cbInput == sizeof(HV_VP_INDEX), VERR_NEM_INIT_FAILED);
306 AssertLogRelReturn(Copy.cbOutput == 0, VERR_NEM_INIT_FAILED);
307 AssertLogRelReturn(Copy.uFunction != pGVM->nem.s.IoCtlGetHvPartitionId.uFunction, VERR_NEM_INIT_FAILED);
308 pGVM->nem.s.IoCtlStartVirtualProcessor = Copy;
309
310 Copy = pVM->nem.s.IoCtlStopVirtualProcessor;
311 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);
312 AssertLogRelReturn(Copy.cbInput == sizeof(HV_VP_INDEX), VERR_NEM_INIT_FAILED);
313 AssertLogRelReturn(Copy.cbOutput == 0, VERR_NEM_INIT_FAILED);
314 AssertLogRelReturn(Copy.uFunction != pGVM->nem.s.IoCtlGetHvPartitionId.uFunction, VERR_NEM_INIT_FAILED);
315 AssertLogRelReturn(Copy.uFunction != pGVM->nem.s.IoCtlStartVirtualProcessor.uFunction, VERR_NEM_INIT_FAILED);
316 pGVM->nem.s.IoCtlStopVirtualProcessor = Copy;
317
318 Copy = pVM->nem.s.IoCtlMessageSlotHandleAndGetNext;
319 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);
320 AssertLogRelReturn(Copy.cbInput == sizeof(VID_IOCTL_INPUT_MESSAGE_SLOT_HANDLE_AND_GET_NEXT), VERR_NEM_INIT_FAILED);
321 AssertLogRelReturn(Copy.cbOutput == 0, VERR_NEM_INIT_FAILED);
322 AssertLogRelReturn(Copy.uFunction != pGVM->nem.s.IoCtlGetHvPartitionId.uFunction, VERR_NEM_INIT_FAILED);
323 AssertLogRelReturn(Copy.uFunction != pGVM->nem.s.IoCtlStartVirtualProcessor.uFunction, VERR_NEM_INIT_FAILED);
324 AssertLogRelReturn(Copy.uFunction != pGVM->nem.s.IoCtlStopVirtualProcessor.uFunction, VERR_NEM_INIT_FAILED);
325 pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext = Copy;
326
327 /*
328 * Set up an I/O control context for the partition handle, for later use.
329 */
330 rc = SUPR0IoCtlSetupForHandle(pGVM->pSession, pVM->nem.s.hPartitionDevice, 0, &pGVM->nem.s.pIoCtlCtx);
331 AssertLogRelRCReturn(rc, rc);
332 pGVM->nem.s.offRing3ConversionDelta = (uintptr_t)pVM->pVMR3 - (uintptr_t)pGVM->pVM;
333
334 /*
335 * Get the partition ID.
336 */
337 PVMCPU pVCpu = &pGVM->pVM->aCpus[0];
338 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlGetHvPartitionId.uFunction, NULL, 0,
339 &pVCpu->nem.s.uIoCtlBuf.idPartition, sizeof(pVCpu->nem.s.uIoCtlBuf.idPartition));
340 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("IoCtlGetHvPartitionId failed: %#x\n", rcNt), VERR_NEM_INIT_FAILED);
341 pGVM->nem.s.idHvPartition = pVCpu->nem.s.uIoCtlBuf.idPartition;
342 AssertLogRelMsgReturn(pGVM->nem.s.idHvPartition == pVM->nem.s.idHvPartition,
343 ("idHvPartition mismatch: r0=%#RX64, r3=%#RX64\n", pGVM->nem.s.idHvPartition, pVM->nem.s.idHvPartition),
344 VERR_NEM_INIT_FAILED);
345
346 return rc;
347}
348
349
350/**
351 * Cleanup the NEM parts of the VM in ring-0.
352 *
353 * This is always called and must deal with the state regardless of whether
354 * NEMR0InitVM() was called or not. So, take care here.
355 *
356 * @param pGVM The ring-0 VM handle.
357 */
358VMMR0_INT_DECL(void) NEMR0CleanupVM(PGVM pGVM)
359{
360 pGVM->nem.s.idHvPartition = HV_PARTITION_ID_INVALID;
361
362 /* Clean up I/O control context. */
363 if (pGVM->nem.s.pIoCtlCtx)
364 {
365 int rc = SUPR0IoCtlCleanup(pGVM->nem.s.pIoCtlCtx);
366 AssertRC(rc);
367 pGVM->nem.s.pIoCtlCtx = NULL;
368 }
369
370 /* Free the hypercall pages. */
371 VMCPUID i = pGVM->cCpus;
372 while (i-- > 0)
373 nemR0DeleteHypercallData(&pGVM->aCpus[i].nem.s.HypercallData);
374
375 /* The non-EMT one too. */
376 if (RTCritSectIsInitialized(&pGVM->nem.s.HypercallDataCritSect))
377 RTCritSectDelete(&pGVM->nem.s.HypercallDataCritSect);
378 nemR0DeleteHypercallData(&pGVM->nem.s.HypercallData);
379}
380
381
382#if 0 /* for debugging GPA unmapping. */
383static int nemR3WinDummyReadGpa(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys)
384{
385 PHV_INPUT_READ_GPA pIn = (PHV_INPUT_READ_GPA)pGVCpu->nem.s.pbHypercallData;
386 PHV_OUTPUT_READ_GPA pOut = (PHV_OUTPUT_READ_GPA)(pIn + 1);
387 pIn->PartitionId = pGVM->nem.s.idHvPartition;
388 pIn->VpIndex = pGVCpu->idCpu;
389 pIn->ByteCount = 0x10;
390 pIn->BaseGpa = GCPhys;
391 pIn->ControlFlags.AsUINT64 = 0;
392 pIn->ControlFlags.CacheType = HvCacheTypeX64WriteCombining;
393 memset(pOut, 0xfe, sizeof(*pOut));
394 uint64_t volatile uResult = g_pfnHvlInvokeHypercall(HvCallReadGpa, pGVCpu->nem.s.HCPhysHypercallData,
395 pGVCpu->nem.s.HCPhysHypercallData + sizeof(*pIn));
396 LogRel(("nemR3WinDummyReadGpa: %RGp -> %#RX64; code=%u rsvd=%u abData=%.16Rhxs\n",
397 GCPhys, uResult, pOut->AccessResult.ResultCode, pOut->AccessResult.Reserved, pOut->Data));
398 __debugbreak();
399
400 return uResult != 0 ? VERR_READ_ERROR : VINF_SUCCESS;
401}
402#endif
403
404
405/**
406 * Worker for NEMR0MapPages and others.
407 */
408NEM_TMPL_STATIC int nemR0WinMapPages(PGVM pGVM, PVM pVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
409 uint32_t cPages, uint32_t fFlags)
410{
411 /*
412 * Validate.
413 */
414 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
415
416 AssertReturn(cPages > 0, VERR_OUT_OF_RANGE);
417 AssertReturn(cPages <= NEM_MAX_MAP_PAGES, VERR_OUT_OF_RANGE);
418 AssertReturn(!(fFlags & ~(HV_MAP_GPA_MAYBE_ACCESS_MASK & ~HV_MAP_GPA_DUNNO_ACCESS)), VERR_INVALID_FLAGS);
419 AssertMsgReturn(!(GCPhysDst & X86_PAGE_OFFSET_MASK), ("GCPhysDst=%RGp\n", GCPhysDst), VERR_OUT_OF_RANGE);
420 AssertReturn(GCPhysDst < _1E, VERR_OUT_OF_RANGE);
421 if (GCPhysSrc != GCPhysDst)
422 {
423 AssertMsgReturn(!(GCPhysSrc & X86_PAGE_OFFSET_MASK), ("GCPhysSrc=%RGp\n", GCPhysSrc), VERR_OUT_OF_RANGE);
424 AssertReturn(GCPhysSrc < _1E, VERR_OUT_OF_RANGE);
425 }
426
427 /*
428 * Compose and make the hypercall.
429 * Ring-3 is not allowed to fill in the host physical addresses of the call.
430 */
431 for (uint32_t iTries = 0;; iTries++)
432 {
433 HV_INPUT_MAP_GPA_PAGES *pMapPages = (HV_INPUT_MAP_GPA_PAGES *)pGVCpu->nem.s.HypercallData.pbPage;
434 AssertPtrReturn(pMapPages, VERR_INTERNAL_ERROR_3);
435 pMapPages->TargetPartitionId = pGVM->nem.s.idHvPartition;
436 pMapPages->TargetGpaBase = GCPhysDst >> X86_PAGE_SHIFT;
437 pMapPages->MapFlags = fFlags;
438 pMapPages->u32ExplicitPadding = 0;
439 for (uint32_t iPage = 0; iPage < cPages; iPage++, GCPhysSrc += X86_PAGE_SIZE)
440 {
441 RTHCPHYS HCPhys = NIL_RTGCPHYS;
442 int rc = PGMPhysGCPhys2HCPhys(pVM, GCPhysSrc, &HCPhys);
443 AssertRCReturn(rc, rc);
444 pMapPages->PageList[iPage] = HCPhys >> X86_PAGE_SHIFT;
445 }
446
447 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallMapGpaPages | ((uint64_t)cPages << 32),
448 pGVCpu->nem.s.HypercallData.HCPhysPage, 0);
449 Log6(("NEMR0MapPages: %RGp/%RGp L %u prot %#x -> %#RX64\n",
450 GCPhysDst, GCPhysSrc - cPages * X86_PAGE_SIZE, cPages, fFlags, uResult));
451 if (uResult == ((uint64_t)cPages << 32))
452 return VINF_SUCCESS;
453
454 /*
455 * If the partition is out of memory, try to donate another 512 pages
456 * (2 MB) to it. VID.SYS deals in multiples of 512 pages, nothing smaller.
457 */
458 if ( uResult != HV_STATUS_INSUFFICIENT_MEMORY
459 || iTries > 16
460 || g_pfnWinHvDepositMemory == NULL)
461 {
462 LogRel(("g_pfnHvlInvokeHypercall/MapGpaPages -> %#RX64\n", uResult));
463 return VERR_NEM_MAP_PAGES_FAILED;
464 }
465
466 size_t cPagesAdded = 0;
467 NTSTATUS rcNt = g_pfnWinHvDepositMemory(pGVM->nem.s.idHvPartition, 512, 0, &cPagesAdded);
468 if (!cPagesAdded)
469 {
470 LogRel(("g_pfnWinHvDepositMemory -> %#x / %#RX64\n", rcNt, uResult));
471 return VERR_NEM_MAP_PAGES_FAILED;
472 }
473 }
474}
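/*
 * Note on the hypercall encoding above (per the Hyper-V TLFS calling convention):
 * the value passed to g_pfnHvlInvokeHypercall packs the call code into bits 15:0
 * and the rep count into bits 43:32, which is what
 *     HvCallMapGpaPages | ((uint64_t)cPages << 32)
 * constructs. The result mirrors this: bits 15:0 hold the HV_STATUS code and bits
 * 43:32 the number of reps completed, so uResult == ((uint64_t)cPages << 32) means
 * all cPages were mapped with HV_STATUS_SUCCESS (0).
 */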
475
476
477/**
478 * Maps pages into the guest physical address space.
479 *
480 * Generally the caller will be under the PGM lock already, so no extra effort
481 * is needed to make sure all changes happen under it.
482 *
483 * @returns VBox status code.
484 * @param pGVM The ring-0 VM handle.
485 * @param pVM The cross context VM handle.
486 * @param idCpu The calling EMT. Necessary for getting the
487 * hypercall page and arguments.
488 * @thread EMT(idCpu)
489 */
490VMMR0_INT_DECL(int) NEMR0MapPages(PGVM pGVM, PVM pVM, VMCPUID idCpu)
491{
492 /*
493 * Unpack the call.
494 */
495 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
496 if (RT_SUCCESS(rc))
497 {
498 PVMCPU pVCpu = &pVM->aCpus[idCpu];
499 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
500
501 RTGCPHYS const GCPhysSrc = pVCpu->nem.s.Hypercall.MapPages.GCPhysSrc;
502 RTGCPHYS const GCPhysDst = pVCpu->nem.s.Hypercall.MapPages.GCPhysDst;
503 uint32_t const cPages = pVCpu->nem.s.Hypercall.MapPages.cPages;
504 HV_MAP_GPA_FLAGS const fFlags = pVCpu->nem.s.Hypercall.MapPages.fFlags;
505
506 /*
507 * Do the work.
508 */
509 rc = nemR0WinMapPages(pGVM, pVM, pGVCpu, GCPhysSrc, GCPhysDst, cPages, fFlags);
510 }
511 return rc;
512}
513
514
515/**
516 * Worker for NEMR0UnmapPages and others.
517 */
518NEM_TMPL_STATIC int nemR0WinUnmapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys, uint32_t cPages)
519{
520 /*
521 * Validate input.
522 */
523 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
524
525 AssertReturn(cPages > 0, VERR_OUT_OF_RANGE);
526 AssertReturn(cPages <= NEM_MAX_UNMAP_PAGES, VERR_OUT_OF_RANGE);
527 AssertMsgReturn(!(GCPhys & X86_PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_OUT_OF_RANGE);
528 AssertReturn(GCPhys < _1E, VERR_OUT_OF_RANGE);
529
530 /*
531 * Compose and make the hypercall.
532 */
533 HV_INPUT_UNMAP_GPA_PAGES *pUnmapPages = (HV_INPUT_UNMAP_GPA_PAGES *)pGVCpu->nem.s.HypercallData.pbPage;
534 AssertPtrReturn(pUnmapPages, VERR_INTERNAL_ERROR_3);
535 pUnmapPages->TargetPartitionId = pGVM->nem.s.idHvPartition;
536 pUnmapPages->TargetGpaBase = GCPhys >> X86_PAGE_SHIFT;
537 pUnmapPages->fFlags = 0;
538
539 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallUnmapGpaPages | ((uint64_t)cPages << 32),
540 pGVCpu->nem.s.HypercallData.HCPhysPage, 0);
541 Log6(("NEMR0UnmapPages: %RGp L %u -> %#RX64\n", GCPhys, cPages, uResult));
542 if (uResult == ((uint64_t)cPages << 32))
543 {
544#if 1 /* Do we need to do this? Hopefully not... */
545 uint64_t volatile uR = g_pfnHvlInvokeHypercall(HvCallUncommitGpaPages | ((uint64_t)cPages << 32),
546 pGVCpu->nem.s.HypercallData.HCPhysPage, 0);
547 AssertMsg(uR == ((uint64_t)cPages << 32), ("uR=%#RX64\n", uR)); NOREF(uR);
548#endif
549 return VINF_SUCCESS;
550 }
551
552 LogRel(("g_pfnHvlInvokeHypercall/UnmapGpaPages -> %#RX64\n", uResult));
553 return VERR_NEM_UNMAP_PAGES_FAILED;
554}
555
556
557/**
558 * Unmaps pages from the guest physical address space.
559 *
560 * Generally the caller will be under the PGM lock already, so no extra effort
561 * is needed to make sure all changes happen under it.
562 *
563 * @returns VBox status code.
564 * @param pGVM The ring-0 VM handle.
565 * @param pVM The cross context VM handle.
566 * @param idCpu The calling EMT. Necessary for getting the
567 * hypercall page and arguments.
568 * @thread EMT(idCpu)
569 */
570VMMR0_INT_DECL(int) NEMR0UnmapPages(PGVM pGVM, PVM pVM, VMCPUID idCpu)
571{
572 /*
573 * Unpack the call.
574 */
575 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
576 if (RT_SUCCESS(rc))
577 {
578 PVMCPU pVCpu = &pVM->aCpus[idCpu];
579 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
580
581 RTGCPHYS const GCPhys = pVCpu->nem.s.Hypercall.UnmapPages.GCPhys;
582 uint32_t const cPages = pVCpu->nem.s.Hypercall.UnmapPages.cPages;
583
584 /*
585 * Do the work.
586 */
587 rc = nemR0WinUnmapPages(pGVM, pGVCpu, GCPhys, cPages);
588 }
589 return rc;
590}
591
592
593#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
594/**
595 * Worker for NEMR0ExportState.
596 *
597 * The intention is to use it internally later.
598 *
599 * @returns VBox status code.
600 * @param pGVM The ring-0 VM handle.
601 * @param pGVCpu The ring-0 VCPU handle.
602 * @param pCtx The CPU context structure to export from.
603 */
604NEM_TMPL_STATIC int nemR0WinExportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx)
605{
606 PVMCPU pVCpu = &pGVM->pVM->aCpus[pGVCpu->idCpu];
607 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
608 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
609 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
610
611 pInput->PartitionId = pGVM->nem.s.idHvPartition;
612 pInput->VpIndex = pGVCpu->idCpu;
613 pInput->RsvdZ = 0;
614
615 uint64_t const fWhat = ~pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK);
616 if ( !fWhat
617 && pVCpu->nem.s.fCurrentInterruptWindows == pVCpu->nem.s.fDesiredInterruptWindows)
618 return VINF_SUCCESS;
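 /* Note: bits set in pCtx->fExtrn mark state that is still held by the native API
    (i.e. not valid in CPUMCTX), so the mask of state to push back to Hyper-V is the
    inverse, limited to the bits NEM tracks. On success this function sets all these
    bits again, handing ownership of the register state back to Hyper-V. */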
619 uintptr_t iReg = 0;
620
621 /* GPRs */
622 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
623 {
624 if (fWhat & CPUMCTX_EXTRN_RAX)
625 {
626 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
627 pInput->Elements[iReg].Name = HvX64RegisterRax;
628 pInput->Elements[iReg].Value.Reg64 = pCtx->rax;
629 iReg++;
630 }
631 if (fWhat & CPUMCTX_EXTRN_RCX)
632 {
633 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
634 pInput->Elements[iReg].Name = HvX64RegisterRcx;
635 pInput->Elements[iReg].Value.Reg64 = pCtx->rcx;
636 iReg++;
637 }
638 if (fWhat & CPUMCTX_EXTRN_RDX)
639 {
640 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
641 pInput->Elements[iReg].Name = HvX64RegisterRdx;
642 pInput->Elements[iReg].Value.Reg64 = pCtx->rdx;
643 iReg++;
644 }
645 if (fWhat & CPUMCTX_EXTRN_RBX)
646 {
647 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
648 pInput->Elements[iReg].Name = HvX64RegisterRbx;
649 pInput->Elements[iReg].Value.Reg64 = pCtx->rbx;
650 iReg++;
651 }
652 if (fWhat & CPUMCTX_EXTRN_RSP)
653 {
654 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
655 pInput->Elements[iReg].Name = HvX64RegisterRsp;
656 pInput->Elements[iReg].Value.Reg64 = pCtx->rsp;
657 iReg++;
658 }
659 if (fWhat & CPUMCTX_EXTRN_RBP)
660 {
661 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
662 pInput->Elements[iReg].Name = HvX64RegisterRbp;
663 pInput->Elements[iReg].Value.Reg64 = pCtx->rbp;
664 iReg++;
665 }
666 if (fWhat & CPUMCTX_EXTRN_RSI)
667 {
668 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
669 pInput->Elements[iReg].Name = HvX64RegisterRsi;
670 pInput->Elements[iReg].Value.Reg64 = pCtx->rsi;
671 iReg++;
672 }
673 if (fWhat & CPUMCTX_EXTRN_RDI)
674 {
675 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
676 pInput->Elements[iReg].Name = HvX64RegisterRdi;
677 pInput->Elements[iReg].Value.Reg64 = pCtx->rdi;
678 iReg++;
679 }
680 if (fWhat & CPUMCTX_EXTRN_R8_R15)
681 {
682 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
683 pInput->Elements[iReg].Name = HvX64RegisterR8;
684 pInput->Elements[iReg].Value.Reg64 = pCtx->r8;
685 iReg++;
686 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
687 pInput->Elements[iReg].Name = HvX64RegisterR9;
688 pInput->Elements[iReg].Value.Reg64 = pCtx->r9;
689 iReg++;
690 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
691 pInput->Elements[iReg].Name = HvX64RegisterR10;
692 pInput->Elements[iReg].Value.Reg64 = pCtx->r10;
693 iReg++;
694 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
695 pInput->Elements[iReg].Name = HvX64RegisterR11;
696 pInput->Elements[iReg].Value.Reg64 = pCtx->r11;
697 iReg++;
698 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
699 pInput->Elements[iReg].Name = HvX64RegisterR12;
700 pInput->Elements[iReg].Value.Reg64 = pCtx->r12;
701 iReg++;
702 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
703 pInput->Elements[iReg].Name = HvX64RegisterR13;
704 pInput->Elements[iReg].Value.Reg64 = pCtx->r13;
705 iReg++;
706 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
707 pInput->Elements[iReg].Name = HvX64RegisterR14;
708 pInput->Elements[iReg].Value.Reg64 = pCtx->r14;
709 iReg++;
710 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
711 pInput->Elements[iReg].Name = HvX64RegisterR15;
712 pInput->Elements[iReg].Value.Reg64 = pCtx->r15;
713 iReg++;
714 }
715 }
716
717 /* RIP & Flags */
718 if (fWhat & CPUMCTX_EXTRN_RIP)
719 {
720 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
721 pInput->Elements[iReg].Name = HvX64RegisterRip;
722 pInput->Elements[iReg].Value.Reg64 = pCtx->rip;
723 iReg++;
724 }
725 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
726 {
727 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
728 pInput->Elements[iReg].Name = HvX64RegisterRflags;
729 pInput->Elements[iReg].Value.Reg64 = pCtx->rflags.u;
730 iReg++;
731 }
732
733 /* Segments */
734# define COPY_OUT_SEG(a_idx, a_enmName, a_SReg) \
735 do { \
736 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[a_idx]); \
737 pInput->Elements[a_idx].Name = a_enmName; \
738 pInput->Elements[a_idx].Value.Segment.Base = (a_SReg).u64Base; \
739 pInput->Elements[a_idx].Value.Segment.Limit = (a_SReg).u32Limit; \
740 pInput->Elements[a_idx].Value.Segment.Selector = (a_SReg).Sel; \
741 pInput->Elements[a_idx].Value.Segment.Attributes = (a_SReg).Attr.u; \
742 } while (0)
743 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
744 {
745 if (fWhat & CPUMCTX_EXTRN_CS)
746 {
747 COPY_OUT_SEG(iReg, HvX64RegisterCs, pCtx->cs);
748 iReg++;
749 }
750 if (fWhat & CPUMCTX_EXTRN_ES)
751 {
752 COPY_OUT_SEG(iReg, HvX64RegisterEs, pCtx->es);
753 iReg++;
754 }
755 if (fWhat & CPUMCTX_EXTRN_SS)
756 {
757 COPY_OUT_SEG(iReg, HvX64RegisterSs, pCtx->ss);
758 iReg++;
759 }
760 if (fWhat & CPUMCTX_EXTRN_DS)
761 {
762 COPY_OUT_SEG(iReg, HvX64RegisterDs, pCtx->ds);
763 iReg++;
764 }
765 if (fWhat & CPUMCTX_EXTRN_FS)
766 {
767 COPY_OUT_SEG(iReg, HvX64RegisterFs, pCtx->fs);
768 iReg++;
769 }
770 if (fWhat & CPUMCTX_EXTRN_GS)
771 {
772 COPY_OUT_SEG(iReg, HvX64RegisterGs, pCtx->gs);
773 iReg++;
774 }
775 }
776
777 /* Descriptor tables & task segment. */
778 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
779 {
780 if (fWhat & CPUMCTX_EXTRN_LDTR)
781 {
782 COPY_OUT_SEG(iReg, HvX64RegisterLdtr, pCtx->ldtr);
783 iReg++;
784 }
785 if (fWhat & CPUMCTX_EXTRN_TR)
786 {
787 COPY_OUT_SEG(iReg, HvX64RegisterTr, pCtx->tr);
788 iReg++;
789 }
790
791 if (fWhat & CPUMCTX_EXTRN_IDTR)
792 {
793 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
794 pInput->Elements[iReg].Value.Table.Pad[0] = 0;
795 pInput->Elements[iReg].Value.Table.Pad[1] = 0;
796 pInput->Elements[iReg].Value.Table.Pad[2] = 0;
797 pInput->Elements[iReg].Name = HvX64RegisterIdtr;
798 pInput->Elements[iReg].Value.Table.Limit = pCtx->idtr.cbIdt;
799 pInput->Elements[iReg].Value.Table.Base = pCtx->idtr.pIdt;
800 iReg++;
801 }
802 if (fWhat & CPUMCTX_EXTRN_GDTR)
803 {
804 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
805 pInput->Elements[iReg].Value.Table.Pad[0] = 0;
806 pInput->Elements[iReg].Value.Table.Pad[1] = 0;
807 pInput->Elements[iReg].Value.Table.Pad[2] = 0;
808 pInput->Elements[iReg].Name = HvX64RegisterGdtr;
809 pInput->Elements[iReg].Value.Table.Limit = pCtx->gdtr.cbGdt;
810 pInput->Elements[iReg].Value.Table.Base = pCtx->gdtr.pGdt;
811 iReg++;
812 }
813 }
814
815 /* Control registers. */
816 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
817 {
818 if (fWhat & CPUMCTX_EXTRN_CR0)
819 {
820 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
821 pInput->Elements[iReg].Name = HvX64RegisterCr0;
822 pInput->Elements[iReg].Value.Reg64 = pCtx->cr0;
823 iReg++;
824 }
825 if (fWhat & CPUMCTX_EXTRN_CR2)
826 {
827 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
828 pInput->Elements[iReg].Name = HvX64RegisterCr2;
829 pInput->Elements[iReg].Value.Reg64 = pCtx->cr2;
830 iReg++;
831 }
832 if (fWhat & CPUMCTX_EXTRN_CR3)
833 {
834 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
835 pInput->Elements[iReg].Name = HvX64RegisterCr3;
836 pInput->Elements[iReg].Value.Reg64 = pCtx->cr3;
837 iReg++;
838 }
839 if (fWhat & CPUMCTX_EXTRN_CR4)
840 {
841 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
842 pInput->Elements[iReg].Name = HvX64RegisterCr4;
843 pInput->Elements[iReg].Value.Reg64 = pCtx->cr4;
844 iReg++;
845 }
846 }
847 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
848 {
849 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
850 pInput->Elements[iReg].Name = HvX64RegisterCr8;
851 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestCR8(pVCpu);
852 iReg++;
853 }
854
855 /** @todo does HvX64RegisterXfem mean XCR0? What about the related MSR? */
856
857 /* Debug registers. */
858/** @todo fixme. Figure out what the hyper-v version of KVM_SET_GUEST_DEBUG would be. */
859 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
860 {
861 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
862 pInput->Elements[iReg].Name = HvX64RegisterDr0;
863 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR0(pVCpu);
864 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[0];
865 iReg++;
866 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
867 pInput->Elements[iReg].Name = HvX64RegisterDr1;
868 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR1(pVCpu);
869 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[1];
870 iReg++;
871 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
872 pInput->Elements[iReg].Name = HvX64RegisterDr2;
873 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR2(pVCpu);
874 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[2];
875 iReg++;
876 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
877 pInput->Elements[iReg].Name = HvX64RegisterDr3;
878 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR3(pVCpu);
879 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[3];
880 iReg++;
881 }
882 if (fWhat & CPUMCTX_EXTRN_DR6)
883 {
884 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
885 pInput->Elements[iReg].Name = HvX64RegisterDr6;
886 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR6(pVCpu);
887 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[6];
888 iReg++;
889 }
890 if (fWhat & CPUMCTX_EXTRN_DR7)
891 {
892 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
893 pInput->Elements[iReg].Name = HvX64RegisterDr7;
894 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR7(pVCpu);
895 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[7];
896 iReg++;
897 }
898
899 /* Floating point state. */
900 if (fWhat & CPUMCTX_EXTRN_X87)
901 {
902 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
903 pInput->Elements[iReg].Name = HvX64RegisterFpMmx0;
904 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[0].au64[0];
905 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[0].au64[1];
906 iReg++;
907 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
908 pInput->Elements[iReg].Name = HvX64RegisterFpMmx1;
909 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[1].au64[0];
910 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[1].au64[1];
911 iReg++;
912 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
913 pInput->Elements[iReg].Name = HvX64RegisterFpMmx2;
914 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[2].au64[0];
915 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[2].au64[1];
916 iReg++;
917 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
918 pInput->Elements[iReg].Name = HvX64RegisterFpMmx3;
919 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[3].au64[0];
920 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[3].au64[1];
921 iReg++;
922 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
923 pInput->Elements[iReg].Name = HvX64RegisterFpMmx4;
924 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[4].au64[0];
925 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[4].au64[1];
926 iReg++;
927 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
928 pInput->Elements[iReg].Name = HvX64RegisterFpMmx5;
929 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[5].au64[0];
930 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[5].au64[1];
931 iReg++;
932 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
933 pInput->Elements[iReg].Name = HvX64RegisterFpMmx6;
934 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[6].au64[0];
935 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[6].au64[1];
936 iReg++;
937 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
938 pInput->Elements[iReg].Name = HvX64RegisterFpMmx7;
939 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[7].au64[0];
940 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[7].au64[1];
941 iReg++;
942
943 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
944 pInput->Elements[iReg].Name = HvX64RegisterFpControlStatus;
945 pInput->Elements[iReg].Value.FpControlStatus.FpControl = pCtx->pXStateR0->x87.FCW;
946 pInput->Elements[iReg].Value.FpControlStatus.FpStatus = pCtx->pXStateR0->x87.FSW;
947 pInput->Elements[iReg].Value.FpControlStatus.FpTag = pCtx->pXStateR0->x87.FTW;
948 pInput->Elements[iReg].Value.FpControlStatus.Reserved = pCtx->pXStateR0->x87.FTW >> 8;
949 pInput->Elements[iReg].Value.FpControlStatus.LastFpOp = pCtx->pXStateR0->x87.FOP;
950 pInput->Elements[iReg].Value.FpControlStatus.LastFpRip = (pCtx->pXStateR0->x87.FPUIP)
951 | ((uint64_t)pCtx->pXStateR0->x87.CS << 32)
952 | ((uint64_t)pCtx->pXStateR0->x87.Rsrvd1 << 48);
953 iReg++;
954/** @todo we've got trouble if we try to write just SSE w/o X87. */
955 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
956 pInput->Elements[iReg].Name = HvX64RegisterXmmControlStatus;
957 pInput->Elements[iReg].Value.XmmControlStatus.LastFpRdp = (pCtx->pXStateR0->x87.FPUDP)
958 | ((uint64_t)pCtx->pXStateR0->x87.DS << 32)
959 | ((uint64_t)pCtx->pXStateR0->x87.Rsrvd2 << 48);
960 pInput->Elements[iReg].Value.XmmControlStatus.XmmStatusControl = pCtx->pXStateR0->x87.MXCSR;
961 pInput->Elements[iReg].Value.XmmControlStatus.XmmStatusControlMask = pCtx->pXStateR0->x87.MXCSR_MASK; /** @todo ??? (Isn't this an output field?) */
962 iReg++;
963 }
964
965 /* Vector state. */
966 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
967 {
968 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
969 pInput->Elements[iReg].Name = HvX64RegisterXmm0;
970 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Lo;
971 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Hi;
972 iReg++;
973 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
974 pInput->Elements[iReg].Name = HvX64RegisterXmm1;
975 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Lo;
976 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Hi;
977 iReg++;
978 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
979 pInput->Elements[iReg].Name = HvX64RegisterXmm2;
980 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Lo;
981 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Hi;
982 iReg++;
983 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
984 pInput->Elements[iReg].Name = HvX64RegisterXmm3;
985 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Lo;
986 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Hi;
987 iReg++;
988 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
989 pInput->Elements[iReg].Name = HvX64RegisterXmm4;
990 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Lo;
991 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Hi;
992 iReg++;
993 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
994 pInput->Elements[iReg].Name = HvX64RegisterXmm5;
995 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Lo;
996 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Hi;
997 iReg++;
998 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
999 pInput->Elements[iReg].Name = HvX64RegisterXmm6;
1000 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Lo;
1001 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Hi;
1002 iReg++;
1003 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1004 pInput->Elements[iReg].Name = HvX64RegisterXmm7;
1005 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Lo;
1006 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Hi;
1007 iReg++;
1008 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1009 pInput->Elements[iReg].Name = HvX64RegisterXmm8;
1010 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Lo;
1011 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Hi;
1012 iReg++;
1013 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1014 pInput->Elements[iReg].Name = HvX64RegisterXmm9;
1015 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Lo;
1016 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Hi;
1017 iReg++;
1018 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1019 pInput->Elements[iReg].Name = HvX64RegisterXmm10;
1020 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Lo;
1021 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Hi;
1022 iReg++;
1023 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1024 pInput->Elements[iReg].Name = HvX64RegisterXmm11;
1025 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Lo;
1026 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Hi;
1027 iReg++;
1028 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1029 pInput->Elements[iReg].Name = HvX64RegisterXmm12;
1030 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Lo;
1031 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Hi;
1032 iReg++;
1033 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1034 pInput->Elements[iReg].Name = HvX64RegisterXmm13;
1035 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Lo;
1036 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Hi;
1037 iReg++;
1038 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1039 pInput->Elements[iReg].Name = HvX64RegisterXmm14;
1040 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Lo;
1041 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Hi;
1042 iReg++;
1043 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1044 pInput->Elements[iReg].Name = HvX64RegisterXmm15;
1045 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Lo;
1046 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Hi;
1047 iReg++;
1048 }
1049
1050 /* MSRs */
1051 // HvX64RegisterTsc - don't touch
1052 if (fWhat & CPUMCTX_EXTRN_EFER)
1053 {
1054 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1055 pInput->Elements[iReg].Name = HvX64RegisterEfer;
1056 pInput->Elements[iReg].Value.Reg64 = pCtx->msrEFER;
1057 iReg++;
1058 }
1059 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1060 {
1061 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1062 pInput->Elements[iReg].Name = HvX64RegisterKernelGsBase;
1063 pInput->Elements[iReg].Value.Reg64 = pCtx->msrKERNELGSBASE;
1064 iReg++;
1065 }
1066 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1067 {
1068 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1069 pInput->Elements[iReg].Name = HvX64RegisterSysenterCs;
1070 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.cs;
1071 iReg++;
1072 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1073 pInput->Elements[iReg].Name = HvX64RegisterSysenterEip;
1074 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.eip;
1075 iReg++;
1076 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1077 pInput->Elements[iReg].Name = HvX64RegisterSysenterEsp;
1078 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.esp;
1079 iReg++;
1080 }
1081 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1082 {
1083 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1084 pInput->Elements[iReg].Name = HvX64RegisterStar;
1085 pInput->Elements[iReg].Value.Reg64 = pCtx->msrSTAR;
1086 iReg++;
1087 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1088 pInput->Elements[iReg].Name = HvX64RegisterLstar;
1089 pInput->Elements[iReg].Value.Reg64 = pCtx->msrLSTAR;
1090 iReg++;
1091 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1092 pInput->Elements[iReg].Name = HvX64RegisterCstar;
1093 pInput->Elements[iReg].Value.Reg64 = pCtx->msrCSTAR;
1094 iReg++;
1095 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1096 pInput->Elements[iReg].Name = HvX64RegisterSfmask;
1097 pInput->Elements[iReg].Value.Reg64 = pCtx->msrSFMASK;
1098 iReg++;
1099 }
1100 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1101 {
1102 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1103 pInput->Elements[iReg].Name = HvX64RegisterApicBase;
1104 pInput->Elements[iReg].Value.Reg64 = APICGetBaseMsrNoCheck(pVCpu);
1105 iReg++;
1106 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1107 pInput->Elements[iReg].Name = HvX64RegisterPat;
1108 pInput->Elements[iReg].Value.Reg64 = pCtx->msrPAT;
1109 iReg++;
1110# if 0 /** @todo HvX64RegisterMtrrCap is read only? Seems it's not even readable. */
1111 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1112 pInput->Elements[iReg].Name = HvX64RegisterMtrrCap;
1113 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestIa32MtrrCap(pVCpu);
1114 iReg++;
1115# endif
1116
1117 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
1118
1119 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1120 pInput->Elements[iReg].Name = HvX64RegisterMtrrDefType;
1121 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrDefType;
1122 iReg++;
1123
1124 /** @todo we don't keep state for HvX64RegisterMtrrPhysBaseX and HvX64RegisterMtrrPhysMaskX. */
1125
1126 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1127 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix64k00000;
1128 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix64K_00000;
1129 iReg++;
1130 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1131 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix16k80000;
1132 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix16K_80000;
1133 iReg++;
1134 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1135 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix16kA0000;
1136 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix16K_A0000;
1137 iReg++;
1138 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1139 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kC0000;
1140 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_C0000;
1141 iReg++;
1142 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1143 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kC8000;
1144 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_C8000;
1145 iReg++;
1146 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1147 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kD0000;
1148 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_D0000;
1149 iReg++;
1150 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1151 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kD8000;
1152 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_D8000;
1153 iReg++;
1154 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1155 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kE0000;
1156 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_E0000;
1157 iReg++;
1158 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1159 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kE8000;
1160 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_E8000;
1161 iReg++;
1162 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1163 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kF0000;
1164 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_F0000;
1165 iReg++;
1166 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1167 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kF8000;
1168 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_F8000;
1169 iReg++;
1170 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1171 pInput->Elements[iReg].Name = HvX64RegisterTscAux;
1172 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.TscAux;
1173 iReg++;
1174
1175# if 0 /** @todo Why can't we write these on Intel systems? Not that we really care... */
1176 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM->pVM);
1177 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
1178 {
1179 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1180 pInput->Elements[iReg].Name = HvX64RegisterIa32MiscEnable;
1181 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MiscEnable;
1182 iReg++;
1183 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1184 pInput->Elements[iReg].Name = HvX64RegisterIa32FeatureControl;
1185 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestIa32FeatureControl(pVCpu);
1186 iReg++;
1187 }
1188# endif
1189 }
1190
1191 /* event injection (clear it). */
1192 if (fWhat & CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)
1193 {
1194 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1195 pInput->Elements[iReg].Name = HvRegisterPendingInterruption;
1196 pInput->Elements[iReg].Value.Reg64 = 0;
1197 iReg++;
1198 }
1199
1200 /* Interruptibility state. This can get a little complicated since we get
1201 half of the state via HV_X64_VP_EXECUTION_STATE. */
1202 if ( (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
1203 == (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI) )
1204 {
1205 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1206 pInput->Elements[iReg].Name = HvRegisterInterruptState;
1207 pInput->Elements[iReg].Value.Reg64 = 0;
1208 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1209 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
1210 pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1;
1211 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
1212 pInput->Elements[iReg].Value.InterruptState.NmiMasked = 1;
1213 iReg++;
1214 }
1215 else if (fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT)
1216 {
1217 if ( pVCpu->nem.s.fLastInterruptShadow
1218 || ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1219 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip))
1220 {
1221 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1222 pInput->Elements[iReg].Name = HvRegisterInterruptState;
1223 pInput->Elements[iReg].Value.Reg64 = 0;
1224 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1225 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
1226 pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1;
1227 /** @todo Retrieve NMI state, currently assuming it's zero. (yes this may happen on I/O) */
1228 //if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
1229 // pInput->Elements[iReg].Value.InterruptState.NmiMasked = 1;
1230 iReg++;
1231 }
1232 }
1233 else
1234 Assert(!(fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI));
1235
1236 /* Interrupt windows. Always set if active as Hyper-V seems to be forgetful. */
1237 uint8_t const fDesiredIntWin = pVCpu->nem.s.fDesiredInterruptWindows;
1238 if ( fDesiredIntWin
1239 || pVCpu->nem.s.fCurrentInterruptWindows != fDesiredIntWin)
1240 {
1241 pVCpu->nem.s.fCurrentInterruptWindows = pVCpu->nem.s.fDesiredInterruptWindows;
1242 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1243 pInput->Elements[iReg].Name = HvX64RegisterDeliverabilityNotifications;
1244 pInput->Elements[iReg].Value.DeliverabilityNotifications.AsUINT64 = fDesiredIntWin;
1245 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.NmiNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_NMI));
1246 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.InterruptNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_REGULAR));
1247 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.InterruptPriority == (fDesiredIntWin & NEM_WIN_INTW_F_PRIO_MASK) >> NEM_WIN_INTW_F_PRIO_SHIFT);
1248 iReg++;
1249 }
1250
1251 /// @todo HvRegisterPendingEvent0
1252 /// @todo HvRegisterPendingEvent1
1253
1254 /*
1255 * Set the registers.
1256 */
1257 Assert((uintptr_t)&pInput->Elements[iReg] - (uintptr_t)pGVCpu->nem.s.HypercallData.pbPage < PAGE_SIZE); /* max is 127 */
1258
1259 /*
1260 * Make the hypercall.
1261 */
1262 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, iReg),
1263 pGVCpu->nem.s.HypercallData.HCPhysPage, 0 /*GCPhysOutput*/);
1264 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(iReg),
1265 ("uResult=%RX64 iRegs=%#x\n", uResult, iReg),
1266 VERR_NEM_SET_REGISTERS_FAILED);
1267 //LogFlow(("nemR0WinExportState: uResult=%#RX64 iReg=%zu fWhat=%#018RX64 fExtrn=%#018RX64 -> %#018RX64\n", uResult, iReg, fWhat, pCtx->fExtrn,
1268 // pCtx->fExtrn | CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM ));
1269 pCtx->fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM;
1270 return VINF_SUCCESS;
1271}
1272#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
1273
1274
1275/**
1276 * Export the state to the native API (out of CPUMCTX).
1277 *
1278 * @returns VBox status code
1279 * @param pGVM The ring-0 VM handle.
1280 * @param pVM The cross context VM handle.
1281 * @param idCpu The calling EMT. Necessary for getting the
1282 * hypercall page and arguments.
1283 */
1284VMMR0_INT_DECL(int) NEMR0ExportState(PGVM pGVM, PVM pVM, VMCPUID idCpu)
1285{
1286#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
1287 /*
1288 * Validate the call.
1289 */
1290 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
1291 if (RT_SUCCESS(rc))
1292 {
1293 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1294 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1295 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
1296
1297 /*
1298 * Call worker.
1299 */
1300 rc = nemR0WinExportState(pGVM, pGVCpu, &pVCpu->cpum.GstCtx);
1301 }
1302 return rc;
1303#else
1304 RT_NOREF(pGVM, pVM, idCpu);
1305 return VERR_NOT_IMPLEMENTED;
1306#endif
1307}
1308
1309
1310#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
1311/**
1312 * Worker for NEMR0ImportState.
1313 *
1314 * The intention is to use it internally later.
1315 *
1316 * @returns VBox status code.
1317 * @param pGVM The ring-0 VM handle.
1318 * @param pGVCpu The ring-0 VCPU handle.
1319 * @param pCtx The CPU context structure to import into.
1320 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
1321 */
1322NEM_TMPL_STATIC int nemR0WinImportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat)
1323{
1324 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
1325 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
1326 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
1327 Assert(pCtx == &pGVCpu->pVCpu->cpum.GstCtx);
1328
1329 fWhat &= pCtx->fExtrn;
1330
1331 pInput->PartitionId = pGVM->nem.s.idHvPartition;
1332 pInput->VpIndex = pGVCpu->idCpu;
1333 pInput->fFlags = 0;
1334
1335 /* GPRs */
1336 uintptr_t iReg = 0;
1337 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1338 {
1339 if (fWhat & CPUMCTX_EXTRN_RAX)
1340 pInput->Names[iReg++] = HvX64RegisterRax;
1341 if (fWhat & CPUMCTX_EXTRN_RCX)
1342 pInput->Names[iReg++] = HvX64RegisterRcx;
1343 if (fWhat & CPUMCTX_EXTRN_RDX)
1344 pInput->Names[iReg++] = HvX64RegisterRdx;
1345 if (fWhat & CPUMCTX_EXTRN_RBX)
1346 pInput->Names[iReg++] = HvX64RegisterRbx;
1347 if (fWhat & CPUMCTX_EXTRN_RSP)
1348 pInput->Names[iReg++] = HvX64RegisterRsp;
1349 if (fWhat & CPUMCTX_EXTRN_RBP)
1350 pInput->Names[iReg++] = HvX64RegisterRbp;
1351 if (fWhat & CPUMCTX_EXTRN_RSI)
1352 pInput->Names[iReg++] = HvX64RegisterRsi;
1353 if (fWhat & CPUMCTX_EXTRN_RDI)
1354 pInput->Names[iReg++] = HvX64RegisterRdi;
1355 if (fWhat & CPUMCTX_EXTRN_R8_R15)
1356 {
1357 pInput->Names[iReg++] = HvX64RegisterR8;
1358 pInput->Names[iReg++] = HvX64RegisterR9;
1359 pInput->Names[iReg++] = HvX64RegisterR10;
1360 pInput->Names[iReg++] = HvX64RegisterR11;
1361 pInput->Names[iReg++] = HvX64RegisterR12;
1362 pInput->Names[iReg++] = HvX64RegisterR13;
1363 pInput->Names[iReg++] = HvX64RegisterR14;
1364 pInput->Names[iReg++] = HvX64RegisterR15;
1365 }
1366 }
1367
1368 /* RIP & Flags */
1369 if (fWhat & CPUMCTX_EXTRN_RIP)
1370 pInput->Names[iReg++] = HvX64RegisterRip;
1371 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
1372 pInput->Names[iReg++] = HvX64RegisterRflags;
1373
1374 /* Segments */
1375 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
1376 {
1377 if (fWhat & CPUMCTX_EXTRN_CS)
1378 pInput->Names[iReg++] = HvX64RegisterCs;
1379 if (fWhat & CPUMCTX_EXTRN_ES)
1380 pInput->Names[iReg++] = HvX64RegisterEs;
1381 if (fWhat & CPUMCTX_EXTRN_SS)
1382 pInput->Names[iReg++] = HvX64RegisterSs;
1383 if (fWhat & CPUMCTX_EXTRN_DS)
1384 pInput->Names[iReg++] = HvX64RegisterDs;
1385 if (fWhat & CPUMCTX_EXTRN_FS)
1386 pInput->Names[iReg++] = HvX64RegisterFs;
1387 if (fWhat & CPUMCTX_EXTRN_GS)
1388 pInput->Names[iReg++] = HvX64RegisterGs;
1389 }
1390
1391 /* Descriptor tables and the task segment. */
1392 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
1393 {
1394 if (fWhat & CPUMCTX_EXTRN_LDTR)
1395 pInput->Names[iReg++] = HvX64RegisterLdtr;
1396 if (fWhat & CPUMCTX_EXTRN_TR)
1397 pInput->Names[iReg++] = HvX64RegisterTr;
1398 if (fWhat & CPUMCTX_EXTRN_IDTR)
1399 pInput->Names[iReg++] = HvX64RegisterIdtr;
1400 if (fWhat & CPUMCTX_EXTRN_GDTR)
1401 pInput->Names[iReg++] = HvX64RegisterGdtr;
1402 }
1403
1404 /* Control registers. */
1405 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
1406 {
1407 if (fWhat & CPUMCTX_EXTRN_CR0)
1408 pInput->Names[iReg++] = HvX64RegisterCr0;
1409 if (fWhat & CPUMCTX_EXTRN_CR2)
1410 pInput->Names[iReg++] = HvX64RegisterCr2;
1411 if (fWhat & CPUMCTX_EXTRN_CR3)
1412 pInput->Names[iReg++] = HvX64RegisterCr3;
1413 if (fWhat & CPUMCTX_EXTRN_CR4)
1414 pInput->Names[iReg++] = HvX64RegisterCr4;
1415 }
1416 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
1417 pInput->Names[iReg++] = HvX64RegisterCr8;
1418
1419 /* Debug registers. */
1420 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
1421 {
1422 pInput->Names[iReg++] = HvX64RegisterDr0;
1423 pInput->Names[iReg++] = HvX64RegisterDr1;
1424 pInput->Names[iReg++] = HvX64RegisterDr2;
1425 pInput->Names[iReg++] = HvX64RegisterDr3;
1426 }
1427 if (fWhat & CPUMCTX_EXTRN_DR6)
1428 pInput->Names[iReg++] = HvX64RegisterDr6;
1429 if (fWhat & CPUMCTX_EXTRN_DR7)
1430 pInput->Names[iReg++] = HvX64RegisterDr7;
1431
1432 /* Floating point state. */
1433 if (fWhat & CPUMCTX_EXTRN_X87)
1434 {
1435 pInput->Names[iReg++] = HvX64RegisterFpMmx0;
1436 pInput->Names[iReg++] = HvX64RegisterFpMmx1;
1437 pInput->Names[iReg++] = HvX64RegisterFpMmx2;
1438 pInput->Names[iReg++] = HvX64RegisterFpMmx3;
1439 pInput->Names[iReg++] = HvX64RegisterFpMmx4;
1440 pInput->Names[iReg++] = HvX64RegisterFpMmx5;
1441 pInput->Names[iReg++] = HvX64RegisterFpMmx6;
1442 pInput->Names[iReg++] = HvX64RegisterFpMmx7;
1443 pInput->Names[iReg++] = HvX64RegisterFpControlStatus;
1444 }
1445 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
1446 pInput->Names[iReg++] = HvX64RegisterXmmControlStatus;
1447
1448 /* Vector state. */
1449 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
1450 {
1451 pInput->Names[iReg++] = HvX64RegisterXmm0;
1452 pInput->Names[iReg++] = HvX64RegisterXmm1;
1453 pInput->Names[iReg++] = HvX64RegisterXmm2;
1454 pInput->Names[iReg++] = HvX64RegisterXmm3;
1455 pInput->Names[iReg++] = HvX64RegisterXmm4;
1456 pInput->Names[iReg++] = HvX64RegisterXmm5;
1457 pInput->Names[iReg++] = HvX64RegisterXmm6;
1458 pInput->Names[iReg++] = HvX64RegisterXmm7;
1459 pInput->Names[iReg++] = HvX64RegisterXmm8;
1460 pInput->Names[iReg++] = HvX64RegisterXmm9;
1461 pInput->Names[iReg++] = HvX64RegisterXmm10;
1462 pInput->Names[iReg++] = HvX64RegisterXmm11;
1463 pInput->Names[iReg++] = HvX64RegisterXmm12;
1464 pInput->Names[iReg++] = HvX64RegisterXmm13;
1465 pInput->Names[iReg++] = HvX64RegisterXmm14;
1466 pInput->Names[iReg++] = HvX64RegisterXmm15;
1467 }
1468
1469 /* MSRs */
1470 // HvX64RegisterTsc - don't touch
1471 if (fWhat & CPUMCTX_EXTRN_EFER)
1472 pInput->Names[iReg++] = HvX64RegisterEfer;
1473 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1474 pInput->Names[iReg++] = HvX64RegisterKernelGsBase;
1475 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1476 {
1477 pInput->Names[iReg++] = HvX64RegisterSysenterCs;
1478 pInput->Names[iReg++] = HvX64RegisterSysenterEip;
1479 pInput->Names[iReg++] = HvX64RegisterSysenterEsp;
1480 }
1481 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1482 {
1483 pInput->Names[iReg++] = HvX64RegisterStar;
1484 pInput->Names[iReg++] = HvX64RegisterLstar;
1485 pInput->Names[iReg++] = HvX64RegisterCstar;
1486 pInput->Names[iReg++] = HvX64RegisterSfmask;
1487 }
1488
1489# ifdef LOG_ENABLED
1490 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM->pVM);
1491# endif
1492 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1493 {
1494 pInput->Names[iReg++] = HvX64RegisterApicBase; /// @todo APIC BASE
1495 pInput->Names[iReg++] = HvX64RegisterPat;
1496# if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
1497 pInput->Names[iReg++] = HvX64RegisterMtrrCap;
1498# endif
1499 pInput->Names[iReg++] = HvX64RegisterMtrrDefType;
1500 pInput->Names[iReg++] = HvX64RegisterMtrrFix64k00000;
1501 pInput->Names[iReg++] = HvX64RegisterMtrrFix16k80000;
1502 pInput->Names[iReg++] = HvX64RegisterMtrrFix16kA0000;
1503 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kC0000;
1504 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kC8000;
1505 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kD0000;
1506 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kD8000;
1507 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kE0000;
1508 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kE8000;
1509 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kF0000;
1510 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kF8000;
1511 pInput->Names[iReg++] = HvX64RegisterTscAux;
1512# if 0 /** @todo why can't we read HvX64RegisterIa32MiscEnable? */
1513 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
1514 pInput->Names[iReg++] = HvX64RegisterIa32MiscEnable;
1515# endif
1516# ifdef LOG_ENABLED
1517 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
1518 pInput->Names[iReg++] = HvX64RegisterIa32FeatureControl;
1519# endif
1520 }
1521
1522 /* Interruptibility. */
1523 if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
1524 {
1525 pInput->Names[iReg++] = HvRegisterInterruptState;
1526 pInput->Names[iReg++] = HvX64RegisterRip;
1527 }
1528
1529 /* event injection */
1530 pInput->Names[iReg++] = HvRegisterPendingInterruption;
1531 pInput->Names[iReg++] = HvRegisterPendingEvent0;
1532 pInput->Names[iReg++] = HvRegisterPendingEvent1;
1533 size_t const cRegs = iReg;
1534 size_t const cbInput = RT_ALIGN_Z(RT_OFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[cRegs]), 32);
1535
1536 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
1537 Assert((uintptr_t)&paValues[cRegs] - (uintptr_t)pGVCpu->nem.s.HypercallData.pbPage < PAGE_SIZE); /* (max is around 168 registers) */
1538 RT_BZERO(paValues, cRegs * sizeof(paValues[0]));
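/*
 * Layout of the single hypercall page as used here (descriptive sketch, derived
 * from the code above):
 *
 *     pbPage + 0         HV_INPUT_GET_VP_REGISTERS header + Names[0..cRegs-1]   (input)
 *     pbPage + cbInput   HV_REGISTER_VALUE[0..cRegs-1], 32 byte aligned         (output)
 *
 * Both halves must fit in the same page, which the assertion above checks.
 */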
1539
1540 /*
1541 * Make the hypercall.
1542 */
1543 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, cRegs),
1544 pGVCpu->nem.s.HypercallData.HCPhysPage,
1545 pGVCpu->nem.s.HypercallData.HCPhysPage + cbInput);
1546 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(cRegs),
1547 ("uResult=%RX64 cRegs=%#x\n", uResult, cRegs),
1548 VERR_NEM_GET_REGISTERS_FAILED);
1549 //LogFlow(("nemR0WinImportState: uResult=%#RX64 iReg=%zu fWhat=%#018RX64 fExtr=%#018RX64\n", uResult, cRegs, fWhat, pCtx->fExtrn));
1550
1551 /*
1552 * Copy information to the CPUM context.
1553 */
1554 PVMCPU pVCpu = &pGVM->pVM->aCpus[pGVCpu->idCpu];
1555 iReg = 0;
1556
1557 /* GPRs */
1558 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1559 {
1560 if (fWhat & CPUMCTX_EXTRN_RAX)
1561 {
1562 Assert(pInput->Names[iReg] == HvX64RegisterRax);
1563 pCtx->rax = paValues[iReg++].Reg64;
1564 }
1565 if (fWhat & CPUMCTX_EXTRN_RCX)
1566 {
1567 Assert(pInput->Names[iReg] == HvX64RegisterRcx);
1568 pCtx->rcx = paValues[iReg++].Reg64;
1569 }
1570 if (fWhat & CPUMCTX_EXTRN_RDX)
1571 {
1572 Assert(pInput->Names[iReg] == HvX64RegisterRdx);
1573 pCtx->rdx = paValues[iReg++].Reg64;
1574 }
1575 if (fWhat & CPUMCTX_EXTRN_RBX)
1576 {
1577 Assert(pInput->Names[iReg] == HvX64RegisterRbx);
1578 pCtx->rbx = paValues[iReg++].Reg64;
1579 }
1580 if (fWhat & CPUMCTX_EXTRN_RSP)
1581 {
1582 Assert(pInput->Names[iReg] == HvX64RegisterRsp);
1583 pCtx->rsp = paValues[iReg++].Reg64;
1584 }
1585 if (fWhat & CPUMCTX_EXTRN_RBP)
1586 {
1587 Assert(pInput->Names[iReg] == HvX64RegisterRbp);
1588 pCtx->rbp = paValues[iReg++].Reg64;
1589 }
1590 if (fWhat & CPUMCTX_EXTRN_RSI)
1591 {
1592 Assert(pInput->Names[iReg] == HvX64RegisterRsi);
1593 pCtx->rsi = paValues[iReg++].Reg64;
1594 }
1595 if (fWhat & CPUMCTX_EXTRN_RDI)
1596 {
1597 Assert(pInput->Names[iReg] == HvX64RegisterRdi);
1598 pCtx->rdi = paValues[iReg++].Reg64;
1599 }
1600 if (fWhat & CPUMCTX_EXTRN_R8_R15)
1601 {
1602 Assert(pInput->Names[iReg] == HvX64RegisterR8);
1603 Assert(pInput->Names[iReg + 7] == HvX64RegisterR15);
1604 pCtx->r8 = paValues[iReg++].Reg64;
1605 pCtx->r9 = paValues[iReg++].Reg64;
1606 pCtx->r10 = paValues[iReg++].Reg64;
1607 pCtx->r11 = paValues[iReg++].Reg64;
1608 pCtx->r12 = paValues[iReg++].Reg64;
1609 pCtx->r13 = paValues[iReg++].Reg64;
1610 pCtx->r14 = paValues[iReg++].Reg64;
1611 pCtx->r15 = paValues[iReg++].Reg64;
1612 }
1613 }
1614
1615 /* RIP & Flags */
1616 if (fWhat & CPUMCTX_EXTRN_RIP)
1617 {
1618 Assert(pInput->Names[iReg] == HvX64RegisterRip);
1619 pCtx->rip = paValues[iReg++].Reg64;
1620 }
1621 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
1622 {
1623 Assert(pInput->Names[iReg] == HvX64RegisterRflags);
1624 pCtx->rflags.u = paValues[iReg++].Reg64;
1625 }
1626
1627 /* Segments */
1628# define COPY_BACK_SEG(a_idx, a_enmName, a_SReg) \
1629 do { \
1630 Assert(pInput->Names[a_idx] == a_enmName); \
1631 (a_SReg).u64Base = paValues[a_idx].Segment.Base; \
1632 (a_SReg).u32Limit = paValues[a_idx].Segment.Limit; \
1633 (a_SReg).ValidSel = (a_SReg).Sel = paValues[a_idx].Segment.Selector; \
1634 (a_SReg).Attr.u = paValues[a_idx].Segment.Attributes; \
1635 (a_SReg).fFlags = CPUMSELREG_FLAGS_VALID; \
1636 } while (0)
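/* COPY_BACK_SEG copies one Hyper-V segment register value (base, limit, selector and
   attributes) into the given CPUMSELREG and marks it valid; the embedded assertion
   double-checks that the value at a_idx really belongs to the expected register name. */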
1637 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
1638 {
1639 if (fWhat & CPUMCTX_EXTRN_CS)
1640 {
1641 COPY_BACK_SEG(iReg, HvX64RegisterCs, pCtx->cs);
1642 iReg++;
1643 }
1644 if (fWhat & CPUMCTX_EXTRN_ES)
1645 {
1646 COPY_BACK_SEG(iReg, HvX64RegisterEs, pCtx->es);
1647 iReg++;
1648 }
1649 if (fWhat & CPUMCTX_EXTRN_SS)
1650 {
1651 COPY_BACK_SEG(iReg, HvX64RegisterSs, pCtx->ss);
1652 iReg++;
1653 }
1654 if (fWhat & CPUMCTX_EXTRN_DS)
1655 {
1656 COPY_BACK_SEG(iReg, HvX64RegisterDs, pCtx->ds);
1657 iReg++;
1658 }
1659 if (fWhat & CPUMCTX_EXTRN_FS)
1660 {
1661 COPY_BACK_SEG(iReg, HvX64RegisterFs, pCtx->fs);
1662 iReg++;
1663 }
1664 if (fWhat & CPUMCTX_EXTRN_GS)
1665 {
1666 COPY_BACK_SEG(iReg, HvX64RegisterGs, pCtx->gs);
1667 iReg++;
1668 }
1669 }
1670 /* Descriptor tables and the task segment. */
1671 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
1672 {
1673 if (fWhat & CPUMCTX_EXTRN_LDTR)
1674 {
1675 COPY_BACK_SEG(iReg, HvX64RegisterLdtr, pCtx->ldtr);
1676 iReg++;
1677 }
1678 if (fWhat & CPUMCTX_EXTRN_TR)
1679 {
1680 /* AMD-V likes loading TR in the AVAIL state, whereas Intel insists on BUSY.  So, to
1681 avoid triggering sanity assertions around the code, always fix this up. */
1682 COPY_BACK_SEG(iReg, HvX64RegisterTr, pCtx->tr);
1683 switch (pCtx->tr.Attr.n.u4Type)
1684 {
1685 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1686 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1687 break;
1688 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
1689 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
1690 break;
1691 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
1692 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;
1693 break;
1694 }
1695 iReg++;
1696 }
1697 if (fWhat & CPUMCTX_EXTRN_IDTR)
1698 {
1699 Assert(pInput->Names[iReg] == HvX64RegisterIdtr);
1700 pCtx->idtr.cbIdt = paValues[iReg].Table.Limit;
1701 pCtx->idtr.pIdt = paValues[iReg].Table.Base;
1702 iReg++;
1703 }
1704 if (fWhat & CPUMCTX_EXTRN_GDTR)
1705 {
1706 Assert(pInput->Names[iReg] == HvX64RegisterGdtr);
1707 pCtx->gdtr.cbGdt = paValues[iReg].Table.Limit;
1708 pCtx->gdtr.pGdt = paValues[iReg].Table.Base;
1709 iReg++;
1710 }
1711 }
1712
1713 /* Control registers. */
1714 bool fMaybeChangedMode = false;
1715 bool fFlushTlb = false;
1716 bool fFlushGlobalTlb = false;
1717 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
1718 {
1719 if (fWhat & CPUMCTX_EXTRN_CR0)
1720 {
1721 Assert(pInput->Names[iReg] == HvX64RegisterCr0);
1722 if (pCtx->cr0 != paValues[iReg].Reg64)
1723 {
1724 CPUMSetGuestCR0(pVCpu, paValues[iReg].Reg64);
1725 fMaybeChangedMode = true;
1726 fFlushTlb = fFlushGlobalTlb = true; /// @todo fix this
1727 }
1728 iReg++;
1729 }
1730 if (fWhat & CPUMCTX_EXTRN_CR2)
1731 {
1732 Assert(pInput->Names[iReg] == HvX64RegisterCr2);
1733 pCtx->cr2 = paValues[iReg].Reg64;
1734 iReg++;
1735 }
1736 if (fWhat & CPUMCTX_EXTRN_CR3)
1737 {
1738 Assert(pInput->Names[iReg] == HvX64RegisterCr3);
1739 if (pCtx->cr3 != paValues[iReg].Reg64)
1740 {
1741 CPUMSetGuestCR3(pVCpu, paValues[iReg].Reg64);
1742 fFlushTlb = true;
1743 }
1744 iReg++;
1745 }
1746 if (fWhat & CPUMCTX_EXTRN_CR4)
1747 {
1748 Assert(pInput->Names[iReg] == HvX64RegisterCr4);
1749 if (pCtx->cr4 != paValues[iReg].Reg64)
1750 {
1751 CPUMSetGuestCR4(pVCpu, paValues[iReg].Reg64);
1752 fMaybeChangedMode = true;
1753 fFlushTlb = fFlushGlobalTlb = true; /// @todo fix this
1754 }
1755 iReg++;
1756 }
1757 }
1758 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
1759 {
1760 Assert(pInput->Names[iReg] == HvX64RegisterCr8);
1761 APICSetTpr(pVCpu, (uint8_t)paValues[iReg].Reg64 << 4);
1762 iReg++;
1763 }
1764
1765 /* Debug registers. */
1766/** @todo fixme */
1767/** @todo There are recalc issues here. Recalc will get register content and
1768 * that may assert since we don't clear CPUMCTX_EXTRN_ until the end. */
1769 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
1770 {
1771 Assert(pInput->Names[iReg] == HvX64RegisterDr0);
1772 Assert(pInput->Names[iReg+3] == HvX64RegisterDr3);
1773 if (pCtx->dr[0] != paValues[iReg].Reg64)
1774 CPUMSetGuestDR0(pVCpu, paValues[iReg].Reg64);
1775 iReg++;
1776 if (pCtx->dr[1] != paValues[iReg].Reg64)
1777 CPUMSetGuestDR1(pVCpu, paValues[iReg].Reg64);
1778 iReg++;
1779 if (pCtx->dr[2] != paValues[iReg].Reg64)
1780 CPUMSetGuestDR2(pVCpu, paValues[iReg].Reg64);
1781 iReg++;
1782 if (pCtx->dr[3] != paValues[iReg].Reg64)
1783 CPUMSetGuestDR3(pVCpu, paValues[iReg].Reg64);
1784 iReg++;
1785 }
1786 if (fWhat & CPUMCTX_EXTRN_DR6)
1787 {
1788 Assert(pInput->Names[iReg] == HvX64RegisterDr6);
1789 if (pCtx->dr[6] != paValues[iReg].Reg64)
1790 CPUMSetGuestDR6(pVCpu, paValues[iReg].Reg64);
1791 iReg++;
1792 }
1793 if (fWhat & CPUMCTX_EXTRN_DR7)
1794 {
1795 Assert(pInput->Names[iReg] == HvX64RegisterDr7);
1796 if (pCtx->dr[7] != paValues[iReg].Reg64)
1797 CPUMSetGuestDR7(pVCpu, paValues[iReg].Reg64);
1798 iReg++;
1799 }
1800
1801 /* Floating point state. */
1802 if (fWhat & CPUMCTX_EXTRN_X87)
1803 {
1804 Assert(pInput->Names[iReg] == HvX64RegisterFpMmx0);
1805 Assert(pInput->Names[iReg + 7] == HvX64RegisterFpMmx7);
1806 pCtx->pXStateR0->x87.aRegs[0].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1807 pCtx->pXStateR0->x87.aRegs[0].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1808 iReg++;
1809 pCtx->pXStateR0->x87.aRegs[1].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1810 pCtx->pXStateR0->x87.aRegs[1].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1811 iReg++;
1812 pCtx->pXStateR0->x87.aRegs[2].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1813 pCtx->pXStateR0->x87.aRegs[2].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1814 iReg++;
1815 pCtx->pXStateR0->x87.aRegs[3].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1816 pCtx->pXStateR0->x87.aRegs[3].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1817 iReg++;
1818 pCtx->pXStateR0->x87.aRegs[4].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1819 pCtx->pXStateR0->x87.aRegs[4].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1820 iReg++;
1821 pCtx->pXStateR0->x87.aRegs[5].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1822 pCtx->pXStateR0->x87.aRegs[5].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1823 iReg++;
1824 pCtx->pXStateR0->x87.aRegs[6].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1825 pCtx->pXStateR0->x87.aRegs[6].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1826 iReg++;
1827 pCtx->pXStateR0->x87.aRegs[7].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1828 pCtx->pXStateR0->x87.aRegs[7].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1829 iReg++;
1830
1831 Assert(pInput->Names[iReg] == HvX64RegisterFpControlStatus);
1832 pCtx->pXStateR0->x87.FCW = paValues[iReg].FpControlStatus.FpControl;
1833 pCtx->pXStateR0->x87.FSW = paValues[iReg].FpControlStatus.FpStatus;
1834 pCtx->pXStateR0->x87.FTW = paValues[iReg].FpControlStatus.FpTag
1835 /*| (paValues[iReg].FpControlStatus.Reserved << 8)*/;
1836 pCtx->pXStateR0->x87.FOP = paValues[iReg].FpControlStatus.LastFpOp;
1837 pCtx->pXStateR0->x87.FPUIP = (uint32_t)paValues[iReg].FpControlStatus.LastFpRip;
1838 pCtx->pXStateR0->x87.CS = (uint16_t)(paValues[iReg].FpControlStatus.LastFpRip >> 32);
1839 pCtx->pXStateR0->x87.Rsrvd1 = (uint16_t)(paValues[iReg].FpControlStatus.LastFpRip >> 48);
1840 iReg++;
1841 }
1842
1843 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
1844 {
1845 Assert(pInput->Names[iReg] == HvX64RegisterXmmControlStatus);
1846 if (fWhat & CPUMCTX_EXTRN_X87)
1847 {
1848 pCtx->pXStateR0->x87.FPUDP = (uint32_t)paValues[iReg].XmmControlStatus.LastFpRdp;
1849 pCtx->pXStateR0->x87.DS = (uint16_t)(paValues[iReg].XmmControlStatus.LastFpRdp >> 32);
1850 pCtx->pXStateR0->x87.Rsrvd2 = (uint16_t)(paValues[iReg].XmmControlStatus.LastFpRdp >> 48);
1851 }
1852 pCtx->pXStateR0->x87.MXCSR = paValues[iReg].XmmControlStatus.XmmStatusControl;
1853 pCtx->pXStateR0->x87.MXCSR_MASK = paValues[iReg].XmmControlStatus.XmmStatusControlMask; /** @todo ??? (Isn't this an output field?) */
1854 iReg++;
1855 }
1856
1857 /* Vector state. */
1858 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
1859 {
1860 Assert(pInput->Names[iReg] == HvX64RegisterXmm0);
1861 Assert(pInput->Names[iReg+15] == HvX64RegisterXmm15);
1862 pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1863 pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1864 iReg++;
1865 pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1866 pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1867 iReg++;
1868 pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1869 pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1870 iReg++;
1871 pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1872 pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1873 iReg++;
1874 pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1875 pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1876 iReg++;
1877 pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1878 pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1879 iReg++;
1880 pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1881 pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1882 iReg++;
1883 pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1884 pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1885 iReg++;
1886 pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1887 pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1888 iReg++;
1889 pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1890 pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1891 iReg++;
1892 pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1893 pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1894 iReg++;
1895 pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1896 pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1897 iReg++;
1898 pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1899 pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1900 iReg++;
1901 pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1902 pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1903 iReg++;
1904 pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1905 pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1906 iReg++;
1907 pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1908 pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1909 iReg++;
1910 }
1911
1912
1913 /* MSRs */
1914 // HvX64RegisterTsc - don't touch
1915 if (fWhat & CPUMCTX_EXTRN_EFER)
1916 {
1917 Assert(pInput->Names[iReg] == HvX64RegisterEfer);
1918 if (paValues[iReg].Reg64 != pCtx->msrEFER)
1919 {
1920 Log7(("NEM/%u: MSR EFER changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrEFER, paValues[iReg].Reg64));
1921 if ((paValues[iReg].Reg64 ^ pCtx->msrEFER) & MSR_K6_EFER_NXE)
1922 PGMNotifyNxeChanged(pVCpu, RT_BOOL(paValues[iReg].Reg64 & MSR_K6_EFER_NXE));
1923 pCtx->msrEFER = paValues[iReg].Reg64;
1924 fMaybeChangedMode = true;
1925 }
1926 iReg++;
1927 }
1928 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1929 {
1930 Assert(pInput->Names[iReg] == HvX64RegisterKernelGsBase);
1931 if (pCtx->msrKERNELGSBASE != paValues[iReg].Reg64)
1932 Log7(("NEM/%u: MSR KERNELGSBASE changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrKERNELGSBASE, paValues[iReg].Reg64));
1933 pCtx->msrKERNELGSBASE = paValues[iReg].Reg64;
1934 iReg++;
1935 }
1936 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1937 {
1938 Assert(pInput->Names[iReg] == HvX64RegisterSysenterCs);
1939 if (pCtx->SysEnter.cs != paValues[iReg].Reg64)
1940 Log7(("NEM/%u: MSR SYSENTER.CS changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->SysEnter.cs, paValues[iReg].Reg64));
1941 pCtx->SysEnter.cs = paValues[iReg].Reg64;
1942 iReg++;
1943
1944 Assert(pInput->Names[iReg] == HvX64RegisterSysenterEip);
1945 if (pCtx->SysEnter.eip != paValues[iReg].Reg64)
1946 Log7(("NEM/%u: MSR SYSENTER.EIP changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->SysEnter.eip, paValues[iReg].Reg64));
1947 pCtx->SysEnter.eip = paValues[iReg].Reg64;
1948 iReg++;
1949
1950 Assert(pInput->Names[iReg] == HvX64RegisterSysenterEsp);
1951 if (pCtx->SysEnter.esp != paValues[iReg].Reg64)
1952 Log7(("NEM/%u: MSR SYSENTER.ESP changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->SysEnter.esp, paValues[iReg].Reg64));
1953 pCtx->SysEnter.esp = paValues[iReg].Reg64;
1954 iReg++;
1955 }
1956 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1957 {
1958 Assert(pInput->Names[iReg] == HvX64RegisterStar);
1959 if (pCtx->msrSTAR != paValues[iReg].Reg64)
1960 Log7(("NEM/%u: MSR STAR changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrSTAR, paValues[iReg].Reg64));
1961 pCtx->msrSTAR = paValues[iReg].Reg64;
1962 iReg++;
1963
1964 Assert(pInput->Names[iReg] == HvX64RegisterLstar);
1965 if (pCtx->msrLSTAR != paValues[iReg].Reg64)
1966 Log7(("NEM/%u: MSR LSTAR changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrLSTAR, paValues[iReg].Reg64));
1967 pCtx->msrLSTAR = paValues[iReg].Reg64;
1968 iReg++;
1969
1970 Assert(pInput->Names[iReg] == HvX64RegisterCstar);
1971 if (pCtx->msrCSTAR != paValues[iReg].Reg64)
1972 Log7(("NEM/%u: MSR CSTAR changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrCSTAR, paValues[iReg].Reg64));
1973 pCtx->msrCSTAR = paValues[iReg].Reg64;
1974 iReg++;
1975
1976 Assert(pInput->Names[iReg] == HvX64RegisterSfmask);
1977 if (pCtx->msrSFMASK != paValues[iReg].Reg64)
1978 Log7(("NEM/%u: MSR SFMASK changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrSFMASK, paValues[iReg].Reg64));
1979 pCtx->msrSFMASK = paValues[iReg].Reg64;
1980 iReg++;
1981 }
1982 bool fUpdateApicBase = false;
1983 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1984 {
1985 Assert(pInput->Names[iReg] == HvX64RegisterApicBase);
1986 const uint64_t uOldBase = APICGetBaseMsrNoCheck(pVCpu);
1987 if (paValues[iReg].Reg64 != uOldBase)
1988 {
1989 Log7(("NEM/%u: MSR APICBase changed %RX64 -> %RX64 (%RX64)\n",
1990 pVCpu->idCpu, uOldBase, paValues[iReg].Reg64, paValues[iReg].Reg64 ^ uOldBase));
1991 VBOXSTRICTRC rc2 = APICSetBaseMsr(pVCpu, paValues[iReg].Reg64);
1992 if (rc2 == VINF_CPUM_R3_MSR_WRITE)
1993 {
1994 pVCpu->nem.s.uPendingApicBase = paValues[iReg].Reg64;
1995 fUpdateApicBase = true;
1996 }
1997 else
1998 AssertLogRelMsg(rc2 == VINF_SUCCESS, ("rc2=%Rrc [%#RX64]\n", VBOXSTRICTRC_VAL(rc2), paValues[iReg].Reg64));
1999 }
2000 iReg++;
2001
2002 Assert(pInput->Names[iReg] == HvX64RegisterPat);
2003 if (pCtx->msrPAT != paValues[iReg].Reg64)
2004 Log7(("NEM/%u: MSR PAT changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrPAT, paValues[iReg].Reg64));
2005 pCtx->msrPAT = paValues[iReg].Reg64;
2006 iReg++;
2007
2008# if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
2009 Assert(pInput->Names[iReg] == HvX64RegisterMtrrCap);
2010 if (paValues[iReg].Reg64 != CPUMGetGuestIa32MtrrCap(pVCpu))
2011 Log7(("NEM/%u: MSR MTRR_CAP changed %RX64 -> %RX64 (!!)\n", pVCpu->idCpu, CPUMGetGuestIa32MtrrCap(pVCpu), paValues[iReg].Reg64));
2012 iReg++;
2013# endif
2014
2015 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
2016 Assert(pInput->Names[iReg] == HvX64RegisterMtrrDefType);
2017 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrDefType )
2018 Log7(("NEM/%u: MSR MTRR_DEF_TYPE changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrDefType, paValues[iReg].Reg64));
2019 pCtxMsrs->msr.MtrrDefType = paValues[iReg].Reg64;
2020 iReg++;
2021
2022 /** @todo we don't keep state for HvX64RegisterMtrrPhysBaseX and HvX64RegisterMtrrPhysMaskX */
2023
2024 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix64k00000);
2025 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix64K_00000 )
2026 Log7(("NEM/%u: MSR MTRR_FIX64K_00000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix64K_00000, paValues[iReg].Reg64));
2027 pCtxMsrs->msr.MtrrFix64K_00000 = paValues[iReg].Reg64;
2028 iReg++;
2029
2030 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix16k80000);
2031 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix16K_80000 )
2032 Log7(("NEM/%u: MSR MTRR_FIX16K_80000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_80000, paValues[iReg].Reg64));
2033 pCtxMsrs->msr.MtrrFix16K_80000 = paValues[iReg].Reg64;
2034 iReg++;
2035
2036 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix16kA0000);
2037 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix16K_A0000 )
2038 Log7(("NEM/%u: MSR MTRR_FIX16K_A0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_A0000, paValues[iReg].Reg64));
2039 pCtxMsrs->msr.MtrrFix16K_A0000 = paValues[iReg].Reg64;
2040 iReg++;
2041
2042 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kC0000);
2043 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_C0000 )
2044 Log7(("NEM/%u: MSR MTRR_FIX4K_C0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C0000, paValues[iReg].Reg64));
2045 pCtxMsrs->msr.MtrrFix4K_C0000 = paValues[iReg].Reg64;
2046 iReg++;
2047
2048 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kC8000);
2049 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_C8000 )
2050 Log7(("NEM/%u: MSR MTRR_FIX4K_C8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C8000, paValues[iReg].Reg64));
2051 pCtxMsrs->msr.MtrrFix4K_C8000 = paValues[iReg].Reg64;
2052 iReg++;
2053
2054 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kD0000);
2055 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_D0000 )
2056 Log7(("NEM/%u: MSR MTRR_FIX4K_D0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D0000, paValues[iReg].Reg64));
2057 pCtxMsrs->msr.MtrrFix4K_D0000 = paValues[iReg].Reg64;
2058 iReg++;
2059
2060 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kD8000);
2061 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_D8000 )
2062 Log7(("NEM/%u: MSR MTRR_FIX4K_D8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D8000, paValues[iReg].Reg64));
2063 pCtxMsrs->msr.MtrrFix4K_D8000 = paValues[iReg].Reg64;
2064 iReg++;
2065
2066 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kE0000);
2067 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_E0000 )
2068 Log7(("NEM/%u: MSR MTRR_FIX4K_E0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E0000, paValues[iReg].Reg64));
2069 pCtxMsrs->msr.MtrrFix4K_E0000 = paValues[iReg].Reg64;
2070 iReg++;
2071
2072 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kE8000);
2073 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_E8000 )
2074 Log7(("NEM/%u: MSR MTRR_FIX4K_E8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E8000, paValues[iReg].Reg64));
2075 pCtxMsrs->msr.MtrrFix4K_E8000 = paValues[iReg].Reg64;
2076 iReg++;
2077
2078 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kF0000);
2079 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_F0000 )
2080 Log7(("NEM/%u: MSR MTRR_FIX4K_F0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F0000, paValues[iReg].Reg64));
2081 pCtxMsrs->msr.MtrrFix4K_F0000 = paValues[iReg].Reg64;
2082 iReg++;
2083
2084 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kF8000);
2085 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_F8000 )
2086 Log7(("NEM/%u: MSR MTRR_FIX4K_F8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F8000, paValues[iReg].Reg64));
2087 pCtxMsrs->msr.MtrrFix4K_F8000 = paValues[iReg].Reg64;
2088 iReg++;
2089
2090 Assert(pInput->Names[iReg] == HvX64RegisterTscAux);
2091 if (paValues[iReg].Reg64 != pCtxMsrs->msr.TscAux )
2092 Log7(("NEM/%u: MSR TSC_AUX changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.TscAux, paValues[iReg].Reg64));
2093 pCtxMsrs->msr.TscAux = paValues[iReg].Reg64;
2094 iReg++;
2095
2096# if 0 /** @todo why can't we even read HvX64RegisterIa32MiscEnable? */
2097 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
2098 {
2099 Assert(pInput->Names[iReg] == HvX64RegisterIa32MiscEnable);
2100 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MiscEnable)
2101 Log7(("NEM/%u: MSR MISC_ENABLE changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MiscEnable, paValues[iReg].Reg64));
2102 pCtxMsrs->msr.MiscEnable = paValues[iReg].Reg64;
2103 iReg++;
2104 }
2105# endif
2106# ifdef LOG_ENABLED
2107 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
2108 {
2109 Assert(pInput->Names[iReg] == HvX64RegisterIa32FeatureControl);
2110 if (paValues[iReg].Reg64 != CPUMGetGuestIa32FeatureControl(pVCpu))
2111 Log7(("NEM/%u: MSR FEATURE_CONTROL changed %RX64 -> %RX64 (!!)\n", pVCpu->idCpu, CPUMGetGuestIa32FeatureControl(pVCpu), paValues[iReg].Reg64));
2112 iReg++;
2113 }
2114# endif
2115 }
2116
2117 /* Interruptibility. */
2118 if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
2119 {
2120 Assert(pInput->Names[iReg] == HvRegisterInterruptState);
2121 Assert(pInput->Names[iReg + 1] == HvX64RegisterRip);
2122
2123 if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
2124 {
2125 pVCpu->nem.s.fLastInterruptShadow = paValues[iReg].InterruptState.InterruptShadow;
2126 if (paValues[iReg].InterruptState.InterruptShadow)
2127 EMSetInhibitInterruptsPC(pVCpu, paValues[iReg + 1].Reg64);
2128 else
2129 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2130 }
2131
2132 if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
2133 {
2134 if (paValues[iReg].InterruptState.NmiMasked)
2135 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
2136 else
2137 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
2138 }
2139
2140 fWhat |= CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI;
2141 iReg += 2;
2142 }
2143
2144 /* Event injection. */
2145 /// @todo HvRegisterPendingInterruption
2146 Assert(pInput->Names[iReg] == HvRegisterPendingInterruption);
2147 if (paValues[iReg].PendingInterruption.InterruptionPending)
2148 {
2149 Log7(("PendingInterruption: type=%u vector=%#x errcd=%RTbool/%#x instr-len=%u nested=%u\n",
2150 paValues[iReg].PendingInterruption.InterruptionType, paValues[iReg].PendingInterruption.InterruptionVector,
2151 paValues[iReg].PendingInterruption.DeliverErrorCode, paValues[iReg].PendingInterruption.ErrorCode,
2152 paValues[iReg].PendingInterruption.InstructionLength, paValues[iReg].PendingInterruption.NestedEvent));
2153 AssertMsg((paValues[iReg].PendingInterruption.AsUINT64 & UINT64_C(0xfc00)) == 0,
2154 ("%#RX64\n", paValues[iReg].PendingInterruption.AsUINT64));
2155 }
2156
2157 /// @todo HvRegisterPendingEvent0
2158 /// @todo HvRegisterPendingEvent1
2159
2160 /* Almost done, just update extrn flags and maybe change PGM mode. */
2161 pCtx->fExtrn &= ~fWhat;
2162 if (!(pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT))))
2163 pCtx->fExtrn = 0;
2164
2165 /* Typical. */
2166 if (!fMaybeChangedMode && !fFlushTlb && !fUpdateApicBase)
2167 return VINF_SUCCESS;
2168
2169 /*
2170 * Slow.
2171 */
2172 int rc = VINF_SUCCESS;
2173 if (fMaybeChangedMode)
2174 {
2175 rc = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
2176 if (rc == VINF_PGM_CHANGE_MODE)
2177 {
2178 LogFlow(("nemR0WinImportState: -> VERR_NEM_CHANGE_PGM_MODE!\n"));
2179 return VERR_NEM_CHANGE_PGM_MODE;
2180 }
2181 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc));
2182 }
2183
2184 if (fFlushTlb)
2185 {
2186 LogFlow(("nemR0WinImportState: -> VERR_NEM_FLUSH_TLB!\n"));
2187 rc = VERR_NEM_FLUSH_TLB; /* Calling PGMFlushTLB w/o long jump setup doesn't work, ring-3 does it. */
2188 }
2189
2190 if (fUpdateApicBase && rc == VINF_SUCCESS)
2191 {
2192 LogFlow(("nemR0WinImportState: -> VERR_NEM_UPDATE_APIC_BASE!\n"));
2193 rc = VERR_NEM_UPDATE_APIC_BASE;
2194 }
2195
2196 return rc;
2197}
2198#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
2199
2200
2201/**
2202 * Import the state from the native API (back to CPUMCTX).
2203 *
2204 * @returns VBox status code
2205 * @param pGVM The ring-0 VM handle.
2206 * @param pVM The cross context VM handle.
2207 * @param idCpu The calling EMT. Necessary for getting the
2208 * hypercall page and arguments.
2209 * @param fWhat What to import, CPUMCTX_EXTRN_XXX. Set
2210 * CPUMCTX_EXTRN_ALL for everything.
2211 */
2212VMMR0_INT_DECL(int) NEMR0ImportState(PGVM pGVM, PVM pVM, VMCPUID idCpu, uint64_t fWhat)
2213{
2214#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2215 /*
2216 * Validate the call.
2217 */
2218 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
2219 if (RT_SUCCESS(rc))
2220 {
2221 PVMCPU pVCpu = &pVM->aCpus[idCpu];
2222 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2223 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2224
2225 /*
2226 * Call worker.
2227 */
2228 rc = nemR0WinImportState(pGVM, pGVCpu, &pVCpu->cpum.GstCtx, fWhat);
2229 }
2230 return rc;
2231#else
2232 RT_NOREF(pGVM, pVM, idCpu, fWhat);
2233 return VERR_NOT_IMPLEMENTED;
2234#endif
2235}
2236
2237
2238#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2239/**
2240 * Worker for NEMR0QueryCpuTick and the ring-0 NEMHCQueryCpuTick.
2241 *
2242 * @returns VBox status code.
2243 * @param pGVM The ring-0 VM handle.
2244 * @param pGVCpu The ring-0 VCPU handle.
2245 * @param pcTicks Where to return the current CPU tick count.
2246 * @param pcAux Where to return the hyper-V TSC_AUX value. Optional.
2247 */
2248NEM_TMPL_STATIC int nemR0WinQueryCpuTick(PGVM pGVM, PGVMCPU pGVCpu, uint64_t *pcTicks, uint32_t *pcAux)
2249{
2250 /*
2251 * Hypercall parameters.
2252 */
2253 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
2254 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2255 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2256
2257 pInput->PartitionId = pGVM->nem.s.idHvPartition;
2258 pInput->VpIndex = pGVCpu->idCpu;
2259 pInput->fFlags = 0;
2260 pInput->Names[0] = HvX64RegisterTsc;
2261 pInput->Names[1] = HvX64RegisterTscAux;
2262
2263 size_t const cbInput = RT_ALIGN_Z(RT_OFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[2]), 32);
2264 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
2265 RT_BZERO(paValues, sizeof(paValues[0]) * 2);
2266
2267 /*
2268 * Make the hypercall.
2269 */
2270 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, 2),
2271 pGVCpu->nem.s.HypercallData.HCPhysPage,
2272 pGVCpu->nem.s.HypercallData.HCPhysPage + cbInput);
2273 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(2), ("uResult=%RX64 cRegs=%#x\n", uResult, 2),
2274 VERR_NEM_GET_REGISTERS_FAILED);
2275
2276 /*
2277 * Get results.
2278 */
2279 *pcTicks = paValues[0].Reg64;
2280 if (pcAux)
2281 *pcAux = paValues[1].Reg32;
2282 return VINF_SUCCESS;
2283}
2284#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
2285
2286
2287/**
2288 * Queries the TSC and TSC_AUX values, putting the results in pVCpu->nem.s.Hypercall.QueryCpuTick.
2289 *
2290 * @returns VBox status code
2291 * @param pGVM The ring-0 VM handle.
2292 * @param pVM The cross context VM handle.
2293 * @param idCpu The calling EMT. Necessary for getting the
2294 * hypercall page and arguments.
2295 */
2296VMMR0_INT_DECL(int) NEMR0QueryCpuTick(PGVM pGVM, PVM pVM, VMCPUID idCpu)
2297{
2298#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2299 /*
2300 * Validate the call.
2301 */
2302 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
2303 if (RT_SUCCESS(rc))
2304 {
2305 PVMCPU pVCpu = &pVM->aCpus[idCpu];
2306 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2307 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2308
2309 /*
2310 * Call worker.
2311 */
2312 pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks = 0;
2313 pVCpu->nem.s.Hypercall.QueryCpuTick.uAux = 0;
2314 rc = nemR0WinQueryCpuTick(pGVM, pGVCpu, &pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks,
2315 &pVCpu->nem.s.Hypercall.QueryCpuTick.uAux);
2316 }
2317 return rc;
2318#else
2319 RT_NOREF(pGVM, pVM, idCpu);
2320 return VERR_NOT_IMPLEMENTED;
2321#endif
2322}
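/*
 * Illustrative only: the results are not returned through the call itself; ring-3
 * is expected to read them back from pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks and
 * .uAux after the ring-0 call returns.  How the call is dispatched from ring-3 is an
 * assumption here (e.g. a VMMR0_DO_NEM_QUERY_CPU_TICK style operation on the EMT).
 */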
2323
2324
2325#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2326/**
2327 * Worker for NEMR0ResumeCpuTickOnAll and the ring-0 NEMHCResumeCpuTickOnAll.
2328 *
2329 * @returns VBox status code.
2330 * @param pGVM The ring-0 VM handle.
2331 * @param pGVCpu The ring-0 VCPU handle.
2332 * @param uPausedTscValue The TSC value at the time of pausing.
2333 */
2334NEM_TMPL_STATIC int nemR0WinResumeCpuTickOnAll(PGVM pGVM, PGVMCPU pGVCpu, uint64_t uPausedTscValue)
2335{
2336 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2337
2338 /*
2339 * Set up the hypercall parameters.
2340 */
2341 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
2342 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2343
2344 pInput->PartitionId = pGVM->nem.s.idHvPartition;
2345 pInput->VpIndex = 0;
2346 pInput->RsvdZ = 0;
2347 pInput->Elements[0].Name = HvX64RegisterTsc;
2348 pInput->Elements[0].Pad0 = 0;
2349 pInput->Elements[0].Pad1 = 0;
2350 pInput->Elements[0].Value.Reg128.High64 = 0;
2351 pInput->Elements[0].Value.Reg64 = uPausedTscValue;
2352
2353 /*
2354 * Disable interrupts and do the first virtual CPU.
2355 */
2356 RTCCINTREG const fSavedFlags = ASMIntDisableFlags();
2357 uint64_t const uFirstTsc = ASMReadTSC();
2358 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
2359 pGVCpu->nem.s.HypercallData.HCPhysPage, 0 /* no output */);
2360 AssertLogRelMsgReturnStmt(uResult == HV_MAKE_CALL_REP_RET(1), ("uResult=%RX64 uTsc=%#RX64\n", uResult, uPausedTscValue),
2361 ASMSetFlags(fSavedFlags), VERR_NEM_SET_TSC);
2362
2363 /*
2364 * Do the secondary processors, adjusting for elapsed TSC and keeping fingers crossed
2365 * that we don't introduce too much drift here.
2366 */
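/*
 * Worked example (illustrative): if the vCPU 0 write went out at host TSC uFirstTsc
 * and we reach vCPU n when the host TSC reads uFirstTsc + offDelta, then vCPU n is
 * programmed with uPausedTscValue + offDelta, so all vCPUs resume from roughly the
 * same point in virtual TSC time.
 */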
2367 for (VMCPUID iCpu = 1; iCpu < pGVM->cCpus; iCpu++)
2368 {
2369 Assert(pInput->PartitionId == pGVM->nem.s.idHvPartition);
2370 Assert(pInput->RsvdZ == 0);
2371 Assert(pInput->Elements[0].Name == HvX64RegisterTsc);
2372 Assert(pInput->Elements[0].Pad0 == 0);
2373 Assert(pInput->Elements[0].Pad1 == 0);
2374 Assert(pInput->Elements[0].Value.Reg128.High64 == 0);
2375
2376 pInput->VpIndex = iCpu;
2377 const uint64_t offDelta = (ASMReadTSC() - uFirstTsc);
2378 pInput->Elements[0].Value.Reg64 = uPausedTscValue + offDelta;
2379
2380 uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
2381 pGVCpu->nem.s.HypercallData.HCPhysPage, 0 /* no output */);
2382 AssertLogRelMsgReturnStmt(uResult == HV_MAKE_CALL_REP_RET(1),
2383 ("uResult=%RX64 uTsc=%#RX64 + %#RX64\n", uResult, uPausedTscValue, offDelta),
2384 ASMSetFlags(fSavedFlags), VERR_NEM_SET_TSC);
2385 }
2386
2387 /*
2388 * Done.
2389 */
2390 ASMSetFlags(fSavedFlags);
2391 return VINF_SUCCESS;
2392}
2393#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
2394
2395
2396/**
2397 * Sets the TSC register to @a uPausedTscValue on all CPUs.
2398 *
2399 * @returns VBox status code
2400 * @param pGVM The ring-0 VM handle.
2401 * @param pVM The cross context VM handle.
2402 * @param idCpu The calling EMT. Necessary for getting the
2403 * hypercall page and arguments.
2404 * @param uPausedTscValue The TSC value at the time of pausing.
2405 */
2406VMMR0_INT_DECL(int) NEMR0ResumeCpuTickOnAll(PGVM pGVM, PVM pVM, VMCPUID idCpu, uint64_t uPausedTscValue)
2407{
2408#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2409 /*
2410 * Validate the call.
2411 */
2412 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
2413 if (RT_SUCCESS(rc))
2414 {
2415 PVMCPU pVCpu = &pVM->aCpus[idCpu];
2416 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2417 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2418
2419 /*
2420 * Call worker.
2421 */
2422 pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks = 0;
2423 pVCpu->nem.s.Hypercall.QueryCpuTick.uAux = 0;
2424 rc = nemR0WinResumeCpuTickOnAll(pGVM, pGVCpu, uPausedTscValue);
2425 }
2426 return rc;
2427#else
2428 RT_NOREF(pGVM, pVM, idCpu, uPausedTscValue);
2429 return VERR_NOT_IMPLEMENTED;
2430#endif
2431}
2432
2433
2434VMMR0_INT_DECL(VBOXSTRICTRC) NEMR0RunGuestCode(PGVM pGVM, VMCPUID idCpu)
2435{
2436#ifdef NEM_WIN_WITH_RING0_RUNLOOP
2437 PVM pVM = pGVM->pVM;
2438 return nemHCWinRunGC(pVM, &pVM->aCpus[idCpu], pGVM, &pGVM->aCpus[idCpu]);
2439#else
2440 RT_NOREF(pGVM, idCpu);
2441 return VERR_NOT_IMPLEMENTED;
2442#endif
2443}
2444
2445
2446/**
2447 * Updates statistics in the VM structure.
2448 *
2449 * @returns VBox status code.
2450 * @param pGVM The ring-0 VM handle.
2451 * @param pVM The cross context VM handle.
2452 * @param idCpu The calling EMT, or NIL. Necessary for getting the hypercall
2453 * page and arguments.
2454 */
2455VMMR0_INT_DECL(int) NEMR0UpdateStatistics(PGVM pGVM, PVM pVM, VMCPUID idCpu)
2456{
2457 /*
2458 * Validate the call.
2459 */
2460 int rc;
2461 if (idCpu == NIL_VMCPUID)
2462 rc = GVMMR0ValidateGVMandVM(pGVM, pVM);
2463 else
2464 rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
2465 if (RT_SUCCESS(rc))
2466 {
2467 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2468
2469 PNEMR0HYPERCALLDATA pHypercallData = idCpu != NIL_VMCPUID
2470 ? &pGVM->aCpus[idCpu].nem.s.HypercallData
2471 : &pGVM->nem.s.HypercallData;
2472 if ( RT_VALID_PTR(pHypercallData->pbPage)
2473 && pHypercallData->HCPhysPage != NIL_RTHCPHYS)
2474 {
2475 if (idCpu == NIL_VMCPUID)
2476 rc = RTCritSectEnter(&pGVM->nem.s.HypercallDataCritSect);
2477 if (RT_SUCCESS(rc))
2478 {
2479 /*
2480 * Query the memory statistics for the partition.
2481 */
2482 HV_INPUT_GET_MEMORY_BALANCE *pInput = (HV_INPUT_GET_MEMORY_BALANCE *)pHypercallData->pbPage;
2483 pInput->TargetPartitionId = pGVM->nem.s.idHvPartition;
2484 pInput->ProximityDomainInfo.Flags.ProximityPreferred = 0;
2485 pInput->ProximityDomainInfo.Flags.ProxyimityInfoValid = 0;
2486 pInput->ProximityDomainInfo.Flags.Reserved = 0;
2487 pInput->ProximityDomainInfo.Id = 0;
2488
2489 HV_OUTPUT_GET_MEMORY_BALANCE *pOutput = (HV_OUTPUT_GET_MEMORY_BALANCE *)(pInput + 1);
2490 RT_ZERO(*pOutput);
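/* Unlike the rep hypercalls above, HvCallGetMemoryBalance is a simple call: the raw
   call code is passed without HV_MAKE_CALL_INFO and the result is compared against
   HV_STATUS_SUCCESS rather than a rep count.  The output structure simply follows
   the input in the same hypercall page. */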
2491
2492 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallGetMemoryBalance,
2493 pHypercallData->HCPhysPage,
2494 pHypercallData->HCPhysPage + sizeof(*pInput));
2495 if (uResult == HV_STATUS_SUCCESS)
2496 {
2497 pVM->nem.s.R0Stats.cPagesAvailable = pOutput->PagesAvailable;
2498 pVM->nem.s.R0Stats.cPagesInUse = pOutput->PagesInUse;
2499 rc = VINF_SUCCESS;
2500 }
2501 else
2502 {
2503 LogRel(("HvCallGetMemoryBalance -> %#RX64 (%#RX64 %#RX64)!!\n",
2504 uResult, pOutput->PagesAvailable, pOutput->PagesInUse));
2505 rc = VERR_NEM_IPE_0;
2506 }
2507
2508 if (idCpu == NIL_VMCPUID)
2509 RTCritSectLeave(&pGVM->nem.s.HypercallDataCritSect);
2510 }
2511 }
2512 else
2513 rc = VERR_WRONG_ORDER;
2514 }
2515 return rc;
2516}
2517
2518
2519#if 1 && defined(DEBUG_bird)
2520/**
2521 * Debug only interface for poking around and exploring Hyper-V stuff.
2522 *
2523 * @param pGVM The ring-0 VM handle.
2524 * @param pVM The cross context VM handle.
2525 * @param idCpu The calling EMT.
2526 * @param u64Arg What to do: 0 = query VP register, 1 = query partition property, 2 = set VP register.
2527 */
2528VMMR0_INT_DECL(int) NEMR0DoExperiment(PGVM pGVM, PVM pVM, VMCPUID idCpu, uint64_t u64Arg)
2529{
2530 /*
2531 * Resolve CPU structures.
2532 */
2533 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
2534 if (RT_SUCCESS(rc))
2535 {
2536 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2537
2538 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2539 PVMCPU pVCpu = &pVM->aCpus[idCpu];
2540 if (u64Arg == 0)
2541 {
2542 /*
2543 * Query register.
2544 */
2545 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
2546 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2547
2548 size_t const cbInput = RT_ALIGN_Z(RT_OFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[1]), 32);
2549 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
2550 RT_BZERO(paValues, sizeof(paValues[0]) * 1);
2551
2552 pInput->PartitionId = pGVM->nem.s.idHvPartition;
2553 pInput->VpIndex = pGVCpu->idCpu;
2554 pInput->fFlags = 0;
2555 pInput->Names[0] = (HV_REGISTER_NAME)pVCpu->nem.s.Hypercall.Experiment.uItem;
2556
2557 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, 1),
2558 pGVCpu->nem.s.HypercallData.HCPhysPage,
2559 pGVCpu->nem.s.HypercallData.HCPhysPage + cbInput);
2560 pVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_MAKE_CALL_REP_RET(1);
2561 pVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;
2562 pVCpu->nem.s.Hypercall.Experiment.uLoValue = paValues[0].Reg128.Low64;
2563 pVCpu->nem.s.Hypercall.Experiment.uHiValue = paValues[0].Reg128.High64;
2564 rc = VINF_SUCCESS;
2565 }
2566 else if (u64Arg == 1)
2567 {
2568 /*
2569 * Query partition property.
2570 */
2571 HV_INPUT_GET_PARTITION_PROPERTY *pInput = (HV_INPUT_GET_PARTITION_PROPERTY *)pGVCpu->nem.s.HypercallData.pbPage;
2572 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2573
2574 size_t const cbInput = RT_ALIGN_Z(sizeof(*pInput), 32);
2575 HV_OUTPUT_GET_PARTITION_PROPERTY *pOutput = (HV_OUTPUT_GET_PARTITION_PROPERTY *)((uint8_t *)pInput + cbInput);
2576 pOutput->PropertyValue = 0;
2577
2578 pInput->PartitionId = pGVM->nem.s.idHvPartition;
2579 pInput->PropertyCode = (HV_PARTITION_PROPERTY_CODE)pVCpu->nem.s.Hypercall.Experiment.uItem;
2580 pInput->uPadding = 0;
2581
2582 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallGetPartitionProperty,
2583 pGVCpu->nem.s.HypercallData.HCPhysPage,
2584 pGVCpu->nem.s.HypercallData.HCPhysPage + cbInput);
2585 pVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_STATUS_SUCCESS;
2586 pVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;
2587 pVCpu->nem.s.Hypercall.Experiment.uLoValue = pOutput->PropertyValue;
2588 pVCpu->nem.s.Hypercall.Experiment.uHiValue = 0;
2589 rc = VINF_SUCCESS;
2590 }
2591 else if (u64Arg == 2)
2592 {
2593 /*
2594 * Set register.
2595 */
2596 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
2597 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2598 RT_BZERO(pInput, RT_OFFSETOF(HV_INPUT_SET_VP_REGISTERS, Elements[1]));
2599
2600 pInput->PartitionId = pGVM->nem.s.idHvPartition;
2601 pInput->VpIndex = pGVCpu->idCpu;
2602 pInput->RsvdZ = 0;
2603 pInput->Elements[0].Name = (HV_REGISTER_NAME)pVCpu->nem.s.Hypercall.Experiment.uItem;
2604 pInput->Elements[0].Value.Reg128.High64 = pVCpu->nem.s.Hypercall.Experiment.uHiValue;
2605 pInput->Elements[0].Value.Reg128.Low64 = pVCpu->nem.s.Hypercall.Experiment.uLoValue;
2606
2607 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
2608 pGVCpu->nem.s.HypercallData.HCPhysPage, 0);
2609 pVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_MAKE_CALL_REP_RET(1);
2610 pVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;
2611 rc = VINF_SUCCESS;
2612 }
2613 else
2614 rc = VERR_INVALID_FUNCTION;
2615 }
2616 return rc;
2617}
2618#endif /* DEBUG_bird */
2619