VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp@ 72229

Last change on this file since 72229 was 72229, checked in by vboxsync, 7 years ago

NEM/win: Always set HvX64RegisterDeliverabilityNotifications on exit if we're waiting to deliver an interrupt or NMI. This fixes the NT OS loader up/down hang.

1/* $Id: NEMR0Native-win.cpp 72229 2018-05-17 09:07:22Z vboxsync $ */
2/** @file
3 * NEM - Native execution manager, native ring-0 Windows backend.
4 */
5
6/*
7 * Copyright (C) 2018 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_NEM
23#include <iprt/nt/nt.h>
24#include <iprt/nt/hyperv.h>
25#include <iprt/nt/vid.h>
26#include <winerror.h>
27
28#include <VBox/vmm/nem.h>
29#include <VBox/vmm/iem.h>
30#include <VBox/vmm/em.h>
31#include <VBox/vmm/apic.h>
32#include <VBox/vmm/pdm.h>
33#include "NEMInternal.h"
34#include <VBox/vmm/gvm.h>
35#include <VBox/vmm/vm.h>
36#include <VBox/vmm/gvmm.h>
37#include <VBox/param.h>
38
39#include <iprt/dbg.h>
40#include <iprt/memobj.h>
41#include <iprt/string.h>
42
43
44/* Assert compile context sanity. */
45#ifndef RT_OS_WINDOWS
46# error "Windows only file!"
47#endif
48#ifndef RT_ARCH_AMD64
49# error "AMD64 only file!"
50#endif
51
52
53/*********************************************************************************************************************************
54* Internal Functions *
55*********************************************************************************************************************************/
56typedef uint32_t DWORD; /* for winerror.h constants */
57
58
59/*********************************************************************************************************************************
60* Global Variables *
61*********************************************************************************************************************************/
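/** Pointer to the kernel's HvlInvokeHypercall API used for making Hyper-V
 *  hypercalls; resolved by NEMR0InitVM and NULL until then. */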
62static uint64_t (*g_pfnHvlInvokeHypercall)(uint64_t uCallInfo, uint64_t HCPhysInput, uint64_t HCPhysOutput);
63
64
65/*********************************************************************************************************************************
66* Internal Functions *
67*********************************************************************************************************************************/
68NEM_TMPL_STATIC int nemR0WinMapPages(PGVM pGVM, PVM pVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
69 uint32_t cPages, uint32_t fFlags);
70NEM_TMPL_STATIC int nemR0WinUnmapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys, uint32_t cPages);
71NEM_TMPL_STATIC int nemR0WinExportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx);
72NEM_TMPL_STATIC int nemR0WinImportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat);
73DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, uint32_t uFunction, void *pvInput, uint32_t cbInput,
74 void *pvOutput, uint32_t cbOutput);
75
76
77/*
78 * Instantiate the code we share with ring-0.
79 */
80#include "../VMMAll/NEMAllNativeTemplate-win.cpp.h"
81
82
83/**
84 * Called by NEMR3Init to make sure we've got what we need.
85 *
86 * @returns VBox status code.
87 * @param pGVM The ring-0 VM handle.
88 * @param pVM The cross context VM handle.
89 * @thread EMT(0)
90 */
91VMMR0_INT_DECL(int) NEMR0InitVM(PGVM pGVM, PVM pVM)
92{
93 AssertCompile(sizeof(pGVM->nem.s) <= sizeof(pGVM->nem.padding));
94 AssertCompile(sizeof(pGVM->aCpus[0].nem.s) <= sizeof(pGVM->aCpus[0].nem.padding));
95
96 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, 0);
97 AssertRCReturn(rc, rc);
98
99 /*
100 * We want to perform hypercalls here. The NT kernel started to expose a very low
101 * level interface for doing this somewhere between build 14271 and 16299. Since
102 * we need build 17083 to get anywhere at all, the exact build is not relevant here.
103 */
104 RTDBGKRNLINFO hKrnlInfo;
105 rc = RTR0DbgKrnlInfoOpen(&hKrnlInfo, 0);
106 if (RT_SUCCESS(rc))
107 {
108 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, NULL, "HvlInvokeHypercall", (void **)&g_pfnHvlInvokeHypercall);
109 RTR0DbgKrnlInfoRelease(hKrnlInfo);
110 if (RT_SUCCESS(rc))
111 {
112 /*
113 * Allocate a page for each VCPU to place hypercall data on.
114 */
115 for (VMCPUID i = 0; i < pGVM->cCpus; i++)
116 {
117 PGVMCPU pGVCpu = &pGVM->aCpus[i];
118 rc = RTR0MemObjAllocPage(&pGVCpu->nem.s.hHypercallDataMemObj, PAGE_SIZE, false /*fExecutable*/);
119 if (RT_SUCCESS(rc))
120 {
121 pGVCpu->nem.s.HCPhysHypercallData = RTR0MemObjGetPagePhysAddr(pGVCpu->nem.s.hHypercallDataMemObj, 0 /*iPage*/);
122 pGVCpu->nem.s.pbHypercallData = (uint8_t *)RTR0MemObjAddress(pGVCpu->nem.s.hHypercallDataMemObj);
123 AssertStmt(pGVCpu->nem.s.HCPhysHypercallData != NIL_RTHCPHYS, rc = VERR_INTERNAL_ERROR_3);
124 AssertStmt(pGVCpu->nem.s.pbHypercallData, rc = VERR_INTERNAL_ERROR_3);
125 }
126 else
127 pGVCpu->nem.s.hHypercallDataMemObj = NIL_RTR0MEMOBJ;
128 if (RT_FAILURE(rc))
129 {
130 /* bail. */
131 do
132 {
133 RTR0MemObjFree(pGVCpu->nem.s.hHypercallDataMemObj, true /*fFreeMappings*/);
134 pGVCpu->nem.s.hHypercallDataMemObj = NIL_RTR0MEMOBJ;
135 pGVCpu->nem.s.HCPhysHypercallData = NIL_RTHCPHYS;
136 pGVCpu->nem.s.pbHypercallData = NULL;
137 } while (i-- > 0);
138 return rc;
139 }
140 }
141 /*
142 * So far, so good.
143 */
144 return rc;
145 }
146
147 rc = VERR_NEM_MISSING_KERNEL_API;
148 }
149
150 RT_NOREF(pGVM, pVM);
151 return rc;
152}
153
154
155/**
156 * Perform an I/O control operation on the partition handle (VID.SYS).
157 *
158 * @returns NT status code.
159 * @param pGVM The ring-0 VM structure.
160 * @param uFunction The function to perform.
161 * @param pvInput The input buffer. This must point within the VM
162 * structure so we can easily convert to a ring-3
163 * pointer if necessary.
164 * @param cbInput The size of the input. @a pvInput must be NULL when
165 * zero.
166 * @param pvOutput The output buffer. This must also point within the
167 * VM structure for ring-3 pointer magic.
168 * @param cbOutput The size of the output. @a pvOutput must be NULL
169 * when zero.
170 */
171DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, uint32_t uFunction, void *pvInput, uint32_t cbInput,
172 void *pvOutput, uint32_t cbOutput)
173{
174#ifdef RT_STRICT
175 /*
176 * Input and output parameters are part of the VM CPU structure.
177 */
178 PVM pVM = pGVM->pVM;
179 size_t const cbVM = RT_UOFFSETOF(VM, aCpus[pGVM->cCpus]);
180 if (pvInput)
181 AssertReturn(((uintptr_t)pvInput + cbInput) - (uintptr_t)pVM <= cbVM, VERR_INVALID_PARAMETER);
182 if (pvOutput)
183 AssertReturn(((uintptr_t)pvOutput + cbOutput) - (uintptr_t)pVM <= cbVM, VERR_INVALID_PARAMETER);
184#endif
185
186 int32_t rcNt = STATUS_UNSUCCESSFUL;
187 int rc = SUPR0IoCtlPerform(pGVM->nem.s.pIoCtlCtx, uFunction,
188 pvInput,
189 pvInput ? (uintptr_t)pvInput + pGVM->nem.s.offRing3ConversionDelta : NIL_RTR3PTR,
190 cbInput,
191 pvOutput,
192 pvOutput ? (uintptr_t)pvOutput + pGVM->nem.s.offRing3ConversionDelta : NIL_RTR3PTR,
193 cbOutput,
194 &rcNt);
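    /* Return the NT status if the IPRT call succeeded or the NT status itself
       already signals failure; otherwise fall back to a generic failure code. */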
195 if (RT_SUCCESS(rc) || !NT_SUCCESS((NTSTATUS)rcNt))
196 return (NTSTATUS)rcNt;
197 return STATUS_UNSUCCESSFUL;
198}
199
200
201/**
202 * 2nd part of the initialization, after we've got a partition handle.
203 *
204 * @returns VBox status code.
205 * @param pGVM The ring-0 VM handle.
206 * @param pVM The cross context VM handle.
207 * @thread EMT(0)
208 */
209VMMR0_INT_DECL(int) NEMR0InitVMPart2(PGVM pGVM, PVM pVM)
210{
211 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, 0);
212 AssertRCReturn(rc, rc);
213 SUPR0Printf("NEMR0InitVMPart2\n"); LogRel(("2: NEMR0InitVMPart2\n"));
214
215 /*
216 * Copy and validate the I/O control information from ring-3.
217 */
218 NEMWINIOCTL Copy = pVM->nem.s.IoCtlGetHvPartitionId;
219 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);
220 AssertLogRelReturn(Copy.cbInput == 0, VERR_NEM_INIT_FAILED);
221 AssertLogRelReturn(Copy.cbOutput == sizeof(HV_PARTITION_ID), VERR_NEM_INIT_FAILED);
222 pGVM->nem.s.IoCtlGetHvPartitionId = Copy;
223
224 Copy = pVM->nem.s.IoCtlStartVirtualProcessor;
225 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);
226 AssertLogRelReturn(Copy.cbInput == sizeof(HV_VP_INDEX), VERR_NEM_INIT_FAILED);
227 AssertLogRelReturn(Copy.cbOutput == 0, VERR_NEM_INIT_FAILED);
228 AssertLogRelReturn(Copy.uFunction != pGVM->nem.s.IoCtlGetHvPartitionId.uFunction, VERR_NEM_INIT_FAILED);
229 pGVM->nem.s.IoCtlStartVirtualProcessor = Copy;
230
231 Copy = pVM->nem.s.IoCtlStopVirtualProcessor;
232 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);
233 AssertLogRelReturn(Copy.cbInput == sizeof(HV_VP_INDEX), VERR_NEM_INIT_FAILED);
234 AssertLogRelReturn(Copy.cbOutput == 0, VERR_NEM_INIT_FAILED);
235 AssertLogRelReturn(Copy.uFunction != pGVM->nem.s.IoCtlGetHvPartitionId.uFunction, VERR_NEM_INIT_FAILED);
236 AssertLogRelReturn(Copy.uFunction != pGVM->nem.s.IoCtlStartVirtualProcessor.uFunction, VERR_NEM_INIT_FAILED);
237 pGVM->nem.s.IoCtlStopVirtualProcessor = Copy;
238
239 Copy = pVM->nem.s.IoCtlMessageSlotHandleAndGetNext;
240 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);
241 AssertLogRelReturn(Copy.cbInput == sizeof(VID_IOCTL_INPUT_MESSAGE_SLOT_HANDLE_AND_GET_NEXT), VERR_NEM_INIT_FAILED);
242 AssertLogRelReturn(Copy.cbOutput == 0, VERR_NEM_INIT_FAILED);
243 AssertLogRelReturn(Copy.uFunction != pGVM->nem.s.IoCtlGetHvPartitionId.uFunction, VERR_NEM_INIT_FAILED);
244 AssertLogRelReturn(Copy.uFunction != pGVM->nem.s.IoCtlStartVirtualProcessor.uFunction, VERR_NEM_INIT_FAILED);
245 AssertLogRelReturn(Copy.uFunction != pGVM->nem.s.IoCtlStopVirtualProcessor.uFunction, VERR_NEM_INIT_FAILED);
246 pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext = Copy;
247
248 /*
249 * Set up an I/O control context for the partition handle for later use.
250 */
251 rc = SUPR0IoCtlSetupForHandle(pGVM->pSession, pVM->nem.s.hPartitionDevice, 0, &pGVM->nem.s.pIoCtlCtx);
252 AssertLogRelRCReturn(rc, rc);
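    /* Delta for translating ring-0 addresses within the VM structure to their
       ring-3 mapping, as required by nemR0NtPerformIoControl above. */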
253 pGVM->nem.s.offRing3ConversionDelta = (uintptr_t)pVM->pVMR3 - (uintptr_t)pGVM->pVM;
254
255 /*
256 * Get the partition ID.
257 */
258 PVMCPU pVCpu = &pGVM->pVM->aCpus[0];
259 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlGetHvPartitionId.uFunction, NULL, 0,
260 &pVCpu->nem.s.uIoCtlBuf.idPartition, sizeof(pVCpu->nem.s.uIoCtlBuf.idPartition));
261 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("IoCtlGetHvPartitionId failed: %#x\n", rcNt), VERR_NEM_INIT_FAILED);
262 pGVM->nem.s.idHvPartition = pVCpu->nem.s.uIoCtlBuf.idPartition;
263 AssertLogRelMsgReturn(pGVM->nem.s.idHvPartition == pVM->nem.s.idHvPartition,
264 ("idHvPartition mismatch: r0=%#RX64, r3=%#RX64\n", pGVM->nem.s.idHvPartition, pVM->nem.s.idHvPartition),
265 VERR_NEM_INIT_FAILED);
266
267
268 return rc;
269}
270
271
272/**
273 * Cleanup the NEM parts of the VM in ring-0.
274 *
275 * This is always called and must deal with the state regardless of whether
276 * NEMR0InitVM() was called or not. So, take care here.
277 *
278 * @param pGVM The ring-0 VM handle.
279 */
280VMMR0_INT_DECL(void) NEMR0CleanupVM(PGVM pGVM)
281{
282 pGVM->nem.s.idHvPartition = HV_PARTITION_ID_INVALID;
283
284 /* Clean up I/O control context. */
285 if (pGVM->nem.s.pIoCtlCtx)
286 {
287 int rc = SUPR0IoCtlCleanup(pGVM->nem.s.pIoCtlCtx);
288 AssertRC(rc);
289 pGVM->nem.s.pIoCtlCtx = NULL;
290 }
291
292 /* Free the hypercall pages. */
293 VMCPUID i = pGVM->cCpus;
294 while (i-- > 0)
295 {
296 PGVMCPU pGVCpu = &pGVM->aCpus[i];
297 if (pGVCpu->nem.s.pbHypercallData)
298 {
299 pGVCpu->nem.s.pbHypercallData = NULL;
300 int rc = RTR0MemObjFree(pGVCpu->nem.s.hHypercallDataMemObj, true /*fFreeMappings*/);
301 AssertRC(rc);
302 }
303 pGVCpu->nem.s.hHypercallDataMemObj = NIL_RTR0MEMOBJ;
304 pGVCpu->nem.s.HCPhysHypercallData = NIL_RTHCPHYS;
305 }
306}
307
308
309#if 0 /* for debugging GPA unmapping. */
310static int nemR3WinDummyReadGpa(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys)
311{
312 PHV_INPUT_READ_GPA pIn = (PHV_INPUT_READ_GPA)pGVCpu->nem.s.pbHypercallData;
313 PHV_OUTPUT_READ_GPA pOut = (PHV_OUTPUT_READ_GPA)(pIn + 1);
314 pIn->PartitionId = pGVM->nem.s.idHvPartition;
315 pIn->VpIndex = pGVCpu->idCpu;
316 pIn->ByteCount = 0x10;
317 pIn->BaseGpa = GCPhys;
318 pIn->ControlFlags.AsUINT64 = 0;
319 pIn->ControlFlags.CacheType = HvCacheTypeX64WriteCombining;
320 memset(pOut, 0xfe, sizeof(*pOut));
321 uint64_t volatile uResult = g_pfnHvlInvokeHypercall(HvCallReadGpa, pGVCpu->nem.s.HCPhysHypercallData,
322 pGVCpu->nem.s.HCPhysHypercallData + sizeof(*pIn));
323 LogRel(("nemR3WinDummyReadGpa: %RGp -> %#RX64; code=%u rsvd=%u abData=%.16Rhxs\n",
324 GCPhys, uResult, pOut->AccessResult.ResultCode, pOut->AccessResult.Reserved, pOut->Data));
325 __debugbreak();
326
327 return uResult != 0 ? VERR_READ_ERROR : VINF_SUCCESS;
328}
329#endif
330
331
332/**
333 * Worker for NEMR0MapPages and others.
334 */
335NEM_TMPL_STATIC int nemR0WinMapPages(PGVM pGVM, PVM pVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
336 uint32_t cPages, uint32_t fFlags)
337{
338 /*
339 * Validate.
340 */
341 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
342
343 AssertReturn(cPages > 0, VERR_OUT_OF_RANGE);
344 AssertReturn(cPages <= NEM_MAX_MAP_PAGES, VERR_OUT_OF_RANGE);
345 AssertReturn(!(fFlags & ~(HV_MAP_GPA_MAYBE_ACCESS_MASK & ~HV_MAP_GPA_DUNNO_ACCESS)), VERR_INVALID_FLAGS);
346 AssertMsgReturn(!(GCPhysDst & X86_PAGE_OFFSET_MASK), ("GCPhysDst=%RGp\n", GCPhysDst), VERR_OUT_OF_RANGE);
347 AssertReturn(GCPhysDst < _1E, VERR_OUT_OF_RANGE);
348 if (GCPhysSrc != GCPhysDst)
349 {
350 AssertMsgReturn(!(GCPhysSrc & X86_PAGE_OFFSET_MASK), ("GCPhysSrc=%RGp\n", GCPhysSrc), VERR_OUT_OF_RANGE);
351 AssertReturn(GCPhysSrc < _1E, VERR_OUT_OF_RANGE);
352 }
353
354 /*
355 * Compose and make the hypercall.
356 * Ring-3 is not allowed to fill in the host physical addresses of the call.
357 */
358 HV_INPUT_MAP_GPA_PAGES *pMapPages = (HV_INPUT_MAP_GPA_PAGES *)pGVCpu->nem.s.pbHypercallData;
359 AssertPtrReturn(pMapPages, VERR_INTERNAL_ERROR_3);
360 pMapPages->TargetPartitionId = pGVM->nem.s.idHvPartition;
361 pMapPages->TargetGpaBase = GCPhysDst >> X86_PAGE_SHIFT;
362 pMapPages->MapFlags = fFlags;
363 pMapPages->u32ExplicitPadding = 0;
364 for (uint32_t iPage = 0; iPage < cPages; iPage++, GCPhysSrc += X86_PAGE_SIZE)
365 {
366 RTHCPHYS HCPhys = NIL_RTGCPHYS;
367 int rc = PGMPhysGCPhys2HCPhys(pVM, GCPhysSrc, &HCPhys);
368 AssertRCReturn(rc, rc);
369 pMapPages->PageList[iPage] = HCPhys >> X86_PAGE_SHIFT;
370 }
371
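    /* This is a repeated hypercall: the repetition count is passed in the upper
       half of the call info and, on success, the number of completed repetitions
       comes back in the same position, hence the (cPages << 32) check below. */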
372 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallMapGpaPages | ((uint64_t)cPages << 32),
373 pGVCpu->nem.s.HCPhysHypercallData, 0);
374 Log6(("NEMR0MapPages: %RGp/%RGp L %u prot %#x -> %#RX64\n",
375 GCPhysDst, GCPhysSrc - cPages * X86_PAGE_SIZE, cPages, fFlags, uResult));
376 if (uResult == ((uint64_t)cPages << 32))
377 return VINF_SUCCESS;
378
379 LogRel(("g_pfnHvlInvokeHypercall/MapGpaPages -> %#RX64\n", uResult));
380 return VERR_NEM_MAP_PAGES_FAILED;
381}
382
383
384/**
385 * Maps pages into the guest physical address space.
386 *
387 * Generally the caller will be under the PGM lock already, so no extra effort
388 * is needed to make sure all changes happen under it.
389 *
390 * @returns VBox status code.
391 * @param pGVM The ring-0 VM handle.
392 * @param pVM The cross context VM handle.
393 * @param idCpu The calling EMT. Necessary for getting the
394 * hypercall page and arguments.
395 * @thread EMT(idCpu)
396 */
397VMMR0_INT_DECL(int) NEMR0MapPages(PGVM pGVM, PVM pVM, VMCPUID idCpu)
398{
399 /*
400 * Unpack the call.
401 */
402 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
403 if (RT_SUCCESS(rc))
404 {
405 PVMCPU pVCpu = &pVM->aCpus[idCpu];
406 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
407
408 RTGCPHYS const GCPhysSrc = pVCpu->nem.s.Hypercall.MapPages.GCPhysSrc;
409 RTGCPHYS const GCPhysDst = pVCpu->nem.s.Hypercall.MapPages.GCPhysDst;
410 uint32_t const cPages = pVCpu->nem.s.Hypercall.MapPages.cPages;
411 HV_MAP_GPA_FLAGS const fFlags = pVCpu->nem.s.Hypercall.MapPages.fFlags;
412
413 /*
414 * Do the work.
415 */
416 rc = nemR0WinMapPages(pGVM, pVM, pGVCpu, GCPhysSrc, GCPhysDst, cPages, fFlags);
417 }
418 return rc;
419}
420
421
422/**
423 * Worker for NEMR0UnmapPages and others.
424 */
425NEM_TMPL_STATIC int nemR0WinUnmapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys, uint32_t cPages)
426{
427 /*
428 * Validate input.
429 */
430 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
431
432 AssertReturn(cPages > 0, VERR_OUT_OF_RANGE);
433 AssertReturn(cPages <= NEM_MAX_UNMAP_PAGES, VERR_OUT_OF_RANGE);
434 AssertMsgReturn(!(GCPhys & X86_PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_OUT_OF_RANGE);
435 AssertReturn(GCPhys < _1E, VERR_OUT_OF_RANGE);
436
437 /*
438 * Compose and make the hypercall.
439 */
440 HV_INPUT_UNMAP_GPA_PAGES *pUnmapPages = (HV_INPUT_UNMAP_GPA_PAGES *)pGVCpu->nem.s.pbHypercallData;
441 AssertPtrReturn(pUnmapPages, VERR_INTERNAL_ERROR_3);
442 pUnmapPages->TargetPartitionId = pGVM->nem.s.idHvPartition;
443 pUnmapPages->TargetGpaBase = GCPhys >> X86_PAGE_SHIFT;
444 pUnmapPages->fFlags = 0;
445
446 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallUnmapGpaPages | ((uint64_t)cPages << 32),
447 pGVCpu->nem.s.HCPhysHypercallData, 0);
448 Log6(("NEMR0UnmapPages: %RGp L %u -> %#RX64\n", GCPhys, cPages, uResult));
449 if (uResult == ((uint64_t)cPages << 32))
450 {
451#if 1 /* Do we need to do this? Hopefully not... */
452 uint64_t volatile uR = g_pfnHvlInvokeHypercall(HvCallUncommitGpaPages | ((uint64_t)cPages << 32),
453 pGVCpu->nem.s.HCPhysHypercallData, 0);
454 AssertMsg(uR == ((uint64_t)cPages << 32), ("uR=%#RX64\n", uR)); NOREF(uR);
455#endif
456 return VINF_SUCCESS;
457 }
458
459 LogRel(("g_pfnHvlInvokeHypercall/UnmapGpaPages -> %#RX64\n", uResult));
460 return VERR_NEM_UNMAP_PAGES_FAILED;
461}
462
463
464/**
465 * Unmaps pages from the guest physical address space.
466 *
467 * Generally the caller will be under the PGM lock already, so no extra effort
468 * is needed to make sure all changes happen under it.
469 *
470 * @returns VBox status code.
471 * @param pGVM The ring-0 VM handle.
472 * @param pVM The cross context VM handle.
473 * @param idCpu The calling EMT. Necessary for getting the
474 * hypercall page and arguments.
475 * @thread EMT(idCpu)
476 */
477VMMR0_INT_DECL(int) NEMR0UnmapPages(PGVM pGVM, PVM pVM, VMCPUID idCpu)
478{
479 /*
480 * Unpack the call.
481 */
482 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
483 if (RT_SUCCESS(rc))
484 {
485 PVMCPU pVCpu = &pVM->aCpus[idCpu];
486 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
487
488 RTGCPHYS const GCPhys = pVCpu->nem.s.Hypercall.UnmapPages.GCPhys;
489 uint32_t const cPages = pVCpu->nem.s.Hypercall.UnmapPages.cPages;
490
491 /*
492 * Do the work.
493 */
494 rc = nemR0WinUnmapPages(pGVM, pGVCpu, GCPhys, cPages);
495 }
496 return rc;
497}
498
499
500/**
501 * Worker for NEMR0ExportState.
502 *
503 * Intention is to use it internally later.
504 *
505 * @returns VBox status code.
506 * @param pGVM The ring-0 VM handle.
507 * @param pGVCpu The ring-0 VCPU handle.
508 * @param pCtx The CPU context structure to export from.
509 */
510NEM_TMPL_STATIC int nemR0WinExportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx)
511{
512 PVMCPU pVCpu = &pGVM->pVM->aCpus[pGVCpu->idCpu];
513 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nem.s.pbHypercallData;
514 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
515
516 pInput->PartitionId = pGVM->nem.s.idHvPartition;
517 pInput->VpIndex = pGVCpu->idCpu;
518 pInput->RsvdZ = 0;
519
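    /* Only state not marked as external (i.e. state currently held in pCtx rather
       than by Hyper-V) needs to be pushed back to the hypervisor. */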
520 uint64_t const fWhat = ~pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK);
521 if ( !fWhat
522 && pVCpu->nem.s.fCurrentInterruptWindows == pVCpu->nem.s.fDesiredInterruptWindows)
523 return VINF_SUCCESS;
524 uintptr_t iReg = 0;
525
526 /* GPRs */
527 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
528 {
529 if (fWhat & CPUMCTX_EXTRN_RAX)
530 {
531 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
532 pInput->Elements[iReg].Name = HvX64RegisterRax;
533 pInput->Elements[iReg].Value.Reg64 = pCtx->rax;
534 iReg++;
535 }
536 if (fWhat & CPUMCTX_EXTRN_RCX)
537 {
538 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
539 pInput->Elements[iReg].Name = HvX64RegisterRcx;
540 pInput->Elements[iReg].Value.Reg64 = pCtx->rcx;
541 iReg++;
542 }
543 if (fWhat & CPUMCTX_EXTRN_RDX)
544 {
545 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
546 pInput->Elements[iReg].Name = HvX64RegisterRdx;
547 pInput->Elements[iReg].Value.Reg64 = pCtx->rdx;
548 iReg++;
549 }
550 if (fWhat & CPUMCTX_EXTRN_RBX)
551 {
552 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
553 pInput->Elements[iReg].Name = HvX64RegisterRbx;
554 pInput->Elements[iReg].Value.Reg64 = pCtx->rbx;
555 iReg++;
556 }
557 if (fWhat & CPUMCTX_EXTRN_RSP)
558 {
559 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
560 pInput->Elements[iReg].Name = HvX64RegisterRsp;
561 pInput->Elements[iReg].Value.Reg64 = pCtx->rsp;
562 iReg++;
563 }
564 if (fWhat & CPUMCTX_EXTRN_RBP)
565 {
566 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
567 pInput->Elements[iReg].Name = HvX64RegisterRbp;
568 pInput->Elements[iReg].Value.Reg64 = pCtx->rbp;
569 iReg++;
570 }
571 if (fWhat & CPUMCTX_EXTRN_RSI)
572 {
573 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
574 pInput->Elements[iReg].Name = HvX64RegisterRsi;
575 pInput->Elements[iReg].Value.Reg64 = pCtx->rsi;
576 iReg++;
577 }
578 if (fWhat & CPUMCTX_EXTRN_RDI)
579 {
580 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
581 pInput->Elements[iReg].Name = HvX64RegisterRdi;
582 pInput->Elements[iReg].Value.Reg64 = pCtx->rdi;
583 iReg++;
584 }
585 if (fWhat & CPUMCTX_EXTRN_R8_R15)
586 {
587 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
588 pInput->Elements[iReg].Name = HvX64RegisterR8;
589 pInput->Elements[iReg].Value.Reg64 = pCtx->r8;
590 iReg++;
591 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
592 pInput->Elements[iReg].Name = HvX64RegisterR9;
593 pInput->Elements[iReg].Value.Reg64 = pCtx->r9;
594 iReg++;
595 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
596 pInput->Elements[iReg].Name = HvX64RegisterR10;
597 pInput->Elements[iReg].Value.Reg64 = pCtx->r10;
598 iReg++;
599 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
600 pInput->Elements[iReg].Name = HvX64RegisterR11;
601 pInput->Elements[iReg].Value.Reg64 = pCtx->r11;
602 iReg++;
603 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
604 pInput->Elements[iReg].Name = HvX64RegisterR12;
605 pInput->Elements[iReg].Value.Reg64 = pCtx->r12;
606 iReg++;
607 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
608 pInput->Elements[iReg].Name = HvX64RegisterR13;
609 pInput->Elements[iReg].Value.Reg64 = pCtx->r13;
610 iReg++;
611 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
612 pInput->Elements[iReg].Name = HvX64RegisterR14;
613 pInput->Elements[iReg].Value.Reg64 = pCtx->r14;
614 iReg++;
615 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
616 pInput->Elements[iReg].Name = HvX64RegisterR15;
617 pInput->Elements[iReg].Value.Reg64 = pCtx->r15;
618 iReg++;
619 }
620 }
621
622 /* RIP & Flags */
623 if (fWhat & CPUMCTX_EXTRN_RIP)
624 {
625 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
626 pInput->Elements[iReg].Name = HvX64RegisterRip;
627 pInput->Elements[iReg].Value.Reg64 = pCtx->rip;
628 iReg++;
629 }
630 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
631 {
632 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
633 pInput->Elements[iReg].Name = HvX64RegisterRflags;
634 pInput->Elements[iReg].Value.Reg64 = pCtx->rflags.u;
635 iReg++;
636 }
637
638 /* Segments */
639#define COPY_OUT_SEG(a_idx, a_enmName, a_SReg) \
640 do { \
641 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[a_idx]); \
642 pInput->Elements[a_idx].Name = a_enmName; \
643 pInput->Elements[a_idx].Value.Segment.Base = (a_SReg).u64Base; \
644 pInput->Elements[a_idx].Value.Segment.Limit = (a_SReg).u32Limit; \
645 pInput->Elements[a_idx].Value.Segment.Selector = (a_SReg).Sel; \
646 pInput->Elements[a_idx].Value.Segment.Attributes = (a_SReg).Attr.u; \
647 } while (0)
648 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
649 {
650 if (fWhat & CPUMCTX_EXTRN_CS)
651 {
652 COPY_OUT_SEG(iReg, HvX64RegisterCs, pCtx->cs);
653 iReg++;
654 }
655 if (fWhat & CPUMCTX_EXTRN_ES)
656 {
657 COPY_OUT_SEG(iReg, HvX64RegisterEs, pCtx->es);
658 iReg++;
659 }
660 if (fWhat & CPUMCTX_EXTRN_SS)
661 {
662 COPY_OUT_SEG(iReg, HvX64RegisterSs, pCtx->ss);
663 iReg++;
664 }
665 if (fWhat & CPUMCTX_EXTRN_DS)
666 {
667 COPY_OUT_SEG(iReg, HvX64RegisterDs, pCtx->ds);
668 iReg++;
669 }
670 if (fWhat & CPUMCTX_EXTRN_FS)
671 {
672 COPY_OUT_SEG(iReg, HvX64RegisterFs, pCtx->fs);
673 iReg++;
674 }
675 if (fWhat & CPUMCTX_EXTRN_GS)
676 {
677 COPY_OUT_SEG(iReg, HvX64RegisterGs, pCtx->gs);
678 iReg++;
679 }
680 }
681
682 /* Descriptor tables & task segment. */
683 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
684 {
685 if (fWhat & CPUMCTX_EXTRN_LDTR)
686 {
687 COPY_OUT_SEG(iReg, HvX64RegisterLdtr, pCtx->ldtr);
688 iReg++;
689 }
690 if (fWhat & CPUMCTX_EXTRN_TR)
691 {
692 COPY_OUT_SEG(iReg, HvX64RegisterTr, pCtx->tr);
693 iReg++;
694 }
695
696 if (fWhat & CPUMCTX_EXTRN_IDTR)
697 {
698 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
699 pInput->Elements[iReg].Value.Table.Pad[0] = 0;
700 pInput->Elements[iReg].Value.Table.Pad[1] = 0;
701 pInput->Elements[iReg].Value.Table.Pad[2] = 0;
702 pInput->Elements[iReg].Name = HvX64RegisterIdtr;
703 pInput->Elements[iReg].Value.Table.Limit = pCtx->idtr.cbIdt;
704 pInput->Elements[iReg].Value.Table.Base = pCtx->idtr.pIdt;
705 iReg++;
706 }
707 if (fWhat & CPUMCTX_EXTRN_GDTR)
708 {
709 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
710 pInput->Elements[iReg].Value.Table.Pad[0] = 0;
711 pInput->Elements[iReg].Value.Table.Pad[1] = 0;
712 pInput->Elements[iReg].Value.Table.Pad[2] = 0;
713 pInput->Elements[iReg].Name = HvX64RegisterGdtr;
714 pInput->Elements[iReg].Value.Table.Limit = pCtx->gdtr.cbGdt;
715 pInput->Elements[iReg].Value.Table.Base = pCtx->gdtr.pGdt;
716 iReg++;
717 }
718 }
719
720 /* Control registers. */
721 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
722 {
723 if (fWhat & CPUMCTX_EXTRN_CR0)
724 {
725 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
726 pInput->Elements[iReg].Name = HvX64RegisterCr0;
727 pInput->Elements[iReg].Value.Reg64 = pCtx->cr0;
728 iReg++;
729 }
730 if (fWhat & CPUMCTX_EXTRN_CR2)
731 {
732 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
733 pInput->Elements[iReg].Name = HvX64RegisterCr2;
734 pInput->Elements[iReg].Value.Reg64 = pCtx->cr2;
735 iReg++;
736 }
737 if (fWhat & CPUMCTX_EXTRN_CR3)
738 {
739 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
740 pInput->Elements[iReg].Name = HvX64RegisterCr3;
741 pInput->Elements[iReg].Value.Reg64 = pCtx->cr3;
742 iReg++;
743 }
744 if (fWhat & CPUMCTX_EXTRN_CR4)
745 {
746 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
747 pInput->Elements[iReg].Name = HvX64RegisterCr4;
748 pInput->Elements[iReg].Value.Reg64 = pCtx->cr4;
749 iReg++;
750 }
751 }
752 /** @todo CR8/TPR */
753 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
754 pInput->Elements[iReg].Name = HvX64RegisterCr8;
755 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestCR8(pVCpu);
756 iReg++;
757
758 /** @todo does HvX64RegisterXfem mean XCR0? What about the related MSR? */
759
760 /* Debug registers. */
761/** @todo fixme. Figure out what the hyper-v version of KVM_SET_GUEST_DEBUG would be. */
762 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
763 {
764 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
765 pInput->Elements[iReg].Name = HvX64RegisterDr0;
766 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR0(pVCpu);
767 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[0];
768 iReg++;
769 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
770 pInput->Elements[iReg].Name = HvX64RegisterDr1;
771 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR1(pVCpu);
772 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[1];
773 iReg++;
774 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
775 pInput->Elements[iReg].Name = HvX64RegisterDr2;
776 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR2(pVCpu);
777 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[2];
778 iReg++;
779 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
780 pInput->Elements[iReg].Name = HvX64RegisterDr3;
781 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR3(pVCpu);
782 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[3];
783 iReg++;
784 }
785 if (fWhat & CPUMCTX_EXTRN_DR6)
786 {
787 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
788 pInput->Elements[iReg].Name = HvX64RegisterDr6;
789 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR6(pVCpu);
790 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[6];
791 iReg++;
792 }
793 if (fWhat & CPUMCTX_EXTRN_DR7)
794 {
795 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
796 pInput->Elements[iReg].Name = HvX64RegisterDr7;
797 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR7(pVCpu);
798 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[7];
799 iReg++;
800 }
801
802 /* Floating point state. */
803 if (fWhat & CPUMCTX_EXTRN_X87)
804 {
805 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
806 pInput->Elements[iReg].Name = HvX64RegisterFpMmx0;
807 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[0].au64[0];
808 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[0].au64[1];
809 iReg++;
810 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
811 pInput->Elements[iReg].Name = HvX64RegisterFpMmx1;
812 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[1].au64[0];
813 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[1].au64[1];
814 iReg++;
815 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
816 pInput->Elements[iReg].Name = HvX64RegisterFpMmx2;
817 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[2].au64[0];
818 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[2].au64[1];
819 iReg++;
820 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
821 pInput->Elements[iReg].Name = HvX64RegisterFpMmx3;
822 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[3].au64[0];
823 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[3].au64[1];
824 iReg++;
825 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
826 pInput->Elements[iReg].Name = HvX64RegisterFpMmx4;
827 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[4].au64[0];
828 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[4].au64[1];
829 iReg++;
830 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
831 pInput->Elements[iReg].Name = HvX64RegisterFpMmx5;
832 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[5].au64[0];
833 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[5].au64[1];
834 iReg++;
835 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
836 pInput->Elements[iReg].Name = HvX64RegisterFpMmx6;
837 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[6].au64[0];
838 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[6].au64[1];
839 iReg++;
840 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
841 pInput->Elements[iReg].Name = HvX64RegisterFpMmx7;
842 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[7].au64[0];
843 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[7].au64[1];
844 iReg++;
845
846 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
847 pInput->Elements[iReg].Name = HvX64RegisterFpControlStatus;
848 pInput->Elements[iReg].Value.FpControlStatus.FpControl = pCtx->pXStateR0->x87.FCW;
849 pInput->Elements[iReg].Value.FpControlStatus.FpStatus = pCtx->pXStateR0->x87.FSW;
850 pInput->Elements[iReg].Value.FpControlStatus.FpTag = pCtx->pXStateR0->x87.FTW;
851 pInput->Elements[iReg].Value.FpControlStatus.Reserved = pCtx->pXStateR0->x87.FTW >> 8;
852 pInput->Elements[iReg].Value.FpControlStatus.LastFpOp = pCtx->pXStateR0->x87.FOP;
853 pInput->Elements[iReg].Value.FpControlStatus.LastFpRip = (pCtx->pXStateR0->x87.FPUIP)
854 | ((uint64_t)pCtx->pXStateR0->x87.CS << 32)
855 | ((uint64_t)pCtx->pXStateR0->x87.Rsrvd1 << 48);
856 iReg++;
857/** @todo we've got trouble if we try to write just SSE w/o X87. */
858 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
859 pInput->Elements[iReg].Name = HvX64RegisterXmmControlStatus;
860 pInput->Elements[iReg].Value.XmmControlStatus.LastFpRdp = (pCtx->pXStateR0->x87.FPUDP)
861 | ((uint64_t)pCtx->pXStateR0->x87.DS << 32)
862 | ((uint64_t)pCtx->pXStateR0->x87.Rsrvd2 << 48);
863 pInput->Elements[iReg].Value.XmmControlStatus.XmmStatusControl = pCtx->pXStateR0->x87.MXCSR;
864 pInput->Elements[iReg].Value.XmmControlStatus.XmmStatusControlMask = pCtx->pXStateR0->x87.MXCSR_MASK; /** @todo ??? (Isn't this an output field?) */
865 iReg++;
866 }
867
868 /* Vector state. */
869 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
870 {
871 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
872 pInput->Elements[iReg].Name = HvX64RegisterXmm0;
873 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Lo;
874 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Hi;
875 iReg++;
876 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
877 pInput->Elements[iReg].Name = HvX64RegisterXmm1;
878 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Lo;
879 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Hi;
880 iReg++;
881 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
882 pInput->Elements[iReg].Name = HvX64RegisterXmm2;
883 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Lo;
884 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Hi;
885 iReg++;
886 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
887 pInput->Elements[iReg].Name = HvX64RegisterXmm3;
888 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Lo;
889 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Hi;
890 iReg++;
891 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
892 pInput->Elements[iReg].Name = HvX64RegisterXmm4;
893 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Lo;
894 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Hi;
895 iReg++;
896 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
897 pInput->Elements[iReg].Name = HvX64RegisterXmm5;
898 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Lo;
899 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Hi;
900 iReg++;
901 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
902 pInput->Elements[iReg].Name = HvX64RegisterXmm6;
903 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Lo;
904 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Hi;
905 iReg++;
906 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
907 pInput->Elements[iReg].Name = HvX64RegisterXmm7;
908 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Lo;
909 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Hi;
910 iReg++;
911 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
912 pInput->Elements[iReg].Name = HvX64RegisterXmm8;
913 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Lo;
914 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Hi;
915 iReg++;
916 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
917 pInput->Elements[iReg].Name = HvX64RegisterXmm9;
918 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Lo;
919 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Hi;
920 iReg++;
921 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
922 pInput->Elements[iReg].Name = HvX64RegisterXmm10;
923 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Lo;
924 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Hi;
925 iReg++;
926 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
927 pInput->Elements[iReg].Name = HvX64RegisterXmm11;
928 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Lo;
929 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Hi;
930 iReg++;
931 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
932 pInput->Elements[iReg].Name = HvX64RegisterXmm12;
933 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Lo;
934 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Hi;
935 iReg++;
936 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
937 pInput->Elements[iReg].Name = HvX64RegisterXmm13;
938 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Lo;
939 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Hi;
940 iReg++;
941 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
942 pInput->Elements[iReg].Name = HvX64RegisterXmm14;
943 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Lo;
944 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Hi;
945 iReg++;
946 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
947 pInput->Elements[iReg].Name = HvX64RegisterXmm15;
948 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Lo;
949 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Hi;
950 iReg++;
951 }
952
953 /* MSRs */
954 // HvX64RegisterTsc - don't touch
955 /** @todo does HvX64RegisterTsc include TSC_AUX? Is it TSC_AUX? */
956 if (fWhat & CPUMCTX_EXTRN_EFER)
957 {
958 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
959 pInput->Elements[iReg].Name = HvX64RegisterEfer;
960 pInput->Elements[iReg].Value.Reg64 = pCtx->msrEFER;
961 iReg++;
962 }
963 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
964 {
965 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
966 pInput->Elements[iReg].Name = HvX64RegisterKernelGsBase;
967 pInput->Elements[iReg].Value.Reg64 = pCtx->msrKERNELGSBASE;
968 iReg++;
969 }
970 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
971 {
972 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
973 pInput->Elements[iReg].Name = HvX64RegisterSysenterCs;
974 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.cs;
975 iReg++;
976 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
977 pInput->Elements[iReg].Name = HvX64RegisterSysenterEip;
978 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.eip;
979 iReg++;
980 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
981 pInput->Elements[iReg].Name = HvX64RegisterSysenterEsp;
982 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.esp;
983 iReg++;
984 }
985 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
986 {
987 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
988 pInput->Elements[iReg].Name = HvX64RegisterStar;
989 pInput->Elements[iReg].Value.Reg64 = pCtx->msrSTAR;
990 iReg++;
991 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
992 pInput->Elements[iReg].Name = HvX64RegisterLstar;
993 pInput->Elements[iReg].Value.Reg64 = pCtx->msrLSTAR;
994 iReg++;
995 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
996 pInput->Elements[iReg].Name = HvX64RegisterCstar;
997 pInput->Elements[iReg].Value.Reg64 = pCtx->msrCSTAR;
998 iReg++;
999 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1000 pInput->Elements[iReg].Name = HvX64RegisterSfmask;
1001 pInput->Elements[iReg].Value.Reg64 = pCtx->msrSFMASK;
1002 iReg++;
1003 }
1004 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1005 {
1006 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1007 pInput->Elements[iReg].Name = HvX64RegisterApicBase;
1008 pInput->Elements[iReg].Value.Reg64 = APICGetBaseMsrNoCheck(pVCpu);
1009 iReg++;
1010 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1011 pInput->Elements[iReg].Name = HvX64RegisterPat;
1012 pInput->Elements[iReg].Value.Reg64 = pCtx->msrPAT;
1013 iReg++;
1014 }
1015
1016 /* event injection (always clear it). */
1017 if (fWhat & CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)
1018 {
1019 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1020 pInput->Elements[iReg].Name = HvRegisterPendingInterruption;
1021 pInput->Elements[iReg].Value.Reg64 = 0;
1022 iReg++;
1023 }
1024
1025 /* Interruptibility state. This can get a little complicated since we get
1026 half of the state via HV_X64_VP_EXECUTION_STATE. */
1027 if ( (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
1028 == (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI) )
1029 {
1030 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1031 pInput->Elements[iReg].Name = HvRegisterInterruptState;
1032 pInput->Elements[iReg].Value.Reg64 = 0;
1033 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1034 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
1035 pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1;
1036 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
1037 pInput->Elements[iReg].Value.InterruptState.NmiMasked = 1;
1038 iReg++;
1039 }
1040 else if (fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT)
1041 {
1042 if ( pVCpu->nem.s.fLastInterruptShadow
1043 || ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1044 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip))
1045 {
1046 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1047 pInput->Elements[iReg].Name = HvRegisterInterruptState;
1048 pInput->Elements[iReg].Value.Reg64 = 0;
1049 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1050 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
1051 pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1;
1052 /** @todo Retrieve NMI state, currently assuming it's zero. (yes this may happen on I/O) */
1053 //if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
1054 // pInput->Elements[iReg].Value.InterruptState.NmiMasked = 1;
1055 iReg++;
1056 }
1057 }
1058 else
1059 Assert(!(fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI));
1060
1061 /* Interrupt windows. Always set if active as Hyper-V seems to be forgetful. */
1062 uint8_t const fDesiredIntWin = pVCpu->nem.s.fDesiredInterruptWindows;
1063 if ( fDesiredIntWin
1064 || pVCpu->nem.s.fCurrentInterruptWindows != fDesiredIntWin)
1065 {
1066 pVCpu->nem.s.fCurrentInterruptWindows = pVCpu->nem.s.fDesiredInterruptWindows;
1067 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1068 pInput->Elements[iReg].Name = HvX64RegisterDeliverabilityNotifications;
1069 pInput->Elements[iReg].Value.DeliverabilityNotifications.AsUINT64 = fDesiredIntWin;
1070 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.NmiNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_NMI));
1071 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.InterruptNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_REGULAR));
1072 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.InterruptPriority == (fDesiredIntWin & NEM_WIN_INTW_F_PRIO_MASK) >> NEM_WIN_INTW_F_PRIO_SHIFT);
1073 iReg++;
1074 }
1075
1076 /// @todo HvRegisterPendingEvent0
1077 /// @todo HvRegisterPendingEvent1
1078
1079 /*
1080 * Set the registers.
1081 */
1082 Assert((uintptr_t)&pInput->Elements[iReg] - (uintptr_t)pGVCpu->nem.s.pbHypercallData < PAGE_SIZE); /* max is 127 */
1083
1084 /*
1085 * Make the hypercall.
1086 */
1087 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, iReg),
1088 pGVCpu->nem.s.HCPhysHypercallData, 0 /*GCPhysOutput*/);
1089 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(iReg),
1090 ("uResult=%RX64 iRegs=%#x\n", uResult, iReg),
1091 VERR_NEM_SET_REGISTERS_FAILED);
1092 //LogFlow(("nemR0WinExportState: uResult=%#RX64 iReg=%zu fWhat=%#018RX64 fExtrn=%#018RX64 -> %#018RX64\n", uResult, iReg, fWhat, pCtx->fExtrn,
1093 // pCtx->fExtrn | CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM ));
1094 pCtx->fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM;
1095 return VINF_SUCCESS;
1096}
1097
1098
1099/**
1100 * Export the state to the native API (out of CPUMCTX).
1101 *
1102 * @returns VBox status code
1103 * @param pGVM The ring-0 VM handle.
1104 * @param pVM The cross context VM handle.
1105 * @param idCpu The calling EMT. Necessary for getting the
1106 * hypercall page and arguments.
1107 */
1108VMMR0_INT_DECL(int) NEMR0ExportState(PGVM pGVM, PVM pVM, VMCPUID idCpu)
1109{
1110 /*
1111 * Validate the call.
1112 */
1113 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
1114 if (RT_SUCCESS(rc))
1115 {
1116 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1117 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1118 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
1119
1120 /*
1121 * Call worker.
1122 */
1123 rc = nemR0WinExportState(pGVM, pGVCpu, CPUMQueryGuestCtxPtr(pVCpu));
1124 }
1125 return rc;
1126}
1127
1128
1129/**
1130 * Worker for NEMR0ImportState.
1131 *
1132 * Intention is to use it internally later.
1133 *
1134 * @returns VBox status code.
1135 * @param pGVM The ring-0 VM handle.
1136 * @param pGVCpu The ring-0 VCPU handle.
1137 * @param pCtx The CPU context structure to import into.
1138 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
1139 */
1140NEM_TMPL_STATIC int nemR0WinImportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat)
1141{
1142 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nem.s.pbHypercallData;
1143 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
1144
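    /* Only bother fetching state that is actually marked as external, i.e. state
       currently held by Hyper-V and not yet present in pCtx. */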
1145 fWhat &= pCtx->fExtrn;
1146
1147 pInput->PartitionId = pGVM->nem.s.idHvPartition;
1148 pInput->VpIndex = pGVCpu->idCpu;
1149 pInput->fFlags = 0;
1150
1151 /* GPRs */
1152 uintptr_t iReg = 0;
1153 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1154 {
1155 if (fWhat & CPUMCTX_EXTRN_RAX)
1156 pInput->Names[iReg++] = HvX64RegisterRax;
1157 if (fWhat & CPUMCTX_EXTRN_RCX)
1158 pInput->Names[iReg++] = HvX64RegisterRcx;
1159 if (fWhat & CPUMCTX_EXTRN_RDX)
1160 pInput->Names[iReg++] = HvX64RegisterRdx;
1161 if (fWhat & CPUMCTX_EXTRN_RBX)
1162 pInput->Names[iReg++] = HvX64RegisterRbx;
1163 if (fWhat & CPUMCTX_EXTRN_RSP)
1164 pInput->Names[iReg++] = HvX64RegisterRsp;
1165 if (fWhat & CPUMCTX_EXTRN_RBP)
1166 pInput->Names[iReg++] = HvX64RegisterRbp;
1167 if (fWhat & CPUMCTX_EXTRN_RSI)
1168 pInput->Names[iReg++] = HvX64RegisterRsi;
1169 if (fWhat & CPUMCTX_EXTRN_RDI)
1170 pInput->Names[iReg++] = HvX64RegisterRdi;
1171 if (fWhat & CPUMCTX_EXTRN_R8_R15)
1172 {
1173 pInput->Names[iReg++] = HvX64RegisterR8;
1174 pInput->Names[iReg++] = HvX64RegisterR9;
1175 pInput->Names[iReg++] = HvX64RegisterR10;
1176 pInput->Names[iReg++] = HvX64RegisterR11;
1177 pInput->Names[iReg++] = HvX64RegisterR12;
1178 pInput->Names[iReg++] = HvX64RegisterR13;
1179 pInput->Names[iReg++] = HvX64RegisterR14;
1180 pInput->Names[iReg++] = HvX64RegisterR15;
1181 }
1182 }
1183
1184 /* RIP & Flags */
1185 if (fWhat & CPUMCTX_EXTRN_RIP)
1186 pInput->Names[iReg++] = HvX64RegisterRip;
1187 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
1188 pInput->Names[iReg++] = HvX64RegisterRflags;
1189
1190 /* Segments */
1191 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
1192 {
1193 if (fWhat & CPUMCTX_EXTRN_CS)
1194 pInput->Names[iReg++] = HvX64RegisterCs;
1195 if (fWhat & CPUMCTX_EXTRN_ES)
1196 pInput->Names[iReg++] = HvX64RegisterEs;
1197 if (fWhat & CPUMCTX_EXTRN_SS)
1198 pInput->Names[iReg++] = HvX64RegisterSs;
1199 if (fWhat & CPUMCTX_EXTRN_DS)
1200 pInput->Names[iReg++] = HvX64RegisterDs;
1201 if (fWhat & CPUMCTX_EXTRN_FS)
1202 pInput->Names[iReg++] = HvX64RegisterFs;
1203 if (fWhat & CPUMCTX_EXTRN_GS)
1204 pInput->Names[iReg++] = HvX64RegisterGs;
1205 }
1206
1207 /* Descriptor tables and the task segment. */
1208 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
1209 {
1210 if (fWhat & CPUMCTX_EXTRN_LDTR)
1211 pInput->Names[iReg++] = HvX64RegisterLdtr;
1212 if (fWhat & CPUMCTX_EXTRN_TR)
1213 pInput->Names[iReg++] = HvX64RegisterTr;
1214 if (fWhat & CPUMCTX_EXTRN_IDTR)
1215 pInput->Names[iReg++] = HvX64RegisterIdtr;
1216 if (fWhat & CPUMCTX_EXTRN_GDTR)
1217 pInput->Names[iReg++] = HvX64RegisterGdtr;
1218 }
1219
1220 /* Control registers. */
1221 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
1222 {
1223 if (fWhat & CPUMCTX_EXTRN_CR0)
1224 pInput->Names[iReg++] = HvX64RegisterCr0;
1225 if (fWhat & CPUMCTX_EXTRN_CR2)
1226 pInput->Names[iReg++] = HvX64RegisterCr2;
1227 if (fWhat & CPUMCTX_EXTRN_CR3)
1228 pInput->Names[iReg++] = HvX64RegisterCr3;
1229 if (fWhat & CPUMCTX_EXTRN_CR4)
1230 pInput->Names[iReg++] = HvX64RegisterCr4;
1231 }
1232 pInput->Names[iReg++] = HvX64RegisterCr8; /// @todo CR8/TPR
1233
1234 /* Debug registers. */
1235 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
1236 {
1237 pInput->Names[iReg++] = HvX64RegisterDr0;
1238 pInput->Names[iReg++] = HvX64RegisterDr1;
1239 pInput->Names[iReg++] = HvX64RegisterDr2;
1240 pInput->Names[iReg++] = HvX64RegisterDr3;
1241 }
1242 if (fWhat & CPUMCTX_EXTRN_DR6)
1243 pInput->Names[iReg++] = HvX64RegisterDr6;
1244 if (fWhat & CPUMCTX_EXTRN_DR7)
1245 pInput->Names[iReg++] = HvX64RegisterDr7;
1246
1247 /* Floating point state. */
1248 if (fWhat & CPUMCTX_EXTRN_X87)
1249 {
1250 pInput->Names[iReg++] = HvX64RegisterFpMmx0;
1251 pInput->Names[iReg++] = HvX64RegisterFpMmx1;
1252 pInput->Names[iReg++] = HvX64RegisterFpMmx2;
1253 pInput->Names[iReg++] = HvX64RegisterFpMmx3;
1254 pInput->Names[iReg++] = HvX64RegisterFpMmx4;
1255 pInput->Names[iReg++] = HvX64RegisterFpMmx5;
1256 pInput->Names[iReg++] = HvX64RegisterFpMmx6;
1257 pInput->Names[iReg++] = HvX64RegisterFpMmx7;
1258 pInput->Names[iReg++] = HvX64RegisterFpControlStatus;
1259 }
1260 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
1261 pInput->Names[iReg++] = HvX64RegisterXmmControlStatus;
1262
1263 /* Vector state. */
1264 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
1265 {
1266 pInput->Names[iReg++] = HvX64RegisterXmm0;
1267 pInput->Names[iReg++] = HvX64RegisterXmm1;
1268 pInput->Names[iReg++] = HvX64RegisterXmm2;
1269 pInput->Names[iReg++] = HvX64RegisterXmm3;
1270 pInput->Names[iReg++] = HvX64RegisterXmm4;
1271 pInput->Names[iReg++] = HvX64RegisterXmm5;
1272 pInput->Names[iReg++] = HvX64RegisterXmm6;
1273 pInput->Names[iReg++] = HvX64RegisterXmm7;
1274 pInput->Names[iReg++] = HvX64RegisterXmm8;
1275 pInput->Names[iReg++] = HvX64RegisterXmm9;
1276 pInput->Names[iReg++] = HvX64RegisterXmm10;
1277 pInput->Names[iReg++] = HvX64RegisterXmm11;
1278 pInput->Names[iReg++] = HvX64RegisterXmm12;
1279 pInput->Names[iReg++] = HvX64RegisterXmm13;
1280 pInput->Names[iReg++] = HvX64RegisterXmm14;
1281 pInput->Names[iReg++] = HvX64RegisterXmm15;
1282 }
1283
1284 /* MSRs */
1285 // HvX64RegisterTsc - don't touch
1286 if (fWhat & CPUMCTX_EXTRN_EFER)
1287 pInput->Names[iReg++] = HvX64RegisterEfer;
1288 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1289 pInput->Names[iReg++] = HvX64RegisterKernelGsBase;
1290 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1291 {
1292 pInput->Names[iReg++] = HvX64RegisterSysenterCs;
1293 pInput->Names[iReg++] = HvX64RegisterSysenterEip;
1294 pInput->Names[iReg++] = HvX64RegisterSysenterEsp;
1295 }
1296 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1297 {
1298 pInput->Names[iReg++] = HvX64RegisterStar;
1299 pInput->Names[iReg++] = HvX64RegisterLstar;
1300 pInput->Names[iReg++] = HvX64RegisterCstar;
1301 pInput->Names[iReg++] = HvX64RegisterSfmask;
1302 }
1303
1304 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1305 {
1306 pInput->Names[iReg++] = HvX64RegisterApicBase; /// @todo APIC BASE
1307 pInput->Names[iReg++] = HvX64RegisterPat;
1308 }
1309
1310 /* Interruptibility. */
1311 if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
1312 {
1313 pInput->Names[iReg++] = HvRegisterInterruptState;
1314 pInput->Names[iReg++] = HvX64RegisterRip;
1315 }
1316
1317 /* event injection */
1318 pInput->Names[iReg++] = HvRegisterPendingInterruption;
1319 pInput->Names[iReg++] = HvRegisterPendingEvent0;
1320 pInput->Names[iReg++] = HvRegisterPendingEvent1;
1321 size_t const cRegs = iReg;
1322 size_t const cbInput = RT_ALIGN_Z(RT_OFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[cRegs]), 32);
1323
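    /* The register values are returned in the same hypercall page, right after
       the 32-byte aligned input structure. */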
1324 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
1325 Assert((uintptr_t)&paValues[cRegs] - (uintptr_t)pGVCpu->nem.s.pbHypercallData < PAGE_SIZE); /* (max is around 168 registers) */
1326 RT_BZERO(paValues, cRegs * sizeof(paValues[0]));
1327
1328 /*
1329 * Make the hypercall.
1330 */
1331 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, cRegs),
1332 pGVCpu->nem.s.HCPhysHypercallData,
1333 pGVCpu->nem.s.HCPhysHypercallData + cbInput);
1334 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(cRegs),
1335 ("uResult=%RX64 cRegs=%#x\n", uResult, cRegs),
1336 VERR_NEM_GET_REGISTERS_FAILED);
1337 //LogFlow(("nemR0WinImportState: uResult=%#RX64 iReg=%zu fWhat=%#018RX64 fExtr=%#018RX64\n", uResult, cRegs, fWhat, pCtx->fExtrn));
1338
1339 /*
1340 * Copy information to the CPUM context.
1341 */
1342 PVMCPU pVCpu = &pGVM->pVM->aCpus[pGVCpu->idCpu];
1343 iReg = 0;
1344
1345 /* GPRs */
1346 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1347 {
1348 if (fWhat & CPUMCTX_EXTRN_RAX)
1349 {
1350 Assert(pInput->Names[iReg] == HvX64RegisterRax);
1351 pCtx->rax = paValues[iReg++].Reg64;
1352 }
1353 if (fWhat & CPUMCTX_EXTRN_RCX)
1354 {
1355 Assert(pInput->Names[iReg] == HvX64RegisterRcx);
1356 pCtx->rcx = paValues[iReg++].Reg64;
1357 }
1358 if (fWhat & CPUMCTX_EXTRN_RDX)
1359 {
1360 Assert(pInput->Names[iReg] == HvX64RegisterRdx);
1361 pCtx->rdx = paValues[iReg++].Reg64;
1362 }
1363 if (fWhat & CPUMCTX_EXTRN_RBX)
1364 {
1365 Assert(pInput->Names[iReg] == HvX64RegisterRbx);
1366 pCtx->rbx = paValues[iReg++].Reg64;
1367 }
1368 if (fWhat & CPUMCTX_EXTRN_RSP)
1369 {
1370 Assert(pInput->Names[iReg] == HvX64RegisterRsp);
1371 pCtx->rsp = paValues[iReg++].Reg64;
1372 }
1373 if (fWhat & CPUMCTX_EXTRN_RBP)
1374 {
1375 Assert(pInput->Names[iReg] == HvX64RegisterRbp);
1376 pCtx->rbp = paValues[iReg++].Reg64;
1377 }
1378 if (fWhat & CPUMCTX_EXTRN_RSI)
1379 {
1380 Assert(pInput->Names[iReg] == HvX64RegisterRsi);
1381 pCtx->rsi = paValues[iReg++].Reg64;
1382 }
1383 if (fWhat & CPUMCTX_EXTRN_RDI)
1384 {
1385 Assert(pInput->Names[iReg] == HvX64RegisterRdi);
1386 pCtx->rdi = paValues[iReg++].Reg64;
1387 }
1388 if (fWhat & CPUMCTX_EXTRN_R8_R15)
1389 {
1390 Assert(pInput->Names[iReg] == HvX64RegisterR8);
1391 Assert(pInput->Names[iReg + 7] == HvX64RegisterR15);
1392 pCtx->r8 = paValues[iReg++].Reg64;
1393 pCtx->r9 = paValues[iReg++].Reg64;
1394 pCtx->r10 = paValues[iReg++].Reg64;
1395 pCtx->r11 = paValues[iReg++].Reg64;
1396 pCtx->r12 = paValues[iReg++].Reg64;
1397 pCtx->r13 = paValues[iReg++].Reg64;
1398 pCtx->r14 = paValues[iReg++].Reg64;
1399 pCtx->r15 = paValues[iReg++].Reg64;
1400 }
1401 }
1402
1403 /* RIP & Flags */
1404 if (fWhat & CPUMCTX_EXTRN_RIP)
1405 {
1406 Assert(pInput->Names[iReg] == HvX64RegisterRip);
1407 pCtx->rip = paValues[iReg++].Reg64;
1408 }
1409 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
1410 {
1411 Assert(pInput->Names[iReg] == HvX64RegisterRflags);
1412 pCtx->rflags.u = paValues[iReg++].Reg64;
1413 }
1414
1415 /* Segments */
1416#define COPY_BACK_SEG(a_idx, a_enmName, a_SReg) \
1417 do { \
1418 Assert(pInput->Names[a_idx] == a_enmName); \
1419 (a_SReg).u64Base = paValues[a_idx].Segment.Base; \
1420 (a_SReg).u32Limit = paValues[a_idx].Segment.Limit; \
1421 (a_SReg).ValidSel = (a_SReg).Sel = paValues[a_idx].Segment.Selector; \
1422 (a_SReg).Attr.u = paValues[a_idx].Segment.Attributes; \
1423 (a_SReg).fFlags = CPUMSELREG_FLAGS_VALID; \
1424 } while (0)
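/* Besides copying the base/limit/attributes, the macro marks the hidden parts of the selector
   register as valid (ValidSel + CPUMSELREG_FLAGS_VALID), so CPUM and IEM treat the cached
   values as authoritative instead of reloading them from the descriptor tables. */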
1425 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
1426 {
1427 if (fWhat & CPUMCTX_EXTRN_CS)
1428 {
1429 COPY_BACK_SEG(iReg, HvX64RegisterCs, pCtx->cs);
1430 iReg++;
1431 }
1432 if (fWhat & CPUMCTX_EXTRN_ES)
1433 {
1434 COPY_BACK_SEG(iReg, HvX64RegisterEs, pCtx->es);
1435 iReg++;
1436 }
1437 if (fWhat & CPUMCTX_EXTRN_SS)
1438 {
1439 COPY_BACK_SEG(iReg, HvX64RegisterSs, pCtx->ss);
1440 iReg++;
1441 }
1442 if (fWhat & CPUMCTX_EXTRN_DS)
1443 {
1444 COPY_BACK_SEG(iReg, HvX64RegisterDs, pCtx->ds);
1445 iReg++;
1446 }
1447 if (fWhat & CPUMCTX_EXTRN_FS)
1448 {
1449 COPY_BACK_SEG(iReg, HvX64RegisterFs, pCtx->fs);
1450 iReg++;
1451 }
1452 if (fWhat & CPUMCTX_EXTRN_GS)
1453 {
1454 COPY_BACK_SEG(iReg, HvX64RegisterGs, pCtx->gs);
1455 iReg++;
1456 }
1457 }
1458 /* Descriptor tables and the task segment. */
1459 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
1460 {
1461 if (fWhat & CPUMCTX_EXTRN_LDTR)
1462 {
1463 COPY_BACK_SEG(iReg, HvX64RegisterLdtr, pCtx->ldtr);
1464 iReg++;
1465 }
1466 if (fWhat & CPUMCTX_EXTRN_TR)
1467 {
1468 /* AMD-V likes loading TR in the AVAIL state, whereas Intel insists on BUSY. So,
1469 to avoid triggering sanity assertions around the code, always fix this up here. */
1470 COPY_BACK_SEG(iReg, HvX64RegisterTr, pCtx->tr);
1471 switch (pCtx->tr.Attr.n.u4Type)
1472 {
1473 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1474 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1475 break;
1476 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
1477 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
1478 break;
1479 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
1480 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;
1481 break;
1482 }
1483 iReg++;
1484 }
1485 if (fWhat & CPUMCTX_EXTRN_IDTR)
1486 {
1487 Assert(pInput->Names[iReg] == HvX64RegisterIdtr);
1488 pCtx->idtr.cbIdt = paValues[iReg].Table.Limit;
1489 pCtx->idtr.pIdt = paValues[iReg].Table.Base;
1490 iReg++;
1491 }
1492 if (fWhat & CPUMCTX_EXTRN_GDTR)
1493 {
1494 Assert(pInput->Names[iReg] == HvX64RegisterGdtr);
1495 pCtx->gdtr.cbGdt = paValues[iReg].Table.Limit;
1496 pCtx->gdtr.pGdt = paValues[iReg].Table.Base;
1497 iReg++;
1498 }
1499 }
1500
1501 /* Control registers. */
1502 bool fMaybeChangedMode = false;
1503 bool fFlushTlb = false;
1504 bool fFlushGlobalTlb = false;
1505 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
1506 {
1507 if (fWhat & CPUMCTX_EXTRN_CR0)
1508 {
1509 Assert(pInput->Names[iReg] == HvX64RegisterCr0);
1510 if (pCtx->cr0 != paValues[iReg].Reg64)
1511 {
1512 CPUMSetGuestCR0(pVCpu, paValues[iReg].Reg64);
1513 fMaybeChangedMode = true;
1514 fFlushTlb = fFlushGlobalTlb = true; /// @todo fix this
1515 }
1516 iReg++;
1517 }
1518 if (fWhat & CPUMCTX_EXTRN_CR2)
1519 {
1520 Assert(pInput->Names[iReg] == HvX64RegisterCr2);
1521 pCtx->cr2 = paValues[iReg].Reg64;
1522 iReg++;
1523 }
1524 if (fWhat & CPUMCTX_EXTRN_CR3)
1525 {
1526 Assert(pInput->Names[iReg] == HvX64RegisterCr3);
1527 if (pCtx->cr3 != paValues[iReg].Reg64)
1528 {
1529 CPUMSetGuestCR3(pVCpu, paValues[iReg].Reg64);
1530 fFlushTlb = true;
1531 }
1532 iReg++;
1533 }
1534 if (fWhat & CPUMCTX_EXTRN_CR4)
1535 {
1536 Assert(pInput->Names[iReg] == HvX64RegisterCr4);
1537 if (pCtx->cr4 != paValues[iReg].Reg64)
1538 {
1539 CPUMSetGuestCR4(pVCpu, paValues[iReg].Reg64);
1540 fMaybeChangedMode = true;
1541 fFlushTlb = fFlushGlobalTlb = true; /// @todo fix this
1542 }
1543 iReg++;
1544 }
1545 }
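/* Note: CR0/CR3/CR4 go through the CPUMSetGuestCRx setters and only record here whether a
   mode change or TLB flush may be needed; the actual PGM work is deferred to the end of
   this function (and to ring-3 for the TLB flush). */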
1546
1547 /// @todo CR8/TPR
1548 Assert(pInput->Names[iReg] == HvX64RegisterCr8);
1549 APICSetTpr(pVCpu, (uint8_t)paValues[iReg].Reg64 << 4);
1550 iReg++;
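/* CR8 holds the priority class of the APIC TPR (TPR bits 7:4) in its low four bits, hence
   the shift by four before handing the value to APICSetTpr. */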
1551
1552 /* Debug registers. */
1553/** @todo fixme */
1554 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
1555 {
1556 Assert(pInput->Names[iReg] == HvX64RegisterDr0);
1557 Assert(pInput->Names[iReg+3] == HvX64RegisterDr3);
1558 if (pCtx->dr[0] != paValues[iReg].Reg64)
1559 CPUMSetGuestDR0(pVCpu, paValues[iReg].Reg64);
1560 iReg++;
1561 if (pCtx->dr[1] != paValues[iReg].Reg64)
1562 CPUMSetGuestDR1(pVCpu, paValues[iReg].Reg64);
1563 iReg++;
1564 if (pCtx->dr[2] != paValues[iReg].Reg64)
1565 CPUMSetGuestDR2(pVCpu, paValues[iReg].Reg64);
1566 iReg++;
1567 if (pCtx->dr[3] != paValues[iReg].Reg64)
1568 CPUMSetGuestDR3(pVCpu, paValues[iReg].Reg64);
1569 iReg++;
1570 }
1571 if (fWhat & CPUMCTX_EXTRN_DR6)
1572 {
1573 Assert(pInput->Names[iReg] == HvX64RegisterDr6);
1574 if (pCtx->dr[6] != paValues[iReg].Reg64)
1575 CPUMSetGuestDR6(pVCpu, paValues[iReg].Reg64);
1576 iReg++;
1577 }
1578 if (fWhat & CPUMCTX_EXTRN_DR7)
1579 {
1580 Assert(pInput->Names[iReg] == HvX64RegisterDr7);
1581 if (pCtx->dr[7] != paValues[iReg].Reg64)
1582 CPUMSetGuestDR7(pVCpu, paValues[iReg].Reg64);
1583 iReg++;
1584 }
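/* The DRx values are pushed through the CPUMSetGuestDRx setters rather than written straight
   into pCtx->dr[], presumably so CPUM can keep its debug register bookkeeping in sync. */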
1585
1586 /* Floating point state. */
1587 if (fWhat & CPUMCTX_EXTRN_X87)
1588 {
1589 Assert(pInput->Names[iReg] == HvX64RegisterFpMmx0);
1590 Assert(pInput->Names[iReg + 7] == HvX64RegisterFpMmx7);
1591 pCtx->pXStateR0->x87.aRegs[0].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1592 pCtx->pXStateR0->x87.aRegs[0].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1593 iReg++;
1594 pCtx->pXStateR0->x87.aRegs[1].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1595 pCtx->pXStateR0->x87.aRegs[1].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1596 iReg++;
1597 pCtx->pXStateR0->x87.aRegs[2].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1598 pCtx->pXStateR0->x87.aRegs[2].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1599 iReg++;
1600 pCtx->pXStateR0->x87.aRegs[3].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1601 pCtx->pXStateR0->x87.aRegs[3].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1602 iReg++;
1603 pCtx->pXStateR0->x87.aRegs[4].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1604 pCtx->pXStateR0->x87.aRegs[4].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1605 iReg++;
1606 pCtx->pXStateR0->x87.aRegs[5].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1607 pCtx->pXStateR0->x87.aRegs[5].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1608 iReg++;
1609 pCtx->pXStateR0->x87.aRegs[6].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1610 pCtx->pXStateR0->x87.aRegs[6].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1611 iReg++;
1612 pCtx->pXStateR0->x87.aRegs[7].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1613 pCtx->pXStateR0->x87.aRegs[7].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1614 iReg++;
1615
1616 Assert(pInput->Names[iReg] == HvX64RegisterFpControlStatus);
1617 pCtx->pXStateR0->x87.FCW = paValues[iReg].FpControlStatus.FpControl;
1618 pCtx->pXStateR0->x87.FSW = paValues[iReg].FpControlStatus.FpStatus;
1619 pCtx->pXStateR0->x87.FTW = paValues[iReg].FpControlStatus.FpTag
1620 /*| (paValues[iReg].FpControlStatus.Reserved << 8)*/;
1621 pCtx->pXStateR0->x87.FOP = paValues[iReg].FpControlStatus.LastFpOp;
1622 pCtx->pXStateR0->x87.FPUIP = (uint32_t)paValues[iReg].FpControlStatus.LastFpRip;
1623 pCtx->pXStateR0->x87.CS = (uint16_t)(paValues[iReg].FpControlStatus.LastFpRip >> 32);
1624 pCtx->pXStateR0->x87.Rsrvd1 = (uint16_t)(paValues[iReg].FpControlStatus.LastFpRip >> 48);
1625 iReg++;
1626 }
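/* The 64-bit LastFpRip value is split back into the legacy layout above: FPUIP in the low
   32 bits, the FPU CS selector in bits 47:32 and the remaining word in Rsrvd1. */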
1627
1628 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
1629 {
1630 Assert(pInput->Names[iReg] == HvX64RegisterXmmControlStatus);
1631 if (fWhat & CPUMCTX_EXTRN_X87)
1632 {
1633 pCtx->pXStateR0->x87.FPUDP = (uint32_t)paValues[iReg].XmmControlStatus.LastFpRdp;
1634 pCtx->pXStateR0->x87.DS = (uint16_t)(paValues[iReg].XmmControlStatus.LastFpRdp >> 32);
1635 pCtx->pXStateR0->x87.Rsrvd2 = (uint16_t)(paValues[iReg].XmmControlStatus.LastFpRdp >> 48);
1636 }
1637 pCtx->pXStateR0->x87.MXCSR = paValues[iReg].XmmControlStatus.XmmStatusControl;
1638 pCtx->pXStateR0->x87.MXCSR_MASK = paValues[iReg].XmmControlStatus.XmmStatusControlMask; /** @todo ??? (Isn't this an output field?) */
1639 iReg++;
1640 }
1641
1642 /* Vector state. */
1643 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
1644 {
1645 Assert(pInput->Names[iReg] == HvX64RegisterXmm0);
1646 Assert(pInput->Names[iReg+15] == HvX64RegisterXmm15);
1647 pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1648 pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1649 iReg++;
1650 pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1651 pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1652 iReg++;
1653 pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1654 pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1655 iReg++;
1656 pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1657 pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1658 iReg++;
1659 pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1660 pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1661 iReg++;
1662 pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1663 pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1664 iReg++;
1665 pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1666 pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1667 iReg++;
1668 pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1669 pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1670 iReg++;
1671 pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1672 pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1673 iReg++;
1674 pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1675 pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1676 iReg++;
1677 pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1678 pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1679 iReg++;
1680 pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1681 pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1682 iReg++;
1683 pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1684 pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1685 iReg++;
1686 pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1687 pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1688 iReg++;
1689 pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1690 pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1691 iReg++;
1692 pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1693 pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1694 iReg++;
1695 }
1696
1697
1698 /* MSRs */
1699 // HvX64RegisterTsc - don't touch
1700 if (fWhat & CPUMCTX_EXTRN_EFER)
1701 {
1702 Assert(pInput->Names[iReg] == HvX64RegisterEfer);
1703 if (paValues[iReg].Reg64 != pCtx->msrEFER)
1704 {
1705 pCtx->msrEFER = paValues[iReg].Reg64;
1706 fMaybeChangedMode = true;
1707 }
1708 iReg++;
1709 }
1710 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1711 {
1712 Assert(pInput->Names[iReg] == HvX64RegisterKernelGsBase);
1713 pCtx->msrKERNELGSBASE = paValues[iReg].Reg64;
1714 iReg++;
1715 }
1716 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1717 {
1718 Assert(pInput->Names[iReg] == HvX64RegisterSysenterCs);
1719 pCtx->SysEnter.cs = paValues[iReg].Reg64;
1720 iReg++;
1721 Assert(pInput->Names[iReg] == HvX64RegisterSysenterEip);
1722 pCtx->SysEnter.eip = paValues[iReg].Reg64;
1723 iReg++;
1724 Assert(pInput->Names[iReg] == HvX64RegisterSysenterEsp);
1725 pCtx->SysEnter.esp = paValues[iReg].Reg64;
1726 iReg++;
1727 }
1728 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1729 {
1730 Assert(pInput->Names[iReg] == HvX64RegisterStar);
1731 pCtx->msrSTAR = paValues[iReg].Reg64;
1732 iReg++;
1733 Assert(pInput->Names[iReg] == HvX64RegisterLstar);
1734 pCtx->msrLSTAR = paValues[iReg].Reg64;
1735 iReg++;
1736 Assert(pInput->Names[iReg] == HvX64RegisterCstar);
1737 pCtx->msrCSTAR = paValues[iReg].Reg64;
1738 iReg++;
1739 Assert(pInput->Names[iReg] == HvX64RegisterSfmask);
1740 pCtx->msrSFMASK = paValues[iReg].Reg64;
1741 iReg++;
1742 }
1743 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1744 {
1745 Assert(pInput->Names[iReg] == HvX64RegisterApicBase);
1746 if (paValues[iReg].Reg64 != APICGetBaseMsrNoCheck(pVCpu))
1747 {
1748 VBOXSTRICTRC rc2 = APICSetBaseMsr(pVCpu, paValues[iReg].Reg64);
1749 Assert(rc2 == VINF_SUCCESS); NOREF(rc2);
1750 }
1751 iReg++;
1752
1753 Assert(pInput->Names[iReg] == HvX64RegisterPat);
1754 pCtx->msrPAT = paValues[iReg].Reg64;
1755 iReg++;
1756 }
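/* The APIC base MSR is routed through APICSetBaseMsr rather than stored in the context
   directly, presumably so the virtual APIC device state stays consistent with it. */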
1757
1758 /* Interruptibility. */
1759 if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
1760 {
1761 Assert(pInput->Names[iReg] == HvRegisterInterruptState);
1762 Assert(pInput->Names[iReg + 1] == HvX64RegisterRip);
1763
1764 if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
1765 {
1766 pVCpu->nem.s.fLastInterruptShadow = paValues[iReg].InterruptState.InterruptShadow;
1767 if (paValues[iReg].InterruptState.InterruptShadow)
1768 {
1769 EMSetInhibitInterruptsPC(pVCpu, paValues[iReg + 1].Reg64);
1770 VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1771 }
1772 else
1773 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1774 }
1775
1776 if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
1777 {
1778 if (paValues[iReg].InterruptState.NmiMasked)
1779 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
1780 else
1781 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
1782 }
1783
1784 fWhat |= CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI;
1785 iReg += 2;
1786 }
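/* The Hyper-V interrupt shadow is translated into VBox's inhibit-interrupts-until-next-
   instruction mechanism (EMSetInhibitInterruptsPC records the RIP the shadow applies to),
   while NmiMasked is reflected as VMCPU_FF_BLOCK_NMIS. */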
1787
1788 /* Event injection. */
1789 /// @todo HvRegisterPendingInterruption
1790 Assert(pInput->Names[iReg] == HvRegisterPendingInterruption);
1791 if (paValues[iReg].PendingInterruption.InterruptionPending)
1792 {
1793 Log7(("PendingInterruption: type=%u vector=%#x errcd=%RTbool/%#x instr-len=%u nested=%u\n",
1794 paValues[iReg].PendingInterruption.InterruptionType, paValues[iReg].PendingInterruption.InterruptionVector,
1795 paValues[iReg].PendingInterruption.DeliverErrorCode, paValues[iReg].PendingInterruption.ErrorCode,
1796 paValues[iReg].PendingInterruption.InstructionLength, paValues[iReg].PendingInterruption.NestedEvent));
1797 AssertMsg((paValues[iReg].PendingInterruption.AsUINT64 & UINT64_C(0xfc00)) == 0,
1798 ("%#RX64\n", paValues[iReg].PendingInterruption.AsUINT64));
1799 }
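/* A set InterruptionPending bit means the hypervisor still has an event queued for delivery
   to the guest; for now it is only logged and sanity checked (see the @todo markers). */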
1800
1801 /// @todo HvRegisterPendingEvent0
1802 /// @todo HvRegisterPendingEvent1
1803
1804 /* Almost done, just update extrn flags and maybe change PGM mode. */
1805 pCtx->fExtrn &= ~fWhat;
1806
1807 /* Typical. */
1808 if (!fMaybeChangedMode && !fFlushTlb)
1809 return VINF_SUCCESS;
1810
1811 /*
1812 * Slow.
1813 */
1814 int rc = VINF_SUCCESS;
1815 if (fMaybeChangedMode)
1816 {
1817 rc = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
1818 if (rc == VINF_PGM_CHANGE_MODE)
1819 {
1820 LogFlow(("nemR0WinImportState: -> VERR_NEM_CHANGE_PGM_MODE!\n"));
1821 return VERR_NEM_CHANGE_PGM_MODE;
1822 }
1823 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc));
1824 }
1825
1826 if (fFlushTlb)
1827 {
1828 LogFlow(("nemR0WinImportState: -> VERR_NEM_FLUSH_TLB!\n"));
1829 rc = VERR_NEM_FLUSH_TLB; /* Calling PGMFlushTLB w/o long jump setup doesn't work, ring-3 does it. */
1830 }
1831
1832 return rc;
1833}
1834
1835
1836/**
1837 * Import the state from the native API (back to CPUMCTX).
1838 *
1839 * @returns VBox status code
1840 * @param pGVM The ring-0 VM handle.
1841 * @param pVM The cross context VM handle.
1842 * @param idCpu The calling EMT. Necessary for getting the
1843 * hypercall page and arguments.
1844 * @param fWhat What to import, CPUMCTX_EXTRN_XXX. Set
1845 * CPUMCTX_EXTRN_ALL for everything.
1846 */
1847VMMR0_INT_DECL(int) NEMR0ImportState(PGVM pGVM, PVM pVM, VMCPUID idCpu, uint64_t fWhat)
1848{
1849 /*
1850 * Validate the call.
1851 */
1852 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
1853 if (RT_SUCCESS(rc))
1854 {
1855 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1856 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1857 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
1858
1859 /*
1860 * Call worker.
1861 */
1862 rc = nemR0WinImportState(pGVM, pGVCpu, CPUMQueryGuestCtxPtr(pVCpu), fWhat);
1863 }
1864 return rc;
1865}
1866
1867
1868VMMR0_INT_DECL(VBOXSTRICTRC) NEMR0RunGuestCode(PGVM pGVM, VMCPUID idCpu)
1869{
1870#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
1871 PVM pVM = pGVM->pVM;
1872 return nemHCWinRunGC(pVM, &pVM->aCpus[idCpu], pGVM, &pGVM->aCpus[idCpu]);
1873#else
1874 RT_NOREF(pGVM, idCpu);
1875 return VERR_NOT_IMPLEMENTED;
1876#endif
1877}
1878