
source: vbox/trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp@ 72514

Last change on this file since 72514 was 72488, checked in by vboxsync, 7 years ago

NEM,CPUM,EM: Don't sync in/out the entire state when leaving the inner NEM loop, only what IEM/TRPM might need. Speeds up MMIO and I/O requiring return to ring-3. bugref:9044

1/* $Id: NEMR0Native-win.cpp 72488 2018-06-09 12:24:35Z vboxsync $ */
2/** @file
3 * NEM - Native execution manager, native ring-0 Windows backend.
4 */
5
6/*
7 * Copyright (C) 2018 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_NEM
23#include <iprt/nt/nt.h>
24#include <iprt/nt/hyperv.h>
25#include <iprt/nt/vid.h>
26#include <winerror.h>
27
28#include <VBox/vmm/nem.h>
29#include <VBox/vmm/iem.h>
30#include <VBox/vmm/em.h>
31#include <VBox/vmm/apic.h>
32#include <VBox/vmm/pdm.h>
33#include "NEMInternal.h"
34#include <VBox/vmm/gvm.h>
35#include <VBox/vmm/vm.h>
36#include <VBox/vmm/gvmm.h>
37#include <VBox/param.h>
38
39#include <iprt/dbg.h>
40#include <iprt/memobj.h>
41#include <iprt/string.h>
42
43
44/* Assert compile context sanity. */
45#ifndef RT_OS_WINDOWS
46# error "Windows only file!"
47#endif
48#ifndef RT_ARCH_AMD64
49# error "AMD64 only file!"
50#endif
51
52
53/*********************************************************************************************************************************
54* Internal Functions *
55*********************************************************************************************************************************/
56typedef uint32_t DWORD; /* for winerror.h constants */
57
58
59/*********************************************************************************************************************************
60* Global Variables *
61*********************************************************************************************************************************/
62static uint64_t (*g_pfnHvlInvokeHypercall)(uint64_t uCallInfo, uint64_t HCPhysInput, uint64_t HCPhysOutput);
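/* Note: this is resolved from the NT kernel at runtime by NEMR0InitVM() below.
   uCallInfo carries the hypercall code (and, for rep hypercalls, the repeat count),
   while HCPhysInput/HCPhysOutput are the host physical addresses of the hypercall
   input/output pages set up by nemR0InitHypercallData(). */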
63
64/**
65 * WinHvr.sys!WinHvDepositMemory
66 *
67 * This API will try to allocate cPages on IdealNode and deposit them with the
68 * hypervisor for use with the given partition. The memory will be freed when
69 * VID.SYS calls WinHvWithdrawAllMemory as the partition is cleaned up.
70 *
71 * Apparently node numbers above 64 have a different meaning.
72 */
73static NTSTATUS (*g_pfnWinHvDepositMemory)(uintptr_t idPartition, size_t cPages, uintptr_t IdealNode, size_t *pcActuallyAdded);
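/* Illustrative sketch (not part of the original source): depositing 2 MB worth of
   pages when the hypervisor runs dry, mirroring the retry loop in nemR0WinMapPages()
   further down:
       size_t   cPagesAdded = 0;
       NTSTATUS rcNt = g_pfnWinHvDepositMemory(pGVM->nem.s.idHvPartition, 512, 0, &cPagesAdded);
   The 512-page granularity and the IdealNode value of 0 follow that loop; whether 0
   means "any node" is an assumption. */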
74
75
76/*********************************************************************************************************************************
77* Internal Functions *
78*********************************************************************************************************************************/
79NEM_TMPL_STATIC int nemR0WinMapPages(PGVM pGVM, PVM pVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
80 uint32_t cPages, uint32_t fFlags);
81NEM_TMPL_STATIC int nemR0WinUnmapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys, uint32_t cPages);
82NEM_TMPL_STATIC int nemR0WinExportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx);
83NEM_TMPL_STATIC int nemR0WinImportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat);
84DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, uint32_t uFunction, void *pvInput, uint32_t cbInput,
85 void *pvOutput, uint32_t cbOutput);
86
87
88/*
89 * Instantiate the code we share with ring-3.
90 */
91#include "../VMMAll/NEMAllNativeTemplate-win.cpp.h"
92
93/**
94 * Worker for NEMR0InitVM that allocates a hypercall page.
95 *
96 * @returns VBox status code.
97 * @param pHypercallData The hypercall data page to initialize.
98 */
99static int nemR0InitHypercallData(PNEMR0HYPERCALLDATA pHypercallData)
100{
101 int rc = RTR0MemObjAllocPage(&pHypercallData->hMemObj, PAGE_SIZE, false /*fExecutable*/);
102 if (RT_SUCCESS(rc))
103 {
104 pHypercallData->HCPhysPage = RTR0MemObjGetPagePhysAddr(pHypercallData->hMemObj, 0 /*iPage*/);
105 AssertStmt(pHypercallData->HCPhysPage != NIL_RTHCPHYS, rc = VERR_INTERNAL_ERROR_3);
106 pHypercallData->pbPage = (uint8_t *)RTR0MemObjAddress(pHypercallData->hMemObj);
107 AssertStmt(pHypercallData->pbPage, rc = VERR_INTERNAL_ERROR_3);
108 if (RT_SUCCESS(rc))
109 return VINF_SUCCESS;
110
111 /* bail out */
112 RTR0MemObjFree(pHypercallData->hMemObj, true /*fFreeMappings*/);
113 }
114 pHypercallData->hMemObj = NIL_RTR0MEMOBJ;
115 pHypercallData->HCPhysPage = NIL_RTHCPHYS;
116 pHypercallData->pbPage = NULL;
117 return rc;
118}
119
120/**
121 * Worker for NEMR0CleanupVM and NEMR0InitVM that cleans up a hypercall page.
122 *
123 * @param pHypercallData The hypercall data page to uninitialize.
124 */
125static void nemR0DeleteHypercallData(PNEMR0HYPERCALLDATA pHypercallData)
126{
127 /* Check pbPage here since it's NULL when not initialized, whereas hMemObj can
128 be either NIL_RTR0MEMOBJ or 0 (they aren't necessarily the same). */
129 if (pHypercallData->pbPage != NULL)
130 {
131 RTR0MemObjFree(pHypercallData->hMemObj, true /*fFreeMappings*/);
132 pHypercallData->pbPage = NULL;
133 }
134 pHypercallData->hMemObj = NIL_RTR0MEMOBJ;
135 pHypercallData->HCPhysPage = NIL_RTHCPHYS;
136}
137
138
139/**
140 * Called by NEMR3Init to make sure we've got what we need.
141 *
142 * @returns VBox status code.
143 * @param pGVM The ring-0 VM handle.
144 * @param pVM The cross context VM handle.
145 * @thread EMT(0)
146 */
147VMMR0_INT_DECL(int) NEMR0InitVM(PGVM pGVM, PVM pVM)
148{
149 AssertCompile(sizeof(pGVM->nem.s) <= sizeof(pGVM->nem.padding));
150 AssertCompile(sizeof(pGVM->aCpus[0].nem.s) <= sizeof(pGVM->aCpus[0].nem.padding));
151
152 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, 0);
153 AssertRCReturn(rc, rc);
154
155 /*
156 * We want to perform hypercalls here. The NT kernel started to expose a very
157 * low-level interface for doing this somewhere between builds 14271 and 16299.
158 * Since we need build 17134 to get anywhere at all, the exact build is not relevant here.
159 *
160 * We also need to deposit memory with the hypervisor for use by the partition
161 * (page mapping structures and the like).
162 */
163 RTDBGKRNLINFO hKrnlInfo;
164 rc = RTR0DbgKrnlInfoOpen(&hKrnlInfo, 0);
165 if (RT_SUCCESS(rc))
166 {
167 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, NULL, "HvlInvokeHypercall", (void **)&g_pfnHvlInvokeHypercall);
168 if (RT_SUCCESS(rc))
169 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, "winhvr.sys", "WinHvDepositMemory", (void **)&g_pfnWinHvDepositMemory);
170 RTR0DbgKrnlInfoRelease(hKrnlInfo);
171 if (RT_SUCCESS(rc))
172 {
173 /*
174 * Allocate a page for non-EMT threads to use for hypercalls (update
175 * statistics and such) and a critical section protecting it.
176 */
177 rc = RTCritSectInit(&pGVM->nem.s.HypercallDataCritSect);
178 if (RT_SUCCESS(rc))
179 {
180 rc = nemR0InitHypercallData(&pGVM->nem.s.HypercallData);
181 if (RT_SUCCESS(rc))
182 {
183 /*
184 * Allocate a page for each VCPU to place hypercall data on.
185 */
186 for (VMCPUID i = 0; i < pGVM->cCpus; i++)
187 {
188 rc = nemR0InitHypercallData(&pGVM->aCpus[i].nem.s.HypercallData);
189 if (RT_FAILURE(rc))
190 {
191 while (i-- > 0)
192 nemR0DeleteHypercallData(&pGVM->aCpus[i].nem.s.HypercallData);
193 break;
194 }
195 }
196 if (RT_SUCCESS(rc))
197 {
198 /*
199 * So far, so good.
200 */
201 return rc;
202 }
203
204 /*
205 * Bail out.
206 */
207 nemR0DeleteHypercallData(&pGVM->nem.s.HypercallData);
208 }
209 RTCritSectDelete(&pGVM->nem.s.HypercallDataCritSect);
210 }
211 }
212 else
213 rc = VERR_NEM_MISSING_KERNEL_API;
214 }
215
216 RT_NOREF(pVM);
217 return rc;
218}
219
220
221/**
222 * Perform an I/O control operation on the partition handle (VID.SYS).
223 *
224 * @returns NT status code.
225 * @param pGVM The ring-0 VM structure.
226 * @param uFunction The function to perform.
227 * @param pvInput The input buffer. This must point within the VM
228 * structure so we can easily convert to a ring-3
229 * pointer if necessary.
230 * @param cbInput The size of the input. @a pvInput must be NULL when
231 * zero.
232 * @param pvOutput The output buffer. This must also point within the
233 * VM structure for ring-3 pointer magic.
234 * @param cbOutput The size of the output. @a pvOutput must be NULL
235 * when zero.
236 */
237DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, uint32_t uFunction, void *pvInput, uint32_t cbInput,
238 void *pvOutput, uint32_t cbOutput)
239{
240#ifdef RT_STRICT
241 /*
242 * Input and output parameters are part of the VM CPU structure.
243 */
244 PVM pVM = pGVM->pVM;
245 size_t const cbVM = RT_UOFFSETOF(VM, aCpus[pGVM->cCpus]);
246 if (pvInput)
247 AssertReturn(((uintptr_t)pvInput + cbInput) - (uintptr_t)pVM <= cbVM, VERR_INVALID_PARAMETER);
248 if (pvOutput)
249 AssertReturn(((uintptr_t)pvOutput + cbOutput) - (uintptr_t)pVM <= cbVM, VERR_INVALID_PARAMETER);
250#endif
251
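    /* The ring-3 addresses handed to SUPR0IoCtlPerform below are derived from the
       ring-0 ones by adding nem.s.offRing3ConversionDelta, which NEMR0InitVMPart2()
       computes as (uintptr_t)pVM->pVMR3 - (uintptr_t)pGVM->pVM.  This only works
       because pvInput/pvOutput are required to lie within the VM structure, which
       is mapped into both contexts. */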
252 int32_t rcNt = STATUS_UNSUCCESSFUL;
253 int rc = SUPR0IoCtlPerform(pGVM->nem.s.pIoCtlCtx, uFunction,
254 pvInput,
255 pvInput ? (uintptr_t)pvInput + pGVM->nem.s.offRing3ConversionDelta : NIL_RTR3PTR,
256 cbInput,
257 pvOutput,
258 pvOutput ? (uintptr_t)pvOutput + pGVM->nem.s.offRing3ConversionDelta : NIL_RTR3PTR,
259 cbOutput,
260 &rcNt);
261 if (RT_SUCCESS(rc) || !NT_SUCCESS((NTSTATUS)rcNt))
262 return (NTSTATUS)rcNt;
263 return STATUS_UNSUCCESSFUL;
264}
265
266
267/**
268 * 2nd part of the initialization, after we've got a partition handle.
269 *
270 * @returns VBox status code.
271 * @param pGVM The ring-0 VM handle.
272 * @param pVM The cross context VM handle.
273 * @thread EMT(0)
274 */
275VMMR0_INT_DECL(int) NEMR0InitVMPart2(PGVM pGVM, PVM pVM)
276{
277 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, 0);
278 AssertRCReturn(rc, rc);
279 SUPR0Printf("NEMR0InitVMPart2\n"); LogRel(("2: NEMR0InitVMPart2\n"));
280
281 /*
282 * Copy and validate the I/O control information from ring-3.
283 */
284 NEMWINIOCTL Copy = pVM->nem.s.IoCtlGetHvPartitionId;
285 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);
286 AssertLogRelReturn(Copy.cbInput == 0, VERR_NEM_INIT_FAILED);
287 AssertLogRelReturn(Copy.cbOutput == sizeof(HV_PARTITION_ID), VERR_NEM_INIT_FAILED);
288 pGVM->nem.s.IoCtlGetHvPartitionId = Copy;
289
290 Copy = pVM->nem.s.IoCtlStartVirtualProcessor;
291 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);
292 AssertLogRelReturn(Copy.cbInput == sizeof(HV_VP_INDEX), VERR_NEM_INIT_FAILED);
293 AssertLogRelReturn(Copy.cbOutput == 0, VERR_NEM_INIT_FAILED);
294 AssertLogRelReturn(Copy.uFunction != pGVM->nem.s.IoCtlGetHvPartitionId.uFunction, VERR_NEM_INIT_FAILED);
295 pGVM->nem.s.IoCtlStartVirtualProcessor = Copy;
296
297 Copy = pVM->nem.s.IoCtlStopVirtualProcessor;
298 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);
299 AssertLogRelReturn(Copy.cbInput == sizeof(HV_VP_INDEX), VERR_NEM_INIT_FAILED);
300 AssertLogRelReturn(Copy.cbOutput == 0, VERR_NEM_INIT_FAILED);
301 AssertLogRelReturn(Copy.uFunction != pGVM->nem.s.IoCtlGetHvPartitionId.uFunction, VERR_NEM_INIT_FAILED);
302 AssertLogRelReturn(Copy.uFunction != pGVM->nem.s.IoCtlStartVirtualProcessor.uFunction, VERR_NEM_INIT_FAILED);
303 pGVM->nem.s.IoCtlStopVirtualProcessor = Copy;
304
305 Copy = pVM->nem.s.IoCtlMessageSlotHandleAndGetNext;
306 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);
307 AssertLogRelReturn(Copy.cbInput == sizeof(VID_IOCTL_INPUT_MESSAGE_SLOT_HANDLE_AND_GET_NEXT), VERR_NEM_INIT_FAILED);
308 AssertLogRelReturn(Copy.cbOutput == 0, VERR_NEM_INIT_FAILED);
309 AssertLogRelReturn(Copy.uFunction != pGVM->nem.s.IoCtlGetHvPartitionId.uFunction, VERR_NEM_INIT_FAILED);
310 AssertLogRelReturn(Copy.uFunction != pGVM->nem.s.IoCtlStartVirtualProcessor.uFunction, VERR_NEM_INIT_FAILED);
311 AssertLogRelReturn(Copy.uFunction != pGVM->nem.s.IoCtlStopVirtualProcessor.uFunction, VERR_NEM_INIT_FAILED);
312 pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext = Copy;
313
314 /*
315 * Set up an I/O control context for the partition handle for later use.
316 */
317 rc = SUPR0IoCtlSetupForHandle(pGVM->pSession, pVM->nem.s.hPartitionDevice, 0, &pGVM->nem.s.pIoCtlCtx);
318 AssertLogRelRCReturn(rc, rc);
319 pGVM->nem.s.offRing3ConversionDelta = (uintptr_t)pVM->pVMR3 - (uintptr_t)pGVM->pVM;
320
321 /*
322 * Get the partition ID.
323 */
324 PVMCPU pVCpu = &pGVM->pVM->aCpus[0];
325 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlGetHvPartitionId.uFunction, NULL, 0,
326 &pVCpu->nem.s.uIoCtlBuf.idPartition, sizeof(pVCpu->nem.s.uIoCtlBuf.idPartition));
327 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("IoCtlGetHvPartitionId failed: %#x\n", rcNt), VERR_NEM_INIT_FAILED);
328 pGVM->nem.s.idHvPartition = pVCpu->nem.s.uIoCtlBuf.idPartition;
329 AssertLogRelMsgReturn(pGVM->nem.s.idHvPartition == pVM->nem.s.idHvPartition,
330 ("idHvPartition mismatch: r0=%#RX64, r3=%#RX64\n", pGVM->nem.s.idHvPartition, pVM->nem.s.idHvPartition),
331 VERR_NEM_INIT_FAILED);
332
333
334 return rc;
335}
336
337
338/**
339 * Cleanup the NEM parts of the VM in ring-0.
340 *
341 * This is always called and must deal with the state regardless of whether
342 * NEMR0InitVM() was called or not. So, take care here.
343 *
344 * @param pGVM The ring-0 VM handle.
345 */
346VMMR0_INT_DECL(void) NEMR0CleanupVM(PGVM pGVM)
347{
348 pGVM->nem.s.idHvPartition = HV_PARTITION_ID_INVALID;
349
350 /* Clean up I/O control context. */
351 if (pGVM->nem.s.pIoCtlCtx)
352 {
353 int rc = SUPR0IoCtlCleanup(pGVM->nem.s.pIoCtlCtx);
354 AssertRC(rc);
355 pGVM->nem.s.pIoCtlCtx = NULL;
356 }
357
358 /* Free the hypercall pages. */
359 VMCPUID i = pGVM->cCpus;
360 while (i-- > 0)
361 nemR0DeleteHypercallData(&pGVM->aCpus[i].nem.s.HypercallData);
362
363 /* The non-EMT one too. */
364 if (RTCritSectIsInitialized(&pGVM->nem.s.HypercallDataCritSect))
365 RTCritSectDelete(&pGVM->nem.s.HypercallDataCritSect);
366 nemR0DeleteHypercallData(&pGVM->nem.s.HypercallData);
367}
368
369
370#if 0 /* for debugging GPA unmapping. */
371static int nemR3WinDummyReadGpa(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys)
372{
373 PHV_INPUT_READ_GPA pIn = (PHV_INPUT_READ_GPA)pGVCpu->nem.s.pbHypercallData;
374 PHV_OUTPUT_READ_GPA pOut = (PHV_OUTPUT_READ_GPA)(pIn + 1);
375 pIn->PartitionId = pGVM->nem.s.idHvPartition;
376 pIn->VpIndex = pGVCpu->idCpu;
377 pIn->ByteCount = 0x10;
378 pIn->BaseGpa = GCPhys;
379 pIn->ControlFlags.AsUINT64 = 0;
380 pIn->ControlFlags.CacheType = HvCacheTypeX64WriteCombining;
381 memset(pOut, 0xfe, sizeof(*pOut));
382 uint64_t volatile uResult = g_pfnHvlInvokeHypercall(HvCallReadGpa, pGVCpu->nem.s.HCPhysHypercallData,
383 pGVCpu->nem.s.HCPhysHypercallData + sizeof(*pIn));
384 LogRel(("nemR3WinDummyReadGpa: %RGp -> %#RX64; code=%u rsvd=%u abData=%.16Rhxs\n",
385 GCPhys, uResult, pOut->AccessResult.ResultCode, pOut->AccessResult.Reserved, pOut->Data));
386 __debugbreak();
387
388 return uResult != 0 ? VERR_READ_ERROR : VINF_SUCCESS;
389}
390#endif
391
392
393/**
394 * Worker for NEMR0MapPages and others.
395 */
396NEM_TMPL_STATIC int nemR0WinMapPages(PGVM pGVM, PVM pVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
397 uint32_t cPages, uint32_t fFlags)
398{
399 /*
400 * Validate.
401 */
402 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
403
404 AssertReturn(cPages > 0, VERR_OUT_OF_RANGE);
405 AssertReturn(cPages <= NEM_MAX_MAP_PAGES, VERR_OUT_OF_RANGE);
406 AssertReturn(!(fFlags & ~(HV_MAP_GPA_MAYBE_ACCESS_MASK & ~HV_MAP_GPA_DUNNO_ACCESS)), VERR_INVALID_FLAGS);
407 AssertMsgReturn(!(GCPhysDst & X86_PAGE_OFFSET_MASK), ("GCPhysDst=%RGp\n", GCPhysDst), VERR_OUT_OF_RANGE);
408 AssertReturn(GCPhysDst < _1E, VERR_OUT_OF_RANGE);
409 if (GCPhysSrc != GCPhysDst)
410 {
411 AssertMsgReturn(!(GCPhysSrc & X86_PAGE_OFFSET_MASK), ("GCPhysSrc=%RGp\n", GCPhysSrc), VERR_OUT_OF_RANGE);
412 AssertReturn(GCPhysSrc < _1E, VERR_OUT_OF_RANGE);
413 }
414
415 /*
416 * Compose and make the hypercall.
417 * Ring-3 is not allowed to fill in the host physical addresses of the call.
418 */
419 for (uint32_t iTries = 0;; iTries++)
420 {
421 HV_INPUT_MAP_GPA_PAGES *pMapPages = (HV_INPUT_MAP_GPA_PAGES *)pGVCpu->nem.s.HypercallData.pbPage;
422 AssertPtrReturn(pMapPages, VERR_INTERNAL_ERROR_3);
423 pMapPages->TargetPartitionId = pGVM->nem.s.idHvPartition;
424 pMapPages->TargetGpaBase = GCPhysDst >> X86_PAGE_SHIFT;
425 pMapPages->MapFlags = fFlags;
426 pMapPages->u32ExplicitPadding = 0;
427 for (uint32_t iPage = 0; iPage < cPages; iPage++, GCPhysSrc += X86_PAGE_SIZE)
428 {
429 RTHCPHYS HCPhys = NIL_RTGCPHYS;
430 int rc = PGMPhysGCPhys2HCPhys(pVM, GCPhysSrc, &HCPhys);
431 AssertRCReturn(rc, rc);
432 pMapPages->PageList[iPage] = HCPhys >> X86_PAGE_SHIFT;
433 }
434
435 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallMapGpaPages | ((uint64_t)cPages << 32),
436 pGVCpu->nem.s.HypercallData.HCPhysPage, 0);
437 Log6(("NEMR0MapPages: %RGp/%RGp L %u prot %#x -> %#RX64\n",
438 GCPhysDst, GCPhysSrc - cPages * X86_PAGE_SIZE, cPages, fFlags, uResult));
439 if (uResult == ((uint64_t)cPages << 32))
440 return VINF_SUCCESS;
441
442 /*
443 * If the partition is out of memory, try to donate another 512 pages to
444 * it (2MB). VID.SYS does multiples of 512 pages, nothing smaller.
445 */
446 if ( uResult != HV_STATUS_INSUFFICIENT_MEMORY
447 || iTries > 16
448 || g_pfnWinHvDepositMemory == NULL)
449 {
450 LogRel(("g_pfnHvlInvokeHypercall/MapGpaPages -> %#RX64\n", uResult));
451 return VERR_NEM_MAP_PAGES_FAILED;
452 }
453
454 size_t cPagesAdded = 0;
455 NTSTATUS rcNt = g_pfnWinHvDepositMemory(pGVM->nem.s.idHvPartition, 512, 0, &cPagesAdded);
456 if (!cPagesAdded)
457 {
458 LogRel(("g_pfnWinHvDepositMemory -> %#x / %#RX64\n", rcNt, uResult));
459 return VERR_NEM_MAP_PAGES_FAILED;
460 }
461 }
462}
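/* Note on the hypercall encoding used above: per the Hyper-V TLFS hypercall
   interface, the call code lives in the low 16 bits of the input value and the
   rep count in bits 43:32, so HvCallMapGpaPages | ((uint64_t)cPages << 32) asks
   for cPages repetitions.  A successful call echoes the completed rep count in the
   same bit position (with a zero status in the low bits), hence the
   uResult == ((uint64_t)cPages << 32) success check.  (Reading of the code plus the
   TLFS; the exact bit field layout is not taken from this file.) */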
463
464
465/**
466 * Maps pages into the guest physical address space.
467 *
468 * Generally the caller will be under the PGM lock already, so no extra effort
469 * is needed to make sure all changes happen under it.
470 *
471 * @returns VBox status code.
472 * @param pGVM The ring-0 VM handle.
473 * @param pVM The cross context VM handle.
474 * @param idCpu The calling EMT. Necessary for getting the
475 * hypercall page and arguments.
476 * @thread EMT(idCpu)
477 */
478VMMR0_INT_DECL(int) NEMR0MapPages(PGVM pGVM, PVM pVM, VMCPUID idCpu)
479{
480 /*
481 * Unpack the call.
482 */
483 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
484 if (RT_SUCCESS(rc))
485 {
486 PVMCPU pVCpu = &pVM->aCpus[idCpu];
487 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
488
489 RTGCPHYS const GCPhysSrc = pVCpu->nem.s.Hypercall.MapPages.GCPhysSrc;
490 RTGCPHYS const GCPhysDst = pVCpu->nem.s.Hypercall.MapPages.GCPhysDst;
491 uint32_t const cPages = pVCpu->nem.s.Hypercall.MapPages.cPages;
492 HV_MAP_GPA_FLAGS const fFlags = pVCpu->nem.s.Hypercall.MapPages.fFlags;
493
494 /*
495 * Do the work.
496 */
497 rc = nemR0WinMapPages(pGVM, pVM, pGVCpu, GCPhysSrc, GCPhysDst, cPages, fFlags);
498 }
499 return rc;
500}
501
502
503/**
504 * Worker for NEMR0UnmapPages and others.
505 */
506NEM_TMPL_STATIC int nemR0WinUnmapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys, uint32_t cPages)
507{
508 /*
509 * Validate input.
510 */
511 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
512
513 AssertReturn(cPages > 0, VERR_OUT_OF_RANGE);
514 AssertReturn(cPages <= NEM_MAX_UNMAP_PAGES, VERR_OUT_OF_RANGE);
515 AssertMsgReturn(!(GCPhys & X86_PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_OUT_OF_RANGE);
516 AssertReturn(GCPhys < _1E, VERR_OUT_OF_RANGE);
517
518 /*
519 * Compose and make the hypercall.
520 */
521 HV_INPUT_UNMAP_GPA_PAGES *pUnmapPages = (HV_INPUT_UNMAP_GPA_PAGES *)pGVCpu->nem.s.HypercallData.pbPage;
522 AssertPtrReturn(pUnmapPages, VERR_INTERNAL_ERROR_3);
523 pUnmapPages->TargetPartitionId = pGVM->nem.s.idHvPartition;
524 pUnmapPages->TargetGpaBase = GCPhys >> X86_PAGE_SHIFT;
525 pUnmapPages->fFlags = 0;
526
527 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallUnmapGpaPages | ((uint64_t)cPages << 32),
528 pGVCpu->nem.s.HypercallData.HCPhysPage, 0);
529 Log6(("NEMR0UnmapPages: %RGp L %u -> %#RX64\n", GCPhys, cPages, uResult));
530 if (uResult == ((uint64_t)cPages << 32))
531 {
532#if 1 /* Do we need to do this? Hopefully not... */
533 uint64_t volatile uR = g_pfnHvlInvokeHypercall(HvCallUncommitGpaPages | ((uint64_t)cPages << 32),
534 pGVCpu->nem.s.HypercallData.HCPhysPage, 0);
535 AssertMsg(uR == ((uint64_t)cPages << 32), ("uR=%#RX64\n", uR)); NOREF(uR);
536#endif
537 return VINF_SUCCESS;
538 }
539
540 LogRel(("g_pfnHvlInvokeHypercall/UnmapGpaPages -> %#RX64\n", uResult));
541 return VERR_NEM_UNMAP_PAGES_FAILED;
542}
543
544
545/**
546 * Unmaps pages from the guest physical address space.
547 *
548 * Generally the caller will be under the PGM lock already, so no extra effort
549 * is needed to make sure all changes happen under it.
550 *
551 * @returns VBox status code.
552 * @param pGVM The ring-0 VM handle.
553 * @param pVM The cross context VM handle.
554 * @param idCpu The calling EMT. Necessary for getting the
555 * hypercall page and arguments.
556 * @thread EMT(idCpu)
557 */
558VMMR0_INT_DECL(int) NEMR0UnmapPages(PGVM pGVM, PVM pVM, VMCPUID idCpu)
559{
560 /*
561 * Unpack the call.
562 */
563 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
564 if (RT_SUCCESS(rc))
565 {
566 PVMCPU pVCpu = &pVM->aCpus[idCpu];
567 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
568
569 RTGCPHYS const GCPhys = pVCpu->nem.s.Hypercall.UnmapPages.GCPhys;
570 uint32_t const cPages = pVCpu->nem.s.Hypercall.UnmapPages.cPages;
571
572 /*
573 * Do the work.
574 */
575 rc = nemR0WinUnmapPages(pGVM, pGVCpu, GCPhys, cPages);
576 }
577 return rc;
578}
579
580
581/**
582 * Worker for NEMR0ExportState.
583 *
584 * The intention is to use it internally later.
585 *
586 * @returns VBox status code.
587 * @param pGVM The ring-0 VM handle.
588 * @param pGVCpu The ring-0 VCPU handle.
589 * @param pCtx The CPU context structure to export from.
590 */
591NEM_TMPL_STATIC int nemR0WinExportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx)
592{
593 PVMCPU pVCpu = &pGVM->pVM->aCpus[pGVCpu->idCpu];
594 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
595 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
596
597 pInput->PartitionId = pGVM->nem.s.idHvPartition;
598 pInput->VpIndex = pGVCpu->idCpu;
599 pInput->RsvdZ = 0;
600
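    /* In CPUMCTX the fExtrn bits mark state that is still "external", i.e. held by
       the hypervisor rather than by the context structure, so ~fExtrn below selects
       exactly the state CPUMCTX currently owns and that therefore needs exporting
       (sketch of the CPUMCTX_EXTRN_XXX convention as used in this file). */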
601 uint64_t const fWhat = ~pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK);
602 if ( !fWhat
603 && pVCpu->nem.s.fCurrentInterruptWindows == pVCpu->nem.s.fDesiredInterruptWindows)
604 return VINF_SUCCESS;
605 uintptr_t iReg = 0;
606
607 /* GPRs */
608 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
609 {
610 if (fWhat & CPUMCTX_EXTRN_RAX)
611 {
612 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
613 pInput->Elements[iReg].Name = HvX64RegisterRax;
614 pInput->Elements[iReg].Value.Reg64 = pCtx->rax;
615 iReg++;
616 }
617 if (fWhat & CPUMCTX_EXTRN_RCX)
618 {
619 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
620 pInput->Elements[iReg].Name = HvX64RegisterRcx;
621 pInput->Elements[iReg].Value.Reg64 = pCtx->rcx;
622 iReg++;
623 }
624 if (fWhat & CPUMCTX_EXTRN_RDX)
625 {
626 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
627 pInput->Elements[iReg].Name = HvX64RegisterRdx;
628 pInput->Elements[iReg].Value.Reg64 = pCtx->rdx;
629 iReg++;
630 }
631 if (fWhat & CPUMCTX_EXTRN_RBX)
632 {
633 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
634 pInput->Elements[iReg].Name = HvX64RegisterRbx;
635 pInput->Elements[iReg].Value.Reg64 = pCtx->rbx;
636 iReg++;
637 }
638 if (fWhat & CPUMCTX_EXTRN_RSP)
639 {
640 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
641 pInput->Elements[iReg].Name = HvX64RegisterRsp;
642 pInput->Elements[iReg].Value.Reg64 = pCtx->rsp;
643 iReg++;
644 }
645 if (fWhat & CPUMCTX_EXTRN_RBP)
646 {
647 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
648 pInput->Elements[iReg].Name = HvX64RegisterRbp;
649 pInput->Elements[iReg].Value.Reg64 = pCtx->rbp;
650 iReg++;
651 }
652 if (fWhat & CPUMCTX_EXTRN_RSI)
653 {
654 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
655 pInput->Elements[iReg].Name = HvX64RegisterRsi;
656 pInput->Elements[iReg].Value.Reg64 = pCtx->rsi;
657 iReg++;
658 }
659 if (fWhat & CPUMCTX_EXTRN_RDI)
660 {
661 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
662 pInput->Elements[iReg].Name = HvX64RegisterRdi;
663 pInput->Elements[iReg].Value.Reg64 = pCtx->rdi;
664 iReg++;
665 }
666 if (fWhat & CPUMCTX_EXTRN_R8_R15)
667 {
668 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
669 pInput->Elements[iReg].Name = HvX64RegisterR8;
670 pInput->Elements[iReg].Value.Reg64 = pCtx->r8;
671 iReg++;
672 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
673 pInput->Elements[iReg].Name = HvX64RegisterR9;
674 pInput->Elements[iReg].Value.Reg64 = pCtx->r9;
675 iReg++;
676 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
677 pInput->Elements[iReg].Name = HvX64RegisterR10;
678 pInput->Elements[iReg].Value.Reg64 = pCtx->r10;
679 iReg++;
680 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
681 pInput->Elements[iReg].Name = HvX64RegisterR11;
682 pInput->Elements[iReg].Value.Reg64 = pCtx->r11;
683 iReg++;
684 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
685 pInput->Elements[iReg].Name = HvX64RegisterR12;
686 pInput->Elements[iReg].Value.Reg64 = pCtx->r12;
687 iReg++;
688 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
689 pInput->Elements[iReg].Name = HvX64RegisterR13;
690 pInput->Elements[iReg].Value.Reg64 = pCtx->r13;
691 iReg++;
692 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
693 pInput->Elements[iReg].Name = HvX64RegisterR14;
694 pInput->Elements[iReg].Value.Reg64 = pCtx->r14;
695 iReg++;
696 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
697 pInput->Elements[iReg].Name = HvX64RegisterR15;
698 pInput->Elements[iReg].Value.Reg64 = pCtx->r15;
699 iReg++;
700 }
701 }
702
703 /* RIP & Flags */
704 if (fWhat & CPUMCTX_EXTRN_RIP)
705 {
706 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
707 pInput->Elements[iReg].Name = HvX64RegisterRip;
708 pInput->Elements[iReg].Value.Reg64 = pCtx->rip;
709 iReg++;
710 }
711 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
712 {
713 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
714 pInput->Elements[iReg].Name = HvX64RegisterRflags;
715 pInput->Elements[iReg].Value.Reg64 = pCtx->rflags.u;
716 iReg++;
717 }
718
719 /* Segments */
720#define COPY_OUT_SEG(a_idx, a_enmName, a_SReg) \
721 do { \
722 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[a_idx]); \
723 pInput->Elements[a_idx].Name = a_enmName; \
724 pInput->Elements[a_idx].Value.Segment.Base = (a_SReg).u64Base; \
725 pInput->Elements[a_idx].Value.Segment.Limit = (a_SReg).u32Limit; \
726 pInput->Elements[a_idx].Value.Segment.Selector = (a_SReg).Sel; \
727 pInput->Elements[a_idx].Value.Segment.Attributes = (a_SReg).Attr.u; \
728 } while (0)
729 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
730 {
731 if (fWhat & CPUMCTX_EXTRN_CS)
732 {
733 COPY_OUT_SEG(iReg, HvX64RegisterCs, pCtx->cs);
734 iReg++;
735 }
736 if (fWhat & CPUMCTX_EXTRN_ES)
737 {
738 COPY_OUT_SEG(iReg, HvX64RegisterEs, pCtx->es);
739 iReg++;
740 }
741 if (fWhat & CPUMCTX_EXTRN_SS)
742 {
743 COPY_OUT_SEG(iReg, HvX64RegisterSs, pCtx->ss);
744 iReg++;
745 }
746 if (fWhat & CPUMCTX_EXTRN_DS)
747 {
748 COPY_OUT_SEG(iReg, HvX64RegisterDs, pCtx->ds);
749 iReg++;
750 }
751 if (fWhat & CPUMCTX_EXTRN_FS)
752 {
753 COPY_OUT_SEG(iReg, HvX64RegisterFs, pCtx->fs);
754 iReg++;
755 }
756 if (fWhat & CPUMCTX_EXTRN_GS)
757 {
758 COPY_OUT_SEG(iReg, HvX64RegisterGs, pCtx->gs);
759 iReg++;
760 }
761 }
762
763 /* Descriptor tables & task segment. */
764 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
765 {
766 if (fWhat & CPUMCTX_EXTRN_LDTR)
767 {
768 COPY_OUT_SEG(iReg, HvX64RegisterLdtr, pCtx->ldtr);
769 iReg++;
770 }
771 if (fWhat & CPUMCTX_EXTRN_TR)
772 {
773 COPY_OUT_SEG(iReg, HvX64RegisterTr, pCtx->tr);
774 iReg++;
775 }
776
777 if (fWhat & CPUMCTX_EXTRN_IDTR)
778 {
779 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
780 pInput->Elements[iReg].Value.Table.Pad[0] = 0;
781 pInput->Elements[iReg].Value.Table.Pad[1] = 0;
782 pInput->Elements[iReg].Value.Table.Pad[2] = 0;
783 pInput->Elements[iReg].Name = HvX64RegisterIdtr;
784 pInput->Elements[iReg].Value.Table.Limit = pCtx->idtr.cbIdt;
785 pInput->Elements[iReg].Value.Table.Base = pCtx->idtr.pIdt;
786 iReg++;
787 }
788 if (fWhat & CPUMCTX_EXTRN_GDTR)
789 {
790 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
791 pInput->Elements[iReg].Value.Table.Pad[0] = 0;
792 pInput->Elements[iReg].Value.Table.Pad[1] = 0;
793 pInput->Elements[iReg].Value.Table.Pad[2] = 0;
794 pInput->Elements[iReg].Name = HvX64RegisterGdtr;
795 pInput->Elements[iReg].Value.Table.Limit = pCtx->gdtr.cbGdt;
796 pInput->Elements[iReg].Value.Table.Base = pCtx->gdtr.pGdt;
797 iReg++;
798 }
799 }
800
801 /* Control registers. */
802 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
803 {
804 if (fWhat & CPUMCTX_EXTRN_CR0)
805 {
806 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
807 pInput->Elements[iReg].Name = HvX64RegisterCr0;
808 pInput->Elements[iReg].Value.Reg64 = pCtx->cr0;
809 iReg++;
810 }
811 if (fWhat & CPUMCTX_EXTRN_CR2)
812 {
813 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
814 pInput->Elements[iReg].Name = HvX64RegisterCr2;
815 pInput->Elements[iReg].Value.Reg64 = pCtx->cr2;
816 iReg++;
817 }
818 if (fWhat & CPUMCTX_EXTRN_CR3)
819 {
820 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
821 pInput->Elements[iReg].Name = HvX64RegisterCr3;
822 pInput->Elements[iReg].Value.Reg64 = pCtx->cr3;
823 iReg++;
824 }
825 if (fWhat & CPUMCTX_EXTRN_CR4)
826 {
827 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
828 pInput->Elements[iReg].Name = HvX64RegisterCr4;
829 pInput->Elements[iReg].Value.Reg64 = pCtx->cr4;
830 iReg++;
831 }
832 }
833 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
834 {
835 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
836 pInput->Elements[iReg].Name = HvX64RegisterCr8;
837 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestCR8(pVCpu);
838 iReg++;
839 }
840
841 /** @todo does HvX64RegisterXfem mean XCR0? What about the related MSR? */
842
843 /* Debug registers. */
844/** @todo fixme. Figure out what the hyper-v version of KVM_SET_GUEST_DEBUG would be. */
845 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
846 {
847 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
848 pInput->Elements[iReg].Name = HvX64RegisterDr0;
849 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR0(pVCpu);
850 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[0];
851 iReg++;
852 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
853 pInput->Elements[iReg].Name = HvX64RegisterDr1;
854 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR1(pVCpu);
855 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[1];
856 iReg++;
857 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
858 pInput->Elements[iReg].Name = HvX64RegisterDr2;
859 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR2(pVCpu);
860 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[2];
861 iReg++;
862 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
863 pInput->Elements[iReg].Name = HvX64RegisterDr3;
864 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR3(pVCpu);
865 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[3];
866 iReg++;
867 }
868 if (fWhat & CPUMCTX_EXTRN_DR6)
869 {
870 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
871 pInput->Elements[iReg].Name = HvX64RegisterDr6;
872 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR6(pVCpu);
873 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[6];
874 iReg++;
875 }
876 if (fWhat & CPUMCTX_EXTRN_DR7)
877 {
878 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
879 pInput->Elements[iReg].Name = HvX64RegisterDr7;
880 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR7(pVCpu);
881 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[7];
882 iReg++;
883 }
884
885 /* Floating point state. */
886 if (fWhat & CPUMCTX_EXTRN_X87)
887 {
888 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
889 pInput->Elements[iReg].Name = HvX64RegisterFpMmx0;
890 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[0].au64[0];
891 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[0].au64[1];
892 iReg++;
893 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
894 pInput->Elements[iReg].Name = HvX64RegisterFpMmx1;
895 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[1].au64[0];
896 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[1].au64[1];
897 iReg++;
898 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
899 pInput->Elements[iReg].Name = HvX64RegisterFpMmx2;
900 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[2].au64[0];
901 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[2].au64[1];
902 iReg++;
903 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
904 pInput->Elements[iReg].Name = HvX64RegisterFpMmx3;
905 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[3].au64[0];
906 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[3].au64[1];
907 iReg++;
908 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
909 pInput->Elements[iReg].Name = HvX64RegisterFpMmx4;
910 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[4].au64[0];
911 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[4].au64[1];
912 iReg++;
913 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
914 pInput->Elements[iReg].Name = HvX64RegisterFpMmx5;
915 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[5].au64[0];
916 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[5].au64[1];
917 iReg++;
918 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
919 pInput->Elements[iReg].Name = HvX64RegisterFpMmx6;
920 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[6].au64[0];
921 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[6].au64[1];
922 iReg++;
923 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
924 pInput->Elements[iReg].Name = HvX64RegisterFpMmx7;
925 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[7].au64[0];
926 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[7].au64[1];
927 iReg++;
928
929 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
930 pInput->Elements[iReg].Name = HvX64RegisterFpControlStatus;
931 pInput->Elements[iReg].Value.FpControlStatus.FpControl = pCtx->pXStateR0->x87.FCW;
932 pInput->Elements[iReg].Value.FpControlStatus.FpStatus = pCtx->pXStateR0->x87.FSW;
933 pInput->Elements[iReg].Value.FpControlStatus.FpTag = pCtx->pXStateR0->x87.FTW;
934 pInput->Elements[iReg].Value.FpControlStatus.Reserved = pCtx->pXStateR0->x87.FTW >> 8;
935 pInput->Elements[iReg].Value.FpControlStatus.LastFpOp = pCtx->pXStateR0->x87.FOP;
936 pInput->Elements[iReg].Value.FpControlStatus.LastFpRip = (pCtx->pXStateR0->x87.FPUIP)
937 | ((uint64_t)pCtx->pXStateR0->x87.CS << 32)
938 | ((uint64_t)pCtx->pXStateR0->x87.Rsrvd1 << 48);
939 iReg++;
940/** @todo we've got trouble if we try to write just SSE w/o X87. */
941 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
942 pInput->Elements[iReg].Name = HvX64RegisterXmmControlStatus;
943 pInput->Elements[iReg].Value.XmmControlStatus.LastFpRdp = (pCtx->pXStateR0->x87.FPUDP)
944 | ((uint64_t)pCtx->pXStateR0->x87.DS << 32)
945 | ((uint64_t)pCtx->pXStateR0->x87.Rsrvd2 << 48);
946 pInput->Elements[iReg].Value.XmmControlStatus.XmmStatusControl = pCtx->pXStateR0->x87.MXCSR;
947 pInput->Elements[iReg].Value.XmmControlStatus.XmmStatusControlMask = pCtx->pXStateR0->x87.MXCSR_MASK; /** @todo ??? (Isn't this an output field?) */
948 iReg++;
949 }
950
951 /* Vector state. */
952 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
953 {
954 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
955 pInput->Elements[iReg].Name = HvX64RegisterXmm0;
956 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Lo;
957 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Hi;
958 iReg++;
959 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
960 pInput->Elements[iReg].Name = HvX64RegisterXmm1;
961 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Lo;
962 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Hi;
963 iReg++;
964 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
965 pInput->Elements[iReg].Name = HvX64RegisterXmm2;
966 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Lo;
967 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Hi;
968 iReg++;
969 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
970 pInput->Elements[iReg].Name = HvX64RegisterXmm3;
971 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Lo;
972 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Hi;
973 iReg++;
974 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
975 pInput->Elements[iReg].Name = HvX64RegisterXmm4;
976 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Lo;
977 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Hi;
978 iReg++;
979 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
980 pInput->Elements[iReg].Name = HvX64RegisterXmm5;
981 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Lo;
982 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Hi;
983 iReg++;
984 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
985 pInput->Elements[iReg].Name = HvX64RegisterXmm6;
986 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Lo;
987 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Hi;
988 iReg++;
989 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
990 pInput->Elements[iReg].Name = HvX64RegisterXmm7;
991 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Lo;
992 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Hi;
993 iReg++;
994 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
995 pInput->Elements[iReg].Name = HvX64RegisterXmm8;
996 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Lo;
997 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Hi;
998 iReg++;
999 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1000 pInput->Elements[iReg].Name = HvX64RegisterXmm9;
1001 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Lo;
1002 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Hi;
1003 iReg++;
1004 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1005 pInput->Elements[iReg].Name = HvX64RegisterXmm10;
1006 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Lo;
1007 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Hi;
1008 iReg++;
1009 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1010 pInput->Elements[iReg].Name = HvX64RegisterXmm11;
1011 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Lo;
1012 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Hi;
1013 iReg++;
1014 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1015 pInput->Elements[iReg].Name = HvX64RegisterXmm12;
1016 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Lo;
1017 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Hi;
1018 iReg++;
1019 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1020 pInput->Elements[iReg].Name = HvX64RegisterXmm13;
1021 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Lo;
1022 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Hi;
1023 iReg++;
1024 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1025 pInput->Elements[iReg].Name = HvX64RegisterXmm14;
1026 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Lo;
1027 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Hi;
1028 iReg++;
1029 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1030 pInput->Elements[iReg].Name = HvX64RegisterXmm15;
1031 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Lo;
1032 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Hi;
1033 iReg++;
1034 }
1035
1036 /* MSRs */
1037 // HvX64RegisterTsc - don't touch
1038 if (fWhat & CPUMCTX_EXTRN_EFER)
1039 {
1040 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1041 pInput->Elements[iReg].Name = HvX64RegisterEfer;
1042 pInput->Elements[iReg].Value.Reg64 = pCtx->msrEFER;
1043 iReg++;
1044 }
1045 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1046 {
1047 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1048 pInput->Elements[iReg].Name = HvX64RegisterKernelGsBase;
1049 pInput->Elements[iReg].Value.Reg64 = pCtx->msrKERNELGSBASE;
1050 iReg++;
1051 }
1052 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1053 {
1054 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1055 pInput->Elements[iReg].Name = HvX64RegisterSysenterCs;
1056 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.cs;
1057 iReg++;
1058 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1059 pInput->Elements[iReg].Name = HvX64RegisterSysenterEip;
1060 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.eip;
1061 iReg++;
1062 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1063 pInput->Elements[iReg].Name = HvX64RegisterSysenterEsp;
1064 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.esp;
1065 iReg++;
1066 }
1067 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1068 {
1069 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1070 pInput->Elements[iReg].Name = HvX64RegisterStar;
1071 pInput->Elements[iReg].Value.Reg64 = pCtx->msrSTAR;
1072 iReg++;
1073 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1074 pInput->Elements[iReg].Name = HvX64RegisterLstar;
1075 pInput->Elements[iReg].Value.Reg64 = pCtx->msrLSTAR;
1076 iReg++;
1077 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1078 pInput->Elements[iReg].Name = HvX64RegisterCstar;
1079 pInput->Elements[iReg].Value.Reg64 = pCtx->msrCSTAR;
1080 iReg++;
1081 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1082 pInput->Elements[iReg].Name = HvX64RegisterSfmask;
1083 pInput->Elements[iReg].Value.Reg64 = pCtx->msrSFMASK;
1084 iReg++;
1085 }
1086 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1087 {
1088 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1089 pInput->Elements[iReg].Name = HvX64RegisterApicBase;
1090 pInput->Elements[iReg].Value.Reg64 = APICGetBaseMsrNoCheck(pVCpu);
1091 iReg++;
1092 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1093 pInput->Elements[iReg].Name = HvX64RegisterPat;
1094 pInput->Elements[iReg].Value.Reg64 = pCtx->msrPAT;
1095 iReg++;
1096#if 0 /** @todo HvX64RegisterMtrrCap is read only? Seems it's not even readable. */
1097 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1098 pInput->Elements[iReg].Name = HvX64RegisterMtrrCap;
1099 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestIa32MtrrCap(pVCpu);
1100 iReg++;
1101#endif
1102
1103 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
1104
1105 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1106 pInput->Elements[iReg].Name = HvX64RegisterMtrrDefType;
1107 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrDefType;
1108 iReg++;
1109
1110 /** @todo we don't keep state for HvX64RegisterMtrrPhysBaseX and HvX64RegisterMtrrPhysMaskX */
1111
1112 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1113 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix64k00000;
1114 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix64K_00000;
1115 iReg++;
1116 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1117 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix16k80000;
1118 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix16K_80000;
1119 iReg++;
1120 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1121 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix16kA0000;
1122 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix16K_A0000;
1123 iReg++;
1124 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1125 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kC0000;
1126 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_C0000;
1127 iReg++;
1128 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1129 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kC8000;
1130 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_C8000;
1131 iReg++;
1132 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1133 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kD0000;
1134 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_D0000;
1135 iReg++;
1136 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1137 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kD8000;
1138 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_D8000;
1139 iReg++;
1140 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1141 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kE0000;
1142 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_E0000;
1143 iReg++;
1144 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1145 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kE8000;
1146 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_E8000;
1147 iReg++;
1148 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1149 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kF0000;
1150 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_F0000;
1151 iReg++;
1152 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1153 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kF8000;
1154 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_F8000;
1155 iReg++;
1156 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1157 pInput->Elements[iReg].Name = HvX64RegisterTscAux;
1158 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.TscAux;
1159 iReg++;
1160
1161#if 0 /** @todo Why can't we write these on Intel systems? Not that we really care... */
1162 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM->pVM);
1163 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
1164 {
1165 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1166 pInput->Elements[iReg].Name = HvX64RegisterIa32MiscEnable;
1167 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MiscEnable;
1168 iReg++;
1169 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1170 pInput->Elements[iReg].Name = HvX64RegisterIa32FeatureControl;
1171 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestIa32FeatureControl(pVCpu);
1172 iReg++;
1173 }
1174#endif
1175 }
1176
1177 /* event injection (clear it). */
1178 if (fWhat & CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)
1179 {
1180 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1181 pInput->Elements[iReg].Name = HvRegisterPendingInterruption;
1182 pInput->Elements[iReg].Value.Reg64 = 0;
1183 iReg++;
1184 }
1185
1186 /* Interruptibility state. This can get a little complicated since we get
1187 half of the state via HV_X64_VP_EXECUTION_STATE. */
1188 if ( (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
1189 == (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI) )
1190 {
1191 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1192 pInput->Elements[iReg].Name = HvRegisterInterruptState;
1193 pInput->Elements[iReg].Value.Reg64 = 0;
1194 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1195 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
1196 pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1;
1197 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
1198 pInput->Elements[iReg].Value.InterruptState.NmiMasked = 1;
1199 iReg++;
1200 }
1201 else if (fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT)
1202 {
1203 if ( pVCpu->nem.s.fLastInterruptShadow
1204 || ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1205 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip))
1206 {
1207 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1208 pInput->Elements[iReg].Name = HvRegisterInterruptState;
1209 pInput->Elements[iReg].Value.Reg64 = 0;
1210 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1211 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
1212 pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1;
1213 /** @todo Retrieve NMI state, currently assuming it's zero. (yes this may happen on I/O) */
1214 //if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
1215 // pInput->Elements[iReg].Value.InterruptState.NmiMasked = 1;
1216 iReg++;
1217 }
1218 }
1219 else
1220 Assert(!(fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI));
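    /* (Background note: the interrupt shadow is the one-instruction window after
       STI / MOV SS / POP SS during which interrupt delivery is deferred, which is
       why it is only reported when the inhibit PC recorded by EM still matches the
       current RIP above.) */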
1221
1222 /* Interrupt windows. Always set if active as Hyper-V seems to be forgetful. */
1223 uint8_t const fDesiredIntWin = pVCpu->nem.s.fDesiredInterruptWindows;
1224 if ( fDesiredIntWin
1225 || pVCpu->nem.s.fCurrentInterruptWindows != fDesiredIntWin)
1226 {
1227 pVCpu->nem.s.fCurrentInterruptWindows = pVCpu->nem.s.fDesiredInterruptWindows;
1228 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1229 pInput->Elements[iReg].Name = HvX64RegisterDeliverabilityNotifications;
1230 pInput->Elements[iReg].Value.DeliverabilityNotifications.AsUINT64 = fDesiredIntWin;
1231 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.NmiNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_NMI));
1232 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.InterruptNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_REGULAR));
1233 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.InterruptPriority == (fDesiredIntWin & NEM_WIN_INTW_F_PRIO_MASK) >> NEM_WIN_INTW_F_PRIO_SHIFT);
1234 iReg++;
1235 }
1236
1237 /// @todo HvRegisterPendingEvent0
1238 /// @todo HvRegisterPendingEvent1
1239
1240 /*
1241 * Set the registers.
1242 */
1243 Assert((uintptr_t)&pInput->Elements[iReg] - (uintptr_t)pGVCpu->nem.s.HypercallData.pbPage < PAGE_SIZE); /* max is 127 */
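    /* (The 127-element limit presumably follows from one 4 KiB page holding the
       16-byte HV_INPUT_SET_VP_REGISTERS header plus 32-byte HV_REGISTER_ASSOC
       entries: (4096 - 16) / 32 = 127; exact structure sizes assumed from hyperv.h.) */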
1244
1245 /*
1246 * Make the hypercall.
1247 */
1248 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, iReg),
1249 pGVCpu->nem.s.HypercallData.HCPhysPage, 0 /*GCPhysOutput*/);
1250 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(iReg),
1251 ("uResult=%RX64 iRegs=%#x\n", uResult, iReg),
1252 VERR_NEM_SET_REGISTERS_FAILED);
1253 //LogFlow(("nemR0WinExportState: uResult=%#RX64 iReg=%zu fWhat=%#018RX64 fExtrn=%#018RX64 -> %#018RX64\n", uResult, iReg, fWhat, pCtx->fExtrn,
1254 // pCtx->fExtrn | CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM ));
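    /* Everything has been pushed to Hyper-V, so mark the whole context as external
       again; any later CPUMCTX access has to go through nemR0WinImportState(). */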
1255 pCtx->fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM;
1256 return VINF_SUCCESS;
1257}
1258
1259
1260/**
1261 * Export the state to the native API (out of CPUMCTX).
1262 *
1263 * @returns VBox status code
1264 * @param pGVM The ring-0 VM handle.
1265 * @param pVM The cross context VM handle.
1266 * @param idCpu The calling EMT. Necessary for getting the
1267 * hypercall page and arguments.
1268 */
1269VMMR0_INT_DECL(int) NEMR0ExportState(PGVM pGVM, PVM pVM, VMCPUID idCpu)
1270{
1271 /*
1272 * Validate the call.
1273 */
1274 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
1275 if (RT_SUCCESS(rc))
1276 {
1277 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1278 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1279 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
1280
1281 /*
1282 * Call worker.
1283 */
1284 rc = nemR0WinExportState(pGVM, pGVCpu, CPUMQueryGuestCtxPtr(pVCpu));
1285 }
1286 return rc;
1287}
1288
1289
1290/**
1291 * Worker for NEMR0ImportState.
1292 *
1293 * The intention is to use it internally later.
1294 *
1295 * @returns VBox status code.
1296 * @param pGVM The ring-0 VM handle.
1297 * @param pGVCpu The ring-0 VCPU handle.
1298 * @param pCtx The CPU context structure to import into.
1299 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
1300 */
1301NEM_TMPL_STATIC int nemR0WinImportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat)
1302{
1303 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
1304 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
1305
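    /* Only fetch what the caller asked for AND what is still marked external in
       fExtrn; state already present in CPUMCTX needs no round trip to Hyper-V. */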
1306 fWhat &= pCtx->fExtrn;
1307
1308 pInput->PartitionId = pGVM->nem.s.idHvPartition;
1309 pInput->VpIndex = pGVCpu->idCpu;
1310 pInput->fFlags = 0;
1311
1312 /* GPRs */
1313 uintptr_t iReg = 0;
1314 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1315 {
1316 if (fWhat & CPUMCTX_EXTRN_RAX)
1317 pInput->Names[iReg++] = HvX64RegisterRax;
1318 if (fWhat & CPUMCTX_EXTRN_RCX)
1319 pInput->Names[iReg++] = HvX64RegisterRcx;
1320 if (fWhat & CPUMCTX_EXTRN_RDX)
1321 pInput->Names[iReg++] = HvX64RegisterRdx;
1322 if (fWhat & CPUMCTX_EXTRN_RBX)
1323 pInput->Names[iReg++] = HvX64RegisterRbx;
1324 if (fWhat & CPUMCTX_EXTRN_RSP)
1325 pInput->Names[iReg++] = HvX64RegisterRsp;
1326 if (fWhat & CPUMCTX_EXTRN_RBP)
1327 pInput->Names[iReg++] = HvX64RegisterRbp;
1328 if (fWhat & CPUMCTX_EXTRN_RSI)
1329 pInput->Names[iReg++] = HvX64RegisterRsi;
1330 if (fWhat & CPUMCTX_EXTRN_RDI)
1331 pInput->Names[iReg++] = HvX64RegisterRdi;
1332 if (fWhat & CPUMCTX_EXTRN_R8_R15)
1333 {
1334 pInput->Names[iReg++] = HvX64RegisterR8;
1335 pInput->Names[iReg++] = HvX64RegisterR9;
1336 pInput->Names[iReg++] = HvX64RegisterR10;
1337 pInput->Names[iReg++] = HvX64RegisterR11;
1338 pInput->Names[iReg++] = HvX64RegisterR12;
1339 pInput->Names[iReg++] = HvX64RegisterR13;
1340 pInput->Names[iReg++] = HvX64RegisterR14;
1341 pInput->Names[iReg++] = HvX64RegisterR15;
1342 }
1343 }
1344
1345 /* RIP & Flags */
1346 if (fWhat & CPUMCTX_EXTRN_RIP)
1347 pInput->Names[iReg++] = HvX64RegisterRip;
1348 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
1349 pInput->Names[iReg++] = HvX64RegisterRflags;
1350
1351 /* Segments */
1352 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
1353 {
1354 if (fWhat & CPUMCTX_EXTRN_CS)
1355 pInput->Names[iReg++] = HvX64RegisterCs;
1356 if (fWhat & CPUMCTX_EXTRN_ES)
1357 pInput->Names[iReg++] = HvX64RegisterEs;
1358 if (fWhat & CPUMCTX_EXTRN_SS)
1359 pInput->Names[iReg++] = HvX64RegisterSs;
1360 if (fWhat & CPUMCTX_EXTRN_DS)
1361 pInput->Names[iReg++] = HvX64RegisterDs;
1362 if (fWhat & CPUMCTX_EXTRN_FS)
1363 pInput->Names[iReg++] = HvX64RegisterFs;
1364 if (fWhat & CPUMCTX_EXTRN_GS)
1365 pInput->Names[iReg++] = HvX64RegisterGs;
1366 }
1367
1368 /* Descriptor tables and the task segment. */
1369 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
1370 {
1371 if (fWhat & CPUMCTX_EXTRN_LDTR)
1372 pInput->Names[iReg++] = HvX64RegisterLdtr;
1373 if (fWhat & CPUMCTX_EXTRN_TR)
1374 pInput->Names[iReg++] = HvX64RegisterTr;
1375 if (fWhat & CPUMCTX_EXTRN_IDTR)
1376 pInput->Names[iReg++] = HvX64RegisterIdtr;
1377 if (fWhat & CPUMCTX_EXTRN_GDTR)
1378 pInput->Names[iReg++] = HvX64RegisterGdtr;
1379 }
1380
1381 /* Control registers. */
1382 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
1383 {
1384 if (fWhat & CPUMCTX_EXTRN_CR0)
1385 pInput->Names[iReg++] = HvX64RegisterCr0;
1386 if (fWhat & CPUMCTX_EXTRN_CR2)
1387 pInput->Names[iReg++] = HvX64RegisterCr2;
1388 if (fWhat & CPUMCTX_EXTRN_CR3)
1389 pInput->Names[iReg++] = HvX64RegisterCr3;
1390 if (fWhat & CPUMCTX_EXTRN_CR4)
1391 pInput->Names[iReg++] = HvX64RegisterCr4;
1392 }
1393 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
1394 pInput->Names[iReg++] = HvX64RegisterCr8;
1395
1396 /* Debug registers. */
1397 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
1398 {
1399 pInput->Names[iReg++] = HvX64RegisterDr0;
1400 pInput->Names[iReg++] = HvX64RegisterDr1;
1401 pInput->Names[iReg++] = HvX64RegisterDr2;
1402 pInput->Names[iReg++] = HvX64RegisterDr3;
1403 }
1404 if (fWhat & CPUMCTX_EXTRN_DR6)
1405 pInput->Names[iReg++] = HvX64RegisterDr6;
1406 if (fWhat & CPUMCTX_EXTRN_DR7)
1407 pInput->Names[iReg++] = HvX64RegisterDr7;
1408
1409 /* Floating point state. */
1410 if (fWhat & CPUMCTX_EXTRN_X87)
1411 {
1412 pInput->Names[iReg++] = HvX64RegisterFpMmx0;
1413 pInput->Names[iReg++] = HvX64RegisterFpMmx1;
1414 pInput->Names[iReg++] = HvX64RegisterFpMmx2;
1415 pInput->Names[iReg++] = HvX64RegisterFpMmx3;
1416 pInput->Names[iReg++] = HvX64RegisterFpMmx4;
1417 pInput->Names[iReg++] = HvX64RegisterFpMmx5;
1418 pInput->Names[iReg++] = HvX64RegisterFpMmx6;
1419 pInput->Names[iReg++] = HvX64RegisterFpMmx7;
1420 pInput->Names[iReg++] = HvX64RegisterFpControlStatus;
1421 }
1422 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
1423 pInput->Names[iReg++] = HvX64RegisterXmmControlStatus;
1424
1425 /* Vector state. */
1426 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
1427 {
1428 pInput->Names[iReg++] = HvX64RegisterXmm0;
1429 pInput->Names[iReg++] = HvX64RegisterXmm1;
1430 pInput->Names[iReg++] = HvX64RegisterXmm2;
1431 pInput->Names[iReg++] = HvX64RegisterXmm3;
1432 pInput->Names[iReg++] = HvX64RegisterXmm4;
1433 pInput->Names[iReg++] = HvX64RegisterXmm5;
1434 pInput->Names[iReg++] = HvX64RegisterXmm6;
1435 pInput->Names[iReg++] = HvX64RegisterXmm7;
1436 pInput->Names[iReg++] = HvX64RegisterXmm8;
1437 pInput->Names[iReg++] = HvX64RegisterXmm9;
1438 pInput->Names[iReg++] = HvX64RegisterXmm10;
1439 pInput->Names[iReg++] = HvX64RegisterXmm11;
1440 pInput->Names[iReg++] = HvX64RegisterXmm12;
1441 pInput->Names[iReg++] = HvX64RegisterXmm13;
1442 pInput->Names[iReg++] = HvX64RegisterXmm14;
1443 pInput->Names[iReg++] = HvX64RegisterXmm15;
1444 }
1445
1446 /* MSRs */
1447 // HvX64RegisterTsc - don't touch
1448 if (fWhat & CPUMCTX_EXTRN_EFER)
1449 pInput->Names[iReg++] = HvX64RegisterEfer;
1450 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1451 pInput->Names[iReg++] = HvX64RegisterKernelGsBase;
1452 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1453 {
1454 pInput->Names[iReg++] = HvX64RegisterSysenterCs;
1455 pInput->Names[iReg++] = HvX64RegisterSysenterEip;
1456 pInput->Names[iReg++] = HvX64RegisterSysenterEsp;
1457 }
1458 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1459 {
1460 pInput->Names[iReg++] = HvX64RegisterStar;
1461 pInput->Names[iReg++] = HvX64RegisterLstar;
1462 pInput->Names[iReg++] = HvX64RegisterCstar;
1463 pInput->Names[iReg++] = HvX64RegisterSfmask;
1464 }
1465
1466#ifdef LOG_ENABLED
1467 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM->pVM);
1468#endif
1469 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1470 {
1471 pInput->Names[iReg++] = HvX64RegisterApicBase; /// @todo APIC BASE
1472 pInput->Names[iReg++] = HvX64RegisterPat;
1473#if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
1474 pInput->Names[iReg++] = HvX64RegisterMtrrCap;
1475#endif
1476 pInput->Names[iReg++] = HvX64RegisterMtrrDefType;
1477 pInput->Names[iReg++] = HvX64RegisterMtrrFix64k00000;
1478 pInput->Names[iReg++] = HvX64RegisterMtrrFix16k80000;
1479 pInput->Names[iReg++] = HvX64RegisterMtrrFix16kA0000;
1480 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kC0000;
1481 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kC8000;
1482 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kD0000;
1483 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kD8000;
1484 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kE0000;
1485 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kE8000;
1486 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kF0000;
1487 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kF8000;
1488 pInput->Names[iReg++] = HvX64RegisterTscAux;
1489#if 0 /** @todo why can't we read HvX64RegisterIa32MiscEnable? */
1490 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
1491 pInput->Names[iReg++] = HvX64RegisterIa32MiscEnable;
1492#endif
1493#ifdef LOG_ENABLED
1494 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
1495 pInput->Names[iReg++] = HvX64RegisterIa32FeatureControl;
1496#endif
1497 }
1498
1499 /* Interruptibility. */
1500 if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
1501 {
1502 pInput->Names[iReg++] = HvRegisterInterruptState;
1503 pInput->Names[iReg++] = HvX64RegisterRip;
1504 }
1505
1506 /* event injection */
1507 pInput->Names[iReg++] = HvRegisterPendingInterruption;
1508 pInput->Names[iReg++] = HvRegisterPendingEvent0;
1509 pInput->Names[iReg++] = HvRegisterPendingEvent1;
1510 size_t const cRegs = iReg;
1511 size_t const cbInput = RT_ALIGN_Z(RT_OFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[cRegs]), 32);
1512
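/* The output value array lives in the same hypercall page, right after the 32-byte aligned input header; both halves are handed to the hypervisor as physical addresses below. */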
1513 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
1514 Assert((uintptr_t)&paValues[cRegs] - (uintptr_t)pGVCpu->nem.s.HypercallData.pbPage < PAGE_SIZE); /* (max is around 168 registers) */
1515 RT_BZERO(paValues, cRegs * sizeof(paValues[0]));
1516
1517 /*
1518 * Make the hypercall.
1519 */
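/* HvCallGetVpRegisters is a rep hypercall: HV_MAKE_CALL_INFO packs the call code together with the repetition count (cRegs), and a fully successful call reports all cRegs elements completed (HV_MAKE_CALL_REP_RET). */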
1520 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, cRegs),
1521 pGVCpu->nem.s.HypercallData.HCPhysPage,
1522 pGVCpu->nem.s.HypercallData.HCPhysPage + cbInput);
1523 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(cRegs),
1524 ("uResult=%RX64 cRegs=%#x\n", uResult, cRegs),
1525 VERR_NEM_GET_REGISTERS_FAILED);
1526 //LogFlow(("nemR0WinImportState: uResult=%#RX64 iReg=%zu fWhat=%#018RX64 fExtr=%#018RX64\n", uResult, cRegs, fWhat, pCtx->fExtrn));
1527
1528 /*
1529 * Copy information to the CPUM context.
1530 */
1531 PVMCPU pVCpu = &pGVM->pVM->aCpus[pGVCpu->idCpu];
1532 iReg = 0;
1533
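/* Second pass: copy the returned values into the CPUM context in the same order the names were queued; the Asserts on pInput->Names[iReg] catch any mismatch between the two passes. */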
1534 /* GPRs */
1535 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1536 {
1537 if (fWhat & CPUMCTX_EXTRN_RAX)
1538 {
1539 Assert(pInput->Names[iReg] == HvX64RegisterRax);
1540 pCtx->rax = paValues[iReg++].Reg64;
1541 }
1542 if (fWhat & CPUMCTX_EXTRN_RCX)
1543 {
1544 Assert(pInput->Names[iReg] == HvX64RegisterRcx);
1545 pCtx->rcx = paValues[iReg++].Reg64;
1546 }
1547 if (fWhat & CPUMCTX_EXTRN_RDX)
1548 {
1549 Assert(pInput->Names[iReg] == HvX64RegisterRdx);
1550 pCtx->rdx = paValues[iReg++].Reg64;
1551 }
1552 if (fWhat & CPUMCTX_EXTRN_RBX)
1553 {
1554 Assert(pInput->Names[iReg] == HvX64RegisterRbx);
1555 pCtx->rbx = paValues[iReg++].Reg64;
1556 }
1557 if (fWhat & CPUMCTX_EXTRN_RSP)
1558 {
1559 Assert(pInput->Names[iReg] == HvX64RegisterRsp);
1560 pCtx->rsp = paValues[iReg++].Reg64;
1561 }
1562 if (fWhat & CPUMCTX_EXTRN_RBP)
1563 {
1564 Assert(pInput->Names[iReg] == HvX64RegisterRbp);
1565 pCtx->rbp = paValues[iReg++].Reg64;
1566 }
1567 if (fWhat & CPUMCTX_EXTRN_RSI)
1568 {
1569 Assert(pInput->Names[iReg] == HvX64RegisterRsi);
1570 pCtx->rsi = paValues[iReg++].Reg64;
1571 }
1572 if (fWhat & CPUMCTX_EXTRN_RDI)
1573 {
1574 Assert(pInput->Names[iReg] == HvX64RegisterRdi);
1575 pCtx->rdi = paValues[iReg++].Reg64;
1576 }
1577 if (fWhat & CPUMCTX_EXTRN_R8_R15)
1578 {
1579 Assert(pInput->Names[iReg] == HvX64RegisterR8);
1580 Assert(pInput->Names[iReg + 7] == HvX64RegisterR15);
1581 pCtx->r8 = paValues[iReg++].Reg64;
1582 pCtx->r9 = paValues[iReg++].Reg64;
1583 pCtx->r10 = paValues[iReg++].Reg64;
1584 pCtx->r11 = paValues[iReg++].Reg64;
1585 pCtx->r12 = paValues[iReg++].Reg64;
1586 pCtx->r13 = paValues[iReg++].Reg64;
1587 pCtx->r14 = paValues[iReg++].Reg64;
1588 pCtx->r15 = paValues[iReg++].Reg64;
1589 }
1590 }
1591
1592 /* RIP & Flags */
1593 if (fWhat & CPUMCTX_EXTRN_RIP)
1594 {
1595 Assert(pInput->Names[iReg] == HvX64RegisterRip);
1596 pCtx->rip = paValues[iReg++].Reg64;
1597 }
1598 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
1599 {
1600 Assert(pInput->Names[iReg] == HvX64RegisterRflags);
1601 pCtx->rflags.u = paValues[iReg++].Reg64;
1602 }
1603
1604 /* Segments */
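/* Converts a Hyper-V segment register (Base/Limit/Selector/Attributes) into a CPUM selector register and flags the hidden parts as valid (CPUMSELREG_FLAGS_VALID). */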
1605#define COPY_BACK_SEG(a_idx, a_enmName, a_SReg) \
1606 do { \
1607 Assert(pInput->Names[a_idx] == a_enmName); \
1608 (a_SReg).u64Base = paValues[a_idx].Segment.Base; \
1609 (a_SReg).u32Limit = paValues[a_idx].Segment.Limit; \
1610 (a_SReg).ValidSel = (a_SReg).Sel = paValues[a_idx].Segment.Selector; \
1611 (a_SReg).Attr.u = paValues[a_idx].Segment.Attributes; \
1612 (a_SReg).fFlags = CPUMSELREG_FLAGS_VALID; \
1613 } while (0)
1614 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
1615 {
1616 if (fWhat & CPUMCTX_EXTRN_CS)
1617 {
1618 COPY_BACK_SEG(iReg, HvX64RegisterCs, pCtx->cs);
1619 iReg++;
1620 }
1621 if (fWhat & CPUMCTX_EXTRN_ES)
1622 {
1623 COPY_BACK_SEG(iReg, HvX64RegisterEs, pCtx->es);
1624 iReg++;
1625 }
1626 if (fWhat & CPUMCTX_EXTRN_SS)
1627 {
1628 COPY_BACK_SEG(iReg, HvX64RegisterSs, pCtx->ss);
1629 iReg++;
1630 }
1631 if (fWhat & CPUMCTX_EXTRN_DS)
1632 {
1633 COPY_BACK_SEG(iReg, HvX64RegisterDs, pCtx->ds);
1634 iReg++;
1635 }
1636 if (fWhat & CPUMCTX_EXTRN_FS)
1637 {
1638 COPY_BACK_SEG(iReg, HvX64RegisterFs, pCtx->fs);
1639 iReg++;
1640 }
1641 if (fWhat & CPUMCTX_EXTRN_GS)
1642 {
1643 COPY_BACK_SEG(iReg, HvX64RegisterGs, pCtx->gs);
1644 iReg++;
1645 }
1646 }
1647 /* Descriptor tables and the task segment. */
1648 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
1649 {
1650 if (fWhat & CPUMCTX_EXTRN_LDTR)
1651 {
1652 COPY_BACK_SEG(iReg, HvX64RegisterLdtr, pCtx->ldtr);
1653 iReg++;
1654 }
1655 if (fWhat & CPUMCTX_EXTRN_TR)
1656 {
1657 /* AMD-V likes loading TR in the AVAIL state, whereas Intel insists on BUSY. So, to
1658 avoid triggering sanity assertions around the code, always fix this up. */
1659 COPY_BACK_SEG(iReg, HvX64RegisterTr, pCtx->tr);
1660 switch (pCtx->tr.Attr.n.u4Type)
1661 {
1662 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1663 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1664 break;
1665 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
1666 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
1667 break;
1668 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
1669 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;
1670 break;
1671 }
1672 iReg++;
1673 }
1674 if (fWhat & CPUMCTX_EXTRN_IDTR)
1675 {
1676 Assert(pInput->Names[iReg] == HvX64RegisterIdtr);
1677 pCtx->idtr.cbIdt = paValues[iReg].Table.Limit;
1678 pCtx->idtr.pIdt = paValues[iReg].Table.Base;
1679 iReg++;
1680 }
1681 if (fWhat & CPUMCTX_EXTRN_GDTR)
1682 {
1683 Assert(pInput->Names[iReg] == HvX64RegisterGdtr);
1684 pCtx->gdtr.cbGdt = paValues[iReg].Table.Limit;
1685 pCtx->gdtr.pGdt = paValues[iReg].Table.Base;
1686 iReg++;
1687 }
1688 }
1689
1690 /* Control registers. */
1691 bool fMaybeChangedMode = false;
1692 bool fFlushTlb = false;
1693 bool fFlushGlobalTlb = false;
1694 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
1695 {
1696 if (fWhat & CPUMCTX_EXTRN_CR0)
1697 {
1698 Assert(pInput->Names[iReg] == HvX64RegisterCr0);
1699 if (pCtx->cr0 != paValues[iReg].Reg64)
1700 {
1701 CPUMSetGuestCR0(pVCpu, paValues[iReg].Reg64);
1702 fMaybeChangedMode = true;
1703 fFlushTlb = fFlushGlobalTlb = true; /// @todo fix this
1704 }
1705 iReg++;
1706 }
1707 if (fWhat & CPUMCTX_EXTRN_CR2)
1708 {
1709 Assert(pInput->Names[iReg] == HvX64RegisterCr2);
1710 pCtx->cr2 = paValues[iReg].Reg64;
1711 iReg++;
1712 }
1713 if (fWhat & CPUMCTX_EXTRN_CR3)
1714 {
1715 Assert(pInput->Names[iReg] == HvX64RegisterCr3);
1716 if (pCtx->cr3 != paValues[iReg].Reg64)
1717 {
1718 CPUMSetGuestCR3(pVCpu, paValues[iReg].Reg64);
1719 fFlushTlb = true;
1720 }
1721 iReg++;
1722 }
1723 if (fWhat & CPUMCTX_EXTRN_CR4)
1724 {
1725 Assert(pInput->Names[iReg] == HvX64RegisterCr4);
1726 if (pCtx->cr4 != paValues[iReg].Reg64)
1727 {
1728 CPUMSetGuestCR4(pVCpu, paValues[iReg].Reg64);
1729 fMaybeChangedMode = true;
1730 fFlushTlb = fFlushGlobalTlb = true; /// @todo fix this
1731 }
1732 iReg++;
1733 }
1734 }
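/* CR8 only carries the 4-bit task priority class, so it is shifted up into bits 7:4 of the 8-bit APIC TPR value before being handed to the APIC. */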
1735 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
1736 {
1737 Assert(pInput->Names[iReg] == HvX64RegisterCr8);
1738 APICSetTpr(pVCpu, (uint8_t)paValues[iReg].Reg64 << 4);
1739 iReg++;
1740 }
1741
1742 /* Debug registers. */
1743/** @todo fixme */
1744 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
1745 {
1746 Assert(pInput->Names[iReg] == HvX64RegisterDr0);
1747 Assert(pInput->Names[iReg+3] == HvX64RegisterDr3);
1748 if (pCtx->dr[0] != paValues[iReg].Reg64)
1749 CPUMSetGuestDR0(pVCpu, paValues[iReg].Reg64);
1750 iReg++;
1751 if (pCtx->dr[1] != paValues[iReg].Reg64)
1752 CPUMSetGuestDR1(pVCpu, paValues[iReg].Reg64);
1753 iReg++;
1754 if (pCtx->dr[2] != paValues[iReg].Reg64)
1755 CPUMSetGuestDR2(pVCpu, paValues[iReg].Reg64);
1756 iReg++;
1757 if (pCtx->dr[3] != paValues[iReg].Reg64)
1758 CPUMSetGuestDR3(pVCpu, paValues[iReg].Reg64);
1759 iReg++;
1760 }
1761 if (fWhat & CPUMCTX_EXTRN_DR6)
1762 {
1763 Assert(pInput->Names[iReg] == HvX64RegisterDr6);
1764 if (pCtx->dr[6] != paValues[iReg].Reg64)
1765 CPUMSetGuestDR6(pVCpu, paValues[iReg].Reg64);
1766 iReg++;
1767 }
1768 if (fWhat & CPUMCTX_EXTRN_DR7)
1769 {
1770 Assert(pInput->Names[iReg] == HvX64RegisterDr7);
1771 if (pCtx->dr[7] != paValues[iReg].Reg64)
1772 CPUMSetGuestDR7(pVCpu, paValues[iReg].Reg64);
1773 iReg++;
1774 }
1775
1776 /* Floating point state. */
1777 if (fWhat & CPUMCTX_EXTRN_X87)
1778 {
1779 Assert(pInput->Names[iReg] == HvX64RegisterFpMmx0);
1780 Assert(pInput->Names[iReg + 7] == HvX64RegisterFpMmx7);
1781 pCtx->pXStateR0->x87.aRegs[0].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1782 pCtx->pXStateR0->x87.aRegs[0].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1783 iReg++;
1784 pCtx->pXStateR0->x87.aRegs[1].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1785 pCtx->pXStateR0->x87.aRegs[1].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1786 iReg++;
1787 pCtx->pXStateR0->x87.aRegs[2].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1788 pCtx->pXStateR0->x87.aRegs[2].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1789 iReg++;
1790 pCtx->pXStateR0->x87.aRegs[3].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1791 pCtx->pXStateR0->x87.aRegs[3].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1792 iReg++;
1793 pCtx->pXStateR0->x87.aRegs[4].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1794 pCtx->pXStateR0->x87.aRegs[4].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1795 iReg++;
1796 pCtx->pXStateR0->x87.aRegs[5].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1797 pCtx->pXStateR0->x87.aRegs[5].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1798 iReg++;
1799 pCtx->pXStateR0->x87.aRegs[6].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1800 pCtx->pXStateR0->x87.aRegs[6].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1801 iReg++;
1802 pCtx->pXStateR0->x87.aRegs[7].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1803 pCtx->pXStateR0->x87.aRegs[7].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1804 iReg++;
1805
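/* FpControlStatus packs FCW/FSW/FTW/FOP together with the last FPU instruction pointer; the 64-bit LastFpRip is split back into the legacy FPUIP, CS and reserved fields. */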
1806 Assert(pInput->Names[iReg] == HvX64RegisterFpControlStatus);
1807 pCtx->pXStateR0->x87.FCW = paValues[iReg].FpControlStatus.FpControl;
1808 pCtx->pXStateR0->x87.FSW = paValues[iReg].FpControlStatus.FpStatus;
1809 pCtx->pXStateR0->x87.FTW = paValues[iReg].FpControlStatus.FpTag
1810 /*| (paValues[iReg].FpControlStatus.Reserved << 8)*/;
1811 pCtx->pXStateR0->x87.FOP = paValues[iReg].FpControlStatus.LastFpOp;
1812 pCtx->pXStateR0->x87.FPUIP = (uint32_t)paValues[iReg].FpControlStatus.LastFpRip;
1813 pCtx->pXStateR0->x87.CS = (uint16_t)(paValues[iReg].FpControlStatus.LastFpRip >> 32);
1814 pCtx->pXStateR0->x87.Rsrvd1 = (uint16_t)(paValues[iReg].FpControlStatus.LastFpRip >> 48);
1815 iReg++;
1816 }
1817
1818 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
1819 {
1820 Assert(pInput->Names[iReg] == HvX64RegisterXmmControlStatus);
1821 if (fWhat & CPUMCTX_EXTRN_X87)
1822 {
1823 pCtx->pXStateR0->x87.FPUDP = (uint32_t)paValues[iReg].XmmControlStatus.LastFpRdp;
1824 pCtx->pXStateR0->x87.DS = (uint16_t)(paValues[iReg].XmmControlStatus.LastFpRdp >> 32);
1825 pCtx->pXStateR0->x87.Rsrvd2 = (uint16_t)(paValues[iReg].XmmControlStatus.LastFpRdp >> 48);
1826 }
1827 pCtx->pXStateR0->x87.MXCSR = paValues[iReg].XmmControlStatus.XmmStatusControl;
1828 pCtx->pXStateR0->x87.MXCSR_MASK = paValues[iReg].XmmControlStatus.XmmStatusControlMask; /** @todo ??? (Isn't this an output field?) */
1829 iReg++;
1830 }
1831
1832 /* Vector state. */
1833 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
1834 {
1835 Assert(pInput->Names[iReg] == HvX64RegisterXmm0);
1836 Assert(pInput->Names[iReg+15] == HvX64RegisterXmm15);
1837 pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1838 pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1839 iReg++;
1840 pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1841 pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1842 iReg++;
1843 pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1844 pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1845 iReg++;
1846 pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1847 pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1848 iReg++;
1849 pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1850 pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1851 iReg++;
1852 pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1853 pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1854 iReg++;
1855 pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1856 pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1857 iReg++;
1858 pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1859 pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1860 iReg++;
1861 pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1862 pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1863 iReg++;
1864 pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1865 pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1866 iReg++;
1867 pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1868 pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1869 iReg++;
1870 pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1871 pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1872 iReg++;
1873 pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1874 pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1875 iReg++;
1876 pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1877 pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1878 iReg++;
1879 pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1880 pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1881 iReg++;
1882 pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1883 pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1884 iReg++;
1885 }
1886
1887
1888 /* MSRs */
1889 // HvX64RegisterTsc - don't touch
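// An EFER change can toggle NX (PGM must be notified) and may affect long/paging mode, hence fMaybeChangedMode.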
1890 if (fWhat & CPUMCTX_EXTRN_EFER)
1891 {
1892 Assert(pInput->Names[iReg] == HvX64RegisterEfer);
1893 if (paValues[iReg].Reg64 != pCtx->msrEFER)
1894 {
1895 Log7(("NEM/%u: MSR EFER changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrEFER, paValues[iReg].Reg64));
1896 if ((paValues[iReg].Reg64 ^ pCtx->msrEFER) & MSR_K6_EFER_NXE)
1897 PGMNotifyNxeChanged(pVCpu, RT_BOOL(paValues[iReg].Reg64 & MSR_K6_EFER_NXE));
1898 pCtx->msrEFER = paValues[iReg].Reg64;
1899 fMaybeChangedMode = true;
1900 }
1901 iReg++;
1902 }
1903 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1904 {
1905 Assert(pInput->Names[iReg] == HvX64RegisterKernelGsBase);
1906 if (pCtx->msrKERNELGSBASE != paValues[iReg].Reg64)
1907 Log7(("NEM/%u: MSR KERNELGSBASE changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrKERNELGSBASE, paValues[iReg].Reg64));
1908 pCtx->msrKERNELGSBASE = paValues[iReg].Reg64;
1909 iReg++;
1910 }
1911 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1912 {
1913 Assert(pInput->Names[iReg] == HvX64RegisterSysenterCs);
1914 if (pCtx->SysEnter.cs != paValues[iReg].Reg64)
1915 Log7(("NEM/%u: MSR SYSENTER.CS changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->SysEnter.cs, paValues[iReg].Reg64));
1916 pCtx->SysEnter.cs = paValues[iReg].Reg64;
1917 iReg++;
1918
1919 Assert(pInput->Names[iReg] == HvX64RegisterSysenterEip);
1920 if (pCtx->SysEnter.eip != paValues[iReg].Reg64)
1921 Log7(("NEM/%u: MSR SYSENTER.EIP changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->SysEnter.eip, paValues[iReg].Reg64));
1922 pCtx->SysEnter.eip = paValues[iReg].Reg64;
1923 iReg++;
1924
1925 Assert(pInput->Names[iReg] == HvX64RegisterSysenterEsp);
1926 if (pCtx->SysEnter.esp != paValues[iReg].Reg64)
1927 Log7(("NEM/%u: MSR SYSENTER.ESP changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->SysEnter.esp, paValues[iReg].Reg64));
1928 pCtx->SysEnter.esp = paValues[iReg].Reg64;
1929 iReg++;
1930 }
1931 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1932 {
1933 Assert(pInput->Names[iReg] == HvX64RegisterStar);
1934 if (pCtx->msrSTAR != paValues[iReg].Reg64)
1935 Log7(("NEM/%u: MSR STAR changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrSTAR, paValues[iReg].Reg64));
1936 pCtx->msrSTAR = paValues[iReg].Reg64;
1937 iReg++;
1938
1939 Assert(pInput->Names[iReg] == HvX64RegisterLstar);
1940 if (pCtx->msrLSTAR != paValues[iReg].Reg64)
1941 Log7(("NEM/%u: MSR LSTAR changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrLSTAR, paValues[iReg].Reg64));
1942 pCtx->msrLSTAR = paValues[iReg].Reg64;
1943 iReg++;
1944
1945 Assert(pInput->Names[iReg] == HvX64RegisterCstar);
1946 if (pCtx->msrCSTAR != paValues[iReg].Reg64)
1947 Log7(("NEM/%u: MSR CSTAR changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrCSTAR, paValues[iReg].Reg64));
1948 pCtx->msrCSTAR = paValues[iReg].Reg64;
1949 iReg++;
1950
1951 Assert(pInput->Names[iReg] == HvX64RegisterSfmask);
1952 if (pCtx->msrSFMASK != paValues[iReg].Reg64)
1953 Log7(("NEM/%u: MSR SFMASK changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrSFMASK, paValues[iReg].Reg64));
1954 pCtx->msrSFMASK = paValues[iReg].Reg64;
1955 iReg++;
1956 }
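/* An APIC base MSR write may need completing in ring-3 (VINF_CPUM_R3_MSR_WRITE); remember it so the import can return VERR_NEM_UPDATE_APIC_BASE below. */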
1957 bool fUpdateApicBase = false;
1958 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1959 {
1960 Assert(pInput->Names[iReg] == HvX64RegisterApicBase);
1961 const uint64_t uOldBase = APICGetBaseMsrNoCheck(pVCpu);
1962 if (paValues[iReg].Reg64 != uOldBase)
1963 {
1964 Log7(("NEM/%u: MSR APICBase changed %RX64 -> %RX64 (%RX64)\n",
1965 pVCpu->idCpu, uOldBase, paValues[iReg].Reg64, paValues[iReg].Reg64 ^ uOldBase));
1966 VBOXSTRICTRC rc2 = APICSetBaseMsr(pVCpu, paValues[iReg].Reg64);
1967 if (rc2 == VINF_CPUM_R3_MSR_WRITE)
1968 {
1969 pVCpu->nem.s.uPendingApicBase = paValues[iReg].Reg64;
1970 fUpdateApicBase = true;
1971 }
1972 else
1973 AssertLogRelMsg(rc2 == VINF_SUCCESS, ("rc2=%Rrc [%#RX64]\n", VBOXSTRICTRC_VAL(rc2), paValues[iReg].Reg64));
1974 }
1975 iReg++;
1976
1977 Assert(pInput->Names[iReg] == HvX64RegisterPat);
1978 if (pCtx->msrPAT != paValues[iReg].Reg64)
1979 Log7(("NEM/%u: MSR PAT changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrPAT, paValues[iReg].Reg64));
1980 pCtx->msrPAT = paValues[iReg].Reg64;
1981 iReg++;
1982
1983#if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
1984 Assert(pInput->Names[iReg] == HvX64RegisterMtrrCap);
1985 if (paValues[iReg].Reg64 != CPUMGetGuestIa32MtrrCap(pVCpu))
1986 Log7(("NEM/%u: MSR MTRR_CAP changed %RX64 -> %RX64 (!!)\n", pVCpu->idCpu, CPUMGetGuestIa32MtrrCap(pVCpu), paValues[iReg].Reg64));
1987 iReg++;
1988#endif
1989
1990 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
1991 Assert(pInput->Names[iReg] == HvX64RegisterMtrrDefType);
1992 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrDefType )
1993 Log7(("NEM/%u: MSR MTRR_DEF_TYPE changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrDefType, paValues[iReg].Reg64));
1994 pCtxMsrs->msr.MtrrDefType = paValues[iReg].Reg64;
1995 iReg++;
1996
1997 /** @todo we don't keep state for HvX64RegisterMtrrPhysBaseX and HvX64RegisterMtrrPhysMaskX */
1998
1999 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix64k00000);
2000 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix64K_00000 )
2001 Log7(("NEM/%u: MSR MTRR_FIX64K_00000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix64K_00000, paValues[iReg].Reg64));
2002 pCtxMsrs->msr.MtrrFix64K_00000 = paValues[iReg].Reg64;
2003 iReg++;
2004
2005 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix16k80000);
2006 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix16K_80000 )
2007 Log7(("NEM/%u: MSR MTRR_FIX16K_80000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_80000, paValues[iReg].Reg64));
2008 pCtxMsrs->msr.MtrrFix16K_80000 = paValues[iReg].Reg64;
2009 iReg++;
2010
2011 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix16kA0000);
2012 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix16K_A0000 )
2013 Log7(("NEM/%u: MSR MTRR_FIX16K_A0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_A0000, paValues[iReg].Reg64));
2014 pCtxMsrs->msr.MtrrFix16K_A0000 = paValues[iReg].Reg64;
2015 iReg++;
2016
2017 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kC0000);
2018 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_C0000 )
2019 Log7(("NEM/%u: MSR MTRR_FIX4K_C0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C0000, paValues[iReg].Reg64));
2020 pCtxMsrs->msr.MtrrFix4K_C0000 = paValues[iReg].Reg64;
2021 iReg++;
2022
2023 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kC8000);
2024 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_C8000 )
2025 Log7(("NEM/%u: MSR MTRR_FIX4K_C8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C8000, paValues[iReg].Reg64));
2026 pCtxMsrs->msr.MtrrFix4K_C8000 = paValues[iReg].Reg64;
2027 iReg++;
2028
2029 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kD0000);
2030 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_D0000 )
2031 Log7(("NEM/%u: MSR MTRR_FIX4K_D0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D0000, paValues[iReg].Reg64));
2032 pCtxMsrs->msr.MtrrFix4K_D0000 = paValues[iReg].Reg64;
2033 iReg++;
2034
2035 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kD8000);
2036 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_D8000 )
2037 Log7(("NEM/%u: MSR MTRR_FIX4K_D8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D8000, paValues[iReg].Reg64));
2038 pCtxMsrs->msr.MtrrFix4K_D8000 = paValues[iReg].Reg64;
2039 iReg++;
2040
2041 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kE0000);
2042 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_E0000 )
2043 Log7(("NEM/%u: MSR MTRR_FIX4K_E0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E0000, paValues[iReg].Reg64));
2044 pCtxMsrs->msr.MtrrFix4K_E0000 = paValues[iReg].Reg64;
2045 iReg++;
2046
2047 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kE8000);
2048 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_E8000 )
2049 Log7(("NEM/%u: MSR MTRR_FIX4K_E8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E8000, paValues[iReg].Reg64));
2050 pCtxMsrs->msr.MtrrFix4K_E8000 = paValues[iReg].Reg64;
2051 iReg++;
2052
2053 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kF0000);
2054 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_F0000 )
2055 Log7(("NEM/%u: MSR MTRR_FIX4K_F0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F0000, paValues[iReg].Reg64));
2056 pCtxMsrs->msr.MtrrFix4K_F0000 = paValues[iReg].Reg64;
2057 iReg++;
2058
2059 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kF8000);
2060 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_F8000 )
2061 Log7(("NEM/%u: MSR MTRR_FIX4K_F8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F8000, paValues[iReg].Reg64));
2062 pCtxMsrs->msr.MtrrFix4K_F8000 = paValues[iReg].Reg64;
2063 iReg++;
2064
2065 Assert(pInput->Names[iReg] == HvX64RegisterTscAux);
2066 if (paValues[iReg].Reg64 != pCtxMsrs->msr.TscAux )
2067 Log7(("NEM/%u: MSR TSC_AUX changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.TscAux, paValues[iReg].Reg64));
2068 pCtxMsrs->msr.TscAux = paValues[iReg].Reg64;
2069 iReg++;
2070
2071#if 0 /** @todo why can't we even read HvX64RegisterIa32MiscEnable? */
2072 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
2073 {
2074 Assert(pInput->Names[iReg] == HvX64RegisterIa32MiscEnable);
2075 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MiscEnable)
2076 Log7(("NEM/%u: MSR MISC_ENABLE changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MiscEnable, paValues[iReg].Reg64));
2077 pCtxMsrs->msr.MiscEnable = paValues[iReg].Reg64;
2078 iReg++;
2079 }
2080#endif
2081#ifdef LOG_ENABLED
2082 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
2083 {
2084 Assert(pInput->Names[iReg] == HvX64RegisterIa32FeatureControl);
2085 if (paValues[iReg].Reg64 != CPUMGetGuestIa32FeatureControl(pVCpu))
2086 Log7(("NEM/%u: MSR FEATURE_CONTROL changed %RX64 -> %RX64 (!!)\n", pVCpu->idCpu, CPUMGetGuestIa32FeatureControl(pVCpu), paValues[iReg].Reg64));
2087 iReg++;
2088 }
2089#endif
2090 }
2091
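/* The interrupt state was queried together with RIP so that an active interrupt shadow can be recorded with the exact inhibiting instruction address (EMSetInhibitInterruptsPC). */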
2092 /* Interruptibility. */
2093 if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
2094 {
2095 Assert(pInput->Names[iReg] == HvRegisterInterruptState);
2096 Assert(pInput->Names[iReg + 1] == HvX64RegisterRip);
2097
2098 if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
2099 {
2100 pVCpu->nem.s.fLastInterruptShadow = paValues[iReg].InterruptState.InterruptShadow;
2101 if (paValues[iReg].InterruptState.InterruptShadow)
2102 EMSetInhibitInterruptsPC(pVCpu, paValues[iReg + 1].Reg64);
2103 else
2104 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2105 }
2106
2107 if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
2108 {
2109 if (paValues[iReg].InterruptState.NmiMasked)
2110 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
2111 else
2112 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
2113 }
2114
2115 fWhat |= CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI;
2116 iReg += 2;
2117 }
2118
2119 /* Event injection. */
2120 /// @todo HvRegisterPendingInterruption
2121 Assert(pInput->Names[iReg] == HvRegisterPendingInterruption);
2122 if (paValues[iReg].PendingInterruption.InterruptionPending)
2123 {
2124 Log7(("PendingInterruption: type=%u vector=%#x errcd=%RTbool/%#x instr-len=%u nested=%u\n",
2125 paValues[iReg].PendingInterruption.InterruptionType, paValues[iReg].PendingInterruption.InterruptionVector,
2126 paValues[iReg].PendingInterruption.DeliverErrorCode, paValues[iReg].PendingInterruption.ErrorCode,
2127 paValues[iReg].PendingInterruption.InstructionLength, paValues[iReg].PendingInterruption.NestedEvent));
2128 AssertMsg((paValues[iReg].PendingInterruption.AsUINT64 & UINT64_C(0xfc00)) == 0,
2129 ("%#RX64\n", paValues[iReg].PendingInterruption.AsUINT64));
2130 }
2131
2132 /// @todo HvRegisterPendingEvent0
2133 /// @todo HvRegisterPendingEvent1
2134
2135 /* Almost done, just update extrn flags and maybe change PGM mode. */
2136 pCtx->fExtrn &= ~fWhat;
2137 if (!(pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT))))
2138 pCtx->fExtrn = 0;
2139
2140 /* Typical. */
2141 if (!fMaybeChangedMode && !fFlushTlb && !fUpdateApicBase)
2142 return VINF_SUCCESS;
2143
2144 /*
2145 * Slow path: apply mode changes and hand TLB flushes / APIC base updates back to ring-3 via status codes.
2146 */
2147 int rc = VINF_SUCCESS;
2148 if (fMaybeChangedMode)
2149 {
2150 rc = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
2151 if (rc == VINF_PGM_CHANGE_MODE)
2152 {
2153 LogFlow(("nemR0WinImportState: -> VERR_NEM_CHANGE_PGM_MODE!\n"));
2154 return VERR_NEM_CHANGE_PGM_MODE;
2155 }
2156 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc));
2157 }
2158
2159 if (fFlushTlb)
2160 {
2161 LogFlow(("nemR0WinImportState: -> VERR_NEM_FLUSH_TLB!\n"));
2162 rc = VERR_NEM_FLUSH_TLB; /* Calling PGMFlushTLB w/o long jump setup doesn't work, ring-3 does it. */
2163 }
2164
2165 if (fUpdateApicBase && rc == VINF_SUCCESS)
2166 {
2167 LogFlow(("nemR0WinImportState: -> VERR_NEM_UPDATE_APIC_BASE!\n"));
2168 rc = VERR_NEM_UPDATE_APIC_BASE;
2169 }
2170
2171 return rc;
2172}
2173
2174
2175/**
2176 * Import the state from the native API (back to CPUMCTX).
2177 *
2178 * @returns VBox status code
2179 * @param pGVM The ring-0 VM handle.
2180 * @param pVM The cross context VM handle.
2181 * @param idCpu The calling EMT. Necessary for getting the
2182 * hypercall page and arguments.
2183 * @param fWhat What to import, CPUMCTX_EXTRN_XXX. Set
2184 * CPUMCTX_EXTRN_ALL for everything.
2185 */
2186VMMR0_INT_DECL(int) NEMR0ImportState(PGVM pGVM, PVM pVM, VMCPUID idCpu, uint64_t fWhat)
2187{
2188 /*
2189 * Validate the call.
2190 */
2191 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
2192 if (RT_SUCCESS(rc))
2193 {
2194 PVMCPU pVCpu = &pVM->aCpus[idCpu];
2195 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2196 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2197
2198 /*
2199 * Call worker.
2200 */
2201 rc = nemR0WinImportState(pGVM, pGVCpu, CPUMQueryGuestCtxPtr(pVCpu), fWhat);
2202 }
2203 return rc;
2204}
2205
2206
2207VMMR0_INT_DECL(VBOXSTRICTRC) NEMR0RunGuestCode(PGVM pGVM, VMCPUID idCpu)
2208{
2209#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
2210 PVM pVM = pGVM->pVM;
2211 return nemHCWinRunGC(pVM, &pVM->aCpus[idCpu], pGVM, &pGVM->aCpus[idCpu]);
2212#else
2213 RT_NOREF(pGVM, idCpu);
2214 return VERR_NOT_IMPLEMENTED;
2215#endif
2216}
2217
2218
2219/**
2220 * Updates statistics in the VM structure.
2221 *
2222 * @returns VBox status code.
2223 * @param pGVM The ring-0 VM handle.
2224 * @param pVM The cross context VM handle.
2225 * @param idCpu The calling EMT, or NIL. Necessary for getting the hypercall
2226 * page and arguments.
2227 */
2228VMMR0_INT_DECL(int) NEMR0UpdateStatistics(PGVM pGVM, PVM pVM, VMCPUID idCpu)
2229{
2230 /*
2231 * Validate the call.
2232 */
2233 int rc;
2234 if (idCpu == NIL_VMCPUID)
2235 rc = GVMMR0ValidateGVMandVM(pGVM, pVM);
2236 else
2237 rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
2238 if (RT_SUCCESS(rc))
2239 {
2240 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2241
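/* Without a specific EMT the shared per-VM hypercall page is used and must be serialized via the critical section; a per-EMT page is only touched here by the calling EMT. */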
2242 PNEMR0HYPERCALLDATA pHypercallData = idCpu != NIL_VMCPUID
2243 ? &pGVM->aCpus[idCpu].nem.s.HypercallData
2244 : &pGVM->nem.s.HypercallData;
2245 if ( RT_VALID_PTR(pHypercallData->pbPage)
2246 && pHypercallData->HCPhysPage != NIL_RTHCPHYS)
2247 {
2248 if (idCpu == NIL_VMCPUID)
2249 rc = RTCritSectEnter(&pGVM->nem.s.HypercallDataCritSect);
2250 if (RT_SUCCESS(rc))
2251 {
2252 /*
2253 * Query the memory statistics for the partition.
2254 */
2255 HV_INPUT_GET_MEMORY_BALANCE *pInput = (HV_INPUT_GET_MEMORY_BALANCE *)pHypercallData->pbPage;
2256 pInput->TargetPartitionId = pGVM->nem.s.idHvPartition;
2257 pInput->ProximityDomainInfo.Flags.ProximityPreferred = 0;
2258 pInput->ProximityDomainInfo.Flags.ProxyimityInfoValid = 0;
2259 pInput->ProximityDomainInfo.Flags.Reserved = 0;
2260 pInput->ProximityDomainInfo.Id = 0;
2261
2262 HV_OUTPUT_GET_MEMORY_BALANCE *pOutput = (HV_OUTPUT_GET_MEMORY_BALANCE *)(pInput + 1);
2263 RT_ZERO(*pOutput);
2264
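/* HvCallGetMemoryBalance is a simple (non-rep) hypercall: the call code is passed as-is and the input and output buffers both live in the hypercall page. */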
2265 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallGetMemoryBalance,
2266 pHypercallData->HCPhysPage,
2267 pHypercallData->HCPhysPage + sizeof(*pInput));
2268 if (uResult == HV_STATUS_SUCCESS)
2269 {
2270 pVM->nem.s.R0Stats.cPagesAvailable = pOutput->PagesAvailable;
2271 pVM->nem.s.R0Stats.cPagesInUse = pOutput->PagesInUse;
2272 rc = VINF_SUCCESS;
2273 }
2274 else
2275 {
2276 LogRel(("HvCallGetMemoryBalance -> %#RX64 (%#RX64 %#RX64)!!\n",
2277 uResult, pOutput->PagesAvailable, pOutput->PagesInUse));
2278 rc = VERR_NEM_IPE_0;
2279 }
2280
2281 if (idCpu == NIL_VMCPUID)
2282 RTCritSectLeave(&pGVM->nem.s.HypercallDataCritSect);
2283 }
2284 }
2285 else
2286 rc = VERR_WRONG_ORDER;
2287 }
2288 return rc;
2289}
2290