VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp@71075

Last change on this file since 71075 was 71075, checked in by vboxsync, 7 years ago

VMM,SUPDrv: More NEM/win experimentation. bugref:9044

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 12.9 KB
/* $Id: NEMR0Native-win.cpp 71075 2018-02-20 21:10:45Z vboxsync $ */
/** @file
 * NEM - Native execution manager, native ring-0 Windows backend.
 */

/*
 * Copyright (C) 2018 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_NEM
#include <iprt/nt/hyperv.h>

#include <VBox/vmm/nem.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/apic.h>
#include "NEMInternal.h"
#include <VBox/vmm/gvm.h>
#include <VBox/vmm/vm.h>
#include <VBox/vmm/gvmm.h>
#include <VBox/param.h>

#include <iprt/dbg.h>
#include <iprt/memobj.h>
#include <iprt/string.h>


/* Assert compile context sanity. */
#ifndef RT_OS_WINDOWS
# error "Windows only file!"
#endif
#ifndef RT_ARCH_AMD64
# error "AMD64 only file!"
#endif


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
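/** Pointer to the ntoskrnl.exe hypercall worker resolved by NEMR0InitVM().
 *  Remains NULL if the lookup fails (see VERR_NEM_MISSING_KERNEL_API). */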
static uint64_t (*g_pfnHvlInvokeHypercall)(uint64_t uCallInfo, uint64_t GCPhysInput, uint64_t GCPhysOutput);



/**
 * Called by NEMR3Init to make sure we've got what we need.
 *
 * @returns VBox status code.
 * @param   pGVM    The ring-0 VM handle.
 * @param   pVM     The cross context VM handle.
 * @thread  EMT(0)
 */
VMMR0_INT_DECL(int) NEMR0InitVM(PGVM pGVM, PVM pVM)
{
    int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, 0);
    AssertRCReturn(rc, rc);

    /*
     * We want to perform hypercalls here.  The NT kernel started to expose a
     * very low level interface for doing this somewhere between builds 14271
     * and 16299.  Since we need build 17083 to get anywhere at all, the exact
     * build is not relevant here.
     */
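    /*
     * Note: HvlInvokeHypercall appears to be an internal, undocumented
     *       ntoskrnl.exe export that issues the actual hypercall instruction
     *       on our behalf, which is presumably why it has to be resolved at
     *       runtime instead of being linked against.
     */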
    RTDBGKRNLINFO hKrnlInfo;
    rc = RTR0DbgKrnlInfoOpen(&hKrnlInfo, 0);
    if (RT_SUCCESS(rc))
    {
        rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, NULL, "HvlInvokeHypercall", (void **)&g_pfnHvlInvokeHypercall);
        RTR0DbgKrnlInfoRelease(hKrnlInfo);
        if (RT_SUCCESS(rc))
        {
            /*
             * Allocate a page for each VCPU to place hypercall data on.
             */
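            /*
             * (Per our reading of the Hyper-V TLFS, hypercall input and output
             * blocks must be non-pageable, physically contiguous and must not
             * cross a page boundary, so one dedicated page per VCPU covers it.)
             */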
            for (VMCPUID i = 0; i < pGVM->cCpus; i++)
            {
                PGVMCPU pGVCpu = &pGVM->aCpus[i];
                rc = RTR0MemObjAllocPage(&pGVCpu->nem.s.hHypercallDataMemObj, PAGE_SIZE, false /*fExecutable*/);
                if (RT_SUCCESS(rc))
                {
                    pGVCpu->nem.s.HCPhysHypercallData = RTR0MemObjGetPagePhysAddr(pGVCpu->nem.s.hHypercallDataMemObj, 0 /*iPage*/);
                    pGVCpu->nem.s.pbHypercallData     = (uint8_t *)RTR0MemObjAddress(pGVCpu->nem.s.hHypercallDataMemObj);
                    AssertStmt(pGVCpu->nem.s.HCPhysHypercallData != NIL_RTHCPHYS, rc = VERR_INTERNAL_ERROR_3);
                    AssertStmt(pGVCpu->nem.s.pbHypercallData, rc = VERR_INTERNAL_ERROR_3);
                }
                else
                    pGVCpu->nem.s.hHypercallDataMemObj = NIL_RTR0MEMOBJ;
                if (RT_FAILURE(rc))
                {
                    /* bail, unwinding the VCPUs we've already initialized. */
                    do
                    {
                        pGVCpu = &pGVM->aCpus[i]; /* re-fetch; the loop walks backwards over this and all prior VCPUs. */
                        RTR0MemObjFree(pGVCpu->nem.s.hHypercallDataMemObj, true /*fFreeMappings*/);
                        pGVCpu->nem.s.hHypercallDataMemObj = NIL_RTR0MEMOBJ;
                        pGVCpu->nem.s.HCPhysHypercallData  = NIL_RTHCPHYS;
                        pGVCpu->nem.s.pbHypercallData      = NULL;
                    } while (i-- > 0);
                    return rc;
                }
            }
            /*
             * So far, so good.
             */
            /** @todo would be good if we could establish the partition ID ourselves. */
            /** @todo this is too EARLY! */
            pGVM->nem.s.idHvPartition = pVM->nem.s.idHvPartition;
            return rc;
        }

        rc = VERR_NEM_MISSING_KERNEL_API;
    }

    RT_NOREF(pGVM, pVM);
    return rc;
}


/**
 * Clean up the NEM parts of the VM in ring-0.
 *
 * This is always called and must deal with the state regardless of whether
 * NEMR0InitVM() was called or not.  So, take care here.
 *
 * @param   pGVM    The ring-0 VM handle.
 */
VMMR0_INT_DECL(void) NEMR0CleanupVM(PGVM pGVM)
{
    pGVM->nem.s.idHvPartition = HV_PARTITION_ID_INVALID;

    /* Free the hypercall pages. */
    VMCPUID i = pGVM->cCpus;
    while (i-- > 0)
    {
        PGVMCPU pGVCpu = &pGVM->aCpus[i];
        if (pGVCpu->nem.s.pbHypercallData)
        {
            pGVCpu->nem.s.pbHypercallData = NULL;
            int rc = RTR0MemObjFree(pGVCpu->nem.s.hHypercallDataMemObj, true /*fFreeMappings*/);
            AssertRC(rc);
        }
        pGVCpu->nem.s.hHypercallDataMemObj = NIL_RTR0MEMOBJ;
        pGVCpu->nem.s.HCPhysHypercallData  = NIL_RTHCPHYS;
    }
}


/**
 * Maps pages into the guest physical address space.
 *
 * Generally the caller will be under the PGM lock already, so no extra effort
 * is needed to make sure all changes happen under it.
 *
 * @returns VBox status code.
 * @param   pGVM    The ring-0 VM handle.
 * @param   pVM     The cross context VM handle.
 * @param   idCpu   The calling EMT.  Necessary for getting the
 *                  hypercall page and arguments.
 * @thread  EMT(idCpu)
 */
VMMR0_INT_DECL(int) NEMR0MapPages(PGVM pGVM, PVM pVM, VMCPUID idCpu)
{
    /*
     * Validate the call.
     */
    int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
    if (RT_SUCCESS(rc))
    {
        PVMCPU  pVCpu  = &pVM->aCpus[idCpu];
        PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
        AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);

        RTGCPHYS                GCPhysSrc = pVCpu->nem.s.Hypercall.MapPages.GCPhysSrc;
        RTGCPHYS const          GCPhysDst = pVCpu->nem.s.Hypercall.MapPages.GCPhysDst;
        uint32_t const          cPages    = pVCpu->nem.s.Hypercall.MapPages.cPages;
        HV_MAP_GPA_FLAGS const  fFlags    = pVCpu->nem.s.Hypercall.MapPages.fFlags;

        AssertReturn(cPages > 0, VERR_OUT_OF_RANGE);
        AssertReturn(cPages <= NEM_MAX_MAP_PAGES, VERR_OUT_OF_RANGE);
        AssertReturn(!(fFlags & ~(HV_MAP_GPA_MAYBE_ACCESS_MASK & ~HV_MAP_GPA_DUNNO_ACCESS)), VERR_INVALID_FLAGS);
        AssertMsgReturn(!(GCPhysDst & X86_PAGE_OFFSET_MASK), ("GCPhysDst=%RGp\n", GCPhysDst), VERR_OUT_OF_RANGE);
        AssertReturn(GCPhysDst < _1E, VERR_OUT_OF_RANGE);
        if (GCPhysSrc != GCPhysDst)
        {
            AssertMsgReturn(!(GCPhysSrc & X86_PAGE_OFFSET_MASK), ("GCPhysSrc=%RGp\n", GCPhysSrc), VERR_OUT_OF_RANGE);
            AssertReturn(GCPhysSrc < _1E, VERR_OUT_OF_RANGE);
        }
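        /* (The _1E checks above use IPRT's 2^60 constant as a sanity bound
           on guest physical addresses; anything at or beyond it is assumed
           to be garbage.) */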

        /** @todo fix pGVM->nem.s.idHvPartition init. */
        if (pGVM->nem.s.idHvPartition == 0)
            pGVM->nem.s.idHvPartition = pVM->nem.s.idHvPartition;

        /*
         * Compose and make the hypercall.
         * Ring-3 is not allowed to fill in the host physical addresses of the call.
         */
        HV_INPUT_MAP_GPA_PAGES *pMapPages = (HV_INPUT_MAP_GPA_PAGES *)pGVCpu->nem.s.pbHypercallData;
        AssertPtrReturn(pMapPages, VERR_INTERNAL_ERROR_3);
        pMapPages->TargetPartitionId    = pGVM->nem.s.idHvPartition;
        pMapPages->TargetGpaBase        = GCPhysDst >> X86_PAGE_SHIFT;
        pMapPages->MapFlags             = fFlags;
        pMapPages->u32ExplicitPadding   = 0;
        for (uint32_t iPage = 0; iPage < cPages; iPage++, GCPhysSrc += X86_PAGE_SIZE)
        {
            RTHCPHYS HCPhys = NIL_RTHCPHYS;
            rc = PGMPhysGCPhys2HCPhys(pVM, GCPhysSrc, &HCPhys);
            AssertRCBreak(rc);
            pMapPages->PageList[iPage] = HCPhys >> X86_PAGE_SHIFT;
        }
        if (RT_SUCCESS(rc))
        {
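            /*
             * Note: For a rep hypercall like HvCallMapGpaPages, the repeat
             *       count goes in bits 43:32 of the call info (hence the
             *       cPages shift), and on success the result value carries a
             *       zero status in bits 15:0 with the completed rep count in
             *       bits 43:32, which is what the check below expects.
             *       (Our reading of the Hyper-V TLFS hypercall ABI.)
             */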
            uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallMapGpaPages | ((uint64_t)cPages << 32),
                                                       pGVCpu->nem.s.HCPhysHypercallData, 0);
            Log6(("NEMR0MapPages: %RGp/%RGp L %u prot %#x -> %#RX64\n",
                  GCPhysDst, GCPhysSrc - cPages * X86_PAGE_SIZE, cPages, fFlags, uResult));
            if (uResult == ((uint64_t)cPages << 32))
                rc = VINF_SUCCESS;
            else
            {
                rc = VERR_NEM_MAP_PAGES_FAILED;
                LogRel(("g_pfnHvlInvokeHypercall/MapGpaPages -> %#RX64\n", uResult));
            }
        }
    }
    return rc;
}


#if 0 /* for debugging GPA unmapping. */
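/* Note: HvCallReadGpa is a simple (non-rep) hypercall; the output block is
   placed right after the input block on the same hypercall page, which is why
   the output address passed below is HCPhysHypercallData + sizeof(*pIn). */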
static int nemR3WinDummyReadGpa(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys)
{
    PHV_INPUT_READ_GPA  pIn  = (PHV_INPUT_READ_GPA)pGVCpu->nem.s.pbHypercallData;
    PHV_OUTPUT_READ_GPA pOut = (PHV_OUTPUT_READ_GPA)(pIn + 1);
    pIn->PartitionId            = pGVM->nem.s.idHvPartition;
    pIn->VpIndex                = pGVCpu->idCpu;
    pIn->ByteCount              = 0x10;
    pIn->BaseGpa                = GCPhys;
    pIn->ControlFlags.AsUINT64  = 0;
    pIn->ControlFlags.CacheType = HvCacheTypeX64WriteCombining;
    memset(pOut, 0xfe, sizeof(*pOut));
    uint64_t volatile uResult = g_pfnHvlInvokeHypercall(HvCallReadGpa, pGVCpu->nem.s.HCPhysHypercallData,
                                                        pGVCpu->nem.s.HCPhysHypercallData + sizeof(*pIn));
    LogRel(("nemR3WinDummyReadGpa: %RGp -> %#RX64; code=%u rsvd=%u abData=%.16Rhxs\n",
            GCPhys, uResult, pOut->AccessResult.ResultCode, pOut->AccessResult.Reserved, pOut->Data));
    __debugbreak();

    return uResult != 0 ? VERR_READ_ERROR : VINF_SUCCESS;
}
#endif


/**
 * Unmaps pages from the guest physical address space.
 *
 * Generally the caller will be under the PGM lock already, so no extra effort
 * is needed to make sure all changes happen under it.
 *
 * @returns VBox status code.
 * @param   pGVM    The ring-0 VM handle.
 * @param   pVM     The cross context VM handle.
 * @param   idCpu   The calling EMT.  Necessary for getting the
 *                  hypercall page and arguments.
 * @thread  EMT(idCpu)
 */
VMMR0_INT_DECL(int) NEMR0UnmapPages(PGVM pGVM, PVM pVM, VMCPUID idCpu)
{
    /*
     * Validate the call.
     */
    int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
    if (RT_SUCCESS(rc))
    {
        PVMCPU  pVCpu  = &pVM->aCpus[idCpu];
        PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
        AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);

        RTGCPHYS        GCPhys = pVCpu->nem.s.Hypercall.UnmapPages.GCPhys;
        uint32_t const  cPages = pVCpu->nem.s.Hypercall.UnmapPages.cPages;

        AssertReturn(cPages > 0, VERR_OUT_OF_RANGE);
        AssertReturn(cPages <= NEM_MAX_UNMAP_PAGES, VERR_OUT_OF_RANGE);
        AssertMsgReturn(!(GCPhys & X86_PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_OUT_OF_RANGE);
        AssertReturn(GCPhys < _1E, VERR_OUT_OF_RANGE);

        /** @todo fix pGVM->nem.s.idHvPartition init. */
        if (pGVM->nem.s.idHvPartition == 0)
            pGVM->nem.s.idHvPartition = pVM->nem.s.idHvPartition;

        /*
         * Compose and make the hypercall.
         */
        HV_INPUT_UNMAP_GPA_PAGES *pUnmapPages = (HV_INPUT_UNMAP_GPA_PAGES *)pGVCpu->nem.s.pbHypercallData;
        AssertPtrReturn(pUnmapPages, VERR_INTERNAL_ERROR_3);
        pUnmapPages->TargetPartitionId  = pGVM->nem.s.idHvPartition;
        pUnmapPages->TargetGpaBase      = GCPhys >> X86_PAGE_SHIFT;
        pUnmapPages->fFlags             = 0;

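        /* Same rep hypercall encoding and result convention as in
           NEMR0MapPages above: repeat count in bits 43:32 of the call info,
           success meaning a zero status with the rep count in bits 43:32. */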
        uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallUnmapGpaPages | ((uint64_t)cPages << 32),
                                                   pGVCpu->nem.s.HCPhysHypercallData, 0);
        Log6(("NEMR0UnmapPages: %RGp L %u -> %#RX64\n", GCPhys, cPages, uResult));
        if (uResult == ((uint64_t)cPages << 32))
        {
#if 1 /* Do we need to do this? Hopefully not... */
            uint64_t volatile uR = g_pfnHvlInvokeHypercall(HvCallUncommitGpaPages | ((uint64_t)cPages << 32),
                                                           pGVCpu->nem.s.HCPhysHypercallData, 0);
            AssertMsg(uR == ((uint64_t)cPages << 32), ("uR=%#RX64\n", uR));
#endif
            rc = VINF_SUCCESS;
        }
        else
        {
            rc = VERR_NEM_UNMAP_PAGES_FAILED;
            LogRel(("g_pfnHvlInvokeHypercall/UnmapGpaPages -> %#RX64\n", uResult));
        }
    }
    return rc;
}