VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp@5123

Last change on this file since 5123 was 5031, checked in by vboxsync, 17 years ago

Use GVMMR0CreateVM. The new GVM structure is a ring-0 only VM structure; the old VM structure is the shared ring-0, ring-3 and GC VM structure.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 29.0 KB
/* $Id: GVMMR0.cpp 5031 2007-09-25 22:27:37Z vboxsync $ */
/** @file
 * GVMM - Global VM Manager.
 */

/*
 * Copyright (C) 2007 innotek GmbH
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License as published by the Free Software Foundation,
 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
 * distribution. VirtualBox OSE is distributed in the hope that it will
 * be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 */


/** @page pg_GVMM   GVMM - The Global VM Manager
 *
 * The Global VM Manager lives in ring-0. Its main function at the moment
 * is to manage a list of all running VMs, keep a ring-0 only structure (GVM)
 * for each of them, and assign them unique identifiers (so GMM can track
 * page owners). The idea for the future is to add an idle priority kernel
 * thread that can take care of tasks like page sharing.
 *
 * The GVMM will create a ring-0 object for each VM when it is registered;
 * this is both for session cleanup purposes and for having a point where
 * it's possible to implement usage policies later (in SUPR0ObjRegister).
 */
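
/*
 * Overview of the lifecycle implemented below: GVMMR0CreateVM allocates a
 * handle, the GVM structure and the shared VM structure; GVMMR0AssociateEMTWithVM
 * binds the emulation thread early in ring-0 init; GVMMR0DisassociateEMTFromVM
 * unbinds it late in ring-0 termination; and GVMMR0DestroyVM merely drops the
 * object reference, leaving the actual cleanup to gvmmR0HandleObjDestructor.
 */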


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_GVMM
#include <VBox/gvmm.h>
#include "GVMMR0Internal.h"
#include <VBox/gvm.h>
#include <VBox/vm.h>
#include <VBox/err.h>
#include <iprt/alloc.h>
#include <iprt/semaphore.h>
#include <VBox/log.h>
#include <iprt/thread.h>
#include <iprt/param.h>
#include <iprt/string.h>
#include <iprt/assert.h>
#include <iprt/mem.h>
#include <iprt/memobj.h>


/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/

/**
 * Global VM handle.
 */
typedef struct GVMHANDLE
{
    /** The index of the next handle in the list (free or used). (0 is nil.) */
    uint16_t volatile   iNext;
    /** Our own index / handle value. */
    uint16_t            iSelf;
    /** The pointer to the ring-0 only (aka global) VM structure. */
    PGVM                pGVM;
    /** The ring-0 mapping of the shared VM instance data. */
    PVM                 pVM;
    /** The virtual machine object. */
    void               *pvObj;
    /** The session this VM is associated with. */
    PSUPDRVSESSION      pSession;
    /** The ring-0 handle of the EMT thread.
     * This is used for assertions and similar cases where we need to find the VM handle. */
    RTNATIVETHREAD      hEMT;
} GVMHANDLE;
/** Pointer to a global VM handle. */
typedef GVMHANDLE *PGVMHANDLE;

/**
 * The GVMM instance data.
 */
typedef struct GVMM
{
    /** Eyecatcher / magic. */
    uint32_t            u32Magic;
    /** The index of the head of the free handle chain. (0 is nil.) */
    uint16_t volatile   iFreeHead;
    /** The index of the head of the active handle chain. (0 is nil.) */
    uint16_t volatile   iUsedHead;
    /** The lock used to serialize registration and deregistration. */
    RTSEMFASTMUTEX      Lock;
    /** The handle array.
     * The size of this array defines the maximum number of currently running VMs.
     * The first entry is unused as it represents the NIL handle. */
    GVMHANDLE           aHandles[128];
} GVMM;
/** Pointer to the GVMM instance data. */
typedef GVMM *PGVMM;

/** The GVMM::u32Magic value (Charlie Haden's birthday, 1937-08-06). */
#define GVMM_MAGIC      0x19370806



/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/** Pointer to the GVMM instance data.
 * (Just my general dislike for global variables.) */
static PGVMM g_pGVMM = NULL;

/** Macro for obtaining and validating the g_pGVMM pointer.
 * On failure it will return from the invoking function with the specified return value.
 *
 * @param   pGVMM   The name of the pGVMM variable.
 * @param   rc      The return value on failure. Use VERR_INTERNAL_ERROR for
 *                  VBox status codes.
 */
#define GVMM_GET_VALID_INSTANCE(pGVMM, rc) \
    do { \
        (pGVMM) = g_pGVMM; \
        AssertPtrReturn((pGVMM), (rc)); \
        AssertMsgReturn((pGVMM)->u32Magic == GVMM_MAGIC, ("%p - %#x\n", (pGVMM), (pGVMM)->u32Magic), (rc)); \
    } while (0)

/** Macro for obtaining and validating the g_pGVMM pointer, void function variant.
 * On failure it will return from the invoking function.
 *
 * @param   pGVMM   The name of the pGVMM variable.
 */
#define GVMM_GET_VALID_INSTANCE_VOID(pGVMM) \
    do { \
        (pGVMM) = g_pGVMM; \
        AssertPtrReturnVoid((pGVMM)); \
        AssertMsgReturnVoid((pGVMM)->u32Magic == GVMM_MAGIC, ("%p - %#x\n", (pGVMM), (pGVMM)->u32Magic)); \
    } while (0)
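
/* Example usage, following the pattern of the functions below:
 *
 *     PGVMM pGVMM;
 *     GVMM_GET_VALID_INSTANCE(pGVMM, VERR_INTERNAL_ERROR);
 */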


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static DECLCALLBACK(void) gvmmR0HandleObjDestructor(void *pvObj, void *pvGVMM, void *pvHandle);


/**
 * Initializes the GVMM.
 *
 * This is called while owning the loader semaphore (see supdrvIOCtl_LdrLoad()).
 *
 * @returns VBox status code.
 */
GVMMR0DECL(int) GVMMR0Init(void)
{
    SUPR0Printf("GVMMR0Init:\n");

    /*
     * Allocate and initialize the instance data.
     */
    PGVMM pGVMM = (PGVMM)RTMemAllocZ(sizeof(*pGVMM));
    if (!pGVMM)
        return VERR_NO_MEMORY;
    int rc = RTSemFastMutexCreate(&pGVMM->Lock);
    if (RT_SUCCESS(rc))
    {
        pGVMM->u32Magic = GVMM_MAGIC;
        pGVMM->iUsedHead = 0;
        pGVMM->iFreeHead = 1;

        /* the nil handle */
        pGVMM->aHandles[0].iSelf = 0;
        pGVMM->aHandles[0].iNext = 0;

        /* the tail */
        unsigned i = RT_ELEMENTS(pGVMM->aHandles) - 1;
        pGVMM->aHandles[i].iSelf = i;
        pGVMM->aHandles[i].iNext = 0; /* nil */

        /* the rest */
        while (i-- > 1)
        {
            pGVMM->aHandles[i].iSelf = i;
            pGVMM->aHandles[i].iNext = i + 1;
        }

        g_pGVMM = pGVMM;
        SUPR0Printf("GVMMR0Init: pGVMM=%p\n", pGVMM);
        return VINF_SUCCESS;
    }

    RTMemFree(pGVMM);
    return rc;
}
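
/*
 * After GVMMR0Init the handle array forms a single free chain:
 *     iFreeHead -> 1 -> 2 -> ... -> 127 -> 0 (nil)
 * aHandles[0] is permanently reserved as the NIL handle, so at most 127 VMs
 * can be registered at any one time; the used list starts out empty.
 */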


/**
 * Terminates the GVMM.
 *
 * This is called while owning the loader semaphore (see supdrvLdrFree()).
 * And unless something is wrong, there should be absolutely no VMs
 * registered at this point.
 */
GVMMR0DECL(void) GVMMR0Term(void)
{
    SUPR0Printf("GVMMR0Term:\n");

    PGVMM pGVMM = g_pGVMM;
    g_pGVMM = NULL;
    if (RT_UNLIKELY(!VALID_PTR(pGVMM)))
    {
        SUPR0Printf("GVMMR0Term: pGVMM=%p\n", pGVMM);
        return;
    }

    RTSemFastMutexDestroy(pGVMM->Lock);
    pGVMM->Lock = NIL_RTSEMFASTMUTEX;
    pGVMM->u32Magic++;
    pGVMM->iFreeHead = 0;
    if (pGVMM->iUsedHead)
    {
        SUPR0Printf("GVMMR0Term: iUsedHead=%#x!\n", pGVMM->iUsedHead);
        pGVMM->iUsedHead = 0;
    }

    RTMemFree(pGVMM);
}


/**
 * Request wrapper for the GVMMR0CreateVM API.
 *
 * @returns VBox status code.
 * @param   pReqHdr     The request buffer.
 */
GVMMR0DECL(int) GVMMR0CreateVMReq(PSUPVMMR0REQHDR pReqHdr)
{
    PGVMMCREATEVMREQ pReq = (PGVMMCREATEVMREQ)pReqHdr;

    /*
     * Validate the request.
     */
    if (!VALID_PTR(pReq))
        return VERR_INVALID_POINTER;
    if (pReq->Hdr.cbReq != sizeof(*pReq))
        return VERR_INVALID_PARAMETER;
    if (!VALID_PTR(pReq->pSession))
        return VERR_INVALID_POINTER;

    /*
     * Execute it.
     */
    PVM pVM;
    pReq->pVMR0 = NULL;
    pReq->pVMR3 = NIL_RTR3PTR;
    int rc = GVMMR0CreateVM(pReq->pSession, &pVM);
    if (RT_SUCCESS(rc))
    {
        pReq->pVMR0 = pVM;
        pReq->pVMR3 = pVM->pVMR3;
    }
    return rc;
}
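
/*
 * Illustration only, not part of this file: a rough sketch of how ring-3
 * might drive the wrapper above through the support driver. The entry point
 * and operation code (SUPCallVMMR0Ex, VMMR0_DO_GVMM_CREATE_VM) are assumptions
 * based on the VMMR0 request convention of this era and may not match this
 * tree exactly:
 *
 *     GVMMCREATEVMREQ CreateVMReq;
 *     CreateVMReq.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;    // assumed header magic
 *     CreateVMReq.Hdr.cbReq    = sizeof(CreateVMReq);
 *     CreateVMReq.pSession     = pSession;
 *     CreateVMReq.pVMR0        = NIL_RTR0PTR;
 *     CreateVMReq.pVMR3        = NIL_RTR3PTR;
 *     int rc = SUPCallVMMR0Ex(NIL_RTR0PTR, VMMR0_DO_GVMM_CREATE_VM, 0, &CreateVMReq.Hdr);
 *     if (RT_SUCCESS(rc))
 *         pVMR3 = CreateVMReq.pVMR3;   // ring-3 mapping of the new VM
 */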


/**
 * Allocates the VM structure and registers it with the GVMM.
 *
 * @returns VBox status code.
 * @param   pSession    The support driver session.
 * @param   ppVM        Where to store the pointer to the VM structure.
 *
 * @thread  Any thread.
 */
GVMMR0DECL(int) GVMMR0CreateVM(PSUPDRVSESSION pSession, PVM *ppVM)
{
    LogFlow(("GVMMR0CreateVM: pSession=%p\n", pSession));
    PGVMM pGVMM;
    GVMM_GET_VALID_INSTANCE(pGVMM, VERR_INTERNAL_ERROR);

    AssertPtrReturn(ppVM, VERR_INVALID_POINTER);
    *ppVM = NULL;

    AssertReturn(RTThreadNativeSelf() != NIL_RTNATIVETHREAD, VERR_INTERNAL_ERROR);

    /*
     * The whole allocation process is protected by the lock.
     */
    int rc = RTSemFastMutexRequest(pGVMM->Lock);
    AssertRCReturn(rc, rc);

    /*
     * Allocate a handle first so we don't waste resources unnecessarily.
     */
    uint16_t iHandle = pGVMM->iFreeHead;
    if (iHandle)
    {
        PGVMHANDLE pHandle = &pGVMM->aHandles[iHandle];

        /* consistency checks, a bit paranoid as always. */
        if (    !pHandle->pVM
            &&  !pHandle->pGVM
            &&  !pHandle->pvObj
            &&  pHandle->iSelf == iHandle)
        {
            pHandle->pvObj = SUPR0ObjRegister(pSession, SUPDRVOBJTYPE_VM, gvmmR0HandleObjDestructor, pGVMM, pHandle);
            if (pHandle->pvObj)
            {
                /*
                 * Move the handle from the free to used list and perform permission checks.
                 */
                pGVMM->iFreeHead = pHandle->iNext;
                pHandle->iNext = pGVMM->iUsedHead;
                pGVMM->iUsedHead = iHandle;

                pHandle->pVM = NULL;
                pHandle->pGVM = NULL;
                pHandle->pSession = pSession;
                pHandle->hEMT = NIL_RTNATIVETHREAD;

                rc = SUPR0ObjVerifyAccess(pHandle->pvObj, pSession, NULL);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Allocate the global VM structure (GVM) and initialize it.
                     */
                    PGVM pGVM = (PGVM)RTMemAllocZ(sizeof(*pGVM));
                    if (pGVM)
                    {
                        pGVM->u32Magic = GVM_MAGIC;
                        pGVM->hSelf = iHandle;
                        pGVM->hEMT = NIL_RTNATIVETHREAD;
                        pGVM->pVM = NULL;

                        /* gvmmR0InitPerVMData: */
                        AssertCompile(RT_SIZEOFMEMB(GVM,gvmm.s) <= RT_SIZEOFMEMB(GVM,gvmm.padding));
                        Assert(RT_SIZEOFMEMB(GVM,gvmm.s) <= RT_SIZEOFMEMB(GVM,gvmm.padding));
                        pGVM->gvmm.s.VMMemObj = NIL_RTR0MEMOBJ;
                        pGVM->gvmm.s.VMMapObj = NIL_RTR0MEMOBJ;
                        pGVM->gvmm.s.VMPagesMemObj = NIL_RTR0MEMOBJ;
                        pGVM->gvmm.s.VMPagesMapObj = NIL_RTR0MEMOBJ;

                        /* GVMMR0InitPerVMData(pGVM); - later */

                        /*
                         * Allocate the shared VM structure and associated page array.
                         */
                        const size_t cPages = RT_ALIGN(sizeof(VM), PAGE_SIZE) >> PAGE_SHIFT;
                        rc = RTR0MemObjAllocLow(&pGVM->gvmm.s.VMMemObj, cPages << PAGE_SHIFT, false /* fExecutable */);
                        if (RT_SUCCESS(rc))
                        {
                            PVM pVM = (PVM)RTR0MemObjAddress(pGVM->gvmm.s.VMMemObj); AssertPtr(pVM);
                            memset(pVM, 0, cPages << PAGE_SHIFT);
                            pVM->enmVMState = VMSTATE_CREATING;
                            pVM->pVMR0 = pVM;
                            pVM->pSession = pSession;
                            pVM->hSelf = iHandle;

                            rc = RTR0MemObjAllocPage(&pGVM->gvmm.s.VMPagesMemObj, cPages * sizeof(SUPPAGE), false /* fExecutable */);
                            if (RT_SUCCESS(rc))
                            {
                                PSUPPAGE paPages = (PSUPPAGE)RTR0MemObjAddress(pGVM->gvmm.s.VMPagesMemObj); AssertPtr(paPages);
                                for (size_t iPage = 0; iPage < cPages; iPage++)
                                {
                                    paPages[iPage].uReserved = 0;
                                    paPages[iPage].Phys = RTR0MemObjGetPagePhysAddr(pGVM->gvmm.s.VMMemObj, iPage);
                                    Assert(paPages[iPage].Phys != NIL_RTHCPHYS);
                                }

                                /*
                                 * Map them into ring-3.
                                 */
                                rc = RTR0MemObjMapUser(&pGVM->gvmm.s.VMMapObj, pGVM->gvmm.s.VMMemObj, (RTR3PTR)-1, 0,
                                                       RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
                                if (RT_SUCCESS(rc))
                                {
                                    pVM->pVMR3 = RTR0MemObjAddressR3(pGVM->gvmm.s.VMMapObj);
                                    AssertPtr((void *)pVM->pVMR3);

                                    rc = RTR0MemObjMapUser(&pGVM->gvmm.s.VMPagesMapObj, pGVM->gvmm.s.VMPagesMemObj, (RTR3PTR)-1, 0,
                                                           RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
                                    if (RT_SUCCESS(rc))
                                    {
                                        pVM->paVMPagesR3 = RTR0MemObjAddressR3(pGVM->gvmm.s.VMPagesMapObj);
                                        AssertPtr((void *)pVM->paVMPagesR3);

                                        /* complete the handle. */
                                        pHandle->pVM = pVM;
                                        pHandle->pGVM = pGVM;

                                        RTSemFastMutexRelease(pGVMM->Lock);

                                        *ppVM = pVM;
                                        SUPR0Printf("GVMMR0CreateVM: pVM=%p pVMR3=%p pGVM=%p hGVM=%d\n", pVM, pVM->pVMR3, pGVM, iHandle);
                                        return VINF_SUCCESS;
                                    }

                                    RTR0MemObjFree(pGVM->gvmm.s.VMMapObj, false /* fFreeMappings */);
                                    pGVM->gvmm.s.VMMapObj = NIL_RTR0MEMOBJ;
                                }
                                RTR0MemObjFree(pGVM->gvmm.s.VMPagesMemObj, false /* fFreeMappings */);
                                pGVM->gvmm.s.VMPagesMemObj = NIL_RTR0MEMOBJ;
                            }
                            RTR0MemObjFree(pGVM->gvmm.s.VMMemObj, false /* fFreeMappings */);
                            pGVM->gvmm.s.VMMemObj = NIL_RTR0MEMOBJ;
                        }
                    }
                    else
                        rc = VERR_NO_MEMORY;
                }
                /* else: The user wasn't permitted to create this VM. */

                /*
                 * The handle will be freed by gvmmR0HandleObjDestructor as we release the
                 * object reference here. A little extra mess because of non-recursive lock.
                 */
                void *pvObj = pHandle->pvObj;
                pHandle->pvObj = NULL;
                RTSemFastMutexRelease(pGVMM->Lock);

                SUPR0ObjRelease(pvObj, pSession);

                SUPR0Printf("GVMMR0CreateVM: failed, rc=%d\n", rc);
                return rc;
            }

            rc = VERR_NO_MEMORY;
        }
        else
            rc = VERR_INTERNAL_ERROR;
    }
    else
        rc = VERR_GVM_TOO_MANY_VMS;

    RTSemFastMutexRelease(pGVMM->Lock);
    return rc;
}
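
/*
 * Note on the structure above: the error paths free the memory objects in
 * reverse order of allocation, while the handle itself is reclaimed by
 * gvmmR0HandleObjDestructor when the object reference taken by
 * SUPR0ObjRegister is released.
 */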


/**
 * Destroys the VM, freeing all associated resources (the ring-0 ones anyway).
 *
 * This is called from vmR3DestroyFinalBit and from an error path in VMR3Create,
 * and the caller is not the EMT thread, unfortunately. For security reasons, it
 * would've been nice if the caller was actually the EMT thread or that we somehow
 * could've associated the calling thread with the VM up front.
 *
 * @returns VBox status code.
 * @param   pVM     The VM structure to destroy (the ring-0 mapping).
 *
 * @thread  EMT if it's associated with the VM, otherwise any thread.
 */
GVMMR0DECL(int) GVMMR0DestroyVM(PVM pVM)
{
    LogFlow(("GVMMR0DestroyVM: pVM=%p\n", pVM));
    PGVMM pGVMM;
    GVMM_GET_VALID_INSTANCE(pGVMM, VERR_INTERNAL_ERROR);


    /*
     * Validate the VM structure, state and caller.
     */
    AssertPtrReturn(pVM, VERR_INVALID_POINTER);
    AssertReturn(!((uintptr_t)pVM & PAGE_OFFSET_MASK), VERR_INVALID_POINTER);
    AssertMsgReturn(pVM->enmVMState >= VMSTATE_CREATING && pVM->enmVMState <= VMSTATE_TERMINATED, ("%d\n", pVM->enmVMState), VERR_WRONG_ORDER);

    uint32_t hGVM = pVM->hSelf;
    AssertReturn(hGVM != NIL_GVM_HANDLE, VERR_INVALID_HANDLE);
    AssertReturn(hGVM < RT_ELEMENTS(pGVMM->aHandles), VERR_INVALID_HANDLE);

    PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM];
    AssertReturn(pHandle->pVM == pVM, VERR_NOT_OWNER);

    RTNATIVETHREAD hSelf = RTThreadNativeSelf();
    AssertReturn(pHandle->hEMT == hSelf || pHandle->hEMT == NIL_RTNATIVETHREAD, VERR_NOT_OWNER);

    /*
     * Lookup the handle and destroy the object.
     * Since the lock isn't recursive and we'll have to leave it before dereferencing the
     * object, we take some precautions against racing callers just in case...
     */
    int rc = RTSemFastMutexRequest(pGVMM->Lock);
    AssertRC(rc);

    /* be careful here because we might theoretically be racing someone else cleaning up. */
    if (    pHandle->pVM == pVM
        &&  (   pHandle->hEMT == hSelf
             || pHandle->hEMT == NIL_RTNATIVETHREAD)
        &&  VALID_PTR(pHandle->pvObj)
        &&  VALID_PTR(pHandle->pSession)
        &&  VALID_PTR(pHandle->pGVM)
        &&  pHandle->pGVM->u32Magic == GVM_MAGIC)
    {
        void *pvObj = pHandle->pvObj;
        pHandle->pvObj = NULL;
        RTSemFastMutexRelease(pGVMM->Lock);

        SUPR0ObjRelease(pvObj, pHandle->pSession);
    }
    else
    {
        SUPR0Printf("GVMMR0DestroyVM: pHandle=%p:{.pVM=%p, hEMT=%p, .pvObj=%p} pVM=%p hSelf=%p\n",
                    pHandle, pHandle->pVM, pHandle->hEMT, pHandle->pvObj, pVM, hSelf);
        RTSemFastMutexRelease(pGVMM->Lock);
        rc = VERR_INTERNAL_ERROR;
    }

    return rc;
}
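
/*
 * Note: GVMMR0DestroyVM itself only validates the caller and drops the object
 * reference; the unmapping, freeing and handle recycling all happen in
 * gvmmR0HandleObjDestructor below once the reference count reaches zero.
 */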


/**
 * Handle destructor.
 *
 * @param   pvObj       The object pointer.
 * @param   pvGVMM      The GVMM instance pointer.
 * @param   pvHandle    The handle pointer.
 */
static DECLCALLBACK(void) gvmmR0HandleObjDestructor(void *pvObj, void *pvGVMM, void *pvHandle)
{
    LogFlow(("gvmmR0HandleObjDestructor: %p %p %p\n", pvObj, pvGVMM, pvHandle));

    /*
     * Some quick, paranoid, input validation.
     */
    PGVMHANDLE pHandle = (PGVMHANDLE)pvHandle;
    AssertPtr(pHandle);
    PGVMM pGVMM = (PGVMM)pvGVMM;
    Assert(pGVMM == g_pGVMM);
    const uint16_t iHandle = pHandle - &pGVMM->aHandles[0];
    if (    !iHandle
        ||  iHandle >= RT_ELEMENTS(pGVMM->aHandles)
        ||  iHandle != pHandle->iSelf)
    {
        SUPR0Printf("GVM: handle %d is out of range or corrupt (iSelf=%d)!\n", iHandle, pHandle->iSelf);
        return;
    }

    int rc = RTSemFastMutexRequest(pGVMM->Lock);
    AssertRC(rc);

    /*
     * This is a tad slow but a doubly linked list is too much hassle.
     */
    if (RT_UNLIKELY(pHandle->iNext >= RT_ELEMENTS(pGVMM->aHandles)))
    {
        SUPR0Printf("GVM: used list index %d is out of range!\n", pHandle->iNext);
        RTSemFastMutexRelease(pGVMM->Lock);
        return;
    }

    if (pGVMM->iUsedHead == iHandle)
        pGVMM->iUsedHead = pHandle->iNext;
    else
    {
        uint16_t iPrev = pGVMM->iUsedHead;
        int c = RT_ELEMENTS(pGVMM->aHandles) + 2;
        while (iPrev)
        {
            if (RT_UNLIKELY(iPrev >= RT_ELEMENTS(pGVMM->aHandles)))
            {
                SUPR0Printf("GVM: used list index %d is out of range!\n", iPrev);
                RTSemFastMutexRelease(pGVMM->Lock);
                return;
            }
            if (RT_UNLIKELY(c-- <= 0))
            {
                iPrev = 0;
                break;
            }

            if (pGVMM->aHandles[iPrev].iNext == iHandle)
                break;
            iPrev = pGVMM->aHandles[iPrev].iNext;
        }
        if (!iPrev)
        {
            SUPR0Printf("GVM: can't find the previous handle of %d!\n", pHandle->iSelf);
            RTSemFastMutexRelease(pGVMM->Lock);
            return;
        }

        pGVMM->aHandles[iPrev].iNext = pHandle->iNext;
    }
    pHandle->iNext = 0;

    /*
     * Do the global cleanup round.
     */
    PGVM pGVM = pHandle->pGVM;
    if (    VALID_PTR(pGVM)
        &&  pGVM->u32Magic == GVM_MAGIC)
    {
        /// @todo GMMR0CleanupVM(pGVM);

        /*
         * Do the GVMM cleanup - must be done last.
         */
        /* The VM and VM pages mappings/allocations. */
        if (pGVM->gvmm.s.VMPagesMapObj != NIL_RTR0MEMOBJ)
        {
            rc = RTR0MemObjFree(pGVM->gvmm.s.VMPagesMapObj, false /* fFreeMappings */); AssertRC(rc);
            pGVM->gvmm.s.VMPagesMapObj = NIL_RTR0MEMOBJ;
        }

        if (pGVM->gvmm.s.VMMapObj != NIL_RTR0MEMOBJ)
        {
            rc = RTR0MemObjFree(pGVM->gvmm.s.VMMapObj, false /* fFreeMappings */); AssertRC(rc);
            pGVM->gvmm.s.VMMapObj = NIL_RTR0MEMOBJ;
        }

        if (pGVM->gvmm.s.VMPagesMemObj != NIL_RTR0MEMOBJ)
        {
            rc = RTR0MemObjFree(pGVM->gvmm.s.VMPagesMemObj, false /* fFreeMappings */); AssertRC(rc);
            pGVM->gvmm.s.VMPagesMemObj = NIL_RTR0MEMOBJ;
        }

        if (pGVM->gvmm.s.VMMemObj != NIL_RTR0MEMOBJ)
        {
            rc = RTR0MemObjFree(pGVM->gvmm.s.VMMemObj, false /* fFreeMappings */); AssertRC(rc);
            pGVM->gvmm.s.VMMemObj = NIL_RTR0MEMOBJ;
        }

        /* the GVM structure itself. */
        pGVM->u32Magic++;
        RTMemFree(pGVM);
    }
    /* else: GVMMR0CreateVM cleanup. */

    /*
     * Free the handle.
     */
    pHandle->iNext = pGVMM->iFreeHead;
    pGVMM->iFreeHead = iHandle;
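    /* Clear the members using atomic writes; the lock-free scan in
       GVMMR0GetVMByEMT (below) must never observe a half-recycled handle. */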
    ASMAtomicXchgPtr((void * volatile *)&pHandle->pGVM, NULL);
    ASMAtomicXchgPtr((void * volatile *)&pHandle->pVM, NULL);
    ASMAtomicXchgPtr((void * volatile *)&pHandle->pvObj, NULL);
    ASMAtomicXchgPtr((void * volatile *)&pHandle->pSession, NULL);
    ASMAtomicXchgSize(&pHandle->hEMT, NIL_RTNATIVETHREAD);

    RTSemFastMutexRelease(pGVMM->Lock);
    SUPR0Printf("gvmmR0HandleObjDestructor: returns\n");
}


/**
 * Associates an EMT thread with a VM.
 *
 * This is called early during the ring-0 VM initialization so assertions later
 * in the process can be handled gracefully.
 *
 * @returns VBox status code.
 *
 * @param   pVM     The VM instance data (aka handle), ring-0 mapping of course.
 * @thread  EMT.
 */
GVMMR0DECL(int) GVMMR0AssociateEMTWithVM(PVM pVM)
{
    LogFlow(("GVMMR0AssociateEMTWithVM: pVM=%p\n", pVM));
    PGVMM pGVMM;
    GVMM_GET_VALID_INSTANCE(pGVMM, VERR_INTERNAL_ERROR);

    /*
     * Validate the VM structure, state and handle.
     */
    AssertPtrReturn(pVM, VERR_INVALID_POINTER);
    AssertReturn(!((uintptr_t)pVM & PAGE_OFFSET_MASK), VERR_INVALID_POINTER);
    AssertMsgReturn(pVM->enmVMState == VMSTATE_CREATING, ("%d\n", pVM->enmVMState), VERR_WRONG_ORDER);

    RTNATIVETHREAD hEMT = RTThreadNativeSelf();
    AssertReturn(hEMT != NIL_RTNATIVETHREAD, VERR_NOT_SUPPORTED);

    const uint16_t hGVM = pVM->hSelf;
    AssertReturn(hGVM != NIL_GVM_HANDLE, VERR_INVALID_HANDLE);
    AssertReturn(hGVM < RT_ELEMENTS(pGVMM->aHandles), VERR_INVALID_HANDLE);

    PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM];
    AssertReturn(pHandle->pVM == pVM, VERR_NOT_OWNER);

    /*
     * Take the lock, validate the handle and update the structure members.
     */
    int rc = RTSemFastMutexRequest(pGVMM->Lock);
    AssertRCReturn(rc, rc);

    if (    pHandle->pVM == pVM
        &&  VALID_PTR(pHandle->pvObj)
        &&  VALID_PTR(pHandle->pSession)
        &&  VALID_PTR(pHandle->pGVM)
        &&  pHandle->pGVM->u32Magic == GVM_MAGIC)
    {
        pHandle->hEMT = hEMT;
        pHandle->pGVM->hEMT = hEMT;
    }
    else
        rc = VERR_INTERNAL_ERROR;

    RTSemFastMutexRelease(pGVMM->Lock);
    LogFlow(("GVMMR0AssociateEMTWithVM: returns %Vrc (hEMT=%RTnthrd)\n", rc, hEMT));
    return rc;
}


/**
 * Disassociates the EMT thread from a VM.
 *
 * This is called last in the ring-0 VM termination. After this point anyone is
 * allowed to destroy the VM. Ideally, we should associate the VM with the thread
 * that's going to call GVMMR0DestroyVM for optimal security, but that's impractical
 * at present.
 *
 * @returns VBox status code.
 *
 * @param   pVM     The VM instance data (aka handle), ring-0 mapping of course.
 * @thread  EMT.
 */
GVMMR0DECL(int) GVMMR0DisassociateEMTFromVM(PVM pVM)
{
    LogFlow(("GVMMR0DisassociateEMTFromVM: pVM=%p\n", pVM));
    PGVMM pGVMM;
    GVMM_GET_VALID_INSTANCE(pGVMM, VERR_INTERNAL_ERROR);

    /*
     * Validate the VM structure, state and handle.
     */
    AssertPtrReturn(pVM, VERR_INVALID_POINTER);
    AssertReturn(!((uintptr_t)pVM & PAGE_OFFSET_MASK), VERR_INVALID_POINTER);
    AssertMsgReturn(pVM->enmVMState >= VMSTATE_CREATING && pVM->enmVMState <= VMSTATE_DESTROYING, ("%d\n", pVM->enmVMState), VERR_WRONG_ORDER);

    RTNATIVETHREAD hEMT = RTThreadNativeSelf();
    AssertReturn(hEMT != NIL_RTNATIVETHREAD, VERR_NOT_SUPPORTED);

    const uint16_t hGVM = pVM->hSelf;
    AssertReturn(hGVM != NIL_GVM_HANDLE, VERR_INVALID_HANDLE);
    AssertReturn(hGVM < RT_ELEMENTS(pGVMM->aHandles), VERR_INVALID_HANDLE);

    PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM];
    AssertReturn(pHandle->pVM == pVM, VERR_NOT_OWNER);

    /*
     * Take the lock, validate the handle and update the structure members.
     */
    int rc = RTSemFastMutexRequest(pGVMM->Lock);
    AssertRCReturn(rc, rc);

    if (    VALID_PTR(pHandle->pvObj)
        &&  VALID_PTR(pHandle->pSession)
        &&  VALID_PTR(pHandle->pGVM)
        &&  pHandle->pGVM->u32Magic == GVM_MAGIC)
    {
        if (    pHandle->pVM == pVM
            &&  pHandle->hEMT == hEMT)
        {
            pHandle->hEMT = NIL_RTNATIVETHREAD;
            pHandle->pGVM->hEMT = NIL_RTNATIVETHREAD;
        }
        else
            rc = VERR_NOT_OWNER;
    }
    else
        rc = VERR_INVALID_HANDLE;

    RTSemFastMutexRelease(pGVMM->Lock);
    LogFlow(("GVMMR0DisassociateEMTFromVM: returns %Vrc (hEMT=%RTnthrd)\n", rc, hEMT));
    return rc;
}


/**
 * Lookup a GVM structure by its handle.
 *
 * @returns The GVM pointer on success, NULL on failure.
 * @param   hGVM    The global VM handle. Asserts on bad handle.
 */
GVMMR0DECL(PGVM) GVMMR0ByHandle(uint32_t hGVM)
{
    PGVMM pGVMM;
    GVMM_GET_VALID_INSTANCE(pGVMM, NULL);

    /*
     * Validate.
     */
    AssertReturn(hGVM != NIL_GVM_HANDLE, NULL);
    AssertReturn(hGVM < RT_ELEMENTS(pGVMM->aHandles), NULL);

    /*
     * Look it up.
     */
    PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM];
    AssertPtrReturn(pHandle->pVM, NULL);
    AssertPtrReturn(pHandle->pvObj, NULL);
    PGVM pGVM = pHandle->pGVM;
    AssertPtrReturn(pGVM, NULL);
    AssertReturn(pGVM->pVM == pHandle->pVM, NULL);

    return pHandle->pGVM;
}


/**
 * Lookup a GVM structure by the shared VM structure.
 *
 * @returns The GVM pointer on success, NULL on failure.
 * @param   pVM     The shared VM structure (the ring-0 mapping).
 */
GVMMR0DECL(PGVM) GVMMR0ByVM(PVM pVM)
{
    PGVMM pGVMM;
    GVMM_GET_VALID_INSTANCE(pGVMM, NULL);

    /*
     * Validate.
     */
    AssertPtrReturn(pVM, NULL);
    AssertReturn(!((uintptr_t)pVM & PAGE_OFFSET_MASK), NULL);

    uint16_t hGVM = pVM->hSelf;
    AssertReturn(hGVM != NIL_GVM_HANDLE, NULL);
    AssertReturn(hGVM < RT_ELEMENTS(pGVMM->aHandles), NULL);

    /*
     * Look it up.
     */
    PGVMHANDLE pHandle = &pGVMM->aHandles[hGVM];
    AssertReturn(pHandle->pVM == pVM, NULL);
    AssertPtrReturn(pHandle->pvObj, NULL);
    PGVM pGVM = pHandle->pGVM;
    AssertPtrReturn(pGVM, NULL);
    AssertReturn(pGVM->pVM == pVM, NULL);

    return pGVM;
}


/**
 * Lookup a VM by its global handle.
 *
 * @returns The VM handle on success, NULL on failure.
 * @param   hGVM    The global VM handle. Asserts on bad handle.
 */
GVMMR0DECL(PVM) GVMMR0GetVMByHandle(uint32_t hGVM)
{
    PGVM pGVM = GVMMR0ByHandle(hGVM);
    return pGVM ? pGVM->pVM : NULL;
}


/**
 * Looks up the VM belonging to the specified EMT thread.
 *
 * This is used by the assertion machinery in VMMR0.cpp to avoid causing
 * unnecessary kernel panics when the EMT thread hits an assertion. The
 * caller may or may not be an EMT thread.
 *
 * @returns The VM handle on success, NULL on failure.
 * @param   hEMT    The native thread handle of the EMT.
 *                  NIL_RTNATIVETHREAD means the current thread.
 */
GVMMR0DECL(PVM) GVMMR0GetVMByEMT(RTNATIVETHREAD hEMT)
{
    /*
     * No assertions here as we're usually called in an AssertMsgN or
     * RTAssert* context.
     */
    PGVMM pGVMM = g_pGVMM;
    if (    !VALID_PTR(pGVMM)
        ||  pGVMM->u32Magic != GVMM_MAGIC)
        return NULL;

    if (hEMT == NIL_RTNATIVETHREAD)
        hEMT = RTThreadNativeSelf();

    /*
     * Search the handles in a linear fashion as we don't dare take the lock (assert).
     */
    for (unsigned i = 1; i < RT_ELEMENTS(pGVMM->aHandles); i++)
        if (    pGVMM->aHandles[i].hEMT == hEMT
            &&  pGVMM->aHandles[i].iSelf == i
            &&  VALID_PTR(pGVMM->aHandles[i].pvObj)
            &&  VALID_PTR(pGVMM->aHandles[i].pVM))
            return pGVMM->aHandles[i].pVM;

    return NULL;
}
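
/*
 * The unlocked scan above is safe because aHandles[] is a statically sized
 * array inside the GVMM instance; entries are recycled rather than freed, so
 * the worst that can happen is a stale or missed match while a handle is
 * being recycled.
 */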