VirtualBox

source: vbox/trunk/src/VBox/VMM/VMReq.cpp@ 19167

Last change on this file since 19167 was 19146, checked in by vboxsync, 16 years ago

VM_FF_REQUEST -> VMCPU_FF_REQUEST

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 39.2 KB
1/* $Id: VMReq.cpp 19146 2009-04-23 14:59:10Z vboxsync $ */
2/** @file
3 * VM - Virtual Machine
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_VM
27#include <VBox/mm.h>
28#include <VBox/vmm.h>
29#include "VMInternal.h"
30#include <VBox/vm.h>
31#include <VBox/uvm.h>
32
33#include <VBox/err.h>
34#include <VBox/param.h>
35#include <VBox/log.h>
36#include <iprt/assert.h>
37#include <iprt/asm.h>
38#include <iprt/string.h>
39#include <iprt/time.h>
40#include <iprt/semaphore.h>
41#include <iprt/thread.h>
42
43
44/*******************************************************************************
45* Internal Functions *
46*******************************************************************************/
47static int vmR3ReqProcessOneU(PUVM pUVM, PVMREQ pReq);
48
49
50/**
51 * Allocate and queue a call request.
52 *
53 * If it's desired to poll on the completion of the request, set cMillies
54 * to 0 and use VMR3ReqWait() to check for completion. Otherwise
55 * use RT_INDEFINITE_WAIT.
56 * The returned request packet must be freed using VMR3ReqFree().
57 *
58 * @returns VBox status code.
59 * Will not return VERR_INTERRUPTED.
60 * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
61 *
62 * @param pVM The VM handle.
63 * @param enmDest Destination of the request packet (global or per VCPU).
64 * @param ppReq Where to store the pointer to the request.
65 * This will be NULL or a valid request pointer no matter what happens.
66 * @param cMillies Number of milliseconds to wait for the request to
67 * be completed. Use RT_INDEFINITE_WAIT to only
68 * wait till it's completed.
69 * @param pfnFunction Pointer to the function to call.
70 * @param cArgs Number of arguments following in the ellipsis.
71 * Not possible to pass 64-bit arguments!
72 * @param ... Function arguments.
73 */
74VMMR3DECL(int) VMR3ReqCall(PVM pVM, VMREQDEST enmDest, PVMREQ *ppReq, unsigned cMillies, PFNRT pfnFunction, unsigned cArgs, ...)
75{
76 va_list va;
77 va_start(va, cArgs);
78 int rc = VMR3ReqCallVU(pVM->pUVM, enmDest, ppReq, cMillies, VMREQFLAGS_VBOX_STATUS, pfnFunction, cArgs, va);
79 va_end(va);
80 return rc;
81}
82
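/*
 * Editor's note: illustrative usage sketch, not part of the original file.
 * It shows the common synchronous pattern described above: queue a call on
 * an EMT, wait for it indefinitely, pick up the status from the request
 * packet and free it.  The worker function and its argument are
 * hypothetical; the block is compiled out with #if 0.
 */
#if 0
static DECLCALLBACK(int) exampleWorker(PVM pVM, uintptr_t uValue) /* hypothetical */
{
    LogFlow(("exampleWorker: pVM=%p uValue=%#x\n", pVM, (unsigned)uValue));
    return VINF_SUCCESS;
}

static int exampleSyncCall(PVM pVM)
{
    PVMREQ pReq;
    int rc = VMR3ReqCall(pVM, VMREQDEST_ANY, &pReq, RT_INDEFINITE_WAIT,
                         (PFNRT)exampleWorker, 2, pVM, (uintptr_t)42);
    if (RT_SUCCESS(rc))
        rc = pReq->iStatus;     /* the status code returned by exampleWorker */
    VMR3ReqFree(pReq);          /* pReq is NULL on failure; VMR3ReqFree handles NULL */
    return rc;
}
#endif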
83
84/**
85 * Allocate and queue a call request to a void function.
86 *
87 * If it's desired to poll on the completion of the request, set cMillies
88 * to 0 and use VMR3ReqWait() to check for completion. Otherwise
89 * use RT_INDEFINITE_WAIT.
90 * The returned request packet must be freed using VMR3ReqFree().
91 *
92 * @returns VBox status code.
93 * Will not return VERR_INTERRUPTED.
94 * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
95 *
96 * @param pUVM Pointer to the user mode VM structure.
97 * @param enmDest Destination of the request packet (global or per VCPU).
98 * @param ppReq Where to store the pointer to the request.
99 * This will be NULL or a valid request pointer no matter what happens.
100 * @param cMillies Number of milliseconds to wait for the request to
101 * be completed. Use RT_INDEFINITE_WAIT to only
102 * wait till it's completed.
103 * @param pfnFunction Pointer to the function to call.
104 * @param cArgs Number of arguments following in the ellipsis.
105 * Not possible to pass 64-bit arguments!
106 * @param ... Function arguments.
107 */
108VMMR3DECL(int) VMR3ReqCallVoidU(PUVM pUVM, VMREQDEST enmDest, PVMREQ *ppReq, unsigned cMillies, PFNRT pfnFunction, unsigned cArgs, ...)
109{
110 va_list va;
111 va_start(va, cArgs);
112 int rc = VMR3ReqCallVU(pUVM, enmDest, ppReq, cMillies, VMREQFLAGS_VOID, pfnFunction, cArgs, va);
113 va_end(va);
114 return rc;
115}
116
117
118/**
119 * Allocate and queue a call request to a void function.
120 *
121 * If it's desired to poll on the completion of the request, set cMillies
122 * to 0 and use VMR3ReqWait() to check for completion. Otherwise
123 * use RT_INDEFINITE_WAIT.
124 * The returned request packet must be freed using VMR3ReqFree().
125 *
126 * @returns VBox status code.
127 * Will not return VERR_INTERRUPTED.
128 * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
129 *
130 * @param pVM The VM handle.
131 * @param enmDest Destination of the request packet (global or per VCPU).
132 * @param ppReq Where to store the pointer to the request.
133 * This will be NULL or a valid request pointer no matter what happens.
134 * @param cMillies Number of milliseconds to wait for the request to
135 * be completed. Use RT_INDEFINITE_WAIT to only
136 * wait till it's completed.
137 * @param pfnFunction Pointer to the function to call.
138 * @param cArgs Number of arguments following in the ellipsis.
139 * Not possible to pass 64-bit arguments!
140 * @param ... Function arguments.
141 */
142VMMR3DECL(int) VMR3ReqCallVoid(PVM pVM, VMREQDEST enmDest, PVMREQ *ppReq, unsigned cMillies, PFNRT pfnFunction, unsigned cArgs, ...)
143{
144 va_list va;
145 va_start(va, cArgs);
146 int rc = VMR3ReqCallVU(pVM->pUVM, enmDest, ppReq, cMillies, VMREQFLAGS_VOID, pfnFunction, cArgs, va);
147 va_end(va);
148 return rc;
149}
150
151
152/**
153 * Allocate and queue a call request (extended version taking request flags).
154 *
155 * If it's desired to poll on the completion of the request, set cMillies
156 * to 0 and use VMR3ReqWait() to check for completion. Otherwise
157 * use RT_INDEFINITE_WAIT.
158 * The returned request packet must be freed using VMR3ReqFree().
159 *
160 * @returns VBox status code.
161 * Will not return VERR_INTERRUPTED.
162 * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
163 *
164 * @param pVM The VM handle.
165 * @param enmDest Destination of the request packet (global or per VCPU).
166 * @param ppReq Where to store the pointer to the request.
167 * This will be NULL or a valid request pointer no matter what happens, unless fFlags
168 * contains VMREQFLAGS_NO_WAIT, in which case it is optional and always NULL.
169 * @param cMillies Number of milliseconds to wait for the request to
170 * be completed. Use RT_INDEFINITE_WAIT to only
171 * wait till it's completed.
172 * @param fFlags A combination of the VMREQFLAGS values.
173 * @param pfnFunction Pointer to the function to call.
174 * @param cArgs Number of arguments following in the ellipsis.
175 * Not possible to pass 64-bit arguments!
176 * @param ... Function arguments.
177 */
178VMMR3DECL(int) VMR3ReqCallEx(PVM pVM, VMREQDEST enmDest, PVMREQ *ppReq, unsigned cMillies, unsigned fFlags, PFNRT pfnFunction, unsigned cArgs, ...)
179{
180 va_list va;
181 va_start(va, cArgs);
182 int rc = VMR3ReqCallVU(pVM->pUVM, enmDest, ppReq, cMillies, fFlags, pfnFunction, cArgs, va);
183 va_end(va);
184 return rc;
185}
186
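/*
 * Editor's note: illustrative sketch, not part of the original file.  It
 * shows the fire-and-forget variant enabled by VMREQFLAGS_NO_WAIT: no
 * request pointer is returned and the packet is freed automatically once
 * EMT has processed it.  The worker is supplied by the (hypothetical)
 * caller; the block is compiled out with #if 0.
 */
#if 0
static int exampleAsyncCall(PVM pVM, PFNRT pfnWorker)
{
    return VMR3ReqCallEx(pVM, VMREQDEST_ANY, NULL /*ppReq*/, 0 /*cMillies*/,
                         VMREQFLAGS_VOID | VMREQFLAGS_NO_WAIT,
                         pfnWorker, 1, pVM);
}
#endif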
187
188/**
189 * Allocate and queue a call request (user mode VM handle variant taking request flags).
190 *
191 * If it's desired to poll on the completion of the request, set cMillies
192 * to 0 and use VMR3ReqWait() to check for completion. Otherwise
193 * use RT_INDEFINITE_WAIT.
194 * The returned request packet must be freed using VMR3ReqFree().
195 *
196 * @returns VBox status code.
197 * Will not return VERR_INTERRUPTED.
198 * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
199 *
200 * @param pUVM Pointer to the user mode VM structure.
201 * @param enmDest Destination of the request packet (global or per VCPU).
202 * @param ppReq Where to store the pointer to the request.
203 * This will be NULL or a valid request pointer no matter what happens, unless fFlags
204 * contains VMREQFLAGS_NO_WAIT, in which case it is optional and always NULL.
205 * @param cMillies Number of milliseconds to wait for the request to
206 * be completed. Use RT_INDEFINITE_WAIT to only
207 * wait till it's completed.
208 * @param fFlags A combination of the VMREQFLAGS values.
209 * @param pfnFunction Pointer to the function to call.
210 * @param cArgs Number of arguments following in the ellipsis.
211 * Not possible to pass 64-bit arguments!
212 * @param ... Function arguments.
213 */
214VMMR3DECL(int) VMR3ReqCallU(PUVM pUVM, VMREQDEST enmDest, PVMREQ *ppReq, unsigned cMillies, unsigned fFlags, PFNRT pfnFunction, unsigned cArgs, ...)
215{
216 va_list va;
217 va_start(va, cArgs);
218 int rc = VMR3ReqCallVU(pUVM, enmDest, ppReq, cMillies, fFlags, pfnFunction, cArgs, va);
219 va_end(va);
220 return rc;
221}
222
223
224/**
225 * Allocate and queue a call request.
226 *
227 * If it's desired to poll on the completion of the request, set cMillies
228 * to 0 and use VMR3ReqWait() to check for completion. Otherwise
229 * use RT_INDEFINITE_WAIT.
230 * The returned request packet must be freed using VMR3ReqFree().
231 *
232 * @returns VBox status code.
233 * Will not return VERR_INTERRUPTED.
234 * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
235 *
236 * @param pUVM Pointer to the user mode VM structure.
237 * @param enmDest Destination of the request packet (global or per VCPU).
238 * @param ppReq Where to store the pointer to the request.
239 * This will be NULL or a valid request pointer no matter what happens, unless fFlags
240 * contains VMREQFLAGS_NO_WAIT, in which case it is optional and always NULL.
241 * @param cMillies Number of milliseconds to wait for the request to
242 * be completed. Use RT_INDEFINITE_WAIT to only
243 * wait till it's completed.
244 * @param pfnFunction Pointer to the function to call.
245 * @param fFlags A combination of the VMREQFLAGS values.
246 * @param cArgs Number of arguments following in the ellipsis.
247 * Stuff which differs in size from uintptr_t is going to make trouble, so don't try!
248 * @param Args Argument vector.
249 */
250VMMR3DECL(int) VMR3ReqCallVU(PUVM pUVM, VMREQDEST enmDest, PVMREQ *ppReq, unsigned cMillies, unsigned fFlags, PFNRT pfnFunction, unsigned cArgs, va_list Args)
251{
252 LogFlow(("VMR3ReqCallV: cMillies=%d fFlags=%#x pfnFunction=%p cArgs=%d\n", cMillies, fFlags, pfnFunction, cArgs));
253
254 /*
255 * Validate input.
256 */
257 AssertPtrReturn(pfnFunction, VERR_INVALID_POINTER);
258 AssertPtrReturn(pUVM, VERR_INVALID_POINTER);
259 AssertReturn(!(fFlags & ~(VMREQFLAGS_RETURN_MASK | VMREQFLAGS_NO_WAIT)), VERR_INVALID_PARAMETER);
260 if (!(fFlags & VMREQFLAGS_NO_WAIT) || ppReq)
261 {
262 AssertPtrReturn(ppReq, VERR_INVALID_POINTER);
263 *ppReq = NULL;
264 }
265 PVMREQ pReq = NULL;
266 AssertMsgReturn(cArgs * sizeof(uintptr_t) <= sizeof(pReq->u.Internal.aArgs),
267 ("cArg=%d\n", cArgs),
268 VERR_TOO_MUCH_DATA);
269
270 /*
271 * Allocate request
272 */
273 int rc = VMR3ReqAllocU(pUVM, &pReq, VMREQTYPE_INTERNAL, enmDest);
274 if (RT_FAILURE(rc))
275 return rc;
276
277 /*
278 * Initialize the request data.
279 */
280 pReq->fFlags = fFlags;
281 pReq->u.Internal.pfn = pfnFunction;
282 pReq->u.Internal.cArgs = cArgs;
283 for (unsigned iArg = 0; iArg < cArgs; iArg++)
284 pReq->u.Internal.aArgs[iArg] = va_arg(Args, uintptr_t);
285
286 /*
287 * Queue the request and return.
288 */
289 rc = VMR3ReqQueue(pReq, cMillies);
290 if ( RT_FAILURE(rc)
291 && rc != VERR_TIMEOUT)
292 {
293 VMR3ReqFree(pReq);
294 pReq = NULL;
295 }
296 if (!(fFlags & VMREQFLAGS_NO_WAIT))
297 {
298 *ppReq = pReq;
299 LogFlow(("VMR3ReqCallV: returns %Rrc *ppReq=%p\n", rc, pReq));
300 }
301 else
302 LogFlow(("VMR3ReqCallV: returns %Rrc\n", rc));
303 Assert(rc != VERR_INTERRUPTED);
304 return rc;
305}
306
307
308/**
309 * Joins the list pList with whatever is linked up at *pHead.
310 */
311static void vmr3ReqJoinFreeSub(volatile PVMREQ *ppHead, PVMREQ pList)
312{
313 for (unsigned cIterations = 0;; cIterations++)
314 {
315 PVMREQ pHead = (PVMREQ)ASMAtomicXchgPtr((void * volatile *)ppHead, pList);
316 if (!pHead)
317 return;
318 PVMREQ pTail = pHead;
319 while (pTail->pNext)
320 pTail = pTail->pNext;
321 pTail->pNext = pList;
322 if (ASMAtomicCmpXchgPtr((void * volatile *)ppHead, (void *)pHead, pList))
323 return;
324 pTail->pNext = NULL;
325 if (ASMAtomicCmpXchgPtr((void * volatile *)ppHead, (void *)pHead, NULL))
326 return;
327 pList = pHead;
328 Assert(cIterations != 32);
329 Assert(cIterations != 64);
330 }
331}
332
333
334/**
335 * Joins the list pList with one of the free request lists in pVMInt.
336 */
337static void vmr3ReqJoinFree(PVMINTUSERPERVM pVMInt, PVMREQ pList)
338{
339 /*
340 * Split the list if it's too long.
341 */
342 unsigned cReqs = 1;
343 PVMREQ pTail = pList;
344 while (pTail->pNext)
345 {
346 if (cReqs++ > 25)
347 {
348 const uint32_t i = pVMInt->iReqFree;
349 vmr3ReqJoinFreeSub(&pVMInt->apReqFree[(i + 2) % RT_ELEMENTS(pVMInt->apReqFree)], pTail->pNext);
350
351 pTail->pNext = NULL;
352 vmr3ReqJoinFreeSub(&pVMInt->apReqFree[(i + 2 + (i == pVMInt->iReqFree)) % RT_ELEMENTS(pVMInt->apReqFree)], pList);
353 return;
354 }
355 pTail = pTail->pNext;
356 }
357 vmr3ReqJoinFreeSub(&pVMInt->apReqFree[(pVMInt->iReqFree + 2) % RT_ELEMENTS(pVMInt->apReqFree)], pList);
358}
359
360
361/**
362 * Allocates a request packet.
363 *
364 * The caller allocates a request packet, fills in the request data
365 * union and queues the request.
366 *
367 * @returns VBox status code.
368 *
369 * @param pVM VM handle.
370 * @param ppReq Where to store the pointer to the allocated packet.
371 * @param enmType Package type.
372 * @param enmDest Destination of the request packet (global or per VCPU).
373 */
374VMMR3DECL(int) VMR3ReqAlloc(PVM pVM, PVMREQ *ppReq, VMREQTYPE enmType, VMREQDEST enmDest)
375{
376 return VMR3ReqAllocU(pVM->pUVM, ppReq, enmType, enmDest);
377}
378
379
380/**
381 * Allocates a request packet.
382 *
383 * The caller allocates a request packet, fills in the request data
384 * union and queues the request.
385 *
386 * @returns VBox status code.
387 *
388 * @param pUVM Pointer to the user mode VM structure.
389 * @param ppReq Where to store the pointer to the allocated packet.
390 * @param enmType Package type.
391 * @param enmDest Destination of the request packet (global or per VCPU).
392 */
393VMMR3DECL(int) VMR3ReqAllocU(PUVM pUVM, PVMREQ *ppReq, VMREQTYPE enmType, VMREQDEST enmDest)
394{
395 /*
396 * Validate input.
397 */
398 AssertMsgReturn(enmType > VMREQTYPE_INVALID && enmType < VMREQTYPE_MAX,
399 ("Invalid package type %d valid range %d-%d inclusively.\n",
400 enmType, VMREQTYPE_INVALID + 1, VMREQTYPE_MAX - 1),
401 VERR_VM_REQUEST_INVALID_TYPE);
402 AssertPtrReturn(ppReq, VERR_INVALID_POINTER);
403 AssertMsgReturn(enmDest == VMREQDEST_ANY || enmDest == VMREQDEST_BROADCAST || (unsigned)enmDest < pUVM->pVM->cCPUs, ("Invalid destination %d (max=%d)\n", enmDest, pUVM->pVM->cCPUs), VERR_INVALID_PARAMETER);
404
405 /*
406 * Try to get a recycled packet.
407 * While this could all be solved with a single list with a lock, it's a sport
408 * of mine to avoid locks.
409 */
410 int cTries = RT_ELEMENTS(pUVM->vm.s.apReqFree) * 2;
411 while (--cTries >= 0)
412 {
413 PVMREQ volatile *ppHead = &pUVM->vm.s.apReqFree[ASMAtomicIncU32(&pUVM->vm.s.iReqFree) % RT_ELEMENTS(pUVM->vm.s.apReqFree)];
414#if 0 /* sad, but this won't work safely because of the reading of pReq->pNext. */
415 PVMREQ pNext = NULL;
416 PVMREQ pReq = *ppHead;
417 if ( pReq
418 && !ASMAtomicCmpXchgPtr((void * volatile *)ppHead, (pNext = pReq->pNext), pReq)
419 && (pReq = *ppHead)
420 && !ASMAtomicCmpXchgPtr((void * volatile *)ppHead, (pNext = pReq->pNext), pReq))
421 pReq = NULL;
422 if (pReq)
423 {
424 Assert(pReq->pNext == pNext); NOREF(pReq);
425#else
426 PVMREQ pReq = (PVMREQ)ASMAtomicXchgPtr((void * volatile *)ppHead, NULL);
427 if (pReq)
428 {
429 PVMREQ pNext = pReq->pNext;
430 if ( pNext
431 && !ASMAtomicCmpXchgPtr((void * volatile *)ppHead, pNext, NULL))
432 {
433 STAM_COUNTER_INC(&pUVM->vm.s.StatReqAllocRaces);
434 vmr3ReqJoinFree(&pUVM->vm.s, pReq->pNext);
435 }
436#endif
437 ASMAtomicDecU32(&pUVM->vm.s.cReqFree);
438
439 /*
440 * Make sure the event sem is not signaled.
441 */
442 if (!pReq->fEventSemClear)
443 {
444 int rc = RTSemEventWait(pReq->EventSem, 0);
445 if (rc != VINF_SUCCESS && rc != VERR_TIMEOUT)
446 {
447 /*
448 * This shall not happen, but if it does we'll just destroy
449 * the semaphore and create a new one.
450 */
451 AssertMsgFailed(("rc=%Rrc from RTSemEventWait(%#x).\n", rc, pReq->EventSem));
452 RTSemEventDestroy(pReq->EventSem);
453 rc = RTSemEventCreate(&pReq->EventSem);
454 AssertRC(rc);
455 if (RT_FAILURE(rc))
456 return rc;
457 }
458 pReq->fEventSemClear = true;
459 }
460 else
461 Assert(RTSemEventWait(pReq->EventSem, 0) == VERR_TIMEOUT);
462
463 /*
464 * Initialize the packet and return it.
465 */
466 Assert(pReq->enmType == VMREQTYPE_INVALID);
467 Assert(pReq->enmState == VMREQSTATE_FREE);
468 Assert(pReq->pUVM == pUVM);
469 ASMAtomicXchgSize(&pReq->pNext, NULL);
470 pReq->enmState = VMREQSTATE_ALLOCATED;
471 pReq->iStatus = VERR_VM_REQUEST_STATUS_STILL_PENDING;
472 pReq->fFlags = VMREQFLAGS_VBOX_STATUS;
473 pReq->enmType = enmType;
474 pReq->enmDest = enmDest;
475
476 *ppReq = pReq;
477 STAM_COUNTER_INC(&pUVM->vm.s.StatReqAllocRecycled);
478 LogFlow(("VMR3ReqAlloc: returns VINF_SUCCESS *ppReq=%p recycled\n", pReq));
479 return VINF_SUCCESS;
480 }
481 }
482
483 /*
484 * Ok, allocate a new one.
485 */
486 PVMREQ pReq = (PVMREQ)MMR3HeapAllocU(pUVM, MM_TAG_VM_REQ, sizeof(*pReq));
487 if (!pReq)
488 return VERR_NO_MEMORY;
489
490 /*
491 * Create the semaphore.
492 */
493 int rc = RTSemEventCreate(&pReq->EventSem);
494 AssertRC(rc);
495 if (RT_FAILURE(rc))
496 {
497 MMR3HeapFree(pReq);
498 return rc;
499 }
500
501 /*
502 * Initialize the packet and return it.
503 */
504 pReq->pNext = NULL;
505 pReq->pUVM = pUVM;
506 pReq->enmState = VMREQSTATE_ALLOCATED;
507 pReq->iStatus = VERR_VM_REQUEST_STATUS_STILL_PENDING;
508 pReq->fEventSemClear = true;
509 pReq->fFlags = VMREQFLAGS_VBOX_STATUS;
510 pReq->enmType = enmType;
511 pReq->enmDest = enmDest;
512
513 *ppReq = pReq;
514 STAM_COUNTER_INC(&pUVM->vm.s.StatReqAllocNew);
515 LogFlow(("VMR3ReqAlloc: returns VINF_SUCCESS *ppReq=%p new\n", pReq));
516 return VINF_SUCCESS;
517}
518
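/*
 * Editor's note: illustrative sketch, not part of the original file.  It
 * spells out the manual path described above - allocate a packet, fill in
 * the internal call frame, queue it and wait - which is essentially what
 * VMR3ReqCallVU does on the caller's behalf.  The worker and its argument
 * are supplied by the (hypothetical) caller; compiled out with #if 0.
 */
#if 0
static int exampleManualRequest(PUVM pUVM, PFNRT pfnWorker, uintptr_t uArg)
{
    PVMREQ pReq;
    int rc = VMR3ReqAllocU(pUVM, &pReq, VMREQTYPE_INTERNAL, VMREQDEST_ANY);
    if (RT_FAILURE(rc))
        return rc;

    pReq->fFlags              = VMREQFLAGS_VBOX_STATUS;
    pReq->u.Internal.pfn      = pfnWorker;
    pReq->u.Internal.cArgs    = 1;
    pReq->u.Internal.aArgs[0] = uArg;

    rc = VMR3ReqQueue(pReq, RT_INDEFINITE_WAIT);
    if (RT_SUCCESS(rc))
        rc = pReq->iStatus;
    VMR3ReqFree(pReq);
    return rc;
}
#endif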
519
520/**
521 * Free a request packet.
522 *
523 * @returns VBox status code.
524 *
525 * @param pReq Package to free.
526 * @remark The request packet must be in allocated or completed state!
527 */
528VMMR3DECL(int) VMR3ReqFree(PVMREQ pReq)
529{
530 /*
531 * Ignore NULL (all free functions should do this imho).
532 */
533 if (!pReq)
534 return VINF_SUCCESS;
535
536 /*
537 * Check packet state.
538 */
539 switch (pReq->enmState)
540 {
541 case VMREQSTATE_ALLOCATED:
542 case VMREQSTATE_COMPLETED:
543 break;
544 default:
545 AssertMsgFailed(("Invalid state %d!\n", pReq->enmState));
546 return VERR_VM_REQUEST_STATE;
547 }
548
549 /*
550 * Make it a free packet and put it into one of the free packet lists.
551 */
552 pReq->enmState = VMREQSTATE_FREE;
553 pReq->iStatus = VERR_VM_REQUEST_STATUS_FREED;
554 pReq->enmType = VMREQTYPE_INVALID;
555
556 PUVM pUVM = pReq->pUVM;
557 STAM_COUNTER_INC(&pUVM->vm.s.StatReqFree);
558
559 if (pUVM->vm.s.cReqFree < 128)
560 {
561 ASMAtomicIncU32(&pUVM->vm.s.cReqFree);
562 PVMREQ volatile *ppHead = &pUVM->vm.s.apReqFree[ASMAtomicIncU32(&pUVM->vm.s.iReqFree) % RT_ELEMENTS(pUVM->vm.s.apReqFree)];
563 PVMREQ pNext;
564 do
565 {
566 pNext = *ppHead;
567 ASMAtomicXchgPtr((void * volatile *)&pReq->pNext, pNext);
568 } while (!ASMAtomicCmpXchgPtr((void * volatile *)ppHead, (void *)pReq, (void *)pNext));
569 }
570 else
571 {
572 STAM_COUNTER_INC(&pReq->pUVM->vm.s.StatReqFreeOverflow);
573 RTSemEventDestroy(pReq->EventSem);
574 MMR3HeapFree(pReq);
575 }
576 return VINF_SUCCESS;
577}
578
579
580/**
581 * Queue a request.
582 *
583 * The request must be allocated using VMR3ReqAlloc() and contain
584 * all the required data.
585 * If it's desired to poll on the completion of the request, set cMillies
586 * to 0 and use VMR3ReqWait() to check for completion. Otherwise
587 * use RT_INDEFINITE_WAIT.
588 *
589 * @returns VBox status code.
590 * Will not return VERR_INTERRUPTED.
591 * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
592 *
593 * @param pReq The request to queue.
594 * @param cMillies Number of milliseconds to wait for the request to
595 * be completed. Use RT_INDEFINITE_WAIT to only
596 * wait till it's completed.
597 */
598VMMR3DECL(int) VMR3ReqQueue(PVMREQ pReq, unsigned cMillies)
599{
600 LogFlow(("VMR3ReqQueue: pReq=%p cMillies=%d\n", pReq, cMillies));
601 /*
602 * Verify the supplied package.
603 */
604 AssertMsgReturn(pReq->enmState == VMREQSTATE_ALLOCATED, ("%d\n", pReq->enmState), VERR_VM_REQUEST_STATE);
605 AssertMsgReturn( VALID_PTR(pReq->pUVM)
606 && !pReq->pNext
607 && pReq->EventSem != NIL_RTSEMEVENT,
608 ("Invalid request package! Anyone cooking their own packages???\n"),
609 VERR_VM_REQUEST_INVALID_PACKAGE);
610 AssertMsgReturn( pReq->enmType > VMREQTYPE_INVALID
611 && pReq->enmType < VMREQTYPE_MAX,
612 ("Invalid package type %d valid range %d-%d inclusively. This was verified on alloc too...\n",
613 pReq->enmType, VMREQTYPE_INVALID + 1, VMREQTYPE_MAX - 1),
614 VERR_VM_REQUEST_INVALID_TYPE);
615
616 /*
617 * Are we the EMT or not?
618 * Also, store pVM (and fFlags) locally since pReq may be invalid after queuing it.
619 */
620 int rc = VINF_SUCCESS;
621 PUVM pUVM = ((VMREQ volatile *)pReq)->pUVM; /* volatile paranoia */
622 PUVMCPU pUVMCPU = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
623
624 if (pReq->enmDest == VMREQDEST_BROADCAST)
625 {
626 unsigned fFlags = ((VMREQ volatile *)pReq)->fFlags; /* volatile paranoia */
627
628 for (unsigned i = 0; i < pUVM->pVM->cCPUs; i++)
629 {
630 PVMCPU pVCpu = &pUVM->pVM->aCpus[i];
631
632 if ( !pUVMCPU
633 || pUVMCPU->idCpu != i)
634 {
635 /*
636 * Insert it.
637 */
638 pReq->enmState = VMREQSTATE_QUEUED;
639 PVMREQ pNext;
640 do
641 {
642 pNext = pUVM->aCpus[i].vm.s.pReqs;
643 pReq->pNext = pNext;
644 } while (!ASMAtomicCmpXchgPtr((void * volatile *)&pUVM->aCpus[i].vm.s.pReqs, (void *)pReq, (void *)pNext));
645
646 /*
647 * Notify EMT.
648 */
649 if (pUVM->pVM)
650 VMCPU_FF_SET(pVCpu, VMCPU_FF_REQUEST);
651 /* @todo: VMR3NotifyFFU*/
652 AssertFailed();
653 VMR3NotifyFFU(pUVM, false);
654
655 /*
656 * Wait and return.
657 */
658 if (!(fFlags & VMREQFLAGS_NO_WAIT))
659 rc = VMR3ReqWait(pReq, cMillies);
660 LogFlow(("VMR3ReqQueue: returns %Rrc\n", rc));
661 }
662 else
663 {
664 /*
665 * The requester was EMT, just execute it.
666 */
667 pReq->enmState = VMREQSTATE_QUEUED;
668 rc = vmR3ReqProcessOneU(pUVM, pReq);
669 LogFlow(("VMR3ReqQueue: returns %Rrc (processed)\n", rc));
670 }
671 } /* for each VMCPU */
672 }
673 else
674 if ( pReq->enmDest != VMREQDEST_ANY /* for a specific VMCPU? */
675 && pUVMCPU->idCpu != (unsigned)pReq->enmDest)
676 {
677 RTCPUID idTarget = (RTCPUID)pReq->enmDest;
678 PVMCPU pVCpu = &pUVM->pVM->aCpus[idTarget];
679 unsigned fFlags = ((VMREQ volatile *)pReq)->fFlags; /* volatile paranoia */
680
681 /*
682 * Insert it.
683 */
684 pReq->enmState = VMREQSTATE_QUEUED;
685 PVMREQ pNext;
686 do
687 {
688 pNext = pUVM->aCpus[idTarget].vm.s.pReqs;
689 pReq->pNext = pNext;
690 } while (!ASMAtomicCmpXchgPtr((void * volatile *)&pUVM->aCpus[idTarget].vm.s.pReqs, (void *)pReq, (void *)pNext));
691
692 /*
693 * Notify EMT.
694 */
695 if (pUVM->pVM)
696 VMCPU_FF_SET(pVCpu, VMCPU_FF_REQUEST);
697 /* @todo: VMR3NotifyFFU*/
698 AssertFailed();
699 VMR3NotifyFFU(pUVM, false);
700
701 /*
702 * Wait and return.
703 */
704 if (!(fFlags & VMREQFLAGS_NO_WAIT))
705 rc = VMR3ReqWait(pReq, cMillies);
706 LogFlow(("VMR3ReqQueue: returns %Rrc\n", rc));
707 }
708 else if ( pReq->enmDest == VMREQDEST_ANY
709 && !pUVMCPU /* only EMT threads have a valid pointer stored in the TLS slot. */)
710 {
711 unsigned fFlags = ((VMREQ volatile *)pReq)->fFlags; /* volatile paranoia */
712
713 /*
714 * Insert it.
715 */
716 pReq->enmState = VMREQSTATE_QUEUED;
717 PVMREQ pNext;
718 do
719 {
720 pNext = pUVM->vm.s.pReqs;
721 pReq->pNext = pNext;
722 } while (!ASMAtomicCmpXchgPtr((void * volatile *)&pUVM->vm.s.pReqs, (void *)pReq, (void *)pNext));
723
724 /*
725 * Notify EMT.
726 */
727 if (pUVM->pVM)
728 VM_FF_SET(pUVM->pVM, VM_FF_REQUEST);
729 VMR3NotifyFFU(pUVM, false);
730
731 /*
732 * Wait and return.
733 */
734 if (!(fFlags & VMREQFLAGS_NO_WAIT))
735 rc = VMR3ReqWait(pReq, cMillies);
736 LogFlow(("VMR3ReqQueue: returns %Rrc\n", rc));
737 }
738 else
739 {
740 Assert(pUVMCPU);
741
742 /*
743 * The requester was EMT, just execute it.
744 */
745 pReq->enmState = VMREQSTATE_QUEUED;
746 rc = vmR3ReqProcessOneU(pUVM, pReq);
747 LogFlow(("VMR3ReqQueue: returns %Rrc (processed)\n", rc));
748 }
749 return rc;
750}
751
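/*
 * Editor's note: illustrative sketch, not part of the original file.  It
 * shows queuing a request for one specific virtual CPU by casting the VCPU
 * id to VMREQDEST, relying on the range check done in VMR3ReqAllocU
 * (0 .. cCPUs-1 are valid destinations).  The worker is supplied by the
 * (hypothetical) caller; compiled out with #if 0.
 */
#if 0
static int exampleCallOnVCpu(PVM pVM, RTCPUID idCpu, PFNRT pfnWorker)
{
    PVMREQ pReq;
    int rc = VMR3ReqCall(pVM, (VMREQDEST)idCpu, &pReq, RT_INDEFINITE_WAIT,
                         pfnWorker, 1, pVM);
    if (RT_SUCCESS(rc))
        rc = pReq->iStatus;
    VMR3ReqFree(pReq);
    return rc;
}
#endif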
752
753/**
754 * Wait for a request to be completed.
755 *
756 * @returns VBox status code.
757 * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
758 *
759 * @param pReq The request to wait for.
760 * @param cMillies Number of milliseconds to wait.
761 * Use RT_INDEFINITE_WAIT to only wait till it's completed.
762 */
763VMMR3DECL(int) VMR3ReqWait(PVMREQ pReq, unsigned cMillies)
764{
765 LogFlow(("VMR3ReqWait: pReq=%p cMillies=%d\n", pReq, cMillies));
766
767 /*
768 * Verify the supplied package.
769 */
770 AssertMsgReturn( pReq->enmState == VMREQSTATE_QUEUED
771 || pReq->enmState == VMREQSTATE_PROCESSING
772 || pReq->enmState == VMREQSTATE_COMPLETED,
773 ("Invalid state %d\n", pReq->enmState),
774 VERR_VM_REQUEST_STATE);
775 AssertMsgReturn( VALID_PTR(pReq->pUVM)
776 && pReq->EventSem != NIL_RTSEMEVENT,
777 ("Invalid request package! Anyone cooking their own packages???\n"),
778 VERR_VM_REQUEST_INVALID_PACKAGE);
779 AssertMsgReturn( pReq->enmType > VMREQTYPE_INVALID
780 && pReq->enmType < VMREQTYPE_MAX,
781 ("Invalid package type %d valid range %d-%d inclusively. This was verified on alloc too...\n",
782 pReq->enmType, VMREQTYPE_INVALID + 1, VMREQTYPE_MAX - 1),
783 VERR_VM_REQUEST_INVALID_TYPE);
784
785 /*
786 * Check for deadlock condition
787 */
788 PUVM pUVM = pReq->pUVM;
789 NOREF(pUVM);
790 AssertMsg(!pUVM->pVM || !VMMR3LockIsOwner(pUVM->pVM),
791 ("Waiting for EMT to process a request, but we own the global VM lock!?!?!?!\n"));
792
793 /*
794 * Wait on the package.
795 */
796 int rc;
797 if (cMillies != RT_INDEFINITE_WAIT)
798 rc = RTSemEventWait(pReq->EventSem, cMillies);
799 else
800 {
801 do
802 {
803 rc = RTSemEventWait(pReq->EventSem, RT_INDEFINITE_WAIT);
804 Assert(rc != VERR_TIMEOUT);
805 } while ( pReq->enmState != VMREQSTATE_COMPLETED
806 && pReq->enmState != VMREQSTATE_INVALID);
807 }
808 if (RT_SUCCESS(rc))
809 ASMAtomicXchgSize(&pReq->fEventSemClear, true);
810 if (pReq->enmState == VMREQSTATE_COMPLETED)
811 rc = VINF_SUCCESS;
812 LogFlow(("VMR3ReqWait: returns %Rrc\n", rc));
813 Assert(rc != VERR_INTERRUPTED);
814 return rc;
815}
816
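/*
 * Editor's note: illustrative sketch, not part of the original file.  It
 * shows the polling pattern mentioned in the VMR3ReqCall* comments: queue
 * with cMillies=0, do other work while the request is pending, and poll
 * for completion with VMR3ReqWait.  The worker and the "other work" are
 * placeholders; compiled out with #if 0.
 */
#if 0
static int examplePollingCall(PVM pVM, PFNRT pfnWorker)
{
    PVMREQ pReq;
    int rc = VMR3ReqCall(pVM, VMREQDEST_ANY, &pReq, 0 /*cMillies*/, pfnWorker, 0);
    while (rc == VERR_TIMEOUT)
    {
        /* ... do other work here while EMT processes the request ... */
        rc = VMR3ReqWait(pReq, 10 /*ms*/);
    }
    if (RT_SUCCESS(rc))
        rc = pReq->iStatus;
    VMR3ReqFree(pReq);
    return rc;
}
#endif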
817
818/**
819 * Process pending request(s).
820 *
821 * This function is called from a forced action handler in the EMT
822 * or from one of the EMT loops.
823 *
824 * @returns VBox status code.
825 *
826 * @param pUVM Pointer to the user mode VM structure.
827 * @param enmDest Destination of the request packet (global or per VCPU).
828 */
829VMMR3DECL(int) VMR3ReqProcessU(PUVM pUVM, VMREQDEST enmDest)
830{
831 LogFlow(("VMR3ReqProcessU: (enmVMState=%d) enmDest=%d\n", pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_CREATING, enmDest));
832
833 /*
834 * Process loop.
835 *
836 * We do not repeat the outer loop if we've got an informational status code
837 * since that code needs processing by our caller.
838 */
839 int rc = VINF_SUCCESS;
840 while (rc <= VINF_SUCCESS)
841 {
842 /*
843 * Get pending requests.
844 */
845 void *volatile *ppReqs;
846 if (enmDest == VMREQDEST_ANY)
847 {
848 ppReqs = (void * volatile *)&pUVM->vm.s.pReqs;
849 if (RT_LIKELY(pUVM->pVM))
850 VM_FF_CLEAR(pUVM->pVM, VM_FF_REQUEST);
851 }
852 else
853 {
854 ppReqs = (void * volatile *)&pUVM->aCpus[enmDest].vm.s.pReqs;
855 if (RT_LIKELY(pUVM->pVM))
856 {
857 PVMCPU pVCpu = &pUVM->pVM->aCpus[enmDest];
858
859 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_REQUEST);
860 }
861 }
862
863 PVMREQ pReqs = (PVMREQ)ASMAtomicXchgPtr(ppReqs, NULL);
864 if (!pReqs)
865 break;
866
867 /*
868 * Reverse the list to process it in FIFO order.
869 */
870 PVMREQ pReq = pReqs;
871 if (pReq->pNext)
872 Log2(("VMR3ReqProcess: 2+ requests: %p %p %p\n", pReq, pReq->pNext, pReq->pNext->pNext));
873 pReqs = NULL;
874 while (pReq)
875 {
876 Assert(pReq->enmState == VMREQSTATE_QUEUED);
877 Assert(pReq->pUVM == pUVM);
878 PVMREQ pCur = pReq;
879 pReq = pReq->pNext;
880 pCur->pNext = pReqs;
881 pReqs = pCur;
882 }
883
884
885 /*
886 * Process the requests.
887 *
888 * Since this is a FF worker certain rules applies to the
889 * status codes. See the EM section in VBox/err.h and EM.cpp for details.
890 */
891 while (pReqs)
892 {
893 /* Unchain the first request and advance the list. */
894 pReq = pReqs;
895 pReqs = pReqs->pNext;
896 pReq->pNext = NULL;
897
898 /* Process the request */
899 int rc2 = vmR3ReqProcessOneU(pUVM, pReq);
900
901 /*
902 * The status code handling is extremely important yet very fragile. Should probably
903 * look for a better way of communicating status changes to EM...
904 */
905 if ( rc2 >= VINF_EM_FIRST
906 && rc2 <= VINF_EM_LAST
907 && ( rc == VINF_SUCCESS
908 || rc2 < rc) )
909 rc = rc2;
910 }
911 }
912
913 LogFlow(("VMR3ReqProcess: returns %Rrc (enmVMState=%d)\n", rc, pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_CREATING));
914 return rc;
915}
916
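/*
 * Editor's note: illustrative sketch, not part of the original file.  It
 * shows how an EMT loop or forced action handler might drain the request
 * queues: global requests first, then the ones aimed at the calling VCPU.
 * The helper name and the caller's context are hypothetical; compiled out
 * with #if 0.
 */
#if 0
static int exampleDrainRequests(PUVM pUVM, PUVMCPU pUVMCPU)
{
    int rc = VMR3ReqProcessU(pUVM, VMREQDEST_ANY);
    if (rc == VINF_SUCCESS)
        rc = VMR3ReqProcessU(pUVM, (VMREQDEST)pUVMCPU->idCpu);
    return rc;
}
#endif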
917
918/**
919 * Process one request.
920 *
921 * @returns VBox status code.
922 *
923 * @param pUVM Pointer to the user mode VM structure.
924 * @param pReq Request packet to process.
925 */
926static int vmR3ReqProcessOneU(PUVM pUVM, PVMREQ pReq)
927{
928 LogFlow(("vmR3ReqProcessOne: pReq=%p type=%d fFlags=%#x\n", pReq, pReq->enmType, pReq->fFlags));
929
930 /*
931 * Process the request.
932 */
933 Assert(pReq->enmState == VMREQSTATE_QUEUED);
934 pReq->enmState = VMREQSTATE_PROCESSING;
935 int rcRet = VINF_SUCCESS; /* the return code of this function. */
936 int rcReq = VERR_NOT_IMPLEMENTED; /* the request status. */
937 switch (pReq->enmType)
938 {
939 /*
940 * A packed down call frame.
941 */
942 case VMREQTYPE_INTERNAL:
943 {
944 uintptr_t *pauArgs = &pReq->u.Internal.aArgs[0];
945 union
946 {
947 PFNRT pfn;
948 DECLCALLBACKMEMBER(int, pfn00)(void);
949 DECLCALLBACKMEMBER(int, pfn01)(uintptr_t);
950 DECLCALLBACKMEMBER(int, pfn02)(uintptr_t, uintptr_t);
951 DECLCALLBACKMEMBER(int, pfn03)(uintptr_t, uintptr_t, uintptr_t);
952 DECLCALLBACKMEMBER(int, pfn04)(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
953 DECLCALLBACKMEMBER(int, pfn05)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
954 DECLCALLBACKMEMBER(int, pfn06)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
955 DECLCALLBACKMEMBER(int, pfn07)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
956 DECLCALLBACKMEMBER(int, pfn08)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
957 DECLCALLBACKMEMBER(int, pfn09)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
958 DECLCALLBACKMEMBER(int, pfn10)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
959 DECLCALLBACKMEMBER(int, pfn11)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
960 DECLCALLBACKMEMBER(int, pfn12)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
961 } u;
962 u.pfn = pReq->u.Internal.pfn;
963#ifdef RT_ARCH_AMD64
964 switch (pReq->u.Internal.cArgs)
965 {
966 case 0: rcRet = u.pfn00(); break;
967 case 1: rcRet = u.pfn01(pauArgs[0]); break;
968 case 2: rcRet = u.pfn02(pauArgs[0], pauArgs[1]); break;
969 case 3: rcRet = u.pfn03(pauArgs[0], pauArgs[1], pauArgs[2]); break;
970 case 4: rcRet = u.pfn04(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3]); break;
971 case 5: rcRet = u.pfn05(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4]); break;
972 case 6: rcRet = u.pfn06(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5]); break;
973 case 7: rcRet = u.pfn07(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6]); break;
974 case 8: rcRet = u.pfn08(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7]); break;
975 case 9: rcRet = u.pfn09(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8]); break;
976 case 10: rcRet = u.pfn10(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8], pauArgs[9]); break;
977 case 11: rcRet = u.pfn11(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8], pauArgs[9], pauArgs[10]); break;
978 case 12: rcRet = u.pfn12(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8], pauArgs[9], pauArgs[10], pauArgs[11]); break;
979 default:
980 AssertReleaseMsgFailed(("cArgs=%d\n", pReq->u.Internal.cArgs));
981 rcRet = rcReq = VERR_INTERNAL_ERROR;
982 break;
983 }
984#else /* x86: */
985 size_t cbArgs = pReq->u.Internal.cArgs * sizeof(uintptr_t);
986# ifdef __GNUC__
987 __asm__ __volatile__("movl %%esp, %%edx\n\t"
988 "subl %2, %%esp\n\t"
989 "andl $0xfffffff0, %%esp\n\t"
990 "shrl $2, %2\n\t"
991 "movl %%esp, %%edi\n\t"
992 "rep movsl\n\t"
993 "movl %%edx, %%edi\n\t"
994 "call *%%eax\n\t"
995 "mov %%edi, %%esp\n\t"
996 : "=a" (rcRet),
997 "=S" (pauArgs),
998 "=c" (cbArgs)
999 : "0" (u.pfn),
1000 "1" (pauArgs),
1001 "2" (cbArgs)
1002 : "edi", "edx");
1003# else
1004 __asm
1005 {
1006 xor edx, edx /* just mess it up. */
1007 mov eax, u.pfn
1008 mov ecx, cbArgs
1009 shr ecx, 2
1010 mov esi, pauArgs
1011 mov ebx, esp
1012 sub esp, cbArgs
1013 and esp, 0xfffffff0
1014 mov edi, esp
1015 rep movsd
1016 call eax
1017 mov esp, ebx
1018 mov rcRet, eax
1019 }
1020# endif
1021#endif /* x86 */
1022 if ((pReq->fFlags & (VMREQFLAGS_RETURN_MASK)) == VMREQFLAGS_VOID)
1023 rcRet = VINF_SUCCESS;
1024 rcReq = rcRet;
1025 break;
1026 }
1027
1028 default:
1029 AssertMsgFailed(("pReq->enmType=%d\n", pReq->enmType));
1030 rcReq = VERR_NOT_IMPLEMENTED;
1031 break;
1032 }
1033
1034 /*
1035 * Complete the request.
1036 */
1037 pReq->iStatus = rcReq;
1038 pReq->enmState = VMREQSTATE_COMPLETED;
1039 if (pReq->fFlags & VMREQFLAGS_NO_WAIT)
1040 {
1041 /* Free the packet, nobody is waiting. */
1042 LogFlow(("vmR3ReqProcessOne: Completed request %p: rcReq=%Rrc rcRet=%Rrc - freeing it\n",
1043 pReq, rcReq, rcRet));
1044 VMR3ReqFree(pReq);
1045 }
1046 else
1047 {
1048 /* Notify the waiter and let him free up the packet. */
1049 LogFlow(("vmR3ReqProcessOne: Completed request %p: rcReq=%Rrc rcRet=%Rrc - notifying waiting thread\n",
1050 pReq, rcReq, rcRet));
1051 ASMAtomicXchgSize(&pReq->fEventSemClear, false);
1052 int rc2 = RTSemEventSignal(pReq->EventSem);
1053 if (RT_FAILURE(rc2))
1054 {
1055 AssertRC(rc2);
1056 rcRet = rc2;
1057 }
1058 }
1059 return rcRet;
1060}
1061
1062
1063
1064