VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/VMReq.cpp@ 39944

Last change on this file since 39944 was 39402, checked in by vboxsync, 13 years ago

VMM: don't use generic IPE status codes, use specific ones. Part 1.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 47.9 KB
Line 
1/* $Id: VMReq.cpp 39402 2011-11-23 16:25:04Z vboxsync $ */
2/** @file
3 * VM - Virtual Machine
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_VM
23#include <VBox/vmm/mm.h>
24#include <VBox/vmm/vmm.h>
25#include "VMInternal.h"
26#include <VBox/vmm/vm.h>
27#include <VBox/vmm/uvm.h>
28
29#include <VBox/err.h>
30#include <VBox/param.h>
31#include <VBox/log.h>
32#include <iprt/assert.h>
33#include <iprt/asm.h>
34#include <iprt/string.h>
35#include <iprt/time.h>
36#include <iprt/semaphore.h>
37#include <iprt/thread.h>
38
39
40/*******************************************************************************
41* Internal Functions *
42*******************************************************************************/
43static int vmR3ReqProcessOneU(PUVM pUVM, PVMREQ pReq);
44
45
46/**
47 * Allocate and queue a call request.
48 *
49 * If it's desired to poll on the completion of the request set cMillies
50 * to 0 and use VMR3ReqWait() to check for completion. In the other case
51 * use RT_INDEFINITE_WAIT.
52 * The returned request packet must be freed using VMR3ReqFree().
53 *
54 * @returns VBox status code.
55 * Will not return VERR_INTERRUPTED.
56 * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
57 *
58 * @param pVM The VM handle.
59 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
60 * one of the following special values:
61 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
62 * @param ppReq Where to store the pointer to the request.
63 * This will be NULL or a valid request pointer not matter what happens.
64 * @param cMillies Number of milliseconds to wait for the request to
65 * be completed. Use RT_INDEFINITE_WAIT to only
66 * wait till it's completed.
67 * @param fFlags A combination of the VMREQFLAGS values.
68 * @param pfnFunction Pointer to the function to call.
69 * @param cArgs Number of arguments following in the ellipsis.
70 * @param ... Function arguments.
71 *
72 * @remarks See remarks on VMR3ReqCallVU.
73 */
74VMMR3DECL(int) VMR3ReqCall(PVM pVM, VMCPUID idDstCpu, PVMREQ *ppReq, RTMSINTERVAL cMillies, uint32_t fFlags,
75 PFNRT pfnFunction, unsigned cArgs, ...)
76{
77 va_list va;
78 va_start(va, cArgs);
79 int rc = VMR3ReqCallVU(pVM->pUVM, idDstCpu, ppReq, cMillies, fFlags, pfnFunction, cArgs, va);
80 va_end(va);
81 return rc;
82}
83
84
85/**
86 * Convenience wrapper for VMR3ReqCallU.
87 *
88 * This assumes (1) you're calling a function that returns an VBox status code,
89 * (2) that you want it's return code on success, and (3) that you wish to wait
90 * for ever for it to return.
91 *
92 * @returns VBox status code. In the unlikely event that VMR3ReqCallVU fails,
93 * its status code is return. Otherwise, the status of pfnFunction is
94 * returned.
95 *
96 * @param pVM Pointer to the shared VM structure.
97 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
98 * one of the following special values:
99 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
100 * @param pfnFunction Pointer to the function to call.
101 * @param cArgs Number of arguments following in the ellipsis.
102 * @param ... Function arguments.
103 *
104 * @remarks See remarks on VMR3ReqCallVU.
105 */
106VMMR3DECL(int) VMR3ReqCallWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
107{
108 PVMREQ pReq;
109 va_list va;
110 va_start(va, cArgs);
111 int rc = VMR3ReqCallVU(pVM->pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VBOX_STATUS,
112 pfnFunction, cArgs, va);
113 va_end(va);
114 if (RT_SUCCESS(rc))
115 rc = pReq->iStatus;
116 VMR3ReqFree(pReq);
117 return rc;
118}
119
120
121/**
122 * Convenience wrapper for VMR3ReqCallU.
123 *
124 * This assumes (1) you're calling a function that returns an VBox status code
125 * and that you do not wish to wait for it to complete.
126 *
127 * @returns VBox status code returned by VMR3ReqCallVU.
128 *
129 * @param pVM Pointer to the shared VM structure.
130 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
131 * one of the following special values:
132 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
133 * @param pfnFunction Pointer to the function to call.
134 * @param cArgs Number of arguments following in the ellipsis.
135 * @param ... Function arguments.
136 *
137 * @remarks See remarks on VMR3ReqCallVU.
138 */
139VMMR3DECL(int) VMR3ReqCallNoWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
140{
141 va_list va;
142 va_start(va, cArgs);
143 int rc = VMR3ReqCallVU(pVM->pUVM, idDstCpu, NULL, 0, VMREQFLAGS_VBOX_STATUS | VMREQFLAGS_NO_WAIT,
144 pfnFunction, cArgs, va);
145 va_end(va);
146 return rc;
147}
148
149
150/**
151 * Convenience wrapper for VMR3ReqCallU.
152 *
153 * This assumes (1) you're calling a function that returns void, and (2) that
154 * you wish to wait for ever for it to return.
155 *
156 * @returns VBox status code of VMR3ReqCallVU.
157 *
158 * @param pVM Pointer to the shared VM structure.
159 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
160 * one of the following special values:
161 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
162 * @param pfnFunction Pointer to the function to call.
163 * @param cArgs Number of arguments following in the ellipsis.
164 * @param ... Function arguments.
165 *
166 * @remarks See remarks on VMR3ReqCallVU.
167 */
168VMMR3DECL(int) VMR3ReqCallVoidWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
169{
170 PVMREQ pReq;
171 va_list va;
172 va_start(va, cArgs);
173 int rc = VMR3ReqCallVU(pVM->pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VOID,
174 pfnFunction, cArgs, va);
175 va_end(va);
176 VMR3ReqFree(pReq);
177 return rc;
178}
179
180
181/**
182 * Convenience wrapper for VMR3ReqCallU.
183 *
184 * This assumes (1) you're calling a function that returns void, and (2) that
185 * you do not wish to wait for it to complete.
186 *
187 * @returns VBox status code of VMR3ReqCallVU.
188 *
189 * @param pVM Pointer to the shared VM structure.
190 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
191 * one of the following special values:
192 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
193 * @param pfnFunction Pointer to the function to call.
194 * @param cArgs Number of arguments following in the ellipsis.
195 * @param ... Function arguments.
196 *
197 * @remarks See remarks on VMR3ReqCallVU.
198 */
199VMMR3DECL(int) VMR3ReqCallVoidNoWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
200{
201 PVMREQ pReq;
202 va_list va;
203 va_start(va, cArgs);
204 int rc = VMR3ReqCallVU(pVM->pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VOID | VMREQFLAGS_NO_WAIT,
205 pfnFunction, cArgs, va);
206 va_end(va);
207 VMR3ReqFree(pReq);
208 return rc;
209}
210
211
212/**
213 * Convenience wrapper for VMR3ReqCallU.
214 *
215 * This assumes (1) you're calling a function that returns an VBox status code,
216 * (2) that you want it's return code on success, (3) that you wish to wait for
217 * ever for it to return, and (4) that it's priority request that can be safely
218 * be handled during async suspend and power off.
219 *
220 * @returns VBox status code. In the unlikely event that VMR3ReqCallVU fails,
221 * its status code is return. Otherwise, the status of pfnFunction is
222 * returned.
223 *
224 * @param pVM Pointer to the shared VM structure.
225 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
226 * one of the following special values:
227 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
228 * @param pfnFunction Pointer to the function to call.
229 * @param cArgs Number of arguments following in the ellipsis.
230 * @param ... Function arguments.
231 *
232 * @remarks See remarks on VMR3ReqCallVU.
233 */
234VMMR3DECL(int) VMR3ReqPriorityCallWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
235{
236 PVMREQ pReq;
237 va_list va;
238 va_start(va, cArgs);
239 int rc = VMR3ReqCallVU(pVM->pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VBOX_STATUS | VMREQFLAGS_PRIORITY,
240 pfnFunction, cArgs, va);
241 va_end(va);
242 if (RT_SUCCESS(rc))
243 rc = pReq->iStatus;
244 VMR3ReqFree(pReq);
245 return rc;
246}
247
248
249/**
250 * Convenience wrapper for VMR3ReqCallU.
251 *
252 * This assumes (1) you're calling a function that returns void, (2) that you
253 * wish to wait for ever for it to return, and (3) that it's priority request
254 * that can be safely be handled during async suspend and power off.
255 *
256 * @returns VBox status code of VMR3ReqCallVU.
257 *
258 * @param pVM Pointer to the shared VM structure.
259 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
260 * one of the following special values:
261 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
262 * @param pfnFunction Pointer to the function to call.
263 * @param cArgs Number of arguments following in the ellipsis.
264 * @param ... Function arguments.
265 *
266 * @remarks See remarks on VMR3ReqCallVU.
267 */
268VMMR3DECL(int) VMR3ReqPriorityCallVoidWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
269{
270 PVMREQ pReq;
271 va_list va;
272 va_start(va, cArgs);
273 int rc = VMR3ReqCallVU(pVM->pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VOID | VMREQFLAGS_PRIORITY,
274 pfnFunction, cArgs, va);
275 va_end(va);
276 VMR3ReqFree(pReq);
277 return rc;
278}
279
280
281/**
282 * Allocate and queue a call request to a void function.
283 *
284 * If it's desired to poll on the completion of the request set cMillies
285 * to 0 and use VMR3ReqWait() to check for completion. In the other case
286 * use RT_INDEFINITE_WAIT.
287 * The returned request packet must be freed using VMR3ReqFree().
288 *
289 * @returns VBox status code.
290 * Will not return VERR_INTERRUPTED.
291 * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
292 *
293 * @param pUVM Pointer to the user mode VM structure.
294 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
295 * one of the following special values:
296 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
297 * @param ppReq Where to store the pointer to the request.
298 * This will be NULL or a valid request pointer not matter what happens, unless fFlags
299 * contains VMREQFLAGS_NO_WAIT when it will be optional and always NULL.
300 * @param cMillies Number of milliseconds to wait for the request to
301 * be completed. Use RT_INDEFINITE_WAIT to only
302 * wait till it's completed.
303 * @param fFlags A combination of the VMREQFLAGS values.
304 * @param pfnFunction Pointer to the function to call.
305 * @param cArgs Number of arguments following in the ellipsis.
306 * @param ... Function arguments.
307 *
308 * @remarks See remarks on VMR3ReqCallVU.
309 */
310VMMR3DECL(int) VMR3ReqCallU(PUVM pUVM, VMCPUID idDstCpu, PVMREQ *ppReq, RTMSINTERVAL cMillies, uint32_t fFlags,
311 PFNRT pfnFunction, unsigned cArgs, ...)
312{
313 va_list va;
314 va_start(va, cArgs);
315 int rc = VMR3ReqCallVU(pUVM, idDstCpu, ppReq, cMillies, fFlags, pfnFunction, cArgs, va);
316 va_end(va);
317 return rc;
318}
319
320
/**
 * Allocate and queue a call request.
 *
 * If it's desired to poll on the completion of the request set cMillies
 * to 0 and use VMR3ReqWait() to check for completion. In the other case
 * use RT_INDEFINITE_WAIT.
 * The returned request packet must be freed using VMR3ReqFree().
 *
 * @returns VBox status code.
 *          Will not return VERR_INTERRUPTED.
 * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
 *
 * @param   pUVM            Pointer to the user mode VM structure.
 * @param   idDstCpu        The destination CPU(s). Either a specific CPU ID or
 *                          one of the following special values:
 *                          VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
 * @param   ppReq           Where to store the pointer to the request.
 *                          This will be NULL or a valid request pointer no matter what
 *                          happens, unless fFlags contains VMREQFLAGS_NO_WAIT, in which
 *                          case it is optional and always NULL.
 * @param   cMillies        Number of milliseconds to wait for the request to
 *                          be completed. Use RT_INDEFINITE_WAIT to only
 *                          wait till it's completed.
 * @param   fFlags          A combination of the VMREQFLAGS values.
 * @param   pfnFunction     Pointer to the function to call.
 * @param   cArgs           Number of arguments following in the ellipsis.
 *                          Stuff which differs in size from uintptr_t is gonna make trouble, so don't try!
 * @param   Args            Argument vector.
 *
 * @remarks Caveats:
 *              - Do not pass anything which is larger than an uintptr_t.
 *              - 64-bit integers are larger than uintptr_t on 32-bit hosts.
 *                Pass integers > 32-bit by reference (pointers).
 *              - Don't use NULL since it should be the integer 0 in C++ and may
 *                therefore end up with garbage in the bits 63:32 on 64-bit
 *                hosts because 'int' is 32-bit.
 *                Use (void *)NULL or (uintptr_t)0 instead of NULL.
 */
VMMR3DECL(int) VMR3ReqCallVU(PUVM pUVM, VMCPUID idDstCpu, PVMREQ *ppReq, RTMSINTERVAL cMillies, uint32_t fFlags,
                             PFNRT pfnFunction, unsigned cArgs, va_list Args)
{
    LogFlow(("VMR3ReqCallV: idDstCpu=%u cMillies=%d fFlags=%#x pfnFunction=%p cArgs=%d\n", idDstCpu, cMillies, fFlags, pfnFunction, cArgs));

    /*
     * Validate input.
     */
    AssertPtrReturn(pfnFunction, VERR_INVALID_POINTER);
    AssertPtrReturn(pUVM, VERR_INVALID_POINTER);
    AssertReturn(!(fFlags & ~(VMREQFLAGS_RETURN_MASK | VMREQFLAGS_NO_WAIT | VMREQFLAGS_POKE | VMREQFLAGS_PRIORITY)), VERR_INVALID_PARAMETER);
    /* ppReq is mandatory unless the caller won't wait; pre-clear it so it is
       well defined on every failure path below. */
    if (!(fFlags & VMREQFLAGS_NO_WAIT) || ppReq)
    {
        AssertPtrReturn(ppReq, VERR_INVALID_POINTER);
        *ppReq = NULL;
    }
    /* All arguments are marshalled as uintptr_t slots in the request packet;
       reject calls that wouldn't fit.  (pReq is only used for sizeof here.) */
    PVMREQ pReq = NULL;
    AssertMsgReturn(cArgs * sizeof(uintptr_t) <= sizeof(pReq->u.Internal.aArgs),
                    ("cArg=%d\n", cArgs),
                    VERR_TOO_MUCH_DATA);

    /*
     * Allocate request
     */
    int rc = VMR3ReqAllocU(pUVM, &pReq, VMREQTYPE_INTERNAL, idDstCpu);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Initialize the request data.
     */
    pReq->fFlags         = fFlags;
    pReq->u.Internal.pfn = pfnFunction;
    pReq->u.Internal.cArgs = cArgs;
    for (unsigned iArg = 0; iArg < cArgs; iArg++)
        pReq->u.Internal.aArgs[iArg] = va_arg(Args, uintptr_t);

    /*
     * Queue the request and return.
     */
    rc = VMR3ReqQueue(pReq, cMillies);
    if (    RT_FAILURE(rc)
        &&  rc != VERR_TIMEOUT)
    {
        /* Hard failure: the request was not handed over, so we own and free it.
           On VERR_TIMEOUT the packet stays alive for a later VMR3ReqWait(). */
        VMR3ReqFree(pReq);
        pReq = NULL;
    }
    if (!(fFlags & VMREQFLAGS_NO_WAIT))
    {
        *ppReq = pReq;
        LogFlow(("VMR3ReqCallV: returns %Rrc *ppReq=%p\n", rc, pReq));
    }
    else
        LogFlow(("VMR3ReqCallV: returns %Rrc\n", rc));
    Assert(rc != VERR_INTERRUPTED);
    return rc;
}
415
416
/**
 * Joins the list pList with whatever is linked up at *pHead.
 *
 * Lock-free: the new chain is first exchanged into *ppHead; if another chain
 * was already there, the two are spliced together and pushed back with
 * compare-and-swap, retrying until a swap sticks.
 *
 * @param   ppHead  Pointer to the free-list head cell to join onto.
 * @param   pList   Head of the chain to insert (must not be NULL).
 */
static void vmr3ReqJoinFreeSub(volatile PVMREQ *ppHead, PVMREQ pList)
{
    for (unsigned cIterations = 0;; cIterations++)
    {
        /* Publish our chain; pHead is whatever was in the cell before. */
        PVMREQ pHead = ASMAtomicXchgPtrT(ppHead, pList, PVMREQ);
        if (!pHead)
            return;
        /* Append our chain to the tail of the previous one... */
        PVMREQ pTail = pHead;
        while (pTail->pNext)
            pTail = pTail->pNext;
        ASMAtomicWritePtr(&pTail->pNext, pList);
        ASMCompilerBarrier();
        /* ...and try to swap the combined chain back in (succeeds only if the
           cell still contains the pList we published above). */
        if (ASMAtomicCmpXchgPtr(ppHead, pHead, pList))
            return;
        /* Raced with someone: detach our chain again and retry against an
           empty cell before starting over with the old chain as the insert. */
        ASMAtomicWriteNullPtr(&pTail->pNext);
        ASMCompilerBarrier();
        if (ASMAtomicCmpXchgPtr(ppHead, pHead, NULL))
            return;
        pList = pHead;
        /* NOTE(review): debug-build tripwires for pathological contention. */
        Assert(cIterations != 32);
        Assert(cIterations != 64);
    }
}
443
444
445/**
446 * Joins the list pList with whatever is linked up at *pHead.
447 */
448static void vmr3ReqJoinFree(PVMINTUSERPERVM pVMInt, PVMREQ pList)
449{
450 /*
451 * Split the list if it's too long.
452 */
453 unsigned cReqs = 1;
454 PVMREQ pTail = pList;
455 while (pTail->pNext)
456 {
457 if (cReqs++ > 25)
458 {
459 const uint32_t i = pVMInt->iReqFree;
460 vmr3ReqJoinFreeSub(&pVMInt->apReqFree[(i + 2) % RT_ELEMENTS(pVMInt->apReqFree)], pTail->pNext);
461
462 pTail->pNext = NULL;
463 vmr3ReqJoinFreeSub(&pVMInt->apReqFree[(i + 2 + (i == pVMInt->iReqFree)) % RT_ELEMENTS(pVMInt->apReqFree)], pTail->pNext);
464 return;
465 }
466 pTail = pTail->pNext;
467 }
468 vmr3ReqJoinFreeSub(&pVMInt->apReqFree[(pVMInt->iReqFree + 2) % RT_ELEMENTS(pVMInt->apReqFree)], pList);
469}
470
471
472/**
473 * Allocates a request packet.
474 *
475 * The caller allocates a request packet, fills in the request data
476 * union and queues the request.
477 *
478 * @returns VBox status code.
479 *
480 * @param pVM VM handle.
481 * @param ppReq Where to store the pointer to the allocated packet.
482 * @param enmType Package type.
483 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
484 * one of the following special values:
485 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
486 */
487VMMR3DECL(int) VMR3ReqAlloc(PVM pVM, PVMREQ *ppReq, VMREQTYPE enmType, VMCPUID idDstCpu)
488{
489 return VMR3ReqAllocU(pVM->pUVM, ppReq, enmType, idDstCpu);
490}
491
492
/**
 * Allocates a request packet.
 *
 * The caller allocates a request packet, fills in the request data
 * union and queues the request.
 *
 * Tries to recycle a packet from the lock-free free lists first and only
 * falls back on heap allocation when that fails.
 *
 * @returns VBox status code.
 *
 * @param   pUVM            Pointer to the user mode VM structure.
 * @param   ppReq           Where to store the pointer to the allocated packet.
 * @param   enmType         Package type.
 * @param   idDstCpu        The destination CPU(s). Either a specific CPU ID or
 *                          one of the following special values:
 *                          VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
 */
VMMR3DECL(int) VMR3ReqAllocU(PUVM pUVM, PVMREQ *ppReq, VMREQTYPE enmType, VMCPUID idDstCpu)
{
    /*
     * Validate input.
     */
    AssertMsgReturn(enmType > VMREQTYPE_INVALID && enmType < VMREQTYPE_MAX,
                    ("Invalid package type %d valid range %d-%d inclusively.\n",
                     enmType, VMREQTYPE_INVALID + 1, VMREQTYPE_MAX - 1),
                    VERR_VM_REQUEST_INVALID_TYPE);
    AssertPtrReturn(ppReq, VERR_INVALID_POINTER);
    AssertMsgReturn(   idDstCpu == VMCPUID_ANY
                    || idDstCpu == VMCPUID_ANY_QUEUE
                    || idDstCpu < pUVM->cCpus
                    || idDstCpu == VMCPUID_ALL
                    || idDstCpu == VMCPUID_ALL_REVERSE,
                    ("Invalid destination %u (max=%u)\n", idDstCpu, pUVM->cCpus), VERR_INVALID_PARAMETER);

    /*
     * Try get a recycled packet.
     * While this could all be solved with a single list with a lock, it's a sport
     * of mine to avoid locks.
     */
    int cTries = RT_ELEMENTS(pUVM->vm.s.apReqFree) * 2;
    while (--cTries >= 0)
    {
        /* Pick a free-list cell round-robin style. */
        PVMREQ volatile *ppHead = &pUVM->vm.s.apReqFree[ASMAtomicIncU32(&pUVM->vm.s.iReqFree) % RT_ELEMENTS(pUVM->vm.s.apReqFree)];
#if 0 /* sad, but this won't work safely because the reading of pReq->pNext. */
        PVMREQ pNext = NULL;
        PVMREQ pReq = *ppHead;
        if (    pReq
            &&  !ASMAtomicCmpXchgPtr(ppHead, (pNext = pReq->pNext), pReq)
            &&  (pReq = *ppHead)
            &&  !ASMAtomicCmpXchgPtr(ppHead, (pNext = pReq->pNext), pReq))
            pReq = NULL;
        if (pReq)
        {
            Assert(pReq->pNext == pNext); NOREF(pReq);
#else
        /* Take the whole cell; we keep the head request and push the rest back. */
        PVMREQ pReq = ASMAtomicXchgPtrT(ppHead, NULL, PVMREQ);
        if (pReq)
        {
            PVMREQ pNext = pReq->pNext;
            if (    pNext
                &&  !ASMAtomicCmpXchgPtr(ppHead, pNext, NULL))
            {
                /* Someone refilled the cell meanwhile; rejoin the remainder. */
                STAM_COUNTER_INC(&pUVM->vm.s.StatReqAllocRaces);
                vmr3ReqJoinFree(&pUVM->vm.s, pReq->pNext);
            }
#endif
            ASMAtomicDecU32(&pUVM->vm.s.cReqFree);

            /*
             * Make sure the event sem is not signaled.
             */
            if (!pReq->fEventSemClear)
            {
                /* A zero-timeout wait consumes a stray signal, if any. */
                int rc = RTSemEventWait(pReq->EventSem, 0);
                if (rc != VINF_SUCCESS && rc != VERR_TIMEOUT)
                {
                    /*
                     * This shall not happen, but if it does we'll just destroy
                     * the semaphore and create a new one.
                     */
                    AssertMsgFailed(("rc=%Rrc from RTSemEventWait(%#x).\n", rc, pReq->EventSem));
                    RTSemEventDestroy(pReq->EventSem);
                    rc = RTSemEventCreate(&pReq->EventSem);
                    AssertRC(rc);
                    if (RT_FAILURE(rc))
                        return rc;
#if 0 ///@todo @bugref{4725} - def RT_LOCK_STRICT
                    for (VMCPUID idCpu = 0; idCpu < pUVM->cCpus; idCpu++)
                        RTSemEventAddSignaller(pReq->EventSem, pUVM->aCpus[idCpu].vm.s.ThreadEMT);
#endif
                }
                pReq->fEventSemClear = true;
            }
            else
                Assert(RTSemEventWait(pReq->EventSem, 0) == VERR_TIMEOUT);

            /*
             * Initialize the packet and return it.
             */
            Assert(pReq->enmType == VMREQTYPE_INVALID);
            Assert(pReq->enmState == VMREQSTATE_FREE);
            Assert(pReq->pUVM == pUVM);
            ASMAtomicXchgSize(&pReq->pNext, NULL);
            pReq->enmState = VMREQSTATE_ALLOCATED;
            pReq->iStatus  = VERR_VM_REQUEST_STATUS_STILL_PENDING;
            pReq->fFlags   = VMREQFLAGS_VBOX_STATUS;
            pReq->enmType  = enmType;
            pReq->idDstCpu = idDstCpu;

            *ppReq = pReq;
            STAM_COUNTER_INC(&pUVM->vm.s.StatReqAllocRecycled);
            LogFlow(("VMR3ReqAlloc: returns VINF_SUCCESS *ppReq=%p recycled\n", pReq));
            return VINF_SUCCESS;
        }
    }

    /*
     * Ok allocate one.
     */
    PVMREQ pReq = (PVMREQ)MMR3HeapAllocU(pUVM, MM_TAG_VM_REQ, sizeof(*pReq));
    if (!pReq)
        return VERR_NO_MEMORY;

    /*
     * Create the semaphore.
     */
    int rc = RTSemEventCreate(&pReq->EventSem);
    AssertRC(rc);
    if (RT_FAILURE(rc))
    {
        MMR3HeapFree(pReq);
        return rc;
    }
#if 0 ///@todo @bugref{4725} - def RT_LOCK_STRICT
    for (VMCPUID idCpu = 0; idCpu < pUVM->cCpus; idCpu++)
        RTSemEventAddSignaller(pReq->EventSem, pUVM->aCpus[idCpu].vm.s.ThreadEMT);
#endif

    /*
     * Initialize the packet and return it.
     */
    pReq->pNext    = NULL;
    pReq->pUVM     = pUVM;
    pReq->enmState = VMREQSTATE_ALLOCATED;
    pReq->iStatus  = VERR_VM_REQUEST_STATUS_STILL_PENDING;
    pReq->fEventSemClear = true;
    pReq->fFlags   = VMREQFLAGS_VBOX_STATUS;
    pReq->enmType  = enmType;
    pReq->idDstCpu = idDstCpu;

    *ppReq = pReq;
    STAM_COUNTER_INC(&pUVM->vm.s.StatReqAllocNew);
    LogFlow(("VMR3ReqAlloc: returns VINF_SUCCESS *ppReq=%p new\n", pReq));
    return VINF_SUCCESS;
}
646
647
/**
 * Free a request packet.
 *
 * Recycles the packet onto one of the lock-free free lists when there is
 * room, otherwise destroys it for real.
 *
 * @returns VBox status code.
 *
 * @param   pReq            Package to free.
 * @remark  The request packet must be in allocated or completed state!
 */
VMMR3DECL(int) VMR3ReqFree(PVMREQ pReq)
{
    /*
     * Ignore NULL (all free functions should do this imho).
     */
    if (!pReq)
        return VINF_SUCCESS;

    /*
     * Check packet state.
     */
    switch (pReq->enmState)
    {
        case VMREQSTATE_ALLOCATED:
        case VMREQSTATE_COMPLETED:
            break;
        default:
            AssertMsgFailed(("Invalid state %d!\n", pReq->enmState));
            return VERR_VM_REQUEST_STATE;
    }

    /*
     * Make it a free packet and put it into one of the free packet lists.
     */
    pReq->enmState = VMREQSTATE_FREE;
    pReq->iStatus  = VERR_VM_REQUEST_STATUS_FREED;
    pReq->enmType  = VMREQTYPE_INVALID;

    PUVM pUVM = pReq->pUVM;
    STAM_COUNTER_INC(&pUVM->vm.s.StatReqFree);

    /* NOTE(review): cReqFree is read without synchronization; 128 is a soft
       cap on the recycle pool, not an exact one. */
    if (pUVM->vm.s.cReqFree < 128)
    {
        ASMAtomicIncU32(&pUVM->vm.s.cReqFree);
        /* Round-robin cell selection, then a classic lock-free stack push. */
        PVMREQ volatile *ppHead = &pUVM->vm.s.apReqFree[ASMAtomicIncU32(&pUVM->vm.s.iReqFree) % RT_ELEMENTS(pUVM->vm.s.apReqFree)];
        PVMREQ pNext;
        do
        {
            pNext = ASMAtomicUoReadPtrT(ppHead, PVMREQ);
            ASMAtomicWritePtr(&pReq->pNext, pNext);
            ASMCompilerBarrier();
        } while (!ASMAtomicCmpXchgPtr(ppHead, pReq, pNext));
    }
    else
    {
        /* Pool full: destroy the packet (and its event semaphore) for real. */
        STAM_COUNTER_INC(&pReq->pUVM->vm.s.StatReqFreeOverflow);
        RTSemEventDestroy(pReq->EventSem);
        MMR3HeapFree(pReq);
    }
    return VINF_SUCCESS;
}
707
708
/**
 * Queue a request.
 *
 * The quest must be allocated using VMR3ReqAlloc() and contain
 * all the required data.
 * If it's desired to poll on the completion of the request set cMillies
 * to 0 and use VMR3ReqWait() to check for completion. In the other case
 * use RT_INDEFINITE_WAIT.
 *
 * @returns VBox status code.
 *          Will not return VERR_INTERRUPTED.
 * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
 *
 * @param   pReq            The request to queue.
 * @param   cMillies        Number of milliseconds to wait for the request to
 *                          be completed. Use RT_INDEFINITE_WAIT to only
 *                          wait till it's completed.
 */
VMMR3DECL(int) VMR3ReqQueue(PVMREQ pReq, RTMSINTERVAL cMillies)
{
    LogFlow(("VMR3ReqQueue: pReq=%p cMillies=%d\n", pReq, cMillies));
    /*
     * Verify the supplied package.
     */
    AssertMsgReturn(pReq->enmState == VMREQSTATE_ALLOCATED, ("%d\n", pReq->enmState), VERR_VM_REQUEST_STATE);
    AssertMsgReturn(    VALID_PTR(pReq->pUVM)
                    &&  !pReq->pNext
                    &&  pReq->EventSem != NIL_RTSEMEVENT,
                    ("Invalid request package! Anyone cooking their own packages???\n"),
                    VERR_VM_REQUEST_INVALID_PACKAGE);
    AssertMsgReturn(    pReq->enmType > VMREQTYPE_INVALID
                    &&  pReq->enmType < VMREQTYPE_MAX,
                    ("Invalid package type %d valid range %d-%d inclusively. This was verified on alloc too...\n",
                     pReq->enmType, VMREQTYPE_INVALID + 1, VMREQTYPE_MAX - 1),
                    VERR_VM_REQUEST_INVALID_TYPE);
    Assert(!(pReq->fFlags & ~(VMREQFLAGS_RETURN_MASK | VMREQFLAGS_NO_WAIT | VMREQFLAGS_POKE | VMREQFLAGS_PRIORITY)));

    /*
     * Are we the EMT or not?
     * Also, store pVM (and fFlags) locally since pReq may be invalid after queuing it.
     */
    int     rc      = VINF_SUCCESS;
    PUVM    pUVM    = ((VMREQ volatile *)pReq)->pUVM;                 /* volatile paranoia */
    PUVMCPU pUVCpu  = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);

    if (pReq->idDstCpu == VMCPUID_ALL)
    {
        /* One-by-one: queue to each CPU in ascending order, reusing the same
           packet, so we must wait for each to complete before the next. */
        Assert(!(pReq->fFlags & VMREQFLAGS_NO_WAIT));
        for (unsigned i = 0; i < pUVM->cCpus; i++)
        {
            /* Reinit some members. */
            pReq->enmState = VMREQSTATE_ALLOCATED;
            pReq->idDstCpu = i;
            rc = VMR3ReqQueue(pReq, cMillies);
            if (RT_FAILURE(rc))
                break;
        }
    }
    else if (pReq->idDstCpu == VMCPUID_ALL_REVERSE)
    {
        /* One-by-one, descending CPU order. */
        Assert(!(pReq->fFlags & VMREQFLAGS_NO_WAIT));
        for (int i = pUVM->cCpus-1; i >= 0; i--)
        {
            /* Reinit some members. */
            pReq->enmState = VMREQSTATE_ALLOCATED;
            pReq->idDstCpu = i;
            rc = VMR3ReqQueue(pReq, cMillies);
            if (RT_FAILURE(rc))
                break;
        }
    }
    else if (   pReq->idDstCpu != VMCPUID_ANY   /* for a specific VMCPU? */
             && pReq->idDstCpu != VMCPUID_ANY_QUEUE
             && (   !pUVCpu                     /* and it's not the current thread. */
                 || pUVCpu->idCpu != pReq->idDstCpu))
    {
        VMCPUID  idTarget = pReq->idDstCpu;     Assert(idTarget < pUVM->cCpus);
        /* NOTE(review): pUVM->pVM is dereferenced here, yet tested for NULL
           before VMCPU_FF_SET below — confirm pVM is always valid on this path. */
        PVMCPU   pVCpu = &pUVM->pVM->aCpus[idTarget];
        unsigned fFlags = ((VMREQ volatile *)pReq)->fFlags;           /* volatile paranoia */

        /* Fetch the right UVMCPU */
        pUVCpu = &pUVM->aCpus[idTarget];

        /*
         * Insert it.  Lock-free stack push onto the per-CPU queue; after this
         * point pReq may be processed and freed by the EMT at any time.
         */
        volatile PVMREQ *ppQueueHead = pReq->fFlags & VMREQFLAGS_PRIORITY ? &pUVCpu->vm.s.pPriorityReqs : &pUVCpu->vm.s.pNormalReqs;
        pReq->enmState = VMREQSTATE_QUEUED;
        PVMREQ pNext;
        do
        {
            pNext = ASMAtomicUoReadPtrT(ppQueueHead, PVMREQ);
            ASMAtomicWritePtr(&pReq->pNext, pNext);
            ASMCompilerBarrier();
        } while (!ASMAtomicCmpXchgPtr(ppQueueHead, pReq, pNext));

        /*
         * Notify EMT.
         */
        if (pUVM->pVM)
            VMCPU_FF_SET(pVCpu, VMCPU_FF_REQUEST);
        VMR3NotifyCpuFFU(pUVCpu, fFlags & VMREQFLAGS_POKE ? VMNOTIFYFF_FLAGS_POKE : 0);

        /*
         * Wait and return.
         */
        if (!(fFlags & VMREQFLAGS_NO_WAIT))
            rc = VMR3ReqWait(pReq, cMillies);
        LogFlow(("VMR3ReqQueue: returns %Rrc\n", rc));
    }
    else if (   (    pReq->idDstCpu == VMCPUID_ANY
                 && !pUVCpu /* only EMT threads have a valid pointer stored in the TLS slot. */)
             || pReq->idDstCpu == VMCPUID_ANY_QUEUE)
    {
        unsigned fFlags = ((VMREQ volatile *)pReq)->fFlags;           /* volatile paranoia */

        /* Note: pUVCpu may or may not be NULL in the VMCPUID_ANY_QUEUE case; we don't care. */

        /*
         * Insert it.  Same lock-free push, but onto the global VM queue.
         */
        volatile PVMREQ *ppQueueHead = pReq->fFlags & VMREQFLAGS_PRIORITY ? &pUVM->vm.s.pPriorityReqs : &pUVM->vm.s.pNormalReqs;
        pReq->enmState = VMREQSTATE_QUEUED;
        PVMREQ pNext;
        do
        {
            pNext = ASMAtomicUoReadPtrT(ppQueueHead, PVMREQ);
            ASMAtomicWritePtr(&pReq->pNext, pNext);
            ASMCompilerBarrier();
        } while (!ASMAtomicCmpXchgPtr(ppQueueHead, pReq, pNext));

        /*
         * Notify EMT.
         */
        if (pUVM->pVM)
            VM_FF_SET(pUVM->pVM, VM_FF_REQUEST);
        VMR3NotifyGlobalFFU(pUVM, fFlags & VMREQFLAGS_POKE ? VMNOTIFYFF_FLAGS_POKE : 0);

        /*
         * Wait and return.
         */
        if (!(fFlags & VMREQFLAGS_NO_WAIT))
            rc = VMR3ReqWait(pReq, cMillies);
        LogFlow(("VMR3ReqQueue: returns %Rrc\n", rc));
    }
    else
    {
        Assert(pUVCpu);

        /*
         * The requester was an EMT, just execute it.
         */
        pReq->enmState = VMREQSTATE_QUEUED;
        rc = vmR3ReqProcessOneU(pUVM, pReq);
        LogFlow(("VMR3ReqQueue: returns %Rrc (processed)\n", rc));
    }
    return rc;
}
869
870
/**
 * Wait for a request to be completed.
 *
 * @returns VBox status code.
 * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
 *
 * @param   pReq            The request to wait for.
 * @param   cMillies        Number of milliseconds to wait.
 *                          Use RT_INDEFINITE_WAIT to only wait till it's completed.
 */
VMMR3DECL(int) VMR3ReqWait(PVMREQ pReq, RTMSINTERVAL cMillies)
{
    LogFlow(("VMR3ReqWait: pReq=%p cMillies=%d\n", pReq, cMillies));

    /*
     * Verify the supplied package.
     */
    AssertMsgReturn(    pReq->enmState == VMREQSTATE_QUEUED
                    ||  pReq->enmState == VMREQSTATE_PROCESSING
                    ||  pReq->enmState == VMREQSTATE_COMPLETED,
                    ("Invalid state %d\n", pReq->enmState),
                    VERR_VM_REQUEST_STATE);
    AssertMsgReturn(    VALID_PTR(pReq->pUVM)
                    &&  pReq->EventSem != NIL_RTSEMEVENT,
                    ("Invalid request package! Anyone cooking their own packages???\n"),
                    VERR_VM_REQUEST_INVALID_PACKAGE);
    AssertMsgReturn(    pReq->enmType > VMREQTYPE_INVALID
                    &&  pReq->enmType < VMREQTYPE_MAX,
                    ("Invalid package type %d valid range %d-%d inclusively. This was verified on alloc too...\n",
                     pReq->enmType, VMREQTYPE_INVALID + 1, VMREQTYPE_MAX - 1),
                    VERR_VM_REQUEST_INVALID_TYPE);

    /*
     * Check for deadlock condition
     */
    /* NOTE(review): no actual deadlock check is performed here; pUVM is
       fetched and then explicitly marked unused. */
    PUVM pUVM = pReq->pUVM;
    NOREF(pUVM);

    /*
     * Wait on the package.
     */
    int rc;
    if (cMillies != RT_INDEFINITE_WAIT)
        rc = RTSemEventWait(pReq->EventSem, cMillies);
    else
    {
        /* Loop: the semaphore can be signalled for other reasons than
           completion, so keep waiting until the state says we are done. */
        do
        {
            rc = RTSemEventWait(pReq->EventSem, RT_INDEFINITE_WAIT);
            Assert(rc != VERR_TIMEOUT);
        } while (   pReq->enmState != VMREQSTATE_COMPLETED
                 && pReq->enmState != VMREQSTATE_INVALID);
    }
    if (RT_SUCCESS(rc))
        ASMAtomicXchgSize(&pReq->fEventSemClear, true);   /* we consumed the signal */
    if (pReq->enmState == VMREQSTATE_COMPLETED)
        rc = VINF_SUCCESS;
    LogFlow(("VMR3ReqWait: returns %Rrc\n", rc));
    Assert(rc != VERR_INTERRUPTED);
    return rc;
}
932
933
934/**
935 * Sets the relevant FF.
936 *
937 * @param pUVM Pointer to the user mode VM structure.
938 * @param idDstCpu VMCPUID_ANY or the ID of the current CPU.
939 */
940DECLINLINE(void) vmR3ReqSetFF(PUVM pUVM, VMCPUID idDstCpu)
941{
942 if (RT_LIKELY(pUVM->pVM))
943 {
944 if (idDstCpu == VMCPUID_ANY)
945 VM_FF_SET(pUVM->pVM, VM_FF_REQUEST);
946 else
947 VMCPU_FF_SET(&pUVM->pVM->aCpus[idDstCpu], VMCPU_FF_REQUEST);
948 }
949}
950
951
/**
 * VMR3ReqProcessU helper that handles cases where there are more than one
 * pending request.
 *
 * Unlinks the tail of the (LIFO-pushed) list — i.e. the oldest request —
 * and atomically pushes the remainder back onto the queue head so the
 * caller stays reentrant.
 *
 * @returns The oldest request.
 * @param   pUVM        Pointer to the user mode VM structure
 * @param   idDstCpu    VMCPUID_ANY or virtual CPU ID.
 * @param   pReqList    The list of requests.
 * @param   ppReqs      Pointer to the list head.
 */
static PVMREQ vmR3ReqProcessUTooManyHelper(PUVM pUVM, VMCPUID idDstCpu, PVMREQ pReqList, PVMREQ volatile *ppReqs)
{
    STAM_COUNTER_INC(&pUVM->vm.s.StatReqMoreThan1);

    /*
     * Chop off the last one (pReq).
     */
    PVMREQ pPrev;
    PVMREQ pReqRet = pReqList;
    do
    {
        pPrev = pReqRet;
        pReqRet = pReqRet->pNext;
    } while (pReqRet->pNext);
    ASMAtomicWriteNullPtr(&pPrev->pNext);

    /*
     * Push the others back onto the list (end of it).
     */
    Log2(("VMR3ReqProcess: Pushing back %p %p...\n", pReqList, pReqList->pNext));
    if (RT_UNLIKELY(!ASMAtomicCmpXchgPtr(ppReqs, pReqList, NULL)))
    {
        /* Someone queued new requests while we held the list: take what they
           queued, append our remainder behind it (so ours stay older/closer
           to the tail), and retry the push until the CAS succeeds. */
        STAM_COUNTER_INC(&pUVM->vm.s.StatReqPushBackRaces);
        do
        {
            ASMNopPause();
            PVMREQ pReqList2 = ASMAtomicXchgPtrT(ppReqs, NULL, PVMREQ);
            if (pReqList2)
            {
                PVMREQ pLast = pReqList2;
                while (pLast->pNext)
                    pLast = pLast->pNext;
                ASMAtomicWritePtr(&pLast->pNext, pReqList);
                pReqList = pReqList2;
            }
        } while (!ASMAtomicCmpXchgPtr(ppReqs, pReqList, NULL));
    }

    /* Re-raise the forced-action flag so the requests we pushed back get
       processed on a later pass. */
    vmR3ReqSetFF(pUVM, idDstCpu);
    return pReqRet;
}
1003
1004
/**
 * Process pending request(s).
 *
 * This function is called from a forced action handler in the EMT
 * or from one of the EMT loops.
 *
 * @returns VBox status code.
 *
 * @param   pUVM            Pointer to the user mode VM structure.
 * @param   idDstCpu        Pass VMCPUID_ANY to process the common request queue
 *                          and the CPU ID for a CPU specific one. In the latter
 *                          case the calling thread must be the EMT of that CPU.
 * @param   fPriorityOnly   When set, only process the priority request queue.
 *
 * @note    SMP safe (multiple EMTs trying to satisfy VM_FF_REQUESTs).
 *
 * @remarks This was made reentrant for async PDM handling, the debugger and
 *          others.
 */
VMMR3DECL(int) VMR3ReqProcessU(PUVM pUVM, VMCPUID idDstCpu, bool fPriorityOnly)
{
    LogFlow(("VMR3ReqProcessU: (enmVMState=%d) idDstCpu=%d\n", pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_CREATING, idDstCpu));

    /*
     * Determine which queues to process.
     */
    PVMREQ volatile *ppNormalReqs;
    PVMREQ volatile *ppPriorityReqs;
    if (idDstCpu == VMCPUID_ANY)
    {
        ppPriorityReqs = &pUVM->vm.s.pPriorityReqs;
        /* When fPriorityOnly, alias the normal queue to the priority one so the
           loop below effectively skips normal requests. */
        ppNormalReqs   = !fPriorityOnly ? &pUVM->vm.s.pNormalReqs   : ppPriorityReqs;
    }
    else
    {
        Assert(idDstCpu < pUVM->cCpus);
        Assert(pUVM->aCpus[idDstCpu].vm.s.NativeThreadEMT == RTThreadNativeSelf());
        ppPriorityReqs = &pUVM->aCpus[idDstCpu].vm.s.pPriorityReqs;
        ppNormalReqs   = !fPriorityOnly ? &pUVM->aCpus[idDstCpu].vm.s.pNormalReqs : ppPriorityReqs;
    }

    /*
     * Process loop.
     *
     * We do not repeat the outer loop if we've got an informational status code
     * since that code needs processing by our caller (usually EM).
     */
    int rc = VINF_SUCCESS;
    for (;;)
    {
        /*
         * Get the pending requests.
         *
         * If there are more than one request, unlink the oldest and put the
         * rest back so that we're reentrant.
         */
        /* Clear the FF before draining the queues; anyone pushing back or
           queuing afterwards sets it again (see vmR3ReqSetFF). */
        if (RT_LIKELY(pUVM->pVM))
        {
            if (idDstCpu == VMCPUID_ANY)
                VM_FF_CLEAR(pUVM->pVM, VM_FF_REQUEST);
            else
                VMCPU_FF_CLEAR(&pUVM->pVM->aCpus[idDstCpu], VMCPU_FF_REQUEST);
        }

        /* Priority queue first. */
        PVMREQ pReq = ASMAtomicXchgPtrT(ppPriorityReqs, NULL, PVMREQ);
        if (pReq)
        {
            if (RT_UNLIKELY(pReq->pNext))
                pReq = vmR3ReqProcessUTooManyHelper(pUVM, idDstCpu, pReq, ppPriorityReqs);
            else if (ASMAtomicReadPtrT(ppNormalReqs, PVMREQ))
                /* Normal requests are still pending; re-raise the FF so they
                   are not forgotten after we return. */
                vmR3ReqSetFF(pUVM, idDstCpu);
        }
        else
        {
            pReq = ASMAtomicXchgPtrT(ppNormalReqs, NULL, PVMREQ);
            if (!pReq)
                break;
            if (RT_UNLIKELY(pReq->pNext))
                pReq = vmR3ReqProcessUTooManyHelper(pUVM, idDstCpu, pReq, ppNormalReqs);
        }

        /*
         * Process the request
         */
        STAM_COUNTER_INC(&pUVM->vm.s.StatReqProcessed);
        int rc2 = vmR3ReqProcessOneU(pUVM, pReq);
        if (    rc2 >= VINF_EM_FIRST
            &&  rc2 <= VINF_EM_LAST)
        {
            /* Informational EM status: hand it back to the caller (usually EM)
               instead of looping further. */
            rc = rc2;
            break;
        }
    }

    LogFlow(("VMR3ReqProcess: returns %Rrc (enmVMState=%d)\n", rc, pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_CREATING));
    return rc;
}
1102
1103
/**
 * Process one request.
 *
 * @returns VBox status code.
 *
 * @param   pUVM        Pointer to the user mode VM structure.
 * @param   pReq        Request packet to process.
 */
static int vmR3ReqProcessOneU(PUVM pUVM, PVMREQ pReq)
{
    LogFlow(("vmR3ReqProcessOneU: pReq=%p type=%d fFlags=%#x\n", pReq, pReq->enmType, pReq->fFlags));

#if 1 /*def VBOX_STRICT */
    /*
     * Disable rendezvous if servicing a priority request.  Priority requests
     * can not make use of the EMT rendezvous API.
     */
    PVMCPU pVCpu = NULL;
    bool fSavedInRendezvous = true;
    bool const fPriorityReq = RT_BOOL(pReq->fFlags & VMREQFLAGS_PRIORITY);
    if (fPriorityReq && pUVM->pVM)
    {
        pVCpu = VMMGetCpu(pUVM->pVM);
        fSavedInRendezvous  = VMMR3EmtRendezvousSetDisabled(pVCpu, true /*fDisabled*/);
    }
#endif

    /*
     * Process the request.
     */
    Assert(pReq->enmState == VMREQSTATE_QUEUED);
    pReq->enmState = VMREQSTATE_PROCESSING;
    int rcRet = VINF_SUCCESS;           /* the return code of this function. */
    int rcReq = VERR_NOT_IMPLEMENTED;   /* the request status. */
    switch (pReq->enmType)
    {
        /*
         * A packed down call frame.
         */
        case VMREQTYPE_INTERNAL:
        {
            uintptr_t *pauArgs = &pReq->u.Internal.aArgs[0];
            /* Union of call signatures for 0..15 uintptr_t arguments, so the
               stored function pointer can be invoked with the right arity. */
            union
            {
                PFNRT pfn;
                DECLCALLBACKMEMBER(int, pfn00)(void);
                DECLCALLBACKMEMBER(int, pfn01)(uintptr_t);
                DECLCALLBACKMEMBER(int, pfn02)(uintptr_t, uintptr_t);
                DECLCALLBACKMEMBER(int, pfn03)(uintptr_t, uintptr_t, uintptr_t);
                DECLCALLBACKMEMBER(int, pfn04)(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
                DECLCALLBACKMEMBER(int, pfn05)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
                DECLCALLBACKMEMBER(int, pfn06)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
                DECLCALLBACKMEMBER(int, pfn07)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
                DECLCALLBACKMEMBER(int, pfn08)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
                DECLCALLBACKMEMBER(int, pfn09)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
                DECLCALLBACKMEMBER(int, pfn10)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
                DECLCALLBACKMEMBER(int, pfn11)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
                DECLCALLBACKMEMBER(int, pfn12)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
                DECLCALLBACKMEMBER(int, pfn13)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
                DECLCALLBACKMEMBER(int, pfn14)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
                DECLCALLBACKMEMBER(int, pfn15)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
            } u;
            u.pfn = pReq->u.Internal.pfn;
#ifdef RT_ARCH_AMD64
            /* AMD64: dispatch on argument count via the typed union members. */
            switch (pReq->u.Internal.cArgs)
            {
                case 0:  rcRet = u.pfn00(); break;
                case 1:  rcRet = u.pfn01(pauArgs[0]); break;
                case 2:  rcRet = u.pfn02(pauArgs[0], pauArgs[1]); break;
                case 3:  rcRet = u.pfn03(pauArgs[0], pauArgs[1], pauArgs[2]); break;
                case 4:  rcRet = u.pfn04(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3]); break;
                case 5:  rcRet = u.pfn05(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4]); break;
                case 6:  rcRet = u.pfn06(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5]); break;
                case 7:  rcRet = u.pfn07(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6]); break;
                case 8:  rcRet = u.pfn08(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7]); break;
                case 9:  rcRet = u.pfn09(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8]); break;
                case 10: rcRet = u.pfn10(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8], pauArgs[9]); break;
                case 11: rcRet = u.pfn11(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8], pauArgs[9], pauArgs[10]); break;
                case 12: rcRet = u.pfn12(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8], pauArgs[9], pauArgs[10], pauArgs[11]); break;
                case 13: rcRet = u.pfn13(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8], pauArgs[9], pauArgs[10], pauArgs[11], pauArgs[12]); break;
                case 14: rcRet = u.pfn14(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8], pauArgs[9], pauArgs[10], pauArgs[11], pauArgs[12], pauArgs[13]); break;
                case 15: rcRet = u.pfn15(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8], pauArgs[9], pauArgs[10], pauArgs[11], pauArgs[12], pauArgs[13], pauArgs[14]); break;
                default:
                    AssertReleaseMsgFailed(("cArgs=%d\n", pReq->u.Internal.cArgs));
                    rcRet = rcReq = VERR_VM_REQUEST_TOO_MANY_ARGS_IPE;
                    break;
            }
#else /* x86: */
            /* x86: copy the packed arguments to a freshly 16-byte aligned
               stack area and make a cdecl-style call through the pointer. */
            size_t cbArgs = pReq->u.Internal.cArgs * sizeof(uintptr_t);
# ifdef __GNUC__
            __asm__ __volatile__("movl %%esp, %%edx\n\t"
                                 "subl %2, %%esp\n\t"
                                 "andl $0xfffffff0, %%esp\n\t"
                                 "shrl $2, %2\n\t"
                                 "movl %%esp, %%edi\n\t"
                                 "rep movsl\n\t"
                                 "movl %%edx, %%edi\n\t"
                                 "call *%%eax\n\t"
                                 "mov %%edi, %%esp\n\t"
                                 : "=a" (rcRet),
                                   "=S" (pauArgs),
                                   "=c" (cbArgs)
                                 : "0" (u.pfn),
                                   "1" (pauArgs),
                                   "2" (cbArgs)
                                 : "edi", "edx");
# else
            __asm
            {
                xor     edx, edx        /* just mess it up. */
                mov     eax, u.pfn
                mov     ecx, cbArgs
                shr     ecx, 2
                mov     esi, pauArgs
                mov     ebx, esp
                sub     esp, cbArgs
                and     esp, 0xfffffff0
                mov     edi, esp
                rep movsd
                call    eax
                mov     esp, ebx
                mov     rcRet, eax
            }
# endif
#endif /* x86 */
            /* VOID requests have no meaningful return value; normalize it. */
            if ((pReq->fFlags & (VMREQFLAGS_RETURN_MASK)) == VMREQFLAGS_VOID)
                rcRet = VINF_SUCCESS;
            rcReq = rcRet;
            break;
        }

        default:
            AssertMsgFailed(("pReq->enmType=%d\n", pReq->enmType));
            rcReq = VERR_NOT_IMPLEMENTED;
            break;
    }

    /*
     * Complete the request.
     */
    pReq->iStatus  = rcReq;
    pReq->enmState = VMREQSTATE_COMPLETED;
    if (pReq->fFlags & VMREQFLAGS_NO_WAIT)
    {
        /* Free the packet, nobody is waiting. */
        LogFlow(("vmR3ReqProcessOneU: Completed request %p: rcReq=%Rrc rcRet=%Rrc - freeing it\n",
                 pReq, rcReq, rcRet));
        VMR3ReqFree(pReq);
    }
    else
    {
        /* Notify the waiter and let him free up the packet. */
        LogFlow(("vmR3ReqProcessOneU: Completed request %p: rcReq=%Rrc rcRet=%Rrc - notifying waiting thread\n",
                 pReq, rcReq, rcRet));
        ASMAtomicXchgSize(&pReq->fEventSemClear, false);
        int rc2 = RTSemEventSignal(pReq->EventSem);
        if (RT_FAILURE(rc2))
        {
            AssertRC(rc2);
            rcRet = rc2;
        }
    }

#if 1 /*def VBOX_STRICT */
    /*
     * Restore the rendezvous disabled state.
     */
    if (!fSavedInRendezvous)
        VMMR3EmtRendezvousSetDisabled(pVCpu, false /*fDisabled*/);
#endif
    return rcRet;
}
1276
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette