VirtualBox

source: vbox/trunk/src/VBox/VMM/VMReq.cpp@ 23011

Last change on this file since 23011 was 23010, checked in by vboxsync, 15 years ago

VBox/vmapi.h,VMReq.cpp: Added a number of convenience functions to simplify using VMR3ReqCallU and friends.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 50.6 KB
1/* $Id: VMReq.cpp 23010 2009-09-14 15:55:46Z vboxsync $ */
2/** @file
3 * VM - Virtual Machine
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_VM
27#include <VBox/mm.h>
28#include <VBox/vmm.h>
29#include "VMInternal.h"
30#include <VBox/vm.h>
31#include <VBox/uvm.h>
32
33#include <VBox/err.h>
34#include <VBox/param.h>
35#include <VBox/log.h>
36#include <iprt/assert.h>
37#include <iprt/asm.h>
38#include <iprt/string.h>
39#include <iprt/time.h>
40#include <iprt/semaphore.h>
41#include <iprt/thread.h>
42
43
44/*******************************************************************************
45* Internal Functions *
46*******************************************************************************/
47static int vmR3ReqProcessOneU(PUVM pUVM, PVMREQ pReq);
48
49
50/**
51 * Allocate and queue a call request.
52 *
53 * If it's desired to poll on the completion of the request, set cMillies
54 * to 0 and use VMR3ReqWait() to check for completion. Otherwise
55 * use RT_INDEFINITE_WAIT.
56 * The returned request packet must be freed using VMR3ReqFree().
57 *
58 * @returns VBox status code.
59 * Will not return VERR_INTERRUPTED.
60 * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
61 *
62 * @param pVM The VM handle.
63 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
64 * one of the following special values:
65 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
66 * @param ppReq Where to store the pointer to the request.
67 * This will be NULL or a valid request pointer no matter what happens.
68 * @param cMillies Number of milliseconds to wait for the request to
69 * be completed. Use RT_INDEFINITE_WAIT to wait
70 * until it's completed.
71 * @param pfnFunction Pointer to the function to call.
72 * @param cArgs Number of arguments following in the ellipsis.
73 * Not possible to pass 64-bit arguments!
74 * @param ... Function arguments.
75 */
76VMMR3DECL(int) VMR3ReqCall(PVM pVM, VMCPUID idDstCpu, PVMREQ *ppReq, unsigned cMillies, PFNRT pfnFunction, unsigned cArgs, ...)
77{
78 va_list va;
79 va_start(va, cArgs);
80 int rc = VMR3ReqCallVU(pVM->pUVM, idDstCpu, ppReq, cMillies, VMREQFLAGS_VBOX_STATUS, pfnFunction, cArgs, va);
81 va_end(va);
82 return rc;
83}
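/*
 * A minimal usage sketch of the polling pattern described above: queue with
 * cMillies=0, then poll with VMR3ReqWait() and free the packet afterwards.
 * The worker vmR3DemoWorker and pvUser are hypothetical placeholders.
 *
 *     static DECLCALLBACK(int) vmR3DemoWorker(PVM pVM, void *pvUser);
 *
 *     PVMREQ pReq;
 *     int rc = VMR3ReqCall(pVM, VMCPUID_ANY, &pReq, 0, (PFNRT)vmR3DemoWorker, 2, pVM, pvUser);
 *     while (rc == VERR_TIMEOUT)
 *     {
 *         rc = VMR3ReqWait(pReq, 10);   // do other work between polls
 *     }
 *     if (RT_SUCCESS(rc))
 *         rc = pReq->iStatus;           // the status code returned by the worker
 *     VMR3ReqFree(pReq);
 */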
84
85
86/**
87 * Allocate and queue a call request to a void function.
88 *
89 * If it's desired to poll on the completion of the request, set cMillies
90 * to 0 and use VMR3ReqWait() to check for completion. Otherwise
91 * use RT_INDEFINITE_WAIT.
92 * The returned request packet must be freed using VMR3ReqFree().
93 *
94 * @returns VBox status code.
95 * Will not return VERR_INTERRUPTED.
96 * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
97 *
98 * @param pUVM Pointer to the user mode VM structure.
99 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
100 * one of the following special values:
101 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
102 * @param ppReq Where to store the pointer to the request.
103 * This will be NULL or a valid request pointer no matter what happens.
104 * @param cMillies Number of milliseconds to wait for the request to
105 * be completed. Use RT_INDEFINITE_WAIT to wait
106 * until it's completed.
107 * @param pfnFunction Pointer to the function to call.
108 * @param cArgs Number of arguments following in the ellipsis.
109 * Not possible to pass 64-bit arguments!
110 * @param ... Function arguments.
111 */
112VMMR3DECL(int) VMR3ReqCallVoidU(PUVM pUVM, VMCPUID idDstCpu, PVMREQ *ppReq, unsigned cMillies, PFNRT pfnFunction, unsigned cArgs, ...)
113{
114 va_list va;
115 va_start(va, cArgs);
116 int rc = VMR3ReqCallVU(pUVM, idDstCpu, ppReq, cMillies, VMREQFLAGS_VOID, pfnFunction, cArgs, va);
117 va_end(va);
118 return rc;
119}
120
121
122/**
123 * Allocate and queue a call request to a void function.
124 *
125 * If it's desired to poll on the completion of the request, set cMillies
126 * to 0 and use VMR3ReqWait() to check for completion. Otherwise
127 * use RT_INDEFINITE_WAIT.
128 * The returned request packet must be freed using VMR3ReqFree().
129 *
130 * @returns VBox status code.
131 * Will not return VERR_INTERRUPTED.
132 * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
133 *
134 * @param pVM The VM handle.
135 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
136 * one of the following special values:
137 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
138 * @param ppReq Where to store the pointer to the request.
139 * This will be NULL or a valid request pointer no matter what happens.
140 * @param cMillies Number of milliseconds to wait for the request to
141 * be completed. Use RT_INDEFINITE_WAIT to wait
142 * until it's completed.
143 * @param pfnFunction Pointer to the function to call.
144 * @param cArgs Number of arguments following in the ellipsis.
145 * Not possible to pass 64-bit arguments!
146 * @param ... Function arguments.
147 */
148VMMR3DECL(int) VMR3ReqCallVoid(PVM pVM, VMCPUID idDstCpu, PVMREQ *ppReq, unsigned cMillies, PFNRT pfnFunction, unsigned cArgs, ...)
149{
150 va_list va;
151 va_start(va, cArgs);
152 int rc = VMR3ReqCallVU(pVM->pUVM, idDstCpu, ppReq, cMillies, VMREQFLAGS_VOID, pfnFunction, cArgs, va);
153 va_end(va);
154 return rc;
155}
156
157
158/**
159 * Allocate and queue a call request.
160 *
161 * If it's desired to poll on the completion of the request, set cMillies
162 * to 0 and use VMR3ReqWait() to check for completion. Otherwise
163 * use RT_INDEFINITE_WAIT.
164 * The returned request packet must be freed using VMR3ReqFree().
165 *
166 * @returns VBox status code.
167 * Will not return VERR_INTERRUPTED.
168 * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
169 *
170 * @param pVM The VM handle.
171 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
172 * one of the following special values:
173 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
174 * @param ppReq Where to store the pointer to the request.
175 * This will be NULL or a valid request pointer no matter what happens, unless fFlags
176 * contains VMREQFLAGS_NO_WAIT, in which case it is optional and always NULL.
177 * @param cMillies Number of milliseconds to wait for the request to
178 * be completed. Use RT_INDEFINITE_WAIT to wait
179 * until it's completed.
180 * @param fFlags A combination of the VMREQFLAGS values.
181 * @param pfnFunction Pointer to the function to call.
182 * @param cArgs Number of arguments following in the ellipsis.
183 * Not possible to pass 64-bit arguments!
184 * @param ... Function arguments.
185 */
186VMMR3DECL(int) VMR3ReqCallEx(PVM pVM, VMCPUID idDstCpu, PVMREQ *ppReq, unsigned cMillies, unsigned fFlags, PFNRT pfnFunction, unsigned cArgs, ...)
187{
188 va_list va;
189 va_start(va, cArgs);
190 int rc = VMR3ReqCallVU(pVM->pUVM, idDstCpu, ppReq, cMillies, fFlags, pfnFunction, cArgs, va);
191 va_end(va);
192 return rc;
193}
194
195
196/**
197 * Convenience wrapper for VMR3ReqCallU.
198 *
199 * This assumes (1) you're calling a function that returns a VBox status code,
200 * (2) that you want its return code on success, and (3) that you wish to wait
201 * forever for it to return.
202 *
203 * @returns VBox status code. In the unlikely event that VMR3ReqCallVU fails,
204 * its status code is returned. Otherwise, the status of pfnFunction is
205 * returned.
206 *
207 * @param pVM Pointer to the shared VM structure.
208 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
209 * one of the following special values:
210 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
211 * @param pfnFunction Pointer to the function to call.
212 * @param cArgs Number of arguments following in the ellipsis.
213 * Not possible to pass 64-bit arguments!
214 * @param ... Function arguments.
215 *
216 * @remarks Use VMR3ReqCallWaitU where possible.
217 */
218VMMR3DECL(int) VMR3ReqCallWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
219{
220 PVMREQ pReq;
221 va_list va;
222 va_start(va, cArgs);
223 int rc = VMR3ReqCallVU(pVM->pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VBOX_STATUS,
224 pfnFunction, cArgs, va);
225 va_end(va);
226 if (RT_SUCCESS(rc))
227 rc = pReq->iStatus;
228 VMR3ReqFree(pReq);
229 return rc;
230}
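/*
 * A minimal sketch of the wrapper above: run a worker on whichever EMT picks
 * the request up and get its VBox status code back. vmR3DemoWorker and pvUser
 * are hypothetical placeholders.
 *
 *     int rc = VMR3ReqCallWait(pVM, VMCPUID_ANY, (PFNRT)vmR3DemoWorker, 2, pVM, pvUser);
 *     if (RT_FAILURE(rc))
 *         LogRel(("vmR3DemoWorker failed: %Rrc\n", rc));
 */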
231
232
233/**
234 * Convenience wrapper for VMR3ReqCallU.
235 *
236 * This assumes (1) you're calling a function that returns a VBox status code,
237 * (2) that you want its return code on success, and (3) that you wish to wait
238 * forever for it to return.
239 *
240 * @returns VBox status code. In the unlikely event that VMR3ReqCallVU fails,
241 * its status code is returned. Otherwise, the status of pfnFunction is
242 * returned.
243 *
244 * @param pUVM Pointer to the user mode VM structure.
245 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
246 * one of the following special values:
247 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
248 * @param pfnFunction Pointer to the function to call.
249 * @param cArgs Number of arguments following in the ellipsis.
250 * Not possible to pass 64-bit arguments!
251 * @param ... Function arguments.
252 */
253VMMR3DECL(int) VMR3ReqCallWaitU(PUVM pUVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
254{
255 PVMREQ pReq;
256 va_list va;
257 va_start(va, cArgs);
258 int rc = VMR3ReqCallVU(pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VBOX_STATUS,
259 pfnFunction, cArgs, va);
260 va_end(va);
261 if (RT_SUCCESS(rc))
262 rc = pReq->iStatus;
263 VMR3ReqFree(pReq);
264 return rc;
265}
266
267
268/**
269 * Convenience wrapper for VMR3ReqCallU.
270 *
271 * This assumes (1) you're calling a function that returns a VBox status code
272 * and (2) that you do not wish to wait for it to complete.
273 *
274 * @returns VBox status code returned by VMR3ReqCallVU.
275 *
276 * @param pVM Pointer to the shared VM structure.
277 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
278 * one of the following special values:
279 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
280 * @param pfnFunction Pointer to the function to call.
281 * @param cArgs Number of arguments following in the ellipsis.
282 * Not possible to pass 64-bit arguments!
283 * @param ... Function arguments.
284 *
285 * @remarks Use VMR3ReqCallNoWaitU where possible.
286 */
287VMMR3DECL(int) VMR3ReqCallNoWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
288{
289 va_list va;
290 va_start(va, cArgs);
291 int rc = VMR3ReqCallVU(pVM->pUVM, idDstCpu, NULL, 0, VMREQFLAGS_VBOX_STATUS | VMREQFLAGS_NO_WAIT,
292 pfnFunction, cArgs, va);
293 va_end(va);
294 return rc;
295}
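/*
 * A fire-and-forget sketch of the wrapper above (hypothetical worker): the
 * request packet is freed by the EMT after execution because of
 * VMREQFLAGS_NO_WAIT, so the worker's status code is discarded and only
 * queuing failures are reported here.
 *
 *     int rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY, (PFNRT)vmR3DemoWorker, 2, pVM, pvUser);
 *     AssertRC(rc);
 */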
296
297
298/**
299 * Convenience wrapper for VMR3ReqCallU.
300 *
301 * This assumes (1) you're calling a function that returns a VBox status code
302 * and (2) that you do not wish to wait for it to complete.
303 *
304 * @returns VBox status code returned by VMR3ReqCallVU.
305 *
306 * @param pUVM Pointer to the user mode VM structure.
307 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
308 * one of the following special values:
309 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
310 * @param pfnFunction Pointer to the function to call.
311 * @param cArgs Number of arguments following in the ellipsis.
312 * Not possible to pass 64-bit arguments!
313 * @param ... Function arguments.
314 */
315VMMR3DECL(int) VMR3ReqCallNoWaitU(PUVM pUVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
316{
317 va_list va;
318 va_start(va, cArgs);
319 int rc = VMR3ReqCallVU(pUVM, idDstCpu, NULL, 0, VMREQFLAGS_VBOX_STATUS | VMREQFLAGS_NO_WAIT,
320 pfnFunction, cArgs, va);
321 va_end(va);
322 return rc;
323}
324
325
326/**
327 * Convenience wrapper for VMR3ReqCallU.
328 *
329 * This assumes (1) you're calling a function that returns void, and (2) that
330 * you wish to wait forever for it to return.
331 *
332 * @returns VBox status code of VMR3ReqCallVU.
333 *
334 * @param pVM Pointer to the shared VM structure.
335 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
336 * one of the following special values:
337 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
338 * @param pfnFunction Pointer to the function to call.
339 * @param cArgs Number of arguments following in the ellipsis.
340 * Not possible to pass 64-bit arguments!
341 * @param ... Function arguments.
342 *
343 * @remarks Use VMR3ReqCallVoidWaitU where possible.
344 */
345VMMR3DECL(int) VMR3ReqCallVoidWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
346{
347 PVMREQ pReq;
348 va_list va;
349 va_start(va, cArgs);
350 int rc = VMR3ReqCallVU(pVM->pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VOID,
351 pfnFunction, cArgs, va);
352 va_end(va);
353 VMR3ReqFree(pReq);
354 return rc;
355}
356
357
358/**
359 * Convenience wrapper for VMR3ReqCallU.
360 *
361 * This assumes (1) you're calling a function that returns void, and (2) that
362 * you wish to wait forever for it to return.
363 *
364 * @returns VBox status code of VMR3ReqCallVU.
365 *
366 * @param pUVM Pointer to the user mode VM structure.
367 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
368 * one of the following special values:
369 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
370 * @param pfnFunction Pointer to the function to call.
371 * @param cArgs Number of arguments following in the ellipsis.
372 * Not possible to pass 64-bit arguments!
373 * @param ... Function arguments.
374 */
375VMMR3DECL(int) VMR3ReqCallVoidWaitU(PUVM pUVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
376{
377 PVMREQ pReq;
378 va_list va;
379 va_start(va, cArgs);
380 int rc = VMR3ReqCallVU(pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VOID,
381 pfnFunction, cArgs, va);
382 va_end(va);
383 VMR3ReqFree(pReq);
384 return rc;
385}
386
387
388/**
389 * Convenience wrapper for VMR3ReqCallU.
390 *
391 * This assumes (1) you're calling a function that returns void, and (2) that
392 * you do not wish to wait for it to complete.
393 *
394 * @returns VBox status code of VMR3ReqCallVU.
395 *
396 * @param pVM Pointer to the shared VM structure.
397 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
398 * one of the following special values:
399 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
400 * @param pfnFunction Pointer to the function to call.
401 * @param cArgs Number of arguments following in the ellipsis.
402 * Not possible to pass 64-bit arguments!
403 * @param ... Function arguments.
404 */
405VMMR3DECL(int) VMR3ReqCallVoidNoWait(PVM pVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
406{
407 PVMREQ pReq;
408 va_list va;
409 va_start(va, cArgs);
410 int rc = VMR3ReqCallVU(pVM->pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VOID | VMREQFLAGS_NO_WAIT,
411 pfnFunction, cArgs, va);
412 va_end(va);
413 VMR3ReqFree(pReq);
414 return rc;
415}
416
417
418/**
419 * Convenience wrapper for VMR3ReqCallU.
420 *
421 * This assumes (1) you're calling a function that returns void, and (2) that
422 * you do not wish to wait for it to complete.
423 *
424 * @returns VBox status code of VMR3ReqCallVU.
425 *
426 * @param pUVM Pointer to the user mode VM structure.
427 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
428 * one of the following special values:
429 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
430 * @param pfnFunction Pointer to the function to call.
431 * @param cArgs Number of arguments following in the ellipsis.
432 * Not possible to pass 64-bit arguments!
433 * @param ... Function arguments.
434 */
435VMMR3DECL(int) VMR3ReqCallVoidNoWaitU(PUVM pUVM, VMCPUID idDstCpu, PFNRT pfnFunction, unsigned cArgs, ...)
436{
437 PVMREQ pReq;
438 va_list va;
439 va_start(va, cArgs);
440 int rc = VMR3ReqCallVU(pUVM, idDstCpu, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VOID | VMREQFLAGS_NO_WAIT,
441 pfnFunction, cArgs, va);
442 va_end(va);
443 VMR3ReqFree(pReq);
444 return rc;
445}
446
447
448/**
449 * Allocate and queue a call request.
450 *
451 * If it's desired to poll on the completion of the request, set cMillies
452 * to 0 and use VMR3ReqWait() to check for completion. Otherwise
453 * use RT_INDEFINITE_WAIT.
454 * The returned request packet must be freed using VMR3ReqFree().
455 *
456 * @returns VBox status code.
457 * Will not return VERR_INTERRUPTED.
458 * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
459 *
460 * @param pUVM Pointer to the user mode VM structure.
461 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
462 * one of the following special values:
463 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
464 * @param ppReq Where to store the pointer to the request.
465 * This will be NULL or a valid request pointer no matter what happens, unless fFlags
466 * contains VMREQFLAGS_NO_WAIT, in which case it is optional and always NULL.
467 * @param cMillies Number of milliseconds to wait for the request to
468 * be completed. Use RT_INDEFINITE_WAIT to wait
469 * until it's completed.
470 * @param fFlags A combination of the VMREQFLAGS values.
471 * @param pfnFunction Pointer to the function to call.
472 * @param cArgs Number of arguments following in the ellipsis.
473 * Not possible to pass 64-bit arguments!
474 * @param ... Function arguments.
475 */
476VMMR3DECL(int) VMR3ReqCallU(PUVM pUVM, VMCPUID idDstCpu, PVMREQ *ppReq, unsigned cMillies, unsigned fFlags, PFNRT pfnFunction, unsigned cArgs, ...)
477{
478 va_list va;
479 va_start(va, cArgs);
480 int rc = VMR3ReqCallVU(pUVM, idDstCpu, ppReq, cMillies, fFlags, pfnFunction, cArgs, va);
481 va_end(va);
482 return rc;
483}
484
485
486/**
487 * Allocate and queue a call request.
488 *
489 * If it's desired to poll on the completion of the request, set cMillies
490 * to 0 and use VMR3ReqWait() to check for completion. Otherwise
491 * use RT_INDEFINITE_WAIT.
492 * The returned request packet must be freed using VMR3ReqFree().
493 *
494 * @returns VBox status code.
495 * Will not return VERR_INTERRUPTED.
496 * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
497 *
498 * @param pUVM Pointer to the user mode VM structure.
499 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
500 * one of the following special values:
501 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
502 * @param ppReq Where to store the pointer to the request.
503 * This will be NULL or a valid request pointer no matter what happens, unless fFlags
504 * contains VMREQFLAGS_NO_WAIT, in which case it is optional and always NULL.
505 * @param cMillies Number of milliseconds to wait for the request to
506 * be completed. Use RT_INDEFINITE_WAIT to wait
507 * until it's completed.
508 * @param pfnFunction Pointer to the function to call.
509 * @param fFlags A combination of the VMREQFLAGS values.
510 * @param cArgs Number of arguments following in the ellipsis.
511 * Stuff which differs in size from uintptr_t is gonna make trouble, so don't try!
512 * @param Args Argument vector.
513 */
514VMMR3DECL(int) VMR3ReqCallVU(PUVM pUVM, VMCPUID idDstCpu, PVMREQ *ppReq, unsigned cMillies, unsigned fFlags, PFNRT pfnFunction, unsigned cArgs, va_list Args)
515{
516 LogFlow(("VMR3ReqCallV: idDstCpu=%u cMillies=%d fFlags=%#x pfnFunction=%p cArgs=%d\n", idDstCpu, cMillies, fFlags, pfnFunction, cArgs));
517
518 /*
519 * Validate input.
520 */
521 AssertPtrReturn(pfnFunction, VERR_INVALID_POINTER);
522 AssertPtrReturn(pUVM, VERR_INVALID_POINTER);
523 AssertReturn(!(fFlags & ~(VMREQFLAGS_RETURN_MASK | VMREQFLAGS_NO_WAIT | VMREQFLAGS_POKE)), VERR_INVALID_PARAMETER);
524 if (!(fFlags & VMREQFLAGS_NO_WAIT) || ppReq)
525 {
526 AssertPtrReturn(ppReq, VERR_INVALID_POINTER);
527 *ppReq = NULL;
528 }
529 PVMREQ pReq = NULL;
530 AssertMsgReturn(cArgs * sizeof(uintptr_t) <= sizeof(pReq->u.Internal.aArgs),
531 ("cArg=%d\n", cArgs),
532 VERR_TOO_MUCH_DATA);
533
534 /*
535 * Allocate request
536 */
537 int rc = VMR3ReqAllocU(pUVM, &pReq, VMREQTYPE_INTERNAL, idDstCpu);
538 if (RT_FAILURE(rc))
539 return rc;
540
541 /*
542 * Initialize the request data.
543 */
544 pReq->fFlags = fFlags;
545 pReq->u.Internal.pfn = pfnFunction;
546 pReq->u.Internal.cArgs = cArgs;
547 for (unsigned iArg = 0; iArg < cArgs; iArg++)
548 pReq->u.Internal.aArgs[iArg] = va_arg(Args, uintptr_t);
549
550 /*
551 * Queue the request and return.
552 */
553 rc = VMR3ReqQueue(pReq, cMillies);
554 if ( RT_FAILURE(rc)
555 && rc != VERR_TIMEOUT)
556 {
557 VMR3ReqFree(pReq);
558 pReq = NULL;
559 }
560 if (!(fFlags & VMREQFLAGS_NO_WAIT))
561 {
562 *ppReq = pReq;
563 LogFlow(("VMR3ReqCallV: returns %Rrc *ppReq=%p\n", rc, pReq));
564 }
565 else
566 LogFlow(("VMR3ReqCallV: returns %Rrc\n", rc));
567 Assert(rc != VERR_INTERRUPTED);
568 return rc;
569}
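/*
 * Because each argument is fetched with va_arg(Args, uintptr_t) above, values
 * wider than a pointer are unsafe on 32-bit hosts. A hypothetical sketch of
 * the pitfall and the usual workaround (fnDemo is a placeholder):
 *
 *     uint64_t u64Offset = 42;
 *     // unsafe: read as a single uintptr_t, so it is mangled on x86 hosts
 *     VMR3ReqCallWait(pVM, VMCPUID_ANY, (PFNRT)fnDemo, 2, pVM, u64Offset);
 *     // safe: pass a pointer (or split the value into two uintptr_t arguments)
 *     VMR3ReqCallWait(pVM, VMCPUID_ANY, (PFNRT)fnDemo, 2, pVM, &u64Offset);
 */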
570
571
572/**
573 * Joins the list pList with whatever is linked up at *pHead.
574 */
575static void vmr3ReqJoinFreeSub(volatile PVMREQ *ppHead, PVMREQ pList)
576{
577 for (unsigned cIterations = 0;; cIterations++)
578 {
579 PVMREQ pHead = (PVMREQ)ASMAtomicXchgPtr((void * volatile *)ppHead, pList);
580 if (!pHead)
581 return;
582 PVMREQ pTail = pHead;
583 while (pTail->pNext)
584 pTail = pTail->pNext;
585 ASMAtomicWritePtr((void * volatile *)&pTail->pNext, pList);
586 ASMCompilerBarrier();
587 if (ASMAtomicCmpXchgPtr((void * volatile *)ppHead, (void *)pHead, pList))
588 return;
589 ASMAtomicWritePtr((void * volatile *)&pTail->pNext, NULL);
590 ASMCompilerBarrier();
591 if (ASMAtomicCmpXchgPtr((void * volatile *)ppHead, (void *)pHead, NULL))
592 return;
593 pList = pHead;
594 Assert(cIterations != 32);
595 Assert(cIterations != 64);
596 }
597}
598
599
600/**
601 * Joins the list pList with whatever is linked up at *pHead.
602 */
603static void vmr3ReqJoinFree(PVMINTUSERPERVM pVMInt, PVMREQ pList)
604{
605 /*
606 * Split the list if it's too long.
607 */
608 unsigned cReqs = 1;
609 PVMREQ pTail = pList;
610 while (pTail->pNext)
611 {
612 if (cReqs++ > 25)
613 {
614 const uint32_t i = pVMInt->iReqFree;
615 vmr3ReqJoinFreeSub(&pVMInt->apReqFree[(i + 2) % RT_ELEMENTS(pVMInt->apReqFree)], pTail->pNext);
616
617 pTail->pNext = NULL;
618 vmr3ReqJoinFreeSub(&pVMInt->apReqFree[(i + 2 + (i == pVMInt->iReqFree)) % RT_ELEMENTS(pVMInt->apReqFree)], pTail->pNext);
619 return;
620 }
621 pTail = pTail->pNext;
622 }
623 vmr3ReqJoinFreeSub(&pVMInt->apReqFree[(pVMInt->iReqFree + 2) % RT_ELEMENTS(pVMInt->apReqFree)], pList);
624}
625
626
627/**
628 * Allocates a request packet.
629 *
630 * The caller allocates a request packet, fills in the request data
631 * union and queues the request.
632 *
633 * @returns VBox status code.
634 *
635 * @param pVM VM handle.
636 * @param ppReq Where to store the pointer to the allocated packet.
637 * @param enmType Package type.
638 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
639 * one of the following special values:
640 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
641 */
642VMMR3DECL(int) VMR3ReqAlloc(PVM pVM, PVMREQ *ppReq, VMREQTYPE enmType, VMCPUID idDstCpu)
643{
644 return VMR3ReqAllocU(pVM->pUVM, ppReq, enmType, idDstCpu);
645}
646
647
648/**
649 * Allocates a request packet.
650 *
651 * The caller allocates a request packet, fills in the request data
652 * union and queues the request.
653 *
654 * @returns VBox status code.
655 *
656 * @param pUVM Pointer to the user mode VM structure.
657 * @param ppReq Where to store the pointer to the allocated packet.
658 * @param enmType Package type.
659 * @param idDstCpu The destination CPU(s). Either a specific CPU ID or
660 * one of the following special values:
661 * VMCPUID_ANY, VMCPUID_ANY_QUEUE, VMCPUID_ALL or VMCPUID_ALL_REVERSE.
662 */
663VMMR3DECL(int) VMR3ReqAllocU(PUVM pUVM, PVMREQ *ppReq, VMREQTYPE enmType, VMCPUID idDstCpu)
664{
665 /*
666 * Validate input.
667 */
668 AssertMsgReturn(enmType > VMREQTYPE_INVALID && enmType < VMREQTYPE_MAX,
669 ("Invalid package type %d valid range %d-%d inclusivly.\n",
670 enmType, VMREQTYPE_INVALID + 1, VMREQTYPE_MAX - 1),
671 VERR_VM_REQUEST_INVALID_TYPE);
672 AssertPtrReturn(ppReq, VERR_INVALID_POINTER);
673 AssertMsgReturn( idDstCpu == VMCPUID_ANY
674 || idDstCpu == VMCPUID_ANY_QUEUE
675 || idDstCpu < pUVM->cCpus
676 || idDstCpu == VMCPUID_ALL
677 || idDstCpu == VMCPUID_ALL_REVERSE,
678 ("Invalid destination %u (max=%u)\n", idDstCpu, pUVM->cCpus), VERR_INVALID_PARAMETER);
679
680 /*
681 * Try to get a recycled packet.
682 * While this could all be solved with a single list with a lock, it's a sport
683 * of mine to avoid locks.
684 */
685 int cTries = RT_ELEMENTS(pUVM->vm.s.apReqFree) * 2;
686 while (--cTries >= 0)
687 {
688 PVMREQ volatile *ppHead = &pUVM->vm.s.apReqFree[ASMAtomicIncU32(&pUVM->vm.s.iReqFree) % RT_ELEMENTS(pUVM->vm.s.apReqFree)];
689#if 0 /* sad, but this won't work safely because of the reading of pReq->pNext. */
690 PVMREQ pNext = NULL;
691 PVMREQ pReq = *ppHead;
692 if ( pReq
693 && !ASMAtomicCmpXchgPtr((void * volatile *)ppHead, (pNext = pReq->pNext), pReq)
694 && (pReq = *ppHead)
695 && !ASMAtomicCmpXchgPtr((void * volatile *)ppHead, (pNext = pReq->pNext), pReq))
696 pReq = NULL;
697 if (pReq)
698 {
699 Assert(pReq->pNext == pNext); NOREF(pReq);
700#else
701 PVMREQ pReq = (PVMREQ)ASMAtomicXchgPtr((void * volatile *)ppHead, NULL);
702 if (pReq)
703 {
704 PVMREQ pNext = pReq->pNext;
705 if ( pNext
706 && !ASMAtomicCmpXchgPtr((void * volatile *)ppHead, pNext, NULL))
707 {
708 STAM_COUNTER_INC(&pUVM->vm.s.StatReqAllocRaces);
709 vmr3ReqJoinFree(&pUVM->vm.s, pReq->pNext);
710 }
711#endif
712 ASMAtomicDecU32(&pUVM->vm.s.cReqFree);
713
714 /*
715 * Make sure the event sem is not signaled.
716 */
717 if (!pReq->fEventSemClear)
718 {
719 int rc = RTSemEventWait(pReq->EventSem, 0);
720 if (rc != VINF_SUCCESS && rc != VERR_TIMEOUT)
721 {
722 /*
723 * This shall not happen, but if it does we'll just destroy
724 * the semaphore and create a new one.
725 */
726 AssertMsgFailed(("rc=%Rrc from RTSemEventWait(%#x).\n", rc, pReq->EventSem));
727 RTSemEventDestroy(pReq->EventSem);
728 rc = RTSemEventCreate(&pReq->EventSem);
729 AssertRC(rc);
730 if (RT_FAILURE(rc))
731 return rc;
732 }
733 pReq->fEventSemClear = true;
734 }
735 else
736 Assert(RTSemEventWait(pReq->EventSem, 0) == VERR_TIMEOUT);
737
738 /*
739 * Initialize the packet and return it.
740 */
741 Assert(pReq->enmType == VMREQTYPE_INVALID);
742 Assert(pReq->enmState == VMREQSTATE_FREE);
743 Assert(pReq->pUVM == pUVM);
744 ASMAtomicXchgSize(&pReq->pNext, NULL);
745 pReq->enmState = VMREQSTATE_ALLOCATED;
746 pReq->iStatus = VERR_VM_REQUEST_STATUS_STILL_PENDING;
747 pReq->fFlags = VMREQFLAGS_VBOX_STATUS;
748 pReq->enmType = enmType;
749 pReq->idDstCpu = idDstCpu;
750
751 *ppReq = pReq;
752 STAM_COUNTER_INC(&pUVM->vm.s.StatReqAllocRecycled);
753 LogFlow(("VMR3ReqAlloc: returns VINF_SUCCESS *ppReq=%p recycled\n", pReq));
754 return VINF_SUCCESS;
755 }
756 }
757
758 /*
759 * Ok allocate one.
760 */
761 PVMREQ pReq = (PVMREQ)MMR3HeapAllocU(pUVM, MM_TAG_VM_REQ, sizeof(*pReq));
762 if (!pReq)
763 return VERR_NO_MEMORY;
764
765 /*
766 * Create the semaphore.
767 */
768 int rc = RTSemEventCreate(&pReq->EventSem);
769 AssertRC(rc);
770 if (RT_FAILURE(rc))
771 {
772 MMR3HeapFree(pReq);
773 return rc;
774 }
775
776 /*
777 * Initialize the packet and return it.
778 */
779 pReq->pNext = NULL;
780 pReq->pUVM = pUVM;
781 pReq->enmState = VMREQSTATE_ALLOCATED;
782 pReq->iStatus = VERR_VM_REQUEST_STATUS_STILL_PENDING;
783 pReq->fEventSemClear = true;
784 pReq->fFlags = VMREQFLAGS_VBOX_STATUS;
785 pReq->enmType = enmType;
786 pReq->idDstCpu = idDstCpu;
787
788 *ppReq = pReq;
789 STAM_COUNTER_INC(&pUVM->vm.s.StatReqAllocNew);
790 LogFlow(("VMR3ReqAlloc: returns VINF_SUCCESS *ppReq=%p new\n", pReq));
791 return VINF_SUCCESS;
792}
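/*
 * A sketch of the manual request life cycle using the allocator above,
 * mirroring what VMR3ReqCallVU does internally. vmR3DemoWorker and pvUser
 * are hypothetical placeholders.
 *
 *     PVMREQ pReq;
 *     int rc = VMR3ReqAllocU(pUVM, &pReq, VMREQTYPE_INTERNAL, VMCPUID_ANY);
 *     if (RT_SUCCESS(rc))
 *     {
 *         pReq->fFlags              = VMREQFLAGS_VBOX_STATUS;
 *         pReq->u.Internal.pfn      = (PFNRT)vmR3DemoWorker;
 *         pReq->u.Internal.cArgs    = 2;
 *         pReq->u.Internal.aArgs[0] = (uintptr_t)pUVM->pVM;
 *         pReq->u.Internal.aArgs[1] = (uintptr_t)pvUser;
 *
 *         rc = VMR3ReqQueue(pReq, RT_INDEFINITE_WAIT);
 *         if (RT_SUCCESS(rc))
 *             rc = pReq->iStatus;
 *         VMR3ReqFree(pReq);
 *     }
 */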
793
794
795/**
796 * Free a request packet.
797 *
798 * @returns VBox status code.
799 *
800 * @param pReq Package to free.
801 * @remark The request packet must be in allocated or completed state!
802 */
803VMMR3DECL(int) VMR3ReqFree(PVMREQ pReq)
804{
805 /*
806 * Ignore NULL (all free functions should do this imho).
807 */
808 if (!pReq)
809 return VINF_SUCCESS;
810
811 /*
812 * Check packet state.
813 */
814 switch (pReq->enmState)
815 {
816 case VMREQSTATE_ALLOCATED:
817 case VMREQSTATE_COMPLETED:
818 break;
819 default:
820 AssertMsgFailed(("Invalid state %d!\n", pReq->enmState));
821 return VERR_VM_REQUEST_STATE;
822 }
823
824 /*
825 * Make it a free packet and put it into one of the free packet lists.
826 */
827 pReq->enmState = VMREQSTATE_FREE;
828 pReq->iStatus = VERR_VM_REQUEST_STATUS_FREED;
829 pReq->enmType = VMREQTYPE_INVALID;
830
831 PUVM pUVM = pReq->pUVM;
832 STAM_COUNTER_INC(&pUVM->vm.s.StatReqFree);
833
834 if (pUVM->vm.s.cReqFree < 128)
835 {
836 ASMAtomicIncU32(&pUVM->vm.s.cReqFree);
837 PVMREQ volatile *ppHead = &pUVM->vm.s.apReqFree[ASMAtomicIncU32(&pUVM->vm.s.iReqFree) % RT_ELEMENTS(pUVM->vm.s.apReqFree)];
838 PVMREQ pNext;
839 do
840 {
841 pNext = (PVMREQ)ASMAtomicUoReadPtr((void * volatile *)ppHead);
842 ASMAtomicWritePtr((void * volatile *)&pReq->pNext, pNext);
843 ASMCompilerBarrier();
844 } while (!ASMAtomicCmpXchgPtr((void * volatile *)ppHead, (void *)pReq, (void *)pNext));
845 }
846 else
847 {
848 STAM_COUNTER_INC(&pReq->pUVM->vm.s.StatReqFreeOverflow);
849 RTSemEventDestroy(pReq->EventSem);
850 MMR3HeapFree(pReq);
851 }
852 return VINF_SUCCESS;
853}
854
855
856/**
857 * Queue a request.
858 *
859 * The request must be allocated using VMR3ReqAlloc() and contain
860 * all the required data.
861 * If it's desired to poll on the completion of the request, set cMillies
862 * to 0 and use VMR3ReqWait() to check for completion. Otherwise
863 * use RT_INDEFINITE_WAIT.
864 *
865 * @returns VBox status code.
866 * Will not return VERR_INTERRUPTED.
867 * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
868 *
869 * @param pReq The request to queue.
870 * @param cMillies Number of milliseconds to wait for the request to
871 * be completed. Use RT_INDEFINITE_WAIT to wait
872 * until it's completed.
873 */
874VMMR3DECL(int) VMR3ReqQueue(PVMREQ pReq, unsigned cMillies)
875{
876 LogFlow(("VMR3ReqQueue: pReq=%p cMillies=%d\n", pReq, cMillies));
877 /*
878 * Verify the supplied package.
879 */
880 AssertMsgReturn(pReq->enmState == VMREQSTATE_ALLOCATED, ("%d\n", pReq->enmState), VERR_VM_REQUEST_STATE);
881 AssertMsgReturn( VALID_PTR(pReq->pUVM)
882 && !pReq->pNext
883 && pReq->EventSem != NIL_RTSEMEVENT,
884 ("Invalid request package! Anyone cooking their own packages???\n"),
885 VERR_VM_REQUEST_INVALID_PACKAGE);
886 AssertMsgReturn( pReq->enmType > VMREQTYPE_INVALID
887 && pReq->enmType < VMREQTYPE_MAX,
888 ("Invalid package type %d valid range %d-%d inclusivly. This was verified on alloc too...\n",
889 pReq->enmType, VMREQTYPE_INVALID + 1, VMREQTYPE_MAX - 1),
890 VERR_VM_REQUEST_INVALID_TYPE);
891 Assert(!(pReq->fFlags & ~(VMREQFLAGS_RETURN_MASK | VMREQFLAGS_NO_WAIT | VMREQFLAGS_POKE)));
892
893 /*
894 * Are we the EMT or not?
895 * Also, store pVM (and fFlags) locally since pReq may be invalid after queuing it.
896 */
897 int rc = VINF_SUCCESS;
898 PUVM pUVM = ((VMREQ volatile *)pReq)->pUVM; /* volatile paranoia */
899 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
900
901 if (pReq->idDstCpu == VMCPUID_ALL)
902 {
903 /* One-by-one. */
904 Assert(!(pReq->fFlags & VMREQFLAGS_NO_WAIT));
905 for (unsigned i = 0; i < pUVM->cCpus; i++)
906 {
907 /* Reinit some members. */
908 pReq->enmState = VMREQSTATE_ALLOCATED;
909 pReq->idDstCpu = i;
910 rc = VMR3ReqQueue(pReq, cMillies);
911 if (RT_FAILURE(rc))
912 break;
913 }
914 }
915 else if (pReq->idDstCpu == VMCPUID_ALL_REVERSE)
916 {
917 /* One-by-one. */
918 Assert(!(pReq->fFlags & VMREQFLAGS_NO_WAIT));
919 for (int i = pUVM->cCpus-1; i >= 0; i--)
920 {
921 /* Reinit some members. */
922 pReq->enmState = VMREQSTATE_ALLOCATED;
923 pReq->idDstCpu = i;
924 rc = VMR3ReqQueue(pReq, cMillies);
925 if (RT_FAILURE(rc))
926 break;
927 }
928 }
929 else if ( pReq->idDstCpu != VMCPUID_ANY /* for a specific VMCPU? */
930 && pReq->idDstCpu != VMCPUID_ANY_QUEUE
931 && ( !pUVCpu /* and it's not the current thread. */
932 || pUVCpu->idCpu != pReq->idDstCpu))
933 {
934 VMCPUID idTarget = pReq->idDstCpu; Assert(idTarget < pUVM->cCpus);
935 PVMCPU pVCpu = &pUVM->pVM->aCpus[idTarget];
936 unsigned fFlags = ((VMREQ volatile *)pReq)->fFlags; /* volatile paranoia */
937
938 /* Fetch the right UVMCPU */
939 pUVCpu = &pUVM->aCpus[idTarget];
940
941 /*
942 * Insert it.
943 */
944 pReq->enmState = VMREQSTATE_QUEUED;
945 PVMREQ pNext;
946 do
947 {
948 pNext = (PVMREQ)ASMAtomicUoReadPtr((void * volatile *)&pUVCpu->vm.s.pReqs);
949 ASMAtomicWritePtr((void * volatile *)&pReq->pNext, pNext);
950 ASMCompilerBarrier();
951 } while (!ASMAtomicCmpXchgPtr((void * volatile *)&pUVCpu->vm.s.pReqs, (void *)pReq, (void *)pNext));
952
953 /*
954 * Notify EMT.
955 */
956 if (pUVM->pVM)
957 VMCPU_FF_SET(pVCpu, VMCPU_FF_REQUEST);
958 VMR3NotifyCpuFFU(pUVCpu, fFlags & VMREQFLAGS_POKE ? VMNOTIFYFF_FLAGS_POKE : 0);
959
960 /*
961 * Wait and return.
962 */
963 if (!(fFlags & VMREQFLAGS_NO_WAIT))
964 rc = VMR3ReqWait(pReq, cMillies);
965 LogFlow(("VMR3ReqQueue: returns %Rrc\n", rc));
966 }
967 else if ( ( pReq->idDstCpu == VMCPUID_ANY
968 && !pUVCpu /* only EMT threads have a valid pointer stored in the TLS slot. */)
969 || pReq->idDstCpu == VMCPUID_ANY_QUEUE)
970 {
971 unsigned fFlags = ((VMREQ volatile *)pReq)->fFlags; /* volatile paranoia */
972
973 Assert(pReq->idDstCpu != VMCPUID_ANY_QUEUE || pUVCpu);
974
975 /*
976 * Insert it.
977 */
978 pReq->enmState = VMREQSTATE_QUEUED;
979 PVMREQ pNext;
980 do
981 {
982 pNext = (PVMREQ)ASMAtomicUoReadPtr((void * volatile *)&pUVM->vm.s.pReqs);
983 ASMAtomicWritePtr((void * volatile *)&pReq->pNext, pNext);
984 ASMCompilerBarrier();
985 } while (!ASMAtomicCmpXchgPtr((void * volatile *)&pUVM->vm.s.pReqs, (void *)pReq, (void *)pNext));
986
987 /*
988 * Notify EMT.
989 */
990 if (pUVM->pVM)
991 VM_FF_SET(pUVM->pVM, VM_FF_REQUEST);
992 VMR3NotifyGlobalFFU(pUVM, fFlags & VMREQFLAGS_POKE ? VMNOTIFYFF_FLAGS_POKE : 0);
993
994 /*
995 * Wait and return.
996 */
997 if (!(fFlags & VMREQFLAGS_NO_WAIT))
998 rc = VMR3ReqWait(pReq, cMillies);
999 LogFlow(("VMR3ReqQueue: returns %Rrc\n", rc));
1000 }
1001 else
1002 {
1003 Assert(pUVCpu);
1004
1005 /*
1006 * The requester was an EMT, just execute it.
1007 */
1008 pReq->enmState = VMREQSTATE_QUEUED;
1009 rc = vmR3ReqProcessOneU(pUVM, pReq);
1010 LogFlow(("VMR3ReqQueue: returns %Rrc (processed)\n", rc));
1011 }
1012 return rc;
1013}
1014
1015
1016/**
1017 * Wait for a request to be completed.
1018 *
1019 * @returns VBox status code.
1020 * @returns VERR_TIMEOUT if cMillies was reached without the packet being completed.
1021 *
1022 * @param pReq The request to wait for.
1023 * @param cMillies Number of milliseconds to wait.
1024 * Use RT_INDEFINITE_WAIT to wait until it's completed.
1025 */
1026VMMR3DECL(int) VMR3ReqWait(PVMREQ pReq, unsigned cMillies)
1027{
1028 LogFlow(("VMR3ReqWait: pReq=%p cMillies=%d\n", pReq, cMillies));
1029
1030 /*
1031 * Verify the supplied package.
1032 */
1033 AssertMsgReturn( pReq->enmState == VMREQSTATE_QUEUED
1034 || pReq->enmState == VMREQSTATE_PROCESSING
1035 || pReq->enmState == VMREQSTATE_COMPLETED,
1036 ("Invalid state %d\n", pReq->enmState),
1037 VERR_VM_REQUEST_STATE);
1038 AssertMsgReturn( VALID_PTR(pReq->pUVM)
1039 && pReq->EventSem != NIL_RTSEMEVENT,
1040 ("Invalid request package! Anyone cooking their own packages???\n"),
1041 VERR_VM_REQUEST_INVALID_PACKAGE);
1042 AssertMsgReturn( pReq->enmType > VMREQTYPE_INVALID
1043 && pReq->enmType < VMREQTYPE_MAX,
1044 ("Invalid package type %d valid range %d-%d inclusivly. This was verified on alloc too...\n",
1045 pReq->enmType, VMREQTYPE_INVALID + 1, VMREQTYPE_MAX - 1),
1046 VERR_VM_REQUEST_INVALID_TYPE);
1047
1048 /*
1049 * Check for deadlock condition
1050 */
1051 PUVM pUVM = pReq->pUVM;
1052 NOREF(pUVM);
1053
1054 /*
1055 * Wait on the package.
1056 */
1057 int rc;
1058 if (cMillies != RT_INDEFINITE_WAIT)
1059 rc = RTSemEventWait(pReq->EventSem, cMillies);
1060 else
1061 {
1062 do
1063 {
1064 rc = RTSemEventWait(pReq->EventSem, RT_INDEFINITE_WAIT);
1065 Assert(rc != VERR_TIMEOUT);
1066 } while ( pReq->enmState != VMREQSTATE_COMPLETED
1067 && pReq->enmState != VMREQSTATE_INVALID);
1068 }
1069 if (RT_SUCCESS(rc))
1070 ASMAtomicXchgSize(&pReq->fEventSemClear, true);
1071 if (pReq->enmState == VMREQSTATE_COMPLETED)
1072 rc = VINF_SUCCESS;
1073 LogFlow(("VMR3ReqWait: returns %Rrc\n", rc));
1074 Assert(rc != VERR_INTERRUPTED);
1075 return rc;
1076}
1077
1078
1079/**
1080 * Process pending request(s).
1081 *
1082 * This function is called from a forced action handler in the EMT
1083 * or from one of the EMT loops.
1084 *
1085 * @returns VBox status code.
1086 *
1087 * @param pUVM Pointer to the user mode VM structure.
1088 * @param idDstCpu Pass VMCPUID_ANY to process the common request queue,
1089 * or a CPU ID to process that CPU's request queue. In the latter
1090 * case the calling thread must be the EMT of that CPU.
1091 *
1092 * @note SMP safe (multiple EMTs trying to satisfy VM_FF_REQUESTs).
1093 */
1094VMMR3DECL(int) VMR3ReqProcessU(PUVM pUVM, VMCPUID idDstCpu)
1095{
1096 LogFlow(("VMR3ReqProcessU: (enmVMState=%d) idDstCpu=%d\n", pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_CREATING, idDstCpu));
1097
1098 /*
1099 * Process loop.
1100 *
1101 * We do not repeat the outer loop if we've got an informational status code
1102 * since that code needs processing by our caller.
1103 */
1104 int rc = VINF_SUCCESS;
1105 while (rc <= VINF_SUCCESS)
1106 {
1107 /*
1108 * Get pending requests.
1109 */
1110 void * volatile *ppReqs;
1111 if (idDstCpu == VMCPUID_ANY)
1112 {
1113 ppReqs = (void * volatile *)&pUVM->vm.s.pReqs;
1114 if (RT_LIKELY(pUVM->pVM))
1115 VM_FF_CLEAR(pUVM->pVM, VM_FF_REQUEST);
1116 }
1117 else
1118 {
1119 Assert(idDstCpu < pUVM->cCpus);
1120 Assert(pUVM->aCpus[idDstCpu].vm.s.NativeThreadEMT == RTThreadNativeSelf());
1121 ppReqs = (void * volatile *)&pUVM->aCpus[idDstCpu].vm.s.pReqs;
1122 if (RT_LIKELY(pUVM->pVM))
1123 VMCPU_FF_CLEAR(&pUVM->pVM->aCpus[idDstCpu], VMCPU_FF_REQUEST);
1124 }
1125 PVMREQ pReqs = (PVMREQ)ASMAtomicXchgPtr(ppReqs, NULL);
1126 if (!pReqs)
1127 break;
1128
1129 /*
1130 * Reverse the list to process it in FIFO order.
1131 */
1132 PVMREQ pReq = pReqs;
1133 if (pReq->pNext)
1134 Log2(("VMR3ReqProcess: 2+ requests: %p %p %p\n", pReq, pReq->pNext, pReq->pNext->pNext));
1135 pReqs = NULL;
1136 while (pReq)
1137 {
1138 Assert(pReq->enmState == VMREQSTATE_QUEUED);
1139 Assert(pReq->pUVM == pUVM);
1140 PVMREQ pCur = pReq;
1141 pReq = pReq->pNext;
1142 pCur->pNext = pReqs;
1143 pReqs = pCur;
1144 }
1145
1146
1147 /*
1148 * Process the requests.
1149 *
1150 * Since this is a FF worker, certain rules apply to the
1151 * status codes. See the EM section in VBox/err.h and EM.cpp for details.
1152 */
1153 while (pReqs)
1154 {
1155 /* Unchain the first request and advance the list. */
1156 pReq = pReqs;
1157 pReqs = pReqs->pNext;
1158 pReq->pNext = NULL;
1159
1160 /* Process the request */
1161 int rc2 = vmR3ReqProcessOneU(pUVM, pReq);
1162
1163 /*
1164 * The status code handling is extremely important yet very fragile. We should probably
1165 * look for a better way of communicating status changes to EM...
1166 */
1167 if ( rc2 >= VINF_EM_FIRST
1168 && rc2 <= VINF_EM_LAST
1169 && ( rc == VINF_SUCCESS
1170 || rc2 < rc) )
1171 rc = rc2;
1172 }
1173 }
1174
1175 LogFlow(("VMR3ReqProcess: returns %Rrc (enmVMState=%d)\n", rc, pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_CREATING));
1176 return rc;
1177}
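/*
 * A rough sketch, based on the force-flag handling visible in this file, of
 * how an EMT loop is expected to drain the queues. The surrounding dispatch
 * and the exact macro names (VM_FF_ISPENDING/VMCPU_FF_ISPENDING) are
 * assumptions here; the real handling lives in the EM/VM code.
 *
 *     if (VM_FF_ISPENDING(pVM, VM_FF_REQUEST))
 *         rc = VMR3ReqProcessU(pUVM, VMCPUID_ANY);
 *     if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_REQUEST))
 *         rc = VMR3ReqProcessU(pUVM, pVCpu->idCpu);
 */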
1178
1179
1180/**
1181 * Process one request.
1182 *
1183 * @returns VBox status code.
1184 *
1185 * @param pUVM Pointer to the user mode VM structure.
1186 * @param pReq Request packet to process.
1187 */
1188static int vmR3ReqProcessOneU(PUVM pUVM, PVMREQ pReq)
1189{
1190 LogFlow(("vmR3ReqProcessOne: pReq=%p type=%d fFlags=%#x\n", pReq, pReq->enmType, pReq->fFlags));
1191
1192 /*
1193 * Process the request.
1194 */
1195 Assert(pReq->enmState == VMREQSTATE_QUEUED);
1196 pReq->enmState = VMREQSTATE_PROCESSING;
1197 int rcRet = VINF_SUCCESS; /* the return code of this function. */
1198 int rcReq = VERR_NOT_IMPLEMENTED; /* the request status. */
1199 switch (pReq->enmType)
1200 {
1201 /*
1202 * A packed down call frame.
1203 */
1204 case VMREQTYPE_INTERNAL:
1205 {
1206 uintptr_t *pauArgs = &pReq->u.Internal.aArgs[0];
1207 union
1208 {
1209 PFNRT pfn;
1210 DECLCALLBACKMEMBER(int, pfn00)(void);
1211 DECLCALLBACKMEMBER(int, pfn01)(uintptr_t);
1212 DECLCALLBACKMEMBER(int, pfn02)(uintptr_t, uintptr_t);
1213 DECLCALLBACKMEMBER(int, pfn03)(uintptr_t, uintptr_t, uintptr_t);
1214 DECLCALLBACKMEMBER(int, pfn04)(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
1215 DECLCALLBACKMEMBER(int, pfn05)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
1216 DECLCALLBACKMEMBER(int, pfn06)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
1217 DECLCALLBACKMEMBER(int, pfn07)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
1218 DECLCALLBACKMEMBER(int, pfn08)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
1219 DECLCALLBACKMEMBER(int, pfn09)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
1220 DECLCALLBACKMEMBER(int, pfn10)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
1221 DECLCALLBACKMEMBER(int, pfn11)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
1222 DECLCALLBACKMEMBER(int, pfn12)(uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
1223 } u;
1224 u.pfn = pReq->u.Internal.pfn;
1225#ifdef RT_ARCH_AMD64
1226 switch (pReq->u.Internal.cArgs)
1227 {
1228 case 0: rcRet = u.pfn00(); break;
1229 case 1: rcRet = u.pfn01(pauArgs[0]); break;
1230 case 2: rcRet = u.pfn02(pauArgs[0], pauArgs[1]); break;
1231 case 3: rcRet = u.pfn03(pauArgs[0], pauArgs[1], pauArgs[2]); break;
1232 case 4: rcRet = u.pfn04(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3]); break;
1233 case 5: rcRet = u.pfn05(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4]); break;
1234 case 6: rcRet = u.pfn06(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5]); break;
1235 case 7: rcRet = u.pfn07(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6]); break;
1236 case 8: rcRet = u.pfn08(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7]); break;
1237 case 9: rcRet = u.pfn09(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8]); break;
1238 case 10: rcRet = u.pfn10(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8], pauArgs[9]); break;
1239 case 11: rcRet = u.pfn11(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8], pauArgs[9], pauArgs[10]); break;
1240 case 12: rcRet = u.pfn12(pauArgs[0], pauArgs[1], pauArgs[2], pauArgs[3], pauArgs[4], pauArgs[5], pauArgs[6], pauArgs[7], pauArgs[8], pauArgs[9], pauArgs[10], pauArgs[11]); break;
1241 default:
1242 AssertReleaseMsgFailed(("cArgs=%d\n", pReq->u.Internal.cArgs));
1243 rcRet = rcReq = VERR_INTERNAL_ERROR;
1244 break;
1245 }
1246#else /* x86: */
1247 size_t cbArgs = pReq->u.Internal.cArgs * sizeof(uintptr_t);
1248# ifdef __GNUC__
1249 __asm__ __volatile__("movl %%esp, %%edx\n\t"
1250 "subl %2, %%esp\n\t"
1251 "andl $0xfffffff0, %%esp\n\t"
1252 "shrl $2, %2\n\t"
1253 "movl %%esp, %%edi\n\t"
1254 "rep movsl\n\t"
1255 "movl %%edx, %%edi\n\t"
1256 "call *%%eax\n\t"
1257 "mov %%edi, %%esp\n\t"
1258 : "=a" (rcRet),
1259 "=S" (pauArgs),
1260 "=c" (cbArgs)
1261 : "0" (u.pfn),
1262 "1" (pauArgs),
1263 "2" (cbArgs)
1264 : "edi", "edx");
1265# else
1266 __asm
1267 {
1268 xor edx, edx /* just mess it up. */
1269 mov eax, u.pfn
1270 mov ecx, cbArgs
1271 shr ecx, 2
1272 mov esi, pauArgs
1273 mov ebx, esp
1274 sub esp, cbArgs
1275 and esp, 0xfffffff0
1276 mov edi, esp
1277 rep movsd
1278 call eax
1279 mov esp, ebx
1280 mov rcRet, eax
1281 }
1282# endif
1283#endif /* x86 */
1284 if ((pReq->fFlags & (VMREQFLAGS_RETURN_MASK)) == VMREQFLAGS_VOID)
1285 rcRet = VINF_SUCCESS;
1286 rcReq = rcRet;
1287 break;
1288 }
1289
1290 default:
1291 AssertMsgFailed(("pReq->enmType=%d\n", pReq->enmType));
1292 rcReq = VERR_NOT_IMPLEMENTED;
1293 break;
1294 }
1295
1296 /*
1297 * Complete the request.
1298 */
1299 pReq->iStatus = rcReq;
1300 pReq->enmState = VMREQSTATE_COMPLETED;
1301 if (pReq->fFlags & VMREQFLAGS_NO_WAIT)
1302 {
1303 /* Free the packet, nobody is waiting. */
1304 LogFlow(("vmR3ReqProcessOne: Completed request %p: rcReq=%Rrc rcRet=%Rrc - freeing it\n",
1305 pReq, rcReq, rcRet));
1306 VMR3ReqFree(pReq);
1307 }
1308 else
1309 {
1310 /* Notify the waiter and let him free up the packet. */
1311 LogFlow(("vmR3ReqProcessOne: Completed request %p: rcReq=%Rrc rcRet=%Rrc - notifying waiting thread\n",
1312 pReq, rcReq, rcRet));
1313 ASMAtomicXchgSize(&pReq->fEventSemClear, false);
1314 int rc2 = RTSemEventSignal(pReq->EventSem);
1315 if (RT_FAILURE(rc2))
1316 {
1317 AssertRC(rc2);
1318 rcRet = rc2;
1319 }
1320 }
1321 return rcRet;
1322}
1323
1324
1325
1326