VirtualBox

source: vbox/trunk/src/VBox/VMM/VMM.cpp@1257

Last change on this file was 1257, checked in by vboxsync, 2007-03-06

Temporary logging

1/* $Id: VMM.cpp 1257 2007-03-06 11:14:54Z vboxsync $ */
2/** @file
3 * VMM - The Virtual Machine Monitor Core.
4 */
5
6/*
7 * Copyright (C) 2006 InnoTek Systemberatung GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * If you received this file as part of a commercial VirtualBox
18 * distribution, then only the terms of your commercial VirtualBox
19 * license agreement apply instead of the previous paragraph.
20 */
21
22#if 0 //defined(__AMD64__) && !defined(__WIN__)
23# define NO_SUPCALLR0VMM
24#endif
25
26/** @page pg_vmm VMM - The Virtual Machine Monitor
27 *
28 * !Revise this! It's already incorrect!
29 *
30 * The Virtual Machine Monitor (VMM) is the core of the virtual machine. It
31 * manages the alternate reality: controlling the virtualization, managing
32 * resources, tracking CPU state, its resources and so on...
33 *
34 * We will split the VMM into smaller entities:
35 *
36 * - Virtual Machine Core Monitor (VMCM), whose purpose is to
37 * provide ring and world switching, including routing
38 * interrupts to the host OS and traps to the appropriate trap
39 * handlers. It will implement an external interface for
40 * managing trap handlers.
41 *
42 * - CPU Monitor (CM), tracking the state of the CPU (in the alternate
43 * reality) and implementing external interfaces to read and change
44 * the state.
45 *
46 * - Memory Monitor (MM), whose purpose is to virtualize physical
47 * pages, segment descriptor tables, interrupt descriptor tables, task
48 * segments, and keep track of all memory providing external interfaces
49 * to access content and map pages. (Internally split into smaller entities!)
50 *
51 * - IO Monitor (IOM), which virtualizes in and out I/O operations. It
52 * interacts with the MM to implement memory mapped I/O. External
53 * interfaces for adding and removing I/O ranges are implemented.
54 *
55 * - External Interrupt Monitor (EIM), whose purpose is to manage
56 * interrupts generated by virtual devices. This monitor provides
57 * an interface for raising interrupts which is accessible at any
58 * time and from all threads.
59 * <p>
60 * A subentity of the EIM is the virtual Programmable Interrupt
61 * Controller Device (VPICD), and perhaps a virtual I/O Advanced
62 * Programmable Interrupt Controller Device (VAPICD).
63 *
64 * - Direct Memory Access Monitor (DMAM), whose purpose is to support
65 * virtual devices using the DMA controller. Like the EIM interfaces,
66 * these must be independent and threadable.
67 * <p>
68 * A subentity of the DMAM is a virtual DMA Controller Device (VDMACD).
69 *
70 *
71 * Entities working on a higher level:
72 *
73 * - Device Manager (DM), which is a support facility for virtualized
74 * hardware. This provides generic facilities for efficient device
75 * virtualization. It will manage device attaching and detaching,
76 * conversing with EIM and IOM.
77 *
78 * - Debugger Facility (DBGF) provides the basic features for
79 * debugging the alternate reality execution.
80 *
81 *
82 *
83 * @section pg_vmm_s_use_cases Use Cases
84 *
85 * @subsection pg_vmm_s_use_case_boot Bootstrap
86 *
87 * - Basic Init:
88 * - Init SUPDRV.
89 *
90 * - Init Virtual Machine Instance:
91 * - Load settings.
92 * - Check resource requirements (memory, com, stuff).
93 *
94 * - Init Host Ring 3 part:
95 * - Init Core code.
96 * - Load Pluggable Components.
97 * - Init Pluggable Components.
98 *
99 * - Init Host Ring 0 part:
100 * - Load Core (core = core components like VMM, RMI, CA, and so on) code.
101 * - Init Core code.
102 * - Load Pluggable Component code.
103 * - Init Pluggable Component code.
104 *
105 * - Allocate first chunk of memory and pin it down. This block of memory
106 * will fit the following pieces:
107 * - Virtual Machine Instance data. (Config, CPU state, VMM state, ++)
108 * (This is available from everywhere (at different addresses though)).
109 * - VMM Guest Context code.
110 * - Pluggable devices Guest Context code.
111 * - Page tables (directory and everything) for the VMM Guest
112 *
113 * - Setup Guest (Ring 0) part:
114 * - Setup initial page tables (i.e. directory and all the stuff).
115 * - Load Core Guest Context code.
116 * - Load Pluggable Devices Guest Context code.
117 *
118 *
119 */
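/*
 * A rough sketch (not authoritative; the exact call site lives in the VM
 * creation code, which is assumed here) of the ring-3 init order implemented
 * by this file:
 *
 * @code
 *      rc = VMMR3Init(pVM);            // core code, stack, loggers, statistics.
 *      ...
 *      rc = VMMR3InitFinalize(pVM);    // stack protection, EMT yield timer.
 *      rc = VMMR3InitR0(pVM);          // ring-0 init (VMMR0_DO_VMMR0_INIT).
 *      rc = VMMR3InitGC(pVM);          // guest context init (VMMGC_DO_VMMGC_INIT).
 * @endcode
 */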
120
121
122/*******************************************************************************
123* Header Files *
124*******************************************************************************/
125#define LOG_GROUP LOG_GROUP_VMM
126#include <VBox/vmm.h>
127#include <VBox/vmapi.h>
128#include <VBox/pgm.h>
129#include <VBox/cfgm.h>
130#include <VBox/pdm.h>
131#include <VBox/cpum.h>
132#include <VBox/mm.h>
133#include <VBox/iom.h>
134#include <VBox/trpm.h>
135#include <VBox/selm.h>
136#include <VBox/em.h>
137#include <VBox/sup.h>
138#include <VBox/dbgf.h>
139#include <VBox/csam.h>
140#include <VBox/patm.h>
141#include <VBox/rem.h>
142#include <VBox/ssm.h>
143#include <VBox/tm.h>
144#include "VMMInternal.h"
145#include "VMMSwitcher/VMMSwitcher.h"
146#include <VBox/vm.h>
147#include <VBox/err.h>
148#include <VBox/param.h>
149#include <VBox/version.h>
150#include <VBox/x86.h>
151#include <VBox/hwaccm.h>
152#include <iprt/assert.h>
153#include <iprt/alloc.h>
154#include <iprt/asm.h>
155#include <iprt/time.h>
156#include <iprt/stream.h>
157#include <iprt/string.h>
158#include <iprt/stdarg.h>
159#include <iprt/ctype.h>
160
161
162
163/** The saved state version. */
164#define VMM_SAVED_STATE_VERSION 3
165
166
167/*******************************************************************************
168* Internal Functions *
169*******************************************************************************/
170static DECLCALLBACK(int) vmmR3Save(PVM pVM, PSSMHANDLE pSSM);
171static DECLCALLBACK(int) vmmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
172static DECLCALLBACK(void) vmmR3YieldEMT(PVM pVM, PTMTIMER pTimer, void *pvUser);
173static int vmmR3ServiceCallHostRequest(PVM pVM);
174
175
176/*******************************************************************************
177* Global Variables *
178*******************************************************************************/
179/** Array of switcher definitions.
180 * The type and index shall match!
181 */
182static PVMMSWITCHERDEF s_apSwitchers[VMMSWITCHER_MAX] =
183{
184 NULL, /* invalid entry */
185#ifndef __AMD64__
186 &vmmR3Switcher32BitTo32Bit_Def,
187 &vmmR3Switcher32BitToPAE_Def,
188 NULL, //&vmmR3Switcher32BitToAMD64_Def,
189 &vmmR3SwitcherPAETo32Bit_Def,
190 &vmmR3SwitcherPAEToPAE_Def,
191 NULL, //&vmmR3SwitcherPAEToAMD64_Def,
192 NULL, //&vmmR3SwitcherAMD64ToPAE_Def,
193 NULL //&vmmR3SwitcherAMD64ToAMD64_Def,
194#else
195 NULL, //&vmmR3Switcher32BitTo32Bit_Def,
196 NULL, //&vmmR3Switcher32BitToPAE_Def,
197 NULL, //&vmmR3Switcher32BitToAMD64_Def,
198 NULL, //&vmmR3SwitcherPAETo32Bit_Def,
199 NULL, //&vmmR3SwitcherPAEToPAE_Def,
200 NULL, //&vmmR3SwitcherPAEToAMD64_Def,
201 &vmmR3SwitcherAMD64ToPAE_Def,
202 NULL //&vmmR3SwitcherAMD64ToAMD64_Def,
203#endif
204};
205
206
207
208/**
209 * Initializes the core code.
210 *
211 * This is core per-VM code which might need fixups and/or, for ease of
212 * use, is put on linearly contiguous backing.
213 *
214 * @returns VBox status code.
215 * @param pVM Pointer to VM structure.
216 */
217static int vmmR3InitCoreCode(PVM pVM)
218{
219 /*
220 * Calc the size.
221 */
222 unsigned cbCoreCode = 0;
223 for (unsigned iSwitcher = 0; iSwitcher < ELEMENTS(s_apSwitchers); iSwitcher++)
224 {
225 pVM->vmm.s.aoffSwitchers[iSwitcher] = cbCoreCode;
226 PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
227 if (pSwitcher)
228 {
229 AssertRelease((unsigned)pSwitcher->enmType == iSwitcher);
230 cbCoreCode += RT_ALIGN_32(pSwitcher->cbCode + 1, 32);
231 }
232 }
233
234 /*
235 * Allocate contiguous pages for switchers and deal with
236 * conflicts in the intermediate mapping of the code.
237 */
238 pVM->vmm.s.cbCoreCode = RT_ALIGN_32(cbCoreCode, PAGE_SIZE);
239 pVM->vmm.s.pvHCCoreCodeR3 = SUPContAlloc2(pVM->vmm.s.cbCoreCode, &pVM->vmm.s.pvHCCoreCodeR0, &pVM->vmm.s.HCPhysCoreCode);
240 int rc = VERR_NO_MEMORY;
241 if (pVM->vmm.s.pvHCCoreCodeR3)
242 {
243 rc = PGMR3MapIntermediate(pVM, pVM->vmm.s.pvHCCoreCodeR0, pVM->vmm.s.HCPhysCoreCode, cbCoreCode);
244 if (rc == VERR_PGM_MAPPINGS_FIX_CONFLICT)
245 {
246 /* try more allocations. */
247 struct
248 {
249 RTR0PTR pvR0;
250 void *pvR3;
251 RTHCPHYS HCPhys;
252 } aBadTries[16];
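                /* Note: failed blocks are parked in aBadTries rather than freed
                   at once; freeing immediately would most likely just hand back
                   the same conflicting range on the next SUPContAlloc2 call.
                   Everything parked is released in the cleanup loop below. */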
253 unsigned i = 0;
254 do
255 {
256 aBadTries[i].pvR3 = pVM->vmm.s.pvHCCoreCodeR3;
257 aBadTries[i].pvR0 = pVM->vmm.s.pvHCCoreCodeR0;
258 aBadTries[i].HCPhys = pVM->vmm.s.HCPhysCoreCode;
259 i++;
260 pVM->vmm.s.pvHCCoreCodeR0 = NIL_RTR0PTR;
261 pVM->vmm.s.HCPhysCoreCode = NIL_RTHCPHYS;
262 pVM->vmm.s.pvHCCoreCodeR3 = SUPContAlloc2(pVM->vmm.s.cbCoreCode, &pVM->vmm.s.pvHCCoreCodeR0, &pVM->vmm.s.HCPhysCoreCode);
263 if (!pVM->vmm.s.pvHCCoreCodeR3)
264 break;
265 rc = PGMR3MapIntermediate(pVM, pVM->vmm.s.pvHCCoreCodeR0, pVM->vmm.s.HCPhysCoreCode, cbCoreCode);
266 } while ( rc == VERR_PGM_MAPPINGS_FIX_CONFLICT
267 && i < ELEMENTS(aBadTries) - 1);
268
269 /* cleanup */
270 if (VBOX_FAILURE(rc))
271 {
272 aBadTries[i].pvR3 = pVM->vmm.s.pvHCCoreCodeR3;
273 aBadTries[i].pvR0 = pVM->vmm.s.pvHCCoreCodeR0;
274 aBadTries[i].HCPhys = pVM->vmm.s.HCPhysCoreCode;
275 i++;
276 LogRel(("Failed to allocated and map core code: rc=%Vrc\n", rc));
277 }
278 while (i-- > 0)
279 {
280 LogRel(("Core code alloc attempt #%d: pvR3=%p pvR0=%p HCPhys=%VHp\n",
281 i, aBadTries[i].pvR3, aBadTries[i].pvR0, aBadTries[i].HCPhys));
282 SUPContFree(aBadTries[i].pvR3);
283 }
284 }
285 }
286 if (VBOX_SUCCESS(rc))
287 {
288 /*
289 * copy the code.
290 */
291 for (unsigned iSwitcher = 0; iSwitcher < ELEMENTS(s_apSwitchers); iSwitcher++)
292 {
293 PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
294 if (pSwitcher)
295 memcpy((uint8_t *)pVM->vmm.s.pvHCCoreCodeR3 + pVM->vmm.s.aoffSwitchers[iSwitcher],
296 pSwitcher->pvCode, pSwitcher->cbCode);
297 }
298
299 /*
300 * Map the code into the GC address space.
301 */
302 rc = MMR3HyperMapHCPhys(pVM, pVM->vmm.s.pvHCCoreCodeR3, pVM->vmm.s.HCPhysCoreCode, cbCoreCode, "Core Code", &pVM->vmm.s.pvGCCoreCode);
303 if (VBOX_SUCCESS(rc))
304 {
305 MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
306 LogRel(("CoreCode: R3=%VHv R0=%VHv GC=%VGv Phys=%VHp cb=%#x\n",
307 pVM->vmm.s.pvHCCoreCodeR3, pVM->vmm.s.pvHCCoreCodeR0, pVM->vmm.s.pvGCCoreCode, pVM->vmm.s.HCPhysCoreCode, pVM->vmm.s.cbCoreCode));
308
309 /*
310 * Finally, PGM has probably selected a switcher already, but we
311 * need to get the addresses, so we'll reselect it.
312 * This may legally fail, so we ignore the rc.
313 */
314 VMMR3SelectSwitcher(pVM, pVM->vmm.s.enmSwitcher);
315 return rc;
316 }
317
318 /* shit */
319 AssertMsgFailed(("PGMR3Map(,%VGv, %VGp, %#x, 0) failed with rc=%Vrc\n", pVM->vmm.s.pvGCCoreCode, pVM->vmm.s.HCPhysCoreCode, cbCoreCode, rc));
320 SUPContFree(pVM->vmm.s.pvHCCoreCodeR3);
321 }
322 else
323 VMSetError(pVM, rc, RT_SRC_POS,
324 N_("Failed to allocate %d bytes of contiguous memory for the world switcher code."),
325 cbCoreCode);
326
327 pVM->vmm.s.pvHCCoreCodeR3 = NULL;
328 pVM->vmm.s.pvHCCoreCodeR0 = NIL_RTR0PTR;
329 pVM->vmm.s.pvGCCoreCode = 0;
330 return rc;
331}
332
333
334/**
335 * Initializes the VMM.
336 *
337 * @returns VBox status code.
338 * @param pVM The VM to operate on.
339 */
340VMMR3DECL(int) VMMR3Init(PVM pVM)
341{
342 LogFlow(("VMMR3Init\n"));
343
344 /*
345 * Assert alignment, sizes and order.
346 */
347 AssertMsg(pVM->vmm.s.offVM == 0, ("Already initialized!\n"));
348 AssertMsg(sizeof(pVM->vmm.padding) >= sizeof(pVM->vmm.s),
349 ("pVM->vmm.padding is too small! vmm.padding %d while vmm.s is %d\n",
350 sizeof(pVM->vmm.padding), sizeof(pVM->vmm.s)));
351
352 /*
353 * Init basic VM VMM members.
354 */
355 pVM->vmm.s.offVM = RT_OFFSETOF(VM, vmm);
356 int rc = CFGMR3QueryU32(CFGMR3GetRoot(pVM), "YieldEMTInterval", &pVM->vmm.s.cYieldEveryMillies);
357 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
358 pVM->vmm.s.cYieldEveryMillies = 23; /* Value arrived at after experimenting with the grub boot prompt. */
359 //pVM->vmm.s.cYieldEveryMillies = 8; //debugging
360 else
361 AssertMsgRCReturn(rc, ("Configuration error. Failed to query \"YieldEMTInterval\", rc=%Vrc\n", rc), rc);
362
363 /* GC switchers are enabled by default. Turned off by HWACCM. */
364 pVM->vmm.s.fSwitcherDisabled = false;
365
366 /*
367 * Register the saved state data unit.
368 */
369 rc = SSMR3RegisterInternal(pVM, "vmm", 1, VMM_SAVED_STATE_VERSION, VMM_STACK_SIZE + sizeof(RTGCPTR),
370 NULL, vmmR3Save, NULL,
371 NULL, vmmR3Load, NULL);
372 if (VBOX_FAILURE(rc))
373 return rc;
374
375#ifdef VBOX_WITHOUT_IDT_PATCHING
376 /*
377 * Register the Ring-0 VM handle with the session for fast ioctl calls.
378 */
379 rc = SUPSetVMForFastIOCtl(pVM->pVMR0);
380 if (VBOX_FAILURE(rc))
381 return rc;
382#endif
383
384 /*
385 * Init core code.
386 */
387 rc = vmmR3InitCoreCode(pVM);
388 if (VBOX_SUCCESS(rc))
389 {
390 /*
391 * Allocate & init VMM GC stack.
392 * The stack pages are also used by the VMM R0 when VMMR0CallHost is invoked.
393 * (The page protection is modified during R3 init completion.)
394 */
395#ifdef VBOX_STRICT_VMM_STACK
396 rc = MMHyperAlloc(pVM, VMM_STACK_SIZE + PAGE_SIZE + PAGE_SIZE, PAGE_SIZE, MM_TAG_VMM, (void **)&pVM->vmm.s.pbHCStack);
397#else
398 rc = MMHyperAlloc(pVM, VMM_STACK_SIZE, PAGE_SIZE, MM_TAG_VMM, (void **)&pVM->vmm.s.pbHCStack);
399#endif
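        /* In strict builds the two extra pages allocated above bracket the
           stack; they are turned into inaccessible guard pages in
           VMMR3InitFinalize(). */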
400 if (VBOX_SUCCESS(rc))
401 {
402 /* Set HC and GC stack pointers to top of stack. */
403 pVM->vmm.s.CallHostR0JmpBuf.pvSavedStack = (RTR0PTR)pVM->vmm.s.pbHCStack;
404 pVM->vmm.s.pbGCStack = MMHyperHC2GC(pVM, pVM->vmm.s.pbHCStack);
405 pVM->vmm.s.pbGCStackBottom = pVM->vmm.s.pbGCStack + VMM_STACK_SIZE;
406 AssertRelease(pVM->vmm.s.pbGCStack);
407
408 /* Set hypervisor ESP. */
409 CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStack);
410
411 /*
412 * Allocate GC & R0 Logger instances (they are finalized in the relocator).
413 */
414#ifdef LOG_ENABLED
415 PRTLOGGER pLogger = RTLogDefaultInstance();
416 if (pLogger)
417 {
418 pVM->vmm.s.cbLoggerGC = RT_OFFSETOF(RTLOGGERGC, afGroups[pLogger->cGroups]);
419 rc = MMHyperAlloc(pVM, pVM->vmm.s.cbLoggerGC, 0, MM_TAG_VMM, (void **)&pVM->vmm.s.pLoggerHC);
420 if (VBOX_SUCCESS(rc))
421 {
422 pVM->vmm.s.pLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pLoggerHC);
423
424/*
425 * Ring-0 logging isn't 100% safe yet (thread id reuse / process exit cleanup), so
426 * you have to sign up here by adding your defined(DEBUG_<userid>) to the #if.
427 *
428 * If you want to log in non-debug modes, you'll have to remember to change SUPDRVShared.c
429 * to not stub all the log functions.
430 */
431# ifdef DEBUG_sandervl
432 rc = MMHyperAlloc(pVM, RT_OFFSETOF(VMMR0LOGGER, Logger.afGroups[pLogger->cGroups]),
433 0, MM_TAG_VMM, (void **)&pVM->vmm.s.pR0Logger);
434 if (VBOX_SUCCESS(rc))
435 {
436 pVM->vmm.s.pR0Logger->pVM = pVM;
437 //pVM->vmm.s.pR0Logger->fCreated = false;
438 pVM->vmm.s.pR0Logger->cbLogger = RT_OFFSETOF(RTLOGGER, afGroups[pLogger->cGroups]);
439 }
440# endif
441 }
442 }
443#endif /* LOG_ENABLED */
444
445#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
446 /*
447 * Allocate GC Release Logger instances (finalized in the relocator).
448 */
449 if (VBOX_SUCCESS(rc))
450 {
451 PRTLOGGER pRelLogger = RTLogRelDefaultInstance();
452 if (pRelLogger)
453 {
454 pVM->vmm.s.cbRelLoggerGC = RT_OFFSETOF(RTLOGGERGC, afGroups[pRelLogger->cGroups]);
455 rc = MMHyperAlloc(pVM, pVM->vmm.s.cbRelLoggerGC, 0, MM_TAG_VMM, (void **)&pVM->vmm.s.pRelLoggerHC);
456 if (VBOX_SUCCESS(rc))
457 pVM->vmm.s.pRelLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pRelLoggerHC);
458 }
459 }
460#endif /* VBOX_WITH_GC_AND_R0_RELEASE_LOG */
461
462#ifdef VBOX_WITH_NMI
463 /*
464 * Allocate mapping for the host APIC.
465 */
466 if (VBOX_SUCCESS(rc))
467 {
468 rc = MMR3HyperReserve(pVM, PAGE_SIZE, "Host APIC", &pVM->vmm.s.GCPtrApicBase);
469 AssertRC(rc);
470 }
471#endif
472 if (VBOX_SUCCESS(rc))
473 {
474 rc = RTCritSectInit(&pVM->vmm.s.CritSectVMLock);
475 if (VBOX_SUCCESS(rc))
476 {
477 /*
478 * Statistics.
479 */
480 STAM_REG(pVM, &pVM->vmm.s.StatRunGC, STAMTYPE_COUNTER, "/VMM/RunGC", STAMUNIT_OCCURENCES, "Number of context switches.");
481 STAM_REG(pVM, &pVM->vmm.s.StatGCRetNormal, STAMTYPE_COUNTER, "/VMM/GCRet/Normal", STAMUNIT_OCCURENCES, "Number of VINF_SUCCESS returns.");
482 STAM_REG(pVM, &pVM->vmm.s.StatGCRetInterrupt, STAMTYPE_COUNTER, "/VMM/GCRet/Interrupt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT returns.");
483 STAM_REG(pVM, &pVM->vmm.s.StatGCRetInterruptHyper, STAMTYPE_COUNTER, "/VMM/GCRet/InterruptHyper", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT_HYPER returns.");
484 STAM_REG(pVM, &pVM->vmm.s.StatGCRetGuestTrap, STAMTYPE_COUNTER, "/VMM/GCRet/GuestTrap", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_GUEST_TRAP returns.");
485 STAM_REG(pVM, &pVM->vmm.s.StatGCRetRingSwitch, STAMTYPE_COUNTER, "/VMM/GCRet/RingSwitch", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_RING_SWITCH returns.");
486 STAM_REG(pVM, &pVM->vmm.s.StatGCRetRingSwitchInt, STAMTYPE_COUNTER, "/VMM/GCRet/RingSwitchInt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_RING_SWITCH_INT returns.");
487 STAM_REG(pVM, &pVM->vmm.s.StatGCRetExceptionPrivilege, STAMTYPE_COUNTER, "/VMM/GCRet/ExceptionPrivilege", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_EXCEPTION_PRIVILEGED returns.");
488 STAM_REG(pVM, &pVM->vmm.s.StatGCRetStaleSelector, STAMTYPE_COUNTER, "/VMM/GCRet/StaleSelector", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_STALE_SELECTOR returns.");
489 STAM_REG(pVM, &pVM->vmm.s.StatGCRetIRETTrap, STAMTYPE_COUNTER, "/VMM/GCRet/IRETTrap", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_IRET_TRAP returns.");
490 STAM_REG(pVM, &pVM->vmm.s.StatGCRetEmulate, STAMTYPE_COUNTER, "/VMM/GCRet/Emulate", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION returns.");
491 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchEmulate, STAMTYPE_COUNTER, "/VMM/GCRet/PatchEmulate", STAMUNIT_OCCURENCES, "Number of VINF_PATCH_EMULATE_INSTR returns.");
492 STAM_REG(pVM, &pVM->vmm.s.StatGCRetIORead, STAMTYPE_COUNTER, "/VMM/GCRet/IORead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_IOPORT_READ returns.");
493 STAM_REG(pVM, &pVM->vmm.s.StatGCRetIOWrite, STAMTYPE_COUNTER, "/VMM/GCRet/IOWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_IOPORT_WRITE returns.");
494 STAM_REG(pVM, &pVM->vmm.s.StatGCRetIOReadWrite, STAMTYPE_COUNTER, "/VMM/GCRet/IOReadWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_IOPORT_READWRITE returns.");
495 STAM_REG(pVM, &pVM->vmm.s.StatGCRetMMIORead, STAMTYPE_COUNTER, "/VMM/GCRet/MMIORead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_READ returns.");
496 STAM_REG(pVM, &pVM->vmm.s.StatGCRetMMIOWrite, STAMTYPE_COUNTER, "/VMM/GCRet/MMIOWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_WRITE returns.");
497 STAM_REG(pVM, &pVM->vmm.s.StatGCRetMMIOReadWrite, STAMTYPE_COUNTER, "/VMM/GCRet/MMIOReadWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_READ_WRITE returns.");
498 STAM_REG(pVM, &pVM->vmm.s.StatGCRetMMIOPatchRead, STAMTYPE_COUNTER, "/VMM/GCRet/MMIOPatchRead", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_PATCH_READ returns.");
499 STAM_REG(pVM, &pVM->vmm.s.StatGCRetMMIOPatchWrite, STAMTYPE_COUNTER, "/VMM/GCRet/MMIOPatchWrite", STAMUNIT_OCCURENCES, "Number of VINF_IOM_HC_MMIO_PATCH_WRITE returns.");
500 STAM_REG(pVM, &pVM->vmm.s.StatGCRetLDTFault, STAMTYPE_COUNTER, "/VMM/GCRet/LDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_GDT_FAULT returns.");
501 STAM_REG(pVM, &pVM->vmm.s.StatGCRetGDTFault, STAMTYPE_COUNTER, "/VMM/GCRet/GDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_LDT_FAULT returns.");
502 STAM_REG(pVM, &pVM->vmm.s.StatGCRetIDTFault, STAMTYPE_COUNTER, "/VMM/GCRet/IDTFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_IDT_FAULT returns.");
503 STAM_REG(pVM, &pVM->vmm.s.StatGCRetTSSFault, STAMTYPE_COUNTER, "/VMM/GCRet/TSSFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_TSS_FAULT returns.");
504 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPDFault, STAMTYPE_COUNTER, "/VMM/GCRet/PDFault", STAMUNIT_OCCURENCES, "Number of VINF_EM_EXECUTE_INSTRUCTION_PD_FAULT returns.");
505 STAM_REG(pVM, &pVM->vmm.s.StatGCRetCSAMTask, STAMTYPE_COUNTER, "/VMM/GCRet/CSAMTask", STAMUNIT_OCCURENCES, "Number of VINF_CSAM_PENDING_ACTION returns.");
506 STAM_REG(pVM, &pVM->vmm.s.StatGCRetSyncCR3, STAMTYPE_COUNTER, "/VMM/GCRet/SyncCR", STAMUNIT_OCCURENCES, "Number of VINF_PGM_SYNC_CR3 returns.");
507 STAM_REG(pVM, &pVM->vmm.s.StatGCRetMisc, STAMTYPE_COUNTER, "/VMM/GCRet/Misc", STAMUNIT_OCCURENCES, "Number of misc returns.");
508 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchInt3, STAMTYPE_COUNTER, "/VMM/GCRet/PatchInt3", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_INT3 returns.");
509 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchPF, STAMTYPE_COUNTER, "/VMM/GCRet/PatchPF", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_TRAP_PF returns.");
510 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchGP, STAMTYPE_COUNTER, "/VMM/GCRet/PatchGP", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PATCH_TRAP_GP returns.");
511 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPatchIretIRQ, STAMTYPE_COUNTER, "/VMM/GCRet/PatchIret", STAMUNIT_OCCURENCES, "Number of VINF_PATM_PENDING_IRQ_AFTER_IRET returns.");
512 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPageOverflow, STAMTYPE_COUNTER, "/VMM/GCRet/InvlpgOverflow", STAMUNIT_OCCURENCES, "Number of VERR_REM_FLUSHED_PAGES_OVERFLOW returns.");
513 STAM_REG(pVM, &pVM->vmm.s.StatGCRetRescheduleREM, STAMTYPE_COUNTER, "/VMM/GCRet/ScheduleREM", STAMUNIT_OCCURENCES, "Number of VINF_EM_RESCHEDULE_REM returns.");
514 STAM_REG(pVM, &pVM->vmm.s.StatGCRetToR3, STAMTYPE_COUNTER, "/VMM/GCRet/ToR3", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TO_R3 returns.");
515 STAM_REG(pVM, &pVM->vmm.s.StatGCRetTimerPending, STAMTYPE_COUNTER, "/VMM/GCRet/TimerPending", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_TIMER_PENDING returns.");
516 STAM_REG(pVM, &pVM->vmm.s.StatGCRetInterruptPending, STAMTYPE_COUNTER, "/VMM/GCRet/InterruptPending", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_INTERRUPT_PENDING returns.");
517 STAM_REG(pVM, &pVM->vmm.s.StatGCRetCallHost, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/Misc", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
518 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPGMGrowRAM, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/GrowRAM", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
519 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPDMLock, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/PDMLock", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
520 STAM_REG(pVM, &pVM->vmm.s.StatGCRetLogFlush, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/LogFlush", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
521 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPDMQueueFlush, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/QueueFlush", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
522 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPGMPoolGrow, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/PGMPoolGrow",STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
523 STAM_REG(pVM, &pVM->vmm.s.StatGCRetRemReplay, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/REMReplay", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
524 STAM_REG(pVM, &pVM->vmm.s.StatGCRetVMSetError, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/VMSetError", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
525 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPGMLock, STAMTYPE_COUNTER, "/VMM/GCRet/CallHost/PGMLock", STAMUNIT_OCCURENCES, "Number of VINF_VMM_CALL_HOST returns.");
526 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPATMDuplicateFn, STAMTYPE_COUNTER, "/VMM/GCRet/PATMDuplicateFn", STAMUNIT_OCCURENCES, "Number of VINF_PATM_DUPLICATE_FUNCTION returns.");
527 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPGMChangeMode, STAMTYPE_COUNTER, "/VMM/GCRet/PGMChangeMode", STAMUNIT_OCCURENCES, "Number of VINF_PGM_CHANGE_MODE returns.");
528 STAM_REG(pVM, &pVM->vmm.s.StatGCRetEmulHlt, STAMTYPE_COUNTER, "/VMM/GCRet/EmulHlt", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_EMULATE_INSTR_HLT returns.");
529 STAM_REG(pVM, &pVM->vmm.s.StatGCRetPendingRequest, STAMTYPE_COUNTER, "/VMM/GCRet/PendingRequest", STAMUNIT_OCCURENCES, "Number of VINF_EM_PENDING_REQUEST returns.");
530
531 return VINF_SUCCESS;
532 }
533 AssertRC(rc);
534 }
535 }
536 /** @todo: Need failure cleanup. */
537
538 //more todo in here?
539 //if (VBOX_SUCCESS(rc))
540 //{
541 //}
542 //int rc2 = vmmR3TermCoreCode(pVM);
543 //AssertRC(rc2));
544 }
545
546 return rc;
547}
548
549
550/**
551 * Ring-3 init finalizing.
552 *
553 * @returns VBox status code.
554 * @param pVM The VM handle.
555 */
556VMMR3DECL(int) VMMR3InitFinalize(PVM pVM)
557{
558#ifdef VBOX_STRICT_VMM_STACK
559 /*
560 * Two inaccessible guard pages, one on each side of the stack, to catch over/under-flows.
561 */
562 memset(pVM->vmm.s.pbHCStack - PAGE_SIZE, 0xcc, PAGE_SIZE);
563 PGMMapSetPage(pVM, MMHyperHC2GC(pVM, pVM->vmm.s.pbHCStack - PAGE_SIZE), PAGE_SIZE, 0);
564 RTMemProtect(pVM->vmm.s.pbHCStack - PAGE_SIZE, PAGE_SIZE, RTMEM_PROT_NONE);
565
566 memset(pVM->vmm.s.pbHCStack + VMM_STACK_SIZE, 0xcc, PAGE_SIZE);
567 PGMMapSetPage(pVM, MMHyperHC2GC(pVM, pVM->vmm.s.pbHCStack + VMM_STACK_SIZE), PAGE_SIZE, 0);
568 RTMemProtect(pVM->vmm.s.pbHCStack + VMM_STACK_SIZE, PAGE_SIZE, RTMEM_PROT_NONE);
569#endif
570
571 /*
572 * Set page attributes to r/w for stack pages.
573 */
574 int rc = PGMMapSetPage(pVM, pVM->vmm.s.pbGCStack, VMM_STACK_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
575 AssertRC(rc);
576 if (VBOX_SUCCESS(rc))
577 {
578 /*
579 * Create the EMT yield timer.
580 */
581 rc = TMR3TimerCreateInternal(pVM, TMCLOCK_REAL, vmmR3YieldEMT, NULL, "EMT Yielder", &pVM->vmm.s.pYieldTimer);
582 if (VBOX_SUCCESS(rc))
583 rc = TMTimerSetMillies(pVM->vmm.s.pYieldTimer, pVM->vmm.s.cYieldEveryMillies);
584 }
585#ifdef VBOX_WITH_NMI
586 /*
587 * Map the host APIC into GC - this may be host OS specific!
588 */
589 if (VBOX_SUCCESS(rc))
590 rc = PGMMap(pVM, pVM->vmm.s.GCPtrApicBase, 0xfee00000, PAGE_SIZE,
591 X86_PTE_P | X86_PTE_RW | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_A | X86_PTE_D);
592#endif
593 return rc;
594}
595
596
597/**
598 * Initializes the R0 VMM.
599 *
600 * @returns VBox status code.
601 * @param pVM The VM to operate on.
602 */
603VMMR3DECL(int) VMMR3InitR0(PVM pVM)
604{
605 int rc;
606
607 /*
608 * Initialize the ring-0 logger if we haven't done so yet.
609 */
610 if ( pVM->vmm.s.pR0Logger
611 && !pVM->vmm.s.pR0Logger->fCreated)
612 {
613 rc = VMMR3UpdateLoggers(pVM);
614 if (VBOX_FAILURE(rc))
615 return rc;
616 }
617
618 /*
619 * Call Ring-0 entry with init code.
620 */
621 for (;;)
622 {
623#ifdef NO_SUPCALLR0VMM
624 //rc = VERR_GENERAL_FAILURE;
625 rc = VINF_SUCCESS;
626#else
627 rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_VMMR0_INIT, (void *)VBOX_VERSION);
628#endif
629 if ( pVM->vmm.s.pR0Logger
630 && pVM->vmm.s.pR0Logger->Logger.offScratch > 0)
631 RTLogFlushToLogger(&pVM->vmm.s.pR0Logger->Logger, NULL);
632 if (rc != VINF_VMM_CALL_HOST)
633 break;
634 rc = vmmR3ServiceCallHostRequest(pVM);
635 if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
636 break;
637 break; // remove this when we do setjmp for all ring-0 stuff.
638 }
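    /* The loop above implements the call-host protocol: ring-0 bails out with
       VINF_VMM_CALL_HOST whenever it needs a ring-3 service, we handle it in
       vmmR3ServiceCallHostRequest() and then re-enter - except that re-entry
       is short-circuited by the unconditional break until setjmp is used for
       all ring-0 calls (see the comment on the break above). */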
639
640 if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
641 {
642 LogRel(("R0 init failed, rc=%Vra\n", rc));
643 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
644 rc = VERR_INTERNAL_ERROR;
645 }
646 return rc;
647}
648
649
650/**
651 * Initializes the GC VMM.
652 *
653 * @returns VBox status code.
654 * @param pVM The VM to operate on.
655 */
656VMMR3DECL(int) VMMR3InitGC(PVM pVM)
657{
658 /* In VMX mode, there's no need to init GC. */
659 if (pVM->vmm.s.fSwitcherDisabled)
660 return VINF_SUCCESS;
661
662 /*
663 * Call VMMGCInit():
664 * -# resolve the address.
665 * -# setup stackframe and EIP to use the trampoline.
666 * -# do a generic hypervisor call.
667 */
668 RTGCPTR GCPtrEP;
669 int rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "VMMGCEntry", &GCPtrEP);
670 if (VBOX_SUCCESS(rc))
671 {
672 CPUMHyperSetCtxCore(pVM, NULL);
673 CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStackBottom); /* Clear the stack. */
674 CPUMPushHyper(pVM, VBOX_VERSION); /* Param 2: Version argument. */
675 CPUMPushHyper(pVM, VMMGC_DO_VMMGC_INIT); /* Param 1: Operation. */
676 CPUMPushHyper(pVM, pVM->pVMGC); /* Param 0: pVM */
677 CPUMPushHyper(pVM, 3 * sizeof(RTGCPTR)); /* trampoline param: stacksize. */
678 CPUMPushHyper(pVM, GCPtrEP); /* Call EIP. */
679 CPUMSetHyperEIP(pVM, pVM->vmm.s.pfnGCCallTrampoline);
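        /* At this point the hypervisor stack holds, starting at the new ESP:
           GCPtrEP (the address the trampoline calls), the argument area size
           (3 * sizeof(RTGCPTR)), and the three VMMGCEntry arguments pVM,
           operation and version - what the pushes above set up, in reverse. */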
680
681 for (;;)
682 {
683#ifdef NO_SUPCALLR0VMM
684 //rc = VERR_GENERAL_FAILURE;
685 rc = VINF_SUCCESS;
686#else
687 rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_CALL_HYPERVISOR, NULL);
688#endif
689#ifdef LOG_ENABLED
690 PRTLOGGERGC pLogger = pVM->vmm.s.pLoggerHC;
691 if ( pLogger
692 && pLogger->offScratch > 0)
693 RTLogFlushGC(NULL, pLogger);
694#endif
695#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
696 PRTLOGGERGC pRelLogger = pVM->vmm.s.pRelLoggerHC;
697 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
698 RTLogFlushGC(RTLogRelDefaultInstance(), pRelLogger);
699#endif
700 if (rc != VINF_VMM_CALL_HOST)
701 break;
702 rc = vmmR3ServiceCallHostRequest(pVM);
703 if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
704 break;
705 }
706
707 if (VBOX_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
708 {
709 VMMR3FatalDump(pVM, rc);
710 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
711 rc = VERR_INTERNAL_ERROR;
712 }
713 AssertRC(rc);
714 }
715 return rc;
716}
717
718
719/**
720 * Terminate the VMM bits.
721 *
722 * @returns VINF_SUCCESS.
723 * @param pVM The VM handle.
724 */
725VMMR3DECL(int) VMMR3Term(PVM pVM)
726{
727 /** @todo must call ring-0 so the logger thread instance can be properly removed. */
728
729#ifdef VBOX_STRICT_VMM_STACK
730 /*
731 * Make the two stack guard pages present again.
732 */
733 RTMemProtect(pVM->vmm.s.pbHCStack - PAGE_SIZE, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
734 RTMemProtect(pVM->vmm.s.pbHCStack + VMM_STACK_SIZE, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
735#endif
736 return VINF_SUCCESS;
737}
738
739
740/**
741 * Applies relocations to data and code managed by this
742 * component. This function will be called at init and
743 * whenever the VMM needs to relocate itself inside the GC.
744 *
745 * The VMM will need to apply relocations to the core code.
746 *
747 * @param pVM The VM handle.
748 * @param offDelta The relocation delta.
749 */
750VMMR3DECL(void) VMMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
751{
752 LogFlow(("VMMR3Relocate: offDelta=%VGv\n", offDelta));
753
754 /*
755 * Recalc the GC address.
756 */
757 pVM->vmm.s.pvGCCoreCode = MMHyperHC2GC(pVM, pVM->vmm.s.pvHCCoreCodeR3);
758
759 /*
760 * The stack.
761 */
762 CPUMSetHyperESP(pVM, CPUMGetHyperESP(pVM) + offDelta);
763 pVM->vmm.s.pbGCStack = MMHyperHC2GC(pVM, pVM->vmm.s.pbHCStack);
764 pVM->vmm.s.pbGCStackBottom = pVM->vmm.s.pbGCStack + VMM_STACK_SIZE;
765
766 /*
767 * All the switchers.
768 */
769 for (unsigned iSwitcher = 0; iSwitcher < ELEMENTS(s_apSwitchers); iSwitcher++)
770 {
771 PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
772 if (pSwitcher && pSwitcher->pfnRelocate)
773 {
774 unsigned off = pVM->vmm.s.aoffSwitchers[iSwitcher];
775 pSwitcher->pfnRelocate(pVM,
776 pSwitcher,
777 (uint8_t *)pVM->vmm.s.pvHCCoreCodeR0 + off,
778 (uint8_t *)pVM->vmm.s.pvHCCoreCodeR3 + off,
779 pVM->vmm.s.pvGCCoreCode + off,
780 pVM->vmm.s.HCPhysCoreCode + off);
781 }
782 }
783
784 /*
785 * Recalc the GC address for the current switcher.
786 */
787 PVMMSWITCHERDEF pSwitcher = s_apSwitchers[pVM->vmm.s.enmSwitcher];
788 RTGCPTR GCPtr = pVM->vmm.s.pvGCCoreCode + pVM->vmm.s.aoffSwitchers[pVM->vmm.s.enmSwitcher];
789 pVM->vmm.s.pfnGCGuestToHost = GCPtr + pSwitcher->offGCGuestToHost;
790 pVM->vmm.s.pfnGCCallTrampoline = GCPtr + pSwitcher->offGCCallTrampoline;
791 pVM->pfnVMMGCGuestToHostAsm = GCPtr + pSwitcher->offGCGuestToHostAsm;
792 pVM->pfnVMMGCGuestToHostAsmHyperCtx = GCPtr + pSwitcher->offGCGuestToHostAsmHyperCtx;
793 pVM->pfnVMMGCGuestToHostAsmGuestCtx = GCPtr + pSwitcher->offGCGuestToHostAsmGuestCtx;
794
795 /*
796 * Get other GC entry points.
797 */
798 int rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "CPUMGCResumeGuest", &pVM->vmm.s.pfnCPUMGCResumeGuest);
799 AssertReleaseMsgRC(rc, ("CPUMGCResumeGuest not found! rc=%Vra\n", rc));
800
801 rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "CPUMGCResumeGuestV86", &pVM->vmm.s.pfnCPUMGCResumeGuestV86);
802 AssertReleaseMsgRC(rc, ("CPUMGCResumeGuestV86 not found! rc=%Vra\n", rc));
803
804 /*
805 * Update the logger.
806 */
807 VMMR3UpdateLoggers(pVM);
808}
809
810
811/**
812 * Updates the settings for the GC and R0 loggers.
813 *
814 * @returns VBox status code.
815 * @param pVM The VM handle.
816 */
817VMMR3DECL(int) VMMR3UpdateLoggers(PVM pVM)
818{
819 /*
820 * Simply clone the logger instance (for GC).
821 */
822 int rc = VINF_SUCCESS;
823 RTGCPTR GCPtrLoggerFlush = 0;
824
825 if (pVM->vmm.s.pLoggerHC
826#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
827 || pVM->vmm.s.pRelLoggerHC
828#endif
829 )
830 {
831 rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCLoggerFlush", &GCPtrLoggerFlush);
832 AssertReleaseMsgRC(rc, ("vmmGCLoggerFlush not found! rc=%Vra\n", rc));
833 }
834
835 if (pVM->vmm.s.pLoggerHC)
836 {
837 RTGCPTR GCPtrLoggerWrapper = 0;
838 rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCLoggerWrapper", &GCPtrLoggerWrapper);
839 AssertReleaseMsgRC(rc, ("vmmGCLoggerWrapper not found! rc=%Vra\n", rc));
840 pVM->vmm.s.pLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pLoggerHC);
841 rc = RTLogCloneGC(NULL /* default */, pVM->vmm.s.pLoggerHC, pVM->vmm.s.cbLoggerGC,
842 GCPtrLoggerWrapper, GCPtrLoggerFlush, RTLOGFLAGS_BUFFERED);
843 AssertReleaseMsgRC(rc, ("RTLogCloneGC failed! rc=%Vra\n", rc));
844 }
845
846#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
847 if (pVM->vmm.s.pRelLoggerHC)
848 {
849 RTGCPTR GCPtrLoggerWrapper = 0;
850 rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCRelLoggerWrapper", &GCPtrLoggerWrapper);
851 AssertReleaseMsgRC(rc, ("vmmGCRelLoggerWrapper not found! rc=%Vra\n", rc));
852 pVM->vmm.s.pRelLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pRelLoggerHC);
853 rc = RTLogCloneGC(RTLogRelDefaultInstance(), pVM->vmm.s.pRelLoggerHC, pVM->vmm.s.cbRelLoggerGC,
854 GCPtrLoggerWrapper, GCPtrLoggerFlush, RTLOGFLAGS_BUFFERED);
855 AssertReleaseMsgRC(rc, ("RTLogCloneGC failed! rc=%Vra\n", rc));
856 }
857#endif /* VBOX_WITH_GC_AND_R0_RELEASE_LOG */
858
859 /*
860 * For the ring-0 EMT logger, we use a per-thread logger
861 * instance in ring-0. Only initialize it once.
862 */
863 PVMMR0LOGGER pR0Logger = pVM->vmm.s.pR0Logger;
864 if (pR0Logger)
865 {
866 if (!pR0Logger->fCreated)
867 {
868 RTHCPTR pfnLoggerWrapper = NULL;
869 rc = PDMR3GetSymbolR0(pVM, VMMR0_MAIN_MODULE_NAME, "vmmR0LoggerWrapper", &pfnLoggerWrapper);
870 AssertReleaseMsgRCReturn(rc, ("vmmR0LoggerWrapper not found! rc=%Vra\n", rc), rc);
871
872 RTHCPTR pfnLoggerFlush = NULL;
873 rc = PDMR3GetSymbolR0(pVM, VMMR0_MAIN_MODULE_NAME, "vmmR0LoggerFlush", &pfnLoggerFlush);
874 AssertReleaseMsgRCReturn(rc, ("vmmR0LoggerFlush not found! rc=%Vra\n", rc), rc);
875
876 rc = RTLogCreateForR0(&pR0Logger->Logger, pR0Logger->cbLogger,
877 *(PFNRTLOGGER *)&pfnLoggerWrapper, *(PFNRTLOGFLUSH *)&pfnLoggerFlush,
878 RTLOGFLAGS_BUFFERED, RTLOGDEST_DUMMY);
879 AssertReleaseMsgRCReturn(rc, ("RTLogCreateForR0 failed! rc=%Vra\n", rc), rc);
880 pR0Logger->fCreated = true;
881 }
882
883 rc = RTLogCopyGroupsAndFlags(&pR0Logger->Logger, NULL /* default */, RTLOGFLAGS_BUFFERED, 0);
884 AssertRC(rc);
885 }
886
887 return rc;
888}
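/* Note: VMMR3Relocate() re-runs this, so the GC logger clone and the resolved
   wrapper/flush addresses stay valid whenever the hypervisor area moves. */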
889
890
891/**
892 * Generic switch code relocator.
893 *
894 * @param pVM The VM handle.
895 * @param pSwitcher The switcher definition.
896 * @param pu8CodeR0 Pointer to the core code block for the switcher, ring-0 mapping.
897 * @param pu8CodeR3 Pointer to the core code block for the switcher, ring-3 mapping.
898 * @param GCPtrCode The guest context address corresponding to pu8Code.
899 * @param u32IDCode The identity mapped (ID) address corresponding to pu8Code.
900 * @param SelCS The hypervisor CS selector.
901 * @param SelDS The hypervisor DS selector.
902 * @param SelTSS The hypervisor TSS selector.
903 * @param GCPtrGDT The GC address of the hypervisor GDT.
904 * @param SelCS64 The 64-bit mode hypervisor CS selector.
905 */
906static void vmmR3SwitcherGenericRelocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode,
907 RTSEL SelCS, RTSEL SelDS, RTSEL SelTSS, RTGCPTR GCPtrGDT, RTSEL SelCS64)
908{
909 union
910 {
911 const uint8_t *pu8;
912 const uint16_t *pu16;
913 const uint32_t *pu32;
914 const uint64_t *pu64;
915 const void *pv;
916 uintptr_t u;
917 } u;
918 u.pv = pSwitcher->pvFixups;
919
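    /*
     * The fixup stream is a byte-packed sequence of records; as can be
     * inferred from the parsing below, each record looks roughly like:
     *      uint8_t  u8Type;    - the FIX_* type, FIX_THE_END ends the stream.
     *      uint32_t offSrc;    - offset into the switcher code to patch.
     *      uint32_t offTrg;    - target offset / argument; present for most,
     *                            but not all, types (FIX_HYPER_CS and friends
     *                            take no argument, for instance).
     */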
920 /*
921 * Process fixups.
922 */
923 uint8_t u8;
924 while ((u8 = *u.pu8++) != FIX_THE_END)
925 {
926 /*
927 * Get the source (where to write the fixup).
928 */
929 uint32_t offSrc = *u.pu32++;
930 Assert(offSrc < pSwitcher->cbCode);
931 union
932 {
933 uint8_t *pu8;
934 uint16_t *pu16;
935 uint32_t *pu32;
936 uint64_t *pu64;
937 uintptr_t u;
938 } uSrc;
939 uSrc.pu8 = pu8CodeR3 + offSrc;
940
941 /* The fixup target and method depend on the type. */
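        /* For the near-relative fixups the patched dword is a plain x86 rel32
           displacement: disp = target - (source + 4), 'source' being the
           address of the 4-byte displacement field itself. */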
942 switch (u8)
943 {
944 /*
945 * 32-bit relative, source in HC and target in GC.
946 */
947 case FIX_HC_2_GC_NEAR_REL:
948 {
949 Assert(offSrc - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offSrc - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
950 uint32_t offTrg = *u.pu32++;
951 Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
952 *uSrc.pu32 = (uint32_t)((GCPtrCode + offTrg) - (uSrc.u + 4));
953 break;
954 }
955
956 /*
957 * 32-bit relative, source in HC and target in ID.
958 */
959 case FIX_HC_2_ID_NEAR_REL:
960 {
961 Assert(offSrc - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offSrc - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
962 uint32_t offTrg = *u.pu32++;
963 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
964 *uSrc.pu32 = (uint32_t)((u32IDCode + offTrg) - (uSrc.u + 4));
965 break;
966 }
967
968 /*
969 * 32-bit relative, source in GC and target in HC.
970 */
971 case FIX_GC_2_HC_NEAR_REL:
972 {
973 Assert(offSrc - pSwitcher->offGCCode < pSwitcher->cbGCCode);
974 uint32_t offTrg = *u.pu32++;
975 Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
976 *uSrc.pu32 = (uint32_t)(((uintptr_t)pu8CodeR0 + offTrg) - (GCPtrCode + offSrc + 4));
977 break;
978 }
979
980 /*
981 * 32-bit relative, source in GC and target in ID.
982 */
983 case FIX_GC_2_ID_NEAR_REL:
984 {
985 Assert(offSrc - pSwitcher->offGCCode < pSwitcher->cbGCCode);
986 uint32_t offTrg = *u.pu32++;
987 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
988 *uSrc.pu32 = (uint32_t)((u32IDCode + offTrg) - (GCPtrCode + offSrc + 4));
989 break;
990 }
991
992 /*
993 * 32-bit relative, source in ID and target in HC.
994 */
995 case FIX_ID_2_HC_NEAR_REL:
996 {
997 Assert(offSrc - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offSrc - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
998 uint32_t offTrg = *u.pu32++;
999 Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
1000 *uSrc.pu32 = (uint32_t)(((uintptr_t)pu8CodeR0 + offTrg) - (u32IDCode + offSrc + 4));
1001 break;
1002 }
1003
1004 /*
1005 * 32-bit relative, source in ID and target in GC.
1006 */
1007 case FIX_ID_2_GC_NEAR_REL:
1008 {
1009 Assert(offSrc - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offSrc - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
1010 uint32_t offTrg = *u.pu32++;
1011 Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
1012 *uSrc.pu32 = (uint32_t)((GCPtrCode + offTrg) - (u32IDCode + offSrc + 4));
1013 break;
1014 }
1015
1016 /*
1017 * 16:32 far jump, target in GC.
1018 */
1019 case FIX_GC_FAR32:
1020 {
1021 uint32_t offTrg = *u.pu32++;
1022 Assert(offTrg - pSwitcher->offGCCode < pSwitcher->cbGCCode);
1023 *uSrc.pu32++ = (uint32_t)(GCPtrCode + offTrg);
1024 *uSrc.pu16++ = SelCS;
1025 break;
1026 }
1027
1028 /*
1029 * Make 32-bit GC pointer given CPUM offset.
1030 */
1031 case FIX_GC_CPUM_OFF:
1032 {
1033 uint32_t offCPUM = *u.pu32++;
1034 Assert(offCPUM < sizeof(pVM->cpum));
1035 *uSrc.pu32 = (uint32_t)(VM_GUEST_ADDR(pVM, &pVM->cpum) + offCPUM);
1036 break;
1037 }
1038
1039 /*
1040 * Make 32-bit GC pointer given VM offset.
1041 */
1042 case FIX_GC_VM_OFF:
1043 {
1044 uint32_t offVM = *u.pu32++;
1045 Assert(offVM < sizeof(VM));
1046 *uSrc.pu32 = (uint32_t)(VM_GUEST_ADDR(pVM, pVM) + offVM);
1047 break;
1048 }
1049
1050 /*
1051 * Make 32-bit HC pointer given CPUM offset.
1052 */
1053 case FIX_HC_CPUM_OFF:
1054 {
1055 uint32_t offCPUM = *u.pu32++;
1056 Assert(offCPUM < sizeof(pVM->cpum));
1057 *uSrc.pu32 = (uint32_t)pVM->pVMR0 + RT_OFFSETOF(VM, cpum) + offCPUM;
1058 break;
1059 }
1060
1061 /*
1062 * Make 32-bit R0 pointer given VM offset.
1063 */
1064 case FIX_HC_VM_OFF:
1065 {
1066 uint32_t offVM = *u.pu32++;
1067 Assert(offVM < sizeof(VM));
1068 *uSrc.pu32 = (uint32_t)pVM->pVMR0 + offVM;
1069 break;
1070 }
1071
1072 /*
1073 * Store the 32-Bit CR3 (32-bit) for the intermediate memory context.
1074 */
1075 case FIX_INTER_32BIT_CR3:
1076 {
1077
1078 *uSrc.pu32 = PGMGetInter32BitCR3(pVM);
1079 break;
1080 }
1081
1082 /*
1083 * Store the PAE CR3 (32-bit) for the intermediate memory context.
1084 */
1085 case FIX_INTER_PAE_CR3:
1086 {
1087
1088 *uSrc.pu32 = PGMGetInterPaeCR3(pVM);
1089 break;
1090 }
1091
1092 /*
1093 * Store the AMD64 CR3 (32-bit) for the intermediate memory context.
1094 */
1095 case FIX_INTER_AMD64_CR3:
1096 {
1097
1098 *uSrc.pu32 = PGMGetInterAmd64CR3(pVM);
1099 break;
1100 }
1101
1102 /*
1103 * Store the 32-Bit CR3 (32-bit) for the hypervisor (shadow) memory context.
1104 */
1105 case FIX_HYPER_32BIT_CR3:
1106 {
1107
1108 *uSrc.pu32 = PGMGetHyper32BitCR3(pVM);
1109 break;
1110 }
1111
1112 /*
1113 * Store the PAE CR3 (32-bit) for the hypervisor (shadow) memory context.
1114 */
1115 case FIX_HYPER_PAE_CR3:
1116 {
1117
1118 *uSrc.pu32 = PGMGetHyperPaeCR3(pVM);
1119 break;
1120 }
1121
1122 /*
1123 * Store the AMD64 CR3 (32-bit) for the hypervisor (shadow) memory context.
1124 */
1125 case FIX_HYPER_AMD64_CR3:
1126 {
1127
1128 *uSrc.pu32 = PGMGetHyperAmd64CR3(pVM);
1129 break;
1130 }
1131
1132 /*
1133 * Store Hypervisor CS (16-bit).
1134 */
1135 case FIX_HYPER_CS:
1136 {
1137 *uSrc.pu16 = SelCS;
1138 break;
1139 }
1140
1141 /*
1142 * Store Hypervisor DS (16-bit).
1143 */
1144 case FIX_HYPER_DS:
1145 {
1146 *uSrc.pu16 = SelDS;
1147 break;
1148 }
1149
1150 /*
1151 * Store Hypervisor TSS (16-bit).
1152 */
1153 case FIX_HYPER_TSS:
1154 {
1155 *uSrc.pu16 = SelTSS;
1156 break;
1157 }
1158
1159 /*
1160 * Store the 32-bit GC address of the 2nd dword of the TSS descriptor (in the GDT).
1161 */
1162 case FIX_GC_TSS_GDTE_DW2:
1163 {
1164 RTGCPTR GCPtr = GCPtrGDT + (SelTSS & ~7) + 4;
1165 *uSrc.pu32 = (uint32_t)GCPtr;
1166 break;
1167 }
1168
1169
1170 ///@todo case FIX_CR4_MASK:
1171 ///@todo case FIX_CR4_OSFSXR:
1172
1173 /*
1174 * Insert relative jump to the specified target if FXSAVE/FXRSTOR isn't supported by the CPU.
1175 */
1176 case FIX_NO_FXSAVE_JMP:
1177 {
1178 uint32_t offTrg = *u.pu32++;
1179 Assert(offTrg < pSwitcher->cbCode);
1180 if (!CPUMSupportsFXSR(pVM))
1181 {
1182 *uSrc.pu8++ = 0xe9; /* jmp rel32 */
1183 *uSrc.pu32++ = offTrg - (offSrc + 5);
1184 }
1185 else
1186 {
1187 *uSrc.pu8++ = *((uint8_t *)pSwitcher->pvCode + offSrc);
1188 *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
1189 }
1190 break;
1191 }
1192
1193 /*
1194 * Insert relative jump to the specified target if SYSENTER isn't used by the host.
1195 */
1196 case FIX_NO_SYSENTER_JMP:
1197 {
1198 uint32_t offTrg = *u.pu32++;
1199 Assert(offTrg < pSwitcher->cbCode);
1200 if (!CPUMIsHostUsingSysEnter(pVM))
1201 {
1202 *uSrc.pu8++ = 0xe9; /* jmp rel32 */
1203 *uSrc.pu32++ = offTrg - (offSrc + 5);
1204 }
1205 else
1206 {
1207 *uSrc.pu8++ = *((uint8_t *)pSwitcher->pvCode + offSrc);
1208 *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
1209 }
1210 break;
1211 }
1212
1213 /*
1214 * Insert relative jump to the specified target if SYSCALL isn't used by the host.
1215 */
1216 case FIX_NO_SYSCALL_JMP:
1217 {
1218 uint32_t offTrg = *u.pu32++;
1219 Assert(offTrg < pSwitcher->cbCode);
1220 if (!CPUMIsHostUsingSysCall(pVM))
1221 {
1222 *uSrc.pu8++ = 0xe9; /* jmp rel32 */
1223 *uSrc.pu32++ = offTrg - (offSrc + 5);
1224 }
1225 else
1226 {
1227 *uSrc.pu8++ = *((uint8_t *)pSwitcher->pvCode + offSrc);
1228 *uSrc.pu32++ = *(uint32_t *)((uint8_t *)pSwitcher->pvCode + offSrc + 1);
1229 }
1230 break;
1231 }
1232
1233#ifdef __AMD64__
1234 /*
1235 * 64-bit HC pointer fixup to (HC) target within the code (32-bit offset).
1236 */
1237 case FIX_HC_64BIT:
1238 {
1239 uint32_t offTrg = *u.pu32++;
1240 Assert(offSrc < pSwitcher->cbCode);
1241 Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
1242 *uSrc.pu64 = (uintptr_t)pu8CodeR0 + offTrg;
1243 break;
1244 }
1245
1246 /*
1247 * 64-bit HC pointer to the CPUM instance data (no argument).
1248 */
1249 case FIX_HC_64BIT_CPUM:
1250 {
1251 Assert(offSrc < pSwitcher->cbCode);
1252 *uSrc.pu64 = pVM->pVMR0 + RT_OFFSETOF(VM, cpum);
1253 break;
1254 }
1255#endif
1256
1257 /*
1258 * 32-bit ID pointer to (ID) target within the code (32-bit offset).
1259 */
1260 case FIX_ID_32BIT:
1261 {
1262 uint32_t offTrg = *u.pu32++;
1263 Assert(offSrc < pSwitcher->cbCode);
1264 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
1265 *uSrc.pu32 = u32IDCode + offTrg;
1266 break;
1267 }
1268
1269 /*
1270 * 64-bit ID pointer to (ID) target within the code (32-bit offset).
1271 */
1272 case FIX_ID_64BIT:
1273 {
1274 uint32_t offTrg = *u.pu32++;
1275 Assert(offSrc < pSwitcher->cbCode);
1276 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
1277 *uSrc.pu64 = u32IDCode + offTrg;
1278 break;
1279 }
1280
1281 /*
1282 * Far 16:32 ID pointer to 64-bit mode (ID) target within the code (32-bit offset).
1283 */
1284 case FIX_ID_FAR32_TO_64BIT_MODE:
1285 {
1286 uint32_t offTrg = *u.pu32++;
1287 Assert(offSrc < pSwitcher->cbCode);
1288 Assert(offTrg - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0 || offTrg - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1);
1289 *uSrc.pu32++ = u32IDCode + offTrg;
1290 *uSrc.pu16 = SelCS64;
1291 AssertRelease(SelCS64);
1292 break;
1293 }
1294
1295#ifdef VBOX_WITH_NMI
1296 /*
1297 * 32-bit address to the APIC base.
1298 */
1299 case FIX_GC_APIC_BASE_32BIT:
1300 {
1301 *uSrc.pu32 = pVM->vmm.s.GCPtrApicBase;
1302 break;
1303 }
1304#endif
1305
1306 default:
1307 AssertReleaseMsgFailed(("Unknown fixup %d in switcher %s\n", u8, pSwitcher->pszDesc));
1308 break;
1309 }
1310 }
1311
1312#ifdef LOG_ENABLED
1313 /*
1314 * If Log2 is enabled, disassemble the switcher code.
1315 *
1316 * The switcher code has 1-2 HC parts, 1 GC part and 0-2 ID parts.
1317 */
1318 if (LogIs2Enabled())
1319 {
1320 RTLogPrintf("*** Disassembly of switcher %d '%s' %#x bytes ***\n"
1321 " pu8CodeR0 = %p\n"
1322 " pu8CodeR3 = %p\n"
1323 " GCPtrCode = %VGv\n"
1324 " u32IDCode = %08x\n"
1325 " pVMGC = %VGv\n"
1326 " pCPUMGC = %VGv\n"
1327 " pVMHC = %p\n"
1328 " pCPUMHC = %p\n"
1329 " GCPtrGDT = %VGv\n"
1330 " InterCR3s = %08x, %08x, %08x (32-Bit, PAE, AMD64)\n"
1331 " HyperCR3s = %08x, %08x, %08x (32-Bit, PAE, AMD64)\n"
1332 " SelCS = %04x\n"
1333 " SelDS = %04x\n"
1334 " SelCS64 = %04x\n"
1335 " SelTSS = %04x\n",
1336 pSwitcher->enmType, pSwitcher->pszDesc, pSwitcher->cbCode,
1337 pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode, VM_GUEST_ADDR(pVM, pVM),
1338 VM_GUEST_ADDR(pVM, &pVM->cpum), pVM, &pVM->cpum,
1339 GCPtrGDT,
1340 PGMGetInter32BitCR3(pVM), PGMGetInterPaeCR3(pVM), PGMGetInterAmd64CR3(pVM),
1341 PGMGetHyper32BitCR3(pVM), PGMGetHyperPaeCR3(pVM), PGMGetHyperAmd64CR3(pVM),
1342 SelCS, SelDS, SelCS64, SelTSS);
1343
1344 uint32_t offCode = 0;
1345 while (offCode < pSwitcher->cbCode)
1346 {
1347 /*
1348 * Figure out where this is.
1349 */
1350 const char *pszDesc = NULL;
1351 RTUINTPTR uBase;
1352 uint32_t cbCode;
1353 if (offCode - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0)
1354 {
1355 pszDesc = "HCCode0";
1356 uBase = (RTUINTPTR)pu8CodeR0;
1357 offCode = pSwitcher->offHCCode0;
1358 cbCode = pSwitcher->cbHCCode0;
1359 }
1360 else if (offCode - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1)
1361 {
1362 pszDesc = "HCCode1";
1363 uBase = (RTUINTPTR)pu8CodeR0;
1364 offCode = pSwitcher->offHCCode1;
1365 cbCode = pSwitcher->cbHCCode1;
1366 }
1367 else if (offCode - pSwitcher->offGCCode < pSwitcher->cbGCCode)
1368 {
1369 pszDesc = "GCCode";
1370 uBase = GCPtrCode;
1371 offCode = pSwitcher->offGCCode;
1372 cbCode = pSwitcher->cbGCCode;
1373 }
1374 else if (offCode - pSwitcher->offIDCode0 < pSwitcher->cbIDCode0)
1375 {
1376 pszDesc = "IDCode0";
1377 uBase = u32IDCode;
1378 offCode = pSwitcher->offIDCode0;
1379 cbCode = pSwitcher->cbIDCode0;
1380 }
1381 else if (offCode - pSwitcher->offIDCode1 < pSwitcher->cbIDCode1)
1382 {
1383 pszDesc = "IDCode1";
1384 uBase = u32IDCode;
1385 offCode = pSwitcher->offIDCode1;
1386 cbCode = pSwitcher->cbIDCode1;
1387 }
1388 else
1389 {
1390 RTLogPrintf(" %04x: %02x '%c' (nowhere)\n",
1391 offCode, pu8CodeR3[offCode], isprint(pu8CodeR3[offCode]) ? pu8CodeR3[offCode] : ' ');
1392 offCode++;
1393 continue;
1394 }
1395
1396 /*
1397 * Disassemble it.
1398 */
1399 RTLogPrintf(" %s: offCode=%#x cbCode=%#x\n", pszDesc, offCode, cbCode);
1400 DISCPUSTATE Cpu = {0};
1401 Cpu.mode = CPUMODE_32BIT;
1402 while (cbCode > 0)
1403 {
1404 /* try label it */
1405 if (pSwitcher->offR0HostToGuest == offCode)
1406 RTLogPrintf(" *R0HostToGuest:\n");
1407 if (pSwitcher->offGCGuestToHost == offCode)
1408 RTLogPrintf(" *GCGuestToHost:\n");
1409 if (pSwitcher->offGCCallTrampoline == offCode)
1410 RTLogPrintf(" *GCCallTrampoline:\n");
1411 if (pSwitcher->offGCGuestToHostAsm == offCode)
1412 RTLogPrintf(" *GCGuestToHostAsm:\n");
1413 if (pSwitcher->offGCGuestToHostAsmHyperCtx == offCode)
1414 RTLogPrintf(" *GCGuestToHostAsmHyperCtx:\n");
1415 if (pSwitcher->offGCGuestToHostAsmGuestCtx == offCode)
1416 RTLogPrintf(" *GCGuestToHostAsmGuestCtx:\n");
1417
1418 /* disas */
1419 uint32_t cbInstr = 0;
1420 char szDisas[256];
1421 if (DISInstr(&Cpu, (RTUINTPTR)pu8CodeR3 + offCode, uBase - (RTUINTPTR)pu8CodeR3, &cbInstr, szDisas))
1422 RTLogPrintf(" %04x: %s", offCode, szDisas); //for whatever reason szDisas includes '\n'.
1423 else
1424 {
1425 RTLogPrintf(" %04x: %02x '%c'\n",
1426 offCode, pu8CodeR3[offCode], isprint(pu8CodeR3[offCode]) ? pu8CodeR3[offCode] : ' ');
1427 cbInstr = 1;
1428 }
1429 offCode += cbInstr;
1430 cbCode -= RT_MIN(cbInstr, cbCode);
1431 }
1432 }
1433 }
1434#endif
1435}
1436
1437
1438/**
1439 * Relocator for the 32-Bit to 32-Bit world switcher.
1440 */
1441DECLCALLBACK(void) vmmR3Switcher32BitTo32Bit_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
1442{
1443 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode,
1444 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
1445}
1446
1447
1448/**
1449 * Relocator for the 32-Bit to PAE world switcher.
1450 */
1451DECLCALLBACK(void) vmmR3Switcher32BitToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
1452{
1453 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode,
1454 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
1455}
1456
1457
1458/**
1459 * Relocator for the PAE to 32-Bit world switcher.
1460 */
1461DECLCALLBACK(void) vmmR3SwitcherPAETo32Bit_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
1462{
1463 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode,
1464 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
1465}
1466
1467
1468/**
1469 * Relocator for the PAE to PAE world switcher.
1470 */
1471DECLCALLBACK(void) vmmR3SwitcherPAEToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
1472{
1473 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode,
1474 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), 0);
1475}
1476
1477
1478/**
1479 * Relocator for the AMD64 to PAE world switcher.
1480 */
1481DECLCALLBACK(void) vmmR3SwitcherAMD64ToPAE_Relocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pu8CodeR0, uint8_t *pu8CodeR3, RTGCPTR GCPtrCode, uint32_t u32IDCode)
1482{
1483 vmmR3SwitcherGenericRelocate(pVM, pSwitcher, pu8CodeR0, pu8CodeR3, GCPtrCode, u32IDCode,
1484 SELMGetHyperCS(pVM), SELMGetHyperDS(pVM), SELMGetHyperTSS(pVM), SELMGetHyperGDT(pVM), SELMGetHyperCS64(pVM));
1485}
1486
1487
1488/**
1489 * Gets the pointer to g_szRTAssertMsg1 in GC.
1490 * @returns Pointer to VMMGC::g_szRTAssertMsg1.
1491 * Returns NULL if not present.
1492 * @param pVM The VM handle.
1493 */
1494VMMR3DECL(const char *) VMMR3GetGCAssertMsg1(PVM pVM)
1495{
1496 RTGCPTR GCPtr;
1497 int rc = PDMR3GetSymbolGC(pVM, NULL, "g_szRTAssertMsg1", &GCPtr);
1498 if (VBOX_SUCCESS(rc))
1499 return (const char *)MMHyperGC2HC(pVM, GCPtr);
1500 return NULL;
1501}
1502
1503
1504/**
1505 * Gets the pointer to g_szRTAssertMsg2 in GC.
1506 * @returns Pointer to VMMGC::g_szRTAssertMsg2.
1507 * Returns NULL if not present.
1508 * @param pVM The VM handle.
1509 */
1510VMMR3DECL(const char *) VMMR3GetGCAssertMsg2(PVM pVM)
1511{
1512 RTGCPTR GCPtr;
1513 int rc = PDMR3GetSymbolGC(pVM, NULL, "g_szRTAssertMsg2", &GCPtr);
1514 if (VBOX_SUCCESS(rc))
1515 return (const char *)MMHyperGC2HC(pVM, GCPtr);
1516 return NULL;
1517}
1518
1519
1520/**
1521 * Execute state save operation.
1522 *
1523 * @returns VBox status code.
1524 * @param pVM VM Handle.
1525 * @param pSSM SSM operation handle.
1526 */
1527static DECLCALLBACK(int) vmmR3Save(PVM pVM, PSSMHANDLE pSSM)
1528{
1529 LogFlow(("vmmR3Save:\n"));
1530
1531 /*
1532 * The hypervisor stack.
1533 */
1534 SSMR3PutGCPtr(pSSM, pVM->vmm.s.pbGCStackBottom);
1535 RTGCPTR GCPtrESP = CPUMGetHyperESP(pVM);
1536 Assert(pVM->vmm.s.pbGCStackBottom - GCPtrESP <= VMM_STACK_SIZE);
1537 SSMR3PutGCPtr(pSSM, GCPtrESP);
1538 SSMR3PutMem(pSSM, pVM->vmm.s.pbHCStack, VMM_STACK_SIZE);
1539 return SSMR3PutU32(pSSM, ~0); /* terminator */
1540}
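/*
 * For reference, the layout of the "vmm" saved state unit (version 3) as
 * written above:
 *      RTGCPTR     GCPtrStackBottom;           - hypervisor stack bottom.
 *      RTGCPTR     GCPtrESP;                   - current hypervisor ESP.
 *      uint8_t     abStack[VMM_STACK_SIZE];    - the raw stack contents.
 *      uint32_t    u32Terminator;              - ~0.
 */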
1541
1542
1543/**
1544 * Execute state load operation.
1545 *
1546 * @returns VBox status code.
1547 * @param pVM VM Handle.
1548 * @param pSSM SSM operation handle.
1549 * @param u32Version Data layout version.
1550 */
1551static DECLCALLBACK(int) vmmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
1552{
1553 LogFlow(("vmmR3Load:\n"));
1554
1555 /*
1556 * Validate version.
1557 */
1558 if (u32Version != VMM_SAVED_STATE_VERSION)
1559 {
1560 Log(("vmmR3Load: Invalid version u32Version=%d!\n", u32Version));
1561 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
1562 }
1563
1564 /*
1565 * Check that the stack is in the same place, or that it's fairly empty.
1566 */
1567 RTGCPTR GCPtrStackBottom;
1568 SSMR3GetGCPtr(pSSM, &GCPtrStackBottom);
1569 RTGCPTR GCPtrESP;
1570 int rc = SSMR3GetGCPtr(pSSM, &GCPtrESP);
1571 if (VBOX_FAILURE(rc))
1572 return rc;
1573 if ( GCPtrStackBottom == pVM->vmm.s.pbGCStackBottom
1574 || (GCPtrStackBottom - GCPtrESP < 32)) /** @todo This will break if we start preempting the hypervisor. */
1575 {
1576 /*
1577 * We *must* set the ESP because the CPUM load + PGM load relocations will render
1578 * the ESP in CPUM fatally invalid.
1579 */
1580 CPUMSetHyperESP(pVM, GCPtrESP);
1581
1582 /* restore the stack. */
1583 SSMR3GetMem(pSSM, pVM->vmm.s.pbHCStack, VMM_STACK_SIZE);
1584
1585 /* terminator */
1586 uint32_t u32;
1587 rc = SSMR3GetU32(pSSM, &u32);
1588 if (VBOX_FAILURE(rc))
1589 return rc;
1590 if (u32 != ~0U)
1591 {
1592 AssertMsgFailed(("u32=%#x\n", u32));
1593 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1594 }
1595 return VINF_SUCCESS;
1596 }
1597
1598 LogRel(("The stack is not in the same place and it's not empty! GCPtrStackBottom=%VGv pbGCStackBottom=%VGv ESP=%VGv\n",
1599 GCPtrStackBottom, pVM->vmm.s.pbGCStackBottom, GCPtrESP));
1600 AssertFailed();
1601 return VERR_SSM_LOAD_CONFIG_MISMATCH;
1602}
1603
1604
1605/**
1606 * Selects the switcher to be used for switching to GC.
1607 *
1608 * @returns VBox status code.
1609 * @param pVM VM handle.
1610 * @param enmSwitcher The new switcher.
1611 * @remark This function may be called before the VMM is initialized.
1612 */
1613VMMR3DECL(int) VMMR3SelectSwitcher(PVM pVM, VMMSWITCHER enmSwitcher)
1614{
1615 /*
1616 * Validate input.
1617 */
1618 if ( enmSwitcher < VMMSWITCHER_INVALID
1619 || enmSwitcher >= VMMSWITCHER_MAX)
1620 {
1621 AssertMsgFailed(("Invalid input enmSwitcher=%d\n", enmSwitcher));
1622 return VERR_INVALID_PARAMETER;
1623 }
1624
1625 /*
1626 * Select the new switcher.
1627 */
1628 PVMMSWITCHERDEF pSwitcher = s_apSwitchers[enmSwitcher];
1629 if (pSwitcher)
1630 {
1631 Log(("VMMR3SelectSwitcher: enmSwitcher %d -> %d %s\n", pVM->vmm.s.enmSwitcher, enmSwitcher, pSwitcher->pszDesc));
1632 pVM->vmm.s.enmSwitcher = enmSwitcher;
1633
1634 RTR0PTR pbCodeR0 = (RTR0PTR)pVM->vmm.s.pvHCCoreCodeR0 + pVM->vmm.s.aoffSwitchers[enmSwitcher]; /** @todo fix the pvHCCoreCodeR0 type */
1635 pVM->vmm.s.pfnR0HostToGuest = pbCodeR0 + pSwitcher->offR0HostToGuest;
1636
1637 RTGCPTR GCPtr = pVM->vmm.s.pvGCCoreCode + pVM->vmm.s.aoffSwitchers[enmSwitcher];
1638 pVM->vmm.s.pfnGCGuestToHost = GCPtr + pSwitcher->offGCGuestToHost;
1639 pVM->vmm.s.pfnGCCallTrampoline = GCPtr + pSwitcher->offGCCallTrampoline;
1640 pVM->pfnVMMGCGuestToHostAsm = GCPtr + pSwitcher->offGCGuestToHostAsm;
1641 pVM->pfnVMMGCGuestToHostAsmHyperCtx = GCPtr + pSwitcher->offGCGuestToHostAsmHyperCtx;
1642 pVM->pfnVMMGCGuestToHostAsmGuestCtx = GCPtr + pSwitcher->offGCGuestToHostAsmGuestCtx;
1643 return VINF_SUCCESS;
1644 }
1645 return VERR_NOT_IMPLEMENTED;
1646}
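
/*
 * Usage sketch (hypothetical caller; the real callers select the switcher
 * to match the host and guest paging modes):
 * @code
 *      int rc = VMMR3SelectSwitcher(pVM, VMMSWITCHER_AMD64_TO_PAE);
 *      AssertRC(rc);
 * @endcode
 */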
1647
1648/**
1649 * Disable the switcher logic permanently.
1650 *
1651 * @returns VBox status code.
1652 * @param pVM VM handle.
1653 */
1654VMMR3DECL(int) VMMR3DisableSwitcher(PVM pVM)
1655{
1656/** @todo r=bird: I would suggest that we create a dummy switcher which just does something like:
1657 * @code
1658 * mov eax, VERR_INTERNAL_ERROR
1659 * ret
1660 * @endcode
1661 * And then check for fSwitcherDisabled in VMMR3SelectSwitcher() in order to prevent it from being removed.
1662 */
1663 pVM->vmm.s.fSwitcherDisabled = true;
1664 return VINF_SUCCESS;
1665}
1666
1667
1668/**
1669 * Resolve a builtin GC symbol.
1670 * Called by PDM when loading or relocating GC modules.
1671 *
1672 * @returns VBox status
1673 * @param pVM VM Handle.
1674 * @param pszSymbol Symbol to resolve.
1675 * @param pGCPtrValue Where to store the symbol value.
1676 * @remark This has to work before VMMR3Relocate() is called.
1677 */
1678VMMR3DECL(int) VMMR3GetImportGC(PVM pVM, const char *pszSymbol, PRTGCPTR pGCPtrValue)
1679{
1680 if (!strcmp(pszSymbol, "g_Logger"))
1681 {
1682 if (pVM->vmm.s.pLoggerHC)
1683 pVM->vmm.s.pLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pLoggerHC);
1684 *pGCPtrValue = pVM->vmm.s.pLoggerGC;
1685 }
1686 else if (!strcmp(pszSymbol, "g_RelLogger"))
1687 {
1688#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
1689 if (pVM->vmm.s.pRelLoggerHC)
1690 pVM->vmm.s.pRelLoggerGC = MMHyperHC2GC(pVM, pVM->vmm.s.pRelLoggerHC);
1691 *pGCPtrValue = pVM->vmm.s.pRelLoggerGC;
1692#else
1693 *pGCPtrValue = NIL_RTGCPTR;
1694#endif
1695 }
1696 else
1697 return VERR_SYMBOL_NOT_FOUND;
1698 return VINF_SUCCESS;
1699}
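
/*
 * Resolver contract sketch (hypothetical caller; PDM invokes this while
 * fixing up imports of GC modules):
 * @code
 *      RTGCPTR GCPtrLogger;
 *      int rc = VMMR3GetImportGC(pVM, "g_Logger", &GCPtrLogger);
 *      if (rc == VERR_SYMBOL_NOT_FOUND)
 *          // fall back to the other symbol resolvers...
 * @endcode
 */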
1700
1701
1702/**
1703 * Suspends the CPU yielder.
1704 *
1705 * @param pVM The VM handle.
1706 */
1707VMMR3DECL(void) VMMR3YieldSuspend(PVM pVM)
1708{
1709 if (!pVM->vmm.s.cYieldResumeMillies)
1710 {
1711 uint64_t u64Now = TMTimerGet(pVM->vmm.s.pYieldTimer);
1712 uint64_t u64Expire = TMTimerGetExpire(pVM->vmm.s.pYieldTimer);
1713 if (u64Now >= u64Expire || u64Expire == ~(uint64_t)0)
1714 pVM->vmm.s.cYieldResumeMillies = pVM->vmm.s.cYieldEveryMillies;
1715 else
1716 pVM->vmm.s.cYieldResumeMillies = TMTimerToMilli(pVM->vmm.s.pYieldTimer, u64Expire - u64Now);
1717 TMTimerStop(pVM->vmm.s.pYieldTimer);
1718 }
1719}
1720
1721
1722/**
1723 * Stops the CPU yielder.
1724 *
1725 * @param pVM The VM handle.
1726 */
1727VMMR3DECL(void) VMMR3YieldStop(PVM pVM)
1728{
1729 if (!pVM->vmm.s.cYieldResumeMillies)
1730 TMTimerStop(pVM->vmm.s.pYieldTimer);
1731 pVM->vmm.s.cYieldResumeMillies = pVM->vmm.s.cYieldEveryMillies;
1732}
1733
1734
1735/**
1736 * Resumes the CPU yielder when it has been suspended or stopped.
1737 *
1738 * @param pVM The VM handle.
1739 */
1740VMMR3DECL(void) VMMR3YieldResume(PVM pVM)
1741{
1742 if (pVM->vmm.s.cYieldResumeMillies)
1743 {
1744 TMTimerSetMillies(pVM->vmm.s.pYieldTimer, pVM->vmm.s.cYieldResumeMillies);
1745 pVM->vmm.s.cYieldResumeMillies = 0;
1746 }
1747}
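
/*
 * Typical pairing (hypothetical caller): suspend the yielder around a phase
 * which must not be disturbed by EMT yielding, then resume it afterwards.
 * @code
 *      VMMR3YieldSuspend(pVM);
 *      // ... critical phase ...
 *      VMMR3YieldResume(pVM);
 * @endcode
 */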
1748
1749
1750/**
1751 * Internal timer callback function.
1752 *
1753 * @param pVM The VM.
1754 * @param pTimer The timer handle.
1755 * @param pvUser User argument specified upon timer creation.
1756 */
1757static DECLCALLBACK(void) vmmR3YieldEMT(PVM pVM, PTMTIMER pTimer, void *pvUser)
1758{
1759#ifdef LOG_ENABLED
1760 uint64_t u64Elapsed = RTTimeNanoTS();
1761#endif
1762 RTThreadYield();
1763 TMTimerSetMillies(pTimer, pVM->vmm.s.cYieldEveryMillies);
1764 Log(("vmmR3YieldEMT: %RI64 ns\n", RTTimeNanoTS() - u64Elapsed));
1765}
1766
1767
1768/**
1769 * Acquire global VM lock.
1770 *
1771 * @returns VBox status code
1772 * @param pVM The VM to operate on.
1773 */
1774VMMR3DECL(int) VMMR3Lock(PVM pVM)
1775{
1776 return RTCritSectEnter(&pVM->vmm.s.CritSectVMLock);
1777}
1778
1779
1780/**
1781 * Release global VM lock.
1782 *
1783 * @returns VBox status code
1784 * @param pVM The VM to operate on.
1785 */
1786VMMR3DECL(int) VMMR3Unlock(PVM pVM)
1787{
1788 return RTCritSectLeave(&pVM->vmm.s.CritSectVMLock);
1789}
1790
1791
1792/**
1793 * Return global VM lock owner.
1794 *
1795 * @returns Thread id of owner.
1796 * @returns NIL_RTTHREAD if no owner.
1797 * @param pVM The VM to operate on.
1798 */
1799VMMR3DECL(RTNATIVETHREAD) VMMR3LockGetOwner(PVM pVM)
1800{
1801 return RTCritSectGetOwner(&pVM->vmm.s.CritSectVMLock);
1802}
1803
1804
1805/**
1806 * Checks if the current thread is the owner of the global VM lock.
1807 *
1808 * @returns true if owner.
1809 * @returns false if not owner.
1810 * @param pVM The VM to operate on.
1811 */
1812VMMR3DECL(bool) VMMR3LockIsOwner(PVM pVM)
1813{
1814 return RTCritSectIsOwner(&pVM->vmm.s.CritSectVMLock);
1815}
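
/*
 * Lock usage sketch (hypothetical): the global VM lock is an ordinary
 * critical section, so the enter and leave calls must balance.
 * @code
 *      int rc = VMMR3Lock(pVM);
 *      if (VBOX_SUCCESS(rc))
 *      {
 *          Assert(VMMR3LockIsOwner(pVM));
 *          // ... access state protected by the global VM lock ...
 *          VMMR3Unlock(pVM);
 *      }
 * @endcode
 */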
1816
1817
1818/**
1819 * Executes guest code.
1820 *
1821 * @param pVM VM handle.
1822 */
1823VMMR3DECL(int) VMMR3RawRunGC(PVM pVM)
1824{
1825 Log2(("VMMR3RawRunGC: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
1826
1827 /*
1828 * Set the EIP and ESP.
1829 */
1830 CPUMSetHyperEIP(pVM, CPUMGetGuestEFlags(pVM) & X86_EFL_VM
1831 ? pVM->vmm.s.pfnCPUMGCResumeGuestV86
1832 : pVM->vmm.s.pfnCPUMGCResumeGuest);
1833 CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStackBottom);
1834
1835 /*
1836 * We hide log flushes (outer) and hypervisor interrupts (inner).
1837 */
1838 for (;;)
1839 {
1840 int rc;
1841 do
1842 {
1843#ifdef NO_SUPCALLR0VMM
1844 rc = VERR_GENERAL_FAILURE;
1845#else
1846 rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_RAW_RUN, NULL);
1847#endif
1848 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
1849
1850 /*
1851 * Flush the logs.
1852 */
1853#ifdef LOG_ENABLED
1854 PRTLOGGERGC pLogger = pVM->vmm.s.pLoggerHC;
1855 if ( pLogger
1856 && pLogger->offScratch > 0)
1857 RTLogFlushGC(NULL, pLogger);
1858#endif
1859#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
1860 PRTLOGGERGC pRelLogger = pVM->vmm.s.pRelLoggerHC;
1861 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
1862 RTLogFlushGC(RTLogRelDefaultInstance(), pRelLogger);
1863#endif
1864 if (rc != VINF_VMM_CALL_HOST)
1865 {
1866 Log2(("VMMR3RawRunGC: returns %Vrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
1867 return rc;
1868 }
1869 rc = vmmR3ServiceCallHostRequest(pVM);
1870 if (VBOX_FAILURE(rc))
1871 return rc;
1872 /* Resume GC */
1873 }
1874}
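
/*
 * Caller sketch (hypothetical; the real caller is the EM execution loop):
 * the returned status code decides what EM does next.
 * @code
 *      int rc = VMMR3RawRunGC(pVM);
 *      if (VBOX_FAILURE(rc))
 *          // fatal - dump state and bail out
 *      else
 *          // dispatch on the VINF_EM_* status (reschedule, go to REM, etc.)
 * @endcode
 */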
1875
1876
1877/**
1878 * Executes guest code (Intel VMX and AMD SVM).
1879 *
1880 * @param pVM VM handle.
1881 */
1882VMMR3DECL(int) VMMR3HwAccRunGC(PVM pVM)
1883{
1884 Log2(("VMMR3HwAccRunGC: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
1885
1886 for (;;)
1887 {
1888 int rc;
1889 do
1890 {
1891#ifdef NO_SUPCALLR0VMM
1892 rc = VERR_GENERAL_FAILURE;
1893#else
1894 rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_HWACC_RUN, NULL);
1895#endif
1896 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
1897
1898#ifdef LOG_ENABLED
1899 /*
1900 * Flush the log
1901 */
1902 PVMMR0LOGGER pR0Logger = pVM->vmm.s.pR0Logger;
1903 if ( pR0Logger
1904 && pR0Logger->Logger.offScratch > 0)
1905 RTLogFlushToLogger(&pR0Logger->Logger, NULL);
1906#endif /* LOG_ENABLED */
1907 if (rc != VINF_VMM_CALL_HOST)
1908 {
1909 Log2(("VMMR3HwAccRunGC: returns %Vrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
1910 return rc;
1911 }
1912 rc = vmmR3ServiceCallHostRequest(pVM);
1913 if (VBOX_FAILURE(rc))
1914 return rc;
1915 /* Resume R0 */
1916 }
1917}
1918
1919/**
1920 * Calls a GC function.
1921 *
1922 * @param pVM The VM handle.
1923 * @param GCPtrEntry The GC function address.
1924 * @param cArgs The number of arguments in the ellipsis (...).
1925 * @param ... Arguments to the function.
1926 */
1927VMMR3DECL(int) VMMR3CallGC(PVM pVM, RTGCPTR GCPtrEntry, unsigned cArgs, ...)
1928{
1929 va_list args;
1930 va_start(args, cArgs);
1931 int rc = VMMR3CallGCV(pVM, GCPtrEntry, cArgs, args);
1932 va_end(args);
1933 return rc;
1934}
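
/*
 * Usage sketch (the GC symbol name and the arguments are hypothetical):
 * arguments are pushed as RTGCUINTPTR sized values, so cArgs must match
 * what the called GC function expects.
 * @code
 *      RTGCPTR GCPtrFoo;
 *      int rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "vmmGCSomeFunction", &GCPtrFoo);
 *      if (VBOX_SUCCESS(rc))
 *          rc = VMMR3CallGC(pVM, GCPtrFoo, 2, (RTGCUINTPTR)uArg0, (RTGCUINTPTR)uArg1);
 * @endcode
 */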
1935
1936
1937/**
1938 * Calls a GC function.
1939 *
1940 * @param pVM The VM handle.
1941 * @param GCPtrEntry The GC function address.
1942 * @param cArgs The number of arguments in the argument list.
1943 * @param args Arguments to the function.
1944 */
1945VMMR3DECL(int) VMMR3CallGCV(PVM pVM, RTGCPTR GCPtrEntry, unsigned cArgs, va_list args)
1946{
1947 Log2(("VMMR3CallGCV: GCPtrEntry=%VGv cArgs=%d\n", GCPtrEntry, cArgs));
1948
1949 /*
1950 * Setup the call frame using the trampoline.
1951 */
1952 CPUMHyperSetCtxCore(pVM, NULL);
1953 memset(pVM->vmm.s.pbHCStack, 0xaa, VMM_STACK_SIZE); /* Clear the stack. */
1954 CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStackBottom - cArgs * sizeof(RTGCUINTPTR));
1955 PRTGCUINTPTR pFrame = (PRTGCUINTPTR)(pVM->vmm.s.pbHCStack + VMM_STACK_SIZE) - cArgs;
1956 int i = cArgs;
1957 while (i-- > 0)
1958 *pFrame++ = va_arg(args, RTGCUINTPTR);
1959
1960 CPUMPushHyper(pVM, cArgs * sizeof(RTGCUINTPTR)); /* stack frame size */
1961 CPUMPushHyper(pVM, GCPtrEntry); /* what to call */
1962 CPUMSetHyperEIP(pVM, pVM->vmm.s.pfnGCCallTrampoline);
1963
1964 /*
1965 * We hide log flushes (outer) and hypervisor interrupts (inner).
1966 */
1967 for (;;)
1968 {
1969 int rc;
1970 do
1971 {
1972#ifdef NO_SUPCALLR0VMM
1973 rc = VERR_GENERAL_FAILURE;
1974#else
1975 rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_RAW_RUN, NULL);
1976#endif
1977 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
1978
1979 /*
1980 * Flush the logs.
1981 */
1982#ifdef LOG_ENABLED
1983 PRTLOGGERGC pLogger = pVM->vmm.s.pLoggerHC;
1984 if ( pLogger
1985 && pLogger->offScratch > 0)
1986 RTLogFlushGC(NULL, pLogger);
1987#endif
1988#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
1989 PRTLOGGERGC pRelLogger = pVM->vmm.s.pRelLoggerHC;
1990 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
1991 RTLogFlushGC(RTLogRelDefaultInstance(), pRelLogger);
1992#endif
1993 if (rc == VERR_TRPM_PANIC || rc == VERR_TRPM_DONT_PANIC)
1994 VMMR3FatalDump(pVM, rc);
1995 if (rc != VINF_VMM_CALL_HOST)
1996 {
1997 Log2(("VMMR3CallGCV: returns %Vrc (cs:eip=%04x:%08x)\n", rc, CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
1998 return rc;
1999 }
2000 rc = vmmR3ServiceCallHostRequest(pVM);
2001 if (VBOX_FAILURE(rc))
2002 return rc;
2003 }
2004}
2005
2006
2007/**
2008 * Resumes executing hypervisor code when interrupted
2009 * by a queue flush or a debug event.
2010 *
2011 * @returns VBox status code.
2012 * @param pVM VM handle.
2013 */
2014VMMR3DECL(int) VMMR3ResumeHyper(PVM pVM)
2015{
2016 Log(("VMMR3ResumeHyper: eip=%VGv esp=%VGv\n", CPUMGetHyperEIP(pVM), CPUMGetHyperESP(pVM)));
2017
2018 /*
2019 * We hide log flushes (outer) and hypervisor interrupts (inner).
2020 */
2021 for (;;)
2022 {
2023 int rc;
2024 do
2025 {
2026#ifdef NO_SUPCALLR0VMM
2027 rc = VERR_GENERAL_FAILURE;
2028#else
2029 rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_RAW_RUN, NULL);
2030#endif
2031 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
2032
2033 /*
2034 * Flush the loggers.
2035 */
2036#ifdef LOG_ENABLED
2037 PRTLOGGERGC pLogger = pVM->vmm.s.pLoggerHC;
2038 if ( pLogger
2039 && pLogger->offScratch > 0)
2040 RTLogFlushGC(NULL, pLogger);
2041#endif
2042#ifdef VBOX_WITH_GC_AND_R0_RELEASE_LOG
2043 PRTLOGGERGC pRelLogger = pVM->vmm.s.pRelLoggerHC;
2044 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
2045 RTLogFlushGC(RTLogRelDefaultInstance(), pRelLogger);
2046#endif
2047 if (rc == VERR_TRPM_PANIC || rc == VERR_TRPM_DONT_PANIC)
2048 VMMR3FatalDump(pVM, rc);
2049 if (rc != VINF_VMM_CALL_HOST)
2050 {
2051 Log(("VMMR3ResumeHyper: returns %Vrc\n", rc));
2052 return rc;
2053 }
2054 rc = vmmR3ServiceCallHostRequest(pVM);
2055 if (VBOX_FAILURE(rc))
2056 return rc;
2057 }
2058}
2059
2060
2061/**
2062 * Service a call to the ring-3 host code.
2063 *
2064 * @returns VBox status code.
2065 * @param pVM VM handle.
2066 * @remark Careful with critsects.
2067 */
2068static int vmmR3ServiceCallHostRequest(PVM pVM)
2069{
2070 switch (pVM->vmm.s.enmCallHostOperation)
2071 {
2072 /*
2073 * Acquire the PDM lock.
2074 */
2075 case VMMCALLHOST_PDM_LOCK:
2076 {
2077 pVM->vmm.s.rcCallHost = PDMR3LockCall(pVM);
2078 break;
2079 }
2080
2081 /*
2082 * Flush a PDM queue.
2083 */
2084 case VMMCALLHOST_PDM_QUEUE_FLUSH:
2085 {
2086 PDMR3QueueFlushWorker(pVM, NULL);
2087 pVM->vmm.s.rcCallHost = VINF_SUCCESS;
2088 break;
2089 }
2090
2091 /*
2092 * Grow the PGM pool.
2093 */
2094 case VMMCALLHOST_PGM_POOL_GROW:
2095 {
2096 pVM->vmm.s.rcCallHost = PGMR3PoolGrow(pVM);
2097 break;
2098 }
2099
2100 /*
2101 * Acquire the PGM lock.
2102 */
2103 case VMMCALLHOST_PGM_LOCK:
2104 {
2105 pVM->vmm.s.rcCallHost = PGMR3LockCall(pVM);
2106 break;
2107 }
2108
2109 /*
2110 * Flush REM handler notifications.
2111 */
2112 case VMMCALLHOST_REM_REPLAY_HANDLER_NOTIFICATIONS:
2113 {
2114 REMR3ReplayHandlerNotifications(pVM); pVM->vmm.s.rcCallHost = VINF_SUCCESS;
2115 break;
2116 }
2117
2118 case VMMCALLHOST_PGM_RAM_GROW_RANGE:
2119 {
2120 pVM->vmm.s.rcCallHost = PGM3PhysGrowRange(pVM, pVM->vmm.s.u64CallHostArg);
2121 break;
2122 }
2123
2124 /*
2125 * This is a noop. We just take this route to avoid unnecessary
2126 * tests in the loops.
2127 */
2128 case VMMCALLHOST_VMM_LOGGER_FLUSH:
2129 break;
2130
2131 /*
2132 * Set the VM error message.
2133 */
2134 case VMMCALLHOST_VM_SET_ERROR:
2135 VMR3SetErrorWorker(pVM);
2136 break;
2137
2138 /*
2139 * Set the VM runtime error message.
2140 */
2141 case VMMCALLHOST_VM_SET_RUNTIME_ERROR:
2142 VMR3SetRuntimeErrorWorker(pVM);
2143 break;
2144
2145 default:
2146 AssertMsgFailed(("enmCallHostOperation=%d\n", pVM->vmm.s.enmCallHostOperation));
2147 return VERR_INTERNAL_ERROR;
2148 }
2149
2150 pVM->vmm.s.enmCallHostOperation = VMMCALLHOST_INVALID;
2151 return VINF_SUCCESS;
2152}
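
/*
 * Producer side sketch (simplified; the actual GC/R0 producers return to
 * ring-3 via the VMM jump buffer): a request is raised by storing the
 * operation and argument, returning VINF_VMM_CALL_HOST, and picking up the
 * result after being resumed.
 * @code
 *      pVM->vmm.s.enmCallHostOperation = VMMCALLHOST_PGM_LOCK; // for example
 *      pVM->vmm.s.u64CallHostArg       = 0;
 *      // return VINF_VMM_CALL_HOST to ring-3...
 *      // ...on resumption, pVM->vmm.s.rcCallHost holds the result.
 * @endcode
 */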
2153
2154
2155
2156/**
2157 * Structure to pass to DBGFR3Info() and for doing all other
2158 * output during fatal dump.
2159 */
2160typedef struct VMMR3FATALDUMPINFOHLP
2161{
2162 /** The helper core. */
2163 DBGFINFOHLP Core;
2164 /** The release logger instance. */
2165 PRTLOGGER pRelLogger;
2166 /** The saved release logger flags. */
2167 RTUINT fRelLoggerFlags;
2168 /** The logger instance. */
2169 PRTLOGGER pLogger;
2170 /** The saved logger flags. */
2171 RTUINT fLoggerFlags;
2172 /** The saved logger destination flags. */
2173 RTUINT fLoggerDestFlags;
2174 /** Whether to output to stderr or not. */
2175 bool fStdErr;
2176} VMMR3FATALDUMPINFOHLP, *PVMMR3FATALDUMPINFOHLP;
2177typedef const VMMR3FATALDUMPINFOHLP *PCVMMR3FATALDUMPINFOHLP;
2178
2179
2180/**
2181 * Print formatted string.
2182 *
2183 * @param pHlp Pointer to this structure.
2184 * @param pszFormat The format string.
2185 * @param ... Arguments.
2186 */
2187static DECLCALLBACK(void) vmmR3FatalDumpInfoHlp_pfnPrintf(PCDBGFINFOHLP pHlp, const char *pszFormat, ...)
2188{
2189 va_list args;
2190 va_start(args, pszFormat);
2191 pHlp->pfnPrintfV(pHlp, pszFormat, args);
2192 va_end(args);
2193}
2194
2195
2196/**
2197 * Print formatted string.
2198 *
2199 * @param pHlp Pointer to this structure.
2200 * @param pszFormat The format string.
2201 * @param args Argument list.
2202 */
2203static DECLCALLBACK(void) vmmR3FatalDumpInfoHlp_pfnPrintfV(PCDBGFINFOHLP pHlp, const char *pszFormat, va_list args)
2204{
2205 PCVMMR3FATALDUMPINFOHLP pMyHlp = (PCVMMR3FATALDUMPINFOHLP)pHlp;
2206
2207 if (pMyHlp->pRelLogger)
2208 {
2209 va_list args2;
2210 va_copy(args2, args);
2211 RTLogLoggerV(pMyHlp->pRelLogger, pszFormat, args2);
2212 va_end(args2);
2213 }
2214 if (pMyHlp->pLogger)
2215 {
2216 va_list args2;
2217 va_copy(args2, args);
2218 RTLogLoggerV(pMyHlp->pLogger, pszFormat, args2);
2219 va_end(args2);
2220 }
2221 if (pMyHlp->fStdErr)
2222 {
2223 va_list args2;
2224 va_copy(args2, args);
2225 RTStrmPrintfV(g_pStdErr, pszFormat, args2);
2226 va_end(args2);
2227 }
2228}
2229
2230
2231/**
2232 * Initializes the fatal dump output helper.
2233 *
2234 * @param pHlp The structure to initialize.
2235 */
2236static void vmmR3FatalDumpInfoHlpInit(PVMMR3FATALDUMPINFOHLP pHlp)
2237{
2238 memset(pHlp, 0, sizeof(*pHlp));
2239
2240 pHlp->Core.pfnPrintf = vmmR3FatalDumpInfoHlp_pfnPrintf;
2241 pHlp->Core.pfnPrintfV = vmmR3FatalDumpInfoHlp_pfnPrintfV;
2242
2243 /*
2244 * The loggers.
2245 */
2246 pHlp->pRelLogger = RTLogRelDefaultInstance();
2247#ifndef LOG_ENABLED
2248 if (!pHlp->pRelLogger)
2249#endif
2250 pHlp->pLogger = RTLogDefaultInstance();
2251
2252 if (pHlp->pRelLogger)
2253 {
2254 pHlp->fRelLoggerFlags = pHlp->pRelLogger->fFlags;
2255 pHlp->pRelLogger->fFlags &= ~(RTLOGFLAGS_BUFFERED | RTLOGFLAGS_DISABLED);
2256 }
2257
2258 if (pHlp->pLogger)
2259 {
2260 pHlp->fLoggerFlags = pHlp->pLogger->fFlags;
2261 pHlp->fLoggerDestFlags = pHlp->pLogger->fDestFlags;
2262 pHlp->pLogger->fFlags &= ~(RTLOGFLAGS_BUFFERED | RTLOGFLAGS_DISABLED);
2263 pHlp->pLogger->fDestFlags |= RTLOGDEST_DEBUGGER;
2264 }
2265
2266 /*
2267 * Check if we need to write to stderr.
2268 */
2269 pHlp->fStdErr = (!pHlp->pRelLogger || !(pHlp->pRelLogger->fDestFlags & (RTLOGDEST_STDOUT | RTLOGDEST_STDERR)))
2270 && (!pHlp->pLogger || !(pHlp->pLogger->fDestFlags & (RTLOGDEST_STDOUT | RTLOGDEST_STDERR)));
2271}
2272
2273
2274/**
2275 * Deletes the fatal dump output helper.
2276 *
2277 * @param pHlp The structure to delete.
2278 */
2279static void vmmR3FatalDumpInfoHlpDelete(PVMMR3FATALDUMPINFOHLP pHlp)
2280{
2281 if (pHlp->pRelLogger)
2282 {
2283 RTLogFlush(pHlp->pRelLogger);
2284 pHlp->pRelLogger->fFlags = pHlp->fRelLoggerFlags;
2285 }
2286
2287 if (pHlp->pLogger)
2288 {
2289 RTLogFlush(pHlp->pLogger);
2290 pHlp->pLogger->fFlags = pHlp->fLoggerFlags;
2291 pHlp->pLogger->fDestFlags = pHlp->fLoggerDestFlags;
2292 }
2293}
2294
2295
2296/**
2297 * Dumps the VM state on a fatal error.
2298 *
2299 * @param pVM VM Handle.
2300 * @param rcErr VBox status code.
2301 */
2302VMMR3DECL(void) VMMR3FatalDump(PVM pVM, int rcErr)
2303{
2304 /*
2305 * Create our output helper and sync it with the log settings.
2306 * This helper will be used for all the output.
2307 */
2308 VMMR3FATALDUMPINFOHLP Hlp;
2309 PCDBGFINFOHLP pHlp = &Hlp.Core;
2310 vmmR3FatalDumpInfoHlpInit(&Hlp);
2311
2312 /*
2313 * Header.
2314 */
2315 pHlp->pfnPrintf(pHlp,
2316 "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
2317 "!!\n"
2318 "!! Guru Meditation %d (%Vrc)\n"
2319 "!!\n",
2320 rcErr, rcErr);
2321
2322 /*
2323 * Continue according to context.
2324 */
2325 bool fDoneHyper = false;
2326 switch (rcErr)
2327 {
2328 /*
2329 * Hypervisor errors.
2330 */
2331 case VINF_EM_DBG_HYPER_ASSERTION:
2332 pHlp->pfnPrintf(pHlp, "%s%s!!\n", VMMR3GetGCAssertMsg1(pVM), VMMR3GetGCAssertMsg2(pVM));
2333 /* fall thru */
2334 case VERR_TRPM_DONT_PANIC:
2335 case VERR_TRPM_PANIC:
2336 case VINF_EM_RAW_STALE_SELECTOR:
2337 case VINF_EM_RAW_IRET_TRAP:
2338 case VINF_EM_DBG_HYPER_BREAKPOINT:
2339 case VINF_EM_DBG_HYPER_STEPPED:
2340 {
2341 /* Trap? */
2342 uint32_t uEIP = CPUMGetHyperEIP(pVM);
2343 bool fSoftwareInterrupt = false;
2344 uint8_t u8TrapNo = 0xce;
2345 RTGCUINT uErrorCode = 0xdeadface;
2346 RTGCUINTPTR uCR2 = 0xdeadface;
2347 int rc2 = TRPMQueryTrapAll(pVM, &u8TrapNo, &fSoftwareInterrupt, &uErrorCode, &uCR2);
2348 if (VBOX_SUCCESS(rc2))
2349 pHlp->pfnPrintf(pHlp,
2350 "!! TRAP=%02x ERRCD=%VGv CR2=%VGv EIP=%VGv fSoft=%d\n",
2351 u8TrapNo, uErrorCode, uCR2, uEIP, fSoftwareInterrupt);
2352 else
2353 pHlp->pfnPrintf(pHlp,
2354 "!! EIP=%VGv NOTRAP\n",
2355 uEIP);
2356
2357 /*
2358 * Try to figure out where eip is.
2359 */
2360 /** @todo make query call for core code or move this function to VMM. */
2361 /* core code? */
2362 //if (uEIP - (RTGCUINTPTR)pVM->vmm.s.pvGCCoreCode < pVM->vmm.s.cbCoreCode)
2363 // pHlp->pfnPrintf(pHlp,
2364 // "!! EIP is in CoreCode, offset %#x\n",
2365 // uEIP - (RTGCUINTPTR)pVM->vmm.s.pvGCCoreCode);
2366 //else
2367 { /* ask PDM */
2368 /** @todo ask DBGFR3Sym later. */
2369 char szModName[64];
2370 RTGCPTR GCPtrMod;
2371 char szNearSym1[260];
2372 RTGCPTR GCPtrNearSym1;
2373 char szNearSym2[260];
2374 RTGCPTR GCPtrNearSym2;
2375 int rc = PDMR3QueryModFromEIP(pVM, uEIP,
2376 &szModName[0], sizeof(szModName), &GCPtrMod,
2377 &szNearSym1[0], sizeof(szNearSym1), &GCPtrNearSym1,
2378 &szNearSym2[0], sizeof(szNearSym2), &GCPtrNearSym2);
2379 if (VBOX_SUCCESS(rc))
2380 {
2381 pHlp->pfnPrintf(pHlp,
2382 "!! EIP in %s (%p) at rva %x near symbols:\n"
2383 "!! %VGv rva %VGv off %08x %s\n"
2384 "!! %VGv rva %VGv off -%08x %s\n",
2385 szModName, GCPtrMod, (unsigned)(uEIP - GCPtrMod),
2386 GCPtrNearSym1, GCPtrNearSym1 - GCPtrMod, (unsigned)(uEIP - GCPtrNearSym1), szNearSym1,
2387 GCPtrNearSym2, GCPtrNearSym2 - GCPtrMod, (unsigned)(GCPtrNearSym2 - uEIP), szNearSym2);
2388 }
2389 else
2390 pHlp->pfnPrintf(pHlp,
2391 "!! EIP is not in any code known to VMM!\n");
2392 }
2393
2394 /* Disassemble the instruction. */
2395 char szInstr[256];
2396 rc2 = DBGFR3DisasInstrEx(pVM, 0, 0, DBGF_DISAS_FLAGS_CURRENT_HYPER, &szInstr[0], sizeof(szInstr), NULL);
2397 if (VBOX_SUCCESS(rc2))
2398 pHlp->pfnPrintf(pHlp,
2399 "!! %s\n", szInstr);
2400
2401 /* Dump the hypervisor cpu state. */
2402 pHlp->pfnPrintf(pHlp,
2403 "!!\n"
2404 "!!\n"
2405 "!!\n");
2406 rc2 = DBGFR3Info(pVM, "cpumhyper", "verbose", pHlp);
2407 fDoneHyper = true;
2408
2409 /* Callstack. */
2410 DBGFSTACKFRAME Frame = {0};
2411 rc2 = DBGFR3StackWalkBeginHyper(pVM, &Frame);
2412 if (VBOX_SUCCESS(rc2))
2413 {
2414 pHlp->pfnPrintf(pHlp,
2415 "!!\n"
2416 "!! Call Stack:\n"
2417 "!!\n"
2418 "EBP Ret EBP Ret CS:EIP Arg0 Arg1 Arg2 Arg3 CS:EIP Symbol [line]\n");
2419 do
2420 {
2421 pHlp->pfnPrintf(pHlp,
2422 "%08RX32 %08RX32 %04RX32:%08RX32 %08RX32 %08RX32 %08RX32 %08RX32",
2423 (uint32_t)Frame.AddrFrame.off,
2424 (uint32_t)Frame.AddrReturnFrame.off,
2425 (uint32_t)Frame.AddrReturnPC.Sel,
2426 (uint32_t)Frame.AddrReturnPC.off,
2427 Frame.Args.au32[0],
2428 Frame.Args.au32[1],
2429 Frame.Args.au32[2],
2430 Frame.Args.au32[3]);
2431 pHlp->pfnPrintf(pHlp, " %RTsel:%08RGv", Frame.AddrPC.Sel, Frame.AddrPC.off);
2432 if (Frame.pSymPC)
2433 {
2434 RTGCINTPTR offDisp = Frame.AddrPC.FlatPtr - Frame.pSymPC->Value;
2435 if (offDisp > 0)
2436 pHlp->pfnPrintf(pHlp, " %s+%llx", Frame.pSymPC->szName, (int64_t)offDisp);
2437 else if (offDisp < 0)
2438 pHlp->pfnPrintf(pHlp, " %s-%llx", Frame.pSymPC->szName, -(int64_t)offDisp);
2439 else
2440 pHlp->pfnPrintf(pHlp, " %s", Frame.pSymPC->szName);
2441 }
2442 if (Frame.pLinePC)
2443 pHlp->pfnPrintf(pHlp, " [%s @ 0i%d]", Frame.pLinePC->szFilename, Frame.pLinePC->uLineNo);
2444 pHlp->pfnPrintf(pHlp, "\n");
2445
2446 /* next */
2447 rc2 = DBGFR3StackWalkNext(pVM, &Frame);
2448 } while (VBOX_SUCCESS(rc2));
2449 DBGFR3StackWalkEnd(pVM, &Frame);
2450 }
2451
2452 /* raw stack */
2453 pHlp->pfnPrintf(pHlp,
2454 "!!\n"
2455 "!! Raw stack (mind the direction).\n"
2456 "!!\n"
2457 "%.*Vhxd\n",
2458 VMM_STACK_SIZE, (char *)pVM->vmm.s.pbHCStack);
2459 break;
2460 }
2461
2462 default:
2463 {
2464 break;
2465 }
2466
2467 } /* switch (rcErr) */
2468
2469
2470 /*
2471 * Dump useful state information.
2472 */
2473 /** @todo convert these dumpers to DBGFR3Info() handlers!!! */
2474 pHlp->pfnPrintf(pHlp,
2475 "!!\n"
2476 "!! PGM Access Handlers & Stuff:\n"
2477 "!!\n");
2478 PGMR3DumpMappings(pVM);
2479
2480
2481 /*
2482 * Generic info dumper loop.
2483 */
2484 static struct
2485 {
2486 const char *pszInfo;
2487 const char *pszArgs;
2488 } const aInfo[] =
2489 {
2490 { "hma", NULL },
2491 { "cpumguest", "verbose" },
2492 { "cpumhyper", "verbose" },
2493 { "cpumhost", "verbose" },
2494 { "mode", "all" },
2495 { "cpuid", "verbose" },
2496 { "gdt", NULL },
2497 { "ldt", NULL },
2498 //{ "tss", NULL },
2499 { "ioport", NULL },
2500 { "mmio", NULL },
2501 { "phys", NULL },
2502 //{ "pgmpd", NULL }, - doesn't always work at init time...
2503 { "timers", NULL },
2504 { "activetimers", NULL },
2505 { "handlers", "phys virt stats" },
2506 { "cfgm", NULL },
2507 };
2508 for (unsigned i = 0; i < ELEMENTS(aInfo); i++)
2509 {
2510 if (fDoneHyper && !strcmp(aInfo[i].pszInfo, "cpumhyper"))
2511 continue;
2512 pHlp->pfnPrintf(pHlp,
2513 "!!\n"
2514 "!! {%s, %s}\n"
2515 "!!\n",
2516 aInfo[i].pszInfo, aInfo[i].pszArgs);
2517 DBGFR3Info(pVM, aInfo[i].pszInfo, aInfo[i].pszArgs, pHlp);
2518 }
2519
2520 /* done */
2521 pHlp->pfnPrintf(pHlp,
2522 "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n");
2523
2524
2525 /*
2526 * Delete the output instance (flushing and restoring of flags).
2527 */
2528 vmmR3FatalDumpInfoHlpDelete(&Hlp);
2529}
2530
2531
2532/**
2533 * Performs a testcase.
2534 *
2535 * @returns return value from the test.
2536 * @param pVM The VM handle.
2537 * @param enmTestcase The testcase operation to perform.
2538 * @param uVariation The testcase variation id.
2539 */
2540static int vmmR3DoGCTest(PVM pVM, VMMGCOPERATION enmTestcase, unsigned uVariation)
2541{
2542 RTGCPTR GCPtrEP;
2543 int rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "VMMGCEntry", &GCPtrEP);
2544 if (VBOX_FAILURE(rc))
2545 return rc;
2546
2547 CPUMHyperSetCtxCore(pVM, NULL);
2548 memset(pVM->vmm.s.pbHCStack, 0xaa, VMM_STACK_SIZE); /* Clear the stack. */
2549 CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStackBottom); /* Reset the hypervisor ESP to the stack bottom. */
2550 CPUMPushHyper(pVM, uVariation);
2551 CPUMPushHyper(pVM, enmTestcase);
2552 CPUMPushHyper(pVM, pVM->pVMGC);
2553 CPUMPushHyper(pVM, 3 * sizeof(RTGCPTR)); /* stack frame size */
2554 CPUMPushHyper(pVM, GCPtrEP); /* what to call */
2555 CPUMSetHyperEIP(pVM, pVM->vmm.s.pfnGCCallTrampoline);
2556 return SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_RAW_RUN, NULL);
2557}
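
/*
 * The frame built above matches the GC entry point, which is assumed here
 * to have roughly this signature:
 * @code
 *      VMMGCDECL(int) VMMGCEntry(PVM pVM, unsigned uOperation, unsigned uArg);
 * @endcode
 * The arguments are pushed in reverse order, followed by the frame size and
 * the entry point address which the call trampoline consumes.
 */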
2558
2559
2560/**
2561 * Performs a trap test.
2562 *
2563 * @returns Return value from the trap test.
2564 * @param pVM The VM handle.
2565 * @param u8Trap The trap number to test.
2566 * @param uVariation The testcase variation.
2567 * @param rcExpect The expected result.
2568 * @param u32Eax The expected eax value.
2569 * @param pszFaultEIP The fault address. Pass NULL if this isn't available or doesn't apply.
2570 * @param pszDesc The test description.
2571 */
2572static int vmmR3DoTrapTest(PVM pVM, uint8_t u8Trap, unsigned uVariation, int rcExpect, uint32_t u32Eax, const char *pszFaultEIP, const char *pszDesc)
2573{
2574 RTPrintf("VMM: testing 0x%x / %d - %s\n", u8Trap, uVariation, pszDesc);
2575
2576 RTGCPTR GCPtrEP;
2577 int rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "VMMGCEntry", &GCPtrEP);
2578 if (VBOX_FAILURE(rc))
2579 return rc;
2580
2581 CPUMHyperSetCtxCore(pVM, NULL);
2582 memset(pVM->vmm.s.pbHCStack, 0xaa, VMM_STACK_SIZE); /* Clear the stack. */
2583 CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStackBottom); /* Reset the hypervisor ESP to the stack bottom. */
2584 CPUMPushHyper(pVM, uVariation);
2585 CPUMPushHyper(pVM, u8Trap + VMMGC_DO_TESTCASE_TRAP_FIRST);
2586 CPUMPushHyper(pVM, pVM->pVMGC);
2587 CPUMPushHyper(pVM, 3 * sizeof(RTGCPTR)); /* stack frame size */
2588 CPUMPushHyper(pVM, GCPtrEP); /* what to call */
2589 CPUMSetHyperEIP(pVM, pVM->vmm.s.pfnGCCallTrampoline);
2590 rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_RAW_RUN, NULL);
2591 bool fDump = false;
2592 if (rc != rcExpect)
2593 {
2594 RTPrintf("VMM: FAILURE - rc=%Vrc expected %Vrc\n", rc, rcExpect);
2595 if (rc != VERR_NOT_IMPLEMENTED)
2596 fDump = true;
2597 }
2598 else if ( rcExpect != VINF_SUCCESS
2599 && u8Trap != 8 /* double fault doesn't dare set TrapNo. */
2600 && u8Trap != 3 /* guest only, we're not in guest. */
2601 && u8Trap != 1 /* guest only, we're not in guest. */
2602 && u8Trap != TRPMGetTrapNo(pVM))
2603 {
2604 RTPrintf("VMM: FAILURE - Trap %#x expected %#x\n", TRPMGetTrapNo(pVM), u8Trap);
2605 fDump = true;
2606 }
2607 else if (pszFaultEIP)
2608 {
2609 RTGCPTR GCPtrFault;
2610 int rc2 = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, pszFaultEIP, &GCPtrFault);
2611 if (VBOX_FAILURE(rc2))
2612 RTPrintf("VMM: FAILURE - Failed to resolve symbol '%s', %Vrc!\n", pszFaultEIP, rc2);
2613 else if (GCPtrFault != CPUMGetHyperEIP(pVM))
2614 {
2615 RTPrintf("VMM: FAILURE - EIP=%VGv expected %VGv (%s)\n", CPUMGetHyperEIP(pVM), GCPtrFault, pszFaultEIP);
2616 fDump = true;
2617 }
2618 }
2619 else if (rcExpect != VINF_SUCCESS)
2620 {
2621 if (CPUMGetHyperSS(pVM) != SELMGetHyperDS(pVM))
2622 RTPrintf("VMM: FAILURE - ss=%x expected %x\n", CPUMGetHyperSS(pVM), SELMGetHyperDS(pVM));
2623 if (CPUMGetHyperES(pVM) != SELMGetHyperDS(pVM))
2624 RTPrintf("VMM: FAILURE - es=%x expected %x\n", CPUMGetHyperES(pVM), SELMGetHyperDS(pVM));
2625 if (CPUMGetHyperDS(pVM) != SELMGetHyperDS(pVM))
2626 RTPrintf("VMM: FAILURE - ds=%x expected %x\n", CPUMGetHyperDS(pVM), SELMGetHyperDS(pVM));
2627 if (CPUMGetHyperFS(pVM) != SELMGetHyperDS(pVM))
2628 RTPrintf("VMM: FAILURE - fs=%x expected %x\n", CPUMGetHyperFS(pVM), SELMGetHyperDS(pVM));
2629 if (CPUMGetHyperGS(pVM) != SELMGetHyperDS(pVM))
2630 RTPrintf("VMM: FAILURE - gs=%x expected %x\n", CPUMGetHyperGS(pVM), SELMGetHyperDS(pVM));
2631 if (CPUMGetHyperEDI(pVM) != 0x01234567)
2632 RTPrintf("VMM: FAILURE - edi=%x expected %x\n", CPUMGetHyperEDI(pVM), 0x01234567);
2633 if (CPUMGetHyperESI(pVM) != 0x42000042)
2634 RTPrintf("VMM: FAILURE - esi=%x expected %x\n", CPUMGetHyperESI(pVM), 0x42000042);
2635 if (CPUMGetHyperEBP(pVM) != 0xffeeddcc)
2636 RTPrintf("VMM: FAILURE - ebp=%x expected %x\n", CPUMGetHyperEBP(pVM), 0xffeeddcc);
2637 if (CPUMGetHyperEBX(pVM) != 0x89abcdef)
2638 RTPrintf("VMM: FAILURE - ebx=%x expected %x\n", CPUMGetHyperEBX(pVM), 0x89abcdef);
2639 if (CPUMGetHyperECX(pVM) != 0xffffaaaa)
2640 RTPrintf("VMM: FAILURE - ecx=%x expected %x\n", CPUMGetHyperECX(pVM), 0xffffaaaa);
2641 if (CPUMGetHyperEDX(pVM) != 0x77778888)
2642 RTPrintf("VMM: FAILURE - edx=%x expected %x\n", CPUMGetHyperEDX(pVM), 0x77778888);
2643 if (CPUMGetHyperEAX(pVM) != u32Eax)
2644 RTPrintf("VMM: FAILURE - eax=%x expected %x\n", CPUMGetHyperEAX(pVM), u32Eax);
2645 }
2646 if (fDump)
2647 VMMR3FatalDump(pVM, rc);
2648 return rc;
2649}
2650
2651
2652/* Execute the raw-mode GC testcases. */
2653VMMR3DECL(int) VMMDoTest(PVM pVM)
2654{
2655#if 1
2656#ifdef NO_SUPCALLR0VMM
2657 RTPrintf("NO_SUPCALLR0VMM\n");
2658 return VINF_SUCCESS;
2659#endif
2660
2661 /*
2662 * Setup stack for calling VMMGCEntry().
2663 */
2664 RTGCPTR GCPtrEP;
2665 int rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "VMMGCEntry", &GCPtrEP);
2666 if (VBOX_SUCCESS(rc))
2667 {
2668 RTPrintf("VMM: VMMGCEntry=%VGv\n", GCPtrEP);
2669
2670 /*
2671 * Test various crashes which we must be able to recover from.
2672 */
2673 vmmR3DoTrapTest(pVM, 0x3, 0, VINF_EM_DBG_HYPER_ASSERTION, 0xf0f0f0f0, "vmmGCTestTrap3_FaultEIP", "int3");
2674 vmmR3DoTrapTest(pVM, 0x3, 1, VINF_EM_DBG_HYPER_ASSERTION, 0xf0f0f0f0, "vmmGCTestTrap3_FaultEIP", "int3 WP");
2675
2676#if defined(DEBUG_bird) /* guess most people would like to skip these since they write to com1. */
2677 vmmR3DoTrapTest(pVM, 0x8, 0, VERR_TRPM_PANIC, 0x00000000, "vmmGCTestTrap8_FaultEIP", "#DF [#PG]");
2678 SELMR3Relocate(pVM); /* this resets the busy flag of the Trap 08 TSS */
2679 bool f;
2680 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "DoubleFault", &f);
2681#if !defined(DEBUG_bird)
2682 if (VBOX_SUCCESS(rc) && f)
2683#endif
2684 {
2685 /* see triple fault warnings in SELM and VMMGC.cpp. */
2686 vmmR3DoTrapTest(pVM, 0x8, 1, VERR_TRPM_PANIC, 0x00000000, "vmmGCTestTrap8_FaultEIP", "#DF [#PG] WP");
2687 SELMR3Relocate(pVM); /* this resets the busy flag of the Trap 08 TSS */
2688 }
2689#endif
2690
2691 vmmR3DoTrapTest(pVM, 0xd, 0, VERR_TRPM_DONT_PANIC, 0xf0f0f0f0, "vmmGCTestTrap0d_FaultEIP", "ltr #GP");
2692 ///@todo find a better \#GP case; on Intel, ltr will \#PF (busy update?) and not \#GP.
2693 //vmmR3DoTrapTest(pVM, 0xd, 1, VERR_TRPM_DONT_PANIC, 0xf0f0f0f0, "vmmGCTestTrap0d_FaultEIP", "ltr #GP WP");
2694
2695 vmmR3DoTrapTest(pVM, 0xe, 0, VERR_TRPM_DONT_PANIC, 0x00000000, "vmmGCTestTrap0e_FaultEIP", "#PF (NULL)");
2696 vmmR3DoTrapTest(pVM, 0xe, 1, VERR_TRPM_DONT_PANIC, 0x00000000, "vmmGCTestTrap0e_FaultEIP", "#PF (NULL) WP");
2697 vmmR3DoTrapTest(pVM, 0xe, 2, VINF_SUCCESS, 0x00000000, NULL, "#PF w/Tmp Handler");
2698 vmmR3DoTrapTest(pVM, 0xe, 4, VINF_SUCCESS, 0x00000000, NULL, "#PF w/Tmp Handler and bad fs");
2699
2700 /*
2701 * Set a debug register and perform a context switch.
2702 */
2703 rc = vmmR3DoGCTest(pVM, VMMGC_DO_TESTCASE_NOP, 0);
2704 if (rc != VINF_SUCCESS)
2705 {
2706 RTPrintf("VMM: Nop test failed, rc=%Vrc not VINF_SUCCESS\n", rc);
2707 return rc;
2708 }
2709
2710 /* a harmless breakpoint */
2711 RTPrintf("VMM: testing hardware bp at 0x10000 (not hit)\n");
2712 DBGFADDRESS Addr;
2713 DBGFR3AddrFromFlat(pVM, &Addr, 0x10000);
2714 RTUINT iBp0;
2715 rc = DBGFR3BpSetReg(pVM, &Addr, 0, ~(uint64_t)0, X86_DR7_RW_EO, 1, &iBp0);
2716 AssertReleaseRC(rc);
2717 rc = vmmR3DoGCTest(pVM, VMMGC_DO_TESTCASE_NOP, 0);
2718 if (rc != VINF_SUCCESS)
2719 {
2720 RTPrintf("VMM: DR0=0x10000 test failed with rc=%Vrc!\n", rc);
2721 return rc;
2722 }
2723
2724 /* a bad one at VMMGCEntry */
2725 RTPrintf("VMM: testing hardware bp at VMMGCEntry (hit)\n");
2726 DBGFR3AddrFromFlat(pVM, &Addr, GCPtrEP);
2727 RTUINT iBp1;
2728 rc = DBGFR3BpSetReg(pVM, &Addr, 0, ~(uint64_t)0, X86_DR7_RW_EO, 1, &iBp1);
2729 AssertReleaseRC(rc);
2730 rc = vmmR3DoGCTest(pVM, VMMGC_DO_TESTCASE_NOP, 0);
2731 if (rc != VINF_EM_DBG_HYPER_BREAKPOINT)
2732 {
2733 RTPrintf("VMM: DR1=VMMGCEntry test failed with rc=%Vrc! expected VINF_EM_DBG_HYPER_BREAKPOINT\n", rc);
2734 return rc;
2735 }
2736
2737 /* resume the breakpoint */
2738 RTPrintf("VMM: resuming hyper after breakpoint\n");
2739 CPUMSetHyperEFlags(pVM, CPUMGetHyperEFlags(pVM) | X86_EFL_RF);
2740 rc = VMMR3ResumeHyper(pVM);
2741 if (rc != VINF_SUCCESS)
2742 {
2743 RTPrintf("VMM: failed to resume on hyper breakpoint, rc=%Vrc\n", rc);
2744 return rc;
2745 }
2746
2747 /* engage the breakpoint again and try single stepping. */
2748 RTPrintf("VMM: testing hardware bp at VMMGCEntry + stepping\n");
2749 rc = vmmR3DoGCTest(pVM, VMMGC_DO_TESTCASE_NOP, 0);
2750 if (rc != VINF_EM_DBG_HYPER_BREAKPOINT)
2751 {
2752 RTPrintf("VMM: DR1=VMMGCEntry test failed with rc=%Vrc! expected VINF_EM_DBG_HYPER_BREAKPOINT\n", rc);
2753 return rc;
2754 }
2755
2756 RTGCUINTREG OldPc = CPUMGetHyperEIP(pVM);
2757 RTPrintf("%RGr=>", OldPc);
2758 unsigned i;
2759 for (i = 0; i < 8; i++)
2760 {
2761 CPUMSetHyperEFlags(pVM, CPUMGetHyperEFlags(pVM) | X86_EFL_TF | X86_EFL_RF);
2762 rc = VMMR3ResumeHyper(pVM);
2763 if (rc != VINF_EM_DBG_HYPER_STEPPED)
2764 {
2765 RTPrintf("\nVMM: failed to step on hyper breakpoint, rc=%Vrc\n", rc);
2766 return rc;
2767 }
2768 RTGCUINTREG Pc = CPUMGetHyperEIP(pVM);
2769 RTPrintf("%RGr=>", Pc);
2770 if (Pc == OldPc)
2771 {
2772 RTPrintf("\nVMM: step failed, PC: %RGr -> %RGr\n", OldPc, Pc);
2773 return VERR_GENERAL_FAILURE;
2774 }
2775 OldPc = Pc;
2776 }
2777 RTPrintf("ok\n");
2778
2779 /* done, clear it */
2780 if ( VBOX_FAILURE(DBGFR3BpClear(pVM, iBp0))
2781 || VBOX_FAILURE(DBGFR3BpClear(pVM, iBp1)))
2782 {
2783 RTPrintf("VMM: Failed to clear breakpoints!\n");
2784 return VERR_GENERAL_FAILURE;
2785 }
2786 rc = vmmR3DoGCTest(pVM, VMMGC_DO_TESTCASE_NOP, 0);
2787 if (rc != VINF_SUCCESS)
2788 {
2789 RTPrintf("VMM: NOP failed, rc=%Vrc\n", rc);
2790 return rc;
2791 }
2792
2793 /*
2794 * Interrupt masking.
2795 */
2796 RTPrintf("VMM: interrupt masking...\n"); RTStrmFlush(g_pStdOut); RTThreadSleep(250);
2797 for (i = 0; i < 10000; i++)
2798 {
2799 uint64_t StartTick = ASMReadTSC();
2800 rc = vmmR3DoGCTest(pVM, VMMGC_DO_TESTCASE_INTERRUPT_MASKING, 0);
2801 if (rc != VINF_SUCCESS)
2802 {
2803 RTPrintf("VMM: Interrupt masking failed: rc=%Vrc\n", rc);
2804 return rc;
2805 }
2806 uint64_t Ticks = ASMReadTSC() - StartTick;
2807 if (Ticks < (SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage) / 10000))
2808 RTPrintf("Warning: Ticks=%RU64 (< %RU64)\n", Ticks, SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage) / 10000);
2809 }
2810
2811 /*
2812 * Interrupt forwarding.
2813 */
2814 CPUMHyperSetCtxCore(pVM, NULL);
2815 CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStackBottom); /* Reset the hypervisor ESP to the stack bottom. */
2816 CPUMPushHyper(pVM, 0);
2817 CPUMPushHyper(pVM, VMMGC_DO_TESTCASE_HYPER_INTERRUPT);
2818 CPUMPushHyper(pVM, pVM->pVMGC);
2819 CPUMPushHyper(pVM, 3 * sizeof(RTGCPTR)); /* stack frame size */
2820 CPUMPushHyper(pVM, GCPtrEP); /* what to call */
2821 CPUMSetHyperEIP(pVM, pVM->vmm.s.pfnGCCallTrampoline);
2822 Log(("trampoline=%x\n", pVM->vmm.s.pfnGCCallTrampoline));
2823
2824 /*
2825 * Switch and do da thing.
2826 */
2827 RTPrintf("VMM: interrupt forwarding...\n"); RTStrmFlush(g_pStdOut); RTThreadSleep(250);
2828 i = 0;
2829 uint64_t tsBegin = RTTimeNanoTS();
2830 uint64_t TickStart = ASMReadTSC();
2831 do
2832 {
2833 rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_RAW_RUN, NULL);
2834 if (VBOX_FAILURE(rc))
2835 {
2836 Log(("VMM: GC returned fatal %Vra in iteration %d\n", rc, i));
2837 VMMR3FatalDump(pVM, rc);
2838 return rc;
2839 }
2840 i++;
2841 if (!(i % 32))
2842 Log(("VMM: iteration %d, esi=%08x edi=%08x ebx=%08x\n",
2843 i, CPUMGetHyperESI(pVM), CPUMGetHyperEDI(pVM), CPUMGetHyperEBX(pVM)));
2844 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
2845 uint64_t TickEnd = ASMReadTSC();
2846 uint64_t tsEnd = RTTimeNanoTS();
2847
2848 uint64_t Elapsed = tsEnd - tsBegin;
2849 uint64_t PerIteration = Elapsed / (uint64_t)i;
2850 uint64_t cTicksElapsed = TickEnd - TickStart;
2851 uint64_t cTicksPerIteration = cTicksElapsed / (uint64_t)i;
2852
2853 RTPrintf("VMM: %8d interrupts in %11llu ns (%11llu ticks), %10llu ns/iteration (%11llu ticks)\n",
2854 i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration);
2855 Log(("VMM: %8d interrupts in %11llu ns (%11llu ticks), %10llu ns/iteration (%11llu ticks)\n",
2856 i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration));
2857
2858 /*
2859 * These forced actions are not necessary for the test and trigger breakpoints too.
2860 */
2861 VM_FF_CLEAR(pVM, VM_FF_TRPM_SYNC_IDT);
2862 VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_TSS);
2863
2864 /*
2865 * Profile switching.
2866 */
2867 RTPrintf("VMM: profiling switcher...\n");
2868 Log(("VMM: profiling switcher...\n"));
2869 uint64_t TickMin = ~0;
2870 tsBegin = RTTimeNanoTS();
2871 TickStart = ASMReadTSC();
2872 for (i = 0; i < 1000000; i++)
2873 {
2874 CPUMHyperSetCtxCore(pVM, NULL);
2875 CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStackBottom); /* Reset the hypervisor ESP to the stack bottom. */
2876 CPUMPushHyper(pVM, 0);
2877 CPUMPushHyper(pVM, VMMGC_DO_TESTCASE_NOP);
2878 CPUMPushHyper(pVM, pVM->pVMGC);
2879 CPUMPushHyper(pVM, 3 * sizeof(RTGCPTR)); /* stack frame size */
2880 CPUMPushHyper(pVM, GCPtrEP); /* what to call */
2881 CPUMSetHyperEIP(pVM, pVM->vmm.s.pfnGCCallTrampoline);
2882
2883 uint64_t TickThisStart = ASMReadTSC();
2884 rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_RAW_RUN, NULL);
2885 uint64_t TickThisElapsed = ASMReadTSC() - TickThisStart;
2886 if (VBOX_FAILURE(rc))
2887 {
2888 Log(("VMM: GC returned fatal %Vra in iteration %d\n", rc, i));
2889 VMMR3FatalDump(pVM, rc);
2890 return rc;
2891 }
2892 if (TickThisElapsed < TickMin)
2893 TickMin = TickThisElapsed;
2894 }
2895 TickEnd = ASMReadTSC();
2896 tsEnd = RTTimeNanoTS();
2897
2898 Elapsed = tsEnd - tsBegin;
2899 PerIteration = Elapsed / (uint64_t)i;
2900 cTicksElapsed = TickEnd - TickStart;
2901 cTicksPerIteration = cTicksElapsed / (uint64_t)i;
2902
2903 RTPrintf("VMM: %8d cycles in %11llu ns (%11lld ticks), %10llu ns/iteration (%11lld ticks) Min %11lld ticks\n",
2904 i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration, TickMin);
2905 Log(("VMM: %8d cycles in %11llu ns (%11lld ticks), %10llu ns/iteration (%11lld ticks) Min %11lld ticks\n",
2906 i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration, TickMin));
2907
2908 rc = VINF_SUCCESS;
2909 }
2910 else
2911 AssertMsgFailed(("Failed to resolve VMMGC.gc::VMMGCEntry(), rc=%Vrc\n", rc));
2912#endif
2913 return rc;
2914}
2915
2916#define SYNC_SEL(pHyperCtx, reg) \
2917 if (pHyperCtx->reg) \
2918 { \
2919 SELMSELINFO selInfo; \
2920 int rc = SELMR3GetShadowSelectorInfo(pVM, pHyperCtx->reg, &selInfo); \
2921 AssertRC(rc); \
2922 \
2923 pHyperCtx->reg##Hid.u32Base = selInfo.GCPtrBase; \
2924 pHyperCtx->reg##Hid.u32Limit = selInfo.cbLimit; \
2925 pHyperCtx->reg##Hid.Attr.n.u1Present = selInfo.Raw.Gen.u1Present; \
2926 pHyperCtx->reg##Hid.Attr.n.u1DefBig = selInfo.Raw.Gen.u1DefBig; \
2927 pHyperCtx->reg##Hid.Attr.n.u1Granularity = selInfo.Raw.Gen.u1Granularity; \
2928 pHyperCtx->reg##Hid.Attr.n.u4Type = selInfo.Raw.Gen.u4Type; \
2929 pHyperCtx->reg##Hid.Attr.n.u2Dpl = selInfo.Raw.Gen.u2Dpl; \
2930 pHyperCtx->reg##Hid.Attr.n.u1DescType = selInfo.Raw.Gen.u1DescType; \
2931 pHyperCtx->reg##Hid.Attr.n.u1Reserved = selInfo.Raw.Gen.u1Reserved; \
2932 }
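
/*
 * For illustration, SYNC_SEL(pHyperCtx, cs) expands to roughly the
 * following, copying the shadow selector's base, limit and attributes into
 * the hidden register part:
 * @code
 *      SELMSELINFO selInfo;
 *      int rc = SELMR3GetShadowSelectorInfo(pVM, pHyperCtx->cs, &selInfo);
 *      AssertRC(rc);
 *      pHyperCtx->csHid.u32Base  = selInfo.GCPtrBase;
 *      pHyperCtx->csHid.u32Limit = selInfo.cbLimit;
 *      // ...the Attr.n bits are copied likewise...
 * @endcode
 */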
2933
2934/* Execute the hardware accelerated (VT-x/AMD-V) switcher testcase. */
2935VMMR3DECL(int) VMMDoHwAccmTest(PVM pVM)
2936{
2937 uint32_t i;
2938 int rc;
2939 PCPUMCTX pHyperCtx, pGuestCtx;
2940
2941 if (!HWACCMR3IsAllowed(pVM))
2942 {
2943 RTPrintf("VMM: Hardware accelerated test not available!\n");
2944 return VERR_ACCESS_DENIED;
2945 }
2946
2947 /*
2948 * These forced actions are not necessary for the test and trigger breakpoints too.
2949 */
2950 VM_FF_CLEAR(pVM, VM_FF_TRPM_SYNC_IDT);
2951 VM_FF_CLEAR(pVM, VM_FF_SELM_SYNC_TSS);
2952
2953 /* Enable mapping of the hypervisor into the shadow page table. */
2954 PGMR3ChangeShwPDMappings(pVM, true);
2955
2956 VM_FF_CLEAR(pVM, VM_FF_TO_R3);
2957 VM_FF_CLEAR(pVM, VM_FF_TIMER);
2958 VM_FF_CLEAR(pVM, VM_FF_REQUEST);
2959
2960 CPUMQueryHyperCtxPtr(pVM, &pHyperCtx);
2961
2962 pHyperCtx->cr0 = X86_CR0_PE | X86_CR0_WP | X86_CR0_PG | X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP;
2963 pHyperCtx->cr4 = X86_CR4_PGE | X86_CR4_OSFSXR | X86_CR4_OSXMMEEXCPT;
2964
2965 /*
2966 * Setup stack for calling VMMGCEntry().
2967 */
2968 RTGCPTR GCPtrEP;
2969 rc = PDMR3GetSymbolGC(pVM, VMMGC_MAIN_MODULE_NAME, "VMMGCEntry", &GCPtrEP);
2970 if (VBOX_SUCCESS(rc))
2971 {
2972 RTPrintf("VMM: VMMGCEntry=%VGv\n", GCPtrEP);
2973
2974 CPUMQueryHyperCtxPtr(pVM, &pHyperCtx);
2975
2976 /* Fill in hidden selector registers for the hypervisor state. */
2977 SYNC_SEL(pHyperCtx, cs);
2978 SYNC_SEL(pHyperCtx, ds);
2979 SYNC_SEL(pHyperCtx, es);
2980 SYNC_SEL(pHyperCtx, fs);
2981 SYNC_SEL(pHyperCtx, gs);
2982 SYNC_SEL(pHyperCtx, ss);
2983 SYNC_SEL(pHyperCtx, tr);
2984
2985 /*
2986 * Profile switching.
2987 */
2988 RTPrintf("VMM: profiling switcher...\n");
2989 Log(("VMM: profiling switcher...\n"));
2990 uint64_t TickMin = ~0;
2991 uint64_t tsBegin = RTTimeNanoTS();
2992 uint64_t TickStart = ASMReadTSC();
2993 for (i = 0; i < 1000000; i++)
2994 {
2995 CPUMHyperSetCtxCore(pVM, NULL);
2996
2997 CPUMSetHyperESP(pVM, pVM->vmm.s.pbGCStackBottom); /* Reset the hypervisor ESP to the stack bottom. */
2998 CPUMPushHyper(pVM, 0);
2999 CPUMPushHyper(pVM, VMMGC_DO_TESTCASE_HWACCM_NOP);
3000 CPUMPushHyper(pVM, pVM->pVMGC);
3001 CPUMPushHyper(pVM, 3 * sizeof(RTGCPTR)); /* stack frame size */
3002 CPUMPushHyper(pVM, GCPtrEP); /* what to call */
3003 CPUMSetHyperEIP(pVM, pVM->vmm.s.pfnGCCallTrampoline);
3004
3005 CPUMQueryHyperCtxPtr(pVM, &pHyperCtx);
3006 CPUMQueryGuestCtxPtr(pVM, &pGuestCtx);
3007
3008 /* Copy the hypervisor context to make sure we have a valid guest context. */
3009 *pGuestCtx = *pHyperCtx;
3010
3011 pGuestCtx->csHid.u32Base = 0;
3012 pGuestCtx->csHid.u32Limit = 0xffffffff;
3013
3014 uint64_t TickThisStart = ASMReadTSC();
3015 rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_HWACC_RUN, NULL);
3016 uint64_t TickThisElapsed = ASMReadTSC() - TickThisStart;
3017 if (VBOX_FAILURE(rc))
3018 {
3019 Log(("VMM: GC returned fatal %Vra in iteration %d\n", rc, i));
3020 VMMR3FatalDump(pVM, rc);
3021 return rc;
3022 }
3023 if (TickThisElapsed < TickMin)
3024 TickMin = TickThisElapsed;
3025/* temporary */
3026#ifdef LOG_ENABLED
3027 PRTLOGGERGC pLogger = pVM->vmm.s.pLoggerHC;
3028 if ( pLogger
3029 && pLogger->offScratch > 0)
3030 RTLogFlushGC(NULL, pLogger);
3031#endif
3032
3033 }
3034 uint64_t TickEnd = ASMReadTSC();
3035 uint64_t tsEnd = RTTimeNanoTS();
3036
3037 uint64_t Elapsed = tsEnd - tsBegin;
3038 uint64_t PerIteration = Elapsed / (uint64_t)i;
3039 uint64_t cTicksElapsed = TickEnd - TickStart;
3040 uint64_t cTicksPerIteration = cTicksElapsed / (uint64_t)i;
3041
3042 RTPrintf("VMM: %8d cycles in %11llu ns (%11lld ticks), %10llu ns/iteration (%11lld ticks) Min %11lld ticks\n",
3043 i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration, TickMin);
3044 Log(("VMM: %8d cycles in %11llu ns (%11lld ticks), %10llu ns/iteration (%11lld ticks) Min %11lld ticks\n",
3045 i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration, TickMin));
3046
3047 rc = VINF_SUCCESS;
3048 }
3049 else
3050 AssertMsgFailed(("Failed to resolve VMMGC.gc::VMMGCEntry(), rc=%Vrc\n", rc));
3051
3052 return rc;
3053}