VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/VMMTests.cpp @ 49369

Last change on this file since 49369 was 49369, checked in by vboxsync, 11 years ago

testing...

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 34.7 KB
1/* $Id: VMMTests.cpp 49369 2013-11-02 21:07:59Z vboxsync $ */
2/** @file
3 * VMM - The Virtual Machine Monitor Core, Tests.
4 */
5
6/*
7 * Copyright (C) 2006-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18//#define NO_SUPCALLR0VMM
19
20/*******************************************************************************
21* Header Files *
22*******************************************************************************/
23#define LOG_GROUP LOG_GROUP_VMM
24#include <iprt/asm-amd64-x86.h> /* for SUPGetCpuHzFromGIP */
25#include <VBox/vmm/vmm.h>
26#include <VBox/vmm/pdmapi.h>
27#include <VBox/vmm/cpum.h>
28#include <VBox/dbg.h>
29#include <VBox/vmm/hm.h>
30#include <VBox/vmm/mm.h>
31#include <VBox/vmm/trpm.h>
32#include <VBox/vmm/selm.h>
33#include "VMMInternal.h"
34#include <VBox/vmm/vm.h>
35#include <VBox/err.h>
36#include <VBox/param.h>
37
38#include <iprt/assert.h>
39#include <iprt/asm.h>
40#include <iprt/time.h>
41#include <iprt/stream.h>
42#include <iprt/string.h>
43#include <iprt/x86.h>
44
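/**
 * Fills the EMT stack with an 0xaa pattern so stack usage can be told apart
 * from untouched memory after a test run.
 *
 * @param   pVCpu   Pointer to the cross context virtual CPU structure.
 */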
45static void vmmR3TestClearStack(PVMCPU pVCpu)
46{
47 /* We leave the first 64 bytes of the stack alone because the strict
48 ring-0 long jump code uses it. */
49 memset(pVCpu->vmm.s.pbEMTStackR3 + 64, 0xaa, VMM_STACK_SIZE - 64);
50}
51
52
53#ifdef VBOX_WITH_RAW_MODE
54
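/**
 * Reads a range of MSRs via the raw-mode helper VMMRCTestReadMsrs and prints
 * each MSR that could be read, batching the work in chunks of 16384 MSRs per
 * raw-mode call.
 *
 * @returns VBox status code.
 * @param   pVM             Pointer to the cross context VM structure.
 * @param   uMsr            The first MSR in the range.
 * @param   cMsrs           The number of MSRs in the range.
 * @param   pReportStrm     Stream to write the MVO lines to. Optional.
 * @param   pcMsrsFound     Where to add the number of readable MSRs found.
 */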
55static int vmmR3ReportMsrRange(PVM pVM, uint32_t uMsr, uint64_t cMsrs, PRTSTREAM pReportStrm, uint32_t *pcMsrsFound)
56{
57 /*
58 * Preps.
59 */
60 RTRCPTR RCPtrEP;
61 int rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "VMMRCTestReadMsrs", &RCPtrEP);
62 AssertMsgRCReturn(rc, ("Failed to resolve VMMRC.rc::VMMRCTestReadMsrs(), rc=%Rrc\n", rc), rc);
63
64 uint32_t const cMsrsPerCall = 16384;
65 uint32_t cbResults = cMsrsPerCall * sizeof(VMMTESTMSRENTRY);
66 PVMMTESTMSRENTRY paResults;
67 rc = MMHyperAlloc(pVM, cbResults, 0, MM_TAG_VMM, (void **)&paResults);
68 AssertMsgRCReturn(rc, ("Error allocating %#x bytes off the hyper heap: %Rrc\n", cbResults, rc), rc);
69 /*
70 * The loop.
71 */
72 RTRCPTR RCPtrResults = MMHyperR3ToRC(pVM, paResults);
73 uint32_t cMsrsFound = 0;
74 uint32_t uLastMsr = uMsr;
75 uint64_t uNsTsStart = RTTimeNanoTS();
76
77 for (;;)
78 {
79 if ( pReportStrm
80 && uMsr - uLastMsr > _64K
81 && (uMsr & (_4M - 1)) == 0)
82 {
83 if (uMsr - uLastMsr < 16U*_1M)
84 RTStrmFlush(pReportStrm);
85 RTPrintf("... %#010x [%u ns/msr] ...\n", uMsr, (RTTimeNanoTS() - uNsTsStart) / uMsr);
86 }
87
88 /*RT_BZERO(paResults, cbResults);*/
89 uint32_t const cBatch = RT_MIN(cMsrsPerCall, cMsrs);
90 rc = VMMR3CallRC(pVM, RCPtrEP, 4, pVM->pVMRC, uMsr, cBatch, RCPtrResults);
91 if (RT_FAILURE(rc))
92 {
93 RTPrintf("VMM: VMMR3CallRC failed rc=%Rrc, uMsr=%#x\n", rc, uMsr);
94 break;
95 }
96
97 for (uint32_t i = 0; i < cBatch; i++)
98 if (paResults[i].uMsr != UINT64_MAX)
99 {
100 if (paResults[i].uValue == 0)
101 {
102 if (pReportStrm)
103 RTStrmPrintf(pReportStrm,
104 " MVO(%#010llx, \"MSR\", UINT64_C(%#018llx)),\n", paResults[i].uMsr, paResults[i].uValue);
105 RTPrintf("%#010llx = 0\n", paResults[i].uMsr);
106 }
107 else
108 {
109 if (pReportStrm)
110 RTStrmPrintf(pReportStrm,
111 " MVO(%#010llx, \"MSR\", UINT64_C(%#018llx)),\n", paResults[i].uMsr, paResults[i].uValue);
112 RTPrintf("%#010llx = %#010x`%08x\n", paResults[i].uMsr,
113 (uint32_t)(paResults[i].uValue >> 32), (uint32_t)paResults[i].uValue);
114 }
115 cMsrsFound++;
116 uLastMsr = paResults[i].uMsr;
117 }
118
119 /* Advance. */
120 if (cMsrs <= cMsrsPerCall)
121 break;
122 cMsrs -= cMsrsPerCall;
123 uMsr += cMsrsPerCall;
124 }
125
126 *pcMsrsFound += cMsrsFound;
127 MMHyperFree(pVM, paResults);
128 return rc;
129}
130
131
132/**
133 * Produces a quick report of MSRs.
134 *
135 * @returns VBox status code.
136 * @param pVM Pointer to the cross context VM structure.
137 * @param pReportStrm Pointer to the report output stream. Optional.
138 * @param fWithCpuId Whether CPUID should be included.
139 */
140static int vmmR3DoMsrQuickReport(PVM pVM, PRTSTREAM pReportStrm, bool fWithCpuId)
141{
142 uint64_t uTsStart = RTTimeNanoTS();
143 RTPrintf("=== MSR Quick Report Start ===\n");
144 RTStrmFlush(g_pStdOut);
145 if (fWithCpuId)
146 {
147 DBGFR3InfoStdErr(pVM->pUVM, "cpuid", "verbose");
148 RTPrintf("\n");
149 }
150 if (pReportStrm)
151 RTStrmPrintf(pReportStrm, "\n\n{\n");
152 uint32_t cMsrsFound = 0;
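 /* Probe the ranges where MSRs are commonly implemented: the architectural
    range at 0, a few other ranges seen in use, and the AMD ranges at
    0xc0000000 and 0xc0010000. */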
153 int aRc[] =
154 {
155 vmmR3ReportMsrRange(pVM, 0x00000000, 0x00042000, pReportStrm, &cMsrsFound),
156 vmmR3ReportMsrRange(pVM, 0x10000000, 0x00001000, pReportStrm, &cMsrsFound),
157 vmmR3ReportMsrRange(pVM, 0x20000000, 0x00001000, pReportStrm, &cMsrsFound),
158 vmmR3ReportMsrRange(pVM, 0x40000000, 0x00012000, pReportStrm, &cMsrsFound),
159 vmmR3ReportMsrRange(pVM, 0x80000000, 0x00012000, pReportStrm, &cMsrsFound),
160// vmmR3ReportMsrRange(pVM, 0xc0000000, 0x00102000, pReportStrm, &cMsrsFound),
161 vmmR3ReportMsrRange(pVM, 0xc0000000, 0x00010000, pReportStrm, &cMsrsFound),
162 vmmR3ReportMsrRange(pVM, 0xc0010000, 0x00002000, pReportStrm, &cMsrsFound),
163 };
164 int rc = VINF_SUCCESS;
165 for (unsigned i = 0; i < RT_ELEMENTS(aRc); i++)
166 if (RT_FAILURE(aRc[i]))
167 {
168 rc = aRc[i];
169 break;
170 }
171 if (pReportStrm)
172 RTStrmPrintf(pReportStrm, "}; /* %u (%#x) MSRs; rc=%Rrc */\n", cMsrsFound, cMsrsFound, rc);
173 RTPrintf("Total %u (%#x) MSRs\n", cMsrsFound, cMsrsFound);
174 RTPrintf("=== MSR Quick Report End (rc=%Rrc, %'llu ns) ===\n", rc, RTTimeNanoTS() - uTsStart);
175 return rc;
176}
177
178
179/**
180 * Performs a testcase.
181 *
182 * @returns return value from the test.
183 * @param pVM Pointer to the VM.
184 * @param enmTestcase The testcase operation to perform.
185 * @param uVariation The testcase variation id.
186 */
187static int vmmR3DoGCTest(PVM pVM, VMMGCOPERATION enmTestcase, unsigned uVariation)
188{
189 PVMCPU pVCpu = &pVM->aCpus[0];
190
191 RTRCPTR RCPtrEP;
192 int rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "VMMGCEntry", &RCPtrEP);
193 if (RT_FAILURE(rc))
194 return rc;
195
196 Log(("vmmR3DoGCTest: %d %#x\n", enmTestcase, uVariation));
197 CPUMSetHyperState(pVCpu, pVM->vmm.s.pfnCallTrampolineRC, pVCpu->vmm.s.pbEMTStackBottomRC, 0, 0);
198 vmmR3TestClearStack(pVCpu);
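 /* Build the call frame on the hypervisor stack: the three arguments for
    VMMGCEntry() are pushed right to left, followed by the frame size and
    the entry point used by the call trampoline. */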
199 CPUMPushHyper(pVCpu, uVariation);
200 CPUMPushHyper(pVCpu, enmTestcase);
201 CPUMPushHyper(pVCpu, pVM->pVMRC);
202 CPUMPushHyper(pVCpu, 3 * sizeof(RTRCPTR)); /* stack frame size */
203 CPUMPushHyper(pVCpu, RCPtrEP); /* what to call */
204 Assert(CPUMGetHyperCR3(pVCpu) && CPUMGetHyperCR3(pVCpu) == PGMGetHyperCR3(pVCpu));
205 rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN, 0);
206
207#if 1
208 /* flush the raw-mode logs. */
209# ifdef LOG_ENABLED
210 PRTLOGGERRC pLogger = pVM->vmm.s.pRCLoggerR3;
211 if ( pLogger
212 && pLogger->offScratch > 0)
213 RTLogFlushRC(NULL, pLogger);
214# endif
215# ifdef VBOX_WITH_RC_RELEASE_LOGGING
216 PRTLOGGERRC pRelLogger = pVM->vmm.s.pRCRelLoggerR3;
217 if (RT_UNLIKELY(pRelLogger && pRelLogger->offScratch > 0))
218 RTLogFlushRC(RTLogRelDefaultInstance(), pRelLogger);
219# endif
220#endif
221
222 Log(("vmmR3DoGCTest: rc=%Rrc iLastGZRc=%Rrc\n", rc, pVCpu->vmm.s.iLastGZRc));
223 if (RT_LIKELY(rc == VINF_SUCCESS))
224 rc = pVCpu->vmm.s.iLastGZRc;
225 return rc;
226}
227
228
229/**
230 * Performs a trap test.
231 *
232 * @returns Return value from the trap test.
233 * @param pVM Pointer to the VM.
234 * @param u8Trap The trap number to test.
235 * @param uVariation The testcase variation.
236 * @param rcExpect The expected result.
237 * @param u32Eax The expected eax value.
238 * @param pszFaultEIP The fault address. Pass NULL if this isn't available or doesn't apply.
239 * @param pszDesc The test description.
240 */
241static int vmmR3DoTrapTest(PVM pVM, uint8_t u8Trap, unsigned uVariation, int rcExpect, uint32_t u32Eax, const char *pszFaultEIP, const char *pszDesc)
242{
243 PVMCPU pVCpu = &pVM->aCpus[0];
244
245 RTPrintf("VMM: testing 0%x / %d - %s\n", u8Trap, uVariation, pszDesc);
246
247 RTRCPTR RCPtrEP;
248 int rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "VMMGCEntry", &RCPtrEP);
249 if (RT_FAILURE(rc))
250 return rc;
251
252 CPUMSetHyperState(pVCpu, pVM->vmm.s.pfnCallTrampolineRC, pVCpu->vmm.s.pbEMTStackBottomRC, 0, 0);
253 vmmR3TestClearStack(pVCpu);
254 CPUMPushHyper(pVCpu, uVariation);
255 CPUMPushHyper(pVCpu, u8Trap + VMMGC_DO_TESTCASE_TRAP_FIRST);
256 CPUMPushHyper(pVCpu, pVM->pVMRC);
257 CPUMPushHyper(pVCpu, 3 * sizeof(RTRCPTR)); /* stack frame size */
258 CPUMPushHyper(pVCpu, RCPtrEP); /* what to call */
259 Assert(CPUMGetHyperCR3(pVCpu) && CPUMGetHyperCR3(pVCpu) == PGMGetHyperCR3(pVCpu));
260 rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN, 0);
261 if (RT_LIKELY(rc == VINF_SUCCESS))
262 rc = pVCpu->vmm.s.iLastGZRc;
263 bool fDump = false;
264 if (rc != rcExpect)
265 {
266 RTPrintf("VMM: FAILURE - rc=%Rrc expected %Rrc\n", rc, rcExpect);
267 if (rc != VERR_NOT_IMPLEMENTED)
268 fDump = true;
269 }
270 else if ( rcExpect != VINF_SUCCESS
271 && u8Trap != 8 /* double fault doesn't dare set TrapNo. */
272 && u8Trap != 3 /* guest only, we're not in guest. */
273 && u8Trap != 1 /* guest only, we're not in guest. */
274 && u8Trap != TRPMGetTrapNo(pVCpu))
275 {
276 RTPrintf("VMM: FAILURE - Trap %#x expected %#x\n", TRPMGetTrapNo(pVCpu), u8Trap);
277 fDump = true;
278 }
279 else if (pszFaultEIP)
280 {
281 RTRCPTR RCPtrFault;
282 int rc2 = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, pszFaultEIP, &RCPtrFault);
283 if (RT_FAILURE(rc2))
284 RTPrintf("VMM: FAILURE - Failed to resolve symbol '%s', %Rrc!\n", pszFaultEIP, rc2);
285 else if (RCPtrFault != CPUMGetHyperEIP(pVCpu))
286 {
287 RTPrintf("VMM: FAILURE - EIP=%08RX32 expected %RRv (%s)\n", CPUMGetHyperEIP(pVCpu), RCPtrFault, pszFaultEIP);
288 fDump = true;
289 }
290 }
291 else if (rcExpect != VINF_SUCCESS)
292 {
293 if (CPUMGetHyperSS(pVCpu) != SELMGetHyperDS(pVM))
294 RTPrintf("VMM: FAILURE - ss=%x expected %x\n", CPUMGetHyperSS(pVCpu), SELMGetHyperDS(pVM));
295 if (CPUMGetHyperES(pVCpu) != SELMGetHyperDS(pVM))
296 RTPrintf("VMM: FAILURE - es=%x expected %x\n", CPUMGetHyperES(pVCpu), SELMGetHyperDS(pVM));
297 if (CPUMGetHyperDS(pVCpu) != SELMGetHyperDS(pVM))
298 RTPrintf("VMM: FAILURE - ds=%x expected %x\n", CPUMGetHyperDS(pVCpu), SELMGetHyperDS(pVM));
299 if (CPUMGetHyperFS(pVCpu) != SELMGetHyperDS(pVM))
300 RTPrintf("VMM: FAILURE - fs=%x expected %x\n", CPUMGetHyperFS(pVCpu), SELMGetHyperDS(pVM));
301 if (CPUMGetHyperGS(pVCpu) != SELMGetHyperDS(pVM))
302 RTPrintf("VMM: FAILURE - gs=%x expected %x\n", CPUMGetHyperGS(pVCpu), SELMGetHyperDS(pVM));
303 if (CPUMGetHyperEDI(pVCpu) != 0x01234567)
304 RTPrintf("VMM: FAILURE - edi=%x expected %x\n", CPUMGetHyperEDI(pVCpu), 0x01234567);
305 if (CPUMGetHyperESI(pVCpu) != 0x42000042)
306 RTPrintf("VMM: FAILURE - esi=%x expected %x\n", CPUMGetHyperESI(pVCpu), 0x42000042);
307 if (CPUMGetHyperEBP(pVCpu) != 0xffeeddcc)
308 RTPrintf("VMM: FAILURE - ebp=%x expected %x\n", CPUMGetHyperEBP(pVCpu), 0xffeeddcc);
309 if (CPUMGetHyperEBX(pVCpu) != 0x89abcdef)
310 RTPrintf("VMM: FAILURE - ebx=%x expected %x\n", CPUMGetHyperEBX(pVCpu), 0x89abcdef);
311 if (CPUMGetHyperECX(pVCpu) != 0xffffaaaa)
312 RTPrintf("VMM: FAILURE - ecx=%x expected %x\n", CPUMGetHyperECX(pVCpu), 0xffffaaaa);
313 if (CPUMGetHyperEDX(pVCpu) != 0x77778888)
314 RTPrintf("VMM: FAILURE - edx=%x expected %x\n", CPUMGetHyperEDX(pVCpu), 0x77778888);
315 if (CPUMGetHyperEAX(pVCpu) != u32Eax)
316 RTPrintf("VMM: FAILURE - eax=%x expected %x\n", CPUMGetHyperEAX(pVCpu), u32Eax);
317 }
318 if (fDump)
319 VMMR3FatalDump(pVM, pVCpu, rc);
320 return rc;
321}
322
323#endif /* VBOX_WITH_RAW_MODE */
324
325
326/**
 * Runs the raw-mode (GC) testcases: trap recovery, hardware breakpoints,
 * interrupt masking and forwarding, switcher profiling and a quick MSR report.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 */
327VMMR3DECL(int) VMMDoTest(PVM pVM)
328{
329 int rc = VINF_SUCCESS;
330
331#ifdef VBOX_WITH_RAW_MODE
332 PVMCPU pVCpu = &pVM->aCpus[0];
333 PUVM pUVM = pVM->pUVM;
334
335# ifdef NO_SUPCALLR0VMM
336 RTPrintf("NO_SUPCALLR0VMM\n");
337 return rc;
338# endif
339
340 /*
341 * Setup stack for calling VMMGCEntry().
342 */
343 RTRCPTR RCPtrEP;
344 rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "VMMGCEntry", &RCPtrEP);
345 if (RT_SUCCESS(rc))
346 {
347 RTPrintf("VMM: VMMGCEntry=%RRv\n", RCPtrEP);
348
349 /*
350 * Test various crashes which we must be able to recover from.
351 */
352 vmmR3DoTrapTest(pVM, 0x3, 0, VINF_EM_DBG_HYPER_ASSERTION, 0xf0f0f0f0, "vmmGCTestTrap3_FaultEIP", "int3");
353 vmmR3DoTrapTest(pVM, 0x3, 1, VINF_EM_DBG_HYPER_ASSERTION, 0xf0f0f0f0, "vmmGCTestTrap3_FaultEIP", "int3 WP");
354
355# if 0//defined(DEBUG_bird) /* guess most people would like to skip these since they write to com1. */
356 vmmR3DoTrapTest(pVM, 0x8, 0, VERR_TRPM_PANIC, 0x00000000, "vmmGCTestTrap8_FaultEIP", "#DF [#PG]");
357 SELMR3Relocate(pVM); /* this resets the busy flag of the Trap 08 TSS */
358 bool f;
359 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "DoubleFault", &f);
360# if !defined(DEBUG_bird)
361 if (RT_SUCCESS(rc) && f)
362# endif
363 {
364 /* see triple fault warnings in SELM and VMMGC.cpp. */
365 vmmR3DoTrapTest(pVM, 0x8, 1, VERR_TRPM_PANIC, 0x00000000, "vmmGCTestTrap8_FaultEIP", "#DF [#PG] WP");
366 SELMR3Relocate(pVM); /* this resets the busy flag of the Trap 08 TSS */
367 }
368# endif
369
370 vmmR3DoTrapTest(pVM, 0xd, 0, VERR_TRPM_DONT_PANIC, 0xf0f0f0f0, "vmmGCTestTrap0d_FaultEIP", "ltr #GP");
371 ///@todo find a better \#GP case; on Intel, ltr will \#PF (busy update?) and not \#GP.
372 //vmmR3DoTrapTest(pVM, 0xd, 1, VERR_TRPM_DONT_PANIC, 0xf0f0f0f0, "vmmGCTestTrap0d_FaultEIP", "ltr #GP WP");
373
374 vmmR3DoTrapTest(pVM, 0xe, 0, VERR_TRPM_DONT_PANIC, 0x00000000, "vmmGCTestTrap0e_FaultEIP", "#PF (NULL)");
375 vmmR3DoTrapTest(pVM, 0xe, 1, VERR_TRPM_DONT_PANIC, 0x00000000, "vmmGCTestTrap0e_FaultEIP", "#PF (NULL) WP");
376 vmmR3DoTrapTest(pVM, 0xe, 2, VINF_SUCCESS, 0x00000000, NULL, "#PF w/Tmp Handler");
377 /* This test is no longer relevant as fs and gs are loaded with NULL
378 selectors and we will always return to HC if a #GP occurs while
379 returning to guest code.
380 vmmR3DoTrapTest(pVM, 0xe, 4, VINF_SUCCESS, 0x00000000, NULL, "#PF w/Tmp Handler and bad fs");
381 */
382
383 /*
384 * Set a debug register and perform a context switch.
385 */
386 rc = vmmR3DoGCTest(pVM, VMMGC_DO_TESTCASE_NOP, 0);
387 if (rc != VINF_SUCCESS)
388 {
389 RTPrintf("VMM: Nop test failed, rc=%Rrc not VINF_SUCCESS\n", rc);
390 return RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS;
391 }
392
393 /* a harmless breakpoint */
394 RTPrintf("VMM: testing hardware bp at 0x10000 (not hit)\n");
395 DBGFADDRESS Addr;
396 DBGFR3AddrFromFlat(pUVM, &Addr, 0x10000);
397 RTUINT iBp0;
398 rc = DBGFR3BpSetReg(pUVM, &Addr, 0, ~(uint64_t)0, X86_DR7_RW_EO, 1, &iBp0);
399 AssertReleaseRC(rc);
400 rc = vmmR3DoGCTest(pVM, VMMGC_DO_TESTCASE_NOP, 0);
401 if (rc != VINF_SUCCESS)
402 {
403 RTPrintf("VMM: DR0=0x10000 test failed with rc=%Rrc!\n", rc);
404 return RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS;
405 }
406
407 /* a bad one at VMMGCEntry */
408 RTPrintf("VMM: testing hardware bp at VMMGCEntry (hit)\n");
409 DBGFR3AddrFromFlat(pUVM, &Addr, RCPtrEP);
410 RTUINT iBp1;
411 rc = DBGFR3BpSetReg(pUVM, &Addr, 0, ~(uint64_t)0, X86_DR7_RW_EO, 1, &iBp1);
412 AssertReleaseRC(rc);
413 rc = vmmR3DoGCTest(pVM, VMMGC_DO_TESTCASE_NOP, 0);
414 if (rc != VINF_EM_DBG_HYPER_BREAKPOINT)
415 {
416 RTPrintf("VMM: DR1=VMMGCEntry test failed with rc=%Rrc! expected VINF_EM_DBG_HYPER_BREAKPOINT\n", rc);
417 return RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS;
418 }
419
420 /* resume the breakpoint */
421 RTPrintf("VMM: resuming hyper after breakpoint\n");
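 /* Setting the resume flag (RF) prevents the instruction breakpoint from
    re-triggering on the very next instruction when we resume. */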
422 CPUMSetHyperEFlags(pVCpu, CPUMGetHyperEFlags(pVCpu) | X86_EFL_RF);
423 rc = VMMR3ResumeHyper(pVM, pVCpu);
424 if (rc != VINF_SUCCESS)
425 {
426 RTPrintf("VMM: failed to resume on hyper breakpoint, rc=%Rrc = KNOWN BUG\n", rc); /** @todo fix VMMR3ResumeHyper */
427 return RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS;
428 }
429
430 /* engage the breakpoint again and try single stepping. */
431 RTPrintf("VMM: testing hardware bp at VMMGCEntry + stepping\n");
432 rc = vmmR3DoGCTest(pVM, VMMGC_DO_TESTCASE_NOP, 0);
433 if (rc != VINF_EM_DBG_HYPER_BREAKPOINT)
434 {
435 RTPrintf("VMM: DR1=VMMGCEntry test failed with rc=%Rrc! expected VINF_EM_DBG_HYPER_BREAKPOINT\n", rc);
436 return RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS;
437 }
438
439 RTGCUINTREG OldPc = CPUMGetHyperEIP(pVCpu);
440 RTPrintf("%RGr=>", OldPc);
441 unsigned i;
442 for (i = 0; i < 8; i++)
443 {
444 CPUMSetHyperEFlags(pVCpu, CPUMGetHyperEFlags(pVCpu) | X86_EFL_TF | X86_EFL_RF);
445 rc = VMMR3ResumeHyper(pVM, pVCpu);
446 if (rc != VINF_EM_DBG_HYPER_STEPPED)
447 {
448 RTPrintf("\nVMM: failed to step on hyper breakpoint, rc=%Rrc\n", rc);
449 return RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS;
450 }
451 RTGCUINTREG Pc = CPUMGetHyperEIP(pVCpu);
452 RTPrintf("%RGr=>", Pc);
453 if (Pc == OldPc)
454 {
455 RTPrintf("\nVMM: step failed, PC: %RGr -> %RGr\n", OldPc, Pc);
456 return VERR_GENERAL_FAILURE;
457 }
458 OldPc = Pc;
459 }
460 RTPrintf("ok\n");
461
462 /* done, clear it */
463 if ( RT_FAILURE(DBGFR3BpClear(pUVM, iBp0))
464 || RT_FAILURE(DBGFR3BpClear(pUVM, iBp1)))
465 {
466 RTPrintf("VMM: Failed to clear breakpoints!\n");
467 return VERR_GENERAL_FAILURE;
468 }
469 rc = vmmR3DoGCTest(pVM, VMMGC_DO_TESTCASE_NOP, 0);
470 if (rc != VINF_SUCCESS)
471 {
472 RTPrintf("VMM: NOP failed, rc=%Rrc\n", rc);
473 return RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS;
474 }
475
476 /*
477 * Interrupt masking. Failure may indicate NMI watchdog activity.
478 */
479 RTPrintf("VMM: interrupt masking...\n"); RTStrmFlush(g_pStdOut); RTThreadSleep(250);
480 for (i = 0; i < 10000; i++)
481 {
482 uint64_t StartTick = ASMReadTSC();
483 rc = vmmR3DoGCTest(pVM, VMMGC_DO_TESTCASE_INTERRUPT_MASKING, 0);
484 if (rc != VINF_SUCCESS)
485 {
486 RTPrintf("VMM: Interrupt masking failed: rc=%Rrc\n", rc);
487 return RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_INFO_STATUS;
488 }
489 uint64_t Ticks = ASMReadTSC() - StartTick;
490 if (Ticks < (SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage) / 10000))
491 RTPrintf("Warning: Ticks=%RU64 (< %RU64)\n", Ticks, SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage) / 10000);
492 }
493
494 /*
495 * Interrupt forwarding.
496 */
497 CPUMSetHyperState(pVCpu, pVM->vmm.s.pfnCallTrampolineRC, pVCpu->vmm.s.pbEMTStackBottomRC, 0, 0);
498 CPUMPushHyper(pVCpu, 0);
499 CPUMPushHyper(pVCpu, VMMGC_DO_TESTCASE_HYPER_INTERRUPT);
500 CPUMPushHyper(pVCpu, pVM->pVMRC);
501 CPUMPushHyper(pVCpu, 3 * sizeof(RTRCPTR)); /* stack frame size */
502 CPUMPushHyper(pVCpu, RCPtrEP); /* what to call */
503 Log(("trampoline=%x\n", pVM->vmm.s.pfnCallTrampolineRC));
504
505 /*
506 * Switch and do da thing.
507 */
508 RTPrintf("VMM: interrupt forwarding...\n"); RTStrmFlush(g_pStdOut); RTThreadSleep(250);
509 i = 0;
510 uint64_t tsBegin = RTTimeNanoTS();
511 uint64_t TickStart = ASMReadTSC();
512 Assert(CPUMGetHyperCR3(pVCpu) && CPUMGetHyperCR3(pVCpu) == PGMGetHyperCR3(pVCpu));
513 do
514 {
515 rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN, 0);
516 if (RT_LIKELY(rc == VINF_SUCCESS))
517 rc = pVCpu->vmm.s.iLastGZRc;
518 if (RT_FAILURE(rc))
519 {
520 Log(("VMM: GC returned fatal %Rra in iteration %d\n", rc, i));
521 VMMR3FatalDump(pVM, pVCpu, rc);
522 return rc;
523 }
524 i++;
525 if (!(i % 32))
526 Log(("VMM: iteration %d, esi=%08x edi=%08x ebx=%08x\n",
527 i, CPUMGetHyperESI(pVCpu), CPUMGetHyperEDI(pVCpu), CPUMGetHyperEBX(pVCpu)));
528 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
529 uint64_t TickEnd = ASMReadTSC();
530 uint64_t tsEnd = RTTimeNanoTS();
531
532 uint64_t Elapsed = tsEnd - tsBegin;
533 uint64_t PerIteration = Elapsed / (uint64_t)i;
534 uint64_t cTicksElapsed = TickEnd - TickStart;
535 uint64_t cTicksPerIteration = cTicksElapsed / (uint64_t)i;
536
537 RTPrintf("VMM: %8d interrupts in %11llu ns (%11llu ticks), %10llu ns/iteration (%11llu ticks)\n",
538 i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration);
539 Log(("VMM: %8d interrupts in %11llu ns (%11llu ticks), %10llu ns/iteration (%11llu ticks)\n",
540 i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration));
541
542 /*
543 * These forced actions are not necessary for the test and trigger breakpoints too.
544 */
545 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
546 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
547
548 /*
549 * Profile switching.
550 */
551 RTPrintf("VMM: profiling switcher...\n");
552 Log(("VMM: profiling switcher...\n"));
553 uint64_t TickMin = ~0;
554 tsBegin = RTTimeNanoTS();
555 TickStart = ASMReadTSC();
556 Assert(CPUMGetHyperCR3(pVCpu) && CPUMGetHyperCR3(pVCpu) == PGMGetHyperCR3(pVCpu));
557 for (i = 0; i < 1000000; i++)
558 {
559 CPUMSetHyperState(pVCpu, pVM->vmm.s.pfnCallTrampolineRC, pVCpu->vmm.s.pbEMTStackBottomRC, 0, 0);
560 CPUMPushHyper(pVCpu, 0);
561 CPUMPushHyper(pVCpu, VMMGC_DO_TESTCASE_NOP);
562 CPUMPushHyper(pVCpu, pVM->pVMRC);
563 CPUMPushHyper(pVCpu, 3 * sizeof(RTRCPTR)); /* stack frame size */
564 CPUMPushHyper(pVCpu, RCPtrEP); /* what to call */
565
566 uint64_t TickThisStart = ASMReadTSC();
567 rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_RAW_RUN, 0);
568 if (RT_LIKELY(rc == VINF_SUCCESS))
569 rc = pVCpu->vmm.s.iLastGZRc;
570 uint64_t TickThisElapsed = ASMReadTSC() - TickThisStart;
571 if (RT_FAILURE(rc))
572 {
573 Log(("VMM: GC returned fatal %Rra in iteration %d\n", rc, i));
574 VMMR3FatalDump(pVM, pVCpu, rc);
575 return rc;
576 }
577 if (TickThisElapsed < TickMin)
578 TickMin = TickThisElapsed;
579 }
580 TickEnd = ASMReadTSC();
581 tsEnd = RTTimeNanoTS();
582
583 Elapsed = tsEnd - tsBegin;
584 PerIteration = Elapsed / (uint64_t)i;
585 cTicksElapsed = TickEnd - TickStart;
586 cTicksPerIteration = cTicksElapsed / (uint64_t)i;
587
588 RTPrintf("VMM: %8d cycles in %11llu ns (%11lld ticks), %10llu ns/iteration (%11lld ticks) Min %11lld ticks\n",
589 i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration, TickMin);
590 Log(("VMM: %8d cycles in %11llu ns (%11lld ticks), %10llu ns/iteration (%11lld ticks) Min %11lld ticks\n",
591 i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration, TickMin));
592
593 rc = VINF_SUCCESS;
594
595 /*
596 * A quick MSR report.
597 */
598 vmmR3DoMsrQuickReport(pVM, NULL, true);
599 }
600 else
601 AssertMsgFailed(("Failed to resolve VMMGC.gc::VMMGCEntry(), rc=%Rrc\n", rc));
602#endif
603 return rc;
604}
605
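/** Copies the hidden base, limit and attributes of a shadow selector into the
 *  corresponding hypervisor context selector register. */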
606#define SYNC_SEL(pHyperCtx, reg) \
607 if (pHyperCtx->reg.Sel) \
608 { \
609 DBGFSELINFO selInfo; \
610 int rc2 = SELMR3GetShadowSelectorInfo(pVM, pHyperCtx->reg.Sel, &selInfo); \
611 AssertRC(rc2); \
612 \
613 pHyperCtx->reg.u64Base = selInfo.GCPtrBase; \
614 pHyperCtx->reg.u32Limit = selInfo.cbLimit; \
615 pHyperCtx->reg.Attr.n.u1Present = selInfo.u.Raw.Gen.u1Present; \
616 pHyperCtx->reg.Attr.n.u1DefBig = selInfo.u.Raw.Gen.u1DefBig; \
617 pHyperCtx->reg.Attr.n.u1Granularity = selInfo.u.Raw.Gen.u1Granularity; \
618 pHyperCtx->reg.Attr.n.u4Type = selInfo.u.Raw.Gen.u4Type; \
619 pHyperCtx->reg.Attr.n.u2Dpl = selInfo.u.Raw.Gen.u2Dpl; \
620 pHyperCtx->reg.Attr.n.u1DescType = selInfo.u.Raw.Gen.u1DescType; \
621 pHyperCtx->reg.Attr.n.u1Long = selInfo.u.Raw.Gen.u1Long; \
622 }
623
624/**
 * Profiles the world switcher when hardware assisted virtualization (HM) is
 * in use, by running the HM no-op testcase a million times.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 */
625VMMR3DECL(int) VMMDoHmTest(PVM pVM)
626{
627 uint32_t i;
628 int rc;
629 PCPUMCTX pHyperCtx, pGuestCtx;
630 RTGCPHYS CR3Phys = 0x0; /* fake address */
631 PVMCPU pVCpu = &pVM->aCpus[0];
632
633 if (!HMIsEnabled(pVM))
634 {
635 RTPrintf("VMM: Hardware accelerated test not available!\n");
636 return VERR_ACCESS_DENIED;
637 }
638
639#ifdef VBOX_WITH_RAW_MODE
640 /*
641 * These forced actions are not necessary for the test and trigger breakpoints too.
642 */
643 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
644 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
645#endif
646
647 /* Enable mapping of the hypervisor into the shadow page table. */
648 uint32_t cb;
649 rc = PGMR3MappingsSize(pVM, &cb);
650 AssertRCReturn(rc, rc);
651
652 /* Pretend the mappings are now fixed; to force a refresh of the reserved PDEs. */
653 rc = PGMR3MappingsFix(pVM, MM_HYPER_AREA_ADDRESS, cb);
654 AssertRCReturn(rc, rc);
655
656 pHyperCtx = CPUMGetHyperCtxPtr(pVCpu);
657
658 pHyperCtx->cr0 = X86_CR0_PE | X86_CR0_WP | X86_CR0_PG | X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP;
659 pHyperCtx->cr4 = X86_CR4_PGE | X86_CR4_OSFSXR | X86_CR4_OSXMMEEXCPT;
660 PGMChangeMode(pVCpu, pHyperCtx->cr0, pHyperCtx->cr4, pHyperCtx->msrEFER);
661 PGMSyncCR3(pVCpu, pHyperCtx->cr0, CR3Phys, pHyperCtx->cr4, true);
662
663 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
664 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TIMER);
665 VM_FF_CLEAR(pVM, VM_FF_TM_VIRTUAL_SYNC);
666 VM_FF_CLEAR(pVM, VM_FF_REQUEST);
667
668 /*
669 * Setup stack for calling VMMGCEntry().
670 */
671 RTRCPTR RCPtrEP;
672 rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "VMMGCEntry", &RCPtrEP);
673 if (RT_SUCCESS(rc))
674 {
675 RTPrintf("VMM: VMMGCEntry=%RRv\n", RCPtrEP);
676
677 pHyperCtx = CPUMGetHyperCtxPtr(pVCpu);
678
679 /* Fill in hidden selector registers for the hypervisor state. */
680 SYNC_SEL(pHyperCtx, cs);
681 SYNC_SEL(pHyperCtx, ds);
682 SYNC_SEL(pHyperCtx, es);
683 SYNC_SEL(pHyperCtx, fs);
684 SYNC_SEL(pHyperCtx, gs);
685 SYNC_SEL(pHyperCtx, ss);
686 SYNC_SEL(pHyperCtx, tr);
687
688 /*
689 * Profile switching.
690 */
691 RTPrintf("VMM: profiling switcher...\n");
692 Log(("VMM: profiling switcher...\n"));
693 uint64_t TickMin = ~0;
694 uint64_t tsBegin = RTTimeNanoTS();
695 uint64_t TickStart = ASMReadTSC();
696 for (i = 0; i < 1000000; i++)
697 {
698 CPUMSetHyperState(pVCpu, pVM->vmm.s.pfnCallTrampolineRC, pVCpu->vmm.s.pbEMTStackBottomRC, 0, 0);
699 CPUMPushHyper(pVCpu, 0);
700 CPUMPushHyper(pVCpu, VMMGC_DO_TESTCASE_HM_NOP);
701 CPUMPushHyper(pVCpu, pVM->pVMRC);
702 CPUMPushHyper(pVCpu, 3 * sizeof(RTRCPTR)); /* stack frame size */
703 CPUMPushHyper(pVCpu, RCPtrEP); /* what to call */
704
705 pHyperCtx = CPUMGetHyperCtxPtr(pVCpu);
706 pGuestCtx = CPUMQueryGuestCtxPtr(pVCpu);
707
708 /* Copy the hypervisor context to make sure we have a valid guest context. */
709 *pGuestCtx = *pHyperCtx;
710 pGuestCtx->cr3 = CR3Phys;
711
712 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
713 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TIMER);
714 VM_FF_CLEAR(pVM, VM_FF_TM_VIRTUAL_SYNC);
715
716 uint64_t TickThisStart = ASMReadTSC();
717 rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_HM_RUN, 0);
718 uint64_t TickThisElapsed = ASMReadTSC() - TickThisStart;
719 if (RT_FAILURE(rc))
720 {
721 Log(("VMM: R0 returned fatal %Rrc in iteration %d\n", rc, i));
722 VMMR3FatalDump(pVM, pVCpu, rc);
723 return rc;
724 }
725 if (TickThisElapsed < TickMin)
726 TickMin = TickThisElapsed;
727 }
728 uint64_t TickEnd = ASMReadTSC();
729 uint64_t tsEnd = RTTimeNanoTS();
730
731 uint64_t Elapsed = tsEnd - tsBegin;
732 uint64_t PerIteration = Elapsed / (uint64_t)i;
733 uint64_t cTicksElapsed = TickEnd - TickStart;
734 uint64_t cTicksPerIteration = cTicksElapsed / (uint64_t)i;
735
736 RTPrintf("VMM: %8d cycles in %11llu ns (%11lld ticks), %10llu ns/iteration (%11lld ticks) Min %11lld ticks\n",
737 i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration, TickMin);
738 Log(("VMM: %8d cycles in %11llu ns (%11lld ticks), %10llu ns/iteration (%11lld ticks) Min %11lld ticks\n",
739 i, Elapsed, cTicksElapsed, PerIteration, cTicksPerIteration, TickMin));
740
741 rc = VINF_SUCCESS;
742 }
743 else
744 AssertMsgFailed(("Failed to resolve VMMGC.gc::VMMGCEntry(), rc=%Rrc\n", rc));
745
746 return rc;
747}
748
749
750#ifdef VBOX_WITH_RAW_MODE
751
752/**
753 * Used by VMMDoBruteForceMsrs to dump the CPUID info of the host CPU as a
754 * prefix to the MSR report.
755 */
756static DECLCALLBACK(void) vmmDoPrintfVToStream(PCDBGFINFOHLP pHlp, const char *pszFormat, va_list va)
757{
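 /* The DBGFINFOHLP sits right after the stream pointer in the structure set
    up by VMMDoBruteForceMsrs, so indexing -1 from the helper recovers the
    output stream. */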
758 PRTSTREAM pOutStrm = ((PRTSTREAM *)pHlp)[-1];
759 RTStrmPrintfV(pOutStrm, pszFormat, va);
760}
761
762/**
763 * Used by VMMDoBruteForceMsrs to dump the CPUID info of the host CPU as a
764 * prefix to the MSR report.
765 */
766static DECLCALLBACK(void) vmmDoPrintfToStream(PCDBGFINFOHLP pHlp, const char *pszFormat, ...)
767{
768 va_list va;
769 va_start(va, pszFormat);
770 vmmDoPrintfVToStream(pHlp, pszFormat, va);
771 va_end(va);
772}
773
774#endif
775
776
777/**
778 * Uses raw-mode to query all possible MSRs on the real hardware.
779 *
780 * This generates a msr-report.txt file (appending, no overwriting) as well as
782 * writing the values and progress to stdout.
782 *
783 * @returns VBox status code.
784 * @param pVM The VM handle.
785 */
786VMMR3DECL(int) VMMDoBruteForceMsrs(PVM pVM)
787{
788#ifdef VBOX_WITH_RAW_MODE
789 PRTSTREAM pOutStrm;
790 int rc = RTStrmOpen("msr-report.txt", "a", &pOutStrm);
791 if (RT_SUCCESS(rc))
792 {
793 /* Header */
794 struct
795 {
796 PRTSTREAM pOutStrm;
797 DBGFINFOHLP Hlp;
798 } MyHlp = { pOutStrm, { vmmDoPrintfToStream, vmmDoPrintfVToStream } };
799 DBGFR3Info(pVM->pUVM, "cpuid", "verbose", &MyHlp.Hlp);
800 RTStrmPrintf(pOutStrm, "\n");
801
802 uint32_t cMsrsFound = 0;
803 vmmR3ReportMsrRange(pVM, 0, _4G, pOutStrm, &cMsrsFound);
804
805 RTStrmPrintf(pOutStrm, "Total %u (%#x) MSRs\n", cMsrsFound, cMsrsFound);
806 RTPrintf("Total %u (%#x) MSRs\n", cMsrsFound, cMsrsFound);
807
808 RTStrmClose(pOutStrm);
809 }
810 return rc;
811#else
812 return VERR_NOT_SUPPORTED;
813#endif
814}
815
816
817/**
818 * Uses raw-mode to query all known MSRs on the real hardware.
819 *
820 * This generates a known-msr-report.txt file (appending, no overwriting) as
821 * well as writing the values and progress to stdout.
822 *
823 * @returns VBox status code.
824 * @param pVM The VM handle.
825 */
826VMMR3DECL(int) VMMDoKnownMsrs(PVM pVM)
827{
828#ifdef VBOX_WITH_RAW_MODE
829 PRTSTREAM pOutStrm;
830 int rc = RTStrmOpen("known-msr-report.txt", "a", &pOutStrm);
831 if (RT_SUCCESS(rc))
832 {
833 vmmR3DoMsrQuickReport(pVM, pOutStrm, false);
834 RTStrmClose(pOutStrm);
835 }
836 return rc;
837#else
838 return VERR_NOT_SUPPORTED;
839#endif
840}
841
842
843/**
844 * MSR experimentation.
845 *
846 * @returns VBox status code.
847 * @param pVM The VM handle.
848 */
849VMMR3DECL(int) VMMDoMsrExperiments(PVM pVM)
850{
851#ifdef VBOX_WITH_RAW_MODE
852 /*
853 * Preps.
854 */
855 RTRCPTR RCPtrEP;
856 int rc = PDMR3LdrGetSymbolRC(pVM, VMMGC_MAIN_MODULE_NAME, "VMMRCTestTestWriteMsr", &RCPtrEP);
857 AssertMsgRCReturn(rc, ("Failed to resolve VMMRC.rc::VMMRCTestTestWriteMsr(), rc=%Rrc\n", rc), rc);
858
859 uint64_t *pauValues;
860 rc = MMHyperAlloc(pVM, 2 * sizeof(uint64_t), 0, MM_TAG_VMM, (void **)&pauValues);
861 AssertMsgRCReturn(rc, ("Error allocating %#x bytes off the hyper heap: %Rrc\n", 2 * sizeof(uint64_t), rc), rc);
862 RTRCPTR RCPtrValues = MMHyperR3ToRC(pVM, pauValues);
863
864 /*
865 * Do the experiments.
866 */
867 uint32_t uMsr = 0xc0011011;
868 uint64_t uValue = 0x10000;
869#if 0
870 rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
871 RCPtrValues, RCPtrValues + sizeof(uint64_t));
872 RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\n",
873 uMsr, pauValues[0], uValue, pauValues[1], rc);
874#endif
875 for (uint32_t i = 0; i <= 63; i++)
876 {
877 uValue = RT_BIT_64(i);
878 rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
879 RCPtrValues, RCPtrValues + sizeof(uint64_t));
880 RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\n",
881 uMsr, pauValues[0], uValue, pauValues[1], rc);
882 }
883
884 uValue = 0;
885 rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
886 RCPtrValues, RCPtrValues + sizeof(uint64_t));
887 RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\n",
888 uMsr, pauValues[0], uValue, pauValues[1], rc);
889
890 uValue = UINT64_MAX;
891 rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
892 RCPtrValues, RCPtrValues + sizeof(uint64_t));
893 RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\n",
894 uMsr, pauValues[0], uValue, pauValues[1], rc);
895
896 /*
897 * Cleanups.
898 */
899 MMHyperFree(pVM, pauValues);
900 return rc;
901#else
902 return VERR_NOT_SUPPORTED;
903#endif
904}
905