VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/CSAM.cpp@57009

Last change on this file since 57009 was 57008, checked in by vboxsync on 2015-07-19

CSAM: Fixed saved state (broken since r10346). Won't easily break again.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 108.5 KB
1/* $Id: CSAM.cpp 57008 2015-07-19 17:11:15Z vboxsync $ */
2/** @file
3 * CSAM - Guest OS Code Scanning and Analysis Manager
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_CSAM
22#include <VBox/vmm/cpum.h>
23#include <VBox/vmm/stam.h>
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/csam.h>
26#include <VBox/vmm/cpumdis.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/iom.h>
29#include <VBox/vmm/mm.h>
30#include <VBox/vmm/em.h>
31#include <VBox/vmm/hm.h>
32#ifdef VBOX_WITH_REM
33# include <VBox/vmm/rem.h>
34#endif
35#include <VBox/vmm/selm.h>
36#include <VBox/vmm/trpm.h>
37#include <VBox/vmm/cfgm.h>
38#include <VBox/vmm/ssm.h>
39#include <VBox/param.h>
40#include <iprt/avl.h>
41#include <iprt/asm.h>
42#include <iprt/thread.h>
43#include "CSAMInternal.h"
44#include <VBox/vmm/vm.h>
45#include <VBox/vmm/uvm.h>
46
47#include <VBox/dbg.h>
48#include <VBox/sup.h>
49#include <VBox/err.h>
50#include <VBox/log.h>
51#include <VBox/version.h>
52
53#include <VBox/dis.h>
54#include <VBox/disopcode.h>
55#include <iprt/assert.h>
56#include <iprt/string.h>
57#include "internal/pgm.h"
58
59
60/* Enabled by default */
61#define CSAM_ENABLE
62
63/* Enable to monitor code pages for self-modifying code. */
64#define CSAM_MONITOR_CODE_PAGES
65/* Enable to monitor all scanned pages
66#define CSAM_MONITOR_CSAM_CODE_PAGES */
67/* Enable to scan beyond ret instructions.
68#define CSAM_ANALYSE_BEYOND_RET */
69
70/*******************************************************************************
71* Internal Functions *
72*******************************************************************************/
73static DECLCALLBACK(int) csamR3Save(PVM pVM, PSSMHANDLE pSSM);
74static DECLCALLBACK(int) csamR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
75static FNPGMR3VIRTINVALIDATE csamR3CodePageInvalidate;
76
77bool csamIsCodeScanned(PVM pVM, RTRCPTR pInstr, PCSAMPAGE *pPage);
78int csamR3CheckPageRecord(PVM pVM, RTRCPTR pInstr);
79static PCSAMPAGE csamR3CreatePageRecord(PVM pVM, RTRCPTR GCPtr, CSAMTAG enmTag, bool fCode32, bool fMonitorInvalidation = false);
80static int csamRemovePageRecord(PVM pVM, RTRCPTR GCPtr);
81static int csamReinit(PVM pVM);
82static void csamMarkCode(PVM pVM, PCSAMPAGE pPage, RTRCPTR pInstr, uint32_t opsize, bool fScanned);
83static int csamAnalyseCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, bool fCode32,
84 PFN_CSAMR3ANALYSE pfnCSAMR3Analyse, void *pUserData, PCSAMP2GLOOKUPREC pCacheRec);
85
86/** @todo "Temporary" for debugging. */
87static bool g_fInCsamR3CodePageInvalidate = false;
88
89#ifdef VBOX_WITH_DEBUGGER
90static FNDBGCCMD csamr3CmdOn;
91static FNDBGCCMD csamr3CmdOff;
92#endif
93
94
95/*******************************************************************************
96* Global Variables *
97*******************************************************************************/
98#ifdef VBOX_WITH_DEBUGGER
99/** Command descriptors. */
100static const DBGCCMD g_aCmds[] =
101{
102 /* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, fFlags, pfnHandler, pszSyntax, pszDescription */
103 { "csamon", 0, 0, NULL, 0, 0, csamr3CmdOn, "", "Enable CSAM code scanning." },
104 { "csamoff", 0, 0, NULL, 0, 0, csamr3CmdOff, "", "Disable CSAM code scanning." },
105};
106#endif
107
108/**
109 * SSM descriptor table for the CSAM structure (save + restore).
110 */
111static const SSMFIELD g_aCsamFields[] =
112{
113 SSMFIELD_ENTRY( CSAM, aDangerousInstr), /* didn't used to be restored */
114 SSMFIELD_ENTRY( CSAM, cDangerousInstr), /* didn't used to be restored */
115 SSMFIELD_ENTRY( CSAM, iDangerousInstr), /* didn't used to be restored */
116 SSMFIELD_ENTRY( CSAM, savedstate.cPageRecords),
117 SSMFIELD_ENTRY( CSAM, savedstate.cPatchPageRecords),
118 SSMFIELD_ENTRY( CSAM, cDirtyPages),
119 SSMFIELD_ENTRY_RCPTR_ARRAY( CSAM, pvDirtyBasePage),
120 SSMFIELD_ENTRY_RCPTR_ARRAY( CSAM, pvDirtyFaultPage),
121 SSMFIELD_ENTRY( CSAM, cPossibleCodePages),
122 SSMFIELD_ENTRY_RCPTR_ARRAY( CSAM, pvPossibleCodePage),
123 SSMFIELD_ENTRY_RCPTR_ARRAY( CSAM, pvCallInstruction), /* didn't used to be restored */
124 SSMFIELD_ENTRY( CSAM, iCallInstruction), /* didn't used to be restored */
125 SSMFIELD_ENTRY( CSAM, fScanningStarted),
126 SSMFIELD_ENTRY( CSAM, fGatesChecked),
127 SSMFIELD_ENTRY_TERM()
128};
129
130/**
131 * SSM descriptor table for the version 5.0.0 CSAM structure.
132 */
133static const SSMFIELD g_aCsamFields500[] =
134{
135 SSMFIELD_ENTRY_IGNORE( CSAM, offVM),
136 SSMFIELD_ENTRY_PAD_HC64( CSAM, Alignment0, sizeof(uint32_t)),
137 SSMFIELD_ENTRY_IGN_HCPTR( CSAM, pPageTree),
138 SSMFIELD_ENTRY( CSAM, aDangerousInstr),
139 SSMFIELD_ENTRY( CSAM, cDangerousInstr),
140 SSMFIELD_ENTRY( CSAM, iDangerousInstr),
141 SSMFIELD_ENTRY_RCPTR( CSAM, pPDBitmapGC), /// @todo ignore this?
142 SSMFIELD_ENTRY_RCPTR( CSAM, pPDHCBitmapGC), /// @todo ignore this?
143 SSMFIELD_ENTRY_IGN_HCPTR( CSAM, pPDBitmapHC),
144 SSMFIELD_ENTRY_IGN_HCPTR( CSAM, pPDGCBitmapHC),
145 SSMFIELD_ENTRY_IGN_HCPTR( CSAM, savedstate.pSSM),
146 SSMFIELD_ENTRY( CSAM, savedstate.cPageRecords),
147 SSMFIELD_ENTRY( CSAM, savedstate.cPatchPageRecords),
148 SSMFIELD_ENTRY( CSAM, cDirtyPages),
149 SSMFIELD_ENTRY_RCPTR_ARRAY( CSAM, pvDirtyBasePage),
150 SSMFIELD_ENTRY_RCPTR_ARRAY( CSAM, pvDirtyFaultPage),
151 SSMFIELD_ENTRY( CSAM, cPossibleCodePages),
152 SSMFIELD_ENTRY_RCPTR_ARRAY( CSAM, pvPossibleCodePage),
153 SSMFIELD_ENTRY_RCPTR_ARRAY( CSAM, pvCallInstruction),
154 SSMFIELD_ENTRY( CSAM, iCallInstruction),
155 SSMFIELD_ENTRY_IGNORE( CSAM, hCodePageWriteType), /* added in 5.0 */
156 SSMFIELD_ENTRY_IGNORE( CSAM, hCodePageWriteAndInvPgType), /* added in 5.0 */
157 SSMFIELD_ENTRY( CSAM, fScanningStarted),
158 SSMFIELD_ENTRY( CSAM, fGatesChecked),
159 SSMFIELD_ENTRY_PAD_HC( CSAM, Alignment1, 6, 2),
160 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrTraps),
161 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrPages),
162 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrPagesInv),
163 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrRemovedPages),
164 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrPatchPages),
165 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrPageNPHC),
166 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrPageNPGC),
167 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrFlushes),
168 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrFlushesSkipped),
169 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrKnownPagesHC),
170 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrKnownPagesGC),
171 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrInstr),
172 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrBytesRead),
173 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrOpcodeRead),
174 SSMFIELD_ENTRY_IGNORE( CSAM, StatTime),
175 SSMFIELD_ENTRY_IGNORE( CSAM, StatTimeCheckAddr),
176 SSMFIELD_ENTRY_IGNORE( CSAM, StatTimeAddrConv),
177 SSMFIELD_ENTRY_IGNORE( CSAM, StatTimeFlushPage),
178 SSMFIELD_ENTRY_IGNORE( CSAM, StatTimeDisasm),
179 SSMFIELD_ENTRY_IGNORE( CSAM, StatFlushDirtyPages),
180 SSMFIELD_ENTRY_IGNORE( CSAM, StatCheckGates),
181 SSMFIELD_ENTRY_IGNORE( CSAM, StatCodePageModified),
182 SSMFIELD_ENTRY_IGNORE( CSAM, StatDangerousWrite),
183 SSMFIELD_ENTRY_IGNORE( CSAM, StatInstrCacheHit),
184 SSMFIELD_ENTRY_IGNORE( CSAM, StatInstrCacheMiss),
185 SSMFIELD_ENTRY_IGNORE( CSAM, StatPagePATM),
186 SSMFIELD_ENTRY_IGNORE( CSAM, StatPageCSAM),
187 SSMFIELD_ENTRY_IGNORE( CSAM, StatPageREM),
188 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrUserPages),
189 SSMFIELD_ENTRY_IGNORE( CSAM, StatPageMonitor),
190 SSMFIELD_ENTRY_IGNORE( CSAM, StatPageRemoveREMFlush),
191 SSMFIELD_ENTRY_IGNORE( CSAM, StatBitmapAlloc),
192 SSMFIELD_ENTRY_IGNORE( CSAM, StatScanNextFunction),
193 SSMFIELD_ENTRY_IGNORE( CSAM, StatScanNextFunctionFailed),
194 SSMFIELD_ENTRY_TERM()
195};
196
197/**
198 * SSM descriptor table for the pre 5.0.0 CSAM structure.
199 */
200static const SSMFIELD g_aCsamFieldsBefore500[] =
201{
202 /** @todo there are more fields that can be ignored here. */
203 SSMFIELD_ENTRY_IGNORE( CSAM, offVM),
204 SSMFIELD_ENTRY_PAD_HC64( CSAM, Alignment0, sizeof(uint32_t)),
205 SSMFIELD_ENTRY_IGN_HCPTR( CSAM, pPageTree),
206 SSMFIELD_ENTRY( CSAM, aDangerousInstr),
207 SSMFIELD_ENTRY( CSAM, cDangerousInstr),
208 SSMFIELD_ENTRY( CSAM, iDangerousInstr),
209 SSMFIELD_ENTRY_RCPTR( CSAM, pPDBitmapGC), /// @todo ignore this?
210 SSMFIELD_ENTRY_RCPTR( CSAM, pPDHCBitmapGC), /// @todo ignore this?
211 SSMFIELD_ENTRY_IGN_HCPTR( CSAM, pPDBitmapHC),
212 SSMFIELD_ENTRY_IGN_HCPTR( CSAM, pPDGCBitmapHC),
213 SSMFIELD_ENTRY_IGN_HCPTR( CSAM, savedstate.pSSM),
214 SSMFIELD_ENTRY( CSAM, savedstate.cPageRecords),
215 SSMFIELD_ENTRY( CSAM, savedstate.cPatchPageRecords),
216 SSMFIELD_ENTRY( CSAM, cDirtyPages),
217 SSMFIELD_ENTRY_RCPTR_ARRAY( CSAM, pvDirtyBasePage),
218 SSMFIELD_ENTRY_RCPTR_ARRAY( CSAM, pvDirtyFaultPage),
219 SSMFIELD_ENTRY( CSAM, cPossibleCodePages),
220 SSMFIELD_ENTRY_RCPTR_ARRAY( CSAM, pvPossibleCodePage),
221 SSMFIELD_ENTRY_RCPTR_ARRAY( CSAM, pvCallInstruction),
222 SSMFIELD_ENTRY( CSAM, iCallInstruction),
223 SSMFIELD_ENTRY( CSAM, fScanningStarted),
224 SSMFIELD_ENTRY( CSAM, fGatesChecked),
225 SSMFIELD_ENTRY_PAD_HC( CSAM, Alignment1, 6, 2),
226 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrTraps),
227 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrPages),
228 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrPagesInv),
229 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrRemovedPages),
230 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrPatchPages),
231 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrPageNPHC),
232 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrPageNPGC),
233 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrFlushes),
234 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrFlushesSkipped),
235 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrKnownPagesHC),
236 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrKnownPagesGC),
237 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrInstr),
238 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrBytesRead),
239 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrOpcodeRead),
240 SSMFIELD_ENTRY_IGNORE( CSAM, StatTime),
241 SSMFIELD_ENTRY_IGNORE( CSAM, StatTimeCheckAddr),
242 SSMFIELD_ENTRY_IGNORE( CSAM, StatTimeAddrConv),
243 SSMFIELD_ENTRY_IGNORE( CSAM, StatTimeFlushPage),
244 SSMFIELD_ENTRY_IGNORE( CSAM, StatTimeDisasm),
245 SSMFIELD_ENTRY_IGNORE( CSAM, StatFlushDirtyPages),
246 SSMFIELD_ENTRY_IGNORE( CSAM, StatCheckGates),
247 SSMFIELD_ENTRY_IGNORE( CSAM, StatCodePageModified),
248 SSMFIELD_ENTRY_IGNORE( CSAM, StatDangerousWrite),
249 SSMFIELD_ENTRY_IGNORE( CSAM, StatInstrCacheHit),
250 SSMFIELD_ENTRY_IGNORE( CSAM, StatInstrCacheMiss),
251 SSMFIELD_ENTRY_IGNORE( CSAM, StatPagePATM),
252 SSMFIELD_ENTRY_IGNORE( CSAM, StatPageCSAM),
253 SSMFIELD_ENTRY_IGNORE( CSAM, StatPageREM),
254 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrUserPages),
255 SSMFIELD_ENTRY_IGNORE( CSAM, StatPageMonitor),
256 SSMFIELD_ENTRY_IGNORE( CSAM, StatPageRemoveREMFlush),
257 SSMFIELD_ENTRY_IGNORE( CSAM, StatBitmapAlloc),
258 SSMFIELD_ENTRY_IGNORE( CSAM, StatScanNextFunction),
259 SSMFIELD_ENTRY_IGNORE( CSAM, StatScanNextFunctionFailed),
260 SSMFIELD_ENTRY_TERM()
261};
262
263
264/** Fake type to simplify g_aCsamPDBitmapArray construction. */
265typedef struct
266{
267 uint8_t *a[CSAM_PGDIRBMP_CHUNKS];
268} CSAMPDBITMAPARRAY;
269
270/**
271 * SSM descriptor table for the CSAM::pPDBitmapHC array.
272 */
273static SSMFIELD const g_aCsamPDBitmapArray[] =
274{
275 SSMFIELD_ENTRY_HCPTR_NI_ARRAY(CSAMPDBITMAPARRAY, a),
276 SSMFIELD_ENTRY_TERM()
277};
278
279
280/**
281 * SSM descriptor table for the CSAMPAGE structure.
282 */
283static const SSMFIELD g_aCsamPageFields[] =
284{
285 SSMFIELD_ENTRY_RCPTR( CSAMPAGE, pPageGC),
286 SSMFIELD_ENTRY_GCPHYS( CSAMPAGE, GCPhys),
287 SSMFIELD_ENTRY( CSAMPAGE, fFlags),
288 SSMFIELD_ENTRY( CSAMPAGE, uSize),
289 SSMFIELD_ENTRY_HCPTR_NI( CSAMPAGE, pBitmap),
290 SSMFIELD_ENTRY( CSAMPAGE, fCode32),
291 SSMFIELD_ENTRY( CSAMPAGE, fMonitorActive),
292 SSMFIELD_ENTRY( CSAMPAGE, fMonitorInvalidation),
293 SSMFIELD_ENTRY( CSAMPAGE, enmTag),
294 SSMFIELD_ENTRY( CSAMPAGE, u64Hash),
295 SSMFIELD_ENTRY_TERM()
296};
297
298/**
299 * SSM descriptor table for the CSAMPAGEREC structure, for the old SSMR3PutMem-style saved states.
300 */
301static const SSMFIELD g_aCsamPageRecFields[] =
302{
303 SSMFIELD_ENTRY_IGN_HCPTR( CSAMPAGEREC, Core.Key),
304 SSMFIELD_ENTRY_IGN_HCPTR( CSAMPAGEREC, Core.pLeft),
305 SSMFIELD_ENTRY_IGN_HCPTR( CSAMPAGEREC, Core.pRight),
306 SSMFIELD_ENTRY_IGNORE( CSAMPAGEREC, Core.uchHeight),
307 SSMFIELD_ENTRY_PAD_HC_AUTO( 3, 7),
308 SSMFIELD_ENTRY_RCPTR( CSAMPAGEREC, page.pPageGC),
309 SSMFIELD_ENTRY_PAD_HC_AUTO( 0, 4),
310 SSMFIELD_ENTRY_PAD_MSC32_AUTO( 4),
311 SSMFIELD_ENTRY_GCPHYS( CSAMPAGEREC, page.GCPhys),
312 SSMFIELD_ENTRY( CSAMPAGEREC, page.fFlags),
313 SSMFIELD_ENTRY( CSAMPAGEREC, page.uSize),
314 SSMFIELD_ENTRY_PAD_HC_AUTO( 0, 4),
315 SSMFIELD_ENTRY_HCPTR_NI( CSAMPAGEREC, page.pBitmap),
316 SSMFIELD_ENTRY( CSAMPAGEREC, page.fCode32),
317 SSMFIELD_ENTRY( CSAMPAGEREC, page.fMonitorActive),
318 SSMFIELD_ENTRY( CSAMPAGEREC, page.fMonitorInvalidation),
319 SSMFIELD_ENTRY_PAD_HC_AUTO( 1, 1),
320 SSMFIELD_ENTRY( CSAMPAGEREC, page.enmTag),
321 SSMFIELD_ENTRY( CSAMPAGEREC, page.u64Hash),
322 SSMFIELD_ENTRY_TERM()
323};
324
325
326/**
327 * Initializes the CSAM.
328 *
329 * @returns VBox status code.
330 * @param pVM Pointer to the VM.
331 */
332VMMR3_INT_DECL(int) CSAMR3Init(PVM pVM)
333{
334 int rc;
335
336 /*
337 * We only need a saved state dummy loader if HM is enabled.
338 */
339 if (HMIsEnabled(pVM))
340 {
341 pVM->fCSAMEnabled = false;
342 return SSMR3RegisterStub(pVM, "CSAM", 0);
343 }
344
345 /*
346 * Raw-mode.
347 */
348 LogFlow(("CSAMR3Init\n"));
349
350 /* Allocate bitmap for the page directory. */
351 rc = MMR3HyperAllocOnceNoRel(pVM, CSAM_PGDIRBMP_CHUNKS*sizeof(RTHCPTR), 0, MM_TAG_CSAM, (void **)&pVM->csam.s.pPDBitmapHC);
352 AssertRCReturn(rc, rc);
353 rc = MMR3HyperAllocOnceNoRel(pVM, CSAM_PGDIRBMP_CHUNKS*sizeof(RTRCPTR), 0, MM_TAG_CSAM, (void **)&pVM->csam.s.pPDGCBitmapHC);
354 AssertRCReturn(rc, rc);
355 pVM->csam.s.pPDBitmapGC = MMHyperR3ToRC(pVM, pVM->csam.s.pPDGCBitmapHC);
356 pVM->csam.s.pPDHCBitmapGC = MMHyperR3ToRC(pVM, pVM->csam.s.pPDBitmapHC);
357
358 rc = csamReinit(pVM);
359 AssertRCReturn(rc, rc);
360
361 /*
362 * Register virtual handler types.
363 */
364 rc = PGMR3HandlerVirtualTypeRegister(pVM, PGMVIRTHANDLERKIND_WRITE, false /*fRelocUserRC*/,
365 NULL /*pfnInvalidateR3 */,
366 csamCodePageWriteHandler,
367 "csamCodePageWriteHandler", "csamRCCodePageWritePfHandler",
368 "CSAM code page write handler",
369 &pVM->csam.s.hCodePageWriteType);
370 AssertLogRelRCReturn(rc, rc);
371 rc = PGMR3HandlerVirtualTypeRegister(pVM, PGMVIRTHANDLERKIND_WRITE, false /*fRelocUserRC*/,
372 csamR3CodePageInvalidate,
373 csamCodePageWriteHandler,
374 "csamCodePageWriteHandler", "csamRCCodePageWritePfHandler",
375 "CSAM code page write and invlpg handler",
376 &pVM->csam.s.hCodePageWriteAndInvPgType);
377 AssertLogRelRCReturn(rc, rc);
378
379 /*
380 * Register save and load state notifiers.
381 */
382 rc = SSMR3RegisterInternal(pVM, "CSAM", 0, CSAM_SAVED_STATE_VERSION, sizeof(pVM->csam.s) + PAGE_SIZE*16,
383 NULL, NULL, NULL,
384 NULL, csamR3Save, NULL,
385 NULL, csamR3Load, NULL);
386 AssertRCReturn(rc, rc);
387
388 STAM_REG(pVM, &pVM->csam.s.StatNrTraps, STAMTYPE_COUNTER, "/CSAM/PageTraps", STAMUNIT_OCCURENCES, "The number of CSAM page traps.");
389 STAM_REG(pVM, &pVM->csam.s.StatDangerousWrite, STAMTYPE_COUNTER, "/CSAM/DangerousWrites", STAMUNIT_OCCURENCES, "The number of dangerous writes that cause a context switch.");
390
391 STAM_REG(pVM, &pVM->csam.s.StatNrPageNPHC, STAMTYPE_COUNTER, "/CSAM/HC/PageNotPresent", STAMUNIT_OCCURENCES, "The number of CSAM pages marked not present.");
392 STAM_REG(pVM, &pVM->csam.s.StatNrPageNPGC, STAMTYPE_COUNTER, "/CSAM/GC/PageNotPresent", STAMUNIT_OCCURENCES, "The number of CSAM pages marked not present.");
393 STAM_REG(pVM, &pVM->csam.s.StatNrPages, STAMTYPE_COUNTER, "/CSAM/PageRec/AddedRW", STAMUNIT_OCCURENCES, "The number of CSAM page records (RW monitoring).");
394 STAM_REG(pVM, &pVM->csam.s.StatNrPagesInv, STAMTYPE_COUNTER, "/CSAM/PageRec/AddedRWI", STAMUNIT_OCCURENCES, "The number of CSAM page records (RW & invalidation monitoring).");
395 STAM_REG(pVM, &pVM->csam.s.StatNrRemovedPages, STAMTYPE_COUNTER, "/CSAM/PageRec/Removed", STAMUNIT_OCCURENCES, "The number of removed CSAM page records.");
396 STAM_REG(pVM, &pVM->csam.s.StatPageRemoveREMFlush,STAMTYPE_COUNTER, "/CSAM/PageRec/Removed/REMFlush", STAMUNIT_OCCURENCES, "The number of removed CSAM page records that caused a REM flush.");
397
398 STAM_REG(pVM, &pVM->csam.s.StatNrPatchPages, STAMTYPE_COUNTER, "/CSAM/PageRec/Patch", STAMUNIT_OCCURENCES, "The number of CSAM patch page records.");
399 STAM_REG(pVM, &pVM->csam.s.StatNrUserPages, STAMTYPE_COUNTER, "/CSAM/PageRec/Ignore/User", STAMUNIT_OCCURENCES, "The number of CSAM user page records (ignored).");
400 STAM_REG(pVM, &pVM->csam.s.StatPagePATM, STAMTYPE_COUNTER, "/CSAM/PageRec/Type/PATM", STAMUNIT_OCCURENCES, "The number of PATM page records.");
401 STAM_REG(pVM, &pVM->csam.s.StatPageCSAM, STAMTYPE_COUNTER, "/CSAM/PageRec/Type/CSAM", STAMUNIT_OCCURENCES, "The number of CSAM page records.");
402 STAM_REG(pVM, &pVM->csam.s.StatPageREM, STAMTYPE_COUNTER, "/CSAM/PageRec/Type/REM", STAMUNIT_OCCURENCES, "The number of REM page records.");
403 STAM_REG(pVM, &pVM->csam.s.StatPageMonitor, STAMTYPE_COUNTER, "/CSAM/PageRec/Monitored", STAMUNIT_OCCURENCES, "The number of monitored pages.");
404
405 STAM_REG(pVM, &pVM->csam.s.StatCodePageModified, STAMTYPE_COUNTER, "/CSAM/Monitor/DirtyPage", STAMUNIT_OCCURENCES, "The number of code page modifications.");
406
407 STAM_REG(pVM, &pVM->csam.s.StatNrFlushes, STAMTYPE_COUNTER, "/CSAM/PageFlushes", STAMUNIT_OCCURENCES, "The number of CSAM page flushes.");
408 STAM_REG(pVM, &pVM->csam.s.StatNrFlushesSkipped, STAMTYPE_COUNTER, "/CSAM/PageFlushesSkipped", STAMUNIT_OCCURENCES, "The number of CSAM page flushes that were skipped.");
409 STAM_REG(pVM, &pVM->csam.s.StatNrKnownPagesHC, STAMTYPE_COUNTER, "/CSAM/HC/KnownPageRecords", STAMUNIT_OCCURENCES, "The number of known CSAM page records.");
410 STAM_REG(pVM, &pVM->csam.s.StatNrKnownPagesGC, STAMTYPE_COUNTER, "/CSAM/GC/KnownPageRecords", STAMUNIT_OCCURENCES, "The number of known CSAM page records.");
411 STAM_REG(pVM, &pVM->csam.s.StatNrInstr, STAMTYPE_COUNTER, "/CSAM/ScannedInstr", STAMUNIT_OCCURENCES, "The number of scanned instructions.");
412 STAM_REG(pVM, &pVM->csam.s.StatNrBytesRead, STAMTYPE_COUNTER, "/CSAM/BytesRead", STAMUNIT_OCCURENCES, "The number of bytes read for scanning.");
413 STAM_REG(pVM, &pVM->csam.s.StatNrOpcodeRead, STAMTYPE_COUNTER, "/CSAM/OpcodeBytesRead", STAMUNIT_OCCURENCES, "The number of opcode bytes read by the recompiler.");
414
415 STAM_REG(pVM, &pVM->csam.s.StatBitmapAlloc, STAMTYPE_COUNTER, "/CSAM/Alloc/PageBitmap", STAMUNIT_OCCURENCES, "The number of page bitmap allocations.");
416
417 STAM_REG(pVM, &pVM->csam.s.StatInstrCacheHit, STAMTYPE_COUNTER, "/CSAM/Cache/Hit", STAMUNIT_OCCURENCES, "The number of dangerous instruction cache hits.");
418 STAM_REG(pVM, &pVM->csam.s.StatInstrCacheMiss, STAMTYPE_COUNTER, "/CSAM/Cache/Miss", STAMUNIT_OCCURENCES, "The number of dangerous instruction cache misses.");
419
420 STAM_REG(pVM, &pVM->csam.s.StatScanNextFunction, STAMTYPE_COUNTER, "/CSAM/Function/Scan/Success", STAMUNIT_OCCURENCES, "The number of found functions beyond the ret border.");
421 STAM_REG(pVM, &pVM->csam.s.StatScanNextFunctionFailed, STAMTYPE_COUNTER, "/CSAM/Function/Scan/Failed", STAMUNIT_OCCURENCES, "The number of refused functions beyond the ret border.");
422
423 STAM_REG(pVM, &pVM->csam.s.StatTime, STAMTYPE_PROFILE, "/PROF/CSAM/Scan", STAMUNIT_TICKS_PER_CALL, "Scanning overhead.");
424 STAM_REG(pVM, &pVM->csam.s.StatTimeCheckAddr, STAMTYPE_PROFILE, "/PROF/CSAM/CheckAddr", STAMUNIT_TICKS_PER_CALL, "Address check overhead.");
425 STAM_REG(pVM, &pVM->csam.s.StatTimeAddrConv, STAMTYPE_PROFILE, "/PROF/CSAM/AddrConv", STAMUNIT_TICKS_PER_CALL, "Address conversion overhead.");
426 STAM_REG(pVM, &pVM->csam.s.StatTimeFlushPage, STAMTYPE_PROFILE, "/PROF/CSAM/FlushPage", STAMUNIT_TICKS_PER_CALL, "Page flushing overhead.");
427 STAM_REG(pVM, &pVM->csam.s.StatTimeDisasm, STAMTYPE_PROFILE, "/PROF/CSAM/Disasm", STAMUNIT_TICKS_PER_CALL, "Disassembly overhead.");
428 STAM_REG(pVM, &pVM->csam.s.StatFlushDirtyPages, STAMTYPE_PROFILE, "/PROF/CSAM/FlushDirtyPage", STAMUNIT_TICKS_PER_CALL, "Dirty page flushing overhead.");
429 STAM_REG(pVM, &pVM->csam.s.StatCheckGates, STAMTYPE_PROFILE, "/PROF/CSAM/CheckGates", STAMUNIT_TICKS_PER_CALL, "CSAMR3CheckGates overhead.");
430
431 /*
432 * Check CFGM option and enable/disable CSAM.
433 */
434 bool fEnabled;
435 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "CSAMEnabled", &fEnabled);
436 if (RT_FAILURE(rc))
437#ifdef CSAM_ENABLE
438 fEnabled = true;
439#else
440 fEnabled = false;
441#endif
442 if (fEnabled)
443 CSAMEnableScanning(pVM);
444
445#ifdef VBOX_WITH_DEBUGGER
446 /*
447 * Debugger commands.
448 */
449 static bool fRegisteredCmds = false;
450 if (!fRegisteredCmds)
451 {
452 rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
453 if (RT_SUCCESS(rc))
454 fRegisteredCmds = true;
455 }
456#endif
457
458 return VINF_SUCCESS;
459}
460
461/**
462 * (Re)initializes CSAM
463 *
464 * @param pVM The VM.
465 */
466static int csamReinit(PVM pVM)
467{
468 /*
469 * Assert alignment and sizes.
470 */
471 AssertRelease(!(RT_OFFSETOF(VM, csam.s) & 31));
472 AssertRelease(sizeof(pVM->csam.s) <= sizeof(pVM->csam.padding));
473 AssertRelease(!HMIsEnabled(pVM));
474
475 /*
476 * Setup any fixed pointers and offsets.
477 */
478 pVM->csam.s.offVM = RT_OFFSETOF(VM, patm);
479
480 pVM->csam.s.fGatesChecked = false;
481 pVM->csam.s.fScanningStarted = false;
482
483 PVMCPU pVCpu = &pVM->aCpus[0]; /* raw mode implies 1 VCPU */
484 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION);
485 pVM->csam.s.cDirtyPages = 0;
486 /* not necessary */
487 memset(pVM->csam.s.pvDirtyBasePage, 0, sizeof(pVM->csam.s.pvDirtyBasePage));
488 memset(pVM->csam.s.pvDirtyFaultPage, 0, sizeof(pVM->csam.s.pvDirtyFaultPage));
489
490 memset(&pVM->csam.s.aDangerousInstr, 0, sizeof(pVM->csam.s.aDangerousInstr));
491 pVM->csam.s.cDangerousInstr = 0;
492 pVM->csam.s.iDangerousInstr = 0;
493
494 memset(pVM->csam.s.pvCallInstruction, 0, sizeof(pVM->csam.s.pvCallInstruction));
495 pVM->csam.s.iCallInstruction = 0;
496
497 /** @note never mess with the pgdir bitmap here! */
498 return VINF_SUCCESS;
499}
500
501/**
502 * Applies relocations to data and code managed by this
503 * component. This function will be called at init and
504 * whenever the VMM needs to relocate itself inside the GC.
505 *
506 * CSAM will update the addresses used by the switcher.
507 *
508 * @param pVM The VM.
509 * @param offDelta Relocation delta.
510 */
511VMMR3_INT_DECL(void) CSAMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
512{
513 if (offDelta && !HMIsEnabled(pVM))
514 {
515 /* Adjust pgdir and page bitmap pointers. */
516 pVM->csam.s.pPDBitmapGC = MMHyperR3ToRC(pVM, pVM->csam.s.pPDGCBitmapHC);
517 pVM->csam.s.pPDHCBitmapGC = MMHyperR3ToRC(pVM, pVM->csam.s.pPDBitmapHC);
518
519 for(int i=0;i<CSAM_PGDIRBMP_CHUNKS;i++)
520 {
521 if (pVM->csam.s.pPDGCBitmapHC[i])
522 {
523 pVM->csam.s.pPDGCBitmapHC[i] += offDelta;
524 }
525 }
526 }
527 return;
528}
529
530/**
531 * Terminates the csam.
532 *
533 * Termination means cleaning up and freeing all resources;
534 * the VM itself is at this point powered off or suspended.
535 *
536 * @returns VBox status code.
537 * @param pVM Pointer to the VM.
538 */
539VMMR3_INT_DECL(int) CSAMR3Term(PVM pVM)
540{
541 if (HMIsEnabled(pVM))
542 return VINF_SUCCESS;
543
544 int rc;
545
546 rc = CSAMR3Reset(pVM);
547 AssertRC(rc);
548
549 /* @todo triggers assertion in MMHyperFree */
550#if 0
551 for(int i=0;i<CSAM_PAGEBMP_CHUNKS;i++)
552 {
553 if (pVM->csam.s.pPDBitmapHC[i])
554 MMHyperFree(pVM, pVM->csam.s.pPDBitmapHC[i]);
555 }
556#endif
557
558 return VINF_SUCCESS;
559}
560
561/**
562 * CSAM reset callback.
563 *
564 * @returns VBox status code.
565 * @param pVM The VM which is reset.
566 */
567VMMR3_INT_DECL(int) CSAMR3Reset(PVM pVM)
568{
569 if (HMIsEnabled(pVM))
570 return VINF_SUCCESS;
571
572 /* Clear page bitmaps. */
573 for (int i = 0; i < CSAM_PGDIRBMP_CHUNKS; i++)
574 {
575 if (pVM->csam.s.pPDBitmapHC[i])
576 {
577 Assert((CSAM_PAGE_BITMAP_SIZE & 3) == 0);
578 ASMMemZero32(pVM->csam.s.pPDBitmapHC[i], CSAM_PAGE_BITMAP_SIZE);
579 }
580 }
581
582 /* Remove all CSAM page records. */
583 for (;;)
584 {
585 PCSAMPAGEREC pPageRec = (PCSAMPAGEREC)RTAvlPVGetBestFit(&pVM->csam.s.pPageTree, 0, true);
586 if (!pPageRec)
587 break;
588 csamRemovePageRecord(pVM, pPageRec->page.pPageGC);
589 }
590 Assert(!pVM->csam.s.pPageTree);
591
592 csamReinit(pVM);
593
594 return VINF_SUCCESS;
595}
596
597
598/**
599 * Callback function for RTAvlPVDoWithAll
600 *
601 * Counts the number of records in the tree
602 *
603 * @returns VBox status code.
604 * @param pNode Current node
605 * @param pcPatches Pointer to the record counter
606 */
607static DECLCALLBACK(int) csamR3SaveCountRecord(PAVLPVNODECORE pNode, void *pcPatches)
608{
609 NOREF(pNode);
610 *(uint32_t *)pcPatches += 1;
611 return VINF_SUCCESS;
612}
613
614/**
615 * Callback function for RTAvlPVDoWithAll for saving a page record.
616 *
617 * @returns VBox status code.
618 * @param pNode Current node
619 * @param pvVM Pointer to the VM
620 */
621static DECLCALLBACK(int) csamR3SavePageState(PAVLPVNODECORE pNode, void *pvVM)
622{
623 PCSAMPAGEREC pPage = (PCSAMPAGEREC)pNode;
624 PVM pVM = (PVM)pvVM;
625 PSSMHANDLE pSSM = pVM->csam.s.savedstate.pSSM;
626
627 int rc = SSMR3PutStructEx(pSSM, &pPage->page, sizeof(pPage->page), 0 /*fFlags*/, &g_aCsamPageFields[0], NULL);
628 AssertLogRelRCReturn(rc, rc);
629
630 if (pPage->page.pBitmap)
631 SSMR3PutMem(pSSM, pPage->page.pBitmap, CSAM_PAGE_BITMAP_SIZE);
632
633 return VINF_SUCCESS;
634}
635
636/**
637 * Execute state save operation.
638 *
639 * @returns VBox status code.
640 * @param pVM Pointer to the VM.
641 * @param pSSM SSM operation handle.
642 */
643static DECLCALLBACK(int) csamR3Save(PVM pVM, PSSMHANDLE pSSM)
644{
645 int rc;
646
647 /*
648 * Count the number of page records in the tree (feeling lazy)
649 */
650 pVM->csam.s.savedstate.cPageRecords = 0;
651 RTAvlPVDoWithAll(&pVM->csam.s.pPageTree, true, csamR3SaveCountRecord, &pVM->csam.s.savedstate.cPageRecords);
652
653 /*
654 * Save CSAM structure.
655 */
656 pVM->csam.s.savedstate.pSSM = pSSM;
657 rc = SSMR3PutStructEx(pSSM, &pVM->csam.s, sizeof(pVM->csam.s), 0 /*fFlags*/, g_aCsamFields, NULL);
658 AssertLogRelRCReturn(rc, rc);
659
660 /*
661 * Save pgdir bitmap.
662 */
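/* The chunk count and chunk size are written first so that csamR3Load can sanity-check them on restore. */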
663 SSMR3PutU32(pSSM, CSAM_PGDIRBMP_CHUNKS);
664 SSMR3PutU32(pSSM, CSAM_PAGE_BITMAP_SIZE);
665 for (uint32_t i = 0; i < CSAM_PGDIRBMP_CHUNKS; i++)
666 if (pVM->csam.s.pPDBitmapHC[i])
667 {
668 SSMR3PutU32(pSSM, i);
669 SSMR3PutMem(pSSM, pVM->csam.s.pPDBitmapHC[i], CSAM_PAGE_BITMAP_SIZE);
670 }
671 SSMR3PutU32(pSSM, UINT32_MAX); /* terminator */
672
673 /*
674 * Save page records
675 */
676 pVM->csam.s.savedstate.pSSM = pSSM;
677 rc = RTAvlPVDoWithAll(&pVM->csam.s.pPageTree, true, csamR3SavePageState, pVM);
678 AssertRCReturn(rc, rc);
679
680 pVM->csam.s.savedstate.pSSM = NULL;
681 return VINF_SUCCESS;
682}
683
684
685/**
686 * Execute state load operation.
687 *
688 * @returns VBox status code.
689 * @param pVM Pointer to the VM.
690 * @param pSSM SSM operation handle.
691 * @param uVersion Data layout version.
692 * @param uPass The data pass.
693 */
694static DECLCALLBACK(int) csamR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
695{
696 int rc;
697
698 /*
699 * Check preconditions.
700 */
701 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
702 Assert(pVM->csam.s.savedstate.pSSM == NULL);
703 AssertLogRelMsgReturn(uVersion >= CSAM_SAVED_STATE_VERSION_PUT_MEM && uVersion <= CSAM_SAVED_STATE_VERSION,
704 ("uVersion=%d (%#x)\n", uVersion),
705 VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION);
706
707 if (uVersion >= CSAM_SAVED_STATE_VERSION_PUT_STRUCT)
708 {
709 /*
710 * Restore the SSMR3PutStructEx fashioned state.
711 */
712 rc = SSMR3GetStructEx(pSSM, &pVM->csam.s, sizeof(pVM->csam.s), 0 /*fFlags*/, &g_aCsamFields[0], NULL);
713
714 /*
715 * Restore page bitmaps
716 */
717 uint32_t cPgDirBmpChunks = 0;
718 rc = SSMR3GetU32(pSSM, &cPgDirBmpChunks);
719 uint32_t cbPgDirBmpChunk = 0;
720 rc = SSMR3GetU32(pSSM, &cbPgDirBmpChunk);
721 AssertRCReturn(rc, rc);
722 AssertLogRelMsgReturn(cPgDirBmpChunks <= CSAM_PGDIRBMP_CHUNKS,
723 ("cPgDirBmpChunks=%#x (vs %#x)\n", cPgDirBmpChunks, CSAM_PGDIRBMP_CHUNKS),
724 VERR_SSM_UNEXPECTED_DATA);
725 AssertLogRelMsgReturn(cbPgDirBmpChunk <= CSAM_PAGE_BITMAP_SIZE,
726 ("cbPgDirBmpChunk=%#x (vs %#x)\n", cbPgDirBmpChunk, CSAM_PAGE_BITMAP_SIZE),
727 VERR_SSM_UNEXPECTED_DATA);
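/* The bitmap arrays must still be empty at this point; the saved chunks are reallocated and read back below. */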
728 for (uint32_t i = 0; i < CSAM_PGDIRBMP_CHUNKS; i++)
729 {
730 Assert(!pVM->csam.s.pPDBitmapHC[i]);
731 Assert(!pVM->csam.s.pPDGCBitmapHC[i]);
732 }
733 for (uint32_t iNext = 0;;)
734 {
735 uint32_t iThis;
736 rc = SSMR3GetU32(pSSM, &iThis);
737 AssertLogRelRCReturn(rc, rc);
738 AssertLogRelMsgReturn(iThis >= iNext, ("iThis=%#x iNext=%#x\n", iThis, iNext), VERR_SSM_UNEXPECTED_DATA);
739 if (iThis == UINT32_MAX)
740 break;
741
742 rc = MMHyperAlloc(pVM, CSAM_PAGE_BITMAP_SIZE, 0, MM_TAG_CSAM, (void **)&pVM->csam.s.pPDBitmapHC[iThis]);
743 AssertLogRelRCReturn(rc, rc);
744 pVM->csam.s.pPDGCBitmapHC[iThis] = MMHyperR3ToRC(pVM, pVM->csam.s.pPDBitmapHC[iThis]);
745
746 rc = SSMR3GetMem(pSSM, pVM->csam.s.pPDBitmapHC[iThis], CSAM_PAGE_BITMAP_SIZE);
747 AssertLogRelRCReturn(rc, rc);
748 iNext = iThis + 1;
749 }
750
751 /*
752 * Restore page records
753 */
754 uint32_t const cPageRecords = pVM->csam.s.savedstate.cPageRecords + pVM->csam.s.savedstate.cPatchPageRecords;
755 for (uint32_t iPageRec = 0; iPageRec < cPageRecords; iPageRec++)
756 {
757 CSAMPAGE PageRec;
758 RT_ZERO(PageRec);
759 rc = SSMR3GetStructEx(pSSM, &PageRec, sizeof(PageRec), 0 /*fFlags*/, &g_aCsamPageFields[0], NULL);
760 AssertLogRelRCReturn(rc, rc);
761
762 /* Recreate the page record. */
763 PCSAMPAGE pPage = csamR3CreatePageRecord(pVM, PageRec.pPageGC, PageRec.enmTag, PageRec.fCode32,
764 PageRec.fMonitorInvalidation);
765 AssertReturn(pPage, VERR_NO_MEMORY);
766 pPage->GCPhys = PageRec.GCPhys;
767 pPage->fFlags = PageRec.fFlags;
768 pPage->u64Hash = PageRec.u64Hash;
769 if (PageRec.pBitmap)
770 {
771 rc = SSMR3GetMem(pSSM, pPage->pBitmap, CSAM_PAGE_BITMAP_SIZE);
772 AssertLogRelRCReturn(rc, rc);
773 }
774 else
775 {
776 MMR3HeapFree(pPage->pBitmap);
777 pPage->pBitmap = NULL;
778 }
779 }
780 }
781 else
782 {
783 /*
784 * Restore the old SSMR3PutMem fashioned state.
785 */
786
787 /* CSAM structure first. */
788 CSAM csamInfo;
789 RT_ZERO(csamInfo);
790 if ( SSMR3HandleVersion(pSSM) >= VBOX_FULL_VERSION_MAKE(4, 3, 51)
791 && SSMR3HandleRevision(pSSM) >= 100346)
792 rc = SSMR3GetStructEx(pSSM, &csamInfo, sizeof(csamInfo), SSMSTRUCT_FLAGS_MEM_BAND_AID,
793 &g_aCsamFields500[0], NULL);
794 else
795 rc = SSMR3GetStructEx(pSSM, &csamInfo, sizeof(csamInfo), SSMSTRUCT_FLAGS_MEM_BAND_AID_RELAXED,
796 &g_aCsamFieldsBefore500[0], NULL);
797 AssertRCReturn(rc, rc);
798
799 pVM->csam.s.fGatesChecked = csamInfo.fGatesChecked;
800 pVM->csam.s.fScanningStarted = csamInfo.fScanningStarted;
801
802 /* Restore dirty code page info. */
803 pVM->csam.s.cDirtyPages = csamInfo.cDirtyPages;
804 memcpy(pVM->csam.s.pvDirtyBasePage, csamInfo.pvDirtyBasePage, sizeof(pVM->csam.s.pvDirtyBasePage));
805 memcpy(pVM->csam.s.pvDirtyFaultPage, csamInfo.pvDirtyFaultPage, sizeof(pVM->csam.s.pvDirtyFaultPage));
806
807 /* Restore possible code page */
808 pVM->csam.s.cPossibleCodePages = csamInfo.cPossibleCodePages;
809 memcpy(pVM->csam.s.pvPossibleCodePage, csamInfo.pvPossibleCodePage, sizeof(pVM->csam.s.pvPossibleCodePage));
810
811 /*
812 * Restore pgdir bitmap (we'll change the pointers next).
813 */
814 rc = SSMR3GetStructEx(pSSM, pVM->csam.s.pPDBitmapHC, sizeof(uint8_t *) * CSAM_PGDIRBMP_CHUNKS,
815 SSMSTRUCT_FLAGS_MEM_BAND_AID_RELAXED, &g_aCsamPDBitmapArray[0], NULL);
816 AssertRCReturn(rc, rc);
817
818 /*
819 * Restore page bitmaps
820 */
821 for (unsigned i = 0; i < CSAM_PGDIRBMP_CHUNKS; i++)
822 if (pVM->csam.s.pPDBitmapHC[i])
823 {
824 rc = MMHyperAlloc(pVM, CSAM_PAGE_BITMAP_SIZE, 0, MM_TAG_CSAM, (void **)&pVM->csam.s.pPDBitmapHC[i]);
825 AssertLogRelRCReturn(rc, rc);
826 pVM->csam.s.pPDGCBitmapHC[i] = MMHyperR3ToRC(pVM, pVM->csam.s.pPDBitmapHC[i]);
827
828 /* Restore the bitmap. */
829 rc = SSMR3GetMem(pSSM, pVM->csam.s.pPDBitmapHC[i], CSAM_PAGE_BITMAP_SIZE);
830 AssertRCReturn(rc, rc);
831 }
832 else
833 {
834 Assert(!pVM->csam.s.pPDGCBitmapHC[i]);
835 pVM->csam.s.pPDGCBitmapHC[i] = 0;
836 }
837
838 /*
839 * Restore page records
840 */
841 for (uint32_t i=0;i<csamInfo.savedstate.cPageRecords + csamInfo.savedstate.cPatchPageRecords;i++)
842 {
843 CSAMPAGEREC page;
844 PCSAMPAGE pPage;
845
846 RT_ZERO(page);
847 rc = SSMR3GetStructEx(pSSM, &page, sizeof(page), SSMSTRUCT_FLAGS_MEM_BAND_AID_RELAXED, &g_aCsamPageRecFields[0], NULL);
848 AssertRCReturn(rc, rc);
849
850 /*
851 * Recreate the page record
852 */
853 pPage = csamR3CreatePageRecord(pVM, page.page.pPageGC, page.page.enmTag, page.page.fCode32, page.page.fMonitorInvalidation);
854 AssertReturn(pPage, VERR_NO_MEMORY);
855
856 pPage->GCPhys = page.page.GCPhys;
857 pPage->fFlags = page.page.fFlags;
858 pPage->u64Hash = page.page.u64Hash;
859
860 if (page.page.pBitmap)
861 {
862 rc = SSMR3GetMem(pSSM, pPage->pBitmap, CSAM_PAGE_BITMAP_SIZE);
863 AssertRCReturn(rc, rc);
864 }
865 else
866 {
867 MMR3HeapFree(pPage->pBitmap);
868 pPage->pBitmap = NULL;
869 }
870 }
871
872 /* Note: we don't restore aDangerousInstr; it will be recreated automatically. */
873 memset(&pVM->csam.s.aDangerousInstr, 0, sizeof(pVM->csam.s.aDangerousInstr));
874 pVM->csam.s.cDangerousInstr = 0;
875 pVM->csam.s.iDangerousInstr = 0;
876 }
877 return VINF_SUCCESS;
878}
879
880/**
881 * Convert guest context address to host context pointer
882 *
883 * @returns Byte pointer (ring-3 context) corresponding to pGCPtr on success,
884 * NULL on failure.
885 * @param pVM Pointer to the VM.
886 * @param pCacheRec Address conversion cache record
887 * @param pGCPtr Guest context pointer
888 *
889 *
890 */
891static uint8_t *csamR3GCVirtToHCVirt(PVM pVM, PCSAMP2GLOOKUPREC pCacheRec, RCPTRTYPE(uint8_t *) pGCPtr)
892{
893 int rc;
894 void *pHCPtr;
895 Assert(pVM->cCpus == 1);
896 PVMCPU pVCpu = VMMGetCpu0(pVM);
897
898 STAM_PROFILE_START(&pVM->csam.s.StatTimeAddrConv, a);
899
900 pHCPtr = PATMR3GCPtrToHCPtr(pVM, pGCPtr);
901 if (pHCPtr)
902 return (uint8_t *)pHCPtr;
903
904 if (pCacheRec->pPageLocStartHC)
905 {
906 uint32_t offset = pGCPtr & PAGE_OFFSET_MASK;
907 if (pCacheRec->pGuestLoc == (pGCPtr & PAGE_BASE_GC_MASK))
908 {
909 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeAddrConv, a);
910 return pCacheRec->pPageLocStartHC + offset;
911 }
912 }
913
914 /* Release previous lock if any. */
915 if (pCacheRec->Lock.pvMap)
916 {
917 PGMPhysReleasePageMappingLock(pVM, &pCacheRec->Lock);
918 pCacheRec->Lock.pvMap = NULL;
919 }
920
921 rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, pGCPtr, (const void **)&pHCPtr, &pCacheRec->Lock);
922 if (rc != VINF_SUCCESS)
923 {
924//// AssertMsgRC(rc, ("MMR3PhysGCVirt2HCVirtEx failed for %RRv\n", pGCPtr));
925 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeAddrConv, a);
926 return NULL;
927 }
928
929 pCacheRec->pPageLocStartHC = (uint8_t*)((uintptr_t)pHCPtr & PAGE_BASE_HC_MASK);
930 pCacheRec->pGuestLoc = pGCPtr & PAGE_BASE_GC_MASK;
931 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeAddrConv, a);
932 return (uint8_t *)pHCPtr;
933}
934
935
936/** For csamR3ReadBytes. */
937typedef struct CSAMDISINFO
938{
939 PVM pVM;
940 uint8_t const *pbSrcInstr; /* aka pInstHC */
941} CSAMDISINFO, *PCSAMDISINFO;
942
943
944/**
945 * @callback_method_impl{FNDISREADBYTES}
946 */
947static DECLCALLBACK(int) csamR3ReadBytes(PDISCPUSTATE pDis, uint8_t offInstr, uint8_t cbMinRead, uint8_t cbMaxRead)
948{
949 PCSAMDISINFO pDisInfo = (PCSAMDISINFO)pDis->pvUser;
950
951 /*
952 * We are not interested in patched instructions, so read the original opcode bytes.
953 *
954 * Note! Single instruction patches (int3) are checked in CSAMR3AnalyseCallback.
955 *
956 * Since we're decoding one instruction at a time, we don't need to be
957 * concerned about any patched instructions following the first one. We
958 * could in fact probably skip this PATM call for offInstr != 0.
959 */
960 size_t cbRead = cbMaxRead;
961 RTUINTPTR uSrcAddr = pDis->uInstrAddr + offInstr;
962 int rc = PATMR3ReadOrgInstr(pDisInfo->pVM, pDis->uInstrAddr + offInstr, &pDis->abInstr[offInstr], cbRead, &cbRead);
963 if (RT_SUCCESS(rc))
964 {
965 if (cbRead >= cbMinRead)
966 {
967 pDis->cbCachedInstr = offInstr + (uint8_t)cbRead;
968 return rc;
969 }
970
971 cbMinRead -= (uint8_t)cbRead;
972 cbMaxRead -= (uint8_t)cbRead;
973 offInstr += (uint8_t)cbRead;
974 uSrcAddr += cbRead;
975 }
976
977 /*
978 * The current byte isn't a patch instruction byte.
979 */
980 AssertPtr(pDisInfo->pbSrcInstr);
981 if ((pDis->uInstrAddr >> PAGE_SHIFT) == ((uSrcAddr + cbMaxRead - 1) >> PAGE_SHIFT))
982 {
983 memcpy(&pDis->abInstr[offInstr], &pDisInfo->pbSrcInstr[offInstr], cbMaxRead);
984 offInstr += cbMaxRead;
985 rc = VINF_SUCCESS;
986 }
987 else if ( (pDis->uInstrAddr >> PAGE_SHIFT) == ((uSrcAddr + cbMinRead - 1) >> PAGE_SHIFT)
988 || PATMIsPatchGCAddr(pDisInfo->pVM, uSrcAddr) /** @todo does CSAM actually analyze patch code, or is this just a copy & paste check? */
989 )
990 {
991 memcpy(&pDis->abInstr[offInstr], &pDisInfo->pbSrcInstr[offInstr], cbMinRead);
992 offInstr += cbMinRead;
993 rc = VINF_SUCCESS;
994 }
995 else
996 {
997 /* Crossed page boundary, pbSrcInstr is no good... */
998 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pDisInfo->pVM), &pDis->abInstr[offInstr], uSrcAddr, cbMinRead);
999 offInstr += cbMinRead;
1000 }
1001
1002 pDis->cbCachedInstr = offInstr;
1003 return rc;
1004}
1005
1006DECLINLINE(int) csamR3DISInstr(PVM pVM, RTRCPTR InstrGC, uint8_t *InstrHC, DISCPUMODE enmCpuMode,
1007 PDISCPUSTATE pCpu, uint32_t *pcbInstr, char *pszOutput, size_t cbOutput)
1008{
1009 CSAMDISINFO DisInfo = { pVM, InstrHC };
1010#ifdef DEBUG
1011 return DISInstrToStrEx(InstrGC, enmCpuMode, csamR3ReadBytes, &DisInfo, DISOPTYPE_ALL,
1012 pCpu, pcbInstr, pszOutput, cbOutput);
1013#else
1014 /* We are interested in everything except harmless stuff */
1015 if (pszOutput)
1016 return DISInstrToStrEx(InstrGC, enmCpuMode, csamR3ReadBytes, &DisInfo,
1017 ~(DISOPTYPE_INVALID | DISOPTYPE_HARMLESS | DISOPTYPE_RRM_MASK),
1018 pCpu, pcbInstr, pszOutput, cbOutput);
1019 return DISInstrEx(InstrGC, enmCpuMode, ~(DISOPTYPE_INVALID | DISOPTYPE_HARMLESS | DISOPTYPE_RRM_MASK),
1020 csamR3ReadBytes, &DisInfo, pCpu, pcbInstr);
1021#endif
1022}
1023
1024/**
1025 * Analyses the instructions following the cli for compliance with our cli patching heuristics.
1026 *
1027 * @returns VBox status code.
1028 * @param pVM Pointer to the VM.
1029 * @param pCpu CPU disassembly state
1030 * @param pInstrGC Guest context pointer to privileged instruction
1031 * @param pCurInstrGC Guest context pointer to the current instruction
1032 * @param pCacheRec GC to HC cache record
1033 * @param pUserData User pointer (callback specific)
1034 *
1035 */
1036static int CSAMR3AnalyseCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC,
1037 PCSAMP2GLOOKUPREC pCacheRec, void *pUserData)
1038{
1039 PCSAMPAGE pPage = (PCSAMPAGE)pUserData;
1040 int rc;
1041 NOREF(pInstrGC);
1042
1043 switch (pCpu->pCurInstr->uOpcode)
1044 {
1045 case OP_INT:
1046 Assert(pCpu->Param1.fUse & DISUSE_IMMEDIATE8);
1047 if (pCpu->Param1.uValue == 3)
1048 {
1049 //two byte int 3
1050 return VINF_SUCCESS;
1051 }
1052 break;
1053
1054 /* removing breaks win2k guests? */
1055 case OP_IRET:
1056 if (EMIsRawRing1Enabled(pVM))
1057 break;
1058 /* no break */
1059
1060 case OP_ILLUD2:
1061 /* This appears to be some kind of kernel panic in Linux 2.4; no point in continuing. */
1062 case OP_RETN:
1063 case OP_INT3:
1064 case OP_INVALID:
1065 return VINF_SUCCESS;
1066 }
1067
1068 // Check for exit points
1069 switch (pCpu->pCurInstr->uOpcode)
1070 {
1071 /* It's not a good idea to patch pushf instructions:
1072 * - increases the chance of conflicts (code jumping to the next instruction)
1073 * - better to patch the cli
1074 * - code that branches before the cli will likely hit an int 3
1075 * - in general doesn't offer any benefits as we don't allow nested patch blocks (IF is always 1)
1076 */
1077 case OP_PUSHF:
1078 case OP_POPF:
1079 break;
1080
1081 case OP_CLI:
1082 {
1083 uint32_t cbInstrs = 0;
1084 uint32_t cbCurInstr = pCpu->cbInstr;
1085 bool fCode32 = pPage->fCode32;
1086
1087 Assert(fCode32);
1088
1089 PATMR3AddHint(pVM, pCurInstrGC, (fCode32) ? PATMFL_CODE32 : 0);
1090
1091 /* Make sure the instructions that follow the cli have not been encountered before. */
1092 while (true)
1093 {
1094 DISCPUSTATE cpu;
1095
1096 if (cbInstrs + cbCurInstr >= SIZEOF_NEARJUMP32)
1097 break;
1098
1099 if (csamIsCodeScanned(pVM, pCurInstrGC + cbCurInstr, &pPage) == true)
1100 {
1101 /* We've scanned the next instruction(s) already. This means we've
1102 followed a branch that ended up there before -> dangerous!! */
1103 PATMR3DetectConflict(pVM, pCurInstrGC, pCurInstrGC + cbCurInstr);
1104 break;
1105 }
1106 pCurInstrGC += cbCurInstr;
1107 cbInstrs += cbCurInstr;
1108
1109 { /* Force pCurInstrHC out of scope after we stop using it (page lock!) */
1110 uint8_t *pCurInstrHC = csamR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
1111 if (pCurInstrHC == NULL)
1112 {
1113 Log(("csamR3GCVirtToHCVirt failed for %RRv\n", pCurInstrGC));
1114 break;
1115 }
1116 Assert(VALID_PTR(pCurInstrHC));
1117
1118 rc = csamR3DISInstr(pVM, pCurInstrGC, pCurInstrHC, (fCode32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
1119 &cpu, &cbCurInstr, NULL, 0);
1120 }
1121 AssertRC(rc);
1122 if (RT_FAILURE(rc))
1123 break;
1124 }
1125 break;
1126 }
1127
1128#ifdef VBOX_WITH_RAW_RING1
1129 case OP_MOV:
1130 /* mov xx, CS is a dangerous instruction as our raw ring usage leaks through. */
1131 if ( EMIsRawRing1Enabled(pVM)
1132 && (pCpu->Param2.fUse & DISUSE_REG_SEG)
1133 && (pCpu->Param2.Base.idxSegReg == DISSELREG_CS))
1134 {
1135 Log(("CSAM: Patching dangerous 'mov xx, cs' instruction at %RGv with an int3\n", pCurInstrGC));
1136 if (PATMR3HasBeenPatched(pVM, pCurInstrGC) == false)
1137 {
1138 rc = PATMR3InstallPatch(pVM, pCurInstrGC, (pPage->fCode32) ? PATMFL_CODE32 : 0);
1139 if (RT_FAILURE(rc))
1140 {
1141 Log(("PATMR3InstallPatch failed with %d\n", rc));
1142 return VWRN_CONTINUE_ANALYSIS;
1143 }
1144 }
1145 return VWRN_CONTINUE_ANALYSIS;
1146 }
1147 break;
1148#endif
1149
1150 case OP_PUSH:
1151 /** @todo broken comparison!! should be if ((pCpu->Param1.fUse & DISUSE_REG_SEG) && (pCpu->Param1.Base.idxSegReg == DISSELREG_SS)) */
1152 if (pCpu->pCurInstr->fParam1 != OP_PARM_REG_CS)
1153 break;
1154
1155 /* no break */
1156#ifndef VBOX_WITH_SAFE_STR
1157 case OP_STR:
1158#endif
1159 case OP_LSL:
1160 case OP_LAR:
1161 case OP_SGDT:
1162 case OP_SLDT:
1163 case OP_SIDT:
1164 case OP_SMSW:
1165 case OP_VERW:
1166 case OP_VERR:
1167 case OP_CPUID:
1168 case OP_IRET:
1169#ifdef DEBUG
1170 switch(pCpu->pCurInstr->uOpcode)
1171 {
1172 case OP_STR:
1173 Log(("Privileged instruction at %RRv: str!!\n", pCurInstrGC));
1174 break;
1175 case OP_LSL:
1176 Log(("Privileged instruction at %RRv: lsl!!\n", pCurInstrGC));
1177 break;
1178 case OP_LAR:
1179 Log(("Privileged instruction at %RRv: lar!!\n", pCurInstrGC));
1180 break;
1181 case OP_SGDT:
1182 Log(("Privileged instruction at %RRv: sgdt!!\n", pCurInstrGC));
1183 break;
1184 case OP_SLDT:
1185 Log(("Privileged instruction at %RRv: sldt!!\n", pCurInstrGC));
1186 break;
1187 case OP_SIDT:
1188 Log(("Privileged instruction at %RRv: sidt!!\n", pCurInstrGC));
1189 break;
1190 case OP_SMSW:
1191 Log(("Privileged instruction at %RRv: smsw!!\n", pCurInstrGC));
1192 break;
1193 case OP_VERW:
1194 Log(("Privileged instruction at %RRv: verw!!\n", pCurInstrGC));
1195 break;
1196 case OP_VERR:
1197 Log(("Privileged instruction at %RRv: verr!!\n", pCurInstrGC));
1198 break;
1199 case OP_CPUID:
1200 Log(("Privileged instruction at %RRv: cpuid!!\n", pCurInstrGC));
1201 break;
1202 case OP_PUSH:
1203 Log(("Privileged instruction at %RRv: push cs!!\n", pCurInstrGC));
1204 break;
1205 case OP_IRET:
1206 Log(("Privileged instruction at %RRv: iret!!\n", pCurInstrGC));
1207 break;
1208 }
1209#endif
1210
1211 if (PATMR3HasBeenPatched(pVM, pCurInstrGC) == false)
1212 {
1213 rc = PATMR3InstallPatch(pVM, pCurInstrGC, (pPage->fCode32) ? PATMFL_CODE32 : 0);
1214 if (RT_FAILURE(rc))
1215 {
1216 Log(("PATMR3InstallPatch failed with %d\n", rc));
1217 return VWRN_CONTINUE_ANALYSIS;
1218 }
1219 }
1220 if (pCpu->pCurInstr->uOpcode == OP_IRET)
1221 return VINF_SUCCESS; /* Look no further in this branch. */
1222
1223 return VWRN_CONTINUE_ANALYSIS;
1224
1225 case OP_JMP:
1226 case OP_CALL:
1227 {
1228 // return or jump/call through a jump table
1229 if (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J)
1230 {
1231#ifdef DEBUG
1232 switch(pCpu->pCurInstr->uOpcode)
1233 {
1234 case OP_JMP:
1235 Log(("Control Flow instruction at %RRv: jmp!!\n", pCurInstrGC));
1236 break;
1237 case OP_CALL:
1238 Log(("Control Flow instruction at %RRv: call!!\n", pCurInstrGC));
1239 break;
1240 }
1241#endif
1242 return VWRN_CONTINUE_ANALYSIS;
1243 }
1244 return VWRN_CONTINUE_ANALYSIS;
1245 }
1246
1247 }
1248
1249 return VWRN_CONTINUE_ANALYSIS;
1250}
1251
1252#ifdef CSAM_ANALYSE_BEYOND_RET
1253/**
1254 * Wrapper for csamAnalyseCodeStream for call instructions.
1255 *
1256 * @returns VBox status code.
1257 * @param pVM Pointer to the VM.
1258 * @param pInstrGC Guest context pointer to privileged instruction
1259 * @param pCurInstrGC Guest context pointer to the current instruction
1260 * @param fCode32 16 or 32 bit code
1261 * @param pfnCSAMR3Analyse Callback for testing the disassembled instruction
1262 * @param pUserData User pointer (callback specific)
1263 *
1264 */
1265static int csamAnalyseCallCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, bool fCode32,
1266 PFN_CSAMR3ANALYSE pfnCSAMR3Analyse, void *pUserData, PCSAMP2GLOOKUPREC pCacheRec)
1267{
1268 int rc;
1269 CSAMCALLEXITREC CallExitRec;
1270 PCSAMCALLEXITREC pOldCallRec;
1271 PCSAMPAGE pPage = 0;
1272 uint32_t i;
1273
1274 CallExitRec.cInstrAfterRet = 0;
1275
1276 pOldCallRec = pCacheRec->pCallExitRec;
1277 pCacheRec->pCallExitRec = &CallExitRec;
1278
1279 rc = csamAnalyseCodeStream(pVM, pInstrGC, pCurInstrGC, fCode32, pfnCSAMR3Analyse, pUserData, pCacheRec);
1280
1281 for (i=0;i<CallExitRec.cInstrAfterRet;i++)
1282 {
1283 PCSAMPAGE pPage = 0;
1284
1285 pCurInstrGC = CallExitRec.pInstrAfterRetGC[i];
1286
1287 /* Check if we've previously encountered the instruction after the ret. */
1288 if (csamIsCodeScanned(pVM, pCurInstrGC, &pPage) == false)
1289 {
1290 DISCPUSTATE cpu;
1291 uint32_t cbInstr;
1292 int rc2;
1293#ifdef DEBUG
1294 char szOutput[256];
1295#endif
1296 if (pPage == NULL)
1297 {
1298 /* New address; let's take a look at it. */
1299 pPage = csamR3CreatePageRecord(pVM, pCurInstrGC, CSAM_TAG_CSAM, fCode32);
1300 if (pPage == NULL)
1301 {
1302 rc = VERR_NO_MEMORY;
1303 goto done;
1304 }
1305 }
1306
1307 /**
1308 * Some generic requirements for recognizing an adjacent function:
1309 * - alignment fillers that consist of:
1310 * - nop
1311 * - lea genregX, [genregX (+ 0)]
1312 * - push ebp after the filler (can extend this later); aligned at at least a 4 byte boundary
1313 */
1314 for (int j = 0; j < 16; j++)
1315 {
1316 uint8_t *pCurInstrHC = csamR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
1317 if (pCurInstrHC == NULL)
1318 {
1319 Log(("csamR3GCVirtToHCVirt failed for %RRv\n", pCurInstrGC));
1320 goto done;
1321 }
1322 Assert(VALID_PTR(pCurInstrHC));
1323
1324 STAM_PROFILE_START(&pVM->csam.s.StatTimeDisasm, a);
1325#ifdef DEBUG
1326 rc2 = csamR3DISInstr(pVM, pCurInstrGC, pCurInstrHC, (fCode32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
1327 &cpu, &cbInstr, szOutput, sizeof(szOutput));
1328 if (RT_SUCCESS(rc2)) Log(("CSAM Call Analysis: %s", szOutput));
1329#else
1330 rc2 = csamR3DISInstr(pVM, pCurInstrGC, pCurInstrHC, (fCode32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
1331 &cpu, &cbInstr, NULL, 0);
1332#endif
1333 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeDisasm, a);
1334 if (RT_FAILURE(rc2))
1335 {
1336 Log(("Disassembly failed at %RRv with %Rrc (probably page not present) -> return to caller\n", pCurInstrGC, rc2));
1337 goto done;
1338 }
1339
1340 STAM_COUNTER_ADD(&pVM->csam.s.StatNrBytesRead, cbInstr);
1341
1342 RCPTRTYPE(uint8_t *) addr = 0;
1343 PCSAMPAGE pJmpPage = NULL;
1344
1345 if (PAGE_ADDRESS(pCurInstrGC) != PAGE_ADDRESS(pCurInstrGC + cbInstr - 1))
1346 {
1347 if (!PGMGstIsPagePresent(VMMGetCpu0(pVM), pCurInstrGC + cbInstr - 1))
1348 {
1349 /// @todo fault in the page
1350 Log(("Page for current instruction %RRv is not present!!\n", pCurInstrGC));
1351 goto done;
1352 }
1353 //all is fine, let's continue
1354 csamR3CheckPageRecord(pVM, pCurInstrGC + cbInstr - 1);
1355 }
1356
1357 switch (cpu.pCurInstr->uOpcode)
1358 {
1359 case OP_NOP:
1360 case OP_INT3:
1361 break; /* acceptable */
1362
1363 case OP_LEA:
1364 /* Must be similar to:
1365 *
1366 * lea esi, [esi]
1367 * lea esi, [esi+0]
1368 * Any register is allowed as long as source and destination are identical.
1369 */
1370 if ( cpu.Param1.fUse != DISUSE_REG_GEN32
1371 || ( cpu.Param2.fUse != DISUSE_REG_GEN32
1372 && ( !(cpu.Param2.fUse & DISUSE_REG_GEN32)
1373 || !(cpu.Param2.fUse & (DISUSE_DISPLACEMENT8|DISUSE_DISPLACEMENT16|DISUSE_DISPLACEMENT32))
1374 || cpu.Param2.uValue != 0
1375 )
1376 )
1377 || cpu.Param1.Base.idxGenReg != cpu.Param2.Base.idxGenReg
1378 )
1379 {
1380 STAM_COUNTER_INC(&pVM->csam.s.StatScanNextFunctionFailed);
1381 goto next_function;
1382 }
1383 break;
1384
1385 case OP_PUSH:
1386 {
1387 if ( (pCurInstrGC & 0x3) != 0
1388 || cpu.Param1.fUse != DISUSE_REG_GEN32
1389 || cpu.Param1.Base.idxGenReg != DISGREG_EBP
1390 )
1391 {
1392 STAM_COUNTER_INC(&pVM->csam.s.StatScanNextFunctionFailed);
1393 goto next_function;
1394 }
1395
1396 if (csamIsCodeScanned(pVM, pCurInstrGC, &pPage) == false)
1397 {
1398 CSAMCALLEXITREC CallExitRec2;
1399 CallExitRec2.cInstrAfterRet = 0;
1400
1401 pCacheRec->pCallExitRec = &CallExitRec2;
1402
1403 /* Analyse the function. */
1404 Log(("Found new function at %RRv\n", pCurInstrGC));
1405 STAM_COUNTER_INC(&pVM->csam.s.StatScanNextFunction);
1406 csamAnalyseCallCodeStream(pVM, pInstrGC, pCurInstrGC, fCode32, pfnCSAMR3Analyse, pUserData, pCacheRec);
1407 }
1408 goto next_function;
1409 }
1410
1411 case OP_SUB:
1412 {
1413 if ( (pCurInstrGC & 0x3) != 0
1414 || cpu.Param1.fUse != DISUSE_REG_GEN32
1415 || cpu.Param1.Base.idxGenReg != DISGREG_ESP
1416 )
1417 {
1418 STAM_COUNTER_INC(&pVM->csam.s.StatScanNextFunctionFailed);
1419 goto next_function;
1420 }
1421
1422 if (csamIsCodeScanned(pVM, pCurInstrGC, &pPage) == false)
1423 {
1424 CSAMCALLEXITREC CallExitRec2;
1425 CallExitRec2.cInstrAfterRet = 0;
1426
1427 pCacheRec->pCallExitRec = &CallExitRec2;
1428
1429 /* Analyse the function. */
1430 Log(("Found new function at %RRv\n", pCurInstrGC));
1431 STAM_COUNTER_INC(&pVM->csam.s.StatScanNextFunction);
1432 csamAnalyseCallCodeStream(pVM, pInstrGC, pCurInstrGC, fCode32, pfnCSAMR3Analyse, pUserData, pCacheRec);
1433 }
1434 goto next_function;
1435 }
1436
1437 default:
1438 STAM_COUNTER_INC(&pVM->csam.s.StatScanNextFunctionFailed);
1439 goto next_function;
1440 }
1441 /* Mark it as scanned. */
1442 csamMarkCode(pVM, pPage, pCurInstrGC, cbInstr, true);
1443 pCurInstrGC += cbInstr;
1444 } /* for at most 16 instructions */
1445next_function:
1446 ; /* MSVC complains otherwise */
1447 }
1448 }
1449done:
1450 pCacheRec->pCallExitRec = pOldCallRec;
1451 return rc;
1452}
1453#else
1454#define csamAnalyseCallCodeStream csamAnalyseCodeStream
1455#endif
1456
1457/**
1458 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
1459 *
1460 * @returns VBox status code.
1461 * @param pVM Pointer to the VM.
1462 * @param pInstrGC Guest context pointer to privileged instruction
1463 * @param pCurInstrGC Guest context pointer to the current instruction
1464 * @param fCode32 16 or 32 bit code
1465 * @param pfnCSAMR3Analyse Callback for testing the disassembled instruction
1466 * @param pUserData User pointer (callback specific)
1467 *
1468 */
1469static int csamAnalyseCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, bool fCode32,
1470 PFN_CSAMR3ANALYSE pfnCSAMR3Analyse, void *pUserData, PCSAMP2GLOOKUPREC pCacheRec)
1471{
1472 DISCPUSTATE cpu;
1473 PCSAMPAGE pPage = (PCSAMPAGE)pUserData;
1474 int rc = VWRN_CONTINUE_ANALYSIS;
1475 uint32_t cbInstr;
1476 int rc2;
1477 Assert(pVM->cCpus == 1);
1478 PVMCPU pVCpu = VMMGetCpu0(pVM);
1479
1480#ifdef DEBUG
1481 char szOutput[256];
1482#endif
1483
1484 LogFlow(("csamAnalyseCodeStream: code at %RRv depth=%d\n", pCurInstrGC, pCacheRec->depth));
1485
1486 pVM->csam.s.fScanningStarted = true;
1487
1488 pCacheRec->depth++;
1489 /*
1490 * Limit the call depth. (rather arbitrary upper limit; too low and we won't detect certain
1491 * cpuid instructions in Linux kernels; too high and we waste too much time scanning code)
1492 * (512 is necessary to detect cpuid instructions in Red Hat EL4; see defect 1355)
1493 * @note we are using a lot of stack here; a couple of hundred KB when we go to the full depth (!)
1494 */
1495 if (pCacheRec->depth > 512)
1496 {
1497 LogFlow(("CSAM: maximum calldepth reached for %RRv\n", pCurInstrGC));
1498 pCacheRec->depth--;
1499 return VINF_SUCCESS; //let's not go on forever
1500 }
1501
1502 Assert(!PATMIsPatchGCAddr(pVM, pCurInstrGC));
1503 csamR3CheckPageRecord(pVM, pCurInstrGC);
1504
1505 while(rc == VWRN_CONTINUE_ANALYSIS)
1506 {
1507 if (csamIsCodeScanned(pVM, pCurInstrGC, &pPage) == false)
1508 {
1509 if (pPage == NULL)
1510 {
1511 /* New address; let's take a look at it. */
1512 pPage = csamR3CreatePageRecord(pVM, pCurInstrGC, CSAM_TAG_CSAM, fCode32);
1513 if (pPage == NULL)
1514 {
1515 rc = VERR_NO_MEMORY;
1516 goto done;
1517 }
1518 }
1519 }
1520 else
1521 {
1522 LogFlow(("Code at %RRv has been scanned before\n", pCurInstrGC));
1523 rc = VINF_SUCCESS;
1524 goto done;
1525 }
1526
1527 { /* Force pCurInstrHC out of scope after we stop using it (page lock!) */
1528 uint8_t *pCurInstrHC = csamR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
1529 if (pCurInstrHC == NULL)
1530 {
1531 Log(("csamR3GCVirtToHCVirt failed for %RRv\n", pCurInstrGC));
1532 rc = VERR_PATCHING_REFUSED;
1533 goto done;
1534 }
1535 Assert(VALID_PTR(pCurInstrHC));
1536
1537 STAM_PROFILE_START(&pVM->csam.s.StatTimeDisasm, a);
1538#ifdef DEBUG
1539 rc2 = csamR3DISInstr(pVM, pCurInstrGC, pCurInstrHC, fCode32 ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
1540 &cpu, &cbInstr, szOutput, sizeof(szOutput));
1541 if (RT_SUCCESS(rc2)) Log(("CSAM Analysis: %s", szOutput));
1542#else
1543 rc2 = csamR3DISInstr(pVM, pCurInstrGC, pCurInstrHC, fCode32 ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
1544 &cpu, &cbInstr, NULL, 0);
1545#endif
1546 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeDisasm, a);
1547 }
1548 if (RT_FAILURE(rc2))
1549 {
1550 Log(("Disassembly failed at %RRv with %Rrc (probably page not present) -> return to caller\n", pCurInstrGC, rc2));
1551 rc = VINF_SUCCESS;
1552 goto done;
1553 }
1554
1555 STAM_COUNTER_ADD(&pVM->csam.s.StatNrBytesRead, cbInstr);
1556
1557 csamMarkCode(pVM, pPage, pCurInstrGC, cbInstr, true);
1558
1559 RCPTRTYPE(uint8_t *) addr = 0;
1560 PCSAMPAGE pJmpPage = NULL;
1561
1562 if (PAGE_ADDRESS(pCurInstrGC) != PAGE_ADDRESS(pCurInstrGC + cbInstr - 1))
1563 {
1564 if (!PGMGstIsPagePresent(pVCpu, pCurInstrGC + cbInstr - 1))
1565 {
1566 /// @todo fault in the page
1567 Log(("Page for current instruction %RRv is not present!!\n", pCurInstrGC));
1568 rc = VWRN_CONTINUE_ANALYSIS;
1569 goto next_please;
1570 }
1571 //all is fine, let's continue
1572 csamR3CheckPageRecord(pVM, pCurInstrGC + cbInstr - 1);
1573 }
1574 /*
1575 * If it's harmless, then don't bother checking it (the disasm tables had better be accurate!)
1576 */
1577 if ((cpu.pCurInstr->fOpType & ~DISOPTYPE_RRM_MASK) == DISOPTYPE_HARMLESS)
1578 {
1579 AssertMsg(pfnCSAMR3Analyse(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec, (void *)pPage) == VWRN_CONTINUE_ANALYSIS, ("Instruction incorrectly marked harmless?!?!?\n"));
1580 rc = VWRN_CONTINUE_ANALYSIS;
1581 goto next_please;
1582 }
1583
1584#ifdef CSAM_ANALYSE_BEYOND_RET
1585 /* Remember the address of the instruction following the ret in case the parent instruction was a call. */
1586 if ( pCacheRec->pCallExitRec
1587 && cpu.pCurInstr->uOpcode == OP_RETN
1588 && pCacheRec->pCallExitRec->cInstrAfterRet < CSAM_MAX_CALLEXIT_RET)
1589 {
1590 pCacheRec->pCallExitRec->pInstrAfterRetGC[pCacheRec->pCallExitRec->cInstrAfterRet] = pCurInstrGC + cbInstr;
1591 pCacheRec->pCallExitRec->cInstrAfterRet++;
1592 }
1593#endif
1594
1595 rc = pfnCSAMR3Analyse(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec, (void *)pPage);
1596 if (rc == VINF_SUCCESS)
1597 goto done;
1598
1599 // For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction)
1600 if ( ((cpu.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW) && (OP_PARM_VTYPE(cpu.pCurInstr->fParam1) == OP_PARM_J))
1601 || (cpu.pCurInstr->uOpcode == OP_CALL && cpu.Param1.fUse == DISUSE_DISPLACEMENT32)) /* simple indirect call (call dword ptr [address]) */
1602 {
1603 /* We need to parse 'call dword ptr [address]' type of calls to catch cpuid instructions in some recent Linux distributions (e.g. OpenSuse 10.3) */
1604 if ( cpu.pCurInstr->uOpcode == OP_CALL
1605 && cpu.Param1.fUse == DISUSE_DISPLACEMENT32)
1606 {
1607 addr = 0;
1608 PGMPhysSimpleReadGCPtr(pVCpu, &addr, (RTRCUINTPTR)cpu.Param1.uDisp.i32, sizeof(addr));
1609 }
1610 else
1611 addr = CSAMResolveBranch(&cpu, pCurInstrGC);
1612
1613 if (addr == 0)
1614 {
1615 Log(("We don't support far jumps here!! (%08X)\n", cpu.Param1.fUse));
1616 rc = VINF_SUCCESS;
1617 break;
1618 }
1619 Assert(!PATMIsPatchGCAddr(pVM, addr));
1620
1621 /* If the target address lies in a patch generated jump, then special action needs to be taken. */
1622 PATMR3DetectConflict(pVM, pCurInstrGC, addr);
1623
1624 /* Same page? */
1625 if (PAGE_ADDRESS(addr) != PAGE_ADDRESS(pCurInstrGC ))
1626 {
1627 if (!PGMGstIsPagePresent(pVCpu, addr))
1628 {
1629 Log(("Page for current instruction %RRv is not present!!\n", addr));
1630 rc = VWRN_CONTINUE_ANALYSIS;
1631 goto next_please;
1632 }
1633
1634 /* All is fine, let's continue. */
1635 csamR3CheckPageRecord(pVM, addr);
1636 }
1637
1638 pJmpPage = NULL;
1639 if (csamIsCodeScanned(pVM, addr, &pJmpPage) == false)
1640 {
1641 if (pJmpPage == NULL)
1642 {
1643 /* New branch target; let's take a look at it. */
1644 pJmpPage = csamR3CreatePageRecord(pVM, addr, CSAM_TAG_CSAM, fCode32);
1645 if (pJmpPage == NULL)
1646 {
1647 rc = VERR_NO_MEMORY;
1648 goto done;
1649 }
1650 Assert(pPage);
1651 }
1652 if (cpu.pCurInstr->uOpcode == OP_CALL)
1653 rc = csamAnalyseCallCodeStream(pVM, pInstrGC, addr, fCode32, pfnCSAMR3Analyse, (void *)pJmpPage, pCacheRec);
1654 else
1655 rc = csamAnalyseCodeStream(pVM, pInstrGC, addr, fCode32, pfnCSAMR3Analyse, (void *)pJmpPage, pCacheRec);
1656
1657 if (rc != VINF_SUCCESS) {
1658 goto done;
1659 }
1660 }
1661 if (cpu.pCurInstr->uOpcode == OP_JMP)
1662 {//unconditional jump; return to caller
1663 rc = VINF_SUCCESS;
1664 goto done;
1665 }
1666
1667 rc = VWRN_CONTINUE_ANALYSIS;
1668 } //if ((cpu.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW) && (OP_PARM_VTYPE(cpu.pCurInstr->fParam1) == OP_PARM_J))
1669#ifdef CSAM_SCAN_JUMP_TABLE
1670 else
1671 if ( cpu.pCurInstr->uOpcode == OP_JMP
1672 && (cpu.Param1.fUse & (DISUSE_DISPLACEMENT32|DISUSE_INDEX|DISUSE_SCALE)) == (DISUSE_DISPLACEMENT32|DISUSE_INDEX|DISUSE_SCALE)
1673 )
1674 {
1675            RTRCPTR        pJumpTableGC = (RTRCPTR)cpu.Param1.uDisp.i32;
1676            uint8_t       *pJumpTableHC;
1677            PGMPAGEMAPLOCK Lock;
1678            int            rc2;
1679
1680            Log(("Jump through jump table\n"));
1681            rc2 = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, pJumpTableGC, (void const **)&pJumpTableHC, &Lock);
1682 if (rc2 == VINF_SUCCESS)
1683 {
1684 for (uint32_t i=0;i<2;i++)
1685 {
1686 uint64_t fFlags;
1687
1688                    addr = pJumpTableGC + cpu.Param1.uScale * i;
1689 /* Same page? */
1690 if (PAGE_ADDRESS(addr) != PAGE_ADDRESS(pJumpTableGC))
1691 break;
1692
1693                    addr = *(RTRCPTR *)(pJumpTableHC + cpu.Param1.uScale * i);
1694
1695 rc2 = PGMGstGetPage(pVCpu, addr, &fFlags, NULL);
1696 if ( rc2 != VINF_SUCCESS
1697 || (fFlags & X86_PTE_US)
1698 || !(fFlags & X86_PTE_P)
1699 )
1700 break;
1701
1702 Log(("Jump to %RRv\n", addr));
1703
1704 pJmpPage = NULL;
1705 if (csamIsCodeScanned(pVM, addr, &pJmpPage) == false)
1706 {
1707 if (pJmpPage == NULL)
1708 {
1709 /* New branch target; let's take a look at it. */
1710 pJmpPage = csamR3CreatePageRecord(pVM, addr, CSAM_TAG_CSAM, fCode32);
1711 if (pJmpPage == NULL)
1712 {
1713 rc = VERR_NO_MEMORY;
1714                                rc = VERR_NO_MEMORY;
                                    PGMPhysReleasePageMappingLock(pVM, &Lock);
1715                                goto done;
1716 Assert(pPage);
1717 }
1718 rc = csamAnalyseCodeStream(pVM, pInstrGC, addr, fCode32, pfnCSAMR3Analyse, (void *)pJmpPage, pCacheRec);
1719 if (rc != VINF_SUCCESS) {
                            PGMPhysReleasePageMappingLock(pVM, &Lock);
1720                        goto done;
1721 }
1722 }
1723                }
                    PGMPhysReleasePageMappingLock(pVM, &Lock);
1724            }
1725 }
1726#endif
1727 if (rc != VWRN_CONTINUE_ANALYSIS) {
1728 break; //done!
1729 }
1730next_please:
1731 if (cpu.pCurInstr->uOpcode == OP_JMP)
1732 {
1733 rc = VINF_SUCCESS;
1734 goto done;
1735 }
1736 pCurInstrGC += cbInstr;
1737 }
1738done:
1739 pCacheRec->depth--;
1740 return rc;
1741}
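
/*
 * The analysis loop above is, in essence, a depth-limited recursive walk over
 * branch targets: disassemble, mark the bytes as scanned, recurse into call
 * and jump targets, and stop at an unconditional jump or when the depth
 * budget (512) runs out.  What follows is only a minimal standalone sketch of
 * that control flow; SKETCHINSTR, sketchDisasm, sketchMark and the uTarget
 * field are illustrative stand-ins, not CSAM or DIS APIs.
 */
#if 0
typedef struct SKETCHINSTR
{
    unsigned  cb;           /* instruction length */
    bool      fBranch;      /* direct call/jump with a decodable target */
    bool      fUncondJmp;   /* unconditional jump ends the code stream */
    uintptr_t uTarget;      /* branch target, 0 if none */
} SKETCHINSTR;

static bool sketchDisasm(uintptr_t uPtr, SKETCHINSTR *pInstr); /* hypothetical helper */
static void sketchMark(uintptr_t uPtr, unsigned cb);           /* hypothetical helper */

static void sketchScan(uintptr_t uPtr, unsigned cDepth)
{
    if (cDepth > 512)                   /* same arbitrary limit as above */
        return;
    for (;;)
    {
        SKETCHINSTR Instr;
        if (!sketchDisasm(uPtr, &Instr))
            return;                     /* e.g. page not present -> give up */
        sketchMark(uPtr, Instr.cb);
        if (Instr.fBranch && Instr.uTarget)
            sketchScan(Instr.uTarget, cDepth + 1);
        if (Instr.fUncondJmp)
            return;                     /* unconditional jump: return to caller */
        uPtr += Instr.cb;
    }
}
#endif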
1742
1743
1744/**
1745 * Calculates the 64-bit hash value for the given page
1746 *
1747 * @returns hash value
1748 * @param pVM Pointer to the VM.
1749 * @param pInstr Page address
1750 */
1751uint64_t csamR3CalcPageHash(PVM pVM, RTRCPTR pInstr)
1752{
1753 uint64_t hash = 0;
1754 uint32_t val[5];
1755 int rc;
1756 Assert(pVM->cCpus == 1);
1757 PVMCPU pVCpu = VMMGetCpu0(pVM);
1758
1759 Assert((pInstr & PAGE_OFFSET_MASK) == 0);
1760
1761 rc = PGMPhysSimpleReadGCPtr(pVCpu, &val[0], pInstr, sizeof(val[0]));
1762 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc = %Rrc\n", rc));
1763 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
1764 {
1765 Log(("csamR3CalcPageHash: page %RRv not present!!\n", pInstr));
1766 return ~0ULL;
1767 }
1768
1769 rc = PGMPhysSimpleReadGCPtr(pVCpu, &val[1], pInstr+1024, sizeof(val[0]));
1770 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc = %Rrc\n", rc));
1771 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
1772 {
1773 Log(("csamR3CalcPageHash: page %RRv not present!!\n", pInstr));
1774 return ~0ULL;
1775 }
1776
1777 rc = PGMPhysSimpleReadGCPtr(pVCpu, &val[2], pInstr+2048, sizeof(val[0]));
1778 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc = %Rrc\n", rc));
1779 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
1780 {
1781 Log(("csamR3CalcPageHash: page %RRv not present!!\n", pInstr));
1782 return ~0ULL;
1783 }
1784
1785 rc = PGMPhysSimpleReadGCPtr(pVCpu, &val[3], pInstr+3072, sizeof(val[0]));
1786 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc = %Rrc\n", rc));
1787 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
1788 {
1789 Log(("csamR3CalcPageHash: page %RRv not present!!\n", pInstr));
1790 return ~0ULL;
1791 }
1792
1793 rc = PGMPhysSimpleReadGCPtr(pVCpu, &val[4], pInstr+4092, sizeof(val[0]));
1794 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc = %Rrc\n", rc));
1795 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
1796 {
1797 Log(("csamR3CalcPageHash: page %RRv not present!!\n", pInstr));
1798 return ~0ULL;
1799 }
1800
1801 // don't want to get division by zero traps
1802 val[2] |= 1;
1803 val[4] |= 1;
1804
1805 hash = (uint64_t)val[0] * (uint64_t)val[1] / (uint64_t)val[2] + (val[3]%val[4]);
1806 return (hash == ~0ULL) ? hash - 1 : hash;
1807}
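
/*
 * The hash above samples five dwords at fixed offsets inside the page and
 * combines them; ~0ULL is reserved as the "page not present" marker, which is
 * why a genuine ~0 result is nudged down by one.  csamR3CheckPageRecord later
 * recomputes this value and flushes the page when it no longer matches the
 * stored hash.  A standalone sketch of the same combination over an already
 * mapped page buffer (no guest memory reads; sketchPageHash is illustrative):
 */
#if 0
static uint64_t sketchPageHash(const uint8_t *pbPage /* PAGE_SIZE bytes */)
{
    static const uint32_t s_aOff[5] = { 0, 1024, 2048, 3072, 4092 };
    uint32_t aVal[5];
    for (unsigned i = 0; i < RT_ELEMENTS(s_aOff); i++)
        memcpy(&aVal[i], pbPage + s_aOff[i], sizeof(aVal[i]));

    aVal[2] |= 1;   /* avoid division by zero */
    aVal[4] |= 1;

    uint64_t uHash = (uint64_t)aVal[0] * aVal[1] / aVal[2] + aVal[3] % aVal[4];
    return uHash == ~0ULL ? uHash - 1 : uHash;  /* ~0 means "not present" */
}
#endif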
1808
1809
1810/**
1811 * Notify CSAM of a page flush
1812 *
1813 * @returns VBox status code
1814 * @param pVM Pointer to the VM.
1815 * @param addr GC address of the page to flush
1816 * @param fRemovePage Page removal flag
1817 */
1818static int csamFlushPage(PVM pVM, RTRCPTR addr, bool fRemovePage)
1819{
1820 PCSAMPAGEREC pPageRec;
1821 int rc;
1822 RTGCPHYS GCPhys = 0;
1823 uint64_t fFlags = 0;
1824 Assert(pVM->cCpus == 1 || !CSAMIsEnabled(pVM));
1825
1826 if (!CSAMIsEnabled(pVM))
1827 return VINF_SUCCESS;
1828 Assert(!HMIsEnabled(pVM));
1829
1830 PVMCPU pVCpu = VMMGetCpu0(pVM);
1831
1832 STAM_PROFILE_START(&pVM->csam.s.StatTimeFlushPage, a);
1833
1834 addr = addr & PAGE_BASE_GC_MASK;
1835
1836 /*
1837 * Note: searching for the page in our tree first is more expensive (skipped flushes are two orders of magnitude more common)
1838 */
1839 if (pVM->csam.s.pPageTree == NULL)
1840 {
1841 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeFlushPage, a);
1842 return VWRN_CSAM_PAGE_NOT_FOUND;
1843 }
1844
1845 rc = PGMGstGetPage(pVCpu, addr, &fFlags, &GCPhys);
1846 /* Returned at a very early stage (no paging yet presumably). */
1847 if (rc == VERR_NOT_SUPPORTED)
1848 {
1849 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeFlushPage, a);
1850 return rc;
1851 }
1852
1853 if (RT_SUCCESS(rc))
1854 {
1855 if ( (fFlags & X86_PTE_US)
1856 || rc == VERR_PGM_PHYS_PAGE_RESERVED
1857 )
1858 {
1859 /* User page -> not relevant for us. */
1860 STAM_COUNTER_ADD(&pVM->csam.s.StatNrFlushesSkipped, 1);
1861 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeFlushPage, a);
1862 return VINF_SUCCESS;
1863 }
1864 }
1865 else
1866 if (rc != VERR_PAGE_NOT_PRESENT && rc != VERR_PAGE_TABLE_NOT_PRESENT)
1867        AssertMsgFailed(("PGMGstGetPage %RRv failed with %Rrc\n", addr, rc));
1868
1869 pPageRec = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree, (AVLPVKEY)(uintptr_t)addr);
1870 if (pPageRec)
1871 {
1872 if ( GCPhys == pPageRec->page.GCPhys
1873 && (fFlags & X86_PTE_P))
1874 {
1875 STAM_COUNTER_ADD(&pVM->csam.s.StatNrFlushesSkipped, 1);
1876 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeFlushPage, a);
1877 return VINF_SUCCESS;
1878 }
1879
1880 Log(("CSAMR3FlushPage: page %RRv has changed -> FLUSH (rc=%Rrc) (Phys: %RGp vs %RGp)\n", addr, rc, GCPhys, pPageRec->page.GCPhys));
1881
1882 STAM_COUNTER_ADD(&pVM->csam.s.StatNrFlushes, 1);
1883
1884 if (fRemovePage)
1885 csamRemovePageRecord(pVM, addr);
1886 else
1887 {
1888 CSAMMarkPage(pVM, addr, false);
1889 pPageRec->page.GCPhys = 0;
1890 pPageRec->page.fFlags = 0;
1891 rc = PGMGstGetPage(pVCpu, addr, &pPageRec->page.fFlags, &pPageRec->page.GCPhys);
1892 if (rc == VINF_SUCCESS)
1893 pPageRec->page.u64Hash = csamR3CalcPageHash(pVM, addr);
1894
1895 if (pPageRec->page.pBitmap == NULL)
1896 {
1897 pPageRec->page.pBitmap = (uint8_t *)MMR3HeapAllocZ(pVM, MM_TAG_CSAM_PATCH, CSAM_PAGE_BITMAP_SIZE);
1898 Assert(pPageRec->page.pBitmap);
1899 if (pPageRec->page.pBitmap == NULL)
1900 return VERR_NO_MEMORY;
1901 }
1902 else
1903 memset(pPageRec->page.pBitmap, 0, CSAM_PAGE_BITMAP_SIZE);
1904 }
1905
1906
1907 /*
1908 * Inform patch manager about the flush; no need to repeat the above check twice.
1909 */
1910 PATMR3FlushPage(pVM, addr);
1911
1912 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeFlushPage, a);
1913 return VINF_SUCCESS;
1914 }
1915 else
1916 {
1917 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeFlushPage, a);
1918 return VWRN_CSAM_PAGE_NOT_FOUND;
1919 }
1920}
1921
1922/**
1923 * Notify CSAM of a page flush
1924 *
1925 * @returns VBox status code
1926 * @param pVM Pointer to the VM.
1927 * @param addr GC address of the page to flush
1928 */
1929VMMR3_INT_DECL(int) CSAMR3FlushPage(PVM pVM, RTRCPTR addr)
1930{
1931 return csamFlushPage(pVM, addr, true /* remove page record */);
1932}
1933
1934/**
1935 * Remove a CSAM monitored page. Use with care!
1936 *
1937 * @returns VBox status code
1938 * @param pVM Pointer to the VM.
1939 * @param addr GC address of the page to flush
1940 */
1941VMMR3_INT_DECL(int) CSAMR3RemovePage(PVM pVM, RTRCPTR addr)
1942{
1943 PCSAMPAGEREC pPageRec;
1944 int rc;
1945
1946 AssertReturn(!HMIsEnabled(pVM), VERR_CSAM_HM_IPE);
1947
1948 addr = addr & PAGE_BASE_GC_MASK;
1949
1950 pPageRec = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree, (AVLPVKEY)(uintptr_t)addr);
1951 if (pPageRec)
1952 {
1953 rc = csamRemovePageRecord(pVM, addr);
1954 if (RT_SUCCESS(rc))
1955 PATMR3FlushPage(pVM, addr);
1956 return VINF_SUCCESS;
1957 }
1958 return VWRN_CSAM_PAGE_NOT_FOUND;
1959}
1960
1961/**
1962 * Check a page record in case a page has been changed
1963 *
1964 * @returns VBox status code: VWRN_CSAM_PAGE_NOT_FOUND when no record exists for the page.
1965 * @param pVM Pointer to the VM.
1966 * @param pInstrGC GC instruction pointer
1967 */
1968int csamR3CheckPageRecord(PVM pVM, RTRCPTR pInstrGC)
1969{
1970 PCSAMPAGEREC pPageRec;
1971 uint64_t u64hash;
1972
1973 pInstrGC = pInstrGC & PAGE_BASE_GC_MASK;
1974
1975 pPageRec = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree, (AVLPVKEY)(uintptr_t)pInstrGC);
1976 if (pPageRec)
1977 {
1978 u64hash = csamR3CalcPageHash(pVM, pInstrGC);
1979 if (u64hash != pPageRec->page.u64Hash)
1980 csamFlushPage(pVM, pInstrGC, false /* don't remove page record */);
1981 }
1982 else
1983 return VWRN_CSAM_PAGE_NOT_FOUND;
1984
1985 return VINF_SUCCESS;
1986}
1987
1988/**
1989 * Returns monitor description based on CSAM tag
1990 *
1991 * @return description string
1992 * @param enmTag Owner tag
1993 */
1994const char *csamGetMonitorDescription(CSAMTAG enmTag)
1995{
1996 if (enmTag == CSAM_TAG_PATM)
1997 return "CSAM-PATM self-modifying code monitor handler";
1998 else
1999 if (enmTag == CSAM_TAG_REM)
2000 return "CSAM-REM self-modifying code monitor handler";
2001 Assert(enmTag == CSAM_TAG_CSAM);
2002 return "CSAM self-modifying code monitor handler";
2003}
2004
2005/**
2006 * Adds page record to our lookup tree
2007 *
2008 * @returns CSAMPAGE ptr or NULL if failure
2009 * @param pVM Pointer to the VM.
2010 * @param GCPtr Page address
2011 * @param enmTag Owner tag
2012 * @param fCode32 16 or 32 bits code
2013 * @param fMonitorInvalidation Monitor page invalidation flag
2014 */
2015static PCSAMPAGE csamR3CreatePageRecord(PVM pVM, RTRCPTR GCPtr, CSAMTAG enmTag, bool fCode32, bool fMonitorInvalidation)
2016{
2017 PCSAMPAGEREC pPage;
2018 int rc;
2019 bool ret;
2020 Assert(pVM->cCpus == 1);
2021 PVMCPU pVCpu = VMMGetCpu0(pVM);
2022
2023 Log(("New page record for %RRv\n", GCPtr & PAGE_BASE_GC_MASK));
2024
2025 pPage = (PCSAMPAGEREC)MMR3HeapAllocZ(pVM, MM_TAG_CSAM_PATCH, sizeof(CSAMPAGEREC));
2026 if (pPage == NULL)
2027 {
2028 AssertMsgFailed(("csamR3CreatePageRecord: Out of memory!!!!\n"));
2029 return NULL;
2030 }
2031 /* Round down to page boundary. */
2032 GCPtr = (GCPtr & PAGE_BASE_GC_MASK);
2033 pPage->Core.Key = (AVLPVKEY)(uintptr_t)GCPtr;
2034 pPage->page.pPageGC = GCPtr;
2035 pPage->page.fCode32 = fCode32;
2036 pPage->page.fMonitorInvalidation = fMonitorInvalidation;
2037 pPage->page.enmTag = enmTag;
2038 pPage->page.fMonitorActive = false;
2039 pPage->page.pBitmap = (uint8_t *)MMR3HeapAllocZ(pVM, MM_TAG_CSAM_PATCH, PAGE_SIZE/sizeof(uint8_t));
2040 rc = PGMGstGetPage(pVCpu, GCPtr, &pPage->page.fFlags, &pPage->page.GCPhys);
2041 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc = %Rrc\n", rc));
2042
2043 pPage->page.u64Hash = csamR3CalcPageHash(pVM, GCPtr);
2044 ret = RTAvlPVInsert(&pVM->csam.s.pPageTree, &pPage->Core);
2045 Assert(ret);
2046
2047#ifdef CSAM_MONITOR_CODE_PAGES
2048 AssertRelease(!g_fInCsamR3CodePageInvalidate);
2049
2050 switch (enmTag)
2051 {
2052 case CSAM_TAG_PATM:
2053 case CSAM_TAG_REM:
2054# ifdef CSAM_MONITOR_CSAM_CODE_PAGES
2055 case CSAM_TAG_CSAM:
2056# endif
2057 {
2058 rc = PGMR3HandlerVirtualRegister(pVM, pVCpu, fMonitorInvalidation
2059 ? pVM->csam.s.hCodePageWriteAndInvPgType : pVM->csam.s.hCodePageWriteType,
2060 GCPtr, GCPtr + (PAGE_SIZE - 1) /* inclusive! */,
2061 pPage, NIL_RTRCPTR, csamGetMonitorDescription(enmTag));
2062 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT,
2063 ("PGMR3HandlerVirtualRegister %RRv failed with %Rrc\n", GCPtr, rc));
2064 if (RT_FAILURE(rc))
2065 Log(("PGMR3HandlerVirtualRegister for %RRv failed with %Rrc\n", GCPtr, rc));
2066
2067 /* Could fail, because it's already monitored. Don't treat that condition as fatal. */
2068
2069 /* Prefetch it in case it's not there yet. */
2070 rc = PGMPrefetchPage(pVCpu, GCPtr);
2071 AssertRC(rc);
2072
2073 rc = PGMShwMakePageReadonly(pVCpu, GCPtr, 0 /*fFlags*/);
2074 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2075
2076 pPage->page.fMonitorActive = true;
2077 STAM_COUNTER_INC(&pVM->csam.s.StatPageMonitor);
2078 break;
2079 }
2080 default:
2081 break; /* to shut up GCC */
2082 }
2083
2084 Log(("csamR3CreatePageRecord %RRv GCPhys=%RGp\n", GCPtr, pPage->page.GCPhys));
2085
2086# ifdef VBOX_WITH_STATISTICS
2087 switch (enmTag)
2088 {
2089 case CSAM_TAG_CSAM:
2090 STAM_COUNTER_INC(&pVM->csam.s.StatPageCSAM);
2091 break;
2092 case CSAM_TAG_PATM:
2093 STAM_COUNTER_INC(&pVM->csam.s.StatPagePATM);
2094 break;
2095 case CSAM_TAG_REM:
2096 STAM_COUNTER_INC(&pVM->csam.s.StatPageREM);
2097 break;
2098 default:
2099 break; /* to shut up GCC */
2100 }
2101# endif
2102
2103#endif
2104
2105 STAM_COUNTER_INC(&pVM->csam.s.StatNrPages);
2106 if (fMonitorInvalidation)
2107 STAM_COUNTER_INC(&pVM->csam.s.StatNrPagesInv);
2108
2109 return &pPage->page;
2110}
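
/*
 * Page records are keyed by the page base address in a pointer-keyed AVL
 * tree, and every lookup in this file normalises the address the same way
 * (GCPtr & PAGE_BASE_GC_MASK).  A minimal lookup-or-create sketch using the
 * same iprt/avl.h primitives; sketchGetOrCreatePageRec is illustrative only,
 * and the CSAM-specific initialisation, monitoring and statistics done above
 * are intentionally left out:
 */
#if 0
static PCSAMPAGEREC sketchGetOrCreatePageRec(PVM pVM, RTRCPTR GCPtr)
{
    GCPtr &= PAGE_BASE_GC_MASK;         /* the key is always the page base */
    PCSAMPAGEREC pRec = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree,
                                                 (AVLPVKEY)(uintptr_t)GCPtr);
    if (!pRec)
    {
        pRec = (PCSAMPAGEREC)MMR3HeapAllocZ(pVM, MM_TAG_CSAM_PATCH, sizeof(*pRec));
        if (pRec)
        {
            pRec->Core.Key     = (AVLPVKEY)(uintptr_t)GCPtr;
            pRec->page.pPageGC = GCPtr;
            RTAvlPVInsert(&pVM->csam.s.pPageTree, &pRec->Core);
        }
    }
    return pRec;
}
#endif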
2111
2112/**
2113 * Monitors a code page (if not already monitored)
2114 *
2115 * @returns VBox status code
2116 * @param pVM Pointer to the VM.
2117 * @param pPageAddrGC The page to monitor
2118 * @param enmTag Monitor tag
2119 */
2120VMMR3DECL(int) CSAMR3MonitorPage(PVM pVM, RTRCPTR pPageAddrGC, CSAMTAG enmTag)
2121{
2122
2123 int rc;
2124 bool fMonitorInvalidation;
2125 Assert(pVM->cCpus == 1);
2126 PVMCPU pVCpu = VMMGetCpu0(pVM);
2127 Assert(!HMIsEnabled(pVM));
2128
2129    /* Dirty pages must be handled before calling this function! */
2130 Assert(!pVM->csam.s.cDirtyPages);
2131
2132 if (pVM->csam.s.fScanningStarted == false)
2133 return VINF_SUCCESS; /* too early */
2134
2135 pPageAddrGC &= PAGE_BASE_GC_MASK;
2136
2137 Log(("CSAMR3MonitorPage %RRv %d\n", pPageAddrGC, enmTag));
2138
2139    /** @todo Implicit assumption: only PATM-tagged pages need invalidation monitoring. */
2140 fMonitorInvalidation = (enmTag == CSAM_TAG_PATM);
2141
2142 PCSAMPAGEREC pPageRec = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree, (AVLPVKEY)(uintptr_t)pPageAddrGC);
2143 if (pPageRec == NULL)
2144 {
2145 uint64_t fFlags;
2146
2147 rc = PGMGstGetPage(pVCpu, pPageAddrGC, &fFlags, NULL);
2148 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc = %Rrc\n", rc));
2149 if ( rc == VINF_SUCCESS
2150 && (fFlags & X86_PTE_US))
2151 {
2152 /* We don't care about user pages. */
2153 STAM_COUNTER_INC(&pVM->csam.s.StatNrUserPages);
2154 return VINF_SUCCESS;
2155 }
2156
2157 csamR3CreatePageRecord(pVM, pPageAddrGC, enmTag, true /* 32 bits code */, fMonitorInvalidation);
2158
2159 pPageRec = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree, (AVLPVKEY)(uintptr_t)pPageAddrGC);
2160 Assert(pPageRec);
2161 }
2162 /** @todo reference count */
2163
2164#ifdef CSAM_MONITOR_CSAM_CODE_PAGES
2165 Assert(pPageRec->page.fMonitorActive);
2166#endif
2167
2168#ifdef CSAM_MONITOR_CODE_PAGES
2169 if (!pPageRec->page.fMonitorActive)
2170 {
2171 Log(("CSAMR3MonitorPage: activate monitoring for %RRv\n", pPageAddrGC));
2172
2173 rc = PGMR3HandlerVirtualRegister(pVM, pVCpu, fMonitorInvalidation
2174 ? pVM->csam.s.hCodePageWriteAndInvPgType : pVM->csam.s.hCodePageWriteType,
2175 pPageAddrGC, pPageAddrGC + (PAGE_SIZE - 1) /* inclusive! */,
2176 pPageRec, NIL_RTRCPTR /*pvUserRC*/, csamGetMonitorDescription(enmTag));
2177 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT,
2178 ("PGMR3HandlerVirtualRegister %RRv failed with %Rrc\n", pPageAddrGC, rc));
2179 if (RT_FAILURE(rc))
2180 Log(("PGMR3HandlerVirtualRegister for %RRv failed with %Rrc\n", pPageAddrGC, rc));
2181
2182 /* Could fail, because it's already monitored. Don't treat that condition as fatal. */
2183
2184 /* Prefetch it in case it's not there yet. */
2185 rc = PGMPrefetchPage(pVCpu, pPageAddrGC);
2186 AssertRC(rc);
2187
2188 rc = PGMShwMakePageReadonly(pVCpu, pPageAddrGC, 0 /*fFlags*/);
2189 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2190
2191 STAM_COUNTER_INC(&pVM->csam.s.StatPageMonitor);
2192
2193 pPageRec->page.fMonitorActive = true;
2194 pPageRec->page.fMonitorInvalidation = fMonitorInvalidation;
2195 }
2196 else
2197 if ( !pPageRec->page.fMonitorInvalidation
2198 && fMonitorInvalidation)
2199 {
2200 Assert(pPageRec->page.fMonitorActive);
2201 rc = PGMHandlerVirtualChangeType(pVM, pPageRec->page.pPageGC, pVM->csam.s.hCodePageWriteAndInvPgType);
2202 AssertRC(rc);
2203 pPageRec->page.fMonitorInvalidation = true;
2204 STAM_COUNTER_INC(&pVM->csam.s.StatNrPagesInv);
2205
2206 /* Prefetch it in case it's not there yet. */
2207 rc = PGMPrefetchPage(pVCpu, pPageAddrGC);
2208 AssertRC(rc);
2209
2210 /* Make sure it's readonly. Page invalidation may have modified the attributes. */
2211 rc = PGMShwMakePageReadonly(pVCpu, pPageAddrGC, 0 /*fFlags*/);
2212 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2213 }
2214
2215#if 0 /* def VBOX_STRICT -> very annoying */
2216 if (pPageRec->page.fMonitorActive)
2217 {
2218 uint64_t fPageShw;
2219 RTHCPHYS GCPhys;
2220 rc = PGMShwGetPage(pVCpu, pPageAddrGC, &fPageShw, &GCPhys);
2221// AssertMsg( (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
2222// || !(fPageShw & X86_PTE_RW)
2223// || (pPageRec->page.GCPhys == 0), ("Shadow page flags for %RRv (%RHp) aren't readonly (%RX64)!!\n", pPageAddrGC, GCPhys, fPageShw));
2224 }
2225#endif
2226
2227 if (pPageRec->page.GCPhys == 0)
2228 {
2229 /* Prefetch it in case it's not there yet. */
2230 rc = PGMPrefetchPage(pVCpu, pPageAddrGC);
2231 AssertRC(rc);
2232 /* The page was changed behind our back. It won't be made read-only until the next SyncCR3, so force it here. */
2233 rc = PGMShwMakePageReadonly(pVCpu, pPageAddrGC, 0 /*fFlags*/);
2234 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2235 }
2236#endif /* CSAM_MONITOR_CODE_PAGES */
2237 return VINF_SUCCESS;
2238}
2239
2240/**
2241 * Unmonitors a code page
2242 *
2243 * @returns VBox status code
2244 * @param pVM Pointer to the VM.
2245 * @param pPageAddrGC The page to stop monitoring
2246 * @param enmTag Monitor tag
2247 */
2248VMMR3DECL(int) CSAMR3UnmonitorPage(PVM pVM, RTRCPTR pPageAddrGC, CSAMTAG enmTag)
2249{
2250 Assert(!HMIsEnabled(pVM));
2251
2252 pPageAddrGC &= PAGE_BASE_GC_MASK;
2253
2254 Log(("CSAMR3UnmonitorPage %RRv %d\n", pPageAddrGC, enmTag));
2255
2256 Assert(enmTag == CSAM_TAG_REM);
2257
2258#ifdef VBOX_STRICT
2259 PCSAMPAGEREC pPageRec;
2260
2261 pPageRec = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree, (AVLPVKEY)(uintptr_t)pPageAddrGC);
2262 Assert(pPageRec && pPageRec->page.enmTag == enmTag);
2263#endif
2264 return CSAMR3RemovePage(pVM, pPageAddrGC);
2265}
2266
2267/**
2268 * Removes a page record from our lookup tree
2269 *
2270 * @returns VBox status code
2271 * @param pVM Pointer to the VM.
2272 * @param GCPtr Page address
2273 */
2274static int csamRemovePageRecord(PVM pVM, RTRCPTR GCPtr)
2275{
2276 PCSAMPAGEREC pPageRec;
2277 Assert(pVM->cCpus == 1);
2278 PVMCPU pVCpu = VMMGetCpu0(pVM);
2279
2280 Log(("csamRemovePageRecord %RRv\n", GCPtr));
2281 pPageRec = (PCSAMPAGEREC)RTAvlPVRemove(&pVM->csam.s.pPageTree, (AVLPVKEY)(uintptr_t)GCPtr);
2282
2283 if (pPageRec)
2284 {
2285 STAM_COUNTER_INC(&pVM->csam.s.StatNrRemovedPages);
2286
2287#ifdef CSAM_MONITOR_CODE_PAGES
2288 if (pPageRec->page.fMonitorActive)
2289 {
2290            /** @todo This is expensive (cr3 reload)!!!
2291             *  If this happens often, then reuse the handler instead!!!
2292             */
2293 Assert(!g_fInCsamR3CodePageInvalidate);
2294 STAM_COUNTER_DEC(&pVM->csam.s.StatPageMonitor);
2295 PGMHandlerVirtualDeregister(pVM, pVCpu, GCPtr, false /*fHypervisor*/);
2296 }
2297 if (pPageRec->page.enmTag == CSAM_TAG_PATM)
2298 {
2299 /* Make sure the recompiler flushes its cache as this page is no longer monitored. */
2300 STAM_COUNTER_INC(&pVM->csam.s.StatPageRemoveREMFlush);
2301 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
2302 }
2303#endif
2304
2305#ifdef VBOX_WITH_STATISTICS
2306 switch (pPageRec->page.enmTag)
2307 {
2308 case CSAM_TAG_CSAM:
2309 STAM_COUNTER_DEC(&pVM->csam.s.StatPageCSAM);
2310 break;
2311 case CSAM_TAG_PATM:
2312 STAM_COUNTER_DEC(&pVM->csam.s.StatPagePATM);
2313 break;
2314 case CSAM_TAG_REM:
2315 STAM_COUNTER_DEC(&pVM->csam.s.StatPageREM);
2316 break;
2317 default:
2318 break; /* to shut up GCC */
2319 }
2320#endif
2321
2322 if (pPageRec->page.pBitmap) MMR3HeapFree(pPageRec->page.pBitmap);
2323 MMR3HeapFree(pPageRec);
2324 }
2325 else
2326 AssertFailed();
2327
2328 return VINF_SUCCESS;
2329}
2330
2331/**
2332 * Callback for delayed writes from non-EMT threads
2333 *
2334 * @param pVM Pointer to the VM.
2335 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
2336 * @param cbBuf How much it's reading/writing.
2337 */
2338static DECLCALLBACK(void) CSAMDelayedWriteHandler(PVM pVM, RTRCPTR GCPtr, size_t cbBuf)
2339{
2340 int rc = PATMR3PatchWrite(pVM, GCPtr, (uint32_t)cbBuf);
2341 AssertRC(rc);
2342}
2343
2344/**
2345 * \#PF Handler callback for invalidation of virtual access handler ranges.
2346 *
2347 * @param pVM Pointer to the VM.
2348 * @param pVCpu Pointer to the cross context CPU context for the
2349 * calling EMT.
2350 * @param GCPtr The virtual address the guest has changed.
2351 *
2352 * @remarks Not currently called by PGM. It was actually only called for a month
2353 * back in 2006...
2354 */
2355static DECLCALLBACK(int) csamR3CodePageInvalidate(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvUser)
2356{
2357 g_fInCsamR3CodePageInvalidate = true;
2358 LogFlow(("csamR3CodePageInvalidate %RGv\n", GCPtr));
2359 /** @todo We can't remove the page (which unregisters the virtual handler) as we are called from a DoWithAll on the virtual handler tree. Argh. */
2360 csamFlushPage(pVM, GCPtr, false /* don't remove page! */);
2361 g_fInCsamR3CodePageInvalidate = false;
2362 return VINF_SUCCESS;
2363}
2364
2365/**
2366 * Check if the current instruction has already been checked before
2367 *
2368 * @returns true if the instruction has already been scanned, false otherwise.
2369 * @param pVM Pointer to the VM.
2370 * @param pInstr Instruction pointer
2371 * @param pPage In/out cache of the CSAM page record pointer
2372 */
2373bool csamIsCodeScanned(PVM pVM, RTRCPTR pInstr, PCSAMPAGE *pPage)
2374{
2375 PCSAMPAGEREC pPageRec;
2376 uint32_t offset;
2377
2378 STAM_PROFILE_START(&pVM->csam.s.StatTimeCheckAddr, a);
2379
2380 offset = pInstr & PAGE_OFFSET_MASK;
2381 pInstr = pInstr & PAGE_BASE_GC_MASK;
2382
2383 Assert(pPage);
2384
2385 if (*pPage && (*pPage)->pPageGC == pInstr)
2386 {
2387 if ((*pPage)->pBitmap == NULL || ASMBitTest((*pPage)->pBitmap, offset))
2388 {
2389 STAM_COUNTER_ADD(&pVM->csam.s.StatNrKnownPagesHC, 1);
2390 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeCheckAddr, a);
2391 return true;
2392 }
2393 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeCheckAddr, a);
2394 return false;
2395 }
2396
2397 pPageRec = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree, (AVLPVKEY)(uintptr_t)pInstr);
2398 if (pPageRec)
2399 {
2400 if (pPage) *pPage= &pPageRec->page;
2401 if (pPageRec->page.pBitmap == NULL || ASMBitTest(pPageRec->page.pBitmap, offset))
2402 {
2403 STAM_COUNTER_ADD(&pVM->csam.s.StatNrKnownPagesHC, 1);
2404 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeCheckAddr, a);
2405 return true;
2406 }
2407 }
2408 else
2409 {
2410 if (pPage) *pPage = NULL;
2411 }
2412 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeCheckAddr, a);
2413 return false;
2414}
2415
2416/**
2417 * Mark an instruction in a page as scanned/not scanned
2418 *
2419 * @param pVM Pointer to the VM.
2420 * @param pPage Patch structure pointer
2421 * @param pInstr Instruction pointer
2422 * @param cbInstr Instruction size
2423 * @param fScanned Mark as scanned or not
2424 */
2425static void csamMarkCode(PVM pVM, PCSAMPAGE pPage, RTRCPTR pInstr, uint32_t cbInstr, bool fScanned)
2426{
2427    LogFlow(("csamMarkCode %RRv cbInstr=%d fScanned=%d\n", pInstr, cbInstr, fScanned));
2428 CSAMMarkPage(pVM, pInstr, fScanned);
2429
2430 /** @todo should recreate empty bitmap if !fScanned */
2431 if (pPage->pBitmap == NULL)
2432 return;
2433
2434 if (fScanned)
2435 {
2436 // retn instructions can be scanned more than once
2437 if (ASMBitTest(pPage->pBitmap, pInstr & PAGE_OFFSET_MASK) == 0)
2438 {
2439 pPage->uSize += cbInstr;
2440 STAM_COUNTER_ADD(&pVM->csam.s.StatNrInstr, 1);
2441 }
2442 if (pPage->uSize >= PAGE_SIZE)
2443 {
2444 Log(("Scanned full page (%RRv) -> free bitmap\n", pInstr & PAGE_BASE_GC_MASK));
2445 MMR3HeapFree(pPage->pBitmap);
2446 pPage->pBitmap = NULL;
2447 }
2448 else
2449 ASMBitSet(pPage->pBitmap, pInstr & PAGE_OFFSET_MASK);
2450 }
2451 else
2452 ASMBitClear(pPage->pBitmap, pInstr & PAGE_OFFSET_MASK);
2453}
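
/*
 * The scanned state of a page is tracked with one bit per byte offset, set on
 * the first byte of each scanned instruction; a NULL bitmap means the whole
 * page has been scanned, which is the fast path csamIsCodeScanned checks
 * first.  A standalone sketch of that test/set pair on a plain bitmap
 * (sketchIsOffsetScanned and sketchMarkOffsetScanned are illustrative only):
 */
#if 0
static bool sketchIsOffsetScanned(const uint8_t *pbBitmap, RTRCPTR pInstr)
{
    if (!pbBitmap)                              /* NULL: whole page scanned */
        return true;
    return ASMBitTest(pbBitmap, pInstr & PAGE_OFFSET_MASK);
}

static void sketchMarkOffsetScanned(uint8_t *pbBitmap, RTRCPTR pInstr)
{
    if (pbBitmap)                               /* NULL: already fully scanned */
        ASMBitSet(pbBitmap, pInstr & PAGE_OFFSET_MASK);
}
#endif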
2454
2455/**
2456 * Mark an instruction in a page as scanned/not scanned
2457 *
2458 * @returns VBox status code.
2459 * @param pVM Pointer to the VM.
2460 * @param pInstr Instruction pointer
2461 * @param cbInstr Instruction size
2462 * @param fScanned Mark as scanned or not
2463 */
2464VMMR3_INT_DECL(int) CSAMR3MarkCode(PVM pVM, RTRCPTR pInstr, uint32_t cbInstr, bool fScanned)
2465{
2466 PCSAMPAGE pPage = 0;
2467
2468 Assert(!fScanned); /* other case not implemented. */
2469 Assert(!PATMIsPatchGCAddr(pVM, pInstr));
2470 Assert(!HMIsEnabled(pVM));
2471
2472 if (csamIsCodeScanned(pVM, pInstr, &pPage) == false)
2473 {
2474 Assert(fScanned == true); /* other case should not be possible */
2475 return VINF_SUCCESS;
2476 }
2477
2478 Log(("CSAMR3MarkCode: %RRv size=%d fScanned=%d\n", pInstr, cbInstr, fScanned));
2479 csamMarkCode(pVM, pPage, pInstr, cbInstr, fScanned);
2480 return VINF_SUCCESS;
2481}
2482
2483
2484/**
2485 * Scan and analyse code
2486 *
2487 * @returns VBox status code.
2488 * @param pVM Pointer to the VM.
2489 * @param pCtx Guest CPU context.
2490 * @param pInstrGC Instruction pointer.
2491 */
2492VMMR3_INT_DECL(int) CSAMR3CheckCodeEx(PVM pVM, PCPUMCTX pCtx, RTRCPTR pInstrGC)
2493{
2494 Assert(!HMIsEnabled(pVM));
2495 if (EMIsRawRing0Enabled(pVM) == false || PATMIsPatchGCAddr(pVM, pInstrGC) == true)
2496 {
2497 // No use
2498 return VINF_SUCCESS;
2499 }
2500
2501 if (CSAMIsEnabled(pVM))
2502 {
2503 /* Assuming 32 bits code for now. */
2504 Assert(CPUMGetGuestCodeBits(VMMGetCpu0(pVM)) == 32);
2505
2506 pInstrGC = SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pInstrGC);
2507 return CSAMR3CheckCode(pVM, pInstrGC);
2508 }
2509 return VINF_SUCCESS;
2510}
2511
2512/**
2513 * Scan and analyse code
2514 *
2515 * @returns VBox status code.
2516 * @param pVM Pointer to the VM.
2517 * @param pInstrGC Instruction pointer (0:32 virtual address)
2518 */
2519VMMR3_INT_DECL(int) CSAMR3CheckCode(PVM pVM, RTRCPTR pInstrGC)
2520{
2521 int rc;
2522 PCSAMPAGE pPage = NULL;
2523 Assert(!HMIsEnabled(pVM));
2524
2525 if ( EMIsRawRing0Enabled(pVM) == false
2526 || PATMIsPatchGCAddr(pVM, pInstrGC) == true)
2527 {
2528 /* Not active. */
2529 return VINF_SUCCESS;
2530 }
2531
2532 if (CSAMIsEnabled(pVM))
2533 {
2534 /* Cache record for csamR3GCVirtToHCVirt */
2535 CSAMP2GLOOKUPREC cacheRec;
2536 RT_ZERO(cacheRec);
2537
2538 STAM_PROFILE_START(&pVM->csam.s.StatTime, a);
2539 rc = csamAnalyseCallCodeStream(pVM, pInstrGC, pInstrGC, true /* 32 bits code */, CSAMR3AnalyseCallback, pPage, &cacheRec);
2540 STAM_PROFILE_STOP(&pVM->csam.s.StatTime, a);
2541 if (cacheRec.Lock.pvMap)
2542 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2543
2544 if (rc != VINF_SUCCESS)
2545 {
2546 Log(("csamAnalyseCodeStream failed with %d\n", rc));
2547 return rc;
2548 }
2549 }
2550 return VINF_SUCCESS;
2551}
2552
2553/**
2554 * Flush dirty code pages
2555 *
2556 * @returns VBox status code.
2557 * @param pVM Pointer to the VM.
2558 */
2559static int csamR3FlushDirtyPages(PVM pVM)
2560{
2561 Assert(pVM->cCpus == 1);
2562 PVMCPU pVCpu = VMMGetCpu0(pVM);
2563
2564 STAM_PROFILE_START(&pVM->csam.s.StatFlushDirtyPages, a);
2565
2566 for (uint32_t i = 0; i < pVM->csam.s.cDirtyPages; i++)
2567 {
2568 int rc;
2569 PCSAMPAGEREC pPageRec;
2570 RTRCPTR GCPtr = pVM->csam.s.pvDirtyBasePage[i] & PAGE_BASE_GC_MASK;
2571
2572#ifdef VBOX_WITH_REM
2573 /* Notify the recompiler that this page has been changed. */
2574 REMR3NotifyCodePageChanged(pVM, pVCpu, GCPtr);
2575 if (pVM->csam.s.pvDirtyFaultPage[i] != pVM->csam.s.pvDirtyBasePage[i])
2576 REMR3NotifyCodePageChanged(pVM, pVCpu, pVM->csam.s.pvDirtyFaultPage[i] & PAGE_BASE_GC_MASK);
2577#endif
2578
2579 /* Enable write protection again. (use the fault address as it might be an alias) */
2580 rc = PGMShwMakePageReadonly(pVCpu, pVM->csam.s.pvDirtyFaultPage[i], 0 /*fFlags*/);
2581 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2582
2583 Log(("CSAMR3FlushDirtyPages: flush %RRv (modifypage rc=%Rrc)\n", pVM->csam.s.pvDirtyBasePage[i], rc));
2584
2585 pPageRec = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree, (AVLPVKEY)(uintptr_t)GCPtr);
2586 if (pPageRec && pPageRec->page.enmTag == CSAM_TAG_REM)
2587 {
2588 uint64_t fFlags;
2589
2590 rc = PGMGstGetPage(pVCpu, GCPtr, &fFlags, NULL);
2591 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc = %Rrc\n", rc));
2592 if ( rc == VINF_SUCCESS
2593 && (fFlags & X86_PTE_US))
2594 {
2595 /* We don't care about user pages. */
2596 csamRemovePageRecord(pVM, GCPtr);
2597 STAM_COUNTER_INC(&pVM->csam.s.StatNrUserPages);
2598 }
2599 }
2600 }
2601 pVM->csam.s.cDirtyPages = 0;
2602 STAM_PROFILE_STOP(&pVM->csam.s.StatFlushDirtyPages, a);
2603 return VINF_SUCCESS;
2604}
2605
2606/**
2607 * Flush potential new code pages
2608 *
2609 * @returns VBox status code.
2610 * @param pVM Pointer to the VM.
2611 */
2612static int csamR3FlushCodePages(PVM pVM)
2613{
2614 Assert(pVM->cCpus == 1);
2615 PVMCPU pVCpu = VMMGetCpu0(pVM);
2616
2617 for (uint32_t i=0;i<pVM->csam.s.cPossibleCodePages;i++)
2618 {
2619 RTRCPTR GCPtr = pVM->csam.s.pvPossibleCodePage[i];
2620
2621 GCPtr = GCPtr & PAGE_BASE_GC_MASK;
2622
2623 Log(("csamR3FlushCodePages: %RRv\n", GCPtr));
2624 PGMShwMakePageNotPresent(pVCpu, GCPtr, 0 /*fFlags*/);
2625 /* Resync the page to make sure instruction fetch will fault */
2626 CSAMMarkPage(pVM, GCPtr, false);
2627 }
2628 pVM->csam.s.cPossibleCodePages = 0;
2629 return VINF_SUCCESS;
2630}
2631
2632/**
2633 * Perform any pending actions
2634 *
2635 * @returns VBox status code.
2636 * @param pVM Pointer to the VM.
2637 * @param pVCpu Pointer to the VMCPU.
2638 */
2639VMMR3_INT_DECL(int) CSAMR3DoPendingAction(PVM pVM, PVMCPU pVCpu)
2640{
2641 AssertReturn(!HMIsEnabled(pVM), VERR_CSAM_HM_IPE);
2642
2643 csamR3FlushDirtyPages(pVM);
2644 csamR3FlushCodePages(pVM);
2645
2646 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION);
2647 return VINF_SUCCESS;
2648}
2649
2650/**
2651 * Analyse interrupt and trap gates
2652 *
2653 * @returns VBox status code.
2654 * @param pVM Pointer to the VM.
2655 * @param iGate Start gate
2656 * @param cGates Number of gates to check
2657 */
2658VMMR3_INT_DECL(int) CSAMR3CheckGates(PVM pVM, uint32_t iGate, uint32_t cGates)
2659{
2660#ifdef VBOX_WITH_RAW_MODE
2661 Assert(pVM->cCpus == 1);
2662 PVMCPU pVCpu = VMMGetCpu0(pVM);
2663 uint16_t cbIDT;
2664 RTRCPTR GCPtrIDT = CPUMGetGuestIDTR(pVCpu, &cbIDT);
2665 uint32_t iGateEnd;
2666 uint32_t maxGates;
2667 VBOXIDTE aIDT[256];
2668 PVBOXIDTE pGuestIdte;
2669 int rc;
2670
2671 AssertReturn(!HMIsEnabled(pVM), VERR_CSAM_HM_IPE);
2672 if (EMIsRawRing0Enabled(pVM) == false)
2673 {
2674 /* Enabling interrupt gates only works when raw ring 0 is enabled. */
2675 //AssertFailed();
2676 return VINF_SUCCESS;
2677 }
2678
2679    /* All gates are only checked once, during the initial full 256-gate scan. */
2680 if ( !pVM->csam.s.fGatesChecked
2681 && cGates != 256)
2682 return VINF_SUCCESS; /* too early */
2683
2684    /* After the full scan, only single-gate updates are processed here. */
2685 if ( pVM->csam.s.fGatesChecked
2686 && cGates != 1)
2687 return VINF_SUCCESS; /* ignored */
2688
2689 Assert(cGates <= 256);
2690 if (!GCPtrIDT || cGates > 256)
2691 return VERR_INVALID_PARAMETER;
2692
2693 if (cGates != 1)
2694 {
2695 pVM->csam.s.fGatesChecked = true;
2696 for (unsigned i=0;i<RT_ELEMENTS(pVM->csam.s.pvCallInstruction);i++)
2697 {
2698 RTRCPTR pHandler = pVM->csam.s.pvCallInstruction[i];
2699
2700 if (pHandler)
2701 {
2702 PCSAMPAGE pPage = NULL;
2703 CSAMP2GLOOKUPREC cacheRec; /* Cache record for csamR3GCVirtToHCVirt. */
2704 RT_ZERO(cacheRec);
2705
2706 Log(("CSAMCheckGates: checking previous call instruction %RRv\n", pHandler));
2707 STAM_PROFILE_START(&pVM->csam.s.StatTime, a);
2708 rc = csamAnalyseCodeStream(pVM, pHandler, pHandler, true, CSAMR3AnalyseCallback, pPage, &cacheRec);
2709 STAM_PROFILE_STOP(&pVM->csam.s.StatTime, a);
2710 if (cacheRec.Lock.pvMap)
2711 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2712
2713 if (rc != VINF_SUCCESS)
2714 {
2715 Log(("CSAMCheckGates: csamAnalyseCodeStream failed with %d\n", rc));
2716 continue;
2717 }
2718 }
2719 }
2720 }
2721
2722 /* Determine valid upper boundary. */
2723 maxGates = (cbIDT+1) / sizeof(VBOXIDTE);
2724 Assert(iGate < maxGates);
2725    if (iGate >= maxGates)
2726 return VERR_INVALID_PARAMETER;
2727
2728 if (iGate + cGates > maxGates)
2729 cGates = maxGates - iGate;
2730
2731 GCPtrIDT = GCPtrIDT + iGate * sizeof(VBOXIDTE);
2732 iGateEnd = iGate + cGates;
2733
2734 STAM_PROFILE_START(&pVM->csam.s.StatCheckGates, a);
2735
2736 /*
2737 * Get IDT entries.
2738 */
2739 rc = PGMPhysSimpleReadGCPtr(pVCpu, aIDT, GCPtrIDT, cGates*sizeof(VBOXIDTE));
2740 if (RT_FAILURE(rc))
2741 {
2742 AssertMsgRC(rc, ("Failed to read IDTE! rc=%Rrc\n", rc));
2743 STAM_PROFILE_STOP(&pVM->csam.s.StatCheckGates, a);
2744 return rc;
2745 }
2746 pGuestIdte = &aIDT[0];
2747
2748 for (/*iGate*/; iGate<iGateEnd; iGate++, pGuestIdte++)
2749 {
2750 Assert(TRPMR3GetGuestTrapHandler(pVM, iGate) == TRPM_INVALID_HANDLER);
2751
2752 if ( pGuestIdte->Gen.u1Present
2753 && (pGuestIdte->Gen.u5Type2 == VBOX_IDTE_TYPE2_TRAP_32 || pGuestIdte->Gen.u5Type2 == VBOX_IDTE_TYPE2_INT_32)
2754 && (pGuestIdte->Gen.u2DPL == 3 || pGuestIdte->Gen.u2DPL == 0)
2755 )
2756 {
2757 RTRCPTR pHandler;
2758 PCSAMPAGE pPage = NULL;
2759 DBGFSELINFO selInfo;
2760 CSAMP2GLOOKUPREC cacheRec; /* Cache record for csamR3GCVirtToHCVirt. */
2761 RT_ZERO(cacheRec);
2762
2763 pHandler = VBOXIDTE_OFFSET(*pGuestIdte);
2764 pHandler = SELMToFlatBySel(pVM, pGuestIdte->Gen.u16SegSel, pHandler);
2765
2766 rc = SELMR3GetSelectorInfo(pVM, pVCpu, pGuestIdte->Gen.u16SegSel, &selInfo);
2767 if ( RT_FAILURE(rc)
2768 || (selInfo.fFlags & (DBGFSELINFO_FLAGS_NOT_PRESENT | DBGFSELINFO_FLAGS_INVALID))
2769 || selInfo.GCPtrBase != 0
2770 || selInfo.cbLimit != ~0U
2771 )
2772 {
2773 /* Refuse to patch a handler whose idt cs selector isn't wide open. */
2774 Log(("CSAMCheckGates: check gate %d failed due to rc %Rrc GCPtrBase=%RRv limit=%x\n", iGate, rc, selInfo.GCPtrBase, selInfo.cbLimit));
2775 continue;
2776 }
2777
2778
2779 if (pGuestIdte->Gen.u5Type2 == VBOX_IDTE_TYPE2_TRAP_32)
2780 {
2781 Log(("CSAMCheckGates: check trap gate %d at %04X:%08X (flat %RRv)\n", iGate, pGuestIdte->Gen.u16SegSel, VBOXIDTE_OFFSET(*pGuestIdte), pHandler));
2782 }
2783 else
2784 {
2785 Log(("CSAMCheckGates: check interrupt gate %d at %04X:%08X (flat %RRv)\n", iGate, pGuestIdte->Gen.u16SegSel, VBOXIDTE_OFFSET(*pGuestIdte), pHandler));
2786 }
2787
2788 STAM_PROFILE_START(&pVM->csam.s.StatTime, b);
2789 rc = csamAnalyseCodeStream(pVM, pHandler, pHandler, true, CSAMR3AnalyseCallback, pPage, &cacheRec);
2790 STAM_PROFILE_STOP(&pVM->csam.s.StatTime, b);
2791 if (cacheRec.Lock.pvMap)
2792 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2793
2794 if (rc != VINF_SUCCESS)
2795 {
2796 Log(("CSAMCheckGates: csamAnalyseCodeStream failed with %d\n", rc));
2797 continue;
2798 }
2799 /* OpenBSD guest specific patch test. */
2800 if (iGate >= 0x20)
2801 {
2802 PCPUMCTX pCtx;
2803 DISCPUSTATE cpu;
2804 RTGCUINTPTR32 aOpenBsdPushCSOffset[3] = {0x03, /* OpenBSD 3.7 & 3.8 */
2805 0x2B, /* OpenBSD 4.0 installation ISO */
2806 0x2F}; /* OpenBSD 4.0 after install */
2807
2808 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
2809
2810 for (unsigned i=0;i<RT_ELEMENTS(aOpenBsdPushCSOffset);i++)
2811 {
2812 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pHandler - aOpenBsdPushCSOffset[i], &cpu, NULL);
2813 if ( rc == VINF_SUCCESS
2814 && cpu.pCurInstr->uOpcode == OP_PUSH
2815 && cpu.pCurInstr->fParam1 == OP_PARM_REG_CS)
2816 {
2817 rc = PATMR3InstallPatch(pVM, pHandler - aOpenBsdPushCSOffset[i], PATMFL_CODE32 | PATMFL_GUEST_SPECIFIC);
2818 if (RT_SUCCESS(rc))
2819 Log(("Installed OpenBSD interrupt handler prefix instruction (push cs) patch\n"));
2820 }
2821 }
2822 }
2823
2824 /* Trap gates and certain interrupt gates. */
2825 uint32_t fPatchFlags = PATMFL_CODE32 | PATMFL_IDTHANDLER;
2826
2827 if (pGuestIdte->Gen.u5Type2 == VBOX_IDTE_TYPE2_TRAP_32)
2828 fPatchFlags |= PATMFL_TRAPHANDLER;
2829 else
2830 fPatchFlags |= PATMFL_INTHANDLER;
2831
2832 switch (iGate) {
2833 case 8:
2834 case 10:
2835 case 11:
2836 case 12:
2837 case 13:
2838 case 14:
2839 case 17:
2840 fPatchFlags |= PATMFL_TRAPHANDLER_WITH_ERRORCODE;
2841 break;
2842 default:
2843 /* No error code. */
2844 break;
2845 }
2846
2847 Log(("Installing %s gate handler for 0x%X at %RRv\n", (pGuestIdte->Gen.u5Type2 == VBOX_IDTE_TYPE2_TRAP_32) ? "trap" : "intr", iGate, pHandler));
2848
2849 rc = PATMR3InstallPatch(pVM, pHandler, fPatchFlags);
2850 if ( RT_SUCCESS(rc)
2851 || rc == VERR_PATM_ALREADY_PATCHED)
2852 {
2853 Log(("Gate handler 0x%X is SAFE!\n", iGate));
2854
2855 RTRCPTR pNewHandlerGC = PATMR3QueryPatchGCPtr(pVM, pHandler);
2856 if (pNewHandlerGC)
2857 {
2858 rc = TRPMR3SetGuestTrapHandler(pVM, iGate, pNewHandlerGC);
2859 if (RT_FAILURE(rc))
2860 Log(("TRPMR3SetGuestTrapHandler %d failed with %Rrc\n", iGate, rc));
2861 }
2862 }
2863 }
2864 } /* for */
2865 STAM_PROFILE_STOP(&pVM->csam.s.StatCheckGates, a);
2866#endif /* VBOX_WITH_RAW_MODE */
2867 return VINF_SUCCESS;
2868}
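
/*
 * VBOXIDTE_OFFSET above merely reassembles the 32-bit handler offset that a
 * protected mode gate descriptor stores in two 16-bit halves (bytes 0-1 hold
 * offset bits 15:0, bytes 6-7 hold bits 31:16 of the 8-byte entry).  A sketch
 * of that reassembly on a raw IDT entry (sketchIdteOffset is illustrative
 * only):
 */
#if 0
static uint32_t sketchIdteOffset(const uint8_t *pbIdte /* 8 bytes */)
{
    uint16_t offLow, offHigh;
    memcpy(&offLow,  pbIdte + 0, sizeof(offLow));   /* offset 15:0  */
    memcpy(&offHigh, pbIdte + 6, sizeof(offHigh));  /* offset 31:16 */
    return (uint32_t)offHigh << 16 | offLow;
}
#endif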
2869
2870/**
2871 * Record previous call instruction addresses
2872 *
2873 * @returns VBox status code.
2874 * @param pVM Pointer to the VM.
2875 * @param GCPtrCall Call address
2876 */
2877VMMR3DECL(int) CSAMR3RecordCallAddress(PVM pVM, RTRCPTR GCPtrCall)
2878{
2879 Assert(!HMIsEnabled(pVM));
2880 for (unsigned i=0;i<RT_ELEMENTS(pVM->csam.s.pvCallInstruction);i++)
2881 {
2882 if (pVM->csam.s.pvCallInstruction[i] == GCPtrCall)
2883 return VINF_SUCCESS;
2884 }
2885
2886 Log(("CSAMR3RecordCallAddress %RRv\n", GCPtrCall));
2887
2888 pVM->csam.s.pvCallInstruction[pVM->csam.s.iCallInstruction++] = GCPtrCall;
2889 if (pVM->csam.s.iCallInstruction >= RT_ELEMENTS(pVM->csam.s.pvCallInstruction))
2890 pVM->csam.s.iCallInstruction = 0;
2891
2892 return VINF_SUCCESS;
2893}
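
/*
 * The recorded call sites live in a small fixed-size array with a wrap-around
 * write index (pvCallInstruction[] / iCallInstruction), and CSAMR3CheckGates
 * replays them before the full gate scan.  The generic pattern, sketched on a
 * plain ring with duplicate suppression (all SKETCH*/sketch* names and the
 * ring size are illustrative only):
 */
#if 0
#define SKETCH_RING_SIZE 16
static uintptr_t g_aSketchRing[SKETCH_RING_SIZE];
static unsigned  g_iSketchRing;

static void sketchRecordAddress(uintptr_t uAddr)
{
    for (unsigned i = 0; i < SKETCH_RING_SIZE; i++)
        if (g_aSketchRing[i] == uAddr)
            return;                                 /* already recorded */
    g_aSketchRing[g_iSketchRing++] = uAddr;         /* overwrite the oldest slot */
    if (g_iSketchRing >= SKETCH_RING_SIZE)
        g_iSketchRing = 0;
}
#endif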
2894
2895
2896/**
2897 * Query CSAM state (enabled/disabled)
2898 *
2899 * @returns true if enabled, false otherwise.
2900 * @param pUVM The user mode VM handle.
2901 */
2902VMMR3DECL(bool) CSAMR3IsEnabled(PUVM pUVM)
2903{
2904 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2905 PVM pVM = pUVM->pVM;
2906 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2907 return CSAMIsEnabled(pVM);
2908}
2909
2910
2911/**
2912 * Enables or disables code scanning.
2913 *
2914 * @returns VBox status code.
2915 * @param pUVM The user mode VM handle.
2916 * @param fEnabled Whether to enable or disable scanning.
2917 */
2918VMMR3DECL(int) CSAMR3SetScanningEnabled(PUVM pUVM, bool fEnabled)
2919{
2920 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2921 PVM pVM = pUVM->pVM;
2922 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2923
2924 if (HMIsEnabled(pVM))
2925 {
2926 Assert(!pVM->fCSAMEnabled);
2927 return VINF_SUCCESS;
2928 }
2929
2930 int rc;
2931 if (fEnabled)
2932 rc = CSAMEnableScanning(pVM);
2933 else
2934 rc = CSAMDisableScanning(pVM);
2935 return rc;
2936}
2937
2938
2939#ifdef VBOX_WITH_DEBUGGER
2940
2941/**
2942 * @callback_method_impl{FNDBGCCMD, The '.csamoff' command.}
2943 */
2944static DECLCALLBACK(int) csamr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
2945{
2946 DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
2947 NOREF(cArgs); NOREF(paArgs);
2948
2949 if (HMR3IsEnabled(pUVM))
2950 return DBGCCmdHlpPrintf(pCmdHlp, "CSAM is permanently disabled by HM.\n");
2951
2952 int rc = CSAMR3SetScanningEnabled(pUVM, false);
2953 if (RT_FAILURE(rc))
2954 return DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "CSAMR3SetScanningEnabled");
2955 return DBGCCmdHlpPrintf(pCmdHlp, "CSAM Scanning disabled\n");
2956}
2957
2958/**
2959 * @callback_method_impl{FNDBGCCMD, The '.csamon' command.}
2960 */
2961static DECLCALLBACK(int) csamr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
2962{
2963 DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
2964 NOREF(cArgs); NOREF(paArgs);
2965
2966 if (HMR3IsEnabled(pUVM))
2967 return DBGCCmdHlpPrintf(pCmdHlp, "CSAM is permanently disabled by HM.\n");
2968
2969 int rc = CSAMR3SetScanningEnabled(pUVM, true);
2970 if (RT_FAILURE(rc))
2971 return DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "CSAMR3SetScanningEnabled");
2972 return DBGCCmdHlpPrintf(pCmdHlp, "CSAM Scanning enabled\n");
2973}
2974
2975#endif /* VBOX_WITH_DEBUGGER */