VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/CSAM.cpp@74168

Last change on this file since 74168 was 73097, checked in by vboxsync, 6 years ago

*: Made RT_UOFFSETOF, RT_OFFSETOF, RT_UOFFSETOF_ADD and RT_OFFSETOF_ADD work like __builtin_offsetof() and require compile time resolvable requests, adding RT_UOFFSETOF_DYN for the dynamic questions that can only be answered at runtime.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 110.0 KB
1/* $Id: CSAM.cpp 73097 2018-07-12 21:06:33Z vboxsync $ */
2/** @file
3 * CSAM - Guest OS Code Scanning and Analysis Manager
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_csam CSAM - Code Scanning Analysis Manager
19 *
20 * The CSAM is responsible for scanning and marking guest OS kernel code paths
21 * to make safe raw-mode execution possible.
22 *
23 * It works closely with the @ref pg_patm "patch manager" to patch code
24 * sequences that we could otherwise not execute in raw-mode.
25 *
26 * @sa @ref grp_csam
27 */
28
29
30/*********************************************************************************************************************************
31* Header Files *
32*********************************************************************************************************************************/
33#define LOG_GROUP LOG_GROUP_CSAM
34#include <VBox/vmm/cpum.h>
35#include <VBox/vmm/stam.h>
36#include <VBox/vmm/patm.h>
37#include <VBox/vmm/csam.h>
38#include <VBox/vmm/cpumdis.h>
39#include <VBox/vmm/pgm.h>
40#include <VBox/vmm/iom.h>
41#include <VBox/vmm/mm.h>
42#include <VBox/vmm/em.h>
43#include <VBox/vmm/hm.h>
44#ifdef VBOX_WITH_REM
45# include <VBox/vmm/rem.h>
46#endif
47#include <VBox/vmm/selm.h>
48#include <VBox/vmm/trpm.h>
49#include <VBox/vmm/cfgm.h>
50#include <VBox/vmm/ssm.h>
51#include <VBox/param.h>
52#include <iprt/avl.h>
53#include <iprt/asm.h>
54#include <iprt/thread.h>
55#include "CSAMInternal.h"
56#include <VBox/vmm/vm.h>
57#include <VBox/vmm/uvm.h>
58
59#include <VBox/dbg.h>
60#include <VBox/sup.h>
61#include <VBox/err.h>
62#include <VBox/log.h>
63#include <VBox/version.h>
64
65#include <VBox/dis.h>
66#include <VBox/disopcode.h>
67#include <iprt/assert.h>
68#include <iprt/string.h>
69
70
71/* Enabled by default */
72#define CSAM_ENABLE
73
74/* Enable to monitor code pages for self-modifying code. */
75#define CSAM_MONITOR_CODE_PAGES
76/* Enable to monitor all scanned pages
77#define CSAM_MONITOR_CSAM_CODE_PAGES */
78/* Enable to scan beyond ret instructions.
79#define CSAM_ANALYSE_BEYOND_RET */
80
81
82/*********************************************************************************************************************************
83* Internal Functions *
84*********************************************************************************************************************************/
85static DECLCALLBACK(int) csamR3Save(PVM pVM, PSSMHANDLE pSSM);
86static DECLCALLBACK(int) csamR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
87static FNPGMR3VIRTINVALIDATE csamR3CodePageInvalidate;
88
89bool csamIsCodeScanned(PVM pVM, RTRCPTR pInstr, PCSAMPAGE *pPage);
90int csamR3CheckPageRecord(PVM pVM, RTRCPTR pInstr);
91static PCSAMPAGE csamR3CreatePageRecord(PVM pVM, RTRCPTR GCPtr, CSAMTAG enmTag, bool fCode32, bool fMonitorInvalidation = false);
92static int csamRemovePageRecord(PVM pVM, RTRCPTR GCPtr);
93static int csamReinit(PVM pVM);
94static void csamMarkCode(PVM pVM, PCSAMPAGE pPage, RTRCPTR pInstr, uint32_t opsize, bool fScanned);
95static int csamAnalyseCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, bool fCode32,
96 PFN_CSAMR3ANALYSE pfnCSAMR3Analyse, void *pUserData, PCSAMP2GLOOKUPREC pCacheRec);
97
98/** @todo "Temporary" for debugging. */
99static bool g_fInCsamR3CodePageInvalidate = false;
100
101#ifdef VBOX_WITH_DEBUGGER
102static FNDBGCCMD csamr3CmdOn;
103static FNDBGCCMD csamr3CmdOff;
104#endif
105
106
107/*********************************************************************************************************************************
108* Global Variables *
109*********************************************************************************************************************************/
110#ifdef VBOX_WITH_DEBUGGER
111/** Command descriptors. */
112static const DBGCCMD g_aCmds[] =
113{
114 /* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, fFlags, pfnHandler, pszSyntax, ... pszDescription */
115 { "csamon", 0, 0, NULL, 0, 0, csamr3CmdOn, "", "Enable CSAM code scanning." },
116 { "csamoff", 0, 0, NULL, 0, 0, csamr3CmdOff, "", "Disable CSAM code scanning." },
117};
118#endif
119
120/**
121 * SSM descriptor table for the CSAM structure (save + restore).
122 */
123static const SSMFIELD g_aCsamFields[] =
124{
125 SSMFIELD_ENTRY( CSAM, aDangerousInstr), /* didn't used to be restored */
126 SSMFIELD_ENTRY( CSAM, cDangerousInstr), /* didn't used to be restored */
127 SSMFIELD_ENTRY( CSAM, iDangerousInstr), /* didn't used to be restored */
128 SSMFIELD_ENTRY( CSAM, savedstate.cPageRecords),
129 SSMFIELD_ENTRY( CSAM, savedstate.cPatchPageRecords),
130 SSMFIELD_ENTRY( CSAM, cDirtyPages),
131 SSMFIELD_ENTRY_RCPTR_ARRAY( CSAM, pvDirtyBasePage),
132 SSMFIELD_ENTRY_RCPTR_ARRAY( CSAM, pvDirtyFaultPage),
133 SSMFIELD_ENTRY( CSAM, cPossibleCodePages),
134 SSMFIELD_ENTRY_RCPTR_ARRAY( CSAM, pvPossibleCodePage),
135 SSMFIELD_ENTRY_RCPTR_ARRAY( CSAM, pvCallInstruction), /* didn't used to be restored */
136 SSMFIELD_ENTRY( CSAM, iCallInstruction), /* didn't used to be restored */
137 SSMFIELD_ENTRY( CSAM, fScanningStarted),
138 SSMFIELD_ENTRY( CSAM, fGatesChecked),
139 SSMFIELD_ENTRY_TERM()
140};
141
142/**
143 * SSM descriptor table for the version 5.0.0 CSAM structure.
144 */
145static const SSMFIELD g_aCsamFields500[] =
146{
147 SSMFIELD_ENTRY_IGNORE( CSAM, offVM),
148 SSMFIELD_ENTRY_PAD_HC64( CSAM, Alignment0, sizeof(uint32_t)),
149 SSMFIELD_ENTRY_IGN_HCPTR( CSAM, pPageTree),
150 SSMFIELD_ENTRY( CSAM, aDangerousInstr),
151 SSMFIELD_ENTRY( CSAM, cDangerousInstr),
152 SSMFIELD_ENTRY( CSAM, iDangerousInstr),
153 SSMFIELD_ENTRY_RCPTR( CSAM, pPDBitmapGC), /// @todo ignore this?
154 SSMFIELD_ENTRY_RCPTR( CSAM, pPDHCBitmapGC), /// @todo ignore this?
155 SSMFIELD_ENTRY_IGN_HCPTR( CSAM, pPDBitmapHC),
156 SSMFIELD_ENTRY_IGN_HCPTR( CSAM, pPDGCBitmapHC),
157 SSMFIELD_ENTRY_IGN_HCPTR( CSAM, savedstate.pSSM),
158 SSMFIELD_ENTRY( CSAM, savedstate.cPageRecords),
159 SSMFIELD_ENTRY( CSAM, savedstate.cPatchPageRecords),
160 SSMFIELD_ENTRY( CSAM, cDirtyPages),
161 SSMFIELD_ENTRY_RCPTR_ARRAY( CSAM, pvDirtyBasePage),
162 SSMFIELD_ENTRY_RCPTR_ARRAY( CSAM, pvDirtyFaultPage),
163 SSMFIELD_ENTRY( CSAM, cPossibleCodePages),
164 SSMFIELD_ENTRY_RCPTR_ARRAY( CSAM, pvPossibleCodePage),
165 SSMFIELD_ENTRY_RCPTR_ARRAY( CSAM, pvCallInstruction),
166 SSMFIELD_ENTRY( CSAM, iCallInstruction),
167 SSMFIELD_ENTRY_IGNORE( CSAM, hCodePageWriteType), /* added in 5.0 */
168 SSMFIELD_ENTRY_IGNORE( CSAM, hCodePageWriteAndInvPgType), /* added in 5.0 */
169 SSMFIELD_ENTRY( CSAM, fScanningStarted),
170 SSMFIELD_ENTRY( CSAM, fGatesChecked),
171 SSMFIELD_ENTRY_PAD_HC( CSAM, Alignment1, 6, 2),
172 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrTraps),
173 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrPages),
174 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrPagesInv),
175 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrRemovedPages),
176 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrPatchPages),
177 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrPageNPHC),
178 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrPageNPGC),
179 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrFlushes),
180 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrFlushesSkipped),
181 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrKnownPagesHC),
182 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrKnownPagesGC),
183 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrInstr),
184 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrBytesRead),
185 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrOpcodeRead),
186 SSMFIELD_ENTRY_IGNORE( CSAM, StatTime),
187 SSMFIELD_ENTRY_IGNORE( CSAM, StatTimeCheckAddr),
188 SSMFIELD_ENTRY_IGNORE( CSAM, StatTimeAddrConv),
189 SSMFIELD_ENTRY_IGNORE( CSAM, StatTimeFlushPage),
190 SSMFIELD_ENTRY_IGNORE( CSAM, StatTimeDisasm),
191 SSMFIELD_ENTRY_IGNORE( CSAM, StatFlushDirtyPages),
192 SSMFIELD_ENTRY_IGNORE( CSAM, StatCheckGates),
193 SSMFIELD_ENTRY_IGNORE( CSAM, StatCodePageModified),
194 SSMFIELD_ENTRY_IGNORE( CSAM, StatDangerousWrite),
195 SSMFIELD_ENTRY_IGNORE( CSAM, StatInstrCacheHit),
196 SSMFIELD_ENTRY_IGNORE( CSAM, StatInstrCacheMiss),
197 SSMFIELD_ENTRY_IGNORE( CSAM, StatPagePATM),
198 SSMFIELD_ENTRY_IGNORE( CSAM, StatPageCSAM),
199 SSMFIELD_ENTRY_IGNORE( CSAM, StatPageREM),
200 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrUserPages),
201 SSMFIELD_ENTRY_IGNORE( CSAM, StatPageMonitor),
202 SSMFIELD_ENTRY_IGNORE( CSAM, StatPageRemoveREMFlush),
203 SSMFIELD_ENTRY_IGNORE( CSAM, StatBitmapAlloc),
204 SSMFIELD_ENTRY_IGNORE( CSAM, StatScanNextFunction),
205 SSMFIELD_ENTRY_IGNORE( CSAM, StatScanNextFunctionFailed),
206 SSMFIELD_ENTRY_TERM()
207};
208
209/**
210 * SSM descriptor table for the pre 5.0.0 CSAM structure.
211 */
212static const SSMFIELD g_aCsamFieldsBefore500[] =
213{
214 /** @todo there are more fields that can be ignored here. */
215 SSMFIELD_ENTRY_IGNORE( CSAM, offVM),
216 SSMFIELD_ENTRY_PAD_HC64( CSAM, Alignment0, sizeof(uint32_t)),
217 SSMFIELD_ENTRY_IGN_HCPTR( CSAM, pPageTree),
218 SSMFIELD_ENTRY( CSAM, aDangerousInstr),
219 SSMFIELD_ENTRY( CSAM, cDangerousInstr),
220 SSMFIELD_ENTRY( CSAM, iDangerousInstr),
221 SSMFIELD_ENTRY_RCPTR( CSAM, pPDBitmapGC), /// @todo ignore this?
222 SSMFIELD_ENTRY_RCPTR( CSAM, pPDHCBitmapGC), /// @todo ignore this?
223 SSMFIELD_ENTRY_IGN_HCPTR( CSAM, pPDBitmapHC),
224 SSMFIELD_ENTRY_IGN_HCPTR( CSAM, pPDGCBitmapHC),
225 SSMFIELD_ENTRY_IGN_HCPTR( CSAM, savedstate.pSSM),
226 SSMFIELD_ENTRY( CSAM, savedstate.cPageRecords),
227 SSMFIELD_ENTRY( CSAM, savedstate.cPatchPageRecords),
228 SSMFIELD_ENTRY( CSAM, cDirtyPages),
229 SSMFIELD_ENTRY_RCPTR_ARRAY( CSAM, pvDirtyBasePage),
230 SSMFIELD_ENTRY_RCPTR_ARRAY( CSAM, pvDirtyFaultPage),
231 SSMFIELD_ENTRY( CSAM, cPossibleCodePages),
232 SSMFIELD_ENTRY_RCPTR_ARRAY( CSAM, pvPossibleCodePage),
233 SSMFIELD_ENTRY_RCPTR_ARRAY( CSAM, pvCallInstruction),
234 SSMFIELD_ENTRY( CSAM, iCallInstruction),
235 SSMFIELD_ENTRY( CSAM, fScanningStarted),
236 SSMFIELD_ENTRY( CSAM, fGatesChecked),
237 SSMFIELD_ENTRY_PAD_HC( CSAM, Alignment1, 6, 2),
238 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrTraps),
239 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrPages),
240 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrPagesInv),
241 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrRemovedPages),
242 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrPatchPages),
243 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrPageNPHC),
244 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrPageNPGC),
245 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrFlushes),
246 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrFlushesSkipped),
247 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrKnownPagesHC),
248 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrKnownPagesGC),
249 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrInstr),
250 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrBytesRead),
251 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrOpcodeRead),
252 SSMFIELD_ENTRY_IGNORE( CSAM, StatTime),
253 SSMFIELD_ENTRY_IGNORE( CSAM, StatTimeCheckAddr),
254 SSMFIELD_ENTRY_IGNORE( CSAM, StatTimeAddrConv),
255 SSMFIELD_ENTRY_IGNORE( CSAM, StatTimeFlushPage),
256 SSMFIELD_ENTRY_IGNORE( CSAM, StatTimeDisasm),
257 SSMFIELD_ENTRY_IGNORE( CSAM, StatFlushDirtyPages),
258 SSMFIELD_ENTRY_IGNORE( CSAM, StatCheckGates),
259 SSMFIELD_ENTRY_IGNORE( CSAM, StatCodePageModified),
260 SSMFIELD_ENTRY_IGNORE( CSAM, StatDangerousWrite),
261 SSMFIELD_ENTRY_IGNORE( CSAM, StatInstrCacheHit),
262 SSMFIELD_ENTRY_IGNORE( CSAM, StatInstrCacheMiss),
263 SSMFIELD_ENTRY_IGNORE( CSAM, StatPagePATM),
264 SSMFIELD_ENTRY_IGNORE( CSAM, StatPageCSAM),
265 SSMFIELD_ENTRY_IGNORE( CSAM, StatPageREM),
266 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrUserPages),
267 SSMFIELD_ENTRY_IGNORE( CSAM, StatPageMonitor),
268 SSMFIELD_ENTRY_IGNORE( CSAM, StatPageRemoveREMFlush),
269 SSMFIELD_ENTRY_IGNORE( CSAM, StatBitmapAlloc),
270 SSMFIELD_ENTRY_IGNORE( CSAM, StatScanNextFunction),
271 SSMFIELD_ENTRY_IGNORE( CSAM, StatScanNextFunctionFailed),
272 SSMFIELD_ENTRY_TERM()
273};
274
275
276/** Fake type to simplify g_aCsamPDBitmapArray construction. */
277typedef struct
278{
279 uint8_t *a[CSAM_PGDIRBMP_CHUNKS];
280} CSAMPDBITMAPARRAY;
281
282/**
283 * SSM descriptor table for the CSAM::pPDBitmapHC array.
284 */
285static SSMFIELD const g_aCsamPDBitmapArray[] =
286{
287 SSMFIELD_ENTRY_HCPTR_NI_ARRAY(CSAMPDBITMAPARRAY, a),
288 SSMFIELD_ENTRY_TERM()
289};
290
291
292/**
293 * SSM descriptor table for the CSAMPAGE structure.
294 */
295static const SSMFIELD g_aCsamPageFields[] =
296{
297 SSMFIELD_ENTRY_RCPTR( CSAMPAGE, pPageGC),
298 SSMFIELD_ENTRY_GCPHYS( CSAMPAGE, GCPhys),
299 SSMFIELD_ENTRY( CSAMPAGE, fFlags),
300 SSMFIELD_ENTRY( CSAMPAGE, uSize),
301 SSMFIELD_ENTRY_HCPTR_NI( CSAMPAGE, pBitmap),
302 SSMFIELD_ENTRY( CSAMPAGE, fCode32),
303 SSMFIELD_ENTRY( CSAMPAGE, fMonitorActive),
304 SSMFIELD_ENTRY( CSAMPAGE, fMonitorInvalidation),
305 SSMFIELD_ENTRY( CSAMPAGE, enmTag),
306 SSMFIELD_ENTRY( CSAMPAGE, u64Hash),
307 SSMFIELD_ENTRY_TERM()
308};
309
310/**
311 * SSM descriptor table for the CSAMPAGEREC structure, old SSMR3PutMem fashion.
312 */
313static const SSMFIELD g_aCsamPageRecFields[] =
314{
315 SSMFIELD_ENTRY_IGN_HCPTR( CSAMPAGEREC, Core.Key),
316 SSMFIELD_ENTRY_IGN_HCPTR( CSAMPAGEREC, Core.pLeft),
317 SSMFIELD_ENTRY_IGN_HCPTR( CSAMPAGEREC, Core.pRight),
318 SSMFIELD_ENTRY_IGNORE( CSAMPAGEREC, Core.uchHeight),
319 SSMFIELD_ENTRY_PAD_HC_AUTO( 3, 7),
320 SSMFIELD_ENTRY_RCPTR( CSAMPAGEREC, page.pPageGC),
321 SSMFIELD_ENTRY_PAD_HC_AUTO( 0, 4),
322 SSMFIELD_ENTRY_PAD_MSC32_AUTO( 4),
323 SSMFIELD_ENTRY_GCPHYS( CSAMPAGEREC, page.GCPhys),
324 SSMFIELD_ENTRY( CSAMPAGEREC, page.fFlags),
325 SSMFIELD_ENTRY( CSAMPAGEREC, page.uSize),
326 SSMFIELD_ENTRY_PAD_HC_AUTO( 0, 4),
327 SSMFIELD_ENTRY_HCPTR_NI( CSAMPAGEREC, page.pBitmap),
328 SSMFIELD_ENTRY( CSAMPAGEREC, page.fCode32),
329 SSMFIELD_ENTRY( CSAMPAGEREC, page.fMonitorActive),
330 SSMFIELD_ENTRY( CSAMPAGEREC, page.fMonitorInvalidation),
331 SSMFIELD_ENTRY_PAD_HC_AUTO( 1, 1),
332 SSMFIELD_ENTRY( CSAMPAGEREC, page.enmTag),
333 SSMFIELD_ENTRY( CSAMPAGEREC, page.u64Hash),
334 SSMFIELD_ENTRY_TERM()
335};
336
337
338/**
339 * Initializes the CSAM.
340 *
341 * @returns VBox status code.
342 * @param pVM The cross context VM structure.
343 */
344VMMR3_INT_DECL(int) CSAMR3Init(PVM pVM)
345{
346 int rc;
347
348 /*
349 * We only need a saved state dummy loader if HM is enabled.
350 */
351 if (!VM_IS_RAW_MODE_ENABLED(pVM))
352 {
353 pVM->fCSAMEnabled = false;
354 return SSMR3RegisterStub(pVM, "CSAM", 0);
355 }
356
357 /*
358 * Raw-mode.
359 */
360 LogFlow(("CSAMR3Init\n"));
361
362 /* Allocate bitmap for the page directory. */
363 rc = MMR3HyperAllocOnceNoRel(pVM, CSAM_PGDIRBMP_CHUNKS*sizeof(RTHCPTR), 0, MM_TAG_CSAM, (void **)&pVM->csam.s.pPDBitmapHC);
364 AssertRCReturn(rc, rc);
365 rc = MMR3HyperAllocOnceNoRel(pVM, CSAM_PGDIRBMP_CHUNKS*sizeof(RTRCPTR), 0, MM_TAG_CSAM, (void **)&pVM->csam.s.pPDGCBitmapHC);
366 AssertRCReturn(rc, rc);
367 pVM->csam.s.pPDBitmapGC = MMHyperR3ToRC(pVM, pVM->csam.s.pPDGCBitmapHC);
368 pVM->csam.s.pPDHCBitmapGC = MMHyperR3ToRC(pVM, pVM->csam.s.pPDBitmapHC);
369
370 rc = csamReinit(pVM);
371 AssertRCReturn(rc, rc);
372
373 /*
374 * Register virtual handler types.
375 */
376 rc = PGMR3HandlerVirtualTypeRegister(pVM, PGMVIRTHANDLERKIND_WRITE, false /*fRelocUserRC*/,
377 NULL /*pfnInvalidateR3 */,
378 csamCodePageWriteHandler,
379 "csamCodePageWriteHandler", "csamRCCodePageWritePfHandler",
380 "CSAM code page write handler",
381 &pVM->csam.s.hCodePageWriteType);
382 AssertLogRelRCReturn(rc, rc);
383 rc = PGMR3HandlerVirtualTypeRegister(pVM, PGMVIRTHANDLERKIND_WRITE, false /*fRelocUserRC*/,
384 csamR3CodePageInvalidate,
385 csamCodePageWriteHandler,
386 "csamCodePageWriteHandler", "csamRCCodePageWritePfHandler",
387 "CSAM code page write and invlpg handler",
388 &pVM->csam.s.hCodePageWriteAndInvPgType);
389 AssertLogRelRCReturn(rc, rc);
390
391 /*
392 * Register save and load state notifiers.
393 */
394 rc = SSMR3RegisterInternal(pVM, "CSAM", 0, CSAM_SAVED_STATE_VERSION, sizeof(pVM->csam.s) + PAGE_SIZE*16,
395 NULL, NULL, NULL,
396 NULL, csamR3Save, NULL,
397 NULL, csamR3Load, NULL);
398 AssertRCReturn(rc, rc);
399
400 STAM_REG(pVM, &pVM->csam.s.StatNrTraps, STAMTYPE_COUNTER, "/CSAM/PageTraps", STAMUNIT_OCCURENCES, "The number of CSAM page traps.");
401 STAM_REG(pVM, &pVM->csam.s.StatDangerousWrite, STAMTYPE_COUNTER, "/CSAM/DangerousWrites", STAMUNIT_OCCURENCES, "The number of dangerous writes that cause a context switch.");
402
403 STAM_REG(pVM, &pVM->csam.s.StatNrPageNPHC, STAMTYPE_COUNTER, "/CSAM/HC/PageNotPresent", STAMUNIT_OCCURENCES, "The number of CSAM pages marked not present.");
404 STAM_REG(pVM, &pVM->csam.s.StatNrPageNPGC, STAMTYPE_COUNTER, "/CSAM/GC/PageNotPresent", STAMUNIT_OCCURENCES, "The number of CSAM pages marked not present.");
405 STAM_REG(pVM, &pVM->csam.s.StatNrPages, STAMTYPE_COUNTER, "/CSAM/PageRec/AddedRW", STAMUNIT_OCCURENCES, "The number of CSAM page records (RW monitoring).");
406 STAM_REG(pVM, &pVM->csam.s.StatNrPagesInv, STAMTYPE_COUNTER, "/CSAM/PageRec/AddedRWI", STAMUNIT_OCCURENCES, "The number of CSAM page records (RW & invalidation monitoring).");
407 STAM_REG(pVM, &pVM->csam.s.StatNrRemovedPages, STAMTYPE_COUNTER, "/CSAM/PageRec/Removed", STAMUNIT_OCCURENCES, "The number of removed CSAM page records.");
408 STAM_REG(pVM, &pVM->csam.s.StatPageRemoveREMFlush,STAMTYPE_COUNTER, "/CSAM/PageRec/Removed/REMFlush", STAMUNIT_OCCURENCES, "The number of removed CSAM page records that caused a REM flush.");
409
410 STAM_REG(pVM, &pVM->csam.s.StatNrPatchPages, STAMTYPE_COUNTER, "/CSAM/PageRec/Patch", STAMUNIT_OCCURENCES, "The number of CSAM patch page records.");
411 STAM_REG(pVM, &pVM->csam.s.StatNrUserPages, STAMTYPE_COUNTER, "/CSAM/PageRec/Ignore/User", STAMUNIT_OCCURENCES, "The number of CSAM user page records (ignored).");
412 STAM_REG(pVM, &pVM->csam.s.StatPagePATM, STAMTYPE_COUNTER, "/CSAM/PageRec/Type/PATM", STAMUNIT_OCCURENCES, "The number of PATM page records.");
413 STAM_REG(pVM, &pVM->csam.s.StatPageCSAM, STAMTYPE_COUNTER, "/CSAM/PageRec/Type/CSAM", STAMUNIT_OCCURENCES, "The number of CSAM page records.");
414 STAM_REG(pVM, &pVM->csam.s.StatPageREM, STAMTYPE_COUNTER, "/CSAM/PageRec/Type/REM", STAMUNIT_OCCURENCES, "The number of REM page records.");
415 STAM_REG(pVM, &pVM->csam.s.StatPageMonitor, STAMTYPE_COUNTER, "/CSAM/PageRec/Monitored", STAMUNIT_OCCURENCES, "The number of monitored pages.");
416
417 STAM_REG(pVM, &pVM->csam.s.StatCodePageModified, STAMTYPE_COUNTER, "/CSAM/Monitor/DirtyPage", STAMUNIT_OCCURENCES, "The number of code page modifications.");
418
419 STAM_REG(pVM, &pVM->csam.s.StatNrFlushes, STAMTYPE_COUNTER, "/CSAM/PageFlushes", STAMUNIT_OCCURENCES, "The number of CSAM page flushes.");
420 STAM_REG(pVM, &pVM->csam.s.StatNrFlushesSkipped, STAMTYPE_COUNTER, "/CSAM/PageFlushesSkipped", STAMUNIT_OCCURENCES, "The number of CSAM page flushes that were skipped.");
421 STAM_REG(pVM, &pVM->csam.s.StatNrKnownPagesHC, STAMTYPE_COUNTER, "/CSAM/HC/KnownPageRecords", STAMUNIT_OCCURENCES, "The number of known CSAM page records.");
422 STAM_REG(pVM, &pVM->csam.s.StatNrKnownPagesGC, STAMTYPE_COUNTER, "/CSAM/GC/KnownPageRecords", STAMUNIT_OCCURENCES, "The number of known CSAM page records.");
423 STAM_REG(pVM, &pVM->csam.s.StatNrInstr, STAMTYPE_COUNTER, "/CSAM/ScannedInstr", STAMUNIT_OCCURENCES, "The number of scanned instructions.");
424 STAM_REG(pVM, &pVM->csam.s.StatNrBytesRead, STAMTYPE_COUNTER, "/CSAM/BytesRead", STAMUNIT_OCCURENCES, "The number of bytes read for scanning.");
425 STAM_REG(pVM, &pVM->csam.s.StatNrOpcodeRead, STAMTYPE_COUNTER, "/CSAM/OpcodeBytesRead", STAMUNIT_OCCURENCES, "The number of opcode bytes read by the recompiler.");
426
427 STAM_REG(pVM, &pVM->csam.s.StatBitmapAlloc, STAMTYPE_COUNTER, "/CSAM/Alloc/PageBitmap", STAMUNIT_OCCURENCES, "The number of page bitmap allocations.");
428
429 STAM_REG(pVM, &pVM->csam.s.StatInstrCacheHit, STAMTYPE_COUNTER, "/CSAM/Cache/Hit", STAMUNIT_OCCURENCES, "The number of dangerous instruction cache hits.");
430 STAM_REG(pVM, &pVM->csam.s.StatInstrCacheMiss, STAMTYPE_COUNTER, "/CSAM/Cache/Miss", STAMUNIT_OCCURENCES, "The number of dangerous instruction cache misses.");
431
432 STAM_REG(pVM, &pVM->csam.s.StatScanNextFunction, STAMTYPE_COUNTER, "/CSAM/Function/Scan/Success", STAMUNIT_OCCURENCES, "The number of found functions beyond the ret border.");
433 STAM_REG(pVM, &pVM->csam.s.StatScanNextFunctionFailed, STAMTYPE_COUNTER, "/CSAM/Function/Scan/Failed", STAMUNIT_OCCURENCES, "The number of refused functions beyond the ret border.");
434
435 STAM_REG(pVM, &pVM->csam.s.StatTime, STAMTYPE_PROFILE, "/PROF/CSAM/Scan", STAMUNIT_TICKS_PER_CALL, "Scanning overhead.");
436 STAM_REG(pVM, &pVM->csam.s.StatTimeCheckAddr, STAMTYPE_PROFILE, "/PROF/CSAM/CheckAddr", STAMUNIT_TICKS_PER_CALL, "Address check overhead.");
437 STAM_REG(pVM, &pVM->csam.s.StatTimeAddrConv, STAMTYPE_PROFILE, "/PROF/CSAM/AddrConv", STAMUNIT_TICKS_PER_CALL, "Address conversion overhead.");
438 STAM_REG(pVM, &pVM->csam.s.StatTimeFlushPage, STAMTYPE_PROFILE, "/PROF/CSAM/FlushPage", STAMUNIT_TICKS_PER_CALL, "Page flushing overhead.");
439 STAM_REG(pVM, &pVM->csam.s.StatTimeDisasm, STAMTYPE_PROFILE, "/PROF/CSAM/Disasm", STAMUNIT_TICKS_PER_CALL, "Disassembly overhead.");
440 STAM_REG(pVM, &pVM->csam.s.StatFlushDirtyPages, STAMTYPE_PROFILE, "/PROF/CSAM/FlushDirtyPage", STAMUNIT_TICKS_PER_CALL, "Dirty page flushing overhead.");
441 STAM_REG(pVM, &pVM->csam.s.StatCheckGates, STAMTYPE_PROFILE, "/PROF/CSAM/CheckGates", STAMUNIT_TICKS_PER_CALL, "CSAMR3CheckGates overhead.");
442
443 /*
444 * Check CFGM option and enable/disable CSAM.
445 */
446 bool fEnabled;
447 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "CSAMEnabled", &fEnabled);
448 if (RT_FAILURE(rc))
449#ifdef CSAM_ENABLE
450 fEnabled = true;
451#else
452 fEnabled = false;
453#endif
454 if (fEnabled)
455 CSAMEnableScanning(pVM);
456
457#ifdef VBOX_WITH_DEBUGGER
458 /*
459 * Debugger commands.
460 */
461 static bool fRegisteredCmds = false;
462 if (!fRegisteredCmds)
463 {
464 rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
465 if (RT_SUCCESS(rc))
466 fRegisteredCmds = true;
467 }
468#endif
469
470 return VINF_SUCCESS;
471}
472
473/**
474 * (Re)initializes CSAM
475 *
476 * @param pVM The cross context VM structure.
477 */
478static int csamReinit(PVM pVM)
479{
480 /*
481 * Assert alignment and sizes.
482 */
483 AssertRelease(!(RT_UOFFSETOF(VM, csam.s) & 31));
484 AssertRelease(sizeof(pVM->csam.s) <= sizeof(pVM->csam.padding));
485 AssertRelease(VM_IS_RAW_MODE_ENABLED(pVM));
486
487 /*
488 * Setup any fixed pointers and offsets.
489 */
490 pVM->csam.s.offVM = RT_UOFFSETOF(VM, patm);
491
492 pVM->csam.s.fGatesChecked = false;
493 pVM->csam.s.fScanningStarted = false;
494
495 PVMCPU pVCpu = &pVM->aCpus[0]; /* raw mode implies 1 VCPU */
496 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION);
497 pVM->csam.s.cDirtyPages = 0;
498 /* Not strictly necessary, but clear the dirty page tracking arrays anyway. */
499 memset(pVM->csam.s.pvDirtyBasePage, 0, sizeof(pVM->csam.s.pvDirtyBasePage));
500 memset(pVM->csam.s.pvDirtyFaultPage, 0, sizeof(pVM->csam.s.pvDirtyFaultPage));
501
502 memset(&pVM->csam.s.aDangerousInstr, 0, sizeof(pVM->csam.s.aDangerousInstr));
503 pVM->csam.s.cDangerousInstr = 0;
504 pVM->csam.s.iDangerousInstr = 0;
505
506 memset(pVM->csam.s.pvCallInstruction, 0, sizeof(pVM->csam.s.pvCallInstruction));
507 pVM->csam.s.iCallInstruction = 0;
508
509 /** @note never mess with the pgdir bitmap here! */
510 return VINF_SUCCESS;
511}
512
513/**
514 * Applies relocations to data and code managed by this
515 * component. This function will be called at init and
516 * whenever the VMM needs to relocate itself inside the GC.
517 *
518 * CSAM will update the addresses used by the switcher.
519 *
520 * @param pVM The cross context VM structure.
521 * @param offDelta Relocation delta.
522 */
523VMMR3_INT_DECL(void) CSAMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
524{
525 if (offDelta && VM_IS_RAW_MODE_ENABLED(pVM))
526 {
527 /* Adjust pgdir and page bitmap pointers. */
528 pVM->csam.s.pPDBitmapGC = MMHyperR3ToRC(pVM, pVM->csam.s.pPDGCBitmapHC);
529 pVM->csam.s.pPDHCBitmapGC = MMHyperR3ToRC(pVM, pVM->csam.s.pPDBitmapHC);
530
531 for (int i = 0; i < CSAM_PGDIRBMP_CHUNKS; i++)
532 {
533 if (pVM->csam.s.pPDGCBitmapHC[i])
534 {
535 pVM->csam.s.pPDGCBitmapHC[i] += offDelta;
536 }
537 }
538 }
539 return;
540}
541
542/**
543 * Terminates CSAM.
544 *
545 * Termination means cleaning up and freeing all resources;
546 * the VM itself is at this point powered off or suspended.
547 *
548 * @returns VBox status code.
549 * @param pVM The cross context VM structure.
550 */
551VMMR3_INT_DECL(int) CSAMR3Term(PVM pVM)
552{
553 if (!VM_IS_RAW_MODE_ENABLED(pVM))
554 return VINF_SUCCESS;
555
556 int rc;
557
558 rc = CSAMR3Reset(pVM);
559 AssertRC(rc);
560
561 /** @todo triggers assertion in MMHyperFree */
562#if 0
563 for(int i=0;i<CSAM_PAGEBMP_CHUNKS;i++)
564 {
565 if (pVM->csam.s.pPDBitmapHC[i])
566 MMHyperFree(pVM, pVM->csam.s.pPDBitmapHC[i]);
567 }
568#endif
569
570 return VINF_SUCCESS;
571}
572
573/**
574 * CSAM reset callback.
575 *
576 * @returns VBox status code.
577 * @param pVM The cross context VM structure.
578 */
579VMMR3_INT_DECL(int) CSAMR3Reset(PVM pVM)
580{
581 if (!VM_IS_RAW_MODE_ENABLED(pVM))
582 return VINF_SUCCESS;
583
584 /* Clear page bitmaps. */
585 for (int i = 0; i < CSAM_PGDIRBMP_CHUNKS; i++)
586 {
587 if (pVM->csam.s.pPDBitmapHC[i])
588 {
589 Assert((CSAM_PAGE_BITMAP_SIZE & 3) == 0);
590 ASMMemZero32(pVM->csam.s.pPDBitmapHC[i], CSAM_PAGE_BITMAP_SIZE);
591 }
592 }
593
594 /* Remove all CSAM page records. */
595 for (;;)
596 {
597 PCSAMPAGEREC pPageRec = (PCSAMPAGEREC)RTAvlPVGetBestFit(&pVM->csam.s.pPageTree, 0, true);
598 if (!pPageRec)
599 break;
600 csamRemovePageRecord(pVM, pPageRec->page.pPageGC);
601 }
602 Assert(!pVM->csam.s.pPageTree);
603
604 csamReinit(pVM);
605
606 return VINF_SUCCESS;
607}
608
609
610/**
611 * Callback function for RTAvlPVDoWithAll
612 *
613 * Counts the number of records in the tree
614 *
615 * @returns VBox status code.
616 * @param pNode Current node
617 * @param pcPatches Pointer to patch counter
618 */
619static DECLCALLBACK(int) csamR3SaveCountRecord(PAVLPVNODECORE pNode, void *pcPatches)
620{
621 NOREF(pNode);
622 *(uint32_t *)pcPatches += 1;
623 return VINF_SUCCESS;
624}
625
626/**
627 * Callback function for RTAvlPVDoWithAll for saving a page record.
628 *
629 * @returns VBox status code.
630 * @param pNode Current node
631 * @param pvVM Pointer to the VM
632 */
633static DECLCALLBACK(int) csamR3SavePageState(PAVLPVNODECORE pNode, void *pvVM)
634{
635 PCSAMPAGEREC pPage = (PCSAMPAGEREC)pNode;
636 PVM pVM = (PVM)pvVM;
637 PSSMHANDLE pSSM = pVM->csam.s.savedstate.pSSM;
638
639 int rc = SSMR3PutStructEx(pSSM, &pPage->page, sizeof(pPage->page), 0 /*fFlags*/, &g_aCsamPageFields[0], NULL);
640 AssertLogRelRCReturn(rc, rc);
641
642 if (pPage->page.pBitmap)
643 SSMR3PutMem(pSSM, pPage->page.pBitmap, CSAM_PAGE_BITMAP_SIZE);
644
645 return VINF_SUCCESS;
646}
647
648/**
649 * Execute state save operation.
650 *
651 * @returns VBox status code.
652 * @param pVM The cross context VM structure.
653 * @param pSSM SSM operation handle.
654 */
655static DECLCALLBACK(int) csamR3Save(PVM pVM, PSSMHANDLE pSSM)
656{
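    /* Saved-state layout written below: the CSAM structure (g_aCsamFields), then the
       page directory bitmap as a chunk count, the chunk size and (index, bitmap) pairs
       terminated by UINT32_MAX, and finally one g_aCsamPageFields record (plus its page
       bitmap, when present) per page record. */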
657 int rc;
658
659 /*
660 * Count the number of page records in the tree (feeling lazy)
661 */
662 pVM->csam.s.savedstate.cPageRecords = 0;
663 RTAvlPVDoWithAll(&pVM->csam.s.pPageTree, true, csamR3SaveCountRecord, &pVM->csam.s.savedstate.cPageRecords);
664
665 /*
666 * Save CSAM structure.
667 */
668 pVM->csam.s.savedstate.pSSM = pSSM;
669 rc = SSMR3PutStructEx(pSSM, &pVM->csam.s, sizeof(pVM->csam.s), 0 /*fFlags*/, g_aCsamFields, NULL);
670 AssertLogRelRCReturn(rc, rc);
671
672 /*
673 * Save pgdir bitmap.
674 */
675 SSMR3PutU32(pSSM, CSAM_PGDIRBMP_CHUNKS);
676 SSMR3PutU32(pSSM, CSAM_PAGE_BITMAP_SIZE);
677 for (uint32_t i = 0; i < CSAM_PGDIRBMP_CHUNKS; i++)
678 if (pVM->csam.s.pPDBitmapHC[i])
679 {
680 SSMR3PutU32(pSSM, i);
681 SSMR3PutMem(pSSM, pVM->csam.s.pPDBitmapHC[i], CSAM_PAGE_BITMAP_SIZE);
682 }
683 SSMR3PutU32(pSSM, UINT32_MAX); /* terminator */
684
685 /*
686 * Save page records
687 */
688 pVM->csam.s.savedstate.pSSM = pSSM;
689 rc = RTAvlPVDoWithAll(&pVM->csam.s.pPageTree, true, csamR3SavePageState, pVM);
690 AssertRCReturn(rc, rc);
691
692 pVM->csam.s.savedstate.pSSM = NULL;
693 return VINF_SUCCESS;
694}
695
696
697/**
698 * Execute state load operation.
699 *
700 * @returns VBox status code.
701 * @param pVM The cross context VM structure.
702 * @param pSSM SSM operation handle.
703 * @param uVersion Data layout version.
704 * @param uPass The data pass.
705 */
706static DECLCALLBACK(int) csamR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
707{
708 int rc;
709
710 /*
711 * Check preconditions.
712 */
713 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
714 Assert(pVM->csam.s.savedstate.pSSM == NULL);
715 AssertLogRelMsgReturn(uVersion >= CSAM_SAVED_STATE_VERSION_PUT_MEM && uVersion <= CSAM_SAVED_STATE_VERSION,
716 ("uVersion=%d (%#x)\n", uVersion, uVersion),
717 VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION);
718
719 if (uVersion >= CSAM_SAVED_STATE_VERSION_PUT_STRUCT)
720 {
721 /*
722 * Restore the SSMR3PutStructEx fashioned state.
723 */
724 rc = SSMR3GetStructEx(pSSM, &pVM->csam.s, sizeof(pVM->csam.s), 0 /*fFlags*/, &g_aCsamFields[0], NULL);
725
726 /*
727 * Restore page bitmaps
728 */
729 uint32_t cPgDirBmpChunks = 0;
730 rc = SSMR3GetU32(pSSM, &cPgDirBmpChunks);
731 uint32_t cbPgDirBmpChunk = 0;
732 rc = SSMR3GetU32(pSSM, &cbPgDirBmpChunk);
733 AssertRCReturn(rc, rc);
734 AssertLogRelMsgReturn(cPgDirBmpChunks <= CSAM_PGDIRBMP_CHUNKS,
735 ("cPgDirBmpChunks=%#x (vs %#x)\n", cPgDirBmpChunks, CSAM_PGDIRBMP_CHUNKS),
736 VERR_SSM_UNEXPECTED_DATA);
737 AssertLogRelMsgReturn(cbPgDirBmpChunk <= CSAM_PAGE_BITMAP_SIZE,
738 ("cbPgDirBmpChunk=%#x (vs %#x)\n", cbPgDirBmpChunk, CSAM_PAGE_BITMAP_SIZE),
739 VERR_SSM_UNEXPECTED_DATA);
740 for (uint32_t i = 0; i < CSAM_PGDIRBMP_CHUNKS; i++)
741 {
742 Assert(!pVM->csam.s.pPDBitmapHC[i]);
743 Assert(!pVM->csam.s.pPDGCBitmapHC[i]);
744 }
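        /* Read back the saved (chunk index, bitmap) pairs; the list is terminated by UINT32_MAX. */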
745 for (uint32_t iNext = 0;;)
746 {
747 uint32_t iThis;
748 rc = SSMR3GetU32(pSSM, &iThis);
749 AssertLogRelRCReturn(rc, rc);
750 AssertLogRelMsgReturn(iThis >= iNext, ("iThis=%#x iNext=%#x\n", iThis, iNext), VERR_SSM_UNEXPECTED_DATA);
751 if (iThis == UINT32_MAX)
752 break;
753
754 rc = MMHyperAlloc(pVM, CSAM_PAGE_BITMAP_SIZE, 0, MM_TAG_CSAM, (void **)&pVM->csam.s.pPDBitmapHC[iThis]);
755 AssertLogRelRCReturn(rc, rc);
756 pVM->csam.s.pPDGCBitmapHC[iThis] = MMHyperR3ToRC(pVM, pVM->csam.s.pPDBitmapHC[iThis]);
757
758 rc = SSMR3GetMem(pSSM, pVM->csam.s.pPDBitmapHC[iThis], CSAM_PAGE_BITMAP_SIZE);
759 AssertLogRelRCReturn(rc, rc);
760 iNext = iThis + 1;
761 }
762
763 /*
764 * Restore page records
765 */
766 uint32_t const cPageRecords = pVM->csam.s.savedstate.cPageRecords + pVM->csam.s.savedstate.cPatchPageRecords;
767 for (uint32_t iPageRec = 0; iPageRec < cPageRecords; iPageRec++)
768 {
769 CSAMPAGE PageRec;
770 RT_ZERO(PageRec);
771 rc = SSMR3GetStructEx(pSSM, &PageRec, sizeof(PageRec), 0 /*fFlags*/, &g_aCsamPageFields[0], NULL);
772 AssertLogRelRCReturn(rc, rc);
773
774 /* Recreate the page record. */
775 PCSAMPAGE pPage = csamR3CreatePageRecord(pVM, PageRec.pPageGC, PageRec.enmTag, PageRec.fCode32,
776 PageRec.fMonitorInvalidation);
777 AssertReturn(pPage, VERR_NO_MEMORY);
778 pPage->GCPhys = PageRec.GCPhys;
779 pPage->fFlags = PageRec.fFlags;
780 pPage->u64Hash = PageRec.u64Hash;
781 if (PageRec.pBitmap)
782 {
783 rc = SSMR3GetMem(pSSM, pPage->pBitmap, CSAM_PAGE_BITMAP_SIZE);
784 AssertLogRelRCReturn(rc, rc);
785 }
786 else
787 {
788 MMR3HeapFree(pPage->pBitmap);
789 pPage->pBitmap = NULL;
790 }
791 }
792 }
793 else
794 {
795 /*
796 * Restore the old SSMR3PutMem fashioned state.
797 */
798
799 /* CSAM structure first. */
800 CSAM csamInfo;
801 RT_ZERO(csamInfo);
802 if ( SSMR3HandleVersion(pSSM) >= VBOX_FULL_VERSION_MAKE(4, 3, 51)
803 && SSMR3HandleRevision(pSSM) >= 100346)
804 rc = SSMR3GetStructEx(pSSM, &csamInfo, sizeof(csamInfo), SSMSTRUCT_FLAGS_MEM_BAND_AID,
805 &g_aCsamFields500[0], NULL);
806 else
807 rc = SSMR3GetStructEx(pSSM, &csamInfo, sizeof(csamInfo), SSMSTRUCT_FLAGS_MEM_BAND_AID_RELAXED,
808 &g_aCsamFieldsBefore500[0], NULL);
809 AssertRCReturn(rc, rc);
810
811 pVM->csam.s.fGatesChecked = csamInfo.fGatesChecked;
812 pVM->csam.s.fScanningStarted = csamInfo.fScanningStarted;
813
814 /* Restore dirty code page info. */
815 pVM->csam.s.cDirtyPages = csamInfo.cDirtyPages;
816 memcpy(pVM->csam.s.pvDirtyBasePage, csamInfo.pvDirtyBasePage, sizeof(pVM->csam.s.pvDirtyBasePage));
817 memcpy(pVM->csam.s.pvDirtyFaultPage, csamInfo.pvDirtyFaultPage, sizeof(pVM->csam.s.pvDirtyFaultPage));
818
819 /* Restore possible code page */
820 pVM->csam.s.cPossibleCodePages = csamInfo.cPossibleCodePages;
821 memcpy(pVM->csam.s.pvPossibleCodePage, csamInfo.pvPossibleCodePage, sizeof(pVM->csam.s.pvPossibleCodePage));
822
823 /*
824 * Restore pgdir bitmap (we'll change the pointers next).
825 */
826 rc = SSMR3GetStructEx(pSSM, pVM->csam.s.pPDBitmapHC, sizeof(uint8_t *) * CSAM_PGDIRBMP_CHUNKS,
827 SSMSTRUCT_FLAGS_MEM_BAND_AID_RELAXED, &g_aCsamPDBitmapArray[0], NULL);
828 AssertRCReturn(rc, rc);
829
830 /*
831 * Restore page bitmaps
832 */
833 for (unsigned i = 0; i < CSAM_PGDIRBMP_CHUNKS; i++)
834 if (pVM->csam.s.pPDBitmapHC[i])
835 {
836 rc = MMHyperAlloc(pVM, CSAM_PAGE_BITMAP_SIZE, 0, MM_TAG_CSAM, (void **)&pVM->csam.s.pPDBitmapHC[i]);
837 AssertLogRelRCReturn(rc, rc);
838 pVM->csam.s.pPDGCBitmapHC[i] = MMHyperR3ToRC(pVM, pVM->csam.s.pPDBitmapHC[i]);
839
840 /* Restore the bitmap. */
841 rc = SSMR3GetMem(pSSM, pVM->csam.s.pPDBitmapHC[i], CSAM_PAGE_BITMAP_SIZE);
842 AssertRCReturn(rc, rc);
843 }
844 else
845 {
846 Assert(!pVM->csam.s.pPDGCBitmapHC[i]);
847 pVM->csam.s.pPDGCBitmapHC[i] = 0;
848 }
849
850 /*
851 * Restore page records
852 */
853 for (uint32_t i = 0; i < csamInfo.savedstate.cPageRecords + csamInfo.savedstate.cPatchPageRecords; i++)
854 {
855 CSAMPAGEREC page;
856 PCSAMPAGE pPage;
857
858 RT_ZERO(page);
859 rc = SSMR3GetStructEx(pSSM, &page, sizeof(page), SSMSTRUCT_FLAGS_MEM_BAND_AID_RELAXED, &g_aCsamPageRecFields[0], NULL);
860 AssertRCReturn(rc, rc);
861
862 /*
863 * Recreate the page record
864 */
865 pPage = csamR3CreatePageRecord(pVM, page.page.pPageGC, page.page.enmTag, page.page.fCode32, page.page.fMonitorInvalidation);
866 AssertReturn(pPage, VERR_NO_MEMORY);
867
868 pPage->GCPhys = page.page.GCPhys;
869 pPage->fFlags = page.page.fFlags;
870 pPage->u64Hash = page.page.u64Hash;
871
872 if (page.page.pBitmap)
873 {
874 rc = SSMR3GetMem(pSSM, pPage->pBitmap, CSAM_PAGE_BITMAP_SIZE);
875 AssertRCReturn(rc, rc);
876 }
877 else
878 {
879 MMR3HeapFree(pPage->pBitmap);
880 pPage->pBitmap = NULL;
881 }
882 }
883
884 /* Note: we don't restore aDangerousInstr; it will be recreated automatically. */
885 memset(&pVM->csam.s.aDangerousInstr, 0, sizeof(pVM->csam.s.aDangerousInstr));
886 pVM->csam.s.cDangerousInstr = 0;
887 pVM->csam.s.iDangerousInstr = 0;
888 }
889 return VINF_SUCCESS;
890}
891
892/**
893 * Converts a guest context address to a host context pointer.
894 *
895 * @returns Byte pointer (ring-3 context) corresponding to pGCPtr on success,
896 * NULL on failure.
897 * @param pVM The cross context VM structure.
898 * @param pCacheRec Address conversion cache record
899 * @param pGCPtr Guest context pointer
901 *
902 */
903static uint8_t *csamR3GCVirtToHCVirt(PVM pVM, PCSAMP2GLOOKUPREC pCacheRec, RCPTRTYPE(uint8_t *) pGCPtr)
904{
905 int rc;
906 void *pHCPtr;
907 Assert(pVM->cCpus == 1);
908 PVMCPU pVCpu = VMMGetCpu0(pVM);
909
910 STAM_PROFILE_START(&pVM->csam.s.StatTimeAddrConv, a);
911
912 pHCPtr = PATMR3GCPtrToHCPtr(pVM, pGCPtr);
913 if (pHCPtr)
914 return (uint8_t *)pHCPtr;
915
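    /* Fast path: if the previous lookup was within the same guest page, reuse the cached mapping. */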
916 if (pCacheRec->pPageLocStartHC)
917 {
918 uint32_t offset = pGCPtr & PAGE_OFFSET_MASK;
919 if (pCacheRec->pGuestLoc == (pGCPtr & PAGE_BASE_GC_MASK))
920 {
921 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeAddrConv, a);
922 return pCacheRec->pPageLocStartHC + offset;
923 }
924 }
925
926 /* Release previous lock if any. */
927 if (pCacheRec->Lock.pvMap)
928 {
929 PGMPhysReleasePageMappingLock(pVM, &pCacheRec->Lock);
930 pCacheRec->Lock.pvMap = NULL;
931 }
932
933 rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, pGCPtr, (const void **)&pHCPtr, &pCacheRec->Lock);
934 if (rc != VINF_SUCCESS)
935 {
936//// AssertMsgRC(rc, ("MMR3PhysGCVirt2HCVirtEx failed for %RRv\n", pGCPtr));
937 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeAddrConv, a);
938 return NULL;
939 }
940
941 pCacheRec->pPageLocStartHC = (uint8_t*)((uintptr_t)pHCPtr & PAGE_BASE_HC_MASK);
942 pCacheRec->pGuestLoc = pGCPtr & PAGE_BASE_GC_MASK;
943 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeAddrConv, a);
944 return (uint8_t *)pHCPtr;
945}
946
947
948/** For csamR3ReadBytes. */
949typedef struct CSAMDISINFO
950{
951 PVM pVM;
952 uint8_t const *pbSrcInstr; /* aka pInstHC */
953} CSAMDISINFO, *PCSAMDISINFO;
954
955
956/**
957 * @callback_method_impl{FNDISREADBYTES}
958 */
959static DECLCALLBACK(int) csamR3ReadBytes(PDISCPUSTATE pDis, uint8_t offInstr, uint8_t cbMinRead, uint8_t cbMaxRead)
960{
961 PCSAMDISINFO pDisInfo = (PCSAMDISINFO)pDis->pvUser;
962
963 /*
964 * We are not interested in patched instructions, so read the original opcode bytes.
965 *
966 * Note! Single-instruction patches (int3) are checked in CSAMR3AnalyseCallback.
967 *
968 * Since we're decoding one instruction at a time, we don't need to be
969 * concerned about any patched instructions following the first one. We
970 * could in fact probably skip this PATM call for offInstr != 0.
971 */
972 size_t cbRead = cbMaxRead;
973 RTUINTPTR uSrcAddr = pDis->uInstrAddr + offInstr;
974 int rc = PATMR3ReadOrgInstr(pDisInfo->pVM, pDis->uInstrAddr + offInstr, &pDis->abInstr[offInstr], cbRead, &cbRead);
975 if (RT_SUCCESS(rc))
976 {
977 if (cbRead >= cbMinRead)
978 {
979 pDis->cbCachedInstr = offInstr + (uint8_t)cbRead;
980 return rc;
981 }
982
983 cbMinRead -= (uint8_t)cbRead;
984 cbMaxRead -= (uint8_t)cbRead;
985 offInstr += (uint8_t)cbRead;
986 uSrcAddr += cbRead;
987 }
988
989 /*
990 * The current byte isn't a patch instruction byte.
991 */
992 AssertPtr(pDisInfo->pbSrcInstr);
993 if ((pDis->uInstrAddr >> PAGE_SHIFT) == ((uSrcAddr + cbMaxRead - 1) >> PAGE_SHIFT))
994 {
995 memcpy(&pDis->abInstr[offInstr], &pDisInfo->pbSrcInstr[offInstr], cbMaxRead);
996 offInstr += cbMaxRead;
997 rc = VINF_SUCCESS;
998 }
999 else if ( (pDis->uInstrAddr >> PAGE_SHIFT) == ((uSrcAddr + cbMinRead - 1) >> PAGE_SHIFT)
1000 || PATMIsPatchGCAddr(pDisInfo->pVM, uSrcAddr) /** @todo does CSAM actually analyze patch code, or is this just a copy & paste check? */
1001 )
1002 {
1003 memcpy(&pDis->abInstr[offInstr], &pDisInfo->pbSrcInstr[offInstr], cbMinRead);
1004 offInstr += cbMinRead;
1005 rc = VINF_SUCCESS;
1006 }
1007 else
1008 {
1009 /* Crossed page boundary, pbSrcInstr is no good... */
1010 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pDisInfo->pVM), &pDis->abInstr[offInstr], uSrcAddr, cbMinRead);
1011 offInstr += cbMinRead;
1012 }
1013
1014 pDis->cbCachedInstr = offInstr;
1015 return rc;
1016}
1017
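/**
 * Wrapper around the disassembler that uses csamR3ReadBytes, so the original
 * opcode bytes are read even when PATM has patched the code.
 *
 * @returns VBox status code (from the DIS API).
 * @param   pVM         The cross context VM structure.
 * @param   InstrGC     Guest context address of the instruction.
 * @param   InstrHC     Host context pointer to the instruction bytes.
 * @param   enmCpuMode  CPU mode (16 or 32 bits) to disassemble in.
 * @param   pCpu        Where to return the disassembler state.
 * @param   pcbInstr    Where to return the instruction size.
 * @param   pszOutput   Buffer for the disassembled string, NULL if not wanted.
 * @param   cbOutput    Size of the output buffer.
 */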
1018DECLINLINE(int) csamR3DISInstr(PVM pVM, RTRCPTR InstrGC, uint8_t *InstrHC, DISCPUMODE enmCpuMode,
1019 PDISCPUSTATE pCpu, uint32_t *pcbInstr, char *pszOutput, size_t cbOutput)
1020{
1021 CSAMDISINFO DisInfo = { pVM, InstrHC };
1022#ifdef DEBUG
1023 return DISInstrToStrEx(InstrGC, enmCpuMode, csamR3ReadBytes, &DisInfo, DISOPTYPE_ALL,
1024 pCpu, pcbInstr, pszOutput, cbOutput);
1025#else
1026 /* We are interested in everything except harmless stuff */
1027 if (pszOutput)
1028 return DISInstrToStrEx(InstrGC, enmCpuMode, csamR3ReadBytes, &DisInfo,
1029 ~(DISOPTYPE_INVALID | DISOPTYPE_HARMLESS | DISOPTYPE_RRM_MASK),
1030 pCpu, pcbInstr, pszOutput, cbOutput);
1031 return DISInstrEx(InstrGC, enmCpuMode, ~(DISOPTYPE_INVALID | DISOPTYPE_HARMLESS | DISOPTYPE_RRM_MASK),
1032 csamR3ReadBytes, &DisInfo, pCpu, pcbInstr);
1033#endif
1034}
1035
1036/**
1037 * Analyses the instructions following the cli for compliance with our cli patching heuristics.
1038 *
1039 * @returns VBox status code.
1040 * @param pVM The cross context VM structure.
1041 * @param pCpu CPU disassembly state
1042 * @param pInstrGC Guest context pointer to privileged instruction
1043 * @param pCurInstrGC Guest context pointer to the current instruction
1044 * @param pCacheRec GC to HC cache record
1045 * @param pUserData User pointer (callback specific)
1046 *
1047 */
1048static DECLCALLBACK(int) CSAMR3AnalyseCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC,
1049 PCSAMP2GLOOKUPREC pCacheRec, void *pUserData)
1050{
1051 PCSAMPAGE pPage = (PCSAMPAGE)pUserData;
1052 int rc;
1053 NOREF(pInstrGC);
1054
1055 switch (pCpu->pCurInstr->uOpcode)
1056 {
1057 case OP_INT:
1058 Assert(pCpu->Param1.fUse & DISUSE_IMMEDIATE8);
1059 if (pCpu->Param1.uValue == 3)
1060 {
1061 //two byte int 3
1062 return VINF_SUCCESS;
1063 }
1064 break;
1065
1066 /* removing breaks win2k guests? */
1067 case OP_IRET:
1068 if (EMIsRawRing1Enabled(pVM))
1069 break;
1070 RT_FALL_THRU();
1071
1072 case OP_ILLUD2:
1073 /* This appears to be some kind of kernel panic in Linux 2.4; no point in continuing. */
1074 case OP_RETN:
1075 case OP_INT3:
1076 case OP_INVALID:
1077 return VINF_SUCCESS;
1078 }
1079
1080 // Check for exit points
1081 switch (pCpu->pCurInstr->uOpcode)
1082 {
1083 /* It's not a good idea to patch pushf instructions:
1084 * - increases the chance of conflicts (code jumping to the next instruction)
1085 * - better to patch the cli
1086 * - code that branches before the cli will likely hit an int 3
1087 * - in general doesn't offer any benefits as we don't allow nested patch blocks (IF is always 1)
1088 */
1089 case OP_PUSHF:
1090 case OP_POPF:
1091 break;
1092
1093 case OP_CLI:
1094 {
1095 uint32_t cbInstrs = 0;
1096 uint32_t cbCurInstr = pCpu->cbInstr;
1097 bool fCode32 = pPage->fCode32;
1098
1099 Assert(fCode32);
1100
1101 PATMR3AddHint(pVM, pCurInstrGC, (fCode32) ? PATMFL_CODE32 : 0);
1102
1103 /* Make sure the instructions that follow the cli have not been encountered before. */
1104 while (true)
1105 {
1106 DISCPUSTATE cpu;
1107
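                /* Only check as far as a near jump patch would reach (SIZEOF_NEARJUMP32 bytes),
                   since that is presumably the amount PATM overwrites at the cli. */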
1108 if (cbInstrs + cbCurInstr >= SIZEOF_NEARJUMP32)
1109 break;
1110
1111 if (csamIsCodeScanned(pVM, pCurInstrGC + cbCurInstr, &pPage) == true)
1112 {
1113 /* We've scanned the next instruction(s) already. This means we've
1114 followed a branch that ended up there before -> dangerous!! */
1115 PATMR3DetectConflict(pVM, pCurInstrGC, pCurInstrGC + cbCurInstr);
1116 break;
1117 }
1118 pCurInstrGC += cbCurInstr;
1119 cbInstrs += cbCurInstr;
1120
1121 { /* Force pCurInstrHC out of scope after we stop using it (page lock!) */
1122 uint8_t *pCurInstrHC = csamR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
1123 if (pCurInstrHC == NULL)
1124 {
1125 Log(("csamR3GCVirtToHCVirt failed for %RRv\n", pCurInstrGC));
1126 break;
1127 }
1128 Assert(VALID_PTR(pCurInstrHC));
1129
1130 rc = csamR3DISInstr(pVM, pCurInstrGC, pCurInstrHC, (fCode32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
1131 &cpu, &cbCurInstr, NULL, 0);
1132 }
1133 AssertRC(rc);
1134 if (RT_FAILURE(rc))
1135 break;
1136 }
1137 break;
1138 }
1139
1140#ifdef VBOX_WITH_RAW_RING1
1141 case OP_MOV:
1142 /* mov xx, CS is a dangerous instruction as our raw ring usage leaks through. */
1143 if ( EMIsRawRing1Enabled(pVM)
1144 && (pCpu->Param2.fUse & DISUSE_REG_SEG)
1145 && (pCpu->Param2.Base.idxSegReg == DISSELREG_CS))
1146 {
1147 Log(("CSAM: Patching dangerous 'mov xx, cs' instruction at %RGv with an int3\n", pCurInstrGC));
1148 if (PATMR3HasBeenPatched(pVM, pCurInstrGC) == false)
1149 {
1150 rc = PATMR3InstallPatch(pVM, pCurInstrGC, (pPage->fCode32) ? PATMFL_CODE32 : 0);
1151 if (RT_FAILURE(rc))
1152 {
1153 Log(("PATMR3InstallPatch failed with %d\n", rc));
1154 return VWRN_CONTINUE_ANALYSIS;
1155 }
1156 }
1157 return VWRN_CONTINUE_ANALYSIS;
1158 }
1159 break;
1160#endif
1161
1162 case OP_PUSH:
1163 /** @todo broken comparison!! should be if ((pCpu->Param1.fUse & DISUSE_REG_SEG) && (pCpu->Param1.Base.idxSegReg == DISSELREG_SS)) */
1164 if (pCpu->pCurInstr->fParam1 != OP_PARM_REG_CS)
1165 break;
1166
1167#ifndef VBOX_WITH_SAFE_STR
1168 RT_FALL_THRU();
1169 case OP_STR:
1170#endif
1171 RT_FALL_THRU();
1172 case OP_LSL:
1173 case OP_LAR:
1174 case OP_SGDT:
1175 case OP_SLDT:
1176 case OP_SIDT:
1177 case OP_SMSW:
1178 case OP_VERW:
1179 case OP_VERR:
1180 case OP_CPUID:
1181 case OP_IRET:
1182#ifdef DEBUG
1183 switch(pCpu->pCurInstr->uOpcode)
1184 {
1185 case OP_STR:
1186 Log(("Privileged instruction at %RRv: str!!\n", pCurInstrGC));
1187 break;
1188 case OP_LSL:
1189 Log(("Privileged instruction at %RRv: lsl!!\n", pCurInstrGC));
1190 break;
1191 case OP_LAR:
1192 Log(("Privileged instruction at %RRv: lar!!\n", pCurInstrGC));
1193 break;
1194 case OP_SGDT:
1195 Log(("Privileged instruction at %RRv: sgdt!!\n", pCurInstrGC));
1196 break;
1197 case OP_SLDT:
1198 Log(("Privileged instruction at %RRv: sldt!!\n", pCurInstrGC));
1199 break;
1200 case OP_SIDT:
1201 Log(("Privileged instruction at %RRv: sidt!!\n", pCurInstrGC));
1202 break;
1203 case OP_SMSW:
1204 Log(("Privileged instruction at %RRv: smsw!!\n", pCurInstrGC));
1205 break;
1206 case OP_VERW:
1207 Log(("Privileged instruction at %RRv: verw!!\n", pCurInstrGC));
1208 break;
1209 case OP_VERR:
1210 Log(("Privileged instruction at %RRv: verr!!\n", pCurInstrGC));
1211 break;
1212 case OP_CPUID:
1213 Log(("Privileged instruction at %RRv: cpuid!!\n", pCurInstrGC));
1214 break;
1215 case OP_PUSH:
1216 Log(("Privileged instruction at %RRv: push cs!!\n", pCurInstrGC));
1217 break;
1218 case OP_IRET:
1219 Log(("Privileged instruction at %RRv: iret!!\n", pCurInstrGC));
1220 break;
1221 }
1222#endif
1223
1224 if (PATMR3HasBeenPatched(pVM, pCurInstrGC) == false)
1225 {
1226 rc = PATMR3InstallPatch(pVM, pCurInstrGC, (pPage->fCode32) ? PATMFL_CODE32 : 0);
1227 if (RT_FAILURE(rc))
1228 {
1229 Log(("PATMR3InstallPatch failed with %d\n", rc));
1230 return VWRN_CONTINUE_ANALYSIS;
1231 }
1232 }
1233 if (pCpu->pCurInstr->uOpcode == OP_IRET)
1234 return VINF_SUCCESS; /* Look no further in this branch. */
1235
1236 return VWRN_CONTINUE_ANALYSIS;
1237
1238 case OP_JMP:
1239 case OP_CALL:
1240 {
1241 // return or jump/call through a jump table
1242 if (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J)
1243 {
1244#ifdef DEBUG
1245 switch(pCpu->pCurInstr->uOpcode)
1246 {
1247 case OP_JMP:
1248 Log(("Control Flow instruction at %RRv: jmp!!\n", pCurInstrGC));
1249 break;
1250 case OP_CALL:
1251 Log(("Control Flow instruction at %RRv: call!!\n", pCurInstrGC));
1252 break;
1253 }
1254#endif
1255 return VWRN_CONTINUE_ANALYSIS;
1256 }
1257 return VWRN_CONTINUE_ANALYSIS;
1258 }
1259
1260 }
1261
1262 return VWRN_CONTINUE_ANALYSIS;
1263}
1264
1265#ifdef CSAM_ANALYSE_BEYOND_RET
1266/**
1267 * Wrapper for csamAnalyseCodeStream for call instructions.
1268 *
1269 * @returns VBox status code.
1270 * @param pVM The cross context VM structure.
1271 * @param pInstrGC Guest context pointer to privileged instruction
1272 * @param pCurInstrGC Guest context pointer to the current instruction
1273 * @param fCode32 16 or 32 bits code
1274 * @param pfnCSAMR3Analyse Callback for testing the disassembled instruction
1275 * @param pUserData User pointer (callback specific)
1276 *
1277 */
1278static int csamAnalyseCallCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, bool fCode32,
1279 PFN_CSAMR3ANALYSE pfnCSAMR3Analyse, void *pUserData, PCSAMP2GLOOKUPREC pCacheRec)
1280{
1281 int rc;
1282 CSAMCALLEXITREC CallExitRec;
1283 PCSAMCALLEXITREC pOldCallRec;
1284 PCSAMPAGE pPage = 0;
1285 uint32_t i;
1286
1287 CallExitRec.cInstrAfterRet = 0;
1288
1289 pOldCallRec = pCacheRec->pCallExitRec;
1290 pCacheRec->pCallExitRec = &CallExitRec;
1291
1292 rc = csamAnalyseCodeStream(pVM, pInstrGC, pCurInstrGC, fCode32, pfnCSAMR3Analyse, pUserData, pCacheRec);
1293
1294 for (i = 0; i < CallExitRec.cInstrAfterRet; i++)
1295 {
1296 PCSAMPAGE pPage = 0;
1297
1298 pCurInstrGC = CallExitRec.pInstrAfterRetGC[i];
1299
1300 /* Check if we've previously encountered the instruction after the ret. */
1301 if (csamIsCodeScanned(pVM, pCurInstrGC, &pPage) == false)
1302 {
1303 DISCPUSTATE cpu;
1304 uint32_t cbInstr;
1305 int rc2;
1306#ifdef DEBUG
1307 char szOutput[256];
1308#endif
1309 if (pPage == NULL)
1310 {
1311 /* New address; let's take a look at it. */
1312 pPage = csamR3CreatePageRecord(pVM, pCurInstrGC, CSAM_TAG_CSAM, fCode32);
1313 if (pPage == NULL)
1314 {
1315 rc = VERR_NO_MEMORY;
1316 goto done;
1317 }
1318 }
1319
1320 /**
1321 * Some generic requirements for recognizing an adjacent function:
1322 * - alignment fillers that consist of:
1323 * - nop
1324 * - lea genregX, [genregX (+ 0)]
1325 * - push ebp after the filler (can extend this later); aligned on at least a 4-byte boundary
1326 */
1327 for (int j = 0; j < 16; j++)
1328 {
1329 uint8_t *pCurInstrHC = csamR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
1330 if (pCurInstrHC == NULL)
1331 {
1332 Log(("csamR3GCVirtToHCVirt failed for %RRv\n", pCurInstrGC));
1333 goto done;
1334 }
1335 Assert(VALID_PTR(pCurInstrHC));
1336
1337 STAM_PROFILE_START(&pVM->csam.s.StatTimeDisasm, a);
1338#ifdef DEBUG
1339 rc2 = csamR3DISInstr(pVM, pCurInstrGC, pCurInstrHC, (fCode32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
1340 &cpu, &cbInstr, szOutput, sizeof(szOutput));
1341 if (RT_SUCCESS(rc2)) Log(("CSAM Call Analysis: %s", szOutput));
1342#else
1343 rc2 = csamR3DISInstr(pVM, pCurInstrGC, pCurInstrHC, (fCode32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
1344 &cpu, &cbInstr, NULL, 0);
1345#endif
1346 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeDisasm, a);
1347 if (RT_FAILURE(rc2))
1348 {
1349 Log(("Disassembly failed at %RRv with %Rrc (probably page not present) -> return to caller\n", pCurInstrGC, rc2));
1350 goto done;
1351 }
1352
1353 STAM_COUNTER_ADD(&pVM->csam.s.StatNrBytesRead, cbInstr);
1354
1355 RCPTRTYPE(uint8_t *) addr = 0;
1356 PCSAMPAGE pJmpPage = NULL;
1357
1358 if (PAGE_ADDRESS(pCurInstrGC) != PAGE_ADDRESS(pCurInstrGC + cbInstr - 1))
1359 {
1360 if (!PGMGstIsPagePresent(pVM, pCurInstrGC + cbInstr - 1))
1361 {
1362 /// @todo fault in the page
1363 Log(("Page for current instruction %RRv is not present!!\n", pCurInstrGC));
1364 goto done;
1365 }
1366 //all is fine, let's continue
1367 csamR3CheckPageRecord(pVM, pCurInstrGC + cbInstr - 1);
1368 }
1369
1370 switch (cpu.pCurInstr->uOpcode)
1371 {
1372 case OP_NOP:
1373 case OP_INT3:
1374 break; /* acceptable */
1375
1376 case OP_LEA:
1377 /* Must be similar to:
1378 *
1379 * lea esi, [esi]
1380 * lea esi, [esi+0]
1381 * Any register is allowed as long as source and destination are identical.
1382 */
1383 if ( cpu.Param1.fUse != DISUSE_REG_GEN32
1384 || ( cpu.Param2.flags != DISUSE_REG_GEN32
1385 && ( !(cpu.Param2.flags & DISUSE_REG_GEN32)
1386 || !(cpu.Param2.flags & (DISUSE_DISPLACEMENT8|DISUSE_DISPLACEMENT16|DISUSE_DISPLACEMENT32))
1387 || cpu.Param2.uValue != 0
1388 )
1389 )
1390 || cpu.Param1.base.reg_gen32 != cpu.Param2.base.reg_gen32
1391 )
1392 {
1393 STAM_COUNTER_INC(&pVM->csam.s.StatScanNextFunctionFailed);
1394 goto next_function;
1395 }
1396 break;
1397
1398 case OP_PUSH:
1399 {
1400 if ( (pCurInstrGC & 0x3) != 0
1401 || cpu.Param1.fUse != DISUSE_REG_GEN32
1402 || cpu.Param1.base.reg_gen32 != USE_REG_EBP
1403 )
1404 {
1405 STAM_COUNTER_INC(&pVM->csam.s.StatScanNextFunctionFailed);
1406 goto next_function;
1407 }
1408
1409 if (csamIsCodeScanned(pVM, pCurInstrGC, &pPage) == false)
1410 {
1411 CSAMCALLEXITREC CallExitRec2;
1412 CallExitRec2.cInstrAfterRet = 0;
1413
1414 pCacheRec->pCallExitRec = &CallExitRec2;
1415
1416 /* Analyse the function. */
1417 Log(("Found new function at %RRv\n", pCurInstrGC));
1418 STAM_COUNTER_INC(&pVM->csam.s.StatScanNextFunction);
1419 csamAnalyseCallCodeStream(pVM, pInstrGC, pCurInstrGC, fCode32, pfnCSAMR3Analyse, pUserData, pCacheRec);
1420 }
1421 goto next_function;
1422 }
1423
1424 case OP_SUB:
1425 {
1426 if ( (pCurInstrGC & 0x3) != 0
1427 || cpu.Param1.fUse != DISUSE_REG_GEN32
1428 || cpu.Param1.base.reg_gen32 != USE_REG_ESP
1429 )
1430 {
1431 STAM_COUNTER_INC(&pVM->csam.s.StatScanNextFunctionFailed);
1432 goto next_function;
1433 }
1434
1435 if (csamIsCodeScanned(pVM, pCurInstrGC, &pPage) == false)
1436 {
1437 CSAMCALLEXITREC CallExitRec2;
1438 CallExitRec2.cInstrAfterRet = 0;
1439
1440 pCacheRec->pCallExitRec = &CallExitRec2;
1441
1442 /* Analyse the function. */
1443 Log(("Found new function at %RRv\n", pCurInstrGC));
1444 STAM_COUNTER_INC(&pVM->csam.s.StatScanNextFunction);
1445 csamAnalyseCallCodeStream(pVM, pInstrGC, pCurInstrGC, fCode32, pfnCSAMR3Analyse, pUserData, pCacheRec);
1446 }
1447 goto next_function;
1448 }
1449
1450 default:
1451 STAM_COUNTER_INC(&pVM->csam.s.StatScanNextFunctionFailed);
1452 goto next_function;
1453 }
1454 /* Mark it as scanned. */
1455 csamMarkCode(pVM, pPage, pCurInstrGC, cbInstr, true);
1456 pCurInstrGC += cbInstr;
1457 } /* for at most 16 instructions */
1458next_function:
1459 ; /* MSVC complains otherwise */
1460 }
1461 }
1462done:
1463 pCacheRec->pCallExitRec = pOldCallRec;
1464 return rc;
1465}
1466#else
1467#define csamAnalyseCallCodeStream csamAnalyseCodeStream
1468#endif
1469
1470/**
1471 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
1472 *
1473 * @returns VBox status code.
1474 * @param pVM The cross context VM structure.
1475 * @param pInstrGC Guest context pointer to privileged instruction
1476 * @param pCurInstrGC Guest context pointer to the current instruction
1477 * @param fCode32 16 or 32 bits code
1478 * @param pfnCSAMR3Analyse Callback for testing the disassembled instruction
1479 * @param pUserData User pointer (callback specific)
1480 * @param pCacheRec GC to HC cache record.
1481 */
1482static int csamAnalyseCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, bool fCode32,
1483 PFN_CSAMR3ANALYSE pfnCSAMR3Analyse, void *pUserData, PCSAMP2GLOOKUPREC pCacheRec)
1484{
1485 DISCPUSTATE cpu;
1486 PCSAMPAGE pPage = (PCSAMPAGE)pUserData;
1487 int rc = VWRN_CONTINUE_ANALYSIS;
1488 uint32_t cbInstr;
1489 int rc2;
1490 Assert(pVM->cCpus == 1);
1491 PVMCPU pVCpu = VMMGetCpu0(pVM);
1492
1493#ifdef DEBUG
1494 char szOutput[256];
1495#endif
1496
1497 LogFlow(("csamAnalyseCodeStream: code at %RRv depth=%d\n", pCurInstrGC, pCacheRec->depth));
1498
1499 pVM->csam.s.fScanningStarted = true;
1500
1501 pCacheRec->depth++;
1502 /*
1503 * Limit the call depth. (rather arbitrary upper limit; too low and we won't detect certain
1504 * cpuid instructions in Linux kernels; too high and we waste too much time scanning code)
1505 * (512 is necessary to detect cpuid instructions in Red Hat EL4; see defect 1355)
1506 * @note we are using a lot of stack here; a couple of hundred KB when we go to the full depth (!)
1507 */
1508 if (pCacheRec->depth > 512)
1509 {
1510 LogFlow(("CSAM: maximum calldepth reached for %RRv\n", pCurInstrGC));
1511 pCacheRec->depth--;
1512 return VINF_SUCCESS; //let's not go on forever
1513 }
1514
1515 Assert(!PATMIsPatchGCAddr(pVM, pCurInstrGC));
1516 csamR3CheckPageRecord(pVM, pCurInstrGC);
1517
1518 while (rc == VWRN_CONTINUE_ANALYSIS)
1519 {
1520 if (csamIsCodeScanned(pVM, pCurInstrGC, &pPage) == false)
1521 {
1522 if (pPage == NULL)
1523 {
1524 /* New address; let's take a look at it. */
1525 pPage = csamR3CreatePageRecord(pVM, pCurInstrGC, CSAM_TAG_CSAM, fCode32);
1526 if (pPage == NULL)
1527 {
1528 rc = VERR_NO_MEMORY;
1529 goto done;
1530 }
1531 }
1532 }
1533 else
1534 {
1535 LogFlow(("Code at %RRv has been scanned before\n", pCurInstrGC));
1536 rc = VINF_SUCCESS;
1537 goto done;
1538 }
1539
1540 { /* Force pCurInstrHC out of scope after we stop using it (page lock!) */
1541 uint8_t *pCurInstrHC = csamR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
1542 if (pCurInstrHC == NULL)
1543 {
1544 Log(("csamR3GCVirtToHCVirt failed for %RRv\n", pCurInstrGC));
1545 rc = VERR_PATCHING_REFUSED;
1546 goto done;
1547 }
1548 Assert(VALID_PTR(pCurInstrHC));
1549
1550 STAM_PROFILE_START(&pVM->csam.s.StatTimeDisasm, a);
1551#ifdef DEBUG
1552 rc2 = csamR3DISInstr(pVM, pCurInstrGC, pCurInstrHC, fCode32 ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
1553 &cpu, &cbInstr, szOutput, sizeof(szOutput));
1554 if (RT_SUCCESS(rc2)) Log(("CSAM Analysis: %s", szOutput));
1555#else
1556 rc2 = csamR3DISInstr(pVM, pCurInstrGC, pCurInstrHC, fCode32 ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
1557 &cpu, &cbInstr, NULL, 0);
1558#endif
1559 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeDisasm, a);
1560 }
1561 if (RT_FAILURE(rc2))
1562 {
1563 Log(("Disassembly failed at %RRv with %Rrc (probably page not present) -> return to caller\n", pCurInstrGC, rc2));
1564 rc = VINF_SUCCESS;
1565 goto done;
1566 }
1567
1568 STAM_COUNTER_ADD(&pVM->csam.s.StatNrBytesRead, cbInstr);
1569
1570 csamMarkCode(pVM, pPage, pCurInstrGC, cbInstr, true);
1571
1572 RCPTRTYPE(uint8_t *) addr = 0;
1573 PCSAMPAGE pJmpPage = NULL;
1574
1575 if (PAGE_ADDRESS(pCurInstrGC) != PAGE_ADDRESS(pCurInstrGC + cbInstr - 1))
1576 {
1577 if (!PGMGstIsPagePresent(pVCpu, pCurInstrGC + cbInstr - 1))
1578 {
1579 /// @todo fault in the page
1580 Log(("Page for current instruction %RRv is not present!!\n", pCurInstrGC));
1581 rc = VWRN_CONTINUE_ANALYSIS;
1582 goto next_please;
1583 }
1584 //all is fine, let's continue
1585 csamR3CheckPageRecord(pVM, pCurInstrGC + cbInstr - 1);
1586 }
1587 /*
1588 * If it's harmless, then don't bother checking it (the disasm tables had better be accurate!)
1589 */
1590 if ((cpu.pCurInstr->fOpType & ~DISOPTYPE_RRM_MASK) == DISOPTYPE_HARMLESS)
1591 {
1592 AssertMsg(pfnCSAMR3Analyse(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec, (void *)pPage) == VWRN_CONTINUE_ANALYSIS, ("Instruction incorrectly marked harmless?!?!?\n"));
1593 rc = VWRN_CONTINUE_ANALYSIS;
1594 goto next_please;
1595 }
1596
1597#ifdef CSAM_ANALYSE_BEYOND_RET
1598 /* Remember the address of the instruction following the ret in case the parent instruction was a call. */
1599 if ( pCacheRec->pCallExitRec
1600 && cpu.pCurInstr->uOpcode == OP_RETN
1601 && pCacheRec->pCallExitRec->cInstrAfterRet < CSAM_MAX_CALLEXIT_RET)
1602 {
1603 pCacheRec->pCallExitRec->pInstrAfterRetGC[pCacheRec->pCallExitRec->cInstrAfterRet] = pCurInstrGC + cbInstr;
1604 pCacheRec->pCallExitRec->cInstrAfterRet++;
1605 }
1606#endif
1607
1608 rc = pfnCSAMR3Analyse(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec, (void *)pPage);
1609 if (rc == VINF_SUCCESS)
1610 goto done;
1611
1612 // For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction)
1613 if ( ((cpu.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW) && (OP_PARM_VTYPE(cpu.pCurInstr->fParam1) == OP_PARM_J))
1614 || (cpu.pCurInstr->uOpcode == OP_CALL && cpu.Param1.fUse == DISUSE_DISPLACEMENT32)) /* simple indirect call (call dword ptr [address]) */
1615 {
1616 /* We need to parse 'call dword ptr [address]' type of calls to catch cpuid instructions in some recent Linux distributions (e.g. OpenSuse 10.3) */
1617 if ( cpu.pCurInstr->uOpcode == OP_CALL
1618 && cpu.Param1.fUse == DISUSE_DISPLACEMENT32)
1619 {
1620 addr = 0;
1621 PGMPhysSimpleReadGCPtr(pVCpu, &addr, (RTRCUINTPTR)cpu.Param1.uDisp.i32, sizeof(addr));
1622 }
1623 else
1624 addr = CSAMResolveBranch(&cpu, pCurInstrGC);
1625
1626 if (addr == 0)
1627 {
1628 Log(("We don't support far jumps here!! (%08X)\n", cpu.Param1.fUse));
1629 rc = VINF_SUCCESS;
1630 break;
1631 }
1632 Assert(!PATMIsPatchGCAddr(pVM, addr));
1633
1634 /* If the target address lies in a patch generated jump, then special action needs to be taken. */
1635 PATMR3DetectConflict(pVM, pCurInstrGC, addr);
1636
1637 /* Same page? */
1638 if (PAGE_ADDRESS(addr) != PAGE_ADDRESS(pCurInstrGC ))
1639 {
1640 if (!PGMGstIsPagePresent(pVCpu, addr))
1641 {
1642 Log(("Page for current instruction %RRv is not present!!\n", addr));
1643 rc = VWRN_CONTINUE_ANALYSIS;
1644 goto next_please;
1645 }
1646
1647 /* All is fine, let's continue. */
1648 csamR3CheckPageRecord(pVM, addr);
1649 }
1650
1651 pJmpPage = NULL;
1652 if (csamIsCodeScanned(pVM, addr, &pJmpPage) == false)
1653 {
1654 if (pJmpPage == NULL)
1655 {
1656 /* New branch target; let's take a look at it. */
1657 pJmpPage = csamR3CreatePageRecord(pVM, addr, CSAM_TAG_CSAM, fCode32);
1658 if (pJmpPage == NULL)
1659 {
1660 rc = VERR_NO_MEMORY;
1661 goto done;
1662 }
1663 Assert(pPage);
1664 }
1665 if (cpu.pCurInstr->uOpcode == OP_CALL)
1666 rc = csamAnalyseCallCodeStream(pVM, pInstrGC, addr, fCode32, pfnCSAMR3Analyse, (void *)pJmpPage, pCacheRec);
1667 else
1668 rc = csamAnalyseCodeStream(pVM, pInstrGC, addr, fCode32, pfnCSAMR3Analyse, (void *)pJmpPage, pCacheRec);
1669
1670 if (rc != VINF_SUCCESS) {
1671 goto done;
1672 }
1673 }
1674 if (cpu.pCurInstr->uOpcode == OP_JMP)
1675 {//unconditional jump; return to caller
1676 rc = VINF_SUCCESS;
1677 goto done;
1678 }
1679
1680 rc = VWRN_CONTINUE_ANALYSIS;
1681 } //if ((cpu.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW) && (OP_PARM_VTYPE(cpu.pCurInstr->fParam1) == OP_PARM_J))
1682#ifdef CSAM_SCAN_JUMP_TABLE
1683 else
1684 if ( cpu.pCurInstr->uOpcode == OP_JMP
1685 && (cpu.Param1.fUse & (DISUSE_DISPLACEMENT32|DISUSE_INDEX|DISUSE_SCALE)) == (DISUSE_DISPLACEMENT32|DISUSE_INDEX|DISUSE_SCALE)
1686 )
1687 {
1688            RTRCPTR pJumpTableGC = (RTRCPTR)cpu.Param1.uDisp.i32;
1689            uint8_t *pJumpTableHC;
                PGMPAGEMAPLOCK PageLock;
1690            int rc2;
1691
1692            Log(("Jump through jump table\n"));
1693
                /* Map the jump table page read-only; the mapping lock is released on every exit path below. */
1694            rc2 = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, pJumpTableGC, (void const **)&pJumpTableHC, &PageLock);
1695 if (rc2 == VINF_SUCCESS)
1696 {
1697 for (uint32_t i=0;i<2;i++)
1698 {
1699 uint64_t fFlags;
1700
1701                addr = pJumpTableGC + cpu.Param1.uScale * i;
1702                /* Same page? */
1703                if (PAGE_ADDRESS(addr) != PAGE_ADDRESS(pJumpTableGC))
1704                    break;
1705
1706                addr = *(RTRCPTR *)(pJumpTableHC + cpu.Param1.uScale * i);
1707
1708 rc2 = PGMGstGetPage(pVCpu, addr, &fFlags, NULL);
1709 if ( rc2 != VINF_SUCCESS
1710 || (fFlags & X86_PTE_US)
1711 || !(fFlags & X86_PTE_P)
1712 )
1713 break;
1714
1715 Log(("Jump to %RRv\n", addr));
1716
1717 pJmpPage = NULL;
1718 if (csamIsCodeScanned(pVM, addr, &pJmpPage) == false)
1719 {
1720 if (pJmpPage == NULL)
1721 {
1722 /* New branch target; let's take a look at it. */
1723 pJmpPage = csamR3CreatePageRecord(pVM, addr, CSAM_TAG_CSAM, fCode32);
1724 if (pJmpPage == NULL)
1725 {
                                    PGMPhysReleasePageMappingLock(pVM, &PageLock);
1726                                    rc = VERR_NO_MEMORY;
1727                                    goto done;
1728 }
1729 Assert(pPage);
1730 }
1731 rc = csamAnalyseCodeStream(pVM, pInstrGC, addr, fCode32, pfnCSAMR3Analyse, (void *)pJmpPage, pCacheRec);
1732                        if (rc != VINF_SUCCESS) {
                                PGMPhysReleasePageMappingLock(pVM, &PageLock);
1733                            goto done;
1734 }
1735 }
1736 }
                PGMPhysReleasePageMappingLock(pVM, &PageLock);
1737            }
1738 }
1739#endif
1740 if (rc != VWRN_CONTINUE_ANALYSIS) {
1741 break; //done!
1742 }
1743next_please:
1744 if (cpu.pCurInstr->uOpcode == OP_JMP)
1745 {
1746 rc = VINF_SUCCESS;
1747 goto done;
1748 }
1749 pCurInstrGC += cbInstr;
1750 }
1751done:
1752 pCacheRec->depth--;
1753 return rc;
1754}
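
/*
 * Illustrative sketch (not part of the original source): the return value
 * contract the scanner above relies on. A PFN_CSAMR3ANALYSE callback returns
 * VWRN_CONTINUE_ANALYSIS to keep walking the instruction stream and
 * VINF_SUCCESS to stop. The callback name and its body are hypothetical.
 */
#if 0
static int csamR3ExampleAnalyse(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC,
                                RCPTRTYPE(uint8_t *) pCurInstrGC, PCSAMP2GLOOKUPREC pCacheRec, void *pUserData)
{
    RT_NOREF(pVM, pInstrGC, pCurInstrGC, pCacheRec, pUserData);
    /* Harmless instruction: tell the scanner to keep going. */
    if ((pCpu->pCurInstr->fOpType & ~DISOPTYPE_RRM_MASK) == DISOPTYPE_HARMLESS)
        return VWRN_CONTINUE_ANALYSIS;
    /* Anything else would be inspected/patched here; stop the walk. */
    return VINF_SUCCESS;
}
#endif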
1755
1756
1757/**
1758 * Calculates the 64-bit hash value for the given guest page.
1759 *
1760 * @returns The hash value; ~0ULL is reserved for "page not present".
1761 * @param pVM The cross context VM structure.
1762 * @param pInstr Page address
1763 */
1764uint64_t csamR3CalcPageHash(PVM pVM, RTRCPTR pInstr)
1765{
1766 uint64_t hash = 0;
1767 uint32_t val[5];
1768 int rc;
1769 Assert(pVM->cCpus == 1);
1770 PVMCPU pVCpu = VMMGetCpu0(pVM);
1771
1772 Assert((pInstr & PAGE_OFFSET_MASK) == 0);
1773
1774 rc = PGMPhysSimpleReadGCPtr(pVCpu, &val[0], pInstr, sizeof(val[0]));
1775 if (RT_SUCCESS(rc))
1776 { /* likely */ }
1777 else
1778 {
1779 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS)
1780 {
1781 Log(("csamR3CalcPageHash: page %RRv not present/invalid!!\n", pInstr));
1782 return ~0ULL;
1783 }
1784 AssertMsgFailed(("rc = %Rrc %RRv\n", rc, pInstr));
1785 }
1786
1787 rc = PGMPhysSimpleReadGCPtr(pVCpu, &val[1], pInstr+1024, sizeof(val[0]));
1788 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc = %Rrc\n", rc));
1789 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
1790 {
1791 Log(("csamR3CalcPageHash: page %RRv not present!!\n", pInstr));
1792 return ~0ULL;
1793 }
1794
1795 rc = PGMPhysSimpleReadGCPtr(pVCpu, &val[2], pInstr+2048, sizeof(val[0]));
1796 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc = %Rrc\n", rc));
1797 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
1798 {
1799 Log(("csamR3CalcPageHash: page %RRv not present!!\n", pInstr));
1800 return ~0ULL;
1801 }
1802
1803 rc = PGMPhysSimpleReadGCPtr(pVCpu, &val[3], pInstr+3072, sizeof(val[0]));
1804 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc = %Rrc\n", rc));
1805 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
1806 {
1807 Log(("csamR3CalcPageHash: page %RRv not present!!\n", pInstr));
1808 return ~0ULL;
1809 }
1810
1811 rc = PGMPhysSimpleReadGCPtr(pVCpu, &val[4], pInstr+4092, sizeof(val[0]));
1812 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc = %Rrc\n", rc));
1813 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
1814 {
1815 Log(("csamR3CalcPageHash: page %RRv not present!!\n", pInstr));
1816 return ~0ULL;
1817 }
1818
1819 // don't want to get division by zero traps
1820 val[2] |= 1;
1821 val[4] |= 1;
1822
1823 hash = (uint64_t)val[0] * (uint64_t)val[1] / (uint64_t)val[2] + (val[3]%val[4]);
1824 return (hash == ~0ULL) ? hash - 1 : hash;
1825}
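
/*
 * Illustrative sketch (not part of the original source): how the sampled page
 * hash is used to detect that guest code changed underneath a page record.
 * This mirrors csamR3CheckPageRecord() below; pPageRec and GCPtrPage are
 * assumed to be an already looked-up record and its page-aligned address.
 */
#if 0
    uint64_t u64Hash = csamR3CalcPageHash(pVM, GCPtrPage);
    if (u64Hash != pPageRec->page.u64Hash)
        csamFlushPage(pVM, GCPtrPage, false /* keep the record, just rescan */);
#endif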
1826
1827
1828/**
1829 * Notify CSAM of a page flush
1830 *
1831 * @returns VBox status code
1832 * @param pVM The cross context VM structure.
1833 * @param addr GC address of the page to flush
1834 * @param fRemovePage Page removal flag
1835 */
1836static int csamFlushPage(PVM pVM, RTRCPTR addr, bool fRemovePage)
1837{
1838 PCSAMPAGEREC pPageRec;
1839 int rc;
1840 RTGCPHYS GCPhys = 0;
1841 uint64_t fFlags = 0;
1842 Assert(pVM->cCpus == 1 || !CSAMIsEnabled(pVM));
1843
1844 if (!CSAMIsEnabled(pVM))
1845 return VINF_SUCCESS;
1846 Assert(VM_IS_RAW_MODE_ENABLED(pVM));
1847
1848 PVMCPU pVCpu = VMMGetCpu0(pVM);
1849
1850 STAM_PROFILE_START(&pVM->csam.s.StatTimeFlushPage, a);
1851
1852 addr = addr & PAGE_BASE_GC_MASK;
1853
1854 /*
1855 * Note: searching for the page in our tree first is more expensive (skipped flushes are two orders of magnitude more common)
1856 */
1857 if (pVM->csam.s.pPageTree == NULL)
1858 {
1859 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeFlushPage, a);
1860 return VWRN_CSAM_PAGE_NOT_FOUND;
1861 }
1862
1863 rc = PGMGstGetPage(pVCpu, addr, &fFlags, &GCPhys);
1864    /* Can be returned very early during init, before guest paging is set up. */
1865 if (rc == VERR_NOT_SUPPORTED)
1866 {
1867 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeFlushPage, a);
1868 return rc;
1869 }
1870
1871 if (RT_SUCCESS(rc))
1872 {
1873 if ( (fFlags & X86_PTE_US)
1874 || rc == VERR_PGM_PHYS_PAGE_RESERVED
1875 )
1876 {
1877 /* User page -> not relevant for us. */
1878 STAM_COUNTER_ADD(&pVM->csam.s.StatNrFlushesSkipped, 1);
1879 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeFlushPage, a);
1880 return VINF_SUCCESS;
1881 }
1882 }
1883 else
1884 if (rc != VERR_PAGE_NOT_PRESENT && rc != VERR_PAGE_TABLE_NOT_PRESENT)
1885 AssertMsgFailed(("PGMR3GetPage %RRv failed with %Rrc\n", addr, rc));
1886
1887 pPageRec = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree, (AVLPVKEY)(uintptr_t)addr);
1888 if (pPageRec)
1889 {
1890 if ( GCPhys == pPageRec->page.GCPhys
1891 && (fFlags & X86_PTE_P))
1892 {
1893 STAM_COUNTER_ADD(&pVM->csam.s.StatNrFlushesSkipped, 1);
1894 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeFlushPage, a);
1895 return VINF_SUCCESS;
1896 }
1897
1898 Log(("CSAMR3FlushPage: page %RRv has changed -> FLUSH (rc=%Rrc) (Phys: %RGp vs %RGp)\n", addr, rc, GCPhys, pPageRec->page.GCPhys));
1899
1900 STAM_COUNTER_ADD(&pVM->csam.s.StatNrFlushes, 1);
1901
1902 if (fRemovePage)
1903 csamRemovePageRecord(pVM, addr);
1904 else
1905 {
1906 CSAMMarkPage(pVM, addr, false);
1907 pPageRec->page.GCPhys = 0;
1908 pPageRec->page.fFlags = 0;
1909 rc = PGMGstGetPage(pVCpu, addr, &pPageRec->page.fFlags, &pPageRec->page.GCPhys);
1910 if (rc == VINF_SUCCESS)
1911 pPageRec->page.u64Hash = csamR3CalcPageHash(pVM, addr);
1912
1913 if (pPageRec->page.pBitmap == NULL)
1914 {
1915 pPageRec->page.pBitmap = (uint8_t *)MMR3HeapAllocZ(pVM, MM_TAG_CSAM_PATCH, CSAM_PAGE_BITMAP_SIZE);
1916 Assert(pPageRec->page.pBitmap);
1917 if (pPageRec->page.pBitmap == NULL)
1918 return VERR_NO_MEMORY;
1919 }
1920 else
1921 memset(pPageRec->page.pBitmap, 0, CSAM_PAGE_BITMAP_SIZE);
1922 }
1923
1924
1925 /*
1926 * Inform patch manager about the flush; no need to repeat the above check twice.
1927 */
1928 PATMR3FlushPage(pVM, addr);
1929
1930 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeFlushPage, a);
1931 return VINF_SUCCESS;
1932 }
1933 else
1934 {
1935 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeFlushPage, a);
1936 return VWRN_CSAM_PAGE_NOT_FOUND;
1937 }
1938}
1939
1940/**
1941 * Notify CSAM of a page flush
1942 *
1943 * @returns VBox status code
1944 * @param pVM The cross context VM structure.
1945 * @param addr GC address of the page to flush
1946 */
1947VMMR3_INT_DECL(int) CSAMR3FlushPage(PVM pVM, RTRCPTR addr)
1948{
1949 return csamFlushPage(pVM, addr, true /* remove page record */);
1950}
1951
1952/**
1953 * Remove a CSAM monitored page. Use with care!
1954 *
1955 * @returns VBox status code
1956 * @param pVM The cross context VM structure.
1957 * @param addr GC address of the page to remove
1958 */
1959VMMR3_INT_DECL(int) CSAMR3RemovePage(PVM pVM, RTRCPTR addr)
1960{
1961 PCSAMPAGEREC pPageRec;
1962 int rc;
1963
1964 AssertReturn(VM_IS_RAW_MODE_ENABLED(pVM), VERR_CSAM_HM_IPE);
1965
1966 addr = addr & PAGE_BASE_GC_MASK;
1967
1968 pPageRec = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree, (AVLPVKEY)(uintptr_t)addr);
1969 if (pPageRec)
1970 {
1971 rc = csamRemovePageRecord(pVM, addr);
1972 if (RT_SUCCESS(rc))
1973 PATMR3FlushPage(pVM, addr);
1974 return VINF_SUCCESS;
1975 }
1976 return VWRN_CSAM_PAGE_NOT_FOUND;
1977}
1978
1979/**
1980 * Check a page record in case a page has been changed
1981 *
1982 * @returns VBox status code (VWRN_CSAM_PAGE_NOT_FOUND if no page record exists).
1983 * @param pVM The cross context VM structure.
1984 * @param pInstrGC GC instruction pointer
1985 */
1986int csamR3CheckPageRecord(PVM pVM, RTRCPTR pInstrGC)
1987{
1988 PCSAMPAGEREC pPageRec;
1989 uint64_t u64hash;
1990
1991 pInstrGC = pInstrGC & PAGE_BASE_GC_MASK;
1992
1993 pPageRec = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree, (AVLPVKEY)(uintptr_t)pInstrGC);
1994 if (pPageRec)
1995 {
1996 u64hash = csamR3CalcPageHash(pVM, pInstrGC);
1997 if (u64hash != pPageRec->page.u64Hash)
1998 csamFlushPage(pVM, pInstrGC, false /* don't remove page record */);
1999 }
2000 else
2001 return VWRN_CSAM_PAGE_NOT_FOUND;
2002
2003 return VINF_SUCCESS;
2004}
2005
2006/**
2007 * Returns monitor description based on CSAM tag
2008 *
2009 * @return description string
2010 * @param enmTag Owner tag
2011 */
2012const char *csamGetMonitorDescription(CSAMTAG enmTag)
2013{
2014 if (enmTag == CSAM_TAG_PATM)
2015 return "CSAM-PATM self-modifying code monitor handler";
2016 else
2017 if (enmTag == CSAM_TAG_REM)
2018 return "CSAM-REM self-modifying code monitor handler";
2019 Assert(enmTag == CSAM_TAG_CSAM);
2020 return "CSAM self-modifying code monitor handler";
2021}
2022
2023/**
2024 * Adds page record to our lookup tree
2025 *
2026 * @returns CSAMPAGE pointer, or NULL on failure.
2027 * @param pVM The cross context VM structure.
2028 * @param GCPtr Page address
2029 * @param enmTag Owner tag
2030 * @param fCode32 Set if 32-bit code, clear if 16-bit code.
2031 * @param fMonitorInvalidation Monitor page invalidation flag
2032 */
2033static PCSAMPAGE csamR3CreatePageRecord(PVM pVM, RTRCPTR GCPtr, CSAMTAG enmTag, bool fCode32, bool fMonitorInvalidation)
2034{
2035 PCSAMPAGEREC pPage;
2036 int rc;
2037 bool ret;
2038 Assert(pVM->cCpus == 1);
2039 PVMCPU pVCpu = VMMGetCpu0(pVM);
2040
2041 Log(("New page record for %RRv\n", GCPtr & PAGE_BASE_GC_MASK));
2042
2043 pPage = (PCSAMPAGEREC)MMR3HeapAllocZ(pVM, MM_TAG_CSAM_PATCH, sizeof(CSAMPAGEREC));
2044 if (pPage == NULL)
2045 {
2046 AssertMsgFailed(("csamR3CreatePageRecord: Out of memory!!!!\n"));
2047 return NULL;
2048 }
2049 /* Round down to page boundary. */
2050 GCPtr = (GCPtr & PAGE_BASE_GC_MASK);
2051 pPage->Core.Key = (AVLPVKEY)(uintptr_t)GCPtr;
2052 pPage->page.pPageGC = GCPtr;
2053 pPage->page.fCode32 = fCode32;
2054 pPage->page.fMonitorInvalidation = fMonitorInvalidation;
2055 pPage->page.enmTag = enmTag;
2056 pPage->page.fMonitorActive = false;
2057 pPage->page.pBitmap = (uint8_t *)MMR3HeapAllocZ(pVM, MM_TAG_CSAM_PATCH, PAGE_SIZE/sizeof(uint8_t));
2058 rc = PGMGstGetPage(pVCpu, GCPtr, &pPage->page.fFlags, &pPage->page.GCPhys);
2059 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc = %Rrc\n", rc));
2060
2061 pPage->page.u64Hash = csamR3CalcPageHash(pVM, GCPtr);
2062 ret = RTAvlPVInsert(&pVM->csam.s.pPageTree, &pPage->Core);
2063 Assert(ret);
2064
2065#ifdef CSAM_MONITOR_CODE_PAGES
2066 AssertRelease(!g_fInCsamR3CodePageInvalidate);
2067
2068 switch (enmTag)
2069 {
2070 case CSAM_TAG_PATM:
2071 case CSAM_TAG_REM:
2072# ifdef CSAM_MONITOR_CSAM_CODE_PAGES
2073 case CSAM_TAG_CSAM:
2074# endif
2075 {
2076 rc = PGMR3HandlerVirtualRegister(pVM, pVCpu, fMonitorInvalidation
2077 ? pVM->csam.s.hCodePageWriteAndInvPgType : pVM->csam.s.hCodePageWriteType,
2078 GCPtr, GCPtr + (PAGE_SIZE - 1) /* inclusive! */,
2079 pPage, NIL_RTRCPTR, csamGetMonitorDescription(enmTag));
2080 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT,
2081 ("PGMR3HandlerVirtualRegister %RRv failed with %Rrc\n", GCPtr, rc));
2082 if (RT_FAILURE(rc))
2083 Log(("PGMR3HandlerVirtualRegister for %RRv failed with %Rrc\n", GCPtr, rc));
2084
2085 /* Could fail, because it's already monitored. Don't treat that condition as fatal. */
2086
2087 /* Prefetch it in case it's not there yet. */
2088 rc = PGMPrefetchPage(pVCpu, GCPtr);
2089 AssertRC(rc);
2090
2091 rc = PGMShwMakePageReadonly(pVCpu, GCPtr, 0 /*fFlags*/);
2092 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2093
2094 pPage->page.fMonitorActive = true;
2095 STAM_COUNTER_INC(&pVM->csam.s.StatPageMonitor);
2096 break;
2097 }
2098 default:
2099 break; /* to shut up GCC */
2100 }
2101
2102 Log(("csamR3CreatePageRecord %RRv GCPhys=%RGp\n", GCPtr, pPage->page.GCPhys));
2103
2104# ifdef VBOX_WITH_STATISTICS
2105 switch (enmTag)
2106 {
2107 case CSAM_TAG_CSAM:
2108 STAM_COUNTER_INC(&pVM->csam.s.StatPageCSAM);
2109 break;
2110 case CSAM_TAG_PATM:
2111 STAM_COUNTER_INC(&pVM->csam.s.StatPagePATM);
2112 break;
2113 case CSAM_TAG_REM:
2114 STAM_COUNTER_INC(&pVM->csam.s.StatPageREM);
2115 break;
2116 default:
2117 break; /* to shut up GCC */
2118 }
2119# endif
2120
2121#endif
2122
2123 STAM_COUNTER_INC(&pVM->csam.s.StatNrPages);
2124 if (fMonitorInvalidation)
2125 STAM_COUNTER_INC(&pVM->csam.s.StatNrPagesInv);
2126
2127 return &pPage->page;
2128}
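
/*
 * Illustrative sketch (not part of the original source): the lookup pattern
 * used for the page record tree. The key is always the page-aligned guest
 * address, so any address inside the page finds the same record. GCPtrAny is
 * a hypothetical guest address.
 */
#if 0
    RTRCPTR      GCPtrPage = GCPtrAny & PAGE_BASE_GC_MASK;
    PCSAMPAGEREC pRec      = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree, (AVLPVKEY)(uintptr_t)GCPtrPage);
    if (pRec)
        Log(("Found page record for %RRv (GCPhys=%RGp)\n", GCPtrPage, pRec->page.GCPhys));
#endif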
2129
2130/**
2131 * Monitors a code page (if not already monitored)
2132 *
2133 * @returns VBox status code
2134 * @param pVM The cross context VM structure.
2135 * @param pPageAddrGC The page to monitor
2136 * @param enmTag Monitor tag
2137 */
2138VMMR3DECL(int) CSAMR3MonitorPage(PVM pVM, RTRCPTR pPageAddrGC, CSAMTAG enmTag)
2139{
2141 int rc;
2142 bool fMonitorInvalidation;
2143 Assert(pVM->cCpus == 1);
2144 PVMCPU pVCpu = VMMGetCpu0(pVM);
2145 Assert(VM_IS_RAW_MODE_ENABLED(pVM));
2146
2147    /* Dirty pages must be handled before calling this function! */
2148 Assert(!pVM->csam.s.cDirtyPages);
2149
2150 if (pVM->csam.s.fScanningStarted == false)
2151 return VINF_SUCCESS; /* too early */
2152
2153 pPageAddrGC &= PAGE_BASE_GC_MASK;
2154
2155 Log(("CSAMR3MonitorPage %RRv %d\n", pPageAddrGC, enmTag));
2156
2157    /** @todo Implicit assumption: only PATM-tagged pages need invalidation monitoring. */
2158 fMonitorInvalidation = (enmTag == CSAM_TAG_PATM);
2159
2160 PCSAMPAGEREC pPageRec = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree, (AVLPVKEY)(uintptr_t)pPageAddrGC);
2161 if (pPageRec == NULL)
2162 {
2163 uint64_t fFlags;
2164
2165 rc = PGMGstGetPage(pVCpu, pPageAddrGC, &fFlags, NULL);
2166 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc = %Rrc\n", rc));
2167 if ( rc == VINF_SUCCESS
2168 && (fFlags & X86_PTE_US))
2169 {
2170 /* We don't care about user pages. */
2171 STAM_COUNTER_INC(&pVM->csam.s.StatNrUserPages);
2172 return VINF_SUCCESS;
2173 }
2174
2175 csamR3CreatePageRecord(pVM, pPageAddrGC, enmTag, true /* 32 bits code */, fMonitorInvalidation);
2176
2177 pPageRec = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree, (AVLPVKEY)(uintptr_t)pPageAddrGC);
2178 Assert(pPageRec);
2179 }
2180 /** @todo reference count */
2181
2182#ifdef CSAM_MONITOR_CSAM_CODE_PAGES
2183 Assert(pPageRec->page.fMonitorActive);
2184#endif
2185
2186#ifdef CSAM_MONITOR_CODE_PAGES
2187 if (!pPageRec->page.fMonitorActive)
2188 {
2189 Log(("CSAMR3MonitorPage: activate monitoring for %RRv\n", pPageAddrGC));
2190
2191 rc = PGMR3HandlerVirtualRegister(pVM, pVCpu, fMonitorInvalidation
2192 ? pVM->csam.s.hCodePageWriteAndInvPgType : pVM->csam.s.hCodePageWriteType,
2193 pPageAddrGC, pPageAddrGC + (PAGE_SIZE - 1) /* inclusive! */,
2194 pPageRec, NIL_RTRCPTR /*pvUserRC*/, csamGetMonitorDescription(enmTag));
2195 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT,
2196 ("PGMR3HandlerVirtualRegister %RRv failed with %Rrc\n", pPageAddrGC, rc));
2197 if (RT_FAILURE(rc))
2198 Log(("PGMR3HandlerVirtualRegister for %RRv failed with %Rrc\n", pPageAddrGC, rc));
2199
2200 /* Could fail, because it's already monitored. Don't treat that condition as fatal. */
2201
2202 /* Prefetch it in case it's not there yet. */
2203 rc = PGMPrefetchPage(pVCpu, pPageAddrGC);
2204 AssertRC(rc);
2205
2206 rc = PGMShwMakePageReadonly(pVCpu, pPageAddrGC, 0 /*fFlags*/);
2207 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2208
2209 STAM_COUNTER_INC(&pVM->csam.s.StatPageMonitor);
2210
2211 pPageRec->page.fMonitorActive = true;
2212 pPageRec->page.fMonitorInvalidation = fMonitorInvalidation;
2213 }
2214 else
2215 if ( !pPageRec->page.fMonitorInvalidation
2216 && fMonitorInvalidation)
2217 {
2218 Assert(pPageRec->page.fMonitorActive);
2219 rc = PGMHandlerVirtualChangeType(pVM, pPageRec->page.pPageGC, pVM->csam.s.hCodePageWriteAndInvPgType);
2220 AssertRC(rc);
2221 pPageRec->page.fMonitorInvalidation = true;
2222 STAM_COUNTER_INC(&pVM->csam.s.StatNrPagesInv);
2223
2224 /* Prefetch it in case it's not there yet. */
2225 rc = PGMPrefetchPage(pVCpu, pPageAddrGC);
2226 AssertRC(rc);
2227
2228 /* Make sure it's readonly. Page invalidation may have modified the attributes. */
2229 rc = PGMShwMakePageReadonly(pVCpu, pPageAddrGC, 0 /*fFlags*/);
2230 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2231 }
2232
2233#if 0 /* def VBOX_STRICT -> very annoying) */
2234 if (pPageRec->page.fMonitorActive)
2235 {
2236 uint64_t fPageShw;
2237 RTHCPHYS GCPhys;
2238 rc = PGMShwGetPage(pVCpu, pPageAddrGC, &fPageShw, &GCPhys);
2239// AssertMsg( (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
2240// || !(fPageShw & X86_PTE_RW)
2241// || (pPageRec->page.GCPhys == 0), ("Shadow page flags for %RRv (%RHp) aren't readonly (%RX64)!!\n", pPageAddrGC, GCPhys, fPageShw));
2242 }
2243#endif
2244
2245 if (pPageRec->page.GCPhys == 0)
2246 {
2247 /* Prefetch it in case it's not there yet. */
2248 rc = PGMPrefetchPage(pVCpu, pPageAddrGC);
2249 AssertRC(rc);
2250 /* The page was changed behind our back. It won't be made read-only until the next SyncCR3, so force it here. */
2251 rc = PGMShwMakePageReadonly(pVCpu, pPageAddrGC, 0 /*fFlags*/);
2252 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2253 }
2254#endif /* CSAM_MONITOR_CODE_PAGES */
2255 return VINF_SUCCESS;
2256}
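
/*
 * Illustrative sketch (not part of the original source): how a client such as
 * PATM would ask CSAM to watch a guest code page for self-modifying code after
 * patching an instruction in it. GCPtrPatchedInstr is a hypothetical address.
 */
#if 0
    int rc = CSAMR3MonitorPage(pVM, GCPtrPatchedInstr, CSAM_TAG_PATM);
    AssertRC(rc); /* A failure is not fatal; the page may simply be monitored already. */
#endif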
2257
2258/**
2259 * Unmonitors a code page
2260 *
2261 * @returns VBox status code
2262 * @param pVM The cross context VM structure.
2263 * @param pPageAddrGC The page to stop monitoring
2264 * @param enmTag Monitor tag
2265 */
2266VMMR3DECL(int) CSAMR3UnmonitorPage(PVM pVM, RTRCPTR pPageAddrGC, CSAMTAG enmTag)
2267{
2268 Assert(VM_IS_RAW_MODE_ENABLED(pVM));
2269
2270 pPageAddrGC &= PAGE_BASE_GC_MASK;
2271
2272 Log(("CSAMR3UnmonitorPage %RRv %d\n", pPageAddrGC, enmTag));
2273
2274 Assert(enmTag == CSAM_TAG_REM); RT_NOREF_PV(enmTag);
2275
2276#ifdef VBOX_STRICT
2277 PCSAMPAGEREC pPageRec;
2278
2279 pPageRec = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree, (AVLPVKEY)(uintptr_t)pPageAddrGC);
2280 Assert(pPageRec && pPageRec->page.enmTag == enmTag);
2281#endif
2282 return CSAMR3RemovePage(pVM, pPageAddrGC);
2283}
2284
2285/**
2286 * Removes a page record from our lookup tree
2287 *
2288 * @returns VBox status code
2289 * @param pVM The cross context VM structure.
2290 * @param GCPtr Page address
2291 */
2292static int csamRemovePageRecord(PVM pVM, RTRCPTR GCPtr)
2293{
2294 PCSAMPAGEREC pPageRec;
2295 Assert(pVM->cCpus == 1);
2296 PVMCPU pVCpu = VMMGetCpu0(pVM);
2297
2298 Log(("csamRemovePageRecord %RRv\n", GCPtr));
2299 pPageRec = (PCSAMPAGEREC)RTAvlPVRemove(&pVM->csam.s.pPageTree, (AVLPVKEY)(uintptr_t)GCPtr);
2300
2301 if (pPageRec)
2302 {
2303 STAM_COUNTER_INC(&pVM->csam.s.StatNrRemovedPages);
2304
2305#ifdef CSAM_MONITOR_CODE_PAGES
2306 if (pPageRec->page.fMonitorActive)
2307 {
2308 /** @todo -> this is expensive (cr3 reload)!!!
2309 * if this happens often, then reuse it instead!!!
2310 */
2311 Assert(!g_fInCsamR3CodePageInvalidate);
2312 STAM_COUNTER_DEC(&pVM->csam.s.StatPageMonitor);
2313 PGMHandlerVirtualDeregister(pVM, pVCpu, GCPtr, false /*fHypervisor*/);
2314 }
2315 if (pPageRec->page.enmTag == CSAM_TAG_PATM)
2316 {
2317 /* Make sure the recompiler flushes its cache as this page is no longer monitored. */
2318 STAM_COUNTER_INC(&pVM->csam.s.StatPageRemoveREMFlush);
2319 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
2320 }
2321#endif
2322
2323#ifdef VBOX_WITH_STATISTICS
2324 switch (pPageRec->page.enmTag)
2325 {
2326 case CSAM_TAG_CSAM:
2327 STAM_COUNTER_DEC(&pVM->csam.s.StatPageCSAM);
2328 break;
2329 case CSAM_TAG_PATM:
2330 STAM_COUNTER_DEC(&pVM->csam.s.StatPagePATM);
2331 break;
2332 case CSAM_TAG_REM:
2333 STAM_COUNTER_DEC(&pVM->csam.s.StatPageREM);
2334 break;
2335 default:
2336 break; /* to shut up GCC */
2337 }
2338#endif
2339
2340 if (pPageRec->page.pBitmap) MMR3HeapFree(pPageRec->page.pBitmap);
2341 MMR3HeapFree(pPageRec);
2342 }
2343 else
2344 AssertFailed();
2345
2346 return VINF_SUCCESS;
2347}
2348
2349#if 0 /* Unused */
2350/**
2351 * Callback for delayed writes from non-EMT threads
2352 *
2353 * @param pVM The cross context VM structure.
2354 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
2355 * @param cbBuf How much it's reading/writing.
2356 */
2357static DECLCALLBACK(void) CSAMDelayedWriteHandler(PVM pVM, RTRCPTR GCPtr, size_t cbBuf)
2358{
2359 int rc = PATMR3PatchWrite(pVM, GCPtr, (uint32_t)cbBuf);
2360 AssertRC(rc);
2361}
2362#endif
2363
2364/**
2365 * \#PF Handler callback for invalidation of virtual access handler ranges.
2366 *
2367 * @param pVM The cross context VM structure.
2368 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2369 * @param GCPtr The virtual address the guest has changed.
2370 * @param pvUser Ignored.
2371 *
2372 * @remarks Not currently called by PGM. It was actually only called for a month
2373 * back in 2006...
2374 */
2375static DECLCALLBACK(int) csamR3CodePageInvalidate(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvUser)
2376{
2377 RT_NOREF2(pVCpu, pvUser);
2378
2379 g_fInCsamR3CodePageInvalidate = true;
2380 LogFlow(("csamR3CodePageInvalidate %RGv\n", GCPtr));
2381 /** @todo We can't remove the page (which unregisters the virtual handler) as we are called from a DoWithAll on the virtual handler tree. Argh. */
2382 csamFlushPage(pVM, GCPtr, false /* don't remove page! */);
2383 g_fInCsamR3CodePageInvalidate = false;
2384
2385 return VINF_SUCCESS;
2386}
2387
2388/**
2389 * Check if the current instruction has already been checked before
2390 *
2391 * @returns true if the instruction has already been scanned, false otherwise.
2392 * @param pVM The cross context VM structure.
2393 * @param pInstr Instruction pointer
2394 * @param pPage Where to return the CSAM page record pointer (in/out).
2395 */
2396bool csamIsCodeScanned(PVM pVM, RTRCPTR pInstr, PCSAMPAGE *pPage)
2397{
2398 PCSAMPAGEREC pPageRec;
2399 uint32_t offset;
2400
2401 STAM_PROFILE_START(&pVM->csam.s.StatTimeCheckAddr, a);
2402
2403 offset = pInstr & PAGE_OFFSET_MASK;
2404 pInstr = pInstr & PAGE_BASE_GC_MASK;
2405
2406 Assert(pPage);
2407
2408 if (*pPage && (*pPage)->pPageGC == pInstr)
2409 {
2410 if ((*pPage)->pBitmap == NULL || ASMBitTest((*pPage)->pBitmap, offset))
2411 {
2412 STAM_COUNTER_ADD(&pVM->csam.s.StatNrKnownPagesHC, 1);
2413 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeCheckAddr, a);
2414 return true;
2415 }
2416 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeCheckAddr, a);
2417 return false;
2418 }
2419
2420 pPageRec = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree, (AVLPVKEY)(uintptr_t)pInstr);
2421 if (pPageRec)
2422 {
2423        if (pPage) *pPage = &pPageRec->page;
2424 if (pPageRec->page.pBitmap == NULL || ASMBitTest(pPageRec->page.pBitmap, offset))
2425 {
2426 STAM_COUNTER_ADD(&pVM->csam.s.StatNrKnownPagesHC, 1);
2427 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeCheckAddr, a);
2428 return true;
2429 }
2430 }
2431 else
2432 {
2433 if (pPage) *pPage = NULL;
2434 }
2435 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeCheckAddr, a);
2436 return false;
2437}
2438
2439/**
2440 * Mark an instruction in a page as scanned/not scanned
2441 *
2442 * @param pVM The cross context VM structure.
2443 * @param pPage CSAM page record pointer
2444 * @param pInstr Instruction pointer
2445 * @param cbInstr Instruction size
2446 * @param fScanned Mark as scanned or not
2447 */
2448static void csamMarkCode(PVM pVM, PCSAMPAGE pPage, RTRCPTR pInstr, uint32_t cbInstr, bool fScanned)
2449{
2450    LogFlow(("csamMarkCode %RRv cbInstr=%d\n", pInstr, cbInstr));
2451 CSAMMarkPage(pVM, pInstr, fScanned);
2452
2453 /** @todo should recreate empty bitmap if !fScanned */
2454 if (pPage->pBitmap == NULL)
2455 return;
2456
2457 if (fScanned)
2458 {
2459 // retn instructions can be scanned more than once
2460 if (ASMBitTest(pPage->pBitmap, pInstr & PAGE_OFFSET_MASK) == 0)
2461 {
2462 pPage->uSize += cbInstr;
2463 STAM_COUNTER_ADD(&pVM->csam.s.StatNrInstr, 1);
2464 }
2465 if (pPage->uSize >= PAGE_SIZE)
2466 {
2467 Log(("Scanned full page (%RRv) -> free bitmap\n", pInstr & PAGE_BASE_GC_MASK));
2468 MMR3HeapFree(pPage->pBitmap);
2469 pPage->pBitmap = NULL;
2470 }
2471 else
2472 ASMBitSet(pPage->pBitmap, pInstr & PAGE_OFFSET_MASK);
2473 }
2474 else
2475 ASMBitClear(pPage->pBitmap, pInstr & PAGE_OFFSET_MASK);
2476}
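
/*
 * Illustrative sketch (not part of the original source): the scanned-code
 * bitmap keeps one bit per byte offset within the guest page and only the
 * first byte of each instruction is marked. pPage and pInstrGC stand in for
 * an existing page record and an instruction address.
 */
#if 0
    uint32_t offInstr = pInstrGC & PAGE_OFFSET_MASK;
    if (!ASMBitTest(pPage->pBitmap, offInstr))  /* not marked yet */
        ASMBitSet(pPage->pBitmap, offInstr);    /* mark the instruction start */
#endif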
2477
2478/**
2479 * Mark an instruction in a page as scanned/not scanned
2480 *
2481 * @returns VBox status code.
2482 * @param pVM The cross context VM structure.
2483 * @param pInstr Instruction pointer
2484 * @param cbInstr Instruction size
2485 * @param fScanned Mark as scanned or not
2486 */
2487VMMR3_INT_DECL(int) CSAMR3MarkCode(PVM pVM, RTRCPTR pInstr, uint32_t cbInstr, bool fScanned)
2488{
2489 PCSAMPAGE pPage = 0;
2490
2491 Assert(!fScanned); /* other case not implemented. */
2492 Assert(!PATMIsPatchGCAddr(pVM, pInstr));
2493 Assert(VM_IS_RAW_MODE_ENABLED(pVM));
2494
2495 if (csamIsCodeScanned(pVM, pInstr, &pPage) == false)
2496 {
2497 Assert(fScanned == true); /* other case should not be possible */
2498 return VINF_SUCCESS;
2499 }
2500
2501 Log(("CSAMR3MarkCode: %RRv size=%d fScanned=%d\n", pInstr, cbInstr, fScanned));
2502 csamMarkCode(pVM, pPage, pInstr, cbInstr, fScanned);
2503 return VINF_SUCCESS;
2504}
2505
2506
2507/**
2508 * Scan and analyse code
2509 *
2510 * @returns VBox status code.
2511 * @param pVM The cross context VM structure.
2512 * @param pCtx Guest CPU context.
2513 * @param pInstrGC Instruction pointer.
2514 */
2515VMMR3_INT_DECL(int) CSAMR3CheckCodeEx(PVM pVM, PCPUMCTX pCtx, RTRCPTR pInstrGC)
2516{
2517 Assert(VM_IS_RAW_MODE_ENABLED(pVM));
2518 if (!EMIsRawRing0Enabled(pVM) || PATMIsPatchGCAddr(pVM, pInstrGC) == true)
2519 {
2520        // Nothing to do: raw ring-0 execution is disabled or this is PATM patch code.
2521 return VINF_SUCCESS;
2522 }
2523
2524 if (CSAMIsEnabled(pVM))
2525 {
2526 /* Assuming 32 bits code for now. */
2527 Assert(CPUMGetGuestCodeBits(VMMGetCpu0(pVM)) == 32);
2528
2529 pInstrGC = SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pInstrGC);
2530 return CSAMR3CheckCode(pVM, pInstrGC);
2531 }
2532 return VINF_SUCCESS;
2533}
2534
2535/**
2536 * Scan and analyse code
2537 *
2538 * @returns VBox status code.
2539 * @param pVM The cross context VM structure.
2540 * @param pInstrGC Instruction pointer (0:32 virtual address)
2541 */
2542VMMR3_INT_DECL(int) CSAMR3CheckCode(PVM pVM, RTRCPTR pInstrGC)
2543{
2544 int rc;
2545 PCSAMPAGE pPage = NULL;
2546 Assert(VM_IS_RAW_MODE_ENABLED(pVM));
2547
2548 if ( !EMIsRawRing0Enabled(pVM)
2549 || PATMIsPatchGCAddr(pVM, pInstrGC) == true)
2550 {
2551 /* Not active. */
2552 return VINF_SUCCESS;
2553 }
2554
2555 if (CSAMIsEnabled(pVM))
2556 {
2557 /* Cache record for csamR3GCVirtToHCVirt */
2558 CSAMP2GLOOKUPREC cacheRec;
2559 RT_ZERO(cacheRec);
2560
2561 STAM_PROFILE_START(&pVM->csam.s.StatTime, a);
2562 rc = csamAnalyseCallCodeStream(pVM, pInstrGC, pInstrGC, true /* 32 bits code */, CSAMR3AnalyseCallback, pPage, &cacheRec);
2563 STAM_PROFILE_STOP(&pVM->csam.s.StatTime, a);
2564 if (cacheRec.Lock.pvMap)
2565 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2566
2567 if (rc != VINF_SUCCESS)
2568 {
2569 Log(("csamAnalyseCodeStream failed with %d\n", rc));
2570 return rc;
2571 }
2572 }
2573 return VINF_SUCCESS;
2574}
2575
2576/**
2577 * Flush dirty code pages
2578 *
2579 * @returns VBox status code.
2580 * @param pVM The cross context VM structure.
2581 */
2582static int csamR3FlushDirtyPages(PVM pVM)
2583{
2584 Assert(pVM->cCpus == 1);
2585 PVMCPU pVCpu = VMMGetCpu0(pVM);
2586
2587 STAM_PROFILE_START(&pVM->csam.s.StatFlushDirtyPages, a);
2588
2589 for (uint32_t i = 0; i < pVM->csam.s.cDirtyPages; i++)
2590 {
2591 int rc;
2592 PCSAMPAGEREC pPageRec;
2593 RTRCPTR GCPtr = pVM->csam.s.pvDirtyBasePage[i] & PAGE_BASE_GC_MASK;
2594
2595#ifdef VBOX_WITH_REM
2596 /* Notify the recompiler that this page has been changed. */
2597 REMR3NotifyCodePageChanged(pVM, pVCpu, GCPtr);
2598 if (pVM->csam.s.pvDirtyFaultPage[i] != pVM->csam.s.pvDirtyBasePage[i])
2599 REMR3NotifyCodePageChanged(pVM, pVCpu, pVM->csam.s.pvDirtyFaultPage[i] & PAGE_BASE_GC_MASK);
2600#endif
2601
2602 /* Enable write protection again. (use the fault address as it might be an alias) */
2603 rc = PGMShwMakePageReadonly(pVCpu, pVM->csam.s.pvDirtyFaultPage[i], 0 /*fFlags*/);
2604 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2605
2606 Log(("CSAMR3FlushDirtyPages: flush %RRv (modifypage rc=%Rrc)\n", pVM->csam.s.pvDirtyBasePage[i], rc));
2607
2608 pPageRec = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree, (AVLPVKEY)(uintptr_t)GCPtr);
2609 if (pPageRec && pPageRec->page.enmTag == CSAM_TAG_REM)
2610 {
2611 uint64_t fFlags;
2612
2613 rc = PGMGstGetPage(pVCpu, GCPtr, &fFlags, NULL);
2614 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc = %Rrc\n", rc));
2615 if ( rc == VINF_SUCCESS
2616 && (fFlags & X86_PTE_US))
2617 {
2618 /* We don't care about user pages. */
2619 csamRemovePageRecord(pVM, GCPtr);
2620 STAM_COUNTER_INC(&pVM->csam.s.StatNrUserPages);
2621 }
2622 }
2623 }
2624 pVM->csam.s.cDirtyPages = 0;
2625 STAM_PROFILE_STOP(&pVM->csam.s.StatFlushDirtyPages, a);
2626 return VINF_SUCCESS;
2627}
2628
2629/**
2630 * Flush potential new code pages
2631 *
2632 * @returns VBox status code.
2633 * @param pVM The cross context VM structure.
2634 */
2635static int csamR3FlushCodePages(PVM pVM)
2636{
2637 Assert(pVM->cCpus == 1);
2638 PVMCPU pVCpu = VMMGetCpu0(pVM);
2639
2640 for (uint32_t i=0;i<pVM->csam.s.cPossibleCodePages;i++)
2641 {
2642 RTRCPTR GCPtr = pVM->csam.s.pvPossibleCodePage[i];
2643
2644 GCPtr = GCPtr & PAGE_BASE_GC_MASK;
2645
2646 Log(("csamR3FlushCodePages: %RRv\n", GCPtr));
2647 PGMShwMakePageNotPresent(pVCpu, GCPtr, 0 /*fFlags*/);
2648 /* Resync the page to make sure instruction fetch will fault */
2649 CSAMMarkPage(pVM, GCPtr, false);
2650 }
2651 pVM->csam.s.cPossibleCodePages = 0;
2652 return VINF_SUCCESS;
2653}
2654
2655/**
2656 * Perform any pending actions
2657 *
2658 * @returns VBox status code.
2659 * @param pVM The cross context VM structure.
2660 * @param pVCpu The cross context virtual CPU structure.
2661 */
2662VMMR3_INT_DECL(int) CSAMR3DoPendingAction(PVM pVM, PVMCPU pVCpu)
2663{
2664 AssertReturn(VM_IS_RAW_MODE_ENABLED(pVM), VERR_CSAM_HM_IPE);
2665
2666 csamR3FlushDirtyPages(pVM);
2667 csamR3FlushCodePages(pVM);
2668
2669 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION);
2670 return VINF_SUCCESS;
2671}
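
/*
 * Illustrative sketch (not part of the original source): the execution loop is
 * expected to test the CSAM force-action flag and call CSAMR3DoPendingAction()
 * before resuming raw-mode execution. VMCPU_FF_IS_SET is assumed to be the
 * force-flag test macro.
 */
#if 0
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
        CSAMR3DoPendingAction(pVM, pVCpu);
#endif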
2672
2673/**
2674 * Analyse interrupt and trap gates
2675 *
2676 * @returns VBox status code.
2677 * @param pVM The cross context VM structure.
2678 * @param iGate Start gate
2679 * @param cGates Number of gates to check
2680 */
2681VMMR3_INT_DECL(int) CSAMR3CheckGates(PVM pVM, uint32_t iGate, uint32_t cGates)
2682{
2683#ifdef VBOX_WITH_RAW_MODE
2684 Assert(pVM->cCpus == 1);
2685 PVMCPU pVCpu = VMMGetCpu0(pVM);
2686 uint16_t cbIDT;
2687 RTRCPTR GCPtrIDT = CPUMGetGuestIDTR(pVCpu, &cbIDT);
2688 uint32_t iGateEnd;
2689 uint32_t maxGates;
2690 VBOXIDTE aIDT[256];
2691 PVBOXIDTE pGuestIdte;
2692 int rc;
2693
2694 AssertReturn(VM_IS_RAW_MODE_ENABLED(pVM), VERR_CSAM_HM_IPE);
2695 if (!EMIsRawRing0Enabled(pVM))
2696 {
2697 /* Enabling interrupt gates only works when raw ring 0 is enabled. */
2698 //AssertFailed();
2699 return VINF_SUCCESS;
2700 }
2701
2702 /* We only check all gates once during a session */
2703 if ( !pVM->csam.s.fGatesChecked
2704 && cGates != 256)
2705 return VINF_SUCCESS; /* too early */
2706
2707    /* After the initial full IDT check, only single-gate updates are processed. */
2708 if ( pVM->csam.s.fGatesChecked
2709 && cGates != 1)
2710 return VINF_SUCCESS; /* ignored */
2711
2712 Assert(cGates <= 256);
2713 if (!GCPtrIDT || cGates > 256)
2714 return VERR_INVALID_PARAMETER;
2715
2716 if (cGates != 1)
2717 {
2718 pVM->csam.s.fGatesChecked = true;
2719 for (unsigned i=0;i<RT_ELEMENTS(pVM->csam.s.pvCallInstruction);i++)
2720 {
2721 RTRCPTR pHandler = pVM->csam.s.pvCallInstruction[i];
2722
2723 if (pHandler)
2724 {
2725 PCSAMPAGE pPage = NULL;
2726 CSAMP2GLOOKUPREC cacheRec; /* Cache record for csamR3GCVirtToHCVirt. */
2727 RT_ZERO(cacheRec);
2728
2729 Log(("CSAMCheckGates: checking previous call instruction %RRv\n", pHandler));
2730 STAM_PROFILE_START(&pVM->csam.s.StatTime, a);
2731 rc = csamAnalyseCodeStream(pVM, pHandler, pHandler, true, CSAMR3AnalyseCallback, pPage, &cacheRec);
2732 STAM_PROFILE_STOP(&pVM->csam.s.StatTime, a);
2733 if (cacheRec.Lock.pvMap)
2734 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2735
2736 if (rc != VINF_SUCCESS)
2737 {
2738 Log(("CSAMCheckGates: csamAnalyseCodeStream failed with %d\n", rc));
2739 continue;
2740 }
2741 }
2742 }
2743 }
2744
2745 /* Determine valid upper boundary. */
2746 maxGates = (cbIDT+1) / sizeof(VBOXIDTE);
2747 Assert(iGate < maxGates);
2748    if (iGate >= maxGates)
2749 return VERR_INVALID_PARAMETER;
2750
2751 if (iGate + cGates > maxGates)
2752 cGates = maxGates - iGate;
2753
2754 GCPtrIDT = GCPtrIDT + iGate * sizeof(VBOXIDTE);
2755 iGateEnd = iGate + cGates;
2756
2757 STAM_PROFILE_START(&pVM->csam.s.StatCheckGates, a);
2758
2759 /*
2760 * Get IDT entries.
2761 */
2762 rc = PGMPhysSimpleReadGCPtr(pVCpu, aIDT, GCPtrIDT, cGates*sizeof(VBOXIDTE));
2763 if (RT_FAILURE(rc))
2764 {
2765 AssertMsgRC(rc, ("Failed to read IDTE! rc=%Rrc\n", rc));
2766 STAM_PROFILE_STOP(&pVM->csam.s.StatCheckGates, a);
2767 return rc;
2768 }
2769 pGuestIdte = &aIDT[0];
2770
2771 for (/*iGate*/; iGate<iGateEnd; iGate++, pGuestIdte++)
2772 {
2773 Assert(TRPMR3GetGuestTrapHandler(pVM, iGate) == TRPM_INVALID_HANDLER);
2774
2775 if ( pGuestIdte->Gen.u1Present
2776 && (pGuestIdte->Gen.u5Type2 == VBOX_IDTE_TYPE2_TRAP_32 || pGuestIdte->Gen.u5Type2 == VBOX_IDTE_TYPE2_INT_32)
2777 && (pGuestIdte->Gen.u2DPL == 3 || pGuestIdte->Gen.u2DPL == 0)
2778 )
2779 {
2780 RTRCPTR pHandler;
2781 PCSAMPAGE pPage = NULL;
2782 DBGFSELINFO selInfo;
2783 CSAMP2GLOOKUPREC cacheRec; /* Cache record for csamR3GCVirtToHCVirt. */
2784 RT_ZERO(cacheRec);
2785
2786 pHandler = VBOXIDTE_OFFSET(*pGuestIdte);
2787 pHandler = SELMToFlatBySel(pVM, pGuestIdte->Gen.u16SegSel, pHandler);
2788
2789 rc = SELMR3GetSelectorInfo(pVM, pVCpu, pGuestIdte->Gen.u16SegSel, &selInfo);
2790 if ( RT_FAILURE(rc)
2791 || (selInfo.fFlags & (DBGFSELINFO_FLAGS_NOT_PRESENT | DBGFSELINFO_FLAGS_INVALID))
2792 || selInfo.GCPtrBase != 0
2793 || selInfo.cbLimit != ~0U
2794 )
2795 {
2796 /* Refuse to patch a handler whose idt cs selector isn't wide open. */
2797 Log(("CSAMCheckGates: check gate %d failed due to rc %Rrc GCPtrBase=%RRv limit=%x\n", iGate, rc, selInfo.GCPtrBase, selInfo.cbLimit));
2798 continue;
2799 }
2800
2801
2802 if (pGuestIdte->Gen.u5Type2 == VBOX_IDTE_TYPE2_TRAP_32)
2803 {
2804 Log(("CSAMCheckGates: check trap gate %d at %04X:%08X (flat %RRv)\n", iGate, pGuestIdte->Gen.u16SegSel, VBOXIDTE_OFFSET(*pGuestIdte), pHandler));
2805 }
2806 else
2807 {
2808 Log(("CSAMCheckGates: check interrupt gate %d at %04X:%08X (flat %RRv)\n", iGate, pGuestIdte->Gen.u16SegSel, VBOXIDTE_OFFSET(*pGuestIdte), pHandler));
2809 }
2810
2811 STAM_PROFILE_START(&pVM->csam.s.StatTime, b);
2812 rc = csamAnalyseCodeStream(pVM, pHandler, pHandler, true, CSAMR3AnalyseCallback, pPage, &cacheRec);
2813 STAM_PROFILE_STOP(&pVM->csam.s.StatTime, b);
2814 if (cacheRec.Lock.pvMap)
2815 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2816
2817 if (rc != VINF_SUCCESS)
2818 {
2819 Log(("CSAMCheckGates: csamAnalyseCodeStream failed with %d\n", rc));
2820 continue;
2821 }
2822 /* OpenBSD guest specific patch test. */
2823 if (iGate >= 0x20)
2824 {
2825 PCPUMCTX pCtx;
2826 DISCPUSTATE cpu;
2827 RTGCUINTPTR32 aOpenBsdPushCSOffset[3] = {0x03, /* OpenBSD 3.7 & 3.8 */
2828 0x2B, /* OpenBSD 4.0 installation ISO */
2829 0x2F}; /* OpenBSD 4.0 after install */
2830
2831 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
2832
2833 for (unsigned i=0;i<RT_ELEMENTS(aOpenBsdPushCSOffset);i++)
2834 {
2835 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pHandler - aOpenBsdPushCSOffset[i], &cpu, NULL);
2836 if ( rc == VINF_SUCCESS
2837 && cpu.pCurInstr->uOpcode == OP_PUSH
2838 && cpu.pCurInstr->fParam1 == OP_PARM_REG_CS)
2839 {
2840 rc = PATMR3InstallPatch(pVM, pHandler - aOpenBsdPushCSOffset[i], PATMFL_CODE32 | PATMFL_GUEST_SPECIFIC);
2841 if (RT_SUCCESS(rc))
2842 Log(("Installed OpenBSD interrupt handler prefix instruction (push cs) patch\n"));
2843 }
2844 }
2845 }
2846
2847 /* Trap gates and certain interrupt gates. */
2848 uint32_t fPatchFlags = PATMFL_CODE32 | PATMFL_IDTHANDLER;
2849
2850 if (pGuestIdte->Gen.u5Type2 == VBOX_IDTE_TYPE2_TRAP_32)
2851 fPatchFlags |= PATMFL_TRAPHANDLER;
2852 else
2853 fPatchFlags |= PATMFL_INTHANDLER;
2854
2855 switch (iGate) {
2856 case 8:
2857 case 10:
2858 case 11:
2859 case 12:
2860 case 13:
2861 case 14:
2862 case 17:
2863 fPatchFlags |= PATMFL_TRAPHANDLER_WITH_ERRORCODE;
2864 break;
2865 default:
2866 /* No error code. */
2867 break;
2868 }
2869
2870 Log(("Installing %s gate handler for 0x%X at %RRv\n", (pGuestIdte->Gen.u5Type2 == VBOX_IDTE_TYPE2_TRAP_32) ? "trap" : "intr", iGate, pHandler));
2871
2872 rc = PATMR3InstallPatch(pVM, pHandler, fPatchFlags);
2873 if ( RT_SUCCESS(rc)
2874 || rc == VERR_PATM_ALREADY_PATCHED)
2875 {
2876 Log(("Gate handler 0x%X is SAFE!\n", iGate));
2877
2878 RTRCPTR pNewHandlerGC = PATMR3QueryPatchGCPtr(pVM, pHandler);
2879 if (pNewHandlerGC)
2880 {
2881 rc = TRPMR3SetGuestTrapHandler(pVM, iGate, pNewHandlerGC);
2882 if (RT_FAILURE(rc))
2883 Log(("TRPMR3SetGuestTrapHandler %d failed with %Rrc\n", iGate, rc));
2884 }
2885 }
2886 }
2887 } /* for */
2888 STAM_PROFILE_STOP(&pVM->csam.s.StatCheckGates, a);
2889#endif /* VBOX_WITH_RAW_MODE */
2890 return VINF_SUCCESS;
2891}
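
/*
 * Illustrative sketch (not part of the original source): the initial full IDT
 * sweep and a later single-gate recheck, matching the cGates == 256 and
 * cGates == 1 cases handled above. The call sites are hypothetical.
 */
#if 0
    CSAMR3CheckGates(pVM, 0, 256);   /* once: scan and patch the whole guest IDT */
    CSAMR3CheckGates(pVM, 0x80, 1);  /* later: recheck a single gate that changed */
#endif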
2892
2893/**
2894 * Record previous call instruction addresses
2895 *
2896 * @returns VBox status code.
2897 * @param pVM The cross context VM structure.
2898 * @param GCPtrCall Call address
2899 */
2900VMMR3DECL(int) CSAMR3RecordCallAddress(PVM pVM, RTRCPTR GCPtrCall)
2901{
2902 Assert(VM_IS_RAW_MODE_ENABLED(pVM));
2903 for (unsigned i=0;i<RT_ELEMENTS(pVM->csam.s.pvCallInstruction);i++)
2904 {
2905 if (pVM->csam.s.pvCallInstruction[i] == GCPtrCall)
2906 return VINF_SUCCESS;
2907 }
2908
2909 Log(("CSAMR3RecordCallAddress %RRv\n", GCPtrCall));
2910
2911 pVM->csam.s.pvCallInstruction[pVM->csam.s.iCallInstruction++] = GCPtrCall;
2912 if (pVM->csam.s.iCallInstruction >= RT_ELEMENTS(pVM->csam.s.pvCallInstruction))
2913 pVM->csam.s.iCallInstruction = 0;
2914
2915 return VINF_SUCCESS;
2916}
2917
2918
2919/**
2920 * Query CSAM state (enabled/disabled)
2921 *
2922 * @returns true if enabled, false otherwise.
2923 * @param pUVM The user mode VM handle.
2924 */
2925VMMR3DECL(bool) CSAMR3IsEnabled(PUVM pUVM)
2926{
2927 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2928 PVM pVM = pUVM->pVM;
2929 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2930 return CSAMIsEnabled(pVM);
2931}
2932
2933
2934/**
2935 * Enables or disables code scanning.
2936 *
2937 * @returns VBox status code.
2938 * @param pUVM The user mode VM handle.
2939 * @param fEnabled Whether to enable or disable scanning.
2940 */
2941VMMR3DECL(int) CSAMR3SetScanningEnabled(PUVM pUVM, bool fEnabled)
2942{
2943 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2944 PVM pVM = pUVM->pVM;
2945 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2946
2947 if (!VM_IS_RAW_MODE_ENABLED(pVM))
2948 {
2949 Assert(!pVM->fCSAMEnabled);
2950 return VINF_SUCCESS;
2951 }
2952
2953 int rc;
2954 if (fEnabled)
2955 rc = CSAMEnableScanning(pVM);
2956 else
2957 rc = CSAMDisableScanning(pVM);
2958 return rc;
2959}
2960
2961
2962#ifdef VBOX_WITH_DEBUGGER
2963
2964/**
2965 * @callback_method_impl{FNDBGCCMD, The '.csamoff' command.}
2966 */
2967static DECLCALLBACK(int) csamr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
2968{
2969 DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
2970 NOREF(cArgs); NOREF(paArgs);
2971
2972 if (HMR3IsEnabled(pUVM))
2973 return DBGCCmdHlpPrintf(pCmdHlp, "CSAM is permanently disabled by HM.\n");
2974
2975 int rc = CSAMR3SetScanningEnabled(pUVM, false);
2976 if (RT_FAILURE(rc))
2977 return DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "CSAMR3SetScanningEnabled");
2978 return DBGCCmdHlpPrintf(pCmdHlp, "CSAM Scanning disabled\n");
2979}
2980
2981/**
2982 * @callback_method_impl{FNDBGCCMD, The '.csamon' command.}
2983 */
2984static DECLCALLBACK(int) csamr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
2985{
2986 DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
2987 NOREF(cArgs); NOREF(paArgs);
2988
2989 if (HMR3IsEnabled(pUVM))
2990 return DBGCCmdHlpPrintf(pCmdHlp, "CSAM is permanently disabled by HM.\n");
2991
2992 int rc = CSAMR3SetScanningEnabled(pUVM, true);
2993 if (RT_FAILURE(rc))
2994 return DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "CSAMR3SetScanningEnabled");
2995 return DBGCCmdHlpPrintf(pCmdHlp, "CSAM Scanning enabled\n");
2996}
2997
2998#endif /* VBOX_WITH_DEBUGGER */