VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/CSAM.cpp@ 55895

Last change on this file since 55895 was 55895, checked in by vboxsync, 10 years ago

Added pvUser to the raw-mode context virtual handler callbacks.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 101.2 KB
1/* $Id: CSAM.cpp 55895 2015-05-17 19:42:38Z vboxsync $ */
2/** @file
3 * CSAM - Guest OS Code Scanning and Analysis Manager
4 */
5
6/*
7 * Copyright (C) 2006-2014 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_CSAM
22#include <VBox/vmm/cpum.h>
23#include <VBox/vmm/stam.h>
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/csam.h>
26#include <VBox/vmm/cpumdis.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/iom.h>
29#include <VBox/vmm/mm.h>
30#include <VBox/vmm/em.h>
31#include <VBox/vmm/hm.h>
32#ifdef VBOX_WITH_REM
33# include <VBox/vmm/rem.h>
34#endif
35#include <VBox/vmm/selm.h>
36#include <VBox/vmm/trpm.h>
37#include <VBox/vmm/cfgm.h>
38#include <VBox/vmm/ssm.h>
39#include <VBox/param.h>
40#include <iprt/avl.h>
41#include <iprt/asm.h>
42#include <iprt/thread.h>
43#include "CSAMInternal.h"
44#include <VBox/vmm/vm.h>
45#include <VBox/vmm/uvm.h>
46
47#include <VBox/dbg.h>
48#include <VBox/sup.h>
49#include <VBox/err.h>
50#include <VBox/log.h>
51
52#include <VBox/dis.h>
53#include <VBox/disopcode.h>
54#include <iprt/assert.h>
55#include <iprt/string.h>
56#include "internal/pgm.h"
57
58
59/* Enabled by default */
60#define CSAM_ENABLE
61
62/* Enable to monitor code pages for self-modifying code. */
63#define CSAM_MONITOR_CODE_PAGES
64/* Enable to monitor all scanned pages
65#define CSAM_MONITOR_CSAM_CODE_PAGES */
66/* Enable to scan beyond ret instructions.
67#define CSAM_ANALYSE_BEYOND_RET */
68
69/*******************************************************************************
70* Internal Functions *
71*******************************************************************************/
72static DECLCALLBACK(int) csamR3Save(PVM pVM, PSSMHANDLE pSSM);
73static DECLCALLBACK(int) csamR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
74static FNPGMR3VIRTHANDLER csamR3CodePageWriteHandler;
75static FNPGMR3VIRTINVALIDATE csamR3CodePageInvalidate;
76
77bool csamIsCodeScanned(PVM pVM, RTRCPTR pInstr, PCSAMPAGE *pPage);
78int csamR3CheckPageRecord(PVM pVM, RTRCPTR pInstr);
79static PCSAMPAGE csamCreatePageRecord(PVM pVM, RTRCPTR GCPtr, CSAMTAG enmTag, bool fCode32, bool fMonitorInvalidation = false);
80static int csamRemovePageRecord(PVM pVM, RTRCPTR GCPtr);
81static int csamReinit(PVM pVM);
82static void csamMarkCode(PVM pVM, PCSAMPAGE pPage, RTRCPTR pInstr, uint32_t opsize, bool fScanned);
83static int csamAnalyseCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, bool fCode32,
84 PFN_CSAMR3ANALYSE pfnCSAMR3Analyse, void *pUserData, PCSAMP2GLOOKUPREC pCacheRec);
85
86/** @todo "Temporary" for debugging. */
87static bool g_fInCsamR3CodePageInvalidate = false;
88
89#ifdef VBOX_WITH_DEBUGGER
90static FNDBGCCMD csamr3CmdOn;
91static FNDBGCCMD csamr3CmdOff;
92#endif
93
94
95/*******************************************************************************
96* Global Variables *
97*******************************************************************************/
98#ifdef VBOX_WITH_DEBUGGER
99/** Command descriptors. */
100static const DBGCCMD g_aCmds[] =
101{
102 /* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, fFlags, pfnHandler, pszSyntax, pszDescription */
103 { "csamon", 0, 0, NULL, 0, 0, csamr3CmdOn, "", "Enable CSAM code scanning." },
104 { "csamoff", 0, 0, NULL, 0, 0, csamr3CmdOff, "", "Disable CSAM code scanning." },
105};
106#endif
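/* Illustrative usage note (not part of the original file): once registered via
 * DBGCRegisterCommands() in CSAMR3Init() below, these commands can be typed at
 * the VM debugger console, e.g.:
 *
 *     VBoxDbg> csamoff
 *     VBoxDbg> csamon
 *
 * The "VBoxDbg>" prompt is just a placeholder for whatever prompt the debugger
 * console presents in your build.
 */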
107
108/**
109 * SSM descriptor table for the CSAM structure.
110 */
111static const SSMFIELD g_aCsamFields[] =
112{
113 /** @todo there are more fields that can be ignored here. */
114 SSMFIELD_ENTRY_IGNORE( CSAM, offVM),
115 SSMFIELD_ENTRY_PAD_HC64( CSAM, Alignment0, sizeof(uint32_t)),
116 SSMFIELD_ENTRY_IGN_HCPTR( CSAM, pPageTree),
117 SSMFIELD_ENTRY( CSAM, aDangerousInstr),
118 SSMFIELD_ENTRY( CSAM, cDangerousInstr),
119 SSMFIELD_ENTRY( CSAM, iDangerousInstr),
120 SSMFIELD_ENTRY_RCPTR( CSAM, pPDBitmapGC), /// @todo ignore this?
121 SSMFIELD_ENTRY_RCPTR( CSAM, pPDHCBitmapGC), /// @todo ignore this?
122 SSMFIELD_ENTRY_IGN_HCPTR( CSAM, pPDBitmapHC),
123 SSMFIELD_ENTRY_IGN_HCPTR( CSAM, pPDGCBitmapHC),
124 SSMFIELD_ENTRY_IGN_HCPTR( CSAM, savedstate.pSSM),
125 SSMFIELD_ENTRY( CSAM, savedstate.cPageRecords),
126 SSMFIELD_ENTRY( CSAM, savedstate.cPatchPageRecords),
127 SSMFIELD_ENTRY( CSAM, cDirtyPages),
128 SSMFIELD_ENTRY_RCPTR_ARRAY( CSAM, pvDirtyBasePage),
129 SSMFIELD_ENTRY_RCPTR_ARRAY( CSAM, pvDirtyFaultPage),
130 SSMFIELD_ENTRY( CSAM, cPossibleCodePages),
131 SSMFIELD_ENTRY_RCPTR_ARRAY( CSAM, pvPossibleCodePage),
132 SSMFIELD_ENTRY_RCPTR_ARRAY( CSAM, pvCallInstruction),
133 SSMFIELD_ENTRY( CSAM, iCallInstruction),
134 SSMFIELD_ENTRY( CSAM, fScanningStarted),
135 SSMFIELD_ENTRY( CSAM, fGatesChecked),
136 SSMFIELD_ENTRY_PAD_HC( CSAM, Alignment1, 6, 2),
137 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrTraps),
138 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrPages),
139 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrPagesInv),
140 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrRemovedPages),
141 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrPatchPages),
142 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrPageNPHC),
143 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrPageNPGC),
144 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrFlushes),
145 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrFlushesSkipped),
146 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrKnownPagesHC),
147 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrKnownPagesGC),
148 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrInstr),
149 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrBytesRead),
150 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrOpcodeRead),
151 SSMFIELD_ENTRY_IGNORE( CSAM, StatTime),
152 SSMFIELD_ENTRY_IGNORE( CSAM, StatTimeCheckAddr),
153 SSMFIELD_ENTRY_IGNORE( CSAM, StatTimeAddrConv),
154 SSMFIELD_ENTRY_IGNORE( CSAM, StatTimeFlushPage),
155 SSMFIELD_ENTRY_IGNORE( CSAM, StatTimeDisasm),
156 SSMFIELD_ENTRY_IGNORE( CSAM, StatFlushDirtyPages),
157 SSMFIELD_ENTRY_IGNORE( CSAM, StatCheckGates),
158 SSMFIELD_ENTRY_IGNORE( CSAM, StatCodePageModified),
159 SSMFIELD_ENTRY_IGNORE( CSAM, StatDangerousWrite),
160 SSMFIELD_ENTRY_IGNORE( CSAM, StatInstrCacheHit),
161 SSMFIELD_ENTRY_IGNORE( CSAM, StatInstrCacheMiss),
162 SSMFIELD_ENTRY_IGNORE( CSAM, StatPagePATM),
163 SSMFIELD_ENTRY_IGNORE( CSAM, StatPageCSAM),
164 SSMFIELD_ENTRY_IGNORE( CSAM, StatPageREM),
165 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrUserPages),
166 SSMFIELD_ENTRY_IGNORE( CSAM, StatPageMonitor),
167 SSMFIELD_ENTRY_IGNORE( CSAM, StatPageRemoveREMFlush),
168 SSMFIELD_ENTRY_IGNORE( CSAM, StatBitmapAlloc),
169 SSMFIELD_ENTRY_IGNORE( CSAM, StatScanNextFunction),
170 SSMFIELD_ENTRY_IGNORE( CSAM, StatScanNextFunctionFailed),
171 SSMFIELD_ENTRY_TERM()
172};
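/* Illustrative note (not part of the original file): when this table is fed to
 * SSMR3GetStructEx() in csamR3Load() below, plain SSMFIELD_ENTRY() fields are
 * copied from the saved state, the *_IGNORE / *_IGN_* entries are skipped
 * (host pointers and statistics), and the _RCPTR / _GCPHYS / _PAD_* variants
 * compensate for pointer-size and alignment differences between 32-bit and
 * 64-bit hosts, so the same saved state can be loaded on either. */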
173
174/** Fake type to simplify g_aCsamPDBitmapArray construction. */
175typedef struct
176{
177 uint8_t *a[CSAM_PGDIRBMP_CHUNKS];
178} CSAMPDBITMAPARRAY;
179
180/**
181 * SSM descriptor table for the CSAM::pPDBitmapHC array.
182 */
183static SSMFIELD const g_aCsamPDBitmapArray[] =
184{
185 SSMFIELD_ENTRY_HCPTR_NI_ARRAY(CSAMPDBITMAPARRAY, a),
186 SSMFIELD_ENTRY_TERM()
187};
188
189/**
190 * SSM descriptor table for the CSAMPAGEREC structure.
191 */
192static const SSMFIELD g_aCsamPageRecFields[] =
193{
194 SSMFIELD_ENTRY_IGN_HCPTR( CSAMPAGEREC, Core.Key),
195 SSMFIELD_ENTRY_IGN_HCPTR( CSAMPAGEREC, Core.pLeft),
196 SSMFIELD_ENTRY_IGN_HCPTR( CSAMPAGEREC, Core.pRight),
197 SSMFIELD_ENTRY_IGNORE( CSAMPAGEREC, Core.uchHeight),
198 SSMFIELD_ENTRY_PAD_HC_AUTO( 3, 7),
199 SSMFIELD_ENTRY_RCPTR( CSAMPAGEREC, page.pPageGC),
200 SSMFIELD_ENTRY_PAD_HC_AUTO( 0, 4),
201 SSMFIELD_ENTRY_PAD_MSC32_AUTO( 4),
202 SSMFIELD_ENTRY_GCPHYS( CSAMPAGEREC, page.GCPhys),
203 SSMFIELD_ENTRY( CSAMPAGEREC, page.fFlags),
204 SSMFIELD_ENTRY( CSAMPAGEREC, page.uSize),
205 SSMFIELD_ENTRY_PAD_HC_AUTO( 0, 4),
206 SSMFIELD_ENTRY_HCPTR_NI( CSAMPAGEREC, page.pBitmap),
207 SSMFIELD_ENTRY( CSAMPAGEREC, page.fCode32),
208 SSMFIELD_ENTRY( CSAMPAGEREC, page.fMonitorActive),
209 SSMFIELD_ENTRY( CSAMPAGEREC, page.fMonitorInvalidation),
210 SSMFIELD_ENTRY_PAD_HC_AUTO( 1, 1),
211 SSMFIELD_ENTRY( CSAMPAGEREC, page.enmTag),
212 SSMFIELD_ENTRY( CSAMPAGEREC, page.u64Hash),
213 SSMFIELD_ENTRY_TERM()
214};
215
216
217/**
218 * Initializes the CSAM.
219 *
220 * @returns VBox status code.
221 * @param pVM Pointer to the VM.
222 */
223VMMR3_INT_DECL(int) CSAMR3Init(PVM pVM)
224{
225 int rc;
226
227 /*
228 * We only need a saved state dummy loader if HM is enabled.
229 */
230 if (HMIsEnabled(pVM))
231 {
232 pVM->fCSAMEnabled = false;
233 return SSMR3RegisterStub(pVM, "CSAM", 0);
234 }
235
236 /*
237 * Raw-mode.
238 */
239 LogFlow(("CSAMR3Init\n"));
240
241 /* Allocate bitmap for the page directory. */
242 rc = MMR3HyperAllocOnceNoRel(pVM, CSAM_PGDIRBMP_CHUNKS*sizeof(RTHCPTR), 0, MM_TAG_CSAM, (void **)&pVM->csam.s.pPDBitmapHC);
243 AssertRCReturn(rc, rc);
244 rc = MMR3HyperAllocOnceNoRel(pVM, CSAM_PGDIRBMP_CHUNKS*sizeof(RTRCPTR), 0, MM_TAG_CSAM, (void **)&pVM->csam.s.pPDGCBitmapHC);
245 AssertRCReturn(rc, rc);
246 pVM->csam.s.pPDBitmapGC = MMHyperR3ToRC(pVM, pVM->csam.s.pPDGCBitmapHC);
247 pVM->csam.s.pPDHCBitmapGC = MMHyperR3ToRC(pVM, pVM->csam.s.pPDBitmapHC);
248
249 rc = csamReinit(pVM);
250 AssertRCReturn(rc, rc);
251
252 /*
253 * Register virtual handler types.
254 */
255 rc = PGMR3HandlerVirtualTypeRegister(pVM, PGMVIRTHANDLERKIND_WRITE, false /*fRelocUserRC*/,
256 NULL /*pfnInvalidateR3 */,
257 csamR3CodePageWriteHandler,
258 "csamRCCodePageWritePfHandler", NULL /*pszModRC*/,
259 "CSAM code page write handler",
260 &pVM->csam.s.hCodePageWriteType);
261 AssertLogRelRCReturn(rc, rc);
262 rc = PGMR3HandlerVirtualTypeRegister(pVM, PGMVIRTHANDLERKIND_WRITE, false /*fRelocUserRC*/,
263 csamR3CodePageInvalidate,
264 csamR3CodePageWriteHandler,
265 "csamRCCodePageWritePfHandler", NULL /*pszModRC*/,
266 "CSAM code page write and invlpg handler",
267 &pVM->csam.s.hCodePageWriteAndInvPgType);
268 AssertLogRelRCReturn(rc, rc);
269
270 /*
271 * Register save and load state notifiers.
272 */
273 rc = SSMR3RegisterInternal(pVM, "CSAM", 0, CSAM_SAVED_STATE_VERSION, sizeof(pVM->csam.s) + PAGE_SIZE*16,
274 NULL, NULL, NULL,
275 NULL, csamR3Save, NULL,
276 NULL, csamR3Load, NULL);
277 AssertRCReturn(rc, rc);
278
279 STAM_REG(pVM, &pVM->csam.s.StatNrTraps, STAMTYPE_COUNTER, "/CSAM/PageTraps", STAMUNIT_OCCURENCES, "The number of CSAM page traps.");
280 STAM_REG(pVM, &pVM->csam.s.StatDangerousWrite, STAMTYPE_COUNTER, "/CSAM/DangerousWrites", STAMUNIT_OCCURENCES, "The number of dangerous writes that cause a context switch.");
281
282 STAM_REG(pVM, &pVM->csam.s.StatNrPageNPHC, STAMTYPE_COUNTER, "/CSAM/HC/PageNotPresent", STAMUNIT_OCCURENCES, "The number of CSAM pages marked not present.");
283 STAM_REG(pVM, &pVM->csam.s.StatNrPageNPGC, STAMTYPE_COUNTER, "/CSAM/GC/PageNotPresent", STAMUNIT_OCCURENCES, "The number of CSAM pages marked not present.");
284 STAM_REG(pVM, &pVM->csam.s.StatNrPages, STAMTYPE_COUNTER, "/CSAM/PageRec/AddedRW", STAMUNIT_OCCURENCES, "The number of CSAM page records (RW monitoring).");
285 STAM_REG(pVM, &pVM->csam.s.StatNrPagesInv, STAMTYPE_COUNTER, "/CSAM/PageRec/AddedRWI", STAMUNIT_OCCURENCES, "The number of CSAM page records (RW & invalidation monitoring).");
286 STAM_REG(pVM, &pVM->csam.s.StatNrRemovedPages, STAMTYPE_COUNTER, "/CSAM/PageRec/Removed", STAMUNIT_OCCURENCES, "The number of removed CSAM page records.");
287 STAM_REG(pVM, &pVM->csam.s.StatPageRemoveREMFlush,STAMTYPE_COUNTER, "/CSAM/PageRec/Removed/REMFlush", STAMUNIT_OCCURENCES, "The number of removed CSAM page records that caused a REM flush.");
288
289 STAM_REG(pVM, &pVM->csam.s.StatNrPatchPages, STAMTYPE_COUNTER, "/CSAM/PageRec/Patch", STAMUNIT_OCCURENCES, "The number of CSAM patch page records.");
290 STAM_REG(pVM, &pVM->csam.s.StatNrUserPages, STAMTYPE_COUNTER, "/CSAM/PageRec/Ignore/User", STAMUNIT_OCCURENCES, "The number of CSAM user page records (ignored).");
291 STAM_REG(pVM, &pVM->csam.s.StatPagePATM, STAMTYPE_COUNTER, "/CSAM/PageRec/Type/PATM", STAMUNIT_OCCURENCES, "The number of PATM page records.");
292 STAM_REG(pVM, &pVM->csam.s.StatPageCSAM, STAMTYPE_COUNTER, "/CSAM/PageRec/Type/CSAM", STAMUNIT_OCCURENCES, "The number of CSAM page records.");
293 STAM_REG(pVM, &pVM->csam.s.StatPageREM, STAMTYPE_COUNTER, "/CSAM/PageRec/Type/REM", STAMUNIT_OCCURENCES, "The number of REM page records.");
294 STAM_REG(pVM, &pVM->csam.s.StatPageMonitor, STAMTYPE_COUNTER, "/CSAM/PageRec/Monitored", STAMUNIT_OCCURENCES, "The number of monitored pages.");
295
296 STAM_REG(pVM, &pVM->csam.s.StatCodePageModified, STAMTYPE_COUNTER, "/CSAM/Monitor/DirtyPage", STAMUNIT_OCCURENCES, "The number of code page modifications.");
297
298 STAM_REG(pVM, &pVM->csam.s.StatNrFlushes, STAMTYPE_COUNTER, "/CSAM/PageFlushes", STAMUNIT_OCCURENCES, "The number of CSAM page flushes.");
299 STAM_REG(pVM, &pVM->csam.s.StatNrFlushesSkipped, STAMTYPE_COUNTER, "/CSAM/PageFlushesSkipped", STAMUNIT_OCCURENCES, "The number of CSAM page flushes that were skipped.");
300 STAM_REG(pVM, &pVM->csam.s.StatNrKnownPagesHC, STAMTYPE_COUNTER, "/CSAM/HC/KnownPageRecords", STAMUNIT_OCCURENCES, "The number of known CSAM page records.");
301 STAM_REG(pVM, &pVM->csam.s.StatNrKnownPagesGC, STAMTYPE_COUNTER, "/CSAM/GC/KnownPageRecords", STAMUNIT_OCCURENCES, "The number of known CSAM page records.");
302 STAM_REG(pVM, &pVM->csam.s.StatNrInstr, STAMTYPE_COUNTER, "/CSAM/ScannedInstr", STAMUNIT_OCCURENCES, "The number of scanned instructions.");
303 STAM_REG(pVM, &pVM->csam.s.StatNrBytesRead, STAMTYPE_COUNTER, "/CSAM/BytesRead", STAMUNIT_OCCURENCES, "The number of bytes read for scanning.");
304 STAM_REG(pVM, &pVM->csam.s.StatNrOpcodeRead, STAMTYPE_COUNTER, "/CSAM/OpcodeBytesRead", STAMUNIT_OCCURENCES, "The number of opcode bytes read by the recompiler.");
305
306 STAM_REG(pVM, &pVM->csam.s.StatBitmapAlloc, STAMTYPE_COUNTER, "/CSAM/Alloc/PageBitmap", STAMUNIT_OCCURENCES, "The number of page bitmap allocations.");
307
308 STAM_REG(pVM, &pVM->csam.s.StatInstrCacheHit, STAMTYPE_COUNTER, "/CSAM/Cache/Hit", STAMUNIT_OCCURENCES, "The number of dangerous instruction cache hits.");
309 STAM_REG(pVM, &pVM->csam.s.StatInstrCacheMiss, STAMTYPE_COUNTER, "/CSAM/Cache/Miss", STAMUNIT_OCCURENCES, "The number of dangerous instruction cache misses.");
310
311 STAM_REG(pVM, &pVM->csam.s.StatScanNextFunction, STAMTYPE_COUNTER, "/CSAM/Function/Scan/Success", STAMUNIT_OCCURENCES, "The number of found functions beyond the ret border.");
312 STAM_REG(pVM, &pVM->csam.s.StatScanNextFunctionFailed, STAMTYPE_COUNTER, "/CSAM/Function/Scan/Failed", STAMUNIT_OCCURENCES, "The number of refused functions beyond the ret border.");
313
314 STAM_REG(pVM, &pVM->csam.s.StatTime, STAMTYPE_PROFILE, "/PROF/CSAM/Scan", STAMUNIT_TICKS_PER_CALL, "Scanning overhead.");
315 STAM_REG(pVM, &pVM->csam.s.StatTimeCheckAddr, STAMTYPE_PROFILE, "/PROF/CSAM/CheckAddr", STAMUNIT_TICKS_PER_CALL, "Address check overhead.");
316 STAM_REG(pVM, &pVM->csam.s.StatTimeAddrConv, STAMTYPE_PROFILE, "/PROF/CSAM/AddrConv", STAMUNIT_TICKS_PER_CALL, "Address conversion overhead.");
317 STAM_REG(pVM, &pVM->csam.s.StatTimeFlushPage, STAMTYPE_PROFILE, "/PROF/CSAM/FlushPage", STAMUNIT_TICKS_PER_CALL, "Page flushing overhead.");
318 STAM_REG(pVM, &pVM->csam.s.StatTimeDisasm, STAMTYPE_PROFILE, "/PROF/CSAM/Disasm", STAMUNIT_TICKS_PER_CALL, "Disassembly overhead.");
319 STAM_REG(pVM, &pVM->csam.s.StatFlushDirtyPages, STAMTYPE_PROFILE, "/PROF/CSAM/FlushDirtyPage", STAMUNIT_TICKS_PER_CALL, "Dirty page flushing overhead.");
320 STAM_REG(pVM, &pVM->csam.s.StatCheckGates, STAMTYPE_PROFILE, "/PROF/CSAM/CheckGates", STAMUNIT_TICKS_PER_CALL, "CSAMR3CheckGates overhead.");
321
322 /*
323 * Check CFGM option and enable/disable CSAM.
324 */
325 bool fEnabled;
326 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "CSAMEnabled", &fEnabled);
327 if (RT_FAILURE(rc))
328#ifdef CSAM_ENABLE
329 fEnabled = true;
330#else
331 fEnabled = false;
332#endif
333 if (fEnabled)
334 CSAMEnableScanning(pVM);
335
336#ifdef VBOX_WITH_DEBUGGER
337 /*
338 * Debugger commands.
339 */
340 static bool fRegisteredCmds = false;
341 if (!fRegisteredCmds)
342 {
343 rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
344 if (RT_SUCCESS(rc))
345 fRegisteredCmds = true;
346 }
347#endif
348
349 return VINF_SUCCESS;
350}
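/* Illustrative sketch (not part of the original file): the query-then-default
 * pattern used for "CSAMEnabled" above could also be written with the
 * CFGMR3QueryBoolDef() convenience wrapper, which folds the RT_FAILURE()
 * check and the compile-time default into a single call:
 *
 *     bool fEnabled;
 *     rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "CSAMEnabled", &fEnabled,
 *                             true);  // default mirrors CSAM_ENABLE above
 *
 * This is only a restatement of the logic above, not a change to it.
 */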
351
352/**
353 * (Re)initializes CSAM
354 *
355 * @param pVM The VM.
356 */
357static int csamReinit(PVM pVM)
358{
359 /*
360 * Assert alignment and sizes.
361 */
362 AssertRelease(!(RT_OFFSETOF(VM, csam.s) & 31));
363 AssertRelease(sizeof(pVM->csam.s) <= sizeof(pVM->csam.padding));
364 AssertRelease(!HMIsEnabled(pVM));
365
366 /*
367 * Setup any fixed pointers and offsets.
368 */
369 pVM->csam.s.offVM = RT_OFFSETOF(VM, patm);
370
371 pVM->csam.s.fGatesChecked = false;
372 pVM->csam.s.fScanningStarted = false;
373
374 PVMCPU pVCpu = &pVM->aCpus[0]; /* raw mode implies 1 VCPU */
375 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION);
376 pVM->csam.s.cDirtyPages = 0;
377 /* not necessary */
378 memset(pVM->csam.s.pvDirtyBasePage, 0, sizeof(pVM->csam.s.pvDirtyBasePage));
379 memset(pVM->csam.s.pvDirtyFaultPage, 0, sizeof(pVM->csam.s.pvDirtyFaultPage));
380
381 memset(&pVM->csam.s.aDangerousInstr, 0, sizeof(pVM->csam.s.aDangerousInstr));
382 pVM->csam.s.cDangerousInstr = 0;
383 pVM->csam.s.iDangerousInstr = 0;
384
385 memset(pVM->csam.s.pvCallInstruction, 0, sizeof(pVM->csam.s.pvCallInstruction));
386 pVM->csam.s.iCallInstruction = 0;
387
388 /** @note never mess with the pgdir bitmap here! */
389 return VINF_SUCCESS;
390}
391
392/**
393 * Applies relocations to data and code managed by this
394 * component. This function will be called at init and
395 * whenever the VMM needs to relocate itself inside the GC.
396 *
397 * CSAM will update the addresses used by the switcher.
398 *
399 * @param pVM The VM.
400 * @param offDelta Relocation delta.
401 */
402VMMR3_INT_DECL(void) CSAMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
403{
404 if (offDelta && !HMIsEnabled(pVM))
405 {
406 /* Adjust pgdir and page bitmap pointers. */
407 pVM->csam.s.pPDBitmapGC = MMHyperR3ToRC(pVM, pVM->csam.s.pPDGCBitmapHC);
408 pVM->csam.s.pPDHCBitmapGC = MMHyperR3ToRC(pVM, pVM->csam.s.pPDBitmapHC);
409
410 for(int i=0;i<CSAM_PGDIRBMP_CHUNKS;i++)
411 {
412 if (pVM->csam.s.pPDGCBitmapHC[i])
413 {
414 pVM->csam.s.pPDGCBitmapHC[i] += offDelta;
415 }
416 }
417 }
418 return;
419}
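/* Illustrative example (not part of the original file): if the hypervisor area
 * moves by offDelta, every non-NULL raw-mode context pointer stored in
 * pPDGCBitmapHC[] must move with it, e.g. a chunk bitmap that was reachable at
 * RC address X before the relocation is reachable at X + offDelta afterwards.
 * The ring-3 pointers in pPDBitmapHC[] are host addresses and are unaffected,
 * which is why only the GC bitmap array is touched above. */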
420
421/**
422 * Terminates CSAM.
423 *
424 * Termination means cleaning up and freeing all resources; the VM itself
425 * is at this point powered off or suspended.
426 *
427 * @returns VBox status code.
428 * @param pVM Pointer to the VM.
429 */
430VMMR3_INT_DECL(int) CSAMR3Term(PVM pVM)
431{
432 if (HMIsEnabled(pVM))
433 return VINF_SUCCESS;
434
435 int rc;
436
437 rc = CSAMR3Reset(pVM);
438 AssertRC(rc);
439
440 /* @todo triggers assertion in MMHyperFree */
441#if 0
442 for(int i=0;i<CSAM_PAGEBMP_CHUNKS;i++)
443 {
444 if (pVM->csam.s.pPDBitmapHC[i])
445 MMHyperFree(pVM, pVM->csam.s.pPDBitmapHC[i]);
446 }
447#endif
448
449 return VINF_SUCCESS;
450}
451
452/**
453 * CSAM reset callback.
454 *
455 * @returns VBox status code.
456 * @param pVM The VM which is reset.
457 */
458VMMR3_INT_DECL(int) CSAMR3Reset(PVM pVM)
459{
460 if (HMIsEnabled(pVM))
461 return VINF_SUCCESS;
462
463 /* Clear page bitmaps. */
464 for (int i = 0; i < CSAM_PGDIRBMP_CHUNKS; i++)
465 {
466 if (pVM->csam.s.pPDBitmapHC[i])
467 {
468 Assert((CSAM_PAGE_BITMAP_SIZE & 3) == 0);
469 ASMMemZero32(pVM->csam.s.pPDBitmapHC[i], CSAM_PAGE_BITMAP_SIZE);
470 }
471 }
472
473 /* Remove all CSAM page records. */
474 for (;;)
475 {
476 PCSAMPAGEREC pPageRec = (PCSAMPAGEREC)RTAvlPVGetBestFit(&pVM->csam.s.pPageTree, 0, true);
477 if (!pPageRec)
478 break;
479 csamRemovePageRecord(pVM, pPageRec->page.pPageGC);
480 }
481 Assert(!pVM->csam.s.pPageTree);
482
483 csamReinit(pVM);
484
485 return VINF_SUCCESS;
486}
487
488
489/**
490 * Callback function for RTAvlPVDoWithAll
491 *
492 * Counts the number of records in the tree
493 *
494 * @returns VBox status code.
495 * @param pNode Current node
496 * @param pcPatches Pointer to patch counter
497 */
498static DECLCALLBACK(int) CountRecord(PAVLPVNODECORE pNode, void *pcPatches)
499{
500 NOREF(pNode);
501 *(uint32_t *)pcPatches = *(uint32_t *)pcPatches + 1;
502 return VINF_SUCCESS;
503}
504
505/**
506 * Callback function for RTAvlPVDoWithAll
507 *
508 * Saves the state of the page record
509 *
510 * @returns VBox status code.
511 * @param pNode Current node
512 * @param pVM1 Pointer to the VM
513 */
514static DECLCALLBACK(int) SavePageState(PAVLPVNODECORE pNode, void *pVM1)
515{
516 PVM pVM = (PVM)pVM1;
517 PCSAMPAGEREC pPage = (PCSAMPAGEREC)pNode;
518 CSAMPAGEREC page = *pPage;
519 PSSMHANDLE pSSM = pVM->csam.s.savedstate.pSSM;
520 int rc;
521
522 /* Save the page record itself */
523 rc = SSMR3PutMem(pSSM, &page, sizeof(page));
524 AssertRCReturn(rc, rc);
525
526 if (page.page.pBitmap)
527 {
528 rc = SSMR3PutMem(pSSM, page.page.pBitmap, CSAM_PAGE_BITMAP_SIZE);
529 AssertRCReturn(rc, rc);
530 }
531
532 return VINF_SUCCESS;
533}
534
535/**
536 * Execute state save operation.
537 *
538 * @returns VBox status code.
539 * @param pVM Pointer to the VM.
540 * @param pSSM SSM operation handle.
541 */
542static DECLCALLBACK(int) csamR3Save(PVM pVM, PSSMHANDLE pSSM)
543{
544 CSAM csamInfo = pVM->csam.s;
545 int rc;
546
547 /*
548 * Count the number of page records in the tree (feeling lazy)
549 */
550 csamInfo.savedstate.cPageRecords = 0;
551 RTAvlPVDoWithAll(&pVM->csam.s.pPageTree, true, CountRecord, &csamInfo.savedstate.cPageRecords);
552
553 /*
554 * Save CSAM structure
555 */
556 pVM->csam.s.savedstate.pSSM = pSSM;
557 rc = SSMR3PutMem(pSSM, &csamInfo, sizeof(csamInfo));
558 AssertRCReturn(rc, rc);
559
560 /* Save pgdir bitmap */
561 rc = SSMR3PutMem(pSSM, csamInfo.pPDBitmapHC, CSAM_PGDIRBMP_CHUNKS*sizeof(RTHCPTR));
562 AssertRCReturn(rc, rc);
563
564 for (unsigned i = 0; i < CSAM_PGDIRBMP_CHUNKS; i++)
565 {
566 if(csamInfo.pPDBitmapHC[i])
567 {
568 /* Save the page bitmap. */
569 rc = SSMR3PutMem(pSSM, csamInfo.pPDBitmapHC[i], CSAM_PAGE_BITMAP_SIZE);
570 AssertRCReturn(rc, rc);
571 }
572 }
573
574 /*
575 * Save page records
576 */
577 rc = RTAvlPVDoWithAll(&pVM->csam.s.pPageTree, true, SavePageState, pVM);
578 AssertRCReturn(rc, rc);
579
580 /** @note we don't save aDangerousInstr; it is recreated automatically after loading. */
581 return VINF_SUCCESS;
582}
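/* Illustrative summary (not part of the original file) of the saved-state
 * layout produced by csamR3Save() above and consumed by csamR3Load() below:
 *
 *     CSAM structure             (with savedstate.cPageRecords filled in)
 *     pPDBitmapHC pointer array  (only used on load as a present/absent map)
 *     page bitmap                (one per non-NULL pPDBitmapHC[i] chunk)
 *     CSAMPAGEREC + optional bitmap, repeated cPageRecords (+
 *                                 cPatchPageRecords) times
 */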
583
584
585/**
586 * Execute state load operation.
587 *
588 * @returns VBox status code.
589 * @param pVM Pointer to the VM.
590 * @param pSSM SSM operation handle.
591 * @param uVersion Data layout version.
592 * @param uPass The data pass.
593 */
594static DECLCALLBACK(int) csamR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
595{
596 int rc;
597 CSAM csamInfo;
598
599 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
600 if (uVersion != CSAM_SAVED_STATE_VERSION)
601 {
602 AssertMsgFailed(("csamR3Load: Invalid version uVersion=%d!\n", uVersion));
603 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
604 }
605
606 pVM->csam.s.savedstate.pSSM = pSSM;
607
608 /*
609 * Restore CSAM structure
610 */
611 RT_ZERO(csamInfo);
612 rc = SSMR3GetStructEx(pSSM, &csamInfo, sizeof(csamInfo), SSMSTRUCT_FLAGS_MEM_BAND_AID_RELAXED, &g_aCsamFields[0], NULL);
613 AssertRCReturn(rc, rc);
614
615 pVM->csam.s.fGatesChecked = csamInfo.fGatesChecked;
616 pVM->csam.s.fScanningStarted = csamInfo.fScanningStarted;
617
618 /* Restore dirty code page info. */
619 pVM->csam.s.cDirtyPages = csamInfo.cDirtyPages;
620 memcpy(pVM->csam.s.pvDirtyBasePage, csamInfo.pvDirtyBasePage, sizeof(pVM->csam.s.pvDirtyBasePage));
621 memcpy(pVM->csam.s.pvDirtyFaultPage, csamInfo.pvDirtyFaultPage, sizeof(pVM->csam.s.pvDirtyFaultPage));
622
623 /* Restore possible code page */
624 pVM->csam.s.cPossibleCodePages = csamInfo.cPossibleCodePages;
625 memcpy(pVM->csam.s.pvPossibleCodePage, csamInfo.pvPossibleCodePage, sizeof(pVM->csam.s.pvPossibleCodePage));
626
627 /* Restore pgdir bitmap (we'll change the pointers next). */
628 rc = SSMR3GetStructEx(pSSM, pVM->csam.s.pPDBitmapHC, sizeof(uint8_t *) * CSAM_PGDIRBMP_CHUNKS,
629 SSMSTRUCT_FLAGS_MEM_BAND_AID_RELAXED, &g_aCsamPDBitmapArray[0], NULL);
630 AssertRCReturn(rc, rc);
631
632 /*
633 * Restore page bitmaps
634 */
635 for (unsigned i=0;i<CSAM_PGDIRBMP_CHUNKS;i++)
636 {
637 if(pVM->csam.s.pPDBitmapHC[i])
638 {
639 rc = MMHyperAlloc(pVM, CSAM_PAGE_BITMAP_SIZE, 0, MM_TAG_CSAM, (void **)&pVM->csam.s.pPDBitmapHC[i]);
640 if (RT_FAILURE(rc))
641 {
642 Log(("MMHyperAlloc failed with %Rrc\n", rc));
643 return rc;
644 }
645 /* Convert to GC pointer. */
646 pVM->csam.s.pPDGCBitmapHC[i] = MMHyperR3ToRC(pVM, pVM->csam.s.pPDBitmapHC[i]);
647 Assert(pVM->csam.s.pPDGCBitmapHC[i]);
648
649 /* Restore the bitmap. */
650 rc = SSMR3GetMem(pSSM, pVM->csam.s.pPDBitmapHC[i], CSAM_PAGE_BITMAP_SIZE);
651 AssertRCReturn(rc, rc);
652 }
653 else
654 {
655 Assert(!pVM->csam.s.pPDGCBitmapHC[i]);
656 pVM->csam.s.pPDGCBitmapHC[i] = 0;
657 }
658 }
659
660 /*
661 * Restore page records
662 */
663 for (uint32_t i=0;i<csamInfo.savedstate.cPageRecords + csamInfo.savedstate.cPatchPageRecords;i++)
664 {
665 CSAMPAGEREC page;
666 PCSAMPAGE pPage;
667
668 RT_ZERO(page);
669 rc = SSMR3GetStructEx(pSSM, &page, sizeof(page), SSMSTRUCT_FLAGS_MEM_BAND_AID_RELAXED, &g_aCsamPageRecFields[0], NULL);
670 AssertRCReturn(rc, rc);
671
672 /*
673 * Recreate the page record
674 */
675 pPage = csamCreatePageRecord(pVM, page.page.pPageGC, page.page.enmTag, page.page.fCode32, page.page.fMonitorInvalidation);
676 AssertReturn(pPage, VERR_NO_MEMORY);
677
678 pPage->GCPhys = page.page.GCPhys;
679 pPage->fFlags = page.page.fFlags;
680 pPage->u64Hash = page.page.u64Hash;
681
682 if (page.page.pBitmap)
683 {
684 rc = SSMR3GetMem(pSSM, pPage->pBitmap, CSAM_PAGE_BITMAP_SIZE);
685 AssertRCReturn(rc, rc);
686 }
687 else
688 {
689 MMR3HeapFree(pPage->pBitmap);
690 pPage->pBitmap = 0;
691 }
692 }
693
694 /* Note: we don't restore aDangerousInstr; it will be recreated automatically. */
695 memset(&pVM->csam.s.aDangerousInstr, 0, sizeof(pVM->csam.s.aDangerousInstr));
696 pVM->csam.s.cDangerousInstr = 0;
697 pVM->csam.s.iDangerousInstr = 0;
698 return VINF_SUCCESS;
699}
700
701/**
702 * Converts a guest context address to a host context pointer.
703 *
704 * @returns Byte pointer (ring-3 context) corresponding to pGCPtr on success,
705 * NULL on failure.
706 * @param pVM Pointer to the VM.
707 * @param pCacheRec Address conversion cache record.
708 * @param pGCPtr Guest context pointer.
711 */
712static uint8_t *csamR3GCVirtToHCVirt(PVM pVM, PCSAMP2GLOOKUPREC pCacheRec, RCPTRTYPE(uint8_t *) pGCPtr)
713{
714 int rc;
715 void *pHCPtr;
716 Assert(pVM->cCpus == 1);
717 PVMCPU pVCpu = VMMGetCpu0(pVM);
718
719 STAM_PROFILE_START(&pVM->csam.s.StatTimeAddrConv, a);
720
721 pHCPtr = PATMR3GCPtrToHCPtr(pVM, pGCPtr);
722 if (pHCPtr)
723 return (uint8_t *)pHCPtr;
724
725 if (pCacheRec->pPageLocStartHC)
726 {
727 uint32_t offset = pGCPtr & PAGE_OFFSET_MASK;
728 if (pCacheRec->pGuestLoc == (pGCPtr & PAGE_BASE_GC_MASK))
729 {
730 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeAddrConv, a);
731 return pCacheRec->pPageLocStartHC + offset;
732 }
733 }
734
735 /* Release previous lock if any. */
736 if (pCacheRec->Lock.pvMap)
737 {
738 PGMPhysReleasePageMappingLock(pVM, &pCacheRec->Lock);
739 pCacheRec->Lock.pvMap = NULL;
740 }
741
742 rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, pGCPtr, (const void **)&pHCPtr, &pCacheRec->Lock);
743 if (rc != VINF_SUCCESS)
744 {
745//// AssertMsgRC(rc, ("MMR3PhysGCVirt2HCVirtEx failed for %RRv\n", pGCPtr));
746 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeAddrConv, a);
747 return NULL;
748 }
749
750 pCacheRec->pPageLocStartHC = (uint8_t*)((uintptr_t)pHCPtr & PAGE_BASE_HC_MASK);
751 pCacheRec->pGuestLoc = pGCPtr & PAGE_BASE_GC_MASK;
752 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeAddrConv, a);
753 return (uint8_t *)pHCPtr;
754}
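/* Illustrative caller-side sketch (not part of the original file): users of
 * csamR3GCVirtToHCVirt() keep a zero-initialized CSAMP2GLOOKUPREC on the stack
 * and must release the last page mapping lock once they are done with the
 * returned pointer, roughly like this:
 *
 *     CSAMP2GLOOKUPREC cacheRec;
 *     RT_ZERO(cacheRec);
 *     uint8_t *pbInstrHC = csamR3GCVirtToHCVirt(pVM, &cacheRec, pInstrGC);
 *     // ... use pbInstrHC while the mapping lock is held ...
 *     if (cacheRec.Lock.pvMap)
 *         PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
 */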
755
756
757/** For csamR3ReadBytes. */
758typedef struct CSAMDISINFO
759{
760 PVM pVM;
761 uint8_t const *pbSrcInstr; /* aka pInstHC */
762} CSAMDISINFO, *PCSAMDISINFO;
763
764
765/**
766 * @callback_method_impl{FNDISREADBYTES}
767 */
768static DECLCALLBACK(int) csamR3ReadBytes(PDISCPUSTATE pDis, uint8_t offInstr, uint8_t cbMinRead, uint8_t cbMaxRead)
769{
770 PCSAMDISINFO pDisInfo = (PCSAMDISINFO)pDis->pvUser;
771
772 /*
773 * We are not interested in patched instructions, so read the original opcode bytes.
774 *
775 * Note! single instruction patches (int3) are checked in CSAMR3AnalyseCallback
776 *
777 * Since we're decoding one instruction at the time, we don't need to be
778 * concerned about any patched instructions following the first one. We
779 * could in fact probably skip this PATM call for offInstr != 0.
780 */
781 size_t cbRead = cbMaxRead;
782 RTUINTPTR uSrcAddr = pDis->uInstrAddr + offInstr;
783 int rc = PATMR3ReadOrgInstr(pDisInfo->pVM, pDis->uInstrAddr + offInstr, &pDis->abInstr[offInstr], cbRead, &cbRead);
784 if (RT_SUCCESS(rc))
785 {
786 if (cbRead >= cbMinRead)
787 {
788 pDis->cbCachedInstr = offInstr + (uint8_t)cbRead;
789 return rc;
790 }
791
792 cbMinRead -= (uint8_t)cbRead;
793 cbMaxRead -= (uint8_t)cbRead;
794 offInstr += (uint8_t)cbRead;
795 uSrcAddr += cbRead;
796 }
797
798 /*
799 * The current byte isn't a patch instruction byte.
800 */
801 AssertPtr(pDisInfo->pbSrcInstr);
802 if ((pDis->uInstrAddr >> PAGE_SHIFT) == ((uSrcAddr + cbMaxRead - 1) >> PAGE_SHIFT))
803 {
804 memcpy(&pDis->abInstr[offInstr], &pDisInfo->pbSrcInstr[offInstr], cbMaxRead);
805 offInstr += cbMaxRead;
806 rc = VINF_SUCCESS;
807 }
808 else if ( (pDis->uInstrAddr >> PAGE_SHIFT) == ((uSrcAddr + cbMinRead - 1) >> PAGE_SHIFT)
809 || PATMIsPatchGCAddr(pDisInfo->pVM, uSrcAddr) /** @todo does CSAM actually analyze patch code, or is this just a copy & paste check? */
810 )
811 {
812 memcpy(&pDis->abInstr[offInstr], &pDisInfo->pbSrcInstr[offInstr], cbMinRead);
813 offInstr += cbMinRead;
814 rc = VINF_SUCCESS;
815 }
816 else
817 {
818 /* Crossed a page boundary, pbSrcInstr is no good... */
819 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pDisInfo->pVM), &pDis->abInstr[offInstr], uSrcAddr, cbMinRead);
820 offInstr += cbMinRead;
821 }
822
823 pDis->cbCachedInstr = offInstr;
824 return rc;
825}
826
827DECLINLINE(int) csamR3DISInstr(PVM pVM, RTRCPTR InstrGC, uint8_t *InstrHC, DISCPUMODE enmCpuMode,
828 PDISCPUSTATE pCpu, uint32_t *pcbInstr, char *pszOutput, size_t cbOutput)
829{
830 CSAMDISINFO DisInfo = { pVM, InstrHC };
831#ifdef DEBUG
832 return DISInstrToStrEx(InstrGC, enmCpuMode, csamR3ReadBytes, &DisInfo, DISOPTYPE_ALL,
833 pCpu, pcbInstr, pszOutput, cbOutput);
834#else
835 /* We are interested in everything except harmless stuff */
836 if (pszOutput)
837 return DISInstrToStrEx(InstrGC, enmCpuMode, csamR3ReadBytes, &DisInfo,
838 ~(DISOPTYPE_INVALID | DISOPTYPE_HARMLESS | DISOPTYPE_RRM_MASK),
839 pCpu, pcbInstr, pszOutput, cbOutput);
840 return DISInstrEx(InstrGC, enmCpuMode, ~(DISOPTYPE_INVALID | DISOPTYPE_HARMLESS | DISOPTYPE_RRM_MASK),
841 csamR3ReadBytes, &DisInfo, pCpu, pcbInstr);
842#endif
843}
844
845/**
846 * Analyses the instructions following the cli for compliance with our cli heuristics.
847 *
848 * @returns VBox status code.
849 * @param pVM Pointer to the VM.
850 * @param pCpu CPU disassembly state
851 * @param pInstrGC Guest context pointer to privileged instruction
852 * @param pCurInstrGC Guest context pointer to the current instruction
853 * @param pCacheRec GC to HC cache record
854 * @param pUserData User pointer (callback specific)
855 *
856 */
857static int CSAMR3AnalyseCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC,
858 PCSAMP2GLOOKUPREC pCacheRec, void *pUserData)
859{
860 PCSAMPAGE pPage = (PCSAMPAGE)pUserData;
861 int rc;
862 NOREF(pInstrGC);
863
864 switch (pCpu->pCurInstr->uOpcode)
865 {
866 case OP_INT:
867 Assert(pCpu->Param1.fUse & DISUSE_IMMEDIATE8);
868 if (pCpu->Param1.uValue == 3)
869 {
870 //two byte int 3
871 return VINF_SUCCESS;
872 }
873 break;
874
875 /* removing breaks win2k guests? */
876 case OP_IRET:
877 if (EMIsRawRing1Enabled(pVM))
878 break;
879 /* no break */
880
881 case OP_ILLUD2:
882 /* This appears to be some kind of kernel panic in Linux 2.4; no point in continuing. */
883 case OP_RETN:
884 case OP_INT3:
885 case OP_INVALID:
886 return VINF_SUCCESS;
887 }
888
889 // Check for exit points
890 switch (pCpu->pCurInstr->uOpcode)
891 {
892 /* It's not a good idea to patch pushf instructions:
893 * - increases the chance of conflicts (code jumping to the next instruction)
894 * - better to patch the cli
895 * - code that branches before the cli will likely hit an int 3
896 * - in general doesn't offer any benefits as we don't allow nested patch blocks (IF is always 1)
897 */
898 case OP_PUSHF:
899 case OP_POPF:
900 break;
901
902 case OP_CLI:
903 {
904 uint32_t cbInstrs = 0;
905 uint32_t cbCurInstr = pCpu->cbInstr;
906 bool fCode32 = pPage->fCode32;
907
908 Assert(fCode32);
909
910 PATMR3AddHint(pVM, pCurInstrGC, (fCode32) ? PATMFL_CODE32 : 0);
911
912 /* Make sure the instructions that follow the cli have not been encountered before. */
913 while (true)
914 {
915 DISCPUSTATE cpu;
916
917 if (cbInstrs + cbCurInstr >= SIZEOF_NEARJUMP32)
918 break;
919
920 if (csamIsCodeScanned(pVM, pCurInstrGC + cbCurInstr, &pPage) == true)
921 {
922 /* We've scanned the next instruction(s) already. This means we've
923 followed a branch that ended up there before -> dangerous!! */
924 PATMR3DetectConflict(pVM, pCurInstrGC, pCurInstrGC + cbCurInstr);
925 break;
926 }
927 pCurInstrGC += cbCurInstr;
928 cbInstrs += cbCurInstr;
929
930 { /* Force pCurInstrHC out of scope after we stop using it (page lock!) */
931 uint8_t *pCurInstrHC = csamR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
932 if (pCurInstrHC == NULL)
933 {
934 Log(("csamR3GCVirtToHCVirt failed for %RRv\n", pCurInstrGC));
935 break;
936 }
937 Assert(VALID_PTR(pCurInstrHC));
938
939 rc = csamR3DISInstr(pVM, pCurInstrGC, pCurInstrHC, (fCode32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
940 &cpu, &cbCurInstr, NULL, 0);
941 }
942 AssertRC(rc);
943 if (RT_FAILURE(rc))
944 break;
945 }
946 break;
947 }
948
949#ifdef VBOX_WITH_RAW_RING1
950 case OP_MOV:
951 /* mov xx, CS is a dangerous instruction as our raw ring usage leaks through. */
952 if ( EMIsRawRing1Enabled(pVM)
953 && (pCpu->Param2.fUse & DISUSE_REG_SEG)
954 && (pCpu->Param2.Base.idxSegReg == DISSELREG_CS))
955 {
956 Log(("CSAM: Patching dangerous 'mov xx, cs' instruction at %RGv with an int3\n", pCurInstrGC));
957 if (PATMR3HasBeenPatched(pVM, pCurInstrGC) == false)
958 {
959 rc = PATMR3InstallPatch(pVM, pCurInstrGC, (pPage->fCode32) ? PATMFL_CODE32 : 0);
960 if (RT_FAILURE(rc))
961 {
962 Log(("PATMR3InstallPatch failed with %d\n", rc));
963 return VWRN_CONTINUE_ANALYSIS;
964 }
965 }
966 return VWRN_CONTINUE_ANALYSIS;
967 }
968 break;
969#endif
970
971 case OP_PUSH:
972 /** @todo broken comparison!! should be if ((pCpu->Param1.fUse & DISUSE_REG_SEG) && (pCpu->Param1.Base.idxSegReg == DISSELREG_SS)) */
973 if (pCpu->pCurInstr->fParam1 != OP_PARM_REG_CS)
974 break;
975
976 /* no break */
977#ifndef VBOX_WITH_SAFE_STR
978 case OP_STR:
979#endif
980 case OP_LSL:
981 case OP_LAR:
982 case OP_SGDT:
983 case OP_SLDT:
984 case OP_SIDT:
985 case OP_SMSW:
986 case OP_VERW:
987 case OP_VERR:
988 case OP_CPUID:
989 case OP_IRET:
990#ifdef DEBUG
991 switch(pCpu->pCurInstr->uOpcode)
992 {
993 case OP_STR:
994 Log(("Privileged instruction at %RRv: str!!\n", pCurInstrGC));
995 break;
996 case OP_LSL:
997 Log(("Privileged instruction at %RRv: lsl!!\n", pCurInstrGC));
998 break;
999 case OP_LAR:
1000 Log(("Privileged instruction at %RRv: lar!!\n", pCurInstrGC));
1001 break;
1002 case OP_SGDT:
1003 Log(("Privileged instruction at %RRv: sgdt!!\n", pCurInstrGC));
1004 break;
1005 case OP_SLDT:
1006 Log(("Privileged instruction at %RRv: sldt!!\n", pCurInstrGC));
1007 break;
1008 case OP_SIDT:
1009 Log(("Privileged instruction at %RRv: sidt!!\n", pCurInstrGC));
1010 break;
1011 case OP_SMSW:
1012 Log(("Privileged instruction at %RRv: smsw!!\n", pCurInstrGC));
1013 break;
1014 case OP_VERW:
1015 Log(("Privileged instruction at %RRv: verw!!\n", pCurInstrGC));
1016 break;
1017 case OP_VERR:
1018 Log(("Privileged instruction at %RRv: verr!!\n", pCurInstrGC));
1019 break;
1020 case OP_CPUID:
1021 Log(("Privileged instruction at %RRv: cpuid!!\n", pCurInstrGC));
1022 break;
1023 case OP_PUSH:
1024 Log(("Privileged instruction at %RRv: push cs!!\n", pCurInstrGC));
1025 break;
1026 case OP_IRET:
1027 Log(("Privileged instruction at %RRv: iret!!\n", pCurInstrGC));
1028 break;
1029 }
1030#endif
1031
1032 if (PATMR3HasBeenPatched(pVM, pCurInstrGC) == false)
1033 {
1034 rc = PATMR3InstallPatch(pVM, pCurInstrGC, (pPage->fCode32) ? PATMFL_CODE32 : 0);
1035 if (RT_FAILURE(rc))
1036 {
1037 Log(("PATMR3InstallPatch failed with %d\n", rc));
1038 return VWRN_CONTINUE_ANALYSIS;
1039 }
1040 }
1041 if (pCpu->pCurInstr->uOpcode == OP_IRET)
1042 return VINF_SUCCESS; /* Look no further in this branch. */
1043
1044 return VWRN_CONTINUE_ANALYSIS;
1045
1046 case OP_JMP:
1047 case OP_CALL:
1048 {
1049 // return or jump/call through a jump table
1050 if (OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J)
1051 {
1052#ifdef DEBUG
1053 switch(pCpu->pCurInstr->uOpcode)
1054 {
1055 case OP_JMP:
1056 Log(("Control Flow instruction at %RRv: jmp!!\n", pCurInstrGC));
1057 break;
1058 case OP_CALL:
1059 Log(("Control Flow instruction at %RRv: call!!\n", pCurInstrGC));
1060 break;
1061 }
1062#endif
1063 return VWRN_CONTINUE_ANALYSIS;
1064 }
1065 return VWRN_CONTINUE_ANALYSIS;
1066 }
1067
1068 }
1069
1070 return VWRN_CONTINUE_ANALYSIS;
1071}
1072
1073#ifdef CSAM_ANALYSE_BEYOND_RET
1074/**
1075 * Wrapper for csamAnalyseCodeStream for call instructions.
1076 *
1077 * @returns VBox status code.
1078 * @param pVM Pointer to the VM.
1079 * @param pInstrGC Guest context pointer to privileged instruction
1080 * @param pCurInstrGC Guest context pointer to the current instruction
1081 * @param fCode32 16 or 32 bits code
1082 * @param pfnCSAMR3Analyse Callback for testing the disassembled instruction
1083 * @param pUserData User pointer (callback specific)
1084 *
1085 */
1086static int csamAnalyseCallCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, bool fCode32,
1087 PFN_CSAMR3ANALYSE pfnCSAMR3Analyse, void *pUserData, PCSAMP2GLOOKUPREC pCacheRec)
1088{
1089 int rc;
1090 CSAMCALLEXITREC CallExitRec;
1091 PCSAMCALLEXITREC pOldCallRec;
1092 PCSAMPAGE pPage = 0;
1093 uint32_t i;
1094
1095 CallExitRec.cInstrAfterRet = 0;
1096
1097 pOldCallRec = pCacheRec->pCallExitRec;
1098 pCacheRec->pCallExitRec = &CallExitRec;
1099
1100 rc = csamAnalyseCodeStream(pVM, pInstrGC, pCurInstrGC, fCode32, pfnCSAMR3Analyse, pUserData, pCacheRec);
1101
1102 for (i=0;i<CallExitRec.cInstrAfterRet;i++)
1103 {
1104 PCSAMPAGE pPage = 0;
1105
1106 pCurInstrGC = CallExitRec.pInstrAfterRetGC[i];
1107
1108 /* Check if we've previously encountered the instruction after the ret. */
1109 if (csamIsCodeScanned(pVM, pCurInstrGC, &pPage) == false)
1110 {
1111 DISCPUSTATE cpu;
1112 uint32_t cbInstr;
1113 int rc2;
1114#ifdef DEBUG
1115 char szOutput[256];
1116#endif
1117 if (pPage == NULL)
1118 {
1119 /* New address; let's take a look at it. */
1120 pPage = csamCreatePageRecord(pVM, pCurInstrGC, CSAM_TAG_CSAM, fCode32);
1121 if (pPage == NULL)
1122 {
1123 rc = VERR_NO_MEMORY;
1124 goto done;
1125 }
1126 }
1127
1128 /**
1129 * Some generic requirements for recognizing an adjacent function:
1130 * - alignment fillers that consist of:
1131 * - nop
1132 * - lea genregX, [genregX (+ 0)]
1133 * - push ebp after the filler (can extend this later); aligned on at least a 4 byte boundary
1134 */
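 /* Illustrative example (not part of the original file) of the kind of
  * inter-function padding the loop below tries to recognize; the exact
  * bytes are only an example:
  *
  *     8d 76 00      lea  esi, [esi+0x0]   ; alignment filler
  *     90            nop                   ; alignment filler
  *     55            push ebp              ; aligned start of the next function
  */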
1135 for (int j = 0; j < 16; j++)
1136 {
1137 uint8_t *pCurInstrHC = csamR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
1138 if (pCurInstrHC == NULL)
1139 {
1140 Log(("csamR3GCVirtToHCVirt failed for %RRv\n", pCurInstrGC));
1141 goto done;
1142 }
1143 Assert(VALID_PTR(pCurInstrHC));
1144
1145 STAM_PROFILE_START(&pVM->csam.s.StatTimeDisasm, a);
1146#ifdef DEBUG
1147 rc2 = csamR3DISInstr(pVM, pCurInstrGC, pCurInstrHC, (fCode32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
1148 &cpu, &cbInstr, szOutput, sizeof(szOutput));
1149 if (RT_SUCCESS(rc2)) Log(("CSAM Call Analysis: %s", szOutput));
1150#else
1151 rc2 = csamR3DISInstr(pVM, pCurInstrGC, pCurInstrHC, (fCode32) ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
1152 &cpu, &cbInstr, NULL, 0);
1153#endif
1154 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeDisasm, a);
1155 if (RT_FAILURE(rc2))
1156 {
1157 Log(("Disassembly failed at %RRv with %Rrc (probably page not present) -> return to caller\n", pCurInstrGC, rc2));
1158 goto done;
1159 }
1160
1161 STAM_COUNTER_ADD(&pVM->csam.s.StatNrBytesRead, cbInstr);
1162
1163 RCPTRTYPE(uint8_t *) addr = 0;
1164 PCSAMPAGE pJmpPage = NULL;
1165
1166 if (PAGE_ADDRESS(pCurInstrGC) != PAGE_ADDRESS(pCurInstrGC + cbInstr - 1))
1167 {
1168 if (!PGMGstIsPagePresent(VMMGetCpu0(pVM), pCurInstrGC + cbInstr - 1)) /* PGMGstIsPagePresent takes the VCPU; cf. csamAnalyseCodeStream below. */
1169 {
1170 /// @todo fault in the page
1171 Log(("Page for current instruction %RRv is not present!!\n", pCurInstrGC));
1172 goto done;
1173 }
1174 //all is fine, let's continue
1175 csamR3CheckPageRecord(pVM, pCurInstrGC + cbInstr - 1);
1176 }
1177
1178 switch (cpu.pCurInstr->uOpcode)
1179 {
1180 case OP_NOP:
1181 case OP_INT3:
1182 break; /* acceptable */
1183
1184 case OP_LEA:
1185 /* Must be similar to:
1186 *
1187 * lea esi, [esi]
1188 * lea esi, [esi+0]
1189 * Any register is allowed as long as source and destination are identical.
1190 */
1191 if ( cpu.Param1.fUse != DISUSE_REG_GEN32
1192 || ( cpu.Param2.flags != DISUSE_REG_GEN32
1193 && ( !(cpu.Param2.flags & DISUSE_REG_GEN32)
1194 || !(cpu.Param2.flags & (DISUSE_DISPLACEMENT8|DISUSE_DISPLACEMENT16|DISUSE_DISPLACEMENT32))
1195 || cpu.Param2.uValue != 0
1196 )
1197 )
1198 || cpu.Param1.base.reg_gen32 != cpu.Param2.base.reg_gen32
1199 )
1200 {
1201 STAM_COUNTER_INC(&pVM->csam.s.StatScanNextFunctionFailed);
1202 goto next_function;
1203 }
1204 break;
1205
1206 case OP_PUSH:
1207 {
1208 if ( (pCurInstrGC & 0x3) != 0
1209 || cpu.Param1.fUse != DISUSE_REG_GEN32
1210 || cpu.Param1.base.reg_gen32 != USE_REG_EBP
1211 )
1212 {
1213 STAM_COUNTER_INC(&pVM->csam.s.StatScanNextFunctionFailed);
1214 goto next_function;
1215 }
1216
1217 if (csamIsCodeScanned(pVM, pCurInstrGC, &pPage) == false)
1218 {
1219 CSAMCALLEXITREC CallExitRec2;
1220 CallExitRec2.cInstrAfterRet = 0;
1221
1222 pCacheRec->pCallExitRec = &CallExitRec2;
1223
1224 /* Analyse the function. */
1225 Log(("Found new function at %RRv\n", pCurInstrGC));
1226 STAM_COUNTER_INC(&pVM->csam.s.StatScanNextFunction);
1227 csamAnalyseCallCodeStream(pVM, pInstrGC, pCurInstrGC, fCode32, pfnCSAMR3Analyse, pUserData, pCacheRec);
1228 }
1229 goto next_function;
1230 }
1231
1232 case OP_SUB:
1233 {
1234 if ( (pCurInstrGC & 0x3) != 0
1235 || cpu.Param1.fUse != DISUSE_REG_GEN32
1236 || cpu.Param1.base.reg_gen32 != USE_REG_ESP
1237 )
1238 {
1239 STAM_COUNTER_INC(&pVM->csam.s.StatScanNextFunctionFailed);
1240 goto next_function;
1241 }
1242
1243 if (csamIsCodeScanned(pVM, pCurInstrGC, &pPage) == false)
1244 {
1245 CSAMCALLEXITREC CallExitRec2;
1246 CallExitRec2.cInstrAfterRet = 0;
1247
1248 pCacheRec->pCallExitRec = &CallExitRec2;
1249
1250 /* Analyse the function. */
1251 Log(("Found new function at %RRv\n", pCurInstrGC));
1252 STAM_COUNTER_INC(&pVM->csam.s.StatScanNextFunction);
1253 csamAnalyseCallCodeStream(pVM, pInstrGC, pCurInstrGC, fCode32, pfnCSAMR3Analyse, pUserData, pCacheRec);
1254 }
1255 goto next_function;
1256 }
1257
1258 default:
1259 STAM_COUNTER_INC(&pVM->csam.s.StatScanNextFunctionFailed);
1260 goto next_function;
1261 }
1262 /* Mark it as scanned. */
1263 csamMarkCode(pVM, pPage, pCurInstrGC, cbInstr, true);
1264 pCurInstrGC += cbInstr;
1265 } /* for at most 16 instructions */
1266next_function:
1267 ; /* MSVC complains otherwise */
1268 }
1269 }
1270done:
1271 pCacheRec->pCallExitRec = pOldCallRec;
1272 return rc;
1273}
1274#else
1275#define csamAnalyseCallCodeStream csamAnalyseCodeStream
1276#endif
1277
1278/**
1279 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
1280 *
1281 * @returns VBox status code.
1282 * @param pVM Pointer to the VM.
1283 * @param pInstrGC Guest context pointer to privileged instruction
1284 * @param pCurInstrGC Guest context pointer to the current instruction
1285 * @param fCode32 16 or 32 bits code
1286 * @param pfnCSAMR3Analyse Callback for testing the disassembled instruction
1287 * @param pUserData User pointer (callback specific)
1288 *
1289 */
1290static int csamAnalyseCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, bool fCode32,
1291 PFN_CSAMR3ANALYSE pfnCSAMR3Analyse, void *pUserData, PCSAMP2GLOOKUPREC pCacheRec)
1292{
1293 DISCPUSTATE cpu;
1294 PCSAMPAGE pPage = (PCSAMPAGE)pUserData;
1295 int rc = VWRN_CONTINUE_ANALYSIS;
1296 uint32_t cbInstr;
1297 int rc2;
1298 Assert(pVM->cCpus == 1);
1299 PVMCPU pVCpu = VMMGetCpu0(pVM);
1300
1301#ifdef DEBUG
1302 char szOutput[256];
1303#endif
1304
1305 LogFlow(("csamAnalyseCodeStream: code at %RRv depth=%d\n", pCurInstrGC, pCacheRec->depth));
1306
1307 pVM->csam.s.fScanningStarted = true;
1308
1309 pCacheRec->depth++;
1310 /*
1311 * Limit the call depth. (rather arbitrary upper limit; too low and we won't detect certain
1312 * cpuid instructions in Linux kernels; too high and we waste too much time scanning code)
1313 * (512 is necessary to detect cpuid instructions in Red Hat EL4; see defect 1355)
1314 * @note we are using a lot of stack here; a couple of hundred KB when we go to the full depth (!)
1315 */
1316 if (pCacheRec->depth > 512)
1317 {
1318 LogFlow(("CSAM: maximum calldepth reached for %RRv\n", pCurInstrGC));
1319 pCacheRec->depth--;
1320 return VINF_SUCCESS; //let's not go on forever
1321 }
1322
1323 Assert(!PATMIsPatchGCAddr(pVM, pCurInstrGC));
1324 csamR3CheckPageRecord(pVM, pCurInstrGC);
1325
1326 while(rc == VWRN_CONTINUE_ANALYSIS)
1327 {
1328 if (csamIsCodeScanned(pVM, pCurInstrGC, &pPage) == false)
1329 {
1330 if (pPage == NULL)
1331 {
1332 /* New address; let's take a look at it. */
1333 pPage = csamCreatePageRecord(pVM, pCurInstrGC, CSAM_TAG_CSAM, fCode32);
1334 if (pPage == NULL)
1335 {
1336 rc = VERR_NO_MEMORY;
1337 goto done;
1338 }
1339 }
1340 }
1341 else
1342 {
1343 LogFlow(("Code at %RRv has been scanned before\n", pCurInstrGC));
1344 rc = VINF_SUCCESS;
1345 goto done;
1346 }
1347
1348 { /* Force pCurInstrHC out of scope after we stop using it (page lock!) */
1349 uint8_t *pCurInstrHC = csamR3GCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
1350 if (pCurInstrHC == NULL)
1351 {
1352 Log(("csamR3GCVirtToHCVirt failed for %RRv\n", pCurInstrGC));
1353 rc = VERR_PATCHING_REFUSED;
1354 goto done;
1355 }
1356 Assert(VALID_PTR(pCurInstrHC));
1357
1358 STAM_PROFILE_START(&pVM->csam.s.StatTimeDisasm, a);
1359#ifdef DEBUG
1360 rc2 = csamR3DISInstr(pVM, pCurInstrGC, pCurInstrHC, fCode32 ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
1361 &cpu, &cbInstr, szOutput, sizeof(szOutput));
1362 if (RT_SUCCESS(rc2)) Log(("CSAM Analysis: %s", szOutput));
1363#else
1364 rc2 = csamR3DISInstr(pVM, pCurInstrGC, pCurInstrHC, fCode32 ? DISCPUMODE_32BIT : DISCPUMODE_16BIT,
1365 &cpu, &cbInstr, NULL, 0);
1366#endif
1367 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeDisasm, a);
1368 }
1369 if (RT_FAILURE(rc2))
1370 {
1371 Log(("Disassembly failed at %RRv with %Rrc (probably page not present) -> return to caller\n", pCurInstrGC, rc2));
1372 rc = VINF_SUCCESS;
1373 goto done;
1374 }
1375
1376 STAM_COUNTER_ADD(&pVM->csam.s.StatNrBytesRead, cbInstr);
1377
1378 csamMarkCode(pVM, pPage, pCurInstrGC, cbInstr, true);
1379
1380 RCPTRTYPE(uint8_t *) addr = 0;
1381 PCSAMPAGE pJmpPage = NULL;
1382
1383 if (PAGE_ADDRESS(pCurInstrGC) != PAGE_ADDRESS(pCurInstrGC + cbInstr - 1))
1384 {
1385 if (!PGMGstIsPagePresent(pVCpu, pCurInstrGC + cbInstr - 1))
1386 {
1387 /// @todo fault in the page
1388 Log(("Page for current instruction %RRv is not present!!\n", pCurInstrGC));
1389 rc = VWRN_CONTINUE_ANALYSIS;
1390 goto next_please;
1391 }
1392 //all is fine, let's continue
1393 csamR3CheckPageRecord(pVM, pCurInstrGC + cbInstr - 1);
1394 }
1395 /*
1396 * If it's harmless, then don't bother checking it (the disasm tables had better be accurate!)
1397 */
1398 if ((cpu.pCurInstr->fOpType & ~DISOPTYPE_RRM_MASK) == DISOPTYPE_HARMLESS)
1399 {
1400 AssertMsg(pfnCSAMR3Analyse(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec, (void *)pPage) == VWRN_CONTINUE_ANALYSIS, ("Instruction incorrectly marked harmless?!?!?\n"));
1401 rc = VWRN_CONTINUE_ANALYSIS;
1402 goto next_please;
1403 }
1404
1405#ifdef CSAM_ANALYSE_BEYOND_RET
1406 /* Remember the address of the instruction following the ret in case the parent instruction was a call. */
1407 if ( pCacheRec->pCallExitRec
1408 && cpu.pCurInstr->uOpcode == OP_RETN
1409 && pCacheRec->pCallExitRec->cInstrAfterRet < CSAM_MAX_CALLEXIT_RET)
1410 {
1411 pCacheRec->pCallExitRec->pInstrAfterRetGC[pCacheRec->pCallExitRec->cInstrAfterRet] = pCurInstrGC + cbInstr;
1412 pCacheRec->pCallExitRec->cInstrAfterRet++;
1413 }
1414#endif
1415
1416 rc = pfnCSAMR3Analyse(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec, (void *)pPage);
1417 if (rc == VINF_SUCCESS)
1418 goto done;
1419
1420 // For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction)
1421 if ( ((cpu.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW) && (OP_PARM_VTYPE(cpu.pCurInstr->fParam1) == OP_PARM_J))
1422 || (cpu.pCurInstr->uOpcode == OP_CALL && cpu.Param1.fUse == DISUSE_DISPLACEMENT32)) /* simple indirect call (call dword ptr [address]) */
1423 {
1424 /* We need to parse 'call dword ptr [address]' type of calls to catch cpuid instructions in some recent Linux distributions (e.g. OpenSuse 10.3) */
1425 if ( cpu.pCurInstr->uOpcode == OP_CALL
1426 && cpu.Param1.fUse == DISUSE_DISPLACEMENT32)
1427 {
1428 addr = 0;
1429 PGMPhysSimpleReadGCPtr(pVCpu, &addr, (RTRCUINTPTR)cpu.Param1.uDisp.i32, sizeof(addr));
1430 }
1431 else
1432 addr = CSAMResolveBranch(&cpu, pCurInstrGC);
1433
1434 if (addr == 0)
1435 {
1436 Log(("We don't support far jumps here!! (%08X)\n", cpu.Param1.fUse));
1437 rc = VINF_SUCCESS;
1438 break;
1439 }
1440 Assert(!PATMIsPatchGCAddr(pVM, addr));
1441
1442 /* If the target address lies in a patch generated jump, then special action needs to be taken. */
1443 PATMR3DetectConflict(pVM, pCurInstrGC, addr);
1444
1445 /* Same page? */
1446 if (PAGE_ADDRESS(addr) != PAGE_ADDRESS(pCurInstrGC ))
1447 {
1448 if (!PGMGstIsPagePresent(pVCpu, addr))
1449 {
1450 Log(("Page for current instruction %RRv is not present!!\n", addr));
1451 rc = VWRN_CONTINUE_ANALYSIS;
1452 goto next_please;
1453 }
1454
1455 /* All is fine, let's continue. */
1456 csamR3CheckPageRecord(pVM, addr);
1457 }
1458
1459 pJmpPage = NULL;
1460 if (csamIsCodeScanned(pVM, addr, &pJmpPage) == false)
1461 {
1462 if (pJmpPage == NULL)
1463 {
1464 /* New branch target; let's take a look at it. */
1465 pJmpPage = csamCreatePageRecord(pVM, addr, CSAM_TAG_CSAM, fCode32);
1466 if (pJmpPage == NULL)
1467 {
1468 rc = VERR_NO_MEMORY;
1469 goto done;
1470 }
1471 Assert(pPage);
1472 }
1473 if (cpu.pCurInstr->uOpcode == OP_CALL)
1474 rc = csamAnalyseCallCodeStream(pVM, pInstrGC, addr, fCode32, pfnCSAMR3Analyse, (void *)pJmpPage, pCacheRec);
1475 else
1476 rc = csamAnalyseCodeStream(pVM, pInstrGC, addr, fCode32, pfnCSAMR3Analyse, (void *)pJmpPage, pCacheRec);
1477
1478 if (rc != VINF_SUCCESS) {
1479 goto done;
1480 }
1481 }
1482 if (cpu.pCurInstr->uOpcode == OP_JMP)
1483 {//unconditional jump; return to caller
1484 rc = VINF_SUCCESS;
1485 goto done;
1486 }
1487
1488 rc = VWRN_CONTINUE_ANALYSIS;
1489 } //if ((cpu.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW) && (OP_PARM_VTYPE(cpu.pCurInstr->fParam1) == OP_PARM_J))
1490#ifdef CSAM_SCAN_JUMP_TABLE
1491 else
1492 if ( cpu.pCurInstr->uOpcode == OP_JMP
1493 && (cpu.Param1.fUse & (DISUSE_DISPLACEMENT32|DISUSE_INDEX|DISUSE_SCALE)) == (DISUSE_DISPLACEMENT32|DISUSE_INDEX|DISUSE_SCALE)
1494 )
1495 {
1496 RTRCPTR pJumpTableGC = (RTRCPTR)cpu.Param1.disp32;
1497 uint8_t *pJumpTableHC;
1498 int rc2;
1499
1500 Log(("Jump through jump table\n"));
1501
1502 rc2 = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, pJumpTableGC, (PRTHCPTR)&pJumpTableHC, missing page lock);
1503 if (rc2 == VINF_SUCCESS)
1504 {
1505 for (uint32_t i=0;i<2;i++)
1506 {
1507 uint64_t fFlags;
1508
1509 addr = pJumpTableGC + cpu.Param1.scale * i;
1510 /* Same page? */
1511 if (PAGE_ADDRESS(addr) != PAGE_ADDRESS(pJumpTableGC))
1512 break;
1513
1514 addr = *(RTRCPTR *)(pJumpTableHC + cpu.Param1.scale * i);
1515
1516 rc2 = PGMGstGetPage(pVCpu, addr, &fFlags, NULL);
1517 if ( rc2 != VINF_SUCCESS
1518 || (fFlags & X86_PTE_US)
1519 || !(fFlags & X86_PTE_P)
1520 )
1521 break;
1522
1523 Log(("Jump to %RRv\n", addr));
1524
1525 pJmpPage = NULL;
1526 if (csamIsCodeScanned(pVM, addr, &pJmpPage) == false)
1527 {
1528 if (pJmpPage == NULL)
1529 {
1530 /* New branch target; let's take a look at it. */
1531 pJmpPage = csamCreatePageRecord(pVM, addr, CSAM_TAG_CSAM, fCode32);
1532 if (pJmpPage == NULL)
1533 {
1534 rc = VERR_NO_MEMORY;
1535 goto done;
1536 }
1537 Assert(pPage);
1538 }
1539 rc = csamAnalyseCodeStream(pVM, pInstrGC, addr, fCode32, pfnCSAMR3Analyse, (void *)pJmpPage, pCacheRec);
1540 if (rc != VINF_SUCCESS) {
1541 goto done;
1542 }
1543 }
1544 }
1545 }
1546 }
1547#endif
1548 if (rc != VWRN_CONTINUE_ANALYSIS) {
1549 break; //done!
1550 }
1551next_please:
1552 if (cpu.pCurInstr->uOpcode == OP_JMP)
1553 {
1554 rc = VINF_SUCCESS;
1555 goto done;
1556 }
1557 pCurInstrGC += cbInstr;
1558 }
1559done:
1560 pCacheRec->depth--;
1561 return rc;
1562}
1563
1564
1565/**
1566 * Calculates the 64 bits hash value for the current page
1567 *
1568 * @returns hash value
1569 * @param pVM Pointer to the VM.
1570 * @param pInstr Page address
1571 */
1572uint64_t csamR3CalcPageHash(PVM pVM, RTRCPTR pInstr)
1573{
1574 uint64_t hash = 0;
1575 uint32_t val[5];
1576 int rc;
1577 Assert(pVM->cCpus == 1);
1578 PVMCPU pVCpu = VMMGetCpu0(pVM);
1579
1580 Assert((pInstr & PAGE_OFFSET_MASK) == 0);
1581
1582 rc = PGMPhysSimpleReadGCPtr(pVCpu, &val[0], pInstr, sizeof(val[0]));
1583 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc = %Rrc\n", rc));
1584 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
1585 {
1586 Log(("csamR3CalcPageHash: page %RRv not present!!\n", pInstr));
1587 return ~0ULL;
1588 }
1589
1590 rc = PGMPhysSimpleReadGCPtr(pVCpu, &val[1], pInstr+1024, sizeof(val[0]));
1591 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc = %Rrc\n", rc));
1592 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
1593 {
1594 Log(("csamR3CalcPageHash: page %RRv not present!!\n", pInstr));
1595 return ~0ULL;
1596 }
1597
1598 rc = PGMPhysSimpleReadGCPtr(pVCpu, &val[2], pInstr+2048, sizeof(val[0]));
1599 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc = %Rrc\n", rc));
1600 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
1601 {
1602 Log(("csamR3CalcPageHash: page %RRv not present!!\n", pInstr));
1603 return ~0ULL;
1604 }
1605
1606 rc = PGMPhysSimpleReadGCPtr(pVCpu, &val[3], pInstr+3072, sizeof(val[0]));
1607 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc = %Rrc\n", rc));
1608 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
1609 {
1610 Log(("csamR3CalcPageHash: page %RRv not present!!\n", pInstr));
1611 return ~0ULL;
1612 }
1613
1614 rc = PGMPhysSimpleReadGCPtr(pVCpu, &val[4], pInstr+4092, sizeof(val[0]));
1615 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc = %Rrc\n", rc));
1616 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
1617 {
1618 Log(("csamR3CalcPageHash: page %RRv not present!!\n", pInstr));
1619 return ~0ULL;
1620 }
1621
1622 // don't want to get division by zero traps
1623 val[2] |= 1;
1624 val[4] |= 1;
1625
1626 hash = (uint64_t)val[0] * (uint64_t)val[1] / (uint64_t)val[2] + (val[3]%val[4]);
1627 return (hash == ~0ULL) ? hash - 1 : hash;
1628}
1629
1630
1631/**
1632 * Notify CSAM of a page flush
1633 *
1634 * @returns VBox status code
1635 * @param pVM Pointer to the VM.
1636 * @param addr GC address of the page to flush
1637 * @param fRemovePage Page removal flag
1638 */
1639static int csamFlushPage(PVM pVM, RTRCPTR addr, bool fRemovePage)
1640{
1641 PCSAMPAGEREC pPageRec;
1642 int rc;
1643 RTGCPHYS GCPhys = 0;
1644 uint64_t fFlags = 0;
1645 Assert(pVM->cCpus == 1 || !CSAMIsEnabled(pVM));
1646
1647 if (!CSAMIsEnabled(pVM))
1648 return VINF_SUCCESS;
1649 Assert(!HMIsEnabled(pVM));
1650
1651 PVMCPU pVCpu = VMMGetCpu0(pVM);
1652
1653 STAM_PROFILE_START(&pVM->csam.s.StatTimeFlushPage, a);
1654
1655 addr = addr & PAGE_BASE_GC_MASK;
1656
1657 /*
1658 * Note: searching for the page in our tree first would be more expensive (skipped flushes are two orders of magnitude more common)
1659 */
1660 if (pVM->csam.s.pPageTree == NULL)
1661 {
1662 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeFlushPage, a);
1663 return VWRN_CSAM_PAGE_NOT_FOUND;
1664 }
1665
1666 rc = PGMGstGetPage(pVCpu, addr, &fFlags, &GCPhys);
1667 /* Returned at a very early stage (no paging yet presumably). */
1668 if (rc == VERR_NOT_SUPPORTED)
1669 {
1670 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeFlushPage, a);
1671 return rc;
1672 }
1673
1674 if (RT_SUCCESS(rc))
1675 {
1676 if ( (fFlags & X86_PTE_US)
1677 || rc == VERR_PGM_PHYS_PAGE_RESERVED
1678 )
1679 {
1680 /* User page -> not relevant for us. */
1681 STAM_COUNTER_ADD(&pVM->csam.s.StatNrFlushesSkipped, 1);
1682 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeFlushPage, a);
1683 return VINF_SUCCESS;
1684 }
1685 }
1686 else
1687 if (rc != VERR_PAGE_NOT_PRESENT && rc != VERR_PAGE_TABLE_NOT_PRESENT)
1688 AssertMsgFailed(("PGMR3GetPage %RRv failed with %Rrc\n", addr, rc));
1689
1690 pPageRec = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree, (AVLPVKEY)(uintptr_t)addr);
1691 if (pPageRec)
1692 {
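        /* Unchanged physical backing and still present -> the cached record is still valid; skip the flush. */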
1693 if ( GCPhys == pPageRec->page.GCPhys
1694 && (fFlags & X86_PTE_P))
1695 {
1696 STAM_COUNTER_ADD(&pVM->csam.s.StatNrFlushesSkipped, 1);
1697 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeFlushPage, a);
1698 return VINF_SUCCESS;
1699 }
1700
1701 Log(("CSAMR3FlushPage: page %RRv has changed -> FLUSH (rc=%Rrc) (Phys: %RGp vs %RGp)\n", addr, rc, GCPhys, pPageRec->page.GCPhys));
1702
1703 STAM_COUNTER_ADD(&pVM->csam.s.StatNrFlushes, 1);
1704
1705 if (fRemovePage)
1706 csamRemovePageRecord(pVM, addr);
1707 else
1708 {
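            /* Keep the record but reset it: mark the page unscanned, re-read the PTE flags and physical
               address, recompute the hash and clear the scanned-instruction bitmap. */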
1709 CSAMMarkPage(pVM, addr, false);
1710 pPageRec->page.GCPhys = 0;
1711 pPageRec->page.fFlags = 0;
1712 rc = PGMGstGetPage(pVCpu, addr, &pPageRec->page.fFlags, &pPageRec->page.GCPhys);
1713 if (rc == VINF_SUCCESS)
1714 pPageRec->page.u64Hash = csamR3CalcPageHash(pVM, addr);
1715
1716 if (pPageRec->page.pBitmap == NULL)
1717 {
1718 pPageRec->page.pBitmap = (uint8_t *)MMR3HeapAllocZ(pVM, MM_TAG_CSAM_PATCH, CSAM_PAGE_BITMAP_SIZE);
1719 Assert(pPageRec->page.pBitmap);
1720 if (pPageRec->page.pBitmap == NULL)
1721 return VERR_NO_MEMORY;
1722 }
1723 else
1724 memset(pPageRec->page.pBitmap, 0, CSAM_PAGE_BITMAP_SIZE);
1725 }
1726
1727
1728 /*
1729 * Inform patch manager about the flush; no need to repeat the above check twice.
1730 */
1731 PATMR3FlushPage(pVM, addr);
1732
1733 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeFlushPage, a);
1734 return VINF_SUCCESS;
1735 }
1736 else
1737 {
1738 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeFlushPage, a);
1739 return VWRN_CSAM_PAGE_NOT_FOUND;
1740 }
1741}
1742
1743/**
1744 * Notify CSAM of a page flush
1745 *
1746 * @returns VBox status code
1747 * @param pVM Pointer to the VM.
1748 * @param addr GC address of the page to flush
1749 */
1750VMMR3_INT_DECL(int) CSAMR3FlushPage(PVM pVM, RTRCPTR addr)
1751{
1752 return csamFlushPage(pVM, addr, true /* remove page record */);
1753}
1754
1755/**
1756 * Remove a CSAM monitored page. Use with care!
1757 *
1758 * @returns VBox status code
1759 * @param pVM Pointer to the VM.
1760 * @param addr GC address of the page to remove
1761 */
1762VMMR3_INT_DECL(int) CSAMR3RemovePage(PVM pVM, RTRCPTR addr)
1763{
1764 PCSAMPAGEREC pPageRec;
1765 int rc;
1766
1767 AssertReturn(!HMIsEnabled(pVM), VERR_CSAM_HM_IPE);
1768
1769 addr = addr & PAGE_BASE_GC_MASK;
1770
1771 pPageRec = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree, (AVLPVKEY)(uintptr_t)addr);
1772 if (pPageRec)
1773 {
1774 rc = csamRemovePageRecord(pVM, addr);
1775 if (RT_SUCCESS(rc))
1776 PATMR3FlushPage(pVM, addr);
1777 return VINF_SUCCESS;
1778 }
1779 return VWRN_CSAM_PAGE_NOT_FOUND;
1780}
1781
1782/**
1783 * Check a page record to see whether the page contents have changed, flushing it if so
1784 *
1785 * @returns VBox status code (VWRN_CSAM_PAGE_NOT_FOUND if no page record exists).
1786 * @param pVM Pointer to the VM.
1787 * @param pInstrGC GC instruction pointer
1788 */
1789int csamR3CheckPageRecord(PVM pVM, RTRCPTR pInstrGC)
1790{
1791 PCSAMPAGEREC pPageRec;
1792 uint64_t u64hash;
1793
1794 pInstrGC = pInstrGC & PAGE_BASE_GC_MASK;
1795
1796 pPageRec = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree, (AVLPVKEY)(uintptr_t)pInstrGC);
1797 if (pPageRec)
1798 {
1799 u64hash = csamR3CalcPageHash(pVM, pInstrGC);
1800 if (u64hash != pPageRec->page.u64Hash)
1801 csamFlushPage(pVM, pInstrGC, false /* don't remove page record */);
1802 }
1803 else
1804 return VWRN_CSAM_PAGE_NOT_FOUND;
1805
1806 return VINF_SUCCESS;
1807}
1808
1809/**
1810 * Returns monitor description based on CSAM tag
1811 *
1812 * @return description string
1813 * @param enmTag Owner tag
1814 */
1815const char *csamGetMonitorDescription(CSAMTAG enmTag)
1816{
1817 if (enmTag == CSAM_TAG_PATM)
1818 return "CSAM-PATM self-modifying code monitor handler";
1819 else
1820 if (enmTag == CSAM_TAG_REM)
1821 return "CSAM-REM self-modifying code monitor handler";
1822 Assert(enmTag == CSAM_TAG_CSAM);
1823 return "CSAM self-modifying code monitor handler";
1824}
1825
1826/**
1827 * Adds page record to our lookup tree
1828 *
1829 * @returns CSAMPAGE ptr or NULL if failure
1830 * @param pVM Pointer to the VM.
1831 * @param GCPtr Page address
1832 * @param enmTag Owner tag
1833 * @param fCode32 Set for 32-bit code, clear for 16-bit code
1834 * @param fMonitorInvalidation Monitor page invalidation flag
1835 */
1836static PCSAMPAGE csamCreatePageRecord(PVM pVM, RTRCPTR GCPtr, CSAMTAG enmTag, bool fCode32, bool fMonitorInvalidation)
1837{
1838 PCSAMPAGEREC pPage;
1839 int rc;
1840 bool ret;
1841 Assert(pVM->cCpus == 1);
1842 PVMCPU pVCpu = VMMGetCpu0(pVM);
1843
1844 Log(("New page record for %RRv\n", GCPtr & PAGE_BASE_GC_MASK));
1845
1846 pPage = (PCSAMPAGEREC)MMR3HeapAllocZ(pVM, MM_TAG_CSAM_PATCH, sizeof(CSAMPAGEREC));
1847 if (pPage == NULL)
1848 {
1849 AssertMsgFailed(("csamCreatePageRecord: Out of memory!!!!\n"));
1850 return NULL;
1851 }
1852 /* Round down to page boundary. */
1853 GCPtr = (GCPtr & PAGE_BASE_GC_MASK);
1854 pPage->Core.Key = (AVLPVKEY)(uintptr_t)GCPtr;
1855 pPage->page.pPageGC = GCPtr;
1856 pPage->page.fCode32 = fCode32;
1857 pPage->page.fMonitorInvalidation = fMonitorInvalidation;
1858 pPage->page.enmTag = enmTag;
1859 pPage->page.fMonitorActive = false;
1860 pPage->page.pBitmap = (uint8_t *)MMR3HeapAllocZ(pVM, MM_TAG_CSAM_PATCH, PAGE_SIZE/sizeof(uint8_t));
1861 rc = PGMGstGetPage(pVCpu, GCPtr, &pPage->page.fFlags, &pPage->page.GCPhys);
1862 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc = %Rrc\n", rc));
1863
1864 pPage->page.u64Hash = csamR3CalcPageHash(pVM, GCPtr);
1865 ret = RTAvlPVInsert(&pVM->csam.s.pPageTree, &pPage->Core);
1866 Assert(ret);
1867
1868#ifdef CSAM_MONITOR_CODE_PAGES
1869 AssertRelease(!g_fInCsamR3CodePageInvalidate);
1870
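    /* Register a write handler (and optionally an invalidation handler) so self-modifying code is
       caught; CSAM-tagged pages are only monitored when CSAM_MONITOR_CSAM_CODE_PAGES is defined. */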
1871 switch (enmTag)
1872 {
1873 case CSAM_TAG_PATM:
1874 case CSAM_TAG_REM:
1875# ifdef CSAM_MONITOR_CSAM_CODE_PAGES
1876 case CSAM_TAG_CSAM:
1877# endif
1878 {
1879 rc = PGMR3HandlerVirtualRegister(pVM, pVCpu, fMonitorInvalidation
1880 ? pVM->csam.s.hCodePageWriteAndInvPgType : pVM->csam.s.hCodePageWriteType,
1881 GCPtr, GCPtr + (PAGE_SIZE - 1) /* inclusive! */,
1882 pPage, NIL_RTRCPTR, csamGetMonitorDescription(enmTag));
1883 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT,
1884 ("PGMR3HandlerVirtualRegister %RRv failed with %Rrc\n", GCPtr, rc));
1885 if (RT_FAILURE(rc))
1886 Log(("PGMR3HandlerVirtualRegister for %RRv failed with %Rrc\n", GCPtr, rc));
1887
1888 /* Could fail, because it's already monitored. Don't treat that condition as fatal. */
1889
1890 /* Prefetch it in case it's not there yet. */
1891 rc = PGMPrefetchPage(pVCpu, GCPtr);
1892 AssertRC(rc);
1893
1894 rc = PGMShwMakePageReadonly(pVCpu, GCPtr, 0 /*fFlags*/);
1895 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
1896
1897 pPage->page.fMonitorActive = true;
1898 STAM_COUNTER_INC(&pVM->csam.s.StatPageMonitor);
1899 break;
1900 }
1901 default:
1902 break; /* to shut up GCC */
1903 }
1904
1905 Log(("csamCreatePageRecord %RRv GCPhys=%RGp\n", GCPtr, pPage->page.GCPhys));
1906
1907# ifdef VBOX_WITH_STATISTICS
1908 switch (enmTag)
1909 {
1910 case CSAM_TAG_CSAM:
1911 STAM_COUNTER_INC(&pVM->csam.s.StatPageCSAM);
1912 break;
1913 case CSAM_TAG_PATM:
1914 STAM_COUNTER_INC(&pVM->csam.s.StatPagePATM);
1915 break;
1916 case CSAM_TAG_REM:
1917 STAM_COUNTER_INC(&pVM->csam.s.StatPageREM);
1918 break;
1919 default:
1920 break; /* to shut up GCC */
1921 }
1922# endif
1923
1924#endif
1925
1926 STAM_COUNTER_INC(&pVM->csam.s.StatNrPages);
1927 if (fMonitorInvalidation)
1928 STAM_COUNTER_INC(&pVM->csam.s.StatNrPagesInv);
1929
1930 return &pPage->page;
1931}
1932
1933/**
1934 * Monitors a code page (if not already monitored)
1935 *
1936 * @returns VBox status code
1937 * @param pVM Pointer to the VM.
1938 * @param pPageAddrGC The page to monitor
1939 * @param enmTag Monitor tag
1940 */
1941VMMR3DECL(int) CSAMR3MonitorPage(PVM pVM, RTRCPTR pPageAddrGC, CSAMTAG enmTag)
1942{
1943
1944 int rc;
1945 bool fMonitorInvalidation;
1946 Assert(pVM->cCpus == 1);
1947 PVMCPU pVCpu = VMMGetCpu0(pVM);
1948 Assert(!HMIsEnabled(pVM));
1949
1950 /* Dirty pages must be handled before calling this function! */
1951 Assert(!pVM->csam.s.cDirtyPages);
1952
1953 if (pVM->csam.s.fScanningStarted == false)
1954 return VINF_SUCCESS; /* too early */
1955
1956 pPageAddrGC &= PAGE_BASE_GC_MASK;
1957
1958 Log(("CSAMR3MonitorPage %RRv %d\n", pPageAddrGC, enmTag));
1959
1960 /** @todo implicit assumption */
1961 fMonitorInvalidation = (enmTag == CSAM_TAG_PATM);
1962
1963 PCSAMPAGEREC pPageRec = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree, (AVLPVKEY)(uintptr_t)pPageAddrGC);
1964 if (pPageRec == NULL)
1965 {
1966 uint64_t fFlags;
1967
1968 rc = PGMGstGetPage(pVCpu, pPageAddrGC, &fFlags, NULL);
1969 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc = %Rrc\n", rc));
1970 if ( rc == VINF_SUCCESS
1971 && (fFlags & X86_PTE_US))
1972 {
1973 /* We don't care about user pages. */
1974 STAM_COUNTER_INC(&pVM->csam.s.StatNrUserPages);
1975 return VINF_SUCCESS;
1976 }
1977
1978 csamCreatePageRecord(pVM, pPageAddrGC, enmTag, true /* 32 bits code */, fMonitorInvalidation);
1979
1980 pPageRec = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree, (AVLPVKEY)(uintptr_t)pPageAddrGC);
1981 Assert(pPageRec);
1982 }
1983 /** @todo reference count */
1984
1985#ifdef CSAM_MONITOR_CSAM_CODE_PAGES
1986 Assert(pPageRec->page.fMonitorActive);
1987#endif
1988
1989#ifdef CSAM_MONITOR_CODE_PAGES
1990 if (!pPageRec->page.fMonitorActive)
1991 {
1992 Log(("CSAMR3MonitorPage: activate monitoring for %RRv\n", pPageAddrGC));
1993
1994 rc = PGMR3HandlerVirtualRegister(pVM, pVCpu, fMonitorInvalidation
1995 ? pVM->csam.s.hCodePageWriteAndInvPgType : pVM->csam.s.hCodePageWriteType,
1996 pPageAddrGC, pPageAddrGC + (PAGE_SIZE - 1) /* inclusive! */,
1997 pPageRec, NIL_RTRCPTR /*pvUserRC*/, csamGetMonitorDescription(enmTag));
1998 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT,
1999 ("PGMR3HandlerVirtualRegister %RRv failed with %Rrc\n", pPageAddrGC, rc));
2000 if (RT_FAILURE(rc))
2001 Log(("PGMR3HandlerVirtualRegister for %RRv failed with %Rrc\n", pPageAddrGC, rc));
2002
2003 /* Could fail, because it's already monitored. Don't treat that condition as fatal. */
2004
2005 /* Prefetch it in case it's not there yet. */
2006 rc = PGMPrefetchPage(pVCpu, pPageAddrGC);
2007 AssertRC(rc);
2008
2009 rc = PGMShwMakePageReadonly(pVCpu, pPageAddrGC, 0 /*fFlags*/);
2010 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2011
2012 STAM_COUNTER_INC(&pVM->csam.s.StatPageMonitor);
2013
2014 pPageRec->page.fMonitorActive = true;
2015 pPageRec->page.fMonitorInvalidation = fMonitorInvalidation;
2016 }
2017 else
2018 if ( !pPageRec->page.fMonitorInvalidation
2019 && fMonitorInvalidation)
2020 {
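        /* Already monitored for writes; upgrade the handler so page invalidation is caught as well. */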
2021 Assert(pPageRec->page.fMonitorActive);
2022 rc = PGMHandlerVirtualChangeType(pVM, pPageRec->page.pPageGC, pVM->csam.s.hCodePageWriteAndInvPgType);
2023 AssertRC(rc);
2024 pPageRec->page.fMonitorInvalidation = true;
2025 STAM_COUNTER_INC(&pVM->csam.s.StatNrPagesInv);
2026
2027 /* Prefetch it in case it's not there yet. */
2028 rc = PGMPrefetchPage(pVCpu, pPageAddrGC);
2029 AssertRC(rc);
2030
2031 /* Make sure it's readonly. Page invalidation may have modified the attributes. */
2032 rc = PGMShwMakePageReadonly(pVCpu, pPageAddrGC, 0 /*fFlags*/);
2033 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2034 }
2035
2036#if 0 /* def VBOX_STRICT -> very annoying) */
2037 if (pPageRec->page.fMonitorActive)
2038 {
2039 uint64_t fPageShw;
2040 RTHCPHYS GCPhys;
2041 rc = PGMShwGetPage(pVCpu, pPageAddrGC, &fPageShw, &GCPhys);
2042// AssertMsg( (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
2043// || !(fPageShw & X86_PTE_RW)
2044// || (pPageRec->page.GCPhys == 0), ("Shadow page flags for %RRv (%RHp) aren't readonly (%RX64)!!\n", pPageAddrGC, GCPhys, fPageShw));
2045 }
2046#endif
2047
2048 if (pPageRec->page.GCPhys == 0)
2049 {
2050 /* Prefetch it in case it's not there yet. */
2051 rc = PGMPrefetchPage(pVCpu, pPageAddrGC);
2052 AssertRC(rc);
2053 /* The page was changed behind our back. It won't be made read-only until the next SyncCR3, so force it here. */
2054 rc = PGMShwMakePageReadonly(pVCpu, pPageAddrGC, 0 /*fFlags*/);
2055 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2056 }
2057#endif /* CSAM_MONITOR_CODE_PAGES */
2058 return VINF_SUCCESS;
2059}
2060
2061/**
2062 * Unmonitors a code page
2063 *
2064 * @returns VBox status code
2065 * @param pVM Pointer to the VM.
2066 * @param pPageAddrGC The page to monitor
2067 * @param enmTag Monitor tag
2068 */
2069VMMR3DECL(int) CSAMR3UnmonitorPage(PVM pVM, RTRCPTR pPageAddrGC, CSAMTAG enmTag)
2070{
2071 Assert(!HMIsEnabled(pVM));
2072
2073 pPageAddrGC &= PAGE_BASE_GC_MASK;
2074
2075 Log(("CSAMR3UnmonitorPage %RRv %d\n", pPageAddrGC, enmTag));
2076
2077 Assert(enmTag == CSAM_TAG_REM);
2078
2079#ifdef VBOX_STRICT
2080 PCSAMPAGEREC pPageRec;
2081
2082 pPageRec = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree, (AVLPVKEY)(uintptr_t)pPageAddrGC);
2083 Assert(pPageRec && pPageRec->page.enmTag == enmTag);
2084#endif
2085 return CSAMR3RemovePage(pVM, pPageAddrGC);
2086}
2087
2088/**
2089 * Removes a page record from our lookup tree
2090 *
2091 * @returns VBox status code
2092 * @param pVM Pointer to the VM.
2093 * @param GCPtr Page address
2094 */
2095static int csamRemovePageRecord(PVM pVM, RTRCPTR GCPtr)
2096{
2097 PCSAMPAGEREC pPageRec;
2098 Assert(pVM->cCpus == 1);
2099 PVMCPU pVCpu = VMMGetCpu0(pVM);
2100
2101 Log(("csamRemovePageRecord %RRv\n", GCPtr));
2102 pPageRec = (PCSAMPAGEREC)RTAvlPVRemove(&pVM->csam.s.pPageTree, (AVLPVKEY)(uintptr_t)GCPtr);
2103
2104 if (pPageRec)
2105 {
2106 STAM_COUNTER_INC(&pVM->csam.s.StatNrRemovedPages);
2107
2108#ifdef CSAM_MONITOR_CODE_PAGES
2109 if (pPageRec->page.fMonitorActive)
2110 {
2111 /** @todo Deregistering the handler is expensive (CR3 reload)! If this happens often,
2112 * keep the registration around and reuse it instead.
2113 */
2114 Assert(!g_fInCsamR3CodePageInvalidate);
2115 STAM_COUNTER_DEC(&pVM->csam.s.StatPageMonitor);
2116 PGMHandlerVirtualDeregister(pVM, pVCpu, GCPtr, false /*fHypervisor*/);
2117 }
2118 if (pPageRec->page.enmTag == CSAM_TAG_PATM)
2119 {
2120 /* Make sure the recompiler flushes its cache as this page is no longer monitored. */
2121 STAM_COUNTER_INC(&pVM->csam.s.StatPageRemoveREMFlush);
2122 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
2123 }
2124#endif
2125
2126#ifdef VBOX_WITH_STATISTICS
2127 switch (pPageRec->page.enmTag)
2128 {
2129 case CSAM_TAG_CSAM:
2130 STAM_COUNTER_DEC(&pVM->csam.s.StatPageCSAM);
2131 break;
2132 case CSAM_TAG_PATM:
2133 STAM_COUNTER_DEC(&pVM->csam.s.StatPagePATM);
2134 break;
2135 case CSAM_TAG_REM:
2136 STAM_COUNTER_DEC(&pVM->csam.s.StatPageREM);
2137 break;
2138 default:
2139 break; /* to shut up GCC */
2140 }
2141#endif
2142
2143 if (pPageRec->page.pBitmap) MMR3HeapFree(pPageRec->page.pBitmap);
2144 MMR3HeapFree(pPageRec);
2145 }
2146 else
2147 AssertFailed();
2148
2149 return VINF_SUCCESS;
2150}
2151
2152/**
2153 * Callback for delayed writes from non-EMT threads
2154 *
2155 * @param pVM Pointer to the VM.
2156 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
2157 * @param cbBuf How much it's reading/writing.
2158 */
2159static DECLCALLBACK(void) CSAMDelayedWriteHandler(PVM pVM, RTRCPTR GCPtr, size_t cbBuf)
2160{
2161 int rc = PATMR3PatchWrite(pVM, GCPtr, (uint32_t)cbBuf);
2162 AssertRC(rc);
2163}
2164
2165/**
2166 * \#PF Handler callback for virtual access handler ranges.
2167 *
2168 * Important to realize that a physical page in a range can have aliases, and
2169 * for ALL and WRITE handlers these will also trigger.
2170 *
2171 * @returns VINF_SUCCESS if the handler has carried out the operation.
2172 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
2173 * @param pVM Pointer to the VM.
2174 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
2175 * @param pvPtr The HC mapping of that address.
2176 * @param pvBuf What the guest is reading/writing.
2177 * @param cbBuf How much it's reading/writing.
2178 * @param enmAccessType The access type.
2179 * @param pvUser User argument.
2180 */
2181static DECLCALLBACK(int) csamR3CodePageWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
2182 PGMACCESSTYPE enmAccessType, void *pvUser)
2183{
2184 int rc;
2185
2186 Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
2187 Log(("csamR3CodePageWriteHandler: write to %RGv size=%zu\n", GCPtr, cbBuf));
2188 NOREF(pvUser);
2189
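    /* If the write stays within one page and does not actually change the bytes, there is nothing to do. */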
2190 if ( PAGE_ADDRESS(pvPtr) == PAGE_ADDRESS((uintptr_t)pvPtr + cbBuf - 1)
2191 && !memcmp(pvPtr, pvBuf, cbBuf))
2192 {
2193 Log(("csamR3CodePageWriteHandler: dummy write -> ignore\n"));
2194 return VINF_PGM_HANDLER_DO_DEFAULT;
2195 }
2196
2197 if (VM_IS_EMT(pVM))
2198 rc = PATMR3PatchWrite(pVM, GCPtr, (uint32_t)cbBuf);
2199 else
2200 {
2201 /* Queue the write instead otherwise we'll get concurrency issues. */
2202 /** @note In theory it is not correct to let the guest write the data before the patch is disabled
2203 * (it could write the same bytes the patch jump occupies, which we would later overwrite with obsolete opcodes).
2204 */
2205 Log(("csamR3CodePageWriteHandler: delayed write!\n"));
2206 AssertCompileSize(RTRCPTR, 4);
2207 rc = VMR3ReqCallVoidNoWait(pVM, VMCPUID_ANY, (PFNRT)CSAMDelayedWriteHandler, 3, pVM, (RTRCPTR)GCPtr, cbBuf);
2208 }
2209 AssertRC(rc);
2210
2211 return VINF_PGM_HANDLER_DO_DEFAULT;
2212}
2213
2214/**
2215 * \#PF Handler callback for invalidation of virtual access handler ranges.
2216 *
2217 * @param pVM Pointer to the VM.
2218 * @param GCPtr The virtual address the guest has changed.
2219 */
2220static DECLCALLBACK(int) csamR3CodePageInvalidate(PVM pVM, RTGCPTR GCPtr, void *pvUser)
2221{
2222 g_fInCsamR3CodePageInvalidate = true;
2223 LogFlow(("csamR3CodePageInvalidate %RGv\n", GCPtr));
2224 /** @todo We can't remove the page (which unregisters the virtual handler) as we are called from a DoWithAll on the virtual handler tree. Argh. */
2225 csamFlushPage(pVM, GCPtr, false /* don't remove page! */);
2226 g_fInCsamR3CodePageInvalidate = false;
2227 return VINF_SUCCESS;
2228}
2229
2230/**
2231 * Check whether the instruction at the given address has already been scanned
2232 *
2233 * @returns true if the instruction has already been scanned, false otherwise.
2234 * @param pVM Pointer to the VM.
2235 * @param pInstr Instruction pointer
2236 * @param pPage In/out cache of the CSAM page record pointer; updated when the page is looked up.
2237 */
2238bool csamIsCodeScanned(PVM pVM, RTRCPTR pInstr, PCSAMPAGE *pPage)
2239{
2240 PCSAMPAGEREC pPageRec;
2241 uint32_t offset;
2242
2243 STAM_PROFILE_START(&pVM->csam.s.StatTimeCheckAddr, a);
2244
2245 offset = pInstr & PAGE_OFFSET_MASK;
2246 pInstr = pInstr & PAGE_BASE_GC_MASK;
2247
2248 Assert(pPage);
2249
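    /* Fast path: the caller-supplied page record may already cover this page. */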
2250 if (*pPage && (*pPage)->pPageGC == pInstr)
2251 {
2252 if ((*pPage)->pBitmap == NULL || ASMBitTest((*pPage)->pBitmap, offset))
2253 {
2254 STAM_COUNTER_ADD(&pVM->csam.s.StatNrKnownPagesHC, 1);
2255 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeCheckAddr, a);
2256 return true;
2257 }
2258 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeCheckAddr, a);
2259 return false;
2260 }
2261
2262 pPageRec = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree, (AVLPVKEY)(uintptr_t)pInstr);
2263 if (pPageRec)
2264 {
2265 if (pPage) *pPage= &pPageRec->page;
2266 if (pPageRec->page.pBitmap == NULL || ASMBitTest(pPageRec->page.pBitmap, offset))
2267 {
2268 STAM_COUNTER_ADD(&pVM->csam.s.StatNrKnownPagesHC, 1);
2269 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeCheckAddr, a);
2270 return true;
2271 }
2272 }
2273 else
2274 {
2275 if (pPage) *pPage = NULL;
2276 }
2277 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeCheckAddr, a);
2278 return false;
2279}
2280
2281/**
2282 * Mark an instruction in a page as scanned/not scanned
2283 *
2284 * @param pVM Pointer to the VM.
2285 * @param pPage Patch structure pointer
2286 * @param pInstr Instruction pointer
2287 * @param cbInstr Instruction size
2288 * @param fScanned Mark as scanned or not
2289 */
2290static void csamMarkCode(PVM pVM, PCSAMPAGE pPage, RTRCPTR pInstr, uint32_t cbInstr, bool fScanned)
2291{
2292 LogFlow(("csamMarkCodeAsScanned %RRv cbInstr=%d\n", pInstr, cbInstr));
2293 CSAMMarkPage(pVM, pInstr, fScanned);
2294
2295 /** @todo should recreate empty bitmap if !fScanned */
2296 if (pPage->pBitmap == NULL)
2297 return;
2298
2299 if (fScanned)
2300 {
2301 // retn instructions can be scanned more than once
2302 if (ASMBitTest(pPage->pBitmap, pInstr & PAGE_OFFSET_MASK) == 0)
2303 {
2304 pPage->uSize += cbInstr;
2305 STAM_COUNTER_ADD(&pVM->csam.s.StatNrInstr, 1);
2306 }
2307 if (pPage->uSize >= PAGE_SIZE)
2308 {
2309 Log(("Scanned full page (%RRv) -> free bitmap\n", pInstr & PAGE_BASE_GC_MASK));
2310 MMR3HeapFree(pPage->pBitmap);
2311 pPage->pBitmap = NULL;
2312 }
2313 else
2314 ASMBitSet(pPage->pBitmap, pInstr & PAGE_OFFSET_MASK);
2315 }
2316 else
2317 ASMBitClear(pPage->pBitmap, pInstr & PAGE_OFFSET_MASK);
2318}
2319
2320/**
2321 * Mark an instruction in a page as scanned/not scanned
2322 *
2323 * @returns VBox status code.
2324 * @param pVM Pointer to the VM.
2325 * @param pInstr Instruction pointer
2326 * @param cbInstr Instruction size
2327 * @param fScanned Mark as scanned or not
2328 */
2329VMMR3_INT_DECL(int) CSAMR3MarkCode(PVM pVM, RTRCPTR pInstr, uint32_t cbInstr, bool fScanned)
2330{
2331 PCSAMPAGE pPage = 0;
2332
2333 Assert(!fScanned); /* other case not implemented. */
2334 Assert(!PATMIsPatchGCAddr(pVM, pInstr));
2335 Assert(!HMIsEnabled(pVM));
2336
2337 if (csamIsCodeScanned(pVM, pInstr, &pPage) == false)
2338 {
2339 Assert(fScanned == true); /* other case should not be possible */
2340 return VINF_SUCCESS;
2341 }
2342
2343 Log(("CSAMR3MarkCode: %RRv size=%d fScanned=%d\n", pInstr, cbInstr, fScanned));
2344 csamMarkCode(pVM, pPage, pInstr, cbInstr, fScanned);
2345 return VINF_SUCCESS;
2346}
2347
2348
2349/**
2350 * Scan and analyse code
2351 *
2352 * @returns VBox status code.
2353 * @param pVM Pointer to the VM.
2354 * @param pCtx Guest CPU context.
2355 * @param pInstrGC Instruction pointer.
2356 */
2357VMMR3_INT_DECL(int) CSAMR3CheckCodeEx(PVM pVM, PCPUMCTX pCtx, RTRCPTR pInstrGC)
2358{
2359 Assert(!HMIsEnabled(pVM));
2360 if (EMIsRawRing0Enabled(pVM) == false || PATMIsPatchGCAddr(pVM, pInstrGC) == true)
2361 {
2362 // No point in scanning in this case.
2363 return VINF_SUCCESS;
2364 }
2365
2366 if (CSAMIsEnabled(pVM))
2367 {
2368 /* Assuming 32 bits code for now. */
2369 Assert(CPUMGetGuestCodeBits(VMMGetCpu0(pVM)) == 32);
2370
2371 pInstrGC = SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pInstrGC);
2372 return CSAMR3CheckCode(pVM, pInstrGC);
2373 }
2374 return VINF_SUCCESS;
2375}
2376
2377/**
2378 * Scan and analyse code
2379 *
2380 * @returns VBox status code.
2381 * @param pVM Pointer to the VM.
2382 * @param pInstrGC Instruction pointer (0:32 virtual address)
2383 */
2384VMMR3_INT_DECL(int) CSAMR3CheckCode(PVM pVM, RTRCPTR pInstrGC)
2385{
2386 int rc;
2387 PCSAMPAGE pPage = NULL;
2388 Assert(!HMIsEnabled(pVM));
2389
2390 if ( EMIsRawRing0Enabled(pVM) == false
2391 || PATMIsPatchGCAddr(pVM, pInstrGC) == true)
2392 {
2393 /* Not active. */
2394 return VINF_SUCCESS;
2395 }
2396
2397 if (CSAMIsEnabled(pVM))
2398 {
2399 /* Cache record for csamR3GCVirtToHCVirt */
2400 CSAMP2GLOOKUPREC cacheRec;
2401 RT_ZERO(cacheRec);
2402
2403 STAM_PROFILE_START(&pVM->csam.s.StatTime, a);
2404 rc = csamAnalyseCallCodeStream(pVM, pInstrGC, pInstrGC, true /* 32 bits code */, CSAMR3AnalyseCallback, pPage, &cacheRec);
2405 STAM_PROFILE_STOP(&pVM->csam.s.StatTime, a);
2406 if (cacheRec.Lock.pvMap)
2407 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2408
2409 if (rc != VINF_SUCCESS)
2410 {
2411 Log(("csamAnalyseCodeStream failed with %d\n", rc));
2412 return rc;
2413 }
2414 }
2415 return VINF_SUCCESS;
2416}
2417
2418/**
2419 * Flush dirty code pages
2420 *
2421 * @returns VBox status code.
2422 * @param pVM Pointer to the VM.
2423 */
2424static int csamR3FlushDirtyPages(PVM pVM)
2425{
2426 Assert(pVM->cCpus == 1);
2427 PVMCPU pVCpu = VMMGetCpu0(pVM);
2428
2429 STAM_PROFILE_START(&pVM->csam.s.StatFlushDirtyPages, a);
2430
2431 for (uint32_t i=0;i<pVM->csam.s.cDirtyPages;i++)
2432 {
2433 int rc;
2434 PCSAMPAGEREC pPageRec;
2435 RTRCPTR GCPtr = pVM->csam.s.pvDirtyBasePage[i];
2436
2437 GCPtr = GCPtr & PAGE_BASE_GC_MASK;
2438
2439#ifdef VBOX_WITH_REM
2440 /* Notify the recompiler that this page has been changed. */
2441 REMR3NotifyCodePageChanged(pVM, pVCpu, GCPtr);
2442#endif
2443
2444 /* Enable write protection again. (use the fault address as it might be an alias) */
2445 rc = PGMShwMakePageReadonly(pVCpu, pVM->csam.s.pvDirtyFaultPage[i], 0 /*fFlags*/);
2446 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2447
2448 Log(("CSAMR3FlushDirtyPages: flush %RRv (modifypage rc=%Rrc)\n", pVM->csam.s.pvDirtyBasePage[i], rc));
2449
2450 pPageRec = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree, (AVLPVKEY)(uintptr_t)GCPtr);
2451 if (pPageRec && pPageRec->page.enmTag == CSAM_TAG_REM)
2452 {
2453 uint64_t fFlags;
2454
2455 rc = PGMGstGetPage(pVCpu, GCPtr, &fFlags, NULL);
2456 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc = %Rrc\n", rc));
2457 if ( rc == VINF_SUCCESS
2458 && (fFlags & X86_PTE_US))
2459 {
2460 /* We don't care about user pages. */
2461 csamRemovePageRecord(pVM, GCPtr);
2462 STAM_COUNTER_INC(&pVM->csam.s.StatNrUserPages);
2463 }
2464 }
2465 }
2466 pVM->csam.s.cDirtyPages = 0;
2467 STAM_PROFILE_STOP(&pVM->csam.s.StatFlushDirtyPages, a);
2468 return VINF_SUCCESS;
2469}
2470
2471/**
2472 * Flush potential new code pages
2473 *
2474 * @returns VBox status code.
2475 * @param pVM Pointer to the VM.
2476 */
2477static int csamR3FlushCodePages(PVM pVM)
2478{
2479 Assert(pVM->cCpus == 1);
2480 PVMCPU pVCpu = VMMGetCpu0(pVM);
2481
2482 for (uint32_t i=0;i<pVM->csam.s.cPossibleCodePages;i++)
2483 {
2484 RTRCPTR GCPtr = pVM->csam.s.pvPossibleCodePage[i];
2485
2486 GCPtr = GCPtr & PAGE_BASE_GC_MASK;
2487
2488 Log(("csamR3FlushCodePages: %RRv\n", GCPtr));
2489 PGMShwMakePageNotPresent(pVCpu, GCPtr, 0 /*fFlags*/);
2490 /* Resync the page to make sure instruction fetch will fault */
2491 CSAMMarkPage(pVM, GCPtr, false);
2492 }
2493 pVM->csam.s.cPossibleCodePages = 0;
2494 return VINF_SUCCESS;
2495}
2496
2497/**
2498 * Perform any pending actions
2499 *
2500 * @returns VBox status code.
2501 * @param pVM Pointer to the VM.
2502 * @param pVCpu Pointer to the VMCPU.
2503 */
2504VMMR3_INT_DECL(int) CSAMR3DoPendingAction(PVM pVM, PVMCPU pVCpu)
2505{
2506 AssertReturn(!HMIsEnabled(pVM), VERR_CSAM_HM_IPE);
2507
2508 csamR3FlushDirtyPages(pVM);
2509 csamR3FlushCodePages(pVM);
2510
2511 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION);
2512 return VINF_SUCCESS;
2513}
2514
2515/**
2516 * Analyse interrupt and trap gates
2517 *
2518 * @returns VBox status code.
2519 * @param pVM Pointer to the VM.
2520 * @param iGate Start gate
2521 * @param cGates Number of gates to check
2522 */
2523VMMR3_INT_DECL(int) CSAMR3CheckGates(PVM pVM, uint32_t iGate, uint32_t cGates)
2524{
2525#ifdef VBOX_WITH_RAW_MODE
2526 Assert(pVM->cCpus == 1);
2527 PVMCPU pVCpu = VMMGetCpu0(pVM);
2528 uint16_t cbIDT;
2529 RTRCPTR GCPtrIDT = CPUMGetGuestIDTR(pVCpu, &cbIDT);
2530 uint32_t iGateEnd;
2531 uint32_t maxGates;
2532 VBOXIDTE aIDT[256];
2533 PVBOXIDTE pGuestIdte;
2534 int rc;
2535
2536 AssertReturn(!HMIsEnabled(pVM), VERR_CSAM_HM_IPE);
2537 if (EMIsRawRing0Enabled(pVM) == false)
2538 {
2539 /* Enabling interrupt gates only works when raw ring 0 is enabled. */
2540 //AssertFailed();
2541 return VINF_SUCCESS;
2542 }
2543
2544 /* We only check all gates once during a session */
2545 if ( !pVM->csam.s.fGatesChecked
2546 && cGates != 256)
2547 return VINF_SUCCESS; /* too early */
2548
2549 /* Once all gates have been checked, only single-gate rechecks are processed. */
2550 if ( pVM->csam.s.fGatesChecked
2551 && cGates != 1)
2552 return VINF_SUCCESS; /* ignored */
2553
2554 Assert(cGates <= 256);
2555 if (!GCPtrIDT || cGates > 256)
2556 return VERR_INVALID_PARAMETER;
2557
2558 if (cGates != 1)
2559 {
2560 pVM->csam.s.fGatesChecked = true;
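        /* Analyse the previously recorded call instructions as well. */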
2561 for (unsigned i=0;i<RT_ELEMENTS(pVM->csam.s.pvCallInstruction);i++)
2562 {
2563 RTRCPTR pHandler = pVM->csam.s.pvCallInstruction[i];
2564
2565 if (pHandler)
2566 {
2567 PCSAMPAGE pPage = NULL;
2568 CSAMP2GLOOKUPREC cacheRec; /* Cache record for csamR3GCVirtToHCVirt. */
2569 RT_ZERO(cacheRec);
2570
2571 Log(("CSAMCheckGates: checking previous call instruction %RRv\n", pHandler));
2572 STAM_PROFILE_START(&pVM->csam.s.StatTime, a);
2573 rc = csamAnalyseCodeStream(pVM, pHandler, pHandler, true, CSAMR3AnalyseCallback, pPage, &cacheRec);
2574 STAM_PROFILE_STOP(&pVM->csam.s.StatTime, a);
2575 if (cacheRec.Lock.pvMap)
2576 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2577
2578 if (rc != VINF_SUCCESS)
2579 {
2580 Log(("CSAMCheckGates: csamAnalyseCodeStream failed with %d\n", rc));
2581 continue;
2582 }
2583 }
2584 }
2585 }
2586
2587 /* Determine valid upper boundary. */
2588 maxGates = (cbIDT+1) / sizeof(VBOXIDTE);
2589 Assert(iGate < maxGates);
2590 if (iGate > maxGates)
2591 return VERR_INVALID_PARAMETER;
2592
2593 if (iGate + cGates > maxGates)
2594 cGates = maxGates - iGate;
2595
2596 GCPtrIDT = GCPtrIDT + iGate * sizeof(VBOXIDTE);
2597 iGateEnd = iGate + cGates;
2598
2599 STAM_PROFILE_START(&pVM->csam.s.StatCheckGates, a);
2600
2601 /*
2602 * Get IDT entries.
2603 */
2604 rc = PGMPhysSimpleReadGCPtr(pVCpu, aIDT, GCPtrIDT, cGates*sizeof(VBOXIDTE));
2605 if (RT_FAILURE(rc))
2606 {
2607 AssertMsgRC(rc, ("Failed to read IDTE! rc=%Rrc\n", rc));
2608 STAM_PROFILE_STOP(&pVM->csam.s.StatCheckGates, a);
2609 return rc;
2610 }
2611 pGuestIdte = &aIDT[0];
2612
2613 for (/*iGate*/; iGate<iGateEnd; iGate++, pGuestIdte++)
2614 {
2615 Assert(TRPMR3GetGuestTrapHandler(pVM, iGate) == TRPM_INVALID_HANDLER);
2616
2617 if ( pGuestIdte->Gen.u1Present
2618 && (pGuestIdte->Gen.u5Type2 == VBOX_IDTE_TYPE2_TRAP_32 || pGuestIdte->Gen.u5Type2 == VBOX_IDTE_TYPE2_INT_32)
2619 && (pGuestIdte->Gen.u2DPL == 3 || pGuestIdte->Gen.u2DPL == 0)
2620 )
2621 {
2622 RTRCPTR pHandler;
2623 PCSAMPAGE pPage = NULL;
2624 DBGFSELINFO selInfo;
2625 CSAMP2GLOOKUPREC cacheRec; /* Cache record for csamR3GCVirtToHCVirt. */
2626 RT_ZERO(cacheRec);
2627
2628 pHandler = VBOXIDTE_OFFSET(*pGuestIdte);
2629 pHandler = SELMToFlatBySel(pVM, pGuestIdte->Gen.u16SegSel, pHandler);
2630
2631 rc = SELMR3GetSelectorInfo(pVM, pVCpu, pGuestIdte->Gen.u16SegSel, &selInfo);
2632 if ( RT_FAILURE(rc)
2633 || (selInfo.fFlags & (DBGFSELINFO_FLAGS_NOT_PRESENT | DBGFSELINFO_FLAGS_INVALID))
2634 || selInfo.GCPtrBase != 0
2635 || selInfo.cbLimit != ~0U
2636 )
2637 {
2638 /* Refuse to patch a handler whose IDT code selector isn't flat (base 0, 4GB limit). */
2639 Log(("CSAMCheckGates: check gate %d failed due to rc %Rrc GCPtrBase=%RRv limit=%x\n", iGate, rc, selInfo.GCPtrBase, selInfo.cbLimit));
2640 continue;
2641 }
2642
2643
2644 if (pGuestIdte->Gen.u5Type2 == VBOX_IDTE_TYPE2_TRAP_32)
2645 {
2646 Log(("CSAMCheckGates: check trap gate %d at %04X:%08X (flat %RRv)\n", iGate, pGuestIdte->Gen.u16SegSel, VBOXIDTE_OFFSET(*pGuestIdte), pHandler));
2647 }
2648 else
2649 {
2650 Log(("CSAMCheckGates: check interrupt gate %d at %04X:%08X (flat %RRv)\n", iGate, pGuestIdte->Gen.u16SegSel, VBOXIDTE_OFFSET(*pGuestIdte), pHandler));
2651 }
2652
2653 STAM_PROFILE_START(&pVM->csam.s.StatTime, b);
2654 rc = csamAnalyseCodeStream(pVM, pHandler, pHandler, true, CSAMR3AnalyseCallback, pPage, &cacheRec);
2655 STAM_PROFILE_STOP(&pVM->csam.s.StatTime, b);
2656 if (cacheRec.Lock.pvMap)
2657 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
2658
2659 if (rc != VINF_SUCCESS)
2660 {
2661 Log(("CSAMCheckGates: csamAnalyseCodeStream failed with %d\n", rc));
2662 continue;
2663 }
2664 /* OpenBSD guest specific patch test. */
2665 if (iGate >= 0x20)
2666 {
2667 PCPUMCTX pCtx;
2668 DISCPUSTATE cpu;
2669 RTGCUINTPTR32 aOpenBsdPushCSOffset[3] = {0x03, /* OpenBSD 3.7 & 3.8 */
2670 0x2B, /* OpenBSD 4.0 installation ISO */
2671 0x2F}; /* OpenBSD 4.0 after install */
2672
2673 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
2674
2675 for (unsigned i=0;i<RT_ELEMENTS(aOpenBsdPushCSOffset);i++)
2676 {
2677 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pHandler - aOpenBsdPushCSOffset[i], &cpu, NULL);
2678 if ( rc == VINF_SUCCESS
2679 && cpu.pCurInstr->uOpcode == OP_PUSH
2680 && cpu.pCurInstr->fParam1 == OP_PARM_REG_CS)
2681 {
2682 rc = PATMR3InstallPatch(pVM, pHandler - aOpenBsdPushCSOffset[i], PATMFL_CODE32 | PATMFL_GUEST_SPECIFIC);
2683 if (RT_SUCCESS(rc))
2684 Log(("Installed OpenBSD interrupt handler prefix instruction (push cs) patch\n"));
2685 }
2686 }
2687 }
2688
2689 /* Trap gates and certain interrupt gates. */
2690 uint32_t fPatchFlags = PATMFL_CODE32 | PATMFL_IDTHANDLER;
2691
2692 if (pGuestIdte->Gen.u5Type2 == VBOX_IDTE_TYPE2_TRAP_32)
2693 fPatchFlags |= PATMFL_TRAPHANDLER;
2694 else
2695 fPatchFlags |= PATMFL_INTHANDLER;
2696
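            /* Gates 8, 10-14 and 17 (#DF, #TS, #NP, #SS, #GP, #PF and #AC) push an error code. */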
2697 switch (iGate) {
2698 case 8:
2699 case 10:
2700 case 11:
2701 case 12:
2702 case 13:
2703 case 14:
2704 case 17:
2705 fPatchFlags |= PATMFL_TRAPHANDLER_WITH_ERRORCODE;
2706 break;
2707 default:
2708 /* No error code. */
2709 break;
2710 }
2711
2712 Log(("Installing %s gate handler for 0x%X at %RRv\n", (pGuestIdte->Gen.u5Type2 == VBOX_IDTE_TYPE2_TRAP_32) ? "trap" : "intr", iGate, pHandler));
2713
2714 rc = PATMR3InstallPatch(pVM, pHandler, fPatchFlags);
2715 if ( RT_SUCCESS(rc)
2716 || rc == VERR_PATM_ALREADY_PATCHED)
2717 {
2718 Log(("Gate handler 0x%X is SAFE!\n", iGate));
2719
2720 RTRCPTR pNewHandlerGC = PATMR3QueryPatchGCPtr(pVM, pHandler);
2721 if (pNewHandlerGC)
2722 {
2723 rc = TRPMR3SetGuestTrapHandler(pVM, iGate, pNewHandlerGC);
2724 if (RT_FAILURE(rc))
2725 Log(("TRPMR3SetGuestTrapHandler %d failed with %Rrc\n", iGate, rc));
2726 }
2727 }
2728 }
2729 } /* for */
2730 STAM_PROFILE_STOP(&pVM->csam.s.StatCheckGates, a);
2731#endif /* VBOX_WITH_RAW_MODE */
2732 return VINF_SUCCESS;
2733}
2734
2735/**
2736 * Record previous call instruction addresses
2737 *
2738 * @returns VBox status code.
2739 * @param pVM Pointer to the VM.
2740 * @param GCPtrCall Call address
2741 */
2742VMMR3DECL(int) CSAMR3RecordCallAddress(PVM pVM, RTRCPTR GCPtrCall)
2743{
2744 Assert(!HMIsEnabled(pVM));
2745 for (unsigned i=0;i<RT_ELEMENTS(pVM->csam.s.pvCallInstruction);i++)
2746 {
2747 if (pVM->csam.s.pvCallInstruction[i] == GCPtrCall)
2748 return VINF_SUCCESS;
2749 }
2750
2751 Log(("CSAMR3RecordCallAddress %RRv\n", GCPtrCall));
2752
2753 pVM->csam.s.pvCallInstruction[pVM->csam.s.iCallInstruction++] = GCPtrCall;
2754 if (pVM->csam.s.iCallInstruction >= RT_ELEMENTS(pVM->csam.s.pvCallInstruction))
2755 pVM->csam.s.iCallInstruction = 0;
2756
2757 return VINF_SUCCESS;
2758}
2759
2760
2761/**
2762 * Query CSAM state (enabled/disabled)
2763 *
2764 * @returns true if enabled, false otherwise.
2765 * @param pUVM The user mode VM handle.
2766 */
2767VMMR3DECL(bool) CSAMR3IsEnabled(PUVM pUVM)
2768{
2769 UVM_ASSERT_VALID_EXT_RETURN(pUVM, false);
2770 PVM pVM = pUVM->pVM;
2771 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2772 return CSAMIsEnabled(pVM);
2773}
2774
2775
2776/**
2777 * Enables or disables code scanning.
2778 *
2779 * @returns VBox status code.
2780 * @param pUVM The user mode VM handle.
2781 * @param fEnabled Whether to enable or disable scanning.
2782 */
2783VMMR3DECL(int) CSAMR3SetScanningEnabled(PUVM pUVM, bool fEnabled)
2784{
2785 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2786 PVM pVM = pUVM->pVM;
2787 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2788
2789 if (HMIsEnabled(pVM))
2790 {
2791 Assert(!pVM->fCSAMEnabled);
2792 return VINF_SUCCESS;
2793 }
2794
2795 int rc;
2796 if (fEnabled)
2797 rc = CSAMEnableScanning(pVM);
2798 else
2799 rc = CSAMDisableScanning(pVM);
2800 return rc;
2801}
2802
2803
2804#ifdef VBOX_WITH_DEBUGGER
2805
2806/**
2807 * @callback_method_impl{FNDBGCCMD, The '.csamoff' command.}
2808 */
2809static DECLCALLBACK(int) csamr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
2810{
2811 DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
2812 NOREF(cArgs); NOREF(paArgs);
2813
2814 if (HMR3IsEnabled(pUVM))
2815 return DBGCCmdHlpPrintf(pCmdHlp, "CSAM is permanently disabled by HM.\n");
2816
2817 int rc = CSAMR3SetScanningEnabled(pUVM, false);
2818 if (RT_FAILURE(rc))
2819 return DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "CSAMR3SetScanningEnabled");
2820 return DBGCCmdHlpPrintf(pCmdHlp, "CSAM Scanning disabled\n");
2821}
2822
2823/**
2824 * @callback_method_impl{FNDBGCCMD, The '.csamon' command.}
2825 */
2826static DECLCALLBACK(int) csamr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
2827{
2828 DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
2829 NOREF(cArgs); NOREF(paArgs);
2830
2831 if (HMR3IsEnabled(pUVM))
2832 return DBGCCmdHlpPrintf(pCmdHlp, "CSAM is permanently disabled by HM.\n");
2833
2834 int rc = CSAMR3SetScanningEnabled(pUVM, true);
2835 if (RT_FAILURE(rc))
2836 return DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "CSAMR3SetScanningEnabled");
2837 return DBGCCmdHlpPrintf(pCmdHlp, "CSAM Scanning enabled\n");
2838}
2839
2840#endif /* VBOX_WITH_DEBUGGER */