VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CSAMAll.cpp@64655

Last change on this file since 64655 was 62654, checked in by vboxsync, 8 years ago

VMMR3: warnings

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 11.9 KB
/* $Id: CSAMAll.cpp 62654 2016-07-28 22:19:37Z vboxsync $ */
/** @file
 * CSAM - Guest OS Code Scanning and Analysis Manager - Any Context
 */

/*
 * Copyright (C) 2006-2016 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_CSAM
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/stam.h>
#include <VBox/vmm/patm.h>
#include <VBox/vmm/csam.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/mm.h>
#ifdef VBOX_WITH_REM
# include <VBox/vmm/rem.h>
#endif
#include <VBox/sup.h>
#include <VBox/param.h>
#include <iprt/avl.h>
#include "CSAMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/vmm.h>
#include <VBox/dbg.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <VBox/dis.h>
#include <VBox/disopcode.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/string.h>

#ifdef IN_RING0
# error "IN_RING3 & IN_RC only!"
#endif


/**
 * @callback_method_impl{FNPGMVIRTHANDLER,
 *      Access handler callback for virtual access handler ranges.}
 */
PGM_ALL_CB2_DECL(VBOXSTRICTRC)
csamCodePageWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
                         PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
{
    Log(("csamCodePageWriteHandler: write to %RGv LB %zu\n", GCPtr, cbBuf));
    Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
    Assert(VMCPU_IS_EMT(pVCpu));
    RT_NOREF_PV(pvUser);
    RT_NOREF_PV(enmOrigin);

    /*
     * Check if it's a dummy write that doesn't change anything.
     */
    if (   PAGE_ADDRESS(pvPtr) == PAGE_ADDRESS((uintptr_t)pvPtr + cbBuf - 1)
        && !memcmp(pvPtr, pvBuf, cbBuf))
    {
        Log(("csamCodePageWriteHandler: dummy write -> ignore\n"));
        return VINF_PGM_HANDLER_DO_DEFAULT;
    }

#ifdef IN_RING3
    /*
     * Ring-3: Do proper handling.
     */
    int rc = PATMR3PatchWrite(pVM, GCPtr, (uint32_t)cbBuf);
    AssertRC(rc);
    RT_NOREF_PV(pVCpu);
    return VINF_PGM_HANDLER_DO_DEFAULT;

#else
    /*
     * Raw-mode: Try to avoid needing to go to ring-3 (same as csamRCCodePageWritePfHandler).
     */
    uint32_t const cpl = CPUMGetGuestCPL(pVCpu);
    bool const fPatchCode = PATMIsPatchGCAddr(pVM, CPUMGetGuestRIP(pVCpu));
    PPATMGCSTATE pPATMGCState = PATMGetGCState(pVM);

    Assert(pVM->csam.s.cDirtyPages < CSAM_MAX_DIRTY_PAGES);
    Assert(pPATMGCState);
    Assert(pPATMGCState->fPIF || fPatchCode);

# ifdef VBOX_WITH_REM
    /* Flush the recompiler's translation-block cache as the guest seems to be modifying instructions. */
    /** @todo a bit overkill?? */
    REMFlushTBs(pVM);
# endif

    /*
     * When patch code is executing instructions that must complete, we must
     * *never* interrupt it.
     */
    if (!pPATMGCState->fPIF && fPatchCode)
    {
        Log(("csamRCCodePageWriteHandler: fPIF=0 -> stack fault in patch generated code at %08RX32!\n", CPUMGetGuestRIP(pVCpu)));
        return VINF_PGM_HANDLER_DO_DEFAULT;
    }

    Log(("csamRCCodePageWriteHandler: code page write at %RGv (cpl=%d)\n", GCPtr, cpl));

    /*
     * If user code is modifying one of our monitored pages, then we can safely
     * write to it as it's no longer being used for supervisor code.
     */
    if (cpl != 3)
    {
        VBOXSTRICTRC rcStrict = PATMRCHandleWriteToPatchPage(pVM, NULL /* pRegFrame = no interpret */, (RTRCPTR)GCPtr, cbBuf);
        if (   rcStrict == VINF_PGM_HANDLER_DO_DEFAULT
            || rcStrict == VINF_SUCCESS)
            return rcStrict;
        if (rcStrict == VINF_EM_RAW_EMULATE_INSTR)
        {
            STAM_COUNTER_INC(&pVM->csam.s.StatDangerousWrite);
            return VINF_EM_RAW_EMULATE_INSTR;
        }
        Assert(rcStrict == VERR_PATCH_NOT_FOUND);
    }

    /*
     * Schedule ring-3 activity.
     * Note that GCPtr might be a different address in case of aliases, so
     * record both alternatives.
     */
    VMCPU_FF_SET(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION);
    pVM->csam.s.pvDirtyBasePage[pVM->csam.s.cDirtyPages]  = (RTRCPTR)GCPtr;
    pVM->csam.s.pvDirtyFaultPage[pVM->csam.s.cDirtyPages] = (RTRCPTR)GCPtr;
    if (++pVM->csam.s.cDirtyPages == CSAM_MAX_DIRTY_PAGES)
        return VINF_CSAM_PENDING_ACTION;

    /*
     * Continue with the write. The VM_FF_CSAM_FLUSH_DIRTY_PAGE handler will reset it to read-only again.
     */
    Log(("csamRCCodePageWriteHandler: enabled r/w for page %RGv (%RGv)\n", GCPtr, GCPtr));
    STAM_COUNTER_INC(&pVM->csam.s.StatCodePageModified);
    return VINF_PGM_HANDLER_DO_DEFAULT;
#endif
}
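/* Note on the raw-mode branch above: the handler lets the write go through
 * (VINF_PGM_HANDLER_DO_DEFAULT) and merely queues the page in
 * pvDirtyBasePage/pvDirtyFaultPage while setting VMCPU_FF_CSAM_PENDING_ACTION,
 * leaving the flush and re-scan to ring-3 (presumably when EM services the
 * force-action flag, e.g. via CSAMR3DoPendingAction).  Only when the dirty-page
 * queue fills up (CSAM_MAX_DIRTY_PAGES) does it return VINF_CSAM_PENDING_ACTION
 * to force an immediate trip to ring-3. */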


/**
 * Check if this page needs to be analysed by CSAM
 *
 * @returns VBox status code
 * @param   pVM         The cross context VM structure.
 * @param   pvFault     Fault address
 */
VMM_INT_DECL(int) CSAMExecFault(PVM pVM, RTRCPTR pvFault)
{
    Assert(!HMIsEnabled(pVM));
    if (!CSAMIsEnabled(pVM))
        return VINF_SUCCESS;

    LogFlow(("CSAMGCExecFault: for page %08X scanned=%d\n", pvFault, CSAMIsPageScanned(pVM, pvFault)));

    if (CSAMIsPageScanned(pVM, pvFault))
    {
        // Already checked!
        STAM_COUNTER_ADD(&pVM->csam.s.StatNrKnownPagesGC, 1);
        return VINF_SUCCESS;
    }

    STAM_COUNTER_ADD(&pVM->csam.s.StatNrTraps, 1);
    VMCPU_FF_SET(VMMGetCpu0(pVM), VMCPU_FF_CSAM_SCAN_PAGE);
    return VINF_CSAM_PENDING_ACTION;
}
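/* The scan itself does not happen here: the function only sets
 * VMCPU_FF_CSAM_SCAN_PAGE and returns VINF_CSAM_PENDING_ACTION so execution
 * drops back to ring-3, where the page is scanned once the force-action flag
 * is serviced.  A hypothetical raw-mode #PF handler (sketch only, not code
 * from this file) would use it roughly like this:
 *
 *     int rc = CSAMExecFault(pVM, (RTRCPTR)pvFault);
 *     if (rc != VINF_SUCCESS)
 *         return rc;   // VINF_CSAM_PENDING_ACTION: let ring-3 do the scan
 */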


/**
 * Check if this page was previously scanned by CSAM
 *
 * @returns true -> scanned, false -> not scanned
 * @param   pVM         The cross context VM structure.
 * @param   pPage       GC page address
 */
VMM_INT_DECL(bool) CSAMIsPageScanned(PVM pVM, RTRCPTR pPage)
{
    int pgdir, bit;
    uintptr_t page;
    Assert(!HMIsEnabled(pVM));

    page  = (uintptr_t)pPage;
    pgdir = page >> X86_PAGE_4M_SHIFT;
    bit   = (page & X86_PAGE_4M_OFFSET_MASK) >> X86_PAGE_4K_SHIFT;

    Assert(pgdir < CSAM_PGDIRBMP_CHUNKS);
    Assert(bit < PAGE_SIZE);

    return pVM->csam.s.CTXSUFF(pPDBitmap)[pgdir] && ASMBitTest((void *)pVM->csam.s.CTXSUFF(pPDBitmap)[pgdir], bit);
}
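/* Worked example for the indexing above (also used by CSAMMarkPage below):
 * with X86_PAGE_4M_SHIFT = 22, X86_PAGE_4K_SHIFT = 12 and
 * X86_PAGE_4M_OFFSET_MASK = 0x003fffff, a guest address of 0xc0501234 gives
 *     pgdir = 0xc0501234 >> 22                 = 0x301  (4 MB region)
 *     bit   = (0xc0501234 & 0x003fffff) >> 12  = 0x101  (4 KB page within it)
 * i.e. one bitmap chunk per 4 MB of guest address space with one bit per 4 KB
 * page (1024 bits per chunk). */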



/**
 * Mark a page as scanned/not scanned
 *
 * @note: we always mark it as scanned, even if we haven't completely done so
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pPage       GC page address (not necessarily aligned)
 * @param   fScanned    Mark as scanned or not scanned
 *
 */
VMM_INT_DECL(int) CSAMMarkPage(PVM pVM, RTRCUINTPTR pPage, bool fScanned)
{
    int pgdir, bit;
    uintptr_t page;

#ifdef LOG_ENABLED
    if (fScanned && !CSAMIsPageScanned(pVM, (RTRCPTR)pPage))
        Log(("CSAMMarkPage %RRv\n", pPage));
#endif

    if (!CSAMIsEnabled(pVM))
        return VINF_SUCCESS;
    Assert(!HMIsEnabled(pVM));

    page  = (uintptr_t)pPage;
    pgdir = page >> X86_PAGE_4M_SHIFT;
    bit   = (page & X86_PAGE_4M_OFFSET_MASK) >> X86_PAGE_4K_SHIFT;

    Assert(pgdir < CSAM_PGDIRBMP_CHUNKS);
    Assert(bit < PAGE_SIZE);

    if (!CTXSUFF(pVM->csam.s.pPDBitmap)[pgdir])
    {
        STAM_COUNTER_INC(&pVM->csam.s.StatBitmapAlloc);
        int rc = MMHyperAlloc(pVM, CSAM_PAGE_BITMAP_SIZE, 0, MM_TAG_CSAM, (void **)&pVM->csam.s.CTXSUFF(pPDBitmap)[pgdir]);
        if (RT_FAILURE(rc))
        {
            Log(("MMHyperAlloc failed with %Rrc\n", rc));
            return rc;
        }
#ifdef IN_RC
        pVM->csam.s.pPDHCBitmapGC[pgdir] = MMHyperRCToR3(pVM, (RCPTRTYPE(void*))pVM->csam.s.pPDBitmapGC[pgdir]);
        if (!pVM->csam.s.pPDHCBitmapGC[pgdir])
        {
            Log(("MMHyperHC2GC failed for %RRv\n", pVM->csam.s.pPDBitmapGC[pgdir]));
            return rc;
        }
#else
        pVM->csam.s.pPDGCBitmapHC[pgdir] = MMHyperR3ToRC(pVM, pVM->csam.s.pPDBitmapHC[pgdir]);
        if (!pVM->csam.s.pPDGCBitmapHC[pgdir])
        {
            Log(("MMHyperHC2GC failed for %RHv\n", pVM->csam.s.pPDBitmapHC[pgdir]));
            return rc;
        }
#endif
    }
    if (fScanned)
        ASMBitSet((void *)pVM->csam.s.CTXSUFF(pPDBitmap)[pgdir], bit);
    else
        ASMBitClear((void *)pVM->csam.s.CTXSUFF(pPDBitmap)[pgdir], bit);

    return VINF_SUCCESS;
}
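/* The per-4MB bitmap chunks are allocated lazily from the hypervisor heap the
 * first time a page in that region is marked, and the address of the same
 * allocation as seen from the other context (ring-3 vs raw-mode) is stored
 * alongside it so both contexts can reach the bitmap.  Typical use is simply
 * marking a page once it has been analysed, e.g. (illustrative call only,
 * GCPtrPage being some guest address):
 *
 *     CSAMMarkPage(pVM, GCPtrPage, true);   // fScanned = true
 */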

/**
 * Check if this page needs to be analysed by CSAM.
 *
 * This function should only be called for supervisor pages and
 * only when CSAM is enabled. Leaving these selection criteria
 * to the caller simplifies the interface (PTE passing).
 *
 * Note that the page has not yet been synced, so the TLB trick
 * (which wasn't ever active anyway) cannot be applied.
 *
 * @returns true if the page should be marked not present because
 *          CSAM needs to scan it.
 * @returns false if the page was already scanned.
 * @param   pVM         The cross context VM structure.
 * @param   GCPtr       GC pointer of page
 */
VMM_INT_DECL(bool) CSAMDoesPageNeedScanning(PVM pVM, RTRCUINTPTR GCPtr)
{
    if (!CSAMIsEnabled(pVM))
        return false;
    Assert(!HMIsEnabled(pVM));

    if (CSAMIsPageScanned(pVM, (RTRCPTR)GCPtr))
    {
        /* Already checked! */
        STAM_COUNTER_ADD(&CTXSUFF(pVM->csam.s.StatNrKnownPages), 1);
        return false;
    }
    STAM_COUNTER_ADD(&CTXSUFF(pVM->csam.s.StatNrPageNP), 1);
    return true;
}
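/* The boolean above is meant for the page table syncing code (presumably PGM):
 * when it returns true the caller is expected to map the page not-present, so
 * that the first supervisor-mode access faults and ends up in CSAMExecFault(),
 * which then schedules the actual scan. */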


/**
 * Remember a possible code page for later inspection
 *
 * @param   pVM         The cross context VM structure.
 * @param   GCPtr       GC pointer of page
 */
VMM_INT_DECL(void) CSAMMarkPossibleCodePage(PVM pVM, RTRCPTR GCPtr)
{
    Assert(!HMIsEnabled(pVM));
    if (pVM->csam.s.cPossibleCodePages < RT_ELEMENTS(pVM->csam.s.pvPossibleCodePage))
    {
        pVM->csam.s.pvPossibleCodePage[pVM->csam.s.cPossibleCodePages++] = (RTRCPTR)GCPtr;
        VMCPU_FF_SET(VMMGetCpu0(pVM), VMCPU_FF_CSAM_PENDING_ACTION);
    }
    return;
}
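/* The possible-code-page queue is a fixed-size array: once it is full, further
 * candidates are silently dropped until ring-3 has drained it (the force-action
 * flag set above is what triggers that processing). */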


/**
 * Turn on code scanning
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 */
VMM_INT_DECL(int) CSAMEnableScanning(PVM pVM)
{
    AssertReturn(!HMIsEnabled(pVM), VERR_CSAM_HM_IPE);
    pVM->fCSAMEnabled = true;
    return VINF_SUCCESS;
}

/**
 * Turn off code scanning
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 */
VMM_INT_DECL(int) CSAMDisableScanning(PVM pVM)
{
    pVM->fCSAMEnabled = false;
    return VINF_SUCCESS;
}


/**
 * Check if we've scanned this instruction before. If true, then we can emulate
 * it instead of returning to ring 3.
 *
 * Using a simple array here as there are generally few mov crx instructions and
 * a tree lookup is likely to be more expensive (it would also have to be offset
 * based).
 *
 * @returns boolean
 * @param   pVM         The cross context VM structure.
 * @param   GCPtr       GC pointer of page table entry
 */
VMM_INT_DECL(bool) CSAMIsKnownDangerousInstr(PVM pVM, RTRCUINTPTR GCPtr)
{
    Assert(!HMIsEnabled(pVM));

    for (uint32_t i = 0; i < pVM->csam.s.cDangerousInstr; i++)
    {
        if (pVM->csam.s.aDangerousInstr[i] == (RTRCPTR)GCPtr)
        {
            STAM_COUNTER_INC(&pVM->csam.s.StatInstrCacheHit);
            return true;
        }
    }
    /* Record that we're about to process it in ring 3. */
    pVM->csam.s.aDangerousInstr[pVM->csam.s.iDangerousInstr++] = (RTRCPTR)GCPtr;
    pVM->csam.s.iDangerousInstr &= CSAM_MAX_DANGR_INSTR_MASK;

    if (++pVM->csam.s.cDangerousInstr > CSAM_MAX_DANGR_INSTR)
        pVM->csam.s.cDangerousInstr = CSAM_MAX_DANGR_INSTR;

    STAM_COUNTER_INC(&pVM->csam.s.StatInstrCacheMiss);
    return false;
}
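/* The cache above is a small ring buffer: iDangerousInstr wraps around via
 * CSAM_MAX_DANGR_INSTR_MASK while cDangerousInstr saturates at
 * CSAM_MAX_DANGR_INSTR, so once the buffer is full the oldest entries are
 * overwritten.  Note that a miss also records the address, so the next time
 * the same instruction traps it can be emulated without another trip to
 * ring-3. */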