VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CSAMAll.cpp@ 56284

Last change on this file since 56284 was 56042, checked in by vboxsync, 10 years ago

CSAMAll.cpp: Removed strict rc todo.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 12.6 KB
Line 
1/* $Id: CSAMAll.cpp 56042 2015-05-22 21:03:03Z vboxsync $ */
2/** @file
3 * CSAM - Guest OS Code Scanning and Analysis Manager - Any Context
4 */
5
6/*
7 * Copyright (C) 2006-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_CSAM
23#include <VBox/vmm/cpum.h>
24#include <VBox/vmm/stam.h>
25#include <VBox/vmm/patm.h>
26#include <VBox/vmm/csam.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/em.h>
29#include <VBox/vmm/hm.h>
30#include <VBox/vmm/mm.h>
31#ifdef VBOX_WITH_REM
32# include <VBox/vmm/rem.h>
33#endif
34#include <VBox/sup.h>
35#include <VBox/vmm/mm.h>
36#include <VBox/param.h>
37#include <iprt/avl.h>
38#include "CSAMInternal.h"
39#include <VBox/vmm/vm.h>
40#include <VBox/vmm/vmm.h>
41#include <VBox/dbg.h>
42#include <VBox/err.h>
43#include <VBox/log.h>
44#include <VBox/dis.h>
45#include <VBox/disopcode.h>
46#include <iprt/assert.h>
47#include <iprt/asm.h>
48#include <iprt/string.h>
49
50#ifdef IN_RING0
51# error "IN_RING3 & IN_RC only!"
52#endif
53
54
/**
 * Access handler callback for virtual access handler ranges.
 *
 * Important to realize that a physical page in a range can have aliases, and
 * for ALL and WRITE handlers these will also trigger.
 *
 * @returns VINF_SUCCESS if the handler have carried out the operation.
 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
 * @param   pVM             Pointer to the VM.
 * @param   pVCpu           Pointer to the cross context CPU context for the
 *                          calling EMT.
 * @param   GCPtr           The virtual address the guest is writing to. (not correct if it's an alias!)
 * @param   pvPtr           The HC mapping of that address.
 * @param   pvBuf           What the guest is reading/writing.
 * @param   cbBuf           How much it's reading/writing.
 * @param   enmAccessType   The access type.
 * @param   enmOrigin       Who is making this write.
 * @param   pvUser          User argument.
 */
PGM_ALL_CB2_DECL(VBOXSTRICTRC)
csamCodePageWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
                         PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
{
    /* NOTE(review): pvUser appears to carry the page address the handler was
       registered for; OR-ing in the page offset of the faulting address gives
       the monitored (non-aliased) address — confirm against the registration
       site in CSAM ring-3 code. */
    RTGCPTR const GCPtrMonitored = (uintptr_t)pvUser | (GCPtr & PAGE_OFFSET_MASK);
    Log(("csamCodePageWriteHandler: write to %RGv LB %zu\n", GCPtr, cbBuf));

    Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
    Assert(VMCPU_IS_EMT(pVCpu));

    /*
     * Check if it's a dummy write that doesn't change anything.
     * (Only valid when the write doesn't straddle a page boundary, hence the
     * PAGE_ADDRESS check before the memcmp.)
     */
    if (   PAGE_ADDRESS(pvPtr) == PAGE_ADDRESS((uintptr_t)pvPtr + cbBuf - 1)
        && !memcmp(pvPtr, pvBuf, cbBuf))
    {
        Log(("csamCodePageWriteHandler: dummy write -> ignore\n"));
        return VINF_PGM_HANDLER_DO_DEFAULT;
    }

#ifdef IN_RING3
    /*
     * Ring-3: Do proper handling.
     */
    int rc = PATMR3PatchWrite(pVM, GCPtrMonitored, (uint32_t)cbBuf);
    AssertRC(rc);
    return VINF_PGM_HANDLER_DO_DEFAULT;

#else
    /*
     * Raw-mode: Try avoid needing to go to ring-3 (same as csamRCCodePageWritePfHandler).
     */
    uint32_t const cpl          = CPUMGetGuestCPL(pVCpu);
    bool const     fPatchCode   = PATMIsPatchGCAddr(pVM, CPUMGetGuestRIP(pVCpu));
    PPATMGCSTATE   pPATMGCState = PATMGetGCState(pVM);

    Assert(pVM->csam.s.cDirtyPages < CSAM_MAX_DIRTY_PAGES);
    Assert(pPATMGCState);
    Assert(pPATMGCState->fPIF || fPatchCode);

# ifdef VBOX_WITH_REM
    /* Flush the recompilers translation block cache as the guest seems to be modifying instructions. */
    /** @todo a bit overkill?? */
    REMFlushTBs(pVM);
# endif

    /*
     * When patch code is executing instructions that must complete, then we
     * must *never* interrupt it.
     */
    if (!pPATMGCState->fPIF && fPatchCode)
    {
        Log(("csamRCCodePageWriteHandler: fPIF=0 -> stack fault in patch generated code at %08RX32!\n", CPUMGetGuestRIP(pVCpu)));
        return VINF_PGM_HANDLER_DO_DEFAULT;
    }

    Log(("csamRCCodePageWriteHandler: code page write at %RGv original address %RGv (cpl=%d)\n", GCPtr, GCPtrMonitored, cpl));

    /*
     * If user code is modifying one of our monitored pages, then we can safely
     * write to it as it's no longer being used for supervisor code.
     */
    if (cpl != 3)
    {
        /* Let PATM try to handle the write to the patch page in place. */
        VBOXSTRICTRC rcStrict = PATMRCHandleWriteToPatchPage(pVM, NULL /* pRegFrame = no interpret */,
                                                             (RTRCPTR)GCPtrMonitored, cbBuf);
        if (   rcStrict == VINF_PGM_HANDLER_DO_DEFAULT
            || rcStrict == VINF_SUCCESS)
            return rcStrict;
        if (rcStrict == VINF_EM_RAW_EMULATE_INSTR)
        {
            STAM_COUNTER_INC(&pVM->csam.s.StatDangerousWrite);
            return VINF_EM_RAW_EMULATE_INSTR;
        }
        Assert(rcStrict == VERR_PATCH_NOT_FOUND);
    }

    /*
     * Schedule ring-3 activity.
     * Note that GCPtr might be a different address in case of aliases. So,
     * take down both alternatives.
     */
    VMCPU_FF_SET(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION);
    pVM->csam.s.pvDirtyBasePage[pVM->csam.s.cDirtyPages]  = (RTRCPTR)GCPtrMonitored;
    pVM->csam.s.pvDirtyFaultPage[pVM->csam.s.cDirtyPages] = (RTRCPTR)GCPtr;
    /* When the dirty-page buffer is full, force a trip to ring-3 now. */
    if (++pVM->csam.s.cDirtyPages == CSAM_MAX_DIRTY_PAGES)
        return VINF_CSAM_PENDING_ACTION;

    /*
     * Continue with the write. The VM_FF_CSAM_FLUSH_DIRTY_PAGE handler will reset it to readonly again.
     */
    Log(("csamRCCodePageWriteHandler: enabled r/w for page %RGv (%RGv)\n", GCPtr, GCPtrMonitored));
    STAM_COUNTER_INC(&pVM->csam.s.StatCodePageModified);
    return VINF_PGM_HANDLER_DO_DEFAULT;
#endif
}
170
171
172/**
173 * Check if this page needs to be analysed by CSAM
174 *
175 * @returns VBox status code
176 * @param pVM Pointer to the VM.
177 * @param pvFault Fault address
178 */
179VMM_INT_DECL(int) CSAMExecFault(PVM pVM, RTRCPTR pvFault)
180{
181 Assert(!HMIsEnabled(pVM));
182 if (!CSAMIsEnabled(pVM))
183 return VINF_SUCCESS;
184
185 LogFlow(("CSAMGCExecFault: for page %08X scanned=%d\n", pvFault, CSAMIsPageScanned(pVM, pvFault)));
186
187 if (CSAMIsPageScanned(pVM, pvFault))
188 {
189 // Already checked!
190 STAM_COUNTER_ADD(&pVM->csam.s.StatNrKnownPagesGC, 1);
191 return VINF_SUCCESS;
192 }
193
194 STAM_COUNTER_ADD(&pVM->csam.s.StatNrTraps, 1);
195 VMCPU_FF_SET(VMMGetCpu0(pVM), VMCPU_FF_CSAM_SCAN_PAGE);
196 return VINF_CSAM_PENDING_ACTION;
197}
198
199
200/**
201 * Check if this page was previously scanned by CSAM
202 *
203 * @returns true -> scanned, false -> not scanned
204 * @param pVM Pointer to the VM.
205 * @param pPage GC page address
206 */
207VMM_INT_DECL(bool) CSAMIsPageScanned(PVM pVM, RTRCPTR pPage)
208{
209 int pgdir, bit;
210 uintptr_t page;
211 Assert(!HMIsEnabled(pVM));
212
213 page = (uintptr_t)pPage;
214 pgdir = page >> X86_PAGE_4M_SHIFT;
215 bit = (page & X86_PAGE_4M_OFFSET_MASK) >> X86_PAGE_4K_SHIFT;
216
217 Assert(pgdir < CSAM_PGDIRBMP_CHUNKS);
218 Assert(bit < PAGE_SIZE);
219
220 return pVM->csam.s.CTXSUFF(pPDBitmap)[pgdir] && ASMBitTest((void *)pVM->csam.s.CTXSUFF(pPDBitmap)[pgdir], bit);
221}
222
223
224
225/**
226 * Mark a page as scanned/not scanned
227 *
228 * @note: we always mark it as scanned, even if we haven't completely done so
229 *
230 * @returns VBox status code.
231 * @param pVM Pointer to the VM.
232 * @param pPage GC page address (not necessarily aligned)
233 * @param fScanned Mark as scanned or not scanned
234 *
235 */
236VMM_INT_DECL(int) CSAMMarkPage(PVM pVM, RTRCUINTPTR pPage, bool fScanned)
237{
238 int pgdir, bit;
239 uintptr_t page;
240
241#ifdef LOG_ENABLED
242 if (fScanned && !CSAMIsPageScanned(pVM, (RTRCPTR)pPage))
243 Log(("CSAMMarkPage %RRv\n", pPage));
244#endif
245
246 if (!CSAMIsEnabled(pVM))
247 return VINF_SUCCESS;
248 Assert(!HMIsEnabled(pVM));
249
250 page = (uintptr_t)pPage;
251 pgdir = page >> X86_PAGE_4M_SHIFT;
252 bit = (page & X86_PAGE_4M_OFFSET_MASK) >> X86_PAGE_4K_SHIFT;
253
254 Assert(pgdir < CSAM_PGDIRBMP_CHUNKS);
255 Assert(bit < PAGE_SIZE);
256
257 if(!CTXSUFF(pVM->csam.s.pPDBitmap)[pgdir])
258 {
259 STAM_COUNTER_INC(&pVM->csam.s.StatBitmapAlloc);
260 int rc = MMHyperAlloc(pVM, CSAM_PAGE_BITMAP_SIZE, 0, MM_TAG_CSAM, (void **)&pVM->csam.s.CTXSUFF(pPDBitmap)[pgdir]);
261 if (RT_FAILURE(rc))
262 {
263 Log(("MMHyperAlloc failed with %Rrc\n", rc));
264 return rc;
265 }
266#ifdef IN_RC
267 pVM->csam.s.pPDHCBitmapGC[pgdir] = MMHyperRCToR3(pVM, (RCPTRTYPE(void*))pVM->csam.s.pPDBitmapGC[pgdir]);
268 if (!pVM->csam.s.pPDHCBitmapGC[pgdir])
269 {
270 Log(("MMHyperHC2GC failed for %RRv\n", pVM->csam.s.pPDBitmapGC[pgdir]));
271 return rc;
272 }
273#else
274 pVM->csam.s.pPDGCBitmapHC[pgdir] = MMHyperR3ToRC(pVM, pVM->csam.s.pPDBitmapHC[pgdir]);
275 if (!pVM->csam.s.pPDGCBitmapHC[pgdir])
276 {
277 Log(("MMHyperHC2GC failed for %RHv\n", pVM->csam.s.pPDBitmapHC[pgdir]));
278 return rc;
279 }
280#endif
281 }
282 if(fScanned)
283 ASMBitSet((void *)pVM->csam.s.CTXSUFF(pPDBitmap)[pgdir], bit);
284 else
285 ASMBitClear((void *)pVM->csam.s.CTXSUFF(pPDBitmap)[pgdir], bit);
286
287 return VINF_SUCCESS;
288}
289
290/**
291 * Check if this page needs to be analysed by CSAM.
292 *
293 * This function should only be called for supervisor pages and
294 * only when CSAM is enabled. Leaving these selection criteria
295 * to the caller simplifies the interface (PTE passing).
296 *
297 * Note that the page has not yet been synced, so the TLB trick
298 * (which wasn't ever active anyway) cannot be applied.
299 *
300 * @returns true if the page should be marked not present because
301 * CSAM want need to scan it.
302 * @returns false if the page was already scanned.
303 * @param pVM Pointer to the VM.
304 * @param GCPtr GC pointer of page
305 */
306VMM_INT_DECL(bool) CSAMDoesPageNeedScanning(PVM pVM, RTRCUINTPTR GCPtr)
307{
308 if (!CSAMIsEnabled(pVM))
309 return false;
310 Assert(!HMIsEnabled(pVM));
311
312 if(CSAMIsPageScanned(pVM, (RTRCPTR)GCPtr))
313 {
314 /* Already checked! */
315 STAM_COUNTER_ADD(&CTXSUFF(pVM->csam.s.StatNrKnownPages), 1);
316 return false;
317 }
318 STAM_COUNTER_ADD(&CTXSUFF(pVM->csam.s.StatNrPageNP), 1);
319 return true;
320}
321
322
323/**
324 * Remember a possible code page for later inspection
325 *
326 * @returns VBox status code.
327 * @param pVM Pointer to the VM.
328 * @param GCPtr GC pointer of page
329 */
330VMM_INT_DECL(void) CSAMMarkPossibleCodePage(PVM pVM, RTRCPTR GCPtr)
331{
332 Assert(!HMIsEnabled(pVM));
333 if (pVM->csam.s.cPossibleCodePages < RT_ELEMENTS(pVM->csam.s.pvPossibleCodePage))
334 {
335 pVM->csam.s.pvPossibleCodePage[pVM->csam.s.cPossibleCodePages++] = (RTRCPTR)GCPtr;
336 VMCPU_FF_SET(VMMGetCpu0(pVM), VMCPU_FF_CSAM_PENDING_ACTION);
337 }
338 return;
339}
340
341
342/**
343 * Turn on code scanning
344 *
345 * @returns VBox status code.
346 * @param pVM Pointer to the VM.
347 */
348VMM_INT_DECL(int) CSAMEnableScanning(PVM pVM)
349{
350 AssertReturn(!HMIsEnabled(pVM), VERR_CSAM_HM_IPE);
351 pVM->fCSAMEnabled = true;
352 return VINF_SUCCESS;
353}
354
355/**
356 * Turn off code scanning
357 *
358 * @returns VBox status code.
359 * @param pVM Pointer to the VM.
360 */
361VMM_INT_DECL(int) CSAMDisableScanning(PVM pVM)
362{
363 pVM->fCSAMEnabled = false;
364 return VINF_SUCCESS;
365}
366
367
368/**
369 * Check if we've scanned this instruction before. If true, then we can emulate
370 * it instead of returning to ring 3.
371 *
372 * Using a simple array here as there are generally few mov crx instructions and
373 * tree lookup is likely to be more expensive. (as it would also have to be offset based)
374 *
375 * @returns boolean
376 * @param pVM Pointer to the VM.
377 * @param GCPtr GC pointer of page table entry
378 */
379VMM_INT_DECL(bool) CSAMIsKnownDangerousInstr(PVM pVM, RTRCUINTPTR GCPtr)
380{
381 Assert(!HMIsEnabled(pVM));
382
383 for (uint32_t i=0;i<pVM->csam.s.cDangerousInstr;i++)
384 {
385 if (pVM->csam.s.aDangerousInstr[i] == (RTRCPTR)GCPtr)
386 {
387 STAM_COUNTER_INC(&pVM->csam.s.StatInstrCacheHit);
388 return true;
389 }
390 }
391 /* Record that we're about to process it in ring 3. */
392 pVM->csam.s.aDangerousInstr[pVM->csam.s.iDangerousInstr++] = (RTRCPTR)GCPtr;
393 pVM->csam.s.iDangerousInstr &= CSAM_MAX_DANGR_INSTR_MASK;
394
395 if (++pVM->csam.s.cDangerousInstr > CSAM_MAX_DANGR_INSTR)
396 pVM->csam.s.cDangerousInstr = CSAM_MAX_DANGR_INSTR;
397
398 STAM_COUNTER_INC(&pVM->csam.s.StatInstrCacheMiss);
399 return false;
400}
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette