VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CSAMAll.cpp@ 57076

Last change on this file since 57076 was 56420, checked in by vboxsync, 10 years ago

csamCodePageWriteHandler: pvUser is the pointer to the CSAMPAGEREC in ring-3, not the original virtual page address.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 12.4 KB
Line 
1/* $Id: CSAMAll.cpp 56420 2015-06-14 18:51:55Z vboxsync $ */
2/** @file
3 * CSAM - Guest OS Code Scanning and Analysis Manager - Any Context
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_CSAM
23#include <VBox/vmm/cpum.h>
24#include <VBox/vmm/stam.h>
25#include <VBox/vmm/patm.h>
26#include <VBox/vmm/csam.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/em.h>
29#include <VBox/vmm/hm.h>
30#include <VBox/vmm/mm.h>
31#ifdef VBOX_WITH_REM
32# include <VBox/vmm/rem.h>
33#endif
34#include <VBox/sup.h>
35#include <VBox/vmm/mm.h>
36#include <VBox/param.h>
37#include <iprt/avl.h>
38#include "CSAMInternal.h"
39#include <VBox/vmm/vm.h>
40#include <VBox/vmm/vmm.h>
41#include <VBox/dbg.h>
42#include <VBox/err.h>
43#include <VBox/log.h>
44#include <VBox/dis.h>
45#include <VBox/disopcode.h>
46#include <iprt/assert.h>
47#include <iprt/asm.h>
48#include <iprt/string.h>
49
50#ifdef IN_RING0
51# error "IN_RING3 & IN_RC only!"
52#endif
53
54
/**
 * Access handler callback for virtual access handler ranges.
 *
 * Important to realize that a physical page in a range can have aliases, and
 * for ALL and WRITE handlers these will also trigger.
 *
 * @returns VINF_SUCCESS if the handler have carried out the operation.
 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
 * @returns VINF_CSAM_PENDING_ACTION (RC only) when the dirty-page table is
 *          full and ring-3 must process it before the write can proceed.
 * @returns VINF_EM_RAW_EMULATE_INSTR (RC only) for dangerous writes to patch
 *          pages that must be emulated.
 * @param   pVM             Pointer to the VM.
 * @param   pVCpu           Pointer to the cross context CPU context for the
 *                          calling EMT.
 * @param   GCPtr           The virtual address the guest is writing to. (not correct if it's an alias!)
 * @param   pvPtr           The HC mapping of that address.
 * @param   pvBuf           What the guest is reading/writing.
 * @param   cbBuf           How much it's reading/writing.
 * @param   enmAccessType   The access type.
 * @param   enmOrigin       Who is making this write.
 * @param   pvUser          The CSAMPAGEREC in ring-3, NIL in RC.
 */
PGM_ALL_CB2_DECL(VBOXSTRICTRC)
csamCodePageWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
                         PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
{
    Log(("csamCodePageWriteHandler: write to %RGv LB %zu\n", GCPtr, cbBuf));
    Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
    Assert(VMCPU_IS_EMT(pVCpu));

    /*
     * Check if it's a dummy write that doesn't change anything.
     * (The page-address check ensures the memcmp stays within one HC page;
     * a write crossing the page boundary is never treated as a dummy.)
     */
    if (   PAGE_ADDRESS(pvPtr) == PAGE_ADDRESS((uintptr_t)pvPtr + cbBuf - 1)
        && !memcmp(pvPtr, pvBuf, cbBuf))
    {
        Log(("csamCodePageWriteHandler: dummy write -> ignore\n"));
        return VINF_PGM_HANDLER_DO_DEFAULT;
    }

#ifdef IN_RING3
    /*
     * Ring-3: Do proper handling.
     */
    int rc = PATMR3PatchWrite(pVM, GCPtr, (uint32_t)cbBuf);
    AssertRC(rc);
    return VINF_PGM_HANDLER_DO_DEFAULT;

#else
    /*
     * Raw-mode: Try avoid needing to go to ring-3 (same as csamRCCodePageWritePfHandler).
     * NOTE(review): the log strings below still use the old name
     * "csamRCCodePageWriteHandler"; kept byte-identical here.
     */
    uint32_t const cpl = CPUMGetGuestCPL(pVCpu);
    bool const fPatchCode = PATMIsPatchGCAddr(pVM, CPUMGetGuestRIP(pVCpu));
    PPATMGCSTATE pPATMGCState = PATMGetGCState(pVM);

    Assert(pVM->csam.s.cDirtyPages < CSAM_MAX_DIRTY_PAGES);
    Assert(pPATMGCState);
    Assert(pPATMGCState->fPIF || fPatchCode);

# ifdef VBOX_WITH_REM
    /* Flush the recompilers translation block cache as the guest seems to be modifying instructions. */
    /** @todo a bit overkill?? */
    REMFlushTBs(pVM);
# endif

    /*
     * When patch code is executing instructions that must complete, then we
     * must *never* interrupt it.
     */
    if (!pPATMGCState->fPIF && fPatchCode)
    {
        Log(("csamRCCodePageWriteHandler: fPIF=0 -> stack fault in patch generated code at %08RX32!\n", CPUMGetGuestRIP(pVCpu)));
        return VINF_PGM_HANDLER_DO_DEFAULT;
    }

    Log(("csamRCCodePageWriteHandler: code page write at %RGv (cpl=%d)\n", GCPtr, cpl));

    /*
     * If user code is modifying one of our monitored pages, then we can safely
     * write to it as it's no longer being used for supervisor code.
     * (Hence only the supervisor case, cpl != 3, needs patch-page handling.)
     */
    if (cpl != 3)
    {
        VBOXSTRICTRC rcStrict = PATMRCHandleWriteToPatchPage(pVM, NULL /* pRegFrame = no interpret */, (RTRCPTR)GCPtr, cbBuf);
        if (   rcStrict == VINF_PGM_HANDLER_DO_DEFAULT
            || rcStrict == VINF_SUCCESS)
            return rcStrict;
        if (rcStrict == VINF_EM_RAW_EMULATE_INSTR)
        {
            STAM_COUNTER_INC(&pVM->csam.s.StatDangerousWrite);
            return VINF_EM_RAW_EMULATE_INSTR;
        }
        Assert(rcStrict == VERR_PATCH_NOT_FOUND);
    }

    /*
     * Schedule ring-3 activity.
     * Note that GCPtr might be a different address in case of aliases. So,
     * take down both alternatives.
     * (Base and fault entries are both GCPtr here; the original faulting
     * virtual address is not available in this context — see r56420.)
     */
    VMCPU_FF_SET(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION);
    pVM->csam.s.pvDirtyBasePage[pVM->csam.s.cDirtyPages] = (RTRCPTR)GCPtr;
    pVM->csam.s.pvDirtyFaultPage[pVM->csam.s.cDirtyPages] = (RTRCPTR)GCPtr;
    if (++pVM->csam.s.cDirtyPages == CSAM_MAX_DIRTY_PAGES)
        return VINF_CSAM_PENDING_ACTION;

    /*
     * Continue with the write. The VM_FF_CSAM_FLUSH_DIRTY_PAGE handler will reset it to readonly again.
     */
    Log(("csamRCCodePageWriteHandler: enabled r/w for page %RGv (%RGv)\n", GCPtr, GCPtr));
    STAM_COUNTER_INC(&pVM->csam.s.StatCodePageModified);
    return VINF_PGM_HANDLER_DO_DEFAULT;
#endif
}
167
168
169/**
170 * Check if this page needs to be analysed by CSAM
171 *
172 * @returns VBox status code
173 * @param pVM Pointer to the VM.
174 * @param pvFault Fault address
175 */
176VMM_INT_DECL(int) CSAMExecFault(PVM pVM, RTRCPTR pvFault)
177{
178 Assert(!HMIsEnabled(pVM));
179 if (!CSAMIsEnabled(pVM))
180 return VINF_SUCCESS;
181
182 LogFlow(("CSAMGCExecFault: for page %08X scanned=%d\n", pvFault, CSAMIsPageScanned(pVM, pvFault)));
183
184 if (CSAMIsPageScanned(pVM, pvFault))
185 {
186 // Already checked!
187 STAM_COUNTER_ADD(&pVM->csam.s.StatNrKnownPagesGC, 1);
188 return VINF_SUCCESS;
189 }
190
191 STAM_COUNTER_ADD(&pVM->csam.s.StatNrTraps, 1);
192 VMCPU_FF_SET(VMMGetCpu0(pVM), VMCPU_FF_CSAM_SCAN_PAGE);
193 return VINF_CSAM_PENDING_ACTION;
194}
195
196
197/**
198 * Check if this page was previously scanned by CSAM
199 *
200 * @returns true -> scanned, false -> not scanned
201 * @param pVM Pointer to the VM.
202 * @param pPage GC page address
203 */
204VMM_INT_DECL(bool) CSAMIsPageScanned(PVM pVM, RTRCPTR pPage)
205{
206 int pgdir, bit;
207 uintptr_t page;
208 Assert(!HMIsEnabled(pVM));
209
210 page = (uintptr_t)pPage;
211 pgdir = page >> X86_PAGE_4M_SHIFT;
212 bit = (page & X86_PAGE_4M_OFFSET_MASK) >> X86_PAGE_4K_SHIFT;
213
214 Assert(pgdir < CSAM_PGDIRBMP_CHUNKS);
215 Assert(bit < PAGE_SIZE);
216
217 return pVM->csam.s.CTXSUFF(pPDBitmap)[pgdir] && ASMBitTest((void *)pVM->csam.s.CTXSUFF(pPDBitmap)[pgdir], bit);
218}
219
220
221
222/**
223 * Mark a page as scanned/not scanned
224 *
225 * @note: we always mark it as scanned, even if we haven't completely done so
226 *
227 * @returns VBox status code.
228 * @param pVM Pointer to the VM.
229 * @param pPage GC page address (not necessarily aligned)
230 * @param fScanned Mark as scanned or not scanned
231 *
232 */
233VMM_INT_DECL(int) CSAMMarkPage(PVM pVM, RTRCUINTPTR pPage, bool fScanned)
234{
235 int pgdir, bit;
236 uintptr_t page;
237
238#ifdef LOG_ENABLED
239 if (fScanned && !CSAMIsPageScanned(pVM, (RTRCPTR)pPage))
240 Log(("CSAMMarkPage %RRv\n", pPage));
241#endif
242
243 if (!CSAMIsEnabled(pVM))
244 return VINF_SUCCESS;
245 Assert(!HMIsEnabled(pVM));
246
247 page = (uintptr_t)pPage;
248 pgdir = page >> X86_PAGE_4M_SHIFT;
249 bit = (page & X86_PAGE_4M_OFFSET_MASK) >> X86_PAGE_4K_SHIFT;
250
251 Assert(pgdir < CSAM_PGDIRBMP_CHUNKS);
252 Assert(bit < PAGE_SIZE);
253
254 if(!CTXSUFF(pVM->csam.s.pPDBitmap)[pgdir])
255 {
256 STAM_COUNTER_INC(&pVM->csam.s.StatBitmapAlloc);
257 int rc = MMHyperAlloc(pVM, CSAM_PAGE_BITMAP_SIZE, 0, MM_TAG_CSAM, (void **)&pVM->csam.s.CTXSUFF(pPDBitmap)[pgdir]);
258 if (RT_FAILURE(rc))
259 {
260 Log(("MMHyperAlloc failed with %Rrc\n", rc));
261 return rc;
262 }
263#ifdef IN_RC
264 pVM->csam.s.pPDHCBitmapGC[pgdir] = MMHyperRCToR3(pVM, (RCPTRTYPE(void*))pVM->csam.s.pPDBitmapGC[pgdir]);
265 if (!pVM->csam.s.pPDHCBitmapGC[pgdir])
266 {
267 Log(("MMHyperHC2GC failed for %RRv\n", pVM->csam.s.pPDBitmapGC[pgdir]));
268 return rc;
269 }
270#else
271 pVM->csam.s.pPDGCBitmapHC[pgdir] = MMHyperR3ToRC(pVM, pVM->csam.s.pPDBitmapHC[pgdir]);
272 if (!pVM->csam.s.pPDGCBitmapHC[pgdir])
273 {
274 Log(("MMHyperHC2GC failed for %RHv\n", pVM->csam.s.pPDBitmapHC[pgdir]));
275 return rc;
276 }
277#endif
278 }
279 if(fScanned)
280 ASMBitSet((void *)pVM->csam.s.CTXSUFF(pPDBitmap)[pgdir], bit);
281 else
282 ASMBitClear((void *)pVM->csam.s.CTXSUFF(pPDBitmap)[pgdir], bit);
283
284 return VINF_SUCCESS;
285}
286
287/**
288 * Check if this page needs to be analysed by CSAM.
289 *
290 * This function should only be called for supervisor pages and
291 * only when CSAM is enabled. Leaving these selection criteria
292 * to the caller simplifies the interface (PTE passing).
293 *
294 * Note that the page has not yet been synced, so the TLB trick
295 * (which wasn't ever active anyway) cannot be applied.
296 *
297 * @returns true if the page should be marked not present because
298 * CSAM want need to scan it.
299 * @returns false if the page was already scanned.
300 * @param pVM Pointer to the VM.
301 * @param GCPtr GC pointer of page
302 */
303VMM_INT_DECL(bool) CSAMDoesPageNeedScanning(PVM pVM, RTRCUINTPTR GCPtr)
304{
305 if (!CSAMIsEnabled(pVM))
306 return false;
307 Assert(!HMIsEnabled(pVM));
308
309 if(CSAMIsPageScanned(pVM, (RTRCPTR)GCPtr))
310 {
311 /* Already checked! */
312 STAM_COUNTER_ADD(&CTXSUFF(pVM->csam.s.StatNrKnownPages), 1);
313 return false;
314 }
315 STAM_COUNTER_ADD(&CTXSUFF(pVM->csam.s.StatNrPageNP), 1);
316 return true;
317}
318
319
320/**
321 * Remember a possible code page for later inspection
322 *
323 * @returns VBox status code.
324 * @param pVM Pointer to the VM.
325 * @param GCPtr GC pointer of page
326 */
327VMM_INT_DECL(void) CSAMMarkPossibleCodePage(PVM pVM, RTRCPTR GCPtr)
328{
329 Assert(!HMIsEnabled(pVM));
330 if (pVM->csam.s.cPossibleCodePages < RT_ELEMENTS(pVM->csam.s.pvPossibleCodePage))
331 {
332 pVM->csam.s.pvPossibleCodePage[pVM->csam.s.cPossibleCodePages++] = (RTRCPTR)GCPtr;
333 VMCPU_FF_SET(VMMGetCpu0(pVM), VMCPU_FF_CSAM_PENDING_ACTION);
334 }
335 return;
336}
337
338
339/**
340 * Turn on code scanning
341 *
342 * @returns VBox status code.
343 * @param pVM Pointer to the VM.
344 */
345VMM_INT_DECL(int) CSAMEnableScanning(PVM pVM)
346{
347 AssertReturn(!HMIsEnabled(pVM), VERR_CSAM_HM_IPE);
348 pVM->fCSAMEnabled = true;
349 return VINF_SUCCESS;
350}
351
352/**
353 * Turn off code scanning
354 *
355 * @returns VBox status code.
356 * @param pVM Pointer to the VM.
357 */
358VMM_INT_DECL(int) CSAMDisableScanning(PVM pVM)
359{
360 pVM->fCSAMEnabled = false;
361 return VINF_SUCCESS;
362}
363
364
365/**
366 * Check if we've scanned this instruction before. If true, then we can emulate
367 * it instead of returning to ring 3.
368 *
369 * Using a simple array here as there are generally few mov crx instructions and
370 * tree lookup is likely to be more expensive. (as it would also have to be offset based)
371 *
372 * @returns boolean
373 * @param pVM Pointer to the VM.
374 * @param GCPtr GC pointer of page table entry
375 */
376VMM_INT_DECL(bool) CSAMIsKnownDangerousInstr(PVM pVM, RTRCUINTPTR GCPtr)
377{
378 Assert(!HMIsEnabled(pVM));
379
380 for (uint32_t i=0;i<pVM->csam.s.cDangerousInstr;i++)
381 {
382 if (pVM->csam.s.aDangerousInstr[i] == (RTRCPTR)GCPtr)
383 {
384 STAM_COUNTER_INC(&pVM->csam.s.StatInstrCacheHit);
385 return true;
386 }
387 }
388 /* Record that we're about to process it in ring 3. */
389 pVM->csam.s.aDangerousInstr[pVM->csam.s.iDangerousInstr++] = (RTRCPTR)GCPtr;
390 pVM->csam.s.iDangerousInstr &= CSAM_MAX_DANGR_INSTR_MASK;
391
392 if (++pVM->csam.s.cDangerousInstr > CSAM_MAX_DANGR_INSTR)
393 pVM->csam.s.cDangerousInstr = CSAM_MAX_DANGR_INSTR;
394
395 STAM_COUNTER_INC(&pVM->csam.s.StatInstrCacheMiss);
396 return false;
397}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette