VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CSAMAll.cpp@ 55979

Last change on this file since 55979 was 55937, checked in by vboxsync, 10 years ago

CSAM,PATM: Changed csamRCCodePageWritePfHandler to store the pvFault address in pvDirtyFaultPage, and made csamR3FlushDirtyPages mark that page (instead of pvDirtyBasePage) read-only (+ tell REM about it). Preparing ring-3 access handlers for raw-mode.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 12.7 KB
/* $Id: CSAMAll.cpp 55937 2015-05-19 14:27:00Z vboxsync $ */
/** @file
 * CSAM - Guest OS Code Scanning and Analysis Manager - Any Context
 */

/*
 * Copyright (C) 2006-2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_CSAM
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/stam.h>
#include <VBox/vmm/patm.h>
#include <VBox/vmm/csam.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/mm.h>
#ifdef VBOX_WITH_REM
# include <VBox/vmm/rem.h>
#endif
#include <VBox/sup.h>
#include <VBox/param.h>
#include <iprt/avl.h>
#include "CSAMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/vmm.h>
#include <VBox/dbg.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <VBox/dis.h>
#include <VBox/disopcode.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/string.h>

#ifdef IN_RING0
# error "IN_RING3 & IN_RC only!"
#endif

/**
 * Access handler callback for virtual access handler ranges.
 *
 * It is important to realize that a physical page in a range can have aliases,
 * and for ALL and WRITE handlers these will also trigger.
 *
 * @returns VINF_SUCCESS if the handler has carried out the operation.
 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
 * @param   pVM             Pointer to the VM.
 * @param   pVCpu           Pointer to the cross context CPU context for the
 *                          calling EMT.
 * @param   GCPtr           The virtual address the guest is writing to. (Not correct if it's an alias!)
 * @param   pvPtr           The HC mapping of that address.
 * @param   pvBuf           What the guest is reading/writing.
 * @param   cbBuf           How much it's reading/writing.
 * @param   enmAccessType   The access type.
 * @param   enmOrigin       Who is making this write.
 * @param   pvUser          User argument.
 */
PGM_ALL_CB2_DECL(int) csamCodePageWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
                                               PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
{
    RTGCPTR const GCPtrMonitored = (uintptr_t)pvUser | (GCPtr & PAGE_OFFSET_MASK);
    Log(("csamCodePageWriteHandler: write to %RGv LB %zu\n", GCPtr, cbBuf));

    Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
    Assert(VMCPU_IS_EMT(pVCpu));

    /*
     * Check if it's a dummy write that doesn't change anything.
     */
    if (   PAGE_ADDRESS(pvPtr) == PAGE_ADDRESS((uintptr_t)pvPtr + cbBuf - 1)
        && !memcmp(pvPtr, pvBuf, cbBuf))
    {
        Log(("csamCodePageWriteHandler: dummy write -> ignore\n"));
        return VINF_PGM_HANDLER_DO_DEFAULT;
    }

#ifdef IN_RING3
    /*
     * Ring-3: Do proper handling.
     */
    int rc = PATMR3PatchWrite(pVM, GCPtrMonitored, (uint32_t)cbBuf);
    AssertRC(rc);
    return VINF_PGM_HANDLER_DO_DEFAULT;

#else
    /*
     * Raw-mode: Try to avoid having to go to ring-3 (same as csamRCCodePageWritePfHandler).
     */
    uint32_t const cpl          = CPUMGetGuestCPL(pVCpu);
    bool const     fPatchCode   = PATMIsPatchGCAddr(pVM, CPUMGetGuestRIP(pVCpu));
    PPATMGCSTATE   pPATMGCState = PATMGetGCState(pVM);

    Assert(pVM->csam.s.cDirtyPages < CSAM_MAX_DIRTY_PAGES);
    Assert(pPATMGCState);
    Assert(pPATMGCState->fPIF || fPatchCode);

# ifdef VBOX_WITH_REM
    /* Flush the recompiler's translation block cache as the guest seems to be modifying instructions. */
    /** @todo a bit overkill?? */
    REMFlushTBs(pVM);
# endif

    /*
     * When patch code is executing instructions that must complete, we
     * must *never* interrupt it.
     */
    if (!pPATMGCState->fPIF && fPatchCode)
    {
        Log(("csamRCCodePageWriteHandler: fPIF=0 -> stack fault in patch generated code at %08RX32!\n", CPUMGetGuestRIP(pVCpu)));
        return VINF_PGM_HANDLER_DO_DEFAULT;
    }

    Log(("csamRCCodePageWriteHandler: code page write at %RGv original address %RGv (cpl=%d)\n", GCPtr, GCPtrMonitored, cpl));

    /*
     * If user code is modifying one of our monitored pages, then we can safely
     * write to it as it's no longer being used for supervisor code.
     */
    if (cpl != 3)
    {
        VBOXSTRICTRC rcStrict = PATMRCHandleWriteToPatchPage(pVM, NULL /* pRegFrame = no interpret */,
                                                             (RTRCPTR)GCPtrMonitored, cbBuf);
        if (   rcStrict == VINF_PGM_HANDLER_DO_DEFAULT
            || rcStrict == VINF_SUCCESS)
            return VBOXSTRICTRC_TODO(rcStrict);
        if (rcStrict == VINF_EM_RAW_EMULATE_INSTR)
        {
            STAM_COUNTER_INC(&pVM->csam.s.StatDangerousWrite);
            return VINF_EM_RAW_EMULATE_INSTR;
        }
        Assert(rcStrict == VERR_PATCH_NOT_FOUND);
    }

    /*
     * Schedule ring-3 activity.
     * Note that GCPtr might be a different address in case of aliases, so
     * take down both alternatives.
     */
    VMCPU_FF_SET(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION);
    pVM->csam.s.pvDirtyBasePage[pVM->csam.s.cDirtyPages]  = (RTRCPTR)GCPtrMonitored;
    pVM->csam.s.pvDirtyFaultPage[pVM->csam.s.cDirtyPages] = (RTRCPTR)GCPtr;
    if (++pVM->csam.s.cDirtyPages == CSAM_MAX_DIRTY_PAGES)
        return VINF_CSAM_PENDING_ACTION;

    /*
     * Continue with the write. The VM_FF_CSAM_FLUSH_DIRTY_PAGE handler will reset it to read-only again.
     */
    Log(("csamRCCodePageWriteHandler: enabled r/w for page %RGv (%RGv)\n", GCPtr, GCPtrMonitored));
    STAM_COUNTER_INC(&pVM->csam.s.StatCodePageModified);
    return VINF_PGM_HANDLER_DO_DEFAULT;
#endif
}
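

/*
 * Illustrative sketch, not part of the original file: how the dirty-page
 * records queued above are typically drained. Per the commit message,
 * csamR3FlushDirtyPages runs in ring-3 on VMCPU_FF_CSAM_PENDING_ACTION,
 * makes each recorded fault page read-only again and tells REM about it.
 * The helper below is a hypothetical reduction of that idea; the names
 * csamR3ReprotectPage and csamR3FlushDirtyPagesSketch are assumptions.
 */
#if 0 /* illustration only */
static void csamR3FlushDirtyPagesSketch(PVM pVM)
{
    for (uint32_t i = 0; i < pVM->csam.s.cDirtyPages; i++)
    {
        RTRCPTR GCPtrFault = pVM->csam.s.pvDirtyFaultPage[i];
        csamR3ReprotectPage(pVM, GCPtrFault); /* hypothetical: write-protect via PGM + notify REM */
    }
    pVM->csam.s.cDirtyPages = 0;              /* queue drained; the write handler may fill it again */
}
#endif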


/**
 * Check if this page needs to be analysed by CSAM.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pvFault     Fault address.
 */
VMM_INT_DECL(int) CSAMExecFault(PVM pVM, RTRCPTR pvFault)
{
    Assert(!HMIsEnabled(pVM));
    if (!CSAMIsEnabled(pVM))
        return VINF_SUCCESS;

    LogFlow(("CSAMGCExecFault: for page %08X scanned=%d\n", pvFault, CSAMIsPageScanned(pVM, pvFault)));

    if (CSAMIsPageScanned(pVM, pvFault))
    {
        /* Already checked! */
        STAM_COUNTER_ADD(&pVM->csam.s.StatNrKnownPagesGC, 1);
        return VINF_SUCCESS;
    }

    STAM_COUNTER_ADD(&pVM->csam.s.StatNrTraps, 1);
    VMCPU_FF_SET(VMMGetCpu0(pVM), VMCPU_FF_CSAM_SCAN_PAGE);
    return VINF_CSAM_PENDING_ACTION;
}
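

/*
 * Illustrative sketch, not part of the original file: the deferred-work
 * pattern CSAMExecFault relies on. The function only raises a force-action
 * flag; a caller in the ring-3 execution loop is expected to notice the
 * flag and perform the actual page scan. All names below except the
 * VMCPU_FF_* macros are hypothetical.
 */
#if 0 /* illustration only */
static void emR3HandleCsamScanPageSketch(PVM pVM, PVMCPU pVCpu, RTRCPTR pvFault)
{
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE))
    {
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE);
        csamR3ScanPage(pVM, pvFault);   /* hypothetical ring-3 scanner */
    }
}
#endif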


/**
 * Check if this page was previously scanned by CSAM.
 *
 * @returns true -> scanned, false -> not scanned
 * @param   pVM         Pointer to the VM.
 * @param   pPage       GC page address.
 */
VMM_INT_DECL(bool) CSAMIsPageScanned(PVM pVM, RTRCPTR pPage)
{
    int pgdir, bit;
    uintptr_t page;
    Assert(!HMIsEnabled(pVM));

    page  = (uintptr_t)pPage;
    pgdir = page >> X86_PAGE_4M_SHIFT;
    bit   = (page & X86_PAGE_4M_OFFSET_MASK) >> X86_PAGE_4K_SHIFT;

    Assert(pgdir < CSAM_PGDIRBMP_CHUNKS);
    Assert(bit < PAGE_SIZE);

    return pVM->csam.s.CTXSUFF(pPDBitmap)[pgdir] && ASMBitTest((void *)pVM->csam.s.CTXSUFF(pPDBitmap)[pgdir], bit);
}
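

/*
 * Illustrative sketch, not part of the original file: the two-level bitmap
 * indexing above, worked through for a sample address. Each bitmap chunk
 * covers one 4MB region (1024 pages of 4KB); pgdir picks the chunk and bit
 * picks the page within it.
 */
#if 0 /* illustration only */
static void csamBitmapIndexExample(void)
{
    uintptr_t const page  = 0x80123000;                                            /* sample guest address */
    int const       pgdir = page >> X86_PAGE_4M_SHIFT;                             /* 0x80123000 >> 22 = 0x200 */
    int const       bit   = (page & X86_PAGE_4M_OFFSET_MASK) >> X86_PAGE_4K_SHIFT; /* 0x123000 >> 12 = 0x123 */
    Assert(pgdir == 0x200 && bit == 0x123);
}
#endif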



/**
 * Mark a page as scanned/not scanned.
 *
 * @note We always mark it as scanned, even if we haven't completely done so.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pPage       GC page address (not necessarily aligned).
 * @param   fScanned    Mark as scanned or not scanned.
 */
VMM_INT_DECL(int) CSAMMarkPage(PVM pVM, RTRCUINTPTR pPage, bool fScanned)
{
    int pgdir, bit;
    uintptr_t page;

#ifdef LOG_ENABLED
    if (fScanned && !CSAMIsPageScanned(pVM, (RTRCPTR)pPage))
        Log(("CSAMMarkPage %RRv\n", pPage));
#endif

    if (!CSAMIsEnabled(pVM))
        return VINF_SUCCESS;
    Assert(!HMIsEnabled(pVM));

    page  = (uintptr_t)pPage;
    pgdir = page >> X86_PAGE_4M_SHIFT;
    bit   = (page & X86_PAGE_4M_OFFSET_MASK) >> X86_PAGE_4K_SHIFT;

    Assert(pgdir < CSAM_PGDIRBMP_CHUNKS);
    Assert(bit < PAGE_SIZE);

    if (!CTXSUFF(pVM->csam.s.pPDBitmap)[pgdir])
    {
        STAM_COUNTER_INC(&pVM->csam.s.StatBitmapAlloc);
        int rc = MMHyperAlloc(pVM, CSAM_PAGE_BITMAP_SIZE, 0, MM_TAG_CSAM, (void **)&pVM->csam.s.CTXSUFF(pPDBitmap)[pgdir]);
        if (RT_FAILURE(rc))
        {
            Log(("MMHyperAlloc failed with %Rrc\n", rc));
            return rc;
        }
#ifdef IN_RC
        pVM->csam.s.pPDHCBitmapGC[pgdir] = MMHyperRCToR3(pVM, (RCPTRTYPE(void*))pVM->csam.s.pPDBitmapGC[pgdir]);
        if (!pVM->csam.s.pPDHCBitmapGC[pgdir])
        {
            Log(("MMHyperRCToR3 failed for %RRv\n", pVM->csam.s.pPDBitmapGC[pgdir]));
            return VERR_INTERNAL_ERROR;
        }
#else
        pVM->csam.s.pPDGCBitmapHC[pgdir] = MMHyperR3ToRC(pVM, pVM->csam.s.pPDBitmapHC[pgdir]);
        if (!pVM->csam.s.pPDGCBitmapHC[pgdir])
        {
            Log(("MMHyperR3ToRC failed for %RHv\n", pVM->csam.s.pPDBitmapHC[pgdir]));
            return VERR_INTERNAL_ERROR;
        }
#endif
    }
    if (fScanned)
        ASMBitSet((void *)pVM->csam.s.CTXSUFF(pPDBitmap)[pgdir], bit);
    else
        ASMBitClear((void *)pVM->csam.s.CTXSUFF(pPDBitmap)[pgdir], bit);

    return VINF_SUCCESS;
}
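

/*
 * Illustrative sketch, not part of the original file: typical use of the
 * mark/query pair defined in this file, e.g. after a page has been scanned.
 * The address is an arbitrary example of a supervisor code page.
 */
#if 0 /* illustration only */
static void csamMarkPageUsageSketch(PVM pVM)
{
    RTRCUINTPTR const GCPtrPage = 0x80123000;
    int rc = CSAMMarkPage(pVM, GCPtrPage, true /* fScanned */); /* allocates the 4MB chunk bitmap on first use */
    AssertRC(rc);
    Assert(CSAMIsPageScanned(pVM, (RTRCPTR)GCPtrPage));
}
#endif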

/**
 * Check if this page needs to be analysed by CSAM.
 *
 * This function should only be called for supervisor pages and
 * only when CSAM is enabled. Leaving these selection criteria
 * to the caller simplifies the interface (PTE passing).
 *
 * Note that the page has not yet been synced, so the TLB trick
 * (which wasn't ever active anyway) cannot be applied.
 *
 * @returns true if the page should be marked not present because
 *          CSAM needs to scan it.
 * @returns false if the page was already scanned.
 * @param   pVM         Pointer to the VM.
 * @param   GCPtr       GC pointer of page.
 */
VMM_INT_DECL(bool) CSAMDoesPageNeedScanning(PVM pVM, RTRCUINTPTR GCPtr)
{
    if (!CSAMIsEnabled(pVM))
        return false;
    Assert(!HMIsEnabled(pVM));

    if (CSAMIsPageScanned(pVM, (RTRCPTR)GCPtr))
    {
        /* Already checked! */
        STAM_COUNTER_ADD(&CTXSUFF(pVM->csam.s.StatNrKnownPages), 1);
        return false;
    }
    STAM_COUNTER_ADD(&CTXSUFF(pVM->csam.s.StatNrPageNP), 1);
    return true;
}


/**
 * Remember a possible code page for later inspection.
 *
 * @param   pVM         Pointer to the VM.
 * @param   GCPtr       GC pointer of page.
 */
VMM_INT_DECL(void) CSAMMarkPossibleCodePage(PVM pVM, RTRCPTR GCPtr)
{
    Assert(!HMIsEnabled(pVM));
    if (pVM->csam.s.cPossibleCodePages < RT_ELEMENTS(pVM->csam.s.pvPossibleCodePage))
    {
        pVM->csam.s.pvPossibleCodePage[pVM->csam.s.cPossibleCodePages++] = (RTRCPTR)GCPtr;
        VMCPU_FF_SET(VMMGetCpu0(pVM), VMCPU_FF_CSAM_PENDING_ACTION);
    }
    return;
}


/**
 * Turn on code scanning.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 */
VMM_INT_DECL(int) CSAMEnableScanning(PVM pVM)
{
    AssertReturn(!HMIsEnabled(pVM), VERR_CSAM_HM_IPE);
    pVM->fCSAMEnabled = true;
    return VINF_SUCCESS;
}

/**
 * Turn off code scanning.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 */
VMM_INT_DECL(int) CSAMDisableScanning(PVM pVM)
{
    pVM->fCSAMEnabled = false;
    return VINF_SUCCESS;
}


/**
 * Check if we've scanned this instruction before. If true, then we can emulate
 * it instead of returning to ring 3.
 *
 * Using a simple array here as there are generally few mov crx instructions and
 * tree lookup is likely to be more expensive (it would also have to be offset based).
 *
 * @returns boolean
 * @param   pVM         Pointer to the VM.
 * @param   GCPtr       GC pointer of the instruction.
 */
VMM_INT_DECL(bool) CSAMIsKnownDangerousInstr(PVM pVM, RTRCUINTPTR GCPtr)
{
    Assert(!HMIsEnabled(pVM));

    for (uint32_t i = 0; i < pVM->csam.s.cDangerousInstr; i++)
    {
        if (pVM->csam.s.aDangerousInstr[i] == (RTRCPTR)GCPtr)
        {
            STAM_COUNTER_INC(&pVM->csam.s.StatInstrCacheHit);
            return true;
        }
    }
    /* Record that we're about to process it in ring 3. */
    pVM->csam.s.aDangerousInstr[pVM->csam.s.iDangerousInstr++] = (RTRCPTR)GCPtr;
    pVM->csam.s.iDangerousInstr &= CSAM_MAX_DANGR_INSTR_MASK;

    if (++pVM->csam.s.cDangerousInstr > CSAM_MAX_DANGR_INSTR)
        pVM->csam.s.cDangerousInstr = CSAM_MAX_DANGR_INSTR;

    STAM_COUNTER_INC(&pVM->csam.s.StatInstrCacheMiss);
    return false;
}
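

/*
 * Illustrative sketch, not part of the original file: the bounded lookup
 * cache used by CSAMIsKnownDangerousInstr, reduced to its essentials. A
 * linear scan over a small array beats a tree for a handful of entries;
 * the write index wraps via the mask, so the oldest entry is overwritten
 * once the cache is full. TINYCACHE and tinyCacheLookupOrAdd are made-up
 * names for this illustration.
 */
#if 0 /* illustration only */
typedef struct TINYCACHE
{
    RTRCPTR  aEntries[CSAM_MAX_DANGR_INSTR]; /* fixed-size entry array */
    uint32_t cUsed;                          /* valid entries, saturates at the array size */
    uint32_t iWrite;                         /* next slot to fill, wraps via the mask */
} TINYCACHE;

static bool tinyCacheLookupOrAdd(TINYCACHE *pCache, RTRCPTR Key)
{
    for (uint32_t i = 0; i < pCache->cUsed; i++)
        if (pCache->aEntries[i] == Key)
            return true;                       /* hit: seen before */
    pCache->aEntries[pCache->iWrite++] = Key;  /* miss: remember for next time */
    pCache->iWrite &= CSAM_MAX_DANGR_INSTR_MASK;
    if (pCache->cUsed < CSAM_MAX_DANGR_INSTR)
        pCache->cUsed++;
    return false;
}
#endif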