VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/HWACCMAll.cpp@ 41801

Last change on this file since 41801 was 41801, checked in by vboxsync, 12 years ago

Doxygen.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 9.5 KB
Line 
1/* $Id: HWACCMAll.cpp 41801 2012-06-17 16:46:51Z vboxsync $ */
2/** @file
3 * HWACCM - All contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_HWACCM
23#include <VBox/vmm/hwaccm.h>
24#include <VBox/vmm/pgm.h>
25#include "HWACCMInternal.h"
26#include <VBox/vmm/vm.h>
27#include <VBox/vmm/hwacc_vmx.h>
28#include <VBox/vmm/hwacc_svm.h>
29#include <VBox/err.h>
30#include <VBox/log.h>
31#include <iprt/param.h>
32#include <iprt/assert.h>
33#include <iprt/asm.h>
34#include <iprt/string.h>
35#include <iprt/x86.h>
36
/**
 * Queues a guest-virtual page for invalidation on a VCPU.
 *
 * Currently this simply forces a full TLB flush on the target VCPU; the
 * per-page shootdown path below is disabled (see the #else branch).
 *
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   GCVirt      Guest-virtual address of the page to invalidate.
 */
static void hwaccmQueueInvlPage(PVMCPU pVCpu, RTGCPTR GCVirt)
{
    /* Nothing to do if a TLB flush is already pending */
    if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH))
        return;
#if 1
    /* Degenerate to a full flush; the page address is deliberately unused. */
    VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    NOREF(GCVirt);
#else
    /* NOTE: intentionally non-compiling guard line below — this per-page
       shootdown path is disabled and must be reviewed before enabling. */
    Be very careful when activating this code!
    if (iPage == RT_ELEMENTS(pVCpu->hwaccm.s.TlbShootdown.aPages))
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    else
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
#endif
}
60
61/**
62 * Invalidates a guest page
63 *
64 * @returns VBox status code.
65 * @param pVCpu Pointer to the VMCPU.
66 * @param GCVirt Page to invalidate
67 */
68VMMDECL(int) HWACCMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt)
69{
70 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushPageManual);
71#ifdef IN_RING0
72 PVM pVM = pVCpu->CTX_SUFF(pVM);
73 if (pVM->hwaccm.s.vmx.fSupported)
74 return VMXR0InvalidatePage(pVM, pVCpu, GCVirt);
75
76 Assert(pVM->hwaccm.s.svm.fSupported);
77 return SVMR0InvalidatePage(pVM, pVCpu, GCVirt);
78
79#else
80 hwaccmQueueInvlPage(pVCpu, GCVirt);
81 return VINF_SUCCESS;
82#endif
83}
84
85/**
86 * Flushes the guest TLB
87 *
88 * @returns VBox status code.
89 * @param pVCpu Pointer to the VMCPU.
90 */
91VMMDECL(int) HWACCMFlushTLB(PVMCPU pVCpu)
92{
93 LogFlow(("HWACCMFlushTLB\n"));
94
95 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
96 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushTLBManual);
97 return VINF_SUCCESS;
98}
99
100#ifdef IN_RING0
101
102/**
103 * Dummy RTMpOnSpecific handler since RTMpPokeCpu couldn't be used.
104 *
105 */
106static DECLCALLBACK(void) hwaccmFlushHandler(RTCPUID idCpu, void *pvUser1, void *pvUser2)
107{
108 NOREF(idCpu); NOREF(pvUser1); NOREF(pvUser2);
109 return;
110}
111
/**
 * Wrapper for RTMpPokeCpu to deal with VERR_NOT_SUPPORTED.
 *
 * Pokes the host CPU the VCPU is executing on so the guest takes a VM exit,
 * then spins until the exit is observed (poking is asynchronous).
 *
 * @param   pVCpu       Pointer to the VMCPU to poke.
 * @param   idHostCpu   The host CPU the VCPU is currently running on.
 */
static void hmR0PokeCpu(PVMCPU pVCpu, RTCPUID idHostCpu)
{
    /* Snapshot the exit counter so we can detect a world switch below. */
    uint32_t cWorldSwitchExits = ASMAtomicUoReadU32(&pVCpu->hwaccm.s.cWorldSwitchExits);

    STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatPoke, x);
    int rc = RTMpPokeCpu(idHostCpu);
    STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatPoke, x);

    /* Not implemented on some platforms (Darwin, Linux kernel < 2.6.19); fall
       back to a less efficient implementation (broadcast). */
    if (rc == VERR_NOT_SUPPORTED)
    {
        STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatSpinPoke, z);
        /* synchronous. */
        RTMpOnSpecific(idHostCpu, hwaccmFlushHandler, 0, 0);
        STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatSpinPoke, z);
    }
    else
    {
        if (rc == VINF_SUCCESS)
            STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatSpinPoke, z);
        else
            STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatSpinPokeFailed, z);

/** @todo If more than one CPU is going to be poked, we could optimize this
 *        operation by poking them first and wait afterwards.  Would require
 *        recording who to poke and their current cWorldSwitchExits values,
 *        that's something not suitable for stack... So, pVCpu->hm.s.something
 *        then. */
        /* Spin until the VCPU has switched back (poking is async). */
        while (   ASMAtomicUoReadBool(&pVCpu->hwaccm.s.fCheckedTLBFlush)
               && cWorldSwitchExits == ASMAtomicUoReadU32(&pVCpu->hwaccm.s.cWorldSwitchExits))
            ASMNopPause();

        if (rc == VINF_SUCCESS)
            STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatSpinPoke, z);
        else
            STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatSpinPokeFailed, z);
    }
}
155
156#endif /* IN_RING0 */
157#ifndef IN_RC
158
159/**
160 * Poke an EMT so it can perform the appropriate TLB shootdowns.
161 *
162 * @param pVCpu The handle of the virtual CPU to poke.
163 * @param fAccountFlushStat Whether to account the call to
164 * StatTlbShootdownFlush or StatTlbShootdown.
165 */
166static void hmPokeCpuForTlbFlush(PVMCPU pVCpu, bool fAccountFlushStat)
167{
168 if (ASMAtomicUoReadBool(&pVCpu->hwaccm.s.fCheckedTLBFlush))
169 {
170 if (fAccountFlushStat)
171 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTlbShootdownFlush);
172 else
173 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTlbShootdown);
174#ifdef IN_RING0
175 RTCPUID idHostCpu = pVCpu->hwaccm.s.idEnteredCpu;
176 if (idHostCpu != NIL_RTCPUID)
177 hmR0PokeCpu(pVCpu, idHostCpu);
178#else
179 VMR3NotifyCpuFFU(pVCpu->pUVCpu, VMNOTIFYFF_FLAGS_POKE);
180#endif
181 }
182 else
183 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushPageManual);
184}
185
186
187/**
188 * Invalidates a guest page on all VCPUs.
189 *
190 * @returns VBox status code.
191 * @param pVM Pointer to the VM.
192 * @param GCVirt Page to invalidate
193 */
194VMMDECL(int) HWACCMInvalidatePageOnAllVCpus(PVM pVM, RTGCPTR GCPtr)
195{
196 VMCPUID idCurCpu = VMMGetCpuId(pVM);
197 STAM_COUNTER_INC(&pVM->aCpus[idCurCpu].hwaccm.s.StatFlushPage);
198
199 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
200 {
201 PVMCPU pVCpu = &pVM->aCpus[idCpu];
202
203 /* Nothing to do if a TLB flush is already pending; the VCPU should
204 have already been poked if it were active. */
205 if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH))
206 continue;
207
208 if (pVCpu->idCpu == idCurCpu)
209 HWACCMInvalidatePage(pVCpu, GCPtr);
210 else
211 {
212 hwaccmQueueInvlPage(pVCpu, GCPtr);
213 hmPokeCpuForTlbFlush(pVCpu, false /*fAccountFlushStat*/);
214 }
215 }
216
217 return VINF_SUCCESS;
218}
219
220
221/**
222 * Flush the TLBs of all VCPUs
223 *
224 * @returns VBox status code.
225 * @param pVM Pointer to the VM.
226 */
227VMMDECL(int) HWACCMFlushTLBOnAllVCpus(PVM pVM)
228{
229 if (pVM->cCpus == 1)
230 return HWACCMFlushTLB(&pVM->aCpus[0]);
231
232 VMCPUID idThisCpu = VMMGetCpuId(pVM);
233
234 STAM_COUNTER_INC(&pVM->aCpus[idThisCpu].hwaccm.s.StatFlushTLB);
235
236 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
237 {
238 PVMCPU pVCpu = &pVM->aCpus[idCpu];
239
240 /* Nothing to do if a TLB flush is already pending; the VCPU should
241 have already been poked if it were active. */
242 if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH))
243 {
244 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
245 if (idThisCpu != idCpu)
246 hmPokeCpuForTlbFlush(pVCpu, true /*fAccountFlushStat*/);
247 }
248 }
249
250 return VINF_SUCCESS;
251}
252
253#endif /* !IN_RC */
254
255/**
256 * Checks if nested paging is enabled
257 *
258 * @returns boolean
259 * @param pVM Pointer to the VM.
260 */
261VMMDECL(bool) HWACCMIsNestedPagingActive(PVM pVM)
262{
263 return HWACCMIsEnabled(pVM) && pVM->hwaccm.s.fNestedPaging;
264}
265
266/**
267 * Return the shadow paging mode for nested paging/ept
268 *
269 * @returns shadow paging mode
270 * @param pVM Pointer to the VM.
271 */
272VMMDECL(PGMMODE) HWACCMGetShwPagingMode(PVM pVM)
273{
274 Assert(HWACCMIsNestedPagingActive(pVM));
275 if (pVM->hwaccm.s.svm.fSupported)
276 return PGMMODE_NESTED;
277
278 Assert(pVM->hwaccm.s.vmx.fSupported);
279 return PGMMODE_EPT;
280}
281
/**
 * Invalidates a guest page by physical address.
 *
 * NOTE: Assumes the current instruction references this physical page though a virtual address!!
 *
 * Only meaningful with nested paging; otherwise it is a no-op.  In ring-0
 * with VT-x the page is invalidated per-VCPU (remote VCPUs are flushed and
 * poked); AMD-V and non-ring-0 contexts fall back to a full TLB flush on
 * all VCPUs.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   GCPhys      Guest-physical address of the page to invalidate.
 */
VMMDECL(int) HWACCMInvalidatePhysPage(PVM pVM, RTGCPHYS GCPhys)
{
    /* Without nested paging there are no physical-address mappings to flush. */
    if (!HWACCMIsNestedPagingActive(pVM))
        return VINF_SUCCESS;

#ifdef IN_RING0
    if (pVM->hwaccm.s.vmx.fSupported)
    {
        VMCPUID idThisCpu = VMMGetCpuId(pVM);

        for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
        {
            PVMCPU pVCpu = &pVM->aCpus[idCpu];

            if (idThisCpu == idCpu)
                /* This VCPU: invalidate immediately. */
                VMXR0InvalidatePhysPage(pVM, pVCpu, GCPhys);
            else
            {
                /* Remote VCPU: request a full flush and poke it. */
                VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
                hmPokeCpuForTlbFlush(pVCpu, true /*fAccountFlushStat*/);
            }
        }
        return VINF_SUCCESS;
    }

    /* AMD-V doesn't support invalidation with guest physical addresses; see
       comment in SVMR0InvalidatePhysPage. */
    Assert(pVM->hwaccm.s.svm.fSupported);
#else
    NOREF(GCPhys);
#endif

    HWACCMFlushTLBOnAllVCpus(pVM);
    return VINF_SUCCESS;
}
326
327/**
328 * Checks if an interrupt event is currently pending.
329 *
330 * @returns Interrupt event pending state.
331 * @param pVM Pointer to the VM.
332 */
333VMMDECL(bool) HWACCMHasPendingIrq(PVM pVM)
334{
335 PVMCPU pVCpu = VMMGetCpu(pVM);
336 return !!pVCpu->hwaccm.s.Event.fPending;
337}
338
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette