VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/HWACCMAll.cpp@ 39038

Last change on this file since 39038 was 39038, checked in by vboxsync, 13 years ago

VMM: -W4 warnings (MSC).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 9.4 KB
Line 
1/* $Id: HWACCMAll.cpp 39038 2011-10-19 14:36:27Z vboxsync $ */
2/** @file
3 * HWACCM - All contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_HWACCM
23#include <VBox/vmm/hwaccm.h>
24#include <VBox/vmm/pgm.h>
25#include "HWACCMInternal.h"
26#include <VBox/vmm/vm.h>
27#include <VBox/vmm/hwacc_vmx.h>
28#include <VBox/vmm/hwacc_svm.h>
29#include <VBox/err.h>
30#include <VBox/log.h>
31#include <iprt/param.h>
32#include <iprt/assert.h>
33#include <iprt/asm.h>
34#include <iprt/string.h>
35#include <iprt/x86.h>
36
37/**
38 * Queues a page for invalidation
39 *
40 * @returns VBox status code.
41 * @param pVCpu The VMCPU to operate on.
42 * @param GCVirt Page to invalidate
43 */
44void hwaccmQueueInvlPage(PVMCPU pVCpu, RTGCPTR GCVirt)
45{
46 /* Nothing to do if a TLB flush is already pending */
47 if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH))
48 return;
49#if 1
50 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
51#else
52 Be very careful when activating this code!
53 if (iPage == RT_ELEMENTS(pVCpu->hwaccm.s.TlbShootdown.aPages))
54 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
55 else
56 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
57#endif
58}
59
60/**
61 * Invalidates a guest page
62 *
63 * @returns VBox status code.
64 * @param pVCpu The VMCPU to operate on.
65 * @param GCVirt Page to invalidate
66 */
67VMMDECL(int) HWACCMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt)
68{
69 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushPageManual);
70#ifdef IN_RING0
71 PVM pVM = pVCpu->CTX_SUFF(pVM);
72 if (pVM->hwaccm.s.vmx.fSupported)
73 return VMXR0InvalidatePage(pVM, pVCpu, GCVirt);
74
75 Assert(pVM->hwaccm.s.svm.fSupported);
76 return SVMR0InvalidatePage(pVM, pVCpu, GCVirt);
77
78#else
79 hwaccmQueueInvlPage(pVCpu, GCVirt);
80 return VINF_SUCCESS;
81#endif
82}
83
84/**
85 * Flushes the guest TLB
86 *
87 * @returns VBox status code.
88 * @param pVCpu The VMCPU to operate on.
89 */
90VMMDECL(int) HWACCMFlushTLB(PVMCPU pVCpu)
91{
92 LogFlow(("HWACCMFlushTLB\n"));
93
94 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
95 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushTLBManual);
96 return VINF_SUCCESS;
97}
98
99#ifdef IN_RING0
100
/**
 * Dummy RTMpOnSpecific handler since RTMpPokeCpu couldn't be used.
 *
 * Intentionally does nothing; see hmR0PokeCpu, which uses it as the
 * broadcast fallback when RTMpPokeCpu returns VERR_NOT_SUPPORTED.
 *
 * @param   idCpu       Identifier of the CPU the handler is executed on (unused).
 * @param   pvUser1     Not used.
 * @param   pvUser2     Not used.
 */
static DECLCALLBACK(void) hwaccmFlushHandler(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    return;
}
109
/**
 * Wrapper for RTMpPokeCpu to deal with VERR_NOT_SUPPORTED.
 *
 * Pokes the host CPU the target EMT is running on and, when the poke is
 * asynchronous, spins until the VCPU is observed to have switched back
 * (detected via fCheckedTLBFlush / cWorldSwitchExits).
 *
 * @param   pVCpu       The VMCPU to poke (statistics and world-switch detection).
 * @param   idHostCpu   The host CPU the target EMT is currently executing on.
 */
static void hmR0PokeCpu(PVMCPU pVCpu, RTCPUID idHostCpu)
{
    /* Snapshot the world-switch exit count so we can detect when the target
       VCPU has gone through at least one world switch after the poke. */
    uint32_t cWorldSwitchExits = ASMAtomicUoReadU32(&pVCpu->hwaccm.s.cWorldSwitchExits);

    STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatPoke, x);
    int rc = RTMpPokeCpu(idHostCpu);
    STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatPoke, x);

    /* Not implemented on some platforms (Darwin, Linux kernel < 2.6.19); fall
       back to a less efficient implementation (broadcast). */
    if (rc == VERR_NOT_SUPPORTED)
    {
        STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatSpinPoke, z);
        /* synchronous. RTMpOnSpecific blocks until the (dummy) handler has
           run on the target CPU, so no spin-wait is needed afterwards. */
        RTMpOnSpecific(idHostCpu, hwaccmFlushHandler, 0, 0);
        STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatSpinPoke, z);
    }
    else
    {
        if (rc == VINF_SUCCESS)
            STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatSpinPoke, z);
        else
            STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatSpinPokeFailed, z);

/** @todo If more than one CPU is going to be poked, we could optimize this
 *        operation by poking them first and wait afterwards. Would require
 *        recording who to poke and their current cWorldSwitchExits values,
 *        that's something not suitable for stack... So, pVCpu->hm.s.something
 *        then. */
        /* Spin until the VCPU has switched back (poking is async). */
        while (   ASMAtomicUoReadBool(&pVCpu->hwaccm.s.fCheckedTLBFlush)
               && cWorldSwitchExits == ASMAtomicUoReadU32(&pVCpu->hwaccm.s.cWorldSwitchExits))
            ASMNopPause();

        if (rc == VINF_SUCCESS)
            STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatSpinPoke, z);
        else
            STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatSpinPokeFailed, z);
    }
}
153
154#endif /* IN_RING0 */
155#ifndef IN_RC
156
157/**
158 * Poke an EMT so it can perform the appropriate TLB shootdowns.
159 *
160 * @param pVCpu The handle of the virtual CPU to poke.
161 * @param fAccountFlushStat Whether to account the call to
162 * StatTlbShootdownFlush or StatTlbShootdown.
163 */
164static void hmPokeCpuForTlbFlush(PVMCPU pVCpu, bool fAccountFlushStat)
165{
166 if (ASMAtomicUoReadBool(&pVCpu->hwaccm.s.fCheckedTLBFlush))
167 {
168 if (fAccountFlushStat)
169 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTlbShootdownFlush);
170 else
171 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTlbShootdown);
172#ifdef IN_RING0
173 RTCPUID idHostCpu = pVCpu->hwaccm.s.idEnteredCpu;
174 if (idHostCpu != NIL_RTCPUID)
175 hmR0PokeCpu(pVCpu, idHostCpu);
176#else
177 VMR3NotifyCpuFFU(pVCpu->pUVCpu, VMNOTIFYFF_FLAGS_POKE);
178#endif
179 }
180 else
181 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushPageManual);
182}
183
184
185/**
186 * Invalidates a guest page on all VCPUs.
187 *
188 * @returns VBox status code.
189 * @param pVM The VM to operate on.
190 * @param GCVirt Page to invalidate
191 */
192VMMDECL(int) HWACCMInvalidatePageOnAllVCpus(PVM pVM, RTGCPTR GCPtr)
193{
194 VMCPUID idCurCpu = VMMGetCpuId(pVM);
195 STAM_COUNTER_INC(&pVM->aCpus[idCurCpu].hwaccm.s.StatFlushPage);
196
197 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
198 {
199 PVMCPU pVCpu = &pVM->aCpus[idCpu];
200
201 /* Nothing to do if a TLB flush is already pending; the VCPU should
202 have already been poked if it were active. */
203 if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH))
204 continue;
205
206 if (pVCpu->idCpu == idCurCpu)
207 HWACCMInvalidatePage(pVCpu, GCPtr);
208 else
209 {
210 hwaccmQueueInvlPage(pVCpu, GCPtr);
211 hmPokeCpuForTlbFlush(pVCpu, false /*fAccountFlushStat*/);
212 }
213 }
214
215 return VINF_SUCCESS;
216}
217
218
219/**
220 * Flush the TLBs of all VCPUs
221 *
222 * @returns VBox status code.
223 * @param pVM The VM to operate on.
224 */
225VMMDECL(int) HWACCMFlushTLBOnAllVCpus(PVM pVM)
226{
227 if (pVM->cCpus == 1)
228 return HWACCMFlushTLB(&pVM->aCpus[0]);
229
230 VMCPUID idThisCpu = VMMGetCpuId(pVM);
231
232 STAM_COUNTER_INC(&pVM->aCpus[idThisCpu].hwaccm.s.StatFlushTLB);
233
234 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
235 {
236 PVMCPU pVCpu = &pVM->aCpus[idCpu];
237
238 /* Nothing to do if a TLB flush is already pending; the VCPU should
239 have already been poked if it were active. */
240 if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH))
241 {
242 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
243 if (idThisCpu != idCpu)
244 hmPokeCpuForTlbFlush(pVCpu, true /*fAccountFlushStat*/);
245 }
246 }
247
248 return VINF_SUCCESS;
249}
250
251#endif /* !IN_RC */
252
253/**
254 * Checks if nested paging is enabled
255 *
256 * @returns boolean
257 * @param pVM The VM to operate on.
258 */
259VMMDECL(bool) HWACCMIsNestedPagingActive(PVM pVM)
260{
261 return HWACCMIsEnabled(pVM) && pVM->hwaccm.s.fNestedPaging;
262}
263
264/**
265 * Return the shadow paging mode for nested paging/ept
266 *
267 * @returns shadow paging mode
268 * @param pVM The VM to operate on.
269 */
270VMMDECL(PGMMODE) HWACCMGetShwPagingMode(PVM pVM)
271{
272 Assert(HWACCMIsNestedPagingActive(pVM));
273 if (pVM->hwaccm.s.svm.fSupported)
274 return PGMMODE_NESTED;
275
276 Assert(pVM->hwaccm.s.vmx.fSupported);
277 return PGMMODE_EPT;
278}
279
/**
 * Invalidates a guest page by physical address.
 *
 * NOTE: Assumes the current instruction references this physical page through a virtual address!!
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 * @param   GCPhys  Page to invalidate
 */
VMMDECL(int) HWACCMInvalidatePhysPage(PVM pVM, RTGCPHYS GCPhys)
{
    /* Without nested paging there is no physical TLB entry to invalidate. */
    if (!HWACCMIsNestedPagingActive(pVM))
        return VINF_SUCCESS;

#ifdef IN_RING0
    if (pVM->hwaccm.s.vmx.fSupported)
    {
        VMCPUID idThisCpu = VMMGetCpuId(pVM);

        for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
        {
            PVMCPU pVCpu = &pVM->aCpus[idCpu];

            if (idThisCpu == idCpu)
                /* Our own VCPU: invalidate directly. */
                VMXR0InvalidatePhysPage(pVM, pVCpu, GCPhys);
            else
            {
                /* Other VCPU: request a full flush and poke its EMT. */
                VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
                hmPokeCpuForTlbFlush(pVCpu, true /*fAccountFlushStat*/);
            }
        }
        return VINF_SUCCESS;
    }

    /* AMD-V doesn't support invalidation with guest physical addresses; see
       comment in SVMR0InvalidatePhysPage. */
    Assert(pVM->hwaccm.s.svm.fSupported);
    /* Falls through to the full flush below. */
#endif

    /* Non-ring-0 contexts (and ring-0 AMD-V) fall back to a full TLB flush
       on all VCPUs. */
    HWACCMFlushTLBOnAllVCpus(pVM);
    return VINF_SUCCESS;
}
322
323/**
324 * Checks if an interrupt event is currently pending.
325 *
326 * @returns Interrupt event pending state.
327 * @param pVM The VM to operate on.
328 */
329VMMDECL(bool) HWACCMHasPendingIrq(PVM pVM)
330{
331 PVMCPU pVCpu = VMMGetCpu(pVM);
332 return !!pVCpu->hwaccm.s.Event.fPending;
333}
334
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette