VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/HWACCMAll.cpp@ 38025

Last change on this file since 38025 was 37955, checked in by vboxsync, 13 years ago

Moved VBox/x86.h/mac to iprt/x86.h/mac.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 9.4 KB
Line 
1/* $Id: HWACCMAll.cpp 37955 2011-07-14 12:23:02Z vboxsync $ */
2/** @file
3 * HWACCM - All contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_HWACCM
23#include <VBox/vmm/hwaccm.h>
24#include <VBox/vmm/pgm.h>
25#include "HWACCMInternal.h"
26#include <VBox/vmm/vm.h>
27#include <VBox/vmm/hwacc_vmx.h>
28#include <VBox/vmm/hwacc_svm.h>
29#include <VBox/err.h>
30#include <VBox/log.h>
31#include <iprt/param.h>
32#include <iprt/assert.h>
33#include <iprt/asm.h>
34#include <iprt/string.h>
35#include <iprt/x86.h>
36
/**
 * Queues a page for invalidation.
 *
 * @param   pVCpu       The VMCPU to operate on.
 * @param   GCVirt      Page to invalidate.  (Currently unused while the
 *                      full-flush path below is active.)
 */
void hwaccmQueueInvlPage(PVMCPU pVCpu, RTGCPTR GCVirt)
{
    /* Nothing to do if a TLB flush is already pending */
    if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH))
        return;
#if 1
    /* Always force a full TLB flush instead of tracking the individual
       page for a targeted shootdown (see the disabled code below). */
    VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
#else
    Be very careful when activating this code!
    if (iPage == RT_ELEMENTS(pVCpu->hwaccm.s.TlbShootdown.aPages))
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    else
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
#endif
}
59
60/**
61 * Invalidates a guest page
62 *
63 * @returns VBox status code.
64 * @param pVCpu The VMCPU to operate on.
65 * @param GCVirt Page to invalidate
66 */
67VMMDECL(int) HWACCMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt)
68{
69 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushPageManual);
70#ifdef IN_RING0
71 PVM pVM = pVCpu->CTX_SUFF(pVM);
72 if (pVM->hwaccm.s.vmx.fSupported)
73 return VMXR0InvalidatePage(pVM, pVCpu, GCVirt);
74
75 Assert(pVM->hwaccm.s.svm.fSupported);
76 return SVMR0InvalidatePage(pVM, pVCpu, GCVirt);
77#endif
78
79 hwaccmQueueInvlPage(pVCpu, GCVirt);
80 return VINF_SUCCESS;
81}
82
83/**
84 * Flushes the guest TLB
85 *
86 * @returns VBox status code.
87 * @param pVCpu The VMCPU to operate on.
88 */
89VMMDECL(int) HWACCMFlushTLB(PVMCPU pVCpu)
90{
91 LogFlow(("HWACCMFlushTLB\n"));
92
93 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
94 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushTLBManual);
95 return VINF_SUCCESS;
96}
97
98#ifdef IN_RING0
99
100/**
101 * Dummy RTMpOnSpecific handler since RTMpPokeCpu couldn't be used.
102 *
103 */
104static DECLCALLBACK(void) hwaccmFlushHandler(RTCPUID idCpu, void *pvUser1, void *pvUser2)
105{
106 return;
107}
108
/**
 * Wrapper for RTMpPokeCpu to deal with VERR_NOT_SUPPORTED.
 *
 * Pokes the given host CPU, falling back to a synchronous RTMpOnSpecific
 * cross-call when poking is not supported.  On the poke path it spins until
 * the target VCPU has left guest context (poking is asynchronous).
 *
 * @param   pVCpu       The VMCPU running on the host CPU to poke.
 * @param   idHostCpu   The host CPU to poke.
 */
static void hmR0PokeCpu(PVMCPU pVCpu, RTCPUID idHostCpu)
{
    /* Sample the world-switch exit counter so we can detect below whether
       the target VCPU has exited guest mode since the poke. */
    uint32_t cWorldSwitchExits = ASMAtomicUoReadU32(&pVCpu->hwaccm.s.cWorldSwitchExits);

    STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatPoke, x);
    int rc = RTMpPokeCpu(idHostCpu);
    STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatPoke, x);

    /* Not implemented on some platforms (Darwin, Linux kernel < 2.6.19); fall
       back to a less efficient implementation (broadcast). */
    if (rc == VERR_NOT_SUPPORTED)
    {
        STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatSpinPoke, z);
        /* Synchronous cross-call; no spinning needed afterwards. */
        RTMpOnSpecific(idHostCpu, hwaccmFlushHandler, 0, 0);
        STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatSpinPoke, z);
    }
    else
    {
        /* Account the spin separately depending on whether the poke itself
           succeeded or failed. */
        if (rc == VINF_SUCCESS)
            STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatSpinPoke, z);
        else
            STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatSpinPokeFailed, z);

/** @todo If more than one CPU is going to be poked, we could optimize this
 *        operation by poking them first and wait afterwards. Would require
 *        recording who to poke and their current cWorldSwitchExits values,
 *        that's something not suitable for stack... So, pVCpu->hm.s.something
 *        then. */
        /* Spin until the VCPU has switched back (poking is async). */
        while (   ASMAtomicUoReadBool(&pVCpu->hwaccm.s.fCheckedTLBFlush)
               && cWorldSwitchExits == ASMAtomicUoReadU32(&pVCpu->hwaccm.s.cWorldSwitchExits))
            ASMNopPause();

        if (rc == VINF_SUCCESS)
            STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatSpinPoke, z);
        else
            STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatSpinPokeFailed, z);
    }
}
152
153#endif /* IN_RING0 */
154#ifndef IN_RC
155
156/**
157 * Poke an EMT so it can perform the appropriate TLB shootdowns.
158 *
159 * @param pVCpu The handle of the virtual CPU to poke.
160 * @param fAccountFlushStat Whether to account the call to
161 * StatTlbShootdownFlush or StatTlbShootdown.
162 */
163static void hmPokeCpuForTlbFlush(PVMCPU pVCpu, bool fAccountFlushStat)
164{
165 if (ASMAtomicUoReadBool(&pVCpu->hwaccm.s.fCheckedTLBFlush))
166 {
167 if (fAccountFlushStat)
168 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTlbShootdownFlush);
169 else
170 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTlbShootdown);
171#ifdef IN_RING0
172 RTCPUID idHostCpu = pVCpu->hwaccm.s.idEnteredCpu;
173 if (idHostCpu != NIL_RTCPUID)
174 hmR0PokeCpu(pVCpu, idHostCpu);
175#else
176 VMR3NotifyCpuFFU(pVCpu->pUVCpu, VMNOTIFYFF_FLAGS_POKE);
177#endif
178 }
179 else
180 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushPageManual);
181}
182
183
184/**
185 * Invalidates a guest page on all VCPUs.
186 *
187 * @returns VBox status code.
188 * @param pVM The VM to operate on.
189 * @param GCVirt Page to invalidate
190 */
191VMMDECL(int) HWACCMInvalidatePageOnAllVCpus(PVM pVM, RTGCPTR GCPtr)
192{
193 VMCPUID idCurCpu = VMMGetCpuId(pVM);
194 STAM_COUNTER_INC(&pVM->aCpus[idCurCpu].hwaccm.s.StatFlushPage);
195
196 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
197 {
198 PVMCPU pVCpu = &pVM->aCpus[idCpu];
199
200 /* Nothing to do if a TLB flush is already pending; the VCPU should
201 have already been poked if it were active. */
202 if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH))
203 continue;
204
205 if (pVCpu->idCpu == idCurCpu)
206 HWACCMInvalidatePage(pVCpu, GCPtr);
207 else
208 {
209 hwaccmQueueInvlPage(pVCpu, GCPtr);
210 hmPokeCpuForTlbFlush(pVCpu, false /*fAccountFlushStat*/);
211 }
212 }
213
214 return VINF_SUCCESS;
215}
216
217
218/**
219 * Flush the TLBs of all VCPUs
220 *
221 * @returns VBox status code.
222 * @param pVM The VM to operate on.
223 */
224VMMDECL(int) HWACCMFlushTLBOnAllVCpus(PVM pVM)
225{
226 if (pVM->cCpus == 1)
227 return HWACCMFlushTLB(&pVM->aCpus[0]);
228
229 VMCPUID idThisCpu = VMMGetCpuId(pVM);
230
231 STAM_COUNTER_INC(&pVM->aCpus[idThisCpu].hwaccm.s.StatFlushTLB);
232
233 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
234 {
235 PVMCPU pVCpu = &pVM->aCpus[idCpu];
236
237 /* Nothing to do if a TLB flush is already pending; the VCPU should
238 have already been poked if it were active. */
239 if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH))
240 {
241 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
242 if (idThisCpu != idCpu)
243 hmPokeCpuForTlbFlush(pVCpu, true /*fAccountFlushStat*/);
244 }
245 }
246
247 return VINF_SUCCESS;
248}
249
250#endif /* !IN_RC */
251
252/**
253 * Checks if nested paging is enabled
254 *
255 * @returns boolean
256 * @param pVM The VM to operate on.
257 */
258VMMDECL(bool) HWACCMIsNestedPagingActive(PVM pVM)
259{
260 return HWACCMIsEnabled(pVM) && pVM->hwaccm.s.fNestedPaging;
261}
262
263/**
264 * Return the shadow paging mode for nested paging/ept
265 *
266 * @returns shadow paging mode
267 * @param pVM The VM to operate on.
268 */
269VMMDECL(PGMMODE) HWACCMGetShwPagingMode(PVM pVM)
270{
271 Assert(HWACCMIsNestedPagingActive(pVM));
272 if (pVM->hwaccm.s.svm.fSupported)
273 return PGMMODE_NESTED;
274
275 Assert(pVM->hwaccm.s.vmx.fSupported);
276 return PGMMODE_EPT;
277}
278
279/**
280 * Invalidates a guest page by physical address
281 *
282 * NOTE: Assumes the current instruction references this physical page though a virtual address!!
283 *
284 * @returns VBox status code.
285 * @param pVM The VM to operate on.
286 * @param GCPhys Page to invalidate
287 */
288VMMDECL(int) HWACCMInvalidatePhysPage(PVM pVM, RTGCPHYS GCPhys)
289{
290 if (!HWACCMIsNestedPagingActive(pVM))
291 return VINF_SUCCESS;
292
293#ifdef IN_RING0
294 if (pVM->hwaccm.s.vmx.fSupported)
295 {
296 VMCPUID idThisCpu = VMMGetCpuId(pVM);
297
298 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
299 {
300 PVMCPU pVCpu = &pVM->aCpus[idCpu];
301
302 if (idThisCpu == idCpu)
303 VMXR0InvalidatePhysPage(pVM, pVCpu, GCPhys);
304 else
305 {
306 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
307 hmPokeCpuForTlbFlush(pVCpu, true /*fAccountFlushStat*/);
308 }
309 }
310 return VINF_SUCCESS;
311 }
312
313 /* AMD-V doesn't support invalidation with guest physical addresses; see
314 comment in SVMR0InvalidatePhysPage. */
315 Assert(pVM->hwaccm.s.svm.fSupported);
316#endif
317
318 HWACCMFlushTLBOnAllVCpus(pVM);
319 return VINF_SUCCESS;
320}
321
322/**
323 * Checks if an interrupt event is currently pending.
324 *
325 * @returns Interrupt event pending state.
326 * @param pVM The VM to operate on.
327 */
328VMMDECL(bool) HWACCMHasPendingIrq(PVM pVM)
329{
330 PVMCPU pVCpu = VMMGetCpu(pVM);
331 return !!pVCpu->hwaccm.s.Event.fPending;
332}
333
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette