
source: vbox/trunk/src/VBox/VMM/VMMAll/HMAll.cpp@47413

Last change on this file since 47413 was 46420, checked in by vboxsync, 12 years ago

VMM, recompiler: Purge deprecated macros.

/* $Id: HMAll.cpp 46420 2013-06-06 16:27:25Z vboxsync $ */
/** @file
 * HM - All contexts.
 */

/*
 * Copyright (C) 2006-2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_HM
#include <VBox/vmm/hm.h>
#include <VBox/vmm/pgm.h>
#include "HMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/hm_vmx.h>
#include <VBox/vmm/hm_svm.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <iprt/param.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/string.h>
#include <iprt/x86.h>
#include <iprt/asm-amd64-x86.h>


/**
 * Checks whether HM (VT-x/AMD-V) is being used by this VM.
 *
 * @retval  @c true if used.
 * @retval  @c false if software virtualization (raw-mode) is used.
 * @param   pVM         The cross context VM structure.
 * @sa      HMIsEnabled, HMR3IsEnabled
 * @internal
 */
VMMDECL(bool) HMIsEnabledNotMacro(PVM pVM)
{
    Assert(pVM->fHMEnabledFixed);
    return pVM->fHMEnabled;
}


/**
 * Queues a page for invalidation.
 *
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   GCVirt      Page to invalidate.
 */
static void hmQueueInvlPage(PVMCPU pVCpu, RTGCPTR GCVirt)
{
    /* Nothing to do if a TLB flush is already pending. */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH))
        return;
#if 1
    VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    NOREF(GCVirt);
#else
    /* Be very careful when activating this code! (Note: iPage is not
       defined in this function; the disabled path is only a sketch.) */
    if (iPage == RT_ELEMENTS(pVCpu->hm.s.TlbShootdown.aPages))
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    else
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
#endif
}

/**
 * Invalidates a guest page.
 *
 * @returns VBox status code.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   GCVirt      Page to invalidate.
 */
VMM_INT_DECL(int) HMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt)
{
    STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushPageManual);
#ifdef IN_RING0
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    if (pVM->hm.s.vmx.fSupported)
        return VMXR0InvalidatePage(pVM, pVCpu, GCVirt);

    Assert(pVM->hm.s.svm.fSupported);
    return SVMR0InvalidatePage(pVM, pVCpu, GCVirt);

#else
    hmQueueInvlPage(pVCpu, GCVirt);
    return VINF_SUCCESS;
#endif
}
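
/*
 * Hypothetical usage, for illustration only (not part of this revision).
 * Assumes a pVCpu and a guest-virtual address GCVirt are in scope. In ring-0
 * the call dispatches to VMXR0InvalidatePage or SVMR0InvalidatePage; in
 * other contexts it merely queues a full TLB flush.
 */
#if 0
    int rc = HMInvalidatePage(pVCpu, GCVirt);
    AssertRC(rc);
#endif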

/**
 * Flushes the guest TLB.
 *
 * @returns VBox status code.
 * @param   pVCpu       Pointer to the VMCPU.
 */
VMM_INT_DECL(int) HMFlushTLB(PVMCPU pVCpu)
{
    LogFlow(("HMFlushTLB\n"));

    VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbManual);
    return VINF_SUCCESS;
}

#ifdef IN_RING0

/**
 * Dummy RTMpOnSpecific handler since RTMpPokeCpu couldn't be used.
 */
static DECLCALLBACK(void) hmFlushHandler(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    NOREF(idCpu); NOREF(pvUser1); NOREF(pvUser2);
    return;
}

/**
 * Wrapper for RTMpPokeCpu to deal with VERR_NOT_SUPPORTED.
 */
static void hmR0PokeCpu(PVMCPU pVCpu, RTCPUID idHostCpu)
{
    uint32_t cWorldSwitchExits = ASMAtomicUoReadU32(&pVCpu->hm.s.cWorldSwitchExits);

    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatPoke, x);
    int rc = RTMpPokeCpu(idHostCpu);
    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPoke, x);

    /* Not implemented on some platforms (Darwin, Linux kernel < 2.6.19); fall
       back to a less efficient implementation (broadcast). */
    if (rc == VERR_NOT_SUPPORTED)
    {
        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatSpinPoke, z);
        /* Synchronous. */
        RTMpOnSpecific(idHostCpu, hmFlushHandler, 0, 0);
        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatSpinPoke, z);
    }
    else
    {
        if (rc == VINF_SUCCESS)
            STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatSpinPoke, z);
        else
            STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatSpinPokeFailed, z);

        /** @todo If more than one CPU is going to be poked, we could optimize this
         *        operation by poking them first and waiting afterwards. Would require
         *        recording who to poke and their current cWorldSwitchExits values,
         *        that's something not suitable for stack... So, pVCpu->hm.s.something
         *        then. */
        /* Spin until the VCPU has switched back (poking is async). */
        while (   ASMAtomicUoReadBool(&pVCpu->hm.s.fCheckedTLBFlush)
               && cWorldSwitchExits == ASMAtomicUoReadU32(&pVCpu->hm.s.cWorldSwitchExits))
            ASMNopPause();

        if (rc == VINF_SUCCESS)
            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatSpinPoke, z);
        else
            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatSpinPokeFailed, z);
    }
}

#endif /* IN_RING0 */
#ifndef IN_RC

/**
 * Pokes an EMT so it can perform the appropriate TLB shootdowns.
 *
 * @param   pVCpu               The handle of the virtual CPU to poke.
 * @param   fAccountFlushStat   Whether to account the call to
 *                              StatTlbShootdownFlush or StatTlbShootdown.
 */
static void hmPokeCpuForTlbFlush(PVMCPU pVCpu, bool fAccountFlushStat)
{
    if (ASMAtomicUoReadBool(&pVCpu->hm.s.fCheckedTLBFlush))
    {
        if (fAccountFlushStat)
            STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdownFlush);
        else
            STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
#ifdef IN_RING0
        RTCPUID idHostCpu = pVCpu->hm.s.idEnteredCpu;
        if (idHostCpu != NIL_RTCPUID)
            hmR0PokeCpu(pVCpu, idHostCpu);
#else
        VMR3NotifyCpuFFU(pVCpu->pUVCpu, VMNOTIFYFF_FLAGS_POKE);
#endif
    }
    else
        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushPageManual);
}


/**
 * Invalidates a guest page on all VCPUs.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   GCPtr       Page to invalidate.
 */
VMM_INT_DECL(int) HMInvalidatePageOnAllVCpus(PVM pVM, RTGCPTR GCPtr)
{
    VMCPUID idCurCpu = VMMGetCpuId(pVM);
    STAM_COUNTER_INC(&pVM->aCpus[idCurCpu].hm.s.StatFlushPage);

    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];

        /* Nothing to do if a TLB flush is already pending; the VCPU should
           have already been poked if it were active. */
        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH))
            continue;

        if (pVCpu->idCpu == idCurCpu)
            HMInvalidatePage(pVCpu, GCPtr);
        else
        {
            hmQueueInvlPage(pVCpu, GCPtr);
            hmPokeCpuForTlbFlush(pVCpu, false /* fAccountFlushStat */);
        }
    }

    return VINF_SUCCESS;
}
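
/*
 * Hypothetical usage, for illustration only (not part of this revision).
 * Assumes pVM and a guest pointer GCPtr are in scope. The current VCPU
 * invalidates immediately via HMInvalidatePage; remote VCPUs have the page
 * queued and are poked to act on it.
 */
#if 0
    int rc = HMInvalidatePageOnAllVCpus(pVM, GCPtr);
    AssertRC(rc);
#endif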


/**
 * Flushes the TLBs of all VCPUs.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 */
VMM_INT_DECL(int) HMFlushTLBOnAllVCpus(PVM pVM)
{
    if (pVM->cCpus == 1)
        return HMFlushTLB(&pVM->aCpus[0]);

    VMCPUID idThisCpu = VMMGetCpuId(pVM);

    STAM_COUNTER_INC(&pVM->aCpus[idThisCpu].hm.s.StatFlushTlb);

    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];

        /* Nothing to do if a TLB flush is already pending; the VCPU should
           have already been poked if it were active. */
        if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH))
        {
            VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
            if (idThisCpu != idCpu)
                hmPokeCpuForTlbFlush(pVCpu, true /* fAccountFlushStat */);
        }
    }

    return VINF_SUCCESS;
}

#endif /* !IN_RC */

/**
 * Checks if nested paging is enabled.
 *
 * @returns true if nested paging is active, false otherwise.
 * @param   pVM         Pointer to the VM.
 */
VMM_INT_DECL(bool) HMIsNestedPagingActive(PVM pVM)
{
    return HMIsEnabled(pVM) && pVM->hm.s.fNestedPaging;
}

/**
 * Returns the shadow paging mode for nested paging/EPT.
 *
 * @returns The shadow paging mode.
 * @param   pVM         Pointer to the VM.
 */
VMM_INT_DECL(PGMMODE) HMGetShwPagingMode(PVM pVM)
{
    Assert(HMIsNestedPagingActive(pVM));
    if (pVM->hm.s.svm.fSupported)
        return PGMMODE_NESTED;

    Assert(pVM->hm.s.vmx.fSupported);
    return PGMMODE_EPT;
}
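
/*
 * Hypothetical usage, for illustration only (not part of this revision):
 * the shadow paging mode may only be queried while nested paging is active,
 * hence the guard. Assumes pVM is in scope.
 */
#if 0
    if (HMIsNestedPagingActive(pVM))
    {
        PGMMODE enmShwMode = HMGetShwPagingMode(pVM);
        /* PGMMODE_NESTED on AMD-V, PGMMODE_EPT on VT-x. */
        Assert(enmShwMode == PGMMODE_NESTED || enmShwMode == PGMMODE_EPT);
    }
#endif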

/**
 * Invalidates a guest page by physical address.
 *
 * NOTE: Assumes the current instruction references this physical page
 *       through a virtual address!
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   GCPhys      Page to invalidate.
 */
VMM_INT_DECL(int) HMInvalidatePhysPage(PVM pVM, RTGCPHYS GCPhys)
{
    if (!HMIsNestedPagingActive(pVM))
        return VINF_SUCCESS;

#ifdef IN_RING0
    if (pVM->hm.s.vmx.fSupported)
    {
        VMCPUID idThisCpu = VMMGetCpuId(pVM);

        for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
        {
            PVMCPU pVCpu = &pVM->aCpus[idCpu];

            if (idThisCpu == idCpu)
            {
                /** @todo r=ramshankar: Intel does not support flushing by guest physical
                 *        address either. See comment in VMXR0InvalidatePhysPage(). Fix this. */
                VMXR0InvalidatePhysPage(pVM, pVCpu, GCPhys);
            }
            else
            {
                VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
                hmPokeCpuForTlbFlush(pVCpu, true /*fAccountFlushStat*/);
            }
        }
        return VINF_SUCCESS;
    }

    /* AMD-V doesn't support invalidation with guest physical addresses; see
       comment in SVMR0InvalidatePhysPage. */
    Assert(pVM->hm.s.svm.fSupported);
#else
    NOREF(GCPhys);
#endif

    HMFlushTLBOnAllVCpus(pVM);
    return VINF_SUCCESS;
}

/**
 * Checks if an interrupt event is currently pending.
 *
 * @returns Interrupt event pending state.
 * @param   pVM         Pointer to the VM.
 */
VMM_INT_DECL(bool) HMHasPendingIrq(PVM pVM)
{
    PVMCPU pVCpu = VMMGetCpu(pVM);
    return !!pVCpu->hm.s.Event.fPending;
}


/**
 * Returns the PAE PDPE entries.
 *
 * @returns Pointer to the PAE PDPE array.
 * @param   pVCpu       Pointer to the VMCPU.
 */
VMM_INT_DECL(PX86PDPE) HMGetPaePdpes(PVMCPU pVCpu)
{
    return &pVCpu->hm.s.aPdpes[0];
}


/**
 * Checks if the current AMD CPU is subject to erratum 170 "In SVM mode,
 * incorrect code bytes may be fetched after a world-switch".
 *
 * @param   pu32Family      Where to store the CPU family (can be NULL).
 * @param   pu32Model       Where to store the CPU model (can be NULL).
 * @param   pu32Stepping    Where to store the CPU stepping (can be NULL).
 * @returns true if the erratum applies, false otherwise.
 */
VMM_INT_DECL(int) HMAmdIsSubjectToErratum170(uint32_t *pu32Family, uint32_t *pu32Model, uint32_t *pu32Stepping)
{
    /*
     * Erratum 170, which requires a forced TLB flush for each world switch:
     * See AMD spec. "Revision Guide for AMD NPT Family 0Fh Processors".
     *
     * All BH-G1/2 and DH-G1/2 models include a fix:
     *   Athlon X2:   0x6b  1/2
     *                0x68  1/2
     *   Athlon 64:   0x7f  1
     *                0x6f  2
     *   Sempron:     0x7f  1/2
     *                0x6f  2
     *                0x6c  2
     *                0x7c  2
     *   Turion 64:   0x68  2
     */
    uint32_t u32Dummy;
    uint32_t u32Version, u32Family, u32Model, u32Stepping, u32BaseFamily;
    ASMCpuId(1, &u32Version, &u32Dummy, &u32Dummy, &u32Dummy);
    u32BaseFamily = (u32Version >> 8) & 0xf;
    u32Family     = u32BaseFamily + (u32BaseFamily == 0xf ? ((u32Version >> 20) & 0x7f) : 0);
    u32Model      = ((u32Version >> 4) & 0xf);
    u32Model      = u32Model | ((u32BaseFamily == 0xf ? (u32Version >> 16) & 0x0f : 0) << 4);
    u32Stepping   = u32Version & 0xf;

    bool fErratumApplies = false;
    if (   u32Family == 0xf
        && !((u32Model == 0x68 || u32Model == 0x6b || u32Model == 0x7f) && u32Stepping >= 1)
        && !((u32Model == 0x6f || u32Model == 0x6c || u32Model == 0x7c) && u32Stepping >= 2))
    {
        fErratumApplies = true;
    }

    if (pu32Family)
        *pu32Family   = u32Family;
    if (pu32Model)
        *pu32Model    = u32Model;
    if (pu32Stepping)
        *pu32Stepping = u32Stepping;

    return fErratumApplies;
}
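
/*
 * Worked example of the CPUID leaf 1 decoding above, for illustration only
 * (not part of this revision). A hypothetical EAX value of 0x00070FF1 has
 * base family 0xf, extended model 0x7, base model 0xf and stepping 1, which
 * decodes to family 0xf, model 0x7f, stepping 1; that model/stepping is on
 * the fixed list above, so the erratum does not apply. A caller might log
 * the result like this (names in scope are assumed):
 */
#if 0
    uint32_t u32Family, u32Model, u32Stepping;
    if (HMAmdIsSubjectToErratum170(&u32Family, &u32Model, &u32Stepping))
        LogRel(("HM: AMD erratum 170 applies (fam=%#x mod=%#x step=%#x)\n",
                u32Family, u32Model, u32Stepping));
#endif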