VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/HMAll.cpp@72969

Last change on this file since 72969 was 72744, checked in by vboxsync, 7 years ago

VMM: Extend HM changed flags. bugref:9193 [HM, CPUM]

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 15.6 KB
/* $Id: HMAll.cpp 72744 2018-06-29 07:36:19Z vboxsync $ */
/** @file
 * HM - All contexts.
 */

/*
 * Copyright (C) 2006-2017 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_HM
#include <VBox/vmm/hm.h>
#include <VBox/vmm/pgm.h>
#include "HMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/hm_vmx.h>
#include <VBox/vmm/hm_svm.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <iprt/param.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/string.h>
#include <iprt/thread.h>
#include <iprt/x86.h>
#include <iprt/asm-amd64-x86.h>

/**
 * Checks whether HM (VT-x/AMD-V) is being used by this VM.
 *
 * @retval  true if used.
 * @retval  false if software virtualization (raw-mode) is used.
 * @param   pVM         The cross context VM structure.
 * @sa      HMIsEnabled, HMR3IsEnabled
 * @internal
 */
VMMDECL(bool) HMIsEnabledNotMacro(PVM pVM)
{
    Assert(pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NOT_SET);
    return pVM->fHMEnabled;
}


/**
 * Queues a guest page for invalidation.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCVirt      Page to invalidate.
 */
static void hmQueueInvlPage(PVMCPU pVCpu, RTGCPTR GCVirt)
{
    /* Nothing to do if a TLB flush is already pending. */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH))
        return;
    VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
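    /* Note that the per-page request is coarsened into a full TLB flush before the
       next VM-entry, which is why the virtual address itself is not recorded here. */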
    NOREF(GCVirt);
}


/**
 * Invalidates a guest page.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCVirt      Page to invalidate.
 */
VMM_INT_DECL(int) HMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt)
{
    STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushPageManual);
#ifdef IN_RING0
    return HMR0InvalidatePage(pVCpu, GCVirt);
#else
    hmQueueInvlPage(pVCpu, GCVirt);
    return VINF_SUCCESS;
#endif
}


#ifdef IN_RING0

/**
 * Dummy RTMpOnSpecific handler since RTMpPokeCpu couldn't be used.
 */
static DECLCALLBACK(void) hmFlushHandler(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    NOREF(idCpu); NOREF(pvUser1); NOREF(pvUser2);
    return;
}


/**
 * Wrapper for RTMpPokeCpu to deal with VERR_NOT_SUPPORTED.
 */
static void hmR0PokeCpu(PVMCPU pVCpu, RTCPUID idHostCpu)
{
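    /* Sample the world-switch exit count up front; the spin loop below uses it to
       detect that the target VCPU has gone through another VM-exit since the poke. */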
    uint32_t cWorldSwitchExits = ASMAtomicUoReadU32(&pVCpu->hm.s.cWorldSwitchExits);

    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatPoke, x);
    int rc = RTMpPokeCpu(idHostCpu);
    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPoke, x);

    /* Not implemented on some platforms (Darwin, Linux kernel < 2.6.19); fall
       back to a less efficient implementation (broadcast). */
    if (rc == VERR_NOT_SUPPORTED)
    {
        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatSpinPoke, z);
        /* synchronous. */
        RTMpOnSpecific(idHostCpu, hmFlushHandler, 0, 0);
        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatSpinPoke, z);
    }
    else
    {
        if (rc == VINF_SUCCESS)
            STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatSpinPoke, z);
        else
            STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatSpinPokeFailed, z);

/** @todo If more than one CPU is going to be poked, we could optimize this
 *        operation by poking them first and wait afterwards.  Would require
 *        recording who to poke and their current cWorldSwitchExits values,
 *        that's something not suitable for stack...  So, pVCpu->hm.s.something
 *        then. */
        /* Spin until the VCPU has switched back (poking is async). */
        while (   ASMAtomicUoReadBool(&pVCpu->hm.s.fCheckedTLBFlush)
               && cWorldSwitchExits == ASMAtomicUoReadU32(&pVCpu->hm.s.cWorldSwitchExits))
            ASMNopPause();

        if (rc == VINF_SUCCESS)
            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatSpinPoke, z);
        else
            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatSpinPokeFailed, z);
    }
}

#endif /* IN_RING0 */
#ifndef IN_RC
/**
 * Flushes the guest TLB.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
VMM_INT_DECL(int) HMFlushTLB(PVMCPU pVCpu)
{
    LogFlow(("HMFlushTLB\n"));

    VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbManual);
    return VINF_SUCCESS;
}

/**
 * Pokes an EMT so it can perform the appropriate TLB shootdowns.
 *
 * @param   pVCpu               The cross context virtual CPU structure of the
 *                              EMT to poke.
 * @param   fAccountFlushStat   Whether to account the call to
 *                              StatTlbShootdownFlush or StatTlbShootdown.
 */
static void hmPokeCpuForTlbFlush(PVMCPU pVCpu, bool fAccountFlushStat)
{
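    /* Only poke the VCPU if it has already done its TLB-flush checks for the current
       VM-entry; otherwise it will pick up the force flag on its next entry anyway. */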
    if (ASMAtomicUoReadBool(&pVCpu->hm.s.fCheckedTLBFlush))
    {
        if (fAccountFlushStat)
            STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdownFlush);
        else
            STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
#ifdef IN_RING0
        RTCPUID idHostCpu = pVCpu->hm.s.idEnteredCpu;
        if (idHostCpu != NIL_RTCPUID)
            hmR0PokeCpu(pVCpu, idHostCpu);
#else
        VMR3NotifyCpuFFU(pVCpu->pUVCpu, VMNOTIFYFF_FLAGS_POKE);
#endif
    }
    else
        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushPageManual);
}


/**
 * Invalidates a guest page on all VCPUs.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCVirt      Page to invalidate.
 */
VMM_INT_DECL(int) HMInvalidatePageOnAllVCpus(PVM pVM, RTGCPTR GCVirt)
{
    /*
     * The VT-x/AMD-V code will be flushing TLB each time a VCPU migrates to a different
     * host CPU, see hmR0VmxFlushTaggedTlbBoth() and hmR0SvmFlushTaggedTlb().
     *
     * This is the reason why we do not care about thread preemption here and just
     * execute HMInvalidatePage() assuming it might be the 'right' CPU.
     */
    VMCPUID idCurCpu = VMMGetCpuId(pVM);
    STAM_COUNTER_INC(&pVM->aCpus[idCurCpu].hm.s.StatFlushPage);

    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];

        /* Nothing to do if a TLB flush is already pending; the VCPU should
           have already been poked if it were active. */
        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH))
            continue;

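        /* The calling EMT can invalidate directly; other VCPUs get the request queued
           and are poked so they pick up the pending flush before resuming the guest. */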
        if (pVCpu->idCpu == idCurCpu)
            HMInvalidatePage(pVCpu, GCVirt);
        else
        {
            hmQueueInvlPage(pVCpu, GCVirt);
            hmPokeCpuForTlbFlush(pVCpu, false /* fAccountFlushStat */);
        }
    }

    return VINF_SUCCESS;
}


/**
 * Flushes the TLBs of all VCPUs.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 */
VMM_INT_DECL(int) HMFlushTLBOnAllVCpus(PVM pVM)
{
    if (pVM->cCpus == 1)
        return HMFlushTLB(&pVM->aCpus[0]);

    VMCPUID idThisCpu = VMMGetCpuId(pVM);

    STAM_COUNTER_INC(&pVM->aCpus[idThisCpu].hm.s.StatFlushTlb);

    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];

        /* Nothing to do if a TLB flush is already pending; the VCPU should
           have already been poked if it were active. */
        if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH))
        {
            VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
            if (idThisCpu != idCpu)
                hmPokeCpuForTlbFlush(pVCpu, true /* fAccountFlushStat */);
        }
    }

    return VINF_SUCCESS;
}


/**
 * Invalidates a guest page by physical address.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      Page to invalidate.
 *
 * @remarks Assumes the current instruction references this physical page
 *          through a virtual address!
 */
VMM_INT_DECL(int) HMInvalidatePhysPage(PVM pVM, RTGCPHYS GCPhys)
{
    if (!HMIsNestedPagingActive(pVM))
        return VINF_SUCCESS;

    /*
     * AMD-V: Doesn't support invalidation with guest physical addresses.
     *
     * VT-x: Doesn't support invalidation with guest physical addresses.
     * The INVVPID instruction takes only a linear address, while INVEPT only
     * flushes by EPT, not individual addresses.
     *
     * We update the force flag and flush before the next VM-entry, see @bugref{6568}.
     */
    RT_NOREF(GCPhys);
    /** @todo Remove or figure out a way to update the Phys STAT counter. */
    /* STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgPhys); */
    return HMFlushTLBOnAllVCpus(pVM);
}


/**
 * Checks if nested paging is enabled.
 *
 * @returns true if nested paging is active, false otherwise.
 * @param   pVM         The cross context VM structure.
 *
 * @remarks Works before hmR3InitFinalizeR0.
 */
VMM_INT_DECL(bool) HMIsNestedPagingActive(PVM pVM)
{
    return HMIsEnabled(pVM) && pVM->hm.s.fNestedPaging;
}


/**
 * Checks if both nested paging and unrestricted guest execution are enabled.
 *
 * The almost complete guest execution in hardware is only applicable to VT-x.
 *
 * @returns true if we have both enabled, otherwise false.
 * @param   pVM         The cross context VM structure.
 *
 * @remarks Works before hmR3InitFinalizeR0.
 */
VMM_INT_DECL(bool) HMAreNestedPagingAndFullGuestExecEnabled(PVM pVM)
{
    return HMIsEnabled(pVM)
        && pVM->hm.s.fNestedPaging
        && (   pVM->hm.s.vmx.fUnrestrictedGuest
            || pVM->hm.s.svm.fSupported);
}


/**
 * Checks if this VM is using HM and is long-mode capable.
 *
 * Use VMR3IsLongModeAllowed() instead of this, when possible.
 *
 * @returns true if long mode is allowed, false otherwise.
 * @param   pVM         The cross context VM structure.
 * @sa      VMR3IsLongModeAllowed, NEMHCIsLongModeAllowed
 */
VMM_INT_DECL(bool) HMIsLongModeAllowed(PVM pVM)
{
    return HMIsEnabled(pVM) && pVM->hm.s.fAllow64BitGuests;
}


/**
 * Checks if MSR bitmaps are available.  It is assumed that when they are
 * available they will be used as well.
 *
 * @returns true if MSR bitmaps are available, false otherwise.
 * @param   pVM         The cross context VM structure.
 */
VMM_INT_DECL(bool) HMAreMsrBitmapsAvailable(PVM pVM)
{
    if (HMIsEnabled(pVM))
    {
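        /* AMD-V always provides the MSR permission map; on VT-x the "use MSR bitmaps"
           processor-based execution control must be advertised by the hardware. */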
        if (pVM->hm.s.svm.fSupported)
            return true;

        if (   pVM->hm.s.vmx.fSupported
            && (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
        {
            return true;
        }
    }
    return false;
}


/**
 * Returns the shadow paging mode for nested paging/EPT.
 *
 * @returns The shadow paging mode.
 * @param   pVM         The cross context VM structure.
 */
VMM_INT_DECL(PGMMODE) HMGetShwPagingMode(PVM pVM)
{
    Assert(HMIsNestedPagingActive(pVM));
    if (pVM->hm.s.svm.fSupported)
        return PGMMODE_NESTED;

    Assert(pVM->hm.s.vmx.fSupported);
    return PGMMODE_EPT;
}
#endif /* !IN_RC */


/**
 * Checks if an interrupt event is currently pending.
 *
 * @returns Interrupt event pending state.
 * @param   pVM         The cross context VM structure.
 */
VMM_INT_DECL(bool) HMHasPendingIrq(PVM pVM)
{
    PVMCPU pVCpu = VMMGetCpu(pVM);
    return !!pVCpu->hm.s.Event.fPending;
}


/**
 * Returns the PAE PDPE entries.
 *
 * @returns Pointer to the PAE PDPE array.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
VMM_INT_DECL(PX86PDPE) HMGetPaePdpes(PVMCPU pVCpu)
{
    return &pVCpu->hm.s.aPdpes[0];
}


/**
 * Checks if the current AMD CPU is subject to erratum 170 "In SVM mode,
 * incorrect code bytes may be fetched after a world-switch".
 *
 * @param   pu32Family      Where to store the CPU family (can be NULL).
 * @param   pu32Model       Where to store the CPU model (can be NULL).
 * @param   pu32Stepping    Where to store the CPU stepping (can be NULL).
 * @returns true if the erratum applies, false otherwise.
 */
VMM_INT_DECL(int) HMAmdIsSubjectToErratum170(uint32_t *pu32Family, uint32_t *pu32Model, uint32_t *pu32Stepping)
{
    /*
     * Erratum 170 which requires a forced TLB flush for each world switch:
     * See AMD spec. "Revision Guide for AMD NPT Family 0Fh Processors".
     *
     * All BH-G1/2 and DH-G1/2 models include a fix:
     * Athlon X2:   0x6b 1/2
     *              0x68 1/2
     * Athlon 64:   0x7f 1
     *              0x6f 2
     * Sempron:     0x7f 1/2
     *              0x6f 2
     *              0x6c 2
     *              0x7c 2
     * Turion 64:   0x68 2
     */
    uint32_t u32Dummy;
    uint32_t u32Version, u32Family, u32Model, u32Stepping, u32BaseFamily;
    ASMCpuId(1, &u32Version, &u32Dummy, &u32Dummy, &u32Dummy);
    u32BaseFamily = (u32Version >> 8) & 0xf;
    u32Family     = u32BaseFamily + (u32BaseFamily == 0xf ? ((u32Version >> 20) & 0x7f) : 0);
    u32Model      = ((u32Version >> 4) & 0xf);
    u32Model      = u32Model | ((u32BaseFamily == 0xf ? (u32Version >> 16) & 0x0f : 0) << 4);
    u32Stepping   = u32Version & 0xf;
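    /* CPUID leaf 1 EAX layout: bits 3:0 stepping, 7:4 base model, 11:8 base family,
       19:16 extended model, 27:20 extended family.  The extended fields only apply
       when the base family is 0xf, which is the case for all K8 parts. */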

    bool fErratumApplies = false;
    if (   u32Family == 0xf
        && !((u32Model == 0x68 || u32Model == 0x6b || u32Model == 0x7f) && u32Stepping >= 1)
        && !((u32Model == 0x6f || u32Model == 0x6c || u32Model == 0x7c) && u32Stepping >= 2))
    {
        fErratumApplies = true;
    }

    if (pu32Family)
        *pu32Family   = u32Family;
    if (pu32Model)
        *pu32Model    = u32Model;
    if (pu32Stepping)
        *pu32Stepping = u32Stepping;

    return fErratumApplies;
}


/**
 * Sets or clears the single instruction flag.
 *
 * When set, HM will try its best to return to ring-3 after executing a single
 * instruction.  This can be used for debugging.  See also
 * EMR3HmSingleInstruction.
 *
 * @returns The old flag state.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   fEnable     The new flag state.
 */
VMM_INT_DECL(bool) HMSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
{
    VMCPU_ASSERT_EMT(pVCpu);
    bool fOld = pVCpu->hm.s.fSingleInstruction;
    pVCpu->hm.s.fSingleInstruction = fEnable;
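    /* The debug loop stays forced if the VM-wide setting already requires it, even
       after single-instruction mode is cleared again. */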
    pVCpu->hm.s.fUseDebugLoop = fEnable || pVM->hm.s.fUseDebugLoop;
    return fOld;
}


/**
 * Notifies HM that the GIM provider wants to trap \#UD.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 */
VMM_INT_DECL(void) HMTrapXcptUDForGIMEnable(PVMCPU pVCpu)
{
    pVCpu->hm.s.fGIMTrapXcptUD = true;
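    /* Mark the exception-intercept state as changed so the updated #UD intercept is
       exported to the VMCS/VMCB on the next VM-entry. */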
    if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fSupported)
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS);
    else
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_SVM_GUEST_XCPT_INTERCEPTS);
}


/**
 * Notifies HM that the GIM provider no longer wants to trap \#UD.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 */
VMM_INT_DECL(void) HMTrapXcptUDForGIMDisable(PVMCPU pVCpu)
{
    pVCpu->hm.s.fGIMTrapXcptUD = false;
    if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fSupported)
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS);
    else
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_SVM_GUEST_XCPT_INTERCEPTS);
}


/**
 * VMX nested-guest VM-exit handler.
 *
 * @param   pVCpu               The cross context virtual CPU structure.
 * @param   uBasicExitReason    The basic exit reason.
 */
VMM_INT_DECL(void) HMNstGstVmxVmExit(PVMCPU pVCpu, uint16_t uBasicExitReason)
{
    RT_NOREF2(pVCpu, uBasicExitReason);
}