VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/HMAll.cpp@ 74878

Last change on this file since 74878 was 73617, checked in by vboxsync, 6 years ago

VMM/HMVMXR0: Use IEMExecOne() rather than manually interpreting a select few instructions in the
real-on-v86 mode when unrestricted-guest execution is not allowed.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
/* $Id: HMAll.cpp 73617 2018-08-10 14:09:55Z vboxsync $ */
/** @file
 * HM - All contexts.
 */

/*
 * Copyright (C) 2006-2017 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_HM
#define VMCPU_INCL_CPUM_GST_CTX
#include <VBox/vmm/hm.h>
#include <VBox/vmm/pgm.h>
#include "HMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/hm_vmx.h>
#include <VBox/vmm/hm_svm.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <iprt/param.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/string.h>
#include <iprt/thread.h>
#include <iprt/x86.h>
#include <iprt/asm-amd64-x86.h>


/**
 * Checks whether HM (VT-x/AMD-V) is being used by this VM.
 *
 * @retval true if used.
 * @retval false if software virtualization (raw-mode) is used.
 * @param pVM The cross context VM structure.
 * @sa HMIsEnabled, HMR3IsEnabled
 * @internal
 */
VMMDECL(bool) HMIsEnabledNotMacro(PVM pVM)
{
    Assert(pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NOT_SET);
    return pVM->fHMEnabled;
}


/**
 * Checks if the guest is in a suitable state for hardware-assisted execution.
 *
 * @returns @c true if it is suitable, @c false otherwise.
 * @param pVCpu The cross context virtual CPU structure.
 * @param pCtx Pointer to the guest CPU context.
 *
 * @remarks @a pCtx can be a partial context created by the caller and is not
 *          necessarily the same as pVCpu->cpum.GstCtx.
 */
VMMDECL(bool) HMCanExecuteGuest(PVMCPU pVCpu, PCCPUMCTX pCtx)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    Assert(HMIsEnabled(pVM));

#ifdef VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM
    if (   CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
        || CPUMIsGuestVmxEnabled(pCtx))
    {
        LogFunc(("In nested-guest mode - returning false"));
        return false;
    }
#endif

    /* AMD-V supports real & protected mode with or without paging. */
    if (pVM->hm.s.svm.fEnabled)
    {
        pVCpu->hm.s.fActive = true;
        return true;
    }

    return HMVmxCanExecuteGuest(pVCpu, pCtx);
}
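
/*
 * Illustrative sketch (not part of the original source): an execution loop
 * might consult HMCanExecuteGuest() before attempting hardware-assisted
 * execution and fall back to software emulation otherwise.  Both helpers
 * named below are hypothetical placeholders, not real VBox APIs.
 *
 *     if (HMCanExecuteGuest(pVCpu, &pVCpu->cpum.GstCtx))
 *         rcStrict = exampleRunGuestWithHM(pVCpu);    // hypothetical
 *     else
 *         rcStrict = exampleEmulateGuest(pVCpu);      // hypothetical
 */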


/**
 * Queues a guest page for invalidation.
 *
 * @param pVCpu The cross context virtual CPU structure.
 * @param GCVirt Page to invalidate.
 */
static void hmQueueInvlPage(PVMCPU pVCpu, RTGCPTR GCVirt)
{
    /* Nothing to do if a TLB flush is already pending. */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH))
        return;
    VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    NOREF(GCVirt);
}


/**
 * Invalidates a guest page.
 *
 * @returns VBox status code.
 * @param pVCpu The cross context virtual CPU structure.
 * @param GCVirt Page to invalidate.
 */
VMM_INT_DECL(int) HMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt)
{
    STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushPageManual);
#ifdef IN_RING0
    return HMR0InvalidatePage(pVCpu, GCVirt);
#else
    hmQueueInvlPage(pVCpu, GCVirt);
    return VINF_SUCCESS;
#endif
}
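
/*
 * Illustrative sketch (not part of the original source): invalidating a single
 * guest page.  In ring-3 this merely raises VMCPU_FF_TLB_FLUSH so the flush
 * happens on the next VM-entry; in ring-0 it is handed to HMR0InvalidatePage().
 * The address below is a hypothetical example value.
 *
 *     RTGCPTR GCVirt = 0x00401000;              // hypothetical guest-linear address
 *     int rc = HMInvalidatePage(pVCpu, GCVirt);
 *     AssertRC(rc);
 */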


#ifdef IN_RING0

/**
 * Dummy RTMpOnSpecific handler since RTMpPokeCpu couldn't be used.
 *
 */
static DECLCALLBACK(void) hmFlushHandler(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    NOREF(idCpu); NOREF(pvUser1); NOREF(pvUser2);
    return;
}


/**
 * Wrapper for RTMpPokeCpu to deal with VERR_NOT_SUPPORTED.
 */
static void hmR0PokeCpu(PVMCPU pVCpu, RTCPUID idHostCpu)
{
    uint32_t cWorldSwitchExits = ASMAtomicUoReadU32(&pVCpu->hm.s.cWorldSwitchExits);

    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatPoke, x);
    int rc = RTMpPokeCpu(idHostCpu);
    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPoke, x);

    /* Not implemented on some platforms (Darwin, Linux kernel < 2.6.19); fall
       back to a less efficient implementation (broadcast). */
    if (rc == VERR_NOT_SUPPORTED)
    {
        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatSpinPoke, z);
        /* synchronous. */
        RTMpOnSpecific(idHostCpu, hmFlushHandler, 0, 0);
        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatSpinPoke, z);
    }
    else
    {
        if (rc == VINF_SUCCESS)
            STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatSpinPoke, z);
        else
            STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatSpinPokeFailed, z);

/** @todo If more than one CPU is going to be poked, we could optimize this
 *        operation by poking them first and waiting afterwards.  That would
 *        require recording who to poke and their current cWorldSwitchExits
 *        values, which is not suitable for the stack...  So,
 *        pVCpu->hm.s.something then. */
        /* Spin until the VCPU has switched back (poking is async). */
        while (   ASMAtomicUoReadBool(&pVCpu->hm.s.fCheckedTLBFlush)
               && cWorldSwitchExits == ASMAtomicUoReadU32(&pVCpu->hm.s.cWorldSwitchExits))
            ASMNopPause();

        if (rc == VINF_SUCCESS)
            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatSpinPoke, z);
        else
            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatSpinPokeFailed, z);
    }
}


#endif /* IN_RING0 */
#ifndef IN_RC

/**
 * Flushes the guest TLB.
 *
 * @returns VBox status code.
 * @param pVCpu The cross context virtual CPU structure.
 */
VMM_INT_DECL(int) HMFlushTLB(PVMCPU pVCpu)
{
    LogFlow(("HMFlushTLB\n"));

    VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbManual);
    return VINF_SUCCESS;
}

/**
 * Pokes an EMT so it can perform the appropriate TLB shootdowns.
 *
 * @param pVCpu The cross context virtual CPU structure of the
 *              EMT to poke.
 * @param fAccountFlushStat Whether to account the call to
 *                          StatTlbShootdownFlush or StatTlbShootdown.
 */
static void hmPokeCpuForTlbFlush(PVMCPU pVCpu, bool fAccountFlushStat)
{
    if (ASMAtomicUoReadBool(&pVCpu->hm.s.fCheckedTLBFlush))
    {
        if (fAccountFlushStat)
            STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdownFlush);
        else
            STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
#ifdef IN_RING0
        RTCPUID idHostCpu = pVCpu->hm.s.idEnteredCpu;
        if (idHostCpu != NIL_RTCPUID)
            hmR0PokeCpu(pVCpu, idHostCpu);
#else
        VMR3NotifyCpuFFU(pVCpu->pUVCpu, VMNOTIFYFF_FLAGS_POKE);
#endif
    }
    else
        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushPageManual);
}


/**
 * Invalidates a guest page on all VCPUs.
 *
 * @returns VBox status code.
 * @param pVM The cross context VM structure.
 * @param GCVirt Page to invalidate.
 */
VMM_INT_DECL(int) HMInvalidatePageOnAllVCpus(PVM pVM, RTGCPTR GCVirt)
{
    /*
     * The VT-x/AMD-V code will be flushing the TLB each time a VCPU migrates to a
     * different host CPU, see hmR0VmxFlushTaggedTlbBoth() and hmR0SvmFlushTaggedTlb().
     *
     * This is the reason why we do not care about thread preemption here and just
     * execute HMInvalidatePage() assuming it might be the 'right' CPU.
     */
    VMCPUID idCurCpu = VMMGetCpuId(pVM);
    STAM_COUNTER_INC(&pVM->aCpus[idCurCpu].hm.s.StatFlushPage);

    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];

        /* Nothing to do if a TLB flush is already pending; the VCPU should
           have already been poked if it were active. */
        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH))
            continue;

        if (pVCpu->idCpu == idCurCpu)
            HMInvalidatePage(pVCpu, GCVirt);
        else
        {
            hmQueueInvlPage(pVCpu, GCVirt);
            hmPokeCpuForTlbFlush(pVCpu, false /* fAccountFlushStat */);
        }
    }

    return VINF_SUCCESS;
}
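
/*
 * Illustrative sketch (not part of the original source): invalidating the same
 * guest page on every VCPU, e.g. after a change that affects a shared mapping.
 * Remote VCPUs only get the flush queued (plus a poke); the flush itself
 * happens on their next VM-entry.
 *
 *     int rc = HMInvalidatePageOnAllVCpus(pVM, GCVirt);
 *     AssertRC(rc);
 */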


/**
 * Flushes the TLBs of all VCPUs.
 *
 * @returns VBox status code.
 * @param pVM The cross context VM structure.
 */
VMM_INT_DECL(int) HMFlushTLBOnAllVCpus(PVM pVM)
{
    if (pVM->cCpus == 1)
        return HMFlushTLB(&pVM->aCpus[0]);

    VMCPUID idThisCpu = VMMGetCpuId(pVM);

    STAM_COUNTER_INC(&pVM->aCpus[idThisCpu].hm.s.StatFlushTlb);

    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];

        /* Nothing to do if a TLB flush is already pending; the VCPU should
           have already been poked if it were active. */
        if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH))
        {
            VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
            if (idThisCpu != idCpu)
                hmPokeCpuForTlbFlush(pVCpu, true /* fAccountFlushStat */);
        }
    }

    return VINF_SUCCESS;
}


/**
 * Invalidates a guest page by physical address.
 *
 * @returns VBox status code.
 * @param pVM The cross context VM structure.
 * @param GCPhys Page to invalidate.
 *
 * @remarks Assumes the current instruction references this physical page
 *          through a virtual address!
 */
VMM_INT_DECL(int) HMInvalidatePhysPage(PVM pVM, RTGCPHYS GCPhys)
{
    if (!HMIsNestedPagingActive(pVM))
        return VINF_SUCCESS;

    /*
     * AMD-V: Doesn't support invalidation with guest physical addresses.
     *
     * VT-x: Doesn't support invalidation with guest physical addresses.
     * The INVVPID instruction takes only a linear address while INVEPT only flushes
     * by EPT, not individual addresses.
     *
     * We update the force flag and flush before the next VM-entry, see @bugref{6568}.
     */
    RT_NOREF(GCPhys);
    /** @todo Remove this or figure out a way to update the Phys STAT counter. */
    /* STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgPhys); */
    return HMFlushTLBOnAllVCpus(pVM);
}
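
/*
 * Illustrative sketch (not part of the original source): invalidating by
 * guest-physical address.  As explained above, neither AMD-V nor VT-x can
 * invalidate by guest-physical address, so this degenerates into a full TLB
 * flush on all VCPUs (or a no-op when nested paging is inactive).  The
 * address below is a hypothetical example value.
 *
 *     RTGCPHYS GCPhys = UINT64_C(0x12340000);   // hypothetical guest-physical address
 *     int rc = HMInvalidatePhysPage(pVM, GCPhys);
 *     AssertRC(rc);
 */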


/**
 * Checks if nested paging is enabled.
 *
 * @returns true if nested paging is active, false otherwise.
 * @param pVM The cross context VM structure.
 *
 * @remarks Works before hmR3InitFinalizeR0.
 */
VMM_INT_DECL(bool) HMIsNestedPagingActive(PVM pVM)
{
    return HMIsEnabled(pVM) && pVM->hm.s.fNestedPaging;
}


/**
 * Checks if both nested paging and unhampered guest execution are enabled.
 *
 * Executing the guest almost entirely in hardware is only applicable to VT-x.
 *
 * @returns true if we have both enabled, otherwise false.
 * @param pVM The cross context VM structure.
 *
 * @remarks Works before hmR3InitFinalizeR0.
 */
VMM_INT_DECL(bool) HMAreNestedPagingAndFullGuestExecEnabled(PVM pVM)
{
    return HMIsEnabled(pVM)
        && pVM->hm.s.fNestedPaging
        && (   pVM->hm.s.vmx.fUnrestrictedGuest
            || pVM->hm.s.svm.fSupported);
}


/**
 * Checks if this VM is using HM and is long-mode capable.
 *
 * Use VMR3IsLongModeAllowed() instead of this, when possible.
 *
 * @returns true if long mode is allowed, false otherwise.
 * @param pVM The cross context VM structure.
 * @sa VMR3IsLongModeAllowed, NEMHCIsLongModeAllowed
 */
VMM_INT_DECL(bool) HMIsLongModeAllowed(PVM pVM)
{
    return HMIsEnabled(pVM) && pVM->hm.s.fAllow64BitGuests;
}


/**
 * Checks if MSR bitmaps are active.  It is assumed that when they are
 * available they will be used as well.
 *
 * @returns true if MSR bitmaps are available, false otherwise.
 * @param pVM The cross context VM structure.
 */
VMM_INT_DECL(bool) HMIsMsrBitmapActive(PVM pVM)
{
    if (HMIsEnabled(pVM))
    {
        if (pVM->hm.s.svm.fSupported)
            return true;

        if (   pVM->hm.s.vmx.fSupported
            && (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS))
            return true;
    }
    return false;
}
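
/*
 * Illustrative sketch (not part of the original source): only adjust MSR
 * intercepts when MSR bitmaps are actually available; without them every MSR
 * access exits anyway.  The helper named below is a hypothetical placeholder.
 *
 *     if (HMIsMsrBitmapActive(pVM))
 *         exampleUpdateMsrIntercepts(pVCpu);    // hypothetical
 */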


/**
 * Checks if AMD-V is active.
 *
 * @returns true if AMD-V is active.
 * @param pVM The cross context VM structure.
 *
 * @remarks Works before hmR3InitFinalizeR0.
 */
VMM_INT_DECL(bool) HMIsSvmActive(PVM pVM)
{
    return pVM->hm.s.svm.fSupported && HMIsEnabled(pVM);
}


/**
 * Checks if VT-x is active.
 *
 * @returns true if VT-x is active.
 * @param pVM The cross context VM structure.
 *
 * @remarks Works before hmR3InitFinalizeR0.
 */
VMM_INT_DECL(bool) HMIsVmxActive(PVM pVM)
{
    return HMIsVmxSupported(pVM) && HMIsEnabled(pVM);
}


/**
 * Checks if VT-x is supported by the host CPU.
 *
 * @returns true if VT-x is supported, false otherwise.
 * @param pVM The cross context VM structure.
 *
 * @remarks Works before hmR3InitFinalizeR0.
 */
VMM_INT_DECL(bool) HMIsVmxSupported(PVM pVM)
{
    return pVM->hm.s.vmx.fSupported;
}

#endif /* !IN_RC */

/**
 * Checks if an interrupt event is currently pending.
 *
 * @returns Interrupt event pending state.
 * @param pVM The cross context VM structure.
 */
VMM_INT_DECL(bool) HMHasPendingIrq(PVM pVM)
{
    PVMCPU pVCpu = VMMGetCpu(pVM);
    return !!pVCpu->hm.s.Event.fPending;
}


/**
 * Returns the PAE PDPE entries.
 *
 * @returns Pointer to the PAE PDPE array.
 * @param pVCpu The cross context virtual CPU structure.
 */
VMM_INT_DECL(PX86PDPE) HMGetPaePdpes(PVMCPU pVCpu)
{
    return &pVCpu->hm.s.aPdpes[0];
}


/**
 * Sets or clears the single instruction flag.
 *
 * When set, HM will try its best to return to ring-3 after executing a single
 * instruction.  This can be used for debugging.  See also
 * EMR3HmSingleInstruction.
 *
 * @returns The old flag state.
 * @param pVM The cross context VM structure.
 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
 * @param fEnable The new flag state.
 */
VMM_INT_DECL(bool) HMSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
{
    VMCPU_ASSERT_EMT(pVCpu);
    bool fOld = pVCpu->hm.s.fSingleInstruction;
    pVCpu->hm.s.fSingleInstruction = fEnable;
    pVCpu->hm.s.fUseDebugLoop = fEnable || pVM->hm.s.fUseDebugLoop;
    return fOld;
}
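
/*
 * Illustrative sketch (not part of the original source): a debugger facility
 * stepping exactly one instruction in HM and then restoring the previous
 * state.  The guest execution step in between (e.g. via
 * EMR3HmSingleInstruction) is omitted here.
 *
 *     bool const fOldSingleStep = HMSetSingleInstruction(pVM, pVCpu, true /* fEnable */);
 *     // ... execute one instruction, e.g. through EMR3HmSingleInstruction ...
 *     HMSetSingleInstruction(pVM, pVCpu, fOldSingleStep);
 */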


/**
 * Notifies HM that the GIM provider wants to trap \#UD.
 *
 * @param pVCpu The cross context virtual CPU structure.
 */
VMM_INT_DECL(void) HMTrapXcptUDForGIMEnable(PVMCPU pVCpu)
{
    pVCpu->hm.s.fGIMTrapXcptUD = true;
    if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fSupported)
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS);
    else
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_SVM_GUEST_XCPT_INTERCEPTS);
}


/**
 * Notifies HM that the GIM provider no longer wants to trap \#UD.
 *
 * @param pVCpu The cross context virtual CPU structure.
 */
VMM_INT_DECL(void) HMTrapXcptUDForGIMDisable(PVMCPU pVCpu)
{
    pVCpu->hm.s.fGIMTrapXcptUD = false;
    if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fSupported)
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS);
    else
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_SVM_GUEST_XCPT_INTERCEPTS);
}


#ifndef IN_RC
/**
 * Notification callback which is called whenever there is a chance that a CR3
 * value might have changed.
 *
 * This is called by PGM.
 *
 * @param pVM The cross context VM structure.
 * @param pVCpu The cross context virtual CPU structure.
 * @param enmShadowMode New shadow paging mode.
 * @param enmGuestMode New guest paging mode.
 */
VMM_INT_DECL(void) HMHCPagingModeChanged(PVM pVM, PVMCPU pVCpu, PGMMODE enmShadowMode, PGMMODE enmGuestMode)
{
# ifdef IN_RING3
    /* Ignore page mode changes during state loading. */
    if (VMR3GetState(pVM) == VMSTATE_LOADING)
        return;
# endif

    pVCpu->hm.s.enmShadowMode = enmShadowMode;

    /*
     * If the guest left protected mode VMX execution, we'll have to be
     * extra careful if/when the guest switches back to protected mode.
     */
    if (enmGuestMode == PGMMODE_REAL)
        pVCpu->hm.s.vmx.fWasInRealMode = true;

# ifdef IN_RING0
    /*
     * We need to tickle SVM and VT-x state updates.
     *
     * Note! We could probably reduce this depending on what exactly changed.
     */
    if (VM_IS_HM_ENABLED(pVM))
    {
        CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER); /* No recursion! */
        uint64_t fChanged = HM_CHANGED_GUEST_CR0 | HM_CHANGED_GUEST_CR3 | HM_CHANGED_GUEST_CR4 | HM_CHANGED_GUEST_EFER_MSR;
        if (pVM->hm.s.svm.fSupported)
            fChanged |= HM_CHANGED_SVM_GUEST_XCPT_INTERCEPTS;
        else
            fChanged |= HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS | HM_CHANGED_VMX_ENTRY_CTLS | HM_CHANGED_VMX_EXIT_CTLS;
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, fChanged);
    }
# endif

    Log4(("HMHCPagingModeChanged: Guest paging mode '%s', shadow paging mode '%s'\n", PGMGetModeName(enmGuestMode),
          PGMGetModeName(enmShadowMode)));
}
#endif /* !IN_RC */
