/* $Id: HMAll.cpp 72462 2018-06-06 14:24:04Z vboxsync $ */
/** @file
 * HM - All contexts.
 */

/*
 * Copyright (C) 2006-2017 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_HM
#include <VBox/vmm/hm.h>
#include <VBox/vmm/pgm.h>
#include "HMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/hm_vmx.h>
#include <VBox/vmm/hm_svm.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <iprt/param.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/string.h>
#include <iprt/thread.h>
#include <iprt/x86.h>
#include <iprt/asm-amd64-x86.h>


/**
 * Checks whether HM (VT-x/AMD-V) is being used by this VM.
 *
 * @retval  true if used.
 * @retval  false if software virtualization (raw-mode) is used.
 * @param   pVM         The cross context VM structure.
 * @sa      HMIsEnabled, HMR3IsEnabled
 * @internal
 */
VMMDECL(bool) HMIsEnabledNotMacro(PVM pVM)
{
    Assert(pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NOT_SET);
    return pVM->fHMEnabled;
}


/**
 * Queues a guest page for invalidation.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCVirt      Page to invalidate.
 */
static void hmQueueInvlPage(PVMCPU pVCpu, RTGCPTR GCVirt)
{
    /* Nothing to do if a TLB flush is already pending. */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH))
        return;
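    /* Note: the specific page is not recorded here; outside ring-0 we simply
       request a full TLB flush on the next world switch, which is why GCVirt
       ends up unused below. */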
    VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    NOREF(GCVirt);
}


/**
 * Invalidates a guest page.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCVirt      Page to invalidate.
 */
VMM_INT_DECL(int) HMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt)
{
    STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushPageManual);
#ifdef IN_RING0
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    if (pVM->hm.s.vmx.fSupported)
        return VMXR0InvalidatePage(pVM, pVCpu, GCVirt);

    Assert(pVM->hm.s.svm.fSupported);
    return SVMR0InvalidatePage(pVM, pVCpu, GCVirt);

#else
    hmQueueInvlPage(pVCpu, GCVirt);
    return VINF_SUCCESS;
#endif
}


#ifdef IN_RING0

/**
 * Dummy RTMpOnSpecific handler since RTMpPokeCpu couldn't be used.
 */
static DECLCALLBACK(void) hmFlushHandler(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    NOREF(idCpu); NOREF(pvUser1); NOREF(pvUser2);
    return;
}


/**
 * Wrapper for RTMpPokeCpu to deal with VERR_NOT_SUPPORTED.
 */
static void hmR0PokeCpu(PVMCPU pVCpu, RTCPUID idHostCpu)
{
    uint32_t cWorldSwitchExits = ASMAtomicUoReadU32(&pVCpu->hm.s.cWorldSwitchExits);

    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatPoke, x);
    int rc = RTMpPokeCpu(idHostCpu);
    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPoke, x);

    /* Not implemented on some platforms (Darwin, Linux kernel < 2.6.19); fall
       back to a less efficient implementation (broadcast). */
    if (rc == VERR_NOT_SUPPORTED)
    {
        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatSpinPoke, z);
        /* synchronous. */
        RTMpOnSpecific(idHostCpu, hmFlushHandler, 0, 0);
        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatSpinPoke, z);
    }
    else
    {
        if (rc == VINF_SUCCESS)
            STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatSpinPoke, z);
        else
            STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatSpinPokeFailed, z);

        /** @todo If more than one CPU is going to be poked, we could optimize this
         *        operation by poking them first and waiting afterwards.  That would
         *        require recording who to poke and their current cWorldSwitchExits
         *        values, which is not suitable for the stack...  So,
         *        pVCpu->hm.s.something then. */
        /* Spin until the VCPU has switched back (poking is async). */
        while (   ASMAtomicUoReadBool(&pVCpu->hm.s.fCheckedTLBFlush)
               && cWorldSwitchExits == ASMAtomicUoReadU32(&pVCpu->hm.s.cWorldSwitchExits))
            ASMNopPause();

        if (rc == VINF_SUCCESS)
            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatSpinPoke, z);
        else
            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatSpinPokeFailed, z);
    }
}

#endif /* IN_RING0 */
#ifndef IN_RC
/**
 * Flushes the guest TLB.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
VMM_INT_DECL(int) HMFlushTLB(PVMCPU pVCpu)
{
    LogFlow(("HMFlushTLB\n"));

    VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbManual);
    return VINF_SUCCESS;
}

/**
 * Pokes an EMT so it can perform the appropriate TLB shootdowns.
 *
 * @param   pVCpu               The cross context virtual CPU structure of the
 *                              EMT to poke.
 * @param   fAccountFlushStat   Whether to account the call to
 *                              StatTlbShootdownFlush or StatTlbShootdown.
 */
static void hmPokeCpuForTlbFlush(PVMCPU pVCpu, bool fAccountFlushStat)
{
    if (ASMAtomicUoReadBool(&pVCpu->hm.s.fCheckedTLBFlush))
    {
        if (fAccountFlushStat)
            STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdownFlush);
        else
            STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
#ifdef IN_RING0
        RTCPUID idHostCpu = pVCpu->hm.s.idEnteredCpu;
        if (idHostCpu != NIL_RTCPUID)
            hmR0PokeCpu(pVCpu, idHostCpu);
#else
        VMR3NotifyCpuFFU(pVCpu->pUVCpu, VMNOTIFYFF_FLAGS_POKE);
#endif
    }
    else
        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushPageManual);
}


/**
 * Invalidates a guest page on all VCPUs.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCVirt      Page to invalidate.
 */
VMM_INT_DECL(int) HMInvalidatePageOnAllVCpus(PVM pVM, RTGCPTR GCVirt)
{
    /*
     * The VT-x/AMD-V code will flush the TLB each time a VCPU migrates to a different
     * host CPU, see hmR0VmxFlushTaggedTlbBoth() and hmR0SvmFlushTaggedTlb().
     *
     * This is the reason why we do not care about thread preemption here and just
     * execute HMInvalidatePage() assuming it might be the 'right' CPU.
     */
    VMCPUID idCurCpu = VMMGetCpuId(pVM);
    STAM_COUNTER_INC(&pVM->aCpus[idCurCpu].hm.s.StatFlushPage);

    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];

        /* Nothing to do if a TLB flush is already pending; the VCPU should
           have already been poked if it were active. */
        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH))
            continue;

        if (pVCpu->idCpu == idCurCpu)
            HMInvalidatePage(pVCpu, GCVirt);
        else
        {
            hmQueueInvlPage(pVCpu, GCVirt);
            hmPokeCpuForTlbFlush(pVCpu, false /* fAccountFlushStat */);
        }
    }

    return VINF_SUCCESS;
}


/**
 * Flushes the TLBs of all VCPUs.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 */
VMM_INT_DECL(int) HMFlushTLBOnAllVCpus(PVM pVM)
{
    if (pVM->cCpus == 1)
        return HMFlushTLB(&pVM->aCpus[0]);

    VMCPUID idThisCpu = VMMGetCpuId(pVM);

    STAM_COUNTER_INC(&pVM->aCpus[idThisCpu].hm.s.StatFlushTlb);

    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];

        /* Nothing to do if a TLB flush is already pending; the VCPU should
           have already been poked if it were active. */
        if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH))
        {
            VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
            if (idThisCpu != idCpu)
                hmPokeCpuForTlbFlush(pVCpu, true /* fAccountFlushStat */);
        }
    }

    return VINF_SUCCESS;
}


/**
 * Invalidates a guest page by physical address.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      Page to invalidate.
 *
 * @remarks Assumes the current instruction references this physical page
 *          through a virtual address!
 */
VMM_INT_DECL(int) HMInvalidatePhysPage(PVM pVM, RTGCPHYS GCPhys)
{
    if (!HMIsNestedPagingActive(pVM))
        return VINF_SUCCESS;

#ifdef IN_RING0
    if (pVM->hm.s.vmx.fSupported)
    {
        VMCPUID idThisCpu = VMMGetCpuId(pVM);

        for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
        {
            PVMCPU pVCpu = &pVM->aCpus[idCpu];

            if (idThisCpu == idCpu)
            {
                /** @todo r=ramshankar: Intel does not support flushing by guest physical
                 *        address either.  See comment in VMXR0InvalidatePhysPage().  Fix this. */
                VMXR0InvalidatePhysPage(pVM, pVCpu, GCPhys);
            }
            else
            {
                VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
                hmPokeCpuForTlbFlush(pVCpu, true /* fAccountFlushStat */);
            }
        }
        return VINF_SUCCESS;
    }

    /* AMD-V doesn't support invalidation with guest physical addresses; see
       comment in SVMR0InvalidatePhysPage(). */
    Assert(pVM->hm.s.svm.fSupported);
#else
    NOREF(GCPhys);
#endif

    HMFlushTLBOnAllVCpus(pVM);
    return VINF_SUCCESS;
}


/**
 * Checks if nested paging is enabled.
 *
 * @returns true if nested paging is active, false otherwise.
 * @param   pVM         The cross context VM structure.
 *
 * @remarks Works before hmR3InitFinalizeR0.
 */
VMM_INT_DECL(bool) HMIsNestedPagingActive(PVM pVM)
{
    return HMIsEnabled(pVM) && pVM->hm.s.fNestedPaging;
}


/**
 * Checks if both nested paging and unrestricted guest execution are enabled.
 *
 * Running the guest (almost) entirely in hardware is only a concern for VT-x,
 * which needs the unrestricted guest execution feature for it; AMD-V has no
 * such restriction.
 *
 * @returns true if we have both enabled, otherwise false.
 * @param   pVM         The cross context VM structure.
 *
 * @remarks Works before hmR3InitFinalizeR0.
 */
VMM_INT_DECL(bool) HMAreNestedPagingAndFullGuestExecEnabled(PVM pVM)
{
    return HMIsEnabled(pVM)
        && pVM->hm.s.fNestedPaging
        && (   pVM->hm.s.vmx.fUnrestrictedGuest
            || pVM->hm.s.svm.fSupported);
}


---|
359 | /**
|
---|
360 | * Checks if this VM is using HM and is long-mode capable.
|
---|
361 | *
|
---|
362 | * Use VMR3IsLongModeAllowed() instead of this, when possible.
|
---|
363 | *
|
---|
364 | * @returns true if long mode is allowed, false otherwise.
|
---|
365 | * @param pVM The cross context VM structure.
|
---|
366 | * @sa VMR3IsLongModeAllowed, NEMHCIsLongModeAllowed
|
---|
367 | */
|
---|
368 | VMM_INT_DECL(bool) HMIsLongModeAllowed(PVM pVM)
|
---|
369 | {
|
---|
370 | return HMIsEnabled(pVM) && pVM->hm.s.fAllow64BitGuests;
|
---|
371 | }
|
---|
372 |
|
---|
373 |
|
---|
374 | /**
|
---|
375 | * Checks if MSR bitmaps are available. It is assumed that when it's available
|
---|
376 | * it will be used as well.
|
---|
377 | *
|
---|
378 | * @returns true if MSR bitmaps are available, false otherwise.
|
---|
379 | * @param pVM The cross context VM structure.
|
---|
380 | */
|
---|
381 | VMM_INT_DECL(bool) HMAreMsrBitmapsAvailable(PVM pVM)
|
---|
382 | {
|
---|
383 | if (HMIsEnabled(pVM))
|
---|
384 | {
|
---|
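        /* AMD-V always provides MSR permission bitmaps (the MSRPM); they are part
           of the base SVM architecture rather than an optional feature. */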
        if (pVM->hm.s.svm.fSupported)
            return true;

        if (   pVM->hm.s.vmx.fSupported
            && (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
        {
            return true;
        }
    }
    return false;
}


---|
398 | /**
|
---|
399 | * Return the shadow paging mode for nested paging/ept
|
---|
400 | *
|
---|
401 | * @returns shadow paging mode
|
---|
402 | * @param pVM The cross context VM structure.
|
---|
403 | */
|
---|
404 | VMM_INT_DECL(PGMMODE) HMGetShwPagingMode(PVM pVM)
|
---|
405 | {
|
---|
406 | Assert(HMIsNestedPagingActive(pVM));
|
---|
407 | if (pVM->hm.s.svm.fSupported)
|
---|
408 | return PGMMODE_NESTED;
|
---|
409 |
|
---|
410 | Assert(pVM->hm.s.vmx.fSupported);
|
---|
411 | return PGMMODE_EPT;
|
---|
412 | }
|
---|
413 | #endif /* !IN_RC */
|
---|
414 |
|
---|
415 |
|
---|
/**
 * Checks if an interrupt event is currently pending.
 *
 * @returns Interrupt event pending state.
 * @param   pVM         The cross context VM structure.
 */
VMM_INT_DECL(bool) HMHasPendingIrq(PVM pVM)
{
    PVMCPU pVCpu = VMMGetCpu(pVM);
    return !!pVCpu->hm.s.Event.fPending;
}


/**
 * Return the PAE PDPE entries.
 *
 * @returns Pointer to the PAE PDPE array.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
VMM_INT_DECL(PX86PDPE) HMGetPaePdpes(PVMCPU pVCpu)
{
    return &pVCpu->hm.s.aPdpes[0];
}


/**
 * Checks if the current AMD CPU is subject to erratum 170 "In SVM mode,
 * incorrect code bytes may be fetched after a world-switch".
 *
 * @returns true if the erratum applies, false otherwise.
 * @param   pu32Family      Where to store the CPU family (can be NULL).
 * @param   pu32Model       Where to store the CPU model (can be NULL).
 * @param   pu32Stepping    Where to store the CPU stepping (can be NULL).
 */
VMM_INT_DECL(int) HMAmdIsSubjectToErratum170(uint32_t *pu32Family, uint32_t *pu32Model, uint32_t *pu32Stepping)
{
    /*
     * Erratum 170 requires a forced TLB flush for each world switch:
     * See AMD spec. "Revision Guide for AMD NPT Family 0Fh Processors".
     *
     * All BH-G1/2 and DH-G1/2 models include a fix:
     * Athlon X2:   0x6b  1/2
     *              0x68  1/2
     * Athlon 64:   0x7f  1
     *              0x6f  2
     * Sempron:     0x7f  1/2
     *              0x6f  2
     *              0x6c  2
     *              0x7c  2
     * Turion 64:   0x68  2
     */
    uint32_t u32Dummy;
    uint32_t u32Version, u32Family, u32Model, u32Stepping, u32BaseFamily;
    ASMCpuId(1, &u32Version, &u32Dummy, &u32Dummy, &u32Dummy);
    u32BaseFamily = (u32Version >> 8) & 0xf;
    u32Family     = u32BaseFamily + (u32BaseFamily == 0xf ? ((u32Version >> 20) & 0x7f) : 0);
    u32Model      = ((u32Version >> 4) & 0xf);
    u32Model      = u32Model | ((u32BaseFamily == 0xf ? (u32Version >> 16) & 0x0f : 0) << 4);
    u32Stepping   = u32Version & 0xf;
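    /* Example decode (illustrative raw CPUID.1.EAX value 0x00060fb1): base family
       0xf with extended model 0x6 and model 0xb gives family 0xf, model 0x6b,
       stepping 1, which is in the fixed-model list above, so the erratum would
       not apply. */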

    bool fErratumApplies = false;
    if (   u32Family == 0xf
        && !((u32Model == 0x68 || u32Model == 0x6b || u32Model == 0x7f) && u32Stepping >= 1)
        && !((u32Model == 0x6f || u32Model == 0x6c || u32Model == 0x7c) && u32Stepping >= 2))
    {
        fErratumApplies = true;
    }

    if (pu32Family)
        *pu32Family   = u32Family;
    if (pu32Model)
        *pu32Model    = u32Model;
    if (pu32Stepping)
        *pu32Stepping = u32Stepping;

    return fErratumApplies;
}


/**
 * Sets or clears the single instruction flag.
 *
 * When set, HM will try its best to return to ring-3 after executing a single
 * instruction.  This can be used for debugging.  See also
 * EMR3HmSingleInstruction.
 *
 * @returns The old flag state.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   fEnable     The new flag state.
 */
VMM_INT_DECL(bool) HMSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
{
    VMCPU_ASSERT_EMT(pVCpu);
    bool fOld = pVCpu->hm.s.fSingleInstruction;
    pVCpu->hm.s.fSingleInstruction = fEnable;
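    /* Use the debug execution loop while single-instruction mode is active, or
       keep using it if the VM is already configured to do so. */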
    pVCpu->hm.s.fUseDebugLoop = fEnable || pVM->hm.s.fUseDebugLoop;
    return fOld;
}


/**
 * Notifies HM that the GIM provider wants to trap \#UD.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 */
VMM_INT_DECL(void) HMTrapXcptUDForGIMEnable(PVMCPU pVCpu)
{
    pVCpu->hm.s.fGIMTrapXcptUD = true;
    HMCPU_CF_SET(pVCpu, HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS);
}


/**
 * Notifies HM that the GIM provider no longer wants to trap \#UD.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 */
VMM_INT_DECL(void) HMTrapXcptUDForGIMDisable(PVMCPU pVCpu)
{
    pVCpu->hm.s.fGIMTrapXcptUD = false;
    HMCPU_CF_SET(pVCpu, HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS);
}


/**
 * VMX nested-guest VM-exit handler.
 *
 * @param   pVCpu               The cross context virtual CPU structure.
 * @param   uBasicExitReason    The basic exit reason.
 */
VMM_INT_DECL(void) HMNstGstVmxVmExit(PVMCPU pVCpu, uint16_t uBasicExitReason)
{
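    /* Currently a stub: nested-guest VM-exit handling is not implemented here;
       the parameters are only referenced to avoid unused-parameter warnings. */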
    RT_NOREF2(pVCpu, uBasicExitReason);
}
