VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/NEMR3Native-darwin.cpp@92296

Last change on this file since 92296 was 92220, checked in by vboxsync, 3 years ago

VMM/NEMR3Native-darwin.cpp: Some early code to work with Apple's Hypervisor.framework (which is just a thin layer over VT-x actually...), not built by default and many things don't work (only running 32bit guests without SMP and other fancy stuff works to some extent, i.e. mostly DamnSmallLinux gets you somewhere), bugref:9044

1/* $Id: NEMR3Native-darwin.cpp 92220 2021-11-04 19:27:38Z vboxsync $ */
2/** @file
3 * NEM - Native execution manager, native ring-3 macOS backend using Hypervisor.framework.
4 *
5 * Log group 2: Exit logging.
6 * Log group 3: Log context on exit.
7 * Log group 5: Ring-3 memory management
8 */
9
10/*
11 * Copyright (C) 2020 Oracle Corporation
12 *
13 * This file is part of VirtualBox Open Source Edition (OSE), as
14 * available from http://www.virtualbox.org. This file is free software;
15 * you can redistribute it and/or modify it under the terms of the GNU
16 * General Public License (GPL) as published by the Free Software
17 * Foundation, in version 2 as it comes in the "COPYING" file of the
18 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
19 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
20 */
21
22
23/*********************************************************************************************************************************
24* Header Files *
25*********************************************************************************************************************************/
26#define LOG_GROUP LOG_GROUP_NEM
27#define VMCPU_INCL_CPUM_GST_CTX
28#include <Hypervisor/hv.h>
29#include <Hypervisor/hv_arch_x86.h>
30#include <Hypervisor/hv_arch_vmx.h>
31#include <Hypervisor/hv_vmx.h>
32
33#include <VBox/vmm/nem.h>
34#include <VBox/vmm/iem.h>
35#include <VBox/vmm/em.h>
36#include <VBox/vmm/apic.h>
37#include <VBox/vmm/pdm.h>
38#include <VBox/vmm/hm.h>
39#include <VBox/vmm/hm_vmx.h>
40#include <VBox/vmm/dbgftrace.h>
41#include "VMXInternal.h"
42#include "NEMInternal.h"
43#include <VBox/vmm/vmcc.h>
44#include "dtrace/VBoxVMM.h"
45
46#include <iprt/asm.h>
47#include <iprt/ldr.h>
48#include <iprt/path.h>
49#include <iprt/string.h>
50#include <iprt/system.h>
51#include <iprt/utf16.h>
52
53
54/*********************************************************************************************************************************
55* Defined Constants And Macros *
56*********************************************************************************************************************************/
57/* No nested hwvirt (for now). */
58#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
59# undef VBOX_WITH_NESTED_HWVIRT_VMX
60#endif
61
62
63/*********************************************************************************************************************************
64* Global Variables *
65*********************************************************************************************************************************/
66/** NEM_DARWIN_PAGE_STATE_XXX names. */
67NEM_TMPL_STATIC const char * const g_apszPageStates[4] = { "not-set", "unmapped", "readable", "writable" };
68/** MSRs. */
69SUPHWVIRTMSRS g_HmMsrs;
70
71
72/*********************************************************************************************************************************
73* Internal Functions *
74*********************************************************************************************************************************/
75
76/**
77 * Converts a HV return code to a VBox status code.
78 *
79 * @returns VBox status code.
80 * @param hrc The HV return code to convert.
81 */
82DECLINLINE(int) nemR3DarwinHvSts2Rc(hv_return_t hrc)
83{
84 if (hrc == HV_SUCCESS)
85 return VINF_SUCCESS;
86
87 switch (hrc)
88 {
89 case HV_ERROR: return VERR_INVALID_STATE;
90 case HV_BUSY: return VERR_RESOURCE_BUSY;
91 case HV_BAD_ARGUMENT: return VERR_INVALID_PARAMETER;
92 case HV_NO_RESOURCES: return VERR_OUT_OF_RESOURCES;
93 case HV_NO_DEVICE: return VERR_NOT_FOUND;
94 case HV_UNSUPPORTED: return VERR_NOT_SUPPORTED;
95 }
96
97 return VERR_IPE_UNEXPECTED_STATUS;
98}
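/*
 * Illustrative usage sketch (editor's addition, not part of the upstream file): any
 * Hypervisor.framework call can be funneled through this converter, e.g. when the VM
 * object is created during init (the real init path sits further down in the file):
 *
 *     hv_return_t hrc = hv_vm_create(HV_VM_DEFAULT);
 *     int rc = nemR3DarwinHvSts2Rc(hrc);
 *     if (RT_FAILURE(rc))
 *         return rc;
 */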
99
100
101/**
102 * Unmaps the given guest physical address range (page aligned).
103 *
104 * @returns VBox status code.
105 * @param GCPhys The guest physical address to start unmapping at.
106 * @param cb The size of the range to unmap in bytes.
107 */
108DECLINLINE(int) nemR3DarwinUnmap(RTGCPHYS GCPhys, size_t cb)
109{
110 LogFlowFunc(("Unmapping %RGp LB %zu\n", GCPhys, cb));
111 hv_return_t hrc = hv_vm_unmap(GCPhys, cb);
112 return nemR3DarwinHvSts2Rc(hrc);
113}
114
115
116/**
117 * Maps a given guest physical address range backed by the given memory with the given
118 * protection flags.
119 *
120 * @returns VBox status code.
121 * @param GCPhys The guest physical address to start mapping.
122 * @param pvRam The R3 pointer of the memory to back the range with.
123 * @param cb The size of the range, page aligned.
124 * @param fPageProt The page protection flags to use for this range, combination of NEM_PAGE_PROT_XXX
125 */
126DECLINLINE(int) nemR3DarwinMap(RTGCPHYS GCPhys, void *pvRam, size_t cb, uint32_t fPageProt)
127{
128 LogFlowFunc(("Mapping %RGp LB %zu fProt=%#x\n", GCPhys, cb, fPageProt));
129
130 hv_memory_flags_t fHvMemProt = 0;
131 if (fPageProt & NEM_PAGE_PROT_READ)
132 fHvMemProt |= HV_MEMORY_READ;
133 if (fPageProt & NEM_PAGE_PROT_WRITE)
134 fHvMemProt |= HV_MEMORY_WRITE;
135 if (fPageProt & NEM_PAGE_PROT_EXECUTE)
136 fHvMemProt |= HV_MEMORY_EXEC;
137
138 hv_return_t hrc = hv_vm_map(pvRam, GCPhys, cb, fHvMemProt);
139 return nemR3DarwinHvSts2Rc(hrc);
140}
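/*
 * Illustrative sketch (editor's addition): mapping a single guest page backed by a ring-3
 * buffer pvPage with full access would look like the call below; this mirrors what
 * nemHCNativeSetPhysPage() further down does for writable pages:
 *
 *     int rc = nemR3DarwinMap(GCPhys, pvPage, X86_PAGE_SIZE,
 *                             NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE);
 */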
141
142
143#if 0 /* unused */
144DECLINLINE(int) nemR3DarwinProtectPage(RTGCPHYS GCPhys, size_t cb, uint32_t fPageProt)
145{
146 hv_memory_flags_t fHvMemProt = 0;
147 if (fPageProt & NEM_PAGE_PROT_READ)
148 fHvMemProt |= HV_MEMORY_READ;
149 if (fPageProt & NEM_PAGE_PROT_WRITE)
150 fHvMemProt |= HV_MEMORY_WRITE;
151 if (fPageProt & NEM_PAGE_PROT_EXECUTE)
152 fHvMemProt |= HV_MEMORY_EXEC;
153
154 hv_return_t hrc = hv_vm_protect(GCPhys, cb, fHvMemProt);
155 return nemR3DarwinHvSts2Rc(hrc);
156}
157#endif
158
159
160DECLINLINE(int) nemR3NativeGCPhys2R3PtrReadOnly(PVM pVM, RTGCPHYS GCPhys, const void **ppv)
161{
162 PGMPAGEMAPLOCK Lock;
163 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, ppv, &Lock);
164 if (RT_SUCCESS(rc))
165 PGMPhysReleasePageMappingLock(pVM, &Lock);
166 return rc;
167}
168
169
170DECLINLINE(int) nemR3NativeGCPhys2R3PtrWriteable(PVM pVM, RTGCPHYS GCPhys, void **ppv)
171{
172 PGMPAGEMAPLOCK Lock;
173 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, ppv, &Lock);
174 if (RT_SUCCESS(rc))
175 PGMPhysReleasePageMappingLock(pVM, &Lock);
176 return rc;
177}
178
179
180/**
181 * Worker that maps pages into Hypervisor.framework.
182 *
183 * This is used by the PGM physical page notifications as well as the memory
184 * access VMEXIT handlers.
185 *
186 * @returns VBox status code.
187 * @param pVM The cross context VM structure.
188 * @param pVCpu The cross context virtual CPU structure of the
189 * calling EMT.
190 * @param GCPhysSrc The source page address.
191 * @param GCPhysDst The Hypervisor.framework destination page. This may differ from
192 * GCPhysSrc when A20 is disabled.
193 * @param fPageProt NEM_PAGE_PROT_XXX.
194 * @param pu2State Our page state (input/output).
195 * @param fBackingChanged Set if the page backing is being changed.
196 * @thread EMT(pVCpu)
197 */
198NEM_TMPL_STATIC int nemHCNativeSetPhysPage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
199 uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged)
200{
201 /*
202 * Looks like we need to unmap a page before we can change the backing
203 * or even modify the protection. This is going to be *REALLY* efficient.
204 * PGM lends us two bits to keep track of the state here.
205 */
206 RT_NOREF(pVCpu);
207 uint8_t const u2OldState = *pu2State;
208 uint8_t const u2NewState = fPageProt & NEM_PAGE_PROT_WRITE ? NEM_DARWIN_PAGE_STATE_WRITABLE
209 : fPageProt & NEM_PAGE_PROT_READ ? NEM_DARWIN_PAGE_STATE_READABLE : NEM_DARWIN_PAGE_STATE_UNMAPPED;
210 if ( fBackingChanged
211 || u2NewState != u2OldState)
212 {
213 if (u2OldState > NEM_DARWIN_PAGE_STATE_UNMAPPED)
214 {
215 int rc = nemR3DarwinUnmap(GCPhysDst, X86_PAGE_SIZE);
216 if (RT_SUCCESS(rc))
217 {
218 *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
219 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
220 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
221 if (u2NewState == NEM_DARWIN_PAGE_STATE_UNMAPPED)
222 {
223 Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n",
224 GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
225 return VINF_SUCCESS;
226 }
227 }
228 else
229 {
230 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
231 LogRel(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
232 return VERR_NEM_INIT_FAILED;
233 }
234 }
235 }
236
237 /*
238 * Writeable mapping?
239 */
240 if (fPageProt & NEM_PAGE_PROT_WRITE)
241 {
242 void *pvPage;
243 int rc = nemR3NativeGCPhys2R3PtrWriteable(pVM, GCPhysSrc, &pvPage);
244 if (RT_SUCCESS(rc))
245 {
246 rc = nemR3DarwinMap(GCPhysDst, pvPage, X86_PAGE_SIZE, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE);
247 if (RT_SUCCESS(rc))
248 {
249 *pu2State = NEM_DARWIN_PAGE_STATE_WRITABLE;
250 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPage);
251 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
252 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
253 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
254 return VINF_SUCCESS;
255 }
256 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
257 LogRel(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
258 return VERR_NEM_INIT_FAILED;
259 }
260 LogRel(("nemHCNativeSetPhysPage/writable: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
261 return rc;
262 }
263
264 if (fPageProt & NEM_PAGE_PROT_READ)
265 {
266 const void *pvPage;
267 int rc = nemR3NativeGCPhys2R3PtrReadOnly(pVM, GCPhysSrc, &pvPage);
268 if (RT_SUCCESS(rc))
269 {
270 rc = nemR3DarwinMap(GCPhysDst, (void *)pvPage, X86_PAGE_SIZE, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE);
271 if (RT_SUCCESS(rc))
272 {
273 *pu2State = NEM_DARWIN_PAGE_STATE_READABLE;
274 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPage);
275 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
276 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
277 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
278 return VINF_SUCCESS;
279 }
280 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
281 LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
282 return VERR_NEM_INIT_FAILED;
283 }
284 LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
285 return rc;
286 }
287
288 /* We already unmapped it above. */
289 *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
290 return VINF_SUCCESS;
291}
292
293
294#ifdef LOG_ENABLED
295/**
296 * Logs the current CPU state.
297 */
298static void nemR3DarwinLogState(PVMCC pVM, PVMCPUCC pVCpu)
299{
300 if (LogIs3Enabled())
301 {
302#if 0
303 char szRegs[4096];
304 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
305 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
306 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
307 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
308 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
309 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
310 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
311 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
312 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
313 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
314 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
315 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
316 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
317 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
318 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
319 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
320 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
321 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
322 " efer=%016VR{efer}\n"
323 " pat=%016VR{pat}\n"
324 " sf_mask=%016VR{sf_mask}\n"
325 "krnl_gs_base=%016VR{krnl_gs_base}\n"
326 " lstar=%016VR{lstar}\n"
327 " star=%016VR{star} cstar=%016VR{cstar}\n"
328 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
329 );
330
331 char szInstr[256];
332 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
333 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
334 szInstr, sizeof(szInstr), NULL);
335 Log3(("%s%s\n", szRegs, szInstr));
336#else
337 RT_NOREF(pVM, pVCpu);
338#endif
339 }
340}
341#endif /* LOG_ENABLED */
342
343
344DECLINLINE(int) nemR3DarwinReadVmcs16(PVMCPUCC pVCpu, uint32_t uFieldEnc, uint16_t *pData)
345{
346 uint64_t u64Data;
347 hv_return_t hrc = hv_vmx_vcpu_read_vmcs(pVCpu->nem.s.hVCpuId, uFieldEnc, &u64Data);
348 if (RT_LIKELY(hrc == HV_SUCCESS))
349 {
350 *pData = (uint16_t)u64Data;
351 return VINF_SUCCESS;
352 }
353
354 return nemR3DarwinHvSts2Rc(hrc);
355}
356
357
358DECLINLINE(int) nemR3DarwinReadVmcs32(PVMCPUCC pVCpu, uint32_t uFieldEnc, uint32_t *pData)
359{
360 uint64_t u64Data;
361 hv_return_t hrc = hv_vmx_vcpu_read_vmcs(pVCpu->nem.s.hVCpuId, uFieldEnc, &u64Data);
362 if (RT_LIKELY(hrc == HV_SUCCESS))
363 {
364 *pData = (uint32_t)u64Data;
365 return VINF_SUCCESS;
366 }
367
368 return nemR3DarwinHvSts2Rc(hrc);
369}
370
371
372DECLINLINE(int) nemR3DarwinReadVmcs64(PVMCPUCC pVCpu, uint32_t uFieldEnc, uint64_t *pData)
373{
374 hv_return_t hrc = hv_vmx_vcpu_read_vmcs(pVCpu->nem.s.hVCpuId, uFieldEnc, pData);
375 if (RT_LIKELY(hrc == HV_SUCCESS))
376 return VINF_SUCCESS;
377
378 return nemR3DarwinHvSts2Rc(hrc);
379}
380
381
382DECLINLINE(int) nemR3DarwinWriteVmcs16(PVMCPUCC pVCpu, uint32_t uFieldEnc, uint16_t u16Val)
383{
384 hv_return_t hrc = hv_vmx_vcpu_write_vmcs(pVCpu->nem.s.hVCpuId, uFieldEnc, u16Val);
385 if (RT_LIKELY(hrc == HV_SUCCESS))
386 return VINF_SUCCESS;
387
388 return nemR3DarwinHvSts2Rc(hrc);
389}
390
391
392DECLINLINE(int) nemR3DarwinWriteVmcs32(PVMCPUCC pVCpu, uint32_t uFieldEnc, uint32_t u32Val)
393{
394 hv_return_t hrc = hv_vmx_vcpu_write_vmcs(pVCpu->nem.s.hVCpuId, uFieldEnc, u32Val);
395 if (RT_LIKELY(hrc == HV_SUCCESS))
396 return VINF_SUCCESS;
397
398 return nemR3DarwinHvSts2Rc(hrc);
399}
400
401
402DECLINLINE(int) nemR3DarwinWriteVmcs64(PVMCPUCC pVCpu, uint32_t uFieldEnc, uint64_t u64Val)
403{
404 hv_return_t hrc = hv_vmx_vcpu_write_vmcs(pVCpu->nem.s.hVCpuId, uFieldEnc, u64Val);
405 if (RT_LIKELY(hrc == HV_SUCCESS))
406 return VINF_SUCCESS;
407
408 return nemR3DarwinHvSts2Rc(hrc);
409}
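/*
 * Illustrative sketch (editor's addition): these wrappers are used like the ring-0 VMCS
 * accessors, e.g. reading the basic exit reason after running the vCPU (see
 * nemR3DarwinHandleExit() below for the real use):
 *
 *     uint32_t uExitReason;
 *     int rc = nemR3DarwinReadVmcs32(pVCpu, VMX_VMCS32_RO_EXIT_REASON, &uExitReason);
 *     AssertRC(rc);
 */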
410
411
412static int nemR3DarwinCopyStateToHv(PVMCC pVM, PVMCPUCC pVCpu)
413{
414#define WRITE_GREG(a_GReg, a_Value) \
415 do \
416 { \
417 hrc = hv_vcpu_write_register(pVCpu->nem.s.hVCpuId, (a_GReg), (a_Value)); \
418 if (RT_LIKELY(hrc == HV_SUCCESS)) \
419 { /* likely */ } \
420 else \
421 return VERR_INTERNAL_ERROR; \
422 } while(0)
423#define WRITE_VMCS_FIELD(a_Field, a_Value) \
424 do \
425 { \
426 hrc = hv_vmx_vcpu_write_vmcs(pVCpu->nem.s.hVCpuId, (a_Field), (a_Value)); \
427 if (RT_LIKELY(hrc == HV_SUCCESS)) \
428 { /* likely */ } \
429 else \
430 return VERR_INTERNAL_ERROR; \
431 } while(0)
432
433 RT_NOREF(pVM);
434
435 uint64_t const fWhat = ~pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK);
436 if (!fWhat)
437 return VINF_SUCCESS;
438
439 hv_return_t hrc;
440 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
441 {
442 if (fWhat & CPUMCTX_EXTRN_RAX)
443 WRITE_GREG(HV_X86_RAX, pVCpu->cpum.GstCtx.rax);
444 if (fWhat & CPUMCTX_EXTRN_RCX)
445 WRITE_GREG(HV_X86_RCX, pVCpu->cpum.GstCtx.rcx);
446 if (fWhat & CPUMCTX_EXTRN_RDX)
447 WRITE_GREG(HV_X86_RDX, pVCpu->cpum.GstCtx.rdx);
448 if (fWhat & CPUMCTX_EXTRN_RBX)
449 WRITE_GREG(HV_X86_RBX, pVCpu->cpum.GstCtx.rbx);
450 if (fWhat & CPUMCTX_EXTRN_RSP)
451 WRITE_GREG(HV_X86_RSP, pVCpu->cpum.GstCtx.rsp);
452 if (fWhat & CPUMCTX_EXTRN_RBP)
453 WRITE_GREG(HV_X86_RBP, pVCpu->cpum.GstCtx.rbp);
454 if (fWhat & CPUMCTX_EXTRN_RSI)
455 WRITE_GREG(HV_X86_RSI, pVCpu->cpum.GstCtx.rsi);
456 if (fWhat & CPUMCTX_EXTRN_RDI)
457 WRITE_GREG(HV_X86_RDI, pVCpu->cpum.GstCtx.rdi);
458 if (fWhat & CPUMCTX_EXTRN_R8_R15)
459 {
460 WRITE_GREG(HV_X86_R8, pVCpu->cpum.GstCtx.r8);
461 WRITE_GREG(HV_X86_R9, pVCpu->cpum.GstCtx.r9);
462 WRITE_GREG(HV_X86_R10, pVCpu->cpum.GstCtx.r10);
463 WRITE_GREG(HV_X86_R11, pVCpu->cpum.GstCtx.r11);
464 WRITE_GREG(HV_X86_R12, pVCpu->cpum.GstCtx.r12);
465 WRITE_GREG(HV_X86_R13, pVCpu->cpum.GstCtx.r13);
466 WRITE_GREG(HV_X86_R14, pVCpu->cpum.GstCtx.r14);
467 WRITE_GREG(HV_X86_R15, pVCpu->cpum.GstCtx.r15);
468 }
469 }
470
471 /* RIP & Flags */
472 if (fWhat & CPUMCTX_EXTRN_RIP)
473 WRITE_GREG(HV_X86_RIP, pVCpu->cpum.GstCtx.rip);
474 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
475 WRITE_GREG(HV_X86_RFLAGS, pVCpu->cpum.GstCtx.rflags.u);
476
477 /* Segments */
478#define ADD_SEG(a_enmName, a_SReg) \
479 do { \
480 WRITE_VMCS_FIELD(VMX_VMCS16_GUEST_ ## a_enmName ## _SEL, (a_SReg).Sel); \
481 WRITE_VMCS_FIELD(VMX_VMCS32_GUEST_ ## a_enmName ## _LIMIT, (a_SReg).u32Limit); \
482 WRITE_VMCS_FIELD(VMX_VMCS32_GUEST_ ## a_enmName ## _ACCESS_RIGHTS, (a_SReg).Attr.u); \
483 WRITE_VMCS_FIELD(VMX_VMCS_GUEST_ ## a_enmName ## _BASE, (a_SReg).u64Base); \
484 } while (0)
485 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
486 {
487 if (fWhat & CPUMCTX_EXTRN_ES)
488 ADD_SEG(ES, pVCpu->cpum.GstCtx.es);
489 if (fWhat & CPUMCTX_EXTRN_CS)
490 ADD_SEG(CS, pVCpu->cpum.GstCtx.cs);
491 if (fWhat & CPUMCTX_EXTRN_SS)
492 ADD_SEG(SS, pVCpu->cpum.GstCtx.ss);
493 if (fWhat & CPUMCTX_EXTRN_DS)
494 ADD_SEG(DS, pVCpu->cpum.GstCtx.ds);
495 if (fWhat & CPUMCTX_EXTRN_FS)
496 ADD_SEG(FS, pVCpu->cpum.GstCtx.fs);
497 if (fWhat & CPUMCTX_EXTRN_GS)
498 ADD_SEG(GS, pVCpu->cpum.GstCtx.gs);
499 }
500
501 /* Descriptor tables & task segment. */
502 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
503 {
504 if (fWhat & CPUMCTX_EXTRN_LDTR)
505 ADD_SEG(LDTR, pVCpu->cpum.GstCtx.ldtr);
506 if (fWhat & CPUMCTX_EXTRN_TR)
507 ADD_SEG(TR, pVCpu->cpum.GstCtx.tr);
508 if (fWhat & CPUMCTX_EXTRN_IDTR)
509 {
510 WRITE_VMCS_FIELD(VMCS_GUEST_IDTR_LIMIT, pVCpu->cpum.GstCtx.idtr.cbIdt);
511 WRITE_VMCS_FIELD(VMCS_GUEST_IDTR_BASE, pVCpu->cpum.GstCtx.idtr.pIdt);
512 }
513 if (fWhat & CPUMCTX_EXTRN_GDTR)
514 {
515 WRITE_VMCS_FIELD(VMCS_GUEST_GDTR_LIMIT, pVCpu->cpum.GstCtx.gdtr.cbGdt);
516 WRITE_VMCS_FIELD(VMCS_GUEST_GDTR_BASE, pVCpu->cpum.GstCtx.gdtr.pGdt);
517 }
518 }
519
520 /* Control registers. */
521 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
522 {
523 if (fWhat & CPUMCTX_EXTRN_CR0)
524 {
525 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
526
527 /* Apply the hardware specified CR0 fixed bits and enable caching. */
528 u64GuestCr0 |= VMX_V_CR0_FIXED0_UX;
529 u64GuestCr0 &= ~0;
530 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
531 WRITE_GREG(HV_X86_CR0, u64GuestCr0);
532 }
533 if (fWhat & CPUMCTX_EXTRN_CR2)
534 WRITE_GREG(HV_X86_CR2, pVCpu->cpum.GstCtx.cr2);
535 if (fWhat & CPUMCTX_EXTRN_CR3)
536 WRITE_GREG(HV_X86_CR3, pVCpu->cpum.GstCtx.cr3);
537 if (fWhat & CPUMCTX_EXTRN_CR4)
538 {
539 uint64_t u64GuestCr4 = pVCpu->cpum.GstCtx.cr4;
540
541 u64GuestCr4 |= VMX_V_CR4_FIXED0;
542 u64GuestCr4 &= ~0;
543
544 WRITE_GREG(HV_X86_CR4, u64GuestCr4);
545 }
546 }
547 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
548 WRITE_GREG(HV_X86_TPR, CPUMGetGuestCR8(pVCpu));
549
550 /* Debug registers. */
551 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
552 {
553 WRITE_GREG(HV_X86_DR0, pVCpu->cpum.GstCtx.dr[0]); // CPUMGetHyperDR0(pVCpu));
554 WRITE_GREG(HV_X86_DR1, pVCpu->cpum.GstCtx.dr[1]); // CPUMGetHyperDR1(pVCpu));
555 WRITE_GREG(HV_X86_DR2, pVCpu->cpum.GstCtx.dr[2]); // CPUMGetHyperDR2(pVCpu));
556 WRITE_GREG(HV_X86_DR3, pVCpu->cpum.GstCtx.dr[3]); // CPUMGetHyperDR3(pVCpu));
557 }
558 if (fWhat & CPUMCTX_EXTRN_DR6)
559 WRITE_GREG(HV_X86_DR6, pVCpu->cpum.GstCtx.dr[6]); // CPUMGetHyperDR6(pVCpu));
560 if (fWhat & CPUMCTX_EXTRN_DR7)
561 WRITE_GREG(HV_X86_DR7, pVCpu->cpum.GstCtx.dr[7]); // CPUMGetHyperDR7(pVCpu));
562
563 /* MSRs */
564 // WHvX64RegisterTsc - don't touch
565 if (fWhat & CPUMCTX_EXTRN_EFER)
566 WRITE_VMCS_FIELD(VMCS_GUEST_IA32_EFER, pVCpu->cpum.GstCtx.msrEFER);
567#if 0
568 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
569 ADD_REG64(WHvX64RegisterKernelGsBase, pVCpu->cpum.GstCtx.msrKERNELGSBASE);
570 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
571 {
572 ADD_REG64(WHvX64RegisterSysenterCs, pVCpu->cpum.GstCtx.SysEnter.cs);
573 ADD_REG64(WHvX64RegisterSysenterEip, pVCpu->cpum.GstCtx.SysEnter.eip);
574 ADD_REG64(WHvX64RegisterSysenterEsp, pVCpu->cpum.GstCtx.SysEnter.esp);
575 }
576 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
577 {
578 ADD_REG64(WHvX64RegisterStar, pVCpu->cpum.GstCtx.msrSTAR);
579 ADD_REG64(WHvX64RegisterLstar, pVCpu->cpum.GstCtx.msrLSTAR);
580 ADD_REG64(WHvX64RegisterCstar, pVCpu->cpum.GstCtx.msrCSTAR);
581 ADD_REG64(WHvX64RegisterSfmask, pVCpu->cpum.GstCtx.msrSFMASK);
582 }
583 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
584 {
585 ADD_REG64(WHvX64RegisterApicBase, APICGetBaseMsrNoCheck(pVCpu));
586 ADD_REG64(WHvX64RegisterPat, pVCpu->cpum.GstCtx.msrPAT);
587#if 0 /** @todo check if WHvX64RegisterMsrMtrrCap works here... */
588 ADD_REG64(WHvX64RegisterMsrMtrrCap, CPUMGetGuestIa32MtrrCap(pVCpu));
589#endif
590 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
591 ADD_REG64(WHvX64RegisterMsrMtrrDefType, pCtxMsrs->msr.MtrrDefType);
592 ADD_REG64(WHvX64RegisterMsrMtrrFix64k00000, pCtxMsrs->msr.MtrrFix64K_00000);
593 ADD_REG64(WHvX64RegisterMsrMtrrFix16k80000, pCtxMsrs->msr.MtrrFix16K_80000);
594 ADD_REG64(WHvX64RegisterMsrMtrrFix16kA0000, pCtxMsrs->msr.MtrrFix16K_A0000);
595 ADD_REG64(WHvX64RegisterMsrMtrrFix4kC0000, pCtxMsrs->msr.MtrrFix4K_C0000);
596 ADD_REG64(WHvX64RegisterMsrMtrrFix4kC8000, pCtxMsrs->msr.MtrrFix4K_C8000);
597 ADD_REG64(WHvX64RegisterMsrMtrrFix4kD0000, pCtxMsrs->msr.MtrrFix4K_D0000);
598 ADD_REG64(WHvX64RegisterMsrMtrrFix4kD8000, pCtxMsrs->msr.MtrrFix4K_D8000);
599 ADD_REG64(WHvX64RegisterMsrMtrrFix4kE0000, pCtxMsrs->msr.MtrrFix4K_E0000);
600 ADD_REG64(WHvX64RegisterMsrMtrrFix4kE8000, pCtxMsrs->msr.MtrrFix4K_E8000);
601 ADD_REG64(WHvX64RegisterMsrMtrrFix4kF0000, pCtxMsrs->msr.MtrrFix4K_F0000);
602 ADD_REG64(WHvX64RegisterMsrMtrrFix4kF8000, pCtxMsrs->msr.MtrrFix4K_F8000);
603 ADD_REG64(WHvX64RegisterTscAux, pCtxMsrs->msr.TscAux);
604#if 0 /** @todo these registers aren't available? Might explain something.. .*/
605 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pVM);
606 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
607 {
608 ADD_REG64(HvX64RegisterIa32MiscEnable, pCtxMsrs->msr.MiscEnable);
609 ADD_REG64(HvX64RegisterIa32FeatureControl, CPUMGetGuestIa32FeatureControl(pVCpu));
610 }
611#endif
612 }
613#endif
614
615 WRITE_VMCS_FIELD(VMX_VMCS_CTRL_CR0_MASK, 0x60000000);
616 WRITE_VMCS_FIELD(VMX_VMCS_CTRL_CR0_READ_SHADOW, 0x00000000);
617
618 WRITE_VMCS_FIELD(VMX_VMCS_CTRL_CR4_MASK, VMX_V_CR4_FIXED0);
619 WRITE_VMCS_FIELD(VMX_VMCS_CTRL_CR4_READ_SHADOW, 0);
620
621 WRITE_VMCS_FIELD(VMX_VMCS64_GUEST_DEBUGCTL_FULL, MSR_IA32_DEBUGCTL_LBR);
622
623#if 0 /** @todo */
624 WRITE_GREG(HV_X86_TSS_BASE, );
625 WRITE_GREG(HV_X86_TSS_LIMIT, );
626 WRITE_GREG(HV_X86_TSS_AR, );
627 WRITE_GREG(HV_X86_XCR0, );
628#endif
629
630 hv_vcpu_invalidate_tlb(pVCpu->nem.s.hVCpuId);
631 hv_vcpu_flush(pVCpu->nem.s.hVCpuId);
632
633 pVCpu->cpum.GstCtx.fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM;
634 return VINF_SUCCESS;
635#undef WRITE_GREG
636#undef WRITE_VMCS_FIELD
637}
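/*
 * Illustrative sketch (editor's assumption about the run loop, which sits below this excerpt):
 * the dirty guest state is pushed into the vCPU registers/VMCS right before entering the guest:
 *
 *     int rc = nemR3DarwinCopyStateToHv(pVM, pVCpu);
 *     AssertRCReturn(rc, rc);
 *     hv_return_t hrc = hv_vcpu_run(pVCpu->nem.s.hVCpuId);
 *     if (hrc == HV_SUCCESS)
 *         rcStrict = nemR3DarwinHandleExit(pVM, pVCpu);
 */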
638
639
640static int nemR3DarwinCopyStateFromHv(PVMCC pVM, PVMCPUCC pVCpu, uint64_t fWhat)
641{
642#define READ_GREG(a_GReg, a_Value) \
643 do \
644 { \
645 hrc = hv_vcpu_read_register(pVCpu->nem.s.hVCpuId, (a_GReg), &(a_Value)); \
646 if (RT_LIKELY(hrc == HV_SUCCESS)) \
647 { /* likely */ } \
648 else \
649 return VERR_INTERNAL_ERROR; \
650 } while(0)
651#define READ_VMCS_FIELD(a_Field, a_Value) \
652 do \
653 { \
654 hrc = hv_vmx_vcpu_read_vmcs(pVCpu->nem.s.hVCpuId, (a_Field), &(a_Value)); \
655 if (RT_LIKELY(hrc == HV_SUCCESS)) \
656 { /* likely */ } \
657 else \
658 return VERR_INTERNAL_ERROR; \
659 } while(0)
660#define READ_VMCS16_FIELD(a_Field, a_Value) \
661 do \
662 { \
663 uint64_t u64Data; \
664 hrc = hv_vmx_vcpu_read_vmcs(pVCpu->nem.s.hVCpuId, (a_Field), &u64Data); \
665 if (RT_LIKELY(hrc == HV_SUCCESS)) \
666 { (a_Value) = (uint16_t)u64Data; } \
667 else \
668 return VERR_INTERNAL_ERROR; \
669 } while(0)
670#define READ_VMCS32_FIELD(a_Field, a_Value) \
671 do \
672 { \
673 uint64_t u64Data; \
674 hrc = hv_vmx_vcpu_read_vmcs(pVCpu->nem.s.hVCpuId, (a_Field), &u64Data); \
675 if (RT_LIKELY(hrc == HV_SUCCESS)) \
676 { (a_Value) = (uint32_t)u64Data; } \
677 else \
678 return VERR_INTERNAL_ERROR; \
679 } while(0)
680
681 RT_NOREF(pVM);
682 fWhat &= pVCpu->cpum.GstCtx.fExtrn;
683
684 /* GPRs */
685 hv_return_t hrc;
686 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
687 {
688 if (fWhat & CPUMCTX_EXTRN_RAX)
689 READ_GREG(HV_X86_RAX, pVCpu->cpum.GstCtx.rax);
690 if (fWhat & CPUMCTX_EXTRN_RCX)
691 READ_GREG(HV_X86_RCX, pVCpu->cpum.GstCtx.rcx);
692 if (fWhat & CPUMCTX_EXTRN_RDX)
693 READ_GREG(HV_X86_RDX, pVCpu->cpum.GstCtx.rdx);
694 if (fWhat & CPUMCTX_EXTRN_RBX)
695 READ_GREG(HV_X86_RBX, pVCpu->cpum.GstCtx.rbx);
696 if (fWhat & CPUMCTX_EXTRN_RSP)
697 READ_GREG(HV_X86_RSP, pVCpu->cpum.GstCtx.rsp);
698 if (fWhat & CPUMCTX_EXTRN_RBP)
699 READ_GREG(HV_X86_RBP, pVCpu->cpum.GstCtx.rbp);
700 if (fWhat & CPUMCTX_EXTRN_RSI)
701 READ_GREG(HV_X86_RSI, pVCpu->cpum.GstCtx.rsi);
702 if (fWhat & CPUMCTX_EXTRN_RDI)
703 READ_GREG(HV_X86_RDI, pVCpu->cpum.GstCtx.rdi);
704 if (fWhat & CPUMCTX_EXTRN_R8_R15)
705 {
706 READ_GREG(HV_X86_R8, pVCpu->cpum.GstCtx.r8);
707 READ_GREG(HV_X86_R9, pVCpu->cpum.GstCtx.r9);
708 READ_GREG(HV_X86_R10, pVCpu->cpum.GstCtx.r10);
709 READ_GREG(HV_X86_R11, pVCpu->cpum.GstCtx.r11);
710 READ_GREG(HV_X86_R12, pVCpu->cpum.GstCtx.r12);
711 READ_GREG(HV_X86_R13, pVCpu->cpum.GstCtx.r13);
712 READ_GREG(HV_X86_R14, pVCpu->cpum.GstCtx.r14);
713 READ_GREG(HV_X86_R15, pVCpu->cpum.GstCtx.r15);
714 }
715 }
716
717 /* RIP & Flags */
718 if (fWhat & CPUMCTX_EXTRN_RIP)
719 READ_GREG(HV_X86_RIP, pVCpu->cpum.GstCtx.rip);
720 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
721 READ_GREG(HV_X86_RFLAGS, pVCpu->cpum.GstCtx.rflags.u);
722
723 /* Segments */
724#define READ_SEG(a_SReg, a_enmName) \
725 do { \
726 READ_VMCS16_FIELD(VMX_VMCS16_GUEST_ ## a_enmName ## _SEL, (a_SReg).Sel); \
727 READ_VMCS32_FIELD(VMX_VMCS32_GUEST_ ## a_enmName ## _LIMIT, (a_SReg).u32Limit); \
728 READ_VMCS32_FIELD(VMX_VMCS32_GUEST_ ## a_enmName ## _ACCESS_RIGHTS, (a_SReg).Attr.u); \
729 READ_VMCS_FIELD(VMX_VMCS_GUEST_ ## a_enmName ## _BASE, (a_SReg).u64Base); \
730 (a_SReg).ValidSel = (a_SReg).Sel; \
731 } while (0)
732 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
733 {
734 if (fWhat & CPUMCTX_EXTRN_ES)
735 READ_SEG(pVCpu->cpum.GstCtx.es, ES);
736 if (fWhat & CPUMCTX_EXTRN_CS)
737 READ_SEG(pVCpu->cpum.GstCtx.cs, CS);
738 if (fWhat & CPUMCTX_EXTRN_SS)
739 READ_SEG(pVCpu->cpum.GstCtx.ss, SS);
740 if (fWhat & CPUMCTX_EXTRN_DS)
741 READ_SEG(pVCpu->cpum.GstCtx.ds, DS);
742 if (fWhat & CPUMCTX_EXTRN_FS)
743 READ_SEG(pVCpu->cpum.GstCtx.fs, FS);
744 if (fWhat & CPUMCTX_EXTRN_GS)
745 READ_SEG(pVCpu->cpum.GstCtx.gs, GS);
746 }
747
748 /* Descriptor tables and the task segment. */
749 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
750 {
751 if (fWhat & CPUMCTX_EXTRN_LDTR)
752 READ_SEG(pVCpu->cpum.GstCtx.ldtr, LDTR);
753
754 if (fWhat & CPUMCTX_EXTRN_TR)
755 {
756 /* AMD-V likes loading TR with an AVAIL state, whereas Intel insists on BUSY. So, to
757 avoid triggering sanity assertions around the code, always fix this up. */
758 READ_SEG(pVCpu->cpum.GstCtx.tr, TR);
759 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
760 {
761 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
762 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
763 break;
764 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
765 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
766 break;
767 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
768 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;
769 break;
770 }
771 }
772 if (fWhat & CPUMCTX_EXTRN_IDTR)
773 {
774 READ_VMCS32_FIELD(VMCS_GUEST_IDTR_LIMIT, pVCpu->cpum.GstCtx.idtr.cbIdt);
775 READ_VMCS_FIELD(VMCS_GUEST_IDTR_BASE, pVCpu->cpum.GstCtx.idtr.pIdt);
776 }
777 if (fWhat & CPUMCTX_EXTRN_GDTR)
778 {
779 READ_VMCS32_FIELD(VMCS_GUEST_GDTR_LIMIT, pVCpu->cpum.GstCtx.gdtr.cbGdt);
780 READ_VMCS_FIELD(VMCS_GUEST_GDTR_BASE, pVCpu->cpum.GstCtx.gdtr.pGdt);
781 }
782 }
783
784 /* Control registers. */
785 bool fMaybeChangedMode = false;
786 bool fUpdateCr3 = false;
787 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
788 {
789 uint64_t u64CrTmp = 0;
790
791 if (fWhat & CPUMCTX_EXTRN_CR0)
792 {
793 READ_GREG(HV_X86_CR0, u64CrTmp);
794 if (pVCpu->cpum.GstCtx.cr0 != u64CrTmp)
795 {
796 CPUMSetGuestCR0(pVCpu, u64CrTmp);
797 fMaybeChangedMode = true;
798 }
799 }
800 if (fWhat & CPUMCTX_EXTRN_CR2)
801 READ_GREG(HV_X86_CR2, pVCpu->cpum.GstCtx.cr2);
802 if (fWhat & CPUMCTX_EXTRN_CR3)
803 {
804 READ_GREG(HV_X86_CR3, u64CrTmp);
805 if (pVCpu->cpum.GstCtx.cr3 != u64CrTmp)
806 {
807 CPUMSetGuestCR3(pVCpu, u64CrTmp);
808 fUpdateCr3 = true;
809 }
810 }
811 if (fWhat & CPUMCTX_EXTRN_CR4)
812 {
813 READ_GREG(HV_X86_CR4, u64CrTmp);
814 u64CrTmp &= ~VMX_V_CR4_FIXED0;
815
816 if (pVCpu->cpum.GstCtx.cr4 != u64CrTmp)
817 {
818 CPUMSetGuestCR4(pVCpu, u64CrTmp);
819 fMaybeChangedMode = true;
820 }
821 }
822 }
823 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
824 {
825 uint64_t u64Cr8 = 0;
826
827 READ_GREG(HV_X86_TPR, u64Cr8);
828 APICSetTpr(pVCpu, u64Cr8);
829 }
830
831 /* Debug registers. */
832 if (fWhat & CPUMCTX_EXTRN_DR7)
833 {
834 uint64_t u64Dr7;
835 READ_GREG(HV_X86_DR7, u64Dr7);
836 if (pVCpu->cpum.GstCtx.dr[7] != u64Dr7)
837 CPUMSetGuestDR7(pVCpu, u64Dr7);
838 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_DR7; /* Hack alert! Avoids asserting when processing CPUMCTX_EXTRN_DR0_DR3. */
839 }
840 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
841 {
842 uint64_t u64DrTmp;
843
844 READ_GREG(HV_X86_DR0, u64DrTmp);
845 if (pVCpu->cpum.GstCtx.dr[0] != u64DrTmp)
846 CPUMSetGuestDR0(pVCpu, u64DrTmp);
847 READ_GREG(HV_X86_DR1, u64DrTmp);
848 if (pVCpu->cpum.GstCtx.dr[1] != u64DrTmp)
849 CPUMSetGuestDR1(pVCpu, u64DrTmp);
850 READ_GREG(HV_X86_DR2, u64DrTmp);
851 if (pVCpu->cpum.GstCtx.dr[2] != u64DrTmp)
852 CPUMSetGuestDR2(pVCpu, u64DrTmp);
853 READ_GREG(HV_X86_DR3, u64DrTmp);
854 if (pVCpu->cpum.GstCtx.dr[3] != u64DrTmp)
855 CPUMSetGuestDR3(pVCpu, u64DrTmp);
856 }
857 if (fWhat & CPUMCTX_EXTRN_DR6)
858 {
859 uint64_t u64Dr6;
860 READ_GREG(HV_X86_DR6, u64Dr6);
861 if (pVCpu->cpum.GstCtx.dr[6] != u64Dr6)
862 CPUMSetGuestDR6(pVCpu, u64Dr6);
863 }
864
865#if 0
866 /* Floating point state. */
867 if (fWhat & CPUMCTX_EXTRN_X87)
868 {
869 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aRegs[0].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[0].au64[1], WHvX64RegisterFpMmx0);
870 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aRegs[1].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[1].au64[1], WHvX64RegisterFpMmx1);
871 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aRegs[2].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[2].au64[1], WHvX64RegisterFpMmx2);
872 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aRegs[3].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[3].au64[1], WHvX64RegisterFpMmx3);
873 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aRegs[4].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[4].au64[1], WHvX64RegisterFpMmx4);
874 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aRegs[5].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[5].au64[1], WHvX64RegisterFpMmx5);
875 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aRegs[6].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[6].au64[1], WHvX64RegisterFpMmx6);
876 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aRegs[7].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[7].au64[1], WHvX64RegisterFpMmx7);
877
878 Assert(aenmNames[iReg] == WHvX64RegisterFpControlStatus);
879 pVCpu->cpum.GstCtx.XState.x87.FCW = aValues[iReg].FpControlStatus.FpControl;
880 pVCpu->cpum.GstCtx.XState.x87.FSW = aValues[iReg].FpControlStatus.FpStatus;
881 pVCpu->cpum.GstCtx.XState.x87.FTW = aValues[iReg].FpControlStatus.FpTag
882 /*| (aValues[iReg].FpControlStatus.Reserved << 8)*/;
883 pVCpu->cpum.GstCtx.XState.x87.FOP = aValues[iReg].FpControlStatus.LastFpOp;
884 pVCpu->cpum.GstCtx.XState.x87.FPUIP = (uint32_t)aValues[iReg].FpControlStatus.LastFpRip;
885 pVCpu->cpum.GstCtx.XState.x87.CS = (uint16_t)(aValues[iReg].FpControlStatus.LastFpRip >> 32);
886 pVCpu->cpum.GstCtx.XState.x87.Rsrvd1 = (uint16_t)(aValues[iReg].FpControlStatus.LastFpRip >> 48);
887 iReg++;
888 }
889
890 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
891 {
892 Assert(aenmNames[iReg] == WHvX64RegisterXmmControlStatus);
893 if (fWhat & CPUMCTX_EXTRN_X87)
894 {
895 pVCpu->cpum.GstCtx.XState.x87.FPUDP = (uint32_t)aValues[iReg].XmmControlStatus.LastFpRdp;
896 pVCpu->cpum.GstCtx.XState.x87.DS = (uint16_t)(aValues[iReg].XmmControlStatus.LastFpRdp >> 32);
897 pVCpu->cpum.GstCtx.XState.x87.Rsrvd2 = (uint16_t)(aValues[iReg].XmmControlStatus.LastFpRdp >> 48);
898 }
899 pVCpu->cpum.GstCtx.XState.x87.MXCSR = aValues[iReg].XmmControlStatus.XmmStatusControl;
900 pVCpu->cpum.GstCtx.XState.x87.MXCSR_MASK = aValues[iReg].XmmControlStatus.XmmStatusControlMask; /** @todo ??? (Isn't this an output field?) */
901 iReg++;
902 }
903
904 /* Vector state. */
905 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
906 {
907 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 0].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 0].uXmm.s.Hi, WHvX64RegisterXmm0);
908 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 1].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 1].uXmm.s.Hi, WHvX64RegisterXmm1);
909 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 2].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 2].uXmm.s.Hi, WHvX64RegisterXmm2);
910 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 3].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 3].uXmm.s.Hi, WHvX64RegisterXmm3);
911 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 4].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 4].uXmm.s.Hi, WHvX64RegisterXmm4);
912 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 5].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 5].uXmm.s.Hi, WHvX64RegisterXmm5);
913 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 6].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 6].uXmm.s.Hi, WHvX64RegisterXmm6);
914 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 7].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 7].uXmm.s.Hi, WHvX64RegisterXmm7);
915 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 8].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 8].uXmm.s.Hi, WHvX64RegisterXmm8);
916 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 9].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 9].uXmm.s.Hi, WHvX64RegisterXmm9);
917 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[10].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[10].uXmm.s.Hi, WHvX64RegisterXmm10);
918 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[11].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[11].uXmm.s.Hi, WHvX64RegisterXmm11);
919 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[12].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[12].uXmm.s.Hi, WHvX64RegisterXmm12);
920 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[13].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[13].uXmm.s.Hi, WHvX64RegisterXmm13);
921 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[14].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[14].uXmm.s.Hi, WHvX64RegisterXmm14);
922 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[15].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[15].uXmm.s.Hi, WHvX64RegisterXmm15);
923 }
924#endif
925
926 /* MSRs */
927 // WHvX64RegisterTsc - don't touch
928 if (fWhat & CPUMCTX_EXTRN_EFER)
929 {
930 uint64_t u64Efer;
931
932 READ_VMCS_FIELD(VMCS_GUEST_IA32_EFER, u64Efer);
933 if (u64Efer != pVCpu->cpum.GstCtx.msrEFER)
934 {
935 Log7(("NEM/%u: MSR EFER changed %RX64 -> %RX64\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.msrEFER, u64Efer));
936 if ((u64Efer ^ pVCpu->cpum.GstCtx.msrEFER) & MSR_K6_EFER_NXE)
937 PGMNotifyNxeChanged(pVCpu, RT_BOOL(u64Efer & MSR_K6_EFER_NXE));
938 pVCpu->cpum.GstCtx.msrEFER = u64Efer;
939 fMaybeChangedMode = true;
940 }
941 }
942#if 0
943 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
944 GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrKERNELGSBASE, WHvX64RegisterKernelGsBase, "MSR KERNEL_GS_BASE");
945 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
946 {
947 GET_REG64_LOG7(pVCpu->cpum.GstCtx.SysEnter.cs, WHvX64RegisterSysenterCs, "MSR SYSENTER.CS");
948 GET_REG64_LOG7(pVCpu->cpum.GstCtx.SysEnter.eip, WHvX64RegisterSysenterEip, "MSR SYSENTER.EIP");
949 GET_REG64_LOG7(pVCpu->cpum.GstCtx.SysEnter.esp, WHvX64RegisterSysenterEsp, "MSR SYSENTER.ESP");
950 }
951 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
952 {
953 GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrSTAR, WHvX64RegisterStar, "MSR STAR");
954 GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrLSTAR, WHvX64RegisterLstar, "MSR LSTAR");
955 GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrCSTAR, WHvX64RegisterCstar, "MSR CSTAR");
956 GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrSFMASK, WHvX64RegisterSfmask, "MSR SFMASK");
957 }
958 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
959 {
960 Assert(aenmNames[iReg] == WHvX64RegisterApicBase);
961 const uint64_t uOldBase = APICGetBaseMsrNoCheck(pVCpu);
962 if (aValues[iReg].Reg64 != uOldBase)
963 {
964 Log7(("NEM/%u: MSR APICBase changed %RX64 -> %RX64 (%RX64)\n",
965 pVCpu->idCpu, uOldBase, aValues[iReg].Reg64, aValues[iReg].Reg64 ^ uOldBase));
966 int rc2 = APICSetBaseMsr(pVCpu, aValues[iReg].Reg64);
967 AssertLogRelMsg(rc2 == VINF_SUCCESS, ("%Rrc %RX64\n", rc2, aValues[iReg].Reg64));
968 }
969 iReg++;
970
971 GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrPAT, WHvX64RegisterPat, "MSR PAT");
972#if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
973 GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrPAT, WHvX64RegisterMsrMtrrCap);
974#endif
975 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
976 GET_REG64_LOG7(pCtxMsrs->msr.MtrrDefType, WHvX64RegisterMsrMtrrDefType, "MSR MTRR_DEF_TYPE");
977 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix64K_00000, WHvX64RegisterMsrMtrrFix64k00000, "MSR MTRR_FIX_64K_00000");
978 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix16K_80000, WHvX64RegisterMsrMtrrFix16k80000, "MSR MTRR_FIX_16K_80000");
979 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix16K_A0000, WHvX64RegisterMsrMtrrFix16kA0000, "MSR MTRR_FIX_16K_A0000");
980 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_C0000, WHvX64RegisterMsrMtrrFix4kC0000, "MSR MTRR_FIX_4K_C0000");
981 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_C8000, WHvX64RegisterMsrMtrrFix4kC8000, "MSR MTRR_FIX_4K_C8000");
982 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_D0000, WHvX64RegisterMsrMtrrFix4kD0000, "MSR MTRR_FIX_4K_D0000");
983 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_D8000, WHvX64RegisterMsrMtrrFix4kD8000, "MSR MTRR_FIX_4K_D8000");
984 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_E0000, WHvX64RegisterMsrMtrrFix4kE0000, "MSR MTRR_FIX_4K_E0000");
985 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_E8000, WHvX64RegisterMsrMtrrFix4kE8000, "MSR MTRR_FIX_4K_E8000");
986 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_F0000, WHvX64RegisterMsrMtrrFix4kF0000, "MSR MTRR_FIX_4K_F0000");
987 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_F8000, WHvX64RegisterMsrMtrrFix4kF8000, "MSR MTRR_FIX_4K_F8000");
988 GET_REG64_LOG7(pCtxMsrs->msr.TscAux, WHvX64RegisterTscAux, "MSR TSC_AUX");
989 /** @todo look for HvX64RegisterIa32MiscEnable and HvX64RegisterIa32FeatureControl? */
990 }
991
992 /* Interruptibility. */
993 if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
994 {
995 Assert(aenmNames[iReg] == WHvRegisterInterruptState);
996 Assert(aenmNames[iReg + 1] == WHvX64RegisterRip);
997
998 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
999 {
1000 pVCpu->nem.s.fLastInterruptShadow = aValues[iReg].InterruptState.InterruptShadow;
1001 if (aValues[iReg].InterruptState.InterruptShadow)
1002 EMSetInhibitInterruptsPC(pVCpu, aValues[iReg + 1].Reg64);
1003 else
1004 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1005 }
1006
1007 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
1008 {
1009 if (aValues[iReg].InterruptState.NmiMasked)
1010 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
1011 else
1012 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
1013 }
1014
1015 fWhat |= CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI;
1016 iReg += 2;
1017 }
1018#endif
1019
1020 /* Almost done, just update extrn flags and maybe change PGM mode. */
1021 pVCpu->cpum.GstCtx.fExtrn &= ~fWhat;
1022 if (!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT))))
1023 pVCpu->cpum.GstCtx.fExtrn = 0;
1024
1025 /* Typical. */
1026 if (!fMaybeChangedMode && !fUpdateCr3)
1027 return VINF_SUCCESS;
1028
1029 /*
1030 * Slow.
1031 */
1032 if (fMaybeChangedMode)
1033 {
1034 int rc = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER);
1035 AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_1);
1036 }
1037
1038 if (fUpdateCr3)
1039 {
1040 int rc = PGMUpdateCR3(pVCpu, pVCpu->cpum.GstCtx.cr3, false /*fPdpesMapped*/);
1041 if (rc == VINF_SUCCESS)
1042 { /* likely */ }
1043 else
1044 AssertMsgFailedReturn(("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_2);
1045 }
1046
1047 return VINF_SUCCESS;
1048#undef READ_GREG
1049#undef READ_VMCS_FIELD
1050#undef READ_VMCS32_FIELD
1051#undef READ_SEG
1052}
1053
1054
1055/**
1056 * Wrapper around nemR3DarwinCopyStateFromHv.
1057 *
1058 * Unlike the wrapped APIs, this checks whether it's necessary.
1059 *
1060 * @returns VBox strict status code.
1061 * @param pVCpu The cross context per CPU structure.
1062 * @param fWhat What to import.
1063 */
1064DECLINLINE(VBOXSTRICTRC) nemR3DarwinImportStateIfNeededStrict(PVMCPUCC pVCpu, uint64_t fWhat)
1065{
1066 if (pVCpu->cpum.GstCtx.fExtrn & fWhat)
1067 {
1068 int rc = nemR3DarwinCopyStateFromHv(pVCpu->pVMR3, pVCpu, fWhat);
1069 AssertRCReturn(rc, rc);
1070 }
1071 return VINF_SUCCESS;
1072}
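/*
 * Illustrative sketch (editor's addition, assumed call site): exit handlers that only need a
 * partial guest context pull just the bits they require; bits already valid in CPUM (clear in
 * fExtrn) are skipped entirely:
 *
 *     VBOXSTRICTRC rcStrict = nemR3DarwinImportStateIfNeededStrict(pVCpu,
 *                                                                  CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 */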
1073
1074
1075/**
1076 * State to pass between the memory-access VM-exit handler and
1077 * nemR3DarwinHandleMemoryAccessPageCheckerCallback.
1078 */
1079typedef struct NEMHCDARWINHMACPCCSTATE
1080{
1081 /** Input: Write access. */
1082 bool fWriteAccess;
1083 /** Output: Set if we did something. */
1084 bool fDidSomething;
1085 /** Output: Set if we should resume. */
1086 bool fCanResume;
1087} NEMHCDARWINHMACPCCSTATE;
1088
1089/**
1090 * @callback_method_impl{FNPGMPHYSNEMCHECKPAGE,
1091 * Worker for the memory-access VM-exit handler; pvUser points to a
1092 * NEMHCDARWINHMACPCCSTATE structure. }
1093 */
1094static DECLCALLBACK(int)
1095nemR3DarwinHandleMemoryAccessPageCheckerCallback(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
1096{
1097 NEMHCDARWINHMACPCCSTATE *pState = (NEMHCDARWINHMACPCCSTATE *)pvUser;
1098 pState->fDidSomething = false;
1099 pState->fCanResume = false;
1100
1101 uint8_t u2State = pInfo->u2NemState;
1102
1103 /*
1104 * Consolidate current page state with actual page protection and access type.
1105 * We don't really consider downgrades here, as they shouldn't happen.
1106 */
1107 int rc;
1108 switch (u2State)
1109 {
1110 case NEM_DARWIN_PAGE_STATE_UNMAPPED:
1111 case NEM_DARWIN_PAGE_STATE_NOT_SET:
1112 if (pInfo->fNemProt == NEM_PAGE_PROT_NONE)
1113 {
1114 Log4(("nemR3DarwinHandleMemoryAccessPageCheckerCallback: %RGp - #1\n", GCPhys));
1115 return VINF_SUCCESS;
1116 }
1117
1118 /* Don't bother remapping it if it's a write request to a non-writable page. */
1119 if ( pState->fWriteAccess
1120 && !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE))
1121 {
1122 Log4(("nemR3DarwinHandleMemoryAccessPageCheckerCallback: %RGp - #1w\n", GCPhys));
1123 return VINF_SUCCESS;
1124 }
1125
1126 /* Map the page. */
1127 rc = nemHCNativeSetPhysPage(pVM,
1128 pVCpu,
1129 GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
1130 GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
1131 pInfo->fNemProt,
1132 &u2State,
1133 true /*fBackingChanged*/);
1134 pInfo->u2NemState = u2State;
1135 Log4(("nemR3DarwinHandleMemoryAccessPageCheckerCallback: %RGp - synced => %s + %Rrc\n",
1136 GCPhys, g_apszPageStates[u2State], rc));
1137 pState->fDidSomething = true;
1138 pState->fCanResume = true;
1139 return rc;
1140
1141 case NEM_DARWIN_PAGE_STATE_READABLE:
1142 if ( !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1143 && (pInfo->fNemProt & (NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE)))
1144 {
1145 pState->fCanResume = true;
1146 Log4(("nemR3DarwinHandleMemoryAccessPageCheckerCallback: %RGp - #2\n", GCPhys));
1147 return VINF_SUCCESS;
1148 }
1149 break;
1150
1151 case NEM_DARWIN_PAGE_STATE_WRITABLE:
1152 if (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1153 {
1154 /* We get spurious EPT violation exits here even when everything is fine (#3a case), but we can resume without issues... */
1155 pState->fCanResume = true;
1156 if (pInfo->u2OldNemState == NEM_DARWIN_PAGE_STATE_WRITABLE)
1157 Log4(("nemR3DarwinHandleMemoryAccessPageCheckerCallback: %RGp - #3a\n", GCPhys));
1158 else
1159 Log4(("nemR3DarwinHandleMemoryAccessPageCheckerCallback: %RGp - #3b (%s -> %s)\n",
1160 GCPhys, g_apszPageStates[pInfo->u2OldNemState], g_apszPageStates[u2State]));
1161 return VINF_SUCCESS;
1162 }
1163
1164 break;
1165
1166 default:
1167 AssertLogRelMsgFailedReturn(("u2State=%#x\n", u2State), VERR_NEM_IPE_4);
1168 }
1169
1170 /*
1171 * Unmap and restart the instruction.
1172 * If this fails, which it does every so often, just unmap everything for now.
1173 */
1174 rc = nemR3DarwinUnmap(GCPhys, X86_PAGE_SIZE);
1175 if (RT_SUCCESS(rc))
1176 {
1177 pState->fDidSomething = true;
1178 pState->fCanResume = true;
1179 pInfo->u2NemState = NEM_DARWIN_PAGE_STATE_UNMAPPED;
1180 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
1181 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
1182 Log5(("NEM GPA unmapped/exit: %RGp (was %s, cMappedPages=%u)\n", GCPhys, g_apszPageStates[u2State], cMappedPages));
1183 return VINF_SUCCESS;
1184 }
1185 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
1186 LogRel(("nemR3DarwinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp %s rc=%Rrc\n",
1187 GCPhys, g_apszPageStates[u2State], rc));
1188 return VERR_NEM_UNMAP_PAGES_FAILED;
1189}
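/*
 * Illustrative sketch (editor's addition; the actual caller is the EPT-violation exit handler
 * further down, and fWriteAccess would be derived from the exit qualification): the handler
 * seeds a NEMHCDARWINHMACPCCSTATE and has PGM consult this callback for the faulting page;
 * on VINF_SUCCESS with fCanResume set, the faulting instruction is simply restarted:
 *
 *     NEMHCDARWINHMACPCCSTATE State = { fWriteAccess, false, false };
 */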
1190
1191
1192/*
1193 * Instantiate the code we share with ring-0.
1194 */
1195#define HMVMX_ALWAYS_TRAP_ALL_XCPTS
1196#define VCPU_2_VMXSTATE(a_pVCpu) (a_pVCpu)->nem.s
1197
1198#define VMX_VMCS_WRITE_16(a_pVCpu, a_FieldEnc, a_Val) nemR3DarwinWriteVmcs16((a_pVCpu), (a_FieldEnc), (a_Val))
1199#define VMX_VMCS_WRITE_32(a_pVCpu, a_FieldEnc, a_Val) nemR3DarwinWriteVmcs32((a_pVCpu), (a_FieldEnc), (a_Val))
1200#define VMX_VMCS_WRITE_64(a_pVCpu, a_FieldEnc, a_Val) nemR3DarwinWriteVmcs64((a_pVCpu), (a_FieldEnc), (a_Val))
1201#define VMX_VMCS_WRITE_NW(a_pVCpu, a_FieldEnc, a_Val) nemR3DarwinWriteVmcs64((a_pVCpu), (a_FieldEnc), (a_Val))
1202
1203#define VMX_VMCS_READ_16(a_pVCpu, a_FieldEnc, a_pVal) nemR3DarwinReadVmcs16((a_pVCpu), (a_FieldEnc), (a_pVal))
1204#define VMX_VMCS_READ_32(a_pVCpu, a_FieldEnc, a_pVal) nemR3DarwinReadVmcs32((a_pVCpu), (a_FieldEnc), (a_pVal))
1205#define VMX_VMCS_READ_64(a_pVCpu, a_FieldEnc, a_pVal) nemR3DarwinReadVmcs64((a_pVCpu), (a_FieldEnc), (a_pVal))
1206#define VMX_VMCS_READ_NW(a_pVCpu, a_FieldEnc, a_pVal) nemR3DarwinReadVmcs64((a_pVCpu), (a_FieldEnc), (a_pVal))
1207
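/*
 * Editor's note (illustrative, not part of upstream): with the mappings above, shared template
 * code such as
 *
 *     VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_REASON, &uExitReason);
 *
 * expands to nemR3DarwinReadVmcs32(pVCpu, VMX_VMCS32_RO_EXIT_REASON, &uExitReason), so
 * VMXAllTemplate.cpp.h can run unmodified on top of Hypervisor.framework instead of using
 * raw VMREAD/VMWRITE in ring-0.
 */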
1208#include "../VMMAll/VMXAllTemplate.cpp.h"
1209
1210#undef VMX_VMCS_WRITE_16
1211#undef VMX_VMCS_WRITE_32
1212#undef VMX_VMCS_WRITE_64
1213#undef VMX_VMCS_WRITE_NW
1214
1215#undef VMX_VMCS_READ_16
1216#undef VMX_VMCS_READ_32
1217#undef VMX_VMCS_READ_64
1218#undef VMX_VMCS_READ_NW
1219
1220
1221/**
1222 * Handles an exit from hv_vcpu_run().
1223 *
1224 * @returns VBox strict status code.
1225 * @param pVM The cross context VM structure.
1226 * @param pVCpu The cross context virtual CPU structure of the
1227 * calling EMT.
1228 */
1229static VBOXSTRICTRC nemR3DarwinHandleExit(PVM pVM, PVMCPU pVCpu)
1230{
1231 VMXTRANSIENT VmxTransient;
1232 RT_ZERO(VmxTransient);
1233
1234 uint32_t uExitReason;
1235 int rc = nemR3DarwinReadVmcs32(pVCpu, VMX_VMCS32_RO_EXIT_REASON, &uExitReason);
1236 AssertRC(rc);
1237 VmxTransient.pVmcsInfo = &pVCpu->nem.s.VmcsInfo;
1238 VmxTransient.uExitReason = VMX_EXIT_REASON_BASIC(uExitReason);
1239 VmxTransient.fVMEntryFailed = VMX_EXIT_REASON_HAS_ENTRY_FAILED(uExitReason);
1240
1241 if (RT_UNLIKELY(VmxTransient.fVMEntryFailed))
1242 AssertLogRelMsgFailedReturn(("Running guest failed for CPU #%u: %#x %u\n",
1243 pVCpu->idCpu, VmxTransient.uExitReason, vmxHCCheckGuestState(pVCpu, &pVCpu->nem.s.VmcsInfo)),
1244 VERR_NEM_IPE_0);
1245
1246 /** @todo Only copy the state on demand (requires adhering to the fCtxChanged flags from the VMX code
1247 * instead of the fExtrn one living in CPUM).
1248 */
1249 rc = nemR3DarwinCopyStateFromHv(pVM, pVCpu, UINT64_MAX);
1250 AssertRCReturn(rc, rc);
1251
1252#ifndef HMVMX_USE_FUNCTION_TABLE
1253 return vmxHCHandleExit(pVCpu, &VmxTransient);
1254#else
1255 return g_aVMExitHandlers[VmxTransient.uExitReason].pfn(pVCpu, &VmxTransient);
1256#endif
1257}
1258
1259
1260/**
1261 * Read and initialize the global capabilities supported by this CPU.
1262 *
1263 * @returns VBox status code.
1264 */
1265static int nemR3DarwinCapsInit(void)
1266{
1267 RT_ZERO(g_HmMsrs);
1268
1269 hv_return_t hrc = hv_vmx_read_capability(HV_VMX_CAP_PINBASED, &g_HmMsrs.u.vmx.PinCtls.u);
1270 if (hrc == HV_SUCCESS)
1271 hrc = hv_vmx_read_capability(HV_VMX_CAP_PROCBASED, &g_HmMsrs.u.vmx.ProcCtls.u);
1272#if 0 /* Not available with our SDK. */
1273 if (hrc == HV_SUCCESS)
1274 hrc = hv_vmx_read_capability(HV_VMX_CAP_BASIC, &g_HmMsrs.u.vmx.u64Basic);
1275#endif
1276 if (hrc == HV_SUCCESS)
1277 hrc = hv_vmx_read_capability(HV_VMX_CAP_ENTRY, &g_HmMsrs.u.vmx.EntryCtls.u);
1278 if (hrc == HV_SUCCESS)
1279 hrc = hv_vmx_read_capability(HV_VMX_CAP_EXIT, &g_HmMsrs.u.vmx.ExitCtls.u);
1280#if 0 /* Not available with our SDK. */
1281 if (hrc == HV_SUCCESS)
1282 hrc = hv_vmx_read_capability(HV_VMX_CAP_MISC, &g_HmMsrs.u.vmx.u64Misc);
1283 if (hrc == HV_SUCCESS)
1284 hrc = hv_vmx_read_capability(HV_VMX_CAP_CR0_FIXED0, &g_HmMsrs.u.vmx.u64Cr0Fixed0);
1285 if (hrc == HV_SUCCESS)
1286 hrc = hv_vmx_read_capability(HV_VMX_CAP_CR0_FIXED1, &g_HmMsrs.u.vmx.u64Cr0Fixed1);
1287 if (hrc == HV_SUCCESS)
1288 hrc = hv_vmx_read_capability(HV_VMX_CAP_CR4_FIXED0, &g_HmMsrs.u.vmx.u64Cr4Fixed0);
1289 if (hrc == HV_SUCCESS)
1290 hrc = hv_vmx_read_capability(HV_VMX_CAP_CR4_FIXED1, &g_HmMsrs.u.vmx.u64Cr4Fixed1);
1291 if (hrc == HV_SUCCESS)
1292 hrc = hv_vmx_read_capability(HV_VMX_CAP_VMCS_ENUM, &g_HmMsrs.u.vmx.u64VmcsEnum);
1293 if ( hrc == HV_SUCCESS
1294 && RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_TRUE_CTLS))
1295 {
1296 hrc = hv_vmx_read_capability(HV_VMX_CAP_TRUE_PINBASED, &g_HmMsrs.u.vmx.TruePinCtls.u);
1297 if (hrc == HV_SUCCESS)
1298 hrc = hv_vmx_read_capability(HV_VMX_CAP_TRUE_PROCBASED, &g_HmMsrs.u.vmx.TrueProcCtls.u);
1299 if (hrc == HV_SUCCESS)
1300 hrc = hv_vmx_read_capability(HV_VMX_CAP_TRUE_ENTRY, &g_HmMsrs.u.vmx.TrueEntryCtls.u);
1301 if (hrc == HV_SUCCESS)
1302 hrc = hv_vmx_read_capability(HV_VMX_CAP_TRUE_EXIT, &g_HmMsrs.u.vmx.TrueExitCtls.u);
1303 }
1304#endif
1305
1306 if ( hrc == HV_SUCCESS
1307 && g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1308 {
1309 hrc = hv_vmx_read_capability(HV_VMX_CAP_PROCBASED2, &g_HmMsrs.u.vmx.ProcCtls2.u);
1310
1311#if 0 /* Not available with our SDK. */
1312 if ( hrc == HV_SUCCESS
1313 && (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & (VMX_PROC_CTLS2_EPT | VMX_PROC_CTLS2_VPID)))
1314 hrc = hv_vmx_read_capability(HV_VMX_CAP_EPT_VPID_CAP, &g_HmMsrs.u.vmx.u64EptVpidCaps);
1315#endif
1316 g_HmMsrs.u.vmx.u64VmFunc = 0; /* No way to read that on macOS. */
1317 }
1318
1319 return nemR3DarwinHvSts2Rc(hrc);
1320}
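/*
 * Illustrative sketch (editor's addition): once g_HmMsrs is populated, feature checks look the
 * same as on the ring-0 VT-x path, e.g. testing whether secondary processor-based controls are
 * available before touching them (as the VMCS setup code below does):
 *
 *     if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
 *         rc = nemR3DarwinVmxSetupVmcsProcCtls2(pVCpu, pVmcsInfo);
 */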
1321
1322
1323/**
1324 * Sets up pin-based VM-execution controls in the VMCS.
1325 *
1326 * @returns VBox status code.
1327 * @param pVCpu The cross context virtual CPU structure.
1328 * @param pVmcsInfo The VMCS info. object.
1329 */
1330static int nemR3DarwinVmxSetupVmcsPinCtls(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1331{
1332 //PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1333 uint32_t fVal = g_HmMsrs.u.vmx.PinCtls.n.allowed0; /* Bits set here must always be set. */
1334 uint32_t const fZap = g_HmMsrs.u.vmx.PinCtls.n.allowed1; /* Bits cleared here must always be cleared. */
1335
1336 if (g_HmMsrs.u.vmx.PinCtls.n.allowed1 & VMX_PIN_CTLS_VIRT_NMI)
1337 fVal |= VMX_PIN_CTLS_VIRT_NMI; /* Use virtual NMIs and virtual-NMI blocking features. */
1338
1339#if 0 /** @todo Use preemption timer */
1340 /* Enable the VMX-preemption timer. */
1341 if (pVM->hmr0.s.vmx.fUsePreemptTimer)
1342 {
1343 Assert(g_HmMsrs.u.vmx.PinCtls.n.allowed1 & VMX_PIN_CTLS_PREEMPT_TIMER);
1344 fVal |= VMX_PIN_CTLS_PREEMPT_TIMER;
1345 }
1346
1347 /* Enable posted-interrupt processing. */
1348 if (pVM->hm.s.fPostedIntrs)
1349 {
1350 Assert(g_HmMsrs.u.vmx.PinCtls.n.allowed1 & VMX_PIN_CTLS_POSTED_INT);
1351 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_ACK_EXT_INT);
1352 fVal |= VMX_PIN_CTLS_POSTED_INT;
1353 }
1354#endif
1355
1356 if ((fVal & fZap) != fVal)
1357 {
1358 LogRelFunc(("Invalid pin-based VM-execution controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1359 g_HmMsrs.u.vmx.PinCtls.n.allowed0, fVal, fZap));
1360 pVCpu->nem.s.u32HMError = VMX_UFC_CTRL_PIN_EXEC;
1361 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1362 }
1363
1364 /* Commit it to the VMCS and update our cache. */
1365 int rc = nemR3DarwinWriteVmcs32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, fVal);
1366 AssertRC(rc);
1367 pVmcsInfo->u32PinCtls = fVal;
1368
1369 return VINF_SUCCESS;
1370}
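/*
 * Editor's note (illustrative, values made up): allowed0 holds the control bits the CPU forces
 * to 1 and allowed1 the bits it permits to be 1.  With e.g. allowed0 = 0x00000016 and
 * allowed1 = 0x0000007f, any final fVal must contain 0x00000016 and must satisfy
 * (fVal & 0x0000007f) == fVal; the "(fVal & fZap) != fVal" test above catches violations of
 * the latter.
 */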
1371
1372
1373/**
1374 * Sets up secondary processor-based VM-execution controls in the VMCS.
1375 *
1376 * @returns VBox status code.
1377 * @param pVCpu The cross context virtual CPU structure.
1378 * @param pVmcsInfo The VMCS info. object.
1379 */
1380static int nemR3DarwinVmxSetupVmcsProcCtls2(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1381{
1382 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1383 uint32_t fVal = g_HmMsrs.u.vmx.ProcCtls2.n.allowed0; /* Bits set here must be set in the VMCS. */
1384 uint32_t const fZap = g_HmMsrs.u.vmx.ProcCtls2.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1385
1386 /* WBINVD causes a VM-exit. */
1387 if (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_WBINVD_EXIT)
1388 fVal |= VMX_PROC_CTLS2_WBINVD_EXIT;
1389
1390 /* Enable the INVPCID instruction if we expose it to the guest and it is supported
1391 by the hardware. Without this, a guest executing INVPCID would cause a #UD. */
1392 if ( pVM->cpum.ro.GuestFeatures.fInvpcid
1393 && (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_INVPCID))
1394 fVal |= VMX_PROC_CTLS2_INVPCID;
1395
1396#if 0 /** @todo */
1397 /* Enable VPID. */
1398 if (pVM->hmr0.s.vmx.fVpid)
1399 fVal |= VMX_PROC_CTLS2_VPID;
1400
1401 if (pVM->hm.s.fVirtApicRegs)
1402 {
1403 /* Enable APIC-register virtualization. */
1404 Assert(g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_APIC_REG_VIRT);
1405 fVal |= VMX_PROC_CTLS2_APIC_REG_VIRT;
1406
1407 /* Enable virtual-interrupt delivery. */
1408 Assert(g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_INTR_DELIVERY);
1409 fVal |= VMX_PROC_CTLS2_VIRT_INTR_DELIVERY;
1410 }
1411
1412 /* Virtualize-APIC accesses if supported by the CPU. The virtual-APIC page is
1413 where the TPR shadow resides. */
1414 /** @todo VIRT_X2APIC support, it's mutually exclusive with this. So must be
1415 * done dynamically. */
1416 if (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
1417 {
1418 fVal |= VMX_PROC_CTLS2_VIRT_APIC_ACCESS;
1419 hmR0VmxSetupVmcsApicAccessAddr(pVCpu);
1420 }
1421
1422 /* Enable the RDTSCP instruction if we expose it to the guest and it is supported
1423 by the hardware. Without this, a guest executing RDTSCP would cause a #UD. */
1424 if ( pVM->cpum.ro.GuestFeatures.fRdTscP
1425 && (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_RDTSCP))
1426 fVal |= VMX_PROC_CTLS2_RDTSCP;
1427
1428 /* Enable Pause-Loop exiting. */
1429 if ( (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT)
1430 && pVM->hm.s.vmx.cPleGapTicks
1431 && pVM->hm.s.vmx.cPleWindowTicks)
1432 {
1433 fVal |= VMX_PROC_CTLS2_PAUSE_LOOP_EXIT;
1434
1435 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_GAP, pVM->hm.s.vmx.cPleGapTicks); AssertRC(rc);
1436 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_WINDOW, pVM->hm.s.vmx.cPleWindowTicks); AssertRC(rc);
1437 }
1438#endif
1439
1440 if ((fVal & fZap) != fVal)
1441 {
1442 LogRelFunc(("Invalid secondary processor-based VM-execution controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1443 g_HmMsrs.u.vmx.ProcCtls2.n.allowed0, fVal, fZap));
1444 pVCpu->nem.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC2;
1445 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1446 }
1447
1448 /* Commit it to the VMCS and update our cache. */
1449 int rc = nemR3DarwinWriteVmcs32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, fVal);
1450 AssertRC(rc);
1451 pVmcsInfo->u32ProcCtls2 = fVal;
1452
1453 return VINF_SUCCESS;
1454}
1455
1456
1457/**
1458 * Sets up processor-based VM-execution controls in the VMCS.
1459 *
1460 * @returns VBox status code.
1461 * @param pVCpu The cross context virtual CPU structure.
1462 * @param pVmcsInfo The VMCS info. object.
1463 */
1464static int nemR3DarwinVmxSetupVmcsProcCtls(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1465{
1466 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1467 uint32_t fVal = g_HmMsrs.u.vmx.ProcCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
1468 uint32_t const fZap = g_HmMsrs.u.vmx.ProcCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1469
1470 fVal |= VMX_PROC_CTLS_HLT_EXIT /* HLT causes a VM-exit. */
1471// | VMX_PROC_CTLS_USE_TSC_OFFSETTING /* Use TSC-offsetting. */
1472 | VMX_PROC_CTLS_MOV_DR_EXIT /* MOV DRx causes a VM-exit. */
1473 | VMX_PROC_CTLS_UNCOND_IO_EXIT /* All IO instructions cause a VM-exit. */
1474 | VMX_PROC_CTLS_RDPMC_EXIT /* RDPMC causes a VM-exit. */
1475 | VMX_PROC_CTLS_MONITOR_EXIT /* MONITOR causes a VM-exit. */
1476 | VMX_PROC_CTLS_MWAIT_EXIT; /* MWAIT causes a VM-exit. */
1477
1478 /* We toggle VMX_PROC_CTLS_MOV_DR_EXIT later; check that it is not forced to be always set or always cleared. */
1479 if ( !(g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_MOV_DR_EXIT)
1480 || (g_HmMsrs.u.vmx.ProcCtls.n.allowed0 & VMX_PROC_CTLS_MOV_DR_EXIT))
1481 {
1482 pVCpu->nem.s.u32HMError = VMX_UFC_CTRL_PROC_MOV_DRX_EXIT;
1483 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1484 }
1485
1486#if 0 /** @todo */
1487 /* Use TPR shadowing if supported by the CPU. */
1488 if ( PDMHasApic(pVM)
1489 && (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_TPR_SHADOW))
1490 {
1491 fVal |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* CR8 reads from the Virtual-APIC page. */
1492 /* CR8 writes cause a VM-exit based on TPR threshold. */
1493 Assert(!(fVal & VMX_PROC_CTLS_CR8_STORE_EXIT));
1494 Assert(!(fVal & VMX_PROC_CTLS_CR8_LOAD_EXIT));
1495 hmR0VmxSetupVmcsVirtApicAddr(pVmcsInfo);
1496 }
1497 else
1498#endif
1499 {
1500 fVal |= VMX_PROC_CTLS_CR8_STORE_EXIT /* CR8 reads cause a VM-exit. */
1501 | VMX_PROC_CTLS_CR8_LOAD_EXIT; /* CR8 writes cause a VM-exit. */
1502 }
1503
1504#if 0 /** @todo */
1505 /* Use MSR-bitmaps if supported by the CPU. */
1506 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS)
1507 {
1508 fVal |= VMX_PROC_CTLS_USE_MSR_BITMAPS;
1509 hmR0VmxSetupVmcsMsrBitmapAddr(pVmcsInfo);
1510 }
1511#endif
1512
1513 /* Use the secondary processor-based VM-execution controls if supported by the CPU. */
1514 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1515 fVal |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
1516
1517 if ((fVal & fZap) != fVal)
1518 {
1519 LogRelFunc(("Invalid processor-based VM-execution controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1520 g_HmMsrs.u.vmx.ProcCtls.n.allowed0, fVal, fZap));
1521 pVCpu->nem.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC;
1522 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1523 }
1524
1525 /* Commit it to the VMCS and update our cache. */
1526 int rc = nemR3DarwinWriteVmcs32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, fVal);
1527 AssertRC(rc);
1528 pVmcsInfo->u32ProcCtls = fVal;
1529
1530#if 0
1531 /* Set up MSR permissions that don't change through the lifetime of the VM. */
1532 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
1533 hmR0VmxSetupVmcsMsrPermissions(pVCpu, pVmcsInfo);
1534#endif
1535
1536 /*
1537 * Set up secondary processor-based VM-execution controls
1538 * (we assume the CPU always supports it since we rely on unrestricted guest execution support).
1539 */
1540 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS);
1541 return nemR3DarwinVmxSetupVmcsProcCtls2(pVCpu, pVmcsInfo);
1542}
1543
1544
1545/**
1546 * Sets up miscellaneous (everything other than Pin, Processor and secondary
1547 * Processor-based VM-execution) control fields in the VMCS.
1548 *
1549 * @returns VBox status code.
1550 * @param pVCpu The cross context virtual CPU structure.
1551 * @param pVmcsInfo The VMCS info. object.
1552 */
1553static int nemR3DarwinVmxSetupVmcsMiscCtls(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1554{
1555 int rc = VINF_SUCCESS;
1556 //rc = hmR0VmxSetupVmcsAutoLoadStoreMsrAddrs(pVmcsInfo); TODO
1557 if (RT_SUCCESS(rc))
1558 {
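 /* Bits set in the CR0/CR4 guest/host masks are owned by the host: guest reads of
    them return the read-shadow values and guest attempts to change them cause VM-exits. */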
1559 uint64_t const u64Cr0Mask = vmxHCGetFixedCr0Mask(pVCpu);
1560 uint64_t const u64Cr4Mask = vmxHCGetFixedCr4Mask(pVCpu);
1561
1562 rc = nemR3DarwinWriteVmcs64(pVCpu, VMX_VMCS_CTRL_CR0_MASK, u64Cr0Mask); AssertRC(rc);
1563 rc = nemR3DarwinWriteVmcs64(pVCpu, VMX_VMCS_CTRL_CR4_MASK, u64Cr4Mask); AssertRC(rc);
1564
1565 pVmcsInfo->u64Cr0Mask = u64Cr0Mask;
1566 pVmcsInfo->u64Cr4Mask = u64Cr4Mask;
1567
1568#if 0 /** @todo */
1569 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fLbr)
1570 {
1571 rc = VMXWriteVmcsNw(VMX_VMCS64_GUEST_DEBUGCTL_FULL, MSR_IA32_DEBUGCTL_LBR);
1572 AssertRC(rc);
1573 }
1574#endif
1575 return VINF_SUCCESS;
1576 }
1577 else
1578 LogRelFunc(("Failed to initialize VMCS auto-load/store MSR addresses. rc=%Rrc\n", rc));
1579 return rc;
1580}
1581
1582
1583/**
1584 * Sets up the initial exception bitmap in the VMCS based on static conditions.
1585 *
1586 * Here we set up those exception intercepts that don't change during the
1587 * lifetime of the VM. The rest are done dynamically while loading the
1588 * guest state.
1589 *
1590 * @param pVCpu The cross context virtual CPU structure.
1591 * @param pVmcsInfo The VMCS info. object.
1592 */
1593static void nemR3DarwinVmxSetupVmcsXcptBitmap(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1594{
1595 /*
1596 * The following exceptions are always intercepted:
1597 *
1598 * #AC - To prevent the guest from hanging the CPU and for dealing with
1599 * split-lock detecting host configs.
1600 * #DB - To maintain the DR6 state even when intercepting DRx reads/writes, and
1601 * because recursive #DBs can cause a CPU hang.
1602 */
1603 uint32_t const uXcptBitmap = RT_BIT(X86_XCPT_AC)
1604 | RT_BIT(X86_XCPT_DB);
1605
1606 /* Commit it to the VMCS. */
1607 int rc = nemR3DarwinWriteVmcs32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
1608 AssertRC(rc);
1609
1610 /* Update our cache of the exception bitmap. */
1611 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
1612}
1613
1614
1615/**
1616 * Initialize the VMCS information field for the given vCPU.
1617 *
1618 * @returns VBox status code.
1619 * @param pVCpu The cross context virtual CPU structure of the
1620 * calling EMT.
1621 */
1622static int nemR3DarwinInitVmcs(PVMCPU pVCpu)
1623{
1624 int rc = nemR3DarwinVmxSetupVmcsPinCtls(pVCpu, &pVCpu->nem.s.VmcsInfo);
1625 if (RT_SUCCESS(rc))
1626 {
1627 rc = nemR3DarwinVmxSetupVmcsProcCtls(pVCpu, &pVCpu->nem.s.VmcsInfo);
1628 if (RT_SUCCESS(rc))
1629 {
1630 rc = nemR3DarwinVmxSetupVmcsMiscCtls(pVCpu, &pVCpu->nem.s.VmcsInfo);
1631 if (RT_SUCCESS(rc))
1632 {
1633 nemR3DarwinVmxSetupVmcsXcptBitmap(pVCpu, &pVCpu->nem.s.VmcsInfo);
1634 return VINF_SUCCESS;
1635 }
1636 else
1637 LogRelFunc(("Failed to setup miscellaneous controls. rc=%Rrc\n", rc));
1638 }
1639 else
1640 LogRelFunc(("Failed to setup processor-based VM-execution controls. rc=%Rrc\n", rc));
1641 }
1642 else
1643 LogRelFunc(("Failed to setup pin-based controls. rc=%Rrc\n", rc));
1644
1645 return rc;
1646}
1647
1648
1649/**
1650 * Try initialize the native API.
1651 *
1652 * This may only do part of the job; more can be done in
1653 * nemR3NativeInitAfterCPUM() and nemR3NativeInitCompleted().
1654 *
1655 * @returns VBox status code.
1656 * @param pVM The cross context VM structure.
1657 * @param fFallback Whether we're in fallback mode or use-NEM mode. In
1658 * the latter we'll fail if we cannot initialize.
1659 * @param fForced Whether the HMForced flag is set and we should
1660 * fail if we cannot initialize.
1661 */
1662int nemR3NativeInit(PVM pVM, bool fFallback, bool fForced)
1663{
1664 AssertReturn(!pVM->nem.s.fCreatedVm, VERR_WRONG_ORDER);
1665
1666 /*
1667 * Some state init.
1668 */
1669
1670 /*
1671 * Error state.
1672 * The error message will be non-empty on failure and 'rc' will be set too.
1673 */
1674 RTERRINFOSTATIC ErrInfo;
1675 PRTERRINFO pErrInfo = RTErrInfoInitStatic(&ErrInfo);
1676 int rc = VINF_SUCCESS;
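 /* Try create the VM with Hypervisor.framework; if this fails the native API is not
    usable and we either fall back or fail hard depending on the flags handled below. */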
1677 hv_return_t hrc = hv_vm_create(HV_VM_DEFAULT);
1678 if (hrc == HV_SUCCESS)
1679 {
1680 pVM->nem.s.fCreatedVm = true;
1681
1682 VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_NATIVE_API);
1683 Log(("NEM: Marked active!\n"));
1684 PGMR3EnableNemMode(pVM);
1685
1686 /* Register release statistics */
1687 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1688 {
1689 PNEMCPU pNemCpu = &pVM->apCpusR3[idCpu]->nem.s;
1690 STAMR3RegisterF(pVM, &pNemCpu->StatExitPortIo, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of port I/O exits", "/NEM/CPU%u/ExitPortIo", idCpu);
1691 STAMR3RegisterF(pVM, &pNemCpu->StatExitMemUnmapped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of unmapped memory exits", "/NEM/CPU%u/ExitMemUnmapped", idCpu);
1692 STAMR3RegisterF(pVM, &pNemCpu->StatExitMemIntercept, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of intercepted memory exits", "/NEM/CPU%u/ExitMemIntercept", idCpu);
1693 STAMR3RegisterF(pVM, &pNemCpu->StatExitHalt, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of HLT exits", "/NEM/CPU%u/ExitHalt", idCpu);
1694 STAMR3RegisterF(pVM, &pNemCpu->StatExitInterruptWindow, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of interrupt window exits", "/NEM/CPU%u/ExitInterruptWindow", idCpu);
1695 STAMR3RegisterF(pVM, &pNemCpu->StatExitCpuId, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of CPUID exits", "/NEM/CPU%u/ExitCpuId", idCpu);
1696 STAMR3RegisterF(pVM, &pNemCpu->StatExitMsr, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of MSR access exits", "/NEM/CPU%u/ExitMsr", idCpu);
1697 STAMR3RegisterF(pVM, &pNemCpu->StatExitException, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of exception exits", "/NEM/CPU%u/ExitException", idCpu);
1698 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionBp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #BP exits", "/NEM/CPU%u/ExitExceptionBp", idCpu);
1699 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionDb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #DB exits", "/NEM/CPU%u/ExitExceptionDb", idCpu);
1700 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionGp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #GP exits", "/NEM/CPU%u/ExitExceptionGp", idCpu);
1701 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionGpMesa, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #GP exits from mesa driver", "/NEM/CPU%u/ExitExceptionGpMesa", idCpu);
1702 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionUd, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of #UD exits", "/NEM/CPU%u/ExitExceptionUd", idCpu);
1703 STAMR3RegisterF(pVM, &pNemCpu->StatExitExceptionUdHandled, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of handled #UD exits", "/NEM/CPU%u/ExitExceptionUdHandled", idCpu);
1704 STAMR3RegisterF(pVM, &pNemCpu->StatExitUnrecoverable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of unrecoverable exits", "/NEM/CPU%u/ExitUnrecoverable", idCpu);
1705 STAMR3RegisterF(pVM, &pNemCpu->StatGetMsgTimeout, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of get message timeouts/alerts", "/NEM/CPU%u/GetMsgTimeout", idCpu);
1706 STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuSuccess, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of successful CPU stops", "/NEM/CPU%u/StopCpuSuccess", idCpu);
1707 STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuPending, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of pending CPU stops", "/NEM/CPU%u/StopCpuPending", idCpu);
1708 STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuPendingAlerts,STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of pending CPU stop alerts", "/NEM/CPU%u/StopCpuPendingAlerts", idCpu);
1709 STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuPendingOdd, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of odd pending CPU stops (see code)", "/NEM/CPU%u/StopCpuPendingOdd", idCpu);
1710 STAMR3RegisterF(pVM, &pNemCpu->StatCancelChangedState, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of cancel changed state", "/NEM/CPU%u/CancelChangedState", idCpu);
1711 STAMR3RegisterF(pVM, &pNemCpu->StatCancelAlertedThread, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of cancel alerted EMT", "/NEM/CPU%u/CancelAlertedEMT", idCpu);
1712 STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnFFPre, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of pre execution FF breaks", "/NEM/CPU%u/BreakOnFFPre", idCpu);
1713 STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnFFPost, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of post execution FF breaks", "/NEM/CPU%u/BreakOnFFPost", idCpu);
1714 STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnCancel, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of cancel execution breaks", "/NEM/CPU%u/BreakOnCancel", idCpu);
1715 STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnStatus, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of status code breaks", "/NEM/CPU%u/BreakOnStatus", idCpu);
1716 STAMR3RegisterF(pVM, &pNemCpu->StatImportOnDemand, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of on-demand state imports", "/NEM/CPU%u/ImportOnDemand", idCpu);
1717 STAMR3RegisterF(pVM, &pNemCpu->StatImportOnReturn, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of state imports on loop return", "/NEM/CPU%u/ImportOnReturn", idCpu);
1718 STAMR3RegisterF(pVM, &pNemCpu->StatImportOnReturnSkipped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of skipped state imports on loop return", "/NEM/CPU%u/ImportOnReturnSkipped", idCpu);
1719 STAMR3RegisterF(pVM, &pNemCpu->StatQueryCpuTick, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TSC queries", "/NEM/CPU%u/QueryCpuTick", idCpu);
1720 }
1721 }
1722 else
1723 rc = RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
1724 "hv_vm_create() failed: %#x", hrc);
1725
1726 /*
1727 * We only fail if in forced mode, otherwise just log the complaint and return.
1728 */
1729 Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API || RTErrInfoIsSet(pErrInfo));
1730 if ( (fForced || !fFallback)
1731 && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NATIVE_API)
1732 return VMSetError(pVM, RT_SUCCESS_NP(rc) ? VERR_NEM_NOT_AVAILABLE : rc, RT_SRC_POS, "%s", pErrInfo->pszMsg);
1733
1734 if (RTErrInfoIsSet(pErrInfo))
1735 LogRel(("NEM: Not available: %s\n", pErrInfo->pszMsg));
1736 return VINF_SUCCESS;
1737}
1738
1739
1740/**
1741 * Worker to create the vCPU handle on the EMT running it later on (as required by HV).
1742 *
1743 * @returns VBox status code
1744 * @param pVM The VM handle.
1745 * @param pVCpu The vCPU handle.
1746 * @param idCpu ID of the CPU to create.
1747 */
1748static DECLCALLBACK(int) nemR3DarwinNativeInitVCpuOnEmt(PVM pVM, PVMCPU pVCpu, VMCPUID idCpu)
1749{
1750 hv_return_t hrc = hv_vcpu_create(&pVCpu->nem.s.hVCpuId, HV_VCPU_DEFAULT);
1751 if (hrc != HV_SUCCESS)
1752 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
1753 "Call to hv_vcpu_create failed on vCPU %u: %#x (%Rrc)", idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));
1754
1755 if (idCpu == 0)
1756 {
1757 /* The first call initializes the MSR structure holding the capabilities of the host CPU. */
1758 int rc = nemR3DarwinCapsInit();
1759 AssertRCReturn(rc, rc);
1760 }
1761
1762 int rc = nemR3DarwinInitVmcs(pVCpu);
1763 AssertRCReturn(rc, rc);
1764
1765 return VINF_SUCCESS;
1766}
1767
1768
1769/**
1770 * Worker to destroy the vCPU handle on the EMT running it later on (as required by HV).
1771 *
1772 * @returns VBox status code
1773 * @param pVCpu The vCPU handle.
1774 */
1775static DECLCALLBACK(int) nemR3DarwinNativeTermVCpuOnEmt(PVMCPU pVCpu)
1776{
1777 hv_return_t hrc = hv_vcpu_destroy(pVCpu->nem.s.hVCpuId);
1778 Assert(hrc == HV_SUCCESS);
1779 return VINF_SUCCESS;
1780}
1781
1782
1783/**
1784 * This is called after CPUMR3Init is done.
1785 *
1786 * @returns VBox status code.
1787 * @param pVM The VM handle.
1788 */
1789int nemR3NativeInitAfterCPUM(PVM pVM)
1790{
1791 /*
1792 * Validate sanity.
1793 */
1794 AssertReturn(!pVM->nem.s.fCreatedEmts, VERR_WRONG_ORDER);
1795 AssertReturn(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API, VERR_WRONG_ORDER);
1796
1797 /*
1798 * Setup the EMTs.
1799 */
1800 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1801 {
1802 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
1803
1804 int rc = VMR3ReqCallWait(pVM, idCpu, (PFNRT)nemR3DarwinNativeInitVCpuOnEmt, 3, pVM, pVCpu, idCpu);
1805 if (RT_FAILURE(rc))
1806 {
1807 /* Rollback. */
1808 while (idCpu--)
1809 VMR3ReqCallWait(pVM, idCpu, (PFNRT)nemR3DarwinNativeTermVCpuOnEmt, 1, pVM->apCpusR3[idCpu]);
1810
1811 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS, "Call to hv_vcpu_create failed: %Rrc", rc);
1812 }
1813 }
1814
1815 pVM->nem.s.fCreatedEmts = true;
1816
1817 //CPUMR3ClearGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
1818 return VINF_SUCCESS;
1819}
1820
1821
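/**
 * Called when a VM initialization phase has completed (VMINITCOMPLETED_XXX).
 *
 * Nothing to do here yet for the Hypervisor.framework backend.
 */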
1822int nemR3NativeInitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
1823{
1824 NOREF(pVM); NOREF(enmWhat);
1825 return VINF_SUCCESS;
1826}
1827
1828
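/**
 * Tears down the native API: destroys all vCPUs and then the Hypervisor.framework VM.
 *
 * @returns VBox status code.
 * @param pVM The cross context VM structure.
 */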
1829int nemR3NativeTerm(PVM pVM)
1830{
1831 /*
1832 * Delete the VM.
1833 */
1834
1835 for (VMCPUID idCpu = pVM->cCpus - 1; idCpu > 0; idCpu--)
1836 {
1837 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
1838
1839 /*
1840 * Apple's documentation states that the vCPU should be destroyed
1841 * on the thread running the vCPU but as all the other EMTs are gone
1842 * at this point, destroying the VM would hang.
1843 *
1844 * We seem to be in luck here though, as destroying apparently works
1845 * from EMT(0) as well.
1846 */
1847 hv_return_t hrc = hv_vcpu_destroy(pVCpu->nem.s.hVCpuId);
1848 Assert(hrc == HV_SUCCESS);
1849 }
1850
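 /* Finally destroy the EMT(0) vCPU (see the note above about doing all of this from one thread). */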
1851 hv_vcpu_destroy(pVM->apCpusR3[0]->nem.s.hVCpuId);
1852 pVM->nem.s.fCreatedEmts = false;
1853
1854 if (pVM->nem.s.fCreatedVm)
1855 {
1856 hv_return_t hrc = hv_vm_destroy();
1857 if (hrc != HV_SUCCESS)
1858 LogRel(("NEM: hv_vm_destroy() failed with %#x\n", hrc));
1859
1860 pVM->nem.s.fCreatedVm = false;
1861 }
1862 return VINF_SUCCESS;
1863}
1864
1865
1866/**
1867 * VM reset notification.
1868 *
1869 * @param pVM The cross context VM structure.
1870 */
1871void nemR3NativeReset(PVM pVM)
1872{
1873 RT_NOREF(pVM);
1874}
1875
1876
1877/**
1878 * Reset CPU due to INIT IPI or hot (un)plugging.
1879 *
1880 * @param pVCpu The cross context virtual CPU structure of the CPU being
1881 * reset.
1882 * @param fInitIpi Whether this is the INIT IPI or hot (un)plugging case.
1883 */
1884void nemR3NativeResetCpu(PVMCPU pVCpu, bool fInitIpi)
1885{
1886 RT_NOREF(pVCpu, fInitIpi);
1887}
1888
1889
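/**
 * The NEM run loop: executes the guest via hv_vcpu_run() and handles exits until
 * something requires returning to the outer EM loop.
 *
 * @returns Strict VBox status code.
 * @param pVM The cross context VM structure.
 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
 */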
1890VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu)
1891{
1892 LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 <=\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags));
1893#ifdef LOG_ENABLED
1894 if (LogIs3Enabled())
1895 nemR3DarwinLogState(pVM, pVCpu);
1896#endif
1897
1898 /*
1899 * Try switch to NEM runloop state.
1900 */
1901 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
1902 { /* likely */ }
1903 else
1904 {
1905 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
1906 LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
1907 return VINF_SUCCESS;
1908 }
1909
1910 /*
1911 * The run loop.
1912 *
1913 * Current approach to state updating to use the sledgehammer and sync
1914 * everything every time. This will be optimized later.
1915 */
1916 const bool fSingleStepping = DBGFIsStepping(pVCpu);
1917 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1918 for (unsigned iLoop = 0;; iLoop++)
1919 {
1920 /*
1921 * Check and process force flag actions, some of which might require us to go back to ring-3.
1922 */
1923 rcStrict = vmxHCCheckForceFlags(pVCpu, false /*fIsNestedGuest*/, fSingleStepping);
1924 if (rcStrict == VINF_SUCCESS)
1925 { /*likely */ }
1926 else
1927 break;
1928
1929 /*
1930 * Evaluate events to be injected into the guest.
1931 *
1932 * Events in TRPM can be injected without inspecting the guest state.
1933 * If any new events (interrupts/NMI) are pending currently, we try to set up the
1934 * guest to cause a VM-exit the next time they are ready to receive the event.
1935 */
1936 if (TRPMHasTrap(pVCpu))
1937 vmxHCTrpmTrapToPendingEvent(pVCpu);
1938
1939 uint32_t fIntrState;
1940 rcStrict = vmxHCEvaluatePendingEvent(pVCpu, &pVCpu->nem.s.VmcsInfo, false /*fIsNestedGuest*/, &fIntrState);
1941
1942 /*
1943 * Event injection may take locks (currently the PGM lock for real-on-v86 case) and thus
1944 * needs to be done with longjmps or interrupts + preemption enabled. Event injection might
1945 * also result in triple-faulting the VM.
1946 *
1947 * With nested-guests, the above does not apply since unrestricted guest execution is a
1948 * requirement. Regardless, we do this here to avoid duplicating code elsewhere.
1949 */
1950 rcStrict = vmxHCInjectPendingEvent(pVCpu, &pVCpu->nem.s.VmcsInfo, false /*fIsNestedGuest*/, fIntrState, fSingleStepping);
1951 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1952 { /* likely */ }
1953 else
1954 {
1955 AssertMsg(rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fSingleStepping),
1956 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1957 break;
1958 }
1959
1960 /** @todo Only copy the state selectively. */
1961 {
1962 int rc2 = nemR3DarwinCopyStateToHv(pVM, pVCpu);
1963 AssertRCReturn(rc2, rc2);
1964 }
1965
1966 /*
1967 * Poll timers and run for a bit.
1968 */
1969 /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
1970 * the whole polling job when timers have changed... */
1971 uint64_t offDeltaIgnored;
1972 uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
1973 if ( !VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
1974 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
1975 {
1976 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_WAIT, VMCPUSTATE_STARTED_EXEC_NEM))
1977 {
1978 LogFlowFunc(("Running vCPU\n"));
1979 pVCpu->nem.s.Event.fPending = false;
1980 hv_return_t hrc = hv_vcpu_run(pVCpu->nem.s.hVCpuId); /** @todo Use hv_vcpu_run_until() when available (11.0+). */
1981 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
1982 if (hrc == HV_SUCCESS)
1983 {
1984 /*
1985 * Deal with the message.
1986 */
1987 rcStrict = nemR3DarwinHandleExit(pVM, pVCpu);
1988 if (rcStrict == VINF_SUCCESS)
1989 { /* hopefully likely */ }
1990 else
1991 {
1992 LogFlow(("NEM/%u: breaking: nemR3DarwinHandleExit -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
1993 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
1994 break;
1995 }
1996 }
1997 else
1998 {
1999 AssertLogRelMsgFailedReturn(("hv_vcpu_run failed for CPU #%u: %#x\n", pVCpu->idCpu, hrc),
2000 VERR_NEM_IPE_0);
2001 }
2002
2003 /*
2004 * If no relevant FFs are pending, loop.
2005 */
2006 if ( !VM_FF_IS_ANY_SET( pVM, !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
2007 && !VMCPU_FF_IS_ANY_SET(pVCpu, !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
2008 continue;
2009
2010 /** @todo Try handle pending flags, not just return to EM loops. Take care
2011 * not to set important RCs here unless we've handled a message. */
2012 LogFlow(("NEM/%u: breaking: pending FF (%#x / %#RX64)\n",
2013 pVCpu->idCpu, pVM->fGlobalForcedActions, (uint64_t)pVCpu->fLocalForcedActions));
2014 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPost);
2015 }
2016 else
2017 {
2018 LogFlow(("NEM/%u: breaking: canceled %d (pre exec)\n", pVCpu->idCpu, VMCPU_GET_STATE(pVCpu) ));
2019 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnCancel);
2020 }
2021 }
2022 else
2023 {
2024 LogFlow(("NEM/%u: breaking: pending FF (pre exec)\n", pVCpu->idCpu));
2025 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPre);
2026 }
2027 break;
2028 } /* the run loop */
2029
2030
2031 /*
2032 * Convert any pending HM events back to TRPM due to premature exits.
2033 *
2034 * This is because execution may continue from IEM and we would need to inject
2035 * the event from there (hence place it back in TRPM).
2036 */
2037 if (pVCpu->nem.s.Event.fPending)
2038 {
2039 vmxHCPendingEventToTrpmTrap(pVCpu);
2040 Assert(!pVCpu->nem.s.Event.fPending);
2041
2042 /* Clear the events from the VMCS. */
2043 int rc = nemR3DarwinWriteVmcs32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, 0); AssertRC(rc);
2044 rc = nemR3DarwinWriteVmcs64(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, 0); AssertRC(rc);
2045 }
2046
2047
2048 if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
2049 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
2050
2051 if (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)))
2052 {
2053 /* Try anticipate what we might need. */
2054 uint64_t fImport = IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI;
2055 if ( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
2056 || RT_FAILURE(rcStrict))
2057 fImport = CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT);
2058 else if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_APIC
2059 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
2060 fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;
2061
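 /* For now we always import everything (the sledgehammer approach mentioned in the
    run loop comment above); this overrides the selective fImport computed above. */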
2062 fImport = CPUMCTX_EXTRN_ALL;
2063 if (pVCpu->cpum.GstCtx.fExtrn & fImport)
2064 {
2065 /* Only import what is external currently. */
2066 int rc2 = nemR3DarwinCopyStateFromHv(pVM, pVCpu, fImport | CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT);
2067 if (RT_SUCCESS(rc2))
2068 pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
2069 else if (RT_SUCCESS(rcStrict))
2070 rcStrict = rc2;
2071 if (!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT))))
2072 pVCpu->cpum.GstCtx.fExtrn = 0;
2073 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
2074 }
2075 else
2076 {
2077 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
2078 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
2079 }
2080 }
2081 else
2082 {
2083 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
2084 pVCpu->cpum.GstCtx.fExtrn = 0;
2085 }
2086
2087 LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 => %Rrc\n",
2088 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, VBOXSTRICTRC_VAL(rcStrict) ));
2089 return rcStrict;
2090}
2091
2092
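/**
 * Checks whether NEM can execute the guest in its current state.
 *
 * Currently the only requirement is that the A20 gate is enabled.
 *
 * @returns true if the guest can be executed, false otherwise.
 * @param pVM The cross context VM structure.
 * @param pVCpu The cross context virtual CPU structure.
 */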
2093VMMR3_INT_DECL(bool) NEMR3CanExecuteGuest(PVM pVM, PVMCPU pVCpu)
2094{
2095 NOREF(pVM);
2096 return PGMPhysIsA20Enabled(pVCpu);
2097}
2098
2099
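/**
 * Enables or disables single-instruction execution mode.
 *
 * Not supported by this backend, so we always indicate failure.
 */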
2100bool nemR3NativeSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
2101{
2102 NOREF(pVM); NOREF(pVCpu); NOREF(fEnable);
2103 return false;
2104}
2105
2106
2107/**
2108 * Forced flag notification call from VMEmt.h.
2109 *
2110 * This is only called when pVCpu is in the VMCPUSTATE_STARTED_EXEC_NEM state.
2111 *
2112 * @param pVM The cross context VM structure.
2113 * @param pVCpu The cross context virtual CPU structure of the CPU
2114 * to be notified.
2115 * @param fFlags Notification flags, VMNOTIFYFF_FLAGS_XXX.
2116 */
2117void nemR3NativeNotifyFF(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
2118{
2119 LogFlowFunc(("pVM=%p pVCpu=%p fFlags=%#x\n", pVM, pVCpu, fFlags));
2120
2121 hv_return_t hrc = hv_vcpu_interrupt(&pVCpu->nem.s.hVCpuId, 1);
2122 if (hrc != HV_SUCCESS)
2123 LogRel(("NEM: hv_vcpu_interrupt(%u, 1) failed with %#x\n", pVCpu->nem.s.hVCpuId, hrc));
2124}
2125
2126
2127VMMR3_INT_DECL(int) NEMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvR3,
2128 uint8_t *pu2State, uint32_t *puNemRange)
2129{
2130 RT_NOREF(pVM, puNemRange);
2131
2132 Log5(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p\n", GCPhys, cb, pvR3));
2133#if defined(VBOX_WITH_PGM_NEM_MODE)
2134 if (pvR3)
2135 {
2136 int rc = nemR3DarwinMap(GCPhys, pvR3, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE);
2137 if (RT_SUCCESS(rc))
2138 *pu2State = NEM_DARWIN_PAGE_STATE_WRITABLE;
2139 else
2140 {
2141 LogRel(("NEMR3NotifyPhysRamRegister: GCPhys=%RGp LB %RGp pvR3=%p rc=%Rrc\n", GCPhys, cb, pvR3, rc));
2142 return VERR_NEM_MAP_PAGES_FAILED;
2143 }
2144 }
2145 return VINF_SUCCESS;
2146#else
2147 RT_NOREF(pVM, GCPhys, cb, pvR3);
2148 return VERR_NEM_MAP_PAGES_FAILED;
2149#endif
2150}
2151
2152
2153VMMR3_INT_DECL(bool) NEMR3IsMmio2DirtyPageTrackingSupported(PVM pVM)
2154{
2155 RT_NOREF(pVM);
2156 return false;
2157}
2158
2159
2160VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
2161 void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
2162{
2163 RT_NOREF(pVM, puNemRange);
2164
2165 Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p (%d)\n",
2166 GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, *pu2State));
2167
2168#if defined(VBOX_WITH_PGM_NEM_MODE)
2169 /*
2170 * Unmap the RAM we're replacing.
2171 */
2172 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
2173 {
2174 int rc = nemR3DarwinUnmap(GCPhys, cb);
2175 if (RT_SUCCESS(rc))
2176 { /* likely */ }
2177 else if (pvMmio2)
2178 LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rc(ignored)\n",
2179 GCPhys, cb, fFlags, rc));
2180 else
2181 {
2182 LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc\n",
2183 GCPhys, cb, fFlags, rc));
2184 return VERR_NEM_UNMAP_PAGES_FAILED;
2185 }
2186 }
2187
2188 /*
2189 * Map MMIO2 if any.
2190 */
2191 if (pvMmio2)
2192 {
2193 Assert(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2);
2194 int rc = nemR3DarwinMap(GCPhys, pvMmio2, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE);
2195 if (RT_SUCCESS(rc))
2196 *pu2State = NEM_DARWIN_PAGE_STATE_WRITABLE;
2197 else
2198 {
2199 LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x pvMmio2=%p: Map -> rc=%Rrc\n",
2200 GCPhys, cb, fFlags, pvMmio2, rc));
2201 return VERR_NEM_MAP_PAGES_FAILED;
2202 }
2203 }
2204 else
2205 {
2206 Assert(!(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2));
2207 *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
2208 }
2209
2210#else
2211 RT_NOREF(pVM, GCPhys, cb, pvRam, pvMmio2);
2212 *pu2State = (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE) ? UINT8_MAX : NEM_DARWIN_PAGE_STATE_UNMAPPED;
2213#endif
2214 return VINF_SUCCESS;
2215}
2216
2217
2218VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
2219 void *pvRam, void *pvMmio2, uint32_t *puNemRange)
2220{
2221 RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, puNemRange);
2222 return VINF_SUCCESS;
2223}
2224
2225
2226VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExUnmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, void *pvRam,
2227 void *pvMmio2, uint8_t *pu2State)
2228{
2229 RT_NOREF(pVM);
2230
2231 Log5(("NEMR3NotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p\n",
2232 GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State));
2233
2234 int rc = VINF_SUCCESS;
2235#if defined(VBOX_WITH_PGM_NEM_MODE)
2236 /*
2237 * Unmap the MMIO2 pages.
2238 */
2239 /** @todo If we implement aliasing (MMIO2 page aliased into MMIO range),
2240 * we may have more stuff to unmap even in case of pure MMIO... */
2241 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2)
2242 {
2243 rc = nemR3DarwinUnmap(GCPhys, cb);
2244 if (RT_FAILURE(rc))
2245 {
2246 LogRel2(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc\n",
2247 GCPhys, cb, fFlags, rc));
2248 rc = VERR_NEM_UNMAP_PAGES_FAILED;
2249 }
2250 }
2251
2252 /*
2253 * Restore the RAM we replaced.
2254 */
2255 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
2256 {
2257 AssertPtr(pvRam);
2258 rc = nemR3DarwinMap(GCPhys, pvRam, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE);
2259 if (RT_SUCCESS(rc))
2260 { /* likely */ }
2261 else
2262 {
2263 LogRel(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp pvMmio2=%p rc=%Rrc\n", GCPhys, cb, pvMmio2, rc));
2264 rc = VERR_NEM_MAP_PAGES_FAILED;
2265 }
2266 if (pu2State)
2267 *pu2State = NEM_DARWIN_PAGE_STATE_WRITABLE;
2268 }
2269 /* Mark the pages as unmapped if relevant. */
2270 else if (pu2State)
2271 *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
2272
2273 RT_NOREF(pvMmio2);
2274#else
2275 RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State);
2276 if (pu2State)
2277 *pu2State = UINT8_MAX;
2278 rc = VERR_NEM_UNMAP_PAGES_FAILED;
2279#endif
2280 return rc;
2281}
2282
2283
2284VMMR3_INT_DECL(int) NEMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t uNemRange,
2285 void *pvBitmap, size_t cbBitmap)
2286{
2287 RT_NOREF(pVM, GCPhys, cb, uNemRange, pvBitmap, cbBitmap);
2288 AssertFailed();
2289 return VERR_NOT_IMPLEMENTED;
2290}
2291
2292
2293VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages, uint32_t fFlags,
2294 uint8_t *pu2State)
2295{
2296 RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags);
2297
2298 Log5(("nemR3NativeNotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags));
2299 *pu2State = UINT8_MAX;
2300 return VINF_SUCCESS;
2301}
2302
2303
2304VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages,
2305 uint32_t fFlags, uint8_t *pu2State)
2306{
2307 Log5(("nemR3NativeNotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p\n",
2308 GCPhys, cb, pvPages, fFlags, pu2State));
2309 *pu2State = UINT8_MAX;
2310
2311#if defined(VBOX_WITH_PGM_NEM_MODE)
2312 /*
2313 * (Re-)map readonly.
2314 */
2315 AssertPtrReturn(pvPages, VERR_INVALID_POINTER);
2316 int rc = nemR3DarwinMap(GCPhys, pvPages, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE);
2317 if (RT_SUCCESS(rc))
2318 *pu2State = NEM_DARWIN_PAGE_STATE_READABLE;
2319 else
2320 {
2321 LogRel(("nemR3NativeNotifyPhysRomRegisterLate: GCPhys=%RGp LB %RGp pvPages=%p fFlags=%#x rc=%Rrc\n",
2322 GCPhys, cb, pvPages, fFlags, rc));
2323 return VERR_NEM_MAP_PAGES_FAILED;
2324 }
2325 RT_NOREF(pVM, fFlags);
2326 return VINF_SUCCESS;
2327#else
2328 RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags);
2329 return VERR_NEM_MAP_PAGES_FAILED;
2330#endif
2331}
2332
2333
2334VMM_INT_DECL(void) NEMHCNotifyHandlerPhysicalDeregister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
2335 RTR3PTR pvMemR3, uint8_t *pu2State)
2336{
2337 RT_NOREF(pVM);
2338
2339 Log5(("NEMHCNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d pvMemR3=%p pu2State=%p (%d)\n",
2340 GCPhys, cb, enmKind, pvMemR3, pu2State, *pu2State));
2341
2342 *pu2State = UINT8_MAX;
2343#if defined(VBOX_WITH_PGM_NEM_MODE)
2344 if (pvMemR3)
2345 {
2346 int rc = nemR3DarwinMap(GCPhys, pvMemR3, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE);
2347 if (RT_SUCCESS(rc))
2348 *pu2State = NEM_DARWIN_PAGE_STATE_WRITABLE;
2349 else
2350 AssertLogRelMsgFailed(("NEMHCNotifyHandlerPhysicalDeregister: nemR3DarwinMap(,%p,%RGp,%RGp,) -> %Rrc\n",
2351 pvMemR3, GCPhys, cb, rc));
2352 }
2353 RT_NOREF(enmKind);
2354#else
2355 RT_NOREF(pVM, enmKind, GCPhys, cb, pvMemR3);
2356 AssertFailed();
2357#endif
2358}
2359
2360
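/**
 * Unmaps a single guest page from the hypervisor if it is currently mapped and
 * updates the page state tracking accordingly.
 *
 * @returns VBox status code.
 * @param pVM The cross context VM structure.
 * @param GCPhysDst The guest physical address of the page to unmap.
 * @param pu2State The page state variable to update.
 */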
2361static int nemHCJustUnmapPage(PVMCC pVM, RTGCPHYS GCPhysDst, uint8_t *pu2State)
2362{
2363 if (*pu2State <= NEM_DARWIN_PAGE_STATE_UNMAPPED)
2364 {
2365 Log5(("nemHCJustUnmapPage: %RGp == unmapped\n", GCPhysDst));
2366 *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
2367 return VINF_SUCCESS;
2368 }
2369
2370 int rc = nemR3DarwinUnmap(GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, X86_PAGE_SIZE);
2371 if (RT_SUCCESS(rc))
2372 {
2373 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
2374 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
2375 *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
2376 Log5(("nemHCJustUnmapPage: %RGp => unmapped (total %u)\n", GCPhysDst, cMappedPages));
2377 return VINF_SUCCESS;
2378 }
2379 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
2380 LogRel(("nemHCJustUnmapPage(%RGp): failed! rc=%Rrc\n",
2381 GCPhysDst, rc));
2382 return VERR_NEM_IPE_6;
2383}
2384
2385
2386/**
2387 * Called when the A20 state changes.
2388 *
2389 * @param pVCpu The CPU the A20 state changed on.
2390 * @param fEnabled Whether it was enabled (true) or disabled.
2391 */
2392VMMR3_INT_DECL(void) NEMR3NotifySetA20(PVMCPU pVCpu, bool fEnabled)
2393{
2394 Log(("NEMR3NotifySetA20: fEnabled=%RTbool\n", fEnabled));
2395 RT_NOREF(pVCpu, fEnabled);
2396}
2397
2398
2399void nemHCNativeNotifyHandlerPhysicalRegister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
2400{
2401 Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
2402 NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb);
2403}
2404
2405
2406void nemHCNativeNotifyHandlerPhysicalModify(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
2407 RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
2408{
2409 Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
2410 GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
2411 NOREF(pVM); NOREF(enmKind); NOREF(GCPhysOld); NOREF(GCPhysNew); NOREF(cb); NOREF(fRestoreAsRAM);
2412}
2413
2414
2415int nemHCNativeNotifyPhysPageAllocated(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
2416 PGMPAGETYPE enmType, uint8_t *pu2State)
2417{
2418 Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
2419 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
2420 RT_NOREF_PV(HCPhys); RT_NOREF_PV(enmType);
2421
2422 return nemHCJustUnmapPage(pVM, GCPhys, pu2State);
2423}
2424
2425
2426VMM_INT_DECL(void) NEMHCNotifyPhysPageProtChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, RTR3PTR pvR3, uint32_t fPageProt,
2427 PGMPAGETYPE enmType, uint8_t *pu2State)
2428{
2429 Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
2430 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
2431 RT_NOREF(HCPhys, pvR3, fPageProt, enmType);
2432
2433 nemHCJustUnmapPage(pVM, GCPhys, pu2State);
2434}
2435
2436
2437VMM_INT_DECL(void) NEMHCNotifyPhysPageChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
2438 RTR3PTR pvNewR3, uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
2439{
2440 Log5(("NEMHCNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
2441 GCPhys, HCPhysPrev, HCPhysNew, fPageProt, enmType, *pu2State));
2442 RT_NOREF(HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType);
2443
2444 nemHCJustUnmapPage(pVM, GCPhys, pu2State);
2445}
2446
2447
2448/**
2449 * Interface for importing state on demand (used by IEM).
2450 *
2451 * @returns VBox status code.
2452 * @param pVCpu The cross context CPU structure.
2453 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
2454 */
2455VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
2456{
2457 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);
2458
2459 return nemR3DarwinCopyStateFromHv(pVCpu->pVMR3, pVCpu, fWhat);
2460}
2461
2462
2463/**
2464 * Query the CPU tick counter and optionally the TSC_AUX MSR value.
2465 *
2466 * @returns VBox status code.
2467 * @param pVCpu The cross context CPU structure.
2468 * @param pcTicks Where to return the CPU tick count.
2469 * @param puAux Where to return the TSC_AUX register value.
2470 */
2471VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPUCC pVCpu, uint64_t *pcTicks, uint32_t *puAux)
2472{
2473 LogFlowFunc(("pVCpu=%p pcTicks=%RX64 puAux=%RX32\n", pVCpu, pcTicks, puAux));
2474 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick);
2475
2476 RT_NOREF(pVCpu, pcTicks, puAux);
2477 return VINF_SUCCESS;
2478}
2479
2480
2481/**
2482 * Resumes CPU clock (TSC) on all virtual CPUs.
2483 *
2484 * This is called by TM when the VM is started, restored, resumed or similar.
2485 *
2486 * @returns VBox status code.
2487 * @param pVM The cross context VM structure.
2488 * @param pVCpu The cross context CPU structure of the calling EMT.
2489 * @param uPausedTscValue The TSC value at the time of pausing.
2490 */
2491VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uPausedTscValue)
2492{
2493 LogFlowFunc(("pVM=%p pVCpu=%p uPausedTscValue=%RX64\n", pVCpu, uPausedTscValue));
2494 VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
2495 AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);
2496
2497 RT_NOREF(uPausedTscValue);
2498 return VINF_SUCCESS;
2499}
2500
2501
2502/** @page pg_nem_darwin NEM/darwin - Native Execution Manager, macOS.
2503 *
2504 * @todo Add notes as the implementation progresses...
2505 */
2506