VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/NEMR3Native-darwin.cpp@93368

Last change on this file since 93368 was 93218, checked in by vboxsync, 3 years ago

VMM/NEMR3Native-darwin: Straighten out the runloop a bit, no need to check for FFs twice (already done in vmxHCCheckForceFlags()), move the timer polling up before the for loop as 32bit guests using the standard PIT and PIC seem to become very unresponsive due to excessive timer handling, bugref:9044

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 127.7 KB
1/* $Id: NEMR3Native-darwin.cpp 93218 2022-01-13 10:31:55Z vboxsync $ */
2/** @file
3 * NEM - Native execution manager, native ring-3 macOS backend using Hypervisor.framework.
4 *
5 * Log group 2: Exit logging.
6 * Log group 3: Log context on exit.
7 * Log group 5: Ring-3 memory management
8 */
9
10/*
11 * Copyright (C) 2020-2022 Oracle Corporation
12 *
13 * This file is part of VirtualBox Open Source Edition (OSE), as
14 * available from http://www.virtualbox.org. This file is free software;
15 * you can redistribute it and/or modify it under the terms of the GNU
16 * General Public License (GPL) as published by the Free Software
17 * Foundation, in version 2 as it comes in the "COPYING" file of the
18 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
19 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
20 */
21
22
23/*********************************************************************************************************************************
24* Header Files *
25*********************************************************************************************************************************/
26#define LOG_GROUP LOG_GROUP_NEM
27#define VMCPU_INCL_CPUM_GST_CTX
28#include <VBox/vmm/nem.h>
29#include <VBox/vmm/iem.h>
30#include <VBox/vmm/em.h>
31#include <VBox/vmm/apic.h>
32#include <VBox/vmm/pdm.h>
33#include <VBox/vmm/hm.h>
34#include <VBox/vmm/hm_vmx.h>
35#include <VBox/vmm/dbgftrace.h>
36#include "VMXInternal.h"
37#include "NEMInternal.h"
38#include <VBox/vmm/vmcc.h>
39#include "dtrace/VBoxVMM.h"
40
41#include <iprt/asm.h>
42#include <iprt/ldr.h>
43#include <iprt/mem.h>
44#include <iprt/path.h>
45#include <iprt/string.h>
46#include <iprt/system.h>
47#include <iprt/utf16.h>
48
49
50/*********************************************************************************************************************************
51* Defined Constants And Macros *
52*********************************************************************************************************************************/
53/* No nested hwvirt (for now). */
54#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
55# undef VBOX_WITH_NESTED_HWVIRT_VMX
56#endif
57
58
59/** @name HV return codes.
60 * @{ */
61/** Operation was successful. */
62#define HV_SUCCESS 0
63/** An error occurred during operation. */
64#define HV_ERROR 0xfae94001
65/** The operation could not be completed right now, try again. */
66#define HV_BUSY 0xfae94002
67/** One of the parameters passed is invalid. */
68#define HV_BAD_ARGUMENT 0xfae94003
69/** Not enough resources left to fulfill the operation. */
70#define HV_NO_RESOURCES 0xfae94005
71/** The device could not be found. */
72#define HV_NO_DEVICE 0xfae94006
73/** The operation is not supported on this platform with this configuration. */
74#define HV_UNSUPPORTED 0xfae94007
75/** @} */
76
77
78/** @name HV memory protection flags.
79 * @{ */
80/** Memory is readable. */
81#define HV_MEMORY_READ RT_BIT_64(0)
82/** Memory is writeable. */
83#define HV_MEMORY_WRITE RT_BIT_64(1)
84/** Memory is executable. */
85#define HV_MEMORY_EXEC RT_BIT_64(2)
86/** @} */
87
88
89/** @name HV shadow VMCS protection flags.
90 * @{ */
91/** Shadow VMCS field is not accessible. */
92#define HV_SHADOW_VMCS_NONE 0
93/** Shadow VMCS field is readable. */
94#define HV_SHADOW_VMCS_READ RT_BIT_64(0)
95/** Shadow VMCS field is writeable. */
96#define HV_SHADOW_VMCS_WRITE RT_BIT_64(1)
97/** @} */
98
99
100/** Default VM creation flags. */
101#define HV_VM_DEFAULT 0
102/** Default guest address space creation flags. */
103#define HV_VM_SPACE_DEFAULT 0
104/** Default vCPU creation flags. */
105#define HV_VCPU_DEFAULT 0
106
107#define HV_DEADLINE_FOREVER UINT64_MAX
108
109
110/*********************************************************************************************************************************
111* Structures and Typedefs *
112*********************************************************************************************************************************/
113
114/** HV return code type. */
115typedef uint32_t hv_return_t;
116/** HV capability bitmask. */
117typedef uint64_t hv_capability_t;
118/** Option bitmask type when creating a VM. */
119typedef uint64_t hv_vm_options_t;
120/** Option bitmask when creating a vCPU. */
121typedef uint64_t hv_vcpu_options_t;
122/** HV memory protection flags type. */
123typedef uint64_t hv_memory_flags_t;
124/** Shadow VMCS protection flags. */
125typedef uint64_t hv_shadow_flags_t;
126/** Guest physical address type. */
127typedef uint64_t hv_gpaddr_t;
128
129
130/**
131 * VMX Capability enumeration.
132 */
133typedef enum
134{
135 HV_VMX_CAP_PINBASED = 0,
136 HV_VMX_CAP_PROCBASED,
137 HV_VMX_CAP_PROCBASED2,
138 HV_VMX_CAP_ENTRY,
139 HV_VMX_CAP_EXIT,
140 HV_VMX_CAP_BASIC, /* Since 11.0 */
141 HV_VMX_CAP_TRUE_PINBASED, /* Since 11.0 */
142 HV_VMX_CAP_TRUE_PROCBASED, /* Since 11.0 */
143 HV_VMX_CAP_TRUE_ENTRY, /* Since 11.0 */
144 HV_VMX_CAP_TRUE_EXIT, /* Since 11.0 */
145 HV_VMX_CAP_MISC, /* Since 11.0 */
146 HV_VMX_CAP_CR0_FIXED0, /* Since 11.0 */
147 HV_VMX_CAP_CR0_FIXED1, /* Since 11.0 */
148 HV_VMX_CAP_CR4_FIXED0, /* Since 11.0 */
149 HV_VMX_CAP_CR4_FIXED1, /* Since 11.0 */
150 HV_VMX_CAP_VMCS_ENUM, /* Since 11.0 */
151 HV_VMX_CAP_EPT_VPID_CAP, /* Since 11.0 */
152 HV_VMX_CAP_PREEMPTION_TIMER = 32
153} hv_vmx_capability_t;
154
155
156/**
157 * HV x86 register enumeration.
158 */
159typedef enum
160{
161 HV_X86_RIP = 0,
162 HV_X86_RFLAGS,
163 HV_X86_RAX,
164 HV_X86_RCX,
165 HV_X86_RDX,
166 HV_X86_RBX,
167 HV_X86_RSI,
168 HV_X86_RDI,
169 HV_X86_RSP,
170 HV_X86_RBP,
171 HV_X86_R8,
172 HV_X86_R9,
173 HV_X86_R10,
174 HV_X86_R11,
175 HV_X86_R12,
176 HV_X86_R13,
177 HV_X86_R14,
178 HV_X86_R15,
179 HV_X86_CS,
180 HV_X86_SS,
181 HV_X86_DS,
182 HV_X86_ES,
183 HV_X86_FS,
184 HV_X86_GS,
185 HV_X86_IDT_BASE,
186 HV_X86_IDT_LIMIT,
187 HV_X86_GDT_BASE,
188 HV_X86_GDT_LIMIT,
189 HV_X86_LDTR,
190 HV_X86_LDT_BASE,
191 HV_X86_LDT_LIMIT,
192 HV_X86_LDT_AR,
193 HV_X86_TR,
194 HV_X86_TSS_BASE,
195 HV_X86_TSS_LIMIT,
196 HV_X86_TSS_AR,
197 HV_X86_CR0,
198 HV_X86_CR1,
199 HV_X86_CR2,
200 HV_X86_CR3,
201 HV_X86_CR4,
202 HV_X86_DR0,
203 HV_X86_DR1,
204 HV_X86_DR2,
205 HV_X86_DR3,
206 HV_X86_DR4,
207 HV_X86_DR5,
208 HV_X86_DR6,
209 HV_X86_DR7,
210 HV_X86_TPR,
211 HV_X86_XCR0,
212 HV_X86_REGISTERS_MAX
213} hv_x86_reg_t;
214
215
216typedef hv_return_t FN_HV_CAPABILITY(hv_capability_t capability, uint64_t *value);
217typedef hv_return_t FN_HV_VM_CREATE(hv_vm_options_t flags);
218typedef hv_return_t FN_HV_VM_DESTROY(void);
219typedef hv_return_t FN_HV_VM_SPACE_CREATE(hv_vm_space_t *asid);
220typedef hv_return_t FN_HV_VM_SPACE_DESTROY(hv_vm_space_t asid);
221typedef hv_return_t FN_HV_VM_MAP(const void *uva, hv_gpaddr_t gpa, size_t size, hv_memory_flags_t flags);
222typedef hv_return_t FN_HV_VM_UNMAP(hv_gpaddr_t gpa, size_t size);
223typedef hv_return_t FN_HV_VM_PROTECT(hv_gpaddr_t gpa, size_t size, hv_memory_flags_t flags);
224typedef hv_return_t FN_HV_VM_MAP_SPACE(hv_vm_space_t asid, const void *uva, hv_gpaddr_t gpa, size_t size, hv_memory_flags_t flags);
225typedef hv_return_t FN_HV_VM_UNMAP_SPACE(hv_vm_space_t asid, hv_gpaddr_t gpa, size_t size);
226typedef hv_return_t FN_HV_VM_PROTECT_SPACE(hv_vm_space_t asid, hv_gpaddr_t gpa, size_t size, hv_memory_flags_t flags);
227typedef hv_return_t FN_HV_VM_SYNC_TSC(uint64_t tsc);
228
229typedef hv_return_t FN_HV_VCPU_CREATE(hv_vcpuid_t *vcpu, hv_vcpu_options_t flags);
230typedef hv_return_t FN_HV_VCPU_DESTROY(hv_vcpuid_t vcpu);
231typedef hv_return_t FN_HV_VCPU_SET_SPACE(hv_vcpuid_t vcpu, hv_vm_space_t asid);
232typedef hv_return_t FN_HV_VCPU_READ_REGISTER(hv_vcpuid_t vcpu, hv_x86_reg_t reg, uint64_t *value);
233typedef hv_return_t FN_HV_VCPU_WRITE_REGISTER(hv_vcpuid_t vcpu, hv_x86_reg_t reg, uint64_t value);
234typedef hv_return_t FN_HV_VCPU_READ_FPSTATE(hv_vcpuid_t vcpu, void *buffer, size_t size);
235typedef hv_return_t FN_HV_VCPU_WRITE_FPSTATE(hv_vcpuid_t vcpu, const void *buffer, size_t size);
236typedef hv_return_t FN_HV_VCPU_ENABLE_NATIVE_MSR(hv_vcpuid_t vcpu, uint32_t msr, bool enable);
237typedef hv_return_t FN_HV_VCPU_READ_MSR(hv_vcpuid_t vcpu, uint32_t msr, uint64_t *value);
238typedef hv_return_t FN_HV_VCPU_WRITE_MSR(hv_vcpuid_t vcpu, uint32_t msr, uint64_t value);
239typedef hv_return_t FN_HV_VCPU_FLUSH(hv_vcpuid_t vcpu);
240typedef hv_return_t FN_HV_VCPU_INVALIDATE_TLB(hv_vcpuid_t vcpu);
241typedef hv_return_t FN_HV_VCPU_RUN(hv_vcpuid_t vcpu);
242typedef hv_return_t FN_HV_VCPU_RUN_UNTIL(hv_vcpuid_t vcpu, uint64_t deadline);
243typedef hv_return_t FN_HV_VCPU_INTERRUPT(hv_vcpuid_t *vcpus, unsigned int vcpu_count);
244typedef hv_return_t FN_HV_VCPU_GET_EXEC_TIME(hv_vcpuid_t *vcpus, uint64_t *time);
245
246typedef hv_return_t FN_HV_VMX_VCPU_READ_VMCS(hv_vcpuid_t vcpu, uint32_t field, uint64_t *value);
247typedef hv_return_t FN_HV_VMX_VCPU_WRITE_VMCS(hv_vcpuid_t vcpu, uint32_t field, uint64_t value);
248
249typedef hv_return_t FN_HV_VMX_VCPU_READ_SHADOW_VMCS(hv_vcpuid_t vcpu, uint32_t field, uint64_t *value);
250typedef hv_return_t FN_HV_VMX_VCPU_WRITE_SHADOW_VMCS(hv_vcpuid_t vcpu, uint32_t field, uint64_t value);
251typedef hv_return_t FN_HV_VMX_VCPU_SET_SHADOW_ACCESS(hv_vcpuid_t vcpu, uint32_t field, hv_shadow_flags_t flags);
252
253typedef hv_return_t FN_HV_VMX_READ_CAPABILITY(hv_vmx_capability_t field, uint64_t *value);
254typedef hv_return_t FN_HV_VMX_VCPU_SET_APIC_ADDRESS(hv_vcpuid_t vcpu, hv_gpaddr_t gpa);
255
256
257/*********************************************************************************************************************************
258* Global Variables *
259*********************************************************************************************************************************/
260/** NEM_DARWIN_PAGE_STATE_XXX names. */
261NEM_TMPL_STATIC const char * const g_apszPageStates[4] = { "not-set", "unmapped", "readable", "writable" };
262/** MSRs. */
263static SUPHWVIRTMSRS g_HmMsrs;
264/** VMX: Set if swapping EFER is supported. */
265static bool g_fHmVmxSupportsVmcsEfer = false;
266/** @name APIs imported from Hypervisor.framework.
267 * @{ */
268static FN_HV_CAPABILITY *g_pfnHvCapability = NULL; /* Since 10.15 */
269static FN_HV_VM_CREATE *g_pfnHvVmCreate = NULL; /* Since 10.10 */
270static FN_HV_VM_DESTROY *g_pfnHvVmDestroy = NULL; /* Since 10.10 */
271static FN_HV_VM_SPACE_CREATE *g_pfnHvVmSpaceCreate = NULL; /* Since 10.15 */
272static FN_HV_VM_SPACE_DESTROY *g_pfnHvVmSpaceDestroy = NULL; /* Since 10.15 */
273static FN_HV_VM_MAP *g_pfnHvVmMap = NULL; /* Since 10.10 */
274static FN_HV_VM_UNMAP *g_pfnHvVmUnmap = NULL; /* Since 10.10 */
275static FN_HV_VM_PROTECT *g_pfnHvVmProtect = NULL; /* Since 10.10 */
276static FN_HV_VM_MAP_SPACE *g_pfnHvVmMapSpace = NULL; /* Since 10.15 */
277static FN_HV_VM_UNMAP_SPACE *g_pfnHvVmUnmapSpace = NULL; /* Since 10.15 */
278static FN_HV_VM_PROTECT_SPACE *g_pfnHvVmProtectSpace = NULL; /* Since 10.15 */
279static FN_HV_VM_SYNC_TSC *g_pfnHvVmSyncTsc = NULL; /* Since 10.10 */
280
281static FN_HV_VCPU_CREATE *g_pfnHvVCpuCreate = NULL; /* Since 10.10 */
282static FN_HV_VCPU_DESTROY *g_pfnHvVCpuDestroy = NULL; /* Since 10.10 */
283static FN_HV_VCPU_SET_SPACE *g_pfnHvVCpuSetSpace = NULL; /* Since 10.15 */
284static FN_HV_VCPU_READ_REGISTER *g_pfnHvVCpuReadRegister = NULL; /* Since 10.10 */
285static FN_HV_VCPU_WRITE_REGISTER *g_pfnHvVCpuWriteRegister = NULL; /* Since 10.10 */
286static FN_HV_VCPU_READ_FPSTATE *g_pfnHvVCpuReadFpState = NULL; /* Since 10.10 */
287static FN_HV_VCPU_WRITE_FPSTATE *g_pfnHvVCpuWriteFpState = NULL; /* Since 10.10 */
288static FN_HV_VCPU_ENABLE_NATIVE_MSR *g_pfnHvVCpuEnableNativeMsr = NULL; /* Since 10.10 */
289static FN_HV_VCPU_READ_MSR *g_pfnHvVCpuReadMsr = NULL; /* Since 10.10 */
290static FN_HV_VCPU_WRITE_MSR *g_pfnHvVCpuWriteMsr = NULL; /* Since 10.10 */
291static FN_HV_VCPU_FLUSH *g_pfnHvVCpuFlush = NULL; /* Since 10.10 */
292static FN_HV_VCPU_INVALIDATE_TLB *g_pfnHvVCpuInvalidateTlb = NULL; /* Since 10.10 */
293static FN_HV_VCPU_RUN *g_pfnHvVCpuRun = NULL; /* Since 10.10 */
294static FN_HV_VCPU_RUN_UNTIL *g_pfnHvVCpuRunUntil = NULL; /* Since 10.15 */
295static FN_HV_VCPU_INTERRUPT *g_pfnHvVCpuInterrupt = NULL; /* Since 10.10 */
296static FN_HV_VCPU_GET_EXEC_TIME *g_pfnHvVCpuGetExecTime = NULL; /* Since 10.10 */
297
298static FN_HV_VMX_READ_CAPABILITY *g_pfnHvVmxReadCapability = NULL; /* Since 10.10 */
299static FN_HV_VMX_VCPU_READ_VMCS *g_pfnHvVmxVCpuReadVmcs = NULL; /* Since 10.10 */
300static FN_HV_VMX_VCPU_WRITE_VMCS *g_pfnHvVmxVCpuWriteVmcs = NULL; /* Since 10.10 */
301static FN_HV_VMX_VCPU_READ_SHADOW_VMCS *g_pfnHvVmxVCpuReadShadowVmcs = NULL; /* Since 10.15 */
302static FN_HV_VMX_VCPU_WRITE_SHADOW_VMCS *g_pfnHvVmxVCpuWriteShadowVmcs = NULL; /* Since 10.15 */
303static FN_HV_VMX_VCPU_SET_SHADOW_ACCESS *g_pfnHvVmxVCpuSetShadowAccess = NULL; /* Since 10.15 */
304static FN_HV_VMX_VCPU_SET_APIC_ADDRESS *g_pfnHvVmxVCpuSetApicAddress = NULL; /* Since 10.10 */
305/** @} */
306
307
308/**
309 * Import instructions.
310 */
311static const struct
312{
313 bool fOptional; /**< Set if import is optional. */
314 void **ppfn; /**< The function pointer variable. */
315 const char *pszName; /**< The function name. */
316} g_aImports[] =
317{
318#define NEM_DARWIN_IMPORT(a_fOptional, a_Pfn, a_Name) { (a_fOptional), (void **)&(a_Pfn), #a_Name }
319 NEM_DARWIN_IMPORT(true, g_pfnHvCapability, hv_capability),
320 NEM_DARWIN_IMPORT(false, g_pfnHvVmCreate, hv_vm_create),
321 NEM_DARWIN_IMPORT(false, g_pfnHvVmDestroy, hv_vm_destroy),
322 NEM_DARWIN_IMPORT(true, g_pfnHvVmSpaceCreate, hv_vm_space_create),
323 NEM_DARWIN_IMPORT(true, g_pfnHvVmSpaceDestroy, hv_vm_space_destroy),
324 NEM_DARWIN_IMPORT(false, g_pfnHvVmMap, hv_vm_map),
325 NEM_DARWIN_IMPORT(false, g_pfnHvVmUnmap, hv_vm_unmap),
326 NEM_DARWIN_IMPORT(false, g_pfnHvVmProtect, hv_vm_protect),
327 NEM_DARWIN_IMPORT(true, g_pfnHvVmMapSpace, hv_vm_map_space),
328 NEM_DARWIN_IMPORT(true, g_pfnHvVmUnmapSpace, hv_vm_unmap_space),
329 NEM_DARWIN_IMPORT(true, g_pfnHvVmProtectSpace, hv_vm_protect_space),
330 NEM_DARWIN_IMPORT(false, g_pfnHvVmSyncTsc, hv_vm_sync_tsc),
331
332 NEM_DARWIN_IMPORT(false, g_pfnHvVCpuCreate, hv_vcpu_create),
333 NEM_DARWIN_IMPORT(false, g_pfnHvVCpuDestroy, hv_vcpu_destroy),
334 NEM_DARWIN_IMPORT(true, g_pfnHvVCpuSetSpace, hv_vcpu_set_space),
335 NEM_DARWIN_IMPORT(false, g_pfnHvVCpuReadRegister, hv_vcpu_read_register),
336 NEM_DARWIN_IMPORT(false, g_pfnHvVCpuWriteRegister, hv_vcpu_write_register),
337 NEM_DARWIN_IMPORT(false, g_pfnHvVCpuReadFpState, hv_vcpu_read_fpstate),
338 NEM_DARWIN_IMPORT(false, g_pfnHvVCpuWriteFpState, hv_vcpu_write_fpstate),
339 NEM_DARWIN_IMPORT(false, g_pfnHvVCpuEnableNativeMsr, hv_vcpu_enable_native_msr),
340 NEM_DARWIN_IMPORT(false, g_pfnHvVCpuReadMsr, hv_vcpu_read_msr),
341 NEM_DARWIN_IMPORT(false, g_pfnHvVCpuWriteMsr, hv_vcpu_write_msr),
342 NEM_DARWIN_IMPORT(false, g_pfnHvVCpuFlush, hv_vcpu_flush),
343 NEM_DARWIN_IMPORT(false, g_pfnHvVCpuInvalidateTlb, hv_vcpu_invalidate_tlb),
344 NEM_DARWIN_IMPORT(false, g_pfnHvVCpuRun, hv_vcpu_run),
345 NEM_DARWIN_IMPORT(true, g_pfnHvVCpuRunUntil, hv_vcpu_run_until),
346 NEM_DARWIN_IMPORT(false, g_pfnHvVCpuInterrupt, hv_vcpu_interrupt),
347 NEM_DARWIN_IMPORT(true, g_pfnHvVCpuGetExecTime, hv_vcpu_get_exec_time),
348 NEM_DARWIN_IMPORT(false, g_pfnHvVmxReadCapability, hv_vmx_read_capability),
349 NEM_DARWIN_IMPORT(false, g_pfnHvVmxVCpuReadVmcs, hv_vmx_vcpu_read_vmcs),
350 NEM_DARWIN_IMPORT(false, g_pfnHvVmxVCpuWriteVmcs, hv_vmx_vcpu_write_vmcs),
351 NEM_DARWIN_IMPORT(true, g_pfnHvVmxVCpuReadShadowVmcs, hv_vmx_vcpu_read_shadow_vmcs),
352 NEM_DARWIN_IMPORT(true, g_pfnHvVmxVCpuWriteShadowVmcs, hv_vmx_vcpu_write_shadow_vmcs),
353 NEM_DARWIN_IMPORT(true, g_pfnHvVmxVCpuSetShadowAccess, hv_vmx_vcpu_set_shadow_access),
354 NEM_DARWIN_IMPORT(false, g_pfnHvVmxVCpuSetApicAddress, hv_vmx_vcpu_set_apic_address),
355#undef NEM_DARWIN_IMPORT
356};
357
358
359/*
 360 * Let the preprocessor alias the APIs to the import variables for better autocompletion.
361 */
362#ifndef IN_SLICKEDIT
363# define hv_capability g_pfnHvCapability
364# define hv_vm_create g_pfnHvVmCreate
365# define hv_vm_destroy g_pfnHvVmDestroy
366# define hv_vm_space_create g_pfnHvVmSpaceCreate
367# define hv_vm_space_destroy g_pfnHvVmSpaceDestroy
368# define hv_vm_map g_pfnHvVmMap
369# define hv_vm_unmap g_pfnHvVmUnmap
370# define hv_vm_protect g_pfnHvVmProtect
371# define hv_vm_map_space g_pfnHvVmMapSpace
372# define hv_vm_unmap_space g_pfnHvVmUnmapSpace
373# define hv_vm_protect_space g_pfnHvVmProtectSpace
374# define hv_vm_sync_tsc g_pfnHvVmSyncTsc
375
376# define hv_vcpu_create g_pfnHvVCpuCreate
377# define hv_vcpu_destroy g_pfnHvVCpuDestroy
378# define hv_vcpu_set_space g_pfnHvVCpuSetSpace
379# define hv_vcpu_read_register g_pfnHvVCpuReadRegister
380# define hv_vcpu_write_register g_pfnHvVCpuWriteRegister
381# define hv_vcpu_read_fpstate g_pfnHvVCpuReadFpState
382# define hv_vcpu_write_fpstate g_pfnHvVCpuWriteFpState
383# define hv_vcpu_enable_native_msr g_pfnHvVCpuEnableNativeMsr
384# define hv_vcpu_read_msr g_pfnHvVCpuReadMsr
385# define hv_vcpu_write_msr g_pfnHvVCpuWriteMsr
386# define hv_vcpu_flush g_pfnHvVCpuFlush
387# define hv_vcpu_invalidate_tlb g_pfnHvVCpuInvalidateTlb
388# define hv_vcpu_run g_pfnHvVCpuRun
389# define hv_vcpu_run_until g_pfnHvVCpuRunUntil
390# define hv_vcpu_interrupt g_pfnHvVCpuInterrupt
391# define hv_vcpu_get_exec_time g_pfnHvVCpuGetExecTime
392
393# define hv_vmx_read_capability g_pfnHvVmxReadCapability
394# define hv_vmx_vcpu_read_vmcs g_pfnHvVmxVCpuReadVmcs
395# define hv_vmx_vcpu_write_vmcs g_pfnHvVmxVCpuWriteVmcs
396# define hv_vmx_vcpu_read_shadow_vmcs g_pfnHvVmxVCpuReadShadowVmcs
397# define hv_vmx_vcpu_write_shadow_vmcs g_pfnHvVmxVCpuWriteShadowVmcs
398# define hv_vmx_vcpu_set_shadow_access g_pfnHvVmxVCpuSetShadowAccess
399# define hv_vmx_vcpu_set_apic_address g_pfnHvVmxVCpuSetApicAddress
400#endif
401
402
403/*********************************************************************************************************************************
404* Internal Functions *
405*********************************************************************************************************************************/
406static void vmxHCImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo);
407
408/**
409 * Converts a HV return code to a VBox status code.
410 *
411 * @returns VBox status code.
412 * @param hrc The HV return code to convert.
413 */
414DECLINLINE(int) nemR3DarwinHvSts2Rc(hv_return_t hrc)
415{
416 if (hrc == HV_SUCCESS)
417 return VINF_SUCCESS;
418
419 switch (hrc)
420 {
421 case HV_ERROR: return VERR_INVALID_STATE;
422 case HV_BUSY: return VERR_RESOURCE_BUSY;
423 case HV_BAD_ARGUMENT: return VERR_INVALID_PARAMETER;
424 case HV_NO_RESOURCES: return VERR_OUT_OF_RESOURCES;
425 case HV_NO_DEVICE: return VERR_NOT_FOUND;
426 case HV_UNSUPPORTED: return VERR_NOT_SUPPORTED;
427 }
428
429 return VERR_IPE_UNEXPECTED_STATUS;
430}
431
432
433/**
434 * Unmaps the given guest physical address range (page aligned).
435 *
436 * @returns VBox status code.
437 * @param pVM The cross context VM structure.
438 * @param GCPhys The guest physical address to start unmapping at.
439 * @param cb The size of the range to unmap in bytes.
440 */
441DECLINLINE(int) nemR3DarwinUnmap(PVM pVM, RTGCPHYS GCPhys, size_t cb)
442{
443 LogFlowFunc(("Unmapping %RGp LB %zu\n", GCPhys, cb));
444 hv_return_t hrc;
445 if (pVM->nem.s.fCreatedAsid)
446 hrc = hv_vm_unmap_space(pVM->nem.s.uVmAsid, GCPhys, cb);
447 else
448 hrc = hv_vm_unmap(GCPhys, cb);
449 return nemR3DarwinHvSts2Rc(hrc);
450}
451
452
453/**
454 * Maps a given guest physical address range backed by the given memory with the given
455 * protection flags.
456 *
457 * @returns VBox status code.
458 * @param pVM The cross context VM structure.
459 * @param GCPhys The guest physical address to start mapping.
460 * @param pvRam The R3 pointer of the memory to back the range with.
461 * @param cb The size of the range, page aligned.
462 * @param fPageProt The page protection flags to use for this range, combination of NEM_PAGE_PROT_XXX
463 */
464DECLINLINE(int) nemR3DarwinMap(PVM pVM, RTGCPHYS GCPhys, void *pvRam, size_t cb, uint32_t fPageProt)
465{
466 LogFlowFunc(("Mapping %RGp LB %zu fProt=%#x\n", GCPhys, cb, fPageProt));
467
468 hv_memory_flags_t fHvMemProt = 0;
469 if (fPageProt & NEM_PAGE_PROT_READ)
470 fHvMemProt |= HV_MEMORY_READ;
471 if (fPageProt & NEM_PAGE_PROT_WRITE)
472 fHvMemProt |= HV_MEMORY_WRITE;
473 if (fPageProt & NEM_PAGE_PROT_EXECUTE)
474 fHvMemProt |= HV_MEMORY_EXEC;
475
476 hv_return_t hrc;
477 if (pVM->nem.s.fCreatedAsid)
478 hrc = hv_vm_map_space(pVM->nem.s.uVmAsid, pvRam, GCPhys, cb, fHvMemProt);
479 else
480 hrc = hv_vm_map(pvRam, GCPhys, cb, fHvMemProt);
481 return nemR3DarwinHvSts2Rc(hrc);
482}
483
484
485#if 0 /* unused */
486DECLINLINE(int) nemR3DarwinProtectPage(PVM pVM, RTGCPHYS GCPhys, size_t cb, uint32_t fPageProt)
487{
488 hv_memory_flags_t fHvMemProt = 0;
489 if (fPageProt & NEM_PAGE_PROT_READ)
490 fHvMemProt |= HV_MEMORY_READ;
491 if (fPageProt & NEM_PAGE_PROT_WRITE)
492 fHvMemProt |= HV_MEMORY_WRITE;
493 if (fPageProt & NEM_PAGE_PROT_EXECUTE)
494 fHvMemProt |= HV_MEMORY_EXEC;
 495 hv_return_t hrc;
496 if (pVM->nem.s.fCreatedAsid)
497 hrc = hv_vm_protect_space(pVM->nem.s.uVmAsid, GCPhys, cb, fHvMemProt);
498 else
499 hrc = hv_vm_protect(GCPhys, cb, fHvMemProt);
500
501 return nemR3DarwinHvSts2Rc(hrc);
502}
503#endif
504
505
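/**
 * Resolves a guest physical address to a read-only ring-3 pointer.
 *
 * The page mapping lock is released again before returning.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   GCPhys  The guest physical address to resolve.
 * @param   ppv     Where to store the ring-3 pointer on success.
 */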
506DECLINLINE(int) nemR3NativeGCPhys2R3PtrReadOnly(PVM pVM, RTGCPHYS GCPhys, const void **ppv)
507{
508 PGMPAGEMAPLOCK Lock;
509 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, ppv, &Lock);
510 if (RT_SUCCESS(rc))
511 PGMPhysReleasePageMappingLock(pVM, &Lock);
512 return rc;
513}
514
515
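/**
 * Resolves a guest physical address to a writeable ring-3 pointer.
 *
 * The page mapping lock is released again before returning.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   GCPhys  The guest physical address to resolve.
 * @param   ppv     Where to store the ring-3 pointer on success.
 */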
516DECLINLINE(int) nemR3NativeGCPhys2R3PtrWriteable(PVM pVM, RTGCPHYS GCPhys, void **ppv)
517{
518 PGMPAGEMAPLOCK Lock;
519 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, ppv, &Lock);
520 if (RT_SUCCESS(rc))
521 PGMPhysReleasePageMappingLock(pVM, &Lock);
522 return rc;
523}
524
525
526/**
 527 * Worker that maps pages into Hypervisor.framework.
528 *
529 * This is used by the PGM physical page notifications as well as the memory
530 * access VMEXIT handlers.
531 *
532 * @returns VBox status code.
533 * @param pVM The cross context VM structure.
534 * @param pVCpu The cross context virtual CPU structure of the
535 * calling EMT.
536 * @param GCPhysSrc The source page address.
 537 * @param GCPhysDst The Hypervisor.framework destination page. This may differ from
538 * GCPhysSrc when A20 is disabled.
539 * @param fPageProt NEM_PAGE_PROT_XXX.
540 * @param pu2State Our page state (input/output).
541 * @param fBackingChanged Set if the page backing is being changed.
542 * @thread EMT(pVCpu)
543 */
544NEM_TMPL_STATIC int nemHCNativeSetPhysPage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
545 uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged)
546{
547 /*
548 * Looks like we need to unmap a page before we can change the backing
549 * or even modify the protection. This is going to be *REALLY* efficient.
550 * PGM lends us two bits to keep track of the state here.
551 */
552 RT_NOREF(pVCpu);
553 uint8_t const u2OldState = *pu2State;
554 uint8_t const u2NewState = fPageProt & NEM_PAGE_PROT_WRITE ? NEM_DARWIN_PAGE_STATE_WRITABLE
555 : fPageProt & NEM_PAGE_PROT_READ ? NEM_DARWIN_PAGE_STATE_READABLE : NEM_DARWIN_PAGE_STATE_UNMAPPED;
556 if ( fBackingChanged
557 || u2NewState != u2OldState)
558 {
559 if (u2OldState > NEM_DARWIN_PAGE_STATE_UNMAPPED)
560 {
561 int rc = nemR3DarwinUnmap(pVM, GCPhysDst, X86_PAGE_SIZE);
562 if (RT_SUCCESS(rc))
563 {
564 *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
565 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
566 if (u2NewState == NEM_DARWIN_PAGE_STATE_UNMAPPED)
567 {
568 Log5(("NEM GPA unmapped/set: %RGp (was %s)\n", GCPhysDst, g_apszPageStates[u2OldState]));
569 return VINF_SUCCESS;
570 }
571 }
572 else
573 {
574 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
575 LogRel(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
576 return VERR_NEM_INIT_FAILED;
577 }
578 }
579 }
580
581 /*
582 * Writeable mapping?
583 */
584 if (fPageProt & NEM_PAGE_PROT_WRITE)
585 {
586 void *pvPage;
587 int rc = nemR3NativeGCPhys2R3PtrWriteable(pVM, GCPhysSrc, &pvPage);
588 if (RT_SUCCESS(rc))
589 {
590 rc = nemR3DarwinMap(pVM, GCPhysDst, pvPage, X86_PAGE_SIZE, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE);
591 if (RT_SUCCESS(rc))
592 {
593 *pu2State = NEM_DARWIN_PAGE_STATE_WRITABLE;
594 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPage);
595 Log5(("NEM GPA mapped/set: %RGp %s (was %s)\n", GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState]));
596 return VINF_SUCCESS;
597 }
598 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
 599 LogRel(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
600 return VERR_NEM_INIT_FAILED;
601 }
602 LogRel(("nemHCNativeSetPhysPage/writable: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
603 return rc;
604 }
605
606 if (fPageProt & NEM_PAGE_PROT_READ)
607 {
608 const void *pvPage;
609 int rc = nemR3NativeGCPhys2R3PtrReadOnly(pVM, GCPhysSrc, &pvPage);
610 if (RT_SUCCESS(rc))
611 {
612 rc = nemR3DarwinMap(pVM, GCPhysDst, (void *)pvPage, X86_PAGE_SIZE, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE);
613 if (RT_SUCCESS(rc))
614 {
615 *pu2State = NEM_DARWIN_PAGE_STATE_READABLE;
616 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPage);
617 Log5(("NEM GPA mapped/set: %RGp %s (was %s)\n", GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState]));
618 return VINF_SUCCESS;
619 }
620 STAM_REL_COUNTER_INC(&pVM->nem.s.StatMapPageFailed);
621 LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
622 return VERR_NEM_INIT_FAILED;
623 }
624 LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
625 return rc;
626 }
627
628 /* We already unmapped it above. */
629 *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
630 return VINF_SUCCESS;
631}
632
633
634#ifdef LOG_ENABLED
635/**
636 * Logs the current CPU state.
637 */
638static void nemR3DarwinLogState(PVMCC pVM, PVMCPUCC pVCpu)
639{
640 if (LogIs3Enabled())
641 {
642#if 0
643 char szRegs[4096];
644 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
645 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
646 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
647 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
648 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
649 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
650 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
651 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
652 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
653 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
654 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
655 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
656 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
657 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
658 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
659 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
660 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
661 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
662 " efer=%016VR{efer}\n"
663 " pat=%016VR{pat}\n"
664 " sf_mask=%016VR{sf_mask}\n"
665 "krnl_gs_base=%016VR{krnl_gs_base}\n"
666 " lstar=%016VR{lstar}\n"
667 " star=%016VR{star} cstar=%016VR{cstar}\n"
668 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
669 );
670
671 char szInstr[256];
672 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
673 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
674 szInstr, sizeof(szInstr), NULL);
675 Log3(("%s%s\n", szRegs, szInstr));
676#else
677 RT_NOREF(pVM, pVCpu);
678#endif
679 }
680}
681#endif /* LOG_ENABLED */
682
683
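/**
 * Reads a 16-bit field from the VMCS of the given vCPU.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   uFieldEnc   The VMCS field encoding.
 * @param   pData       Where to store the value on success.
 */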
684DECLINLINE(int) nemR3DarwinReadVmcs16(PVMCPUCC pVCpu, uint32_t uFieldEnc, uint16_t *pData)
685{
686 uint64_t u64Data;
687 hv_return_t hrc = hv_vmx_vcpu_read_vmcs(pVCpu->nem.s.hVCpuId, uFieldEnc, &u64Data);
688 if (RT_LIKELY(hrc == HV_SUCCESS))
689 {
690 *pData = (uint16_t)u64Data;
691 return VINF_SUCCESS;
692 }
693
694 return nemR3DarwinHvSts2Rc(hrc);
695}
696
697
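/**
 * Reads a 32-bit field from the VMCS of the given vCPU.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   uFieldEnc   The VMCS field encoding.
 * @param   pData       Where to store the value on success.
 */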
698DECLINLINE(int) nemR3DarwinReadVmcs32(PVMCPUCC pVCpu, uint32_t uFieldEnc, uint32_t *pData)
699{
700 uint64_t u64Data;
701 hv_return_t hrc = hv_vmx_vcpu_read_vmcs(pVCpu->nem.s.hVCpuId, uFieldEnc, &u64Data);
702 if (RT_LIKELY(hrc == HV_SUCCESS))
703 {
704 *pData = (uint32_t)u64Data;
705 return VINF_SUCCESS;
706 }
707
708 return nemR3DarwinHvSts2Rc(hrc);
709}
710
711
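/**
 * Reads a 64-bit field from the VMCS of the given vCPU.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   uFieldEnc   The VMCS field encoding.
 * @param   pData       Where to store the value on success.
 */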
712DECLINLINE(int) nemR3DarwinReadVmcs64(PVMCPUCC pVCpu, uint32_t uFieldEnc, uint64_t *pData)
713{
714 hv_return_t hrc = hv_vmx_vcpu_read_vmcs(pVCpu->nem.s.hVCpuId, uFieldEnc, pData);
715 if (RT_LIKELY(hrc == HV_SUCCESS))
716 return VINF_SUCCESS;
717
718 return nemR3DarwinHvSts2Rc(hrc);
719}
720
721
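/**
 * Writes a 16-bit field to the VMCS of the given vCPU.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   uFieldEnc   The VMCS field encoding.
 * @param   u16Val      The value to write.
 */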
722DECLINLINE(int) nemR3DarwinWriteVmcs16(PVMCPUCC pVCpu, uint32_t uFieldEnc, uint16_t u16Val)
723{
724 hv_return_t hrc = hv_vmx_vcpu_write_vmcs(pVCpu->nem.s.hVCpuId, uFieldEnc, u16Val);
725 if (RT_LIKELY(hrc == HV_SUCCESS))
726 return VINF_SUCCESS;
727
728 return nemR3DarwinHvSts2Rc(hrc);
729}
730
731
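/**
 * Writes a 32-bit field to the VMCS of the given vCPU.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   uFieldEnc   The VMCS field encoding.
 * @param   u32Val      The value to write.
 */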
732DECLINLINE(int) nemR3DarwinWriteVmcs32(PVMCPUCC pVCpu, uint32_t uFieldEnc, uint32_t u32Val)
733{
734 hv_return_t hrc = hv_vmx_vcpu_write_vmcs(pVCpu->nem.s.hVCpuId, uFieldEnc, u32Val);
735 if (RT_LIKELY(hrc == HV_SUCCESS))
736 return VINF_SUCCESS;
737
738 return nemR3DarwinHvSts2Rc(hrc);
739}
740
741
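/**
 * Writes a 64-bit field to the VMCS of the given vCPU.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   uFieldEnc   The VMCS field encoding.
 * @param   u64Val      The value to write.
 */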
742DECLINLINE(int) nemR3DarwinWriteVmcs64(PVMCPUCC pVCpu, uint32_t uFieldEnc, uint64_t u64Val)
743{
744 hv_return_t hrc = hv_vmx_vcpu_write_vmcs(pVCpu->nem.s.hVCpuId, uFieldEnc, u64Val);
745 if (RT_LIKELY(hrc == HV_SUCCESS))
746 return VINF_SUCCESS;
747
748 return nemR3DarwinHvSts2Rc(hrc);
749}
750
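/**
 * Reads the given MSR of the vCPU.
 *
 * @returns VBox status code.
 * @param   pVCpu     The cross context virtual CPU structure of the calling EMT.
 * @param   idMsr     The MSR to read.
 * @param   pu64Val   Where to store the value on success.
 */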
751DECLINLINE(int) nemR3DarwinMsrRead(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Val)
752{
753 hv_return_t hrc = hv_vcpu_read_msr(pVCpu->nem.s.hVCpuId, idMsr, pu64Val);
754 if (RT_LIKELY(hrc == HV_SUCCESS))
755 return VINF_SUCCESS;
756
757 return nemR3DarwinHvSts2Rc(hrc);
758}
759
760#if 0 /*unused*/
761DECLINLINE(int) nemR3DarwinMsrWrite(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t u64Val)
762{
763 hv_return_t hrc = hv_vcpu_write_msr(pVCpu->nem.s.hVCpuId, idMsr, u64Val);
764 if (RT_LIKELY(hrc == HV_SUCCESS))
765 return VINF_SUCCESS;
766
767 return nemR3DarwinHvSts2Rc(hrc);
768}
769#endif
770
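/**
 * Imports the requested guest state from Hypervisor.framework into CPUM.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   fWhat   Mask of CPUMCTX_EXTRN_XXX bits to import.
 */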
771static int nemR3DarwinCopyStateFromHv(PVMCC pVM, PVMCPUCC pVCpu, uint64_t fWhat)
772{
773#define READ_GREG(a_GReg, a_Value) \
774 do \
775 { \
776 hrc = hv_vcpu_read_register(pVCpu->nem.s.hVCpuId, (a_GReg), &(a_Value)); \
777 if (RT_LIKELY(hrc == HV_SUCCESS)) \
778 { /* likely */ } \
779 else \
780 return VERR_INTERNAL_ERROR; \
781 } while(0)
782#define READ_VMCS_FIELD(a_Field, a_Value) \
783 do \
784 { \
785 hrc = hv_vmx_vcpu_read_vmcs(pVCpu->nem.s.hVCpuId, (a_Field), &(a_Value)); \
786 if (RT_LIKELY(hrc == HV_SUCCESS)) \
787 { /* likely */ } \
788 else \
789 return VERR_INTERNAL_ERROR; \
790 } while(0)
791#define READ_VMCS16_FIELD(a_Field, a_Value) \
792 do \
793 { \
794 uint64_t u64Data; \
795 hrc = hv_vmx_vcpu_read_vmcs(pVCpu->nem.s.hVCpuId, (a_Field), &u64Data); \
796 if (RT_LIKELY(hrc == HV_SUCCESS)) \
797 { (a_Value) = (uint16_t)u64Data; } \
798 else \
799 return VERR_INTERNAL_ERROR; \
800 } while(0)
801#define READ_VMCS32_FIELD(a_Field, a_Value) \
802 do \
803 { \
804 uint64_t u64Data; \
805 hrc = hv_vmx_vcpu_read_vmcs(pVCpu->nem.s.hVCpuId, (a_Field), &u64Data); \
806 if (RT_LIKELY(hrc == HV_SUCCESS)) \
807 { (a_Value) = (uint32_t)u64Data; } \
808 else \
809 return VERR_INTERNAL_ERROR; \
810 } while(0)
811#define READ_MSR(a_Msr, a_Value) \
812 do \
813 { \
814 hrc = hv_vcpu_read_msr(pVCpu->nem.s.hVCpuId, (a_Msr), &(a_Value)); \
815 if (RT_LIKELY(hrc == HV_SUCCESS)) \
816 { /* likely */ } \
817 else \
818 AssertFailedReturn(VERR_INTERNAL_ERROR); \
819 } while(0)
820
821 STAM_PROFILE_ADV_START(&pVCpu->nem.s.StatProfGstStateImport, x);
822
823 RT_NOREF(pVM);
824 fWhat &= pVCpu->cpum.GstCtx.fExtrn;
825
826 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
827 vmxHCImportGuestIntrState(pVCpu, &pVCpu->nem.s.VmcsInfo);
828
829 /* GPRs */
830 hv_return_t hrc;
831 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
832 {
833 if (fWhat & CPUMCTX_EXTRN_RAX)
834 READ_GREG(HV_X86_RAX, pVCpu->cpum.GstCtx.rax);
835 if (fWhat & CPUMCTX_EXTRN_RCX)
836 READ_GREG(HV_X86_RCX, pVCpu->cpum.GstCtx.rcx);
837 if (fWhat & CPUMCTX_EXTRN_RDX)
838 READ_GREG(HV_X86_RDX, pVCpu->cpum.GstCtx.rdx);
839 if (fWhat & CPUMCTX_EXTRN_RBX)
840 READ_GREG(HV_X86_RBX, pVCpu->cpum.GstCtx.rbx);
841 if (fWhat & CPUMCTX_EXTRN_RSP)
842 READ_GREG(HV_X86_RSP, pVCpu->cpum.GstCtx.rsp);
843 if (fWhat & CPUMCTX_EXTRN_RBP)
844 READ_GREG(HV_X86_RBP, pVCpu->cpum.GstCtx.rbp);
845 if (fWhat & CPUMCTX_EXTRN_RSI)
846 READ_GREG(HV_X86_RSI, pVCpu->cpum.GstCtx.rsi);
847 if (fWhat & CPUMCTX_EXTRN_RDI)
848 READ_GREG(HV_X86_RDI, pVCpu->cpum.GstCtx.rdi);
849 if (fWhat & CPUMCTX_EXTRN_R8_R15)
850 {
851 READ_GREG(HV_X86_R8, pVCpu->cpum.GstCtx.r8);
852 READ_GREG(HV_X86_R9, pVCpu->cpum.GstCtx.r9);
853 READ_GREG(HV_X86_R10, pVCpu->cpum.GstCtx.r10);
854 READ_GREG(HV_X86_R11, pVCpu->cpum.GstCtx.r11);
855 READ_GREG(HV_X86_R12, pVCpu->cpum.GstCtx.r12);
856 READ_GREG(HV_X86_R13, pVCpu->cpum.GstCtx.r13);
857 READ_GREG(HV_X86_R14, pVCpu->cpum.GstCtx.r14);
858 READ_GREG(HV_X86_R15, pVCpu->cpum.GstCtx.r15);
859 }
860 }
861
862 /* RIP & Flags */
863 if (fWhat & CPUMCTX_EXTRN_RIP)
864 READ_GREG(HV_X86_RIP, pVCpu->cpum.GstCtx.rip);
865 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
866 READ_GREG(HV_X86_RFLAGS, pVCpu->cpum.GstCtx.rflags.u);
867
868 /* Segments */
869#define READ_SEG(a_SReg, a_enmName) \
870 do { \
871 READ_VMCS16_FIELD(VMX_VMCS16_GUEST_ ## a_enmName ## _SEL, (a_SReg).Sel); \
872 READ_VMCS32_FIELD(VMX_VMCS32_GUEST_ ## a_enmName ## _LIMIT, (a_SReg).u32Limit); \
873 READ_VMCS32_FIELD(VMX_VMCS32_GUEST_ ## a_enmName ## _ACCESS_RIGHTS, (a_SReg).Attr.u); \
874 READ_VMCS_FIELD(VMX_VMCS_GUEST_ ## a_enmName ## _BASE, (a_SReg).u64Base); \
875 (a_SReg).ValidSel = (a_SReg).Sel; \
876 } while (0)
877 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
878 {
879 if (fWhat & CPUMCTX_EXTRN_ES)
880 READ_SEG(pVCpu->cpum.GstCtx.es, ES);
881 if (fWhat & CPUMCTX_EXTRN_CS)
882 READ_SEG(pVCpu->cpum.GstCtx.cs, CS);
883 if (fWhat & CPUMCTX_EXTRN_SS)
884 READ_SEG(pVCpu->cpum.GstCtx.ss, SS);
885 if (fWhat & CPUMCTX_EXTRN_DS)
886 READ_SEG(pVCpu->cpum.GstCtx.ds, DS);
887 if (fWhat & CPUMCTX_EXTRN_FS)
888 READ_SEG(pVCpu->cpum.GstCtx.fs, FS);
889 if (fWhat & CPUMCTX_EXTRN_GS)
890 READ_SEG(pVCpu->cpum.GstCtx.gs, GS);
891 }
892
893 /* Descriptor tables and the task segment. */
894 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
895 {
896 if (fWhat & CPUMCTX_EXTRN_LDTR)
897 READ_SEG(pVCpu->cpum.GstCtx.ldtr, LDTR);
898
899 if (fWhat & CPUMCTX_EXTRN_TR)
900 {
 901 /* AMD-V likes loading TR in the AVAIL state, whereas Intel insists on BUSY. So, to
 902 avoid triggering sanity assertions around the code, always fix this. */
903 READ_SEG(pVCpu->cpum.GstCtx.tr, TR);
904 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
905 {
906 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
907 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
908 break;
909 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
910 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
911 break;
912 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
913 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;
914 break;
915 }
916 }
917 if (fWhat & CPUMCTX_EXTRN_IDTR)
918 {
919 READ_VMCS32_FIELD(VMX_VMCS32_GUEST_IDTR_LIMIT, pVCpu->cpum.GstCtx.idtr.cbIdt);
920 READ_VMCS_FIELD(VMX_VMCS_GUEST_IDTR_BASE, pVCpu->cpum.GstCtx.idtr.pIdt);
921 }
922 if (fWhat & CPUMCTX_EXTRN_GDTR)
923 {
924 READ_VMCS32_FIELD(VMX_VMCS32_GUEST_GDTR_LIMIT, pVCpu->cpum.GstCtx.gdtr.cbGdt);
925 READ_VMCS_FIELD(VMX_VMCS_GUEST_GDTR_BASE, pVCpu->cpum.GstCtx.gdtr.pGdt);
926 }
927 }
928
929 /* Control registers. */
930 bool fMaybeChangedMode = false;
931 bool fUpdateCr3 = false;
932 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
933 {
934 uint64_t u64CrTmp = 0;
935
936 if (fWhat & CPUMCTX_EXTRN_CR0)
937 {
938 READ_GREG(HV_X86_CR0, u64CrTmp);
939 if (pVCpu->cpum.GstCtx.cr0 != u64CrTmp)
940 {
941 CPUMSetGuestCR0(pVCpu, u64CrTmp);
942 fMaybeChangedMode = true;
943 }
944 }
945 if (fWhat & CPUMCTX_EXTRN_CR2)
946 READ_GREG(HV_X86_CR2, pVCpu->cpum.GstCtx.cr2);
947 if (fWhat & CPUMCTX_EXTRN_CR3)
948 {
949 READ_GREG(HV_X86_CR3, u64CrTmp);
950 if (pVCpu->cpum.GstCtx.cr3 != u64CrTmp)
951 {
952 CPUMSetGuestCR3(pVCpu, u64CrTmp);
953 fUpdateCr3 = true;
954 }
955 }
956 if (fWhat & CPUMCTX_EXTRN_CR4)
957 {
958 READ_GREG(HV_X86_CR4, u64CrTmp);
959 u64CrTmp &= ~VMX_V_CR4_FIXED0;
960
961 if (pVCpu->cpum.GstCtx.cr4 != u64CrTmp)
962 {
963 CPUMSetGuestCR4(pVCpu, u64CrTmp);
964 fMaybeChangedMode = true;
965 }
966 }
967 }
968
969#if 0 /* Always done. */
970 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
971 {
972 uint64_t u64Cr8 = 0;
973
974 READ_GREG(HV_X86_TPR, u64Cr8);
975 APICSetTpr(pVCpu, u64Cr8 << 4);
976 }
977#endif
978
979 if (fWhat & CPUMCTX_EXTRN_XCRx)
980 READ_GREG(HV_X86_XCR0, pVCpu->cpum.GstCtx.aXcr[0]);
981
982 /* Debug registers. */
983 if (fWhat & CPUMCTX_EXTRN_DR7)
984 {
985 uint64_t u64Dr7;
986 READ_GREG(HV_X86_DR7, u64Dr7);
987 if (pVCpu->cpum.GstCtx.dr[7] != u64Dr7)
988 CPUMSetGuestDR7(pVCpu, u64Dr7);
989 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_DR7; /* Hack alert! Avoids asserting when processing CPUMCTX_EXTRN_DR0_DR3. */
990 }
991 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
992 {
993 uint64_t u64DrTmp;
994
995 READ_GREG(HV_X86_DR0, u64DrTmp);
996 if (pVCpu->cpum.GstCtx.dr[0] != u64DrTmp)
997 CPUMSetGuestDR0(pVCpu, u64DrTmp);
998 READ_GREG(HV_X86_DR1, u64DrTmp);
999 if (pVCpu->cpum.GstCtx.dr[1] != u64DrTmp)
1000 CPUMSetGuestDR1(pVCpu, u64DrTmp);
1001 READ_GREG(HV_X86_DR2, u64DrTmp);
1002 if (pVCpu->cpum.GstCtx.dr[2] != u64DrTmp)
1003 CPUMSetGuestDR2(pVCpu, u64DrTmp);
1004 READ_GREG(HV_X86_DR3, u64DrTmp);
1005 if (pVCpu->cpum.GstCtx.dr[3] != u64DrTmp)
1006 CPUMSetGuestDR3(pVCpu, u64DrTmp);
1007 }
1008 if (fWhat & CPUMCTX_EXTRN_DR6)
1009 {
1010 uint64_t u64Dr6;
1011 READ_GREG(HV_X86_DR6, u64Dr6);
1012 if (pVCpu->cpum.GstCtx.dr[6] != u64Dr6)
1013 CPUMSetGuestDR6(pVCpu, u64Dr6);
1014 }
1015
1016 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
1017 {
1018 hrc = hv_vcpu_read_fpstate(pVCpu->nem.s.hVCpuId, &pVCpu->cpum.GstCtx.XState, sizeof(pVCpu->cpum.GstCtx.XState));
1019 if (hrc == HV_SUCCESS)
1020 { /* likely */ }
1021 else
1022 {
1023 STAM_PROFILE_ADV_STOP(&pVCpu->nem.s.StatProfGstStateImport, x);
1024 return nemR3DarwinHvSts2Rc(hrc);
1025 }
1026 }
1027
1028 /* MSRs */
1029 if (fWhat & CPUMCTX_EXTRN_EFER)
1030 {
1031 uint64_t u64Efer;
1032
1033 READ_VMCS_FIELD(VMX_VMCS64_GUEST_EFER_FULL, u64Efer);
1034 if (u64Efer != pVCpu->cpum.GstCtx.msrEFER)
1035 {
1036 Log7(("NEM/%u: MSR EFER changed %RX64 -> %RX64\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.msrEFER, u64Efer));
1037 if ((u64Efer ^ pVCpu->cpum.GstCtx.msrEFER) & MSR_K6_EFER_NXE)
1038 PGMNotifyNxeChanged(pVCpu, RT_BOOL(u64Efer & MSR_K6_EFER_NXE));
1039 pVCpu->cpum.GstCtx.msrEFER = u64Efer;
1040 fMaybeChangedMode = true;
1041 }
1042 }
1043
1044 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1045 READ_MSR(MSR_K8_KERNEL_GS_BASE, pVCpu->cpum.GstCtx.msrKERNELGSBASE);
1046 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1047 {
1048 uint64_t u64Tmp;
1049 READ_MSR(MSR_IA32_SYSENTER_EIP, u64Tmp);
1050 pVCpu->cpum.GstCtx.SysEnter.eip = u64Tmp;
1051 READ_MSR(MSR_IA32_SYSENTER_ESP, u64Tmp);
1052 pVCpu->cpum.GstCtx.SysEnter.esp = u64Tmp;
1053 READ_MSR(MSR_IA32_SYSENTER_CS, u64Tmp);
1054 pVCpu->cpum.GstCtx.SysEnter.cs = u64Tmp;
1055 }
1056 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1057 {
1058 READ_MSR(MSR_K6_STAR, pVCpu->cpum.GstCtx.msrSTAR);
1059 READ_MSR(MSR_K8_LSTAR, pVCpu->cpum.GstCtx.msrLSTAR);
1060 READ_MSR(MSR_K8_CSTAR, pVCpu->cpum.GstCtx.msrCSTAR);
1061 READ_MSR(MSR_K8_SF_MASK, pVCpu->cpum.GstCtx.msrSFMASK);
1062 }
1063#if 0
1064 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1065 {
1066 Assert(aenmNames[iReg] == WHvX64RegisterApicBase);
1067 const uint64_t uOldBase = APICGetBaseMsrNoCheck(pVCpu);
1068 if (aValues[iReg].Reg64 != uOldBase)
1069 {
1070 Log7(("NEM/%u: MSR APICBase changed %RX64 -> %RX64 (%RX64)\n",
1071 pVCpu->idCpu, uOldBase, aValues[iReg].Reg64, aValues[iReg].Reg64 ^ uOldBase));
1072 int rc2 = APICSetBaseMsr(pVCpu, aValues[iReg].Reg64);
1073 AssertLogRelMsg(rc2 == VINF_SUCCESS, ("%Rrc %RX64\n", rc2, aValues[iReg].Reg64));
1074 }
1075 iReg++;
1076
1077 GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrPAT, WHvX64RegisterPat, "MSR PAT");
1078#if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
1079 GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrPAT, WHvX64RegisterMsrMtrrCap);
1080#endif
1081 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
1082 GET_REG64_LOG7(pCtxMsrs->msr.MtrrDefType, WHvX64RegisterMsrMtrrDefType, "MSR MTRR_DEF_TYPE");
1083 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix64K_00000, WHvX64RegisterMsrMtrrFix64k00000, "MSR MTRR_FIX_64K_00000");
1084 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix16K_80000, WHvX64RegisterMsrMtrrFix16k80000, "MSR MTRR_FIX_16K_80000");
1085 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix16K_A0000, WHvX64RegisterMsrMtrrFix16kA0000, "MSR MTRR_FIX_16K_A0000");
1086 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_C0000, WHvX64RegisterMsrMtrrFix4kC0000, "MSR MTRR_FIX_4K_C0000");
1087 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_C8000, WHvX64RegisterMsrMtrrFix4kC8000, "MSR MTRR_FIX_4K_C8000");
1088 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_D0000, WHvX64RegisterMsrMtrrFix4kD0000, "MSR MTRR_FIX_4K_D0000");
1089 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_D8000, WHvX64RegisterMsrMtrrFix4kD8000, "MSR MTRR_FIX_4K_D8000");
1090 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_E0000, WHvX64RegisterMsrMtrrFix4kE0000, "MSR MTRR_FIX_4K_E0000");
1091 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_E8000, WHvX64RegisterMsrMtrrFix4kE8000, "MSR MTRR_FIX_4K_E8000");
1092 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_F0000, WHvX64RegisterMsrMtrrFix4kF0000, "MSR MTRR_FIX_4K_F0000");
1093 GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_F8000, WHvX64RegisterMsrMtrrFix4kF8000, "MSR MTRR_FIX_4K_F8000");
1094 GET_REG64_LOG7(pCtxMsrs->msr.TscAux, WHvX64RegisterTscAux, "MSR TSC_AUX");
1095 /** @todo look for HvX64RegisterIa32MiscEnable and HvX64RegisterIa32FeatureControl? */
1096 }
1097#endif
1098
1099 /* Almost done, just update extrn flags and maybe change PGM mode. */
1100 pVCpu->cpum.GstCtx.fExtrn &= ~fWhat;
1101 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
1102 pVCpu->cpum.GstCtx.fExtrn = 0;
1103
1104#ifdef LOG_ENABLED
1105 nemR3DarwinLogState(pVM, pVCpu);
1106#endif
1107
1108 /* Typical. */
1109 if (!fMaybeChangedMode && !fUpdateCr3)
1110 {
1111 STAM_PROFILE_ADV_STOP(&pVCpu->nem.s.StatProfGstStateImport, x);
1112 return VINF_SUCCESS;
1113 }
1114
1115 /*
1116 * Slow.
1117 */
1118 if (fMaybeChangedMode)
1119 {
1120 int rc = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER,
1121 false /* fForce */);
1122 AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_1);
1123 }
1124
1125 if (fUpdateCr3)
1126 {
1127 int rc = PGMUpdateCR3(pVCpu, pVCpu->cpum.GstCtx.cr3);
1128 if (rc == VINF_SUCCESS)
1129 { /* likely */ }
1130 else
1131 AssertMsgFailedReturn(("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_2);
1132 }
1133
1134 STAM_PROFILE_ADV_STOP(&pVCpu->nem.s.StatProfGstStateImport, x);
1135
1136 return VINF_SUCCESS;
1137#undef READ_GREG
1138#undef READ_VMCS_FIELD
#undef READ_VMCS16_FIELD
1139#undef READ_VMCS32_FIELD
1140#undef READ_SEG
1141#undef READ_MSR
1142}
1143
1144
1145/**
 1146 * State to pass between the memory access exit handler and
 1147 * nemR3DarwinHandleMemoryAccessPageCheckerCallback.
1148 */
1149typedef struct NEMHCDARWINHMACPCCSTATE
1150{
1151 /** Input: Write access. */
1152 bool fWriteAccess;
1153 /** Output: Set if we did something. */
1154 bool fDidSomething;
 1155 /** Output: Set if we should resume. */
1156 bool fCanResume;
1157} NEMHCDARWINHMACPCCSTATE;
1158
1159/**
1160 * @callback_method_impl{FNPGMPHYSNEMCHECKPAGE,
 1161 * Worker for the memory access VMEXIT handling; pvUser points to a
1162 * NEMHCDARWINHMACPCCSTATE structure. }
1163 */
1164static DECLCALLBACK(int)
1165nemR3DarwinHandleMemoryAccessPageCheckerCallback(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
1166{
1167 NEMHCDARWINHMACPCCSTATE *pState = (NEMHCDARWINHMACPCCSTATE *)pvUser;
1168 pState->fDidSomething = false;
1169 pState->fCanResume = false;
1170
1171 uint8_t u2State = pInfo->u2NemState;
1172
1173 /*
1174 * Consolidate current page state with actual page protection and access type.
1175 * We don't really consider downgrades here, as they shouldn't happen.
1176 */
1177 int rc;
1178 switch (u2State)
1179 {
1180 case NEM_DARWIN_PAGE_STATE_UNMAPPED:
1181 case NEM_DARWIN_PAGE_STATE_NOT_SET:
1182 if (pInfo->fNemProt == NEM_PAGE_PROT_NONE)
1183 {
1184 Log4(("nemR3DarwinHandleMemoryAccessPageCheckerCallback: %RGp - #1\n", GCPhys));
1185 return VINF_SUCCESS;
1186 }
1187
1188 /* Don't bother remapping it if it's a write request to a non-writable page. */
1189 if ( pState->fWriteAccess
1190 && !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE))
1191 {
1192 Log4(("nemR3DarwinHandleMemoryAccessPageCheckerCallback: %RGp - #1w\n", GCPhys));
1193 return VINF_SUCCESS;
1194 }
1195
1196 /* Map the page. */
1197 rc = nemHCNativeSetPhysPage(pVM,
1198 pVCpu,
1199 GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
1200 GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
1201 pInfo->fNemProt,
1202 &u2State,
 1203 true /*fBackingChanged*/);
1204 pInfo->u2NemState = u2State;
1205 Log4(("nemR3DarwinHandleMemoryAccessPageCheckerCallback: %RGp - synced => %s + %Rrc\n",
1206 GCPhys, g_apszPageStates[u2State], rc));
1207 pState->fDidSomething = true;
1208 pState->fCanResume = true;
1209 return rc;
1210
1211 case NEM_DARWIN_PAGE_STATE_READABLE:
1212 if ( !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1213 && (pInfo->fNemProt & (NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE)))
1214 {
1215 pState->fCanResume = true;
1216 Log4(("nemR3DarwinHandleMemoryAccessPageCheckerCallback: %RGp - #2\n", GCPhys));
1217 return VINF_SUCCESS;
1218 }
1219 break;
1220
1221 case NEM_DARWIN_PAGE_STATE_WRITABLE:
1222 if (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1223 {
1224 /* We get spurious EPT exit violations when everything is fine (#3a case) but can resume without issues here... */
1225 pState->fCanResume = true;
1226 if (pInfo->u2OldNemState == NEM_DARWIN_PAGE_STATE_WRITABLE)
1227 Log4(("nemR3DarwinHandleMemoryAccessPageCheckerCallback: %RGp - #3a\n", GCPhys));
1228 else
1229 Log4(("nemR3DarwinHandleMemoryAccessPageCheckerCallback: %RGp - #3b (%s -> %s)\n",
1230 GCPhys, g_apszPageStates[pInfo->u2OldNemState], g_apszPageStates[u2State]));
1231 return VINF_SUCCESS;
1232 }
1233
1234 break;
1235
1236 default:
1237 AssertLogRelMsgFailedReturn(("u2State=%#x\n", u2State), VERR_NEM_IPE_4);
1238 }
1239
1240 /*
1241 * Unmap and restart the instruction.
1242 * If this fails, which it does every so often, just unmap everything for now.
1243 */
1244 rc = nemR3DarwinUnmap(pVM, GCPhys, X86_PAGE_SIZE);
1245 if (RT_SUCCESS(rc))
1246 {
1247 pState->fDidSomething = true;
1248 pState->fCanResume = true;
1249 pInfo->u2NemState = NEM_DARWIN_PAGE_STATE_UNMAPPED;
1250 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
1251 Log5(("NEM GPA unmapped/exit: %RGp (was %s)\n", GCPhys, g_apszPageStates[u2State]));
1252 return VINF_SUCCESS;
1253 }
1254 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
1255 LogRel(("nemR3DarwinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp %s rc=%Rrc\n",
1256 GCPhys, g_apszPageStates[u2State], rc));
1257 return VERR_NEM_UNMAP_PAGES_FAILED;
1258}
1259
1260
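/** Returns whether the VMX unrestricted guest feature is used; this backend always reports true. */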
1261DECL_FORCE_INLINE(bool) nemR3DarwinIsUnrestrictedGuest(PCVMCC pVM)
1262{
1263 RT_NOREF(pVM);
1264 return true;
1265}
1266
1267
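/** Returns whether nested paging (EPT) is used; this backend always reports true. */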
1268DECL_FORCE_INLINE(bool) nemR3DarwinIsNestedPaging(PCVMCC pVM)
1269{
1270 RT_NOREF(pVM);
1271 return true;
1272}
1273
1274
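/** Returns whether the VMX preemption timer is used; this backend always reports false. */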
1275DECL_FORCE_INLINE(bool) nemR3DarwinIsPreemptTimerUsed(PCVMCC pVM)
1276{
1277 RT_NOREF(pVM);
1278 return false;
1279}
1280
1281
1282#if 0 /* unused */
1283DECL_FORCE_INLINE(bool) nemR3DarwinIsVmxLbr(PCVMCC pVM)
1284{
1285 RT_NOREF(pVM);
1286 return false;
1287}
1288#endif
1289
1290
1291/*
1292 * Instantiate the code we share with ring-0.
1293 */
1294#define IN_NEM_DARWIN
1295//#define HMVMX_ALWAYS_TRAP_ALL_XCPTS
1296//#define HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE
1297#define VCPU_2_VMXSTATE(a_pVCpu) (a_pVCpu)->nem.s
1298#define VCPU_2_VMXSTATS(a_pVCpu) (*(a_pVCpu)->nem.s.pVmxStats)
1299
1300#define VM_IS_VMX_UNRESTRICTED_GUEST(a_pVM) nemR3DarwinIsUnrestrictedGuest((a_pVM))
1301#define VM_IS_VMX_NESTED_PAGING(a_pVM) nemR3DarwinIsNestedPaging((a_pVM))
1302#define VM_IS_VMX_PREEMPT_TIMER_USED(a_pVM) nemR3DarwinIsPreemptTimerUsed((a_pVM))
1303#define VM_IS_VMX_LBR(a_pVM) nemR3DarwinIsVmxLbr((a_pVM))
1304
1305#define VMX_VMCS_WRITE_16(a_pVCpu, a_FieldEnc, a_Val) nemR3DarwinWriteVmcs16((a_pVCpu), (a_FieldEnc), (a_Val))
1306#define VMX_VMCS_WRITE_32(a_pVCpu, a_FieldEnc, a_Val) nemR3DarwinWriteVmcs32((a_pVCpu), (a_FieldEnc), (a_Val))
1307#define VMX_VMCS_WRITE_64(a_pVCpu, a_FieldEnc, a_Val) nemR3DarwinWriteVmcs64((a_pVCpu), (a_FieldEnc), (a_Val))
1308#define VMX_VMCS_WRITE_NW(a_pVCpu, a_FieldEnc, a_Val) nemR3DarwinWriteVmcs64((a_pVCpu), (a_FieldEnc), (a_Val))
1309
1310#define VMX_VMCS_READ_16(a_pVCpu, a_FieldEnc, a_pVal) nemR3DarwinReadVmcs16((a_pVCpu), (a_FieldEnc), (a_pVal))
1311#define VMX_VMCS_READ_32(a_pVCpu, a_FieldEnc, a_pVal) nemR3DarwinReadVmcs32((a_pVCpu), (a_FieldEnc), (a_pVal))
1312#define VMX_VMCS_READ_64(a_pVCpu, a_FieldEnc, a_pVal) nemR3DarwinReadVmcs64((a_pVCpu), (a_FieldEnc), (a_pVal))
1313#define VMX_VMCS_READ_NW(a_pVCpu, a_FieldEnc, a_pVal) nemR3DarwinReadVmcs64((a_pVCpu), (a_FieldEnc), (a_pVal))
1314
1315#include "../VMMAll/VMXAllTemplate.cpp.h"
1316
1317#undef VMX_VMCS_WRITE_16
1318#undef VMX_VMCS_WRITE_32
1319#undef VMX_VMCS_WRITE_64
1320#undef VMX_VMCS_WRITE_NW
1321
1322#undef VMX_VMCS_READ_16
1323#undef VMX_VMCS_READ_32
1324#undef VMX_VMCS_READ_64
1325#undef VMX_VMCS_READ_NW
1326
1327#undef VM_IS_VMX_PREEMPT_TIMER_USED
1328#undef VM_IS_VMX_NESTED_PAGING
1329#undef VM_IS_VMX_UNRESTRICTED_GUEST
1330#undef VCPU_2_VMXSTATS
1331#undef VCPU_2_VMXSTATE
1332
1333
1334/**
1335 * Exports the guest GP registers to HV for execution.
1336 *
1337 * @returns VBox status code.
1338 * @param pVCpu The cross context virtual CPU structure of the
1339 * calling EMT.
1340 */
1341static int nemR3DarwinExportGuestGprs(PVMCPUCC pVCpu)
1342{
1343#define WRITE_GREG(a_GReg, a_Value) \
1344 do \
1345 { \
1346 hv_return_t hrc = hv_vcpu_write_register(pVCpu->nem.s.hVCpuId, (a_GReg), (a_Value)); \
1347 if (RT_LIKELY(hrc == HV_SUCCESS)) \
1348 { /* likely */ } \
1349 else \
1350 return VERR_INTERNAL_ERROR; \
1351 } while(0)
1352
1353 uint64_t fCtxChanged = ASMAtomicUoReadU64(&pVCpu->nem.s.fCtxChanged);
1354 if (fCtxChanged & HM_CHANGED_GUEST_GPRS_MASK)
1355 {
1356 if (fCtxChanged & HM_CHANGED_GUEST_RAX)
1357 WRITE_GREG(HV_X86_RAX, pVCpu->cpum.GstCtx.rax);
1358 if (fCtxChanged & HM_CHANGED_GUEST_RCX)
1359 WRITE_GREG(HV_X86_RCX, pVCpu->cpum.GstCtx.rcx);
1360 if (fCtxChanged & HM_CHANGED_GUEST_RDX)
1361 WRITE_GREG(HV_X86_RDX, pVCpu->cpum.GstCtx.rdx);
1362 if (fCtxChanged & HM_CHANGED_GUEST_RBX)
1363 WRITE_GREG(HV_X86_RBX, pVCpu->cpum.GstCtx.rbx);
1364 if (fCtxChanged & HM_CHANGED_GUEST_RSP)
1365 WRITE_GREG(HV_X86_RSP, pVCpu->cpum.GstCtx.rsp);
1366 if (fCtxChanged & HM_CHANGED_GUEST_RBP)
1367 WRITE_GREG(HV_X86_RBP, pVCpu->cpum.GstCtx.rbp);
1368 if (fCtxChanged & HM_CHANGED_GUEST_RSI)
1369 WRITE_GREG(HV_X86_RSI, pVCpu->cpum.GstCtx.rsi);
1370 if (fCtxChanged & HM_CHANGED_GUEST_RDI)
1371 WRITE_GREG(HV_X86_RDI, pVCpu->cpum.GstCtx.rdi);
1372 if (fCtxChanged & HM_CHANGED_GUEST_R8_R15)
1373 {
1374 WRITE_GREG(HV_X86_R8, pVCpu->cpum.GstCtx.r8);
1375 WRITE_GREG(HV_X86_R9, pVCpu->cpum.GstCtx.r9);
1376 WRITE_GREG(HV_X86_R10, pVCpu->cpum.GstCtx.r10);
1377 WRITE_GREG(HV_X86_R11, pVCpu->cpum.GstCtx.r11);
1378 WRITE_GREG(HV_X86_R12, pVCpu->cpum.GstCtx.r12);
1379 WRITE_GREG(HV_X86_R13, pVCpu->cpum.GstCtx.r13);
1380 WRITE_GREG(HV_X86_R14, pVCpu->cpum.GstCtx.r14);
1381 WRITE_GREG(HV_X86_R15, pVCpu->cpum.GstCtx.r15);
1382 }
1383
1384 ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_GPRS_MASK);
1385 }
1386
1387 if (fCtxChanged & HM_CHANGED_GUEST_CR2)
1388 {
1389 WRITE_GREG(HV_X86_CR2, pVCpu->cpum.GstCtx.cr2);
1390 ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_CR2);
1391 }
1392
1393 return VINF_SUCCESS;
1394#undef WRITE_GREG
1395}
1396
1397
1398/**
1399 * Converts the given CPUM externalized bitmask to the appropriate HM changed bitmask.
1400 *
1401 * @returns Bitmask of HM changed flags.
1402 * @param fCpumExtrn The CPUM extern bitmask.
1403 */
1404static uint64_t nemR3DarwinCpumExtrnToHmChanged(uint64_t fCpumExtrn)
1405{
1406 uint64_t fHmChanged = 0;
1407
 1408 /* Invert to get a mask of things which are kept in CPUM. */
1409 uint64_t fCpumIntern = ~fCpumExtrn;
1410
1411 if (fCpumIntern & CPUMCTX_EXTRN_GPRS_MASK)
1412 {
1413 if (fCpumIntern & CPUMCTX_EXTRN_RAX)
1414 fHmChanged |= HM_CHANGED_GUEST_RAX;
1415 if (fCpumIntern & CPUMCTX_EXTRN_RCX)
1416 fHmChanged |= HM_CHANGED_GUEST_RCX;
1417 if (fCpumIntern & CPUMCTX_EXTRN_RDX)
1418 fHmChanged |= HM_CHANGED_GUEST_RDX;
1419 if (fCpumIntern & CPUMCTX_EXTRN_RBX)
1420 fHmChanged |= HM_CHANGED_GUEST_RBX;
1421 if (fCpumIntern & CPUMCTX_EXTRN_RSP)
1422 fHmChanged |= HM_CHANGED_GUEST_RSP;
1423 if (fCpumIntern & CPUMCTX_EXTRN_RBP)
1424 fHmChanged |= HM_CHANGED_GUEST_RBP;
1425 if (fCpumIntern & CPUMCTX_EXTRN_RSI)
1426 fHmChanged |= HM_CHANGED_GUEST_RSI;
1427 if (fCpumIntern & CPUMCTX_EXTRN_RDI)
1428 fHmChanged |= HM_CHANGED_GUEST_RDI;
1429 if (fCpumIntern & CPUMCTX_EXTRN_R8_R15)
1430 fHmChanged |= HM_CHANGED_GUEST_R8_R15;
1431 }
1432
1433 /* RIP & Flags */
1434 if (fCpumIntern & CPUMCTX_EXTRN_RIP)
1435 fHmChanged |= HM_CHANGED_GUEST_RIP;
1436 if (fCpumIntern & CPUMCTX_EXTRN_RFLAGS)
1437 fHmChanged |= HM_CHANGED_GUEST_RFLAGS;
1438
1439 /* Segments */
1440 if (fCpumIntern & CPUMCTX_EXTRN_SREG_MASK)
1441 {
1442 if (fCpumIntern & CPUMCTX_EXTRN_ES)
1443 fHmChanged |= HM_CHANGED_GUEST_ES;
1444 if (fCpumIntern & CPUMCTX_EXTRN_CS)
1445 fHmChanged |= HM_CHANGED_GUEST_CS;
1446 if (fCpumIntern & CPUMCTX_EXTRN_SS)
1447 fHmChanged |= HM_CHANGED_GUEST_SS;
1448 if (fCpumIntern & CPUMCTX_EXTRN_DS)
1449 fHmChanged |= HM_CHANGED_GUEST_DS;
1450 if (fCpumIntern & CPUMCTX_EXTRN_FS)
1451 fHmChanged |= HM_CHANGED_GUEST_FS;
1452 if (fCpumIntern & CPUMCTX_EXTRN_GS)
1453 fHmChanged |= HM_CHANGED_GUEST_GS;
1454 }
1455
1456 /* Descriptor tables & task segment. */
1457 if (fCpumIntern & CPUMCTX_EXTRN_TABLE_MASK)
1458 {
1459 if (fCpumIntern & CPUMCTX_EXTRN_LDTR)
1460 fHmChanged |= HM_CHANGED_GUEST_LDTR;
1461 if (fCpumIntern & CPUMCTX_EXTRN_TR)
1462 fHmChanged |= HM_CHANGED_GUEST_TR;
1463 if (fCpumIntern & CPUMCTX_EXTRN_IDTR)
1464 fHmChanged |= HM_CHANGED_GUEST_IDTR;
1465 if (fCpumIntern & CPUMCTX_EXTRN_GDTR)
1466 fHmChanged |= HM_CHANGED_GUEST_GDTR;
1467 }
1468
1469 /* Control registers. */
1470 if (fCpumIntern & CPUMCTX_EXTRN_CR_MASK)
1471 {
1472 if (fCpumIntern & CPUMCTX_EXTRN_CR0)
1473 fHmChanged |= HM_CHANGED_GUEST_CR0;
1474 if (fCpumIntern & CPUMCTX_EXTRN_CR2)
1475 fHmChanged |= HM_CHANGED_GUEST_CR2;
1476 if (fCpumIntern & CPUMCTX_EXTRN_CR3)
1477 fHmChanged |= HM_CHANGED_GUEST_CR3;
1478 if (fCpumIntern & CPUMCTX_EXTRN_CR4)
1479 fHmChanged |= HM_CHANGED_GUEST_CR4;
1480 }
1481 if (fCpumIntern & CPUMCTX_EXTRN_APIC_TPR)
1482 fHmChanged |= HM_CHANGED_GUEST_APIC_TPR;
1483
1484 /* Debug registers. */
1485 if (fCpumIntern & CPUMCTX_EXTRN_DR0_DR3)
1486 fHmChanged |= HM_CHANGED_GUEST_DR0_DR3;
1487 if (fCpumIntern & CPUMCTX_EXTRN_DR6)
1488 fHmChanged |= HM_CHANGED_GUEST_DR6;
1489 if (fCpumIntern & CPUMCTX_EXTRN_DR7)
1490 fHmChanged |= HM_CHANGED_GUEST_DR7;
1491
1492 /* Floating point state. */
1493 if (fCpumIntern & CPUMCTX_EXTRN_X87)
1494 fHmChanged |= HM_CHANGED_GUEST_X87;
1495 if (fCpumIntern & CPUMCTX_EXTRN_SSE_AVX)
1496 fHmChanged |= HM_CHANGED_GUEST_SSE_AVX;
1497 if (fCpumIntern & CPUMCTX_EXTRN_OTHER_XSAVE)
1498 fHmChanged |= HM_CHANGED_GUEST_OTHER_XSAVE;
1499 if (fCpumIntern & CPUMCTX_EXTRN_XCRx)
1500 fHmChanged |= HM_CHANGED_GUEST_XCRx;
1501
1502 /* MSRs */
1503 if (fCpumIntern & CPUMCTX_EXTRN_EFER)
1504 fHmChanged |= HM_CHANGED_GUEST_EFER_MSR;
1505 if (fCpumIntern & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1506 fHmChanged |= HM_CHANGED_GUEST_KERNEL_GS_BASE;
1507 if (fCpumIntern & CPUMCTX_EXTRN_SYSENTER_MSRS)
1508 fHmChanged |= HM_CHANGED_GUEST_SYSENTER_MSR_MASK;
1509 if (fCpumIntern & CPUMCTX_EXTRN_SYSCALL_MSRS)
1510 fHmChanged |= HM_CHANGED_GUEST_SYSCALL_MSRS;
1511 if (fCpumIntern & CPUMCTX_EXTRN_TSC_AUX)
1512 fHmChanged |= HM_CHANGED_GUEST_TSC_AUX;
1513 if (fCpumIntern & CPUMCTX_EXTRN_OTHER_MSRS)
1514 fHmChanged |= HM_CHANGED_GUEST_OTHER_MSRS;
1515
1516 return fHmChanged;
1517}
1518
1519
1520/**
1521 * Exports the guest state to HV for execution.
1522 *
1523 * @returns VBox status code.
1524 * @param pVM The cross context VM structure.
1525 * @param pVCpu The cross context virtual CPU structure of the
1526 * calling EMT.
1527 * @param pVmxTransient The transient VMX structure.
1528 */
1529static int nemR3DarwinExportGuestState(PVMCC pVM, PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1530{
1531#define WRITE_GREG(a_GReg, a_Value) \
1532 do \
1533 { \
1534 hv_return_t hrc = hv_vcpu_write_register(pVCpu->nem.s.hVCpuId, (a_GReg), (a_Value)); \
1535 if (RT_LIKELY(hrc == HV_SUCCESS)) \
1536 { /* likely */ } \
1537 else \
1538 return VERR_INTERNAL_ERROR; \
1539 } while(0)
1540#define WRITE_VMCS_FIELD(a_Field, a_Value) \
1541 do \
1542 { \
1543 hv_return_t hrc = hv_vmx_vcpu_write_vmcs(pVCpu->nem.s.hVCpuId, (a_Field), (a_Value)); \
1544 if (RT_LIKELY(hrc == HV_SUCCESS)) \
1545 { /* likely */ } \
1546 else \
1547 return VERR_INTERNAL_ERROR; \
1548 } while(0)
1549#define WRITE_MSR(a_Msr, a_Value) \
1550 do \
1551 { \
1552 hv_return_t hrc = hv_vcpu_write_msr(pVCpu->nem.s.hVCpuId, (a_Msr), (a_Value)); \
1553 if (RT_LIKELY(hrc == HV_SUCCESS)) \
1554 { /* likely */ } \
1555 else \
1556 AssertFailedReturn(VERR_INTERNAL_ERROR); \
1557 } while(0)
1558
1559 RT_NOREF(pVM);
1560
1561#ifdef LOG_ENABLED
1562 nemR3DarwinLogState(pVM, pVCpu);
1563#endif
1564
1565 STAM_PROFILE_ADV_START(&pVCpu->nem.s.StatProfGstStateExport, x);
1566
1567 uint64_t const fWhat = ~pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL;
1568 if (!fWhat)
1569 return VINF_SUCCESS;
1570
1571 pVCpu->nem.s.fCtxChanged |= nemR3DarwinCpumExtrnToHmChanged(pVCpu->cpum.GstCtx.fExtrn);
1572
1573 int rc = vmxHCExportGuestEntryExitCtls(pVCpu, pVmxTransient);
1574 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
1575
1576 rc = nemR3DarwinExportGuestGprs(pVCpu);
1577 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
1578
1579 rc = vmxHCExportGuestCR0(pVCpu, pVmxTransient);
1580 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
1581
1582 VBOXSTRICTRC rcStrict = vmxHCExportGuestCR3AndCR4(pVCpu, pVmxTransient);
1583 if (rcStrict == VINF_SUCCESS)
1584 { /* likely */ }
1585 else
1586 {
1587 Assert(rcStrict == VINF_EM_RESCHEDULE_REM || RT_FAILURE_NP(rcStrict));
1588 return VBOXSTRICTRC_VAL(rcStrict);
1589 }
1590
1591 vmxHCExportGuestXcptIntercepts(pVCpu, pVmxTransient);
1592 vmxHCExportGuestRip(pVCpu);
1593 //vmxHCExportGuestRsp(pVCpu);
1594 vmxHCExportGuestRflags(pVCpu, pVmxTransient);
1595
1596 rc = vmxHCExportGuestSegRegsXdtr(pVCpu, pVmxTransient);
1597 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
1598
1599 if (fWhat & CPUMCTX_EXTRN_XCRx)
1600 {
1601 WRITE_GREG(HV_X86_XCR0, pVCpu->cpum.GstCtx.aXcr[0]);
1602 ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_XCRx);
1603 }
1604
1605 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
1606 {
1607 Assert(pVCpu->nem.s.fCtxChanged & HM_CHANGED_GUEST_APIC_TPR);
1608 vmxHCExportGuestApicTpr(pVCpu, pVmxTransient);
1609
1610 rc = APICGetTpr(pVCpu, &pVmxTransient->u8GuestTpr, NULL /*pfPending*/, NULL /*pu8PendingIntr*/);
1611 AssertRC(rc);
1612
1613 WRITE_GREG(HV_X86_TPR, pVmxTransient->u8GuestTpr);
1614 ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
1615 }
1616
1617 /* Debug registers. */
1618 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
1619 {
1620 WRITE_GREG(HV_X86_DR0, pVCpu->cpum.GstCtx.dr[0]); // CPUMGetHyperDR0(pVCpu));
1621 WRITE_GREG(HV_X86_DR1, pVCpu->cpum.GstCtx.dr[1]); // CPUMGetHyperDR1(pVCpu));
1622 WRITE_GREG(HV_X86_DR2, pVCpu->cpum.GstCtx.dr[2]); // CPUMGetHyperDR2(pVCpu));
1623 WRITE_GREG(HV_X86_DR3, pVCpu->cpum.GstCtx.dr[3]); // CPUMGetHyperDR3(pVCpu));
1624 ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_DR0_DR3);
1625 }
1626 if (fWhat & CPUMCTX_EXTRN_DR6)
1627 {
1628 WRITE_GREG(HV_X86_DR6, pVCpu->cpum.GstCtx.dr[6]); // CPUMGetHyperDR6(pVCpu));
1629 ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_DR6);
1630 }
1631 if (fWhat & CPUMCTX_EXTRN_DR7)
1632 {
1633 WRITE_GREG(HV_X86_DR7, pVCpu->cpum.GstCtx.dr[7]); // CPUMGetHyperDR7(pVCpu));
1634 ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_DR7);
1635 }
1636
1637 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE))
1638 {
1639 hv_return_t hrc = hv_vcpu_write_fpstate(pVCpu->nem.s.hVCpuId, &pVCpu->cpum.GstCtx.XState, sizeof(pVCpu->cpum.GstCtx.XState));
1640 if (hrc == HV_SUCCESS)
1641 { /* likely */ }
1642 else
1643 return nemR3DarwinHvSts2Rc(hrc);
1644
1645        ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~(HM_CHANGED_GUEST_X87 | HM_CHANGED_GUEST_SSE_AVX | HM_CHANGED_GUEST_OTHER_XSAVE));
1646 }
1647
1648 /* MSRs */
1649 if (fWhat & CPUMCTX_EXTRN_EFER)
1650 {
1651 WRITE_VMCS_FIELD(VMX_VMCS64_GUEST_EFER_FULL, pVCpu->cpum.GstCtx.msrEFER);
1652 ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_EFER_MSR);
1653 }
1654 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1655 {
1656 WRITE_MSR(MSR_K8_KERNEL_GS_BASE, pVCpu->cpum.GstCtx.msrKERNELGSBASE);
1657 ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_KERNEL_GS_BASE);
1658 }
1659 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1660 {
1661 WRITE_MSR(MSR_IA32_SYSENTER_CS, pVCpu->cpum.GstCtx.SysEnter.cs);
1662 WRITE_MSR(MSR_IA32_SYSENTER_EIP, pVCpu->cpum.GstCtx.SysEnter.eip);
1663 WRITE_MSR(MSR_IA32_SYSENTER_ESP, pVCpu->cpum.GstCtx.SysEnter.esp);
1664 ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_MSR_MASK);
1665 }
1666 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1667 {
1668 WRITE_MSR(MSR_K6_STAR, pVCpu->cpum.GstCtx.msrSTAR);
1669 WRITE_MSR(MSR_K8_LSTAR, pVCpu->cpum.GstCtx.msrLSTAR);
1670 WRITE_MSR(MSR_K8_CSTAR, pVCpu->cpum.GstCtx.msrCSTAR);
1671 WRITE_MSR(MSR_K8_SF_MASK, pVCpu->cpum.GstCtx.msrSFMASK);
1672 ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSCALL_MSRS);
1673 }
1674 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1675 {
1676#if 0
1677 hv_return_t hrc = hv_vmx_vcpu_set_apic_address(pVCpu->nem.s.hVCpuId, APICGetBaseMsrNoCheck(pVCpu) & PAGE_BASE_GC_MASK);
1678 if (RT_UNLIKELY(hrc != HV_SUCCESS))
1679 return nemR3DarwinHvSts2Rc(hrc);
1680#endif
1681
1682 ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_OTHER_MSRS);
1683
1684#if 0
1685 ADD_REG64(WHvX64RegisterPat, pVCpu->cpum.GstCtx.msrPAT);
1686#if 0 /** @todo check if WHvX64RegisterMsrMtrrCap works here... */
1687 ADD_REG64(WHvX64RegisterMsrMtrrCap, CPUMGetGuestIa32MtrrCap(pVCpu));
1688#endif
1689 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
1690 ADD_REG64(WHvX64RegisterMsrMtrrDefType, pCtxMsrs->msr.MtrrDefType);
1691 ADD_REG64(WHvX64RegisterMsrMtrrFix64k00000, pCtxMsrs->msr.MtrrFix64K_00000);
1692 ADD_REG64(WHvX64RegisterMsrMtrrFix16k80000, pCtxMsrs->msr.MtrrFix16K_80000);
1693 ADD_REG64(WHvX64RegisterMsrMtrrFix16kA0000, pCtxMsrs->msr.MtrrFix16K_A0000);
1694 ADD_REG64(WHvX64RegisterMsrMtrrFix4kC0000, pCtxMsrs->msr.MtrrFix4K_C0000);
1695 ADD_REG64(WHvX64RegisterMsrMtrrFix4kC8000, pCtxMsrs->msr.MtrrFix4K_C8000);
1696 ADD_REG64(WHvX64RegisterMsrMtrrFix4kD0000, pCtxMsrs->msr.MtrrFix4K_D0000);
1697 ADD_REG64(WHvX64RegisterMsrMtrrFix4kD8000, pCtxMsrs->msr.MtrrFix4K_D8000);
1698 ADD_REG64(WHvX64RegisterMsrMtrrFix4kE0000, pCtxMsrs->msr.MtrrFix4K_E0000);
1699 ADD_REG64(WHvX64RegisterMsrMtrrFix4kE8000, pCtxMsrs->msr.MtrrFix4K_E8000);
1700 ADD_REG64(WHvX64RegisterMsrMtrrFix4kF0000, pCtxMsrs->msr.MtrrFix4K_F0000);
1701 ADD_REG64(WHvX64RegisterMsrMtrrFix4kF8000, pCtxMsrs->msr.MtrrFix4K_F8000);
1702 ADD_REG64(WHvX64RegisterTscAux, pCtxMsrs->msr.TscAux);
1703#if 0 /** @todo these registers aren't available? Might explain something.. .*/
1704 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pVM);
1705 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
1706 {
1707 ADD_REG64(HvX64RegisterIa32MiscEnable, pCtxMsrs->msr.MiscEnable);
1708 ADD_REG64(HvX64RegisterIa32FeatureControl, CPUMGetGuestIa32FeatureControl(pVCpu));
1709 }
1710#endif
1711#endif
1712 }
1713
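    /* Always write a zeroed guest IA32_DEBUGCTL to the VMCS for now (LBR is not enabled here, see the commented-out flag). */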
1714 WRITE_VMCS_FIELD(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0 /*MSR_IA32_DEBUGCTL_LBR*/);
1715
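    /* Invalidate the vCPU TLB and flush the cached vCPU state to the hypervisor before entering the guest. */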
1716 hv_vcpu_invalidate_tlb(pVCpu->nem.s.hVCpuId);
1717 hv_vcpu_flush(pVCpu->nem.s.hVCpuId);
1718
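    /* Everything has been exported and is now owned by the hypervisor, so mark the whole context as externalized and kept by NEM. */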
1719 pVCpu->cpum.GstCtx.fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_NEM;
1720
1721 /* Clear any bits that may be set but exported unconditionally or unused/reserved bits. */
1722 ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~(
1723 HM_CHANGED_GUEST_TSC_AUX
1724 | HM_CHANGED_GUEST_HWVIRT
1725 | HM_CHANGED_VMX_GUEST_AUTO_MSRS
1726 | HM_CHANGED_VMX_GUEST_LAZY_MSRS
1727 | (HM_CHANGED_KEEPER_STATE_MASK & ~HM_CHANGED_VMX_MASK)));
1728
1729 STAM_PROFILE_ADV_STOP(&pVCpu->nem.s.StatProfGstStateExport, x);
1730 return VINF_SUCCESS;
1731#undef WRITE_GREG
1732#undef WRITE_VMCS_FIELD
1733}
1734
1735
1736/**
1737 * Handles an exit from hv_vcpu_run().
1738 *
1739 * @returns VBox strict status code.
1740 * @param pVM The cross context VM structure.
1741 * @param pVCpu The cross context virtual CPU structure of the
1742 * calling EMT.
1743 * @param pVmxTransient The transient VMX structure.
1744 */
1745static VBOXSTRICTRC nemR3DarwinHandleExit(PVM pVM, PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
1746{
1747 uint32_t uExitReason;
1748 int rc = nemR3DarwinReadVmcs32(pVCpu, VMX_VMCS32_RO_EXIT_REASON, &uExitReason);
1749 AssertRC(rc);
1750 pVmxTransient->fVmcsFieldsRead = 0;
1751 pVmxTransient->fIsNestedGuest = false;
1752 pVmxTransient->uExitReason = VMX_EXIT_REASON_BASIC(uExitReason);
1753 pVmxTransient->fVMEntryFailed = VMX_EXIT_REASON_HAS_ENTRY_FAILED(uExitReason);
1754
1755 if (RT_UNLIKELY(pVmxTransient->fVMEntryFailed))
1756 AssertLogRelMsgFailedReturn(("Running guest failed for CPU #%u: %#x %u\n",
1757 pVCpu->idCpu, pVmxTransient->uExitReason, vmxHCCheckGuestState(pVCpu, &pVCpu->nem.s.VmcsInfo)),
1758 VERR_NEM_IPE_0);
1759
1760 /** @todo Only copy the state on demand (the R0 VT-x code saves some stuff unconditionally and the VMX template assumes that
1761 * when handling exits). */
1762 rc = nemR3DarwinCopyStateFromHv(pVM, pVCpu, CPUMCTX_EXTRN_ALL);
1763 AssertRCReturn(rc, rc);
1764
1765 STAM_COUNTER_INC(&pVCpu->nem.s.pVmxStats->aStatExitReason[pVmxTransient->uExitReason & MASK_EXITREASON_STAT]);
1766 STAM_REL_COUNTER_INC(&pVCpu->nem.s.pVmxStats->StatExitAll);
1767
1768#ifndef HMVMX_USE_FUNCTION_TABLE
1769 return vmxHCHandleExit(pVCpu, pVmxTransient);
1770#else
1771 return g_aVMExitHandlers[pVmxTransient->uExitReason].pfn(pVCpu, pVmxTransient);
1772#endif
1773}
1774
1775
1776/**
1777 * Worker for nemR3NativeInit that loads the Hypervisor.framework shared library.
1778 *
1779 * @returns VBox status code.
1780 * @param fForced Whether the HMForced flag is set and we should
1781 * fail if we cannot initialize.
1782 * @param pErrInfo Where to always return error info.
1783 */
1784static int nemR3DarwinLoadHv(bool fForced, PRTERRINFO pErrInfo)
1785{
1786 RTLDRMOD hMod = NIL_RTLDRMOD;
1787 static const char *s_pszHvPath = "/System/Library/Frameworks/Hypervisor.framework/Hypervisor";
1788
1789 int rc = RTLdrLoadEx(s_pszHvPath, &hMod, RTLDRLOAD_FLAGS_NO_UNLOAD | RTLDRLOAD_FLAGS_NO_SUFFIX, pErrInfo);
1790 if (RT_SUCCESS(rc))
1791 {
1792 for (unsigned i = 0; i < RT_ELEMENTS(g_aImports); i++)
1793 {
1794 int rc2 = RTLdrGetSymbol(hMod, g_aImports[i].pszName, (void **)g_aImports[i].ppfn);
1795 if (RT_SUCCESS(rc2))
1796 {
1797 if (g_aImports[i].fOptional)
1798 LogRel(("NEM: info: Found optional import Hypervisor!%s.\n",
1799 g_aImports[i].pszName));
1800 }
1801 else
1802 {
1803 *g_aImports[i].ppfn = NULL;
1804
1805 LogRel(("NEM: %s: Failed to import Hypervisor!%s: %Rrc\n",
1806 g_aImports[i].fOptional ? "info" : fForced ? "fatal" : "error",
1807 g_aImports[i].pszName, rc2));
1808 if (!g_aImports[i].fOptional)
1809 {
1810 if (RTErrInfoIsSet(pErrInfo))
1811 RTErrInfoAddF(pErrInfo, rc2, ", Hypervisor!%s", g_aImports[i].pszName);
1812 else
1813 rc = RTErrInfoSetF(pErrInfo, rc2, "Failed to import: Hypervisor!%s", g_aImports[i].pszName);
1814 Assert(RT_FAILURE(rc));
1815 }
1816 }
1817 }
1818 if (RT_SUCCESS(rc))
1819 {
1820 Assert(!RTErrInfoIsSet(pErrInfo));
1821 }
1822
1823 RTLdrClose(hMod);
1824 }
1825 else
1826 {
1827        RTErrInfoAddF(pErrInfo, rc, "Failed to load Hypervisor.framework: %s: %Rrc", s_pszHvPath, rc);
1828 rc = VERR_NEM_INIT_FAILED;
1829 }
1830
1831 return rc;
1832}
1833
1834
1835/**
1836 * Read and initialize the global capabilities supported by this CPU.
1837 *
1838 * @returns VBox status code.
1839 */
1840static int nemR3DarwinCapsInit(void)
1841{
1842 RT_ZERO(g_HmMsrs);
1843
1844 hv_return_t hrc = hv_vmx_read_capability(HV_VMX_CAP_PINBASED, &g_HmMsrs.u.vmx.PinCtls.u);
1845 if (hrc == HV_SUCCESS)
1846 hrc = hv_vmx_read_capability(HV_VMX_CAP_PROCBASED, &g_HmMsrs.u.vmx.ProcCtls.u);
1847 if (hrc == HV_SUCCESS)
1848 hrc = hv_vmx_read_capability(HV_VMX_CAP_ENTRY, &g_HmMsrs.u.vmx.EntryCtls.u);
1849 if (hrc == HV_SUCCESS)
1850 hrc = hv_vmx_read_capability(HV_VMX_CAP_EXIT, &g_HmMsrs.u.vmx.ExitCtls.u);
1851 if (hrc == HV_SUCCESS)
1852 {
1853 hrc = hv_vmx_read_capability(HV_VMX_CAP_BASIC, &g_HmMsrs.u.vmx.u64Basic);
1854 if (hrc == HV_SUCCESS)
1855 {
1856 if (hrc == HV_SUCCESS)
1857 hrc = hv_vmx_read_capability(HV_VMX_CAP_MISC, &g_HmMsrs.u.vmx.u64Misc);
1858 if (hrc == HV_SUCCESS)
1859 hrc = hv_vmx_read_capability(HV_VMX_CAP_CR0_FIXED0, &g_HmMsrs.u.vmx.u64Cr0Fixed0);
1860 if (hrc == HV_SUCCESS)
1861 hrc = hv_vmx_read_capability(HV_VMX_CAP_CR0_FIXED1, &g_HmMsrs.u.vmx.u64Cr0Fixed1);
1862 if (hrc == HV_SUCCESS)
1863 hrc = hv_vmx_read_capability(HV_VMX_CAP_CR4_FIXED0, &g_HmMsrs.u.vmx.u64Cr4Fixed0);
1864 if (hrc == HV_SUCCESS)
1865 hrc = hv_vmx_read_capability(HV_VMX_CAP_CR4_FIXED1, &g_HmMsrs.u.vmx.u64Cr4Fixed1);
1866 if (hrc == HV_SUCCESS)
1867 hrc = hv_vmx_read_capability(HV_VMX_CAP_VMCS_ENUM, &g_HmMsrs.u.vmx.u64VmcsEnum);
1868 if ( hrc == HV_SUCCESS
1869 && RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_TRUE_CTLS))
1870 {
1871 hrc = hv_vmx_read_capability(HV_VMX_CAP_TRUE_PINBASED, &g_HmMsrs.u.vmx.TruePinCtls.u);
1872 if (hrc == HV_SUCCESS)
1873 hrc = hv_vmx_read_capability(HV_VMX_CAP_TRUE_PROCBASED, &g_HmMsrs.u.vmx.TrueProcCtls.u);
1874 if (hrc == HV_SUCCESS)
1875 hrc = hv_vmx_read_capability(HV_VMX_CAP_TRUE_ENTRY, &g_HmMsrs.u.vmx.TrueEntryCtls.u);
1876 if (hrc == HV_SUCCESS)
1877 hrc = hv_vmx_read_capability(HV_VMX_CAP_TRUE_EXIT, &g_HmMsrs.u.vmx.TrueExitCtls.u);
1878 }
1879 }
1880 else
1881 {
1882 /* Likely running on anything < 11.0 (BigSur) so provide some sensible defaults. */
1883 g_HmMsrs.u.vmx.u64Cr0Fixed0 = 0x80000021;
1884 g_HmMsrs.u.vmx.u64Cr0Fixed1 = 0xffffffff;
1885 g_HmMsrs.u.vmx.u64Cr4Fixed0 = 0x2000;
1886 g_HmMsrs.u.vmx.u64Cr4Fixed1 = 0x1767ff;
1887 hrc = HV_SUCCESS;
1888 }
1889 }
1890
1891 if ( hrc == HV_SUCCESS
1892 && g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
1893 {
1894 hrc = hv_vmx_read_capability(HV_VMX_CAP_PROCBASED2, &g_HmMsrs.u.vmx.ProcCtls2.u);
1895
1896 if ( hrc == HV_SUCCESS
1897 && g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & (VMX_PROC_CTLS2_EPT | VMX_PROC_CTLS2_VPID))
1898 {
1899 hrc = hv_vmx_read_capability(HV_VMX_CAP_EPT_VPID_CAP, &g_HmMsrs.u.vmx.u64EptVpidCaps);
1900 if (hrc != HV_SUCCESS)
1901 hrc = HV_SUCCESS; /* Probably just outdated OS. */
1902 }
1903
1904 g_HmMsrs.u.vmx.u64VmFunc = 0; /* No way to read that on macOS. */
1905 }
1906
1907 if (hrc == HV_SUCCESS)
1908 {
1909 /*
1910 * Check for EFER swapping support.
1911 */
1912 g_fHmVmxSupportsVmcsEfer = true; //(g_HmMsrs.u.vmx.EntryCtls.n.allowed1 & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
1913 //&& (g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_LOAD_EFER_MSR)
1914 //&& (g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_EFER_MSR);
1915 }
1916
1917 return nemR3DarwinHvSts2Rc(hrc);
1918}
1919
1920
1921/**
1922 * Sets up pin-based VM-execution controls in the VMCS.
1923 *
1924 * @returns VBox status code.
1925 * @param pVCpu The cross context virtual CPU structure.
1926 * @param pVmcsInfo The VMCS info. object.
1927 */
1928static int nemR3DarwinVmxSetupVmcsPinCtls(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1929{
1930 //PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1931 uint32_t fVal = g_HmMsrs.u.vmx.PinCtls.n.allowed0; /* Bits set here must always be set. */
1932 uint32_t const fZap = g_HmMsrs.u.vmx.PinCtls.n.allowed1; /* Bits cleared here must always be cleared. */
1933
1934 if (g_HmMsrs.u.vmx.PinCtls.n.allowed1 & VMX_PIN_CTLS_VIRT_NMI)
1935 fVal |= VMX_PIN_CTLS_VIRT_NMI; /* Use virtual NMIs and virtual-NMI blocking features. */
1936
1937#if 0 /** @todo Use preemption timer */
1938 /* Enable the VMX-preemption timer. */
1939 if (pVM->hmr0.s.vmx.fUsePreemptTimer)
1940 {
1941 Assert(g_HmMsrs.u.vmx.PinCtls.n.allowed1 & VMX_PIN_CTLS_PREEMPT_TIMER);
1942 fVal |= VMX_PIN_CTLS_PREEMPT_TIMER;
1943 }
1944
1945 /* Enable posted-interrupt processing. */
1946 if (pVM->hm.s.fPostedIntrs)
1947 {
1948 Assert(g_HmMsrs.u.vmx.PinCtls.n.allowed1 & VMX_PIN_CTLS_POSTED_INT);
1949 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_ACK_EXT_INT);
1950 fVal |= VMX_PIN_CTLS_POSTED_INT;
1951 }
1952#endif
1953
1954 if ((fVal & fZap) != fVal)
1955 {
1956 LogRelFunc(("Invalid pin-based VM-execution controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
1957 g_HmMsrs.u.vmx.PinCtls.n.allowed0, fVal, fZap));
1958 pVCpu->nem.s.u32HMError = VMX_UFC_CTRL_PIN_EXEC;
1959 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1960 }
1961
1962 /* Commit it to the VMCS and update our cache. */
1963 int rc = nemR3DarwinWriteVmcs32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, fVal);
1964 AssertRC(rc);
1965 pVmcsInfo->u32PinCtls = fVal;
1966
1967 return VINF_SUCCESS;
1968}
1969
1970
1971/**
1972 * Sets up secondary processor-based VM-execution controls in the VMCS.
1973 *
1974 * @returns VBox status code.
1975 * @param pVCpu The cross context virtual CPU structure.
1976 * @param pVmcsInfo The VMCS info. object.
1977 */
1978static int nemR3DarwinVmxSetupVmcsProcCtls2(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
1979{
1980 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1981 uint32_t fVal = g_HmMsrs.u.vmx.ProcCtls2.n.allowed0; /* Bits set here must be set in the VMCS. */
1982 uint32_t const fZap = g_HmMsrs.u.vmx.ProcCtls2.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1983
1984 /* WBINVD causes a VM-exit. */
1985 if (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_WBINVD_EXIT)
1986 fVal |= VMX_PROC_CTLS2_WBINVD_EXIT;
1987
1988    /* Enable the INVPCID instruction if we expose it to the guest and it is supported
1989       by the hardware. Without this, a guest executing INVPCID would cause a #UD. */
1990 if ( pVM->cpum.ro.GuestFeatures.fInvpcid
1991 && (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_INVPCID))
1992 fVal |= VMX_PROC_CTLS2_INVPCID;
1993
1994#if 0 /** @todo */
1995 /* Enable VPID. */
1996 if (pVM->hmr0.s.vmx.fVpid)
1997 fVal |= VMX_PROC_CTLS2_VPID;
1998
1999 if (pVM->hm.s.fVirtApicRegs)
2000 {
2001 /* Enable APIC-register virtualization. */
2002 Assert(g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_APIC_REG_VIRT);
2003 fVal |= VMX_PROC_CTLS2_APIC_REG_VIRT;
2004
2005 /* Enable virtual-interrupt delivery. */
2006 Assert(g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_INTR_DELIVERY);
2007 fVal |= VMX_PROC_CTLS2_VIRT_INTR_DELIVERY;
2008 }
2009
2010 /* Virtualize-APIC accesses if supported by the CPU. The virtual-APIC page is
2011 where the TPR shadow resides. */
2012 /** @todo VIRT_X2APIC support, it's mutually exclusive with this. So must be
2013 * done dynamically. */
2014 if (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
2015 {
2016 fVal |= VMX_PROC_CTLS2_VIRT_APIC_ACCESS;
2017 hmR0VmxSetupVmcsApicAccessAddr(pVCpu);
2018 }
2019#endif
2020
2021    /* Enable the RDTSCP instruction if we expose it to the guest and it is supported
2022       by the hardware. Without this, a guest executing RDTSCP would cause a #UD. */
2023 if ( pVM->cpum.ro.GuestFeatures.fRdTscP
2024 && (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_RDTSCP))
2025 fVal |= VMX_PROC_CTLS2_RDTSCP;
2026
2027#if 0
2028 /* Enable Pause-Loop exiting. */
2029 if ( (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT)
2030 && pVM->hm.s.vmx.cPleGapTicks
2031 && pVM->hm.s.vmx.cPleWindowTicks)
2032 {
2033 fVal |= VMX_PROC_CTLS2_PAUSE_LOOP_EXIT;
2034
2035 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_GAP, pVM->hm.s.vmx.cPleGapTicks); AssertRC(rc);
2036 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_WINDOW, pVM->hm.s.vmx.cPleWindowTicks); AssertRC(rc);
2037 }
2038#endif
2039
2040 if ((fVal & fZap) != fVal)
2041 {
2042 LogRelFunc(("Invalid secondary processor-based VM-execution controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
2043 g_HmMsrs.u.vmx.ProcCtls2.n.allowed0, fVal, fZap));
2044 pVCpu->nem.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC2;
2045 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2046 }
2047
2048 /* Commit it to the VMCS and update our cache. */
2049 int rc = nemR3DarwinWriteVmcs32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, fVal);
2050 AssertRC(rc);
2051 pVmcsInfo->u32ProcCtls2 = fVal;
2052
2053 return VINF_SUCCESS;
2054}
2055
2056
2057/**
2058 * Enables native access for the given MSR.
2059 *
2060 * @returns VBox status code.
2061 * @param pVCpu The cross context virtual CPU structure.
2062 * @param idMsr The MSR to enable native access for.
2063 */
2064static int nemR3DarwinMsrSetNative(PVMCPUCC pVCpu, uint32_t idMsr)
2065{
2066 hv_return_t hrc = hv_vcpu_enable_native_msr(pVCpu->nem.s.hVCpuId, idMsr, true /*enable*/);
2067 if (hrc == HV_SUCCESS)
2068 return VINF_SUCCESS;
2069
2070 return nemR3DarwinHvSts2Rc(hrc);
2071}
2072
2073
2074/**
2075 * Sets up the MSR permissions which don't change through the lifetime of the VM.
2076 *
2077 * @returns VBox status code.
2078 * @param pVCpu The cross context virtual CPU structure.
2079 * @param pVmcsInfo The VMCS info. object.
2080 */
2081static int nemR3DarwinSetupVmcsMsrPermissions(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2082{
2083 RT_NOREF(pVmcsInfo);
2084
2085 /*
2086 * The guest can access the following MSRs (read, write) without causing
2087 * VM-exits; they are loaded/stored automatically using fields in the VMCS.
2088 */
2089 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2090 int rc;
2091 rc = nemR3DarwinMsrSetNative(pVCpu, MSR_IA32_SYSENTER_CS); AssertRCReturn(rc, rc);
2092 rc = nemR3DarwinMsrSetNative(pVCpu, MSR_IA32_SYSENTER_ESP); AssertRCReturn(rc, rc);
2093 rc = nemR3DarwinMsrSetNative(pVCpu, MSR_IA32_SYSENTER_EIP); AssertRCReturn(rc, rc);
2094 rc = nemR3DarwinMsrSetNative(pVCpu, MSR_K8_GS_BASE); AssertRCReturn(rc, rc);
2095 rc = nemR3DarwinMsrSetNative(pVCpu, MSR_K8_FS_BASE); AssertRCReturn(rc, rc);
2096
2097 /*
2098     * The IA32_PRED_CMD and IA32_FLUSH_CMD MSRs are write-only and have no state
2099     * associated with them. We never need to intercept access (writes need to be
2100 * executed without causing a VM-exit, reads will #GP fault anyway).
2101 *
2102 * The IA32_SPEC_CTRL MSR is read/write and has state. We allow the guest to
2103 * read/write them. We swap the guest/host MSR value using the
2104 * auto-load/store MSR area.
2105 */
2106 if (pVM->cpum.ro.GuestFeatures.fIbpb)
2107 {
2108 rc = nemR3DarwinMsrSetNative(pVCpu, MSR_IA32_PRED_CMD);
2109 AssertRCReturn(rc, rc);
2110 }
2111#if 0 /* Doesn't work. */
2112 if (pVM->cpum.ro.GuestFeatures.fFlushCmd)
2113 {
2114 rc = nemR3DarwinMsrSetNative(pVCpu, MSR_IA32_FLUSH_CMD);
2115 AssertRCReturn(rc, rc);
2116 }
2117#endif
2118 if (pVM->cpum.ro.GuestFeatures.fIbrs)
2119 {
2120 rc = nemR3DarwinMsrSetNative(pVCpu, MSR_IA32_SPEC_CTRL);
2121 AssertRCReturn(rc, rc);
2122 }
2123
2124 /*
2125 * Allow full read/write access for the following MSRs (mandatory for VT-x)
2126 * required for 64-bit guests.
2127 */
2128 rc = nemR3DarwinMsrSetNative(pVCpu, MSR_K8_LSTAR); AssertRCReturn(rc, rc);
2129 rc = nemR3DarwinMsrSetNative(pVCpu, MSR_K6_STAR); AssertRCReturn(rc, rc);
2130 rc = nemR3DarwinMsrSetNative(pVCpu, MSR_K8_SF_MASK); AssertRCReturn(rc, rc);
2131 rc = nemR3DarwinMsrSetNative(pVCpu, MSR_K8_KERNEL_GS_BASE); AssertRCReturn(rc, rc);
2132
2133 /* Required for enabling the RDTSCP instruction. */
2134 rc = nemR3DarwinMsrSetNative(pVCpu, MSR_K8_TSC_AUX); AssertRCReturn(rc, rc);
2135
2136 return VINF_SUCCESS;
2137}
2138
2139
2140/**
2141 * Sets up processor-based VM-execution controls in the VMCS.
2142 *
2143 * @returns VBox status code.
2144 * @param pVCpu The cross context virtual CPU structure.
2145 * @param pVmcsInfo The VMCS info. object.
2146 */
2147static int nemR3DarwinVmxSetupVmcsProcCtls(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2148{
2149 uint32_t fVal = g_HmMsrs.u.vmx.ProcCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
2150 uint32_t const fZap = g_HmMsrs.u.vmx.ProcCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2151
2152 fVal |= VMX_PROC_CTLS_HLT_EXIT /* HLT causes a VM-exit. */
2153// | VMX_PROC_CTLS_USE_TSC_OFFSETTING /* Use TSC-offsetting. */
2154 | VMX_PROC_CTLS_MOV_DR_EXIT /* MOV DRx causes a VM-exit. */
2155 | VMX_PROC_CTLS_UNCOND_IO_EXIT /* All IO instructions cause a VM-exit. */
2156 | VMX_PROC_CTLS_RDPMC_EXIT /* RDPMC causes a VM-exit. */
2157 | VMX_PROC_CTLS_MONITOR_EXIT /* MONITOR causes a VM-exit. */
2158 | VMX_PROC_CTLS_MWAIT_EXIT; /* MWAIT causes a VM-exit. */
2159
2160    /* We toggle VMX_PROC_CTLS_MOV_DR_EXIT later, so check that it isn't -always- required to be set or cleared. */
2161 if ( !(g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_MOV_DR_EXIT)
2162 || (g_HmMsrs.u.vmx.ProcCtls.n.allowed0 & VMX_PROC_CTLS_MOV_DR_EXIT))
2163 {
2164 pVCpu->nem.s.u32HMError = VMX_UFC_CTRL_PROC_MOV_DRX_EXIT;
2165 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2166 }
2167
2168 /* Use the secondary processor-based VM-execution controls if supported by the CPU. */
2169 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
2170 fVal |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
2171
2172 if ((fVal & fZap) != fVal)
2173 {
2174 LogRelFunc(("Invalid processor-based VM-execution controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
2175 g_HmMsrs.u.vmx.ProcCtls.n.allowed0, fVal, fZap));
2176 pVCpu->nem.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC;
2177 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2178 }
2179
2180 /* Commit it to the VMCS and update our cache. */
2181 int rc = nemR3DarwinWriteVmcs32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, fVal);
2182 AssertRC(rc);
2183 pVmcsInfo->u32ProcCtls = fVal;
2184
2185 /* Set up MSR permissions that don't change through the lifetime of the VM. */
2186 rc = nemR3DarwinSetupVmcsMsrPermissions(pVCpu, pVmcsInfo);
2187 AssertRCReturn(rc, rc);
2188
2189 /*
2190 * Set up secondary processor-based VM-execution controls
2191     * (we assume the CPU always supports it as we rely on unrestricted guest execution support).
2192 */
2193 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS);
2194 return nemR3DarwinVmxSetupVmcsProcCtls2(pVCpu, pVmcsInfo);
2195}
2196
2197
2198/**
2199 * Sets up miscellaneous (everything other than Pin, Processor and secondary
2200 * Processor-based VM-execution) control fields in the VMCS.
2201 *
2202 * @returns VBox status code.
2203 * @param pVCpu The cross context virtual CPU structure.
2204 * @param pVmcsInfo The VMCS info. object.
2205 */
2206static int nemR3DarwinVmxSetupVmcsMiscCtls(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2207{
2208 int rc = VINF_SUCCESS;
2209 //rc = hmR0VmxSetupVmcsAutoLoadStoreMsrAddrs(pVmcsInfo); TODO
2210 if (RT_SUCCESS(rc))
2211 {
2212 uint64_t const u64Cr0Mask = vmxHCGetFixedCr0Mask(pVCpu);
2213 uint64_t const u64Cr4Mask = vmxHCGetFixedCr4Mask(pVCpu);
2214
2215 rc = nemR3DarwinWriteVmcs64(pVCpu, VMX_VMCS_CTRL_CR0_MASK, u64Cr0Mask); AssertRC(rc);
2216 rc = nemR3DarwinWriteVmcs64(pVCpu, VMX_VMCS_CTRL_CR4_MASK, u64Cr4Mask); AssertRC(rc);
2217
2218 pVmcsInfo->u64Cr0Mask = u64Cr0Mask;
2219 pVmcsInfo->u64Cr4Mask = u64Cr4Mask;
2220
2221#if 0 /** @todo */
2222 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fLbr)
2223 {
2224 rc = VMXWriteVmcsNw(VMX_VMCS64_GUEST_DEBUGCTL_FULL, MSR_IA32_DEBUGCTL_LBR);
2225 AssertRC(rc);
2226 }
2227#endif
2228 return VINF_SUCCESS;
2229 }
2230 else
2231 LogRelFunc(("Failed to initialize VMCS auto-load/store MSR addresses. rc=%Rrc\n", rc));
2232 return rc;
2233}
2234
2235
2236/**
2237 * Sets up the initial exception bitmap in the VMCS based on static conditions.
2238 *
2239 * We shall set up those exception intercepts that don't change during the
2240 * lifetime of the VM here. The rest are done dynamically while loading the
2241 * guest state.
2242 *
2243 * @param pVCpu The cross context virtual CPU structure.
2244 * @param pVmcsInfo The VMCS info. object.
2245 */
2246static void nemR3DarwinVmxSetupVmcsXcptBitmap(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2247{
2248 /*
2249 * The following exceptions are always intercepted:
2250 *
2251 * #AC - To prevent the guest from hanging the CPU and for dealing with
2252 * split-lock detecting host configs.
2253 * #DB - To maintain the DR6 state even when intercepting DRx reads/writes and
2254 * recursive #DBs can cause a CPU hang.
2255 */
2256 uint32_t const uXcptBitmap = RT_BIT(X86_XCPT_AC)
2257 | RT_BIT(X86_XCPT_DB);
2258
2259 /* Commit it to the VMCS. */
2260 int rc = nemR3DarwinWriteVmcs32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
2261 AssertRC(rc);
2262
2263 /* Update our cache of the exception bitmap. */
2264 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
2265}
2266
2267
2268/**
2269 * Initialize the VMCS information field for the given vCPU.
2270 *
2271 * @returns VBox status code.
2272 * @param pVCpu The cross context virtual CPU structure of the
2273 * calling EMT.
2274 */
2275static int nemR3DarwinInitVmcs(PVMCPU pVCpu)
2276{
2277 int rc = nemR3DarwinVmxSetupVmcsPinCtls(pVCpu, &pVCpu->nem.s.VmcsInfo);
2278 if (RT_SUCCESS(rc))
2279 {
2280 rc = nemR3DarwinVmxSetupVmcsProcCtls(pVCpu, &pVCpu->nem.s.VmcsInfo);
2281 if (RT_SUCCESS(rc))
2282 {
2283 rc = nemR3DarwinVmxSetupVmcsMiscCtls(pVCpu, &pVCpu->nem.s.VmcsInfo);
2284 if (RT_SUCCESS(rc))
2285 {
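                /* Read back the current VM-entry and VM-exit controls into our VMCS info cache. */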
2286 rc = nemR3DarwinReadVmcs32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &pVCpu->nem.s.VmcsInfo.u32EntryCtls);
2287 if (RT_SUCCESS(rc))
2288 {
2289 rc = nemR3DarwinReadVmcs32(pVCpu, VMX_VMCS32_CTRL_EXIT, &pVCpu->nem.s.VmcsInfo.u32ExitCtls);
2290 if (RT_SUCCESS(rc))
2291 {
2292 nemR3DarwinVmxSetupVmcsXcptBitmap(pVCpu, &pVCpu->nem.s.VmcsInfo);
2293 return VINF_SUCCESS;
2294 }
2295 else
2296 LogRelFunc(("Failed to read the exit controls. rc=%Rrc\n", rc));
2297 }
2298 else
2299 LogRelFunc(("Failed to read the entry controls. rc=%Rrc\n", rc));
2300 }
2301 else
2302 LogRelFunc(("Failed to setup miscellaneous controls. rc=%Rrc\n", rc));
2303 }
2304 else
2305 LogRelFunc(("Failed to setup processor-based VM-execution controls. rc=%Rrc\n", rc));
2306 }
2307 else
2308 LogRelFunc(("Failed to setup pin-based controls. rc=%Rrc\n", rc));
2309
2310 return rc;
2311}
2312
2313
2314/**
2315 * Registers statistics for the given vCPU.
2316 *
2317 * @returns VBox status code.
2318 * @param pVM The cross context VM structure.
2319 * @param idCpu The CPU ID.
2320 * @param pNemCpu The NEM CPU structure.
2321 */
2322static int nemR3DarwinStatisticsRegister(PVM pVM, VMCPUID idCpu, PNEMCPU pNemCpu)
2323{
2324#define NEM_REG_STAT(a_pVar, a_enmType, s_enmVisibility, a_enmUnit, a_szNmFmt, a_szDesc) do { \
2325 int rc = STAMR3RegisterF(pVM, a_pVar, a_enmType, s_enmVisibility, a_enmUnit, a_szDesc, a_szNmFmt, idCpu); \
2326 AssertRC(rc); \
2327 } while (0)
2328#define NEM_REG_PROFILE(a_pVar, a_szNmFmt, a_szDesc) \
2329 NEM_REG_STAT(a_pVar, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, a_szNmFmt, a_szDesc)
2330#define NEM_REG_COUNTER(a, b, desc) NEM_REG_STAT(a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, b, desc)
2331
2332 NEM_REG_COUNTER(&pNemCpu->pVmxStats->StatExitCR0Read, "/NEM/CPU%u/Exit/Instr/CR-Read/CR0", "CR0 read.");
2333 NEM_REG_COUNTER(&pNemCpu->pVmxStats->StatExitCR2Read, "/NEM/CPU%u/Exit/Instr/CR-Read/CR2", "CR2 read.");
2334 NEM_REG_COUNTER(&pNemCpu->pVmxStats->StatExitCR3Read, "/NEM/CPU%u/Exit/Instr/CR-Read/CR3", "CR3 read.");
2335 NEM_REG_COUNTER(&pNemCpu->pVmxStats->StatExitCR4Read, "/NEM/CPU%u/Exit/Instr/CR-Read/CR4", "CR4 read.");
2336 NEM_REG_COUNTER(&pNemCpu->pVmxStats->StatExitCR8Read, "/NEM/CPU%u/Exit/Instr/CR-Read/CR8", "CR8 read.");
2337 NEM_REG_COUNTER(&pNemCpu->pVmxStats->StatExitCR0Write, "/NEM/CPU%u/Exit/Instr/CR-Write/CR0", "CR0 write.");
2338 NEM_REG_COUNTER(&pNemCpu->pVmxStats->StatExitCR2Write, "/NEM/CPU%u/Exit/Instr/CR-Write/CR2", "CR2 write.");
2339 NEM_REG_COUNTER(&pNemCpu->pVmxStats->StatExitCR3Write, "/NEM/CPU%u/Exit/Instr/CR-Write/CR3", "CR3 write.");
2340 NEM_REG_COUNTER(&pNemCpu->pVmxStats->StatExitCR4Write, "/NEM/CPU%u/Exit/Instr/CR-Write/CR4", "CR4 write.");
2341 NEM_REG_COUNTER(&pNemCpu->pVmxStats->StatExitCR8Write, "/NEM/CPU%u/Exit/Instr/CR-Write/CR8", "CR8 write.");
2342
2343 NEM_REG_COUNTER(&pNemCpu->pVmxStats->StatExitAll, "/NEM/CPU%u/Exit/All", "Total exits (including nested-guest exits).");
2344
2345#ifdef VBOX_WITH_STATISTICS
2346 NEM_REG_PROFILE(&pNemCpu->StatProfGstStateImport, "/NEM/CPU%u/ImportGuestState", "Profiling of importing guest state from hardware after VM-exit.");
2347    NEM_REG_PROFILE(&pNemCpu->StatProfGstStateExport, "/NEM/CPU%u/ExportGuestState", "Profiling of exporting guest state to hardware before VM-entry.");
2348
2349 for (int j = 0; j < MAX_EXITREASON_STAT; j++)
2350 {
2351 const char *pszExitName = HMGetVmxExitName(j);
2352 if (pszExitName)
2353 {
2354 int rc = STAMR3RegisterF(pVM, &pNemCpu->pVmxStats->aStatExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
2355 STAMUNIT_OCCURENCES, pszExitName, "/NEM/CPU%u/Exit/Reason/%02x", idCpu, j);
2356 AssertRCReturn(rc, rc);
2357 }
2358 }
2359#endif
2360
2361 return VINF_SUCCESS;
2362
2363#undef NEM_REG_COUNTER
2364#undef NEM_REG_PROFILE
2365#undef NEM_REG_STAT
2366}
2367
2368
2369/**
2370 * Try initialize the native API.
2371 *
2372 * This may only do part of the job; more can be done in
2373 * nemR3NativeInitAfterCPUM() and nemR3NativeInitCompleted().
2374 *
2375 * @returns VBox status code.
2376 * @param pVM The cross context VM structure.
2377 * @param fFallback Whether we're in fallback mode or use-NEM mode. In
2378 * the latter we'll fail if we cannot initialize.
2379 * @param fForced Whether the HMForced flag is set and we should
2380 * fail if we cannot initialize.
2381 */
2382int nemR3NativeInit(PVM pVM, bool fFallback, bool fForced)
2383{
2384 AssertReturn(!pVM->nem.s.fCreatedVm, VERR_WRONG_ORDER);
2385
2386 /*
2387 * Some state init.
2388 */
2389
2390 /*
2391 * Error state.
2392 * The error message will be non-empty on failure and 'rc' will be set too.
2393 */
2394 RTERRINFOSTATIC ErrInfo;
2395 PRTERRINFO pErrInfo = RTErrInfoInitStatic(&ErrInfo);
2396 int rc = nemR3DarwinLoadHv(fForced, pErrInfo);
2397 if (RT_SUCCESS(rc))
2398 {
2399 hv_return_t hrc = hv_vm_create(HV_VM_DEFAULT);
2400 if (hrc == HV_SUCCESS)
2401 {
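            /* hv_vm_space_create() is an optional import resolved at runtime; when it is available we create a dedicated guest address space (ASID) for this VM. */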
2402 if (hv_vm_space_create)
2403 {
2404 hrc = hv_vm_space_create(&pVM->nem.s.uVmAsid);
2405 if (hrc == HV_SUCCESS)
2406 {
2407 LogRel(("NEM: Successfully created ASID: %u\n", pVM->nem.s.uVmAsid));
2408 pVM->nem.s.fCreatedAsid = true;
2409 }
2410 else
2411 LogRel(("NEM: Failed to create ASID for VM (hrc=%#x), continuing...\n", pVM->nem.s.uVmAsid));
2412 }
2413 pVM->nem.s.fCreatedVm = true;
2414
2415 /* Register release statistics */
2416 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
2417 {
2418 PNEMCPU pNemCpu = &pVM->apCpusR3[idCpu]->nem.s;
2419 PVMXSTATISTICS pVmxStats = (PVMXSTATISTICS)RTMemAllocZ(sizeof(*pVmxStats));
2420 if (RT_LIKELY(pVmxStats))
2421 {
2422 pNemCpu->pVmxStats = pVmxStats;
2423 rc = nemR3DarwinStatisticsRegister(pVM, idCpu, pNemCpu);
2424 AssertRC(rc);
2425 }
2426 else
2427 {
2428 rc = VERR_NO_MEMORY;
2429 break;
2430 }
2431 }
2432
2433 if (RT_SUCCESS(rc))
2434 {
2435 VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_NATIVE_API);
2436 Log(("NEM: Marked active!\n"));
2437 PGMR3EnableNemMode(pVM);
2438 }
2439 }
2440 else
2441 rc = RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
2442 "hv_vm_create() failed: %#x", hrc);
2443 }
2444
2445 /*
2446 * We only fail if in forced mode, otherwise just log the complaint and return.
2447 */
2448 Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API || RTErrInfoIsSet(pErrInfo));
2449 if ( (fForced || !fFallback)
2450 && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NATIVE_API)
2451 return VMSetError(pVM, RT_SUCCESS_NP(rc) ? VERR_NEM_NOT_AVAILABLE : rc, RT_SRC_POS, "%s", pErrInfo->pszMsg);
2452
2453 if (RTErrInfoIsSet(pErrInfo))
2454 LogRel(("NEM: Not available: %s\n", pErrInfo->pszMsg));
2455 return VINF_SUCCESS;
2456}
2457
2458
2459/**
2460 * Worker to create the vCPU handle on the EMT running it later on (as required by HV).
2461 *
2462 * @returns VBox status code
2463 * @param pVM The VM handle.
2464 * @param pVCpu The vCPU handle.
2465 * @param idCpu ID of the CPU to create.
2466 */
2467static DECLCALLBACK(int) nemR3DarwinNativeInitVCpuOnEmt(PVM pVM, PVMCPU pVCpu, VMCPUID idCpu)
2468{
2469 hv_return_t hrc = hv_vcpu_create(&pVCpu->nem.s.hVCpuId, HV_VCPU_DEFAULT);
2470 if (hrc != HV_SUCCESS)
2471 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
2472 "Call to hv_vcpu_create failed on vCPU %u: %#x (%Rrc)", idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));
2473
2474 if (idCpu == 0)
2475 {
2476        /* The first call initializes the MSR structure holding the capabilities of the host CPU. */
2477 int rc = nemR3DarwinCapsInit();
2478 AssertRCReturn(rc, rc);
2479 }
2480
2481 int rc = nemR3DarwinInitVmcs(pVCpu);
2482 AssertRCReturn(rc, rc);
2483
2484 if (pVM->nem.s.fCreatedAsid)
2485 {
2486 hrc = hv_vcpu_set_space(pVCpu->nem.s.hVCpuId, pVM->nem.s.uVmAsid);
2487 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_VM_CREATE_FAILED);
2488 }
2489
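    /* Flag the complete guest state as dirty so it gets exported to the VMCS on the first VM-entry. */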
2490 ASMAtomicUoOrU64(&pVCpu->nem.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
2491
2492 return VINF_SUCCESS;
2493}
2494
2495
2496/**
2497 * Worker to destroy the vCPU handle on the EMT running it later on (as required by HV).
2498 *
2499 * @returns VBox status code
2500 * @param pVCpu The vCPU handle.
2501 */
2502static DECLCALLBACK(int) nemR3DarwinNativeTermVCpuOnEmt(PVMCPU pVCpu)
2503{
2504 hv_return_t hrc = hv_vcpu_set_space(pVCpu->nem.s.hVCpuId, 0 /*asid*/);
2505 Assert(hrc == HV_SUCCESS);
2506
2507 hrc = hv_vcpu_destroy(pVCpu->nem.s.hVCpuId);
2508 Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);
2509 return VINF_SUCCESS;
2510}
2511
2512
2513/**
2514 * Worker to setup the TPR shadowing feature if available on the CPU and the VM has an APIC enabled.
2515 *
2516 * @returns VBox status code
2517 * @param pVM The VM handle.
2518 * @param pVCpu The vCPU handle.
2519 */
2520static DECLCALLBACK(int) nemR3DarwinNativeInitTprShadowing(PVM pVM, PVMCPU pVCpu)
2521{
2522 PVMXVMCSINFO pVmcsInfo = &pVCpu->nem.s.VmcsInfo;
2523 uint32_t fVal = pVmcsInfo->u32ProcCtls;
2524
2525 /* Use TPR shadowing if supported by the CPU. */
2526 if ( PDMHasApic(pVM)
2527 && (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_TPR_SHADOW))
2528 {
2529 fVal |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* CR8 reads from the Virtual-APIC page. */
2530 /* CR8 writes cause a VM-exit based on TPR threshold. */
2531 Assert(!(fVal & VMX_PROC_CTLS_CR8_STORE_EXIT));
2532 Assert(!(fVal & VMX_PROC_CTLS_CR8_LOAD_EXIT));
2533 }
2534 else
2535 {
2536 fVal |= VMX_PROC_CTLS_CR8_STORE_EXIT /* CR8 reads cause a VM-exit. */
2537 | VMX_PROC_CTLS_CR8_LOAD_EXIT; /* CR8 writes cause a VM-exit. */
2538 }
2539
2540 /* Commit it to the VMCS and update our cache. */
2541 int rc = nemR3DarwinWriteVmcs32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, fVal);
2542 AssertRC(rc);
2543 pVmcsInfo->u32ProcCtls = fVal;
2544
2545 return VINF_SUCCESS;
2546}
2547
2548
2549/**
2550 * This is called after CPUMR3Init is done.
2551 *
2552 * @returns VBox status code.
2553 * @param pVM The VM handle.
2554 */
2555int nemR3NativeInitAfterCPUM(PVM pVM)
2556{
2557 /*
2558 * Validate sanity.
2559 */
2560 AssertReturn(!pVM->nem.s.fCreatedEmts, VERR_WRONG_ORDER);
2561 AssertReturn(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API, VERR_WRONG_ORDER);
2562
2563 /*
2564 * Setup the EMTs.
2565 */
2566 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
2567 {
2568 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
2569
2570 int rc = VMR3ReqCallWait(pVM, idCpu, (PFNRT)nemR3DarwinNativeInitVCpuOnEmt, 3, pVM, pVCpu, idCpu);
2571 if (RT_FAILURE(rc))
2572 {
2573 /* Rollback. */
2574 while (idCpu--)
2575 VMR3ReqCallWait(pVM, idCpu, (PFNRT)nemR3DarwinNativeTermVCpuOnEmt, 1, pVCpu);
2576
2577 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS, "Call to hv_vcpu_create failed: %Rrc", rc);
2578 }
2579 }
2580
2581 pVM->nem.s.fCreatedEmts = true;
2582 return VINF_SUCCESS;
2583}
2584
2585
2586int nemR3NativeInitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
2587{
2588 if (enmWhat == VMINITCOMPLETED_RING3)
2589 {
2590        /* Now that PDM is initialized, the APIC state is known and we can enable the TPR shadowing feature on all EMTs. */
2591 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
2592 {
2593 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
2594
2595 int rc = VMR3ReqCallWait(pVM, idCpu, (PFNRT)nemR3DarwinNativeInitTprShadowing, 2, pVM, pVCpu);
2596 if (RT_FAILURE(rc))
2597 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS, "Setting up TPR shadowing failed: %Rrc", rc);
2598 }
2599 }
2600 return VINF_SUCCESS;
2601}
2602
2603
2604int nemR3NativeTerm(PVM pVM)
2605{
2606 /*
2607 * Delete the VM.
2608 */
2609
2610    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
2611 {
2612 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
2613
2614 /*
2615     * Need to do this or hv_vm_space_destroy() fails later on (on 10.15 at least). This could have been
2616     * documented in the API reference so one wouldn't have to decompile the kext to find it out, but we are
2617     * talking about Apple here unfortunately; API documentation is not their strong suit...
2618     * It would of course have been even better to just drop the address space reference automatically when
2619     * the vCPU gets destroyed.
2620 */
2621 hv_return_t hrc = hv_vcpu_set_space(pVCpu->nem.s.hVCpuId, 0 /*asid*/);
2622 Assert(hrc == HV_SUCCESS);
2623
2624 /*
2625 * Apple's documentation states that the vCPU should be destroyed
2626 * on the thread running the vCPU but as all the other EMTs are gone
2627 * at this point, destroying the VM would hang.
2628 *
2629     * We seem to be in luck here though, as destroying apparently works
2630 * from EMT(0) as well.
2631 */
2632 hrc = hv_vcpu_destroy(pVCpu->nem.s.hVCpuId);
2633 Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);
2634
2635 if (pVCpu->nem.s.pVmxStats)
2636 {
2637 RTMemFree(pVCpu->nem.s.pVmxStats);
2638 pVCpu->nem.s.pVmxStats = NULL;
2639 }
2640 }
2641
2642 pVM->nem.s.fCreatedEmts = false;
2643
2644 if (pVM->nem.s.fCreatedAsid)
2645 {
2646 hv_return_t hrc = hv_vm_space_destroy(pVM->nem.s.uVmAsid);
2647 Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);
2648 pVM->nem.s.fCreatedAsid = false;
2649 }
2650
2651 if (pVM->nem.s.fCreatedVm)
2652 {
2653 hv_return_t hrc = hv_vm_destroy();
2654 if (hrc != HV_SUCCESS)
2655 LogRel(("NEM: hv_vm_destroy() failed with %#x\n", hrc));
2656
2657 pVM->nem.s.fCreatedVm = false;
2658 }
2659 return VINF_SUCCESS;
2660}
2661
2662
2663/**
2664 * VM reset notification.
2665 *
2666 * @param pVM The cross context VM structure.
2667 */
2668void nemR3NativeReset(PVM pVM)
2669{
2670 RT_NOREF(pVM);
2671}
2672
2673
2674/**
2675 * Reset CPU due to INIT IPI or hot (un)plugging.
2676 *
2677 * @param pVCpu The cross context virtual CPU structure of the CPU being
2678 * reset.
2679 * @param fInitIpi Whether this is the INIT IPI or hot (un)plugging case.
2680 */
2681void nemR3NativeResetCpu(PVMCPU pVCpu, bool fInitIpi)
2682{
2683 RT_NOREF(fInitIpi);
2684 ASMAtomicUoOrU64(&pVCpu->nem.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
2685}
2686
2687
2688VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu)
2689{
2690 LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 <=\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags));
2691#ifdef LOG_ENABLED
2692 if (LogIs3Enabled())
2693 nemR3DarwinLogState(pVM, pVCpu);
2694#endif
2695
2696 /*
2697 * Try switch to NEM runloop state.
2698 */
2699 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
2700 { /* likely */ }
2701 else
2702 {
2703 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
2704 LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
2705 return VINF_SUCCESS;
2706 }
2707
2708 /*
2709 * The run loop.
2710 *
2711     * The current approach to state updating is to use the sledgehammer and sync
2712     * everything every time. This will be optimized later.
2713 */
2714
2715 VMXTRANSIENT VmxTransient;
2716 RT_ZERO(VmxTransient);
2717 VmxTransient.pVmcsInfo = &pVCpu->nem.s.VmcsInfo;
2718
2719 /*
2720 * Poll timers and run for a bit.
2721 */
2722 /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
2723 * the whole polling job when timers have changed... */
2724 uint64_t offDeltaIgnored;
2725 uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
2726
2727 const bool fSingleStepping = DBGFIsStepping(pVCpu);
2728 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2729 for (unsigned iLoop = 0;; iLoop++)
2730 {
2731 /*
2732 * Check and process force flag actions, some of which might require us to go back to ring-3.
2733 */
2734 rcStrict = vmxHCCheckForceFlags(pVCpu, false /*fIsNestedGuest*/, fSingleStepping);
2735 if (rcStrict == VINF_SUCCESS)
2736 { /*likely */ }
2737 else
2738 {
2739 if (rcStrict == VINF_EM_RAW_TO_R3)
2740 rcStrict = VINF_SUCCESS;
2741 break;
2742 }
2743
2744 /*
2745 * Evaluate events to be injected into the guest.
2746 *
2747 * Events in TRPM can be injected without inspecting the guest state.
2748 * If any new events (interrupts/NMI) are pending currently, we try to set up the
2749 * guest to cause a VM-exit the next time they are ready to receive the event.
2750 */
2751 if (TRPMHasTrap(pVCpu))
2752 vmxHCTrpmTrapToPendingEvent(pVCpu);
2753
2754 uint32_t fIntrState;
2755 rcStrict = vmxHCEvaluatePendingEvent(pVCpu, &pVCpu->nem.s.VmcsInfo, false /*fIsNestedGuest*/, &fIntrState);
2756
2757 /*
2758 * Event injection may take locks (currently the PGM lock for real-on-v86 case) and thus
2759 * needs to be done with longjmps or interrupts + preemption enabled. Event injection might
2760 * also result in triple-faulting the VM.
2761 *
2762 * With nested-guests, the above does not apply since unrestricted guest execution is a
2763 * requirement. Regardless, we do this here to avoid duplicating code elsewhere.
2764 */
2765 rcStrict = vmxHCInjectPendingEvent(pVCpu, &pVCpu->nem.s.VmcsInfo, false /*fIsNestedGuest*/, fIntrState, fSingleStepping);
2766 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2767 { /* likely */ }
2768 else
2769 {
2770 AssertMsg(rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fSingleStepping),
2771 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2772 break;
2773 }
2774
2775 int rc = nemR3DarwinExportGuestState(pVM, pVCpu, &VmxTransient);
2776 AssertRCReturn(rc, rc);
2777
2778 LogFlowFunc(("Running vCPU\n"));
2779 pVCpu->nem.s.Event.fPending = false;
2780
2781 TMNotifyStartOfExecution(pVM, pVCpu);
2782
2783 Assert(!pVCpu->nem.s.fCtxChanged);
2784 hv_return_t hrc;
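        /* hv_vcpu_run_until() is resolved dynamically and may not be available on older macOS versions; fall back to plain hv_vcpu_run() in that case. */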
2785 if (hv_vcpu_run_until)
2786 hrc = hv_vcpu_run_until(pVCpu->nem.s.hVCpuId, HV_DEADLINE_FOREVER);
2787 else
2788 hrc = hv_vcpu_run(pVCpu->nem.s.hVCpuId);
2789
2790 TMNotifyEndOfExecution(pVM, pVCpu, ASMReadTSC());
2791
2792 /*
2793 * Sync the TPR shadow with our APIC state.
2794 */
2795 if ( !VmxTransient.fIsNestedGuest
2796 && (pVCpu->nem.s.VmcsInfo.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
2797 {
2798 uint64_t u64Tpr;
2799 hrc = hv_vcpu_read_register(pVCpu->nem.s.hVCpuId, HV_X86_TPR, &u64Tpr);
2800 Assert(hrc == HV_SUCCESS);
2801
2802 if (VmxTransient.u8GuestTpr != (uint8_t)u64Tpr)
2803 {
2804 rc = APICSetTpr(pVCpu, (uint8_t)u64Tpr);
2805 AssertRC(rc);
2806 ASMAtomicUoOrU64(&pVCpu->nem.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
2807 }
2808 }
2809
2810 if (hrc == HV_SUCCESS)
2811 {
2812 /*
2813 * Deal with the message.
2814 */
2815 rcStrict = nemR3DarwinHandleExit(pVM, pVCpu, &VmxTransient);
2816 if (rcStrict == VINF_SUCCESS)
2817 { /* hopefully likely */ }
2818 else
2819 {
2820 LogFlow(("NEM/%u: breaking: nemR3DarwinHandleExit -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
2821 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
2822 break;
2823 }
2824 //Assert(!pVCpu->cpum.GstCtx.fExtrn);
2825 }
2826 else
2827 {
2828            AssertLogRelMsgFailedReturn(("hv_vcpu_run() failed for CPU #%u: %#x %u\n",
2829 pVCpu->idCpu, hrc, vmxHCCheckGuestState(pVCpu, &pVCpu->nem.s.VmcsInfo)),
2830 VERR_NEM_IPE_0);
2831 }
2832 } /* the run loop */
2833
2834
2835 /*
2836 * Convert any pending HM events back to TRPM due to premature exits.
2837 *
2838 * This is because execution may continue from IEM and we would need to inject
2839 * the event from there (hence place it back in TRPM).
2840 */
2841 if (pVCpu->nem.s.Event.fPending)
2842 {
2843 vmxHCPendingEventToTrpmTrap(pVCpu);
2844 Assert(!pVCpu->nem.s.Event.fPending);
2845
2846 /* Clear the events from the VMCS. */
2847 int rc = nemR3DarwinWriteVmcs32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, 0); AssertRC(rc);
2848 rc = nemR3DarwinWriteVmcs32(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, 0); AssertRC(rc);
2849 }
2850
2851
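    /* Switch back out of the NEM runloop state, handling the case where we were canceled while executing. */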
2852 if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
2853 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
2854
2855 if (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL))
2856 {
2857        /* Try to anticipate what we might need. */
2858 uint64_t fImport = NEM_DARWIN_CPUMCTX_EXTRN_MASK_FOR_IEM;
2859 if ( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
2860 || RT_FAILURE(rcStrict))
2861 fImport = CPUMCTX_EXTRN_ALL;
2862 else if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_APIC
2863 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
2864 fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;
2865
2866 if (pVCpu->cpum.GstCtx.fExtrn & fImport)
2867 {
2868 /* Only import what is external currently. */
2869 int rc2 = nemR3DarwinCopyStateFromHv(pVM, pVCpu, fImport);
2870 if (RT_SUCCESS(rc2))
2871 pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
2872 else if (RT_SUCCESS(rcStrict))
2873 rcStrict = rc2;
2874 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
2875 {
2876 pVCpu->cpum.GstCtx.fExtrn = 0;
2877 ASMAtomicUoOrU64(&pVCpu->nem.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
2878 }
2879 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
2880 }
2881 else
2882 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
2883 }
2884 else
2885 {
2886 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
2887 pVCpu->cpum.GstCtx.fExtrn = 0;
2888 ASMAtomicUoOrU64(&pVCpu->nem.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
2889 }
2890
2891 LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 => %Rrc\n",
2892 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, VBOXSTRICTRC_VAL(rcStrict) ));
2893 return rcStrict;
2894}
2895
2896
2897VMMR3_INT_DECL(bool) NEMR3CanExecuteGuest(PVM pVM, PVMCPU pVCpu)
2898{
2899 NOREF(pVM);
2900 return PGMPhysIsA20Enabled(pVCpu);
2901}
2902
2903
2904bool nemR3NativeSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
2905{
2906 NOREF(pVM); NOREF(pVCpu); NOREF(fEnable);
2907 return false;
2908}
2909
2910
2911/**
2912 * Forced flag notification call from VMEmt.h.
2913 *
2914 * This is only called when pVCpu is in the VMCPUSTATE_STARTED_EXEC_NEM state.
2915 *
2916 * @param pVM The cross context VM structure.
2917 * @param pVCpu The cross context virtual CPU structure of the CPU
2918 * to be notified.
2919 * @param fFlags Notification flags, VMNOTIFYFF_FLAGS_XXX.
2920 */
2921void nemR3NativeNotifyFF(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
2922{
2923 LogFlowFunc(("pVM=%p pVCpu=%p fFlags=%#x\n", pVM, pVCpu, fFlags));
2924
2925 RT_NOREF(pVM, fFlags);
2926
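    /* Poke the vCPU so it drops out of hv_vcpu_run()/hv_vcpu_run_until() and notices the pending force flags. */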
2927 hv_return_t hrc = hv_vcpu_interrupt(&pVCpu->nem.s.hVCpuId, 1);
2928 if (hrc != HV_SUCCESS)
2929 LogRel(("NEM: hv_vcpu_interrupt(%u, 1) failed with %#x\n", pVCpu->nem.s.hVCpuId, hrc));
2930}
2931
2932
2933VMMR3_INT_DECL(int) NEMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvR3,
2934 uint8_t *pu2State, uint32_t *puNemRange)
2935{
2936 RT_NOREF(pVM, puNemRange);
2937
2938 Log5(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p\n", GCPhys, cb, pvR3));
2939#if defined(VBOX_WITH_PGM_NEM_MODE)
2940 if (pvR3)
2941 {
2942 int rc = nemR3DarwinMap(pVM, GCPhys, pvR3, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE);
2943 if (RT_SUCCESS(rc))
2944 *pu2State = NEM_DARWIN_PAGE_STATE_WRITABLE;
2945 else
2946 {
2947 LogRel(("NEMR3NotifyPhysRamRegister: GCPhys=%RGp LB %RGp pvR3=%p rc=%Rrc\n", GCPhys, cb, pvR3, rc));
2948 return VERR_NEM_MAP_PAGES_FAILED;
2949 }
2950 }
2951 return VINF_SUCCESS;
2952#else
2953 RT_NOREF(pVM, GCPhys, cb, pvR3);
2954 return VERR_NEM_MAP_PAGES_FAILED;
2955#endif
2956}
2957
2958
2959VMMR3_INT_DECL(bool) NEMR3IsMmio2DirtyPageTrackingSupported(PVM pVM)
2960{
2961 RT_NOREF(pVM);
2962 return false;
2963}
2964
2965
2966VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
2967 void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
2968{
2969 RT_NOREF(pVM, puNemRange, pvRam, fFlags);
2970
2971 Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p (%d)\n",
2972 GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, *pu2State));
2973
2974#if defined(VBOX_WITH_PGM_NEM_MODE)
2975 /*
2976 * Unmap the RAM we're replacing.
2977 */
2978 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
2979 {
2980 int rc = nemR3DarwinUnmap(pVM, GCPhys, cb);
2981 if (RT_SUCCESS(rc))
2982 { /* likely */ }
2983 else if (pvMmio2)
2984 LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rc(ignored)\n",
2985 GCPhys, cb, fFlags, rc));
2986 else
2987 {
2988 LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc\n",
2989 GCPhys, cb, fFlags, rc));
2990 return VERR_NEM_UNMAP_PAGES_FAILED;
2991 }
2992 }
2993
2994 /*
2995 * Map MMIO2 if any.
2996 */
2997 if (pvMmio2)
2998 {
2999 Assert(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2);
3000 int rc = nemR3DarwinMap(pVM, GCPhys, pvMmio2, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE);
3001 if (RT_SUCCESS(rc))
3002 *pu2State = NEM_DARWIN_PAGE_STATE_WRITABLE;
3003 else
3004 {
3005 LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x pvMmio2=%p: Map -> rc=%Rrc\n",
3006 GCPhys, cb, fFlags, pvMmio2, rc));
3007 return VERR_NEM_MAP_PAGES_FAILED;
3008 }
3009 }
3010 else
3011 {
3012 Assert(!(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2));
3013 *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
3014 }
3015
3016#else
3017 RT_NOREF(pVM, GCPhys, cb, pvRam, pvMmio2);
3018 *pu2State = (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE) ? UINT8_MAX : NEM_DARWIN_PAGE_STATE_UNMAPPED;
3019#endif
3020 return VINF_SUCCESS;
3021}
3022
3023
3024VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
3025 void *pvRam, void *pvMmio2, uint32_t *puNemRange)
3026{
3027 RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, puNemRange);
3028 return VINF_SUCCESS;
3029}
3030
3031
3032VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExUnmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, void *pvRam,
3033 void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
3034{
3035 RT_NOREF(pVM, puNemRange);
3036
3037 Log5(("NEMR3NotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p uNemRange=%#x (%#x)\n",
3038 GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, puNemRange, *puNemRange));
3039
3040 int rc = VINF_SUCCESS;
3041#if defined(VBOX_WITH_PGM_NEM_MODE)
3042 /*
3043 * Unmap the MMIO2 pages.
3044 */
3045 /** @todo If we implement aliasing (MMIO2 page aliased into MMIO range),
3046 * we may have more stuff to unmap even in case of pure MMIO... */
3047 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2)
3048 {
3049 rc = nemR3DarwinUnmap(pVM, GCPhys, cb);
3050 if (RT_FAILURE(rc))
3051 {
3052 LogRel2(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc\n",
3053 GCPhys, cb, fFlags, rc));
3054 rc = VERR_NEM_UNMAP_PAGES_FAILED;
3055 }
3056 }
3057
3058 /*
3059 * Restore the RAM we replaced.
3060 */
3061 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
3062 {
3063 AssertPtr(pvRam);
3064 rc = nemR3DarwinMap(pVM, GCPhys, pvRam, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE);
3065 if (RT_SUCCESS(rc))
3066 { /* likely */ }
3067 else
3068 {
3069 LogRel(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp pvMmio2=%p rc=%Rrc\n", GCPhys, cb, pvMmio2, rc));
3070 rc = VERR_NEM_MAP_PAGES_FAILED;
3071 }
3072 if (pu2State)
3073 *pu2State = NEM_DARWIN_PAGE_STATE_WRITABLE;
3074 }
3075 /* Mark the pages as unmapped if relevant. */
3076 else if (pu2State)
3077 *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
3078
3079 RT_NOREF(pvMmio2);
3080#else
3081 RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State);
3082 if (pu2State)
3083 *pu2State = UINT8_MAX;
3084 rc = VERR_NEM_UNMAP_PAGES_FAILED;
3085#endif
3086 return rc;
3087}
3088
3089
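/**
 * Queries and resets the dirty bitmap of an MMIO2 range.
 *
 * Not implemented by this backend (see NEMR3IsMmio2DirtyPageTrackingSupported),
 * so this asserts and returns VERR_NOT_IMPLEMENTED.
 *
 * @returns VBox status code.
 */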
3090VMMR3_INT_DECL(int) NEMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t uNemRange,
3091 void *pvBitmap, size_t cbBitmap)
3092{
3093 RT_NOREF(pVM, GCPhys, cb, uNemRange, pvBitmap, cbBitmap);
3094 AssertFailed();
3095 return VERR_NOT_IMPLEMENTED;
3096}
3097
3098
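/**
 * Early notification about a ROM range registration.
 *
 * Only resets the page state and NEM range data here; the actual mapping is
 * done in NEMR3NotifyPhysRomRegisterLate.
 *
 * @returns VBox status code.
 */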
3099VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages, uint32_t fFlags,
3100 uint8_t *pu2State, uint32_t *puNemRange)
3101{
3102 RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);
3103
3104 Log5(("NEMR3NotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags));
3105 *pu2State = UINT8_MAX;
3106 *puNemRange = 0;
3107 return VINF_SUCCESS;
3108}
3109
3110
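/**
 * Late notification about a ROM range registration.
 *
 * (Re-)maps the ROM pages read-only (and executable) and sets the page state
 * accordingly.
 *
 * @returns VBox status code.
 */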
3111VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages,
3112 uint32_t fFlags, uint8_t *pu2State, uint32_t *puNemRange)
3113{
3114 Log5(("NEMR3NotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n",
3115 GCPhys, cb, pvPages, fFlags, pu2State, *pu2State, puNemRange, *puNemRange));
3116 *pu2State = UINT8_MAX;
3117
3118#if defined(VBOX_WITH_PGM_NEM_MODE)
3119 /*
3120 * (Re-)map readonly.
3121 */
3122 AssertPtrReturn(pvPages, VERR_INVALID_POINTER);
3123 int rc = nemR3DarwinMap(pVM, GCPhys, pvPages, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE);
3124 if (RT_SUCCESS(rc))
3125 *pu2State = NEM_DARWIN_PAGE_STATE_READABLE;
3126 else
3127 {
3128 LogRel(("NEMR3NotifyPhysRomRegisterLate: GCPhys=%RGp LB %RGp pvPages=%p fFlags=%#x rc=%Rrc\n",
3129 GCPhys, cb, pvPages, fFlags, rc));
3130 return VERR_NEM_MAP_PAGES_FAILED;
3131 }
3132 RT_NOREF(pVM, fFlags, puNemRange);
3133 return VINF_SUCCESS;
3134#else
3135 RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);
3136 return VERR_NEM_MAP_PAGES_FAILED;
3137#endif
3138}
3139
3140
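/**
 * Notification about a physical access handler being deregistered.
 *
 * If backing memory is supplied (pvMemR3), it is mapped back with full
 * (read/write/execute) access and the page state is set to writable; otherwise
 * the page state is merely reset.
 */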
3141VMM_INT_DECL(void) NEMHCNotifyHandlerPhysicalDeregister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
3142 RTR3PTR pvMemR3, uint8_t *pu2State)
3143{
3144 RT_NOREF(pVM);
3145
3146 Log5(("NEMHCNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d pvMemR3=%p pu2State=%p (%d)\n",
3147 GCPhys, cb, enmKind, pvMemR3, pu2State, *pu2State));
3148
3149 *pu2State = UINT8_MAX;
3150#if defined(VBOX_WITH_PGM_NEM_MODE)
3151 if (pvMemR3)
3152 {
3153 int rc = nemR3DarwinMap(pVM, GCPhys, pvMemR3, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE);
3154 if (RT_SUCCESS(rc))
3155 *pu2State = NEM_DARWIN_PAGE_STATE_WRITABLE;
3156 else
3157 AssertLogRelMsgFailed(("NEMHCNotifyHandlerPhysicalDeregister: nemR3DarwinMap(,%p,%RGp,%RGp,) -> %Rrc\n",
3158 pvMemR3, GCPhys, cb, rc));
3159 }
3160 RT_NOREF(enmKind);
3161#else
3162 RT_NOREF(pVM, enmKind, GCPhys, cb, pvMemR3);
3163 AssertFailed();
3164#endif
3165}
3166
3167
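/**
 * Worker that unmaps a single guest page and updates its tracked state.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   GCPhysDst       The guest physical address of the page to unmap.
 * @param   pu2State        The page state variable to update.
 */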
3168static int nemHCJustUnmapPage(PVMCC pVM, RTGCPHYS GCPhysDst, uint8_t *pu2State)
3169{
3170 if (*pu2State <= NEM_DARWIN_PAGE_STATE_UNMAPPED)
3171 {
3172 Log5(("nemHCJustUnmapPage: %RGp == unmapped\n", GCPhysDst));
3173 *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
3174 return VINF_SUCCESS;
3175 }
3176
3177 int rc = nemR3DarwinUnmap(pVM, GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, X86_PAGE_SIZE);
3178 if (RT_SUCCESS(rc))
3179 {
3180 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
3181 *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
3182 Log5(("nemHCJustUnmapPage: %RGp => unmapped\n", GCPhysDst));
3183 return VINF_SUCCESS;
3184 }
3185 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
3186 LogRel(("nemHCJustUnmapPage(%RGp): failed! rc=%Rrc\n",
3187 GCPhysDst, rc));
3188 return VERR_NEM_IPE_6;
3189}
3190
3191
3192/**
3193 * Called when the A20 state changes.
3194 *
3195 * @param pVCpu The CPU the A20 state changed on.
3196 * @param fEnabled Whether it was enabled (true) or disabled.
3197 */
3198VMMR3_INT_DECL(void) NEMR3NotifySetA20(PVMCPU pVCpu, bool fEnabled)
3199{
3200 Log(("NEMR3NotifySetA20: fEnabled=%RTbool\n", fEnabled));
3201 RT_NOREF(pVCpu, fEnabled);
3202}
3203
3204
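/**
 * Notification about a physical access handler being registered.
 *
 * Nothing to do for this backend, the event is only logged.
 */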
3205void nemHCNativeNotifyHandlerPhysicalRegister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
3206{
3207 Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
3208 NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb);
3209}
3210
3211
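/**
 * Notification about a physical access handler being modified (moved).
 *
 * Nothing to do for this backend, the event is only logged.
 */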
3212void nemHCNativeNotifyHandlerPhysicalModify(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
3213 RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
3214{
3215 Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
3216 GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
3217 NOREF(pVM); NOREF(enmKind); NOREF(GCPhysOld); NOREF(GCPhysNew); NOREF(cb); NOREF(fRestoreAsRAM);
3218}
3219
3220
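/**
 * Notification about a guest page having been allocated.
 *
 * Handled by simply unmapping the page (see nemHCJustUnmapPage).
 *
 * @returns VBox status code.
 */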
3221int nemHCNativeNotifyPhysPageAllocated(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
3222 PGMPAGETYPE enmType, uint8_t *pu2State)
3223{
3224 Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
3225 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
3226 RT_NOREF(HCPhys, fPageProt, enmType);
3227
3228 return nemHCJustUnmapPage(pVM, GCPhys, pu2State);
3229}
3230
3231
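/**
 * Notification about a protection change for a guest page.
 *
 * Handled by simply unmapping the page (see nemHCJustUnmapPage).
 */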
3232VMM_INT_DECL(void) NEMHCNotifyPhysPageProtChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, RTR3PTR pvR3, uint32_t fPageProt,
3233 PGMPAGETYPE enmType, uint8_t *pu2State)
3234{
3235 Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
3236 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
3237 RT_NOREF(HCPhys, pvR3, fPageProt, enmType);
3238
3239 nemHCJustUnmapPage(pVM, GCPhys, pu2State);
3240}
3241
3242
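/**
 * Notification about a guest page being remapped to a different host page.
 *
 * Handled by simply unmapping the page (see nemHCJustUnmapPage).
 */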
3243VMM_INT_DECL(void) NEMHCNotifyPhysPageChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
3244 RTR3PTR pvNewR3, uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
3245{
3246 Log5(("NEMHCNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
3247 GCPhys, HCPhysPrev, HCPhysNew, fPageProt, enmType, *pu2State));
3248 RT_NOREF(HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType);
3249
3250 nemHCJustUnmapPage(pVM, GCPhys, pu2State);
3251}
3252
3253
3254/**
3255 * Interface for importing state on demand (used by IEM).
3256 *
3257 * @returns VBox status code.
3258 * @param pVCpu The cross context CPU structure.
3259 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
3260 */
3261VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
3262{
3263 LogFlowFunc(("pVCpu=%p fWhat=%RX64\n", pVCpu, fWhat));
3264 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);
3265
3266 return nemR3DarwinCopyStateFromHv(pVCpu->pVMR3, pVCpu, fWhat);
3267}
3268
3269
3270/**
3271 * Query the CPU tick counter and optionally the TSC_AUX MSR value.
3272 *
3273 * @returns VBox status code.
3274 * @param pVCpu The cross context CPU structure.
3275 * @param pcTicks Where to return the CPU tick count.
3276 * @param puAux Where to return the TSC_AUX register value.
3277 */
3278VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPUCC pVCpu, uint64_t *pcTicks, uint32_t *puAux)
3279{
3280 LogFlowFunc(("pVCpu=%p pcTicks=%p puAux=%p\n", pVCpu, pcTicks, puAux));
3281 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick);
3282
3283 int rc = nemR3DarwinMsrRead(pVCpu, MSR_IA32_TSC, pcTicks);
3284 if ( RT_SUCCESS(rc)
3285 && puAux)
3286 {
3287 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_TSC_AUX)
3288 {
3289 /** @todo Why the heck is puAux a uint32_t? */
3290 uint64_t u64Aux;
3291 rc = nemR3DarwinMsrRead(pVCpu, MSR_K8_TSC_AUX, &u64Aux);
3292 if (RT_SUCCESS(rc))
3293 *puAux = (uint32_t)u64Aux;
3294 }
3295 else
3296 *puAux = CPUMGetGuestTscAux(pVCpu);
3297 }
3298
3299 return rc;
3300}
3301
3302
3303/**
3304 * Resumes CPU clock (TSC) on all virtual CPUs.
3305 *
3306 * This is called by TM when the VM is started, restored, resumed or similar.
3307 *
3308 * @returns VBox status code.
3309 * @param pVM The cross context VM structure.
3310 * @param pVCpu The cross context CPU structure of the calling EMT.
3311 * @param uPausedTscValue The TSC value at the time of pausing.
3312 */
3313VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uPausedTscValue)
3314{
3315 LogFlowFunc(("pVM=%p pVCpu=%p uPausedTscValue=%RX64\n", pVM, pVCpu, uPausedTscValue));
3316 VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
3317 AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);
3318
3319 hv_return_t hrc = hv_vm_sync_tsc(uPausedTscValue);
3320 if (RT_LIKELY(hrc == HV_SUCCESS))
3321 {
3322 ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_TSC_AUX);
3323 return VINF_SUCCESS;
3324 }
3325
3326 return nemR3DarwinHvSts2Rc(hrc);
3327}
3328
3329
3330/**
3331 * Returns features supported by the NEM backend.
3332 *
3333 * @returns Flags of features supported by the native NEM backend.
3334 * @param pVM The cross context VM structure.
3335 */
3336VMM_INT_DECL(uint32_t) NEMHCGetFeatures(PVMCC pVM)
3337{
3338 RT_NOREF(pVM);
3339 /*
3340 * Apple's Hypervisor.framework is not supported on CPUs that lack nested paging
3341 * and unrestricted guest execution support, so it is safe to always return these flags here.
3342 */
3343 return NEM_FEAT_F_NESTED_PAGING | NEM_FEAT_F_FULL_GST_EXEC | NEM_FEAT_F_XSAVE_XRSTOR;
3344}
3345
3346
3347/** @page pg_nem_darwin NEM/darwin - Native Execution Manager, macOS.
3348 *
3349 * @todo Add notes as the implementation progresses...
3350 */
3351