VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/NEMR3Native-darwin-armv8.cpp@ 99238

Last change on this file since 99238 was 99197, checked in by vboxsync, 23 months ago

VMM/NEMR3Native-darwin-armv8.cpp: Hook up the new system register accessors, bugref:10390

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 61.1 KB
/* $Id: NEMR3Native-darwin-armv8.cpp 99197 2023-03-28 13:06:47Z vboxsync $ */
/** @file
 * NEM - Native execution manager, native ring-3 macOS backend using Hypervisor.framework, ARMv8 variant.
 *
 * Log group 2: Exit logging.
 * Log group 3: Log context on exit.
 * Log group 5: Ring-3 memory management.
 */

/*
 * Copyright (C) 2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_NEM
#define VMCPU_INCL_CPUM_GST_CTX
#define CPUM_WITH_NONCONST_HOST_FEATURES /* required for initializing parts of the g_CpumHostFeatures structure here. */
#include <VBox/vmm/nem.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/apic.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/hm_vmx.h>
#include <VBox/vmm/dbgftrace.h>
#include <VBox/vmm/gcm.h>
#include "NEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include "dtrace/VBoxVMM.h"

#include <iprt/armv8.h>
#include <iprt/asm.h>
#include <iprt/ldr.h>
#include <iprt/mem.h>
#include <iprt/path.h>
#include <iprt/string.h>
#include <iprt/system.h>
#include <iprt/utf16.h>

#include <mach/mach_time.h>
#include <mach/kern_return.h>

#include <Hypervisor/Hypervisor.h>


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** NEM_DARWIN_PAGE_STATE_XXX names. */
NEM_TMPL_STATIC const char * const g_apszPageStates[4] = { "not-set", "unmapped", "readable", "writable" };
/** The general registers. */
static const struct
{
    hv_reg_t    enmHvReg;
    uint32_t    fCpumExtrn;
    uint32_t    offCpumCtx;
} s_aCpumRegs[] =
{
#define CPUM_GREG_EMIT_X0_X3(a_Idx)  { HV_REG_X ## a_Idx, CPUMCTX_EXTRN_X ## a_Idx, RT_UOFFSETOF(CPUMCTX, aGRegs[a_Idx].x) }
#define CPUM_GREG_EMIT_X4_X28(a_Idx) { HV_REG_X ## a_Idx, CPUMCTX_EXTRN_X4_X28,     RT_UOFFSETOF(CPUMCTX, aGRegs[a_Idx].x) }
    CPUM_GREG_EMIT_X0_X3(0),
    CPUM_GREG_EMIT_X0_X3(1),
    CPUM_GREG_EMIT_X0_X3(2),
    CPUM_GREG_EMIT_X0_X3(3),
    CPUM_GREG_EMIT_X4_X28(4),
    CPUM_GREG_EMIT_X4_X28(5),
    CPUM_GREG_EMIT_X4_X28(6),
    CPUM_GREG_EMIT_X4_X28(7),
    CPUM_GREG_EMIT_X4_X28(8),
    CPUM_GREG_EMIT_X4_X28(9),
    CPUM_GREG_EMIT_X4_X28(10),
    CPUM_GREG_EMIT_X4_X28(11),
    CPUM_GREG_EMIT_X4_X28(12),
    CPUM_GREG_EMIT_X4_X28(13),
    CPUM_GREG_EMIT_X4_X28(14),
    CPUM_GREG_EMIT_X4_X28(15),
    CPUM_GREG_EMIT_X4_X28(16),
    CPUM_GREG_EMIT_X4_X28(17),
    CPUM_GREG_EMIT_X4_X28(18),
    CPUM_GREG_EMIT_X4_X28(19),
    CPUM_GREG_EMIT_X4_X28(20),
    CPUM_GREG_EMIT_X4_X28(21),
    CPUM_GREG_EMIT_X4_X28(22),
    CPUM_GREG_EMIT_X4_X28(23),
    CPUM_GREG_EMIT_X4_X28(24),
    CPUM_GREG_EMIT_X4_X28(25),
    CPUM_GREG_EMIT_X4_X28(26),
    CPUM_GREG_EMIT_X4_X28(27),
    CPUM_GREG_EMIT_X4_X28(28),
    { HV_REG_FP,   CPUMCTX_EXTRN_FP,   RT_UOFFSETOF(CPUMCTX, aGRegs[29].x) },
    { HV_REG_LR,   CPUMCTX_EXTRN_LR,   RT_UOFFSETOF(CPUMCTX, aGRegs[30].x) },
    { HV_REG_PC,   CPUMCTX_EXTRN_PC,   RT_UOFFSETOF(CPUMCTX, Pc.u64) },
    { HV_REG_FPCR, CPUMCTX_EXTRN_FPCR, RT_UOFFSETOF(CPUMCTX, fpcr) },
    { HV_REG_FPSR, CPUMCTX_EXTRN_FPSR, RT_UOFFSETOF(CPUMCTX, fpsr) }
#undef CPUM_GREG_EMIT_X0_X3
#undef CPUM_GREG_EMIT_X4_X28
};
/** SIMD/FP registers. */
static const struct
{
    hv_simd_fp_reg_t    enmHvReg;
    uint32_t            offCpumCtx;
} s_aCpumFpRegs[] =
{
#define CPUM_VREG_EMIT(a_Idx) { HV_SIMD_FP_REG_Q ## a_Idx, RT_UOFFSETOF(CPUMCTX, aVRegs[a_Idx].v) }
    CPUM_VREG_EMIT(0),
    CPUM_VREG_EMIT(1),
    CPUM_VREG_EMIT(2),
    CPUM_VREG_EMIT(3),
    CPUM_VREG_EMIT(4),
    CPUM_VREG_EMIT(5),
    CPUM_VREG_EMIT(6),
    CPUM_VREG_EMIT(7),
    CPUM_VREG_EMIT(8),
    CPUM_VREG_EMIT(9),
    CPUM_VREG_EMIT(10),
    CPUM_VREG_EMIT(11),
    CPUM_VREG_EMIT(12),
    CPUM_VREG_EMIT(13),
    CPUM_VREG_EMIT(14),
    CPUM_VREG_EMIT(15),
    CPUM_VREG_EMIT(16),
    CPUM_VREG_EMIT(17),
    CPUM_VREG_EMIT(18),
    CPUM_VREG_EMIT(19),
    CPUM_VREG_EMIT(20),
    CPUM_VREG_EMIT(21),
    CPUM_VREG_EMIT(22),
    CPUM_VREG_EMIT(23),
    CPUM_VREG_EMIT(24),
    CPUM_VREG_EMIT(25),
    CPUM_VREG_EMIT(26),
    CPUM_VREG_EMIT(27),
    CPUM_VREG_EMIT(28),
    CPUM_VREG_EMIT(29),
    CPUM_VREG_EMIT(30),
    CPUM_VREG_EMIT(31)
#undef CPUM_VREG_EMIT
};
/** System registers. */
static const struct
{
    hv_sys_reg_t    enmHvReg;
    uint32_t        fCpumExtrn;
    uint32_t        offCpumCtx;
} s_aCpumSysRegs[] =
{
    { HV_SYS_REG_SP_EL0,   CPUMCTX_EXTRN_SP,   RT_UOFFSETOF(CPUMCTX, aSpReg[0].u64) },
    { HV_SYS_REG_SP_EL1,   CPUMCTX_EXTRN_SP,   RT_UOFFSETOF(CPUMCTX, aSpReg[1].u64) },
    { HV_SYS_REG_SPSR_EL1, CPUMCTX_EXTRN_SPSR, RT_UOFFSETOF(CPUMCTX, Spsr.u64) },
    { HV_SYS_REG_ELR_EL1,  CPUMCTX_EXTRN_ELR,  RT_UOFFSETOF(CPUMCTX, Elr.u64) },
};


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/


/**
 * Converts a HV return code to a VBox status code.
 *
 * @returns VBox status code.
 * @param   hrc     The HV return code to convert.
 */
DECLINLINE(int) nemR3DarwinHvSts2Rc(hv_return_t hrc)
{
    if (hrc == HV_SUCCESS)
        return VINF_SUCCESS;

    switch (hrc)
    {
        case HV_ERROR:        return VERR_INVALID_STATE;
        case HV_BUSY:         return VERR_RESOURCE_BUSY;
        case HV_BAD_ARGUMENT: return VERR_INVALID_PARAMETER;
        case HV_NO_RESOURCES: return VERR_OUT_OF_RESOURCES;
        case HV_NO_DEVICE:    return VERR_NOT_FOUND;
        case HV_UNSUPPORTED:  return VERR_NOT_SUPPORTED;
    }

    return VERR_IPE_UNEXPECTED_STATUS;
}
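
/*
 * Illustrative usage sketch (not part of the build): callers in this file
 * funnel every Hypervisor.framework status through nemR3DarwinHvSts2Rc() so
 * the rest of the VMM only ever sees VBox status codes. The fragment below is
 * hypothetical; hv_vm_unmap() and the logging macro are the real APIs used
 * elsewhere in this file.
 */
#if 0
{
    hv_return_t hrc = hv_vm_unmap(GCPhys, cb);  /* any hv_*() call returning hv_return_t */
    int rc = nemR3DarwinHvSts2Rc(hrc);          /* VINF_SUCCESS or a VERR_* code */
    if (RT_FAILURE(rc))
        LogRel(("NEM: hv_vm_unmap() -> %Rrc\n", rc));
}
#endif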


/**
 * Returns a human readable string of the given exception class.
 *
 * @returns Pointer to the string matching the given EC.
 * @param   u32Ec   The exception class to return the string for.
 */
static const char *nemR3DarwinEsrEl2EcStringify(uint32_t u32Ec)
{
    switch (u32Ec)
    {
#define ARMV8_EC_CASE(a_Ec) case a_Ec: return #a_Ec
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_UNKNOWN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_TRAPPED_WFX);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCR_MRC_COPROC_15);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCRR_MRRC_COPROC15);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCR_MRC_COPROC_14);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_LDC_STC);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_SME_SVE_NEON);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_VMRS);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_PA_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_LS64_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MRRC_COPROC14);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_BTI_BRANCH_TARGET_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_ILLEGAL_EXECUTION_STATE);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_SVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_HVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_SMC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_SVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_HVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_SMC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_SYS_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_SVE_TRAPPED);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_PAUTH_NV_TRAPPED_ERET_ERETAA_ERETAB);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_TME_TSTART_INSN_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_FPAC_PA_INSN_FAILURE_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_SME_TRAPPED_SME_ACCESS);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_RME_GRANULE_PROT_CHECK_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_INSN_ABORT_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_INSN_ABORT_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_PC_ALIGNMENT_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_DATA_ABORT_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_DATA_ABORT_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SP_ALIGNMENT_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_MOPS_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_FP_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_FP_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SERROR_INTERRUPT);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_BKPT_EXCEPTION_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_BKPT_EXCEPTION_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SS_EXCEPTION_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SS_EXCEPTION_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_WATCHPOINT_EXCEPTION_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_WATCHPOINT_EXCEPTION_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_BKPT_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_VEC_CATCH_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_BRK_INSN);
#undef ARMV8_EC_CASE
        default:
            break;
    }

    return "<INVALID>";
}


/**
 * Unmaps the given guest physical address range (page aligned).
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The guest physical address to start unmapping at.
 * @param   cb          The size of the range to unmap in bytes.
 * @param   pu2State    Where to store the new state of the unmapped page, optional.
 */
DECLINLINE(int) nemR3DarwinUnmap(PVM pVM, RTGCPHYS GCPhys, size_t cb, uint8_t *pu2State)
{
    if (*pu2State <= NEM_DARWIN_PAGE_STATE_UNMAPPED)
    {
        Log5(("nemR3DarwinUnmap: %RGp == unmapped\n", GCPhys));
        *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
        return VINF_SUCCESS;
    }

    LogFlowFunc(("Unmapping %RGp LB %zu\n", GCPhys, cb));
    hv_return_t hrc = hv_vm_unmap(GCPhys, cb);
    if (RT_LIKELY(hrc == HV_SUCCESS))
    {
        STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
        if (pu2State)
            *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
        Log5(("nemR3DarwinUnmap: %RGp => unmapped\n", GCPhys));
        return VINF_SUCCESS;
    }

    STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
    LogRel(("nemR3DarwinUnmap(%RGp): failed! hrc=%#x\n",
            GCPhys, hrc));
    return VERR_NEM_IPE_6;
}


/**
 * Maps a given guest physical address range backed by the given memory with the given
 * protection flags.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The guest physical address to start mapping.
 * @param   pvRam       The R3 pointer of the memory to back the range with.
 * @param   cb          The size of the range, page aligned.
 * @param   fPageProt   The page protection flags to use for this range, combination of NEM_PAGE_PROT_XXX.
 * @param   pu2State    Where to store the state for the new page, optional.
 */
DECLINLINE(int) nemR3DarwinMap(PVM pVM, RTGCPHYS GCPhys, const void *pvRam, size_t cb, uint32_t fPageProt, uint8_t *pu2State)
{
    LogFlowFunc(("Mapping %RGp LB %zu fProt=%#x\n", GCPhys, cb, fPageProt));

    Assert(fPageProt != NEM_PAGE_PROT_NONE);
    RT_NOREF(pVM);

    hv_memory_flags_t fHvMemProt = 0;
    if (fPageProt & NEM_PAGE_PROT_READ)
        fHvMemProt |= HV_MEMORY_READ;
    if (fPageProt & NEM_PAGE_PROT_WRITE)
        fHvMemProt |= HV_MEMORY_WRITE;
    if (fPageProt & NEM_PAGE_PROT_EXECUTE)
        fHvMemProt |= HV_MEMORY_EXEC;

    hv_return_t hrc = hv_vm_map((void *)pvRam, GCPhys, cb, fHvMemProt);
    if (hrc == HV_SUCCESS)
    {
        if (pu2State)
            *pu2State = (fPageProt & NEM_PAGE_PROT_WRITE)
                      ? NEM_DARWIN_PAGE_STATE_WRITABLE
                      : NEM_DARWIN_PAGE_STATE_READABLE;
        return VINF_SUCCESS;
    }

    return nemR3DarwinHvSts2Rc(hrc);
}
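
/*
 * Illustrative usage sketch (not part of the build): mapping a single guest
 * page read/write, mirroring how the notification callbacks further down call
 * nemR3DarwinMap(). The variables are hypothetical; the constants are the
 * ones used throughout this file.
 */
#if 0
{
    uint8_t u2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
    int rc = nemR3DarwinMap(pVM, GCPhys, pvR3, GUEST_PAGE_SIZE,
                            NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE, &u2State);
    /* On success u2State is NEM_DARWIN_PAGE_STATE_WRITABLE because write access was requested. */
}
#endif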

#if 0 /* unused */
DECLINLINE(int) nemR3DarwinProtectPage(PVM pVM, RTGCPHYS GCPhys, size_t cb, uint32_t fPageProt)
{
    hv_memory_flags_t fHvMemProt = 0;
    if (fPageProt & NEM_PAGE_PROT_READ)
        fHvMemProt |= HV_MEMORY_READ;
    if (fPageProt & NEM_PAGE_PROT_WRITE)
        fHvMemProt |= HV_MEMORY_WRITE;
    if (fPageProt & NEM_PAGE_PROT_EXECUTE)
        fHvMemProt |= HV_MEMORY_EXEC;

    hv_return_t hrc;
    if (pVM->nem.s.fCreatedAsid)
        hrc = hv_vm_protect_space(pVM->nem.s.uVmAsid, GCPhys, cb, fHvMemProt);
    else
        hrc = hv_vm_protect(GCPhys, cb, fHvMemProt);

    return nemR3DarwinHvSts2Rc(hrc);
}
#endif

DECLINLINE(int) nemR3NativeGCPhys2R3PtrReadOnly(PVM pVM, RTGCPHYS GCPhys, const void **ppv)
{
    PGMPAGEMAPLOCK Lock;
    int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, ppv, &Lock);
    if (RT_SUCCESS(rc))
        PGMPhysReleasePageMappingLock(pVM, &Lock);
    return rc;
}


DECLINLINE(int) nemR3NativeGCPhys2R3PtrWriteable(PVM pVM, RTGCPHYS GCPhys, void **ppv)
{
    PGMPAGEMAPLOCK Lock;
    int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, ppv, &Lock);
    if (RT_SUCCESS(rc))
        PGMPhysReleasePageMappingLock(pVM, &Lock);
    return rc;
}


#ifdef LOG_ENABLED
/**
 * Logs the current CPU state.
 */
static void nemR3DarwinLogState(PVMCC pVM, PVMCPUCC pVCpu)
{
    if (LogIs3Enabled())
    {
        char szRegs[4096];
        DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
                        "x0=%016VR{x0} x1=%016VR{x1} x2=%016VR{x2} x3=%016VR{x3}\n"
                        "x4=%016VR{x4} x5=%016VR{x5} x6=%016VR{x6} x7=%016VR{x7}\n"
                        "x8=%016VR{x8} x9=%016VR{x9} x10=%016VR{x10} x11=%016VR{x11}\n"
                        "x12=%016VR{x12} x13=%016VR{x13} x14=%016VR{x14} x15=%016VR{x15}\n"
                        "x16=%016VR{x16} x17=%016VR{x17} x18=%016VR{x18} x19=%016VR{x19}\n"
                        "x20=%016VR{x20} x21=%016VR{x21} x22=%016VR{x22} x23=%016VR{x23}\n"
                        "x24=%016VR{x24} x25=%016VR{x25} x26=%016VR{x26} x27=%016VR{x27}\n"
                        "x28=%016VR{x28} x29=%016VR{x29} x30=%016VR{x30}\n"
                        "pc=%016VR{pc} pstate=%016VR{pstate}\n"
                        "sp_el0=%016VR{sp_el0} sp_el1=%016VR{sp_el1} elr_el1=%016VR{elr_el1}\n"
                        );
        char szInstr[256]; RT_ZERO(szInstr);
#if 0
        DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
                           DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
                           szInstr, sizeof(szInstr), NULL);
#endif
        Log3(("%s%s\n", szRegs, szInstr));
    }
}
#endif /* LOG_ENABLED */


static int nemR3DarwinCopyStateFromHv(PVMCC pVM, PVMCPUCC pVCpu, uint64_t fWhat)
{
    RT_NOREF(pVM);
    hv_return_t hrc = HV_SUCCESS;

    if (fWhat & (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR))
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumRegs); i++)
        {
            if (s_aCpumRegs[i].fCpumExtrn & fWhat)
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumRegs[i].offCpumCtx);
                hrc |= hv_vcpu_get_reg(pVCpu->nem.s.hVCpu, s_aCpumRegs[i].enmHvReg, pu64);
            }
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_V0_V31))
    {
        /* SIMD/FP registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumFpRegs); i++)
        {
            hv_simd_fp_uchar16_t *pu128 = (hv_simd_fp_uchar16_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumFpRegs[i].offCpumCtx);
            hrc |= hv_vcpu_get_simd_fp_reg(pVCpu->nem.s.hVCpu, s_aCpumFpRegs[i].enmHvReg, pu128);
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP)))
    {
        /* System registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegs); i++)
        {
            if (s_aCpumSysRegs[i].fCpumExtrn & fWhat)
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegs[i].offCpumCtx);
                hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumSysRegs[i].enmHvReg, pu64);
            }
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_PSTATE))
    {
        uint64_t u64Tmp;
        hrc |= hv_vcpu_get_reg(pVCpu->nem.s.hVCpu, HV_REG_CPSR, &u64Tmp);
        if (hrc == HV_SUCCESS)
            pVCpu->cpum.GstCtx.fPState = (uint32_t)u64Tmp;
    }

    /* Almost done, just update extern flags. */
    pVCpu->cpum.GstCtx.fExtrn &= ~fWhat;
    if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
        pVCpu->cpum.GstCtx.fExtrn = 0;

    return nemR3DarwinHvSts2Rc(hrc);
}
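
/*
 * Illustrative sketch (not part of the build): nemR3DarwinCopyStateFromHv()
 * only imports what the CPUMCTX_EXTRN_XXX mask in fWhat requests, so a caller
 * that merely needs the program counter and stack pointer can avoid syncing
 * the whole register set. The flags are real; the fragment is hypothetical.
 */
#if 0
{
    int rc = nemR3DarwinCopyStateFromHv(pVM, pVCpu, CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_SP);
    /* On success pVCpu->cpum.GstCtx.Pc and aSpReg[] are valid and fExtrn reflects the import. */
}
#endif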


/**
 * State to pass between vmxHCExitEptViolation
 * and nemR3DarwinHandleMemoryAccessPageCheckerCallback.
 */
typedef struct NEMHCDARWINHMACPCCSTATE
{
    /** Input: Write access. */
    bool    fWriteAccess;
    /** Output: Set if we did something. */
    bool    fDidSomething;
    /** Output: Set if we should resume. */
    bool    fCanResume;
} NEMHCDARWINHMACPCCSTATE;

/**
 * @callback_method_impl{FNPGMPHYSNEMCHECKPAGE,
 *      Worker for vmxHCExitEptViolation; pvUser points to a
 *      NEMHCDARWINHMACPCCSTATE structure. }
 */
static DECLCALLBACK(int)
nemR3DarwinHandleMemoryAccessPageCheckerCallback(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
{
    RT_NOREF(pVCpu);

    NEMHCDARWINHMACPCCSTATE *pState = (NEMHCDARWINHMACPCCSTATE *)pvUser;
    pState->fDidSomething = false;
    pState->fCanResume    = false;

    uint8_t u2State = pInfo->u2NemState;

    /*
     * Consolidate current page state with actual page protection and access type.
     * We don't really consider downgrades here, as they shouldn't happen.
     */
    switch (u2State)
    {
        case NEM_DARWIN_PAGE_STATE_UNMAPPED:
        case NEM_DARWIN_PAGE_STATE_NOT_SET:
        {
            if (pInfo->fNemProt == NEM_PAGE_PROT_NONE)
            {
                Log4(("nemR3DarwinHandleMemoryAccessPageCheckerCallback: %RGp - #1\n", GCPhys));
                return VINF_SUCCESS;
            }

            /* Don't bother remapping it if it's a write request to a non-writable page. */
            if (   pState->fWriteAccess
                && !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE))
            {
                Log4(("nemR3DarwinHandleMemoryAccessPageCheckerCallback: %RGp - #1w\n", GCPhys));
                return VINF_SUCCESS;
            }

            int rc = VINF_SUCCESS;
            if (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
            {
                void *pvPage;
                rc = nemR3NativeGCPhys2R3PtrWriteable(pVM, GCPhys, &pvPage);
                if (RT_SUCCESS(rc))
                    rc = nemR3DarwinMap(pVM, GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, pvPage, X86_PAGE_SIZE, pInfo->fNemProt, &u2State);
            }
            else if (pInfo->fNemProt & NEM_PAGE_PROT_READ)
            {
                const void *pvPage;
                rc = nemR3NativeGCPhys2R3PtrReadOnly(pVM, GCPhys, &pvPage);
                if (RT_SUCCESS(rc))
                    rc = nemR3DarwinMap(pVM, GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, pvPage, X86_PAGE_SIZE, pInfo->fNemProt, &u2State);
            }
            else /* Only EXECUTE doesn't work. */
                AssertReleaseFailed();

            pInfo->u2NemState = u2State;
            Log4(("nemR3DarwinHandleMemoryAccessPageCheckerCallback: %RGp - synced => %s + %Rrc\n",
                  GCPhys, g_apszPageStates[u2State], rc));
            pState->fDidSomething = true;
            pState->fCanResume    = true;
            return rc;
        }
        case NEM_DARWIN_PAGE_STATE_READABLE:
            if (   !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
                && (pInfo->fNemProt & (NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE)))
            {
                pState->fCanResume = true;
                Log4(("nemR3DarwinHandleMemoryAccessPageCheckerCallback: %RGp - #2\n", GCPhys));
                return VINF_SUCCESS;
            }
            break;

        case NEM_DARWIN_PAGE_STATE_WRITABLE:
            if (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
            {
                pState->fCanResume = true;
                if (pInfo->u2OldNemState == NEM_DARWIN_PAGE_STATE_WRITABLE)
                    Log4(("nemR3DarwinHandleMemoryAccessPageCheckerCallback: %RGp - Spurious EPT fault\n", GCPhys));
                return VINF_SUCCESS;
            }
            break;

        default:
            AssertLogRelMsgFailedReturn(("u2State=%#x\n", u2State), VERR_NEM_IPE_4);
    }

    /* Unmap and restart the instruction. */
    int rc = nemR3DarwinUnmap(pVM, GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, X86_PAGE_SIZE, &u2State);
    if (RT_SUCCESS(rc))
    {
        pInfo->u2NemState = u2State;
        pState->fDidSomething = true;
        pState->fCanResume    = true;
        Log5(("NEM GPA unmapped/exit: %RGp (was %s)\n", GCPhys, g_apszPageStates[u2State]));
        return VINF_SUCCESS;
    }

    LogRel(("nemR3DarwinHandleMemoryAccessPageCheckerCallback/unmap: GCPhys=%RGp %s rc=%Rrc\n",
            GCPhys, g_apszPageStates[u2State], rc));
    return VERR_NEM_UNMAP_PAGES_FAILED;
}


/**
 * Exports the guest state to HV for execution.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling EMT.
 */
static int nemR3DarwinExportGuestState(PVMCC pVM, PVMCPUCC pVCpu)
{
    RT_NOREF(pVM);
    hv_return_t hrc = HV_SUCCESS;

    if (   (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR))
        != (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR))
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumRegs); i++)
        {
            if (!(s_aCpumRegs[i].fCpumExtrn & pVCpu->cpum.GstCtx.fExtrn))
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumRegs[i].offCpumCtx);
                hrc |= hv_vcpu_set_reg(pVCpu->nem.s.hVCpu, s_aCpumRegs[i].enmHvReg, *pu64);
            }
        }
    }

    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_V0_V31))
    {
        /* SIMD/FP registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumFpRegs); i++)
        {
            hv_simd_fp_uchar16_t *pu128 = (hv_simd_fp_uchar16_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumFpRegs[i].offCpumCtx);
            hrc |= hv_vcpu_set_simd_fp_reg(pVCpu->nem.s.hVCpu, s_aCpumFpRegs[i].enmHvReg, *pu128);
        }
    }

    if (   hrc == HV_SUCCESS
        && (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP))
           != (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP))
    {
        /* System registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegs); i++)
        {
            if (!(s_aCpumSysRegs[i].fCpumExtrn & pVCpu->cpum.GstCtx.fExtrn))
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegs[i].offCpumCtx);
                hrc |= hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumSysRegs[i].enmHvReg, *pu64);
            }
        }
    }

    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_PSTATE))
        hrc = hv_vcpu_set_reg(pVCpu->nem.s.hVCpu, HV_REG_CPSR, pVCpu->cpum.GstCtx.fPState);

    pVCpu->cpum.GstCtx.fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_NEM;
    return nemR3DarwinHvSts2Rc(hrc);
}


/**
 * Try initialize the native API.
 *
 * This may only do part of the job, more can be done in
 * nemR3NativeInitAfterCPUM() and nemR3NativeInitCompleted().
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   fFallback   Whether we're in fallback mode or use-NEM mode. In
 *                      the latter we'll fail if we cannot initialize.
 * @param   fForced     Whether the HMForced flag is set and we should
 *                      fail if we cannot initialize.
 */
int nemR3NativeInit(PVM pVM, bool fFallback, bool fForced)
{
    AssertReturn(!pVM->nem.s.fCreatedVm, VERR_WRONG_ORDER);

    /*
     * Some state init.
     */
    PCFGMNODE pCfgNem = CFGMR3GetChild(CFGMR3GetRoot(pVM), "NEM/");
    RT_NOREF(pCfgNem);

    /*
     * Error state.
     * The error message will be non-empty on failure and 'rc' will be set too.
     */
    RTERRINFOSTATIC ErrInfo;
    PRTERRINFO pErrInfo = RTErrInfoInitStatic(&ErrInfo);

    int rc = VINF_SUCCESS;
    hv_return_t hrc = hv_vm_create(NULL);
    if (hrc == HV_SUCCESS)
    {
        pVM->nem.s.fCreatedVm = true;
        VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_NATIVE_API);
        Log(("NEM: Marked active!\n"));
        PGMR3EnableNemMode(pVM);
    }
    else
        rc = RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
                           "hv_vm_create() failed: %#x", hrc);

    /*
     * We only fail if in forced mode, otherwise just log the complaint and return.
     */
    Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API || RTErrInfoIsSet(pErrInfo));
    if (   (fForced || !fFallback)
        && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NATIVE_API)
        return VMSetError(pVM, RT_SUCCESS_NP(rc) ? VERR_NEM_NOT_AVAILABLE : rc, RT_SRC_POS, "%s", pErrInfo->pszMsg);

    if (RTErrInfoIsSet(pErrInfo))
        LogRel(("NEM: Not available: %s\n", pErrInfo->pszMsg));
    return VINF_SUCCESS;
}


/**
 * Worker to create the vCPU handle on the EMT running it later on (as required by HV).
 *
 * @returns VBox status code
 * @param   pVM     The VM handle.
 * @param   pVCpu   The vCPU handle.
 * @param   idCpu   ID of the CPU to create.
 */
static DECLCALLBACK(int) nemR3DarwinNativeInitVCpuOnEmt(PVM pVM, PVMCPU pVCpu, VMCPUID idCpu)
{
    hv_return_t hrc = hv_vcpu_create(&pVCpu->nem.s.hVCpu, &pVCpu->nem.s.pHvExit, NULL);
    if (hrc != HV_SUCCESS)
        return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
                          "Call to hv_vcpu_create failed on vCPU %u: %#x (%Rrc)", idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));

    if (idCpu == 0)
    {
        /** @todo */
    }

    return VINF_SUCCESS;
}


/**
 * Worker to destroy the vCPU handle on the EMT running it later on (as required by HV).
 *
 * @returns VBox status code
 * @param   pVCpu   The vCPU handle.
 */
static DECLCALLBACK(int) nemR3DarwinNativeTermVCpuOnEmt(PVMCPU pVCpu)
{
    hv_return_t hrc = hv_vcpu_destroy(pVCpu->nem.s.hVCpu);
    Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);
    return VINF_SUCCESS;
}


/**
 * This is called after CPUMR3Init is done.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 */
int nemR3NativeInitAfterCPUM(PVM pVM)
{
    /*
     * Validate sanity.
     */
    AssertReturn(!pVM->nem.s.fCreatedEmts, VERR_WRONG_ORDER);
    AssertReturn(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API, VERR_WRONG_ORDER);

    /*
     * Setup the EMTs.
     */
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = pVM->apCpusR3[idCpu];

        int rc = VMR3ReqCallWait(pVM, idCpu, (PFNRT)nemR3DarwinNativeInitVCpuOnEmt, 3, pVM, pVCpu, idCpu);
        if (RT_FAILURE(rc))
        {
            /* Rollback. */
            while (idCpu--)
                VMR3ReqCallWait(pVM, idCpu, (PFNRT)nemR3DarwinNativeTermVCpuOnEmt, 1, pVCpu);

            return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS, "Call to hv_vcpu_create failed: %Rrc", rc);
        }
    }

    pVM->nem.s.fCreatedEmts = true;
    return VINF_SUCCESS;
}


int nemR3NativeInitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
{
    RT_NOREF(pVM, enmWhat);
    return VINF_SUCCESS;
}


int nemR3NativeTerm(PVM pVM)
{
    /*
     * Delete the VM.
     */

    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = pVM->apCpusR3[idCpu];

        /*
         * Apple's documentation states that the vCPU should be destroyed
         * on the thread running the vCPU but as all the other EMTs are gone
         * at this point, destroying the VM would hang.
         *
         * We seem to be in luck here, though, as destroying apparently works
         * from EMT(0) as well.
         */
        hv_return_t hrc = hv_vcpu_destroy(pVCpu->nem.s.hVCpu);
        Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);
    }

    pVM->nem.s.fCreatedEmts = false;
    if (pVM->nem.s.fCreatedVm)
    {
        hv_return_t hrc = hv_vm_destroy();
        if (hrc != HV_SUCCESS)
            LogRel(("NEM: hv_vm_destroy() failed with %#x\n", hrc));

        pVM->nem.s.fCreatedVm = false;
    }
    return VINF_SUCCESS;
}


/**
 * VM reset notification.
 *
 * @param   pVM     The cross context VM structure.
 */
void nemR3NativeReset(PVM pVM)
{
    RT_NOREF(pVM);
}


/**
 * Reset CPU due to INIT IPI or hot (un)plugging.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the CPU being
 *                      reset.
 * @param   fInitIpi    Whether this is the INIT IPI or hot (un)plugging case.
 */
void nemR3NativeResetCpu(PVMCPU pVCpu, bool fInitIpi)
{
    RT_NOREF(pVCpu, fInitIpi);
}


/**
 * Returns the byte size from the given access SAS value.
 *
 * @returns Number of bytes to transfer.
 * @param   uSas    The SAS value to convert.
 */
DECLINLINE(size_t) nemR3DarwinGetByteCountFromSas(uint8_t uSas)
{
    switch (uSas)
    {
        case ARMV8_EC_ISS_DATA_ABRT_SAS_BYTE:     return sizeof(uint8_t);
        case ARMV8_EC_ISS_DATA_ABRT_SAS_HALFWORD: return sizeof(uint16_t);
        case ARMV8_EC_ISS_DATA_ABRT_SAS_WORD:     return sizeof(uint32_t);
        case ARMV8_EC_ISS_DATA_ABRT_SAS_DWORD:    return sizeof(uint64_t);
        default:
            AssertReleaseFailed();
    }

    return 0;
}


/**
 * Sets the given general purpose register to the given value.
 *
 * @returns nothing.
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling EMT.
 * @param   uReg        The register index.
 * @param   f64BitReg   Flag whether to operate on a 64-bit or 32-bit register.
 * @param   fSignExtend Flag whether to sign extend the value.
 * @param   u64Val      The value.
 */
DECLINLINE(void) nemR3DarwinSetGReg(PVMCPU pVCpu, uint8_t uReg, bool f64BitReg, bool fSignExtend, uint64_t u64Val)
{
    AssertReturnVoid(uReg < 31);

    if (f64BitReg)
        pVCpu->cpum.GstCtx.aGRegs[uReg].x = fSignExtend ? (int64_t)u64Val : u64Val;
    else
        pVCpu->cpum.GstCtx.aGRegs[uReg].w = fSignExtend ? (int32_t)u64Val : u64Val; /** @todo Does this clear the upper half on real hardware? */

    /* Mark the register as not extern anymore. */
    switch (uReg)
    {
        case 0:
            pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X0;
            break;
        case 1:
            pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X1;
            break;
        case 2:
            pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X2;
            break;
        case 3:
            pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X3;
            break;
        default:
            AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_X4_X28));
            /** @todo We need to import all missing registers in order to clear this flag (or just set it in HV from here). */
    }
}


/**
 * Gets the given general purpose register and returns the value.
 *
 * @returns Value from the given register.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling EMT.
 * @param   uReg    The register index.
 */
DECLINLINE(uint64_t) nemR3DarwinGetGReg(PVMCPU pVCpu, uint8_t uReg)
{
    AssertReturn(uReg < 31, 0);

    /** @todo Import the register if extern. */
    AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_GPRS_MASK));

    return pVCpu->cpum.GstCtx.aGRegs[uReg].x;
}


/**
 * Works on the data abort exception (which will be an MMIO access most of the time).
 *
 * @returns VBox strict status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context virtual CPU structure of the
 *                          calling EMT.
 * @param   uIss            The instruction specific syndrome value.
 * @param   fInsn32Bit      Flag whether the exception was caused by a 32-bit or 16-bit instruction.
 * @param   GCPtrDataAbrt   The virtual GC address causing the data abort.
 * @param   GCPhysDataAbrt  The physical GC address which caused the data abort.
 */
static VBOXSTRICTRC nemR3DarwinHandleExitExceptionDataAbort(PVM pVM, PVMCPU pVCpu, uint32_t uIss, bool fInsn32Bit,
                                                            RTGCPTR GCPtrDataAbrt, RTGCPHYS GCPhysDataAbrt)
{
    bool fIsv        = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_ISV);
    bool fL2Fault    = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_S1PTW);
    bool fWrite      = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_WNR);
    bool f64BitReg   = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SF);
    bool fSignExtend = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SSE);
    uint8_t uReg     = ARMV8_EC_ISS_DATA_ABRT_SRT_GET(uIss);
    uint8_t uAcc     = ARMV8_EC_ISS_DATA_ABRT_SAS_GET(uIss);
    size_t cbAcc     = nemR3DarwinGetByteCountFromSas(uAcc);
    LogFlowFunc(("fIsv=%RTbool fL2Fault=%RTbool fWrite=%RTbool f64BitReg=%RTbool fSignExtend=%RTbool uReg=%u uAcc=%u GCPtrDataAbrt=%RGv GCPhysDataAbrt=%RGp\n",
                 fIsv, fL2Fault, fWrite, f64BitReg, fSignExtend, uReg, uAcc, GCPtrDataAbrt, GCPhysDataAbrt));

    AssertReturn(fIsv, VERR_NOT_SUPPORTED); /** @todo Implement using IEM when this should occur. */

    EMHistoryAddExit(pVCpu,
                     fWrite
                     ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
                     : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
                     pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());

    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    uint64_t u64Val;
    if (fWrite)
    {
        u64Val = nemR3DarwinGetGReg(pVCpu, uReg);
        rcStrict = PGMPhysWrite(pVM, GCPhysDataAbrt, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
        Log4(("MmioExit/%u: %08RX64: WRITE %#x LB %u, %.*Rhxs -> rcStrict=%Rrc\n",
              pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhysDataAbrt, cbAcc, cbAcc,
              &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
    }
    else
    {
        rcStrict = PGMPhysRead(pVM, GCPhysDataAbrt, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
        Log4(("MmioExit/%u: %08RX64: READ %#x LB %u -> %.*Rhxs rcStrict=%Rrc\n",
              pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhysDataAbrt, cbAcc, cbAcc,
              &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
        if (rcStrict == VINF_SUCCESS)
            nemR3DarwinSetGReg(pVCpu, uReg, f64BitReg, fSignExtend, u64Val);
    }

    if (rcStrict == VINF_SUCCESS)
        pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);

    return rcStrict;
}
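
/*
 * Worked example (illustrative; bit positions assumed per the ARMv8 ARM
 * ESR_EL2 layout): a trapped 32-bit store of w0 to an emulated device page
 * might surface as ESR_EL2 = 0x93800040, i.e. EC=0x24 (data abort from a
 * lower EL, bits [31:26]), IL=1 (32-bit instruction, bit [25]) and an ISS
 * with ISV=1 (bit [24], syndrome valid), SAS=0b10 (word access,
 * bits [23:22]), SRT=0 (w0, bits [20:16]), SF=0 (32-bit register, bit [15])
 * and WnR=1 (write, bit [6]). Because ISV is set, the handler above can
 * emulate the access directly via PGMPhysWrite()/PGMPhysRead() instead of
 * falling back to instruction emulation.
 */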


/**
 * Works on the trapped MRS, MSR and system instruction exception.
 *
 * @returns VBox strict status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling EMT.
 * @param   uIss        The instruction specific syndrome value.
 * @param   fInsn32Bit  Flag whether the exception was caused by a 32-bit or 16-bit instruction.
 */
static VBOXSTRICTRC nemR3DarwinHandleExitExceptionTrappedSysInsn(PVM pVM, PVMCPU pVCpu, uint32_t uIss, bool fInsn32Bit)
{
    bool fRead   = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_DIRECTION_IS_READ(uIss);
    uint8_t uCRm = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_CRM_GET(uIss);
    uint8_t uReg = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_RT_GET(uIss);
    uint8_t uCRn = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_CRN_GET(uIss);
    uint8_t uOp1 = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP1_GET(uIss);
    uint8_t uOp2 = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP2_GET(uIss);
    uint8_t uOp0 = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP0_GET(uIss);
    uint16_t idSysReg = ARMV8_AARCH64_SYSREG_ID_CREATE(uOp0, uOp1, uCRn, uCRm, uOp2);
    LogFlowFunc(("fRead=%RTbool uCRm=%u uReg=%u uCRn=%u uOp1=%u uOp2=%u uOp0=%u idSysReg=%#x\n",
                 fRead, uCRm, uReg, uCRn, uOp1, uOp2, uOp0, idSysReg));

    /** @todo EMEXITTYPE_MSR_READ/EMEXITTYPE_MSR_WRITE are misnomers. */
    EMHistoryAddExit(pVCpu,
                     fRead
                     ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ)
                     : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE),
                     pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());

    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    uint64_t u64Val = 0;
    if (fRead)
    {
        RT_NOREF(pVM);
        rcStrict = CPUMQueryGuestSysReg(pVCpu, idSysReg, &u64Val);
        Log4(("SysInsnExit/%u: %08RX64: READ %u:%u:%u:%u:%u -> %#RX64 rcStrict=%Rrc\n",
              pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, uOp0, uOp1, uCRn, uCRm, uOp2, u64Val,
              VBOXSTRICTRC_VAL(rcStrict) ));
        if (rcStrict == VINF_SUCCESS)
            nemR3DarwinSetGReg(pVCpu, uReg, true /*f64BitReg*/, false /*fSignExtend*/, u64Val);
    }
    else
    {
        u64Val = nemR3DarwinGetGReg(pVCpu, uReg);
        rcStrict = CPUMSetGuestSysReg(pVCpu, idSysReg, u64Val);
        Log4(("SysInsnExit/%u: %08RX64: WRITE %u:%u:%u:%u:%u %#RX64 -> rcStrict=%Rrc\n",
              pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, uOp0, uOp1, uCRn, uCRm, uOp2, u64Val,
              VBOXSTRICTRC_VAL(rcStrict) ));
    }

    if (rcStrict == VINF_SUCCESS)
        pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);

    return rcStrict;
}
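
/*
 * Illustrative example (encodings assumed per the ARMv8 ARM): a guest
 * executing "mrs x0, cntv_ctl_el0" that traps here is reported with
 * uOp0=3, uOp1=3, uCRn=14, uCRm=3, uOp2=1 and uReg=0 (Rt=x0), so
 * ARMV8_AARCH64_SYSREG_ID_CREATE(3, 3, 14, 3, 1) yields the idSysReg passed
 * to CPUMQueryGuestSysReg(), and the result is written back to x0 through
 * nemR3DarwinSetGReg().
 */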


/**
 * Handles an exception VM exit.
 *
 * @returns VBox strict status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling EMT.
 * @param   pExit   Pointer to the exit information.
 */
static VBOXSTRICTRC nemR3DarwinHandleExitException(PVM pVM, PVMCPU pVCpu, const hv_vcpu_exit_t *pExit)
{
    uint32_t uEc    = ARMV8_ESR_EL2_EC_GET(pExit->exception.syndrome);
    uint32_t uIss   = ARMV8_ESR_EL2_ISS_GET(pExit->exception.syndrome);
    bool fInsn32Bit = ARMV8_ESR_EL2_IL_IS_32BIT(pExit->exception.syndrome);

    LogFlowFunc(("pVM=%p pVCpu=%p{.idCpu=%u} uEc=%u{%s} uIss=%#RX32 fInsn32Bit=%RTbool\n",
                 pVM, pVCpu, pVCpu->idCpu, uEc, nemR3DarwinEsrEl2EcStringify(uEc), uIss, fInsn32Bit));

    switch (uEc)
    {
        case ARMV8_ESR_EL2_DATA_ABORT_FROM_LOWER_EL:
            return nemR3DarwinHandleExitExceptionDataAbort(pVM, pVCpu, uIss, fInsn32Bit, pExit->exception.virtual_address,
                                                           pExit->exception.physical_address);
        case ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_SYS_INSN:
            return nemR3DarwinHandleExitExceptionTrappedSysInsn(pVM, pVCpu, uIss, fInsn32Bit);
        case ARMV8_ESR_EL2_EC_UNKNOWN:
        default:
            LogRel(("NEM/Darwin: Unknown Exception Class in syndrome: uEc=%u{%s} uIss=%#RX32 fInsn32Bit=%RTbool\n",
                    uEc, nemR3DarwinEsrEl2EcStringify(uEc), uIss, fInsn32Bit));
            return VERR_NOT_IMPLEMENTED;
    }

    return VINF_SUCCESS;
}


/**
 * Handles an exit from hv_vcpu_run().
 *
 * @returns VBox strict status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling EMT.
 */
static VBOXSTRICTRC nemR3DarwinHandleExit(PVM pVM, PVMCPU pVCpu)
{
    int rc = nemR3DarwinCopyStateFromHv(pVM, pVCpu, CPUMCTX_EXTRN_ALL);
    if (RT_FAILURE(rc))
        return rc;

#ifdef LOG_ENABLED
    if (LogIs3Enabled())
        nemR3DarwinLogState(pVM, pVCpu);
#endif

    hv_vcpu_exit_t *pExit = pVCpu->nem.s.pHvExit;
    switch (pExit->reason)
    {
        case HV_EXIT_REASON_CANCELED:
            return VINF_EM_RAW_INTERRUPT;
        case HV_EXIT_REASON_EXCEPTION:
            return nemR3DarwinHandleExitException(pVM, pVCpu, pExit);
        default:
            AssertReleaseFailed();
            break;
    }

    return VERR_INVALID_STATE;
}


/**
 * Runs the guest once until an exit occurs.
 *
 * @returns HV status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
static hv_return_t nemR3DarwinRunGuest(PVM pVM, PVMCPU pVCpu)
{
    TMNotifyStartOfExecution(pVM, pVCpu);

    hv_return_t hrc = hv_vcpu_run(pVCpu->nem.s.hVCpu);

    TMNotifyEndOfExecution(pVM, pVCpu, ASMReadTSC());

    return hrc;
}


/**
 * Prepares the VM to run the guest.
 *
 * @returns Strict VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   fSingleStepping Flag whether we run in single stepping mode.
 */
static VBOXSTRICTRC nemR3DarwinPreRunGuest(PVM pVM, PVMCPU pVCpu, bool fSingleStepping)
{
#ifdef LOG_ENABLED
    if (LogIs3Enabled())
        nemR3DarwinLogState(pVM, pVCpu);
#endif

    /** @todo */ RT_NOREF(fSingleStepping);
    int rc = nemR3DarwinExportGuestState(pVM, pVCpu);
    AssertRCReturn(rc, rc);

    LogFlowFunc(("Running vCPU\n"));
    pVCpu->nem.s.fEventPending = false;
    return VINF_SUCCESS;
}


/**
 * The normal runloop (no debugging features enabled).
 *
 * @returns Strict VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
static VBOXSTRICTRC nemR3DarwinRunGuestNormal(PVM pVM, PVMCPU pVCpu)
{
    /*
     * The run loop.
     *
     * Current approach to state updating to use the sledgehammer and sync
     * everything every time. This will be optimized later.
     */

    /*
     * Poll timers and run for a bit.
     */
    /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
     *        the whole polling job when timers have changed... */
    uint64_t offDeltaIgnored;
    uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    for (unsigned iLoop = 0;; iLoop++)
    {
        rcStrict = nemR3DarwinPreRunGuest(pVM, pVCpu, false /* fSingleStepping */);
        if (rcStrict != VINF_SUCCESS)
            break;

        hv_return_t hrc = nemR3DarwinRunGuest(pVM, pVCpu);
        if (hrc == HV_SUCCESS)
        {
            /*
             * Deal with the message.
             */
            rcStrict = nemR3DarwinHandleExit(pVM, pVCpu);
            if (rcStrict == VINF_SUCCESS)
            { /* hopefully likely */ }
            else
            {
                LogFlow(("NEM/%u: breaking: nemR3DarwinHandleExit -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
                break;
            }
        }
        else
        {
            AssertLogRelMsgFailedReturn(("hv_vcpu_run() failed for CPU #%u: %#x\n",
                                         pVCpu->idCpu, hrc), VERR_NEM_IPE_0);
        }
    } /* the run loop */

    return rcStrict;
}


VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu)
{
#ifdef LOG_ENABLED
    if (LogIs3Enabled())
        nemR3DarwinLogState(pVM, pVCpu);
#endif

    AssertReturn(NEMR3CanExecuteGuest(pVM, pVCpu), VERR_NEM_IPE_9);

    /*
     * Try switch to NEM runloop state.
     */
    if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
    { /* likely */ }
    else
    {
        VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
        LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
        return VINF_SUCCESS;
    }

    VBOXSTRICTRC rcStrict;
#if 0
    if (   !pVCpu->nem.s.fUseDebugLoop
        && !nemR3DarwinAnyExpensiveProbesEnabled()
        && !DBGFIsStepping(pVCpu)
        && !pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledInt3Breakpoints)
#endif
        rcStrict = nemR3DarwinRunGuestNormal(pVM, pVCpu);
#if 0
    else
        rcStrict = nemR3DarwinRunGuestDebug(pVM, pVCpu);
#endif

    if (rcStrict == VINF_EM_RAW_TO_R3)
        rcStrict = VINF_SUCCESS;

    /*
     * Convert any pending HM events back to TRPM due to premature exits.
     *
     * This is because execution may continue from IEM and we would need to inject
     * the event from there (hence place it back in TRPM).
     */
    if (pVCpu->nem.s.fEventPending)
    {
        /** @todo */
    }


    if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
        VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);

    if (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL))
    {
        /* Try anticipate what we might need. */
        uint64_t fImport = NEM_DARWIN_CPUMCTX_EXTRN_MASK_FOR_IEM;
        if (   (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
            || RT_FAILURE(rcStrict))
            fImport = CPUMCTX_EXTRN_ALL;
        else if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_APIC
                                            | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
            fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;

        if (pVCpu->cpum.GstCtx.fExtrn & fImport)
        {
            /* Only import what is external currently. */
            int rc2 = nemR3DarwinCopyStateFromHv(pVM, pVCpu, fImport);
            if (RT_SUCCESS(rc2))
                pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
            else if (RT_SUCCESS(rcStrict))
                rcStrict = rc2;
            if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
                pVCpu->cpum.GstCtx.fExtrn = 0;
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
        }
        else
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
    }
    else
    {
        STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
        pVCpu->cpum.GstCtx.fExtrn = 0;
    }

    return rcStrict;
}


VMMR3_INT_DECL(bool) NEMR3CanExecuteGuest(PVM pVM, PVMCPU pVCpu)
{
    RT_NOREF(pVM, pVCpu);
    return true; /** @todo Are there any cases where we have to emulate? */
}


bool nemR3NativeSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
{
    VMCPU_ASSERT_EMT(pVCpu);
    bool fOld = pVCpu->nem.s.fSingleInstruction;
    pVCpu->nem.s.fSingleInstruction = fEnable;
    pVCpu->nem.s.fUseDebugLoop = fEnable || pVM->nem.s.fUseDebugLoop;
    return fOld;
}


void nemR3NativeNotifyFF(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
{
    LogFlowFunc(("pVM=%p pVCpu=%p fFlags=%#x\n", pVM, pVCpu, fFlags));

    RT_NOREF(pVM, fFlags);

    hv_return_t hrc = hv_vcpus_exit(&pVCpu->nem.s.hVCpu, 1);
    if (hrc != HV_SUCCESS)
        LogRel(("NEM: hv_vcpus_exit(%u, 1) failed with %#x\n", pVCpu->nem.s.hVCpu, hrc));
}


DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChanged(PVM pVM, bool fUseDebugLoop)
{
    RT_NOREF(pVM, fUseDebugLoop);
    AssertReleaseFailed();
    return false;
}


DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu, bool fUseDebugLoop)
{
    RT_NOREF(pVM, pVCpu, fUseDebugLoop);
    return fUseDebugLoop;
}


VMMR3_INT_DECL(int) NEMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvR3,
                                               uint8_t *pu2State, uint32_t *puNemRange)
{
    RT_NOREF(pVM, puNemRange);

    Log5(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p\n", GCPhys, cb, pvR3));
#if defined(VBOX_WITH_PGM_NEM_MODE)
    if (pvR3)
    {
        int rc = nemR3DarwinMap(pVM, GCPhys, pvR3, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
        if (RT_FAILURE(rc))
        {
            LogRel(("NEMR3NotifyPhysRamRegister: GCPhys=%RGp LB %RGp pvR3=%p rc=%Rrc\n", GCPhys, cb, pvR3, rc));
            return VERR_NEM_MAP_PAGES_FAILED;
        }
    }
    return VINF_SUCCESS;
#else
    RT_NOREF(pVM, GCPhys, cb, pvR3);
    return VERR_NEM_MAP_PAGES_FAILED;
#endif
}


VMMR3_INT_DECL(bool) NEMR3IsMmio2DirtyPageTrackingSupported(PVM pVM)
{
    RT_NOREF(pVM);
    return false;
}


VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
                                                  void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
{
    RT_NOREF(pVM, puNemRange, pvRam, fFlags);

    Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p (%d)\n",
          GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, *pu2State));

#if defined(VBOX_WITH_PGM_NEM_MODE)
    /*
     * Unmap the RAM we're replacing.
     */
    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
    {
        int rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
        if (RT_SUCCESS(rc))
        { /* likely */ }
        else if (pvMmio2)
            LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc(ignored)\n",
                    GCPhys, cb, fFlags, rc));
        else
        {
            LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc\n",
                    GCPhys, cb, fFlags, rc));
            return VERR_NEM_UNMAP_PAGES_FAILED;
        }
    }

    /*
     * Map MMIO2 if any.
     */
    if (pvMmio2)
    {
        Assert(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2);
        int rc = nemR3DarwinMap(pVM, GCPhys, pvMmio2, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
        if (RT_FAILURE(rc))
        {
            LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x pvMmio2=%p: Map -> rc=%Rrc\n",
                    GCPhys, cb, fFlags, pvMmio2, rc));
            return VERR_NEM_MAP_PAGES_FAILED;
        }
    }
    else
        Assert(!(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2));

#else
    RT_NOREF(pVM, GCPhys, cb, pvRam, pvMmio2);
    *pu2State = (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE) ? UINT8_MAX : NEM_DARWIN_PAGE_STATE_UNMAPPED;
#endif
    return VINF_SUCCESS;
}


VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
                                                 void *pvRam, void *pvMmio2, uint32_t *puNemRange)
{
    RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, puNemRange);
    return VINF_SUCCESS;
}


VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExUnmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, void *pvRam,
                                               void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
{
    RT_NOREF(pVM, puNemRange);

    Log5(("NEMR3NotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p uNemRange=%#x (%#x)\n",
          GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, puNemRange, *puNemRange));

    int rc = VINF_SUCCESS;
#if defined(VBOX_WITH_PGM_NEM_MODE)
    /*
     * Unmap the MMIO2 pages.
     */
    /** @todo If we implement aliasing (MMIO2 page aliased into MMIO range),
     *        we may have more stuff to unmap even in case of pure MMIO... */
    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2)
    {
        rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
        if (RT_FAILURE(rc))
        {
            LogRel2(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc\n",
                     GCPhys, cb, fFlags, rc));
            rc = VERR_NEM_UNMAP_PAGES_FAILED;
        }
    }

    /* Ensure the page is marked as unmapped if relevant. */
    Assert(!pu2State || *pu2State == NEM_DARWIN_PAGE_STATE_UNMAPPED);

    /*
     * Restore the RAM we replaced.
     */
    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
    {
        AssertPtr(pvRam);
        rc = nemR3DarwinMap(pVM, GCPhys, pvRam, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
        if (RT_SUCCESS(rc))
        { /* likely */ }
        else
        {
            LogRel(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp pvMmio2=%p rc=%Rrc\n", GCPhys, cb, pvMmio2, rc));
            rc = VERR_NEM_MAP_PAGES_FAILED;
        }
    }

    RT_NOREF(pvMmio2);
#else
    RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State);
    if (pu2State)
        *pu2State = UINT8_MAX;
    rc = VERR_NEM_UNMAP_PAGES_FAILED;
#endif
    return rc;
}


VMMR3_INT_DECL(int) NEMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t uNemRange,
                                                           void *pvBitmap, size_t cbBitmap)
{
    RT_NOREF(pVM, GCPhys, cb, uNemRange, pvBitmap, cbBitmap);
    AssertReleaseFailed();
    return VERR_NOT_IMPLEMENTED;
}


VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages, uint32_t fFlags,
                                                    uint8_t *pu2State, uint32_t *puNemRange)
{
    RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);

    Log5(("nemR3NativeNotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags));
    *pu2State   = UINT8_MAX;
    *puNemRange = 0;
    return VINF_SUCCESS;
}


VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages,
                                                   uint32_t fFlags, uint8_t *pu2State, uint32_t *puNemRange)
{
    Log5(("nemR3NativeNotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n",
          GCPhys, cb, pvPages, fFlags, pu2State, *pu2State, puNemRange, *puNemRange));
    *pu2State = UINT8_MAX;

#if defined(VBOX_WITH_PGM_NEM_MODE)
    /*
     * (Re-)map readonly.
     */
    AssertPtrReturn(pvPages, VERR_INVALID_POINTER);
    int rc = nemR3DarwinMap(pVM, GCPhys, pvPages, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE, pu2State);
    if (RT_FAILURE(rc))
    {
        LogRel(("nemR3NativeNotifyPhysRomRegisterLate: GCPhys=%RGp LB %RGp pvPages=%p fFlags=%#x rc=%Rrc\n",
                GCPhys, cb, pvPages, fFlags, rc));
        return VERR_NEM_MAP_PAGES_FAILED;
    }
    RT_NOREF(fFlags, puNemRange);
    return VINF_SUCCESS;
#else
    RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);
    return VERR_NEM_MAP_PAGES_FAILED;
#endif
}


VMM_INT_DECL(void) NEMHCNotifyHandlerPhysicalDeregister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
                                                        RTR3PTR pvMemR3, uint8_t *pu2State)
{
    RT_NOREF(pVM);

    Log5(("NEMHCNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d pvMemR3=%p pu2State=%p (%d)\n",
          GCPhys, cb, enmKind, pvMemR3, pu2State, *pu2State));

    *pu2State = UINT8_MAX;
#if defined(VBOX_WITH_PGM_NEM_MODE)
    if (pvMemR3)
    {
        int rc = nemR3DarwinMap(pVM, GCPhys, pvMemR3, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
        AssertLogRelMsgRC(rc, ("NEMHCNotifyHandlerPhysicalDeregister: nemR3DarwinMap(,%p,%RGp,%RGp,) -> %Rrc\n",
                               pvMemR3, GCPhys, cb, rc));
    }
    RT_NOREF(enmKind);
#else
    RT_NOREF(pVM, enmKind, GCPhys, cb, pvMemR3);
    AssertFailed();
#endif
}


VMMR3_INT_DECL(void) NEMR3NotifySetA20(PVMCPU pVCpu, bool fEnabled)
{
    Log(("NEMR3NotifySetA20: fEnabled=%RTbool\n", fEnabled));
    RT_NOREF(pVCpu, fEnabled);
}


void nemHCNativeNotifyHandlerPhysicalRegister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
{
    Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
    NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb);
}


void nemHCNativeNotifyHandlerPhysicalModify(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
                                            RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
{
    Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
          GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
    NOREF(pVM); NOREF(enmKind); NOREF(GCPhysOld); NOREF(GCPhysNew); NOREF(cb); NOREF(fRestoreAsRAM);
}


int nemHCNativeNotifyPhysPageAllocated(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
                                       PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhys, fPageProt, enmType, *pu2State));
    RT_NOREF(HCPhys, fPageProt, enmType);

    return nemR3DarwinUnmap(pVM, GCPhys, GUEST_PAGE_SIZE, pu2State);
}


VMM_INT_DECL(void) NEMHCNotifyPhysPageProtChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, RTR3PTR pvR3, uint32_t fPageProt,
                                                  PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhys, fPageProt, enmType, *pu2State));
    RT_NOREF(HCPhys, pvR3, fPageProt, enmType);

    nemR3DarwinUnmap(pVM, GCPhys, GUEST_PAGE_SIZE, pu2State);
}


VMM_INT_DECL(void) NEMHCNotifyPhysPageChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
                                              RTR3PTR pvNewR3, uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("NEMHCNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhysPrev, HCPhysNew, fPageProt, enmType, *pu2State));
    RT_NOREF(HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType);

    nemR3DarwinUnmap(pVM, GCPhys, GUEST_PAGE_SIZE, pu2State);
}


/**
 * Interface for importing state on demand (used by IEM).
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context CPU structure.
 * @param   fWhat   What to import, CPUMCTX_EXTRN_XXX.
 */
VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
{
    LogFlowFunc(("pVCpu=%p fWhat=%RX64\n", pVCpu, fWhat));
    STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);

    return nemR3DarwinCopyStateFromHv(pVCpu->pVMR3, pVCpu, fWhat);
}


/**
 * Query the CPU tick counter and optionally the TSC_AUX MSR value.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context CPU structure.
 * @param   pcTicks     Where to return the CPU tick count.
 * @param   puAux       Where to return the TSC_AUX register value.
 */
VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPUCC pVCpu, uint64_t *pcTicks, uint32_t *puAux)
{
    LogFlowFunc(("pVCpu=%p pcTicks=%RX64 puAux=%RX32\n", pVCpu, pcTicks, puAux));
    STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick);

    AssertReleaseFailed();
    return VERR_NOT_IMPLEMENTED;
}


/**
 * Resumes CPU clock (TSC) on all virtual CPUs.
 *
 * This is called by TM when the VM is started, restored, resumed or similar.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context CPU structure of the calling EMT.
 * @param   uPausedTscValue The TSC value at the time of pausing.
 */
VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uPausedTscValue)
{
    LogFlowFunc(("pVM=%p pVCpu=%p uPausedTscValue=%RX64\n", pVM, pVCpu, uPausedTscValue));
    VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
    AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);

    //AssertReleaseFailed();
    return VINF_SUCCESS;
}


/**
 * Returns features supported by the NEM backend.
 *
 * @returns Flags of features supported by the native NEM backend.
 * @param   pVM     The cross context VM structure.
 */
VMM_INT_DECL(uint32_t) NEMHCGetFeatures(PVMCC pVM)
{
    RT_NOREF(pVM);
    /*
     * Apple's Hypervisor.framework is not supported if the CPU doesn't support nested paging
     * and unrestricted guest execution support so we can safely return these flags here always.
     */
    return NEM_FEAT_F_NESTED_PAGING | NEM_FEAT_F_FULL_GST_EXEC | NEM_FEAT_F_XSAVE_XRSTOR;
}


/** @page pg_nem_darwin NEM/darwin - Native Execution Manager, macOS.
 *
 * @todo Add notes as the implementation progresses...
 */
