
source: vbox/trunk/src/VBox/VMM/VMMR3/NEMR3Native-darwin-armv8.cpp@100705

Last change on this file was r100705, checked in by vboxsync on 2023-07-26:

VMM/NEMR3Native-darwin-armv8.cpp: Need to implement our own simplistic dirty page tracking for MMIO2 regions, as Hypervisor.framework doesn't work with guest page sizes but only with host page sized regions, which conflicts with our generic implementation in PGM. With that it is possible to get screen updates for guests which access the standard framebuffer and don't use the SVGA 3 interface to notify the device about dirty regions. bugref:10390
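
As a minimal sketch of the scheme described above (hypothetical names, not the VirtualBox API): every tracked MMIO2 region is write-protected at host page granularity, and the first intercepted write marks it dirty so the caller can re-enable write access until the next query-and-reset cycle. This mirrors what nemR3DarwinHandleExitExceptionDataAbort() and NEMR3NotifyPhysMmioExMapEarly() do below.

    /* Standalone C sketch of the dirty tracking scheme; DIRTYREGION and
       dirtyTrackOnWriteFault are hypothetical names, not the VBox API. */
    #include <stdbool.h>
    #include <stdint.h>

    typedef struct DIRTYREGION
    {
        uint64_t GCPhysStart; /* First guest-physical byte of the tracked region. */
        uint64_t GCPhysLast;  /* Last guest-physical byte of the tracked region. */
        bool     fDirty;      /* Set on the first intercepted write. */
    } DIRTYREGION;

    /* Write-fault handler helper: returns true if the fault hit a tracked region,
       in which case the caller re-enables write access for the whole (host page
       sized) region and restarts the faulting instruction. */
    static bool dirtyTrackOnWriteFault(DIRTYREGION *paRegions, unsigned cRegions, uint64_t GCPhysFault)
    {
        for (unsigned i = 0; i < cRegions; i++)
            if (   GCPhysFault >= paRegions[i].GCPhysStart
                && GCPhysFault <= paRegions[i].GCPhysLast)
            {
                paRegions[i].fDirty = true;
                return true;
            }
        return false;
    }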

1/* $Id: NEMR3Native-darwin-armv8.cpp 100705 2023-07-26 12:57:59Z vboxsync $ */
2/** @file
3 * NEM - Native execution manager, native ring-3 macOS backend using Hypervisor.framework, ARMv8 variant.
4 *
5 * Log group 2: Exit logging.
6 * Log group 3: Log context on exit.
7 * Log group 5: Ring-3 memory management
8 */
9
10/*
11 * Copyright (C) 2023 Oracle and/or its affiliates.
12 *
13 * This file is part of VirtualBox base platform packages, as
14 * available from https://www.virtualbox.org.
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License
18 * as published by the Free Software Foundation, in version 3 of the
19 * License.
20 *
21 * This program is distributed in the hope that it will be useful, but
22 * WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
24 * General Public License for more details.
25 *
26 * You should have received a copy of the GNU General Public License
27 * along with this program; if not, see <https://www.gnu.org/licenses>.
28 *
29 * SPDX-License-Identifier: GPL-3.0-only
30 */
31
32
33/*********************************************************************************************************************************
34* Header Files *
35*********************************************************************************************************************************/
36#define LOG_GROUP LOG_GROUP_NEM
37#define VMCPU_INCL_CPUM_GST_CTX
38#define CPUM_WITH_NONCONST_HOST_FEATURES /* required for initializing parts of the g_CpumHostFeatures structure here. */
39#include <VBox/vmm/nem.h>
40#include <VBox/vmm/iem.h>
41#include <VBox/vmm/em.h>
42#include <VBox/vmm/gic.h>
43#include <VBox/vmm/pdm.h>
44#include <VBox/vmm/dbgftrace.h>
45#include <VBox/vmm/gcm.h>
46#include "NEMInternal.h"
47#include <VBox/vmm/vmcc.h>
48#include <VBox/vmm/vmm.h>
49#include "dtrace/VBoxVMM.h"
50
51#include <iprt/armv8.h>
52#include <iprt/asm.h>
53#include <iprt/asm-arm.h>
54#include <iprt/asm-math.h>
55#include <iprt/ldr.h>
56#include <iprt/mem.h>
57#include <iprt/path.h>
58#include <iprt/string.h>
59#include <iprt/system.h>
60#include <iprt/utf16.h>
61
62#include <iprt/formats/arm-psci.h>
63
64#include <mach/mach_time.h>
65#include <mach/kern_return.h>
66
67#include <Hypervisor/Hypervisor.h>
68
69
70/*********************************************************************************************************************************
71* Defined Constants And Macros *
72*********************************************************************************************************************************/
73
74
75/** @todo The vTimer PPI for the virt platform, make it configurable. */
76#define NEM_DARWIN_VTIMER_GIC_PPI_IRQ 11
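/* Note: PPI 11 corresponds to GIC INTID 27 (PPIs occupy INTIDs 16..31), the virtual timer interrupt commonly used on virt style platforms. */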
77
78
79/*********************************************************************************************************************************
80* Structures and Typedefs *
81*********************************************************************************************************************************/
82
83
84/*********************************************************************************************************************************
85* Global Variables *
86*********************************************************************************************************************************/
87/** The general registers. */
88static const struct
89{
90 hv_reg_t enmHvReg;
91 uint32_t fCpumExtrn;
92 uint32_t offCpumCtx;
93} s_aCpumRegs[] =
94{
95#define CPUM_GREG_EMIT_X0_X3(a_Idx) { HV_REG_X ## a_Idx, CPUMCTX_EXTRN_X ## a_Idx, RT_UOFFSETOF(CPUMCTX, aGRegs[a_Idx].x) }
96#define CPUM_GREG_EMIT_X4_X28(a_Idx) { HV_REG_X ## a_Idx, CPUMCTX_EXTRN_X4_X28, RT_UOFFSETOF(CPUMCTX, aGRegs[a_Idx].x) }
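/* Note: x0..x3 each have an individual CPUMCTX_EXTRN_Xn flag, while x4..x28 share the single CPUMCTX_EXTRN_X4_X28 flag and are therefore imported/exported as a group. */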
97 CPUM_GREG_EMIT_X0_X3(0),
98 CPUM_GREG_EMIT_X0_X3(1),
99 CPUM_GREG_EMIT_X0_X3(2),
100 CPUM_GREG_EMIT_X0_X3(3),
101 CPUM_GREG_EMIT_X4_X28(4),
102 CPUM_GREG_EMIT_X4_X28(5),
103 CPUM_GREG_EMIT_X4_X28(6),
104 CPUM_GREG_EMIT_X4_X28(7),
105 CPUM_GREG_EMIT_X4_X28(8),
106 CPUM_GREG_EMIT_X4_X28(9),
107 CPUM_GREG_EMIT_X4_X28(10),
108 CPUM_GREG_EMIT_X4_X28(11),
109 CPUM_GREG_EMIT_X4_X28(12),
110 CPUM_GREG_EMIT_X4_X28(13),
111 CPUM_GREG_EMIT_X4_X28(14),
112 CPUM_GREG_EMIT_X4_X28(15),
113 CPUM_GREG_EMIT_X4_X28(16),
114 CPUM_GREG_EMIT_X4_X28(17),
115 CPUM_GREG_EMIT_X4_X28(18),
116 CPUM_GREG_EMIT_X4_X28(19),
117 CPUM_GREG_EMIT_X4_X28(20),
118 CPUM_GREG_EMIT_X4_X28(21),
119 CPUM_GREG_EMIT_X4_X28(22),
120 CPUM_GREG_EMIT_X4_X28(23),
121 CPUM_GREG_EMIT_X4_X28(24),
122 CPUM_GREG_EMIT_X4_X28(25),
123 CPUM_GREG_EMIT_X4_X28(26),
124 CPUM_GREG_EMIT_X4_X28(27),
125 CPUM_GREG_EMIT_X4_X28(28),
126 { HV_REG_FP, CPUMCTX_EXTRN_FP, RT_UOFFSETOF(CPUMCTX, aGRegs[29].x) },
127 { HV_REG_LR, CPUMCTX_EXTRN_LR, RT_UOFFSETOF(CPUMCTX, aGRegs[30].x) },
128 { HV_REG_PC, CPUMCTX_EXTRN_PC, RT_UOFFSETOF(CPUMCTX, Pc.u64) },
129 { HV_REG_FPCR, CPUMCTX_EXTRN_FPCR, RT_UOFFSETOF(CPUMCTX, fpcr) },
130 { HV_REG_FPSR, CPUMCTX_EXTRN_FPSR, RT_UOFFSETOF(CPUMCTX, fpsr) }
131#undef CPUM_GREG_EMIT_X0_X3
132#undef CPUM_GREG_EMIT_X4_X28
133};
134/** SIMD/FP registers. */
135static const struct
136{
137 hv_simd_fp_reg_t enmHvReg;
138 uint32_t offCpumCtx;
139} s_aCpumFpRegs[] =
140{
141#define CPUM_VREG_EMIT(a_Idx) { HV_SIMD_FP_REG_Q ## a_Idx, RT_UOFFSETOF(CPUMCTX, aVRegs[a_Idx].v) }
142 CPUM_VREG_EMIT(0),
143 CPUM_VREG_EMIT(1),
144 CPUM_VREG_EMIT(2),
145 CPUM_VREG_EMIT(3),
146 CPUM_VREG_EMIT(4),
147 CPUM_VREG_EMIT(5),
148 CPUM_VREG_EMIT(6),
149 CPUM_VREG_EMIT(7),
150 CPUM_VREG_EMIT(8),
151 CPUM_VREG_EMIT(9),
152 CPUM_VREG_EMIT(10),
153 CPUM_VREG_EMIT(11),
154 CPUM_VREG_EMIT(12),
155 CPUM_VREG_EMIT(13),
156 CPUM_VREG_EMIT(14),
157 CPUM_VREG_EMIT(15),
158 CPUM_VREG_EMIT(16),
159 CPUM_VREG_EMIT(17),
160 CPUM_VREG_EMIT(18),
161 CPUM_VREG_EMIT(19),
162 CPUM_VREG_EMIT(20),
163 CPUM_VREG_EMIT(21),
164 CPUM_VREG_EMIT(22),
165 CPUM_VREG_EMIT(23),
166 CPUM_VREG_EMIT(24),
167 CPUM_VREG_EMIT(25),
168 CPUM_VREG_EMIT(26),
169 CPUM_VREG_EMIT(27),
170 CPUM_VREG_EMIT(28),
171 CPUM_VREG_EMIT(29),
172 CPUM_VREG_EMIT(30),
173 CPUM_VREG_EMIT(31)
174#undef CPUM_VREG_EMIT
175};
176/** System registers. */
177static const struct
178{
179 hv_sys_reg_t enmHvReg;
180 uint32_t fCpumExtrn;
181 uint32_t offCpumCtx;
182} s_aCpumSysRegs[] =
183{
184 { HV_SYS_REG_SP_EL0, CPUMCTX_EXTRN_SP, RT_UOFFSETOF(CPUMCTX, aSpReg[0].u64) },
185 { HV_SYS_REG_SP_EL1, CPUMCTX_EXTRN_SP, RT_UOFFSETOF(CPUMCTX, aSpReg[1].u64) },
186 { HV_SYS_REG_SPSR_EL1, CPUMCTX_EXTRN_SPSR, RT_UOFFSETOF(CPUMCTX, Spsr.u64) },
187 { HV_SYS_REG_ELR_EL1, CPUMCTX_EXTRN_ELR, RT_UOFFSETOF(CPUMCTX, Elr.u64) },
188 { HV_SYS_REG_SCTLR_EL1, CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Sctlr.u64) },
189 { HV_SYS_REG_TCR_EL1, CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Tcr.u64) },
190 { HV_SYS_REG_TTBR0_EL1, CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Ttbr0.u64) },
191 { HV_SYS_REG_TTBR1_EL1, CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Ttbr1.u64) },
192 { HV_SYS_REG_VBAR_EL1, CPUMCTX_EXTRN_SYSREG, RT_UOFFSETOF(CPUMCTX, VBar.u64) },
193};
194
195
196/*********************************************************************************************************************************
197* Internal Functions *
198*********************************************************************************************************************************/
199
200
201/**
202 * Converts a HV return code to a VBox status code.
203 *
204 * @returns VBox status code.
205 * @param hrc The HV return code to convert.
206 */
207DECLINLINE(int) nemR3DarwinHvSts2Rc(hv_return_t hrc)
208{
209 if (hrc == HV_SUCCESS)
210 return VINF_SUCCESS;
211
212 switch (hrc)
213 {
214 case HV_ERROR: return VERR_INVALID_STATE;
215 case HV_BUSY: return VERR_RESOURCE_BUSY;
216 case HV_BAD_ARGUMENT: return VERR_INVALID_PARAMETER;
217 case HV_NO_RESOURCES: return VERR_OUT_OF_RESOURCES;
218 case HV_NO_DEVICE: return VERR_NOT_FOUND;
219 case HV_UNSUPPORTED: return VERR_NOT_SUPPORTED;
220 }
221
222 return VERR_IPE_UNEXPECTED_STATUS;
223}
224
225
226/**
227 * Returns a human readable string of the given exception class.
228 *
229 * @returns Pointer to the string matching the given EC.
230 * @param u32Ec The exception class to return the string for.
231 */
232static const char *nemR3DarwinEsrEl2EcStringify(uint32_t u32Ec)
233{
234 switch (u32Ec)
235 {
236#define ARMV8_EC_CASE(a_Ec) case a_Ec: return #a_Ec
237 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_UNKNOWN);
238 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_TRAPPED_WFX);
239 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCR_MRC_COPROC_15);
240 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCRR_MRRC_COPROC15);
241 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCR_MRC_COPROC_14);
242 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_LDC_STC);
243 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_SME_SVE_NEON);
244 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_VMRS);
245 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_PA_INSN);
246 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_LS64_EXCEPTION);
247 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MRRC_COPROC14);
248 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_BTI_BRANCH_TARGET_EXCEPTION);
249 ARMV8_EC_CASE(ARMV8_ESR_EL2_ILLEGAL_EXECUTION_STATE);
250 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_SVC_INSN);
251 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_HVC_INSN);
252 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_SMC_INSN);
253 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_SVC_INSN);
254 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_HVC_INSN);
255 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_SMC_INSN);
256 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_SYS_INSN);
257 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_SVE_TRAPPED);
258 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_PAUTH_NV_TRAPPED_ERET_ERETAA_ERETAB);
259 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_TME_TSTART_INSN_EXCEPTION);
260 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_FPAC_PA_INSN_FAILURE_EXCEPTION);
261 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_SME_TRAPPED_SME_ACCESS);
262 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_RME_GRANULE_PROT_CHECK_EXCEPTION);
263 ARMV8_EC_CASE(ARMV8_ESR_EL2_INSN_ABORT_FROM_LOWER_EL);
264 ARMV8_EC_CASE(ARMV8_ESR_EL2_INSN_ABORT_FROM_EL2);
265 ARMV8_EC_CASE(ARMV8_ESR_EL2_PC_ALIGNMENT_EXCEPTION);
266 ARMV8_EC_CASE(ARMV8_ESR_EL2_DATA_ABORT_FROM_LOWER_EL);
267 ARMV8_EC_CASE(ARMV8_ESR_EL2_DATA_ABORT_FROM_EL2);
268 ARMV8_EC_CASE(ARMV8_ESR_EL2_SP_ALIGNMENT_EXCEPTION);
269 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_MOPS_EXCEPTION);
270 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_FP_EXCEPTION);
271 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_FP_EXCEPTION);
272 ARMV8_EC_CASE(ARMV8_ESR_EL2_SERROR_INTERRUPT);
273 ARMV8_EC_CASE(ARMV8_ESR_EL2_BKPT_EXCEPTION_FROM_LOWER_EL);
274 ARMV8_EC_CASE(ARMV8_ESR_EL2_BKPT_EXCEPTION_FROM_EL2);
275 ARMV8_EC_CASE(ARMV8_ESR_EL2_SS_EXCEPTION_FROM_LOWER_EL);
276 ARMV8_EC_CASE(ARMV8_ESR_EL2_SS_EXCEPTION_FROM_EL2);
277 ARMV8_EC_CASE(ARMV8_ESR_EL2_WATCHPOINT_EXCEPTION_FROM_LOWER_EL);
278 ARMV8_EC_CASE(ARMV8_ESR_EL2_WATCHPOINT_EXCEPTION_FROM_EL2);
279 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_BKPT_INSN);
280 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_VEC_CATCH_EXCEPTION);
281 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_BRK_INSN);
282#undef ARMV8_EC_CASE
283 default:
284 break;
285 }
286
287 return "<INVALID>";
288}
289
290
291/**
292 * Resolves a NEM page state from the given protection flags.
293 *
294 * @returns NEM page state.
295 * @param fPageProt The page protection flags.
296 */
297DECLINLINE(uint8_t) nemR3DarwinPageStateFromProt(uint32_t fPageProt)
298{
299 switch (fPageProt)
300 {
301 case NEM_PAGE_PROT_NONE:
302 return NEM_DARWIN_PAGE_STATE_UNMAPPED;
303 case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE:
304 return NEM_DARWIN_PAGE_STATE_RX;
305 case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE:
306 return NEM_DARWIN_PAGE_STATE_RW;
307 case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE:
308 return NEM_DARWIN_PAGE_STATE_RWX;
309 default:
310 break;
311 }
312
313 AssertLogRelMsgFailed(("Invalid combination of page protection flags %#x, can't map to page state!\n", fPageProt));
314 return NEM_DARWIN_PAGE_STATE_UNMAPPED;
315}
316
317
318/**
319 * Unmaps the given guest physical address range (page aligned).
320 *
321 * @returns VBox status code.
322 * @param pVM The cross context VM structure.
323 * @param GCPhys The guest physical address to start unmapping at.
324 * @param cb The size of the range to unmap in bytes.
325 * @param pu2State Where to store the new state of the unmapped page, optional.
326 */
327DECLINLINE(int) nemR3DarwinUnmap(PVM pVM, RTGCPHYS GCPhys, size_t cb, uint8_t *pu2State)
328{
329 if (*pu2State <= NEM_DARWIN_PAGE_STATE_UNMAPPED)
330 {
331 Log5(("nemR3DarwinUnmap: %RGp == unmapped\n", GCPhys));
332 *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
333 return VINF_SUCCESS;
334 }
335
336 LogFlowFunc(("Unmapping %RGp LB %zu\n", GCPhys, cb));
337 hv_return_t hrc = hv_vm_unmap(GCPhys, cb);
338 if (RT_LIKELY(hrc == HV_SUCCESS))
339 {
340 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
341 if (pu2State)
342 *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
343 Log5(("nemR3DarwinUnmap: %RGp => unmapped\n", GCPhys));
344 return VINF_SUCCESS;
345 }
346
347 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
348 LogRel(("nemR3DarwinUnmap(%RGp): failed! hrc=%#x\n",
349 GCPhys, hrc));
350 return VERR_NEM_IPE_6;
351}
352
353
354/**
355 * Maps a given guest physical address range backed by the given memory with the given
356 * protection flags.
357 *
358 * @returns VBox status code.
359 * @param pVM The cross context VM structure.
360 * @param GCPhys The guest physical address to start mapping.
361 * @param pvRam The R3 pointer of the memory to back the range with.
362 * @param cb The size of the range, page aligned.
363 * @param fPageProt The page protection flags to use for this range, combination of NEM_PAGE_PROT_XXX
364 * @param pu2State Where to store the state for the new page, optional.
365 */
366DECLINLINE(int) nemR3DarwinMap(PVM pVM, RTGCPHYS GCPhys, const void *pvRam, size_t cb, uint32_t fPageProt, uint8_t *pu2State)
367{
368 LogFlowFunc(("Mapping %RGp LB %zu fProt=%#x\n", GCPhys, cb, fPageProt));
369
370 Assert(fPageProt != NEM_PAGE_PROT_NONE);
371 RT_NOREF(pVM);
372
373 hv_memory_flags_t fHvMemProt = 0;
374 if (fPageProt & NEM_PAGE_PROT_READ)
375 fHvMemProt |= HV_MEMORY_READ;
376 if (fPageProt & NEM_PAGE_PROT_WRITE)
377 fHvMemProt |= HV_MEMORY_WRITE;
378 if (fPageProt & NEM_PAGE_PROT_EXECUTE)
379 fHvMemProt |= HV_MEMORY_EXEC;
380
381 hv_return_t hrc = hv_vm_map((void *)pvRam, GCPhys, cb, fHvMemProt);
382 if (hrc == HV_SUCCESS)
383 {
384 if (pu2State)
385 *pu2State = nemR3DarwinPageStateFromProt(fPageProt);
386 return VINF_SUCCESS;
387 }
388
389 return nemR3DarwinHvSts2Rc(hrc);
390}
391
392
393/**
394 * Changes the protection flags for the given guest physical address range.
395 *
396 * @returns VBox status code.
397 * @param GCPhys The guest physical address to start mapping.
398 * @param cb The size of the range, page aligned.
399 * @param fPageProt The page protection flags to use for this range, combination of NEM_PAGE_PROT_XXX
400 * @param pu2State Where to store the state for the new page, optional.
401 */
402DECLINLINE(int) nemR3DarwinProtect(RTGCPHYS GCPhys, size_t cb, uint32_t fPageProt, uint8_t *pu2State)
403{
404 hv_memory_flags_t fHvMemProt = 0;
405 if (fPageProt & NEM_PAGE_PROT_READ)
406 fHvMemProt |= HV_MEMORY_READ;
407 if (fPageProt & NEM_PAGE_PROT_WRITE)
408 fHvMemProt |= HV_MEMORY_WRITE;
409 if (fPageProt & NEM_PAGE_PROT_EXECUTE)
410 fHvMemProt |= HV_MEMORY_EXEC;
411
412 hv_return_t hrc = hv_vm_protect(GCPhys, cb, fHvMemProt);
413 if (hrc == HV_SUCCESS)
414 {
415 if (pu2State)
416 *pu2State = nemR3DarwinPageStateFromProt(fPageProt);
417 return VINF_SUCCESS;
418 }
419
420 LogRel(("nemR3DarwinProtect(%RGp,%zu,%#x): failed! hrc=%#x\n",
421 GCPhys, cb, fPageProt, hrc));
422 return nemR3DarwinHvSts2Rc(hrc);
423}
424
425
426#ifdef LOG_ENABLED
427/**
428 * Logs the current CPU state.
429 */
430static void nemR3DarwinLogState(PVMCC pVM, PVMCPUCC pVCpu)
431{
432 if (LogIs3Enabled())
433 {
434 char szRegs[4096];
435 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
436 "x0=%016VR{x0} x1=%016VR{x1} x2=%016VR{x2} x3=%016VR{x3}\n"
437 "x4=%016VR{x4} x5=%016VR{x5} x6=%016VR{x6} x7=%016VR{x7}\n"
438 "x8=%016VR{x8} x9=%016VR{x9} x10=%016VR{x10} x11=%016VR{x11}\n"
439 "x12=%016VR{x12} x13=%016VR{x13} x14=%016VR{x14} x15=%016VR{x15}\n"
440 "x16=%016VR{x16} x17=%016VR{x17} x18=%016VR{x18} x19=%016VR{x19}\n"
441 "x20=%016VR{x20} x21=%016VR{x21} x22=%016VR{x22} x23=%016VR{x23}\n"
442 "x24=%016VR{x24} x25=%016VR{x25} x26=%016VR{x26} x27=%016VR{x27}\n"
443 "x28=%016VR{x28} x29=%016VR{x29} x30=%016VR{x30}\n"
444 "pc=%016VR{pc} pstate=%016VR{pstate}\n"
445 "sp_el0=%016VR{sp_el0} sp_el1=%016VR{sp_el1} elr_el1=%016VR{elr_el1}\n"
446 "sctlr_el1=%016VR{sctlr_el1} tcr_el1=%016VR{tcr_el1}\n"
447 "ttbr0_el1=%016VR{ttbr0_el1} ttbr1_el1=%016VR{ttbr1_el1}\n"
448 "vbar_el1=%016VR{vbar_el1}\n"
449 );
450 char szInstr[256]; RT_ZERO(szInstr);
451#if 0
452 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
453 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
454 szInstr, sizeof(szInstr), NULL);
455#endif
456 Log3(("%s%s\n", szRegs, szInstr));
457 }
458}
459#endif /* LOG_ENABLED */
460
461
462static int nemR3DarwinCopyStateFromHv(PVMCC pVM, PVMCPUCC pVCpu, uint64_t fWhat)
463{
464 RT_NOREF(pVM);
465
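 /* A set CPUMCTX_EXTRN_XXX bit in fExtrn means the current value still lives in Hypervisor.framework; fWhat selects which of those to pull into the CPUM context below. */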
466 hv_return_t hrc = hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CTL_EL0, &pVCpu->cpum.GstCtx.CntvCtlEl0);
467 if (hrc == HV_SUCCESS)
468 hrc = hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CVAL_EL0, &pVCpu->cpum.GstCtx.CntvCValEl0);
469
470 if ( hrc == HV_SUCCESS
471 && (fWhat & (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR)))
472 {
473 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumRegs); i++)
474 {
475 if (s_aCpumRegs[i].fCpumExtrn & fWhat)
476 {
477 uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumRegs[i].offCpumCtx);
478 hrc |= hv_vcpu_get_reg(pVCpu->nem.s.hVCpu, s_aCpumRegs[i].enmHvReg, pu64);
479 }
480 }
481 }
482
483 if ( hrc == HV_SUCCESS
484 && (fWhat & CPUMCTX_EXTRN_V0_V31))
485 {
486 /* SIMD/FP registers. */
487 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumFpRegs); i++)
488 {
489 hv_simd_fp_uchar16_t *pu128 = (hv_simd_fp_uchar16_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumFpRegs[i].offCpumCtx);
490 hrc |= hv_vcpu_get_simd_fp_reg(pVCpu->nem.s.hVCpu, s_aCpumFpRegs[i].enmHvReg, pu128);
491 }
492 }
493
494 if ( hrc == HV_SUCCESS
495 && (fWhat & (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR | CPUMCTX_EXTRN_SYSREG)))
496 {
497 /* System registers. */
498 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegs); i++)
499 {
500 if (s_aCpumSysRegs[i].fCpumExtrn & fWhat)
501 {
502 uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegs[i].offCpumCtx);
503 hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumSysRegs[i].enmHvReg, pu64);
504 }
505 }
506 }
507
508 if ( hrc == HV_SUCCESS
509 && (fWhat & CPUMCTX_EXTRN_PSTATE))
510 {
511 uint64_t u64Tmp;
512 hrc |= hv_vcpu_get_reg(pVCpu->nem.s.hVCpu, HV_REG_CPSR, &u64Tmp);
513 if (hrc == HV_SUCCESS)
514 pVCpu->cpum.GstCtx.fPState = (uint32_t)u64Tmp;
515 }
516
517 /* Almost done, just update extern flags. */
518 pVCpu->cpum.GstCtx.fExtrn &= ~fWhat;
519 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
520 pVCpu->cpum.GstCtx.fExtrn = 0;
521
522 return nemR3DarwinHvSts2Rc(hrc);
523}
524
525
526/**
527 * Exports the guest state to HV for execution.
528 *
529 * @returns VBox status code.
530 * @param pVM The cross context VM structure.
531 * @param pVCpu The cross context virtual CPU structure of the
532 * calling EMT.
533 */
534static int nemR3DarwinExportGuestState(PVMCC pVM, PVMCPUCC pVCpu)
535{
536 RT_NOREF(pVM);
537 hv_return_t hrc = HV_SUCCESS;
538
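 /* Registers whose CPUMCTX_EXTRN_XXX bit is set are already current in Hypervisor.framework; only those with the bit clear live in the CPUM context and must be written back before running. */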
539 if ( (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR))
540 != (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR))
541 {
542 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumRegs); i++)
543 {
544 if (!(s_aCpumRegs[i].fCpumExtrn & pVCpu->cpum.GstCtx.fExtrn))
545 {
546 uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumRegs[i].offCpumCtx);
547 hrc |= hv_vcpu_set_reg(pVCpu->nem.s.hVCpu, s_aCpumRegs[i].enmHvReg, *pu64);
548 }
549 }
550 }
551
552 if ( hrc == HV_SUCCESS
553 && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_V0_V31))
554 {
555 /* SIMD/FP registers. */
556 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumFpRegs); i++)
557 {
558 hv_simd_fp_uchar16_t *pu128 = (hv_simd_fp_uchar16_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumFpRegs[i].offCpumCtx);
559 hrc |= hv_vcpu_set_simd_fp_reg(pVCpu->nem.s.hVCpu, s_aCpumFpRegs[i].enmHvReg, *pu128);
560 }
561 }
562
563 if ( hrc == HV_SUCCESS
564 && (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR | CPUMCTX_EXTRN_SYSREG))
565 != (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR | CPUMCTX_EXTRN_SYSREG))
566 {
567 /* System registers. */
568 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegs); i++)
569 {
570 if (!(s_aCpumSysRegs[i].fCpumExtrn & pVCpu->cpum.GstCtx.fExtrn))
571 {
572 uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegs[i].offCpumCtx);
573 hrc |= hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumSysRegs[i].enmHvReg, *pu64);
574 }
575 }
576 }
577
578 if ( hrc == HV_SUCCESS
579 && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_PSTATE))
580 hrc = hv_vcpu_set_reg(pVCpu->nem.s.hVCpu, HV_REG_CPSR, pVCpu->cpum.GstCtx.fPState);
581
582 pVCpu->cpum.GstCtx.fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_NEM;
583 return nemR3DarwinHvSts2Rc(hrc);
584}
585
586
587/**
588 * Try initialize the native API.
589 *
590 * This may only do part of the job, more can be done in
591 * nemR3NativeInitAfterCPUM() and nemR3NativeInitCompleted().
592 *
593 * @returns VBox status code.
594 * @param pVM The cross context VM structure.
595 * @param fFallback Whether we're in fallback mode or use-NEM mode. In
596 * the latter we'll fail if we cannot initialize.
597 * @param fForced Whether the HMForced flag is set and we should
598 * fail if we cannot initialize.
599 */
600int nemR3NativeInit(PVM pVM, bool fFallback, bool fForced)
601{
602 AssertReturn(!pVM->nem.s.fCreatedVm, VERR_WRONG_ORDER);
603
604 /*
605 * Some state init.
606 */
607 PCFGMNODE pCfgNem = CFGMR3GetChild(CFGMR3GetRoot(pVM), "NEM/");
608 RT_NOREF(pCfgNem);
609
610 /*
611 * Error state.
612 * The error message will be non-empty on failure and 'rc' will be set too.
613 */
614 RTERRINFOSTATIC ErrInfo;
615 PRTERRINFO pErrInfo = RTErrInfoInitStatic(&ErrInfo);
616
617 int rc = VINF_SUCCESS;
618 hv_return_t hrc = hv_vm_create(NULL);
619 if (hrc == HV_SUCCESS)
620 {
621 pVM->nem.s.fCreatedVm = true;
622 pVM->nem.s.u64CntFrqHz = ASMReadCntFrqEl0();
623
624 /* Will be initialized in NEMHCResumeCpuTickOnAll() before executing guest code. */
625 pVM->nem.s.u64VTimerOff = 0;
626
627 VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_NATIVE_API);
628 Log(("NEM: Marked active!\n"));
629 PGMR3EnableNemMode(pVM);
630 }
631 else
632 rc = RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
633 "hv_vm_create() failed: %#x", hrc);
634
635 /*
636 * We only fail if in forced mode, otherwise just log the complaint and return.
637 */
638 Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API || RTErrInfoIsSet(pErrInfo));
639 if ( (fForced || !fFallback)
640 && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NATIVE_API)
641 return VMSetError(pVM, RT_SUCCESS_NP(rc) ? VERR_NEM_NOT_AVAILABLE : rc, RT_SRC_POS, "%s", pErrInfo->pszMsg);
642
643 if (RTErrInfoIsSet(pErrInfo))
644 LogRel(("NEM: Not available: %s\n", pErrInfo->pszMsg));
645 return VINF_SUCCESS;
646}
647
648
649/**
650 * Worker to create the vCPU handle on the EMT running it later on (as required by HV).
651 *
652 * @returns VBox status code
653 * @param pVM The VM handle.
654 * @param pVCpu The vCPU handle.
655 * @param idCpu ID of the CPU to create.
656 */
657static DECLCALLBACK(int) nemR3DarwinNativeInitVCpuOnEmt(PVM pVM, PVMCPU pVCpu, VMCPUID idCpu)
658{
659 hv_return_t hrc = hv_vcpu_create(&pVCpu->nem.s.hVCpu, &pVCpu->nem.s.pHvExit, NULL);
660 if (hrc != HV_SUCCESS)
661 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
662 "Call to hv_vcpu_create failed on vCPU %u: %#x (%Rrc)", idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));
663
664 hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_MPIDR_EL1, idCpu);
665 if (hrc != HV_SUCCESS)
666 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
667 "Setting MPIDR_EL1 failed on vCPU %u: %#x (%Rrc)", idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));
668
669 if (idCpu == 0)
670 {
671 /** @todo */
672 }
673
674 return VINF_SUCCESS;
675}
676
677
678/**
679 * Worker to destroy the vCPU handle on the EMT running it later on (as required by HV).
680 *
681 * @returns VBox status code
682 * @param pVCpu The vCPU handle.
683 */
684static DECLCALLBACK(int) nemR3DarwinNativeTermVCpuOnEmt(PVMCPU pVCpu)
685{
686 hv_return_t hrc = hv_vcpu_destroy(pVCpu->nem.s.hVCpu);
687 Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);
688 return VINF_SUCCESS;
689}
690
691
692/**
693 * This is called after CPUMR3Init is done.
694 *
695 * @returns VBox status code.
696 * @param pVM The VM handle.
697 */
698int nemR3NativeInitAfterCPUM(PVM pVM)
699{
700 /*
701 * Validate sanity.
702 */
703 AssertReturn(!pVM->nem.s.fCreatedEmts, VERR_WRONG_ORDER);
704 AssertReturn(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API, VERR_WRONG_ORDER);
705
706 /*
707 * Setup the EMTs.
708 */
709 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
710 {
711 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
712
713 int rc = VMR3ReqCallWait(pVM, idCpu, (PFNRT)nemR3DarwinNativeInitVCpuOnEmt, 3, pVM, pVCpu, idCpu);
714 if (RT_FAILURE(rc))
715 {
716 /* Rollback. */
717 while (idCpu--)
718 VMR3ReqCallWait(pVM, idCpu, (PFNRT)nemR3DarwinNativeTermVCpuOnEmt, 1, pVCpu);
719
720 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS, "Call to hv_vcpu_create failed: %Rrc", rc);
721 }
722 }
723
724 pVM->nem.s.fCreatedEmts = true;
725 return VINF_SUCCESS;
726}
727
728
729int nemR3NativeInitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
730{
731 RT_NOREF(pVM, enmWhat);
732 return VINF_SUCCESS;
733}
734
735
736int nemR3NativeTerm(PVM pVM)
737{
738 /*
739 * Delete the VM.
740 */
741
742 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
743 {
744 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
745
746 /*
747 * Apple's documentation states that the vCPU should be destroyed
748 * on the thread running the vCPU but as all the other EMTs are gone
749 * at this point, destroying the VM would hang.
750 *
751 * We seem to be in luck here though, as destroying apparently works
752 * from EMT(0) as well.
753 */
754 hv_return_t hrc = hv_vcpu_destroy(pVCpu->nem.s.hVCpu);
755 Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);
756 }
757
758 pVM->nem.s.fCreatedEmts = false;
759 if (pVM->nem.s.fCreatedVm)
760 {
761 hv_return_t hrc = hv_vm_destroy();
762 if (hrc != HV_SUCCESS)
763 LogRel(("NEM: hv_vm_destroy() failed with %#x\n", hrc));
764
765 pVM->nem.s.fCreatedVm = false;
766 }
767 return VINF_SUCCESS;
768}
769
770
771/**
772 * VM reset notification.
773 *
774 * @param pVM The cross context VM structure.
775 */
776void nemR3NativeReset(PVM pVM)
777{
778 RT_NOREF(pVM);
779}
780
781
782/**
783 * Reset CPU due to INIT IPI or hot (un)plugging.
784 *
785 * @param pVCpu The cross context virtual CPU structure of the CPU being
786 * reset.
787 * @param fInitIpi Whether this is the INIT IPI or hot (un)plugging case.
788 */
789void nemR3NativeResetCpu(PVMCPU pVCpu, bool fInitIpi)
790{
791 RT_NOREF(pVCpu, fInitIpi);
792}
793
794
795/**
796 * Returns the byte size from the given access SAS value.
797 *
798 * @returns Number of bytes to transfer.
799 * @param uSas The SAS value to convert.
800 */
801DECLINLINE(size_t) nemR3DarwinGetByteCountFromSas(uint8_t uSas)
802{
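 /* ESR_ELx.SAS encodes the access size as 2^SAS bytes: 0=byte, 1=halfword, 2=word, 3=doubleword. */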
803 switch (uSas)
804 {
805 case ARMV8_EC_ISS_DATA_ABRT_SAS_BYTE: return sizeof(uint8_t);
806 case ARMV8_EC_ISS_DATA_ABRT_SAS_HALFWORD: return sizeof(uint16_t);
807 case ARMV8_EC_ISS_DATA_ABRT_SAS_WORD: return sizeof(uint32_t);
808 case ARMV8_EC_ISS_DATA_ABRT_SAS_DWORD: return sizeof(uint64_t);
809 default:
810 AssertReleaseFailed();
811 }
812
813 return 0;
814}
815
816
817/**
818 * Sets the given general purpose register to the given value.
819 *
820 * @param pVCpu The cross context virtual CPU structure of the
821 * calling EMT.
822 * @param uReg The register index.
823 * @param f64BitReg Flag whether to operate on a 64-bit or 32-bit register.
824 * @param fSignExtend Flag whether to sign extend the value.
825 * @param u64Val The value.
826 */
827DECLINLINE(void) nemR3DarwinSetGReg(PVMCPU pVCpu, uint8_t uReg, bool f64BitReg, bool fSignExtend, uint64_t u64Val)
828{
829 AssertReturnVoid(uReg < 31);
830
831 if (f64BitReg)
832 pVCpu->cpum.GstCtx.aGRegs[uReg].x = fSignExtend ? (int64_t)u64Val : u64Val;
833 else
834 pVCpu->cpum.GstCtx.aGRegs[uReg].w = fSignExtend ? (int32_t)u64Val : u64Val; /** @todo Does this clear the upper half on real hardware? */
835
836 /* Mark the register as not extern anymore. */
837 switch (uReg)
838 {
839 case 0:
840 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X0;
841 break;
842 case 1:
843 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X1;
844 break;
845 case 2:
846 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X2;
847 break;
848 case 3:
849 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X3;
850 break;
851 default:
852 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_X4_X28));
853 /** @todo We need to import all missing registers in order to clear this flag (or just set it in HV from here). */
854 }
855}
856
857
858/**
859 * Gets the given general purpose register and returns the value.
860 *
861 * @returns Value from the given register.
862 * @param pVCpu The cross context virtual CPU structure of the
863 * calling EMT.
864 * @param uReg The register index.
865 */
866DECLINLINE(uint64_t) nemR3DarwinGetGReg(PVMCPU pVCpu, uint8_t uReg)
867{
868 AssertReturn(uReg <= ARMV8_AARCH64_REG_ZR, 0);
869
870 if (uReg == ARMV8_AARCH64_REG_ZR)
871 return 0;
872
873 /** @todo Import the register if extern. */
874 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_GPRS_MASK));
875
876 return pVCpu->cpum.GstCtx.aGRegs[uReg].x;
877}
878
879
880/**
881 * Works on the data abort exception (which will be an MMIO access most of the time).
882 *
883 * @returns VBox strict status code.
884 * @param pVM The cross context VM structure.
885 * @param pVCpu The cross context virtual CPU structure of the
886 * calling EMT.
887 * @param uIss The instruction specific syndrome value.
888 * @param fInsn32Bit Flag whether the exception was caused by a 32-bit or 16-bit instruction.
889 * @param GCPtrDataAbrt The virtual GC address causing the data abort.
890 * @param GCPhysDataAbrt The physical GC address which caused the data abort.
891 */
892static VBOXSTRICTRC nemR3DarwinHandleExitExceptionDataAbort(PVM pVM, PVMCPU pVCpu, uint32_t uIss, bool fInsn32Bit,
893 RTGCPTR GCPtrDataAbrt, RTGCPHYS GCPhysDataAbrt)
894{
895 bool fIsv = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_ISV);
896 bool fL2Fault = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_S1PTW);
897 bool fWrite = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_WNR);
898 bool f64BitReg = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SF);
899 bool fSignExtend = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SSE);
900 uint8_t uReg = ARMV8_EC_ISS_DATA_ABRT_SRT_GET(uIss);
901 uint8_t uAcc = ARMV8_EC_ISS_DATA_ABRT_SAS_GET(uIss);
902 size_t cbAcc = nemR3DarwinGetByteCountFromSas(uAcc);
903 LogFlowFunc(("fIsv=%RTbool fL2Fault=%RTbool fWrite=%RTbool f64BitReg=%RTbool fSignExtend=%RTbool uReg=%u uAcc=%u GCPtrDataAbrt=%RGv GCPhysDataAbrt=%RGp\n",
904 fIsv, fL2Fault, fWrite, f64BitReg, fSignExtend, uReg, uAcc, GCPtrDataAbrt, GCPhysDataAbrt));
905
906 RT_NOREF(fL2Fault, GCPtrDataAbrt);
907
908 if (fWrite)
909 {
910 /*
911 * Check whether this is one of the dirty tracked regions, mark it as dirty
912 * and enable write support for this region again.
913 *
914 * This is required for proper VRAM tracking, or the display might not get updated.
915 * It is impossible to use the generic PGM facility as it operates on guest page sizes,
916 * while setting protection flags with Hypervisor.framework works only on host page sized
917 * regions, so we have to cook our own. Additionally, the VRAM region is marked as
918 * prefetchable (write-back), which doesn't produce a valid instruction syndrome, so we
919 * have to restart the instruction after re-enabling write access (due to a missing interpreter right now).
920 */
921 for (uint32_t idSlot = 0; idSlot < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking); idSlot++)
922 {
923 PNEMHVMMIO2REGION pMmio2Region = &pVM->nem.s.aMmio2DirtyTracking[idSlot];
924
925 if ( GCPhysDataAbrt >= pMmio2Region->GCPhysStart
926 && GCPhysDataAbrt <= pMmio2Region->GCPhysLast)
927 {
928 pMmio2Region->fDirty = true;
929
930 uint8_t u2State;
931 int rc = nemR3DarwinProtect(pMmio2Region->GCPhysStart, pMmio2Region->GCPhysLast - pMmio2Region->GCPhysStart + 1,
932 NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE | NEM_PAGE_PROT_WRITE, &u2State);
933
934 /* Restart the instruction if there is no instruction syndrome available. */
935 if (RT_FAILURE(rc) || !fIsv)
936 return rc;
937 }
938 }
939 }
940
941 AssertReturn(fIsv, VERR_NOT_SUPPORTED); /** @todo Implement using IEM when this should occur. */
942
943 EMHistoryAddExit(pVCpu,
944 fWrite
945 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
946 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
947 pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());
948
949 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
950 uint64_t u64Val = 0;
951 if (fWrite)
952 {
953 u64Val = nemR3DarwinGetGReg(pVCpu, uReg);
954 rcStrict = PGMPhysWrite(pVM, GCPhysDataAbrt, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
955 Log4(("MmioExit/%u: %08RX64: WRITE %#x LB %u, %.*Rhxs -> rcStrict=%Rrc\n",
956 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhysDataAbrt, cbAcc, cbAcc,
957 &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
958 }
959 else
960 {
961 rcStrict = PGMPhysRead(pVM, GCPhysDataAbrt, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
962 Log4(("MmioExit/%u: %08RX64: READ %#x LB %u -> %.*Rhxs rcStrict=%Rrc\n",
963 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhysDataAbrt, cbAcc, cbAcc,
964 &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
965 if (rcStrict == VINF_SUCCESS)
966 nemR3DarwinSetGReg(pVCpu, uReg, f64BitReg, fSignExtend, u64Val);
967 }
968
969 if (rcStrict == VINF_SUCCESS)
970 pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);
971
972 return rcStrict;
973}
974
975
976/**
977 * Works on the trapped MRS, MSR and system instruction exception.
978 *
979 * @returns VBox strict status code.
980 * @param pVM The cross context VM structure.
981 * @param pVCpu The cross context virtual CPU structure of the
982 * calling EMT.
983 * @param uIss The instruction specific syndrome value.
984 * @param fInsn32Bit Flag whether the exception was caused by a 32-bit or 16-bit instruction.
985 */
986static VBOXSTRICTRC nemR3DarwinHandleExitExceptionTrappedSysInsn(PVM pVM, PVMCPU pVCpu, uint32_t uIss, bool fInsn32Bit)
987{
988 bool fRead = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_DIRECTION_IS_READ(uIss);
989 uint8_t uCRm = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_CRM_GET(uIss);
990 uint8_t uReg = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_RT_GET(uIss);
991 uint8_t uCRn = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_CRN_GET(uIss);
992 uint8_t uOp1 = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP1_GET(uIss);
993 uint8_t uOp2 = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP2_GET(uIss);
994 uint8_t uOp0 = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP0_GET(uIss);
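 /* Pack op0:op1:CRn:CRm:op2 into the single system register ID used by CPUM; this mirrors the MRS/MSR system register encoding. */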
995 uint16_t idSysReg = ARMV8_AARCH64_SYSREG_ID_CREATE(uOp0, uOp1, uCRn, uCRm, uOp2);
996 LogFlowFunc(("fRead=%RTbool uCRm=%u uReg=%u uCRn=%u uOp1=%u uOp2=%u uOp0=%u idSysReg=%#x\n",
997 fRead, uCRm, uReg, uCRn, uOp1, uOp2, uOp0, idSysReg));
998
999 /** @todo EMEXITTYPE_MSR_READ/EMEXITTYPE_MSR_WRITE are misnomers. */
1000 EMHistoryAddExit(pVCpu,
1001 fRead
1002 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ)
1003 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE),
1004 pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());
1005
1006 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1007 uint64_t u64Val = 0;
1008 if (fRead)
1009 {
1010 RT_NOREF(pVM);
1011 rcStrict = CPUMQueryGuestSysReg(pVCpu, idSysReg, &u64Val);
1012 Log4(("SysInsnExit/%u: %08RX64: READ %u:%u:%u:%u:%u -> %#RX64 rcStrict=%Rrc\n",
1013 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, uOp0, uOp1, uCRn, uCRm, uOp2, u64Val,
1014 VBOXSTRICTRC_VAL(rcStrict) ));
1015 if (rcStrict == VINF_SUCCESS)
1016 nemR3DarwinSetGReg(pVCpu, uReg, true /*f64BitReg*/, false /*fSignExtend*/, u64Val);
1017 }
1018 else
1019 {
1020 u64Val = nemR3DarwinGetGReg(pVCpu, uReg);
1021 rcStrict = CPUMSetGuestSysReg(pVCpu, idSysReg, u64Val);
1022 Log4(("SysInsnExit/%u: %08RX64: WRITE %u:%u:%u:%u:%u %#RX64 -> rcStrict=%Rrc\n",
1023 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, uOp0, uOp1, uCRn, uCRm, uOp2, u64Val,
1024 VBOXSTRICTRC_VAL(rcStrict) ));
1025 }
1026
1027 if (rcStrict == VINF_SUCCESS)
1028 pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);
1029
1030 return rcStrict;
1031}
1032
1033
1034/**
1035 * Works on the trapped HVC instruction exception.
1036 *
1037 * @returns VBox strict status code.
1038 * @param pVM The cross context VM structure.
1039 * @param pVCpu The cross context virtual CPU structure of the
1040 * calling EMT.
1041 * @param uIss The instruction specific syndrome value.
1042 */
1043static VBOXSTRICTRC nemR3DarwinHandleExitExceptionTrappedHvcInsn(PVM pVM, PVMCPU pVCpu, uint32_t uIss)
1044{
1045 uint16_t u16Imm = ARMV8_EC_ISS_AARCH64_TRAPPED_HVC_INSN_IMM_GET(uIss);
1046 LogFlowFunc(("u16Imm=%#RX16\n", u16Imm));
1047
1048#if 0 /** @todo For later */
1049 EMHistoryAddExit(pVCpu,
1050 fRead
1051 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ)
1052 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE),
1053 pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());
1054#endif
1055
1056 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1057 if (u16Imm == 0)
1058 {
1059 /** @todo Raise exception to EL1 if PSCI not configured. */
1060 /** @todo Need a generic mechanism here to pass this to, GIM maybe?. */
1061 uint32_t uFunId = pVCpu->cpum.GstCtx.aGRegs[ARMV8_AARCH64_REG_X0].w;
1062 bool fHvc64 = RT_BOOL(uFunId & ARM_SMCCC_FUNC_ID_64BIT); RT_NOREF(fHvc64);
1063 uint32_t uEntity = ARM_SMCCC_FUNC_ID_ENTITY_GET(uFunId);
1064 uint32_t uFunNum = ARM_SMCCC_FUNC_ID_NUM_GET(uFunId);
1065 if (uEntity == ARM_SMCCC_FUNC_ID_ENTITY_STD_SEC_SERVICE)
1066 {
1067 switch (uFunNum)
1068 {
1069 case ARM_PSCI_FUNC_ID_PSCI_VERSION:
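 /* PSCI_VERSION returns the major version in bits [31:16] and the minor version in bits [15:0]; we report PSCI 1.2. */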
1070 nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_FUNC_ID_PSCI_VERSION_SET(1, 2));
1071 break;
1072 case ARM_PSCI_FUNC_ID_SYSTEM_OFF:
1073 rcStrict = VMR3PowerOff(pVM->pUVM);
1074 break;
1075 case ARM_PSCI_FUNC_ID_SYSTEM_RESET:
1076 case ARM_PSCI_FUNC_ID_SYSTEM_RESET2:
1077 {
1078 bool fHaltOnReset;
1079 int rc = CFGMR3QueryBool(CFGMR3GetChild(CFGMR3GetRoot(pVM), "PDM"), "HaltOnReset", &fHaltOnReset);
1080 if (RT_SUCCESS(rc) && fHaltOnReset)
1081 {
1082 Log(("nemR3DarwinHandleExitExceptionTrappedHvcInsn: Halt On Reset!\n"));
1083 rcStrict = VINF_EM_HALT;
1084 }
1085 else
1086 {
1087 /** @todo pVM->pdm.s.fResetFlags = fFlags; */
1088 VM_FF_SET(pVM, VM_FF_RESET);
1089 rcStrict = VINF_EM_RESET;
1090 }
1091 break;
1092 }
1093 case ARM_PSCI_FUNC_ID_CPU_ON:
1094 {
1095 uint64_t u64TgtCpu = nemR3DarwinGetGReg(pVCpu, ARMV8_AARCH64_REG_X1);
1096 RTGCPHYS GCPhysExecAddr = nemR3DarwinGetGReg(pVCpu, ARMV8_AARCH64_REG_X2);
1097 uint64_t u64CtxId = nemR3DarwinGetGReg(pVCpu, ARMV8_AARCH64_REG_X3);
1098 VMMR3CpuOn(pVM, u64TgtCpu & 0xff, GCPhysExecAddr, u64CtxId);
1099 nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, true /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_STS_SUCCESS);
1100 break;
1101 }
1102 case ARM_PSCI_FUNC_ID_PSCI_FEATURES:
1103 {
1104 uint32_t u32FunNum = (uint32_t)nemR3DarwinGetGReg(pVCpu, ARMV8_AARCH64_REG_X1);
1105 switch (u32FunNum)
1106 {
1107 case ARM_PSCI_FUNC_ID_PSCI_VERSION:
1108 case ARM_PSCI_FUNC_ID_SYSTEM_OFF:
1109 case ARM_PSCI_FUNC_ID_SYSTEM_RESET:
1110 case ARM_PSCI_FUNC_ID_SYSTEM_RESET2:
1111 case ARM_PSCI_FUNC_ID_CPU_ON:
1112 nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0,
1113 false /*f64BitReg*/, false /*fSignExtend*/,
1114 (uint64_t)ARM_PSCI_STS_SUCCESS);
1115 break;
1116 default:
1117 nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0,
1118 false /*f64BitReg*/, false /*fSignExtend*/,
1119 (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
1120 }
 break;
1121 }
1122 default:
1123 nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
1124 }
1125 }
1126 else
1127 nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
1128 }
1129 /** @todo What to do if immediate is != 0? */
1130
1131 return rcStrict;
1132}
1133
1134
1135/**
1136 * Handles an exception VM exit.
1137 *
1138 * @returns VBox strict status code.
1139 * @param pVM The cross context VM structure.
1140 * @param pVCpu The cross context virtual CPU structure of the
1141 * calling EMT.
1142 * @param pExit Pointer to the exit information.
1143 */
1144static VBOXSTRICTRC nemR3DarwinHandleExitException(PVM pVM, PVMCPU pVCpu, const hv_vcpu_exit_t *pExit)
1145{
1146 uint32_t uEc = ARMV8_ESR_EL2_EC_GET(pExit->exception.syndrome);
1147 uint32_t uIss = ARMV8_ESR_EL2_ISS_GET(pExit->exception.syndrome);
1148 bool fInsn32Bit = ARMV8_ESR_EL2_IL_IS_32BIT(pExit->exception.syndrome);
1149
1150 LogFlowFunc(("pVM=%p pVCpu=%p{.idCpu=%u} uEc=%u{%s} uIss=%#RX32 fInsn32Bit=%RTbool\n",
1151 pVM, pVCpu, pVCpu->idCpu, uEc, nemR3DarwinEsrEl2EcStringify(uEc), uIss, fInsn32Bit));
1152
1153 switch (uEc)
1154 {
1155 case ARMV8_ESR_EL2_DATA_ABORT_FROM_LOWER_EL:
1156 return nemR3DarwinHandleExitExceptionDataAbort(pVM, pVCpu, uIss, fInsn32Bit, pExit->exception.virtual_address,
1157 pExit->exception.physical_address);
1158 case ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_SYS_INSN:
1159 return nemR3DarwinHandleExitExceptionTrappedSysInsn(pVM, pVCpu, uIss, fInsn32Bit);
1160 case ARMV8_ESR_EL2_EC_AARCH64_HVC_INSN:
1161 return nemR3DarwinHandleExitExceptionTrappedHvcInsn(pVM, pVCpu, uIss);
1162 case ARMV8_ESR_EL2_EC_TRAPPED_WFX:
1163 {
1164 /* No need to halt if there is an interrupt pending already. */
1165 if (VMCPU_FF_IS_ANY_SET(pVCpu, (VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ)))
1166 return VINF_SUCCESS;
1167
1168 /* Set the vTimer expiration in order to get out of the halt at the right point in time. */
1169 if ( (pVCpu->cpum.GstCtx.CntvCtlEl0 & ARMV8_CNTV_CTL_EL0_AARCH64_ENABLE)
1170 && !(pVCpu->cpum.GstCtx.CntvCtlEl0 & ARMV8_CNTV_CTL_EL0_AARCH64_IMASK))
1171 {
1172 uint64_t cTicksVTimer = mach_absolute_time() - pVM->nem.s.u64VTimerOff;
1173
1174 /* Check whether it expired and start executing guest code. */
1175 if (cTicksVTimer >= pVCpu->cpum.GstCtx.CntvCValEl0)
1176 return VINF_SUCCESS;
1177
1178 uint64_t cTicksVTimerToExpire = pVCpu->cpum.GstCtx.CntvCValEl0 - cTicksVTimer;
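 /* Ticks -> nanoseconds: cNanoSecs = cTicks * 10^9 / CNTFRQ_EL0; ASMMultU64ByU32DivByU32 keeps the intermediate product wide enough to avoid overflow. */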
1179 uint64_t cNanoSecsVTimerToExpire = ASMMultU64ByU32DivByU32(cTicksVTimerToExpire, RT_NS_1SEC, (uint32_t)pVM->nem.s.u64CntFrqHz);
1180
1181 /*
1182 * Our halt method doesn't work with sub-millisecond granularity at the moment, causing a huge slowdown
1183 * and scheduling overhead which would increase the wakeup latency.
1184 * So we only halt when the threshold is exceeded (this needs more experimentation, but 5ms turned out to be a good compromise
1185 * between CPU load when the guest is idle and performance).
1186 */
1187 if (cNanoSecsVTimerToExpire < 2 * RT_NS_1MS)
1188 return VINF_SUCCESS;
1189
1190 LogFlowFunc(("Set vTimer activation to cNanoSecsVTimerToExpire=%#RX64 (CntvCValEl0=%#RX64, u64VTimerOff=%#RX64 cTicksVTimer=%#RX64 u64CntFrqHz=%#RX64)\n",
1191 cNanoSecsVTimerToExpire, pVCpu->cpum.GstCtx.CntvCValEl0, pVM->nem.s.u64VTimerOff, cTicksVTimer, pVM->nem.s.u64CntFrqHz));
1192 TMCpuSetVTimerNextActivation(pVCpu, cNanoSecsVTimerToExpire);
1193 }
1194 else
1195 TMCpuSetVTimerNextActivation(pVCpu, UINT64_MAX);
1196
1197 return VINF_EM_HALT;
1198 }
1199 case ARMV8_ESR_EL2_EC_UNKNOWN:
1200 default:
1201 LogRel(("NEM/Darwin: Unknown Exception Class in syndrome: uEc=%u{%s} uIss=%#RX32 fInsn32Bit=%RTbool\n",
1202 uEc, nemR3DarwinEsrEl2EcStringify(uEc), uIss, fInsn32Bit));
1203 AssertReleaseFailed();
1204 return VERR_NOT_IMPLEMENTED;
1205 }
1206
1207 return VINF_SUCCESS;
1208}
1209
1210
1211/**
1212 * Handles an exit from hv_vcpu_run().
1213 *
1214 * @returns VBox strict status code.
1215 * @param pVM The cross context VM structure.
1216 * @param pVCpu The cross context virtual CPU structure of the
1217 * calling EMT.
1218 */
1219static VBOXSTRICTRC nemR3DarwinHandleExit(PVM pVM, PVMCPU pVCpu)
1220{
1221 int rc = nemR3DarwinCopyStateFromHv(pVM, pVCpu, CPUMCTX_EXTRN_ALL);
1222 if (RT_FAILURE(rc))
1223 return rc;
1224
1225#ifdef LOG_ENABLED
1226 if (LogIs3Enabled())
1227 nemR3DarwinLogState(pVM, pVCpu);
1228#endif
1229
1230 hv_vcpu_exit_t *pExit = pVCpu->nem.s.pHvExit;
1231 switch (pExit->reason)
1232 {
1233 case HV_EXIT_REASON_CANCELED:
1234 return VINF_EM_RAW_INTERRUPT;
1235 case HV_EXIT_REASON_EXCEPTION:
1236 return nemR3DarwinHandleExitException(pVM, pVCpu, pExit);
1237 case HV_EXIT_REASON_VTIMER_ACTIVATED:
1238 {
1239 LogFlowFunc(("vTimer got activated\n"));
1240 TMCpuSetVTimerNextActivation(pVCpu, UINT64_MAX);
1241 pVCpu->nem.s.fVTimerActivated = true;
1242 return GICPpiSet(pVCpu, NEM_DARWIN_VTIMER_GIC_PPI_IRQ, true /*fAsserted*/);
1243 }
1244 default:
1245 AssertReleaseFailed();
1246 break;
1247 }
1248
1249 return VERR_INVALID_STATE;
1250}
1251
1252
1253/**
1254 * Runs the guest once until an exit occurs.
1255 *
1256 * @returns HV status code.
1257 * @param pVM The cross context VM structure.
1258 * @param pVCpu The cross context virtual CPU structure.
1259 */
1260static hv_return_t nemR3DarwinRunGuest(PVM pVM, PVMCPU pVCpu)
1261{
1262 TMNotifyStartOfExecution(pVM, pVCpu);
1263
1264 hv_return_t hrc = hv_vcpu_run(pVCpu->nem.s.hVCpu);
1265
1266 TMNotifyEndOfExecution(pVM, pVCpu, ASMReadTSC());
1267
1268 return hrc;
1269}
1270
1271
1272/**
1273 * Prepares the VM to run the guest.
1274 *
1275 * @returns Strict VBox status code.
1276 * @param pVM The cross context VM structure.
1277 * @param pVCpu The cross context virtual CPU structure.
1278 * @param fSingleStepping Flag whether we run in single stepping mode.
1279 */
1280static VBOXSTRICTRC nemR3DarwinPreRunGuest(PVM pVM, PVMCPU pVCpu, bool fSingleStepping)
1281{
1282#ifdef LOG_ENABLED
1283 bool fIrq = false;
1284 bool fFiq = false;
1285
1286 if (LogIs3Enabled())
1287 nemR3DarwinLogState(pVM, pVCpu);
1288#endif
1289
1290 /** @todo */ RT_NOREF(fSingleStepping);
1291 int rc = nemR3DarwinExportGuestState(pVM, pVCpu);
1292 AssertRCReturn(rc, rc);
1293
1294 /* Check whether the vTimer interrupt was handled by the guest and we can unmask the vTimer. */
1295 if (pVCpu->nem.s.fVTimerActivated)
1296 {
1297 /* Read the CNTV_CTL_EL0 register. */
1298 uint64_t u64CntvCtl = 0;
1299
1300 hv_return_t hrc = hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CTL_EL0, &u64CntvCtl);
1301 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
1302
1303 if ( (u64CntvCtl & (ARMV8_CNTV_CTL_EL0_AARCH64_ENABLE | ARMV8_CNTV_CTL_EL0_AARCH64_IMASK | ARMV8_CNTV_CTL_EL0_AARCH64_ISTATUS))
1304 != (ARMV8_CNTV_CTL_EL0_AARCH64_ENABLE | ARMV8_CNTV_CTL_EL0_AARCH64_ISTATUS))
1305 {
1306 /* Clear the interrupt. */
1307 GICPpiSet(pVCpu, NEM_DARWIN_VTIMER_GIC_PPI_IRQ, false /*fAsserted*/);
1308
1309 pVCpu->nem.s.fVTimerActivated = false;
1310 hrc = hv_vcpu_set_vtimer_mask(pVCpu->nem.s.hVCpu, false /*vtimer_is_masked*/);
1311 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
1312 }
1313 }
1314
1315 /* Set the pending interrupt state. */
1316 hv_return_t hrc = HV_SUCCESS;
1317 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ))
1318 {
1319 hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_IRQ, true);
1320 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
1321#ifdef LOG_ENABLED
1322 fIrq = true;
1323#endif
1324 }
1325 else
1326 {
1327 hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_IRQ, false);
1328 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
1329 }
1330
1331 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_FIQ))
1332 {
1333 hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_FIQ, true);
1334 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
1335#ifdef LOG_ENABLED
1336 fFiq = true;
1337#endif
1338 }
1339 else
1340 {
1341 hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_FIQ, false);
1342 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
1343 }
1344
1345 LogFlowFunc(("Running vCPU [%s,%s]\n", fIrq ? "I" : "nI", fFiq ? "F" : "nF"));
1346 pVCpu->nem.s.fEventPending = false;
1347 return VINF_SUCCESS;
1348}
1349
1350
1351/**
1352 * The normal runloop (no debugging features enabled).
1353 *
1354 * @returns Strict VBox status code.
1355 * @param pVM The cross context VM structure.
1356 * @param pVCpu The cross context virtual CPU structure.
1357 */
1358static VBOXSTRICTRC nemR3DarwinRunGuestNormal(PVM pVM, PVMCPU pVCpu)
1359{
1360 /*
1361 * The run loop.
1362 *
1363 * Current approach to state updating to use the sledgehammer and sync
1364 * everything every time. This will be optimized later.
1365 */
1366
1367 /* Update the vTimer offset after resuming if instructed. */
1368 if (pVCpu->nem.s.fVTimerOffUpdate)
1369 {
1370 hv_return_t hrc = hv_vcpu_set_vtimer_offset(pVCpu->nem.s.hVCpu, pVM->nem.s.u64VTimerOff);
1371 if (hrc != HV_SUCCESS)
1372 return nemR3DarwinHvSts2Rc(hrc);
1373
1374 pVCpu->nem.s.fVTimerOffUpdate = false;
1375 }
1376
1377 /*
1378 * Poll timers and run for a bit.
1379 */
1380 /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
1381 * the whole polling job when timers have changed... */
1382 uint64_t offDeltaIgnored;
1383 uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
1384 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1385 for (unsigned iLoop = 0;; iLoop++)
1386 {
1387 rcStrict = nemR3DarwinPreRunGuest(pVM, pVCpu, false /* fSingleStepping */);
1388 if (rcStrict != VINF_SUCCESS)
1389 break;
1390
1391 hv_return_t hrc = nemR3DarwinRunGuest(pVM, pVCpu);
1392 if (hrc == HV_SUCCESS)
1393 {
1394 /*
1395 * Deal with the message.
1396 */
1397 rcStrict = nemR3DarwinHandleExit(pVM, pVCpu);
1398 if (rcStrict == VINF_SUCCESS)
1399 { /* hopefully likely */ }
1400 else
1401 {
1402 LogFlow(("NEM/%u: breaking: nemR3DarwinHandleExit -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
1403 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
1404 break;
1405 }
1406 }
1407 else
1408 {
1409 AssertLogRelMsgFailedReturn(("hv_vcpu_run() failed for CPU #%u: %#x\n",
1410 pVCpu->idCpu, hrc), VERR_NEM_IPE_0);
1411 }
1412 } /* the run loop */
1413
1414 return rcStrict;
1415}
1416
1417
1418VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu)
1419{
1420#ifdef LOG_ENABLED
1421 if (LogIs3Enabled())
1422 nemR3DarwinLogState(pVM, pVCpu);
1423#endif
1424
1425 AssertReturn(NEMR3CanExecuteGuest(pVM, pVCpu), VERR_NEM_IPE_9);
1426
1427 /*
1428 * Try switch to NEM runloop state.
1429 */
1430 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
1431 { /* likely */ }
1432 else
1433 {
1434 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
1435 LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
1436 return VINF_SUCCESS;
1437 }
1438
1439 VBOXSTRICTRC rcStrict;
1440#if 0
1441 if ( !pVCpu->nem.s.fUseDebugLoop
1442 && !nemR3DarwinAnyExpensiveProbesEnabled()
1443 && !DBGFIsStepping(pVCpu)
1444 && !pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledInt3Breakpoints)
1445#endif
1446 rcStrict = nemR3DarwinRunGuestNormal(pVM, pVCpu);
1447#if 0
1448 else
1449 rcStrict = nemR3DarwinRunGuestDebug(pVM, pVCpu);
1450#endif
1451
1452 if (rcStrict == VINF_EM_RAW_TO_R3)
1453 rcStrict = VINF_SUCCESS;
1454
1455 /*
1456 * Convert any pending HM events back to TRPM due to premature exits.
1457 *
1458 * This is because execution may continue from IEM and we would need to inject
1459 * the event from there (hence place it back in TRPM).
1460 */
1461 if (pVCpu->nem.s.fEventPending)
1462 {
1463 /** @todo */
1464 }
1465
1466
1467 if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
1468 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
1469
1470 if (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL))
1471 {
1472 /* Try anticipate what we might need. */
1473 uint64_t fImport = NEM_DARWIN_CPUMCTX_EXTRN_MASK_FOR_IEM;
1474 if ( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
1475 || RT_FAILURE(rcStrict))
1476 fImport = CPUMCTX_EXTRN_ALL;
1477 else if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ
1478 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
1479 fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;
1480
1481 if (pVCpu->cpum.GstCtx.fExtrn & fImport)
1482 {
1483 /* Only import what is external currently. */
1484 int rc2 = nemR3DarwinCopyStateFromHv(pVM, pVCpu, fImport);
1485 if (RT_SUCCESS(rc2))
1486 pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
1487 else if (RT_SUCCESS(rcStrict))
1488 rcStrict = rc2;
1489 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
1490 pVCpu->cpum.GstCtx.fExtrn = 0;
1491 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
1492 }
1493 else
1494 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
1495 }
1496 else
1497 {
1498 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
1499 pVCpu->cpum.GstCtx.fExtrn = 0;
1500 }
1501
1502 return rcStrict;
1503}
1504
1505
1506VMMR3_INT_DECL(bool) NEMR3CanExecuteGuest(PVM pVM, PVMCPU pVCpu)
1507{
1508 RT_NOREF(pVM, pVCpu);
1509 return true; /** @todo Are there any cases where we have to emulate? */
1510}
1511
1512
1513bool nemR3NativeSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
1514{
1515 VMCPU_ASSERT_EMT(pVCpu);
1516 bool fOld = pVCpu->nem.s.fSingleInstruction;
1517 pVCpu->nem.s.fSingleInstruction = fEnable;
1518 pVCpu->nem.s.fUseDebugLoop = fEnable || pVM->nem.s.fUseDebugLoop;
1519 return fOld;
1520}
1521
1522
1523void nemR3NativeNotifyFF(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
1524{
1525 LogFlowFunc(("pVM=%p pVCpu=%p fFlags=%#x\n", pVM, pVCpu, fFlags));
1526
1527 RT_NOREF(pVM, fFlags);
1528
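 /* hv_vcpus_exit() forces the vCPU out of hv_vcpu_run(); the run loop then sees HV_EXIT_REASON_CANCELED and returns VINF_EM_RAW_INTERRUPT. */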
1529 hv_return_t hrc = hv_vcpus_exit(&pVCpu->nem.s.hVCpu, 1);
1530 if (hrc != HV_SUCCESS)
1531 LogRel(("NEM: hv_vcpus_exit(%u, 1) failed with %#x\n", pVCpu->nem.s.hVCpu, hrc));
1532}
1533
1534
1535DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChanged(PVM pVM, bool fUseDebugLoop)
1536{
1537 RT_NOREF(pVM, fUseDebugLoop);
1538 //AssertReleaseFailed();
1539 return false;
1540}
1541
1542
1543DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu, bool fUseDebugLoop)
1544{
1545 RT_NOREF(pVM, pVCpu, fUseDebugLoop);
1546 return fUseDebugLoop;
1547}
1548
1549
1550VMMR3_INT_DECL(int) NEMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvR3,
1551 uint8_t *pu2State, uint32_t *puNemRange)
1552{
1553 RT_NOREF(pVM, puNemRange);
1554
1555 Log5(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p\n", GCPhys, cb, pvR3));
1556#if defined(VBOX_WITH_PGM_NEM_MODE)
1557 if (pvR3)
1558 {
1559 int rc = nemR3DarwinMap(pVM, GCPhys, pvR3, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
1560 if (RT_FAILURE(rc))
1561 {
1562 LogRel(("NEMR3NotifyPhysRamRegister: GCPhys=%RGp LB %RGp pvR3=%p rc=%Rrc\n", GCPhys, cb, pvR3, rc));
1563 return VERR_NEM_MAP_PAGES_FAILED;
1564 }
1565 }
1566 return VINF_SUCCESS;
1567#else
1568 RT_NOREF(pVM, GCPhys, cb, pvR3);
1569 return VERR_NEM_MAP_PAGES_FAILED;
1570#endif
1571}
1572
1573
1574VMMR3_INT_DECL(bool) NEMR3IsMmio2DirtyPageTrackingSupported(PVM pVM)
1575{
1576 RT_NOREF(pVM);
1577 return true;
1578}
1579
1580
1581VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
1582 void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
1583{
1584 RT_NOREF(pvRam);
1585
1586 Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p (%d)\n",
1587 GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, *pu2State));
1588
1589#if defined(VBOX_WITH_PGM_NEM_MODE)
1590 /*
1591 * Unmap the RAM we're replacing.
1592 */
1593 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
1594 {
1595 int rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
1596 if (RT_SUCCESS(rc))
1597 { /* likely */ }
1598 else if (pvMmio2)
1599 LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc (ignored)\n",
1600 GCPhys, cb, fFlags, rc));
1601 else
1602 {
1603 LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc\n",
1604 GCPhys, cb, fFlags, rc));
1605 return VERR_NEM_UNMAP_PAGES_FAILED;
1606 }
1607 }
1608
1609 /*
1610 * Map MMIO2 if any.
1611 */
1612 if (pvMmio2)
1613 {
1614 Assert(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2);
1615
1616 /* We need to set up our own dirty tracking because Hypervisor.framework works only at host page granularity, which conflicts with the guest-page-sized tracking PGM implements generically. */
1617 uint32_t fProt = NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
1618 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES)
1619 {
1620 /* Find a slot for dirty tracking. */
1621 PNEMHVMMIO2REGION pMmio2Region = NULL;
1622 uint32_t idSlot;
1623 for (idSlot = 0; idSlot < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking); idSlot++)
1624 {
1625 if ( pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysStart == 0
1626 && pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysLast == 0)
1627 {
1628 pMmio2Region = &pVM->nem.s.aMmio2DirtyTracking[idSlot];
1629 break;
1630 }
1631 }
1632
1633 if (!pMmio2Region)
1634 {
1635 LogRel(("NEMR3NotifyPhysMmioExMapEarly: Out of dirty tracking structures -> VERR_NEM_MAP_PAGES_FAILED\n"));
1636 return VERR_NEM_MAP_PAGES_FAILED;
1637 }
1638
1639 pMmio2Region->GCPhysStart = GCPhys;
1640 pMmio2Region->GCPhysLast = GCPhys + cb - 1;
1641 pMmio2Region->fDirty = false;
1642 *puNemRange = idSlot;
1643 }
1644 else
1645 fProt |= NEM_PAGE_PROT_WRITE;
1646
1647 int rc = nemR3DarwinMap(pVM, GCPhys, pvMmio2, cb, fProt, pu2State);
1648 if (RT_FAILURE(rc))
1649 {
1650 LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x pvMmio2=%p: Map -> rc=%Rrc\n",
1651 GCPhys, cb, fFlags, pvMmio2, rc));
1652 return VERR_NEM_MAP_PAGES_FAILED;
1653 }
1654 }
1655 else
1656 Assert(!(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2));
1657
1658#else
1659 RT_NOREF(pVM, GCPhys, cb, pvRam, pvMmio2);
1660 *pu2State = (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE) ? UINT8_MAX : NEM_DARWIN_PAGE_STATE_UNMAPPED;
1661#endif
1662 return VINF_SUCCESS;
1663}
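/*
 * The write-fault half of the dirty tracking set up above lives in the exit
 * handler and is not part of this excerpt. A hedged sketch of the expected flow
 * when the guest writes to a region that was mapped RX for tracking:
 *
 *     // Locate the tracking slot covering the faulting GCPhys, then:
 *     pMmio2Region->fDirty = true;
 *     uint8_t u2State;
 *     int rc = nemR3DarwinProtect(pMmio2Region->GCPhysStart,
 *                                 pMmio2Region->GCPhysLast - pMmio2Region->GCPhysStart + 1,
 *                                 NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE,
 *                                 &u2State);
 *     // Writes now go through unhindered until NEMR3PhysMmio2QueryAndResetDirtyBitmap()
 *     // below flips the region back to RX.
 */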
1664
1665
1666VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
1667 void *pvRam, void *pvMmio2, uint32_t *puNemRange)
1668{
1669 RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, puNemRange);
1670 return VINF_SUCCESS;
1671}
1672
1673
1674VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExUnmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, void *pvRam,
1675 void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
1676{
1677 RT_NOREF(pVM, puNemRange);
1678
1679 Log5(("NEMR3NotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p puNemRange=%p (%#x)\n",
1680 GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, puNemRange, *puNemRange));
1681
1682 int rc = VINF_SUCCESS;
1683#if defined(VBOX_WITH_PGM_NEM_MODE)
1684 /*
1685 * Unmap the MMIO2 pages.
1686 */
1687 /** @todo If we implement aliasing (MMIO2 page aliased into MMIO range),
1688 * we may have more stuff to unmap even in case of pure MMIO... */
1689 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2)
1690 {
1691 rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
1692 if (RT_FAILURE(rc))
1693 {
1694 LogRel2(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc\n",
1695 GCPhys, cb, fFlags, rc));
1696 rc = VERR_NEM_UNMAP_PAGES_FAILED;
1697 }
1698
1699 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES)
1700 {
1701 /* Reset tracking structure. */
1702 uint32_t idSlot = *puNemRange;
1703 *puNemRange = UINT32_MAX;
1704
1705 Assert(idSlot < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking));
1706 pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysStart = 0;
1707 pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysLast = 0;
1708 pVM->nem.s.aMmio2DirtyTracking[idSlot].fDirty = false;
1709 }
1710 }
1711
1712 /* Ensure the page is marked as unmapped if relevant. */
1713 Assert(!pu2State || *pu2State == NEM_DARWIN_PAGE_STATE_UNMAPPED);
1714
1715 /*
1716 * Restore the RAM we replaced.
1717 */
1718 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
1719 {
1720 AssertPtr(pvRam);
1721 rc = nemR3DarwinMap(pVM, GCPhys, pvRam, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
1722 if (RT_SUCCESS(rc))
1723 { /* likely */ }
1724 else
1725 {
1726 LogRel(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp pvMmio2=%p rc=%Rrc\n", GCPhys, cb, pvMmio2, rc));
1727 rc = VERR_NEM_MAP_PAGES_FAILED;
1728 }
1729 }
1730
1731 RT_NOREF(pvMmio2);
1732#else
1733 RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State);
1734 if (pu2State)
1735 *pu2State = UINT8_MAX;
1736 rc = VERR_NEM_UNMAP_PAGES_FAILED;
1737#endif
1738 return rc;
1739}
1740
1741
1742VMMR3_INT_DECL(int) NEMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t uNemRange,
1743 void *pvBitmap, size_t cbBitmap)
1744{
1745 LogFlowFunc(("%RGp LB %RGp uNemRange=%u\n", GCPhys, cb, uNemRange));
1746 Assert(uNemRange < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking));
1747
1748 /* Keep it simple for now: if anything in the range was written, mark every page dirty. */
1749 int rc = VINF_SUCCESS;
1750 if (pVM->nem.s.aMmio2DirtyTracking[uNemRange].fDirty)
1751 {
1752 ASMBitSetRange(pvBitmap, 0, cbBitmap * 8);
1753
1754 pVM->nem.s.aMmio2DirtyTracking[uNemRange].fDirty = false;
1755 /* Restore as RX only. */
1756 uint8_t u2State;
1757 rc = nemR3DarwinProtect(GCPhys, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE, &u2State);
1758 }
1759 else
1760 ASMBitClearRange(pvBitmap, 0, cbBitmap * 8);
1761
1762 return rc;
1763}
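/*
 * A hypothetical caller-side sketch: the bitmap carries one bit per guest page
 * of the range, and with the all-or-nothing scheme above every bit comes back
 * set whenever the region saw a write since the last query.
 *
 *     size_t const cbBitmap = RT_ALIGN_Z(cb >> GUEST_PAGE_SHIFT, 64) / 8; // bits -> bytes
 *     void *pvBitmap = RTMemAllocZ(cbBitmap);
 *     int rc = NEMR3PhysMmio2QueryAndResetDirtyBitmap(pVM, GCPhys, cb, uNemRange,
 *                                                     pvBitmap, cbBitmap);
 */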
1764
1765
1766VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages, uint32_t fFlags,
1767 uint8_t *pu2State, uint32_t *puNemRange)
1768{
1769 RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);
1770
1771 Log5(("NEMR3NotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags));
1772 *pu2State = UINT8_MAX;
1773 *puNemRange = 0;
1774 return VINF_SUCCESS;
1775}
1776
1777
1778VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages,
1779 uint32_t fFlags, uint8_t *pu2State, uint32_t *puNemRange)
1780{
1781 Log5(("NEMR3NotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n",
1782 GCPhys, cb, pvPages, fFlags, pu2State, *pu2State, puNemRange, *puNemRange));
1783 *pu2State = UINT8_MAX;
1784
1785#if defined(VBOX_WITH_PGM_NEM_MODE)
1786 /*
1787 * (Re-)map readonly.
1788 */
1789 AssertPtrReturn(pvPages, VERR_INVALID_POINTER);
1790
1791 int rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
1792 AssertRC(rc);
1793
1794 rc = nemR3DarwinMap(pVM, GCPhys, pvPages, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE, pu2State);
1795 if (RT_FAILURE(rc))
1796 {
1797 LogRel(("NEMR3NotifyPhysRomRegisterLate: GCPhys=%RGp LB %RGp pvPages=%p fFlags=%#x rc=%Rrc\n",
1798 GCPhys, cb, pvPages, fFlags, rc));
1799 return VERR_NEM_MAP_PAGES_FAILED;
1800 }
1801 RT_NOREF(fFlags, puNemRange);
1802 return VINF_SUCCESS;
1803#else
1804 RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);
1805 return VERR_NEM_MAP_PAGES_FAILED;
1806#endif
1807}
1808
1809
1810VMM_INT_DECL(void) NEMHCNotifyHandlerPhysicalDeregister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
1811 RTR3PTR pvMemR3, uint8_t *pu2State)
1812{
1813 Log5(("NEMHCNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d pvMemR3=%p pu2State=%p (%d)\n",
1814 GCPhys, cb, enmKind, pvMemR3, pu2State, *pu2State));
1815
1816 *pu2State = UINT8_MAX;
1817#if defined(VBOX_WITH_PGM_NEM_MODE)
1818 if (pvMemR3)
1819 {
1820 /* Unregister what was there before. */
1821 int rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
1822 AssertRC(rc);
1823
1824 rc = nemR3DarwinMap(pVM, GCPhys, pvMemR3, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
1825 AssertLogRelMsgRC(rc, ("NEMHCNotifyHandlerPhysicalDeregister: nemR3DarwinMap(,%p,%RGp,%RGp,) -> %Rrc\n",
1826 pvMemR3, GCPhys, cb, rc));
1827 }
1828 RT_NOREF(enmKind);
1829#else
1830 RT_NOREF(pVM, enmKind, GCPhys, cb, pvMemR3);
1831 AssertFailed();
1832#endif
1833}
1834
1835
1836VMMR3_INT_DECL(void) NEMR3NotifySetA20(PVMCPU pVCpu, bool fEnabled)
1837{
1838 Log(("NEMR3NotifySetA20: fEnabled=%RTbool\n", fEnabled));
1839 RT_NOREF(pVCpu, fEnabled);
1840}
1841
1842
1843void nemHCNativeNotifyHandlerPhysicalRegister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
1844{
1845 Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
1846 NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb);
1847}
1848
1849
1850void nemHCNativeNotifyHandlerPhysicalModify(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
1851 RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
1852{
1853 Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
1854 GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
1855 NOREF(pVM); NOREF(enmKind); NOREF(GCPhysOld); NOREF(GCPhysNew); NOREF(cb); NOREF(fRestoreAsRAM);
1856}
1857
1858
1859int nemHCNativeNotifyPhysPageAllocated(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
1860 PGMPAGETYPE enmType, uint8_t *pu2State)
1861{
1862 Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
1863 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
1864 RT_NOREF(pVM, GCPhys, HCPhys, fPageProt, enmType, pu2State);
1865
1866 AssertFailed();
1867 return VINF_SUCCESS;
1868}
1869
1870
1871VMM_INT_DECL(void) NEMHCNotifyPhysPageProtChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, RTR3PTR pvR3, uint32_t fPageProt,
1872 PGMPAGETYPE enmType, uint8_t *pu2State)
1873{
1874 Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
1875 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
1876 RT_NOREF(pVM, GCPhys, HCPhys, pvR3, fPageProt, enmType, pu2State);
1877}
1878
1879
1880VMM_INT_DECL(void) NEMHCNotifyPhysPageChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
1881 RTR3PTR pvNewR3, uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
1882{
1883 Log5(("NEMHCNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
1884 GCPhys, HCPhysPrev, HCPhysNew, fPageProt, enmType, *pu2State));
1885 RT_NOREF(pVM, GCPhys, HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType, pu2State);
1886
1887 AssertFailed();
1888}
1889
1890
1891/**
1892 * Interface for importing state on demand (used by IEM).
1893 *
1894 * @returns VBox status code.
1895 * @param pVCpu The cross context CPU structure.
1896 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
1897 */
1898VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
1899{
1900 LogFlowFunc(("pVCpu=%p fWhat=%RX64\n", pVCpu, fWhat));
1901 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);
1902
1903 return nemR3DarwinCopyStateFromHv(pVCpu->pVMR3, pVCpu, fWhat);
1904}
1905
1906
1907/**
1908 * Query the CPU tick counter and optionally the TSC_AUX MSR value.
1909 *
1910 * @returns VBox status code.
1911 * @param pVCpu The cross context CPU structure.
1912 * @param pcTicks Where to return the CPU tick count.
1913 * @param puAux Where to return the TSC_AUX register value (always zero on this backend).
1914 */
1915VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPUCC pVCpu, uint64_t *pcTicks, uint32_t *puAux)
1916{
1917 LogFlowFunc(("pVCpu=%p pcTicks=%RX64 puAux=%RX32\n", pVCpu, pcTicks, puAux));
1918 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick);
1919
1920 if (puAux)
1921 *puAux = 0;
1922 *pcTicks = mach_absolute_time() - pVCpu->pVMR3->nem.s.u64VTimerOff; /* This is the host timer minus the offset. */
1923 return VINF_SUCCESS;
1924}
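/*
 * Put differently: assuming u64VTimerOff was seeded with mach_absolute_time() at
 * VM start, the guest sees a virtual counter that began near zero and advances
 * at the host tick rate:
 *
 *     cTicksGuest = mach_absolute_time() - pVM->nem.s.u64VTimerOff;
 */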
1925
1926
1927/**
1928 * Resumes CPU clock (TSC) on all virtual CPUs.
1929 *
1930 * This is called by TM when the VM is started, restored, resumed or similar.
1931 *
1932 * @returns VBox status code.
1933 * @param pVM The cross context VM structure.
1934 * @param pVCpu The cross context CPU structure of the calling EMT.
1935 * @param uPausedTscValue The TSC value at the time of pausing.
1936 */
1937VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uPausedTscValue)
1938{
1939 LogFlowFunc(("pVM=%p pVCpu=%p uPausedTscValue=%RX64\n", pVM, pVCpu, uPausedTscValue));
1940 VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
1941 AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);
1942
1943 /*
1944 * Calculate the new offset: first get the current TSC value using the old vTimer offset,
1945 * then grow the offset by the time spent paused so the guest doesn't notice the pause.
1946 */
1947 uint64_t u64TscNew = mach_absolute_time() - pVCpu->pVMR3->nem.s.u64VTimerOff;
1948 Assert(u64TscNew >= uPausedTscValue);
1949 LogFlowFunc(("u64VTimerOffOld=%#RX64 u64TscNew=%#RX64 u64VTimerValuePaused=%#RX64 -> u64VTimerOff=%#RX64\n",
1950 pVM->nem.s.u64VTimerOff, u64TscNew, uPausedTscValue,
1951 pVM->nem.s.u64VTimerOff + (u64TscNew - uPausedTscValue)));
1952
1953 pVM->nem.s.u64VTimerOff += u64TscNew - uPausedTscValue;
1954
1955 /*
1956 * Set the flag to update the vTimer offset when the vCPU resumes for the first time
1957 * (needs to be done on the actual EMT).
1958 */
1959 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1960 {
1961 PVMCPUCC pVCpuDst = pVM->apCpusR3[idCpu];
1962 pVCpuDst->nem.s.fVTimerOffUpdate = true;
1963 }
1964
1965 return VINF_SUCCESS;
1966}
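/*
 * A worked example with made-up numbers: the guest paused at uPausedTscValue=1000;
 * at resume mach_absolute_time()=5000 and the old u64VTimerOff=500, so
 * u64TscNew = 5000 - 500 = 4500.  The offset grows by 4500 - 1000 = 3500 to 4000,
 * and the first tick query after resume returns 5000 - 4000 = 1000, i.e. the
 * virtual timer continues exactly where the guest left off.
 */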
1967
1968
1969/**
1970 * Returns features supported by the NEM backend.
1971 *
1972 * @returns Flags of features supported by the native NEM backend.
1973 * @param pVM The cross context VM structure.
1974 */
1975VMM_INT_DECL(uint32_t) NEMHCGetFeatures(PVMCC pVM)
1976{
1977 RT_NOREF(pVM);
1978 /*
1979 * Apple's Hypervisor.framework is not available unless the CPU supports nested paging
1980 * and unrestricted guest execution, so we can always return these flags here.
1981 */
1982 return NEM_FEAT_F_NESTED_PAGING | NEM_FEAT_F_FULL_GST_EXEC | NEM_FEAT_F_XSAVE_XRSTOR;
1983}
1984
1985
1986/** @page pg_nem_darwin NEM/darwin - Native Execution Manager, macOS.
1987 *
1988 * @todo Add notes as the implementation progresses...
1989 */
1990