VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/NEMR3Native-darwin-armv8.cpp@ 100723

Last change on this file since 100723 was 100723, checked in by vboxsync, 18 months ago

VMM/ARM: Add debug system registers in the vCPU state, bugref:10387, bugref:10390

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 78.2 KB
1/* $Id: NEMR3Native-darwin-armv8.cpp 100723 2023-07-28 07:23:31Z vboxsync $ */
2/** @file
3 * NEM - Native execution manager, native ring-3 macOS backend using Hypervisor.framework, ARMv8 variant.
4 *
5 * Log group 2: Exit logging.
6 * Log group 3: Log context on exit.
7 * Log group 5: Ring-3 memory management
8 */
9
10/*
11 * Copyright (C) 2023 Oracle and/or its affiliates.
12 *
13 * This file is part of VirtualBox base platform packages, as
14 * available from https://www.virtualbox.org.
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License
18 * as published by the Free Software Foundation, in version 3 of the
19 * License.
20 *
21 * This program is distributed in the hope that it will be useful, but
22 * WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
24 * General Public License for more details.
25 *
26 * You should have received a copy of the GNU General Public License
27 * along with this program; if not, see <https://www.gnu.org/licenses>.
28 *
29 * SPDX-License-Identifier: GPL-3.0-only
30 */
31
32
33/*********************************************************************************************************************************
34* Header Files *
35*********************************************************************************************************************************/
36#define LOG_GROUP LOG_GROUP_NEM
37#define VMCPU_INCL_CPUM_GST_CTX
38#define CPUM_WITH_NONCONST_HOST_FEATURES /* required for initializing parts of the g_CpumHostFeatures structure here. */
39#include <VBox/vmm/nem.h>
40#include <VBox/vmm/iem.h>
41#include <VBox/vmm/em.h>
42#include <VBox/vmm/gic.h>
43#include <VBox/vmm/pdm.h>
44#include <VBox/vmm/dbgftrace.h>
45#include <VBox/vmm/gcm.h>
46#include "NEMInternal.h"
47#include <VBox/vmm/vmcc.h>
48#include <VBox/vmm/vmm.h>
49#include "dtrace/VBoxVMM.h"
50
51#include <iprt/armv8.h>
52#include <iprt/asm.h>
53#include <iprt/asm-arm.h>
54#include <iprt/asm-math.h>
55#include <iprt/ldr.h>
56#include <iprt/mem.h>
57#include <iprt/path.h>
58#include <iprt/string.h>
59#include <iprt/system.h>
60#include <iprt/utf16.h>
61
62#include <iprt/formats/arm-psci.h>
63
64#include <mach/mach_time.h>
65#include <mach/kern_return.h>
66
67#include <Hypervisor/Hypervisor.h>
68
69
70/*********************************************************************************************************************************
71* Defined Constants And Macros *
72*********************************************************************************************************************************/
73
74
75/** @todo The vTimer PPI for the virt platform, make it configurable. */
76#define NEM_DARWIN_VTIMER_GIC_PPI_IRQ 11
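/* Note (informative): GIC PPIs occupy INTIDs 16..31, so PPI 11 corresponds to INTID 27,
 * the interrupt conventionally wired to the EL1 virtual timer on virt platforms. */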
77
78
79/*********************************************************************************************************************************
80* Structures and Typedefs *
81*********************************************************************************************************************************/
82
83
84/*********************************************************************************************************************************
85* Global Variables *
86*********************************************************************************************************************************/
87/** The general registers. */
88static const struct
89{
90 hv_reg_t enmHvReg;
91 uint32_t fCpumExtrn;
92 uint32_t offCpumCtx;
93} s_aCpumRegs[] =
94{
95#define CPUM_GREG_EMIT_X0_X3(a_Idx) { HV_REG_X ## a_Idx, CPUMCTX_EXTRN_X ## a_Idx, RT_UOFFSETOF(CPUMCTX, aGRegs[a_Idx].x) }
96#define CPUM_GREG_EMIT_X4_X28(a_Idx) { HV_REG_X ## a_Idx, CPUMCTX_EXTRN_X4_X28, RT_UOFFSETOF(CPUMCTX, aGRegs[a_Idx].x) }
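/* Illustrative expansion: CPUM_GREG_EMIT_X0_X3(0) yields
 * { HV_REG_X0, CPUMCTX_EXTRN_X0, RT_UOFFSETOF(CPUMCTX, aGRegs[0].x) }. */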
97 CPUM_GREG_EMIT_X0_X3(0),
98 CPUM_GREG_EMIT_X0_X3(1),
99 CPUM_GREG_EMIT_X0_X3(2),
100 CPUM_GREG_EMIT_X0_X3(3),
101 CPUM_GREG_EMIT_X4_X28(4),
102 CPUM_GREG_EMIT_X4_X28(5),
103 CPUM_GREG_EMIT_X4_X28(6),
104 CPUM_GREG_EMIT_X4_X28(7),
105 CPUM_GREG_EMIT_X4_X28(8),
106 CPUM_GREG_EMIT_X4_X28(9),
107 CPUM_GREG_EMIT_X4_X28(10),
108 CPUM_GREG_EMIT_X4_X28(11),
109 CPUM_GREG_EMIT_X4_X28(12),
110 CPUM_GREG_EMIT_X4_X28(13),
111 CPUM_GREG_EMIT_X4_X28(14),
112 CPUM_GREG_EMIT_X4_X28(15),
113 CPUM_GREG_EMIT_X4_X28(16),
114 CPUM_GREG_EMIT_X4_X28(17),
115 CPUM_GREG_EMIT_X4_X28(18),
116 CPUM_GREG_EMIT_X4_X28(19),
117 CPUM_GREG_EMIT_X4_X28(20),
118 CPUM_GREG_EMIT_X4_X28(21),
119 CPUM_GREG_EMIT_X4_X28(22),
120 CPUM_GREG_EMIT_X4_X28(23),
121 CPUM_GREG_EMIT_X4_X28(24),
122 CPUM_GREG_EMIT_X4_X28(25),
123 CPUM_GREG_EMIT_X4_X28(26),
124 CPUM_GREG_EMIT_X4_X28(27),
125 CPUM_GREG_EMIT_X4_X28(28),
126 { HV_REG_FP, CPUMCTX_EXTRN_FP, RT_UOFFSETOF(CPUMCTX, aGRegs[29].x) },
127 { HV_REG_LR, CPUMCTX_EXTRN_LR, RT_UOFFSETOF(CPUMCTX, aGRegs[30].x) },
128 { HV_REG_PC, CPUMCTX_EXTRN_PC, RT_UOFFSETOF(CPUMCTX, Pc.u64) },
129 { HV_REG_FPCR, CPUMCTX_EXTRN_FPCR, RT_UOFFSETOF(CPUMCTX, fpcr) },
130 { HV_REG_FPSR, CPUMCTX_EXTRN_FPSR, RT_UOFFSETOF(CPUMCTX, fpsr) }
131#undef CPUM_GREG_EMIT_X0_X3
132#undef CPUM_GREG_EMIT_X4_X28
133};
134/** SIMD/FP registers. */
135static const struct
136{
137 hv_simd_fp_reg_t enmHvReg;
138 uint32_t offCpumCtx;
139} s_aCpumFpRegs[] =
140{
141#define CPUM_VREG_EMIT(a_Idx) { HV_SIMD_FP_REG_Q ## a_Idx, RT_UOFFSETOF(CPUMCTX, aVRegs[a_Idx].v) }
142 CPUM_VREG_EMIT(0),
143 CPUM_VREG_EMIT(1),
144 CPUM_VREG_EMIT(2),
145 CPUM_VREG_EMIT(3),
146 CPUM_VREG_EMIT(4),
147 CPUM_VREG_EMIT(5),
148 CPUM_VREG_EMIT(6),
149 CPUM_VREG_EMIT(7),
150 CPUM_VREG_EMIT(8),
151 CPUM_VREG_EMIT(9),
152 CPUM_VREG_EMIT(10),
153 CPUM_VREG_EMIT(11),
154 CPUM_VREG_EMIT(12),
155 CPUM_VREG_EMIT(13),
156 CPUM_VREG_EMIT(14),
157 CPUM_VREG_EMIT(15),
158 CPUM_VREG_EMIT(16),
159 CPUM_VREG_EMIT(17),
160 CPUM_VREG_EMIT(18),
161 CPUM_VREG_EMIT(19),
162 CPUM_VREG_EMIT(20),
163 CPUM_VREG_EMIT(21),
164 CPUM_VREG_EMIT(22),
165 CPUM_VREG_EMIT(23),
166 CPUM_VREG_EMIT(24),
167 CPUM_VREG_EMIT(25),
168 CPUM_VREG_EMIT(26),
169 CPUM_VREG_EMIT(27),
170 CPUM_VREG_EMIT(28),
171 CPUM_VREG_EMIT(29),
172 CPUM_VREG_EMIT(30),
173 CPUM_VREG_EMIT(31)
174#undef CPUM_VREG_EMIT
175};
176/** Debug system registers. */
177static const struct
178{
179 hv_sys_reg_t enmHvReg;
180 uint32_t offCpumCtx;
181} s_aCpumDbgRegs[] =
182{
183#define CPUM_DBGREG_EMIT(a_BorW, a_Idx) \
184 { HV_SYS_REG_DBG ## a_BorW ## CR ## a_Idx ## _EL1, RT_UOFFSETOF(CPUMCTX, a ## a_BorW ## p[a_Idx].Ctrl.u64) }, \
185 { HV_SYS_REG_DBG ## a_BorW ## VR ## a_Idx ## _EL1, RT_UOFFSETOF(CPUMCTX, a ## a_BorW ## p[a_Idx].Value.u64) }
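/* Illustrative expansion: CPUM_DBGREG_EMIT(B, 0) yields the pair
 * { HV_SYS_REG_DBGBCR0_EL1, RT_UOFFSETOF(CPUMCTX, aBp[0].Ctrl.u64) },
 * { HV_SYS_REG_DBGBVR0_EL1, RT_UOFFSETOF(CPUMCTX, aBp[0].Value.u64) }. */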
186 /* Breakpoint registers. */
187 CPUM_DBGREG_EMIT(B, 0),
188 CPUM_DBGREG_EMIT(B, 1),
189 CPUM_DBGREG_EMIT(B, 2),
190 CPUM_DBGREG_EMIT(B, 3),
191 CPUM_DBGREG_EMIT(B, 4),
192 CPUM_DBGREG_EMIT(B, 5),
193 CPUM_DBGREG_EMIT(B, 6),
194 CPUM_DBGREG_EMIT(B, 7),
195 CPUM_DBGREG_EMIT(B, 8),
196 CPUM_DBGREG_EMIT(B, 9),
197 CPUM_DBGREG_EMIT(B, 10),
198 CPUM_DBGREG_EMIT(B, 11),
199 CPUM_DBGREG_EMIT(B, 12),
200 CPUM_DBGREG_EMIT(B, 13),
201 CPUM_DBGREG_EMIT(B, 14),
202 CPUM_DBGREG_EMIT(B, 15),
203 /* Watchpoint registers. */
204 CPUM_DBGREG_EMIT(W, 0),
205 CPUM_DBGREG_EMIT(W, 1),
206 CPUM_DBGREG_EMIT(W, 2),
207 CPUM_DBGREG_EMIT(W, 3),
208 CPUM_DBGREG_EMIT(W, 4),
209 CPUM_DBGREG_EMIT(W, 5),
210 CPUM_DBGREG_EMIT(W, 6),
211 CPUM_DBGREG_EMIT(W, 7),
212 CPUM_DBGREG_EMIT(W, 8),
213 CPUM_DBGREG_EMIT(W, 9),
214 CPUM_DBGREG_EMIT(W, 10),
215 CPUM_DBGREG_EMIT(W, 11),
216 CPUM_DBGREG_EMIT(W, 12),
217 CPUM_DBGREG_EMIT(W, 13),
218 CPUM_DBGREG_EMIT(W, 14),
219 CPUM_DBGREG_EMIT(W, 15)
220#undef CPUM_DBGREG_EMIT
221};
222/** System registers. */
223static const struct
224{
225 hv_sys_reg_t enmHvReg;
226 uint32_t fCpumExtrn;
227 uint32_t offCpumCtx;
228} s_aCpumSysRegs[] =
229{
230 { HV_SYS_REG_SP_EL0, CPUMCTX_EXTRN_SP, RT_UOFFSETOF(CPUMCTX, aSpReg[0].u64) },
231 { HV_SYS_REG_SP_EL1, CPUMCTX_EXTRN_SP, RT_UOFFSETOF(CPUMCTX, aSpReg[1].u64) },
232 { HV_SYS_REG_SPSR_EL1, CPUMCTX_EXTRN_SPSR, RT_UOFFSETOF(CPUMCTX, Spsr.u64) },
233 { HV_SYS_REG_ELR_EL1, CPUMCTX_EXTRN_ELR, RT_UOFFSETOF(CPUMCTX, Elr.u64) },
234 { HV_SYS_REG_SCTLR_EL1, CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Sctlr.u64) },
235 { HV_SYS_REG_TCR_EL1, CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Tcr.u64) },
236 { HV_SYS_REG_TTBR0_EL1, CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Ttbr0.u64) },
237 { HV_SYS_REG_TTBR1_EL1, CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Ttbr1.u64) },
238 { HV_SYS_REG_VBAR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, VBar.u64) },
239};
240/** ID registers. */
241static const struct
242{
243 hv_feature_reg_t enmHvReg;
244 uint32_t offIdStruct;
245} s_aIdRegs[] =
246{
247 { HV_FEATURE_REG_ID_AA64DFR0_EL1, RT_UOFFSETOF(HVIDREGS, u64IdDfReg0El1) },
248 { HV_FEATURE_REG_ID_AA64DFR1_EL1, RT_UOFFSETOF(HVIDREGS, u64IdDfReg1El1) },
249 { HV_FEATURE_REG_ID_AA64ISAR0_EL1, RT_UOFFSETOF(HVIDREGS, u64IdIsaReg0El1) },
250 { HV_FEATURE_REG_ID_AA64ISAR1_EL1, RT_UOFFSETOF(HVIDREGS, u64IdIsaReg1El1) },
251 { HV_FEATURE_REG_ID_AA64MMFR0_EL1, RT_UOFFSETOF(HVIDREGS, u64IdMmfReg0El1) },
252 { HV_FEATURE_REG_ID_AA64MMFR1_EL1, RT_UOFFSETOF(HVIDREGS, u64IdMmfReg1El1) },
253 { HV_FEATURE_REG_ID_AA64MMFR2_EL1, RT_UOFFSETOF(HVIDREGS, u64IdMmfReg2El1) },
254 { HV_FEATURE_REG_ID_AA64PFR0_EL1, RT_UOFFSETOF(HVIDREGS, u64IdPfReg0El1) },
255 { HV_FEATURE_REG_ID_AA64PFR1_EL1, RT_UOFFSETOF(HVIDREGS, u64IdPfReg1El1) },
256 { HV_FEATURE_REG_CLIDR_EL1, RT_UOFFSETOF(HVIDREGS, u64ClidrEl1) },
257 { HV_FEATURE_REG_CTR_EL0, RT_UOFFSETOF(HVIDREGS, u64CtrEl0) },
{ HV_FEATURE_REG_DCZID_EL0, RT_UOFFSETOF(HVIDREGS, u64DczidEl1) },
258};
259
260
261/*********************************************************************************************************************************
262* Internal Functions *
263*********************************************************************************************************************************/
264
265
266/**
267 * Converts a HV return code to a VBox status code.
268 *
269 * @returns VBox status code.
270 * @param hrc The HV return code to convert.
271 */
272DECLINLINE(int) nemR3DarwinHvSts2Rc(hv_return_t hrc)
273{
274 if (hrc == HV_SUCCESS)
275 return VINF_SUCCESS;
276
277 switch (hrc)
278 {
279 case HV_ERROR: return VERR_INVALID_STATE;
280 case HV_BUSY: return VERR_RESOURCE_BUSY;
281 case HV_BAD_ARGUMENT: return VERR_INVALID_PARAMETER;
282 case HV_NO_RESOURCES: return VERR_OUT_OF_RESOURCES;
283 case HV_NO_DEVICE: return VERR_NOT_FOUND;
284 case HV_UNSUPPORTED: return VERR_NOT_SUPPORTED;
285 }
286
287 return VERR_IPE_UNEXPECTED_STATUS;
288}
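/* Typical usage, as seen in the mapping helpers below:
 *     int rc = nemR3DarwinHvSts2Rc(hv_vm_protect(GCPhys, cb, fHvMemProt));
 */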
289
290
291/**
292 * Returns a human readable string of the given exception class.
293 *
294 * @returns Pointer to the string matching the given EC.
295 * @param u32Ec The exception class to return the string for.
296 */
297static const char *nemR3DarwinEsrEl2EcStringify(uint32_t u32Ec)
298{
299 switch (u32Ec)
300 {
301#define ARMV8_EC_CASE(a_Ec) case a_Ec: return #a_Ec
302 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_UNKNOWN);
303 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_TRAPPED_WFX);
304 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCR_MRC_COPROC_15);
305 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCRR_MRRC_COPROC15);
306 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCR_MRC_COPROC_14);
307 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_LDC_STC);
308 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_SME_SVE_NEON);
309 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_VMRS);
310 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_PA_INSN);
311 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_LS64_EXCEPTION);
312 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MRRC_COPROC14);
313 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_BTI_BRANCH_TARGET_EXCEPTION);
314 ARMV8_EC_CASE(ARMV8_ESR_EL2_ILLEGAL_EXECUTION_STATE);
315 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_SVC_INSN);
316 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_HVC_INSN);
317 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_SMC_INSN);
318 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_SVC_INSN);
319 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_HVC_INSN);
320 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_SMC_INSN);
321 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_SYS_INSN);
322 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_SVE_TRAPPED);
323 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_PAUTH_NV_TRAPPED_ERET_ERETAA_ERETAB);
324 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_TME_TSTART_INSN_EXCEPTION);
325 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_FPAC_PA_INSN_FAILURE_EXCEPTION);
326 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_SME_TRAPPED_SME_ACCESS);
327 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_RME_GRANULE_PROT_CHECK_EXCEPTION);
328 ARMV8_EC_CASE(ARMV8_ESR_EL2_INSN_ABORT_FROM_LOWER_EL);
329 ARMV8_EC_CASE(ARMV8_ESR_EL2_INSN_ABORT_FROM_EL2);
330 ARMV8_EC_CASE(ARMV8_ESR_EL2_PC_ALIGNMENT_EXCEPTION);
331 ARMV8_EC_CASE(ARMV8_ESR_EL2_DATA_ABORT_FROM_LOWER_EL);
332 ARMV8_EC_CASE(ARMV8_ESR_EL2_DATA_ABORT_FROM_EL2);
333 ARMV8_EC_CASE(ARMV8_ESR_EL2_SP_ALIGNMENT_EXCEPTION);
334 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_MOPS_EXCEPTION);
335 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_FP_EXCEPTION);
336 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_FP_EXCEPTION);
337 ARMV8_EC_CASE(ARMV8_ESR_EL2_SERROR_INTERRUPT);
338 ARMV8_EC_CASE(ARMV8_ESR_EL2_BKPT_EXCEPTION_FROM_LOWER_EL);
339 ARMV8_EC_CASE(ARMV8_ESR_EL2_BKPT_EXCEPTION_FROM_EL2);
340 ARMV8_EC_CASE(ARMV8_ESR_EL2_SS_EXCEPTION_FROM_LOWER_EL);
341 ARMV8_EC_CASE(ARMV8_ESR_EL2_SS_EXCEPTION_FROM_EL2);
342 ARMV8_EC_CASE(ARMV8_ESR_EL2_WATCHPOINT_EXCEPTION_FROM_LOWER_EL);
343 ARMV8_EC_CASE(ARMV8_ESR_EL2_WATCHPOINT_EXCEPTION_FROM_EL2);
344 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_BKPT_INSN);
345 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_VEC_CATCH_EXCEPTION);
346 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_BRK_INSN);
347#undef ARMV8_EC_CASE
348 default:
349 break;
350 }
351
352 return "<INVALID>";
353}
354
355
356/**
357 * Resolves a NEM page state from the given protection flags.
358 *
359 * @returns NEM page state.
360 * @param fPageProt The page protection flags.
361 */
362DECLINLINE(uint8_t) nemR3DarwinPageStateFromProt(uint32_t fPageProt)
363{
364 switch (fPageProt)
365 {
366 case NEM_PAGE_PROT_NONE:
367 return NEM_DARWIN_PAGE_STATE_UNMAPPED;
368 case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE:
369 return NEM_DARWIN_PAGE_STATE_RX;
370 case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE:
371 return NEM_DARWIN_PAGE_STATE_RW;
372 case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE:
373 return NEM_DARWIN_PAGE_STATE_RWX;
374 default:
375 break;
376 }
377
378 AssertLogRelMsgFailed(("Invalid combination of page protection flags %#x, can't map to page state!\n", fPageProt));
379 return NEM_DARWIN_PAGE_STATE_UNMAPPED;
380}
381
382
383/**
384 * Unmaps the given guest physical address range (page aligned).
385 *
386 * @returns VBox status code.
387 * @param pVM The cross context VM structure.
388 * @param GCPhys The guest physical address to start unmapping at.
389 * @param cb The size of the range to unmap in bytes.
390 * @param pu2State Where to store the new state of the unmapped page, optional.
391 */
392DECLINLINE(int) nemR3DarwinUnmap(PVM pVM, RTGCPHYS GCPhys, size_t cb, uint8_t *pu2State)
393{
394 if (*pu2State <= NEM_DARWIN_PAGE_STATE_UNMAPPED)
395 {
396 Log5(("nemR3DarwinUnmap: %RGp == unmapped\n", GCPhys));
397 *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
398 return VINF_SUCCESS;
399 }
400
401 LogFlowFunc(("Unmapping %RGp LB %zu\n", GCPhys, cb));
402 hv_return_t hrc = hv_vm_unmap(GCPhys, cb);
403 if (RT_LIKELY(hrc == HV_SUCCESS))
404 {
405 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
406 if (pu2State)
407 *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
408 Log5(("nemR3DarwinUnmap: %RGp => unmapped\n", GCPhys));
409 return VINF_SUCCESS;
410 }
411
412 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
413 LogRel(("nemR3DarwinUnmap(%RGp): failed! hrc=%#x\n",
414 GCPhys, hrc));
415 return VERR_NEM_IPE_6;
416}
417
418
419/**
420 * Maps a given guest physical address range backed by the given memory with the given
421 * protection flags.
422 *
423 * @returns VBox status code.
424 * @param pVM The cross context VM structure.
425 * @param GCPhys The guest physical address to start mapping.
426 * @param pvRam The R3 pointer of the memory to back the range with.
427 * @param cb The size of the range, page aligned.
428 * @param fPageProt The page protection flags to use for this range, combination of NEM_PAGE_PROT_XXX
429 * @param pu2State Where to store the state for the new page, optional.
430 */
431DECLINLINE(int) nemR3DarwinMap(PVM pVM, RTGCPHYS GCPhys, const void *pvRam, size_t cb, uint32_t fPageProt, uint8_t *pu2State)
432{
433 LogFlowFunc(("Mapping %RGp LB %zu fProt=%#x\n", GCPhys, cb, fPageProt));
434
435 Assert(fPageProt != NEM_PAGE_PROT_NONE);
436 RT_NOREF(pVM);
437
438 hv_memory_flags_t fHvMemProt = 0;
439 if (fPageProt & NEM_PAGE_PROT_READ)
440 fHvMemProt |= HV_MEMORY_READ;
441 if (fPageProt & NEM_PAGE_PROT_WRITE)
442 fHvMemProt |= HV_MEMORY_WRITE;
443 if (fPageProt & NEM_PAGE_PROT_EXECUTE)
444 fHvMemProt |= HV_MEMORY_EXEC;
445
446 hv_return_t hrc = hv_vm_map((void *)pvRam, GCPhys, cb, fHvMemProt);
447 if (hrc == HV_SUCCESS)
448 {
449 if (pu2State)
450 *pu2State = nemR3DarwinPageStateFromProt(fPageProt);
451 return VINF_SUCCESS;
452 }
453
454 return nemR3DarwinHvSts2Rc(hrc);
455}
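/* Note (informative): hv_vm_map() requires pvRam, GCPhys and cb to be aligned to the
 * host page size (16 KiB on Apple silicon hosts), so callers must pass host-page-aligned
 * ranges here. */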
456
457
458/**
459 * Changes the protection flags for the given guest physical address range.
460 *
461 * @returns VBox status code.
462 * @param GCPhys The guest physical address to start mapping.
463 * @param cb The size of the range, page aligned.
464 * @param fPageProt The page protection flags to use for this range, combination of NEM_PAGE_PROT_XXX
465 * @param pu2State Where to store the state for the new page, optional.
466 */
467DECLINLINE(int) nemR3DarwinProtect(RTGCPHYS GCPhys, size_t cb, uint32_t fPageProt, uint8_t *pu2State)
468{
469 hv_memory_flags_t fHvMemProt = 0;
470 if (fPageProt & NEM_PAGE_PROT_READ)
471 fHvMemProt |= HV_MEMORY_READ;
472 if (fPageProt & NEM_PAGE_PROT_WRITE)
473 fHvMemProt |= HV_MEMORY_WRITE;
474 if (fPageProt & NEM_PAGE_PROT_EXECUTE)
475 fHvMemProt |= HV_MEMORY_EXEC;
476
477 hv_return_t hrc = hv_vm_protect(GCPhys, cb, fHvMemProt);
478 if (hrc == HV_SUCCESS)
479 {
480 if (pu2State)
481 *pu2State = nemR3DarwinPageStateFromProt(fPageProt);
482 return VINF_SUCCESS;
483 }
484
485 LogRel(("nemR3DarwinProtect(%RGp,%zu,%#x): failed! hrc=%#x\n",
486 GCPhys, cb, fPageProt, hrc));
487 return nemR3DarwinHvSts2Rc(hrc);
488}
489
490
491#ifdef LOG_ENABLED
492/**
493 * Logs the current CPU state.
494 */
495static void nemR3DarwinLogState(PVMCC pVM, PVMCPUCC pVCpu)
496{
497 if (LogIs3Enabled())
498 {
499 char szRegs[4096];
500 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
501 "x0=%016VR{x0} x1=%016VR{x1} x2=%016VR{x2} x3=%016VR{x3}\n"
502 "x4=%016VR{x4} x5=%016VR{x5} x6=%016VR{x6} x7=%016VR{x7}\n"
503 "x8=%016VR{x8} x9=%016VR{x9} x10=%016VR{x10} x11=%016VR{x11}\n"
504 "x12=%016VR{x12} x13=%016VR{x13} x14=%016VR{x14} x15=%016VR{x15}\n"
505 "x16=%016VR{x16} x17=%016VR{x17} x18=%016VR{x18} x19=%016VR{x19}\n"
506 "x20=%016VR{x20} x21=%016VR{x21} x22=%016VR{x22} x23=%016VR{x23}\n"
507 "x24=%016VR{x24} x25=%016VR{x25} x26=%016VR{x26} x27=%016VR{x27}\n"
508 "x28=%016VR{x28} x29=%016VR{x29} x30=%016VR{x30}\n"
509 "pc=%016VR{pc} pstate=%016VR{pstate}\n"
510 "sp_el0=%016VR{sp_el0} sp_el1=%016VR{sp_el1} elr_el1=%016VR{elr_el1}\n"
511 "sctlr_el1=%016VR{sctlr_el1} tcr_el1=%016VR{tcr_el1}\n"
512 "ttbr0_el1=%016VR{ttbr0_el1} ttbr1_el1=%016VR{ttbr1_el1}\n"
513 "vbar_el1=%016VR{vbar_el1}\n"
514 );
515 char szInstr[256]; RT_ZERO(szInstr);
516#if 0
517 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
518 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
519 szInstr, sizeof(szInstr), NULL);
520#endif
521 Log3(("%s%s\n", szRegs, szInstr));
522 }
523}
524#endif /* LOG_ENABLED */
525
526
527static int nemR3DarwinCopyStateFromHv(PVMCC pVM, PVMCPUCC pVCpu, uint64_t fWhat)
528{
529 RT_NOREF(pVM);
530
531 hv_return_t hrc = hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CTL_EL0, &pVCpu->cpum.GstCtx.CntvCtlEl0);
532 if (hrc == HV_SUCCESS)
533 hrc = hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CVAL_EL0, &pVCpu->cpum.GstCtx.CntvCValEl0);
534
535 if ( hrc == HV_SUCCESS
536 && (fWhat & (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR)))
537 {
538 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumRegs); i++)
539 {
540 if (s_aCpumRegs[i].fCpumExtrn & fWhat)
541 {
542 uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumRegs[i].offCpumCtx);
543 hrc |= hv_vcpu_get_reg(pVCpu->nem.s.hVCpu, s_aCpumRegs[i].enmHvReg, pu64);
544 }
545 }
546 }
547
548 if ( hrc == HV_SUCCESS
549 && (fWhat & CPUMCTX_EXTRN_V0_V31))
550 {
551 /* SIMD/FP registers. */
552 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumFpRegs); i++)
553 {
554 hv_simd_fp_uchar16_t *pu128 = (hv_simd_fp_uchar16_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumFpRegs[i].offCpumCtx);
555 hrc |= hv_vcpu_get_simd_fp_reg(pVCpu->nem.s.hVCpu, s_aCpumFpRegs[i].enmHvReg, pu128);
556 }
557 }
558
559 if ( hrc == HV_SUCCESS
560 && (fWhat & CPUMCTX_EXTRN_SYSREG_DEBUG))
561 {
562 /* Debug registers. */
563 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumDbgRegs); i++)
564 {
565 uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumDbgRegs[i].offCpumCtx);
566 hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumDbgRegs[i].enmHvReg, pu64);
567 }
568 }
569
570 if ( hrc == HV_SUCCESS
571 && (fWhat & (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR | CPUMCTX_EXTRN_SYSREG_MISC)))
572 {
573 /* System registers. */
574 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegs); i++)
575 {
576 if (s_aCpumSysRegs[i].fCpumExtrn & fWhat)
577 {
578 uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegs[i].offCpumCtx);
579 hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumSysRegs[i].enmHvReg, pu64);
580 }
581 }
582 }
583
584 if ( hrc == HV_SUCCESS
585 && (fWhat & CPUMCTX_EXTRN_PSTATE))
586 {
587 uint64_t u64Tmp;
588 hrc |= hv_vcpu_get_reg(pVCpu->nem.s.hVCpu, HV_REG_CPSR, &u64Tmp);
589 if (hrc == HV_SUCCESS)
590 pVCpu->cpum.GstCtx.fPState = (uint32_t)u64Tmp;
591 }
592
593 /* Almost done, just update extern flags. */
594 pVCpu->cpum.GstCtx.fExtrn &= ~fWhat;
595 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
596 pVCpu->cpum.GstCtx.fExtrn = 0;
597
598 return nemR3DarwinHvSts2Rc(hrc);
599}
600
601
602/**
603 * Exports the guest state to HV for execution.
604 *
605 * @returns VBox status code.
606 * @param pVM The cross context VM structure.
607 * @param pVCpu The cross context virtual CPU structure of the
608 * calling EMT.
609 */
610static int nemR3DarwinExportGuestState(PVMCC pVM, PVMCPUCC pVCpu)
611{
612 RT_NOREF(pVM);
613 hv_return_t hrc = HV_SUCCESS;
614
615 if ( (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR))
616 != (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR))
617 {
618 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumRegs); i++)
619 {
620 if (!(s_aCpumRegs[i].fCpumExtrn & pVCpu->cpum.GstCtx.fExtrn))
621 {
622 uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumRegs[i].offCpumCtx);
623 hrc |= hv_vcpu_set_reg(pVCpu->nem.s.hVCpu, s_aCpumRegs[i].enmHvReg, *pu64);
624 }
625 }
626 }
627
628 if ( hrc == HV_SUCCESS
629 && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_V0_V31))
630 {
631 /* SIMD/FP registers. */
632 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumFpRegs); i++)
633 {
634 hv_simd_fp_uchar16_t *pu128 = (hv_simd_fp_uchar16_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumFpRegs[i].offCpumCtx);
635 hrc |= hv_vcpu_set_simd_fp_reg(pVCpu->nem.s.hVCpu, s_aCpumFpRegs[i].enmHvReg, *pu128);
636 }
637 }
638
639 if ( hrc == HV_SUCCESS
640 && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_SYSREG_DEBUG))
641 {
642 /* Debug registers. */
643 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumDbgRegs); i++)
644 {
645 uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumDbgRegs[i].offCpumCtx);
646 hrc |= hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumDbgRegs[i].enmHvReg, *pu64);
647 }
648 }
649
650 if ( hrc == HV_SUCCESS
651 && (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR | CPUMCTX_EXTRN_SYSREG_MISC))
652 != (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR | CPUMCTX_EXTRN_SYSREG_MISC))
653 {
654 /* System registers. */
655 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegs); i++)
656 {
657 if (!(s_aCpumSysRegs[i].fCpumExtrn & pVCpu->cpum.GstCtx.fExtrn))
658 {
659 uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegs[i].offCpumCtx);
660 hrc |= hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumSysRegs[i].enmHvReg, *pu64);
661 }
662 }
663 }
664
665 if ( hrc == HV_SUCCESS
666 && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_PSTATE))
667 hrc = hv_vcpu_set_reg(pVCpu->nem.s.hVCpu, HV_REG_CPSR, pVCpu->cpum.GstCtx.fPState);
668
669 pVCpu->cpum.GstCtx.fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_NEM;
670 return nemR3DarwinHvSts2Rc(hrc);
671}
672
673
674/**
675 * Try initialize the native API.
676 *
677 * This may only do part of the job, more can be done in
678 * nemR3NativeInitAfterCPUM() and nemR3NativeInitCompleted().
679 *
680 * @returns VBox status code.
681 * @param pVM The cross context VM structure.
682 * @param fFallback Whether we're in fallback mode or use-NEM mode. In
683 * the latter we'll fail if we cannot initialize.
684 * @param fForced Whether the HMForced flag is set and we should
685 * fail if we cannot initialize.
686 */
687int nemR3NativeInit(PVM pVM, bool fFallback, bool fForced)
688{
689 AssertReturn(!pVM->nem.s.fCreatedVm, VERR_WRONG_ORDER);
690
691 /*
692 * Some state init.
693 */
694 PCFGMNODE pCfgNem = CFGMR3GetChild(CFGMR3GetRoot(pVM), "NEM/");
695 RT_NOREF(pCfgNem);
696
697 /*
698 * Error state.
699 * The error message will be non-empty on failure and 'rc' will be set too.
700 */
701 RTERRINFOSTATIC ErrInfo;
702 PRTERRINFO pErrInfo = RTErrInfoInitStatic(&ErrInfo);
703
704 int rc = VINF_SUCCESS;
705 hv_return_t hrc = hv_vm_create(NULL);
706 if (hrc == HV_SUCCESS)
707 {
708 pVM->nem.s.fCreatedVm = true;
709 pVM->nem.s.u64CntFrqHz = ASMReadCntFrqEl0();
710
711 /* Will be initialized in NEMHCResumeCpuTickOnAll() before executing guest code. */
712 pVM->nem.s.u64VTimerOff = 0;
713
714 VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_NATIVE_API);
715 Log(("NEM: Marked active!\n"));
716 PGMR3EnableNemMode(pVM);
717 }
718 else
719 rc = RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
720 "hv_vm_create() failed: %#x", hrc);
721
722 /*
723 * We only fail if in forced mode, otherwise just log the complaint and return.
724 */
725 Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API || RTErrInfoIsSet(pErrInfo));
726 if ( (fForced || !fFallback)
727 && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NATIVE_API)
728 return VMSetError(pVM, RT_SUCCESS_NP(rc) ? VERR_NEM_NOT_AVAILABLE : rc, RT_SRC_POS, "%s", pErrInfo->pszMsg);
729
730 if (RTErrInfoIsSet(pErrInfo))
731 LogRel(("NEM: Not available: %s\n", pErrInfo->pszMsg));
732 return VINF_SUCCESS;
733}
734
735
736/**
737 * Worker to create the vCPU handle on the EMT running it later on (as required by HV).
738 *
739 * @returns VBox status code
740 * @param pVM The VM handle.
741 * @param pVCpu The vCPU handle.
742 * @param idCpu ID of the CPU to create.
743 */
744static DECLCALLBACK(int) nemR3DarwinNativeInitVCpuOnEmt(PVM pVM, PVMCPU pVCpu, VMCPUID idCpu)
745{
746 if (idCpu == 0)
747 {
748 Assert(pVM->nem.s.hVCpuCfg == NULL);
749
750 /* Create a new vCPU config and query the ID registers. */
751 pVM->nem.s.hVCpuCfg = hv_vcpu_config_create();
752 if (!pVM->nem.s.hVCpuCfg)
753 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
754 "Call to hv_vcpu_config_create failed on vCPU %u", idCpu);
755
756 for (uint32_t i = 0; i < RT_ELEMENTS(s_aIdRegs); i++)
757 {
758 uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVM->nem.s.IdRegs + s_aIdRegs[i].offIdStruct);
759 hv_return_t hrc = hv_vcpu_config_get_feature_reg(pVM->nem.s.hVCpuCfg, s_aIdRegs[i].enmHvReg, pu64);
760 if (hrc != HV_SUCCESS)
761 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
762 "Call to hv_vcpu_get_feature_reg(, %#x, ) failed: %#x (%Rrc)", hrc, nemR3DarwinHvSts2Rc(hrc));
763 }
764 }
765
766 hv_return_t hrc = hv_vcpu_create(&pVCpu->nem.s.hVCpu, &pVCpu->nem.s.pHvExit, pVM->nem.s.hVCpuCfg);
767 if (hrc != HV_SUCCESS)
768 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
769 "Call to hv_vcpu_create failed on vCPU %u: %#x (%Rrc)", idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));
770
771 hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_MPIDR_EL1, idCpu);
772 if (hrc != HV_SUCCESS)
773 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
774 "Setting MPIDR_EL1 failed on vCPU %u: %#x (%Rrc)", idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));
775
776 return VINF_SUCCESS;
777}
778
779
780/**
781 * Worker to destroy the vCPU handle on the EMT running it later on (as required by HV).
782 *
783 * @returns VBox status code.
784 * @param pVM The VM handle.
785 * @param pVCpu The vCPU handle.
786 */
787static DECLCALLBACK(int) nemR3DarwinNativeTermVCpuOnEmt(PVM pVM, PVMCPU pVCpu)
788{
789 hv_return_t hrc = hv_vcpu_destroy(pVCpu->nem.s.hVCpu);
790 Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);
791
792 if (pVCpu->idCpu == 0)
793 {
794 os_release(pVM->nem.s.hVCpuCfg);
795 pVM->nem.s.hVCpuCfg = NULL;
796 }
797 return VINF_SUCCESS;
798}
799
800
801/**
802 * This is called after CPUMR3Init is done.
803 *
804 * @returns VBox status code.
805 * @param pVM The VM handle.
806 */
807int nemR3NativeInitAfterCPUM(PVM pVM)
808{
809 /*
810 * Validate sanity.
811 */
812 AssertReturn(!pVM->nem.s.fCreatedEmts, VERR_WRONG_ORDER);
813 AssertReturn(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API, VERR_WRONG_ORDER);
814
815 /*
816 * Setup the EMTs.
817 */
818 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
819 {
820 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
821
822 int rc = VMR3ReqCallWait(pVM, idCpu, (PFNRT)nemR3DarwinNativeInitVCpuOnEmt, 3, pVM, pVCpu, idCpu);
823 if (RT_FAILURE(rc))
824 {
825 /* Rollback. */
826 while (idCpu--)
827 VMR3ReqCallWait(pVM, idCpu, (PFNRT)nemR3DarwinNativeTermVCpuOnEmt, 2, pVM, pVM->apCpusR3[idCpu]);
828
829 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS, "Call to hv_vcpu_create failed: %Rrc", rc);
830 }
831 }
832
833 pVM->nem.s.fCreatedEmts = true;
834 return VINF_SUCCESS;
835}
836
837
838int nemR3NativeInitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
839{
840 RT_NOREF(pVM, enmWhat);
841 return VINF_SUCCESS;
842}
843
844
845int nemR3NativeTerm(PVM pVM)
846{
847 /*
848 * Delete the VM.
849 */
850
851 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
852 {
853 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
854
855 /*
856 * Apple's documentation states that the vCPU should be destroyed
857 * on the thread running the vCPU but as all the other EMTs are gone
858 * at this point, destroying the VM would hang.
859 *
860 * We seem to be in luck here, though, as destroying apparently works
861 * from EMT(0) as well.
862 */
863 hv_return_t hrc = hv_vcpu_destroy(pVCpu->nem.s.hVCpu);
864 Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);
865 }
866
867 pVM->nem.s.fCreatedEmts = false;
868 if (pVM->nem.s.fCreatedVm)
869 {
870 hv_return_t hrc = hv_vm_destroy();
871 if (hrc != HV_SUCCESS)
872 LogRel(("NEM: hv_vm_destroy() failed with %#x\n", hrc));
873
874 pVM->nem.s.fCreatedVm = false;
875 }
876 return VINF_SUCCESS;
877}
878
879
880/**
881 * VM reset notification.
882 *
883 * @param pVM The cross context VM structure.
884 */
885void nemR3NativeReset(PVM pVM)
886{
887 RT_NOREF(pVM);
888}
889
890
891/**
892 * Reset CPU due to INIT IPI or hot (un)plugging.
893 *
894 * @param pVCpu The cross context virtual CPU structure of the CPU being
895 * reset.
896 * @param fInitIpi Whether this is the INIT IPI or hot (un)plugging case.
897 */
898void nemR3NativeResetCpu(PVMCPU pVCpu, bool fInitIpi)
899{
900 RT_NOREF(pVCpu, fInitIpi);
901}
902
903
904/**
905 * Returns the byte size from the given access SAS value.
906 *
907 * @returns Number of bytes to transfer.
908 * @param uSas The SAS value to convert.
909 */
910DECLINLINE(size_t) nemR3DarwinGetByteCountFromSas(uint8_t uSas)
911{
912 switch (uSas)
913 {
914 case ARMV8_EC_ISS_DATA_ABRT_SAS_BYTE: return sizeof(uint8_t);
915 case ARMV8_EC_ISS_DATA_ABRT_SAS_HALFWORD: return sizeof(uint16_t);
916 case ARMV8_EC_ISS_DATA_ABRT_SAS_WORD: return sizeof(uint32_t);
917 case ARMV8_EC_ISS_DATA_ABRT_SAS_DWORD: return sizeof(uint64_t);
918 default:
919 AssertReleaseFailed();
920 }
921
922 return 0;
923}
924
925
926/**
927 * Sets the given general purpose register to the given value.
928 *
929 * @param pVCpu The cross context virtual CPU structure of the
930 * calling EMT.
931 * @param uReg The register index.
932 * @param f64BitReg Flag whether to operate on a 64-bit or 32-bit register.
933 * @param fSignExtend Flag whether to sign extend the value.
934 * @param u64Val The value.
935 */
936DECLINLINE(void) nemR3DarwinSetGReg(PVMCPU pVCpu, uint8_t uReg, bool f64BitReg, bool fSignExtend, uint64_t u64Val)
937{
938 AssertReturnVoid(uReg < 31);
939
940 if (f64BitReg)
941 pVCpu->cpum.GstCtx.aGRegs[uReg].x = fSignExtend ? (int64_t)u64Val : u64Val;
942 else
943 pVCpu->cpum.GstCtx.aGRegs[uReg].w = fSignExtend ? (int32_t)u64Val : u64Val; /** @todo Does this clear the upper half on real hardware? */
944
945 /* Mark the register as not extern anymore. */
946 switch (uReg)
947 {
948 case 0:
949 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X0;
950 break;
951 case 1:
952 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X1;
953 break;
954 case 2:
955 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X2;
956 break;
957 case 3:
958 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X3;
959 break;
960 default:
961 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_X4_X28));
962 /** @todo We need to import all missing registers in order to clear this flag (or just set it in HV from here). */
963 }
964}
965
966
967/**
968 * Gets the given general purpose register and returns the value.
969 *
970 * @returns Value from the given register.
971 * @param pVCpu The cross context virtual CPU structure of the
972 * calling EMT.
973 * @param uReg The register index.
974 */
975DECLINLINE(uint64_t) nemR3DarwinGetGReg(PVMCPU pVCpu, uint8_t uReg)
976{
977 AssertReturn(uReg <= ARMV8_AARCH64_REG_ZR, 0);
978
979 if (uReg == ARMV8_AARCH64_REG_ZR)
980 return 0;
981
982 /** @todo Import the register if extern. */
983 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_GPRS_MASK));
984
985 return pVCpu->cpum.GstCtx.aGRegs[uReg].x;
986}
987
988
989/**
990 * Works on the data abort exception (which will be an MMIO access most of the time).
991 *
992 * @returns VBox strict status code.
993 * @param pVM The cross context VM structure.
994 * @param pVCpu The cross context virtual CPU structure of the
995 * calling EMT.
996 * @param uIss The instruction specific syndrome value.
997 * @param fInsn32Bit Flag whether the exception was caused by a 32-bit or 16-bit instruction.
998 * @param GCPtrDataAbrt The virtual GC address causing the data abort.
999 * @param GCPhysDataAbrt The physical GC address which caused the data abort.
1000 */
1001static VBOXSTRICTRC nemR3DarwinHandleExitExceptionDataAbort(PVM pVM, PVMCPU pVCpu, uint32_t uIss, bool fInsn32Bit,
1002 RTGCPTR GCPtrDataAbrt, RTGCPHYS GCPhysDataAbrt)
1003{
1004 bool fIsv = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_ISV);
1005 bool fL2Fault = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_S1PTW);
1006 bool fWrite = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_WNR);
1007 bool f64BitReg = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SF);
1008 bool fSignExtend = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SSE);
1009 uint8_t uReg = ARMV8_EC_ISS_DATA_ABRT_SRT_GET(uIss);
1010 uint8_t uAcc = ARMV8_EC_ISS_DATA_ABRT_SAS_GET(uIss);
1011 size_t cbAcc = nemR3DarwinGetByteCountFromSas(uAcc);
1012 LogFlowFunc(("fIsv=%RTbool fL2Fault=%RTbool fWrite=%RTbool f64BitReg=%RTbool fSignExtend=%RTbool uReg=%u uAcc=%u GCPtrDataAbrt=%RGv GCPhysDataAbrt=%RGp\n",
1013 fIsv, fL2Fault, fWrite, f64BitReg, fSignExtend, uReg, uAcc, GCPtrDataAbrt, GCPhysDataAbrt));
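 /* Illustrative decode: a faulting 32-bit "str w1, [x0]" with a valid syndrome (fIsv=true)
  * yields uAcc=word (cbAcc=4), uReg=1, f64BitReg=false and fWrite=true. */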
1014
1015 RT_NOREF(fL2Fault, GCPtrDataAbrt);
1016
1017 if (fWrite)
1018 {
1019 /*
1020 * Check whether this is one of the dirty tracked regions, mark it as dirty
1021 * and enable write support for this region again.
1022 *
1023 * This is required for proper VRAM tracking or the display might not get updated
1024 * and it is impossible to use the generic PGM facility, as it operates on guest page sizes
1025 * while setting protection flags with Hypervisor.framework works only on host-page-sized regions, so
1026 * we have to cook our own. Additionally, the VRAM region is marked as prefetchable (write-back),
1027 * which doesn't produce a valid instruction syndrome, requiring the instruction to be restarted after enabling
1028 * write access again (due to a missing interpreter right now).
1029 */
1030 for (uint32_t idSlot = 0; idSlot < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking); idSlot++)
1031 {
1032 PNEMHVMMIO2REGION pMmio2Region = &pVM->nem.s.aMmio2DirtyTracking[idSlot];
1033
1034 if ( GCPhysDataAbrt >= pMmio2Region->GCPhysStart
1035 && GCPhysDataAbrt <= pMmio2Region->GCPhysLast)
1036 {
1037 pMmio2Region->fDirty = true;
1038
1039 uint8_t u2State;
1040 int rc = nemR3DarwinProtect(pMmio2Region->GCPhysStart, pMmio2Region->GCPhysLast - pMmio2Region->GCPhysStart + 1,
1041 NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE | NEM_PAGE_PROT_WRITE, &u2State);
1042
1043 /* Restart the instruction if there is no instruction syndrome available. */
1044 if (RT_FAILURE(rc) || !fIsv)
1045 return rc;
1046 }
1047 }
1048 }
1049
1050 AssertReturn(fIsv, VERR_NOT_SUPPORTED); /** @todo Implement using IEM when this should occur. */
1051
1052 EMHistoryAddExit(pVCpu,
1053 fWrite
1054 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
1055 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
1056 pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());
1057
1058 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1059 uint64_t u64Val = 0;
1060 if (fWrite)
1061 {
1062 u64Val = nemR3DarwinGetGReg(pVCpu, uReg);
1063 rcStrict = PGMPhysWrite(pVM, GCPhysDataAbrt, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
1064 Log4(("MmioExit/%u: %08RX64: WRITE %#x LB %u, %.*Rhxs -> rcStrict=%Rrc\n",
1065 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhysDataAbrt, cbAcc, cbAcc,
1066 &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
1067 }
1068 else
1069 {
1070 rcStrict = PGMPhysRead(pVM, GCPhysDataAbrt, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
1071 Log4(("MmioExit/%u: %08RX64: READ %#x LB %u -> %.*Rhxs rcStrict=%Rrc\n",
1072 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhysDataAbrt, cbAcc, cbAcc,
1073 &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
1074 if (rcStrict == VINF_SUCCESS)
1075 nemR3DarwinSetGReg(pVCpu, uReg, f64BitReg, fSignExtend, u64Val);
1076 }
1077
1078 if (rcStrict == VINF_SUCCESS)
1079 pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);
1080
1081 return rcStrict;
1082}
1083
1084
1085/**
1086 * Works on the trapped MRS, MSR and system instruction exception.
1087 *
1088 * @returns VBox strict status code.
1089 * @param pVM The cross context VM structure.
1090 * @param pVCpu The cross context virtual CPU structure of the
1091 * calling EMT.
1092 * @param uIss The instruction specific syndrome value.
1093 * @param fInsn32Bit Flag whether the exception was caused by a 32-bit or 16-bit instruction.
1094 */
1095static VBOXSTRICTRC nemR3DarwinHandleExitExceptionTrappedSysInsn(PVM pVM, PVMCPU pVCpu, uint32_t uIss, bool fInsn32Bit)
1096{
1097 bool fRead = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_DIRECTION_IS_READ(uIss);
1098 uint8_t uCRm = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_CRM_GET(uIss);
1099 uint8_t uReg = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_RT_GET(uIss);
1100 uint8_t uCRn = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_CRN_GET(uIss);
1101 uint8_t uOp1 = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP1_GET(uIss);
1102 uint8_t uOp2 = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP2_GET(uIss);
1103 uint8_t uOp0 = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP0_GET(uIss);
1104 uint16_t idSysReg = ARMV8_AARCH64_SYSREG_ID_CREATE(uOp0, uOp1, uCRn, uCRm, uOp2);
1105 LogFlowFunc(("fRead=%RTbool uCRm=%u uReg=%u uCRn=%u uOp1=%u uOp2=%u uOp0=%u idSysReg=%#x\n",
1106 fRead, uCRm, uReg, uCRn, uOp1, uOp2, uOp0, idSysReg));
1107
1108 /** @todo EMEXITTYPE_MSR_READ/EMEXITTYPE_MSR_WRITE are misnomers. */
1109 EMHistoryAddExit(pVCpu,
1110 fRead
1111 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ)
1112 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE),
1113 pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());
1114
1115 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1116 uint64_t u64Val = 0;
1117 if (fRead)
1118 {
1119 RT_NOREF(pVM);
1120 rcStrict = CPUMQueryGuestSysReg(pVCpu, idSysReg, &u64Val);
1121 Log4(("SysInsnExit/%u: %08RX64: READ %u:%u:%u:%u:%u -> %#RX64 rcStrict=%Rrc\n",
1122 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, uOp0, uOp1, uCRn, uCRm, uOp2, u64Val,
1123 VBOXSTRICTRC_VAL(rcStrict) ));
1124 if (rcStrict == VINF_SUCCESS)
1125 nemR3DarwinSetGReg(pVCpu, uReg, true /*f64BitReg*/, false /*fSignExtend*/, u64Val);
1126 }
1127 else
1128 {
1129 u64Val = nemR3DarwinGetGReg(pVCpu, uReg);
1130 rcStrict = CPUMSetGuestSysReg(pVCpu, idSysReg, u64Val);
1131 Log4(("SysInsnExit/%u: %08RX64: WRITE %u:%u:%u:%u:%u %#RX64 -> rcStrict=%Rrc\n",
1132 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, uOp0, uOp1, uCRn, uCRm, uOp2, u64Val,
1133 VBOXSTRICTRC_VAL(rcStrict) ));
1134 }
1135
1136 if (rcStrict == VINF_SUCCESS)
1137 pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);
1138
1139 return rcStrict;
1140}
1141
1142
1143/**
1144 * Works on the trapped HVC instruction exception.
1145 *
1146 * @returns VBox strict status code.
1147 * @param pVM The cross context VM structure.
1148 * @param pVCpu The cross context virtual CPU structure of the
1149 * calling EMT.
1150 * @param uIss The instruction specific syndrome value.
1151 */
1152static VBOXSTRICTRC nemR3DarwinHandleExitExceptionTrappedHvcInsn(PVM pVM, PVMCPU pVCpu, uint32_t uIss)
1153{
1154 uint16_t u16Imm = ARMV8_EC_ISS_AARCH64_TRAPPED_HVC_INSN_IMM_GET(uIss);
1155 LogFlowFunc(("u16Imm=%#RX16\n", u16Imm));
1156
1157#if 0 /** @todo For later */
1158 EMHistoryAddExit(pVCpu,
1159 fRead
1160 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ)
1161 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE),
1162 pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());
1163#endif
1164
1165 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1166 if (u16Imm == 0)
1167 {
1168 /** @todo Raise exception to EL1 if PSCI not configured. */
1169 /** @todo Need a generic mechanism here to pass this to, GIM maybe?. */
1170 uint32_t uFunId = pVCpu->cpum.GstCtx.aGRegs[ARMV8_AARCH64_REG_X0].w;
1171 bool fHvc64 = RT_BOOL(uFunId & ARM_SMCCC_FUNC_ID_64BIT); RT_NOREF(fHvc64);
1172 uint32_t uEntity = ARM_SMCCC_FUNC_ID_ENTITY_GET(uFunId);
1173 uint32_t uFunNum = ARM_SMCCC_FUNC_ID_NUM_GET(uFunId);
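 /* Informative: SMCCC function IDs encode bit 31 = fast call, bit 30 = 64-bit convention,
  * bits 29:24 = owning entity (4 = standard secure services, i.e. PSCI) and bits 15:0 = the
  * function number; e.g. PSCI_VERSION is the fast 32-bit call 0x84000000. */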
1174 if (uEntity == ARM_SMCCC_FUNC_ID_ENTITY_STD_SEC_SERVICE)
1175 {
1176 switch (uFunNum)
1177 {
1178 case ARM_PSCI_FUNC_ID_PSCI_VERSION:
1179 nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_FUNC_ID_PSCI_VERSION_SET(1, 2));
1180 break;
1181 case ARM_PSCI_FUNC_ID_SYSTEM_OFF:
1182 rcStrict = VMR3PowerOff(pVM->pUVM);
1183 break;
1184 case ARM_PSCI_FUNC_ID_SYSTEM_RESET:
1185 case ARM_PSCI_FUNC_ID_SYSTEM_RESET2:
1186 {
1187 bool fHaltOnReset;
1188 int rc = CFGMR3QueryBool(CFGMR3GetChild(CFGMR3GetRoot(pVM), "PDM"), "HaltOnReset", &fHaltOnReset);
1189 if (RT_SUCCESS(rc) && fHaltOnReset)
1190 {
1191 Log(("nemR3DarwinHandleExitExceptionTrappedHvcInsn: Halt On Reset!\n"));
1192 rcStrict = VINF_EM_HALT;
1193 }
1194 else
1195 {
1196 /** @todo pVM->pdm.s.fResetFlags = fFlags; */
1197 VM_FF_SET(pVM, VM_FF_RESET);
1198 rcStrict = VINF_EM_RESET;
1199 }
1200 break;
1201 }
1202 case ARM_PSCI_FUNC_ID_CPU_ON:
1203 {
1204 uint64_t u64TgtCpu = nemR3DarwinGetGReg(pVCpu, ARMV8_AARCH64_REG_X1);
1205 RTGCPHYS GCPhysExecAddr = nemR3DarwinGetGReg(pVCpu, ARMV8_AARCH64_REG_X2);
1206 uint64_t u64CtxId = nemR3DarwinGetGReg(pVCpu, ARMV8_AARCH64_REG_X3);
1207 VMMR3CpuOn(pVM, u64TgtCpu & 0xff, GCPhysExecAddr, u64CtxId);
1208 nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, true /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_STS_SUCCESS);
1209 break;
1210 }
1211 case ARM_PSCI_FUNC_ID_PSCI_FEATURES:
1212 {
1213 uint32_t u32FunNum = (uint32_t)nemR3DarwinGetGReg(pVCpu, ARMV8_AARCH64_REG_X1);
1214 switch (u32FunNum)
1215 {
1216 case ARM_PSCI_FUNC_ID_PSCI_VERSION:
1217 case ARM_PSCI_FUNC_ID_SYSTEM_OFF:
1218 case ARM_PSCI_FUNC_ID_SYSTEM_RESET:
1219 case ARM_PSCI_FUNC_ID_SYSTEM_RESET2:
1220 case ARM_PSCI_FUNC_ID_CPU_ON:
1221 nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0,
1222 false /*f64BitReg*/, false /*fSignExtend*/,
1223 (uint64_t)ARM_PSCI_STS_SUCCESS);
1224 break;
1225 default:
1226 nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0,
1227 false /*f64BitReg*/, false /*fSignExtend*/,
1228 (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
1229 }
break;
1230 }
1231 default:
1232 nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
1233 }
1234 }
1235 else
1236 nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
1237 }
1238 /** @todo What to do if immediate is != 0? */
1239
1240 return rcStrict;
1241}
1242
1243
1244/**
1245 * Handles an exception VM exit.
1246 *
1247 * @returns VBox strict status code.
1248 * @param pVM The cross context VM structure.
1249 * @param pVCpu The cross context virtual CPU structure of the
1250 * calling EMT.
1251 * @param pExit Pointer to the exit information.
1252 */
1253static VBOXSTRICTRC nemR3DarwinHandleExitException(PVM pVM, PVMCPU pVCpu, const hv_vcpu_exit_t *pExit)
1254{
1255 uint32_t uEc = ARMV8_ESR_EL2_EC_GET(pExit->exception.syndrome);
1256 uint32_t uIss = ARMV8_ESR_EL2_ISS_GET(pExit->exception.syndrome);
1257 bool fInsn32Bit = ARMV8_ESR_EL2_IL_IS_32BIT(pExit->exception.syndrome);
1258
1259 LogFlowFunc(("pVM=%p pVCpu=%p{.idCpu=%u} uEc=%u{%s} uIss=%#RX32 fInsn32Bit=%RTbool\n",
1260 pVM, pVCpu, pVCpu->idCpu, uEc, nemR3DarwinEsrEl2EcStringify(uEc), uIss, fInsn32Bit));
1261
1262 switch (uEc)
1263 {
1264 case ARMV8_ESR_EL2_DATA_ABORT_FROM_LOWER_EL:
1265 return nemR3DarwinHandleExitExceptionDataAbort(pVM, pVCpu, uIss, fInsn32Bit, pExit->exception.virtual_address,
1266 pExit->exception.physical_address);
1267 case ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_SYS_INSN:
1268 return nemR3DarwinHandleExitExceptionTrappedSysInsn(pVM, pVCpu, uIss, fInsn32Bit);
1269 case ARMV8_ESR_EL2_EC_AARCH64_HVC_INSN:
1270 return nemR3DarwinHandleExitExceptionTrappedHvcInsn(pVM, pVCpu, uIss);
1271 case ARMV8_ESR_EL2_EC_TRAPPED_WFX:
1272 {
1273 /* No need to halt if there is an interrupt pending already. */
1274 if (VMCPU_FF_IS_ANY_SET(pVCpu, (VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ)))
1275 return VINF_SUCCESS;
1276
1277 /* Set the vTimer expiration in order to get out of the halt at the right point in time. */
1278 if ( (pVCpu->cpum.GstCtx.CntvCtlEl0 & ARMV8_CNTV_CTL_EL0_AARCH64_ENABLE)
1279 && !(pVCpu->cpum.GstCtx.CntvCtlEl0 & ARMV8_CNTV_CTL_EL0_AARCH64_IMASK))
1280 {
1281 uint64_t cTicksVTimer = mach_absolute_time() - pVM->nem.s.u64VTimerOff;
1282
1283 /* Check whether it expired and start executing guest code. */
1284 if (cTicksVTimer >= pVCpu->cpum.GstCtx.CntvCValEl0)
1285 return VINF_SUCCESS;
1286
1287 uint64_t cTicksVTimerToExpire = pVCpu->cpum.GstCtx.CntvCValEl0 - cTicksVTimer;
1288 uint64_t cNanoSecsVTimerToExpire = ASMMultU64ByU32DivByU32(cTicksVTimerToExpire, RT_NS_1SEC, (uint32_t)pVM->nem.s.u64CntFrqHz);
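 /* Informative: this computes cNanoSecsVTimerToExpire = cTicksVTimerToExpire * 10^9 / u64CntFrqHz;
  * with the 24 MHz counter typical on Apple silicon, 48000 remaining ticks equal 2 ms,
  * exactly the halt threshold checked below. */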
1289
1290 /*
1291 * Our halt method doesn't work with sub millisecond granularity at the moment causing a huge slowdown
1292 * + scheduling overhead which would increase the wakeup latency.
1293 * So only halt when the threshold is exceeded (needs more experimentation but 2 ms turned out to be a good compromise
1294 * between CPU load when the guest is idle and performance).
1295 */
1296 if (cNanoSecsVTimerToExpire < 2 * RT_NS_1MS)
1297 return VINF_SUCCESS;
1298
1299 LogFlowFunc(("Set vTimer activation to cNanoSecsVTimerToExpire=%#RX64 (CntvCValEl0=%#RX64, u64VTimerOff=%#RX64 cTicksVTimer=%#RX64 u64CntFrqHz=%#RX64)\n",
1300 cNanoSecsVTimerToExpire, pVCpu->cpum.GstCtx.CntvCValEl0, pVM->nem.s.u64VTimerOff, cTicksVTimer, pVM->nem.s.u64CntFrqHz));
1301 TMCpuSetVTimerNextActivation(pVCpu, cNanoSecsVTimerToExpire);
1302 }
1303 else
1304 TMCpuSetVTimerNextActivation(pVCpu, UINT64_MAX);
1305
1306 return VINF_EM_HALT;
1307 }
1308 case ARMV8_ESR_EL2_EC_UNKNOWN:
1309 default:
1310 LogRel(("NEM/Darwin: Unknown Exception Class in syndrome: uEc=%u{%s} uIss=%#RX32 fInsn32Bit=%RTbool\n",
1311 uEc, nemR3DarwinEsrEl2EcStringify(uEc), uIss, fInsn32Bit));
1312 AssertReleaseFailed();
1313 return VERR_NOT_IMPLEMENTED;
1314 }
1315
1316 return VINF_SUCCESS;
1317}
1318
1319
1320/**
1321 * Handles an exit from hv_vcpu_run().
1322 *
1323 * @returns VBox strict status code.
1324 * @param pVM The cross context VM structure.
1325 * @param pVCpu The cross context virtual CPU structure of the
1326 * calling EMT.
1327 */
1328static VBOXSTRICTRC nemR3DarwinHandleExit(PVM pVM, PVMCPU pVCpu)
1329{
1330 int rc = nemR3DarwinCopyStateFromHv(pVM, pVCpu, CPUMCTX_EXTRN_ALL);
1331 if (RT_FAILURE(rc))
1332 return rc;
1333
1334#ifdef LOG_ENABLED
1335 if (LogIs3Enabled())
1336 nemR3DarwinLogState(pVM, pVCpu);
1337#endif
1338
1339 hv_vcpu_exit_t *pExit = pVCpu->nem.s.pHvExit;
1340 switch (pExit->reason)
1341 {
1342 case HV_EXIT_REASON_CANCELED:
1343 return VINF_EM_RAW_INTERRUPT;
1344 case HV_EXIT_REASON_EXCEPTION:
1345 return nemR3DarwinHandleExitException(pVM, pVCpu, pExit);
1346 case HV_EXIT_REASON_VTIMER_ACTIVATED:
1347 {
1348 LogFlowFunc(("vTimer got activated\n"));
1349 TMCpuSetVTimerNextActivation(pVCpu, UINT64_MAX);
1350 pVCpu->nem.s.fVTimerActivated = true;
1351 return GICPpiSet(pVCpu, NEM_DARWIN_VTIMER_GIC_PPI_IRQ, true /*fAsserted*/);
1352 }
1353 default:
1354 AssertReleaseFailed();
1355 break;
1356 }
1357
1358 return VERR_INVALID_STATE;
1359}
1360
1361
1362/**
1363 * Runs the guest once until an exit occurs.
1364 *
1365 * @returns HV status code.
1366 * @param pVM The cross context VM structure.
1367 * @param pVCpu The cross context virtual CPU structure.
1368 */
1369static hv_return_t nemR3DarwinRunGuest(PVM pVM, PVMCPU pVCpu)
1370{
1371 TMNotifyStartOfExecution(pVM, pVCpu);
1372
1373 hv_return_t hrc = hv_vcpu_run(pVCpu->nem.s.hVCpu);
1374
1375 TMNotifyEndOfExecution(pVM, pVCpu, ASMReadTSC());
1376
1377 return hrc;
1378}
1379
1380
1381/**
1382 * Prepares the VM to run the guest.
1383 *
1384 * @returns Strict VBox status code.
1385 * @param pVM The cross context VM structure.
1386 * @param pVCpu The cross context virtual CPU structure.
1387 * @param fSingleStepping Flag whether we run in single stepping mode.
1388 */
1389static VBOXSTRICTRC nemR3DarwinPreRunGuest(PVM pVM, PVMCPU pVCpu, bool fSingleStepping)
1390{
1391#ifdef LOG_ENABLED
1392 bool fIrq = false;
1393 bool fFiq = false;
1394
1395 if (LogIs3Enabled())
1396 nemR3DarwinLogState(pVM, pVCpu);
1397#endif
1398
1399 /** @todo */ RT_NOREF(fSingleStepping);
1400 int rc = nemR3DarwinExportGuestState(pVM, pVCpu);
1401 AssertRCReturn(rc, rc);
1402
1403 /* Check whether the vTimer interrupt was handled by the guest and we can unmask the vTimer. */
1404 if (pVCpu->nem.s.fVTimerActivated)
1405 {
1406 /* Read the CNTV_CTL_EL0 register. */
1407 uint64_t u64CntvCtl = 0;
1408
1409 hv_return_t hrc = hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CTL_EL0, &u64CntvCtl);
1410 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
1411
1412 if ( (u64CntvCtl & (ARMV8_CNTV_CTL_EL0_AARCH64_ENABLE | ARMV8_CNTV_CTL_EL0_AARCH64_IMASK | ARMV8_CNTV_CTL_EL0_AARCH64_ISTATUS))
1413 != (ARMV8_CNTV_CTL_EL0_AARCH64_ENABLE | ARMV8_CNTV_CTL_EL0_AARCH64_ISTATUS))
1414 {
1415 /* Clear the interrupt. */
1416 GICPpiSet(pVCpu, NEM_DARWIN_VTIMER_GIC_PPI_IRQ, false /*fAsserted*/);
1417
1418 pVCpu->nem.s.fVTimerActivated = false;
1419 hrc = hv_vcpu_set_vtimer_mask(pVCpu->nem.s.hVCpu, false /*vtimer_is_masked*/);
1420 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
1421 }
1422 }
1423
1424 /* Set the pending interrupt state. */
1425 hv_return_t hrc = HV_SUCCESS;
1426 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ))
1427 {
1428 hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_IRQ, true);
1429 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
1430#ifdef LOG_ENABLED
1431 fIrq = true;
1432#endif
1433 }
1434 else
1435 {
1436 hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_IRQ, false);
1437 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
1438 }
1439
1440 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_FIQ))
1441 {
1442 hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_FIQ, true);
1443 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
1444#ifdef LOG_ENABLED
1445 fFiq = true;
1446#endif
1447 }
1448 else
1449 {
1450 hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_FIQ, false);
1451 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
1452 }
1453
1454 LogFlowFunc(("Running vCPU [%s,%s]\n", fIrq ? "I" : "nI", fFiq ? "F" : "nF"));
1455 pVCpu->nem.s.fEventPending = false;
1456 return VINF_SUCCESS;
1457}
1458
1459
1460/**
1461 * The normal runloop (no debugging features enabled).
1462 *
1463 * @returns Strict VBox status code.
1464 * @param pVM The cross context VM structure.
1465 * @param pVCpu The cross context virtual CPU structure.
1466 */
1467static VBOXSTRICTRC nemR3DarwinRunGuestNormal(PVM pVM, PVMCPU pVCpu)
1468{
1469 /*
1470 * The run loop.
1471 *
1472 * Current approach to state updating to use the sledgehammer and sync
1473 * everything every time. This will be optimized later.
1474 */
1475
1476 /* Update the vTimer offset after resuming if instructed. */
1477 if (pVCpu->nem.s.fVTimerOffUpdate)
1478 {
1479 hv_return_t hrc = hv_vcpu_set_vtimer_offset(pVCpu->nem.s.hVCpu, pVM->nem.s.u64VTimerOff);
1480 if (hrc != HV_SUCCESS)
1481 return nemR3DarwinHvSts2Rc(hrc);
1482
1483 pVCpu->nem.s.fVTimerOffUpdate = false;
1484 }
1485
1486 /*
1487 * Poll timers and run for a bit.
1488 */
1489 /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
1490 * the whole polling job when timers have changed... */
1491 uint64_t offDeltaIgnored;
1492 uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
1493 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1494 for (unsigned iLoop = 0;; iLoop++)
1495 {
1496 rcStrict = nemR3DarwinPreRunGuest(pVM, pVCpu, false /* fSingleStepping */);
1497 if (rcStrict != VINF_SUCCESS)
1498 break;
1499
1500 hv_return_t hrc = nemR3DarwinRunGuest(pVM, pVCpu);
1501 if (hrc == HV_SUCCESS)
1502 {
1503 /*
1504 * Deal with the message.
1505 */
1506 rcStrict = nemR3DarwinHandleExit(pVM, pVCpu);
1507 if (rcStrict == VINF_SUCCESS)
1508 { /* hopefully likely */ }
1509 else
1510 {
1511 LogFlow(("NEM/%u: breaking: nemR3DarwinHandleExit -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
1512 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
1513 break;
1514 }
1515 }
1516 else
1517 {
1518 AssertLogRelMsgFailedReturn(("hv_vcpu_run() failed for CPU #%u: %#x\n",
1519 pVCpu->idCpu, hrc), VERR_NEM_IPE_0);
1520 }
1521 } /* the run loop */
1522
1523 return rcStrict;
1524}
1525
1526
VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu)
{
#ifdef LOG_ENABLED
    if (LogIs3Enabled())
        nemR3DarwinLogState(pVM, pVCpu);
#endif

    AssertReturn(NEMR3CanExecuteGuest(pVM, pVCpu), VERR_NEM_IPE_9);

    /*
     * Try switch to NEM runloop state.
     */
    if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
    { /* likely */ }
    else
    {
        VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
        LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
        return VINF_SUCCESS;
    }

    VBOXSTRICTRC rcStrict;
#if 0
    if (   !pVCpu->nem.s.fUseDebugLoop
        && !nemR3DarwinAnyExpensiveProbesEnabled()
        && !DBGFIsStepping(pVCpu)
        && !pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledInt3Breakpoints)
#endif
        rcStrict = nemR3DarwinRunGuestNormal(pVM, pVCpu);
#if 0
    else
        rcStrict = nemR3DarwinRunGuestDebug(pVM, pVCpu);
#endif

    if (rcStrict == VINF_EM_RAW_TO_R3)
        rcStrict = VINF_SUCCESS;

    /*
     * Convert any pending HM events back to TRPM due to premature exits.
     *
     * This is because execution may continue from IEM and we would need to inject
     * the event from there (hence place it back in TRPM).
     */
    if (pVCpu->nem.s.fEventPending)
    {
        /** @todo */
    }


    if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
        VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);

    if (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL))
    {
        /* Try to anticipate what we might need. */
        uint64_t fImport = NEM_DARWIN_CPUMCTX_EXTRN_MASK_FOR_IEM;
        if (   (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
            || RT_FAILURE(rcStrict))
            fImport = CPUMCTX_EXTRN_ALL;
        else if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ
                                          | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
            fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;

        if (pVCpu->cpum.GstCtx.fExtrn & fImport)
        {
            /* Only import what is external currently. */
            int rc2 = nemR3DarwinCopyStateFromHv(pVM, pVCpu, fImport);
            if (RT_SUCCESS(rc2))
                pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
            else if (RT_SUCCESS(rcStrict))
                rcStrict = rc2;
            if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
                pVCpu->cpum.GstCtx.fExtrn = 0;
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
        }
        else
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
    }
    else
    {
        STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
        pVCpu->cpum.GstCtx.fExtrn = 0;
    }

    return rcStrict;
}


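/**
 * Checks whether NEM can execute the guest in its current state.
 *
 * @returns true if NEM can execute the guest, false if not.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */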
VMMR3_INT_DECL(bool) NEMR3CanExecuteGuest(PVM pVM, PVMCPU pVCpu)
{
    RT_NOREF(pVM, pVCpu);
    return true; /** @todo Are there any cases where we have to emulate? */
}


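/**
 * Enables or disables single-instruction execution for the given vCPU.
 *
 * @returns The previous setting.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   fEnable Whether to enable single-instruction execution.
 */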
bool nemR3NativeSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
{
    VMCPU_ASSERT_EMT(pVCpu);
    bool fOld = pVCpu->nem.s.fSingleInstruction;
    pVCpu->nem.s.fSingleInstruction = fEnable;
    pVCpu->nem.s.fUseDebugLoop = fEnable || pVM->nem.s.fUseDebugLoop;
    return fOld;
}


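/**
 * Forces the vCPU out of hv_vcpu_run() so pending force flags can be
 * processed on the EMT.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   fFlags  Notification flags (unused by this backend).
 */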
void nemR3NativeNotifyFF(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
{
    LogFlowFunc(("pVM=%p pVCpu=%p fFlags=%#x\n", pVM, pVCpu, fFlags));

    RT_NOREF(pVM, fFlags);

    hv_return_t hrc = hv_vcpus_exit(&pVCpu->nem.s.hVCpu, 1);
    if (hrc != HV_SUCCESS)
        LogRel(("NEM: hv_vcpus_exit(%u, 1) failed with %#x\n", pVCpu->nem.s.hVCpu, hrc));
}


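/**
 * Notification that the debug event configuration changed; decides whether
 * the debug runloop should be used VM-wide (not implemented yet for this
 * backend).
 *
 * @returns Whether to use the debug loop.
 * @param   pVM             The cross context VM structure.
 * @param   fUseDebugLoop   Whether the debug loop is requested.
 */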
DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChanged(PVM pVM, bool fUseDebugLoop)
{
    RT_NOREF(pVM, fUseDebugLoop);
    //AssertReleaseFailed();
    return false;
}


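/**
 * Per-vCPU variant of nemR3NativeNotifyDebugEventChanged.
 *
 * @returns Whether to use the debug loop for this vCPU.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   fUseDebugLoop   The current debug loop setting.
 */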
DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu, bool fUseDebugLoop)
{
    RT_NOREF(pVM, pVCpu, fUseDebugLoop);
    return fUseDebugLoop;
}


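/**
 * Notification that a RAM range has been registered; maps it into the guest
 * physical address space with full (RWX) access.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The start of the RAM range.
 * @param   cb          The size of the RAM range.
 * @param   pvR3        The ring-3 mapping of the RAM, NULL if not available.
 * @param   pu2State    Where to track the NEM page state.
 * @param   puNemRange  Where to track backend specific range data (unused).
 */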
VMMR3_INT_DECL(int) NEMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvR3,
                                               uint8_t *pu2State, uint32_t *puNemRange)
{
    RT_NOREF(pVM, puNemRange);

    Log5(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p\n", GCPhys, cb, pvR3));
#if defined(VBOX_WITH_PGM_NEM_MODE)
    if (pvR3)
    {
        int rc = nemR3DarwinMap(pVM, GCPhys, pvR3, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
        if (RT_FAILURE(rc))
        {
            LogRel(("NEMR3NotifyPhysRamRegister: GCPhys=%RGp LB %RGp pvR3=%p rc=%Rrc\n", GCPhys, cb, pvR3, rc));
            return VERR_NEM_MAP_PAGES_FAILED;
        }
    }
    return VINF_SUCCESS;
#else
    RT_NOREF(pVM, GCPhys, cb, pvR3);
    return VERR_NEM_MAP_PAGES_FAILED;
#endif
}


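/**
 * Returns whether dirty page tracking for MMIO2 ranges is supported; this
 * backend implements it itself via write protection (see below).
 *
 * @returns true (always supported).
 * @param   pVM     The cross context VM structure.
 */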
VMMR3_INT_DECL(bool) NEMR3IsMmio2DirtyPageTrackingSupported(PVM pVM)
{
    RT_NOREF(pVM);
    return true;
}


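/**
 * Early notification of an MMIO/MMIO2 mapping: unmaps any RAM being replaced
 * and maps the MMIO2 backing, setting up dirty page tracking if requested.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The start of the range.
 * @param   cb          The size of the range.
 * @param   fFlags      NEM_NOTIFY_PHYS_MMIO_EX_F_XXX.
 * @param   pvRam       The RAM being replaced, if any.
 * @param   pvMmio2     The ring-3 mapping of the MMIO2 memory, if any.
 * @param   pu2State    Where to track the NEM page state.
 * @param   puNemRange  Where to store the dirty tracking slot index.
 */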
VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
                                                  void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
{
    RT_NOREF(pvRam);

    Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p (%d)\n",
          GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, *pu2State));

#if defined(VBOX_WITH_PGM_NEM_MODE)
    /*
     * Unmap the RAM we're replacing.
     */
    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
    {
        int rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
        if (RT_SUCCESS(rc))
        { /* likely */ }
        else if (pvMmio2)
            LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc (ignored)\n",
                    GCPhys, cb, fFlags, rc));
        else
        {
            LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc\n",
                    GCPhys, cb, fFlags, rc));
            return VERR_NEM_UNMAP_PAGES_FAILED;
        }
    }

    /*
     * Map MMIO2 if any.
     */
    if (pvMmio2)
    {
        Assert(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2);

        /* We need to set up our own dirty tracking because Hypervisor.framework only works on host-page-size aligned regions. */
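        /* The scheme, as far as this file shows it: the region is mapped
           read+execute only, so the first guest write traps; the exit handler
           marks the tracking slot dirty, and NEMR3PhysMmio2QueryAndResetDirtyBitmap
           below reports the range dirty and restores the read+execute protection. */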
        uint32_t fProt = NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
        if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES)
        {
            /* Find a slot for dirty tracking. */
            PNEMHVMMIO2REGION pMmio2Region = NULL;
            uint32_t idSlot;
            for (idSlot = 0; idSlot < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking); idSlot++)
            {
                if (   pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysStart == 0
                    && pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysLast == 0)
                {
                    pMmio2Region = &pVM->nem.s.aMmio2DirtyTracking[idSlot];
                    break;
                }
            }

            if (!pMmio2Region)
            {
                LogRel(("NEMR3NotifyPhysMmioExMapEarly: Out of dirty tracking structures -> VERR_NEM_MAP_PAGES_FAILED\n"));
                return VERR_NEM_MAP_PAGES_FAILED;
            }

            pMmio2Region->GCPhysStart = GCPhys;
            pMmio2Region->GCPhysLast  = GCPhys + cb - 1;
            pMmio2Region->fDirty      = false;
            *puNemRange = idSlot;
        }
        else
            fProt |= NEM_PAGE_PROT_WRITE;

        int rc = nemR3DarwinMap(pVM, GCPhys, pvMmio2, cb, fProt, pu2State);
        if (RT_FAILURE(rc))
        {
            LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x pvMmio2=%p: Map -> rc=%Rrc\n",
                    GCPhys, cb, fFlags, pvMmio2, rc));
            return VERR_NEM_MAP_PAGES_FAILED;
        }
    }
    else
        Assert(!(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2));

#else
    RT_NOREF(pVM, GCPhys, cb, pvRam, pvMmio2);
    *pu2State = (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE) ? UINT8_MAX : NEM_DARWIN_PAGE_STATE_UNMAPPED;
#endif
    return VINF_SUCCESS;
}


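/**
 * Late notification of an MMIO/MMIO2 mapping; nothing to do for this backend,
 * as the mapping work appears to be done entirely in the early notification.
 *
 * @returns VBox status code.
 */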
VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
                                                 void *pvRam, void *pvMmio2, uint32_t *puNemRange)
{
    RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, puNemRange);
    return VINF_SUCCESS;
}


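/**
 * Notification of an MMIO/MMIO2 unmapping: unmaps the MMIO2 pages, resets the
 * dirty tracking slot if one was used, and restores any RAM that was replaced.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The start of the range.
 * @param   cb          The size of the range.
 * @param   fFlags      NEM_NOTIFY_PHYS_MMIO_EX_F_XXX.
 * @param   pvRam       The RAM to restore, if NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE.
 * @param   pvMmio2     The ring-3 mapping of the MMIO2 memory, if any.
 * @param   pu2State    Where to track the NEM page state.
 * @param   puNemRange  The dirty tracking slot index to reset.
 */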
VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExUnmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, void *pvRam,
                                               void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
{
    RT_NOREF(pVM, puNemRange);

    Log5(("NEMR3NotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p uNemRange=%#x (%#x)\n",
          GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, puNemRange, *puNemRange));

    int rc = VINF_SUCCESS;
#if defined(VBOX_WITH_PGM_NEM_MODE)
    /*
     * Unmap the MMIO2 pages.
     */
    /** @todo If we implement aliasing (MMIO2 page aliased into MMIO range),
     *        we may have more stuff to unmap even in case of pure MMIO... */
    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2)
    {
        rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
        if (RT_FAILURE(rc))
        {
            LogRel2(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc\n",
                     GCPhys, cb, fFlags, rc));
            rc = VERR_NEM_UNMAP_PAGES_FAILED;
        }

        if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES)
        {
            /* Reset tracking structure. */
            uint32_t idSlot = *puNemRange;
            *puNemRange = UINT32_MAX;

            Assert(idSlot < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking));
            pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysStart = 0;
            pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysLast  = 0;
            pVM->nem.s.aMmio2DirtyTracking[idSlot].fDirty      = false;
        }
    }

    /* Ensure the page is marked as unmapped if relevant. */
    Assert(!pu2State || *pu2State == NEM_DARWIN_PAGE_STATE_UNMAPPED);

    /*
     * Restore the RAM we replaced.
     */
    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
    {
        AssertPtr(pvRam);
        rc = nemR3DarwinMap(pVM, GCPhys, pvRam, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
        if (RT_SUCCESS(rc))
        { /* likely */ }
        else
        {
            LogRel(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp pvMmio2=%p rc=%Rrc\n", GCPhys, cb, pvMmio2, rc));
            rc = VERR_NEM_MAP_PAGES_FAILED;
        }
    }

    RT_NOREF(pvMmio2);
#else
    RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State);
    if (pu2State)
        *pu2State = UINT8_MAX;
    rc = VERR_NEM_UNMAP_PAGES_FAILED;
#endif
    return rc;
}


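/**
 * Queries the dirty state of an MMIO2 range and resets the tracking: the whole
 * range is reported dirty if any write happened since the last query, and the
 * pages are then write protected again to re-arm the tracking.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The start of the range.
 * @param   cb          The size of the range.
 * @param   uNemRange   The dirty tracking slot index.
 * @param   pvBitmap    The dirty bitmap to fill in.
 * @param   cbBitmap    The size of the bitmap in bytes.
 */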
VMMR3_INT_DECL(int) NEMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t uNemRange,
                                                           void *pvBitmap, size_t cbBitmap)
{
    LogFlowFunc(("NEMR3PhysMmio2QueryAndResetDirtyBitmap: %RGp LB %RGp uNemRange=%u\n", GCPhys, cb, uNemRange));
    Assert(uNemRange < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking));

    /* Keep it simple for now and mark everything as dirty if it is. */
    int rc = VINF_SUCCESS;
    if (pVM->nem.s.aMmio2DirtyTracking[uNemRange].fDirty)
    {
        ASMBitSetRange(pvBitmap, 0, cbBitmap * 8);

        pVM->nem.s.aMmio2DirtyTracking[uNemRange].fDirty = false;
        /* Restore as RX only. */
        uint8_t u2State;
        rc = nemR3DarwinProtect(GCPhys, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE, &u2State);
    }
    else
        ASMBitClearRange(pvBitmap, 0, cbBitmap * 8);

    return rc;
}


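/**
 * Early notification of a ROM range registration; nothing to map yet, the
 * pages are mapped read-only in the late notification below.
 *
 * @returns VBox status code.
 */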
VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages, uint32_t fFlags,
                                                    uint8_t *pu2State, uint32_t *puNemRange)
{
    RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);

    Log5(("NEMR3NotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags));
    *pu2State = UINT8_MAX;
    *puNemRange = 0;
    return VINF_SUCCESS;
}


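/**
 * Late notification of a ROM range registration: (re-)maps the ROM pages
 * read+execute only now that their content is in place.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The start of the ROM range.
 * @param   cb          The size of the ROM range.
 * @param   pvPages     The ring-3 mapping of the ROM pages.
 * @param   fFlags      ROM notification flags.
 * @param   pu2State    Where to track the NEM page state.
 * @param   puNemRange  Backend specific range data (unused).
 */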
VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages,
                                                   uint32_t fFlags, uint8_t *pu2State, uint32_t *puNemRange)
{
    Log5(("NEMR3NotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n",
          GCPhys, cb, pvPages, fFlags, pu2State, *pu2State, puNemRange, *puNemRange));
    *pu2State = UINT8_MAX;

#if defined(VBOX_WITH_PGM_NEM_MODE)
    /*
     * (Re-)map readonly.
     */
    AssertPtrReturn(pvPages, VERR_INVALID_POINTER);

    int rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
    AssertRC(rc);

    rc = nemR3DarwinMap(pVM, GCPhys, pvPages, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE, pu2State);
    if (RT_FAILURE(rc))
    {
        LogRel(("NEMR3NotifyPhysRomRegisterLate: GCPhys=%RGp LB %RGp pvPages=%p fFlags=%#x rc=%Rrc\n",
                GCPhys, cb, pvPages, fFlags, rc));
        return VERR_NEM_MAP_PAGES_FAILED;
    }
    RT_NOREF(fFlags, puNemRange);
    return VINF_SUCCESS;
#else
    RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);
    return VERR_NEM_MAP_PAGES_FAILED;
#endif
}


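/**
 * Notification that a physical access handler has been deregistered: restores
 * the full (RWX) mapping of the memory that was previously handled.
 *
 * @param   pVM         The cross context VM structure.
 * @param   enmKind     The kind of access handler.
 * @param   GCPhys      The start of the range.
 * @param   cb          The size of the range.
 * @param   pvMemR3     The ring-3 mapping of the memory, if any.
 * @param   pu2State    Where to track the NEM page state.
 */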
VMM_INT_DECL(void) NEMHCNotifyHandlerPhysicalDeregister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
                                                        RTR3PTR pvMemR3, uint8_t *pu2State)
{
    Log5(("NEMHCNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d pvMemR3=%p pu2State=%p (%d)\n",
          GCPhys, cb, enmKind, pvMemR3, pu2State, *pu2State));

    *pu2State = UINT8_MAX;
#if defined(VBOX_WITH_PGM_NEM_MODE)
    if (pvMemR3)
    {
        /* Unregister what was there before. */
        int rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
        AssertRC(rc);

        rc = nemR3DarwinMap(pVM, GCPhys, pvMemR3, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
        AssertLogRelMsgRC(rc, ("NEMHCNotifyHandlerPhysicalDeregister: nemR3DarwinMap(,%p,%RGp,%RGp,) -> %Rrc\n",
                               pvMemR3, GCPhys, cb, rc));
    }
    RT_NOREF(enmKind);
#else
    RT_NOREF(pVM, enmKind, GCPhys, cb, pvMemR3);
    AssertFailed();
#endif
}


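/**
 * Notification about the A20 gate state; there is no A20 gate on ARMv8, so
 * this is a no-op.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   fEnabled    The new A20 gate state.
 */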
VMMR3_INT_DECL(void) NEMR3NotifySetA20(PVMCPU pVCpu, bool fEnabled)
{
    Log(("NEMR3NotifySetA20: fEnabled=%RTbool\n", fEnabled));
    RT_NOREF(pVCpu, fEnabled);
}


void nemHCNativeNotifyHandlerPhysicalRegister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
{
    Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
    NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb);
}


void nemHCNativeNotifyHandlerPhysicalModify(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
                                            RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
{
    Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
          GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
    NOREF(pVM); NOREF(enmKind); NOREF(GCPhysOld); NOREF(GCPhysNew); NOREF(cb); NOREF(fRestoreAsRAM);
}


int nemHCNativeNotifyPhysPageAllocated(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
                                       PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhys, fPageProt, enmType, *pu2State));
    RT_NOREF(pVM, GCPhys, HCPhys, fPageProt, enmType, pu2State);

    AssertFailed();
    return VINF_SUCCESS;
}


VMM_INT_DECL(void) NEMHCNotifyPhysPageProtChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, RTR3PTR pvR3, uint32_t fPageProt,
                                                  PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhys, fPageProt, enmType, *pu2State));
    RT_NOREF(pVM, GCPhys, HCPhys, pvR3, fPageProt, enmType, pu2State);
}


VMM_INT_DECL(void) NEMHCNotifyPhysPageChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
                                              RTR3PTR pvNewR3, uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("NEMHCNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhysPrev, HCPhysNew, fPageProt, enmType, *pu2State));
    RT_NOREF(pVM, GCPhys, HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType, pu2State);

    AssertFailed();
}


/**
 * Interface for importing state on demand (used by IEM).
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context CPU structure.
 * @param   fWhat   What to import, CPUMCTX_EXTRN_XXX.
 */
VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
{
    LogFlowFunc(("pVCpu=%p fWhat=%RX64\n", pVCpu, fWhat));
    STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);

    return nemR3DarwinCopyStateFromHv(pVCpu->pVMR3, pVCpu, fWhat);
}


/**
 * Query the CPU tick counter and optionally the TSC_AUX MSR value.
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context CPU structure.
 * @param   pcTicks Where to return the CPU tick count.
 * @param   puAux   Where to return the TSC_AUX register value.
 */
VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPUCC pVCpu, uint64_t *pcTicks, uint32_t *puAux)
{
    LogFlowFunc(("pVCpu=%p pcTicks=%RX64 puAux=%RX32\n", pVCpu, pcTicks, puAux));
    STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick);

    if (puAux)
        *puAux = 0;
    *pcTicks = mach_absolute_time() - pVCpu->pVMR3->nem.s.u64VTimerOff; /* This is the host timer minus the offset. */
    return VINF_SUCCESS;
}


/**
 * Resumes CPU clock (TSC) on all virtual CPUs.
 *
 * This is called by TM when the VM is started, restored, resumed or similar.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context CPU structure of the calling EMT.
 * @param   uPausedTscValue The TSC value at the time of pausing.
 */
VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uPausedTscValue)
{
    LogFlowFunc(("pVM=%p pVCpu=%p uPausedTscValue=%RX64\n", pVM, pVCpu, uPausedTscValue));
    VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
    AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);

    /*
     * Calculate the new offset: first get the new TSC value with the old vTimer offset,
     * then adjust the offset so the guest doesn't notice the pause.
     */
    uint64_t u64TscNew = mach_absolute_time() - pVCpu->pVMR3->nem.s.u64VTimerOff;
    Assert(u64TscNew >= uPausedTscValue);
    LogFlowFunc(("u64VTimerOffOld=%#RX64 u64TscNew=%#RX64 u64VTimerValuePaused=%#RX64 -> u64VTimerOff=%#RX64\n",
                 pVM->nem.s.u64VTimerOff, u64TscNew, uPausedTscValue,
                 pVM->nem.s.u64VTimerOff + (u64TscNew - uPausedTscValue)));

    pVM->nem.s.u64VTimerOff += u64TscNew - uPausedTscValue;

    /*
     * Set the flag to update the vTimer offset when the vCPU resumes for the first time
     * (needs to be done on the actual EMT).
     */
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPUCC pVCpuDst = pVM->apCpusR3[idCpu];
        pVCpuDst->nem.s.fVTimerOffUpdate = true;
    }

    return VINF_SUCCESS;
}


/**
 * Returns features supported by the NEM backend.
 *
 * @returns Flags of features supported by the native NEM backend.
 * @param   pVM     The cross context VM structure.
 */
VMM_INT_DECL(uint32_t) NEMHCGetFeatures(PVMCC pVM)
{
    RT_NOREF(pVM);
    /*
     * Apple's Hypervisor.framework is not supported if the CPU doesn't support nested paging
     * and unrestricted guest execution, so we can safely always return these flags here.
     */
    return NEM_FEAT_F_NESTED_PAGING | NEM_FEAT_F_FULL_GST_EXEC | NEM_FEAT_F_XSAVE_XRSTOR;
}


/** @page pg_nem_darwin NEM/darwin - Native Execution Manager, macOS.
 *
 * @todo Add notes as the implementation progresses...
 */
