VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/NEMR3Native-darwin-armv8.cpp @ 100166

Last change on this file since 100166 was 100166, checked in by vboxsync, 18 months ago:

VMM/NEMR3Native-darwin-armv8: Updates to the PSCI implementation, bugref:10454, bugref:10390
/* $Id: NEMR3Native-darwin-armv8.cpp 100166 2023-06-13 11:57:26Z vboxsync $ */
/** @file
 * NEM - Native execution manager, native ring-3 macOS backend using Hypervisor.framework, ARMv8 variant.
 *
 * Log group 2: Exit logging.
 * Log group 3: Log context on exit.
 * Log group 5: Ring-3 memory management
 */

/*
 * Copyright (C) 2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_NEM
#define VMCPU_INCL_CPUM_GST_CTX
#define CPUM_WITH_NONCONST_HOST_FEATURES /* required for initializing parts of the g_CpumHostFeatures structure here. */
#include <VBox/vmm/nem.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/gic.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/dbgftrace.h>
#include <VBox/vmm/gcm.h>
#include "NEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/vmm/vmm.h>
#include "dtrace/VBoxVMM.h"

#include <iprt/armv8.h>
#include <iprt/asm.h>
#include <iprt/asm-arm.h>
#include <iprt/asm-math.h>
#include <iprt/ldr.h>
#include <iprt/mem.h>
#include <iprt/path.h>
#include <iprt/string.h>
#include <iprt/system.h>
#include <iprt/utf16.h>

#include <iprt/formats/arm-psci.h>

#include <mach/mach_time.h>
#include <mach/kern_return.h>

#include <Hypervisor/Hypervisor.h>


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/


/** @todo The vTimer PPI for the virt platform, make it configurable. */
#define NEM_DARWIN_VTIMER_GIC_PPI_IRQ 11


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
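/*
 * The descriptor tables below drive the CPUMCTX <-> Hypervisor.framework state
 * synchronization: each entry pairs an hv_*_reg_t enum value with the
 * CPUMCTX_EXTRN_XXX flag tracking the register's validity and the byte offset
 * of its backing field in CPUMCTX, keeping the import/export loops further
 * down completely table-driven.
 */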
/** The general registers. */
static const struct
{
    hv_reg_t    enmHvReg;
    uint32_t    fCpumExtrn;
    uint32_t    offCpumCtx;
} s_aCpumRegs[] =
{
#define CPUM_GREG_EMIT_X0_X3(a_Idx)  { HV_REG_X ## a_Idx, CPUMCTX_EXTRN_X ## a_Idx, RT_UOFFSETOF(CPUMCTX, aGRegs[a_Idx].x) }
#define CPUM_GREG_EMIT_X4_X28(a_Idx) { HV_REG_X ## a_Idx, CPUMCTX_EXTRN_X4_X28, RT_UOFFSETOF(CPUMCTX, aGRegs[a_Idx].x) }
    CPUM_GREG_EMIT_X0_X3(0),
    CPUM_GREG_EMIT_X0_X3(1),
    CPUM_GREG_EMIT_X0_X3(2),
    CPUM_GREG_EMIT_X0_X3(3),
    CPUM_GREG_EMIT_X4_X28(4),
    CPUM_GREG_EMIT_X4_X28(5),
    CPUM_GREG_EMIT_X4_X28(6),
    CPUM_GREG_EMIT_X4_X28(7),
    CPUM_GREG_EMIT_X4_X28(8),
    CPUM_GREG_EMIT_X4_X28(9),
    CPUM_GREG_EMIT_X4_X28(10),
    CPUM_GREG_EMIT_X4_X28(11),
    CPUM_GREG_EMIT_X4_X28(12),
    CPUM_GREG_EMIT_X4_X28(13),
    CPUM_GREG_EMIT_X4_X28(14),
    CPUM_GREG_EMIT_X4_X28(15),
    CPUM_GREG_EMIT_X4_X28(16),
    CPUM_GREG_EMIT_X4_X28(17),
    CPUM_GREG_EMIT_X4_X28(18),
    CPUM_GREG_EMIT_X4_X28(19),
    CPUM_GREG_EMIT_X4_X28(20),
    CPUM_GREG_EMIT_X4_X28(21),
    CPUM_GREG_EMIT_X4_X28(22),
    CPUM_GREG_EMIT_X4_X28(23),
    CPUM_GREG_EMIT_X4_X28(24),
    CPUM_GREG_EMIT_X4_X28(25),
    CPUM_GREG_EMIT_X4_X28(26),
    CPUM_GREG_EMIT_X4_X28(27),
    CPUM_GREG_EMIT_X4_X28(28),
    { HV_REG_FP,   CPUMCTX_EXTRN_FP,   RT_UOFFSETOF(CPUMCTX, aGRegs[29].x) },
    { HV_REG_LR,   CPUMCTX_EXTRN_LR,   RT_UOFFSETOF(CPUMCTX, aGRegs[30].x) },
    { HV_REG_PC,   CPUMCTX_EXTRN_PC,   RT_UOFFSETOF(CPUMCTX, Pc.u64) },
    { HV_REG_FPCR, CPUMCTX_EXTRN_FPCR, RT_UOFFSETOF(CPUMCTX, fpcr) },
    { HV_REG_FPSR, CPUMCTX_EXTRN_FPSR, RT_UOFFSETOF(CPUMCTX, fpsr) }
#undef CPUM_GREG_EMIT_X0_X3
#undef CPUM_GREG_EMIT_X4_X28
};
/** SIMD/FP registers. */
static const struct
{
    hv_simd_fp_reg_t    enmHvReg;
    uint32_t            offCpumCtx;
} s_aCpumFpRegs[] =
{
#define CPUM_VREG_EMIT(a_Idx) { HV_SIMD_FP_REG_Q ## a_Idx, RT_UOFFSETOF(CPUMCTX, aVRegs[a_Idx].v) }
    CPUM_VREG_EMIT(0),
    CPUM_VREG_EMIT(1),
    CPUM_VREG_EMIT(2),
    CPUM_VREG_EMIT(3),
    CPUM_VREG_EMIT(4),
    CPUM_VREG_EMIT(5),
    CPUM_VREG_EMIT(6),
    CPUM_VREG_EMIT(7),
    CPUM_VREG_EMIT(8),
    CPUM_VREG_EMIT(9),
    CPUM_VREG_EMIT(10),
    CPUM_VREG_EMIT(11),
    CPUM_VREG_EMIT(12),
    CPUM_VREG_EMIT(13),
    CPUM_VREG_EMIT(14),
    CPUM_VREG_EMIT(15),
    CPUM_VREG_EMIT(16),
    CPUM_VREG_EMIT(17),
    CPUM_VREG_EMIT(18),
    CPUM_VREG_EMIT(19),
    CPUM_VREG_EMIT(20),
    CPUM_VREG_EMIT(21),
    CPUM_VREG_EMIT(22),
    CPUM_VREG_EMIT(23),
    CPUM_VREG_EMIT(24),
    CPUM_VREG_EMIT(25),
    CPUM_VREG_EMIT(26),
    CPUM_VREG_EMIT(27),
    CPUM_VREG_EMIT(28),
    CPUM_VREG_EMIT(29),
    CPUM_VREG_EMIT(30),
    CPUM_VREG_EMIT(31)
#undef CPUM_VREG_EMIT
};
/** System registers. */
static const struct
{
    hv_sys_reg_t    enmHvReg;
    uint32_t        fCpumExtrn;
    uint32_t        offCpumCtx;
} s_aCpumSysRegs[] =
{
    { HV_SYS_REG_SP_EL0,    CPUMCTX_EXTRN_SP,             RT_UOFFSETOF(CPUMCTX, aSpReg[0].u64) },
    { HV_SYS_REG_SP_EL1,    CPUMCTX_EXTRN_SP,             RT_UOFFSETOF(CPUMCTX, aSpReg[1].u64) },
    { HV_SYS_REG_SPSR_EL1,  CPUMCTX_EXTRN_SPSR,           RT_UOFFSETOF(CPUMCTX, Spsr.u64) },
    { HV_SYS_REG_ELR_EL1,   CPUMCTX_EXTRN_ELR,            RT_UOFFSETOF(CPUMCTX, Elr.u64) },
    { HV_SYS_REG_SCTLR_EL1, CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Sctlr.u64) },
    { HV_SYS_REG_TCR_EL1,   CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Tcr.u64) },
    { HV_SYS_REG_TTBR0_EL1, CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Ttbr0.u64) },
    { HV_SYS_REG_TTBR1_EL1, CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Ttbr1.u64) },
    { HV_SYS_REG_VBAR_EL1,  CPUMCTX_EXTRN_SYSREG,         RT_UOFFSETOF(CPUMCTX, VBar.u64) },
};


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/


/**
 * Converts a HV return code to a VBox status code.
 *
 * @returns VBox status code.
 * @param   hrc     The HV return code to convert.
 */
DECLINLINE(int) nemR3DarwinHvSts2Rc(hv_return_t hrc)
{
    if (hrc == HV_SUCCESS)
        return VINF_SUCCESS;

    switch (hrc)
    {
        case HV_ERROR:        return VERR_INVALID_STATE;
        case HV_BUSY:         return VERR_RESOURCE_BUSY;
        case HV_BAD_ARGUMENT: return VERR_INVALID_PARAMETER;
        case HV_NO_RESOURCES: return VERR_OUT_OF_RESOURCES;
        case HV_NO_DEVICE:    return VERR_NOT_FOUND;
        case HV_UNSUPPORTED:  return VERR_NOT_SUPPORTED;
    }

    return VERR_IPE_UNEXPECTED_STATUS;
}


/**
 * Returns a human readable string of the given exception class.
 *
 * @returns Pointer to the string matching the given EC.
 * @param   u32Ec   The exception class to return the string for.
 */
static const char *nemR3DarwinEsrEl2EcStringify(uint32_t u32Ec)
{
    switch (u32Ec)
    {
#define ARMV8_EC_CASE(a_Ec) case a_Ec: return #a_Ec
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_UNKNOWN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_TRAPPED_WFX);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCR_MRC_COPROC_15);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCRR_MRRC_COPROC15);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCR_MRC_COPROC_14);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_LDC_STC);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_SME_SVE_NEON);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_VMRS);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_PA_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_LS64_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MRRC_COPROC14);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_BTI_BRANCH_TARGET_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_ILLEGAL_EXECUTION_STATE);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_SVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_HVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_SMC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_SVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_HVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_SMC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_SYS_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_SVE_TRAPPED);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_PAUTH_NV_TRAPPED_ERET_ERETAA_ERETAB);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_TME_TSTART_INSN_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_FPAC_PA_INSN_FAILURE_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_SME_TRAPPED_SME_ACCESS);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_RME_GRANULE_PROT_CHECK_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_INSN_ABORT_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_INSN_ABORT_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_PC_ALIGNMENT_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_DATA_ABORT_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_DATA_ABORT_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SP_ALIGNMENT_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_MOPS_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_FP_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_FP_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SERROR_INTERRUPT);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_BKPT_EXCEPTION_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_BKPT_EXCEPTION_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SS_EXCEPTION_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SS_EXCEPTION_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_WATCHPOINT_EXCEPTION_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_WATCHPOINT_EXCEPTION_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_BKPT_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_VEC_CATCH_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_BRK_INSN);
#undef ARMV8_EC_CASE
        default:
            break;
    }

    return "<INVALID>";
}


/**
 * Resolves a NEM page state from the given protection flags.
 *
 * @returns NEM page state.
 * @param   fPageProt   The page protection flags.
 */
DECLINLINE(uint8_t) nemR3DarwinPageStateFromProt(uint32_t fPageProt)
{
    switch (fPageProt)
    {
        case NEM_PAGE_PROT_NONE:
            return NEM_DARWIN_PAGE_STATE_UNMAPPED;
        case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE:
            return NEM_DARWIN_PAGE_STATE_RX;
        case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE:
            return NEM_DARWIN_PAGE_STATE_RW;
        case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE:
            return NEM_DARWIN_PAGE_STATE_RWX;
        default:
            break;
    }

    AssertLogRelMsgFailed(("Invalid combination of page protection flags %#x, can't map to page state!\n", fPageProt));
    return NEM_DARWIN_PAGE_STATE_UNMAPPED;
}


/**
 * Unmaps the given guest physical address range (page aligned).
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The guest physical address to start unmapping at.
 * @param   cb          The size of the range to unmap in bytes.
 * @param   pu2State    Where to store the new state of the unmapped page, optional.
 */
DECLINLINE(int) nemR3DarwinUnmap(PVM pVM, RTGCPHYS GCPhys, size_t cb, uint8_t *pu2State)
{
    if (*pu2State <= NEM_DARWIN_PAGE_STATE_UNMAPPED)
    {
        Log5(("nemR3DarwinUnmap: %RGp == unmapped\n", GCPhys));
        *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
        return VINF_SUCCESS;
    }

    LogFlowFunc(("Unmapping %RGp LB %zu\n", GCPhys, cb));
    hv_return_t hrc = hv_vm_unmap(GCPhys, cb);
    if (RT_LIKELY(hrc == HV_SUCCESS))
    {
        STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
        if (pu2State)
            *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
        Log5(("nemR3DarwinUnmap: %RGp => unmapped\n", GCPhys));
        return VINF_SUCCESS;
    }

    STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
    LogRel(("nemR3DarwinUnmap(%RGp): failed! hrc=%#x\n",
            GCPhys, hrc));
    return VERR_NEM_IPE_6;
}


/**
 * Maps a given guest physical address range backed by the given memory with the given
 * protection flags.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The guest physical address to start mapping.
 * @param   pvRam       The R3 pointer of the memory to back the range with.
 * @param   cb          The size of the range, page aligned.
 * @param   fPageProt   The page protection flags to use for this range, combination of NEM_PAGE_PROT_XXX
 * @param   pu2State    Where to store the state for the new page, optional.
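 *
 * @note    hv_vm_map() is assumed to require the host address, guest physical
 *          address and size to be aligned to the host page size (16K pages on
 *          Apple Silicon); all callers in this file pass page aligned ranges.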
 */
DECLINLINE(int) nemR3DarwinMap(PVM pVM, RTGCPHYS GCPhys, const void *pvRam, size_t cb, uint32_t fPageProt, uint8_t *pu2State)
{
    LogFlowFunc(("Mapping %RGp LB %zu fProt=%#x\n", GCPhys, cb, fPageProt));

    Assert(fPageProt != NEM_PAGE_PROT_NONE);
    RT_NOREF(pVM);

    hv_memory_flags_t fHvMemProt = 0;
    if (fPageProt & NEM_PAGE_PROT_READ)
        fHvMemProt |= HV_MEMORY_READ;
    if (fPageProt & NEM_PAGE_PROT_WRITE)
        fHvMemProt |= HV_MEMORY_WRITE;
    if (fPageProt & NEM_PAGE_PROT_EXECUTE)
        fHvMemProt |= HV_MEMORY_EXEC;

    hv_return_t hrc = hv_vm_map((void *)pvRam, GCPhys, cb, fHvMemProt);
    if (hrc == HV_SUCCESS)
    {
        if (pu2State)
            *pu2State = nemR3DarwinPageStateFromProt(fPageProt);
        return VINF_SUCCESS;
    }

    return nemR3DarwinHvSts2Rc(hrc);
}

#if 0 /* unused */
DECLINLINE(int) nemR3DarwinProtectPage(PVM pVM, RTGCPHYS GCPhys, size_t cb, uint32_t fPageProt)
{
    hv_memory_flags_t fHvMemProt = 0;
    if (fPageProt & NEM_PAGE_PROT_READ)
        fHvMemProt |= HV_MEMORY_READ;
    if (fPageProt & NEM_PAGE_PROT_WRITE)
        fHvMemProt |= HV_MEMORY_WRITE;
    if (fPageProt & NEM_PAGE_PROT_EXECUTE)
        fHvMemProt |= HV_MEMORY_EXEC;

    hv_return_t hrc;
    if (pVM->nem.s.fCreatedAsid)
        hrc = hv_vm_protect_space(pVM->nem.s.uVmAsid, GCPhys, cb, fHvMemProt);
    else
        hrc = hv_vm_protect(GCPhys, cb, fHvMemProt);

    return nemR3DarwinHvSts2Rc(hrc);
}
#endif

#ifdef LOG_ENABLED
/**
 * Logs the current CPU state.
 */
static void nemR3DarwinLogState(PVMCC pVM, PVMCPUCC pVCpu)
{
    if (LogIs3Enabled())
    {
        char szRegs[4096];
        DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
                        "x0=%016VR{x0} x1=%016VR{x1} x2=%016VR{x2} x3=%016VR{x3}\n"
                        "x4=%016VR{x4} x5=%016VR{x5} x6=%016VR{x6} x7=%016VR{x7}\n"
                        "x8=%016VR{x8} x9=%016VR{x9} x10=%016VR{x10} x11=%016VR{x11}\n"
                        "x12=%016VR{x12} x13=%016VR{x13} x14=%016VR{x14} x15=%016VR{x15}\n"
                        "x16=%016VR{x16} x17=%016VR{x17} x18=%016VR{x18} x19=%016VR{x19}\n"
                        "x20=%016VR{x20} x21=%016VR{x21} x22=%016VR{x22} x23=%016VR{x23}\n"
                        "x24=%016VR{x24} x25=%016VR{x25} x26=%016VR{x26} x27=%016VR{x27}\n"
                        "x28=%016VR{x28} x29=%016VR{x29} x30=%016VR{x30}\n"
                        "pc=%016VR{pc} pstate=%016VR{pstate}\n"
                        "sp_el0=%016VR{sp_el0} sp_el1=%016VR{sp_el1} elr_el1=%016VR{elr_el1}\n"
                        "sctlr_el1=%016VR{sctlr_el1} tcr_el1=%016VR{tcr_el1}\n"
                        "ttbr0_el1=%016VR{ttbr0_el1} ttbr1_el1=%016VR{ttbr1_el1}\n"
                        "vbar_el1=%016VR{vbar_el1}\n"
                        );
        char szInstr[256]; RT_ZERO(szInstr);
#if 0
        DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
                           DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
                           szInstr, sizeof(szInstr), NULL);
#endif
        Log3(("%s%s\n", szRegs, szInstr));
    }
}
#endif /* LOG_ENABLED */


static int nemR3DarwinCopyStateFromHv(PVMCC pVM, PVMCPUCC pVCpu, uint64_t fWhat)
{
    RT_NOREF(pVM);

    hv_return_t hrc = hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CTL_EL0, &pVCpu->cpum.GstCtx.CntvCtlEl0);
    if (hrc == HV_SUCCESS)
        hrc = hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CVAL_EL0, &pVCpu->cpum.GstCtx.CntvCValEl0);
    if (   hrc == HV_SUCCESS
        && (fWhat & (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR)))
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumRegs); i++)
        {
            if (s_aCpumRegs[i].fCpumExtrn & fWhat)
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumRegs[i].offCpumCtx);
                hrc |= hv_vcpu_get_reg(pVCpu->nem.s.hVCpu, s_aCpumRegs[i].enmHvReg, pu64);
            }
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_V0_V31))
    {
        /* SIMD/FP registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumFpRegs); i++)
        {
            hv_simd_fp_uchar16_t *pu128 = (hv_simd_fp_uchar16_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumFpRegs[i].offCpumCtx);
            hrc |= hv_vcpu_get_simd_fp_reg(pVCpu->nem.s.hVCpu, s_aCpumFpRegs[i].enmHvReg, pu128);
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR | CPUMCTX_EXTRN_SYSREG)))
    {
        /* System registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegs); i++)
        {
            if (s_aCpumSysRegs[i].fCpumExtrn & fWhat)
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegs[i].offCpumCtx);
                hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumSysRegs[i].enmHvReg, pu64);
            }
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_PSTATE))
    {
        uint64_t u64Tmp;
        hrc |= hv_vcpu_get_reg(pVCpu->nem.s.hVCpu, HV_REG_CPSR, &u64Tmp);
        if (hrc == HV_SUCCESS)
            pVCpu->cpum.GstCtx.fPState = (uint32_t)u64Tmp;
    }

    /* Almost done, just update extern flags. */
    pVCpu->cpum.GstCtx.fExtrn &= ~fWhat;
    if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
        pVCpu->cpum.GstCtx.fExtrn = 0;

    return nemR3DarwinHvSts2Rc(hrc);
}


/**
 * Exports the guest state to HV for execution.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling EMT.
 */
static int nemR3DarwinExportGuestState(PVMCC pVM, PVMCPUCC pVCpu)
{
    RT_NOREF(pVM);
    hv_return_t hrc = HV_SUCCESS;

    if (   (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR))
        != (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR))
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumRegs); i++)
        {
            if (!(s_aCpumRegs[i].fCpumExtrn & pVCpu->cpum.GstCtx.fExtrn))
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumRegs[i].offCpumCtx);
                hrc |= hv_vcpu_set_reg(pVCpu->nem.s.hVCpu, s_aCpumRegs[i].enmHvReg, *pu64);
            }
        }
    }

    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_V0_V31))
    {
        /* SIMD/FP registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumFpRegs); i++)
        {
            hv_simd_fp_uchar16_t *pu128 = (hv_simd_fp_uchar16_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumFpRegs[i].offCpumCtx);
            hrc |= hv_vcpu_set_simd_fp_reg(pVCpu->nem.s.hVCpu, s_aCpumFpRegs[i].enmHvReg, *pu128);
        }
    }

    if (   hrc == HV_SUCCESS
        &&    (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR | CPUMCTX_EXTRN_SYSREG))
           != (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR | CPUMCTX_EXTRN_SYSREG))
    {
        /* System registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegs); i++)
        {
            if (!(s_aCpumSysRegs[i].fCpumExtrn & pVCpu->cpum.GstCtx.fExtrn))
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegs[i].offCpumCtx);
                hrc |= hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumSysRegs[i].enmHvReg, *pu64);
            }
        }
    }

    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_PSTATE))
        hrc = hv_vcpu_set_reg(pVCpu->nem.s.hVCpu, HV_REG_CPSR, pVCpu->cpum.GstCtx.fPState);

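    /* The entire state now lives in Hypervisor.framework again: a set
       CPUMCTX_EXTRN_XXX bit means the value is external to CPUMCTX and must be
       fetched through nemR3DarwinCopyStateFromHv() before use. */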
    pVCpu->cpum.GstCtx.fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_NEM;
    return nemR3DarwinHvSts2Rc(hrc);
}


/**
 * Try initialize the native API.
 *
 * This may only do part of the job, more can be done in
 * nemR3NativeInitAfterCPUM() and nemR3NativeInitCompleted().
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   fFallback   Whether we're in fallback mode or use-NEM mode. In
 *                      the latter we'll fail if we cannot initialize.
 * @param   fForced     Whether the HMForced flag is set and we should
 *                      fail if we cannot initialize.
 */
int nemR3NativeInit(PVM pVM, bool fFallback, bool fForced)
{
    AssertReturn(!pVM->nem.s.fCreatedVm, VERR_WRONG_ORDER);

    /*
     * Some state init.
     */
    PCFGMNODE pCfgNem = CFGMR3GetChild(CFGMR3GetRoot(pVM), "NEM/");
    RT_NOREF(pCfgNem);

    /*
     * Error state.
     * The error message will be non-empty on failure and 'rc' will be set too.
     */
    RTERRINFOSTATIC ErrInfo;
    PRTERRINFO pErrInfo = RTErrInfoInitStatic(&ErrInfo);

    int rc = VINF_SUCCESS;
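    /* Create the VM with the default configuration; on ARM hv_vm_create()
       takes an hv_vm_config_t and NULL simply selects the defaults. */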
    hv_return_t hrc = hv_vm_create(NULL);
    if (hrc == HV_SUCCESS)
    {
        pVM->nem.s.fCreatedVm = true;
        pVM->nem.s.u64CntFrqHz = ASMReadCntFrqEl0();

        /* Will be initialized in NEMHCResumeCpuTickOnAll() before executing guest code. */
        pVM->nem.s.u64VTimerOff = 0;

        VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_NATIVE_API);
        Log(("NEM: Marked active!\n"));
        PGMR3EnableNemMode(pVM);
    }
    else
        rc = RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
                           "hv_vm_create() failed: %#x", hrc);

    /*
     * We only fail if in forced mode, otherwise just log the complaint and return.
     */
    Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API || RTErrInfoIsSet(pErrInfo));
    if (   (fForced || !fFallback)
        && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NATIVE_API)
        return VMSetError(pVM, RT_SUCCESS_NP(rc) ? VERR_NEM_NOT_AVAILABLE : rc, RT_SRC_POS, "%s", pErrInfo->pszMsg);

    if (RTErrInfoIsSet(pErrInfo))
        LogRel(("NEM: Not available: %s\n", pErrInfo->pszMsg));
    return VINF_SUCCESS;
}


/**
 * Worker to create the vCPU handle on the EMT running it later on (as required by HV).
 *
 * @returns VBox status code
 * @param   pVM     The VM handle.
 * @param   pVCpu   The vCPU handle.
 * @param   idCpu   ID of the CPU to create.
 */
static DECLCALLBACK(int) nemR3DarwinNativeInitVCpuOnEmt(PVM pVM, PVMCPU pVCpu, VMCPUID idCpu)
{
    hv_return_t hrc = hv_vcpu_create(&pVCpu->nem.s.hVCpu, &pVCpu->nem.s.pHvExit, NULL);
    if (hrc != HV_SUCCESS)
        return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
                          "Call to hv_vcpu_create failed on vCPU %u: %#x (%Rrc)", idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));

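    /* Use the vCPU ID as the affinity value in MPIDR_EL1; this is what the
       PSCI CPU_ON handler further down matches its target CPU argument against. */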
    hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_MPIDR_EL1, idCpu);
    if (hrc != HV_SUCCESS)
        return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
                          "Setting MPIDR_EL1 failed on vCPU %u: %#x (%Rrc)", idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));

    if (idCpu == 0)
    {
        /** @todo */
    }

    return VINF_SUCCESS;
}


/**
 * Worker to destroy the vCPU handle on the EMT running it later on (as required by HV).
 *
 * @returns VBox status code
 * @param   pVCpu   The vCPU handle.
 */
static DECLCALLBACK(int) nemR3DarwinNativeTermVCpuOnEmt(PVMCPU pVCpu)
{
    hv_return_t hrc = hv_vcpu_destroy(pVCpu->nem.s.hVCpu);
    Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);
    return VINF_SUCCESS;
}


/**
 * This is called after CPUMR3Init is done.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 */
int nemR3NativeInitAfterCPUM(PVM pVM)
{
    /*
     * Validate sanity.
     */
    AssertReturn(!pVM->nem.s.fCreatedEmts, VERR_WRONG_ORDER);
    AssertReturn(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API, VERR_WRONG_ORDER);

    /*
     * Setup the EMTs.
     */
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = pVM->apCpusR3[idCpu];

        int rc = VMR3ReqCallWait(pVM, idCpu, (PFNRT)nemR3DarwinNativeInitVCpuOnEmt, 3, pVM, pVCpu, idCpu);
        if (RT_FAILURE(rc))
        {
            /* Rollback. */
            while (idCpu--)
                VMR3ReqCallWait(pVM, idCpu, (PFNRT)nemR3DarwinNativeTermVCpuOnEmt, 1, pVCpu);

            return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS, "Call to hv_vcpu_create failed: %Rrc", rc);
        }
    }

    pVM->nem.s.fCreatedEmts = true;
    return VINF_SUCCESS;
}


int nemR3NativeInitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
{
    RT_NOREF(pVM, enmWhat);
    return VINF_SUCCESS;
}


int nemR3NativeTerm(PVM pVM)
{
    /*
     * Delete the VM.
     */

    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = pVM->apCpusR3[idCpu];

        /*
         * Apple's documentation states that the vCPU should be destroyed
         * on the thread running the vCPU but as all the other EMTs are gone
         * at this point, destroying the VM would hang.
         *
         * We seem to be in luck here, though, as destroying apparently works
         * from EMT(0) as well.
         */
        hv_return_t hrc = hv_vcpu_destroy(pVCpu->nem.s.hVCpu);
        Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);
    }

    pVM->nem.s.fCreatedEmts = false;
    if (pVM->nem.s.fCreatedVm)
    {
        hv_return_t hrc = hv_vm_destroy();
        if (hrc != HV_SUCCESS)
            LogRel(("NEM: hv_vm_destroy() failed with %#x\n", hrc));

        pVM->nem.s.fCreatedVm = false;
    }
    return VINF_SUCCESS;
}


/**
 * VM reset notification.
 *
 * @param   pVM     The cross context VM structure.
 */
void nemR3NativeReset(PVM pVM)
{
    RT_NOREF(pVM);
}


/**
 * Reset CPU due to INIT IPI or hot (un)plugging.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the CPU being
 *                      reset.
 * @param   fInitIpi    Whether this is the INIT IPI or hot (un)plugging case.
 */
void nemR3NativeResetCpu(PVMCPU pVCpu, bool fInitIpi)
{
    RT_NOREF(pVCpu, fInitIpi);
}


/**
 * Returns the byte size from the given access SAS value.
 *
 * @returns Number of bytes to transfer.
 * @param   uSas    The SAS value to convert.
 */
DECLINLINE(size_t) nemR3DarwinGetByteCountFromSas(uint8_t uSas)
{
    switch (uSas)
    {
        case ARMV8_EC_ISS_DATA_ABRT_SAS_BYTE:     return sizeof(uint8_t);
        case ARMV8_EC_ISS_DATA_ABRT_SAS_HALFWORD: return sizeof(uint16_t);
        case ARMV8_EC_ISS_DATA_ABRT_SAS_WORD:     return sizeof(uint32_t);
        case ARMV8_EC_ISS_DATA_ABRT_SAS_DWORD:    return sizeof(uint64_t);
        default:
            AssertReleaseFailed();
    }

    return 0;
}


/**
 * Sets the given general purpose register to the given value.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling EMT.
 * @param   uReg        The register index.
 * @param   f64BitReg   Flag whether to operate on a 64-bit or 32-bit register.
 * @param   fSignExtend Flag whether to sign extend the value.
 * @param   u64Val      The value.
 */
DECLINLINE(void) nemR3DarwinSetGReg(PVMCPU pVCpu, uint8_t uReg, bool f64BitReg, bool fSignExtend, uint64_t u64Val)
{
    AssertReturnVoid(uReg < 31);

    if (f64BitReg)
        pVCpu->cpum.GstCtx.aGRegs[uReg].x = fSignExtend ? (int64_t)u64Val : u64Val;
    else
        pVCpu->cpum.GstCtx.aGRegs[uReg].w = fSignExtend ? (int32_t)u64Val : u64Val; /** @todo Does this clear the upper half on real hardware? */

    /* Mark the register as not extern anymore. */
    switch (uReg)
    {
        case 0:
            pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X0;
            break;
        case 1:
            pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X1;
            break;
        case 2:
            pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X2;
            break;
        case 3:
            pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X3;
            break;
        default:
            AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_X4_X28));
            /** @todo We need to import all missing registers in order to clear this flag (or just set it in HV from here). */
    }
}


/**
 * Gets the given general purpose register and returns the value.
 *
 * @returns Value from the given register.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling EMT.
 * @param   uReg    The register index.
 */
DECLINLINE(uint64_t) nemR3DarwinGetGReg(PVMCPU pVCpu, uint8_t uReg)
{
    AssertReturn(uReg <= ARMV8_AARCH64_REG_ZR, 0);

    if (uReg == ARMV8_AARCH64_REG_ZR)
        return 0;

    /** @todo Import the register if extern. */
    AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_GPRS_MASK));

    return pVCpu->cpum.GstCtx.aGRegs[uReg].x;
}


/**
 * Works on the data abort exception (which will be an MMIO access most of the time).
 *
 * @returns VBox strict status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context virtual CPU structure of the
 *                          calling EMT.
 * @param   uIss            The instruction specific syndrome value.
 * @param   fInsn32Bit      Flag whether the exception was caused by a 32-bit or 16-bit instruction.
 * @param   GCPtrDataAbrt   The virtual GC address causing the data abort.
 * @param   GCPhysDataAbrt  The physical GC address which caused the data abort.
 */
static VBOXSTRICTRC nemR3DarwinHandleExitExceptionDataAbort(PVM pVM, PVMCPU pVCpu, uint32_t uIss, bool fInsn32Bit,
                                                            RTGCPTR GCPtrDataAbrt, RTGCPHYS GCPhysDataAbrt)
{
    bool fIsv        = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_ISV);
    bool fL2Fault    = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_S1PTW);
    bool fWrite      = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_WNR);
    bool f64BitReg   = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SF);
    bool fSignExtend = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SSE);
    uint8_t uReg     = ARMV8_EC_ISS_DATA_ABRT_SRT_GET(uIss);
    uint8_t uAcc     = ARMV8_EC_ISS_DATA_ABRT_SAS_GET(uIss);
    size_t cbAcc     = nemR3DarwinGetByteCountFromSas(uAcc);
    LogFlowFunc(("fIsv=%RTbool fL2Fault=%RTbool fWrite=%RTbool f64BitReg=%RTbool fSignExtend=%RTbool uReg=%u uAcc=%u GCPtrDataAbrt=%RGv GCPhysDataAbrt=%RGp\n",
                 fIsv, fL2Fault, fWrite, f64BitReg, fSignExtend, uReg, uAcc, GCPtrDataAbrt, GCPhysDataAbrt));

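    /* Only aborts with valid decode information (ISV set) are handled here;
       without it the instruction would have to be fetched and decoded by IEM. */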
    RT_NOREF(fL2Fault, GCPtrDataAbrt);
    AssertReturn(fIsv, VERR_NOT_SUPPORTED); /** @todo Implement using IEM when this should occur. */

    EMHistoryAddExit(pVCpu,
                     fWrite
                     ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
                     : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
                     pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());

    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    uint64_t u64Val = 0;
    if (fWrite)
    {
        u64Val = nemR3DarwinGetGReg(pVCpu, uReg);
        rcStrict = PGMPhysWrite(pVM, GCPhysDataAbrt, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
        Log4(("MmioExit/%u: %08RX64: WRITE %#x LB %u, %.*Rhxs -> rcStrict=%Rrc\n",
              pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhysDataAbrt, cbAcc, cbAcc,
              &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
    }
    else
    {
        rcStrict = PGMPhysRead(pVM, GCPhysDataAbrt, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
        Log4(("MmioExit/%u: %08RX64: READ %#x LB %u -> %.*Rhxs rcStrict=%Rrc\n",
              pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhysDataAbrt, cbAcc, cbAcc,
              &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
        if (rcStrict == VINF_SUCCESS)
            nemR3DarwinSetGReg(pVCpu, uReg, f64BitReg, fSignExtend, u64Val);
    }

    if (rcStrict == VINF_SUCCESS)
        pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);

    return rcStrict;
}


/**
 * Works on the trapped MRS, MSR and system instruction exception.
 *
 * @returns VBox strict status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling EMT.
 * @param   uIss        The instruction specific syndrome value.
 * @param   fInsn32Bit  Flag whether the exception was caused by a 32-bit or 16-bit instruction.
 */
static VBOXSTRICTRC nemR3DarwinHandleExitExceptionTrappedSysInsn(PVM pVM, PVMCPU pVCpu, uint32_t uIss, bool fInsn32Bit)
{
    bool fRead        = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_DIRECTION_IS_READ(uIss);
    uint8_t uCRm      = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_CRM_GET(uIss);
    uint8_t uReg      = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_RT_GET(uIss);
    uint8_t uCRn      = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_CRN_GET(uIss);
    uint8_t uOp1      = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP1_GET(uIss);
    uint8_t uOp2      = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP2_GET(uIss);
    uint8_t uOp0      = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP0_GET(uIss);
    uint16_t idSysReg = ARMV8_AARCH64_SYSREG_ID_CREATE(uOp0, uOp1, uCRn, uCRm, uOp2);
    LogFlowFunc(("fRead=%RTbool uCRm=%u uReg=%u uCRn=%u uOp1=%u uOp2=%u uOp0=%u idSysReg=%#x\n",
                 fRead, uCRm, uReg, uCRn, uOp1, uOp2, uOp0, idSysReg));

    /** @todo EMEXITTYPE_MSR_READ/EMEXITTYPE_MSR_WRITE are misnomers. */
    EMHistoryAddExit(pVCpu,
                     fRead
                     ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ)
                     : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE),
                     pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());

    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    uint64_t u64Val = 0;
    if (fRead)
    {
        RT_NOREF(pVM);
        rcStrict = CPUMQueryGuestSysReg(pVCpu, idSysReg, &u64Val);
        Log4(("SysInsnExit/%u: %08RX64: READ %u:%u:%u:%u:%u -> %#RX64 rcStrict=%Rrc\n",
              pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, uOp0, uOp1, uCRn, uCRm, uOp2, u64Val,
              VBOXSTRICTRC_VAL(rcStrict) ));
        if (rcStrict == VINF_SUCCESS)
            nemR3DarwinSetGReg(pVCpu, uReg, true /*f64BitReg*/, false /*fSignExtend*/, u64Val);
    }
    else
    {
        u64Val = nemR3DarwinGetGReg(pVCpu, uReg);
        rcStrict = CPUMSetGuestSysReg(pVCpu, idSysReg, u64Val);
        Log4(("SysInsnExit/%u: %08RX64: WRITE %u:%u:%u:%u:%u %#RX64 -> rcStrict=%Rrc\n",
              pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, uOp0, uOp1, uCRn, uCRm, uOp2, u64Val,
              VBOXSTRICTRC_VAL(rcStrict) ));
    }

    if (rcStrict == VINF_SUCCESS)
        pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);

    return rcStrict;
}


/**
 * Works on the trapped HVC instruction exception.
 *
 * @returns VBox strict status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling EMT.
 * @param   uIss    The instruction specific syndrome value.
 */
static VBOXSTRICTRC nemR3DarwinHandleExitExceptionTrappedHvcInsn(PVM pVM, PVMCPU pVCpu, uint32_t uIss)
{
    uint16_t u16Imm = ARMV8_EC_ISS_AARCH64_TRAPPED_HVC_INSN_IMM_GET(uIss);
    LogFlowFunc(("u16Imm=%#RX16\n", u16Imm));

#if 0 /** @todo For later */
    EMHistoryAddExit(pVCpu,
                     fRead
                     ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ)
                     : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE),
                     pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());
#endif

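    /* HVC immediate 0 follows the SMCCC convention: the function identifier is
       passed in W0, bit 30 selecting the 64-bit calling convention and bits
       29:24 the owning entity (ARM_SMCCC_FUNC_ID_ENTITY_STD_SEC_SERVICE covers
       the PSCI calls handled below). */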
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    if (u16Imm == 0)
    {
        /** @todo Raise exception to EL1 if PSCI not configured. */
        /** @todo Need a generic mechanism here to pass this to, GIM maybe?. */
        uint32_t uFunId = pVCpu->cpum.GstCtx.aGRegs[ARMV8_AARCH64_REG_X0].w;
        bool fHvc64 = RT_BOOL(uFunId & ARM_SMCCC_FUNC_ID_64BIT); RT_NOREF(fHvc64);
        uint32_t uEntity = ARM_SMCCC_FUNC_ID_ENTITY_GET(uFunId);
        uint32_t uFunNum = ARM_SMCCC_FUNC_ID_NUM_GET(uFunId);
        if (uEntity == ARM_SMCCC_FUNC_ID_ENTITY_STD_SEC_SERVICE)
        {
            switch (uFunNum)
            {
                case ARM_PSCI_FUNC_ID_PSCI_VERSION:
                    nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_FUNC_ID_PSCI_VERSION_SET(1, 2));
                    break;
                case ARM_PSCI_FUNC_ID_SYSTEM_OFF:
                    rcStrict = VMR3PowerOff(pVM->pUVM);
                    break;
                case ARM_PSCI_FUNC_ID_SYSTEM_RESET:
                case ARM_PSCI_FUNC_ID_SYSTEM_RESET2:
                {
                    bool fHaltOnReset;
                    int rc = CFGMR3QueryBool(CFGMR3GetChild(CFGMR3GetRoot(pVM), "PDM"), "HaltOnReset", &fHaltOnReset);
                    if (RT_SUCCESS(rc) && fHaltOnReset)
                    {
                        Log(("nemR3DarwinHandleExitExceptionTrappedHvcInsn: Halt On Reset!\n"));
                        rcStrict = VINF_EM_HALT;
                    }
                    else
                    {
                        /** @todo pVM->pdm.s.fResetFlags = fFlags; */
                        VM_FF_SET(pVM, VM_FF_RESET);
                        rcStrict = VINF_EM_RESET;
                    }
                    break;
                }
                case ARM_PSCI_FUNC_ID_CPU_ON:
                {
                    uint64_t u64TgtCpu = nemR3DarwinGetGReg(pVCpu, ARMV8_AARCH64_REG_X1);
                    RTGCPHYS GCPhysExecAddr = nemR3DarwinGetGReg(pVCpu, ARMV8_AARCH64_REG_X2);
                    uint64_t u64CtxId = nemR3DarwinGetGReg(pVCpu, ARMV8_AARCH64_REG_X3);
                    VMMR3CpuOn(pVM, u64TgtCpu & 0xff, GCPhysExecAddr, u64CtxId);
                    nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, true /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_STS_SUCCESS);
                    break;
                }
                case ARM_PSCI_FUNC_ID_PSCI_FEATURES:
                {
                    uint32_t u32FunNum = (uint32_t)nemR3DarwinGetGReg(pVCpu, ARMV8_AARCH64_REG_X1);
                    switch (u32FunNum)
                    {
                        case ARM_PSCI_FUNC_ID_PSCI_VERSION:
                        case ARM_PSCI_FUNC_ID_SYSTEM_OFF:
                        case ARM_PSCI_FUNC_ID_SYSTEM_RESET:
                        case ARM_PSCI_FUNC_ID_SYSTEM_RESET2:
                        case ARM_PSCI_FUNC_ID_CPU_ON:
                            nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0,
                                               false /*f64BitReg*/, false /*fSignExtend*/,
                                               (uint64_t)ARM_PSCI_STS_SUCCESS);
                            break;
                        default:
                            nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0,
                                               false /*f64BitReg*/, false /*fSignExtend*/,
                                               (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
                    }
                    break;
                }
                default:
                    nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
            }
        }
        else
            nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
    }
    /** @todo What to do if immediate is != 0? */

    return rcStrict;
}


/**
 * Handles an exception VM exit.
 *
 * @returns VBox strict status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling EMT.
 * @param   pExit   Pointer to the exit information.
 */
static VBOXSTRICTRC nemR3DarwinHandleExitException(PVM pVM, PVMCPU pVCpu, const hv_vcpu_exit_t *pExit)
{
    uint32_t uEc = ARMV8_ESR_EL2_EC_GET(pExit->exception.syndrome);
    uint32_t uIss = ARMV8_ESR_EL2_ISS_GET(pExit->exception.syndrome);
    bool fInsn32Bit = ARMV8_ESR_EL2_IL_IS_32BIT(pExit->exception.syndrome);

    LogFlowFunc(("pVM=%p pVCpu=%p{.idCpu=%u} uEc=%u{%s} uIss=%#RX32 fInsn32Bit=%RTbool\n",
                 pVM, pVCpu, pVCpu->idCpu, uEc, nemR3DarwinEsrEl2EcStringify(uEc), uIss, fInsn32Bit));

    switch (uEc)
    {
        case ARMV8_ESR_EL2_DATA_ABORT_FROM_LOWER_EL:
            return nemR3DarwinHandleExitExceptionDataAbort(pVM, pVCpu, uIss, fInsn32Bit, pExit->exception.virtual_address,
                                                           pExit->exception.physical_address);
        case ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_SYS_INSN:
            return nemR3DarwinHandleExitExceptionTrappedSysInsn(pVM, pVCpu, uIss, fInsn32Bit);
        case ARMV8_ESR_EL2_EC_AARCH64_HVC_INSN:
            return nemR3DarwinHandleExitExceptionTrappedHvcInsn(pVM, pVCpu, uIss);
        case ARMV8_ESR_EL2_EC_TRAPPED_WFX:
        {
            /* No need to halt if there is an interrupt pending already. */
            if (VMCPU_FF_IS_ANY_SET(pVCpu, (VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ)))
                return VINF_SUCCESS;

            /* Set the vTimer expiration in order to get out of the halt at the right point in time. */
            if (   (pVCpu->cpum.GstCtx.CntvCtlEl0 & ARMV8_CNTV_CTL_EL0_AARCH64_ENABLE)
                && !(pVCpu->cpum.GstCtx.CntvCtlEl0 & ARMV8_CNTV_CTL_EL0_AARCH64_IMASK))
            {
                uint64_t cTicksVTimer = mach_absolute_time() - pVM->nem.s.u64VTimerOff;

                /* Check whether it expired and start executing guest code. */
                if (cTicksVTimer >= pVCpu->cpum.GstCtx.CntvCValEl0)
                    return VINF_SUCCESS;

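                /* Remaining ticks -> nanoseconds: cNanoSecs = cTicks * 10^9 / CNTFRQ;
                   ASMMultU64ByU32DivByU32() does the multiplication and division
                   without overflowing the intermediate result. */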
                uint64_t cTicksVTimerToExpire = pVCpu->cpum.GstCtx.CntvCValEl0 - cTicksVTimer;
                uint64_t cNanoSecsVTimerToExpire = ASMMultU64ByU32DivByU32(cTicksVTimerToExpire, RT_NS_1SEC, (uint32_t)pVM->nem.s.u64CntFrqHz);

                /*
                 * Our halt method doesn't work with sub-millisecond granularity at the moment, causing a huge slowdown
                 * and scheduling overhead which would increase the wakeup latency.
                 * So only halt when the threshold below is exceeded (this needs more experimentation, but 2ms turned out
                 * to be a good compromise between CPU load when the guest is idle and performance).
                 */
                if (cNanoSecsVTimerToExpire < 2 * RT_NS_1MS)
                    return VINF_SUCCESS;

                LogFlowFunc(("Set vTimer activation to cNanoSecsVTimerToExpire=%#RX64 (CntvCValEl0=%#RX64, u64VTimerOff=%#RX64 cTicksVTimer=%#RX64 u64CntFrqHz=%#RX64)\n",
                             cNanoSecsVTimerToExpire, pVCpu->cpum.GstCtx.CntvCValEl0, pVM->nem.s.u64VTimerOff, cTicksVTimer, pVM->nem.s.u64CntFrqHz));
                TMCpuSetVTimerNextActivation(pVCpu, cNanoSecsVTimerToExpire);
            }
            else
                TMCpuSetVTimerNextActivation(pVCpu, UINT64_MAX);

            return VINF_EM_HALT;
        }
        case ARMV8_ESR_EL2_EC_UNKNOWN:
        default:
            LogRel(("NEM/Darwin: Unknown Exception Class in syndrome: uEc=%u{%s} uIss=%#RX32 fInsn32Bit=%RTbool\n",
                    uEc, nemR3DarwinEsrEl2EcStringify(uEc), uIss, fInsn32Bit));
            AssertReleaseFailed();
            return VERR_NOT_IMPLEMENTED;
    }

    return VINF_SUCCESS;
}


/**
 * Handles an exit from hv_vcpu_run().
 *
 * @returns VBox strict status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling EMT.
 */
static VBOXSTRICTRC nemR3DarwinHandleExit(PVM pVM, PVMCPU pVCpu)
{
    int rc = nemR3DarwinCopyStateFromHv(pVM, pVCpu, CPUMCTX_EXTRN_ALL);
    if (RT_FAILURE(rc))
        return rc;

#ifdef LOG_ENABLED
    if (LogIs3Enabled())
        nemR3DarwinLogState(pVM, pVCpu);
#endif

    hv_vcpu_exit_t *pExit = pVCpu->nem.s.pHvExit;
    switch (pExit->reason)
    {
        case HV_EXIT_REASON_CANCELED:
            return VINF_EM_RAW_INTERRUPT;
        case HV_EXIT_REASON_EXCEPTION:
            return nemR3DarwinHandleExitException(pVM, pVCpu, pExit);
        case HV_EXIT_REASON_VTIMER_ACTIVATED:
        {
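            /* The vTimer fired while the guest was running: stop tracking the
               activation and forward the event to the guest as the vTimer PPI. */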
            LogFlowFunc(("vTimer got activated\n"));
            TMCpuSetVTimerNextActivation(pVCpu, UINT64_MAX);
            pVCpu->nem.s.fVTimerActivated = true;
            return GICPpiSet(pVCpu, NEM_DARWIN_VTIMER_GIC_PPI_IRQ, true /*fAsserted*/);
        }
        default:
            AssertReleaseFailed();
            break;
    }

    return VERR_INVALID_STATE;
}


/**
 * Runs the guest once until an exit occurs.
 *
 * @returns HV status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
static hv_return_t nemR3DarwinRunGuest(PVM pVM, PVMCPU pVCpu)
{
    TMNotifyStartOfExecution(pVM, pVCpu);

    hv_return_t hrc = hv_vcpu_run(pVCpu->nem.s.hVCpu);

    TMNotifyEndOfExecution(pVM, pVCpu, ASMReadTSC());

    return hrc;
}


/**
 * Prepares the VM to run the guest.
 *
 * @returns Strict VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   fSingleStepping Flag whether we run in single stepping mode.
 */
static VBOXSTRICTRC nemR3DarwinPreRunGuest(PVM pVM, PVMCPU pVCpu, bool fSingleStepping)
{
#ifdef LOG_ENABLED
    bool fIrq = false;
    bool fFiq = false;

    if (LogIs3Enabled())
        nemR3DarwinLogState(pVM, pVCpu);
#endif

    /** @todo */ RT_NOREF(fSingleStepping);
    int rc = nemR3DarwinExportGuestState(pVM, pVCpu);
    AssertRCReturn(rc, rc);

    /* Check whether the vTimer interrupt was handled by the guest and we can unmask the vTimer. */
    if (pVCpu->nem.s.fVTimerActivated)
    {
        /* Read the CNTV_CTL_EL0 register. */
        uint64_t u64CntvCtl = 0;

        hv_return_t hrc = hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CTL_EL0, &u64CntvCtl);
        AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);

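        /* Deassert the PPI and unmask the vTimer only once the guest has dealt
           with the interrupt, i.e. when the timer is no longer enabled, unmasked
           and with ISTATUS still set. */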
        if (   (u64CntvCtl & (ARMV8_CNTV_CTL_EL0_AARCH64_ENABLE | ARMV8_CNTV_CTL_EL0_AARCH64_IMASK | ARMV8_CNTV_CTL_EL0_AARCH64_ISTATUS))
            != (ARMV8_CNTV_CTL_EL0_AARCH64_ENABLE | ARMV8_CNTV_CTL_EL0_AARCH64_ISTATUS))
        {
            /* Clear the interrupt. */
            GICPpiSet(pVCpu, NEM_DARWIN_VTIMER_GIC_PPI_IRQ, false /*fAsserted*/);

            pVCpu->nem.s.fVTimerActivated = false;
            hrc = hv_vcpu_set_vtimer_mask(pVCpu->nem.s.hVCpu, false /*vtimer_is_masked*/);
            AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
        }
    }

    /* Set the pending interrupt state. */
    hv_return_t hrc = HV_SUCCESS;
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ))
    {
        hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_IRQ, true);
        AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
#ifdef LOG_ENABLED
        fIrq = true;
#endif
    }
    else
    {
        hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_IRQ, false);
        AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
    }

    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_FIQ))
    {
        hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_FIQ, true);
        AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
#ifdef LOG_ENABLED
        fFiq = true;
#endif
    }
    else
    {
        hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_FIQ, false);
        AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
    }

    LogFlowFunc(("Running vCPU [%s,%s]\n", fIrq ? "I" : "nI", fFiq ? "F" : "nF"));
    pVCpu->nem.s.fEventPending = false;
    return VINF_SUCCESS;
}


/**
 * The normal runloop (no debugging features enabled).
 *
 * @returns Strict VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
static VBOXSTRICTRC nemR3DarwinRunGuestNormal(PVM pVM, PVMCPU pVCpu)
{
    /*
     * The run loop.
     *
     * Current approach to state updating to use the sledgehammer and sync
     * everything every time. This will be optimized later.
     */

    /* Update the vTimer offset after resuming if instructed. */
    if (pVCpu->nem.s.fVTimerOffUpdate)
    {
        hv_return_t hrc = hv_vcpu_set_vtimer_offset(pVCpu->nem.s.hVCpu, pVM->nem.s.u64VTimerOff);
        if (hrc != HV_SUCCESS)
            return nemR3DarwinHvSts2Rc(hrc);

        pVCpu->nem.s.fVTimerOffUpdate = false;
    }

    /*
     * Poll timers and run for a bit.
     */
    /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
     *        the whole polling job when timers have changed... */
    uint64_t offDeltaIgnored;
    uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    for (unsigned iLoop = 0;; iLoop++)
    {
        rcStrict = nemR3DarwinPreRunGuest(pVM, pVCpu, false /* fSingleStepping */);
        if (rcStrict != VINF_SUCCESS)
            break;

        hv_return_t hrc = nemR3DarwinRunGuest(pVM, pVCpu);
        if (hrc == HV_SUCCESS)
        {
            /*
             * Deal with the message.
             */
            rcStrict = nemR3DarwinHandleExit(pVM, pVCpu);
            if (rcStrict == VINF_SUCCESS)
            { /* hopefully likely */ }
            else
            {
                LogFlow(("NEM/%u: breaking: nemR3DarwinHandleExit -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
                STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
                break;
            }
        }
        else
        {
            AssertLogRelMsgFailedReturn(("hv_vcpu_run() failed for CPU #%u: %#x\n",
                                         pVCpu->idCpu, hrc), VERR_NEM_IPE_0);
        }
    } /* the run loop */

    return rcStrict;
}


VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu)
{
#ifdef LOG_ENABLED
    if (LogIs3Enabled())
        nemR3DarwinLogState(pVM, pVCpu);
#endif

    AssertReturn(NEMR3CanExecuteGuest(pVM, pVCpu), VERR_NEM_IPE_9);

    /*
     * Try switch to NEM runloop state.
     */
    if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
    { /* likely */ }
    else
    {
        VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
        LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
        return VINF_SUCCESS;
    }

    VBOXSTRICTRC rcStrict;
#if 0
    if (   !pVCpu->nem.s.fUseDebugLoop
        && !nemR3DarwinAnyExpensiveProbesEnabled()
        && !DBGFIsStepping(pVCpu)
        && !pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledInt3Breakpoints)
#endif
        rcStrict = nemR3DarwinRunGuestNormal(pVM, pVCpu);
#if 0
    else
        rcStrict = nemR3DarwinRunGuestDebug(pVM, pVCpu);
#endif

    if (rcStrict == VINF_EM_RAW_TO_R3)
        rcStrict = VINF_SUCCESS;

    /*
     * Convert any pending HM events back to TRPM due to premature exits.
     *
     * This is because execution may continue from IEM and we would need to inject
     * the event from there (hence place it back in TRPM).
     */
    if (pVCpu->nem.s.fEventPending)
    {
        /** @todo */
    }


    if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
        VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);

    if (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL))
    {
        /* Try anticipate what we might need. */
        uint64_t fImport = NEM_DARWIN_CPUMCTX_EXTRN_MASK_FOR_IEM;
        if (   (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
            || RT_FAILURE(rcStrict))
            fImport = CPUMCTX_EXTRN_ALL;
        else if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ
                                            | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
            fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;

        if (pVCpu->cpum.GstCtx.fExtrn & fImport)
        {
            /* Only import what is external currently. */
            int rc2 = nemR3DarwinCopyStateFromHv(pVM, pVCpu, fImport);
            if (RT_SUCCESS(rc2))
                pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
            else if (RT_SUCCESS(rcStrict))
                rcStrict = rc2;
            if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
                pVCpu->cpum.GstCtx.fExtrn = 0;
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
        }
        else
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
    }
    else
    {
        STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
        pVCpu->cpum.GstCtx.fExtrn = 0;
    }

    return rcStrict;
}


VMMR3_INT_DECL(bool) NEMR3CanExecuteGuest(PVM pVM, PVMCPU pVCpu)
{
    RT_NOREF(pVM, pVCpu);
    return true; /** @todo Are there any cases where we have to emulate? */
}


bool nemR3NativeSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
{
    VMCPU_ASSERT_EMT(pVCpu);
    bool fOld = pVCpu->nem.s.fSingleInstruction;
    pVCpu->nem.s.fSingleInstruction = fEnable;
    pVCpu->nem.s.fUseDebugLoop = fEnable || pVM->nem.s.fUseDebugLoop;
    return fOld;
}


void nemR3NativeNotifyFF(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
{
    LogFlowFunc(("pVM=%p pVCpu=%p fFlags=%#x\n", pVM, pVCpu, fFlags));

    RT_NOREF(pVM, fFlags);

    hv_return_t hrc = hv_vcpus_exit(&pVCpu->nem.s.hVCpu, 1);
    if (hrc != HV_SUCCESS)
        LogRel(("NEM: hv_vcpus_exit(%u, 1) failed with %#x\n", pVCpu->nem.s.hVCpu, hrc));
}


DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChanged(PVM pVM, bool fUseDebugLoop)
{
    RT_NOREF(pVM, fUseDebugLoop);
    //AssertReleaseFailed();
    return false;
}


DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu, bool fUseDebugLoop)
{
    RT_NOREF(pVM, pVCpu, fUseDebugLoop);
    return fUseDebugLoop;
}


VMMR3_INT_DECL(int) NEMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvR3,
                                               uint8_t *pu2State, uint32_t *puNemRange)
{
    RT_NOREF(pVM, puNemRange);

    Log5(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p\n", GCPhys, cb, pvR3));
#if defined(VBOX_WITH_PGM_NEM_MODE)
    if (pvR3)
    {
        int rc = nemR3DarwinMap(pVM, GCPhys, pvR3, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
        if (RT_FAILURE(rc))
        {
            LogRel(("NEMR3NotifyPhysRamRegister: GCPhys=%RGp LB %RGp pvR3=%p rc=%Rrc\n", GCPhys, cb, pvR3, rc));
            return VERR_NEM_MAP_PAGES_FAILED;
        }
    }
    return VINF_SUCCESS;
#else
    RT_NOREF(pVM, GCPhys, cb, pvR3);
    return VERR_NEM_MAP_PAGES_FAILED;
#endif
}


VMMR3_INT_DECL(bool) NEMR3IsMmio2DirtyPageTrackingSupported(PVM pVM)
{
    RT_NOREF(pVM);
    return false;
}


VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
                                                  void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
{
    RT_NOREF(pVM, puNemRange, pvRam, fFlags);

    Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p (%d)\n",
          GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, *pu2State));

#if defined(VBOX_WITH_PGM_NEM_MODE)
    /*
     * Unmap the RAM we're replacing.
     */
    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
    {
        int rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
        if (RT_SUCCESS(rc))
        { /* likely */ }
        else if (pvMmio2)
            LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc (ignored)\n",
                    GCPhys, cb, fFlags, rc));
        else
        {
            LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc\n",
                    GCPhys, cb, fFlags, rc));
            return VERR_NEM_UNMAP_PAGES_FAILED;
        }
    }

    /*
     * Map MMIO2 if any.
     */
    if (pvMmio2)
    {
        Assert(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2);
        int rc = nemR3DarwinMap(pVM, GCPhys, pvMmio2, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
        if (RT_FAILURE(rc))
        {
            LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x pvMmio2=%p: Map -> rc=%Rrc\n",
                    GCPhys, cb, fFlags, pvMmio2, rc));
            return VERR_NEM_MAP_PAGES_FAILED;
        }
    }
    else
        Assert(!(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2));

#else
    RT_NOREF(pVM, GCPhys, cb, pvRam, pvMmio2);
    *pu2State = (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE) ? UINT8_MAX : NEM_DARWIN_PAGE_STATE_UNMAPPED;
#endif
    return VINF_SUCCESS;
}


VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
                                                 void *pvRam, void *pvMmio2, uint32_t *puNemRange)
{
    RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, puNemRange);
    return VINF_SUCCESS;
}


VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExUnmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, void *pvRam,
                                               void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
{
    RT_NOREF(pVM, puNemRange);

    Log5(("NEMR3NotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p uNemRange=%#x (%#x)\n",
          GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, puNemRange, *puNemRange));

    int rc = VINF_SUCCESS;
#if defined(VBOX_WITH_PGM_NEM_MODE)
    /*
     * Unmap the MMIO2 pages.
     */
    /** @todo If we implement aliasing (MMIO2 page aliased into MMIO range),
     *        we may have more stuff to unmap even in case of pure MMIO... */
    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2)
    {
        rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
        if (RT_FAILURE(rc))
        {
            LogRel2(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc\n",
                     GCPhys, cb, fFlags, rc));
            rc = VERR_NEM_UNMAP_PAGES_FAILED;
        }
    }

1621 /* Ensure the page is masked as unmapped if relevant. */
1622 Assert(!pu2State || *pu2State == NEM_DARWIN_PAGE_STATE_UNMAPPED);
1623
1624 /*
1625 * Restore the RAM we replaced.
1626 */
1627 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
1628 {
1629 AssertPtr(pvRam);
1630 rc = nemR3DarwinMap(pVM, GCPhys, pvRam, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
1631 if (RT_SUCCESS(rc))
1632 { /* likely */ }
1633 else
1634 {
1635 LogRel(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp pvMmio2=%p rc=%Rrc\n", GCPhys, cb, pvMmio2, rc));
1636 rc = VERR_NEM_MAP_PAGES_FAILED;
1637 }
1638 }
1639
1640 RT_NOREF(pvMmio2);
1641#else
1642 RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State);
1643 if (pu2State)
1644 *pu2State = UINT8_MAX;
1645 rc = VERR_NEM_UNMAP_PAGES_FAILED;
1646#endif
1647 return rc;
1648}
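
/*
 * Illustration only: the unmap side is assumed to come down to hv_vm_unmap()
 * on the intermediate physical address range, roughly as sketched below.
 * The page-state update is likewise an assumption for illustration, not the
 * actual nemR3DarwinUnmap() implementation (which lives earlier in this file).
 *
 * @code
 *     hv_return_t hrc = hv_vm_unmap(GCPhys, cb);
 *     if (hrc != HV_SUCCESS)
 *         return VERR_NEM_UNMAP_PAGES_FAILED;
 *     if (pu2State)
 *         *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
 * @endcode
 */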


VMMR3_INT_DECL(int) NEMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t uNemRange,
                                                           void *pvBitmap, size_t cbBitmap)
{
    RT_NOREF(pVM, GCPhys, cb, uNemRange, pvBitmap, cbBitmap);
    AssertReleaseFailed();
    return VERR_NOT_IMPLEMENTED;
}


VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages, uint32_t fFlags,
                                                    uint8_t *pu2State, uint32_t *puNemRange)
{
    RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);

    Log5(("NEMR3NotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags));
    *pu2State = UINT8_MAX;
    *puNemRange = 0;
    return VINF_SUCCESS;
}


VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages,
                                                   uint32_t fFlags, uint8_t *pu2State, uint32_t *puNemRange)
{
    Log5(("NEMR3NotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n",
          GCPhys, cb, pvPages, fFlags, pu2State, *pu2State, puNemRange, *puNemRange));
    *pu2State = UINT8_MAX;

#if defined(VBOX_WITH_PGM_NEM_MODE)
    /*
     * (Re-)map read-only.
     */
    AssertPtrReturn(pvPages, VERR_INVALID_POINTER);
    int rc = nemR3DarwinMap(pVM, GCPhys, pvPages, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE, pu2State);
    if (RT_FAILURE(rc))
    {
        LogRel(("NEMR3NotifyPhysRomRegisterLate: GCPhys=%RGp LB %RGp pvPages=%p fFlags=%#x rc=%Rrc\n",
                GCPhys, cb, pvPages, fFlags, rc));
        return VERR_NEM_MAP_PAGES_FAILED;
    }
    RT_NOREF(fFlags, puNemRange);
    return VINF_SUCCESS;
#else
    RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);
    return VERR_NEM_MAP_PAGES_FAILED;
#endif
}


VMM_INT_DECL(void) NEMHCNotifyHandlerPhysicalDeregister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
                                                        RTR3PTR pvMemR3, uint8_t *pu2State)
{
    RT_NOREF(pVM);

    Log5(("NEMHCNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d pvMemR3=%p pu2State=%p (%d)\n",
          GCPhys, cb, enmKind, pvMemR3, pu2State, *pu2State));

    *pu2State = UINT8_MAX;
#if defined(VBOX_WITH_PGM_NEM_MODE)
    if (pvMemR3)
    {
        int rc = nemR3DarwinMap(pVM, GCPhys, pvMemR3, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
        AssertLogRelMsgRC(rc, ("NEMHCNotifyHandlerPhysicalDeregister: nemR3DarwinMap(,%p,%RGp,%RGp,) -> %Rrc\n",
                               pvMemR3, GCPhys, cb, rc));
    }
    RT_NOREF(enmKind);
#else
    RT_NOREF(pVM, enmKind, GCPhys, cb, pvMemR3);
    AssertFailed();
#endif
}


VMMR3_INT_DECL(void) NEMR3NotifySetA20(PVMCPU pVCpu, bool fEnabled)
{
    Log(("NEMR3NotifySetA20: fEnabled=%RTbool\n", fEnabled));
    RT_NOREF(pVCpu, fEnabled);
}


void nemHCNativeNotifyHandlerPhysicalRegister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
{
    Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
    NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb);
}


void nemHCNativeNotifyHandlerPhysicalModify(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
                                            RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
{
    Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
          GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
    NOREF(pVM); NOREF(enmKind); NOREF(GCPhysOld); NOREF(GCPhysNew); NOREF(cb); NOREF(fRestoreAsRAM);
}


int nemHCNativeNotifyPhysPageAllocated(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
                                       PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhys, fPageProt, enmType, *pu2State));
    RT_NOREF(HCPhys, fPageProt, enmType);

    return nemR3DarwinUnmap(pVM, GCPhys, GUEST_PAGE_SIZE, pu2State);
}


VMM_INT_DECL(void) NEMHCNotifyPhysPageProtChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, RTR3PTR pvR3, uint32_t fPageProt,
                                                  PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhys, fPageProt, enmType, *pu2State));
    RT_NOREF(HCPhys, pvR3, fPageProt, enmType);

    nemR3DarwinUnmap(pVM, GCPhys, GUEST_PAGE_SIZE, pu2State);
}


VMM_INT_DECL(void) NEMHCNotifyPhysPageChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
                                              RTR3PTR pvNewR3, uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("NEMHCNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhysPrev, HCPhysNew, fPageProt, enmType, *pu2State));
    RT_NOREF(HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType);

    nemR3DarwinUnmap(pVM, GCPhys, GUEST_PAGE_SIZE, pu2State);
}
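
/*
 * All three page-change notifications above funnel into the same
 * page-granular unmap: dropping the stage-2 mapping means the next guest
 * access faults and the page gets remapped with up-to-date backing and
 * protection.  A hypothetical common helper (illustration only, not a real
 * helper in this file) would look like this:
 *
 * @code
 * static void nemR3DarwinForgetPage(PVMCC pVM, RTGCPHYS GCPhys, uint8_t *pu2State)
 * {
 *     // Drop the mapping; a subsequent guest access faults and remaps it.
 *     int rc = nemR3DarwinUnmap(pVM, GCPhys, GUEST_PAGE_SIZE, pu2State);
 *     AssertLogRelRC(rc);
 * }
 * @endcode
 */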


/**
 * Interface for importing state on demand (used by IEM).
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context CPU structure.
 * @param   fWhat   What to import, CPUMCTX_EXTRN_XXX.
 */
VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
{
    LogFlowFunc(("pVCpu=%p fWhat=%RX64\n", pVCpu, fWhat));
    STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);

    return nemR3DarwinCopyStateFromHv(pVCpu->pVMR3, pVCpu, fWhat);
}


/**
 * Query the CPU tick counter and optionally the TSC_AUX MSR value.
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context CPU structure.
 * @param   pcTicks Where to return the CPU tick count.
 * @param   puAux   Where to return the TSC_AUX register value
 *                  (always set to zero by this backend).
 */
VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPUCC pVCpu, uint64_t *pcTicks, uint32_t *puAux)
{
    LogFlowFunc(("pVCpu=%p pcTicks=%p puAux=%p\n", pVCpu, pcTicks, puAux));
    STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick);

    if (puAux)
        *puAux = 0;
    *pcTicks = mach_absolute_time() - pVCpu->pVMR3->nem.s.u64VTimerOff; /* This is the host timer minus the offset. */
    return VINF_SUCCESS;
}
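
/*
 * Illustration only: how the guest-visible tick relates to host time.  On
 * Apple Silicon hosts mach_absolute_time() typically ticks at 24 MHz, with
 * mach_timebase_info() reporting numer=125, denom=3 for the tick-to-ns
 * conversion; that is an observation about common hardware, not something
 * this code relies on.
 *
 * @code
 *     uint64_t const cTicksGuest = mach_absolute_time() - pVM->nem.s.u64VTimerOff;
 *
 *     mach_timebase_info_data_t TimebaseInfo;
 *     mach_timebase_info(&TimebaseInfo);
 *     uint64_t const cNsGuest = cTicksGuest * TimebaseInfo.numer / TimebaseInfo.denom;
 * @endcode
 */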


/**
 * Resumes CPU clock (TSC) on all virtual CPUs.
 *
 * This is called by TM when the VM is started, restored, resumed or similar.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context CPU structure of the calling EMT.
 * @param   uPausedTscValue The TSC value at the time of pausing.
 */
VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uPausedTscValue)
{
    LogFlowFunc(("pVM=%p pVCpu=%p uPausedTscValue=%RX64\n", pVM, pVCpu, uPausedTscValue));
    VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
    AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);

    /*
     * Calculate the new offset: first get the current TSC value with the old vTimer
     * offset, then adjust the offset so the guest doesn't notice the pause.
     */
    uint64_t u64TscNew = mach_absolute_time() - pVCpu->pVMR3->nem.s.u64VTimerOff;
    Assert(u64TscNew >= uPausedTscValue);
    LogFlowFunc(("u64VTimerOffOld=%#RX64 u64TscNew=%#RX64 u64VTimerValuePaused=%#RX64 -> u64VTimerOff=%#RX64\n",
                 pVM->nem.s.u64VTimerOff, u64TscNew, uPausedTscValue,
                 pVM->nem.s.u64VTimerOff + (u64TscNew - uPausedTscValue)));

    pVM->nem.s.u64VTimerOff += u64TscNew - uPausedTscValue;

    /*
     * Set the flag to update the vTimer offset when the vCPU resumes for the first time
     * (needs to be done on the actual EMT).
     */
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPUCC pVCpuDst = pVM->apCpusR3[idCpu];
        pVCpuDst->nem.s.fVTimerOffUpdate = true;
    }

    return VINF_SUCCESS;
}
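
/*
 * Worked example with made-up numbers: suppose the guest was paused when its
 * vTimer read uPausedTscValue = 0x1000, and at resume
 * mach_absolute_time() - u64VTimerOff yields u64TscNew = 0x5000, i.e. 0x4000
 * host ticks elapsed while paused.  Growing u64VTimerOff by 0x4000 makes the
 * very next NEMHCQueryCpuTick() read 0x1000 again, so the guest clock
 * continues seamlessly from where it was paused.
 *
 * @code
 *     uint64_t const uPausedTscValue = UINT64_C(0x1000);
 *     uint64_t const u64TscNew       = UINT64_C(0x5000); // with the old offset
 *     u64VTimerOff += u64TscNew - uPausedTscValue;       // += 0x4000
 *     // mach_absolute_time() - u64VTimerOff == 0x1000 again at this instant.
 * @endcode
 */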


/**
 * Returns features supported by the NEM backend.
 *
 * @returns Flags of features supported by the native NEM backend.
 * @param   pVM The cross context VM structure.
 */
VMM_INT_DECL(uint32_t) NEMHCGetFeatures(PVMCC pVM)
{
    RT_NOREF(pVM);
    /*
     * Apple's Hypervisor.framework is not supported on CPUs lacking nested paging
     * and unrestricted guest execution, so we can always safely return these flags here.
     */
    return NEM_FEAT_F_NESTED_PAGING | NEM_FEAT_F_FULL_GST_EXEC | NEM_FEAT_F_XSAVE_XRSTOR;
}


/** @page pg_nem_darwin NEM/darwin - Native Execution Manager, macOS.
 *
 * @todo Add notes as the implementation progresses...
 */