VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/NEMR3Native-darwin-armv8.cpp@108400

Last change on this file since 108400 was 107933, checked in by vboxsync, 3 months ago

include/iprt/armv8.h,VMM: Drop the deprecated ARMV8_AARCH64_REG_XXX defines and replace where still being used

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 131.7 KB
/* $Id: NEMR3Native-darwin-armv8.cpp 107933 2025-01-24 11:22:16Z vboxsync $ */
/** @file
 * NEM - Native execution manager, native ring-3 macOS backend using Hypervisor.framework, ARMv8 variant.
 *
 * Log group 2: Exit logging.
 * Log group 3: Log context on exit.
 * Log group 5: Ring-3 memory management
 */

/*
 * Copyright (C) 2023-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_NEM
#define VMCPU_INCL_CPUM_GST_CTX
#include <VBox/vmm/nem.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/pdmgic.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/dbgftrace.h>
#include <VBox/vmm/gcm.h>
#include "NEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/vmm/vmm.h>
#include <VBox/dis.h>
#include <VBox/gic.h>
#include "dtrace/VBoxVMM.h"

#include <iprt/armv8.h>
#include <iprt/asm.h>
#include <iprt/asm-arm.h>
#include <iprt/asm-math.h>
#include <iprt/ldr.h>
#include <iprt/mem.h>
#include <iprt/path.h>
#include <iprt/string.h>
#include <iprt/system.h>
#include <iprt/utf16.h>

#include <iprt/formats/arm-psci.h>

#include <mach/mach_time.h>
#include <mach/kern_return.h>

#include <Hypervisor/Hypervisor.h>


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/

#if MAC_OS_X_VERSION_MIN_REQUIRED < 150000

/* These GIC types were introduced with the macOS 15.0 SDK; declare fallbacks when building against older SDKs. */
typedef enum hv_gic_distributor_reg_t : uint16_t
{
    HV_GIC_DISTRIBUTOR_REG_GICD_CTLR,
    HV_GIC_DISTRIBUTOR_REG_GICD_ICACTIVER0
    /** @todo */
} hv_gic_distributor_reg_t;


typedef enum hv_gic_icc_reg_t : uint16_t
{
    HV_GIC_ICC_REG_PMR_EL1,
    HV_GIC_ICC_REG_BPR0_EL1,
    HV_GIC_ICC_REG_AP0R0_EL1,
    HV_GIC_ICC_REG_AP1R0_EL1,
    HV_GIC_ICC_REG_RPR_EL1,
    HV_GIC_ICC_REG_BPR1_EL1,
    HV_GIC_ICC_REG_CTLR_EL1,
    HV_GIC_ICC_REG_SRE_EL1,
    HV_GIC_ICC_REG_IGRPEN0_EL1,
    HV_GIC_ICC_REG_IGRPEN1_EL1,
    HV_GIC_ICC_REG_INVALID,
    /** @todo */
} hv_gic_icc_reg_t;


typedef enum hv_gic_ich_reg_t : uint16_t
{
    HV_GIC_ICH_REG_AP0R0_EL2
    /** @todo */
} hv_gic_ich_reg_t;


typedef enum hv_gic_icv_reg_t : uint16_t
{
    HV_GIC_ICV_REG_AP0R0_EL1
    /** @todo */
} hv_gic_icv_reg_t;


typedef enum hv_gic_msi_reg_t : uint16_t
{
    HV_GIC_REG_GICM_SET_SPI_NSR
    /** @todo */
} hv_gic_msi_reg_t;


typedef enum hv_gic_redistributor_reg_t : uint16_t
{
    HV_GIC_REDISTRIBUTOR_REG_GICR_ICACTIVER0
    /** @todo */
} hv_gic_redistributor_reg_t;


typedef enum hv_gic_intid_t : uint16_t
{
    HV_GIC_INT_EL1_PHYSICAL_TIMER = 23,
    HV_GIC_INT_EL1_VIRTUAL_TIMER = 25,
    HV_GIC_INT_EL2_PHYSICAL_TIMER = 26,
    HV_GIC_INT_MAINTENANCE = 27,
    HV_GIC_INT_PERFORMANCE_MONITOR = 30
} hv_gic_intid_t;

#else
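/* Building against the 15.0+ SDK the real enums are available; only the
 * invalid marker used by nemR3DarwinIccRegFromSysReg() below needs defining. */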
# define HV_GIC_ICC_REG_INVALID (hv_gic_icc_reg_t)UINT16_MAX
#endif

typedef hv_vm_config_t FN_HV_VM_CONFIG_CREATE(void);
typedef hv_return_t FN_HV_VM_CONFIG_GET_EL2_SUPPORTED(bool *el2_supported);
typedef hv_return_t FN_HV_VM_CONFIG_GET_EL2_ENABLED(hv_vm_config_t config, bool *el2_enabled);
typedef hv_return_t FN_HV_VM_CONFIG_SET_EL2_ENABLED(hv_vm_config_t config, bool el2_enabled);

typedef struct hv_gic_config_s *hv_gic_config_t;
typedef hv_return_t FN_HV_GIC_CREATE(hv_gic_config_t gic_config);
typedef hv_return_t FN_HV_GIC_RESET(void);
typedef hv_gic_config_t FN_HV_GIC_CONFIG_CREATE(void);
typedef hv_return_t FN_HV_GIC_CONFIG_SET_DISTRIBUTOR_BASE(hv_gic_config_t config, hv_ipa_t distributor_base_address);
typedef hv_return_t FN_HV_GIC_CONFIG_SET_REDISTRIBUTOR_BASE(hv_gic_config_t config, hv_ipa_t redistributor_base_address);
typedef hv_return_t FN_HV_GIC_CONFIG_SET_MSI_REGION_BASE(hv_gic_config_t config, hv_ipa_t msi_region_base_address);
typedef hv_return_t FN_HV_GIC_CONFIG_SET_MSI_INTERRUPT_RANGE(hv_gic_config_t config, uint32_t msi_intid_base, uint32_t msi_intid_count);

typedef hv_return_t FN_HV_GIC_GET_REDISTRIBUTOR_BASE(hv_vcpu_t vcpu, hv_ipa_t *redistributor_base_address);
typedef hv_return_t FN_HV_GIC_GET_REDISTRIBUTOR_REGION_SIZE(size_t *redistributor_region_size);
typedef hv_return_t FN_HV_GIC_GET_REDISTRIBUTOR_SIZE(size_t *redistributor_size);
typedef hv_return_t FN_HV_GIC_GET_DISTRIBUTOR_SIZE(size_t *distributor_size);
typedef hv_return_t FN_HV_GIC_GET_DISTRIBUTOR_BASE_ALIGNMENT(size_t *distributor_base_alignment);
typedef hv_return_t FN_HV_GIC_GET_REDISTRIBUTOR_BASE_ALIGNMENT(size_t *redistributor_base_alignment);
typedef hv_return_t FN_HV_GIC_GET_MSI_REGION_BASE_ALIGNMENT(size_t *msi_region_base_alignment);
typedef hv_return_t FN_HV_GIC_GET_MSI_REGION_SIZE(size_t *msi_region_size);
typedef hv_return_t FN_HV_GIC_GET_SPI_INTERRUPT_RANGE(uint32_t *spi_intid_base, uint32_t *spi_intid_count);

typedef struct hv_gic_state_s *hv_gic_state_t;
typedef hv_gic_state_t FN_HV_GIC_STATE_CREATE(void);
typedef hv_return_t FN_HV_GIC_SET_STATE(const void *gic_state_data, size_t gic_state_size);
typedef hv_return_t FN_HV_GIC_STATE_GET_SIZE(hv_gic_state_t state, size_t *gic_state_size);
typedef hv_return_t FN_HV_GIC_STATE_GET_DATA(hv_gic_state_t state, void *gic_state_data);

typedef hv_return_t FN_HV_GIC_SEND_MSI(hv_ipa_t address, uint32_t intid);
typedef hv_return_t FN_HV_GIC_SET_SPI(uint32_t intid, bool level);

typedef hv_return_t FN_HV_GIC_GET_DISTRIBUTOR_REG(hv_gic_distributor_reg_t reg, uint64_t *value);
typedef hv_return_t FN_HV_GIC_GET_MSI_REG(hv_gic_msi_reg_t reg, uint64_t *value);
typedef hv_return_t FN_HV_GIC_GET_ICC_REG(hv_vcpu_t vcpu, hv_gic_icc_reg_t reg, uint64_t *value);
typedef hv_return_t FN_HV_GIC_GET_ICH_REG(hv_vcpu_t vcpu, hv_gic_ich_reg_t reg, uint64_t *value);
typedef hv_return_t FN_HV_GIC_GET_ICV_REG(hv_vcpu_t vcpu, hv_gic_icv_reg_t reg, uint64_t *value);
typedef hv_return_t FN_HV_GIC_GET_REDISTRIBUTOR_REG(hv_vcpu_t vcpu, hv_gic_redistributor_reg_t reg, uint64_t *value);

typedef hv_return_t FN_HV_GIC_SET_DISTRIBUTOR_REG(hv_gic_distributor_reg_t reg, uint64_t value);
typedef hv_return_t FN_HV_GIC_SET_MSI_REG(hv_gic_msi_reg_t reg, uint64_t value);
typedef hv_return_t FN_HV_GIC_SET_ICC_REG(hv_vcpu_t vcpu, hv_gic_icc_reg_t reg, uint64_t value);
typedef hv_return_t FN_HV_GIC_SET_ICH_REG(hv_vcpu_t vcpu, hv_gic_ich_reg_t reg, uint64_t value);
typedef hv_return_t FN_HV_GIC_SET_ICV_REG(hv_vcpu_t vcpu, hv_gic_icv_reg_t reg, uint64_t value);
typedef hv_return_t FN_HV_GIC_SET_REDISTRIBUTOR_REG(hv_vcpu_t vcpu, hv_gic_redistributor_reg_t reg, uint64_t value);

typedef hv_return_t FN_HV_GIC_GET_INTID(hv_gic_intid_t interrupt, uint32_t *intid);


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** @name Optional APIs imported from Hypervisor.framework.
 * @{ */
static FN_HV_VM_CONFIG_CREATE *g_pfnHvVmConfigCreate = NULL; /* Since 13.0 */
static FN_HV_VM_CONFIG_GET_EL2_SUPPORTED *g_pfnHvVmConfigGetEl2Supported = NULL; /* Since 15.0 */
static FN_HV_VM_CONFIG_GET_EL2_ENABLED *g_pfnHvVmConfigGetEl2Enabled = NULL; /* Since 15.0 */
static FN_HV_VM_CONFIG_SET_EL2_ENABLED *g_pfnHvVmConfigSetEl2Enabled = NULL; /* Since 15.0 */

static FN_HV_GIC_CREATE *g_pfnHvGicCreate = NULL; /* Since 15.0 */
static FN_HV_GIC_RESET *g_pfnHvGicReset = NULL; /* Since 15.0 */
static FN_HV_GIC_CONFIG_CREATE *g_pfnHvGicConfigCreate = NULL; /* Since 15.0 */
static FN_HV_GIC_CONFIG_SET_DISTRIBUTOR_BASE *g_pfnHvGicConfigSetDistributorBase = NULL; /* Since 15.0 */
static FN_HV_GIC_CONFIG_SET_REDISTRIBUTOR_BASE *g_pfnHvGicConfigSetRedistributorBase = NULL; /* Since 15.0 */
static FN_HV_GIC_CONFIG_SET_MSI_REGION_BASE *g_pfnHvGicConfigSetMsiRegionBase = NULL; /* Since 15.0 */
static FN_HV_GIC_CONFIG_SET_MSI_INTERRUPT_RANGE *g_pfnHvGicConfigSetMsiInterruptRange = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_REDISTRIBUTOR_BASE *g_pfnHvGicGetRedistributorBase = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_REDISTRIBUTOR_REGION_SIZE *g_pfnHvGicGetRedistributorRegionSize = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_REDISTRIBUTOR_SIZE *g_pfnHvGicGetRedistributorSize = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_DISTRIBUTOR_SIZE *g_pfnHvGicGetDistributorSize = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_DISTRIBUTOR_BASE_ALIGNMENT *g_pfnHvGicGetDistributorBaseAlignment = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_REDISTRIBUTOR_BASE_ALIGNMENT *g_pfnHvGicGetRedistributorBaseAlignment = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_MSI_REGION_BASE_ALIGNMENT *g_pfnHvGicGetMsiRegionBaseAlignment = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_MSI_REGION_SIZE *g_pfnHvGicGetMsiRegionSize = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_SPI_INTERRUPT_RANGE *g_pfnHvGicGetSpiInterruptRange = NULL; /* Since 15.0 */
static FN_HV_GIC_STATE_CREATE *g_pfnHvGicStateCreate = NULL; /* Since 15.0 */
static FN_HV_GIC_SET_STATE *g_pfnHvGicSetState = NULL; /* Since 15.0 */
static FN_HV_GIC_STATE_GET_SIZE *g_pfnHvGicStateGetSize = NULL; /* Since 15.0 */
static FN_HV_GIC_STATE_GET_DATA *g_pfnHvGicStateGetData = NULL; /* Since 15.0 */
static FN_HV_GIC_SEND_MSI *g_pfnHvGicSendMsi = NULL; /* Since 15.0 */
static FN_HV_GIC_SET_SPI *g_pfnHvGicSetSpi = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_DISTRIBUTOR_REG *g_pfnHvGicGetDistributorReg = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_MSI_REG *g_pfnHvGicGetMsiReg = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_ICC_REG *g_pfnHvGicGetIccReg = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_ICH_REG *g_pfnHvGicGetIchReg = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_ICV_REG *g_pfnHvGicGetIcvReg = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_REDISTRIBUTOR_REG *g_pfnHvGicGetRedistributorReg = NULL; /* Since 15.0 */
static FN_HV_GIC_SET_DISTRIBUTOR_REG *g_pfnHvGicSetDistributorReg = NULL; /* Since 15.0 */
static FN_HV_GIC_SET_MSI_REG *g_pfnHvGicSetMsiReg = NULL; /* Since 15.0 */
static FN_HV_GIC_SET_ICC_REG *g_pfnHvGicSetIccReg = NULL; /* Since 15.0 */
static FN_HV_GIC_SET_ICH_REG *g_pfnHvGicSetIchReg = NULL; /* Since 15.0 */
static FN_HV_GIC_SET_ICV_REG *g_pfnHvGicSetIcvReg = NULL; /* Since 15.0 */
static FN_HV_GIC_SET_REDISTRIBUTOR_REG *g_pfnHvGicSetRedistributorReg = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_INTID *g_pfnHvGicGetIntid = NULL; /* Since 15.0 */
/** @} */


/**
 * Import instructions.
 */
static const struct
{
    void **ppfn; /**< The function pointer variable. */
    const char *pszName; /**< The function name. */
} g_aImports[] =
{
#define NEM_DARWIN_IMPORT(a_Pfn, a_Name) { (void **)&(a_Pfn), #a_Name }
    NEM_DARWIN_IMPORT(g_pfnHvVmConfigCreate, hv_vm_config_create),
    NEM_DARWIN_IMPORT(g_pfnHvVmConfigGetEl2Supported, hv_vm_config_get_el2_supported),
    NEM_DARWIN_IMPORT(g_pfnHvVmConfigGetEl2Enabled, hv_vm_config_get_el2_enabled),
    NEM_DARWIN_IMPORT(g_pfnHvVmConfigSetEl2Enabled, hv_vm_config_set_el2_enabled),

    NEM_DARWIN_IMPORT(g_pfnHvGicCreate, hv_gic_create),
    NEM_DARWIN_IMPORT(g_pfnHvGicReset, hv_gic_reset),
    NEM_DARWIN_IMPORT(g_pfnHvGicConfigCreate, hv_gic_config_create),
    NEM_DARWIN_IMPORT(g_pfnHvGicConfigSetDistributorBase, hv_gic_config_set_distributor_base),
    NEM_DARWIN_IMPORT(g_pfnHvGicConfigSetRedistributorBase, hv_gic_config_set_redistributor_base),
    NEM_DARWIN_IMPORT(g_pfnHvGicConfigSetMsiRegionBase, hv_gic_config_set_msi_region_base),
    NEM_DARWIN_IMPORT(g_pfnHvGicConfigSetMsiInterruptRange, hv_gic_config_set_msi_interrupt_range),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetRedistributorBase, hv_gic_get_redistributor_base),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetRedistributorRegionSize, hv_gic_get_redistributor_region_size),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetRedistributorSize, hv_gic_get_redistributor_size),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetDistributorSize, hv_gic_get_distributor_size),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetDistributorBaseAlignment, hv_gic_get_distributor_base_alignment),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetRedistributorBaseAlignment, hv_gic_get_redistributor_base_alignment),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetMsiRegionBaseAlignment, hv_gic_get_msi_region_base_alignment),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetMsiRegionSize, hv_gic_get_msi_region_size),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetSpiInterruptRange, hv_gic_get_spi_interrupt_range),
    NEM_DARWIN_IMPORT(g_pfnHvGicStateCreate, hv_gic_state_create),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetState, hv_gic_set_state),
    NEM_DARWIN_IMPORT(g_pfnHvGicStateGetSize, hv_gic_state_get_size),
    NEM_DARWIN_IMPORT(g_pfnHvGicStateGetData, hv_gic_state_get_data),
    NEM_DARWIN_IMPORT(g_pfnHvGicSendMsi, hv_gic_send_msi),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetSpi, hv_gic_set_spi),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetDistributorReg, hv_gic_get_distributor_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetMsiReg, hv_gic_get_msi_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetIccReg, hv_gic_get_icc_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetIchReg, hv_gic_get_ich_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetIcvReg, hv_gic_get_icv_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetRedistributorReg, hv_gic_get_redistributor_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetDistributorReg, hv_gic_set_distributor_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetMsiReg, hv_gic_set_msi_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetIccReg, hv_gic_set_icc_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetIchReg, hv_gic_set_ich_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetIcvReg, hv_gic_set_icv_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetRedistributorReg, hv_gic_set_redistributor_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetIntid, hv_gic_get_intid)
#undef NEM_DARWIN_IMPORT
};
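/* Note: All of the above imports are optional and resolved at runtime by
 * nemR3DarwinLoadHv() via RTLdrGetSymbol(), so this backend also runs on
 * macOS versions where the newer EL2/GIC APIs do not exist; a missing symbol
 * simply leaves its function pointer NULL and callers check for that. */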


/*
 * Let the preprocessor alias the APIs to import variables for better autocompletion.
 */
#ifndef IN_SLICKEDIT
# define hv_vm_config_create g_pfnHvVmConfigCreate
# define hv_vm_config_get_el2_supported g_pfnHvVmConfigGetEl2Supported
# define hv_vm_config_get_el2_enabled g_pfnHvVmConfigGetEl2Enabled
# define hv_vm_config_set_el2_enabled g_pfnHvVmConfigSetEl2Enabled

# define hv_gic_create g_pfnHvGicCreate
# define hv_gic_reset g_pfnHvGicReset
# define hv_gic_config_create g_pfnHvGicConfigCreate
# define hv_gic_config_set_distributor_base g_pfnHvGicConfigSetDistributorBase
# define hv_gic_config_set_redistributor_base g_pfnHvGicConfigSetRedistributorBase
# define hv_gic_config_set_msi_region_base g_pfnHvGicConfigSetMsiRegionBase
# define hv_gic_config_set_msi_interrupt_range g_pfnHvGicConfigSetMsiInterruptRange
# define hv_gic_get_redistributor_base g_pfnHvGicGetRedistributorBase
# define hv_gic_get_redistributor_region_size g_pfnHvGicGetRedistributorRegionSize
# define hv_gic_get_redistributor_size g_pfnHvGicGetRedistributorSize
# define hv_gic_get_distributor_size g_pfnHvGicGetDistributorSize
# define hv_gic_get_distributor_base_alignment g_pfnHvGicGetDistributorBaseAlignment
# define hv_gic_get_redistributor_base_alignment g_pfnHvGicGetRedistributorBaseAlignment
# define hv_gic_get_msi_region_base_alignment g_pfnHvGicGetMsiRegionBaseAlignment
# define hv_gic_get_msi_region_size g_pfnHvGicGetMsiRegionSize
# define hv_gic_get_spi_interrupt_range g_pfnHvGicGetSpiInterruptRange
# define hv_gic_state_create g_pfnHvGicStateCreate
# define hv_gic_set_state g_pfnHvGicSetState
# define hv_gic_state_get_size g_pfnHvGicStateGetSize
# define hv_gic_state_get_data g_pfnHvGicStateGetData
# define hv_gic_send_msi g_pfnHvGicSendMsi
# define hv_gic_set_spi g_pfnHvGicSetSpi
# define hv_gic_get_distributor_reg g_pfnHvGicGetDistributorReg
# define hv_gic_get_msi_reg g_pfnHvGicGetMsiReg
# define hv_gic_get_icc_reg g_pfnHvGicGetIccReg
# define hv_gic_get_ich_reg g_pfnHvGicGetIchReg
# define hv_gic_get_icv_reg g_pfnHvGicGetIcvReg
# define hv_gic_get_redistributor_reg g_pfnHvGicGetRedistributorReg
# define hv_gic_set_distributor_reg g_pfnHvGicSetDistributorReg
# define hv_gic_set_msi_reg g_pfnHvGicSetMsiReg
# define hv_gic_set_icc_reg g_pfnHvGicSetIccReg
# define hv_gic_set_ich_reg g_pfnHvGicSetIchReg
# define hv_gic_set_icv_reg g_pfnHvGicSetIcvReg
# define hv_gic_set_redistributor_reg g_pfnHvGicSetRedistributorReg
# define hv_gic_get_intid g_pfnHvGicGetIntid
#endif
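/* With the aliases in place a call written as, e.g.,
 *     hv_return_t hrc = hv_gic_create(hGicCfg);
 * compiles to an indirect call through g_pfnHvGicCreate.  This is also why
 * code further down can test the official API names for NULL (nemR3NativeInit)
 * or assert on them (NEMR3GicSetSpi) before calling. */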


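/*
 * The tables below pair a Hypervisor.framework register enum with the byte
 * offset of the matching CPUMCTX field (and, where applicable, the
 * CPUMCTX_EXTRN_XXX flag governing it).  The state import/export code walks
 * them and copies each register with a generic offset-based access:
 *     uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + offCpumCtx);
 */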
/** The general registers. */
static const struct
{
    hv_reg_t enmHvReg;
    uint32_t fCpumExtrn;
    uint32_t offCpumCtx;
} s_aCpumRegs[] =
{
#define CPUM_GREG_EMIT_X0_X3(a_Idx) { HV_REG_X ## a_Idx, CPUMCTX_EXTRN_X ## a_Idx, RT_UOFFSETOF(CPUMCTX, aGRegs[a_Idx].x) }
#define CPUM_GREG_EMIT_X4_X28(a_Idx) { HV_REG_X ## a_Idx, CPUMCTX_EXTRN_X4_X28, RT_UOFFSETOF(CPUMCTX, aGRegs[a_Idx].x) }
    CPUM_GREG_EMIT_X0_X3(0),
    CPUM_GREG_EMIT_X0_X3(1),
    CPUM_GREG_EMIT_X0_X3(2),
    CPUM_GREG_EMIT_X0_X3(3),
    CPUM_GREG_EMIT_X4_X28(4),
    CPUM_GREG_EMIT_X4_X28(5),
    CPUM_GREG_EMIT_X4_X28(6),
    CPUM_GREG_EMIT_X4_X28(7),
    CPUM_GREG_EMIT_X4_X28(8),
    CPUM_GREG_EMIT_X4_X28(9),
    CPUM_GREG_EMIT_X4_X28(10),
    CPUM_GREG_EMIT_X4_X28(11),
    CPUM_GREG_EMIT_X4_X28(12),
    CPUM_GREG_EMIT_X4_X28(13),
    CPUM_GREG_EMIT_X4_X28(14),
    CPUM_GREG_EMIT_X4_X28(15),
    CPUM_GREG_EMIT_X4_X28(16),
    CPUM_GREG_EMIT_X4_X28(17),
    CPUM_GREG_EMIT_X4_X28(18),
    CPUM_GREG_EMIT_X4_X28(19),
    CPUM_GREG_EMIT_X4_X28(20),
    CPUM_GREG_EMIT_X4_X28(21),
    CPUM_GREG_EMIT_X4_X28(22),
    CPUM_GREG_EMIT_X4_X28(23),
    CPUM_GREG_EMIT_X4_X28(24),
    CPUM_GREG_EMIT_X4_X28(25),
    CPUM_GREG_EMIT_X4_X28(26),
    CPUM_GREG_EMIT_X4_X28(27),
    CPUM_GREG_EMIT_X4_X28(28),
    { HV_REG_FP, CPUMCTX_EXTRN_FP, RT_UOFFSETOF(CPUMCTX, aGRegs[29].x) },
    { HV_REG_LR, CPUMCTX_EXTRN_LR, RT_UOFFSETOF(CPUMCTX, aGRegs[30].x) },
    { HV_REG_PC, CPUMCTX_EXTRN_PC, RT_UOFFSETOF(CPUMCTX, Pc.u64) },
    { HV_REG_FPCR, CPUMCTX_EXTRN_FPCR, RT_UOFFSETOF(CPUMCTX, fpcr) },
    { HV_REG_FPSR, CPUMCTX_EXTRN_FPSR, RT_UOFFSETOF(CPUMCTX, fpsr) }
#undef CPUM_GREG_EMIT_X0_X3
#undef CPUM_GREG_EMIT_X4_X28
};
/** SIMD/FP registers. */
static const struct
{
    hv_simd_fp_reg_t enmHvReg;
    uint32_t offCpumCtx;
} s_aCpumFpRegs[] =
{
#define CPUM_VREG_EMIT(a_Idx) { HV_SIMD_FP_REG_Q ## a_Idx, RT_UOFFSETOF(CPUMCTX, aVRegs[a_Idx].v) }
    CPUM_VREG_EMIT(0),
    CPUM_VREG_EMIT(1),
    CPUM_VREG_EMIT(2),
    CPUM_VREG_EMIT(3),
    CPUM_VREG_EMIT(4),
    CPUM_VREG_EMIT(5),
    CPUM_VREG_EMIT(6),
    CPUM_VREG_EMIT(7),
    CPUM_VREG_EMIT(8),
    CPUM_VREG_EMIT(9),
    CPUM_VREG_EMIT(10),
    CPUM_VREG_EMIT(11),
    CPUM_VREG_EMIT(12),
    CPUM_VREG_EMIT(13),
    CPUM_VREG_EMIT(14),
    CPUM_VREG_EMIT(15),
    CPUM_VREG_EMIT(16),
    CPUM_VREG_EMIT(17),
    CPUM_VREG_EMIT(18),
    CPUM_VREG_EMIT(19),
    CPUM_VREG_EMIT(20),
    CPUM_VREG_EMIT(21),
    CPUM_VREG_EMIT(22),
    CPUM_VREG_EMIT(23),
    CPUM_VREG_EMIT(24),
    CPUM_VREG_EMIT(25),
    CPUM_VREG_EMIT(26),
    CPUM_VREG_EMIT(27),
    CPUM_VREG_EMIT(28),
    CPUM_VREG_EMIT(29),
    CPUM_VREG_EMIT(30),
    CPUM_VREG_EMIT(31)
#undef CPUM_VREG_EMIT
};
/** Debug system registers. */
static const struct
{
    hv_sys_reg_t enmHvReg;
    uint32_t offCpumCtx;
} s_aCpumDbgRegs[] =
{
#define CPUM_DBGREG_EMIT(a_BorW, a_Idx) \
    { HV_SYS_REG_DBG ## a_BorW ## CR ## a_Idx ## _EL1, RT_UOFFSETOF(CPUMCTX, a ## a_BorW ## p[a_Idx].Ctrl.u64) }, \
    { HV_SYS_REG_DBG ## a_BorW ## VR ## a_Idx ## _EL1, RT_UOFFSETOF(CPUMCTX, a ## a_BorW ## p[a_Idx].Value.u64) }
    /* Breakpoint registers. */
    CPUM_DBGREG_EMIT(B, 0),
    CPUM_DBGREG_EMIT(B, 1),
    CPUM_DBGREG_EMIT(B, 2),
    CPUM_DBGREG_EMIT(B, 3),
    CPUM_DBGREG_EMIT(B, 4),
    CPUM_DBGREG_EMIT(B, 5),
    CPUM_DBGREG_EMIT(B, 6),
    CPUM_DBGREG_EMIT(B, 7),
    CPUM_DBGREG_EMIT(B, 8),
    CPUM_DBGREG_EMIT(B, 9),
    CPUM_DBGREG_EMIT(B, 10),
    CPUM_DBGREG_EMIT(B, 11),
    CPUM_DBGREG_EMIT(B, 12),
    CPUM_DBGREG_EMIT(B, 13),
    CPUM_DBGREG_EMIT(B, 14),
    CPUM_DBGREG_EMIT(B, 15),
    /* Watchpoint registers. */
    CPUM_DBGREG_EMIT(W, 0),
    CPUM_DBGREG_EMIT(W, 1),
    CPUM_DBGREG_EMIT(W, 2),
    CPUM_DBGREG_EMIT(W, 3),
    CPUM_DBGREG_EMIT(W, 4),
    CPUM_DBGREG_EMIT(W, 5),
    CPUM_DBGREG_EMIT(W, 6),
    CPUM_DBGREG_EMIT(W, 7),
    CPUM_DBGREG_EMIT(W, 8),
    CPUM_DBGREG_EMIT(W, 9),
    CPUM_DBGREG_EMIT(W, 10),
    CPUM_DBGREG_EMIT(W, 11),
    CPUM_DBGREG_EMIT(W, 12),
    CPUM_DBGREG_EMIT(W, 13),
    CPUM_DBGREG_EMIT(W, 14),
    CPUM_DBGREG_EMIT(W, 15),
    { HV_SYS_REG_MDSCR_EL1, RT_UOFFSETOF(CPUMCTX, Mdscr.u64) }
#undef CPUM_DBGREG_EMIT
};
/** PAuth key system registers. */
static const struct
{
    hv_sys_reg_t enmHvReg;
    uint32_t offCpumCtx;
} s_aCpumPAuthKeyRegs[] =
{
    { HV_SYS_REG_APDAKEYLO_EL1, RT_UOFFSETOF(CPUMCTX, Apda.Low.u64) },
    { HV_SYS_REG_APDAKEYHI_EL1, RT_UOFFSETOF(CPUMCTX, Apda.High.u64) },
    { HV_SYS_REG_APDBKEYLO_EL1, RT_UOFFSETOF(CPUMCTX, Apdb.Low.u64) },
    { HV_SYS_REG_APDBKEYHI_EL1, RT_UOFFSETOF(CPUMCTX, Apdb.High.u64) },
    { HV_SYS_REG_APGAKEYLO_EL1, RT_UOFFSETOF(CPUMCTX, Apga.Low.u64) },
    { HV_SYS_REG_APGAKEYHI_EL1, RT_UOFFSETOF(CPUMCTX, Apga.High.u64) },
    { HV_SYS_REG_APIAKEYLO_EL1, RT_UOFFSETOF(CPUMCTX, Apia.Low.u64) },
    { HV_SYS_REG_APIAKEYHI_EL1, RT_UOFFSETOF(CPUMCTX, Apia.High.u64) },
    { HV_SYS_REG_APIBKEYLO_EL1, RT_UOFFSETOF(CPUMCTX, Apib.Low.u64) },
    { HV_SYS_REG_APIBKEYHI_EL1, RT_UOFFSETOF(CPUMCTX, Apib.High.u64) }
};
/** System registers. */
static const struct
{
    hv_sys_reg_t enmHvReg;
    uint32_t fCpumExtrn;
    uint32_t offCpumCtx;
} s_aCpumSysRegs[] =
{
    { HV_SYS_REG_SP_EL0, CPUMCTX_EXTRN_SP, RT_UOFFSETOF(CPUMCTX, aSpReg[0].u64) },
    { HV_SYS_REG_SP_EL1, CPUMCTX_EXTRN_SP, RT_UOFFSETOF(CPUMCTX, aSpReg[1].u64) },
    { HV_SYS_REG_SPSR_EL1, CPUMCTX_EXTRN_SPSR, RT_UOFFSETOF(CPUMCTX, Spsr.u64) },
    { HV_SYS_REG_ELR_EL1, CPUMCTX_EXTRN_ELR, RT_UOFFSETOF(CPUMCTX, Elr.u64) },
    { HV_SYS_REG_SCTLR_EL1, CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Sctlr.u64) },
    { HV_SYS_REG_TCR_EL1, CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Tcr.u64) },
    { HV_SYS_REG_TTBR0_EL1, CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Ttbr0.u64) },
    { HV_SYS_REG_TTBR1_EL1, CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Ttbr1.u64) },
    { HV_SYS_REG_VBAR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, VBar.u64) },
    { HV_SYS_REG_AFSR0_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Afsr0.u64) },
    { HV_SYS_REG_AFSR1_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Afsr1.u64) },
    { HV_SYS_REG_AMAIR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Amair.u64) },
    { HV_SYS_REG_CNTKCTL_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, CntKCtl.u64) },
    { HV_SYS_REG_CONTEXTIDR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, ContextIdr.u64) },
    { HV_SYS_REG_CPACR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Cpacr.u64) },
    { HV_SYS_REG_CSSELR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Csselr.u64) },
    { HV_SYS_REG_ESR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Esr.u64) },
    { HV_SYS_REG_FAR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Far.u64) },
    { HV_SYS_REG_MAIR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Mair.u64) },
    { HV_SYS_REG_PAR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Par.u64) },
    { HV_SYS_REG_TPIDRRO_EL0, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, TpIdrRoEl0.u64) },
    { HV_SYS_REG_TPIDR_EL0, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, aTpIdr[0].u64) },
    { HV_SYS_REG_TPIDR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, aTpIdr[1].u64) },
    { HV_SYS_REG_MDCCINT_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, MDccInt.u64) }
};
/** EL2 support system registers. */
static const struct
{
    uint16_t idSysReg;
    uint32_t offCpumCtx;
} s_aCpumEl2SysRegs[] =
{
    { ARMV8_AARCH64_SYSREG_CNTHCTL_EL2, RT_UOFFSETOF(CPUMCTX, CntHCtlEl2.u64) },
    { ARMV8_AARCH64_SYSREG_CNTHP_CTL_EL2, RT_UOFFSETOF(CPUMCTX, CntHpCtlEl2.u64) },
    { ARMV8_AARCH64_SYSREG_CNTHP_CVAL_EL2, RT_UOFFSETOF(CPUMCTX, CntHpCValEl2.u64) },
    { ARMV8_AARCH64_SYSREG_CNTHP_TVAL_EL2, RT_UOFFSETOF(CPUMCTX, CntHpTValEl2.u64) },
    { ARMV8_AARCH64_SYSREG_CNTVOFF_EL2, RT_UOFFSETOF(CPUMCTX, CntVOffEl2.u64) },
    { ARMV8_AARCH64_SYSREG_CPTR_EL2, RT_UOFFSETOF(CPUMCTX, CptrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_ELR_EL2, RT_UOFFSETOF(CPUMCTX, ElrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_ESR_EL2, RT_UOFFSETOF(CPUMCTX, EsrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_FAR_EL2, RT_UOFFSETOF(CPUMCTX, FarEl2.u64) },
    { ARMV8_AARCH64_SYSREG_HCR_EL2, RT_UOFFSETOF(CPUMCTX, HcrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_HPFAR_EL2, RT_UOFFSETOF(CPUMCTX, HpFarEl2.u64) },
    { ARMV8_AARCH64_SYSREG_MAIR_EL2, RT_UOFFSETOF(CPUMCTX, MairEl2.u64) },
    //{ ARMV8_AARCH64_SYSREG_MDCR_EL2, RT_UOFFSETOF(CPUMCTX, MdcrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_SCTLR_EL2, RT_UOFFSETOF(CPUMCTX, SctlrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_SPSR_EL2, RT_UOFFSETOF(CPUMCTX, SpsrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_SP_EL2, RT_UOFFSETOF(CPUMCTX, SpEl2.u64) },
    { ARMV8_AARCH64_SYSREG_TCR_EL2, RT_UOFFSETOF(CPUMCTX, TcrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_TPIDR_EL2, RT_UOFFSETOF(CPUMCTX, TpidrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_TTBR0_EL2, RT_UOFFSETOF(CPUMCTX, Ttbr0El2.u64) },
    { ARMV8_AARCH64_SYSREG_TTBR1_EL2, RT_UOFFSETOF(CPUMCTX, Ttbr1El2.u64) },
    { ARMV8_AARCH64_SYSREG_VBAR_EL2, RT_UOFFSETOF(CPUMCTX, VBarEl2.u64) },
    { ARMV8_AARCH64_SYSREG_VMPIDR_EL2, RT_UOFFSETOF(CPUMCTX, VMpidrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_VPIDR_EL2, RT_UOFFSETOF(CPUMCTX, VPidrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_VTCR_EL2, RT_UOFFSETOF(CPUMCTX, VTcrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_VTTBR_EL2, RT_UOFFSETOF(CPUMCTX, VTtbrEl2.u64) }
};
/** ID registers. */
static const struct
{
    hv_feature_reg_t enmHvReg;
    uint32_t offIdStruct;
} s_aIdRegs[] =
{
    { HV_FEATURE_REG_ID_AA64DFR0_EL1, RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Dfr0El1) },
    { HV_FEATURE_REG_ID_AA64DFR1_EL1, RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Dfr1El1) },
    { HV_FEATURE_REG_ID_AA64ISAR0_EL1, RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Isar0El1) },
    { HV_FEATURE_REG_ID_AA64ISAR1_EL1, RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Isar1El1) },
    { HV_FEATURE_REG_ID_AA64MMFR0_EL1, RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Mmfr0El1) },
    { HV_FEATURE_REG_ID_AA64MMFR1_EL1, RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Mmfr1El1) },
    { HV_FEATURE_REG_ID_AA64MMFR2_EL1, RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Mmfr2El1) },
    { HV_FEATURE_REG_ID_AA64PFR0_EL1, RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Pfr0El1) },
    { HV_FEATURE_REG_ID_AA64PFR1_EL1, RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Pfr1El1) },
    { HV_FEATURE_REG_CLIDR_EL1, RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegClidrEl1) },
    { HV_FEATURE_REG_CTR_EL0, RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegCtrEl0) },
    { HV_FEATURE_REG_DCZID_EL0, RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegDczidEl0) }
};


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/


/**
 * Converts a HV return code to a VBox status code.
 *
 * @returns VBox status code.
 * @param hrc The HV return code to convert.
 */
DECLINLINE(int) nemR3DarwinHvSts2Rc(hv_return_t hrc)
{
    if (hrc == HV_SUCCESS)
        return VINF_SUCCESS;

    switch (hrc)
    {
        case HV_ERROR: return VERR_INVALID_STATE;
        case HV_BUSY: return VERR_RESOURCE_BUSY;
        case HV_BAD_ARGUMENT: return VERR_INVALID_PARAMETER;
        case HV_NO_RESOURCES: return VERR_OUT_OF_RESOURCES;
        case HV_NO_DEVICE: return VERR_NOT_FOUND;
        case HV_UNSUPPORTED: return VERR_NOT_SUPPORTED;
    }

    return VERR_IPE_UNEXPECTED_STATUS;
}


/** Puts a name to a hypervisor framework status code. */
static const char *nemR3DarwinHvStatusName(hv_return_t hrc)
{
    switch (hrc)
    {
        RT_CASE_RET_STR(HV_SUCCESS);
        RT_CASE_RET_STR(HV_ERROR);
        RT_CASE_RET_STR(HV_BUSY);
        RT_CASE_RET_STR(HV_BAD_ARGUMENT);
        RT_CASE_RET_STR(HV_ILLEGAL_GUEST_STATE);
        RT_CASE_RET_STR(HV_NO_RESOURCES);
        RT_CASE_RET_STR(HV_NO_DEVICE);
        RT_CASE_RET_STR(HV_DENIED);
        RT_CASE_RET_STR(HV_UNSUPPORTED);
    }
    return "";
}


/**
 * Converts an ICC system register into Darwin's Hypervisor.Framework equivalent.
 *
 * @returns HvF's ICC system register.
 * @param u32Reg The ARMv8 ICC system register.
 */
static hv_gic_icc_reg_t nemR3DarwinIccRegFromSysReg(uint32_t u32Reg)
{
    switch (u32Reg)
    {
        case ARMV8_AARCH64_SYSREG_ICC_PMR_EL1: return HV_GIC_ICC_REG_PMR_EL1;
        case ARMV8_AARCH64_SYSREG_ICC_IAR0_EL1: return HV_GIC_ICC_REG_INVALID;
        case ARMV8_AARCH64_SYSREG_ICC_EOIR0_EL1: return HV_GIC_ICC_REG_INVALID;
        case ARMV8_AARCH64_SYSREG_ICC_HPPIR0_EL1: return HV_GIC_ICC_REG_INVALID;
        case ARMV8_AARCH64_SYSREG_ICC_BPR0_EL1: return HV_GIC_ICC_REG_BPR0_EL1;
        case ARMV8_AARCH64_SYSREG_ICC_AP0R0_EL1: return HV_GIC_ICC_REG_AP0R0_EL1;
        case ARMV8_AARCH64_SYSREG_ICC_AP0R1_EL1: return HV_GIC_ICC_REG_INVALID;
        case ARMV8_AARCH64_SYSREG_ICC_AP0R2_EL1: return HV_GIC_ICC_REG_INVALID;
        case ARMV8_AARCH64_SYSREG_ICC_AP0R3_EL1: return HV_GIC_ICC_REG_INVALID;
        case ARMV8_AARCH64_SYSREG_ICC_AP1R0_EL1: return HV_GIC_ICC_REG_AP1R0_EL1;
        case ARMV8_AARCH64_SYSREG_ICC_AP1R1_EL1: return HV_GIC_ICC_REG_INVALID;
        case ARMV8_AARCH64_SYSREG_ICC_AP1R2_EL1: return HV_GIC_ICC_REG_INVALID;
        case ARMV8_AARCH64_SYSREG_ICC_AP1R3_EL1: return HV_GIC_ICC_REG_INVALID;
        case ARMV8_AARCH64_SYSREG_ICC_NMIAR1_EL1: return HV_GIC_ICC_REG_INVALID;
        case ARMV8_AARCH64_SYSREG_ICC_DIR_EL1: return HV_GIC_ICC_REG_INVALID;
        case ARMV8_AARCH64_SYSREG_ICC_RPR_EL1: return HV_GIC_ICC_REG_RPR_EL1;
        case ARMV8_AARCH64_SYSREG_ICC_SGI1R_EL1: return HV_GIC_ICC_REG_INVALID;
        case ARMV8_AARCH64_SYSREG_ICC_ASGI1R_EL1: return HV_GIC_ICC_REG_INVALID;
        case ARMV8_AARCH64_SYSREG_ICC_SGI0R_EL1: return HV_GIC_ICC_REG_INVALID;
        case ARMV8_AARCH64_SYSREG_ICC_IAR1_EL1: return HV_GIC_ICC_REG_INVALID;
        case ARMV8_AARCH64_SYSREG_ICC_EOIR1_EL1: return HV_GIC_ICC_REG_INVALID;
        case ARMV8_AARCH64_SYSREG_ICC_HPPIR1_EL1: return HV_GIC_ICC_REG_INVALID;
        case ARMV8_AARCH64_SYSREG_ICC_BPR1_EL1: return HV_GIC_ICC_REG_BPR1_EL1;
        case ARMV8_AARCH64_SYSREG_ICC_CTLR_EL1: return HV_GIC_ICC_REG_CTLR_EL1;
        case ARMV8_AARCH64_SYSREG_ICC_SRE_EL1: return HV_GIC_ICC_REG_SRE_EL1;
        case ARMV8_AARCH64_SYSREG_ICC_IGRPEN0_EL1: return HV_GIC_ICC_REG_IGRPEN0_EL1;
        case ARMV8_AARCH64_SYSREG_ICC_IGRPEN1_EL1: return HV_GIC_ICC_REG_IGRPEN1_EL1;
    }
    AssertReleaseFailed();
    return HV_GIC_ICC_REG_INVALID;
}


/**
 * Returns a human readable string of the given exception class.
 *
 * @returns Pointer to the string matching the given EC.
 * @param u32Ec The exception class to return the string for.
 */
static const char *nemR3DarwinEsrEl2EcStringify(uint32_t u32Ec)
{
    switch (u32Ec)
    {
#define ARMV8_EC_CASE(a_Ec) case a_Ec: return #a_Ec
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_UNKNOWN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_TRAPPED_WFX);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCR_MRC_COPROC_15);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCRR_MRRC_COPROC15);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCR_MRC_COPROC_14);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_LDC_STC);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_SME_SVE_NEON);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_VMRS);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_PA_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_LS64_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MRRC_COPROC14);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_BTI_BRANCH_TARGET_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_ILLEGAL_EXECUTION_STATE);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_SVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_HVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_SMC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_SVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_HVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_SMC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_SYS_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_SVE_TRAPPED);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_PAUTH_NV_TRAPPED_ERET_ERETAA_ERETAB);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_TME_TSTART_INSN_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_FPAC_PA_INSN_FAILURE_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_SME_TRAPPED_SME_ACCESS);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_RME_GRANULE_PROT_CHECK_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_INSN_ABORT_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_INSN_ABORT_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_PC_ALIGNMENT_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_DATA_ABORT_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_DATA_ABORT_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SP_ALIGNMENT_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_MOPS_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_FP_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_FP_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SERROR_INTERRUPT);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_BKPT_EXCEPTION_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_BKPT_EXCEPTION_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SS_EXCEPTION_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SS_EXCEPTION_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_WATCHPOINT_EXCEPTION_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_WATCHPOINT_EXCEPTION_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_BKPT_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_VEC_CATCH_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_BRK_INSN);
#undef ARMV8_EC_CASE
        default:
            break;
    }

    return "<INVALID>";
}


/**
 * Resolves a NEM page state from the given protection flags.
 *
 * @returns NEM page state.
 * @param fPageProt The page protection flags.
 */
DECLINLINE(uint8_t) nemR3DarwinPageStateFromProt(uint32_t fPageProt)
{
    switch (fPageProt)
    {
        case NEM_PAGE_PROT_NONE:
            return NEM_DARWIN_PAGE_STATE_UNMAPPED;
        case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE:
            return NEM_DARWIN_PAGE_STATE_RX;
        case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE:
            return NEM_DARWIN_PAGE_STATE_RW;
        case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE:
            return NEM_DARWIN_PAGE_STATE_RWX;
        default:
            break;
    }

    AssertLogRelMsgFailed(("Invalid combination of page protection flags %#x, can't map to page state!\n", fPageProt));
    return NEM_DARWIN_PAGE_STATE_UNMAPPED;
}


/**
 * Unmaps the given guest physical address range (page aligned).
 *
 * @returns VBox status code.
 * @param pVM The cross context VM structure.
 * @param GCPhys The guest physical address to start unmapping at.
 * @param cb The size of the range to unmap in bytes.
 * @param pu2State Where to store the new state of the unmapped page, optional.
 */
DECLINLINE(int) nemR3DarwinUnmap(PVM pVM, RTGCPHYS GCPhys, size_t cb, uint8_t *pu2State)
{
    if (*pu2State <= NEM_DARWIN_PAGE_STATE_UNMAPPED)
    {
        Log5(("nemR3DarwinUnmap: %RGp == unmapped\n", GCPhys));
        *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
        return VINF_SUCCESS;
    }

    LogFlowFunc(("Unmapping %RGp LB %zu\n", GCPhys, cb));
    hv_return_t hrc = hv_vm_unmap(GCPhys, cb);
    if (RT_LIKELY(hrc == HV_SUCCESS))
    {
        STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
        if (pu2State)
            *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
        Log5(("nemR3DarwinUnmap: %RGp => unmapped\n", GCPhys));
        return VINF_SUCCESS;
    }

    STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
    LogRel(("nemR3DarwinUnmap(%RGp): failed! hrc=%#x\n",
            GCPhys, hrc));
    return VERR_NEM_IPE_6;
}


/**
 * Maps a given guest physical address range backed by the given memory with the given
 * protection flags.
 *
 * @returns VBox status code.
 * @param pVM The cross context VM structure.
 * @param GCPhys The guest physical address to start mapping.
 * @param pvRam The R3 pointer of the memory to back the range with.
 * @param cb The size of the range, page aligned.
 * @param fPageProt The page protection flags to use for this range, combination of NEM_PAGE_PROT_XXX.
 * @param pu2State Where to store the state for the new page, optional.
 */
DECLINLINE(int) nemR3DarwinMap(PVM pVM, RTGCPHYS GCPhys, const void *pvRam, size_t cb, uint32_t fPageProt, uint8_t *pu2State)
{
    LogFlowFunc(("Mapping %RGp LB %zu fProt=%#x\n", GCPhys, cb, fPageProt));

    Assert(fPageProt != NEM_PAGE_PROT_NONE);
    RT_NOREF(pVM);

    hv_memory_flags_t fHvMemProt = 0;
    if (fPageProt & NEM_PAGE_PROT_READ)
        fHvMemProt |= HV_MEMORY_READ;
    if (fPageProt & NEM_PAGE_PROT_WRITE)
        fHvMemProt |= HV_MEMORY_WRITE;
    if (fPageProt & NEM_PAGE_PROT_EXECUTE)
        fHvMemProt |= HV_MEMORY_EXEC;

    hv_return_t hrc = hv_vm_map((void *)pvRam, GCPhys, cb, fHvMemProt);
    if (hrc == HV_SUCCESS)
    {
        if (pu2State)
            *pu2State = nemR3DarwinPageStateFromProt(fPageProt);
        return VINF_SUCCESS;
    }

    return nemR3DarwinHvSts2Rc(hrc);
}
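/* Usage sketch (illustrative only, hypothetical values): map a page-aligned
 * RAM range read/write before the guest touches it; hv_vm_map() expects both
 * GCPhys and pvRam to be page aligned:
 *     uint8_t u2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
 *     int rc = nemR3DarwinMap(pVM, GCPhys, pvR3, cb,
 *                             NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE, &u2State);
 */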


/**
 * Changes the protection flags for the given guest physical address range.
 *
 * @returns VBox status code.
 * @param GCPhys The guest physical address to start mapping.
 * @param cb The size of the range, page aligned.
 * @param fPageProt The page protection flags to use for this range, combination of NEM_PAGE_PROT_XXX.
 * @param pu2State Where to store the state for the new page, optional.
 */
DECLINLINE(int) nemR3DarwinProtect(RTGCPHYS GCPhys, size_t cb, uint32_t fPageProt, uint8_t *pu2State)
{
    hv_memory_flags_t fHvMemProt = 0;
    if (fPageProt & NEM_PAGE_PROT_READ)
        fHvMemProt |= HV_MEMORY_READ;
    if (fPageProt & NEM_PAGE_PROT_WRITE)
        fHvMemProt |= HV_MEMORY_WRITE;
    if (fPageProt & NEM_PAGE_PROT_EXECUTE)
        fHvMemProt |= HV_MEMORY_EXEC;

    hv_return_t hrc = hv_vm_protect(GCPhys, cb, fHvMemProt);
    if (hrc == HV_SUCCESS)
    {
        if (pu2State)
            *pu2State = nemR3DarwinPageStateFromProt(fPageProt);
        return VINF_SUCCESS;
    }

    LogRel(("nemR3DarwinProtect(%RGp,%zu,%#x): failed! hrc=%#x\n",
            GCPhys, cb, fPageProt, hrc));
    return nemR3DarwinHvSts2Rc(hrc);
}


#ifdef LOG_ENABLED
/**
 * Logs the current CPU state.
 */
static void nemR3DarwinLogState(PVMCC pVM, PVMCPUCC pVCpu)
{
    if (LogIs3Enabled())
    {
        char szRegs[4096];
        DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
                        "x0=%016VR{x0} x1=%016VR{x1} x2=%016VR{x2} x3=%016VR{x3}\n"
                        "x4=%016VR{x4} x5=%016VR{x5} x6=%016VR{x6} x7=%016VR{x7}\n"
                        "x8=%016VR{x8} x9=%016VR{x9} x10=%016VR{x10} x11=%016VR{x11}\n"
                        "x12=%016VR{x12} x13=%016VR{x13} x14=%016VR{x14} x15=%016VR{x15}\n"
                        "x16=%016VR{x16} x17=%016VR{x17} x18=%016VR{x18} x19=%016VR{x19}\n"
                        "x20=%016VR{x20} x21=%016VR{x21} x22=%016VR{x22} x23=%016VR{x23}\n"
                        "x24=%016VR{x24} x25=%016VR{x25} x26=%016VR{x26} x27=%016VR{x27}\n"
                        "x28=%016VR{x28} x29=%016VR{x29} x30=%016VR{x30}\n"
                        "pc=%016VR{pc} pstate=%016VR{pstate}\n"
                        "sp_el0=%016VR{sp_el0} sp_el1=%016VR{sp_el1} elr_el1=%016VR{elr_el1}\n"
                        "sctlr_el1=%016VR{sctlr_el1} tcr_el1=%016VR{tcr_el1}\n"
                        "ttbr0_el1=%016VR{ttbr0_el1} ttbr1_el1=%016VR{ttbr1_el1}\n"
                        "vbar_el1=%016VR{vbar_el1}\n"
                        );
        if (pVM->nem.s.fEl2Enabled)
        {
            Log3(("%s\n", szRegs));
            DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
                            "sp_el2=%016VR{sp_el2} elr_el2=%016VR{elr_el2}\n"
                            "spsr_el2=%016VR{spsr_el2} tpidr_el2=%016VR{tpidr_el2}\n"
                            "sctlr_el2=%016VR{sctlr_el2} tcr_el2=%016VR{tcr_el2}\n"
                            "ttbr0_el2=%016VR{ttbr0_el2} ttbr1_el2=%016VR{ttbr1_el2}\n"
                            "esr_el2=%016VR{esr_el2} far_el2=%016VR{far_el2}\n"
                            "hcr_el2=%016VR{hcr_el2} tcr_el2=%016VR{tcr_el2}\n"
                            "vbar_el2=%016VR{vbar_el2} cptr_el2=%016VR{cptr_el2}\n"
                            );
        }
        char szInstr[256]; RT_ZERO(szInstr);
        DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
                           DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
                           szInstr, sizeof(szInstr), NULL);
        Log3(("%s%s\n", szRegs, szInstr));
    }
}
#endif /* LOG_ENABLED */


static int nemR3DarwinCopyStateFromHv(PVMCC pVM, PVMCPUCC pVCpu, uint64_t fWhat)
{
    RT_NOREF(pVM);

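    /* Note: HV_SUCCESS is 0, so OR-ing the hv_vcpu_get_*() return codes below
     * accumulates any failure without checking each call individually. */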
    hv_return_t hrc = hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CTL_EL0, &pVCpu->cpum.GstCtx.CntvCtlEl0);
    if (hrc == HV_SUCCESS)
        hrc = hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CVAL_EL0, &pVCpu->cpum.GstCtx.CntvCValEl0);

    if (   hrc == HV_SUCCESS
        && (fWhat & (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR)))
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumRegs); i++)
        {
            if (s_aCpumRegs[i].fCpumExtrn & fWhat)
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumRegs[i].offCpumCtx);
                hrc |= hv_vcpu_get_reg(pVCpu->nem.s.hVCpu, s_aCpumRegs[i].enmHvReg, pu64);
            }
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_V0_V31))
    {
        /* SIMD/FP registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumFpRegs); i++)
        {
            hv_simd_fp_uchar16_t *pu128 = (hv_simd_fp_uchar16_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumFpRegs[i].offCpumCtx);
            hrc |= hv_vcpu_get_simd_fp_reg(pVCpu->nem.s.hVCpu, s_aCpumFpRegs[i].enmHvReg, pu128);
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_SYSREG_DEBUG))
    {
        /* Debug registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumDbgRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumDbgRegs[i].offCpumCtx);
            hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumDbgRegs[i].enmHvReg, pu64);
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS))
    {
        /* PAuth key registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumPAuthKeyRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumPAuthKeyRegs[i].offCpumCtx);
            hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumPAuthKeyRegs[i].enmHvReg, pu64);
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR | CPUMCTX_EXTRN_SYSREG_MISC)))
    {
        /* System registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegs); i++)
        {
            if (s_aCpumSysRegs[i].fCpumExtrn & fWhat)
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegs[i].offCpumCtx);
                hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumSysRegs[i].enmHvReg, pu64);
            }
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_SYSREG_EL2)
        && pVM->nem.s.fEl2Enabled)
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumEl2SysRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumEl2SysRegs[i].offCpumCtx);
            hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, (hv_sys_reg_t)s_aCpumEl2SysRegs[i].idSysReg, pu64);
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_PSTATE))
    {
        uint64_t u64Tmp;
        hrc |= hv_vcpu_get_reg(pVCpu->nem.s.hVCpu, HV_REG_CPSR, &u64Tmp);
        if (hrc == HV_SUCCESS)
            pVCpu->cpum.GstCtx.fPState = (uint32_t)u64Tmp;
    }

    /* Almost done, just update extern flags. */
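    /* (A set CPUMCTX_EXTRN_XXX bit means that piece of state still lives only
     * in Hypervisor.framework; clearing it marks the CPUMCTX copy as current.
     * Once no CPUMCTX_EXTRN_ALL bits remain, the keeper bits are cleared too.) */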
    pVCpu->cpum.GstCtx.fExtrn &= ~fWhat;
    if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
        pVCpu->cpum.GstCtx.fExtrn = 0;

    return nemR3DarwinHvSts2Rc(hrc);
}


/**
 * Exports the guest state to HV for execution.
 *
 * @returns VBox status code.
 * @param pVM The cross context VM structure.
 * @param pVCpu The cross context virtual CPU structure of the
 *        calling EMT.
 */
static int nemR3DarwinExportGuestState(PVMCC pVM, PVMCPUCC pVCpu)
{
    RT_NOREF(pVM);
    hv_return_t hrc = HV_SUCCESS;
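    /* The checks below are inverted relative to the import path: a clear
     * CPUMCTX_EXTRN_XXX bit means CPUMCTX holds the authoritative value and
     * it must be written back to Hypervisor.framework. */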

    if (   (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR))
        != (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR))
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumRegs); i++)
        {
            if (!(s_aCpumRegs[i].fCpumExtrn & pVCpu->cpum.GstCtx.fExtrn))
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumRegs[i].offCpumCtx);
                hrc |= hv_vcpu_set_reg(pVCpu->nem.s.hVCpu, s_aCpumRegs[i].enmHvReg, *pu64);
            }
        }
    }

    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_V0_V31))
    {
        /* SIMD/FP registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumFpRegs); i++)
        {
            hv_simd_fp_uchar16_t *pu128 = (hv_simd_fp_uchar16_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumFpRegs[i].offCpumCtx);
            hrc |= hv_vcpu_set_simd_fp_reg(pVCpu->nem.s.hVCpu, s_aCpumFpRegs[i].enmHvReg, *pu128);
        }
    }

    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_SYSREG_DEBUG))
    {
        /* Debug registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumDbgRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumDbgRegs[i].offCpumCtx);
            hrc |= hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumDbgRegs[i].enmHvReg, *pu64);
        }
    }

    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS))
    {
        /* PAuth key registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumPAuthKeyRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumPAuthKeyRegs[i].offCpumCtx);
            hrc |= hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumPAuthKeyRegs[i].enmHvReg, *pu64);
        }
    }

    if (   hrc == HV_SUCCESS
        && (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR | CPUMCTX_EXTRN_SYSREG_MISC))
        != (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR | CPUMCTX_EXTRN_SYSREG_MISC))
    {
        /* System registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegs); i++)
        {
            if (!(s_aCpumSysRegs[i].fCpumExtrn & pVCpu->cpum.GstCtx.fExtrn))
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegs[i].offCpumCtx);
                hrc |= hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumSysRegs[i].enmHvReg, *pu64);
            }
        }
    }

    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_SYSREG_EL2)
        && pVM->nem.s.fEl2Enabled)
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumEl2SysRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumEl2SysRegs[i].offCpumCtx);
            hrc |= hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, (hv_sys_reg_t)s_aCpumEl2SysRegs[i].idSysReg, *pu64);
            Assert(hrc == HV_SUCCESS);
        }
    }

    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_PSTATE))
        hrc = hv_vcpu_set_reg(pVCpu->nem.s.hVCpu, HV_REG_CPSR, pVCpu->cpum.GstCtx.fPState);

    pVCpu->cpum.GstCtx.fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_NEM;
    return nemR3DarwinHvSts2Rc(hrc);
}


/**
 * Worker for nemR3NativeInit that loads the Hypervisor.framework shared library.
 *
 * @returns VBox status code.
 * @param pErrInfo Where to always return error info.
 */
static int nemR3DarwinLoadHv(PRTERRINFO pErrInfo)
{
    RTLDRMOD hMod = NIL_RTLDRMOD;
    static const char *s_pszHvPath = "/System/Library/Frameworks/Hypervisor.framework/Hypervisor";

    int rc = RTLdrLoadEx(s_pszHvPath, &hMod, RTLDRLOAD_FLAGS_NO_UNLOAD | RTLDRLOAD_FLAGS_NO_SUFFIX, pErrInfo);
    if (RT_SUCCESS(rc))
    {
        for (unsigned i = 0; i < RT_ELEMENTS(g_aImports); i++)
        {
            int rc2 = RTLdrGetSymbol(hMod, g_aImports[i].pszName, (void **)g_aImports[i].ppfn);
            if (RT_SUCCESS(rc2))
                LogRel(("NEM: info: Found optional import Hypervisor!%s.\n", g_aImports[i].pszName));
            else
            {
                *g_aImports[i].ppfn = NULL;
                LogRel(("NEM: info: Optional import Hypervisor!%s not found: %Rrc\n", g_aImports[i].pszName, rc2));
            }
        }
        Assert(RT_SUCCESS(rc) && !RTErrInfoIsSet(pErrInfo));
        RTLdrClose(hMod);
    }
    else
    {
        RTErrInfoAddF(pErrInfo, rc, "Failed to load Hypervisor.framework: %s: %Rrc", s_pszHvPath, rc);
        rc = VERR_NEM_INIT_FAILED;
    }

    return rc;
}


/**
 * Dumps some GIC information to the release log.
 */
static void nemR3DarwinDumpGicInfo(void)
{
    size_t val = 0;
    hv_return_t hrc = hv_gic_get_redistributor_size(&val);
    LogRel(("GICNem: hv_gic_get_redistributor_size() -> hrc=%#x / size=%zu\n", hrc, val));
    hrc = hv_gic_get_distributor_size(&val);
    LogRel(("GICNem: hv_gic_get_distributor_size() -> hrc=%#x / size=%zu\n", hrc, val));
    hrc = hv_gic_get_distributor_base_alignment(&val);
    LogRel(("GICNem: hv_gic_get_distributor_base_alignment() -> hrc=%#x / size=%zu\n", hrc, val));
    hrc = hv_gic_get_redistributor_base_alignment(&val);
    LogRel(("GICNem: hv_gic_get_redistributor_base_alignment() -> hrc=%#x / size=%zu\n", hrc, val));
    hrc = hv_gic_get_msi_region_base_alignment(&val);
    LogRel(("GICNem: hv_gic_get_msi_region_base_alignment() -> hrc=%#x / size=%zu\n", hrc, val));
    hrc = hv_gic_get_msi_region_size(&val);
    LogRel(("GICNem: hv_gic_get_msi_region_size() -> hrc=%#x / size=%zu\n", hrc, val));
    uint32_t u32SpiIntIdBase = 0;
    uint32_t cSpiIntIds = 0;
    hrc = hv_gic_get_spi_interrupt_range(&u32SpiIntIdBase, &cSpiIntIds);
    LogRel(("GICNem: hv_gic_get_spi_interrupt_range() -> hrc=%#x / SpiIntIdBase=%u, cSpiIntIds=%u\n", hrc, u32SpiIntIdBase, cSpiIntIds));

    uint32_t u32IntId = 0;
    hrc = hv_gic_get_intid(HV_GIC_INT_EL1_PHYSICAL_TIMER, &u32IntId);
    LogRel(("GICNem: hv_gic_get_intid(HV_GIC_INT_EL1_PHYSICAL_TIMER) -> hrc=%#x / IntId=%u\n", hrc, u32IntId));
    hrc = hv_gic_get_intid(HV_GIC_INT_EL1_VIRTUAL_TIMER, &u32IntId);
    LogRel(("GICNem: hv_gic_get_intid(HV_GIC_INT_EL1_VIRTUAL_TIMER) -> hrc=%#x / IntId=%u\n", hrc, u32IntId));
    hrc = hv_gic_get_intid(HV_GIC_INT_EL2_PHYSICAL_TIMER, &u32IntId);
    LogRel(("GICNem: hv_gic_get_intid(HV_GIC_INT_EL2_PHYSICAL_TIMER) -> hrc=%#x / IntId=%u\n", hrc, u32IntId));
    hrc = hv_gic_get_intid(HV_GIC_INT_MAINTENANCE, &u32IntId);
    LogRel(("GICNem: hv_gic_get_intid(HV_GIC_INT_MAINTENANCE) -> hrc=%#x / IntId=%u\n", hrc, u32IntId));
    hrc = hv_gic_get_intid(HV_GIC_INT_PERFORMANCE_MONITOR, &u32IntId);
    LogRel(("GICNem: hv_gic_get_intid(HV_GIC_INT_PERFORMANCE_MONITOR) -> hrc=%#x / IntId=%u\n", hrc, u32IntId));
}


/**
 * Sets the given SPI inside the in-kernel HvF GIC.
 *
 * @returns VBox status code.
 * @param pVM The VM instance.
 * @param uIntId The SPI ID to update.
 * @param fAsserted Flag whether the interrupt is asserted (true) or not (false).
 */
VMM_INT_DECL(int) NEMR3GicSetSpi(PVMCC pVM, uint32_t uIntId, bool fAsserted)
{
    RT_NOREF(pVM);
    Assert(hv_gic_set_spi);

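    /* The caller passes a zero-based SPI number while Hypervisor.framework
     * expects the absolute INTID, hence the bias by the start of the SPI
     * range (INTID 32). */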
    hv_return_t hrc = hv_gic_set_spi(uIntId + GIC_INTID_RANGE_SPI_START, fAsserted);
    return nemR3DarwinHvSts2Rc(hrc);
}


/**
 * Sets the given PPI inside the in-kernel HvF GIC.
 *
 * @returns VBox status code.
 * @param pVCpu The vCPU for which the PPI state is to be updated.
 * @param uIntId The PPI ID to update.
 * @param fAsserted Flag whether the interrupt is asserted (true) or not (false).
 */
VMM_INT_DECL(int) NEMR3GicSetPpi(PVMCPUCC pVCpu, uint32_t uIntId, bool fAsserted)
{
    RT_NOREF(pVCpu, uIntId, fAsserted);

    /* Should never be called as the PPIs are handled entirely in Hypervisor.framework/AppleHV. */
    AssertFailed();
    return VERR_NEM_IPE_9;
}


/**
 * Writes a system ICC register inside the in-kernel HvF GIC.
 *
 * @returns VBox status code.
 * @param pVCpu The cross context virtual CPU structure.
 * @param u32Reg The ICC register.
 * @param u64Value The value being set.
 */
VMM_INT_DECL(VBOXSTRICTRC) NEMR3GicWriteSysReg(PVMCPUCC pVCpu, uint32_t u32Reg, uint64_t u64Value)
{
    hv_gic_icc_reg_t const enmIccReg = nemR3DarwinIccRegFromSysReg(u32Reg);
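    /* Note: sysregs without an HvF accessor map to HV_GIC_ICC_REG_INVALID, in
     * which case the call below is expected to fail; those registers are
     * presumably handled entirely inside Hypervisor.framework. */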
1244 hv_return_t const hrc = hv_gic_set_icc_reg(pVCpu->nem.s.hVCpu, enmIccReg, u64Value);
1245 return nemR3DarwinHvSts2Rc(hrc);
1246}
1247
1248
1249/**
1250 * Reads a system ICC register inside the in-kernel HvF GIC.
1251 *
1252 * @returns VBox status code.
1253 * @param pVCpu The cross context virtual CPU structure.
1254 * @param u32Reg The ICC register.
1255 * @param pu64Value Where to store value.
1256 */
1257VMM_INT_DECL(VBOXSTRICTRC) NEMR3GicReadSysReg(PVMCPUCC pVCpu, uint32_t u32Reg, uint64_t *pu64Value)
1258{
1259 hv_gic_icc_reg_t const enmIccReg = nemR3DarwinIccRegFromSysReg(u32Reg);
1260 hv_return_t const hrc = hv_gic_get_icc_reg(pVCpu->nem.s.hVCpu, enmIccReg, pu64Value);
1261 return nemR3DarwinHvSts2Rc(hrc);
1262}
1263
1264
1265static int nemR3DarwinGicCreate(PVM pVM)
1266{
1267 nemR3DarwinDumpGicInfo();
1268
1269 //PCFGMNODE pGicDev = CFGMR3GetChild(CFGMR3GetRoot(pVM), "Devices/gic/0");
1270 PCFGMNODE pGicCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "Devices/gic-nem/0/Config");
1271 AssertPtrReturn(pGicCfg, VERR_NEM_IPE_5);
1272
1273 hv_gic_config_t hGicCfg = hv_gic_config_create();
1274
1275 /*
1276 * Query the MMIO ranges.
1277 */
1278 RTGCPHYS GCPhysMmioBaseDist = 0;
1279 int rc = CFGMR3QueryU64(pGicCfg, "DistributorMmioBase", &GCPhysMmioBaseDist);
1280 if (RT_FAILURE(rc))
1281 return VMSetError(pVM, rc, RT_SRC_POS,
1282 "Configuration error: Failed to get the \"DistributorMmioBase\" value\n");
1283
1284 RTGCPHYS GCPhysMmioBaseReDist = 0;
1285 rc = CFGMR3QueryU64(pGicCfg, "RedistributorMmioBase", &GCPhysMmioBaseReDist);
1286 if (RT_FAILURE(rc))
1287 return VMSetError(pVM, rc, RT_SRC_POS,
1288 "Configuration error: Failed to get the \"RedistributorMmioBase\" value\n");
1289
1290 hv_return_t hrc = hv_gic_config_set_distributor_base(hGicCfg, GCPhysMmioBaseDist);
1291 if (hrc != HV_SUCCESS)
1292 return nemR3DarwinHvSts2Rc(hrc);
1293
1294 hrc = hv_gic_config_set_redistributor_base(hGicCfg, GCPhysMmioBaseReDist);
1295 if (hrc != HV_SUCCESS)
1296 return nemR3DarwinHvSts2Rc(hrc);
1297
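/* Instantiate the in-kernel GIC. Per the Apple docs this has to happen before any vCPU
 * is created, which nemR3NativeInitAfterCPUM() takes care of. */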
1298 hrc = hv_gic_create(hGicCfg);
1299 os_release(hGicCfg);
1300 if (hrc != HV_SUCCESS)
1301 return nemR3DarwinHvSts2Rc(hrc);
1302
1303 /* Make sure the device is not instantiated as Hypervisor.framework provides it. */
1304 //CFGMR3RemoveNode(pGicDev);
1305 return rc;
1306}
1307
1308
1309/**
1310 * Try initialize the native API.
1311 *
1312 * This may only do part of the job, more can be done in
1313 * nemR3NativeInitAfterCPUM() and nemR3NativeInitCompleted().
1314 *
1315 * @returns VBox status code.
1316 * @param pVM The cross context VM structure.
1317 * @param fFallback Whether we're in fallback mode or use-NEM mode. In
1318 * the latter we'll fail if we cannot initialize.
1319 * @param fForced Whether the HMForced flag is set and we should
1320 * fail if we cannot initialize.
1321 */
1322int nemR3NativeInit(PVM pVM, bool fFallback, bool fForced)
1323{
1324 AssertReturn(!pVM->nem.s.fCreatedVm, VERR_WRONG_ORDER);
1325
1326 /*
1327 * Some state init.
1328 */
1329 PCFGMNODE pCfgNem = CFGMR3GetChild(CFGMR3GetRoot(pVM), "NEM/");
1330 RT_NOREF(pCfgNem);
1331
1332 /*
1333 * Error state.
1334 * The error message will be non-empty on failure and 'rc' will be set too.
1335 */
1336 RTERRINFOSTATIC ErrInfo;
1337 PRTERRINFO pErrInfo = RTErrInfoInitStatic(&ErrInfo);
1338
1339 /* Resolve optional imports */
1340 int rc = nemR3DarwinLoadHv(pErrInfo);
1341 if (RT_FAILURE(rc))
1342 {
1343 if ((fForced || !fFallback) && RTErrInfoIsSet(pErrInfo))
1344 return VMSetError(pVM, rc, RT_SRC_POS, "%s", pErrInfo->pszMsg);
1345 return rc;
1346 }
1347
1348 /*
1349 * Need to enable nested virt here if supported and reset the CFGM value to false
1350 * if not supported. This ASSUMES that NEM is initialized before CPUM.
1351 */
1352 PCFGMNODE pCfgCpum = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM/");
1353
1354 hv_vm_config_t hVmCfg = NULL;
1355 if ( hv_vm_config_create
1356 && hv_vm_config_get_el2_supported)
1357 {
1358 hVmCfg = hv_vm_config_create();
1359
1360 bool fHvEl2Supported = false;
1361 hv_return_t hrc = hv_vm_config_get_el2_supported(&fHvEl2Supported);
1362 if ( hrc == HV_SUCCESS
1363 && fHvEl2Supported)
1364 {
1365 /** @cfgm{/CPUM/NestedHWVirt, bool, false}
1366 * Whether to expose the hardware virtualization (EL2/VHE) feature to the guest.
1367 * The default is false. Only supported on M3 and later and macOS 15.0+ (Sequoia).
1368 */
1369 bool fNestedHWVirt = false;
1370 rc = CFGMR3QueryBoolDef(pCfgCpum, "NestedHWVirt", &fNestedHWVirt, false);
1371 AssertLogRelRCReturn(rc, rc);
1372 if (fNestedHWVirt)
1373 {
1374 hrc = hv_vm_config_set_el2_enabled(hVmCfg, fNestedHWVirt);
1375 if (hrc != HV_SUCCESS)
1376 return VMSetError(pVM, VERR_CPUM_INVALID_HWVIRT_CONFIG, RT_SRC_POS,
1377 "Cannot enable nested virtualization: hrc=%#x %s!\n", hrc, nemR3DarwinHvStatusName(hrc));
1378 pVM->nem.s.fEl2Enabled = true;
1379 LogRel(("NEM: Enabled nested virtualization (EL2) support\n"));
1380 }
1381 }
1382 else
1383 {
1384 /* Ensure nested virt is not set. */
1385 rc = CFGMR3RemoveValue(pCfgCpum, "NestedHWVirt");
1386 AssertLogRelRC(rc);
1387
1388 LogRel(("NEM: The host doesn't support nested virtualization! (hrc=%#x fHvEl2Supported=%RTbool)\n",
1389 hrc, fHvEl2Supported));
1390 }
1391 }
1392 else
1393 {
1394 /* Ensure nested virt is not set. */
1395 rc = CFGMR3RemoveValue(pCfgCpum, "NestedHWVirt");
1396 AssertLogRelRC(rc);
1397
1398 LogRel(("NEM: Hypervisor.framework doesn't support nested virtualization!\n"));
1399 }
1400
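/* Note: hVmCfg may still be NULL here on hosts lacking hv_vm_config_create();
 * Hypervisor.framework accepts NULL and falls back to the default VM configuration. */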
1401 hv_return_t hrc = hv_vm_create(hVmCfg);
1402 os_release(hVmCfg);
1403 if (hrc == HV_SUCCESS)
1404 {
1405 pVM->nem.s.fCreatedVm = true;
1406 pVM->nem.s.u64CntFrqHz = ASMReadCntFrqEl0();
1407
1408 /* Will be initialized in NEMHCResumeCpuTickOnAll() before executing guest code. */
1409 pVM->nem.s.u64VTimerOff = 0;
1410
1411 VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_NATIVE_API);
1412 Log(("NEM: Marked active!\n"));
1413 PGMR3EnableNemMode(pVM);
1414 return VINF_SUCCESS;
1415 }
1416
1417 rc = RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED, "hv_vm_create() failed: %#x %s", hrc, nemR3DarwinHvStatusName(hrc));
1418
1419 /*
1420 * We only fail if in forced mode, otherwise just log the complaint and return.
1421 */
1422 Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API || RTErrInfoIsSet(pErrInfo));
1423 if ( (fForced || !fFallback)
1424 && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NATIVE_API)
1425 return VMSetError(pVM, RT_SUCCESS_NP(rc) ? VERR_NEM_NOT_AVAILABLE : rc, RT_SRC_POS, "%s", pErrInfo->pszMsg);
1426
1427 if (RTErrInfoIsSet(pErrInfo))
1428 LogRel(("NEM: Not available: %s\n", pErrInfo->pszMsg));
1429 return VINF_SUCCESS;
1430}
1431
1432
1433/**
1434 * Worker to create the vCPU handle on the EMT running it later on (as required by HV).
1435 *
1436 * @returns VBox status code
1437 * @param pVM The VM handle.
1438 * @param pVCpu The vCPU handle.
1439 * @param idCpu ID of the CPU to create.
1440 */
1441static DECLCALLBACK(int) nemR3DarwinNativeInitVCpuOnEmt(PVM pVM, PVMCPU pVCpu, VMCPUID idCpu)
1442{
1443 if (idCpu == 0)
1444 {
1445 Assert(pVM->nem.s.hVCpuCfg == NULL);
1446
1447 /* Create a new vCPU config and query the ID registers. */
1448 pVM->nem.s.hVCpuCfg = hv_vcpu_config_create();
1449 if (!pVM->nem.s.hVCpuCfg)
1450 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
1451 "Call to hv_vcpu_config_create failed on vCPU %u", idCpu);
1452
1453 /* Query ID registers and hand them to CPUM. */
1454 CPUMARMV8IDREGS IdRegs; RT_ZERO(IdRegs);
1455 for (uint32_t i = 0; i < RT_ELEMENTS(s_aIdRegs); i++)
1456 {
1457 uint64_t *pu64 = (uint64_t *)((uint8_t *)&IdRegs + s_aIdRegs[i].offIdStruct);
1458 hv_return_t hrc = hv_vcpu_config_get_feature_reg(pVM->nem.s.hVCpuCfg, s_aIdRegs[i].enmHvReg, pu64);
1459 if (hrc != HV_SUCCESS)
1460 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
1461 "Call to hv_vcpu_get_feature_reg(, %#x, ) failed: %#x (%Rrc)", hrc, nemR3DarwinHvSts2Rc(hrc));
1462 }
1463
1464 int rc = CPUMR3PopulateFeaturesByIdRegisters(pVM, &IdRegs);
1465 if (RT_FAILURE(rc))
1466 return rc;
1467 }
1468
1469 hv_return_t hrc = hv_vcpu_create(&pVCpu->nem.s.hVCpu, &pVCpu->nem.s.pHvExit, pVM->nem.s.hVCpuCfg);
1470 if (hrc != HV_SUCCESS)
1471 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
1472 "Call to hv_vcpu_create failed on vCPU %u: %#x (%Rrc)", idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));
1473
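/* Give the vCPU a unique MPIDR_EL1 affinity value (the plain vCPU index suffices for the
 * CPU counts we support) so the guest and the GIC can tell the cores apart. */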
1474 hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_MPIDR_EL1, idCpu);
1475 if (hrc != HV_SUCCESS)
1476 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
1477 "Setting MPIDR_EL1 failed on vCPU %u: %#x (%Rrc)", idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));
1478
1479 return VINF_SUCCESS;
1480}
1481
1482
1483/**
1484 * Worker to destroy the vCPU handle on the EMT running it later on (as required by HV).
1485 *
1486 * @returns VBox status code.
1487 * @param pVM The VM handle.
1488 * @param pVCpu The vCPU handle.
1489 */
1490static DECLCALLBACK(int) nemR3DarwinNativeTermVCpuOnEmt(PVM pVM, PVMCPU pVCpu)
1491{
1492 hv_return_t hrc = hv_vcpu_destroy(pVCpu->nem.s.hVCpu);
1493 Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);
1494
1495 if (pVCpu->idCpu == 0)
1496 {
1497 os_release(pVM->nem.s.hVCpuCfg);
1498 pVM->nem.s.hVCpuCfg = NULL;
1499 }
1500 return VINF_SUCCESS;
1501}
1502
1503
1504/**
1505 * This is called after CPUMR3Init is done.
1506 *
1507 * @returns VBox status code.
1508 * @param pVM The VM handle.
1509 */
1510int nemR3NativeInitAfterCPUM(PVM pVM)
1511{
1512 /*
1513 * Validate sanity.
1514 */
1515 AssertReturn(!pVM->nem.s.fCreatedEmts, VERR_WRONG_ORDER);
1516 AssertReturn(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API, VERR_WRONG_ORDER);
1517
1518 /*
1519 * Need to create the GIC here if the NEM variant is configured
1520 * before any vCPU is created according to the Apple docs.
1521 */
1522 if ( hv_gic_create
1523 && CFGMR3GetChild(CFGMR3GetRoot(pVM), "Devices/gic-nem/0"))
1524 {
1525 int rc = nemR3DarwinGicCreate(pVM);
1526 if (RT_FAILURE(rc))
1527 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS, "Creating the GIC failed: %Rrc", rc);
1528 }
1529
1530 /*
1531 * Setup the EMTs.
1532 */
1533 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1534 {
1535 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
1536
1537 int rc = VMR3ReqCallWait(pVM, idCpu, (PFNRT)nemR3DarwinNativeInitVCpuOnEmt, 3, pVM, pVCpu, idCpu);
1538 if (RT_FAILURE(rc))
1539 {
1540 /* Rollback. */
1541 while (idCpu--)
1542 VMR3ReqCallWait(pVM, idCpu, (PFNRT)nemR3DarwinNativeTermVCpuOnEmt, 2, pVM, pVM->apCpusR3[idCpu]);
1543
1544 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS, "Call to hv_vcpu_create failed: %Rrc", rc);
1545 }
1546 }
1547
1548 pVM->nem.s.fCreatedEmts = true;
1549 return VINF_SUCCESS;
1550}
1551
1552
1553int nemR3NativeInitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
1554{
1555 RT_NOREF(pVM, enmWhat);
1556 return VINF_SUCCESS;
1557}
1558
1559
1560int nemR3NativeTerm(PVM pVM)
1561{
1562 /*
1563 * Delete the VM.
1564 */
1565
1566 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1567 {
1568 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
1569
1570 /*
1571 * Apple's documentation states that the vCPU should be destroyed
1572 * on the thread running the vCPU but as all the other EMTs are gone
1573 * at this point, destroying the VM would hang.
1574 *
1575 * We seem to be in luck here though, as destroying the vCPUs apparently
1576 * works from EMT(0) as well.
1577 */
1578 hv_return_t hrc = hv_vcpu_destroy(pVCpu->nem.s.hVCpu);
1579 Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);
1580 }
1581
1582 pVM->nem.s.fCreatedEmts = false;
1583 if (pVM->nem.s.fCreatedVm)
1584 {
1585 hv_return_t hrc = hv_vm_destroy();
1586 if (hrc != HV_SUCCESS)
1587 LogRel(("NEM: hv_vm_destroy() failed with %#x\n", hrc));
1588
1589 pVM->nem.s.fCreatedVm = false;
1590 }
1591 return VINF_SUCCESS;
1592}
1593
1594
1595/**
1596 * VM reset notification.
1597 *
1598 * @param pVM The cross context VM structure.
1599 */
1600void nemR3NativeReset(PVM pVM)
1601{
1602 RT_NOREF(pVM);
1603}
1604
1605
1606/**
1607 * Reset CPU due to INIT IPI or hot (un)plugging.
1608 *
1609 * @param pVCpu The cross context virtual CPU structure of the CPU being
1610 * reset.
1611 * @param fInitIpi Whether this is the INIT IPI or hot (un)plugging case.
1612 */
1613void nemR3NativeResetCpu(PVMCPU pVCpu, bool fInitIpi)
1614{
1615 RT_NOREF(pVCpu, fInitIpi);
1616}
1617
1618
1619/**
1620 * Returns the byte size from the given access SAS value.
1621 *
1622 * @returns Number of bytes to transfer.
1623 * @param uSas The SAS value to convert.
1624 */
1625DECLINLINE(size_t) nemR3DarwinGetByteCountFromSas(uint8_t uSas)
1626{
1627 switch (uSas)
1628 {
1629 case ARMV8_EC_ISS_DATA_ABRT_SAS_BYTE: return sizeof(uint8_t);
1630 case ARMV8_EC_ISS_DATA_ABRT_SAS_HALFWORD: return sizeof(uint16_t);
1631 case ARMV8_EC_ISS_DATA_ABRT_SAS_WORD: return sizeof(uint32_t);
1632 case ARMV8_EC_ISS_DATA_ABRT_SAS_DWORD: return sizeof(uint64_t);
1633 default:
1634 AssertReleaseFailed();
1635 }
1636
1637 return 0;
1638}
1639
1640
1641/**
1642 * Sets the given general purpose register to the given value.
1643 *
1644 * @param pVCpu The cross context virtual CPU structure of the
1645 * calling EMT.
1646 * @param uReg The register index.
1647 * @param f64BitReg Flag whether to operate on a 64-bit or 32-bit register.
1648 * @param fSignExtend Flag whether to sign extend the value.
1649 * @param u64Val The value.
1650 */
1651DECLINLINE(void) nemR3DarwinSetGReg(PVMCPU pVCpu, uint8_t uReg, bool f64BitReg, bool fSignExtend, uint64_t u64Val)
1652{
1653 AssertReturnVoid(uReg < 31);
1654
1655 if (f64BitReg)
1656 pVCpu->cpum.GstCtx.aGRegs[uReg].x = fSignExtend ? (int64_t)u64Val : u64Val;
1657 else
1658 pVCpu->cpum.GstCtx.aGRegs[uReg].x = (uint64_t)(fSignExtend ? (int32_t)u64Val : (uint32_t)u64Val);
1659
1660 /* Mark the register as not extern anymore. */
1661 switch (uReg)
1662 {
1663 case 0:
1664 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X0;
1665 break;
1666 case 1:
1667 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X1;
1668 break;
1669 case 2:
1670 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X2;
1671 break;
1672 case 3:
1673 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X3;
1674 break;
1675 default:
1676 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_X4_X28));
1677 /** @todo We need to import all missing registers in order to clear this flag (or just set it in HV from here). */
1678 }
1679}
1680
1681
1682/**
1683 * Gets the given general purpose register and returns the value.
1684 *
1685 * @returns Value from the given register.
1686 * @param pVCpu The cross context virtual CPU structure of the
1687 * calling EMT.
1688 * @param uReg The register index.
1689 */
1690DECLINLINE(uint64_t) nemR3DarwinGetGReg(PVMCPU pVCpu, uint8_t uReg)
1691{
1692 AssertReturn(uReg <= ARMV8_A64_REG_XZR, 0);
1693
1694 if (uReg == ARMV8_A64_REG_XZR)
1695 return 0;
1696
1697 /** @todo Import the register if extern. */
1698 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_GPRS_MASK));
1699
1700 return pVCpu->cpum.GstCtx.aGRegs[uReg].x;
1701}
1702
1703
1704/**
1705 * Works on the data abort exception (which will be a MMIO access most of the time).
1706 *
1707 * @returns VBox strict status code.
1708 * @param pVM The cross context VM structure.
1709 * @param pVCpu The cross context virtual CPU structure of the
1710 * calling EMT.
1711 * @param uIss The instruction specific syndrome value.
1712 * @param fInsn32Bit Flag whether the exception was caused by a 32-bit or 16-bit instruction.
1713 * @param GCPtrDataAbrt The virtual GC address causing the data abort.
1714 * @param GCPhysDataAbrt The physical GC address which caused the data abort.
1715 */
1716static VBOXSTRICTRC nemR3DarwinHandleExitExceptionDataAbort(PVM pVM, PVMCPU pVCpu, uint32_t uIss, bool fInsn32Bit,
1717 RTGCPTR GCPtrDataAbrt, RTGCPHYS GCPhysDataAbrt)
1718{
1719 bool fIsv = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_ISV);
1720 bool fL2Fault = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_S1PTW);
1721 bool fWrite = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_WNR);
1722 bool f64BitReg = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SF);
1723 bool fSignExtend = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SSE);
1724 uint8_t uReg = ARMV8_EC_ISS_DATA_ABRT_SRT_GET(uIss);
1725 uint8_t uAcc = ARMV8_EC_ISS_DATA_ABRT_SAS_GET(uIss);
1726 size_t cbAcc = nemR3DarwinGetByteCountFromSas(uAcc);
1727 LogFlowFunc(("fIsv=%RTbool fL2Fault=%RTbool fWrite=%RTbool f64BitReg=%RTbool fSignExtend=%RTbool uReg=%u uAcc=%u GCPtrDataAbrt=%RGv GCPhysDataAbrt=%RGp\n",
1728 fIsv, fL2Fault, fWrite, f64BitReg, fSignExtend, uReg, uAcc, GCPtrDataAbrt, GCPhysDataAbrt));
1729
1730 RT_NOREF(fL2Fault, GCPtrDataAbrt);
1731
1732 if (fWrite)
1733 {
1734 /*
1735 * Check whether this is one of the dirty tracked regions, mark it as dirty
1736 * and enable write support for this region again.
1737 *
1738 * This is required for proper VRAM tracking, or the display might not get updated.
1739 * The generic PGM facility can't be used because it operates on guest page sizes, while
1740 * setting protection flags with Hypervisor.framework works only on host page sized regions,
1741 * so we have to cook our own. Additionally, the VRAM region is marked as prefetchable
1742 * (write-back), so accesses don't produce a valid instruction syndrome and the instruction
1743 * must be restarted after enabling write access again (due to a missing interpreter right now).
1744 */
1745 for (uint32_t idSlot = 0; idSlot < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking); idSlot++)
1746 {
1747 PNEMHVMMIO2REGION pMmio2Region = &pVM->nem.s.aMmio2DirtyTracking[idSlot];
1748
1749 if ( GCPhysDataAbrt >= pMmio2Region->GCPhysStart
1750 && GCPhysDataAbrt <= pMmio2Region->GCPhysLast)
1751 {
1752 pMmio2Region->fDirty = true;
1753
1754 uint8_t u2State;
1755 int rc = nemR3DarwinProtect(pMmio2Region->GCPhysStart, pMmio2Region->GCPhysLast - pMmio2Region->GCPhysStart + 1,
1756 NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE | NEM_PAGE_PROT_WRITE, &u2State);
1757
1758 /* Restart the instruction if there is no instruction syndrome available. */
1759 if (RT_FAILURE(rc) || !fIsv)
1760 return rc;
1761 }
1762 }
1763 }
1764
1765 VBOXSTRICTRC rcStrict;
1766 if (fIsv)
1767 {
1768 EMHistoryAddExit(pVCpu,
1769 fWrite
1770 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
1771 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
1772 pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());
1773
1774 uint64_t u64Val = 0;
1775 if (fWrite)
1776 {
1777 u64Val = nemR3DarwinGetGReg(pVCpu, uReg);
1778 rcStrict = PGMPhysWrite(pVM, GCPhysDataAbrt, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
1779 Log4(("MmioExit/%u: %08RX64: WRITE %#RGp LB %u, %.*Rhxs -> rcStrict=%Rrc\n",
1780 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhysDataAbrt, cbAcc, cbAcc,
1781 &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
1782 }
1783 else
1784 {
1785 rcStrict = PGMPhysRead(pVM, GCPhysDataAbrt, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
1786 Log4(("MmioExit/%u: %08RX64: READ %#RGp LB %u -> %.*Rhxs rcStrict=%Rrc\n",
1787 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhysDataAbrt, cbAcc, cbAcc,
1788 &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
1789 if (rcStrict == VINF_SUCCESS)
1790 nemR3DarwinSetGReg(pVCpu, uReg, f64BitReg, fSignExtend, u64Val);
1791 }
1792 }
1793 else
1794 {
1795 /** @todo Our UEFI firmware accesses the flash region with the following instruction
1796 * when the NVRAM actually contains data:
1797 * ldrb w9, [x6, #-0x0001]!
1798 * This is too complicated for the hardware so the ISV bit is not set. Until there
1799 * is a proper IEM implementation we just handle this here for now to avoid annoying
1800 * users too much.
1801 */
1802 /* The following ASSUMES that the vCPU state is completely synced. */
1803
1804 /* Read instruction. */
1805 RTGCPTR GCPtrPage = pVCpu->cpum.GstCtx.Pc.u64 & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK;
1806 const void *pvPageR3 = NULL;
1807 PGMPAGEMAPLOCK PageMapLock;
1808
1809 rcStrict = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrPage, &pvPageR3, &PageMapLock);
1810 if (rcStrict == VINF_SUCCESS)
1811 {
1812 uint32_t u32Instr = *(uint32_t *)((uint8_t *)pvPageR3 + (pVCpu->cpum.GstCtx.Pc.u64 - GCPtrPage));
1813 PGMPhysReleasePageMappingLock(pVCpu->pVMR3, &PageMapLock);
1814
1815 DISSTATE Dis;
1816 rcStrict = DISInstrWithPrefetchedBytes((uintptr_t)pVCpu->cpum.GstCtx.Pc.u64, DISCPUMODE_ARMV8_A64, 0 /*fFilter - none */,
1817 &u32Instr, sizeof(u32Instr), NULL, NULL, &Dis, NULL);
1818 if (rcStrict == VINF_SUCCESS)
1819 {
1820 if ( Dis.pCurInstr->uOpcode == OP_ARMV8_A64_LDRB
1821 && Dis.aParams[0].armv8.enmType == kDisArmv8OpParmReg
1822 && Dis.aParams[0].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_32Bit
1823 && Dis.aParams[1].armv8.enmType == kDisArmv8OpParmAddrInGpr
1824 && Dis.aParams[1].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_64Bit
1825 && (Dis.aParams[1].fUse & DISUSE_PRE_INDEXED))
1826 {
1827 /* The fault address is already the final address. */
1828 uint8_t bVal = 0;
1829 rcStrict = PGMPhysRead(pVM, GCPhysDataAbrt, &bVal, 1, PGMACCESSORIGIN_HM);
1830 Log4(("MmioExit/%u: %08RX64: READ %#RGp LB %u -> %.*Rhxs rcStrict=%Rrc\n",
1831 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhysDataAbrt, sizeof(bVal), sizeof(bVal),
1832 &bVal, VBOXSTRICTRC_VAL(rcStrict) ));
1833 if (rcStrict == VINF_SUCCESS)
1834 {
1835 nemR3DarwinSetGReg(pVCpu, Dis.aParams[0].armv8.Op.Reg.idReg, false /*f64BitReg*/, false /*fSignExtend*/, bVal);
1836 /* Update the indexed register. */
1837 pVCpu->cpum.GstCtx.aGRegs[Dis.aParams[1].armv8.Op.Reg.idReg].x += Dis.aParams[1].armv8.u.offBase;
1838 }
1839 }
1840 /*
1841 * Seeing the following with the Windows 11/ARM TPM driver:
1842 * %fffff800e5342888 48 25 45 29 ldp w8, w9, [x10, #+0x0028]
1843 */
1844 else if ( Dis.pCurInstr->uOpcode == OP_ARMV8_A64_LDP
1845 && Dis.aParams[0].armv8.enmType == kDisArmv8OpParmReg
1846 && Dis.aParams[0].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_32Bit
1847 && Dis.aParams[1].armv8.enmType == kDisArmv8OpParmReg
1848 && Dis.aParams[1].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_32Bit
1849 && Dis.aParams[2].armv8.enmType == kDisArmv8OpParmAddrInGpr
1850 && Dis.aParams[2].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_64Bit)
1851 {
1852 /** @todo This is tricky to handle if the first register read returns something other than VINF_SUCCESS... */
1853 /* The fault address is already the final address. */
1854 uint32_t u32Val1 = 0;
1855 uint32_t u32Val2 = 0;
1856 rcStrict = PGMPhysRead(pVM, GCPhysDataAbrt, &u32Val1, sizeof(u32Val1), PGMACCESSORIGIN_HM);
1857 if (rcStrict == VINF_SUCCESS)
1858 rcStrict = PGMPhysRead(pVM, GCPhysDataAbrt + sizeof(uint32_t), &u32Val2, sizeof(u32Val2), PGMACCESSORIGIN_HM);
1859 Log4(("MmioExit/%u: %08RX64: READ %#RGp LB %u -> %.*Rhxs %.*Rhxs rcStrict=%Rrc\n",
1860 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhysDataAbrt, 2 * sizeof(uint32_t), sizeof(u32Val1),
1861 &u32Val1, sizeof(u32Val2), &u32Val2, VBOXSTRICTRC_VAL(rcStrict) ));
1862 if (rcStrict == VINF_SUCCESS)
1863 {
1864 nemR3DarwinSetGReg(pVCpu, Dis.aParams[0].armv8.Op.Reg.idReg, false /*f64BitReg*/, false /*fSignExtend*/, u32Val1);
1865 nemR3DarwinSetGReg(pVCpu, Dis.aParams[1].armv8.Op.Reg.idReg, false /*f64BitReg*/, false /*fSignExtend*/, u32Val2);
1866 }
1867 }
1868 /* T O D O:
1869 * Recent W11:
1870 * x0=ffffb804ea3217d8 x1=ffffe28437802000 x2=0000000000000424 x3=fffff802e5716030
1871 * x4=ffffe28437802424 x5=ffffb804ea321bfc x6=000000000080009c x7=000000000080009c
1872 * x8=ffff87849fefc788 x9=ffff87849fefc788 x10=000000000000001c x11=ffffb804ea32909c
1873 * x12=000000000000001c x13=000000000000009c x14=ffffb804ea3290a8 x15=ffffd580b2b1f7d8
1874 * x16=0000f6999080cdbe x17=0000f6999080cdbe x18=ffffd08158fbf000 x19=ffffb804ea3217d0
1875 * x20=0000000000000001 x21=0000000000000004 x22=ffffb804ea321660 x23=000047fb15cdefd8
1876 * x24=0000000000000000 x25=ffffb804ea2f1080 x26=0000000000000000 x27=0000000000000380
1877 * x28=0000000000000000 x29=ffff87849fefc7e0 x30=fffff802e57120b0
1878 * pc=fffff802e5713c20 pstate=00000000a0001344
1879 * sp_el0=ffff87849fefc7e0 sp_el1=ffff87849e462400 elr_el1=fffff802e98889c8
1880 * pl061gpio!start_seg1_.text+0x2c20:
1881 * %fffff802e5713c20 23 00 c0 3d ldr q3, [x1]
1882 * VBoxDbg> format %%(%@x1)
1883 * Guest physical address: %%ffddd000
1884 * VBoxDbg> info mmio
1885 * MMIO registrations: 12 (186 allocated)
1886 * ## Ctx Size Mapping PCI Description
1887 * 0 R3 00000000000c0000 0000000004000000-00000000040bffff Flash Memory
1888 * [snip]
1889 * 11 R3 0000000000001000 00000000ffddd000-00000000ffdddfff PL061
1890 */
1891 else
1892 AssertLogRelMsgFailedReturn(("pc=%#RX64: %#x opcode=%d\n",
1893 pVCpu->cpum.GstCtx.Pc.u64, Dis.Instr.au32[0], Dis.pCurInstr->uOpcode),
1894 VERR_NEM_IPE_2);
1895 }
1896 }
1897 }
1898
1899 if (rcStrict == VINF_SUCCESS)
1900 pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);
1901
1902 return rcStrict;
1903}
1904
1905
1906/**
1907 * Works on the trapped MRS, MSR and system instruction exception.
1908 *
1909 * @returns VBox strict status code.
1910 * @param pVM The cross context VM structure.
1911 * @param pVCpu The cross context virtual CPU structure of the
1912 * calling EMT.
1913 * @param uIss The instruction specific syndrome value.
1914 * @param fInsn32Bit Flag whether the exception was caused by a 32-bit or 16-bit instruction.
1915 */
1916static VBOXSTRICTRC nemR3DarwinHandleExitExceptionTrappedSysInsn(PVM pVM, PVMCPU pVCpu, uint32_t uIss, bool fInsn32Bit)
1917{
1918 bool fRead = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_DIRECTION_IS_READ(uIss);
1919 uint8_t uCRm = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_CRM_GET(uIss);
1920 uint8_t uReg = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_RT_GET(uIss);
1921 uint8_t uCRn = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_CRN_GET(uIss);
1922 uint8_t uOp1 = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP1_GET(uIss);
1923 uint8_t uOp2 = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP2_GET(uIss);
1924 uint8_t uOp0 = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP0_GET(uIss);
1925 uint16_t idSysReg = ARMV8_AARCH64_SYSREG_ID_CREATE(uOp0, uOp1, uCRn, uCRm, uOp2);
1926 LogFlowFunc(("fRead=%RTbool uCRm=%u uReg=%u uCRn=%u uOp1=%u uOp2=%u uOp0=%u idSysReg=%#x\n",
1927 fRead, uCRm, uReg, uCRn, uOp1, uOp2, uOp0, idSysReg));
1928
1929 /** @todo EMEXITTYPE_MSR_READ/EMEXITTYPE_MSR_WRITE are misnomers. */
1930 EMHistoryAddExit(pVCpu,
1931 fRead
1932 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ)
1933 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE),
1934 pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());
1935
1936 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1937 uint64_t u64Val = 0;
1938 if (fRead)
1939 {
1940 RT_NOREF(pVM);
1941 rcStrict = CPUMQueryGuestSysReg(pVCpu, idSysReg, &u64Val);
1942 Log4(("SysInsnExit/%u: %08RX64: READ %u:%u:%u:%u:%u -> %#RX64 rcStrict=%Rrc\n",
1943 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, uOp0, uOp1, uCRn, uCRm, uOp2, u64Val,
1944 VBOXSTRICTRC_VAL(rcStrict) ));
1945 if (rcStrict == VINF_SUCCESS)
1946 nemR3DarwinSetGReg(pVCpu, uReg, true /*f64BitReg*/, false /*fSignExtend*/, u64Val);
1947 }
1948 else
1949 {
1950 u64Val = nemR3DarwinGetGReg(pVCpu, uReg);
1951 rcStrict = CPUMSetGuestSysReg(pVCpu, idSysReg, u64Val);
1952 Log4(("SysInsnExit/%u: %08RX64: WRITE %u:%u:%u:%u:%u %#RX64 -> rcStrict=%Rrc\n",
1953 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, uOp0, uOp1, uCRn, uCRm, uOp2, u64Val,
1954 VBOXSTRICTRC_VAL(rcStrict) ));
1955 }
1956
1957 if (rcStrict == VINF_SUCCESS)
1958 pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);
1959
1960 return rcStrict;
1961}
1962
1963
1964/**
1965 * Works on the trapped HVC instruction exception.
1966 *
1967 * @returns VBox strict status code.
1968 * @param pVM The cross context VM structure.
1969 * @param pVCpu The cross context virtual CPU structure of the
1970 * calling EMT.
1971 * @param uIss The instruction specific syndrome value.
1972 * @param fAdvancePc Flag whether to advance the guest program counter.
1973 */
1974static VBOXSTRICTRC nemR3DarwinHandleExitExceptionTrappedHvcInsn(PVM pVM, PVMCPU pVCpu, uint32_t uIss, bool fAdvancePc = false)
1975{
1976 uint16_t u16Imm = ARMV8_EC_ISS_AARCH64_TRAPPED_HVC_INSN_IMM_GET(uIss);
1977 LogFlowFunc(("u16Imm=%#RX16\n", u16Imm));
1978
1979#if 0 /** @todo For later */
1980 EMHistoryAddExit(pVCpu,
1981 fRead
1982 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ)
1983 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE),
1984 pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());
1985#endif
1986
1987 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1988 if (u16Imm == 0)
1989 {
1990 /** @todo Raise exception to EL1 if PSCI not configured. */
1991 /** @todo Need a generic mechanism here to pass this to; GIM maybe? */
1992 uint32_t uFunId = pVCpu->cpum.GstCtx.aGRegs[ARMV8_A64_REG_X0].w;
1993 bool fHvc64 = RT_BOOL(uFunId & ARM_SMCCC_FUNC_ID_64BIT); RT_NOREF(fHvc64);
1994 uint32_t uEntity = ARM_SMCCC_FUNC_ID_ENTITY_GET(uFunId);
1995 uint32_t uFunNum = ARM_SMCCC_FUNC_ID_NUM_GET(uFunId);
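/* SMCCC function IDs encode the owning entity and a 16-bit function number, e.g. PSCI
 * SYSTEM_OFF (0x84000008) decodes to the standard secure service entity, function 8. */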
1996 if (uEntity == ARM_SMCCC_FUNC_ID_ENTITY_STD_SEC_SERVICE)
1997 {
1998 switch (uFunNum)
1999 {
2000 case ARM_PSCI_FUNC_ID_PSCI_VERSION:
2001 nemR3DarwinSetGReg(pVCpu, ARMV8_A64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_FUNC_ID_PSCI_VERSION_SET(1, 2));
2002 break;
2003 case ARM_PSCI_FUNC_ID_SYSTEM_OFF:
2004 rcStrict = VMR3PowerOff(pVM->pUVM);
2005 break;
2006 case ARM_PSCI_FUNC_ID_SYSTEM_RESET:
2007 case ARM_PSCI_FUNC_ID_SYSTEM_RESET2:
2008 {
2009 bool fHaltOnReset;
2010 int rc = CFGMR3QueryBool(CFGMR3GetChild(CFGMR3GetRoot(pVM), "PDM"), "HaltOnReset", &fHaltOnReset);
2011 if (RT_SUCCESS(rc) && fHaltOnReset)
2012 {
2013 Log(("nemR3DarwinHandleExitExceptionTrappedHvcInsn: Halt On Reset!\n"));
2014 rcStrict = VINF_EM_HALT;
2015 }
2016 else
2017 {
2018 /** @todo pVM->pdm.s.fResetFlags = fFlags; */
2019 VM_FF_SET(pVM, VM_FF_RESET);
2020 rcStrict = VINF_EM_RESET;
2021 }
2022 break;
2023 }
2024 case ARM_PSCI_FUNC_ID_CPU_ON:
2025 {
2026 uint64_t u64TgtCpu = nemR3DarwinGetGReg(pVCpu, ARMV8_A64_REG_X1);
2027 RTGCPHYS GCPhysExecAddr = nemR3DarwinGetGReg(pVCpu, ARMV8_A64_REG_X2);
2028 uint64_t u64CtxId = nemR3DarwinGetGReg(pVCpu, ARMV8_A64_REG_X3);
2029 VMMR3CpuOn(pVM, u64TgtCpu & 0xff, GCPhysExecAddr, u64CtxId);
2030 nemR3DarwinSetGReg(pVCpu, ARMV8_A64_REG_X0, true /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_STS_SUCCESS);
2031 break;
2032 }
2033 case ARM_PSCI_FUNC_ID_PSCI_FEATURES:
2034 {
2035 uint32_t u32FunNum = (uint32_t)nemR3DarwinGetGReg(pVCpu, ARMV8_A64_REG_X1);
2036 switch (u32FunNum)
2037 {
2038 case ARM_PSCI_FUNC_ID_PSCI_VERSION:
2039 case ARM_PSCI_FUNC_ID_SYSTEM_OFF:
2040 case ARM_PSCI_FUNC_ID_SYSTEM_RESET:
2041 case ARM_PSCI_FUNC_ID_SYSTEM_RESET2:
2042 case ARM_PSCI_FUNC_ID_CPU_ON:
2043 case ARM_PSCI_FUNC_ID_MIGRATE_INFO_TYPE:
2044 nemR3DarwinSetGReg(pVCpu, ARMV8_A64_REG_X0,
2045 false /*f64BitReg*/, false /*fSignExtend*/,
2046 (uint64_t)ARM_PSCI_STS_SUCCESS);
2047 break;
2048 default:
2049 nemR3DarwinSetGReg(pVCpu, ARMV8_A64_REG_X0,
2050 false /*f64BitReg*/, false /*fSignExtend*/,
2051 (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
2052 }
2053 break;
2054 }
2055 case ARM_PSCI_FUNC_ID_MIGRATE_INFO_TYPE:
2056 nemR3DarwinSetGReg(pVCpu, ARMV8_A64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_MIGRATE_INFO_TYPE_TOS_NOT_PRESENT);
2057 break;
2058 default:
2059 nemR3DarwinSetGReg(pVCpu, ARMV8_A64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
2060 }
2061 }
2062 else
2063 nemR3DarwinSetGReg(pVCpu, ARMV8_A64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
2064 }
2065
2066 /** @todo What to do if immediate is != 0? */
2067
2068 if ( rcStrict == VINF_SUCCESS
2069 && fAdvancePc)
2070 pVCpu->cpum.GstCtx.Pc.u64 += sizeof(uint32_t);
2071
2072 return rcStrict;
2073}
2074
2075
2076/**
2077 * Handles an exception VM exit.
2078 *
2079 * @returns VBox strict status code.
2080 * @param pVM The cross context VM structure.
2081 * @param pVCpu The cross context virtual CPU structure of the
2082 * calling EMT.
2083 * @param pExit Pointer to the exit information.
2084 */
2085static VBOXSTRICTRC nemR3DarwinHandleExitException(PVM pVM, PVMCPU pVCpu, const hv_vcpu_exit_t *pExit)
2086{
2087 uint32_t uEc = ARMV8_ESR_EL2_EC_GET(pExit->exception.syndrome);
2088 uint32_t uIss = ARMV8_ESR_EL2_ISS_GET(pExit->exception.syndrome);
2089 bool fInsn32Bit = ARMV8_ESR_EL2_IL_IS_32BIT(pExit->exception.syndrome);
2090
2091 LogFlowFunc(("pVM=%p pVCpu=%p{.idCpu=%u} uEc=%u{%s} uIss=%#RX32 fInsn32Bit=%RTbool\n",
2092 pVM, pVCpu, pVCpu->idCpu, uEc, nemR3DarwinEsrEl2EcStringify(uEc), uIss, fInsn32Bit));
2093
2094 switch (uEc)
2095 {
2096 case ARMV8_ESR_EL2_DATA_ABORT_FROM_LOWER_EL:
2097 return nemR3DarwinHandleExitExceptionDataAbort(pVM, pVCpu, uIss, fInsn32Bit, pExit->exception.virtual_address,
2098 pExit->exception.physical_address);
2099 case ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_SYS_INSN:
2100 return nemR3DarwinHandleExitExceptionTrappedSysInsn(pVM, pVCpu, uIss, fInsn32Bit);
2101 case ARMV8_ESR_EL2_EC_AARCH64_HVC_INSN:
2102 return nemR3DarwinHandleExitExceptionTrappedHvcInsn(pVM, pVCpu, uIss);
2103 case ARMV8_ESR_EL2_EC_AARCH64_SMC_INSN:
2104 return nemR3DarwinHandleExitExceptionTrappedHvcInsn(pVM, pVCpu, uIss, true);
2105 case ARMV8_ESR_EL2_EC_TRAPPED_WFX:
2106 {
2107 /* No need to halt if there is an interrupt pending already. */
2108 if (VMCPU_FF_IS_ANY_SET(pVCpu, (VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ)))
2109 {
2110 LogFlowFunc(("IRQ | FIQ set => VINF_SUCCESS\n"));
2111 pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);
2112 return VINF_SUCCESS;
2113 }
2114
2115 /* Set the vTimer expiration in order to get out of the halt at the right point in time. */
2116 if ( (pVCpu->cpum.GstCtx.CntvCtlEl0 & ARMV8_CNTV_CTL_EL0_AARCH64_ENABLE)
2117 && !(pVCpu->cpum.GstCtx.CntvCtlEl0 & ARMV8_CNTV_CTL_EL0_AARCH64_IMASK))
2118 {
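/* Hypervisor.framework subtracts the configured offset from the host counter when the
 * guest reads CNTVCT_EL0, so host ticks minus u64VTimerOff yields the current guest
 * virtual counter value. */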
2119 uint64_t cTicksVTimer = mach_absolute_time() - pVM->nem.s.u64VTimerOff;
2120
2121 /* Check whether it expired and start executing guest code. */
2122 if (cTicksVTimer >= pVCpu->cpum.GstCtx.CntvCValEl0)
2123 {
2124 LogFlowFunc(("Guest timer expired (cTicksVTimer=%RU64 CntvCValEl0=%RU64) => VINF_SUCCESS\n",
2125 cTicksVTimer, pVCpu->cpum.GstCtx.CntvCValEl0));
2126 pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);
2127 return VINF_SUCCESS;
2128 }
2129
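/* Convert the remaining ticks to nanoseconds: cTicks * 10^9 / uCntFrqHz, done by
 * ASMMultU64ByU32DivByU32 with a wide intermediate to avoid 64-bit overflow; e.g. with
 * the typical 24 MHz counter on Apple Silicon, 48000 ticks come out as 2ms. */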
2130 uint64_t cTicksVTimerToExpire = pVCpu->cpum.GstCtx.CntvCValEl0 - cTicksVTimer;
2131 uint64_t cNanoSecsVTimerToExpire = ASMMultU64ByU32DivByU32(cTicksVTimerToExpire, RT_NS_1SEC, (uint32_t)pVM->nem.s.u64CntFrqHz);
2132
2133 /*
2134 * Our halt method doesn't work with sub-millisecond granularity at the moment, causing a huge slowdown
2135 * plus scheduling overhead which would increase the wakeup latency.
2136 * So we only halt when the time to expiration exceeds a threshold (this needs more experimentation, but the
2137 * 2ms used below turned out to be a good compromise between CPU load when the guest is idle and performance).
2138 */
2139 if (cNanoSecsVTimerToExpire < 2 * RT_NS_1MS)
2140 {
2141 LogFlowFunc(("Guest timer expiration < 2ms (cNanoSecsVTimerToExpire=%RU64) => VINF_SUCCESS\n",
2142 cNanoSecsVTimerToExpire));
2143 pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);
2144 return VINF_SUCCESS;
2145 }
2146
2147 LogFlowFunc(("Set vTimer activation to cNanoSecsVTimerToExpire=%#RX64 (CntvCValEl0=%#RX64, u64VTimerOff=%#RX64 cTicksVTimer=%#RX64 u64CntFrqHz=%#RX64)\n",
2148 cNanoSecsVTimerToExpire, pVCpu->cpum.GstCtx.CntvCValEl0, pVM->nem.s.u64VTimerOff, cTicksVTimer, pVM->nem.s.u64CntFrqHz));
2149 TMCpuSetVTimerNextActivation(pVCpu, cNanoSecsVTimerToExpire);
2150 }
2151 else
2152 TMCpuSetVTimerNextActivation(pVCpu, UINT64_MAX);
2153
2154 pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);
2155 return VINF_EM_HALT;
2156 }
2157 case ARMV8_ESR_EL2_EC_AARCH64_BRK_INSN:
2158 {
2159 VBOXSTRICTRC rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx);
2160 /** @todo Forward genuine guest traps to the guest by either single stepping the instruction with debug exception trapping turned off
2161 * or creating an instruction interpreter and injecting the exception ourselves. */
2162 Assert(rcStrict == VINF_EM_DBG_BREAKPOINT);
2163 return rcStrict;
2164 }
2165 case ARMV8_ESR_EL2_SS_EXCEPTION_FROM_LOWER_EL:
2166 return VINF_EM_DBG_STEPPED;
2167 case ARMV8_ESR_EL2_EC_UNKNOWN:
2168 default:
2169 LogRel(("NEM/Darwin: Unknown Exception Class in syndrome: uEc=%u{%s} uIss=%#RX32 fInsn32Bit=%RTbool\n",
2170 uEc, nemR3DarwinEsrEl2EcStringify(uEc), uIss, fInsn32Bit));
2171 AssertReleaseFailed();
2172 return VERR_NOT_IMPLEMENTED;
2173 }
2174
2175 return VINF_SUCCESS;
2176}
2177
2178
2179/**
2180 * Handles an exit from hv_vcpu_run().
2181 *
2182 * @returns VBox strict status code.
2183 * @param pVM The cross context VM structure.
2184 * @param pVCpu The cross context virtual CPU structure of the
2185 * calling EMT.
2186 */
2187static VBOXSTRICTRC nemR3DarwinHandleExit(PVM pVM, PVMCPU pVCpu)
2188{
2189 int rc = nemR3DarwinCopyStateFromHv(pVM, pVCpu, CPUMCTX_EXTRN_ALL);
2190 if (RT_FAILURE(rc))
2191 return rc;
2192
2193#ifdef LOG_ENABLED
2194 if (LogIs3Enabled())
2195 nemR3DarwinLogState(pVM, pVCpu);
2196#endif
2197
2198 hv_vcpu_exit_t *pExit = pVCpu->nem.s.pHvExit;
2199 switch (pExit->reason)
2200 {
2201 case HV_EXIT_REASON_CANCELED:
2202 return VINF_EM_RAW_INTERRUPT;
2203 case HV_EXIT_REASON_EXCEPTION:
2204 return nemR3DarwinHandleExitException(pVM, pVCpu, pExit);
2205 case HV_EXIT_REASON_VTIMER_ACTIVATED:
2206 {
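/* The in-kernel vTimer fired and was masked by Hypervisor.framework; forward it to the
 * GIC as a PPI and remember to unmask it in nemR3DarwinPreRunGuest() once the guest
 * has handled the interrupt. */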
2207 LogFlowFunc(("vTimer got activated\n"));
2208 TMCpuSetVTimerNextActivation(pVCpu, UINT64_MAX);
2209 pVCpu->nem.s.fVTimerActivated = true;
2210 return PDMGicSetPpi(pVCpu, pVM->nem.s.u32GicPpiVTimer, true /*fAsserted*/);
2211 }
2212 default:
2213 AssertReleaseFailed();
2214 break;
2215 }
2216
2217 return VERR_INVALID_STATE;
2218}
2219
2220
2221/**
2222 * Runs the guest once until an exit occurs.
2223 *
2224 * @returns HV status code.
2225 * @param pVM The cross context VM structure.
2226 * @param pVCpu The cross context virtual CPU structure.
2227 */
2228static hv_return_t nemR3DarwinRunGuest(PVM pVM, PVMCPU pVCpu)
2229{
2230 TMNotifyStartOfExecution(pVM, pVCpu);
2231
2232 hv_return_t hrc = hv_vcpu_run(pVCpu->nem.s.hVCpu);
2233
2234 TMNotifyEndOfExecution(pVM, pVCpu, ASMReadTSC());
2235
2236 return hrc;
2237}
2238
2239
2240/**
2241 * Prepares the VM to run the guest.
2242 *
2243 * @returns Strict VBox status code.
2244 * @param pVM The cross context VM structure.
2245 * @param pVCpu The cross context virtual CPU structure.
2246 * @param fSingleStepping Flag whether we run in single stepping mode.
2247 */
2248static VBOXSTRICTRC nemR3DarwinPreRunGuest(PVM pVM, PVMCPU pVCpu, bool fSingleStepping)
2249{
2250#ifdef LOG_ENABLED
2251 bool fIrq = false;
2252 bool fFiq = false;
2253
2254 if (LogIs3Enabled())
2255 nemR3DarwinLogState(pVM, pVCpu);
2256#endif
2257
2258 int rc = nemR3DarwinExportGuestState(pVM, pVCpu);
2259 AssertRCReturn(rc, rc);
2260
2261 /* In single stepping mode we will re-read SPSR and MDSCR and enable the software step bits. */
2262 if (fSingleStepping)
2263 {
2264 uint64_t u64Tmp;
2265 hv_return_t hrc = hv_vcpu_get_reg(pVCpu->nem.s.hVCpu, HV_REG_CPSR, &u64Tmp);
2266 if (hrc == HV_SUCCESS)
2267 {
2268 u64Tmp |= ARMV8_SPSR_EL2_AARCH64_SS;
2269 hrc = hv_vcpu_set_reg(pVCpu->nem.s.hVCpu, HV_REG_CPSR, u64Tmp);
2270 }
2271
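/* Note: a software step exception is only generated when both PSTATE.SS (restored from
 * SPSR on exception return, set above) and MDSCR_EL1.SS (set below) are one, so both
 * registers have to be updated here. */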
2272 hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_MDSCR_EL1, &u64Tmp);
2273 if (hrc == HV_SUCCESS)
2274 {
2275 u64Tmp |= ARMV8_MDSCR_EL1_AARCH64_SS;
2276 hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_MDSCR_EL1, u64Tmp);
2277 }
2278
2279 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
2280 }
2281
2282 /* Check whether the vTimer interrupt was handled by the guest and we can unmask the vTimer. */
2283 if (pVCpu->nem.s.fVTimerActivated)
2284 {
2285 /* Read the CNTV_CTL_EL0 register. */
2286 uint64_t u64CntvCtl = 0;
2287
2288 hv_return_t hrc = hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CTL_EL0, &u64CntvCtl);
2289 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
2290
2291 if ( (u64CntvCtl & (ARMV8_CNTV_CTL_EL0_AARCH64_ENABLE | ARMV8_CNTV_CTL_EL0_AARCH64_IMASK | ARMV8_CNTV_CTL_EL0_AARCH64_ISTATUS))
2292 != (ARMV8_CNTV_CTL_EL0_AARCH64_ENABLE | ARMV8_CNTV_CTL_EL0_AARCH64_ISTATUS))
2293 {
2294 /* Clear the interrupt. */
2295 PDMGicSetPpi(pVCpu, pVM->nem.s.u32GicPpiVTimer, false /*fAsserted*/);
2296
2297 pVCpu->nem.s.fVTimerActivated = false;
2298 hrc = hv_vcpu_set_vtimer_mask(pVCpu->nem.s.hVCpu, false /*vtimer_is_masked*/);
2299 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
2300 }
2301 }
2302
2303 /* Set the pending interrupt state. */
2304 hv_return_t hrc = HV_SUCCESS;
2305 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ))
2306 {
2307 hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_IRQ, true);
2308 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
2309#ifdef LOG_ENABLED
2310 fIrq = true;
2311#endif
2312 }
2313 else
2314 {
2315 hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_IRQ, false);
2316 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
2317 }
2318
2319 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_FIQ))
2320 {
2321 hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_FIQ, true);
2322 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
2323#ifdef LOG_ENABLED
2324 fFiq = true;
2325#endif
2326 }
2327 else
2328 {
2329 hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_FIQ, false);
2330 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
2331 }
2332
2333 LogFlowFunc(("Running vCPU [%s,%s]\n", fIrq ? "I" : "nI", fFiq ? "F" : "nF"));
2334 pVCpu->nem.s.fEventPending = false;
2335 return VINF_SUCCESS;
2336}
2337
2338
2339/**
2340 * The normal runloop (no debugging features enabled).
2341 *
2342 * @returns Strict VBox status code.
2343 * @param pVM The cross context VM structure.
2344 * @param pVCpu The cross context virtual CPU structure.
2345 */
2346static VBOXSTRICTRC nemR3DarwinRunGuestNormal(PVM pVM, PVMCPU pVCpu)
2347{
2348 /*
2349 * The run loop.
2350 *
2351 * The current approach to state updating is to use the sledgehammer and
2352 * sync everything every time. This will be optimized later.
2353 */
2354
2355 /* Update the vTimer offset after resuming if instructed. */
2356 if (pVCpu->nem.s.fVTimerOffUpdate)
2357 {
2358 hv_return_t hrc = hv_vcpu_set_vtimer_offset(pVCpu->nem.s.hVCpu, pVM->nem.s.u64VTimerOff);
2359 if (hrc != HV_SUCCESS)
2360 return nemR3DarwinHvSts2Rc(hrc);
2361
2362 pVCpu->nem.s.fVTimerOffUpdate = false;
2363
2364 hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CTL_EL0, pVCpu->cpum.GstCtx.CntvCtlEl0);
2365 if (hrc == HV_SUCCESS)
2366 hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CVAL_EL0, pVCpu->cpum.GstCtx.CntvCValEl0);
2367 if (hrc != HV_SUCCESS)
2368 return nemR3DarwinHvSts2Rc(hrc);
2369 }
2370
2371 /*
2372 * Poll timers and run for a bit.
2373 */
2374 /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
2375 * the whole polling job when timers have changed... */
2376 uint64_t offDeltaIgnored;
2377 uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
2378 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2379 for (unsigned iLoop = 0;; iLoop++)
2380 {
2381 rcStrict = nemR3DarwinPreRunGuest(pVM, pVCpu, false /* fSingleStepping */);
2382 if (rcStrict != VINF_SUCCESS)
2383 break;
2384
2385 hv_return_t hrc = nemR3DarwinRunGuest(pVM, pVCpu);
2386 if (hrc == HV_SUCCESS)
2387 {
2388 /*
2389 * Deal with the message.
2390 */
2391 rcStrict = nemR3DarwinHandleExit(pVM, pVCpu);
2392 if (rcStrict == VINF_SUCCESS)
2393 { /* hopefully likely */ }
2394 else
2395 {
2396 LogFlow(("NEM/%u: breaking: nemR3DarwinHandleExit -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
2397 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
2398 break;
2399 }
2400 }
2401 else
2402 {
2403 AssertLogRelMsgFailedReturn(("hv_vcpu_run() failed for CPU #%u: %#x\n",
2404 pVCpu->idCpu, hrc), VERR_NEM_IPE_0);
2405 }
2406 } /* the run loop */
2407
2408 return rcStrict;
2409}
2410
2411
2412/**
2413 * The debug runloop.
2414 *
2415 * @returns Strict VBox status code.
2416 * @param pVM The cross context VM structure.
2417 * @param pVCpu The cross context virtual CPU structure.
2418 */
2419static VBOXSTRICTRC nemR3DarwinRunGuestDebug(PVM pVM, PVMCPU pVCpu)
2420{
2421 /*
2422 * The run loop.
2423 *
2424 * The current approach to state updating is to use the sledgehammer and
2425 * sync everything every time. This will be optimized later.
2426 */
2427
2428 bool const fSavedSingleInstruction = pVCpu->nem.s.fSingleInstruction;
2429 pVCpu->nem.s.fSingleInstruction = pVCpu->nem.s.fSingleInstruction || DBGFIsStepping(pVCpu);
2430 pVCpu->nem.s.fUsingDebugLoop = true;
2431
2432 /* Trap any debug exceptions. */
2433 hv_return_t hrc = hv_vcpu_set_trap_debug_exceptions(pVCpu->nem.s.hVCpu, true);
2434 if (hrc != HV_SUCCESS)
2435 return VMSetError(pVM, VERR_NEM_SET_REGISTERS_FAILED, RT_SRC_POS,
2436 "Trapping debug exceptions on vCPU %u failed: %#x (%Rrc)", pVCpu->idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));
2437
2438 /* Update the vTimer offset after resuming if instructed. */
2439 if (pVCpu->nem.s.fVTimerOffUpdate)
2440 {
2441 hrc = hv_vcpu_set_vtimer_offset(pVCpu->nem.s.hVCpu, pVM->nem.s.u64VTimerOff);
2442 if (hrc != HV_SUCCESS)
2443 return nemR3DarwinHvSts2Rc(hrc);
2444
2445 pVCpu->nem.s.fVTimerOffUpdate = false;
2446
2447 hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CTL_EL0, pVCpu->cpum.GstCtx.CntvCtlEl0);
2448 if (hrc == HV_SUCCESS)
2449 hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CVAL_EL0, pVCpu->cpum.GstCtx.CntvCValEl0);
2450 if (hrc != HV_SUCCESS)
2451 return nemR3DarwinHvSts2Rc(hrc);
2452 }
2453
2454 /* Save the guest MDSCR_EL1 */
2455 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_SYSREG_DEBUG | CPUMCTX_EXTRN_PSTATE);
2456 uint64_t u64RegMdscrEl1 = pVCpu->cpum.GstCtx.Mdscr.u64;
2457
2458 /*
2459 * Poll timers and run for a bit.
2460 */
2461 /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
2462 * the whole polling job when timers have changed... */
2463 uint64_t offDeltaIgnored;
2464 uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
2465 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2466 for (unsigned iLoop = 0;; iLoop++)
2467 {
2468 bool const fStepping = pVCpu->nem.s.fSingleInstruction;
2469
2470 rcStrict = nemR3DarwinPreRunGuest(pVM, pVCpu, fStepping);
2471 if (rcStrict != VINF_SUCCESS)
2472 break;
2473
2474 hrc = nemR3DarwinRunGuest(pVM, pVCpu);
2475 if (hrc == HV_SUCCESS)
2476 {
2477 /*
2478 * Deal with the message.
2479 */
2480 rcStrict = nemR3DarwinHandleExit(pVM, pVCpu);
2481 if (rcStrict == VINF_SUCCESS)
2482 { /* hopefully likely */ }
2483 else
2484 {
2485 LogFlow(("NEM/%u: breaking: nemR3DarwinHandleExit -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
2486 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
2487 break;
2488 }
2489 }
2490 else
2491 {
2492 AssertLogRelMsgFailedReturn(("hv_vcpu_run() failed for CPU #%u: %#x\n",
2493 pVCpu->idCpu, hrc), VERR_NEM_IPE_0);
2494 }
2495 } /* the run loop */
2496
2497 /* Restore single stepping state. */
2498 if (pVCpu->nem.s.fSingleInstruction)
2499 {
2500 /** @todo This ASSUMES that guest code being single stepped is not modifying the MDSCR_EL1 register. */
2501 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_SYSREG_DEBUG | CPUMCTX_EXTRN_PSTATE);
2502 Assert(pVCpu->cpum.GstCtx.Mdscr.u64 & ARMV8_MDSCR_EL1_AARCH64_SS);
2503
2504 pVCpu->cpum.GstCtx.Mdscr.u64 = u64RegMdscrEl1;
2505 }
2506
2507 /* Restore debug exceptions trapping. */
2508 hrc |= hv_vcpu_set_trap_debug_exceptions(pVCpu->nem.s.hVCpu, false);
2509 if (hrc != HV_SUCCESS)
2510 return VMSetError(pVM, VERR_NEM_SET_REGISTERS_FAILED, RT_SRC_POS,
2511 "Clearing trapping of debug exceptions on vCPU %u failed: %#x (%Rrc)", pVCpu->idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));
2512
2513 pVCpu->nem.s.fUsingDebugLoop = false;
2514 pVCpu->nem.s.fSingleInstruction = fSavedSingleInstruction;
2515
2516 return rcStrict;
2517
2518}
2519
2520
2521VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu)
2522{
2523#ifdef LOG_ENABLED
2524 if (LogIs3Enabled())
2525 nemR3DarwinLogState(pVM, pVCpu);
2526#endif
2527
2528 AssertReturn(NEMR3CanExecuteGuest(pVM, pVCpu), VERR_NEM_IPE_9);
2529
2530 if (RT_UNLIKELY(!pVCpu->nem.s.fIdRegsSynced))
2531 {
2532 /*
2533 * Sync the guest ID registers which are per VM once (they are readonly and stay constant during VM lifetime).
2534 * This needs to be done here and not during init because loading a saved state might change the ID registers from
2535 * what was done in the call to CPUMR3PopulateFeaturesByIdRegisters().
2536 */
2537 static const struct
2538 {
2539 const char *pszIdReg;
2540 hv_sys_reg_t enmHvReg;
2541 uint32_t offIdStruct;
2542 } s_aSysIdRegs[] =
2543 {
2544#define ID_SYS_REG_CREATE(a_IdReg, a_CpumIdReg) { #a_IdReg, HV_SYS_REG_##a_IdReg, RT_UOFFSETOF(CPUMARMV8IDREGS, a_CpumIdReg) }
2545 ID_SYS_REG_CREATE(ID_AA64DFR0_EL1, u64RegIdAa64Dfr0El1),
2546 ID_SYS_REG_CREATE(ID_AA64DFR1_EL1, u64RegIdAa64Dfr1El1),
2547 ID_SYS_REG_CREATE(ID_AA64ISAR0_EL1, u64RegIdAa64Isar0El1),
2548 ID_SYS_REG_CREATE(ID_AA64ISAR1_EL1, u64RegIdAa64Isar1El1),
2549 ID_SYS_REG_CREATE(ID_AA64MMFR0_EL1, u64RegIdAa64Mmfr0El1),
2550 ID_SYS_REG_CREATE(ID_AA64MMFR1_EL1, u64RegIdAa64Mmfr1El1),
2551 ID_SYS_REG_CREATE(ID_AA64MMFR2_EL1, u64RegIdAa64Mmfr2El1),
2552 ID_SYS_REG_CREATE(ID_AA64PFR0_EL1, u64RegIdAa64Pfr0El1),
2553 ID_SYS_REG_CREATE(ID_AA64PFR1_EL1, u64RegIdAa64Pfr1El1),
2554#undef ID_SYS_REG_CREATE
2555 };
2556
2557 PCCPUMARMV8IDREGS pIdRegsGst = NULL;
2558 int rc = CPUMR3QueryGuestIdRegs(pVM, &pIdRegsGst);
2559 AssertRCReturn(rc, rc);
2560
2561 for (uint32_t i = 0; i < RT_ELEMENTS(s_aSysIdRegs); i++)
2562 {
2563 uint64_t *pu64 = (uint64_t *)((uint8_t *)pIdRegsGst + s_aSysIdRegs[i].offIdStruct);
2564 hv_return_t hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, s_aSysIdRegs[i].enmHvReg, *pu64);
2565 if (hrc != HV_SUCCESS)
2566 return VMSetError(pVM, VERR_NEM_SET_REGISTERS_FAILED, RT_SRC_POS,
2567 "Setting %s failed on vCPU %u: %#x (%Rrc)", s_aSysIdRegs[i].pszIdReg, pVCpu->idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));
2568 }
2569
2570 pVCpu->nem.s.fIdRegsSynced = true;
2571 }
2572
2573 /*
2574 * Try switch to NEM runloop state.
2575 */
2576 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
2577 { /* likely */ }
2578 else
2579 {
2580 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
2581 LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
2582 return VINF_SUCCESS;
2583 }
2584
2585 VBOXSTRICTRC rcStrict;
2586 if ( !pVCpu->nem.s.fUseDebugLoop
2587 /*&& !nemR3DarwinAnyExpensiveProbesEnabled()*/
2588 && !DBGFIsStepping(pVCpu)
2589 && !pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledSwBreakpoints)
2590 rcStrict = nemR3DarwinRunGuestNormal(pVM, pVCpu);
2591 else
2592 rcStrict = nemR3DarwinRunGuestDebug(pVM, pVCpu);
2593
2594 if (rcStrict == VINF_EM_RAW_TO_R3)
2595 rcStrict = VINF_SUCCESS;
2596
2597 /*
2598 * Convert any pending HM events back to TRPM due to premature exits.
2599 *
2600 * This is because execution may continue from IEM and we would need to inject
2601 * the event from there (hence place it back in TRPM).
2602 */
2603 if (pVCpu->nem.s.fEventPending)
2604 {
2605 /** @todo */
2606 }
2607
2608
2609 if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
2610 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
2611
2612 if (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL))
2613 {
2614 /* Try anticipate what we might need. */
2615 uint64_t fImport = NEM_DARWIN_CPUMCTX_EXTRN_MASK_FOR_IEM;
2616 if ( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
2617 || RT_FAILURE(rcStrict))
2618 fImport = CPUMCTX_EXTRN_ALL;
2619 else if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ
2620 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
2621 fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;
2622
2623 if (pVCpu->cpum.GstCtx.fExtrn & fImport)
2624 {
2625 /* Only import what is external currently. */
2626 int rc2 = nemR3DarwinCopyStateFromHv(pVM, pVCpu, fImport);
2627 if (RT_SUCCESS(rc2))
2628 pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
2629 else if (RT_SUCCESS(rcStrict))
2630 rcStrict = rc2;
2631 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
2632 pVCpu->cpum.GstCtx.fExtrn = 0;
2633 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
2634 }
2635 else
2636 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
2637 }
2638 else
2639 {
2640 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
2641 pVCpu->cpum.GstCtx.fExtrn = 0;
2642 }
2643
2644 return rcStrict;
2645}
2646
2647
2648VMMR3_INT_DECL(bool) NEMR3CanExecuteGuest(PVM pVM, PVMCPU pVCpu)
2649{
2650 RT_NOREF(pVM, pVCpu);
2651 return true; /** @todo Are there any cases where we have to emulate? */
2652}
2653
2654
2655bool nemR3NativeSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
2656{
2657 VMCPU_ASSERT_EMT(pVCpu);
2658 bool fOld = pVCpu->nem.s.fSingleInstruction;
2659 pVCpu->nem.s.fSingleInstruction = fEnable;
2660 pVCpu->nem.s.fUseDebugLoop = fEnable || pVM->nem.s.fUseDebugLoop;
2661 return fOld;
2662}
2663
2664
2665void nemR3NativeNotifyFF(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
2666{
2667 LogFlowFunc(("pVM=%p pVCpu=%p fFlags=%#x\n", pVM, pVCpu, fFlags));
2668
2669 RT_NOREF(pVM, fFlags);
2670
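/* Kick the vCPU out of hv_vcpu_run(); the exit surfaces as HV_EXIT_REASON_CANCELED,
 * which nemR3DarwinHandleExit() translates to VINF_EM_RAW_INTERRUPT. */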
2671 hv_return_t hrc = hv_vcpus_exit(&pVCpu->nem.s.hVCpu, 1);
2672 if (hrc != HV_SUCCESS)
2673 LogRel(("NEM: hv_vcpus_exit(%u, 1) failed with %#x\n", pVCpu->nem.s.hVCpu, hrc));
2674}
2675
2676
2677DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChanged(PVM pVM, bool fUseDebugLoop)
2678{
2679 RT_NOREF(pVM, fUseDebugLoop);
2680 //AssertReleaseFailed();
2681 return false;
2682}
2683
2684
2685DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu, bool fUseDebugLoop)
2686{
2687 RT_NOREF(pVM, pVCpu, fUseDebugLoop);
2688 return fUseDebugLoop;
2689}
2690
2691
2692VMMR3_INT_DECL(int) NEMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvR3,
2693 uint8_t *pu2State, uint32_t *puNemRange)
2694{
2695 RT_NOREF(pVM, puNemRange);
2696
2697 Log5(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p\n", GCPhys, cb, pvR3));
2698#if defined(VBOX_WITH_PGM_NEM_MODE)
2699 if (pvR3)
2700 {
2701 int rc = nemR3DarwinMap(pVM, GCPhys, pvR3, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
2702 if (RT_FAILURE(rc))
2703 {
2704 LogRel(("NEMR3NotifyPhysRamRegister: GCPhys=%RGp LB %RGp pvR3=%p rc=%Rrc\n", GCPhys, cb, pvR3, rc));
2705 return VERR_NEM_MAP_PAGES_FAILED;
2706 }
2707 }
2708 return VINF_SUCCESS;
2709#else
2710 RT_NOREF(pVM, GCPhys, cb, pvR3);
2711 return VERR_NEM_MAP_PAGES_FAILED;
2712#endif
2713}
2714
2715
2716VMMR3_INT_DECL(bool) NEMR3IsMmio2DirtyPageTrackingSupported(PVM pVM)
2717{
2718 RT_NOREF(pVM);
2719 return true;
2720}
2721
2722
2723VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
2724 void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
2725{
2726 RT_NOREF(pvRam);
2727
2728 Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p (%d)\n",
2729 GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, *pu2State));
2730
2731#if defined(VBOX_WITH_PGM_NEM_MODE)
2732 /*
2733 * Unmap the RAM we're replacing.
2734 */
2735 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
2736 {
2737 int rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
2738 if (RT_SUCCESS(rc))
2739 { /* likely */ }
2740 else if (pvMmio2)
2741 LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc (ignored)\n",
2742 GCPhys, cb, fFlags, rc));
2743 else
2744 {
2745 LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc\n",
2746 GCPhys, cb, fFlags, rc));
2747 return VERR_NEM_UNMAP_PAGES_FAILED;
2748 }
2749 }
2750
2751 /*
2752 * Map MMIO2 if any.
2753 */
2754 if (pvMmio2)
2755 {
2756 Assert(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2);
2757
2758 /* We need to set up our own dirty tracking due to Hypervisor.framework working only on host page size aligned regions. */
2759 uint32_t fProt = NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
2760 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES)
2761 {
2762 /* Find a slot for dirty tracking. */
2763 PNEMHVMMIO2REGION pMmio2Region = NULL;
2764 uint32_t idSlot;
2765 for (idSlot = 0; idSlot < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking); idSlot++)
2766 {
2767 if ( pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysStart == 0
2768 && pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysLast == 0)
2769 {
2770 pMmio2Region = &pVM->nem.s.aMmio2DirtyTracking[idSlot];
2771 break;
2772 }
2773 }
2774
2775 if (!pMmio2Region)
2776 {
2777 LogRel(("NEMR3NotifyPhysMmioExMapEarly: Out of dirty tracking structures -> VERR_NEM_MAP_PAGES_FAILED\n"));
2778 return VERR_NEM_MAP_PAGES_FAILED;
2779 }
2780
2781 pMmio2Region->GCPhysStart = GCPhys;
2782 pMmio2Region->GCPhysLast = GCPhys + cb - 1;
2783 pMmio2Region->fDirty = false;
2784 *puNemRange = idSlot;
2785 }
2786 else
2787 fProt |= NEM_PAGE_PROT_WRITE;
2788
2789 int rc = nemR3DarwinMap(pVM, GCPhys, pvMmio2, cb, fProt, pu2State);
2790 if (RT_FAILURE(rc))
2791 {
2792 LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x pvMmio2=%p: Map -> rc=%Rrc\n",
2793 GCPhys, cb, fFlags, pvMmio2, rc));
2794 return VERR_NEM_MAP_PAGES_FAILED;
2795 }
2796 }
2797 else
2798 Assert(!(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2));
2799
2800#else
2801 RT_NOREF(pVM, GCPhys, cb, pvRam, pvMmio2);
2802 *pu2State = (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE) ? UINT8_MAX : NEM_DARWIN_PAGE_STATE_UNMAPPED;
2803#endif
2804 return VINF_SUCCESS;
2805}
2806
2807
2808VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
2809 void *pvRam, void *pvMmio2, uint32_t *puNemRange)
2810{
2811 RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, puNemRange);
2812 return VINF_SUCCESS;
2813}
2814
2815
2816VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExUnmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, void *pvRam,
2817 void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
2818{
2819 RT_NOREF(pVM, puNemRange);
2820
2821 Log5(("NEMR3NotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p uNemRange=%#x (%#x)\n",
2822 GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, puNemRange, *puNemRange));
2823
2824 int rc = VINF_SUCCESS;
2825#if defined(VBOX_WITH_PGM_NEM_MODE)
2826 /*
2827 * Unmap the MMIO2 pages.
2828 */
2829 /** @todo If we implement aliasing (MMIO2 page aliased into MMIO range),
2830 * we may have more stuff to unmap even in case of pure MMIO... */
2831 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2)
2832 {
2833 rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
2834 if (RT_FAILURE(rc))
2835 {
2836 LogRel2(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc\n",
2837 GCPhys, cb, fFlags, rc));
2838 rc = VERR_NEM_UNMAP_PAGES_FAILED;
2839 }
2840
2841 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES)
2842 {
2843 /* Reset tracking structure. */
2844 uint32_t idSlot = *puNemRange;
2845 *puNemRange = UINT32_MAX;
2846
2847 Assert(idSlot < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking));
2848 pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysStart = 0;
2849 pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysLast = 0;
2850 pVM->nem.s.aMmio2DirtyTracking[idSlot].fDirty = false;
2851 }
2852 }
2853
2854    /* Ensure the page is marked as unmapped if relevant. */
2855 Assert(!pu2State || *pu2State == NEM_DARWIN_PAGE_STATE_UNMAPPED);
2856
2857 /*
2858 * Restore the RAM we replaced.
2859 */
2860 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
2861 {
2862 AssertPtr(pvRam);
2863 rc = nemR3DarwinMap(pVM, GCPhys, pvRam, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
2864 if (RT_SUCCESS(rc))
2865 { /* likely */ }
2866 else
2867 {
2868 LogRel(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp pvMmio2=%p rc=%Rrc\n", GCPhys, cb, pvMmio2, rc));
2869 rc = VERR_NEM_MAP_PAGES_FAILED;
2870 }
2871 }
2872
2873 RT_NOREF(pvMmio2);
2874#else
2875 RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State);
2876 if (pu2State)
2877 *pu2State = UINT8_MAX;
2878 rc = VERR_NEM_UNMAP_PAGES_FAILED;
2879#endif
2880 return rc;
2881}
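/*
 * Illustrative sketch, not part of the original file: nemR3DarwinMap() and
 * nemR3DarwinUnmap() presumably wrap the Hypervisor.framework stage-2 calls
 * hv_vm_map()/hv_vm_unmap(). A minimal sketch of the map side, assuming the
 * NEM_PAGE_PROT_XXX flags translate one-to-one to HV_MEMORY_XXX (helper name
 * and error handling are assumptions):
 */
#if 0 /* sketch only */
static int nemR3DarwinMapSketch(RTGCPHYS GCPhys, void *pvR3, size_t cb, uint32_t fNemProt)
{
    hv_memory_flags_t fHvProt = 0;
    if (fNemProt & NEM_PAGE_PROT_READ)    fHvProt |= HV_MEMORY_READ;
    if (fNemProt & NEM_PAGE_PROT_WRITE)   fHvProt |= HV_MEMORY_WRITE;
    if (fNemProt & NEM_PAGE_PROT_EXECUTE) fHvProt |= HV_MEMORY_EXEC;

    /* hv_vm_map() only accepts host-page aligned addresses and sizes, which is
       why the dirty tracking above cannot rely on guest-page granularity. */
    hv_return_t hrc = hv_vm_map(pvR3, (hv_ipa_t)GCPhys, cb, fHvProt);
    return hrc == HV_SUCCESS ? VINF_SUCCESS : VERR_NEM_MAP_PAGES_FAILED;
}
#endif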
2882
2883
2884VMMR3_INT_DECL(int) NEMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t uNemRange,
2885 void *pvBitmap, size_t cbBitmap)
2886{
2887    LogFlowFunc(("NEMR3PhysMmio2QueryAndResetDirtyBitmap: %RGp LB %RGp uNemRange=%u\n", GCPhys, cb, uNemRange));
2888 Assert(uNemRange < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking));
2889
2890 /* Keep it simple for now and mark everything as dirty if it is. */
2891 int rc = VINF_SUCCESS;
2892 if (pVM->nem.s.aMmio2DirtyTracking[uNemRange].fDirty)
2893 {
2894 ASMBitSetRange(pvBitmap, 0, cbBitmap * 8);
2895
2896 pVM->nem.s.aMmio2DirtyTracking[uNemRange].fDirty = false;
2897 /* Restore as RX only. */
2898 uint8_t u2State;
2899 rc = nemR3DarwinProtect(GCPhys, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE, &u2State);
2900 }
2901 else
2902 ASMBitClearRange(pvBitmap, 0, cbBitmap * 8);
2903
2904 return rc;
2905}
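/*
 * Illustrative sketch, not part of the original file: the producer half of this
 * coarse dirty tracking presumably lives in the stage-2 write-fault handler.
 * When the guest writes to a tracked region currently mapped read-execute, the
 * handler flips the whole region writable and sets fDirty; the query above then
 * reports everything dirty and re-arms the region as RX. Roughly (helper name
 * assumed):
 */
#if 0 /* sketch only */
static int nemR3DarwinMmio2DirtyWriteFault(PNEMHVMMIO2REGION pRegion)
{
    uint8_t u2State;
    int rc = nemR3DarwinProtect(pRegion->GCPhysStart,
                                pRegion->GCPhysLast - pRegion->GCPhysStart + 1,
                                NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE,
                                &u2State);
    if (RT_SUCCESS(rc))
        pRegion->fDirty = true; /* Seen by the next NEMR3PhysMmio2QueryAndResetDirtyBitmap() call. */
    return rc;
}
#endif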
2906
2907
2908VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages, uint32_t fFlags,
2909 uint8_t *pu2State, uint32_t *puNemRange)
2910{
2911 RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);
2912
2913 Log5(("NEMR3NotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags));
2914 *pu2State = UINT8_MAX;
2915 *puNemRange = 0;
2916 return VINF_SUCCESS;
2917}
2918
2919
2920VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages,
2921 uint32_t fFlags, uint8_t *pu2State, uint32_t *puNemRange)
2922{
2923 Log5(("NEMR3NotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n",
2924 GCPhys, cb, pvPages, fFlags, pu2State, *pu2State, puNemRange, *puNemRange));
2925 *pu2State = UINT8_MAX;
2926
2927#if defined(VBOX_WITH_PGM_NEM_MODE)
2928 /*
2929 * (Re-)map readonly.
2930 */
2931 AssertPtrReturn(pvPages, VERR_INVALID_POINTER);
2932
2933 int rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
2934 AssertRC(rc);
2935
2936 rc = nemR3DarwinMap(pVM, GCPhys, pvPages, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE, pu2State);
2937 if (RT_FAILURE(rc))
2938 {
2939        LogRel(("NEMR3NotifyPhysRomRegisterLate: GCPhys=%RGp LB %RGp pvPages=%p fFlags=%#x rc=%Rrc\n",
2940 GCPhys, cb, pvPages, fFlags, rc));
2941 return VERR_NEM_MAP_PAGES_FAILED;
2942 }
2943 RT_NOREF(fFlags, puNemRange);
2944 return VINF_SUCCESS;
2945#else
2946 RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);
2947 return VERR_NEM_MAP_PAGES_FAILED;
2948#endif
2949}
2950
2951
2952VMM_INT_DECL(void) NEMHCNotifyHandlerPhysicalDeregister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
2953 RTR3PTR pvMemR3, uint8_t *pu2State)
2954{
2955 Log5(("NEMHCNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d pvMemR3=%p pu2State=%p (%d)\n",
2956 GCPhys, cb, enmKind, pvMemR3, pu2State, *pu2State));
2957
2958 *pu2State = UINT8_MAX;
2959#if defined(VBOX_WITH_PGM_NEM_MODE)
2960 if (pvMemR3)
2961 {
2962 /* Unregister what was there before. */
2963 int rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
2964 AssertRC(rc);
2965
2966 rc = nemR3DarwinMap(pVM, GCPhys, pvMemR3, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
2967 AssertLogRelMsgRC(rc, ("NEMHCNotifyHandlerPhysicalDeregister: nemR3DarwinMap(,%p,%RGp,%RGp,) -> %Rrc\n",
2968 pvMemR3, GCPhys, cb, rc));
2969 }
2970 RT_NOREF(enmKind);
2971#else
2972 RT_NOREF(pVM, enmKind, GCPhys, cb, pvMemR3);
2973 AssertFailed();
2974#endif
2975}
2976
2977
2978VMMR3_INT_DECL(void) NEMR3NotifySetA20(PVMCPU pVCpu, bool fEnabled)
2979{
2980 Log(("NEMR3NotifySetA20: fEnabled=%RTbool\n", fEnabled));
2981 RT_NOREF(pVCpu, fEnabled);
2982}
2983
2984
2985void nemHCNativeNotifyHandlerPhysicalRegister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
2986{
2987 Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
2988 NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb);
2989}
2990
2991
2992void nemHCNativeNotifyHandlerPhysicalModify(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
2993 RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
2994{
2995 Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
2996 GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
2997 NOREF(pVM); NOREF(enmKind); NOREF(GCPhysOld); NOREF(GCPhysNew); NOREF(cb); NOREF(fRestoreAsRAM);
2998}
2999
3000
3001int nemHCNativeNotifyPhysPageAllocated(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
3002 PGMPAGETYPE enmType, uint8_t *pu2State)
3003{
3004 Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
3005 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
3006 RT_NOREF(pVM, GCPhys, HCPhys, fPageProt, enmType, pu2State);
3007
3008 AssertFailed();
3009 return VINF_SUCCESS;
3010}
3011
3012
3013VMM_INT_DECL(void) NEMHCNotifyPhysPageProtChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, RTR3PTR pvR3, uint32_t fPageProt,
3014 PGMPAGETYPE enmType, uint8_t *pu2State)
3015{
3016 Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
3017 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
3018 RT_NOREF(pVM, GCPhys, HCPhys, pvR3, fPageProt, enmType, pu2State);
3019}
3020
3021
3022VMM_INT_DECL(void) NEMHCNotifyPhysPageChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
3023 RTR3PTR pvNewR3, uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
3024{
3025 Log5(("NEMHCNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
3026 GCPhys, HCPhysPrev, HCPhysNew, fPageProt, enmType, *pu2State));
3027 RT_NOREF(pVM, GCPhys, HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType, pu2State);
3028
3029 AssertFailed();
3030}
3031
3032
3033/**
3034 * Interface for importing state on demand (used by IEM).
3035 *
3036 * @returns VBox status code.
3037 * @param pVCpu The cross context CPU structure.
3038 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
3039 */
3040VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
3041{
3042 LogFlowFunc(("pVCpu=%p fWhat=%RX64\n", pVCpu, fWhat));
3043 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);
3044
3045 return nemR3DarwinCopyStateFromHv(pVCpu->pVMR3, pVCpu, fWhat);
3046}
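/*
 * Illustrative sketch, not part of the original file: nemR3DarwinCopyStateFromHv()
 * presumably pulls the requested registers out of Hypervisor.framework and
 * clears the matching CPUMCTX_EXTRN_XXX bits. A minimal sketch for the GPRs
 * (helper name and the CPUMCTX_EXTRN_GPRS_MASK name are assumptions; hVCpu is
 * the hv_vcpu_t handle kept in the per-CPU NEM state):
 */
#if 0 /* sketch only */
static int nemR3DarwinCopyGprsFromHvSketch(PVMCPUCC pVCpu, hv_vcpu_t hVCpu)
{
    for (uint32_t iReg = 0; iReg < 31; iReg++)
    {
        uint64_t u64Val;
        hv_return_t hrc = hv_vcpu_get_reg(hVCpu, (hv_reg_t)(HV_REG_X0 + iReg), &u64Val);
        if (hrc != HV_SUCCESS)
            return VERR_NEM_GET_REGISTERS_FAILED;
        pVCpu->cpum.GstCtx.aGRegs[iReg].x = u64Val;
    }
    pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_GPRS_MASK; /* assumed mask name */
    return VINF_SUCCESS;
}
#endif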
3047
3048
3049/**
3050 * Query the CPU tick counter and optionally the TSC_AUX MSR value.
3051 *
3052 * @returns VBox status code.
3053 * @param pVCpu The cross context CPU structure.
3054 * @param pcTicks Where to return the CPU tick count.
3055 * @param puAux Where to return the TSC_AUX register value.
3056 */
3057VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPUCC pVCpu, uint64_t *pcTicks, uint32_t *puAux)
3058{
3059    LogFlowFunc(("pVCpu=%p pcTicks=%p puAux=%p\n", pVCpu, pcTicks, puAux));
3060 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick);
3061
3062 if (puAux)
3063 *puAux = 0;
3064 *pcTicks = mach_absolute_time() - pVCpu->pVMR3->nem.s.u64VTimerOff; /* This is the host timer minus the offset. */
3065 return VINF_SUCCESS;
3066}
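/*
 * Illustrative sketch, not part of the original file: the guest tick count is
 * just the host timebase shifted by the accumulated pause offset, i.e.
 *
 *     guestTicks = mach_absolute_time() - u64VTimerOff
 *
 * E.g. with u64VTimerOff = 1000 and mach_absolute_time() = 5000 the guest sees
 * 4000 ticks; pausing for 500 host ticks grows u64VTimerOff to 1500, so the
 * guest clock appears not to have advanced across the pause:
 */
#if 0 /* sketch only */
uint64_t const uHostNow    = mach_absolute_time();
uint64_t const uGuestTicks = uHostNow - pVM->nem.s.u64VTimerOff; /* what NEMHCQueryCpuTick() returns */
#endif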
3067
3068
3069/**
3070 * Resumes CPU clock (TSC) on all virtual CPUs.
3071 *
3072 * This is called by TM when the VM is started, restored, resumed or similar.
3073 *
3074 * @returns VBox status code.
3075 * @param pVM The cross context VM structure.
3076 * @param pVCpu The cross context CPU structure of the calling EMT.
3077 * @param uPausedTscValue The TSC value at the time of pausing.
3078 */
3079VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uPausedTscValue)
3080{
3081 LogFlowFunc(("pVM=%p pVCpu=%p uPausedTscValue=%RX64\n", pVM, pVCpu, uPausedTscValue));
3082 VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
3083 AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);
3084
3085 /*
3086     * Calculate the new offset: first get the current TSC value with the old vTimer offset,
3087     * then adjust the offset so the guest doesn't notice the pause.
3088 */
3089 uint64_t u64TscNew = mach_absolute_time() - pVCpu->pVMR3->nem.s.u64VTimerOff;
3090 Assert(u64TscNew >= uPausedTscValue);
3091 LogFlowFunc(("u64VTimerOffOld=%#RX64 u64TscNew=%#RX64 u64VTimerValuePaused=%#RX64 -> u64VTimerOff=%#RX64\n",
3092 pVM->nem.s.u64VTimerOff, u64TscNew, uPausedTscValue,
3093 pVM->nem.s.u64VTimerOff + (u64TscNew - uPausedTscValue)));
3094
3095 pVM->nem.s.u64VTimerOff += u64TscNew - uPausedTscValue;
3096
3097 /*
3098 * Set the flag to update the vTimer offset when the vCPU resumes for the first time
3099 * (needs to be done on the actual EMT).
3100 */
3101 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
3102 {
3103 PVMCPUCC pVCpuDst = pVM->apCpusR3[idCpu];
3104 pVCpuDst->nem.s.fVTimerOffUpdate = true;
3105 }
3106
3107 return VINF_SUCCESS;
3108}
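/*
 * Illustrative sketch, not part of the original file: the fVTimerOffUpdate flag
 * set above is presumably consumed on each EMT before the next guest run,
 * pushing the new offset into the vCPU's virtual timer via the
 * Hypervisor.framework CNTVOFF accessor (helper name assumed):
 */
#if 0 /* sketch only */
static int nemR3DarwinApplyVTimerOffSketch(PVMCC pVM, PVMCPUCC pVCpu, hv_vcpu_t hVCpu)
{
    if (pVCpu->nem.s.fVTimerOffUpdate)
    {
        hv_return_t hrc = hv_vcpu_set_vtimer_offset(hVCpu, pVM->nem.s.u64VTimerOff);
        if (hrc != HV_SUCCESS)
            return VERR_NEM_IPE_9;
        pVCpu->nem.s.fVTimerOffUpdate = false;
    }
    return VINF_SUCCESS;
}
#endif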
3109
3110
3111/**
3112 * Returns features supported by the NEM backend.
3113 *
3114 * @returns Flags of features supported by the native NEM backend.
3115 * @param pVM The cross context VM structure.
3116 */
3117VMM_INT_DECL(uint32_t) NEMHCGetFeatures(PVMCC pVM)
3118{
3119 RT_NOREF(pVM);
3120 /*
3121     * Apple's Hypervisor.framework is not supported on CPUs lacking nested paging and
3122     * unrestricted guest execution, so we can always return these flags here.
3123 */
3124 return NEM_FEAT_F_NESTED_PAGING | NEM_FEAT_F_FULL_GST_EXEC | NEM_FEAT_F_XSAVE_XRSTOR;
3125}
3126
3127
3128/** @page pg_nem_darwin NEM/darwin - Native Execution Manager, macOS.
3129 *
3130 * @todo Add notes as the implementation progresses...
3131 */
3132