VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/NEMR3Native-darwin-armv8.cpp@106667

Last change on this file since 106667 was 106667, checked in by vboxsync, 5 months ago

VMM/ARM: Workaround for the UEFI accessing MMIO space with an instruction which doesn't produce a valid instruction syndrome, bugref:10732 [fix]

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 123.2 KB
/* $Id: NEMR3Native-darwin-armv8.cpp 106667 2024-10-24 16:45:40Z vboxsync $ */
/** @file
 * NEM - Native execution manager, native ring-3 macOS backend using Hypervisor.framework, ARMv8 variant.
 *
 * Log group 2: Exit logging.
 * Log group 3: Log context on exit.
 * Log group 5: Ring-3 memory management
 */

/*
 * Copyright (C) 2023-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_NEM
#define VMCPU_INCL_CPUM_GST_CTX
#define VBOX_DIS_WITH_ARMV8

#include <VBox/vmm/nem.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/gic.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/dbgftrace.h>
#include <VBox/vmm/gcm.h>
#include "NEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/vmm/vmm.h>
#include <VBox/dis.h>
#include <VBox/gic.h>
#include "dtrace/VBoxVMM.h"

#include <iprt/armv8.h>
#include <iprt/asm.h>
#include <iprt/asm-arm.h>
#include <iprt/asm-math.h>
#include <iprt/ldr.h>
#include <iprt/mem.h>
#include <iprt/path.h>
#include <iprt/string.h>
#include <iprt/system.h>
#include <iprt/utf16.h>

#include <iprt/formats/arm-psci.h>

#include <mach/mach_time.h>
#include <mach/kern_return.h>

#include <Hypervisor/Hypervisor.h>


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/

#if MAC_OS_X_VERSION_MIN_REQUIRED < 150000

/* Since 15.0+ */
typedef enum hv_gic_distributor_reg_t : uint16_t
{
    HV_GIC_DISTRIBUTOR_REG_GICD_CTLR,
    HV_GIC_DISTRIBUTOR_REG_GICD_ICACTIVER0
    /** @todo */
} hv_gic_distributor_reg_t;


typedef enum hv_gic_icc_reg_t : uint16_t
{
    HV_GIC_ICC_REG_AP0R0_EL1
    /** @todo */
} hv_gic_icc_reg_t;


typedef enum hv_gic_ich_reg_t : uint16_t
{
    HV_GIC_ICH_REG_AP0R0_EL2
    /** @todo */
} hv_gic_ich_reg_t;


typedef enum hv_gic_icv_reg_t : uint16_t
{
    HV_GIC_ICV_REG_AP0R0_EL1
    /** @todo */
} hv_gic_icv_reg_t;


typedef enum hv_gic_msi_reg_t : uint16_t
{
    HV_GIC_REG_GICM_SET_SPI_NSR
    /** @todo */
} hv_gic_msi_reg_t;


typedef enum hv_gic_redistributor_reg_t : uint16_t
{
    HV_GIC_REDISTRIBUTOR_REG_GICR_ICACTIVER0
    /** @todo */
} hv_gic_redistributor_reg_t;


typedef enum hv_gic_intid_t : uint16_t
{
    HV_GIC_INT_EL1_PHYSICAL_TIMER  = 23,
    HV_GIC_INT_EL1_VIRTUAL_TIMER   = 25,
    HV_GIC_INT_EL2_PHYSICAL_TIMER  = 26,
    HV_GIC_INT_MAINTENANCE         = 27,
    HV_GIC_INT_PERFORMANCE_MONITOR = 30
} hv_gic_intid_t;

#endif

typedef hv_vm_config_t FN_HV_VM_CONFIG_CREATE(void);
typedef hv_return_t FN_HV_VM_CONFIG_GET_EL2_SUPPORTED(bool *el2_supported);
typedef hv_return_t FN_HV_VM_CONFIG_GET_EL2_ENABLED(hv_vm_config_t config, bool *el2_enabled);
typedef hv_return_t FN_HV_VM_CONFIG_SET_EL2_ENABLED(hv_vm_config_t config, bool el2_enabled);

typedef struct hv_gic_config_s *hv_gic_config_t;
typedef hv_return_t FN_HV_GIC_CREATE(hv_gic_config_t gic_config);
typedef hv_return_t FN_HV_GIC_RESET(void);
typedef hv_gic_config_t FN_HV_GIC_CONFIG_CREATE(void);
typedef hv_return_t FN_HV_GIC_CONFIG_SET_DISTRIBUTOR_BASE(hv_gic_config_t config, hv_ipa_t distributor_base_address);
typedef hv_return_t FN_HV_GIC_CONFIG_SET_REDISTRIBUTOR_BASE(hv_gic_config_t config, hv_ipa_t redistributor_base_address);
typedef hv_return_t FN_HV_GIC_CONFIG_SET_MSI_REGION_BASE(hv_gic_config_t config, hv_ipa_t msi_region_base_address);
typedef hv_return_t FN_HV_GIC_CONFIG_SET_MSI_INTERRUPT_RANGE(hv_gic_config_t config, uint32_t msi_intid_base, uint32_t msi_intid_count);

typedef hv_return_t FN_HV_GIC_GET_REDISTRIBUTOR_BASE(hv_vcpu_t vcpu, hv_ipa_t *redistributor_base_address);
typedef hv_return_t FN_HV_GIC_GET_REDISTRIBUTOR_REGION_SIZE(size_t *redistributor_region_size);
typedef hv_return_t FN_HV_GIC_GET_REDISTRIBUTOR_SIZE(size_t *redistributor_size);
typedef hv_return_t FN_HV_GIC_GET_DISTRIBUTOR_SIZE(size_t *distributor_size);
typedef hv_return_t FN_HV_GIC_GET_DISTRIBUTOR_BASE_ALIGNMENT(size_t *distributor_base_alignment);
typedef hv_return_t FN_HV_GIC_GET_REDISTRIBUTOR_BASE_ALIGNMENT(size_t *redistributor_base_alignment);
typedef hv_return_t FN_HV_GIC_GET_MSI_REGION_BASE_ALIGNMENT(size_t *msi_region_base_alignment);
typedef hv_return_t FN_HV_GIC_GET_MSI_REGION_SIZE(size_t *msi_region_size);
typedef hv_return_t FN_HV_GIC_GET_SPI_INTERRUPT_RANGE(uint32_t *spi_intid_base, uint32_t *spi_intid_count);

typedef struct hv_gic_state_s *hv_gic_state_t;
typedef hv_gic_state_t FN_HV_GIC_STATE_CREATE(void);
typedef hv_return_t FN_HV_GIC_SET_STATE(const void *gic_state_data, size_t gic_state_size);
typedef hv_return_t FN_HV_GIC_STATE_GET_SIZE(hv_gic_state_t state, size_t *gic_state_size);
typedef hv_return_t FN_HV_GIC_STATE_GET_DATA(hv_gic_state_t state, void *gic_state_data);

typedef hv_return_t FN_HV_GIC_SEND_MSI(hv_ipa_t address, uint32_t intid);
typedef hv_return_t FN_HV_GIC_SET_SPI(uint32_t intid, bool level);

typedef hv_return_t FN_HV_GIC_GET_DISTRIBUTOR_REG(hv_gic_distributor_reg_t reg, uint64_t *value);
typedef hv_return_t FN_HV_GIC_GET_MSI_REG(hv_gic_msi_reg_t reg, uint64_t *value);
typedef hv_return_t FN_HV_GIC_GET_ICC_REG(hv_vcpu_t vcpu, hv_gic_icc_reg_t reg, uint64_t *value);
typedef hv_return_t FN_HV_GIC_GET_ICH_REG(hv_vcpu_t vcpu, hv_gic_ich_reg_t reg, uint64_t *value);
typedef hv_return_t FN_HV_GIC_GET_ICV_REG(hv_vcpu_t vcpu, hv_gic_icv_reg_t reg, uint64_t *value);
typedef hv_return_t FN_HV_GIC_GET_REDISTRIBUTOR_REG(hv_vcpu_t vcpu, hv_gic_redistributor_reg_t reg, uint64_t *value);

typedef hv_return_t FN_HV_GIC_SET_DISTRIBUTOR_REG(hv_gic_distributor_reg_t reg, uint64_t value);
typedef hv_return_t FN_HV_GIC_SET_MSI_REG(hv_gic_msi_reg_t reg, uint64_t value);
typedef hv_return_t FN_HV_GIC_SET_ICC_REG(hv_vcpu_t vcpu, hv_gic_icc_reg_t reg, uint64_t value);
typedef hv_return_t FN_HV_GIC_SET_ICH_REG(hv_vcpu_t vcpu, hv_gic_ich_reg_t reg, uint64_t value);
typedef hv_return_t FN_HV_GIC_SET_ICV_REG(hv_vcpu_t vcpu, hv_gic_icv_reg_t reg, uint64_t value);
typedef hv_return_t FN_HV_GIC_SET_REDISTRIBUTOR_REG(hv_vcpu_t vcpu, hv_gic_redistributor_reg_t reg, uint64_t value);

typedef hv_return_t FN_HV_GIC_GET_INTID(hv_gic_intid_t interrupt, uint32_t *intid);


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** @name Optional APIs imported from Hypervisor.framework.
 * @{ */
static FN_HV_VM_CONFIG_CREATE *g_pfnHvVmConfigCreate = NULL;  /* Since 13.0 */
static FN_HV_VM_CONFIG_GET_EL2_SUPPORTED *g_pfnHvVmConfigGetEl2Supported = NULL;  /* Since 15.0 */
static FN_HV_VM_CONFIG_GET_EL2_ENABLED *g_pfnHvVmConfigGetEl2Enabled = NULL;  /* Since 15.0 */
static FN_HV_VM_CONFIG_SET_EL2_ENABLED *g_pfnHvVmConfigSetEl2Enabled = NULL;  /* Since 15.0 */

static FN_HV_GIC_CREATE *g_pfnHvGicCreate = NULL;  /* Since 15.0 */
static FN_HV_GIC_RESET *g_pfnHvGicReset = NULL;  /* Since 15.0 */
static FN_HV_GIC_CONFIG_CREATE *g_pfnHvGicConfigCreate = NULL;  /* Since 15.0 */
static FN_HV_GIC_CONFIG_SET_DISTRIBUTOR_BASE *g_pfnHvGicConfigSetDistributorBase = NULL;  /* Since 15.0 */
static FN_HV_GIC_CONFIG_SET_REDISTRIBUTOR_BASE *g_pfnHvGicConfigSetRedistributorBase = NULL;  /* Since 15.0 */
static FN_HV_GIC_CONFIG_SET_MSI_REGION_BASE *g_pfnHvGicConfigSetMsiRegionBase = NULL;  /* Since 15.0 */
static FN_HV_GIC_CONFIG_SET_MSI_INTERRUPT_RANGE *g_pfnHvGicConfigSetMsiInterruptRange = NULL;  /* Since 15.0 */
static FN_HV_GIC_GET_REDISTRIBUTOR_BASE *g_pfnHvGicGetRedistributorBase = NULL;  /* Since 15.0 */
static FN_HV_GIC_GET_REDISTRIBUTOR_REGION_SIZE *g_pfnHvGicGetRedistributorRegionSize = NULL;  /* Since 15.0 */
static FN_HV_GIC_GET_REDISTRIBUTOR_SIZE *g_pfnHvGicGetRedistributorSize = NULL;  /* Since 15.0 */
static FN_HV_GIC_GET_DISTRIBUTOR_SIZE *g_pfnHvGicGetDistributorSize = NULL;  /* Since 15.0 */
static FN_HV_GIC_GET_DISTRIBUTOR_BASE_ALIGNMENT *g_pfnHvGicGetDistributorBaseAlignment = NULL;  /* Since 15.0 */
static FN_HV_GIC_GET_REDISTRIBUTOR_BASE_ALIGNMENT *g_pfnHvGicGetRedistributorBaseAlignment = NULL;  /* Since 15.0 */
static FN_HV_GIC_GET_MSI_REGION_BASE_ALIGNMENT *g_pfnHvGicGetMsiRegionBaseAlignment = NULL;  /* Since 15.0 */
static FN_HV_GIC_GET_MSI_REGION_SIZE *g_pfnHvGicGetMsiRegionSize = NULL;  /* Since 15.0 */
static FN_HV_GIC_GET_SPI_INTERRUPT_RANGE *g_pfnHvGicGetSpiInterruptRange = NULL;  /* Since 15.0 */
static FN_HV_GIC_STATE_CREATE *g_pfnHvGicStateCreate = NULL;  /* Since 15.0 */
static FN_HV_GIC_SET_STATE *g_pfnHvGicSetState = NULL;  /* Since 15.0 */
static FN_HV_GIC_STATE_GET_SIZE *g_pfnHvGicStateGetSize = NULL;  /* Since 15.0 */
static FN_HV_GIC_STATE_GET_DATA *g_pfnHvGicStateGetData = NULL;  /* Since 15.0 */
static FN_HV_GIC_SEND_MSI *g_pfnHvGicSendMsi = NULL;  /* Since 15.0 */
static FN_HV_GIC_SET_SPI *g_pfnHvGicSetSpi = NULL;  /* Since 15.0 */
static FN_HV_GIC_GET_DISTRIBUTOR_REG *g_pfnHvGicGetDistributorReg = NULL;  /* Since 15.0 */
static FN_HV_GIC_GET_MSI_REG *g_pfnHvGicGetMsiReg = NULL;  /* Since 15.0 */
static FN_HV_GIC_GET_ICC_REG *g_pfnHvGicGetIccReg = NULL;  /* Since 15.0 */
static FN_HV_GIC_GET_ICH_REG *g_pfnHvGicGetIchReg = NULL;  /* Since 15.0 */
static FN_HV_GIC_GET_ICV_REG *g_pfnHvGicGetIcvReg = NULL;  /* Since 15.0 */
static FN_HV_GIC_GET_REDISTRIBUTOR_REG *g_pfnHvGicGetRedistributorReg = NULL;  /* Since 15.0 */
static FN_HV_GIC_SET_DISTRIBUTOR_REG *g_pfnHvGicSetDistributorReg = NULL;  /* Since 15.0 */
static FN_HV_GIC_SET_MSI_REG *g_pfnHvGicSetMsiReg = NULL;  /* Since 15.0 */
static FN_HV_GIC_SET_ICC_REG *g_pfnHvGicSetIccReg = NULL;  /* Since 15.0 */
static FN_HV_GIC_SET_ICH_REG *g_pfnHvGicSetIchReg = NULL;  /* Since 15.0 */
static FN_HV_GIC_SET_ICV_REG *g_pfnHvGicSetIcvReg = NULL;  /* Since 15.0 */
static FN_HV_GIC_SET_REDISTRIBUTOR_REG *g_pfnHvGicSetRedistributorReg = NULL;  /* Since 15.0 */
static FN_HV_GIC_GET_INTID *g_pfnHvGicGetIntid = NULL;  /* Since 15.0 */
/** @} */


/**
 * Import instructions.
 */
static const struct
{
    void       **ppfn;     /**< The function pointer variable. */
    const char  *pszName;  /**< The function name. */
} g_aImports[] =
{
#define NEM_DARWIN_IMPORT(a_Pfn, a_Name) { (void **)&(a_Pfn), #a_Name }
    NEM_DARWIN_IMPORT(g_pfnHvVmConfigCreate, hv_vm_config_create),
    NEM_DARWIN_IMPORT(g_pfnHvVmConfigGetEl2Supported, hv_vm_config_get_el2_supported),
    NEM_DARWIN_IMPORT(g_pfnHvVmConfigGetEl2Enabled, hv_vm_config_get_el2_enabled),
    NEM_DARWIN_IMPORT(g_pfnHvVmConfigSetEl2Enabled, hv_vm_config_set_el2_enabled),

    NEM_DARWIN_IMPORT(g_pfnHvGicCreate, hv_gic_create),
    NEM_DARWIN_IMPORT(g_pfnHvGicReset, hv_gic_reset),
    NEM_DARWIN_IMPORT(g_pfnHvGicConfigCreate, hv_gic_config_create),
    NEM_DARWIN_IMPORT(g_pfnHvGicConfigSetDistributorBase, hv_gic_config_set_distributor_base),
    NEM_DARWIN_IMPORT(g_pfnHvGicConfigSetRedistributorBase, hv_gic_config_set_redistributor_base),
    NEM_DARWIN_IMPORT(g_pfnHvGicConfigSetMsiRegionBase, hv_gic_config_set_msi_region_base),
    NEM_DARWIN_IMPORT(g_pfnHvGicConfigSetMsiInterruptRange, hv_gic_config_set_msi_interrupt_range),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetRedistributorBase, hv_gic_get_redistributor_base),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetRedistributorRegionSize, hv_gic_get_redistributor_region_size),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetRedistributorSize, hv_gic_get_redistributor_size),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetDistributorSize, hv_gic_get_distributor_size),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetDistributorBaseAlignment, hv_gic_get_distributor_base_alignment),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetRedistributorBaseAlignment, hv_gic_get_redistributor_base_alignment),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetMsiRegionBaseAlignment, hv_gic_get_msi_region_base_alignment),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetMsiRegionSize, hv_gic_get_msi_region_size),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetSpiInterruptRange, hv_gic_get_spi_interrupt_range),
    NEM_DARWIN_IMPORT(g_pfnHvGicStateCreate, hv_gic_state_create),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetState, hv_gic_set_state),
    NEM_DARWIN_IMPORT(g_pfnHvGicStateGetSize, hv_gic_state_get_size),
    NEM_DARWIN_IMPORT(g_pfnHvGicStateGetData, hv_gic_state_get_data),
    NEM_DARWIN_IMPORT(g_pfnHvGicSendMsi, hv_gic_send_msi),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetSpi, hv_gic_set_spi),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetDistributorReg, hv_gic_get_distributor_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetMsiReg, hv_gic_get_msi_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetIccReg, hv_gic_get_icc_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetIchReg, hv_gic_get_ich_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetIcvReg, hv_gic_get_icv_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetRedistributorReg, hv_gic_get_redistributor_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetDistributorReg, hv_gic_set_distributor_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetMsiReg, hv_gic_set_msi_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetIccReg, hv_gic_set_icc_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetIchReg, hv_gic_set_ich_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetIcvReg, hv_gic_set_icv_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetRedistributorReg, hv_gic_set_redistributor_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetIntid, hv_gic_get_intid)
#undef NEM_DARWIN_IMPORT
};


/*
 * Let the preprocessor alias the APIs to import variables for better autocompletion.
 */
#ifndef IN_SLICKEDIT
# define hv_vm_config_create g_pfnHvVmConfigCreate
# define hv_vm_config_get_el2_supported g_pfnHvVmConfigGetEl2Supported
# define hv_vm_config_get_el2_enabled g_pfnHvVmConfigGetEl2Enabled
# define hv_vm_config_set_el2_enabled g_pfnHvVmConfigSetEl2Enabled

# define hv_gic_create g_pfnHvGicCreate
# define hv_gic_reset g_pfnHvGicReset
# define hv_gic_config_create g_pfnHvGicConfigCreate
# define hv_gic_config_set_distributor_base g_pfnHvGicConfigSetDistributorBase
# define hv_gic_config_set_redistributor_base g_pfnHvGicConfigSetRedistributorBase
# define hv_gic_config_set_msi_region_base g_pfnHvGicConfigSetMsiRegionBase
# define hv_gic_config_set_msi_interrupt_range g_pfnHvGicConfigSetMsiInterruptRange
# define hv_gic_get_redistributor_base g_pfnHvGicGetRedistributorBase
# define hv_gic_get_redistributor_region_size g_pfnHvGicGetRedistributorRegionSize
# define hv_gic_get_redistributor_size g_pfnHvGicGetRedistributorSize
# define hv_gic_get_distributor_size g_pfnHvGicGetDistributorSize
# define hv_gic_get_distributor_base_alignment g_pfnHvGicGetDistributorBaseAlignment
# define hv_gic_get_redistributor_base_alignment g_pfnHvGicGetRedistributorBaseAlignment
# define hv_gic_get_msi_region_base_alignment g_pfnHvGicGetMsiRegionBaseAlignment
# define hv_gic_get_msi_region_size g_pfnHvGicGetMsiRegionSize
# define hv_gic_get_spi_interrupt_range g_pfnHvGicGetSpiInterruptRange
# define hv_gic_state_create g_pfnHvGicStateCreate
# define hv_gic_set_state g_pfnHvGicSetState
# define hv_gic_state_get_size g_pfnHvGicStateGetSize
# define hv_gic_state_get_data g_pfnHvGicStateGetData
# define hv_gic_send_msi g_pfnHvGicSendMsi
# define hv_gic_set_spi g_pfnHvGicSetSpi
# define hv_gic_get_distributor_reg g_pfnHvGicGetDistributorReg
# define hv_gic_get_msi_reg g_pfnHvGicGetMsiReg
# define hv_gic_get_icc_reg g_pfnHvGicGetIccReg
# define hv_gic_get_ich_reg g_pfnHvGicGetIchReg
# define hv_gic_get_icv_reg g_pfnHvGicGetIcvReg
# define hv_gic_get_redistributor_reg g_pfnHvGicGetRedistributorReg
# define hv_gic_set_distributor_reg g_pfnHvGicSetDistributorReg
# define hv_gic_set_msi_reg g_pfnHvGicSetMsiReg
# define hv_gic_set_icc_reg g_pfnHvGicSetIccReg
# define hv_gic_set_ich_reg g_pfnHvGicSetIchReg
# define hv_gic_set_icv_reg g_pfnHvGicSetIcvReg
# define hv_gic_set_redistributor_reg g_pfnHvGicSetRedistributorReg
# define hv_gic_get_intid g_pfnHvGicGetIntid
#endif
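
/*
 * Editor's illustrative sketch (not part of the upstream file): every name aliased above is
 * really a function pointer resolved at runtime, so it is NULL when running on a macOS
 * version that lacks the API, and callers must test availability first. The guarded-call
 * pattern below mirrors what this backend does later (e.g. before creating the in-kernel
 * GIC); the surrounding control flow is hypothetical.
 *
 *      if (hv_gic_create)              // expands to g_pfnHvGicCreate; NULL before macOS 15.0
 *      {
 *          hv_return_t hrc = hv_gic_create(hGicCfg);
 *          // ...
 *      }
 *      else
 *      {
 *          // fall back to the emulated GIC device
 *      }
 */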


/** The general registers. */
static const struct
{
    hv_reg_t    enmHvReg;
    uint32_t    fCpumExtrn;
    uint32_t    offCpumCtx;
} s_aCpumRegs[] =
{
#define CPUM_GREG_EMIT_X0_X3(a_Idx)  { HV_REG_X ## a_Idx, CPUMCTX_EXTRN_X ## a_Idx, RT_UOFFSETOF(CPUMCTX, aGRegs[a_Idx].x) }
#define CPUM_GREG_EMIT_X4_X28(a_Idx) { HV_REG_X ## a_Idx, CPUMCTX_EXTRN_X4_X28, RT_UOFFSETOF(CPUMCTX, aGRegs[a_Idx].x) }
    CPUM_GREG_EMIT_X0_X3(0),
    CPUM_GREG_EMIT_X0_X3(1),
    CPUM_GREG_EMIT_X0_X3(2),
    CPUM_GREG_EMIT_X0_X3(3),
    CPUM_GREG_EMIT_X4_X28(4),
    CPUM_GREG_EMIT_X4_X28(5),
    CPUM_GREG_EMIT_X4_X28(6),
    CPUM_GREG_EMIT_X4_X28(7),
    CPUM_GREG_EMIT_X4_X28(8),
    CPUM_GREG_EMIT_X4_X28(9),
    CPUM_GREG_EMIT_X4_X28(10),
    CPUM_GREG_EMIT_X4_X28(11),
    CPUM_GREG_EMIT_X4_X28(12),
    CPUM_GREG_EMIT_X4_X28(13),
    CPUM_GREG_EMIT_X4_X28(14),
    CPUM_GREG_EMIT_X4_X28(15),
    CPUM_GREG_EMIT_X4_X28(16),
    CPUM_GREG_EMIT_X4_X28(17),
    CPUM_GREG_EMIT_X4_X28(18),
    CPUM_GREG_EMIT_X4_X28(19),
    CPUM_GREG_EMIT_X4_X28(20),
    CPUM_GREG_EMIT_X4_X28(21),
    CPUM_GREG_EMIT_X4_X28(22),
    CPUM_GREG_EMIT_X4_X28(23),
    CPUM_GREG_EMIT_X4_X28(24),
    CPUM_GREG_EMIT_X4_X28(25),
    CPUM_GREG_EMIT_X4_X28(26),
    CPUM_GREG_EMIT_X4_X28(27),
    CPUM_GREG_EMIT_X4_X28(28),
    { HV_REG_FP,   CPUMCTX_EXTRN_FP,   RT_UOFFSETOF(CPUMCTX, aGRegs[29].x) },
    { HV_REG_LR,   CPUMCTX_EXTRN_LR,   RT_UOFFSETOF(CPUMCTX, aGRegs[30].x) },
    { HV_REG_PC,   CPUMCTX_EXTRN_PC,   RT_UOFFSETOF(CPUMCTX, Pc.u64) },
    { HV_REG_FPCR, CPUMCTX_EXTRN_FPCR, RT_UOFFSETOF(CPUMCTX, fpcr) },
    { HV_REG_FPSR, CPUMCTX_EXTRN_FPSR, RT_UOFFSETOF(CPUMCTX, fpsr) }
#undef CPUM_GREG_EMIT_X0_X3
#undef CPUM_GREG_EMIT_X4_X28
};
/** SIMD/FP registers. */
static const struct
{
    hv_simd_fp_reg_t    enmHvReg;
    uint32_t            offCpumCtx;
} s_aCpumFpRegs[] =
{
#define CPUM_VREG_EMIT(a_Idx) { HV_SIMD_FP_REG_Q ## a_Idx, RT_UOFFSETOF(CPUMCTX, aVRegs[a_Idx].v) }
    CPUM_VREG_EMIT(0),
    CPUM_VREG_EMIT(1),
    CPUM_VREG_EMIT(2),
    CPUM_VREG_EMIT(3),
    CPUM_VREG_EMIT(4),
    CPUM_VREG_EMIT(5),
    CPUM_VREG_EMIT(6),
    CPUM_VREG_EMIT(7),
    CPUM_VREG_EMIT(8),
    CPUM_VREG_EMIT(9),
    CPUM_VREG_EMIT(10),
    CPUM_VREG_EMIT(11),
    CPUM_VREG_EMIT(12),
    CPUM_VREG_EMIT(13),
    CPUM_VREG_EMIT(14),
    CPUM_VREG_EMIT(15),
    CPUM_VREG_EMIT(16),
    CPUM_VREG_EMIT(17),
    CPUM_VREG_EMIT(18),
    CPUM_VREG_EMIT(19),
    CPUM_VREG_EMIT(20),
    CPUM_VREG_EMIT(21),
    CPUM_VREG_EMIT(22),
    CPUM_VREG_EMIT(23),
    CPUM_VREG_EMIT(24),
    CPUM_VREG_EMIT(25),
    CPUM_VREG_EMIT(26),
    CPUM_VREG_EMIT(27),
    CPUM_VREG_EMIT(28),
    CPUM_VREG_EMIT(29),
    CPUM_VREG_EMIT(30),
    CPUM_VREG_EMIT(31)
#undef CPUM_VREG_EMIT
};
/** Debug system registers. */
static const struct
{
    hv_sys_reg_t    enmHvReg;
    uint32_t        offCpumCtx;
} s_aCpumDbgRegs[] =
{
#define CPUM_DBGREG_EMIT(a_BorW, a_Idx) \
    { HV_SYS_REG_DBG ## a_BorW ## CR ## a_Idx ## _EL1, RT_UOFFSETOF(CPUMCTX, a ## a_BorW ## p[a_Idx].Ctrl.u64) }, \
    { HV_SYS_REG_DBG ## a_BorW ## VR ## a_Idx ## _EL1, RT_UOFFSETOF(CPUMCTX, a ## a_BorW ## p[a_Idx].Value.u64) }
    /* Breakpoint registers. */
    CPUM_DBGREG_EMIT(B, 0),
    CPUM_DBGREG_EMIT(B, 1),
    CPUM_DBGREG_EMIT(B, 2),
    CPUM_DBGREG_EMIT(B, 3),
    CPUM_DBGREG_EMIT(B, 4),
    CPUM_DBGREG_EMIT(B, 5),
    CPUM_DBGREG_EMIT(B, 6),
    CPUM_DBGREG_EMIT(B, 7),
    CPUM_DBGREG_EMIT(B, 8),
    CPUM_DBGREG_EMIT(B, 9),
    CPUM_DBGREG_EMIT(B, 10),
    CPUM_DBGREG_EMIT(B, 11),
    CPUM_DBGREG_EMIT(B, 12),
    CPUM_DBGREG_EMIT(B, 13),
    CPUM_DBGREG_EMIT(B, 14),
    CPUM_DBGREG_EMIT(B, 15),
    /* Watchpoint registers. */
    CPUM_DBGREG_EMIT(W, 0),
    CPUM_DBGREG_EMIT(W, 1),
    CPUM_DBGREG_EMIT(W, 2),
    CPUM_DBGREG_EMIT(W, 3),
    CPUM_DBGREG_EMIT(W, 4),
    CPUM_DBGREG_EMIT(W, 5),
    CPUM_DBGREG_EMIT(W, 6),
    CPUM_DBGREG_EMIT(W, 7),
    CPUM_DBGREG_EMIT(W, 8),
    CPUM_DBGREG_EMIT(W, 9),
    CPUM_DBGREG_EMIT(W, 10),
    CPUM_DBGREG_EMIT(W, 11),
    CPUM_DBGREG_EMIT(W, 12),
    CPUM_DBGREG_EMIT(W, 13),
    CPUM_DBGREG_EMIT(W, 14),
    CPUM_DBGREG_EMIT(W, 15),
    { HV_SYS_REG_MDSCR_EL1, RT_UOFFSETOF(CPUMCTX, Mdscr.u64) }
#undef CPUM_DBGREG_EMIT
};
/** PAuth key system registers. */
static const struct
{
    hv_sys_reg_t    enmHvReg;
    uint32_t        offCpumCtx;
} s_aCpumPAuthKeyRegs[] =
{
    { HV_SYS_REG_APDAKEYLO_EL1, RT_UOFFSETOF(CPUMCTX, Apda.Low.u64) },
    { HV_SYS_REG_APDAKEYHI_EL1, RT_UOFFSETOF(CPUMCTX, Apda.High.u64) },
    { HV_SYS_REG_APDBKEYLO_EL1, RT_UOFFSETOF(CPUMCTX, Apdb.Low.u64) },
    { HV_SYS_REG_APDBKEYHI_EL1, RT_UOFFSETOF(CPUMCTX, Apdb.High.u64) },
    { HV_SYS_REG_APGAKEYLO_EL1, RT_UOFFSETOF(CPUMCTX, Apga.Low.u64) },
    { HV_SYS_REG_APGAKEYHI_EL1, RT_UOFFSETOF(CPUMCTX, Apga.High.u64) },
    { HV_SYS_REG_APIAKEYLO_EL1, RT_UOFFSETOF(CPUMCTX, Apia.Low.u64) },
    { HV_SYS_REG_APIAKEYHI_EL1, RT_UOFFSETOF(CPUMCTX, Apia.High.u64) },
    { HV_SYS_REG_APIBKEYLO_EL1, RT_UOFFSETOF(CPUMCTX, Apib.Low.u64) },
    { HV_SYS_REG_APIBKEYHI_EL1, RT_UOFFSETOF(CPUMCTX, Apib.High.u64) }
};
/** System registers. */
static const struct
{
    hv_sys_reg_t    enmHvReg;
    uint32_t        fCpumExtrn;
    uint32_t        offCpumCtx;
} s_aCpumSysRegs[] =
{
    { HV_SYS_REG_SP_EL0,         CPUMCTX_EXTRN_SP,             RT_UOFFSETOF(CPUMCTX, aSpReg[0].u64) },
    { HV_SYS_REG_SP_EL1,         CPUMCTX_EXTRN_SP,             RT_UOFFSETOF(CPUMCTX, aSpReg[1].u64) },
    { HV_SYS_REG_SPSR_EL1,       CPUMCTX_EXTRN_SPSR,           RT_UOFFSETOF(CPUMCTX, Spsr.u64) },
    { HV_SYS_REG_ELR_EL1,        CPUMCTX_EXTRN_ELR,            RT_UOFFSETOF(CPUMCTX, Elr.u64) },
    { HV_SYS_REG_SCTLR_EL1,      CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Sctlr.u64) },
    { HV_SYS_REG_TCR_EL1,        CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Tcr.u64) },
    { HV_SYS_REG_TTBR0_EL1,      CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Ttbr0.u64) },
    { HV_SYS_REG_TTBR1_EL1,      CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Ttbr1.u64) },
    { HV_SYS_REG_VBAR_EL1,       CPUMCTX_EXTRN_SYSREG_MISC,    RT_UOFFSETOF(CPUMCTX, VBar.u64) },
    { HV_SYS_REG_AFSR0_EL1,      CPUMCTX_EXTRN_SYSREG_MISC,    RT_UOFFSETOF(CPUMCTX, Afsr0.u64) },
    { HV_SYS_REG_AFSR1_EL1,      CPUMCTX_EXTRN_SYSREG_MISC,    RT_UOFFSETOF(CPUMCTX, Afsr1.u64) },
    { HV_SYS_REG_AMAIR_EL1,      CPUMCTX_EXTRN_SYSREG_MISC,    RT_UOFFSETOF(CPUMCTX, Amair.u64) },
    { HV_SYS_REG_CNTKCTL_EL1,    CPUMCTX_EXTRN_SYSREG_MISC,    RT_UOFFSETOF(CPUMCTX, CntKCtl.u64) },
    { HV_SYS_REG_CONTEXTIDR_EL1, CPUMCTX_EXTRN_SYSREG_MISC,    RT_UOFFSETOF(CPUMCTX, ContextIdr.u64) },
    { HV_SYS_REG_CPACR_EL1,      CPUMCTX_EXTRN_SYSREG_MISC,    RT_UOFFSETOF(CPUMCTX, Cpacr.u64) },
    { HV_SYS_REG_CSSELR_EL1,     CPUMCTX_EXTRN_SYSREG_MISC,    RT_UOFFSETOF(CPUMCTX, Csselr.u64) },
    { HV_SYS_REG_ESR_EL1,        CPUMCTX_EXTRN_SYSREG_MISC,    RT_UOFFSETOF(CPUMCTX, Esr.u64) },
    { HV_SYS_REG_FAR_EL1,        CPUMCTX_EXTRN_SYSREG_MISC,    RT_UOFFSETOF(CPUMCTX, Far.u64) },
    { HV_SYS_REG_MAIR_EL1,       CPUMCTX_EXTRN_SYSREG_MISC,    RT_UOFFSETOF(CPUMCTX, Mair.u64) },
    { HV_SYS_REG_PAR_EL1,        CPUMCTX_EXTRN_SYSREG_MISC,    RT_UOFFSETOF(CPUMCTX, Par.u64) },
    { HV_SYS_REG_TPIDRRO_EL0,    CPUMCTX_EXTRN_SYSREG_MISC,    RT_UOFFSETOF(CPUMCTX, TpIdrRoEl0.u64) },
    { HV_SYS_REG_TPIDR_EL0,      CPUMCTX_EXTRN_SYSREG_MISC,    RT_UOFFSETOF(CPUMCTX, aTpIdr[0].u64) },
    { HV_SYS_REG_TPIDR_EL1,      CPUMCTX_EXTRN_SYSREG_MISC,    RT_UOFFSETOF(CPUMCTX, aTpIdr[1].u64) },
    { HV_SYS_REG_MDCCINT_EL1,    CPUMCTX_EXTRN_SYSREG_MISC,    RT_UOFFSETOF(CPUMCTX, MDccInt.u64) }
};
/** EL2 support system registers. */
static const struct
{
    uint16_t    idSysReg;
    uint32_t    offCpumCtx;
} s_aCpumEl2SysRegs[] =
{
    { ARMV8_AARCH64_SYSREG_CNTHCTL_EL2,   RT_UOFFSETOF(CPUMCTX, CntHCtlEl2.u64) },
    { ARMV8_AARCH64_SYSREG_CNTHP_CTL_EL2, RT_UOFFSETOF(CPUMCTX, CntHpCtlEl2.u64) },
    { ARMV8_AARCH64_SYSREG_CNTHP_CVAL_EL2, RT_UOFFSETOF(CPUMCTX, CntHpCValEl2.u64) },
    { ARMV8_AARCH64_SYSREG_CNTHP_TVAL_EL2, RT_UOFFSETOF(CPUMCTX, CntHpTValEl2.u64) },
    { ARMV8_AARCH64_SYSREG_CNTVOFF_EL2,   RT_UOFFSETOF(CPUMCTX, CntVOffEl2.u64) },
    { ARMV8_AARCH64_SYSREG_CPTR_EL2,      RT_UOFFSETOF(CPUMCTX, CptrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_ELR_EL2,       RT_UOFFSETOF(CPUMCTX, ElrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_ESR_EL2,       RT_UOFFSETOF(CPUMCTX, EsrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_FAR_EL2,       RT_UOFFSETOF(CPUMCTX, FarEl2.u64) },
    { ARMV8_AARCH64_SYSREG_HCR_EL2,       RT_UOFFSETOF(CPUMCTX, HcrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_HPFAR_EL2,     RT_UOFFSETOF(CPUMCTX, HpFarEl2.u64) },
    { ARMV8_AARCH64_SYSREG_MAIR_EL2,      RT_UOFFSETOF(CPUMCTX, MairEl2.u64) },
    //{ ARMV8_AARCH64_SYSREG_MDCR_EL2,      RT_UOFFSETOF(CPUMCTX, MdcrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_SCTLR_EL2,     RT_UOFFSETOF(CPUMCTX, SctlrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_SPSR_EL2,      RT_UOFFSETOF(CPUMCTX, SpsrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_SP_EL2,        RT_UOFFSETOF(CPUMCTX, SpEl2.u64) },
    { ARMV8_AARCH64_SYSREG_TCR_EL2,       RT_UOFFSETOF(CPUMCTX, TcrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_TPIDR_EL2,     RT_UOFFSETOF(CPUMCTX, TpidrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_TTBR0_EL2,     RT_UOFFSETOF(CPUMCTX, Ttbr0El2.u64) },
    { ARMV8_AARCH64_SYSREG_TTBR1_EL2,     RT_UOFFSETOF(CPUMCTX, Ttbr1El2.u64) },
    { ARMV8_AARCH64_SYSREG_VBAR_EL2,      RT_UOFFSETOF(CPUMCTX, VBarEl2.u64) },
    { ARMV8_AARCH64_SYSREG_VMPIDR_EL2,    RT_UOFFSETOF(CPUMCTX, VMpidrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_VPIDR_EL2,     RT_UOFFSETOF(CPUMCTX, VPidrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_VTCR_EL2,      RT_UOFFSETOF(CPUMCTX, VTcrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_VTTBR_EL2,     RT_UOFFSETOF(CPUMCTX, VTtbrEl2.u64) }
};
/** ID registers. */
static const struct
{
    hv_feature_reg_t    enmHvReg;
    uint32_t            offIdStruct;
} s_aIdRegs[] =
{
    { HV_FEATURE_REG_ID_AA64DFR0_EL1,  RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Dfr0El1) },
    { HV_FEATURE_REG_ID_AA64DFR1_EL1,  RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Dfr1El1) },
    { HV_FEATURE_REG_ID_AA64ISAR0_EL1, RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Isar0El1) },
    { HV_FEATURE_REG_ID_AA64ISAR1_EL1, RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Isar1El1) },
    { HV_FEATURE_REG_ID_AA64MMFR0_EL1, RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Mmfr0El1) },
    { HV_FEATURE_REG_ID_AA64MMFR1_EL1, RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Mmfr1El1) },
    { HV_FEATURE_REG_ID_AA64MMFR2_EL1, RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Mmfr2El1) },
    { HV_FEATURE_REG_ID_AA64PFR0_EL1,  RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Pfr0El1) },
    { HV_FEATURE_REG_ID_AA64PFR1_EL1,  RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Pfr1El1) },
    { HV_FEATURE_REG_CLIDR_EL1,        RT_UOFFSETOF(CPUMIDREGS, u64RegClidrEl1) },
    { HV_FEATURE_REG_CTR_EL0,          RT_UOFFSETOF(CPUMIDREGS, u64RegCtrEl0) },
    { HV_FEATURE_REG_DCZID_EL0,        RT_UOFFSETOF(CPUMIDREGS, u64RegDczidEl0) }
};


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/


/**
 * Converts a HV return code to a VBox status code.
 *
 * @returns VBox status code.
 * @param   hrc     The HV return code to convert.
 */
DECLINLINE(int) nemR3DarwinHvSts2Rc(hv_return_t hrc)
{
    if (hrc == HV_SUCCESS)
        return VINF_SUCCESS;

    switch (hrc)
    {
        case HV_ERROR:        return VERR_INVALID_STATE;
        case HV_BUSY:         return VERR_RESOURCE_BUSY;
        case HV_BAD_ARGUMENT: return VERR_INVALID_PARAMETER;
        case HV_NO_RESOURCES: return VERR_OUT_OF_RESOURCES;
        case HV_NO_DEVICE:    return VERR_NOT_FOUND;
        case HV_UNSUPPORTED:  return VERR_NOT_SUPPORTED;
    }

    return VERR_IPE_UNEXPECTED_STATUS;
}
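
/*
 * Editor's usage sketch: this converter sits at the tail of virtually every
 * Hypervisor.framework call in this backend. hv_vcpu_run() and the HV_* status codes are
 * real API; the wrapper function itself is hypothetical.
 *
 *      static int nemR3DarwinRunGuestOnce(PVMCPUCC pVCpu)
 *      {
 *          hv_return_t hrc = hv_vcpu_run(pVCpu->nem.s.hVCpu);
 *          if (hrc == HV_SUCCESS)
 *              return VINF_SUCCESS;
 *          return nemR3DarwinHvSts2Rc(hrc);    // e.g. HV_BAD_ARGUMENT -> VERR_INVALID_PARAMETER
 *      }
 */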


/**
 * Returns a human readable string of the given exception class.
 *
 * @returns Pointer to the string matching the given EC.
 * @param   u32Ec   The exception class to return the string for.
 */
static const char *nemR3DarwinEsrEl2EcStringify(uint32_t u32Ec)
{
    switch (u32Ec)
    {
#define ARMV8_EC_CASE(a_Ec) case a_Ec: return #a_Ec
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_UNKNOWN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_TRAPPED_WFX);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCR_MRC_COPROC_15);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCRR_MRRC_COPROC15);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCR_MRC_COPROC_14);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_LDC_STC);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_SME_SVE_NEON);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_VMRS);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_PA_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_LS64_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MRRC_COPROC14);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_BTI_BRANCH_TARGET_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_ILLEGAL_EXECUTION_STATE);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_SVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_HVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_SMC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_SVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_HVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_SMC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_SYS_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_SVE_TRAPPED);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_PAUTH_NV_TRAPPED_ERET_ERETAA_ERETAB);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_TME_TSTART_INSN_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_FPAC_PA_INSN_FAILURE_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_SME_TRAPPED_SME_ACCESS);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_RME_GRANULE_PROT_CHECK_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_INSN_ABORT_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_INSN_ABORT_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_PC_ALIGNMENT_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_DATA_ABORT_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_DATA_ABORT_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SP_ALIGNMENT_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_MOPS_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_FP_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_FP_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SERROR_INTERRUPT);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_BKPT_EXCEPTION_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_BKPT_EXCEPTION_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SS_EXCEPTION_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SS_EXCEPTION_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_WATCHPOINT_EXCEPTION_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_WATCHPOINT_EXCEPTION_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_BKPT_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_VEC_CATCH_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_BRK_INSN);
#undef ARMV8_EC_CASE
        default:
            break;
    }

    return "<INVALID>";
}
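
/*
 * Editor's usage sketch: the EC handed to this function is extracted from the ESR_EL2
 * syndrome reported with a vCPU exit, assuming the ARMV8_ESR_EL2_EC_GET() accessor from
 * iprt/armv8.h; the exit-logging call site is hypothetical.
 *
 *      uint64_t const uEsr = pVCpu->nem.s.pHvExit->exception.syndrome;
 *      uint32_t const uEc  = ARMV8_ESR_EL2_EC_GET(uEsr);
 *      Log2(("vCPU exit: EC=%#x (%s)\n", uEc, nemR3DarwinEsrEl2EcStringify(uEc)));
 */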


/**
 * Resolves a NEM page state from the given protection flags.
 *
 * @returns NEM page state.
 * @param   fPageProt   The page protection flags.
 */
DECLINLINE(uint8_t) nemR3DarwinPageStateFromProt(uint32_t fPageProt)
{
    switch (fPageProt)
    {
        case NEM_PAGE_PROT_NONE:
            return NEM_DARWIN_PAGE_STATE_UNMAPPED;
        case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE:
            return NEM_DARWIN_PAGE_STATE_RX;
        case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE:
            return NEM_DARWIN_PAGE_STATE_RW;
        case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE:
            return NEM_DARWIN_PAGE_STATE_RWX;
        default:
            break;
    }

    AssertLogRelMsgFailed(("Invalid combination of page protection flags %#x, can't map to page state!\n", fPageProt));
    return NEM_DARWIN_PAGE_STATE_UNMAPPED;
}
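
/*
 * Editor's worked example: the protection flags map one-to-one onto the page states, so a
 * read+execute mapping yields the RX state. The constants are the real ones handled above;
 * the call site is hypothetical.
 *
 *      uint8_t const u2State = nemR3DarwinPageStateFromProt(NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE);
 *      Assert(u2State == NEM_DARWIN_PAGE_STATE_RX);
 */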


/**
 * Unmaps the given guest physical address range (page aligned).
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The guest physical address to start unmapping at.
 * @param   cb          The size of the range to unmap in bytes.
 * @param   pu2State    Where to store the new state of the unmapped page, optional.
 */
DECLINLINE(int) nemR3DarwinUnmap(PVM pVM, RTGCPHYS GCPhys, size_t cb, uint8_t *pu2State)
{
    if (*pu2State <= NEM_DARWIN_PAGE_STATE_UNMAPPED)
    {
        Log5(("nemR3DarwinUnmap: %RGp == unmapped\n", GCPhys));
        *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
        return VINF_SUCCESS;
    }

    LogFlowFunc(("Unmapping %RGp LB %zu\n", GCPhys, cb));
    hv_return_t hrc = hv_vm_unmap(GCPhys, cb);
    if (RT_LIKELY(hrc == HV_SUCCESS))
    {
        STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
        if (pu2State)
            *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
        Log5(("nemR3DarwinUnmap: %RGp => unmapped\n", GCPhys));
        return VINF_SUCCESS;
    }

    STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
    LogRel(("nemR3DarwinUnmap(%RGp): failed! hrc=%#x\n", GCPhys, hrc));
    return VERR_NEM_IPE_6;
}


/**
 * Maps a given guest physical address range backed by the given memory with the given
 * protection flags.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The guest physical address to start mapping.
 * @param   pvRam       The R3 pointer of the memory to back the range with.
 * @param   cb          The size of the range, page aligned.
 * @param   fPageProt   The page protection flags to use for this range, combination of NEM_PAGE_PROT_XXX.
 * @param   pu2State    Where to store the state for the new page, optional.
 */
DECLINLINE(int) nemR3DarwinMap(PVM pVM, RTGCPHYS GCPhys, const void *pvRam, size_t cb, uint32_t fPageProt, uint8_t *pu2State)
{
    LogFlowFunc(("Mapping %RGp LB %zu fProt=%#x\n", GCPhys, cb, fPageProt));

    Assert(fPageProt != NEM_PAGE_PROT_NONE);
    RT_NOREF(pVM);

    hv_memory_flags_t fHvMemProt = 0;
    if (fPageProt & NEM_PAGE_PROT_READ)
        fHvMemProt |= HV_MEMORY_READ;
    if (fPageProt & NEM_PAGE_PROT_WRITE)
        fHvMemProt |= HV_MEMORY_WRITE;
    if (fPageProt & NEM_PAGE_PROT_EXECUTE)
        fHvMemProt |= HV_MEMORY_EXEC;

    hv_return_t hrc = hv_vm_map((void *)pvRam, GCPhys, cb, fHvMemProt);
    if (hrc == HV_SUCCESS)
    {
        if (pu2State)
            *pu2State = nemR3DarwinPageStateFromProt(fPageProt);
        return VINF_SUCCESS;
    }

    return nemR3DarwinHvSts2Rc(hrc);
}
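
/*
 * Editor's usage sketch: mapping one page of backing RAM read/write into the guest.
 * hv_vm_map() underneath requires the host address, guest physical address and size to be
 * page aligned; pvPageR3, GCPhys and the page-size constant are hypothetical here.
 *
 *      uint8_t u2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
 *      int rc = nemR3DarwinMap(pVM, GCPhys, pvPageR3, HOST_PAGE_SIZE,
 *                              NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE, &u2State);
 *      AssertRCReturn(rc, rc);     // u2State is now NEM_DARWIN_PAGE_STATE_RW
 */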


/**
 * Changes the protection flags for the given guest physical address range.
 *
 * @returns VBox status code.
 * @param   GCPhys      The guest physical address to start mapping.
 * @param   cb          The size of the range, page aligned.
 * @param   fPageProt   The page protection flags to use for this range, combination of NEM_PAGE_PROT_XXX.
 * @param   pu2State    Where to store the state for the new page, optional.
 */
DECLINLINE(int) nemR3DarwinProtect(RTGCPHYS GCPhys, size_t cb, uint32_t fPageProt, uint8_t *pu2State)
{
    hv_memory_flags_t fHvMemProt = 0;
    if (fPageProt & NEM_PAGE_PROT_READ)
        fHvMemProt |= HV_MEMORY_READ;
    if (fPageProt & NEM_PAGE_PROT_WRITE)
        fHvMemProt |= HV_MEMORY_WRITE;
    if (fPageProt & NEM_PAGE_PROT_EXECUTE)
        fHvMemProt |= HV_MEMORY_EXEC;

    hv_return_t hrc = hv_vm_protect(GCPhys, cb, fHvMemProt);
    if (hrc == HV_SUCCESS)
    {
        if (pu2State)
            *pu2State = nemR3DarwinPageStateFromProt(fPageProt);
        return VINF_SUCCESS;
    }

    LogRel(("nemR3DarwinProtect(%RGp,%zu,%#x): failed! hrc=%#x\n",
            GCPhys, cb, fPageProt, hrc));
    return nemR3DarwinHvSts2Rc(hrc);
}
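
/*
 * Editor's usage sketch: downgrading a page to read-only without remapping it, e.g. for
 * write monitoring; a subsequent guest write then causes a data-abort exit which the
 * backend can use to mark the page dirty and restore RW. Address and size are hypothetical.
 *
 *      int rc = nemR3DarwinProtect(GCPhys, HOST_PAGE_SIZE, NEM_PAGE_PROT_READ, &u2State);
 */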


#ifdef LOG_ENABLED
/**
 * Logs the current CPU state.
 */
static void nemR3DarwinLogState(PVMCC pVM, PVMCPUCC pVCpu)
{
    if (LogIs3Enabled())
    {
        char szRegs[4096];
        DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
                        "x0=%016VR{x0} x1=%016VR{x1} x2=%016VR{x2} x3=%016VR{x3}\n"
                        "x4=%016VR{x4} x5=%016VR{x5} x6=%016VR{x6} x7=%016VR{x7}\n"
                        "x8=%016VR{x8} x9=%016VR{x9} x10=%016VR{x10} x11=%016VR{x11}\n"
                        "x12=%016VR{x12} x13=%016VR{x13} x14=%016VR{x14} x15=%016VR{x15}\n"
                        "x16=%016VR{x16} x17=%016VR{x17} x18=%016VR{x18} x19=%016VR{x19}\n"
                        "x20=%016VR{x20} x21=%016VR{x21} x22=%016VR{x22} x23=%016VR{x23}\n"
                        "x24=%016VR{x24} x25=%016VR{x25} x26=%016VR{x26} x27=%016VR{x27}\n"
                        "x28=%016VR{x28} x29=%016VR{x29} x30=%016VR{x30}\n"
                        "pc=%016VR{pc} pstate=%016VR{pstate}\n"
                        "sp_el0=%016VR{sp_el0} sp_el1=%016VR{sp_el1} elr_el1=%016VR{elr_el1}\n"
                        "sctlr_el1=%016VR{sctlr_el1} tcr_el1=%016VR{tcr_el1}\n"
                        "ttbr0_el1=%016VR{ttbr0_el1} ttbr1_el1=%016VR{ttbr1_el1}\n"
                        "vbar_el1=%016VR{vbar_el1}\n"
                        );
        if (pVM->nem.s.fEl2Enabled)
        {
            Log3(("%s\n", szRegs));
            DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
                            "sp_el2=%016VR{sp_el2} elr_el2=%016VR{elr_el2}\n"
                            "spsr_el2=%016VR{spsr_el2} tpidr_el2=%016VR{tpidr_el2}\n"
                            "sctlr_el2=%016VR{sctlr_el2} tcr_el2=%016VR{tcr_el2}\n"
                            "ttbr0_el2=%016VR{ttbr0_el2} ttbr1_el2=%016VR{ttbr1_el2}\n"
                            "esr_el2=%016VR{esr_el2} far_el2=%016VR{far_el2}\n"
                            "hcr_el2=%016VR{hcr_el2} tcr_el2=%016VR{tcr_el2}\n"
                            "vbar_el2=%016VR{vbar_el2} cptr_el2=%016VR{cptr_el2}\n"
                            );
        }
        char szInstr[256]; RT_ZERO(szInstr);
        DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
                           DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
                           szInstr, sizeof(szInstr), NULL);
        Log3(("%s%s\n", szRegs, szInstr));
    }
}
#endif /* LOG_ENABLED */


static int nemR3DarwinCopyStateFromHv(PVMCC pVM, PVMCPUCC pVCpu, uint64_t fWhat)
{
    RT_NOREF(pVM);

    hv_return_t hrc = hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CTL_EL0, &pVCpu->cpum.GstCtx.CntvCtlEl0);
    if (hrc == HV_SUCCESS)
        hrc = hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CVAL_EL0, &pVCpu->cpum.GstCtx.CntvCValEl0);

    if (   hrc == HV_SUCCESS
        && (fWhat & (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR)))
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumRegs); i++)
        {
            if (s_aCpumRegs[i].fCpumExtrn & fWhat)
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumRegs[i].offCpumCtx);
                hrc |= hv_vcpu_get_reg(pVCpu->nem.s.hVCpu, s_aCpumRegs[i].enmHvReg, pu64);
            }
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_V0_V31))
    {
        /* SIMD/FP registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumFpRegs); i++)
        {
            hv_simd_fp_uchar16_t *pu128 = (hv_simd_fp_uchar16_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumFpRegs[i].offCpumCtx);
            hrc |= hv_vcpu_get_simd_fp_reg(pVCpu->nem.s.hVCpu, s_aCpumFpRegs[i].enmHvReg, pu128);
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_SYSREG_DEBUG))
    {
        /* Debug registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumDbgRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumDbgRegs[i].offCpumCtx);
            hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumDbgRegs[i].enmHvReg, pu64);
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS))
    {
        /* PAuth key registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumPAuthKeyRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumPAuthKeyRegs[i].offCpumCtx);
            hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumPAuthKeyRegs[i].enmHvReg, pu64);
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR | CPUMCTX_EXTRN_SYSREG_MISC)))
    {
        /* System registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegs); i++)
        {
            if (s_aCpumSysRegs[i].fCpumExtrn & fWhat)
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegs[i].offCpumCtx);
                hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumSysRegs[i].enmHvReg, pu64);
            }
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_SYSREG_EL2)
        && pVM->nem.s.fEl2Enabled)
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumEl2SysRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumEl2SysRegs[i].offCpumCtx);
            hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, (hv_sys_reg_t)s_aCpumEl2SysRegs[i].idSysReg, pu64);
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_PSTATE))
    {
        uint64_t u64Tmp;
        hrc |= hv_vcpu_get_reg(pVCpu->nem.s.hVCpu, HV_REG_CPSR, &u64Tmp);
        if (hrc == HV_SUCCESS)
            pVCpu->cpum.GstCtx.fPState = (uint32_t)u64Tmp;
    }

    /* Almost done, just update extern flags. */
    pVCpu->cpum.GstCtx.fExtrn &= ~fWhat;
    if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
        pVCpu->cpum.GstCtx.fExtrn = 0;

    return nemR3DarwinHvSts2Rc(hrc);
}
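
/*
 * Editor's usage sketch: importing only what a particular exit handler needs. The mask
 * below pulls in the GPRs and the PC while everything else stays marked as external in
 * fExtrn; the call site is hypothetical.
 *
 *      int rc = nemR3DarwinCopyStateFromHv(pVM, pVCpu, CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC);
 *      AssertRCReturn(rc, rc);
 */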


/**
 * Exports the guest state to HV for execution.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling EMT.
 */
static int nemR3DarwinExportGuestState(PVMCC pVM, PVMCPUCC pVCpu)
{
    RT_NOREF(pVM);
    hv_return_t hrc = HV_SUCCESS;

    if (   (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR))
        != (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR))
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumRegs); i++)
        {
            if (!(s_aCpumRegs[i].fCpumExtrn & pVCpu->cpum.GstCtx.fExtrn))
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumRegs[i].offCpumCtx);
                hrc |= hv_vcpu_set_reg(pVCpu->nem.s.hVCpu, s_aCpumRegs[i].enmHvReg, *pu64);
            }
        }
    }

    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_V0_V31))
    {
        /* SIMD/FP registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumFpRegs); i++)
        {
            hv_simd_fp_uchar16_t *pu128 = (hv_simd_fp_uchar16_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumFpRegs[i].offCpumCtx);
            hrc |= hv_vcpu_set_simd_fp_reg(pVCpu->nem.s.hVCpu, s_aCpumFpRegs[i].enmHvReg, *pu128);
        }
    }

    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_SYSREG_DEBUG))
    {
        /* Debug registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumDbgRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumDbgRegs[i].offCpumCtx);
            hrc |= hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumDbgRegs[i].enmHvReg, *pu64);
        }
    }

    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS))
    {
        /* PAuth key registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumPAuthKeyRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumPAuthKeyRegs[i].offCpumCtx);
            hrc |= hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumPAuthKeyRegs[i].enmHvReg, *pu64);
        }
    }

    if (   hrc == HV_SUCCESS
        && (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR | CPUMCTX_EXTRN_SYSREG_MISC))
        != (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR | CPUMCTX_EXTRN_SYSREG_MISC))
    {
        /* System registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegs); i++)
        {
            if (!(s_aCpumSysRegs[i].fCpumExtrn & pVCpu->cpum.GstCtx.fExtrn))
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegs[i].offCpumCtx);
                hrc |= hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumSysRegs[i].enmHvReg, *pu64);
            }
        }
    }

    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_SYSREG_EL2)
        && pVM->nem.s.fEl2Enabled)
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumEl2SysRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumEl2SysRegs[i].offCpumCtx);
            hrc |= hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, (hv_sys_reg_t)s_aCpumEl2SysRegs[i].idSysReg, *pu64);
            Assert(hrc == HV_SUCCESS);
        }
    }

    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_PSTATE))
        hrc = hv_vcpu_set_reg(pVCpu->nem.s.hVCpu, HV_REG_CPSR, pVCpu->cpum.GstCtx.fPState);

    pVCpu->cpum.GstCtx.fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_NEM;
    return nemR3DarwinHvSts2Rc(hrc);
}
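
/*
 * Editor's usage sketch: the export is the mirror image of the import above and runs right
 * before re-entering the guest, after which the whole context is again owned by HV (fExtrn
 * set to CPUMCTX_EXTRN_ALL). The sequence is hypothetical.
 *
 *      int rc = nemR3DarwinExportGuestState(pVM, pVCpu);
 *      AssertRCReturn(rc, rc);
 *      hv_return_t hrc = hv_vcpu_run(pVCpu->nem.s.hVCpu);
 */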


/**
 * Worker for nemR3NativeInit that loads the Hypervisor.framework shared library.
 *
 * @returns VBox status code.
 * @param   pErrInfo    Where to always return error info.
 */
static int nemR3DarwinLoadHv(PRTERRINFO pErrInfo)
{
    RTLDRMOD hMod = NIL_RTLDRMOD;
    static const char *s_pszHvPath = "/System/Library/Frameworks/Hypervisor.framework/Hypervisor";

    int rc = RTLdrLoadEx(s_pszHvPath, &hMod, RTLDRLOAD_FLAGS_NO_UNLOAD | RTLDRLOAD_FLAGS_NO_SUFFIX, pErrInfo);
    if (RT_SUCCESS(rc))
    {
        for (unsigned i = 0; i < RT_ELEMENTS(g_aImports); i++)
        {
            int rc2 = RTLdrGetSymbol(hMod, g_aImports[i].pszName, (void **)g_aImports[i].ppfn);
            if (RT_SUCCESS(rc2))
            {
                LogRel(("NEM: info: Found optional import Hypervisor!%s.\n",
                        g_aImports[i].pszName));
            }
            else
            {
                *g_aImports[i].ppfn = NULL;

                LogRel(("NEM: info: Failed to import Hypervisor!%s: %Rrc\n",
                        g_aImports[i].pszName, rc2));
            }
        }
        if (RT_SUCCESS(rc))
        {
            Assert(!RTErrInfoIsSet(pErrInfo));
        }

        RTLdrClose(hMod);
    }
    else
    {
        RTErrInfoAddF(pErrInfo, rc, "Failed to load Hypervisor.framework: %s: %Rrc", s_pszHvPath, rc);
        rc = VERR_NEM_INIT_FAILED;
    }

    return rc;
}


/**
 * Dumps some GIC information to the release log.
 */
static void nemR3DarwinDumpGicInfo(void)
{
    size_t val = 0;
    hv_return_t hrc = hv_gic_get_redistributor_size(&val);
    LogRel(("GICNem: hv_gic_get_redistributor_size() -> hrc=%#x / size=%zu\n", hrc, val));
    hrc = hv_gic_get_distributor_size(&val);
    LogRel(("GICNem: hv_gic_get_distributor_size() -> hrc=%#x / size=%zu\n", hrc, val));
    hrc = hv_gic_get_distributor_base_alignment(&val);
    LogRel(("GICNem: hv_gic_get_distributor_base_alignment() -> hrc=%#x / size=%zu\n", hrc, val));
    hrc = hv_gic_get_redistributor_base_alignment(&val);
    LogRel(("GICNem: hv_gic_get_redistributor_base_alignment() -> hrc=%#x / size=%zu\n", hrc, val));
    hrc = hv_gic_get_msi_region_base_alignment(&val);
    LogRel(("GICNem: hv_gic_get_msi_region_base_alignment() -> hrc=%#x / size=%zu\n", hrc, val));
    hrc = hv_gic_get_msi_region_size(&val);
    LogRel(("GICNem: hv_gic_get_msi_region_size() -> hrc=%#x / size=%zu\n", hrc, val));
    uint32_t u32SpiIntIdBase = 0;
    uint32_t cSpiIntIds = 0;
    hrc = hv_gic_get_spi_interrupt_range(&u32SpiIntIdBase, &cSpiIntIds);
    LogRel(("GICNem: hv_gic_get_spi_interrupt_range() -> hrc=%#x / SpiIntIdBase=%u, cSpiIntIds=%u\n", hrc, u32SpiIntIdBase, cSpiIntIds));

    uint32_t u32IntId = 0;
    hrc = hv_gic_get_intid(HV_GIC_INT_EL1_PHYSICAL_TIMER, &u32IntId);
    LogRel(("GICNem: hv_gic_get_intid(HV_GIC_INT_EL1_PHYSICAL_TIMER) -> hrc=%#x / IntId=%u\n", hrc, u32IntId));
    hrc = hv_gic_get_intid(HV_GIC_INT_EL1_VIRTUAL_TIMER, &u32IntId);
    LogRel(("GICNem: hv_gic_get_intid(HV_GIC_INT_EL1_VIRTUAL_TIMER) -> hrc=%#x / IntId=%u\n", hrc, u32IntId));
    hrc = hv_gic_get_intid(HV_GIC_INT_EL2_PHYSICAL_TIMER, &u32IntId);
    LogRel(("GICNem: hv_gic_get_intid(HV_GIC_INT_EL2_PHYSICAL_TIMER) -> hrc=%#x / IntId=%u\n", hrc, u32IntId));
    hrc = hv_gic_get_intid(HV_GIC_INT_MAINTENANCE, &u32IntId);
    LogRel(("GICNem: hv_gic_get_intid(HV_GIC_INT_MAINTENANCE) -> hrc=%#x / IntId=%u\n", hrc, u32IntId));
    hrc = hv_gic_get_intid(HV_GIC_INT_PERFORMANCE_MONITOR, &u32IntId);
    LogRel(("GICNem: hv_gic_get_intid(HV_GIC_INT_PERFORMANCE_MONITOR) -> hrc=%#x / IntId=%u\n", hrc, u32IntId));
}


/**
 * Sets the given SPI inside the in-kernel Hypervisor.framework GIC.
 *
 * @returns VBox status code.
 * @param   pVM         The VM instance.
 * @param   uIntId      The SPI ID to update.
 * @param   fAsserted   Flag whether the interrupt is asserted (true) or not (false).
 */
VMMR3_INT_DECL(int) GICR3NemSpiSet(PVMCC pVM, uint32_t uIntId, bool fAsserted)
{
    RT_NOREF(pVM);
    Assert(hv_gic_set_spi);

    hv_return_t hrc = hv_gic_set_spi(uIntId + GIC_INTID_RANGE_SPI_START, fAsserted);
    return nemR3DarwinHvSts2Rc(hrc);
}
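
/*
 * Editor's usage sketch: a device model raising and lowering SPI 5; the function adds
 * GIC_INTID_RANGE_SPI_START (32), so this targets GIC INTID 37. Caller and interrupt
 * number are hypothetical.
 *
 *      GICR3NemSpiSet(pVM, 5, true);   // assert the line
 *      GICR3NemSpiSet(pVM, 5, false);  // deassert it
 */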


/**
 * Sets the given PPI inside the in-kernel Hypervisor.framework GIC.
 *
 * @returns VBox status code.
 * @param   pVCpu       The vCPU for which the PPI state is updated.
 * @param   uIntId      The PPI ID to update.
 * @param   fAsserted   Flag whether the interrupt is asserted (true) or not (false).
 */
VMMR3_INT_DECL(int) GICR3NemPpiSet(PVMCPUCC pVCpu, uint32_t uIntId, bool fAsserted)
{
    RT_NOREF(pVCpu, uIntId, fAsserted);

    /* Should never be called as the PPIs are handled entirely in Hypervisor.framework/AppleHV. */
    AssertFailed();
    return VERR_NEM_IPE_9;
}


static int nemR3DarwinGicCreate(PVM pVM)
{
    nemR3DarwinDumpGicInfo();

    //PCFGMNODE pGicDev = CFGMR3GetChild(CFGMR3GetRoot(pVM), "Devices/gic/0");
    PCFGMNODE pGicCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "Devices/gic-nem/0/Config");
    AssertPtrReturn(pGicCfg, VERR_NEM_IPE_5);

    hv_gic_config_t hGicCfg = hv_gic_config_create();

    /*
     * Query the MMIO ranges.
     */
    RTGCPHYS GCPhysMmioBaseDist = 0;
    int rc = CFGMR3QueryU64(pGicCfg, "DistributorMmioBase", &GCPhysMmioBaseDist);
    if (RT_FAILURE(rc))
        return VMSetError(pVM, rc, RT_SRC_POS,
                          "Configuration error: Failed to get the \"DistributorMmioBase\" value\n");

    RTGCPHYS GCPhysMmioBaseReDist = 0;
    rc = CFGMR3QueryU64(pGicCfg, "RedistributorMmioBase", &GCPhysMmioBaseReDist);
    if (RT_FAILURE(rc))
        return VMSetError(pVM, rc, RT_SRC_POS,
                          "Configuration error: Failed to get the \"RedistributorMmioBase\" value\n");

    hv_return_t hrc = hv_gic_config_set_distributor_base(hGicCfg, GCPhysMmioBaseDist);
    if (hrc != HV_SUCCESS)
        return nemR3DarwinHvSts2Rc(hrc);

    hrc = hv_gic_config_set_redistributor_base(hGicCfg, GCPhysMmioBaseReDist);
    if (hrc != HV_SUCCESS)
        return nemR3DarwinHvSts2Rc(hrc);

    hrc = hv_gic_create(hGicCfg);
    os_release(hGicCfg);
    if (hrc != HV_SUCCESS)
        return nemR3DarwinHvSts2Rc(hrc);

    /* Make sure the device is not instantiated as Hypervisor.framework provides it. */
    //CFGMR3RemoveNode(pGicDev);
    return rc;
}


/**
 * Try initialize the native API.
 *
 * This may only do part of the job, more can be done in
 * nemR3NativeInitAfterCPUM() and nemR3NativeInitCompleted().
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   fFallback   Whether we're in fallback mode or use-NEM mode. In
 *                      the latter we'll fail if we cannot initialize.
 * @param   fForced     Whether the HMForced flag is set and we should
 *                      fail if we cannot initialize.
 */
int nemR3NativeInit(PVM pVM, bool fFallback, bool fForced)
{
    AssertReturn(!pVM->nem.s.fCreatedVm, VERR_WRONG_ORDER);

    /*
     * Some state init.
     */
    PCFGMNODE pCfgNem = CFGMR3GetChild(CFGMR3GetRoot(pVM), "NEM/");
    RT_NOREF(pCfgNem);

    /*
     * Error state.
     * The error message will be non-empty on failure and 'rc' will be set too.
     */
    RTERRINFOSTATIC ErrInfo;
    PRTERRINFO pErrInfo = RTErrInfoInitStatic(&ErrInfo);

    /* Resolve optional imports */
    int rc = nemR3DarwinLoadHv(pErrInfo);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Need to enable nested virt here if supported and reset the CFGM value to false
     * if not supported. This ASSUMES that NEM is initialized before CPUM.
     */
    PCFGMNODE pCfgCpum = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM/");
    hv_vm_config_t hVmCfg = NULL;

    if (   hv_vm_config_create
        && hv_vm_config_get_el2_supported)
    {
        hVmCfg = hv_vm_config_create();

        bool fHvEl2Supported = false;
        hv_return_t hrc = hv_vm_config_get_el2_supported(&fHvEl2Supported);
        if (   hrc == HV_SUCCESS
            && fHvEl2Supported)
        {
            /** @cfgm{/CPUM/NestedHWVirt, bool, false}
             * Whether to expose the hardware virtualization (EL2/VHE) feature to the guest.
             * The default is false. Only supported on M3 and later and macOS 15.0+ (Sequoia).
             */
            bool fNestedHWVirt = false;
            rc = CFGMR3QueryBoolDef(pCfgCpum, "NestedHWVirt", &fNestedHWVirt, false);
            AssertLogRelRCReturn(rc, rc);
            if (fNestedHWVirt)
            {
                hrc = hv_vm_config_set_el2_enabled(hVmCfg, fNestedHWVirt);
                if (hrc != HV_SUCCESS)
                    return VMSetError(pVM, VERR_CPUM_INVALID_HWVIRT_CONFIG, RT_SRC_POS,
                                      "Cannot enable nested virtualization (hrc=%#x)!\n", hrc);
                else
                {
                    pVM->nem.s.fEl2Enabled = true;
                    LogRel(("NEM: Enabled nested virtualization (EL2) support\n"));
                }
            }
        }
        else
        {
            /* Ensure nested virt is not set. */
            rc = CFGMR3RemoveValue(pCfgCpum, "NestedHWVirt");

            LogRel(("NEM: The host doesn't support nested virtualization! (hrc=%#x fHvEl2Supported=%RTbool)\n",
                    hrc, fHvEl2Supported));
        }
    }
    else
    {
        /* Ensure nested virt is not set. */
        rc = CFGMR3RemoveValue(pCfgCpum, "NestedHWVirt");
        LogRel(("NEM: Hypervisor.framework doesn't support nested virtualization!\n"));
    }

    hv_return_t hrc = hv_vm_create(hVmCfg);
    os_release(hVmCfg);
    if (hrc == HV_SUCCESS)
    {
        pVM->nem.s.fCreatedVm = true;
        pVM->nem.s.u64CntFrqHz = ASMReadCntFrqEl0();

        /* Will be initialized in NEMHCResumeCpuTickOnAll() before executing guest code. */
        pVM->nem.s.u64VTimerOff = 0;

        VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_NATIVE_API);
        Log(("NEM: Marked active!\n"));
        PGMR3EnableNemMode(pVM);
    }
    else
        rc = RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
                           "hv_vm_create() failed: %#x", hrc);

    /*
     * We only fail if in forced mode, otherwise just log the complaint and return.
     */
    Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API || RTErrInfoIsSet(pErrInfo));
    if (   (fForced || !fFallback)
        && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NATIVE_API)
        return VMSetError(pVM, RT_SUCCESS_NP(rc) ? VERR_NEM_NOT_AVAILABLE : rc, RT_SRC_POS, "%s", pErrInfo->pszMsg);

    if (RTErrInfoIsSet(pErrInfo))
        LogRel(("NEM: Not available: %s\n", pErrInfo->pszMsg));
    return VINF_SUCCESS;
}
1332
1333
1334/**
1335 * Worker to create the vCPU handle on the EMT running it later on (as required by HV).
1336 *
1337 * @returns VBox status code
1338 * @param pVM The VM handle.
1339 * @param pVCpu The vCPU handle.
1340 * @param idCpu ID of the CPU to create.
1341 */
1342static DECLCALLBACK(int) nemR3DarwinNativeInitVCpuOnEmt(PVM pVM, PVMCPU pVCpu, VMCPUID idCpu)
1343{
1344 if (idCpu == 0)
1345 {
1346 Assert(pVM->nem.s.hVCpuCfg == NULL);
1347
1348 /* Create a new vCPU config and query the ID registers. */
1349 pVM->nem.s.hVCpuCfg = hv_vcpu_config_create();
1350 if (!pVM->nem.s.hVCpuCfg)
1351 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
1352 "Call to hv_vcpu_config_create failed on vCPU %u", idCpu);
1353
1354 /* Query ID registers and hand them to CPUM. */
1355 CPUMIDREGS IdRegs; RT_ZERO(IdRegs);
1356 for (uint32_t i = 0; i < RT_ELEMENTS(s_aIdRegs); i++)
1357 {
1358 uint64_t *pu64 = (uint64_t *)((uint8_t *)&IdRegs + s_aIdRegs[i].offIdStruct);
1359 hv_return_t hrc = hv_vcpu_config_get_feature_reg(pVM->nem.s.hVCpuCfg, s_aIdRegs[i].enmHvReg, pu64);
1360 if (hrc != HV_SUCCESS)
1361 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
1362 "Call to hv_vcpu_get_feature_reg(, %#x, ) failed: %#x (%Rrc)", hrc, nemR3DarwinHvSts2Rc(hrc));
1363 }
1364
1365 int rc = CPUMR3PopulateFeaturesByIdRegisters(pVM, &IdRegs);
1366 if (RT_FAILURE(rc))
1367 return rc;
1368 }
1369
1370 hv_return_t hrc = hv_vcpu_create(&pVCpu->nem.s.hVCpu, &pVCpu->nem.s.pHvExit, pVM->nem.s.hVCpuCfg);
1371 if (hrc != HV_SUCCESS)
1372 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
1373 "Call to hv_vcpu_create failed on vCPU %u: %#x (%Rrc)", idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));
1374
1375 hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_MPIDR_EL1, idCpu);
1376 if (hrc != HV_SUCCESS)
1377 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
1378 "Setting MPIDR_EL1 failed on vCPU %u: %#x (%Rrc)", idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));
1379
1380 return VINF_SUCCESS;
1381}
1382
1383
1384/**
1385 * Worker to destroy the vCPU handle on the EMT running it later on (as required by HV).
1386 *
1387 * @returns VBox status code.
1388 * @param pVM The VM handle.
1389 * @param pVCpu The vCPU handle.
1390 */
1391static DECLCALLBACK(int) nemR3DarwinNativeTermVCpuOnEmt(PVM pVM, PVMCPU pVCpu)
1392{
1393 hv_return_t hrc = hv_vcpu_destroy(pVCpu->nem.s.hVCpu);
1394 Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);
1395
1396 if (pVCpu->idCpu == 0)
1397 {
1398 os_release(pVM->nem.s.hVCpuCfg);
1399 pVM->nem.s.hVCpuCfg = NULL;
1400 }
1401 return VINF_SUCCESS;
1402}
1403
1404
1405/**
1406 * This is called after CPUMR3Init is done.
1407 *
1408 * @returns VBox status code.
1409 * @param pVM The VM handle.
1410 */
1411int nemR3NativeInitAfterCPUM(PVM pVM)
1412{
1413 /*
1414 * Validate sanity.
1415 */
1416 AssertReturn(!pVM->nem.s.fCreatedEmts, VERR_WRONG_ORDER);
1417 AssertReturn(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API, VERR_WRONG_ORDER);
1418
1419 /*
1420 * If the NEM variant of the GIC is configured we need to create it here,
1421 * because according to the Apple docs it must exist before any vCPU is created.
1422 */
1423 if ( hv_gic_create
1424 && CFGMR3GetChild(CFGMR3GetRoot(pVM), "Devices/gic-nem/0"))
1425 {
1426 int rc = nemR3DarwinGicCreate(pVM);
1427 if (RT_FAILURE(rc))
1428 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS, "Creating the GIC failed: %Rrc", rc);
1429 }
1430
1431 /*
1432 * Setup the EMTs.
1433 */
1434 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1435 {
1436 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
1437
1438 int rc = VMR3ReqCallWait(pVM, idCpu, (PFNRT)nemR3DarwinNativeInitVCpuOnEmt, 3, pVM, pVCpu, idCpu);
1439 if (RT_FAILURE(rc))
1440 {
1441 /* Rollback. */
1442 while (idCpu--)
1443 VMR3ReqCallWait(pVM, idCpu, (PFNRT)nemR3DarwinNativeTermVCpuOnEmt, 2, pVM, pVCpu);
1444
1445 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS, "Call to hv_vcpu_create failed: %Rrc", rc);
1446 }
1447 }
1448
1449 pVM->nem.s.fCreatedEmts = true;
1450 return VINF_SUCCESS;
1451}
1452
1453
1454int nemR3NativeInitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
1455{
1456 RT_NOREF(pVM, enmWhat);
1457 return VINF_SUCCESS;
1458}
1459
1460
1461int nemR3NativeTerm(PVM pVM)
1462{
1463 /*
1464 * Delete the VM.
1465 */
1466
1467 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1468 {
1469 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
1470
1471 /*
1472 * Apple's documentation states that the vCPU should be destroyed
1473 * on the thread running the vCPU but as all the other EMTs are gone
1474 * at this point, destroying the VM would hang.
1475 *
1476 * We seem to be in luck here though, as destroying apparently works
1477 * from EMT(0) as well.
1478 */
1479 hv_return_t hrc = hv_vcpu_destroy(pVCpu->nem.s.hVCpu);
1480 Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);
1481 }
1482
1483 pVM->nem.s.fCreatedEmts = false;
1484 if (pVM->nem.s.fCreatedVm)
1485 {
1486 hv_return_t hrc = hv_vm_destroy();
1487 if (hrc != HV_SUCCESS)
1488 LogRel(("NEM: hv_vm_destroy() failed with %#x\n", hrc));
1489
1490 pVM->nem.s.fCreatedVm = false;
1491 }
1492 return VINF_SUCCESS;
1493}
1494
1495
1496/**
1497 * VM reset notification.
1498 *
1499 * @param pVM The cross context VM structure.
1500 */
1501void nemR3NativeReset(PVM pVM)
1502{
1503 RT_NOREF(pVM);
1504}
1505
1506
1507/**
1508 * Reset CPU due to INIT IPI or hot (un)plugging.
1509 *
1510 * @param pVCpu The cross context virtual CPU structure of the CPU being
1511 * reset.
1512 * @param fInitIpi Whether this is the INIT IPI or hot (un)plugging case.
1513 */
1514void nemR3NativeResetCpu(PVMCPU pVCpu, bool fInitIpi)
1515{
1516 RT_NOREF(pVCpu, fInitIpi);
1517}
1518
1519
1520/**
1521 * Returns the byte size from the given access SAS value.
1522 *
1523 * @returns Number of bytes to transfer.
1524 * @param uSas The SAS value to convert.
1525 */
1526DECLINLINE(size_t) nemR3DarwinGetByteCountFromSas(uint8_t uSas)
1527{
1528 switch (uSas)
1529 {
1530 case ARMV8_EC_ISS_DATA_ABRT_SAS_BYTE: return sizeof(uint8_t);
1531 case ARMV8_EC_ISS_DATA_ABRT_SAS_HALFWORD: return sizeof(uint16_t);
1532 case ARMV8_EC_ISS_DATA_ABRT_SAS_WORD: return sizeof(uint32_t);
1533 case ARMV8_EC_ISS_DATA_ABRT_SAS_DWORD: return sizeof(uint64_t);
1534 default:
1535 AssertReleaseFailed();
1536 }
1537
1538 return 0;
1539}
1540
1541
1542/**
1543 * Sets the given general purpose register to the given value.
1544 *
1545 * @param pVCpu The cross context virtual CPU structure of the
1546 * calling EMT.
1547 * @param uReg The register index.
1548 * @param f64BitReg Flag whether to operate on a 64-bit or 32-bit register.
1549 * @param fSignExtend Flag whether to sign extend the value.
1550 * @param u64Val The value.
1551 */
1552DECLINLINE(void) nemR3DarwinSetGReg(PVMCPU pVCpu, uint8_t uReg, bool f64BitReg, bool fSignExtend, uint64_t u64Val)
1553{
1554 AssertReturnVoid(uReg < 31);
1555
1556 if (f64BitReg)
1557 pVCpu->cpum.GstCtx.aGRegs[uReg].x = fSignExtend ? (int64_t)u64Val : u64Val;
1558 else
1559 pVCpu->cpum.GstCtx.aGRegs[uReg].w = fSignExtend ? (int32_t)u64Val : u64Val; /** @todo Does this clear the upper half on real hardware? */
1560
1561 /* Mark the register as not extern anymore. */
1562 switch (uReg)
1563 {
1564 case 0:
1565 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X0;
1566 break;
1567 case 1:
1568 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X1;
1569 break;
1570 case 2:
1571 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X2;
1572 break;
1573 case 3:
1574 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X3;
1575 break;
1576 default:
1577 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_X4_X28));
1578 /** @todo We need to import all missing registers in order to clear this flag (or just set it in HV from here). */
1579 }
1580}
1581
1582
1583/**
1584 * Gets the given general purpose register and returns the value.
1585 *
1586 * @returns Value from the given register.
1587 * @param pVCpu The cross context virtual CPU structure of the
1588 * calling EMT.
1589 * @param uReg The register index.
1590 */
1591DECLINLINE(uint64_t) nemR3DarwinGetGReg(PVMCPU pVCpu, uint8_t uReg)
1592{
1593 AssertReturn(uReg <= ARMV8_AARCH64_REG_ZR, 0);
1594
1595 if (uReg == ARMV8_AARCH64_REG_ZR)
1596 return 0;
1597
1598 /** @todo Import the register if extern. */
1599 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_GPRS_MASK));
1600
1601 return pVCpu->cpum.GstCtx.aGRegs[uReg].x;
1602}
1603
1604
1605/**
1606 * Works on the data abort exception (which will be an MMIO access most of the time).
1607 *
1608 * @returns VBox strict status code.
1609 * @param pVM The cross context VM structure.
1610 * @param pVCpu The cross context virtual CPU structure of the
1611 * calling EMT.
1612 * @param uIss The instruction specific syndrome value.
1613 * @param fInsn32Bit Flag whether the exception was caused by a 32-bit or 16-bit instruction.
1614 * @param GCPtrDataAbrt The virtual GC address causing the data abort.
1615 * @param GCPhysDataAbrt The physical GC address which caused the data abort.
1616 */
1617static VBOXSTRICTRC nemR3DarwinHandleExitExceptionDataAbort(PVM pVM, PVMCPU pVCpu, uint32_t uIss, bool fInsn32Bit,
1618 RTGCPTR GCPtrDataAbrt, RTGCPHYS GCPhysDataAbrt)
1619{
1620 bool fIsv = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_ISV);
1621 bool fL2Fault = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_S1PTW);
1622 bool fWrite = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_WNR);
1623 bool f64BitReg = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SF);
1624 bool fSignExtend = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SSE);
1625 uint8_t uReg = ARMV8_EC_ISS_DATA_ABRT_SRT_GET(uIss);
1626 uint8_t uAcc = ARMV8_EC_ISS_DATA_ABRT_SAS_GET(uIss);
1627 size_t cbAcc = nemR3DarwinGetByteCountFromSas(uAcc);
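/* Note: ISV=1 means the syndrome fully describes the access (register, width, sign extension),
 * so we can emulate it directly below; without it we have to fetch and decode the instruction ourselves. */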
1628 LogFlowFunc(("fIsv=%RTbool fL2Fault=%RTbool fWrite=%RTbool f64BitReg=%RTbool fSignExtend=%RTbool uReg=%u uAcc=%u GCPtrDataAbrt=%RGv GCPhysDataAbrt=%RGp\n",
1629 fIsv, fL2Fault, fWrite, f64BitReg, fSignExtend, uReg, uAcc, GCPtrDataAbrt, GCPhysDataAbrt));
1630
1631 RT_NOREF(fL2Fault, GCPtrDataAbrt);
1632
1633 if (fWrite)
1634 {
1635 /*
1636 * Check whether this is one of the dirty tracked regions, mark it as dirty
1637 * and enable write support for this region again.
1638 *
1639 * This is required for proper VRAM tracking, or the display might not get updated.
1640 * It is impossible to use the generic PGM facility here as it operates on guest page sizes,
1641 * while setting protection flags with Hypervisor.framework works only on host-page-sized regions,
1642 * so we have to cook our own. Additionally the VRAM region is marked as prefetchable (write-back),
1643 * which doesn't produce a valid instruction syndrome, requiring us to restart the instruction after
1644 * enabling write access again (due to a missing interpreter right now).
1645 */
1646 for (uint32_t idSlot = 0; idSlot < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking); idSlot++)
1647 {
1648 PNEMHVMMIO2REGION pMmio2Region = &pVM->nem.s.aMmio2DirtyTracking[idSlot];
1649
1650 if ( GCPhysDataAbrt >= pMmio2Region->GCPhysStart
1651 && GCPhysDataAbrt <= pMmio2Region->GCPhysLast)
1652 {
1653 pMmio2Region->fDirty = true;
1654
1655 uint8_t u2State;
1656 int rc = nemR3DarwinProtect(pMmio2Region->GCPhysStart, pMmio2Region->GCPhysLast - pMmio2Region->GCPhysStart + 1,
1657 NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE | NEM_PAGE_PROT_WRITE, &u2State);
1658
1659 /* Restart the instruction if there is no instruction syndrome available. */
1660 if (RT_FAILURE(rc) || !fIsv)
1661 return rc;
1662 }
1663 }
1664 }
1665
1666 VBOXSTRICTRC rcStrict;
1667 if (fIsv)
1668 {
1669 EMHistoryAddExit(pVCpu,
1670 fWrite
1671 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
1672 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
1673 pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());
1674
1675 uint64_t u64Val = 0;
1676 if (fWrite)
1677 {
1678 u64Val = nemR3DarwinGetGReg(pVCpu, uReg);
1679 rcStrict = PGMPhysWrite(pVM, GCPhysDataAbrt, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
1680 Log4(("MmioExit/%u: %08RX64: WRITE %#RGp LB %u, %.*Rhxs -> rcStrict=%Rrc\n",
1681 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhysDataAbrt, cbAcc, cbAcc,
1682 &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
1683 }
1684 else
1685 {
1686 rcStrict = PGMPhysRead(pVM, GCPhysDataAbrt, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
1687 Log4(("MmioExit/%u: %08RX64: READ %#RGp LB %u -> %.*Rhxs rcStrict=%Rrc\n",
1688 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhysDataAbrt, cbAcc, cbAcc,
1689 &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
1690 if (rcStrict == VINF_SUCCESS)
1691 nemR3DarwinSetGReg(pVCpu, uReg, f64BitReg, fSignExtend, u64Val);
1692 }
1693 }
1694 else
1695 {
1696 /** @todo Our UEFI firmware accesses the flash region with the following instruction
1697 * when the NVRAM actually contains data:
1698 * ldrb w9, [x6, #-0x0001]!
1699 * This is too complicated for the hardware so the ISV bit is not set. Until there
1700 * is a proper IEM implementation we just handle this here for now to avoid annoying
1701 * users too much.
1702 */
1703 /* The following ASSUMES that the vCPU state is completely synced. */
1704
1705 /* Read instruction. */
1706 RTGCPTR GCPtrPage = pVCpu->cpum.GstCtx.Pc.u64 & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK;
1707 const void *pvPageR3 = NULL;
1708 PGMPAGEMAPLOCK PageMapLock;
1709
1710 rcStrict = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrPage, &pvPageR3, &PageMapLock);
1711 if (rcStrict == VINF_SUCCESS)
1712 {
1713 uint32_t u32Instr = *(uint32_t *)((uint8_t *)pvPageR3 + (pVCpu->cpum.GstCtx.Pc.u64 - GCPtrPage));
1714 PGMPhysReleasePageMappingLock(pVCpu->pVMR3, &PageMapLock);
1715
1716 DISSTATE Dis;
1717 rcStrict = DISInstrWithPrefetchedBytes((uintptr_t)pVCpu->cpum.GstCtx.Pc.u64, DISCPUMODE_ARMV8_A64, 0 /*fFilter - none */,
1718 &u32Instr, sizeof(u32Instr), NULL, NULL, &Dis, NULL);
1719 if (rcStrict == VINF_SUCCESS)
1720 {
1721 if ( Dis.pCurInstr->uOpcode == OP_ARMV8_A64_LDRB
1722 && Dis.aParams[0].armv8.enmType == kDisArmv8OpParmReg
1723 && Dis.aParams[0].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_32Bit
1724 && Dis.aParams[1].armv8.enmType == kDisArmv8OpParmAddrInGpr
1725 && Dis.aParams[1].armv8.Op.Reg.enmRegType == kDisOpParamArmV8RegType_Gpr_64Bit
1726 && (Dis.aParams[1].fUse & DISUSE_PRE_INDEXED))
1727 {
1728 /* The fault address is already the final address. */
1729 uint8_t bVal = 0;
1730 rcStrict = PGMPhysRead(pVM, GCPhysDataAbrt, &bVal, 1, PGMACCESSORIGIN_HM);
1731 Log4(("MmioExit/%u: %08RX64: READ %#RGp LB %u -> %.*Rhxs rcStrict=%Rrc\n",
1732 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhysDataAbrt, sizeof(bVal), sizeof(bVal),
1733 &bVal, VBOXSTRICTRC_VAL(rcStrict) ));
1734 if (rcStrict == VINF_SUCCESS)
1735 {
1736 nemR3DarwinSetGReg(pVCpu, Dis.aParams[0].armv8.Op.Reg.idReg, false /*f64BitReg*/, false /*fSignExtend*/, bVal);
1737 /* Update the indexed register. */
1738 pVCpu->cpum.GstCtx.aGRegs[Dis.aParams[1].armv8.Op.Reg.idReg].x += Dis.aParams[1].armv8.u.offBase;
1739 }
1740 }
1741 else
1742 AssertFailedReturn(VERR_NOT_SUPPORTED);
1743 }
1744 }
1745 }
1746
1747 if (rcStrict == VINF_SUCCESS)
1748 pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);
1749
1750 return rcStrict;
1751}
1752
1753
1754/**
1755 * Works on the trapped MRS, MSR and system instruction exception.
1756 *
1757 * @returns VBox strict status code.
1758 * @param pVM The cross context VM structure.
1759 * @param pVCpu The cross context virtual CPU structure of the
1760 * calling EMT.
1761 * @param uIss The instruction specific syndrome value.
1762 * @param fInsn32Bit Flag whether the exception was caused by a 32-bit or 16-bit instruction.
1763 */
1764static VBOXSTRICTRC nemR3DarwinHandleExitExceptionTrappedSysInsn(PVM pVM, PVMCPU pVCpu, uint32_t uIss, bool fInsn32Bit)
1765{
1766 bool fRead = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_DIRECTION_IS_READ(uIss);
1767 uint8_t uCRm = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_CRM_GET(uIss);
1768 uint8_t uReg = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_RT_GET(uIss);
1769 uint8_t uCRn = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_CRN_GET(uIss);
1770 uint8_t uOp1 = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP1_GET(uIss);
1771 uint8_t uOp2 = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP2_GET(uIss);
1772 uint8_t uOp0 = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP0_GET(uIss);
1773 uint16_t idSysReg = ARMV8_AARCH64_SYSREG_ID_CREATE(uOp0, uOp1, uCRn, uCRm, uOp2);
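/* Note: the (op0,op1,CRn,CRm,op2) tuple from the syndrome uniquely identifies an AArch64 system
 * register; packing it into a single ID lets CPUM look the register up with one key. */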
1774 LogFlowFunc(("fRead=%RTbool uCRm=%u uReg=%u uCRn=%u uOp1=%u uOp2=%u uOp0=%u idSysReg=%#x\n",
1775 fRead, uCRm, uReg, uCRn, uOp1, uOp2, uOp0, idSysReg));
1776
1777 /** @todo EMEXITTYPE_MSR_READ/EMEXITTYPE_MSR_WRITE are misnomers. */
1778 EMHistoryAddExit(pVCpu,
1779 fRead
1780 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ)
1781 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE),
1782 pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());
1783
1784 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1785 uint64_t u64Val = 0;
1786 if (fRead)
1787 {
1788 RT_NOREF(pVM);
1789 rcStrict = CPUMQueryGuestSysReg(pVCpu, idSysReg, &u64Val);
1790 Log4(("SysInsnExit/%u: %08RX64: READ %u:%u:%u:%u:%u -> %#RX64 rcStrict=%Rrc\n",
1791 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, uOp0, uOp1, uCRn, uCRm, uOp2, u64Val,
1792 VBOXSTRICTRC_VAL(rcStrict) ));
1793 if (rcStrict == VINF_SUCCESS)
1794 nemR3DarwinSetGReg(pVCpu, uReg, true /*f64BitReg*/, false /*fSignExtend*/, u64Val);
1795 }
1796 else
1797 {
1798 u64Val = nemR3DarwinGetGReg(pVCpu, uReg);
1799 rcStrict = CPUMSetGuestSysReg(pVCpu, idSysReg, u64Val);
1800 Log4(("SysInsnExit/%u: %08RX64: WRITE %u:%u:%u:%u:%u %#RX64 -> rcStrict=%Rrc\n",
1801 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, uOp0, uOp1, uCRn, uCRm, uOp2, u64Val,
1802 VBOXSTRICTRC_VAL(rcStrict) ));
1803 }
1804
1805 if (rcStrict == VINF_SUCCESS)
1806 pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);
1807
1808 return rcStrict;
1809}
1810
1811
1812/**
1813 * Works on the trapped HVC instruction exception.
1814 *
1815 * @returns VBox strict status code.
1816 * @param pVM The cross context VM structure.
1817 * @param pVCpu The cross context virtual CPU structure of the
1818 * calling EMT.
1819 * @param uIss The instruction specific syndrome value.
1820 * @param fAdvancePc Flag whether to advance the guest program counter.
1821 */
1822static VBOXSTRICTRC nemR3DarwinHandleExitExceptionTrappedHvcInsn(PVM pVM, PVMCPU pVCpu, uint32_t uIss, bool fAdvancePc = false)
1823{
1824 uint16_t u16Imm = ARMV8_EC_ISS_AARCH64_TRAPPED_HVC_INSN_IMM_GET(uIss);
1825 LogFlowFunc(("u16Imm=%#RX16\n", u16Imm));
1826
1827#if 0 /** @todo For later */
1828 EMHistoryAddExit(pVCpu,
1829 fRead
1830 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ)
1831 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE),
1832 pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());
1833#endif
1834
1835 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1836 if (u16Imm == 0)
1837 {
1838 /** @todo Raise exception to EL1 if PSCI not configured. */
1839 /** @todo Need a generic mechanism here to pass this on to, GIM maybe? */
1840 uint32_t uFunId = pVCpu->cpum.GstCtx.aGRegs[ARMV8_AARCH64_REG_X0].w;
1841 bool fHvc64 = RT_BOOL(uFunId & ARM_SMCCC_FUNC_ID_64BIT); RT_NOREF(fHvc64);
1842 uint32_t uEntity = ARM_SMCCC_FUNC_ID_ENTITY_GET(uFunId);
1843 uint32_t uFunNum = ARM_SMCCC_FUNC_ID_NUM_GET(uFunId);
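/* Note: per the SMC Calling Convention the function ID in X0 encodes the register width
 * (bit 30), the owning entity (bits 29:24) and the function number (bits 15:0);
 * the PSCI calls handled below live in the standard secure service entity. */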
1844 if (uEntity == ARM_SMCCC_FUNC_ID_ENTITY_STD_SEC_SERVICE)
1845 {
1846 switch (uFunNum)
1847 {
1848 case ARM_PSCI_FUNC_ID_PSCI_VERSION:
1849 nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_FUNC_ID_PSCI_VERSION_SET(1, 2));
1850 break;
1851 case ARM_PSCI_FUNC_ID_SYSTEM_OFF:
1852 rcStrict = VMR3PowerOff(pVM->pUVM);
1853 break;
1854 case ARM_PSCI_FUNC_ID_SYSTEM_RESET:
1855 case ARM_PSCI_FUNC_ID_SYSTEM_RESET2:
1856 {
1857 bool fHaltOnReset;
1858 int rc = CFGMR3QueryBool(CFGMR3GetChild(CFGMR3GetRoot(pVM), "PDM"), "HaltOnReset", &fHaltOnReset);
1859 if (RT_SUCCESS(rc) && fHaltOnReset)
1860 {
1861 Log(("nemR3DarwinHandleExitExceptionTrappedHvcInsn: Halt On Reset!\n"));
1862 rcStrict = VINF_EM_HALT;
1863 }
1864 else
1865 {
1866 /** @todo pVM->pdm.s.fResetFlags = fFlags; */
1867 VM_FF_SET(pVM, VM_FF_RESET);
1868 rcStrict = VINF_EM_RESET;
1869 }
1870 break;
1871 }
1872 case ARM_PSCI_FUNC_ID_CPU_ON:
1873 {
1874 uint64_t u64TgtCpu = nemR3DarwinGetGReg(pVCpu, ARMV8_AARCH64_REG_X1);
1875 RTGCPHYS GCPhysExecAddr = nemR3DarwinGetGReg(pVCpu, ARMV8_AARCH64_REG_X2);
1876 uint64_t u64CtxId = nemR3DarwinGetGReg(pVCpu, ARMV8_AARCH64_REG_X3);
1877 VMMR3CpuOn(pVM, u64TgtCpu & 0xff, GCPhysExecAddr, u64CtxId);
1878 nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, true /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_STS_SUCCESS);
1879 break;
1880 }
1881 case ARM_PSCI_FUNC_ID_PSCI_FEATURES:
1882 {
1883 uint32_t u32FunNum = (uint32_t)nemR3DarwinGetGReg(pVCpu, ARMV8_AARCH64_REG_X1);
1884 switch (u32FunNum)
1885 {
1886 case ARM_PSCI_FUNC_ID_PSCI_VERSION:
1887 case ARM_PSCI_FUNC_ID_SYSTEM_OFF:
1888 case ARM_PSCI_FUNC_ID_SYSTEM_RESET:
1889 case ARM_PSCI_FUNC_ID_SYSTEM_RESET2:
1890 case ARM_PSCI_FUNC_ID_CPU_ON:
1891 case ARM_PSCI_FUNC_ID_MIGRATE_INFO_TYPE:
1892 nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0,
1893 false /*f64BitReg*/, false /*fSignExtend*/,
1894 (uint64_t)ARM_PSCI_STS_SUCCESS);
1895 break;
1896 default:
1897 nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0,
1898 false /*f64BitReg*/, false /*fSignExtend*/,
1899 (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
1900 }
1901 break;
1902 }
1903 case ARM_PSCI_FUNC_ID_MIGRATE_INFO_TYPE:
1904 nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_MIGRATE_INFO_TYPE_TOS_NOT_PRESENT);
1905 break;
1906 default:
1907 nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
1908 }
1909 }
1910 else
1911 nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
1912 }
1913
1914 /** @todo What to do if immediate is != 0? */
1915
1916 if ( rcStrict == VINF_SUCCESS
1917 && fAdvancePc)
1918 pVCpu->cpum.GstCtx.Pc.u64 += sizeof(uint32_t);
1919
1920 return rcStrict;
1921}
1922
1923
1924/**
1925 * Handles an exception VM exit.
1926 *
1927 * @returns VBox strict status code.
1928 * @param pVM The cross context VM structure.
1929 * @param pVCpu The cross context virtual CPU structure of the
1930 * calling EMT.
1931 * @param pExit Pointer to the exit information.
1932 */
1933static VBOXSTRICTRC nemR3DarwinHandleExitException(PVM pVM, PVMCPU pVCpu, const hv_vcpu_exit_t *pExit)
1934{
1935 uint32_t uEc = ARMV8_ESR_EL2_EC_GET(pExit->exception.syndrome);
1936 uint32_t uIss = ARMV8_ESR_EL2_ISS_GET(pExit->exception.syndrome);
1937 bool fInsn32Bit = ARMV8_ESR_EL2_IL_IS_32BIT(pExit->exception.syndrome);
1938
1939 LogFlowFunc(("pVM=%p pVCpu=%p{.idCpu=%u} uEc=%u{%s} uIss=%#RX32 fInsn32Bit=%RTbool\n",
1940 pVM, pVCpu, pVCpu->idCpu, uEc, nemR3DarwinEsrEl2EcStringify(uEc), uIss, fInsn32Bit));
1941
1942 switch (uEc)
1943 {
1944 case ARMV8_ESR_EL2_DATA_ABORT_FROM_LOWER_EL:
1945 return nemR3DarwinHandleExitExceptionDataAbort(pVM, pVCpu, uIss, fInsn32Bit, pExit->exception.virtual_address,
1946 pExit->exception.physical_address);
1947 case ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_SYS_INSN:
1948 return nemR3DarwinHandleExitExceptionTrappedSysInsn(pVM, pVCpu, uIss, fInsn32Bit);
1949 case ARMV8_ESR_EL2_EC_AARCH64_HVC_INSN:
1950 return nemR3DarwinHandleExitExceptionTrappedHvcInsn(pVM, pVCpu, uIss);
1951 case ARMV8_ESR_EL2_EC_AARCH64_SMC_INSN:
1952 return nemR3DarwinHandleExitExceptionTrappedHvcInsn(pVM, pVCpu, uIss, true);
1953 case ARMV8_ESR_EL2_EC_TRAPPED_WFX:
1954 {
1955 /* No need to halt if there is an interrupt pending already. */
1956 if (VMCPU_FF_IS_ANY_SET(pVCpu, (VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ)))
1957 {
1958 LogFlowFunc(("IRQ | FIQ set => VINF_SUCCESS\n"));
1959 pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);
1960 return VINF_SUCCESS;
1961 }
1962
1963 /* Set the vTimer expiration in order to get out of the halt at the right point in time. */
1964 if ( (pVCpu->cpum.GstCtx.CntvCtlEl0 & ARMV8_CNTV_CTL_EL0_AARCH64_ENABLE)
1965 && !(pVCpu->cpum.GstCtx.CntvCtlEl0 & ARMV8_CNTV_CTL_EL0_AARCH64_IMASK))
1966 {
1967 uint64_t cTicksVTimer = mach_absolute_time() - pVM->nem.s.u64VTimerOff;
1968
1969 /* Check whether it expired and start executing guest code. */
1970 if (cTicksVTimer >= pVCpu->cpum.GstCtx.CntvCValEl0)
1971 {
1972 LogFlowFunc(("Guest timer expired (cTicksVTimer=%RU64 CntvCValEl0=%RU64) => VINF_SUCCESS\n",
1973 cTicksVTimer, pVCpu->cpum.GstCtx.CntvCValEl0));
1974 pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);
1975 return VINF_SUCCESS;
1976 }
1977
1978 uint64_t cTicksVTimerToExpire = pVCpu->cpum.GstCtx.CntvCValEl0 - cTicksVTimer;
1979 uint64_t cNanoSecsVTimerToExpire = ASMMultU64ByU32DivByU32(cTicksVTimerToExpire, RT_NS_1SEC, (uint32_t)pVM->nem.s.u64CntFrqHz);
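/* Note: this converts the remaining timer ticks to nanoseconds as ticks * 10^9 / CNTFRQ;
 * the helper uses a wide intermediate so the multiplication cannot overflow. */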
1980
1981 /*
1982 * Our halt method doesn't work with sub-millisecond granularity at the moment, causing a huge slowdown
1983 * + scheduling overhead which would increase the wakeup latency.
1984 * So only halt when the threshold is exceeded (needs more experimentation, but 2ms turned out to be a good compromise
1985 * between CPU load when the guest is idle and performance).
1986 */
1987 if (cNanoSecsVTimerToExpire < 2 * RT_NS_1MS)
1988 {
1989 LogFlowFunc(("Guest timer expiration < 2ms (cNanoSecsVTimerToExpire=%RU64) => VINF_SUCCESS\n",
1990 cNanoSecsVTimerToExpire));
1991 pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);
1992 return VINF_SUCCESS;
1993 }
1994
1995 LogFlowFunc(("Set vTimer activation to cNanoSecsVTimerToExpire=%#RX64 (CntvCValEl0=%#RX64, u64VTimerOff=%#RX64 cTicksVTimer=%#RX64 u64CntFrqHz=%#RX64)\n",
1996 cNanoSecsVTimerToExpire, pVCpu->cpum.GstCtx.CntvCValEl0, pVM->nem.s.u64VTimerOff, cTicksVTimer, pVM->nem.s.u64CntFrqHz));
1997 TMCpuSetVTimerNextActivation(pVCpu, cNanoSecsVTimerToExpire);
1998 }
1999 else
2000 TMCpuSetVTimerNextActivation(pVCpu, UINT64_MAX);
2001
2002 pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);
2003 return VINF_EM_HALT;
2004 }
2005 case ARMV8_ESR_EL2_EC_AARCH64_BRK_INSN:
2006 {
2007 VBOXSTRICTRC rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx);
2008 /** @todo Forward genuine guest traps to the guest by either single stepping instruction with debug exception trapping turned off
2009 * or create instruction interpreter and inject exception ourselves. */
2010 Assert(rcStrict == VINF_EM_DBG_BREAKPOINT);
2011 return rcStrict;
2012 }
2013 case ARMV8_ESR_EL2_SS_EXCEPTION_FROM_LOWER_EL:
2014 return VINF_EM_DBG_STEPPED;
2015 case ARMV8_ESR_EL2_EC_UNKNOWN:
2016 default:
2017 LogRel(("NEM/Darwin: Unknown Exception Class in syndrome: uEc=%u{%s} uIss=%#RX32 fInsn32Bit=%RTbool\n",
2018 uEc, nemR3DarwinEsrEl2EcStringify(uEc), uIss, fInsn32Bit));
2019 AssertReleaseFailed();
2020 return VERR_NOT_IMPLEMENTED;
2021 }
2022
2023 return VINF_SUCCESS;
2024}
2025
2026
2027/**
2028 * Handles an exit from hv_vcpu_run().
2029 *
2030 * @returns VBox strict status code.
2031 * @param pVM The cross context VM structure.
2032 * @param pVCpu The cross context virtual CPU structure of the
2033 * calling EMT.
2034 */
2035static VBOXSTRICTRC nemR3DarwinHandleExit(PVM pVM, PVMCPU pVCpu)
2036{
2037 int rc = nemR3DarwinCopyStateFromHv(pVM, pVCpu, CPUMCTX_EXTRN_ALL);
2038 if (RT_FAILURE(rc))
2039 return rc;
2040
2041#ifdef LOG_ENABLED
2042 if (LogIs3Enabled())
2043 nemR3DarwinLogState(pVM, pVCpu);
2044#endif
2045
2046 hv_vcpu_exit_t *pExit = pVCpu->nem.s.pHvExit;
2047 switch (pExit->reason)
2048 {
2049 case HV_EXIT_REASON_CANCELED:
2050 return VINF_EM_RAW_INTERRUPT;
2051 case HV_EXIT_REASON_EXCEPTION:
2052 return nemR3DarwinHandleExitException(pVM, pVCpu, pExit);
2053 case HV_EXIT_REASON_VTIMER_ACTIVATED:
2054 {
2055 LogFlowFunc(("vTimer got activated\n"));
2056 TMCpuSetVTimerNextActivation(pVCpu, UINT64_MAX);
2057 pVCpu->nem.s.fVTimerActivated = true;
2058 return GICPpiSet(pVCpu, pVM->nem.s.u32GicPpiVTimer, true /*fAsserted*/);
2059 }
2060 default:
2061 AssertReleaseFailed();
2062 break;
2063 }
2064
2065 return VERR_INVALID_STATE;
2066}
2067
2068
2069/**
2070 * Runs the guest once until an exit occurs.
2071 *
2072 * @returns HV status code.
2073 * @param pVM The cross context VM structure.
2074 * @param pVCpu The cross context virtual CPU structure.
2075 */
2076static hv_return_t nemR3DarwinRunGuest(PVM pVM, PVMCPU pVCpu)
2077{
2078 TMNotifyStartOfExecution(pVM, pVCpu);
2079
2080 hv_return_t hrc = hv_vcpu_run(pVCpu->nem.s.hVCpu);
2081
2082 TMNotifyEndOfExecution(pVM, pVCpu, ASMReadTSC());
2083
2084 return hrc;
2085}
2086
2087
2088/**
2089 * Prepares the VM to run the guest.
2090 *
2091 * @returns Strict VBox status code.
2092 * @param pVM The cross context VM structure.
2093 * @param pVCpu The cross context virtual CPU structure.
2094 * @param fSingleStepping Flag whether we run in single stepping mode.
2095 */
2096static VBOXSTRICTRC nemR3DarwinPreRunGuest(PVM pVM, PVMCPU pVCpu, bool fSingleStepping)
2097{
2098#ifdef LOG_ENABLED
2099 bool fIrq = false;
2100 bool fFiq = false;
2101
2102 if (LogIs3Enabled())
2103 nemR3DarwinLogState(pVM, pVCpu);
2104#endif
2105
2106 int rc = nemR3DarwinExportGuestState(pVM, pVCpu);
2107 AssertRCReturn(rc, rc);
2108
2109 /* In single stepping mode we will re-read SPSR and MDSCR and enable the software step bits. */
2110 if (fSingleStepping)
2111 {
2112 uint64_t u64Tmp;
2113 hv_return_t hrc = hv_vcpu_get_reg(pVCpu->nem.s.hVCpu, HV_REG_CPSR, &u64Tmp);
2114 if (hrc == HV_SUCCESS)
2115 {
2116 u64Tmp |= ARMV8_SPSR_EL2_AARCH64_SS;
2117 hrc = hv_vcpu_set_reg(pVCpu->nem.s.hVCpu, HV_REG_CPSR, u64Tmp);
2118 }
2119
2120 hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_MDSCR_EL1, &u64Tmp);
2121 if (hrc == HV_SUCCESS)
2122 {
2123 u64Tmp |= ARMV8_MDSCR_EL1_AARCH64_SS;
2124 hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_MDSCR_EL1, u64Tmp);
2125 }
2126
2127 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
2128 }
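/* Note: with both PSTATE.SS (set via the SPSR) and MDSCR_EL1.SS set, the CPU raises a software
 * step exception after retiring a single instruction, which we receive as a debug exception exit. */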
2129
2130 /* Check whether the vTimer interrupt was handled by the guest and we can unmask the vTimer. */
2131 if (pVCpu->nem.s.fVTimerActivated)
2132 {
2133 /* Read the CNTV_CTL_EL0 register. */
2134 uint64_t u64CntvCtl = 0;
2135
2136 hv_return_t hrc = hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CTL_EL0, &u64CntvCtl);
2137 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
2138
2139 if ( (u64CntvCtl & (ARMV8_CNTV_CTL_EL0_AARCH64_ENABLE | ARMV8_CNTV_CTL_EL0_AARCH64_IMASK | ARMV8_CNTV_CTL_EL0_AARCH64_ISTATUS))
2140 != (ARMV8_CNTV_CTL_EL0_AARCH64_ENABLE | ARMV8_CNTV_CTL_EL0_AARCH64_ISTATUS))
2141 {
2142 /* Clear the interrupt. */
2143 GICPpiSet(pVCpu, pVM->nem.s.u32GicPpiVTimer, false /*fAsserted*/);
2144
2145 pVCpu->nem.s.fVTimerActivated = false;
2146 hrc = hv_vcpu_set_vtimer_mask(pVCpu->nem.s.hVCpu, false /*vtimer_is_masked*/);
2147 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
2148 }
2149 }
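/* Note: the check above leaves the PPI asserted only while CNTV_CTL reads ENABLE=1, IMASK=0 and
 * ISTATUS=1; once the guest has disabled, masked or satisfied the timer we drop the interrupt
 * and unmask the vTimer so Hypervisor.framework can signal the next expiry. */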
2150
2151 /* Set the pending interrupt state. */
2152 hv_return_t hrc = HV_SUCCESS;
2153 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ))
2154 {
2155 hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_IRQ, true);
2156 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
2157#ifdef LOG_ENABLED
2158 fIrq = true;
2159#endif
2160 }
2161 else
2162 {
2163 hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_IRQ, false);
2164 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
2165 }
2166
2167 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_FIQ))
2168 {
2169 hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_FIQ, true);
2170 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
2171#ifdef LOG_ENABLED
2172 fFiq = true;
2173#endif
2174 }
2175 else
2176 {
2177 hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_FIQ, false);
2178 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
2179 }
2180
2181 LogFlowFunc(("Running vCPU [%s,%s]\n", fIrq ? "I" : "nI", fFiq ? "F" : "nF"));
2182 pVCpu->nem.s.fEventPending = false;
2183 return VINF_SUCCESS;
2184}
2185
2186
2187/**
2188 * The normal runloop (no debugging features enabled).
2189 *
2190 * @returns Strict VBox status code.
2191 * @param pVM The cross context VM structure.
2192 * @param pVCpu The cross context virtual CPU structure.
2193 */
2194static VBOXSTRICTRC nemR3DarwinRunGuestNormal(PVM pVM, PVMCPU pVCpu)
2195{
2196 /*
2197 * The run loop.
2198 *
2199 * The current approach to state updating is to use the sledgehammer and sync
2200 * everything every time. This will be optimized later.
2201 */
2202
2203 /* Update the vTimer offset after resuming if instructed. */
2204 if (pVCpu->nem.s.fVTimerOffUpdate)
2205 {
2206 hv_return_t hrc = hv_vcpu_set_vtimer_offset(pVCpu->nem.s.hVCpu, pVM->nem.s.u64VTimerOff);
2207 if (hrc != HV_SUCCESS)
2208 return nemR3DarwinHvSts2Rc(hrc);
2209
2210 pVCpu->nem.s.fVTimerOffUpdate = false;
2211
2212 hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CTL_EL0, pVCpu->cpum.GstCtx.CntvCtlEl0);
2213 if (hrc == HV_SUCCESS)
2214 hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CVAL_EL0, pVCpu->cpum.GstCtx.CntvCValEl0);
2215 if (hrc != HV_SUCCESS)
2216 return nemR3DarwinHvSts2Rc(hrc);
2217 }
2218
2219 /*
2220 * Poll timers and run for a bit.
2221 */
2222 /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
2223 * the whole polling job when timers have changed... */
2224 uint64_t offDeltaIgnored;
2225 uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
2226 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2227 for (unsigned iLoop = 0;; iLoop++)
2228 {
2229 rcStrict = nemR3DarwinPreRunGuest(pVM, pVCpu, false /* fSingleStepping */);
2230 if (rcStrict != VINF_SUCCESS)
2231 break;
2232
2233 hv_return_t hrc = nemR3DarwinRunGuest(pVM, pVCpu);
2234 if (hrc == HV_SUCCESS)
2235 {
2236 /*
2237 * Deal with the message.
2238 */
2239 rcStrict = nemR3DarwinHandleExit(pVM, pVCpu);
2240 if (rcStrict == VINF_SUCCESS)
2241 { /* hopefully likely */ }
2242 else
2243 {
2244 LogFlow(("NEM/%u: breaking: nemR3DarwinHandleExit -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
2245 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
2246 break;
2247 }
2248 }
2249 else
2250 {
2251 AssertLogRelMsgFailedReturn(("hv_vcpu_run()) failed for CPU #%u: %#x \n",
2252 pVCpu->idCpu, hrc), VERR_NEM_IPE_0);
2253 }
2254 } /* the run loop */
2255
2256 return rcStrict;
2257}
2258
2259
2260/**
2261 * The debug runloop.
2262 *
2263 * @returns Strict VBox status code.
2264 * @param pVM The cross context VM structure.
2265 * @param pVCpu The cross context virtual CPU structure.
2266 */
2267static VBOXSTRICTRC nemR3DarwinRunGuestDebug(PVM pVM, PVMCPU pVCpu)
2268{
2269 /*
2270 * The run loop.
2271 *
2272 * The current approach to state updating is to use the sledgehammer and sync
2273 * everything every time. This will be optimized later.
2274 */
2275
2276 bool const fSavedSingleInstruction = pVCpu->nem.s.fSingleInstruction;
2277 pVCpu->nem.s.fSingleInstruction = pVCpu->nem.s.fSingleInstruction || DBGFIsStepping(pVCpu);
2278 pVCpu->nem.s.fUsingDebugLoop = true;
2279
2280 /* Trap any debug exceptions. */
2281 hv_return_t hrc = hv_vcpu_set_trap_debug_exceptions(pVCpu->nem.s.hVCpu, true);
2282 if (hrc != HV_SUCCESS)
2283 return VMSetError(pVM, VERR_NEM_SET_REGISTERS_FAILED, RT_SRC_POS,
2284 "Trapping debug exceptions on vCPU %u failed: %#x (%Rrc)", pVCpu->idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));
2285
2286 /* Update the vTimer offset after resuming if instructed. */
2287 if (pVCpu->nem.s.fVTimerOffUpdate)
2288 {
2289 hrc = hv_vcpu_set_vtimer_offset(pVCpu->nem.s.hVCpu, pVM->nem.s.u64VTimerOff);
2290 if (hrc != HV_SUCCESS)
2291 return nemR3DarwinHvSts2Rc(hrc);
2292
2293 pVCpu->nem.s.fVTimerOffUpdate = false;
2294
2295 hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CTL_EL0, pVCpu->cpum.GstCtx.CntvCtlEl0);
2296 if (hrc == HV_SUCCESS)
2297 hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CVAL_EL0, pVCpu->cpum.GstCtx.CntvCValEl0);
2298 if (hrc != HV_SUCCESS)
2299 return nemR3DarwinHvSts2Rc(hrc);
2300 }
2301
2302 /* Save the guest MDSCR_EL1 */
2303 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_SYSREG_DEBUG | CPUMCTX_EXTRN_PSTATE);
2304 uint64_t u64RegMdscrEl1 = pVCpu->cpum.GstCtx.Mdscr.u64;
2305
2306 /*
2307 * Poll timers and run for a bit.
2308 */
2309 /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
2310 * the whole polling job when timers have changed... */
2311 uint64_t offDeltaIgnored;
2312 uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
2313 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2314 for (unsigned iLoop = 0;; iLoop++)
2315 {
2316 bool const fStepping = pVCpu->nem.s.fSingleInstruction;
2317
2318 rcStrict = nemR3DarwinPreRunGuest(pVM, pVCpu, fStepping);
2319 if (rcStrict != VINF_SUCCESS)
2320 break;
2321
2322 hrc = nemR3DarwinRunGuest(pVM, pVCpu);
2323 if (hrc == HV_SUCCESS)
2324 {
2325 /*
2326 * Deal with the message.
2327 */
2328 rcStrict = nemR3DarwinHandleExit(pVM, pVCpu);
2329 if (rcStrict == VINF_SUCCESS)
2330 { /* hopefully likely */ }
2331 else
2332 {
2333 LogFlow(("NEM/%u: breaking: nemR3DarwinHandleExit -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
2334 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
2335 break;
2336 }
2337 }
2338 else
2339 {
2340 AssertLogRelMsgFailedReturn(("hv_vcpu_run() failed for CPU #%u: %#x\n",
2341 pVCpu->idCpu, hrc), VERR_NEM_IPE_0);
2342 }
2343 } /* the run loop */
2344
2345 /* Restore single stepping state. */
2346 if (pVCpu->nem.s.fSingleInstruction)
2347 {
2348 /** @todo This ASSUMES that guest code being single stepped is not modifying the MDSCR_EL1 register. */
2349 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_SYSREG_DEBUG | CPUMCTX_EXTRN_PSTATE);
2350 Assert(pVCpu->cpum.GstCtx.Mdscr.u64 & ARMV8_MDSCR_EL1_AARCH64_SS);
2351
2352 pVCpu->cpum.GstCtx.Mdscr.u64 = u64RegMdscrEl1;
2353 }
2354
2355 /* Restore debug exceptions trapping. */
2356 hrc = hv_vcpu_set_trap_debug_exceptions(pVCpu->nem.s.hVCpu, false);
2357 if (hrc != HV_SUCCESS)
2358 return VMSetError(pVM, VERR_NEM_SET_REGISTERS_FAILED, RT_SRC_POS,
2359 "Clearing trapping of debug exceptions on vCPU %u failed: %#x (%Rrc)", pVCpu->idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));
2360
2361 pVCpu->nem.s.fUsingDebugLoop = false;
2362 pVCpu->nem.s.fSingleInstruction = fSavedSingleInstruction;
2363
2364 return rcStrict;
2365
2366}
2367
2368
2369VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu)
2370{
2371#ifdef LOG_ENABLED
2372 if (LogIs3Enabled())
2373 nemR3DarwinLogState(pVM, pVCpu);
2374#endif
2375
2376 AssertReturn(NEMR3CanExecuteGuest(pVM, pVCpu), VERR_NEM_IPE_9);
2377
2378 if (RT_UNLIKELY(!pVCpu->nem.s.fIdRegsSynced))
2379 {
2380 /*
2381 * Sync the guest ID registers, which are per-VM, once (they are read-only and stay constant for the VM's lifetime).
2382 * This needs to be done here and not during init because loading a saved state might change the ID registers from what
2383 * was done in the call to CPUMR3PopulateFeaturesByIdRegisters().
2384 */
2385 static const struct
2386 {
2387 const char *pszIdReg;
2388 hv_sys_reg_t enmHvReg;
2389 uint32_t offIdStruct;
2390 } s_aSysIdRegs[] =
2391 {
2392#define ID_SYS_REG_CREATE(a_IdReg, a_CpumIdReg) { #a_IdReg, HV_SYS_REG_##a_IdReg, RT_UOFFSETOF(CPUMIDREGS, a_CpumIdReg) }
2393 ID_SYS_REG_CREATE(ID_AA64DFR0_EL1, u64RegIdAa64Dfr0El1),
2394 ID_SYS_REG_CREATE(ID_AA64DFR1_EL1, u64RegIdAa64Dfr1El1),
2395 ID_SYS_REG_CREATE(ID_AA64ISAR0_EL1, u64RegIdAa64Isar0El1),
2396 ID_SYS_REG_CREATE(ID_AA64ISAR1_EL1, u64RegIdAa64Isar1El1),
2397 ID_SYS_REG_CREATE(ID_AA64MMFR0_EL1, u64RegIdAa64Mmfr0El1),
2398 ID_SYS_REG_CREATE(ID_AA64MMFR1_EL1, u64RegIdAa64Mmfr1El1),
2399 ID_SYS_REG_CREATE(ID_AA64MMFR2_EL1, u64RegIdAa64Mmfr2El1),
2400 ID_SYS_REG_CREATE(ID_AA64PFR0_EL1, u64RegIdAa64Pfr0El1),
2401 ID_SYS_REG_CREATE(ID_AA64PFR1_EL1, u64RegIdAa64Pfr1El1),
2402#undef ID_SYS_REG_CREATE
2403 };
2404
2405 PCCPUMIDREGS pIdRegsGst = NULL;
2406 int rc = CPUMR3QueryGuestIdRegs(pVM, &pIdRegsGst);
2407 AssertRCReturn(rc, rc);
2408
2409 for (uint32_t i = 0; i < RT_ELEMENTS(s_aSysIdRegs); i++)
2410 {
2411 uint64_t *pu64 = (uint64_t *)((uint8_t *)pIdRegsGst + s_aSysIdRegs[i].offIdStruct);
2412 hv_return_t hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, s_aSysIdRegs[i].enmHvReg, *pu64);
2413 if (hrc != HV_SUCCESS)
2414 return VMSetError(pVM, VERR_NEM_SET_REGISTERS_FAILED, RT_SRC_POS,
2415 "Setting %s failed on vCPU %u: %#x (%Rrc)", s_aSysIdRegs[i].pszIdReg, pVCpu->idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));
2416 }
2417
2418 pVCpu->nem.s.fIdRegsSynced = true;
2419 }
2420
2421 /*
2422 * Try switch to NEM runloop state.
2423 */
2424 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
2425 { /* likely */ }
2426 else
2427 {
2428 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
2429 LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
2430 return VINF_SUCCESS;
2431 }
2432
2433 VBOXSTRICTRC rcStrict;
2434 if ( !pVCpu->nem.s.fUseDebugLoop
2435 /*&& !nemR3DarwinAnyExpensiveProbesEnabled()*/
2436 && !DBGFIsStepping(pVCpu)
2437 && !pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledSwBreakpoints)
2438 rcStrict = nemR3DarwinRunGuestNormal(pVM, pVCpu);
2439 else
2440 rcStrict = nemR3DarwinRunGuestDebug(pVM, pVCpu);
2441
2442 if (rcStrict == VINF_EM_RAW_TO_R3)
2443 rcStrict = VINF_SUCCESS;
2444
2445 /*
2446 * Convert any pending HM events back to TRPM due to premature exits.
2447 *
2448 * This is because execution may continue from IEM and we would need to inject
2449 * the event from there (hence place it back in TRPM).
2450 */
2451 if (pVCpu->nem.s.fEventPending)
2452 {
2453 /** @todo */
2454 }
2455
2456
2457 if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
2458 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
2459
2460 if (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL))
2461 {
2462 /* Try to anticipate what we might need. */
2463 uint64_t fImport = NEM_DARWIN_CPUMCTX_EXTRN_MASK_FOR_IEM;
2464 if ( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
2465 || RT_FAILURE(rcStrict))
2466 fImport = CPUMCTX_EXTRN_ALL;
2467 else if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ
2468 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
2469 fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;
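/* Anything IEM or the debugger is likely to need right away is imported eagerly here;
 * the rest stays external and is fetched on demand through NEMImportStateOnDemand(). */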
2470
2471 if (pVCpu->cpum.GstCtx.fExtrn & fImport)
2472 {
2473 /* Only import what is external currently. */
2474 int rc2 = nemR3DarwinCopyStateFromHv(pVM, pVCpu, fImport);
2475 if (RT_SUCCESS(rc2))
2476 pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
2477 else if (RT_SUCCESS(rcStrict))
2478 rcStrict = rc2;
2479 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
2480 pVCpu->cpum.GstCtx.fExtrn = 0;
2481 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
2482 }
2483 else
2484 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
2485 }
2486 else
2487 {
2488 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
2489 pVCpu->cpum.GstCtx.fExtrn = 0;
2490 }
2491
2492 return rcStrict;
2493}
2494
2495
2496VMMR3_INT_DECL(bool) NEMR3CanExecuteGuest(PVM pVM, PVMCPU pVCpu)
2497{
2498 RT_NOREF(pVM, pVCpu);
2499 return true; /** @todo Are there any cases where we have to emulate? */
2500}
2501
2502
2503bool nemR3NativeSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
2504{
2505 VMCPU_ASSERT_EMT(pVCpu);
2506 bool fOld = pVCpu->nem.s.fSingleInstruction;
2507 pVCpu->nem.s.fSingleInstruction = fEnable;
2508 pVCpu->nem.s.fUseDebugLoop = fEnable || pVM->nem.s.fUseDebugLoop;
2509 return fOld;
2510}
2511
2512
2513void nemR3NativeNotifyFF(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
2514{
2515 LogFlowFunc(("pVM=%p pVCpu=%p fFlags=%#x\n", pVM, pVCpu, fFlags));
2516
2517 RT_NOREF(pVM, fFlags);
2518
2519 hv_return_t hrc = hv_vcpus_exit(&pVCpu->nem.s.hVCpu, 1);
2520 if (hrc != HV_SUCCESS)
2521 LogRel(("NEM: hv_vcpus_exit(%u, 1) failed with %#x\n", pVCpu->nem.s.hVCpu, hrc));
2522}
2523
2524
2525DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChanged(PVM pVM, bool fUseDebugLoop)
2526{
2527 RT_NOREF(pVM, fUseDebugLoop);
2528 //AssertReleaseFailed();
2529 return false;
2530}
2531
2532
2533DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu, bool fUseDebugLoop)
2534{
2535 RT_NOREF(pVM, pVCpu, fUseDebugLoop);
2536 return fUseDebugLoop;
2537}
2538
2539
2540VMMR3_INT_DECL(int) NEMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvR3,
2541 uint8_t *pu2State, uint32_t *puNemRange)
2542{
2543 RT_NOREF(pVM, puNemRange);
2544
2545 Log5(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p\n", GCPhys, cb, pvR3));
2546#if defined(VBOX_WITH_PGM_NEM_MODE)
2547 if (pvR3)
2548 {
2549 int rc = nemR3DarwinMap(pVM, GCPhys, pvR3, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
2550 if (RT_FAILURE(rc))
2551 {
2552 LogRel(("NEMR3NotifyPhysRamRegister: GCPhys=%RGp LB %RGp pvR3=%p rc=%Rrc\n", GCPhys, cb, pvR3, rc));
2553 return VERR_NEM_MAP_PAGES_FAILED;
2554 }
2555 }
2556 return VINF_SUCCESS;
2557#else
2558 RT_NOREF(pVM, GCPhys, cb, pvR3);
2559 return VERR_NEM_MAP_PAGES_FAILED;
2560#endif
2561}
2562
2563
2564VMMR3_INT_DECL(bool) NEMR3IsMmio2DirtyPageTrackingSupported(PVM pVM)
2565{
2566 RT_NOREF(pVM);
2567 return true;
2568}
2569
2570
2571VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
2572 void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
2573{
2574 RT_NOREF(pvRam);
2575
2576 Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p (%d)\n",
2577 GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, *pu2State));
2578
2579#if defined(VBOX_WITH_PGM_NEM_MODE)
2580 /*
2581 * Unmap the RAM we're replacing.
2582 */
2583 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
2584 {
2585 int rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
2586 if (RT_SUCCESS(rc))
2587 { /* likely */ }
2588 else if (pvMmio2)
2589 LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rc(ignored)\n",
2590 GCPhys, cb, fFlags, rc));
2591 else
2592 {
2593 LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc\n",
2594 GCPhys, cb, fFlags, rc));
2595 return VERR_NEM_UNMAP_PAGES_FAILED;
2596 }
2597 }
2598
2599 /*
2600 * Map MMIO2 if any.
2601 */
2602 if (pvMmio2)
2603 {
2604 Assert(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2);
2605
2606 /* We need to set up our own dirty tracking because Hypervisor.framework works only on host-page-sized, host-page-aligned regions. */
2607 uint32_t fProt = NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
2608 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES)
2609 {
2610 /* Find a slot for dirty tracking. */
2611 PNEMHVMMIO2REGION pMmio2Region = NULL;
2612 uint32_t idSlot;
2613 for (idSlot = 0; idSlot < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking); idSlot++)
2614 {
2615 if ( pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysStart == 0
2616 && pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysLast == 0)
2617 {
2618 pMmio2Region = &pVM->nem.s.aMmio2DirtyTracking[idSlot];
2619 break;
2620 }
2621 }
2622
2623 if (!pMmio2Region)
2624 {
2625 LogRel(("NEMR3NotifyPhysMmioExMapEarly: Out of dirty tracking structures -> VERR_NEM_MAP_PAGES_FAILED\n"));
2626 return VERR_NEM_MAP_PAGES_FAILED;
2627 }
2628
2629 pMmio2Region->GCPhysStart = GCPhys;
2630 pMmio2Region->GCPhysLast = GCPhys + cb - 1;
2631 pMmio2Region->fDirty = false;
2632 *puNemRange = idSlot;
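/* The slot index is returned through puNemRange so that NEMR3PhysMmio2QueryAndResetDirtyBitmap()
 * and NEMR3NotifyPhysMmioExUnmap() can find this tracking structure again. */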
2633 }
2634 else
2635 fProt |= NEM_PAGE_PROT_WRITE;
2636
2637 int rc = nemR3DarwinMap(pVM, GCPhys, pvMmio2, cb, fProt, pu2State);
2638 if (RT_FAILURE(rc))
2639 {
2640 LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x pvMmio2=%p: Map -> rc=%Rrc\n",
2641 GCPhys, cb, fFlags, pvMmio2, rc));
2642 return VERR_NEM_MAP_PAGES_FAILED;
2643 }
2644 }
2645 else
2646 Assert(!(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2));
2647
2648#else
2649 RT_NOREF(pVM, GCPhys, cb, pvRam, pvMmio2);
2650 *pu2State = (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE) ? UINT8_MAX : NEM_DARWIN_PAGE_STATE_UNMAPPED;
2651#endif
2652 return VINF_SUCCESS;
2653}
2654
2655
2656VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
2657 void *pvRam, void *pvMmio2, uint32_t *puNemRange)
2658{
2659 RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, puNemRange);
2660 return VINF_SUCCESS;
2661}
2662
2663
2664VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExUnmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, void *pvRam,
2665 void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
2666{
2667 RT_NOREF(pVM, puNemRange);
2668
2669 Log5(("NEMR3NotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p uNemRange=%#x (%#x)\n",
2670 GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, puNemRange, *puNemRange));
2671
2672 int rc = VINF_SUCCESS;
2673#if defined(VBOX_WITH_PGM_NEM_MODE)
2674 /*
2675 * Unmap the MMIO2 pages.
2676 */
2677 /** @todo If we implement aliasing (MMIO2 page aliased into MMIO range),
2678 * we may have more stuff to unmap even in case of pure MMIO... */
2679 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2)
2680 {
2681 rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
2682 if (RT_FAILURE(rc))
2683 {
2684 LogRel2(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc\n",
2685 GCPhys, cb, fFlags, rc));
2686 rc = VERR_NEM_UNMAP_PAGES_FAILED;
2687 }
2688
2689 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES)
2690 {
2691 /* Reset tracking structure. */
2692 uint32_t idSlot = *puNemRange;
2693 *puNemRange = UINT32_MAX;
2694
2695 Assert(idSlot < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking));
2696 pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysStart = 0;
2697 pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysLast = 0;
2698 pVM->nem.s.aMmio2DirtyTracking[idSlot].fDirty = false;
2699 }
2700 }
2701
2702 /* Ensure the page is marked as unmapped if relevant. */
2703 Assert(!pu2State || *pu2State == NEM_DARWIN_PAGE_STATE_UNMAPPED);
2704
2705 /*
2706 * Restore the RAM we replaced.
2707 */
2708 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
2709 {
2710 AssertPtr(pvRam);
2711 rc = nemR3DarwinMap(pVM, GCPhys, pvRam, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
2712 if (RT_SUCCESS(rc))
2713 { /* likely */ }
2714 else
2715 {
2716 LogRel(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp pvMmio2=%p rc=%Rrc\n", GCPhys, cb, pvMmio2, rc));
2717 rc = VERR_NEM_MAP_PAGES_FAILED;
2718 }
2719 }
2720
2721 RT_NOREF(pvMmio2);
2722#else
2723 RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State);
2724 if (pu2State)
2725 *pu2State = UINT8_MAX;
2726 rc = VERR_NEM_UNMAP_PAGES_FAILED;
2727#endif
2728 return rc;
2729}
2730
2731
2732VMMR3_INT_DECL(int) NEMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t uNemRange,
2733 void *pvBitmap, size_t cbBitmap)
2734{
2735 LogFlowFunc(("NEMR3PhysMmio2QueryAndResetDirtyBitmap: %RGp LB %RGp UnemRange=%u\n", GCPhys, cb, uNemRange));
2736 Assert(uNemRange < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking));
2737
2738 /* Keep it simple for now and mark everything as dirty if it is. */
2739 int rc = VINF_SUCCESS;
2740 if (pVM->nem.s.aMmio2DirtyTracking[uNemRange].fDirty)
2741 {
2742 ASMBitSetRange(pvBitmap, 0, cbBitmap * 8);
2743
2744 pVM->nem.s.aMmio2DirtyTracking[uNemRange].fDirty = false;
2745 /* Restore as RX only. */
2746 uint8_t u2State;
2747 rc = nemR3DarwinProtect(GCPhys, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE, &u2State);
2748 }
2749 else
2750 ASMBitClearRange(pvBitmap, 0, cbBitmap * 8);
2751
2752 return rc;
2753}
2754
2755
2756VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages, uint32_t fFlags,
2757 uint8_t *pu2State, uint32_t *puNemRange)
2758{
2759 RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);
2760
2761 Log5(("NEMR3NotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags));
2762 *pu2State = UINT8_MAX;
2763 *puNemRange = 0;
2764 return VINF_SUCCESS;
2765}
2766
2767
2768VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages,
2769 uint32_t fFlags, uint8_t *pu2State, uint32_t *puNemRange)
2770{
2771 Log5(("NEMR3NotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n",
2772 GCPhys, cb, pvPages, fFlags, pu2State, *pu2State, puNemRange, *puNemRange));
2773 *pu2State = UINT8_MAX;
2774
2775#if defined(VBOX_WITH_PGM_NEM_MODE)
2776 /*
2777 * (Re-)map readonly.
2778 */
2779 AssertPtrReturn(pvPages, VERR_INVALID_POINTER);
2780
2781 int rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
2782 AssertRC(rc);
2783
2784 rc = nemR3DarwinMap(pVM, GCPhys, pvPages, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE, pu2State);
2785 if (RT_FAILURE(rc))
2786 {
2787 LogRel(("nemR3NativeNotifyPhysRomRegisterLate: GCPhys=%RGp LB %RGp pvPages=%p fFlags=%#x rc=%Rrc\n",
2788 GCPhys, cb, pvPages, fFlags, rc));
2789 return VERR_NEM_MAP_PAGES_FAILED;
2790 }
2791 RT_NOREF(fFlags, puNemRange);
2792 return VINF_SUCCESS;
2793#else
2794 RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);
2795 return VERR_NEM_MAP_PAGES_FAILED;
2796#endif
2797}
2798
2799
2800VMM_INT_DECL(void) NEMHCNotifyHandlerPhysicalDeregister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
2801 RTR3PTR pvMemR3, uint8_t *pu2State)
2802{
2803 Log5(("NEMHCNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d pvMemR3=%p pu2State=%p (%d)\n",
2804 GCPhys, cb, enmKind, pvMemR3, pu2State, *pu2State));
2805
2806 *pu2State = UINT8_MAX;
2807#if defined(VBOX_WITH_PGM_NEM_MODE)
2808 if (pvMemR3)
2809 {
2810 /* Unregister what was there before. */
2811 int rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
2812 AssertRC(rc);
2813
2814 rc = nemR3DarwinMap(pVM, GCPhys, pvMemR3, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
2815 AssertLogRelMsgRC(rc, ("NEMHCNotifyHandlerPhysicalDeregister: nemR3DarwinMap(,%p,%RGp,%RGp,) -> %Rrc\n",
2816 pvMemR3, GCPhys, cb, rc));
2817 }
2818 RT_NOREF(enmKind);
2819#else
2820 RT_NOREF(pVM, enmKind, GCPhys, cb, pvMemR3);
2821 AssertFailed();
2822#endif
2823}
2824
2825
2826VMMR3_INT_DECL(void) NEMR3NotifySetA20(PVMCPU pVCpu, bool fEnabled)
2827{
2828 Log(("NEMR3NotifySetA20: fEnabled=%RTbool\n", fEnabled));
2829 RT_NOREF(pVCpu, fEnabled);
2830}
2831
2832
2833void nemHCNativeNotifyHandlerPhysicalRegister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
2834{
2835 Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
2836 NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb);
2837}
2838
2839
2840void nemHCNativeNotifyHandlerPhysicalModify(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
2841 RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
2842{
2843 Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
2844 GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
2845 NOREF(pVM); NOREF(enmKind); NOREF(GCPhysOld); NOREF(GCPhysNew); NOREF(cb); NOREF(fRestoreAsRAM);
2846}
2847
2848
2849int nemHCNativeNotifyPhysPageAllocated(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
2850 PGMPAGETYPE enmType, uint8_t *pu2State)
2851{
2852 Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
2853 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
2854 RT_NOREF(pVM, GCPhys, HCPhys, fPageProt, enmType, pu2State);
2855
2856 AssertFailed();
2857 return VINF_SUCCESS;
2858}
2859
2860
2861VMM_INT_DECL(void) NEMHCNotifyPhysPageProtChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, RTR3PTR pvR3, uint32_t fPageProt,
2862 PGMPAGETYPE enmType, uint8_t *pu2State)
2863{
2864 Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
2865 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
2866 RT_NOREF(pVM, GCPhys, HCPhys, pvR3, fPageProt, enmType, pu2State);
2867}
2868
2869
2870VMM_INT_DECL(void) NEMHCNotifyPhysPageChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
2871 RTR3PTR pvNewR3, uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
2872{
2873 Log5(("NEMHCNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
2874 GCPhys, HCPhysPrev, HCPhysNew, fPageProt, enmType, *pu2State));
2875 RT_NOREF(pVM, GCPhys, HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType, pu2State);
2876
2877 AssertFailed();
2878}
2879
2880
/**
 * Interface for importing state on demand (used by IEM).
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context CPU structure.
 * @param   fWhat   What to import, CPUMCTX_EXTRN_XXX.
 */
VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
{
    LogFlowFunc(("pVCpu=%p fWhat=%RX64\n", pVCpu, fWhat));
    STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);

    return nemR3DarwinCopyStateFromHv(pVCpu->pVMR3, pVCpu, fWhat);
}
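
/*
 * Illustrative sketch (not part of the backend): a caller that needs selected
 * guest registers up to date before touching pVCpu->cpum.GstCtx would import
 * the still-external bits first.  CPUMCTX_EXTRN_PC is used here merely as an
 * example flag from the CPUMCTX_EXTRN_XXX set:
 *
 *      if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_PC)
 *      {
 *          int rc = NEMImportStateOnDemand(pVCpu, CPUMCTX_EXTRN_PC);
 *          AssertRCReturn(rc, rc);
 *      }
 *      // The guest PC in pVCpu->cpum.GstCtx is now valid.
 */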


/**
 * Query the CPU tick counter and optionally the TSC_AUX MSR value.
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context CPU structure.
 * @param   pcTicks Where to return the CPU tick count.
 * @param   puAux   Where to return the TSC_AUX register value (always set to
 *                  zero by this backend).
 */
VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPUCC pVCpu, uint64_t *pcTicks, uint32_t *puAux)
{
    LogFlowFunc(("pVCpu=%p pcTicks=%RX64 puAux=%RX32\n", pVCpu, pcTicks, puAux));
    STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick);

    if (puAux)
        *puAux = 0;
    *pcTicks = mach_absolute_time() - pVCpu->pVMR3->nem.s.u64VTimerOff; /* This is the host timer minus the offset. */
    return VINF_SUCCESS;
}
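
/*
 * Worked example (illustrative numbers): the guest tick count is simply the
 * host counter shifted by a constant.  With mach_absolute_time() = 1000 and
 * u64VTimerOff = 200, the guest reads 800 ticks.  Since the offset only ever
 * grows across pause/resume (see NEMHCResumeCpuTickOnAll below), the guest
 * clock never jumps backwards.
 */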


/**
 * Resumes CPU clock (TSC) on all virtual CPUs.
 *
 * This is called by TM when the VM is started, restored, resumed or similar.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context CPU structure of the calling EMT.
 * @param   uPausedTscValue The TSC value at the time of pausing.
 */
VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uPausedTscValue)
{
    LogFlowFunc(("pVM=%p pVCpu=%p uPausedTscValue=%RX64\n", pVM, pVCpu, uPausedTscValue));
    VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
    AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);

    /*
     * Calculate the new offset: first get the current TSC value using the old
     * vTimer offset, then adjust the offset so the guest doesn't notice the pause.
     */
    uint64_t u64TscNew = mach_absolute_time() - pVCpu->pVMR3->nem.s.u64VTimerOff;
    Assert(u64TscNew >= uPausedTscValue);
    LogFlowFunc(("u64VTimerOffOld=%#RX64 u64TscNew=%#RX64 u64VTimerValuePaused=%#RX64 -> u64VTimerOff=%#RX64\n",
                 pVM->nem.s.u64VTimerOff, u64TscNew, uPausedTscValue,
                 pVM->nem.s.u64VTimerOff + (u64TscNew - uPausedTscValue)));

    pVM->nem.s.u64VTimerOff += u64TscNew - uPausedTscValue;

    /*
     * Set the flag to update the vTimer offset when the vCPU resumes for the first time
     * (needs to be done on the actual EMT).
     */
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPUCC pVCpuDst = pVM->apCpusR3[idCpu];
        pVCpuDst->nem.s.fVTimerOffUpdate = true;
    }

    return VINF_SUCCESS;
}
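
/*
 * Worked example (illustrative numbers): assume the guest was paused when its
 * counter read uPausedTscValue = 700 with an old u64VTimerOff of 200.  If the
 * host counter stands at 1500 on resume, then u64TscNew = 1500 - 200 = 1300,
 * and the offset grows by 1300 - 700 = 600 to u64VTimerOff = 800.  The next
 * guest read yields 1500 - 800 = 700 again, so the pause is invisible to the
 * guest.
 */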


/**
 * Returns features supported by the NEM backend.
 *
 * @returns Flags of features supported by the native NEM backend.
 * @param   pVM     The cross context VM structure.
 */
VMM_INT_DECL(uint32_t) NEMHCGetFeatures(PVMCC pVM)
{
    RT_NOREF(pVM);
    /*
     * Apple's Hypervisor.framework is not supported if the CPU doesn't support
     * nested paging and unrestricted guest execution, so we can safely always
     * return these flags here.
     */
    return NEM_FEAT_F_NESTED_PAGING | NEM_FEAT_F_FULL_GST_EXEC | NEM_FEAT_F_XSAVE_XRSTOR;
}
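
/*
 * Illustrative only: callers typically test individual feature bits rather
 * than the whole mask, e.g.:
 *
 *      uint32_t const fNemFeatures = NEMHCGetFeatures(pVM);
 *      if (fNemFeatures & NEM_FEAT_F_FULL_GST_EXEC)
 *      {
 *          // Unrestricted guest execution is available.
 *      }
 */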


/** @page pg_nem_darwin NEM/darwin - Native Execution Manager, macOS.
 *
 * @todo Add notes as the implementation progresses...
 */
