VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/NEMR3Native-darwin-armv8.cpp@ 106407

Last change on this file since 106407 was 106373, checked in by vboxsync, 6 months ago

VMM/NEMR3Native-darwin-armv8.cpp: Crude workaround for getting Windows/ARM booting as a guest because it assumes a working PMU which AppleSilicon hardware doesn't support, bugref:10732

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 121.0 KB
/* $Id: NEMR3Native-darwin-armv8.cpp 106373 2024-10-16 13:38:57Z vboxsync $ */
/** @file
 * NEM - Native execution manager, native ring-3 macOS backend using Hypervisor.framework, ARMv8 variant.
 *
 * Log group 2: Exit logging.
 * Log group 3: Log context on exit.
 * Log group 5: Ring-3 memory management
 */

/*
 * Copyright (C) 2023-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_NEM
#define VMCPU_INCL_CPUM_GST_CTX
#include <VBox/vmm/nem.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/gic.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/dbgftrace.h>
#include <VBox/vmm/gcm.h>
#include "NEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/vmm/vmm.h>
#include <VBox/gic.h>
#include "dtrace/VBoxVMM.h"

#include <iprt/armv8.h>
#include <iprt/asm.h>
#include <iprt/asm-arm.h>
#include <iprt/asm-math.h>
#include <iprt/ldr.h>
#include <iprt/mem.h>
#include <iprt/path.h>
#include <iprt/string.h>
#include <iprt/system.h>
#include <iprt/utf16.h>

#include <iprt/formats/arm-psci.h>

#include <mach/mach_time.h>
#include <mach/kern_return.h>

#include <Hypervisor/Hypervisor.h>


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/

#if MAC_OS_X_VERSION_MIN_REQUIRED < 150000

/* Since 15.0+ */
typedef enum hv_gic_distributor_reg_t : uint16_t
{
    HV_GIC_DISTRIBUTOR_REG_GICD_CTLR,
    HV_GIC_DISTRIBUTOR_REG_GICD_ICACTIVER0
    /** @todo */
} hv_gic_distributor_reg_t;


typedef enum hv_gic_icc_reg_t : uint16_t
{
    HV_GIC_ICC_REG_AP0R0_EL1
    /** @todo */
} hv_gic_icc_reg_t;


typedef enum hv_gic_ich_reg_t : uint16_t
{
    HV_GIC_ICH_REG_AP0R0_EL2
    /** @todo */
} hv_gic_ich_reg_t;


typedef enum hv_gic_icv_reg_t : uint16_t
{
    HV_GIC_ICV_REG_AP0R0_EL1
    /** @todo */
} hv_gic_icv_reg_t;


typedef enum hv_gic_msi_reg_t : uint16_t
{
    HV_GIC_REG_GICM_SET_SPI_NSR
    /** @todo */
} hv_gic_msi_reg_t;


typedef enum hv_gic_redistributor_reg_t : uint16_t
{
    HV_GIC_REDISTRIBUTOR_REG_GICR_ICACTIVER0
    /** @todo */
} hv_gic_redistributor_reg_t;


typedef enum hv_gic_intid_t : uint16_t
{
    HV_GIC_INT_EL1_PHYSICAL_TIMER  = 23,
    HV_GIC_INT_EL1_VIRTUAL_TIMER   = 25,
    HV_GIC_INT_EL2_PHYSICAL_TIMER  = 26,
    HV_GIC_INT_MAINTENANCE         = 27,
    HV_GIC_INT_PERFORMANCE_MONITOR = 30
} hv_gic_intid_t;

#endif
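
/* Note: the enum fallbacks above mirror the declarations added in the macOS 15.0
 * SDK so this file still builds against older SDKs; the functions themselves are
 * always resolved dynamically at runtime via the g_aImports table below. */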

typedef hv_vm_config_t FN_HV_VM_CONFIG_CREATE(void);
typedef hv_return_t FN_HV_VM_CONFIG_GET_EL2_SUPPORTED(bool *el2_supported);
typedef hv_return_t FN_HV_VM_CONFIG_GET_EL2_ENABLED(hv_vm_config_t config, bool *el2_enabled);
typedef hv_return_t FN_HV_VM_CONFIG_SET_EL2_ENABLED(hv_vm_config_t config, bool el2_enabled);

typedef struct hv_gic_config_s *hv_gic_config_t;
typedef hv_return_t FN_HV_GIC_CREATE(hv_gic_config_t gic_config);
typedef hv_return_t FN_HV_GIC_RESET(void);
typedef hv_gic_config_t FN_HV_GIC_CONFIG_CREATE(void);
typedef hv_return_t FN_HV_GIC_CONFIG_SET_DISTRIBUTOR_BASE(hv_gic_config_t config, hv_ipa_t distributor_base_address);
typedef hv_return_t FN_HV_GIC_CONFIG_SET_REDISTRIBUTOR_BASE(hv_gic_config_t config, hv_ipa_t redistributor_base_address);
typedef hv_return_t FN_HV_GIC_CONFIG_SET_MSI_REGION_BASE(hv_gic_config_t config, hv_ipa_t msi_region_base_address);
typedef hv_return_t FN_HV_GIC_CONFIG_SET_MSI_INTERRUPT_RANGE(hv_gic_config_t config, uint32_t msi_intid_base, uint32_t msi_intid_count);

typedef hv_return_t FN_HV_GIC_GET_REDISTRIBUTOR_BASE(hv_vcpu_t vcpu, hv_ipa_t *redistributor_base_address);
typedef hv_return_t FN_HV_GIC_GET_REDISTRIBUTOR_REGION_SIZE(size_t *redistributor_region_size);
typedef hv_return_t FN_HV_GIC_GET_REDISTRIBUTOR_SIZE(size_t *redistributor_size);
typedef hv_return_t FN_HV_GIC_GET_DISTRIBUTOR_SIZE(size_t *distributor_size);
typedef hv_return_t FN_HV_GIC_GET_DISTRIBUTOR_BASE_ALIGNMENT(size_t *distributor_base_alignment);
typedef hv_return_t FN_HV_GIC_GET_REDISTRIBUTOR_BASE_ALIGNMENT(size_t *redistributor_base_alignment);
typedef hv_return_t FN_HV_GIC_GET_MSI_REGION_BASE_ALIGNMENT(size_t *msi_region_base_alignment);
typedef hv_return_t FN_HV_GIC_GET_MSI_REGION_SIZE(size_t *msi_region_size);
typedef hv_return_t FN_HV_GIC_GET_SPI_INTERRUPT_RANGE(uint32_t *spi_intid_base, uint32_t *spi_intid_count);

typedef struct hv_gic_state_s *hv_gic_state_t;
typedef hv_gic_state_t FN_HV_GIC_STATE_CREATE(void);
typedef hv_return_t FN_HV_GIC_SET_STATE(const void *gic_state_data, size_t gic_state_size);
typedef hv_return_t FN_HV_GIC_STATE_GET_SIZE(hv_gic_state_t state, size_t *gic_state_size);
typedef hv_return_t FN_HV_GIC_STATE_GET_DATA(hv_gic_state_t state, void *gic_state_data);

typedef hv_return_t FN_HV_GIC_SEND_MSI(hv_ipa_t address, uint32_t intid);
typedef hv_return_t FN_HV_GIC_SET_SPI(uint32_t intid, bool level);

typedef hv_return_t FN_HV_GIC_GET_DISTRIBUTOR_REG(hv_gic_distributor_reg_t reg, uint64_t *value);
typedef hv_return_t FN_HV_GIC_GET_MSI_REG(hv_gic_msi_reg_t reg, uint64_t *value);
typedef hv_return_t FN_HV_GIC_GET_ICC_REG(hv_vcpu_t vcpu, hv_gic_icc_reg_t reg, uint64_t *value);
typedef hv_return_t FN_HV_GIC_GET_ICH_REG(hv_vcpu_t vcpu, hv_gic_ich_reg_t reg, uint64_t *value);
typedef hv_return_t FN_HV_GIC_GET_ICV_REG(hv_vcpu_t vcpu, hv_gic_icv_reg_t reg, uint64_t *value);
typedef hv_return_t FN_HV_GIC_GET_REDISTRIBUTOR_REG(hv_vcpu_t vcpu, hv_gic_redistributor_reg_t reg, uint64_t *value);

typedef hv_return_t FN_HV_GIC_SET_DISTRIBUTOR_REG(hv_gic_distributor_reg_t reg, uint64_t value);
typedef hv_return_t FN_HV_GIC_SET_MSI_REG(hv_gic_msi_reg_t reg, uint64_t value);
typedef hv_return_t FN_HV_GIC_SET_ICC_REG(hv_vcpu_t vcpu, hv_gic_icc_reg_t reg, uint64_t value);
typedef hv_return_t FN_HV_GIC_SET_ICH_REG(hv_vcpu_t vcpu, hv_gic_ich_reg_t reg, uint64_t value);
typedef hv_return_t FN_HV_GIC_SET_ICV_REG(hv_vcpu_t vcpu, hv_gic_icv_reg_t reg, uint64_t value);
typedef hv_return_t FN_HV_GIC_SET_REDISTRIBUTOR_REG(hv_vcpu_t vcpu, hv_gic_redistributor_reg_t reg, uint64_t value);

typedef hv_return_t FN_HV_GIC_GET_INTID(hv_gic_intid_t interrupt, uint32_t *intid);


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** @name Optional APIs imported from Hypervisor.framework.
 * @{ */
static FN_HV_VM_CONFIG_CREATE *g_pfnHvVmConfigCreate = NULL; /* Since 13.0 */
static FN_HV_VM_CONFIG_GET_EL2_SUPPORTED *g_pfnHvVmConfigGetEl2Supported = NULL; /* Since 15.0 */
static FN_HV_VM_CONFIG_GET_EL2_ENABLED *g_pfnHvVmConfigGetEl2Enabled = NULL; /* Since 15.0 */
static FN_HV_VM_CONFIG_SET_EL2_ENABLED *g_pfnHvVmConfigSetEl2Enabled = NULL; /* Since 15.0 */

static FN_HV_GIC_CREATE *g_pfnHvGicCreate = NULL; /* Since 15.0 */
static FN_HV_GIC_RESET *g_pfnHvGicReset = NULL; /* Since 15.0 */
static FN_HV_GIC_CONFIG_CREATE *g_pfnHvGicConfigCreate = NULL; /* Since 15.0 */
static FN_HV_GIC_CONFIG_SET_DISTRIBUTOR_BASE *g_pfnHvGicConfigSetDistributorBase = NULL; /* Since 15.0 */
static FN_HV_GIC_CONFIG_SET_REDISTRIBUTOR_BASE *g_pfnHvGicConfigSetRedistributorBase = NULL; /* Since 15.0 */
static FN_HV_GIC_CONFIG_SET_MSI_REGION_BASE *g_pfnHvGicConfigSetMsiRegionBase = NULL; /* Since 15.0 */
static FN_HV_GIC_CONFIG_SET_MSI_INTERRUPT_RANGE *g_pfnHvGicConfigSetMsiInterruptRange = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_REDISTRIBUTOR_BASE *g_pfnHvGicGetRedistributorBase = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_REDISTRIBUTOR_REGION_SIZE *g_pfnHvGicGetRedistributorRegionSize = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_REDISTRIBUTOR_SIZE *g_pfnHvGicGetRedistributorSize = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_DISTRIBUTOR_SIZE *g_pfnHvGicGetDistributorSize = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_DISTRIBUTOR_BASE_ALIGNMENT *g_pfnHvGicGetDistributorBaseAlignment = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_REDISTRIBUTOR_BASE_ALIGNMENT *g_pfnHvGicGetRedistributorBaseAlignment = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_MSI_REGION_BASE_ALIGNMENT *g_pfnHvGicGetMsiRegionBaseAlignment = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_MSI_REGION_SIZE *g_pfnHvGicGetMsiRegionSize = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_SPI_INTERRUPT_RANGE *g_pfnHvGicGetSpiInterruptRange = NULL; /* Since 15.0 */
static FN_HV_GIC_STATE_CREATE *g_pfnHvGicStateCreate = NULL; /* Since 15.0 */
static FN_HV_GIC_SET_STATE *g_pfnHvGicSetState = NULL; /* Since 15.0 */
static FN_HV_GIC_STATE_GET_SIZE *g_pfnHvGicStateGetSize = NULL; /* Since 15.0 */
static FN_HV_GIC_STATE_GET_DATA *g_pfnHvGicStateGetData = NULL; /* Since 15.0 */
static FN_HV_GIC_SEND_MSI *g_pfnHvGicSendMsi = NULL; /* Since 15.0 */
static FN_HV_GIC_SET_SPI *g_pfnHvGicSetSpi = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_DISTRIBUTOR_REG *g_pfnHvGicGetDistributorReg = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_MSI_REG *g_pfnHvGicGetMsiReg = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_ICC_REG *g_pfnHvGicGetIccReg = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_ICH_REG *g_pfnHvGicGetIchReg = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_ICV_REG *g_pfnHvGicGetIcvReg = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_REDISTRIBUTOR_REG *g_pfnHvGicGetRedistributorReg = NULL; /* Since 15.0 */
static FN_HV_GIC_SET_DISTRIBUTOR_REG *g_pfnHvGicSetDistributorReg = NULL; /* Since 15.0 */
static FN_HV_GIC_SET_MSI_REG *g_pfnHvGicSetMsiReg = NULL; /* Since 15.0 */
static FN_HV_GIC_SET_ICC_REG *g_pfnHvGicSetIccReg = NULL; /* Since 15.0 */
static FN_HV_GIC_SET_ICH_REG *g_pfnHvGicSetIchReg = NULL; /* Since 15.0 */
static FN_HV_GIC_SET_ICV_REG *g_pfnHvGicSetIcvReg = NULL; /* Since 15.0 */
static FN_HV_GIC_SET_REDISTRIBUTOR_REG *g_pfnHvGicSetRedistributorReg = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_INTID *g_pfnHvGicGetIntid = NULL; /* Since 15.0 */
/** @} */


/**
 * Import instructions.
 */
static const struct
{
    void **ppfn;         /**< The function pointer variable. */
    const char *pszName; /**< The function name. */
} g_aImports[] =
{
#define NEM_DARWIN_IMPORT(a_Pfn, a_Name) { (void **)&(a_Pfn), #a_Name }
    NEM_DARWIN_IMPORT(g_pfnHvVmConfigCreate, hv_vm_config_create),
    NEM_DARWIN_IMPORT(g_pfnHvVmConfigGetEl2Supported, hv_vm_config_get_el2_supported),
    NEM_DARWIN_IMPORT(g_pfnHvVmConfigGetEl2Enabled, hv_vm_config_get_el2_enabled),
    NEM_DARWIN_IMPORT(g_pfnHvVmConfigSetEl2Enabled, hv_vm_config_set_el2_enabled),

    NEM_DARWIN_IMPORT(g_pfnHvGicCreate, hv_gic_create),
    NEM_DARWIN_IMPORT(g_pfnHvGicReset, hv_gic_reset),
    NEM_DARWIN_IMPORT(g_pfnHvGicConfigCreate, hv_gic_config_create),
    NEM_DARWIN_IMPORT(g_pfnHvGicConfigSetDistributorBase, hv_gic_config_set_distributor_base),
    NEM_DARWIN_IMPORT(g_pfnHvGicConfigSetRedistributorBase, hv_gic_config_set_redistributor_base),
    NEM_DARWIN_IMPORT(g_pfnHvGicConfigSetMsiRegionBase, hv_gic_config_set_msi_region_base),
    NEM_DARWIN_IMPORT(g_pfnHvGicConfigSetMsiInterruptRange, hv_gic_config_set_msi_interrupt_range),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetRedistributorBase, hv_gic_get_redistributor_base),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetRedistributorRegionSize, hv_gic_get_redistributor_region_size),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetRedistributorSize, hv_gic_get_redistributor_size),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetDistributorSize, hv_gic_get_distributor_size),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetDistributorBaseAlignment, hv_gic_get_distributor_base_alignment),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetRedistributorBaseAlignment, hv_gic_get_redistributor_base_alignment),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetMsiRegionBaseAlignment, hv_gic_get_msi_region_base_alignment),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetMsiRegionSize, hv_gic_get_msi_region_size),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetSpiInterruptRange, hv_gic_get_spi_interrupt_range),
    NEM_DARWIN_IMPORT(g_pfnHvGicStateCreate, hv_gic_state_create),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetState, hv_gic_set_state),
    NEM_DARWIN_IMPORT(g_pfnHvGicStateGetSize, hv_gic_state_get_size),
    NEM_DARWIN_IMPORT(g_pfnHvGicStateGetData, hv_gic_state_get_data),
    NEM_DARWIN_IMPORT(g_pfnHvGicSendMsi, hv_gic_send_msi),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetSpi, hv_gic_set_spi),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetDistributorReg, hv_gic_get_distributor_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetMsiReg, hv_gic_get_msi_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetIccReg, hv_gic_get_icc_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetIchReg, hv_gic_get_ich_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetIcvReg, hv_gic_get_icv_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetRedistributorReg, hv_gic_get_redistributor_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetDistributorReg, hv_gic_set_distributor_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetMsiReg, hv_gic_set_msi_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetIccReg, hv_gic_set_icc_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetIchReg, hv_gic_set_ich_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetIcvReg, hv_gic_set_icv_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetRedistributorReg, hv_gic_set_redistributor_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetIntid, hv_gic_get_intid)
#undef NEM_DARWIN_IMPORT
};
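
/* nemR3DarwinLoadHv() resolves each entry of this table with RTLdrGetSymbol();
 * any import missing on the host (e.g. the GIC APIs before macOS 15.0) leaves
 * the corresponding g_pfnXxx pointer NULL, which callers use as a feature check. */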


/*
 * Let the preprocessor alias the APIs to import variables for better autocompletion.
 */
#ifndef IN_SLICKEDIT
# define hv_vm_config_create g_pfnHvVmConfigCreate
# define hv_vm_config_get_el2_supported g_pfnHvVmConfigGetEl2Supported
# define hv_vm_config_get_el2_enabled g_pfnHvVmConfigGetEl2Enabled
# define hv_vm_config_set_el2_enabled g_pfnHvVmConfigSetEl2Enabled

# define hv_gic_create g_pfnHvGicCreate
# define hv_gic_reset g_pfnHvGicReset
# define hv_gic_config_create g_pfnHvGicConfigCreate
# define hv_gic_config_set_distributor_base g_pfnHvGicConfigSetDistributorBase
# define hv_gic_config_set_redistributor_base g_pfnHvGicConfigSetRedistributorBase
# define hv_gic_config_set_msi_region_base g_pfnHvGicConfigSetMsiRegionBase
# define hv_gic_config_set_msi_interrupt_range g_pfnHvGicConfigSetMsiInterruptRange
# define hv_gic_get_redistributor_base g_pfnHvGicGetRedistributorBase
# define hv_gic_get_redistributor_region_size g_pfnHvGicGetRedistributorRegionSize
# define hv_gic_get_redistributor_size g_pfnHvGicGetRedistributorSize
# define hv_gic_get_distributor_size g_pfnHvGicGetDistributorSize
# define hv_gic_get_distributor_base_alignment g_pfnHvGicGetDistributorBaseAlignment
# define hv_gic_get_redistributor_base_alignment g_pfnHvGicGetRedistributorBaseAlignment
# define hv_gic_get_msi_region_base_alignment g_pfnHvGicGetMsiRegionBaseAlignment
# define hv_gic_get_msi_region_size g_pfnHvGicGetMsiRegionSize
# define hv_gic_get_spi_interrupt_range g_pfnHvGicGetSpiInterruptRange
# define hv_gic_state_create g_pfnHvGicStateCreate
# define hv_gic_set_state g_pfnHvGicSetState
# define hv_gic_state_get_size g_pfnHvGicStateGetSize
# define hv_gic_state_get_data g_pfnHvGicStateGetData
# define hv_gic_send_msi g_pfnHvGicSendMsi
# define hv_gic_set_spi g_pfnHvGicSetSpi
# define hv_gic_get_distributor_reg g_pfnHvGicGetDistributorReg
# define hv_gic_get_msi_reg g_pfnHvGicGetMsiReg
# define hv_gic_get_icc_reg g_pfnHvGicGetIccReg
# define hv_gic_get_ich_reg g_pfnHvGicGetIchReg
# define hv_gic_get_icv_reg g_pfnHvGicGetIcvReg
# define hv_gic_get_redistributor_reg g_pfnHvGicGetRedistributorReg
# define hv_gic_set_distributor_reg g_pfnHvGicSetDistributorReg
# define hv_gic_set_msi_reg g_pfnHvGicSetMsiReg
# define hv_gic_set_icc_reg g_pfnHvGicSetIccReg
# define hv_gic_set_ich_reg g_pfnHvGicSetIchReg
# define hv_gic_set_icv_reg g_pfnHvGicSetIcvReg
# define hv_gic_set_redistributor_reg g_pfnHvGicSetRedistributorReg
# define hv_gic_get_intid g_pfnHvGicGetIntid
#endif
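
/* Thanks to the aliases above the code below can use the official API names;
 * a call like hv_gic_create() dispatches through g_pfnHvGicCreate, and a plain
 * pointer test such as 'if (hv_gic_create)' checks whether the host has the API. */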


/** The general registers. */
static const struct
{
    hv_reg_t enmHvReg;
    uint32_t fCpumExtrn;
    uint32_t offCpumCtx;
} s_aCpumRegs[] =
{
#define CPUM_GREG_EMIT_X0_X3(a_Idx)  { HV_REG_X ## a_Idx, CPUMCTX_EXTRN_X ## a_Idx, RT_UOFFSETOF(CPUMCTX, aGRegs[a_Idx].x) }
#define CPUM_GREG_EMIT_X4_X28(a_Idx) { HV_REG_X ## a_Idx, CPUMCTX_EXTRN_X4_X28, RT_UOFFSETOF(CPUMCTX, aGRegs[a_Idx].x) }
    CPUM_GREG_EMIT_X0_X3(0),
    CPUM_GREG_EMIT_X0_X3(1),
    CPUM_GREG_EMIT_X0_X3(2),
    CPUM_GREG_EMIT_X0_X3(3),
    CPUM_GREG_EMIT_X4_X28(4),
    CPUM_GREG_EMIT_X4_X28(5),
    CPUM_GREG_EMIT_X4_X28(6),
    CPUM_GREG_EMIT_X4_X28(7),
    CPUM_GREG_EMIT_X4_X28(8),
    CPUM_GREG_EMIT_X4_X28(9),
    CPUM_GREG_EMIT_X4_X28(10),
    CPUM_GREG_EMIT_X4_X28(11),
    CPUM_GREG_EMIT_X4_X28(12),
    CPUM_GREG_EMIT_X4_X28(13),
    CPUM_GREG_EMIT_X4_X28(14),
    CPUM_GREG_EMIT_X4_X28(15),
    CPUM_GREG_EMIT_X4_X28(16),
    CPUM_GREG_EMIT_X4_X28(17),
    CPUM_GREG_EMIT_X4_X28(18),
    CPUM_GREG_EMIT_X4_X28(19),
    CPUM_GREG_EMIT_X4_X28(20),
    CPUM_GREG_EMIT_X4_X28(21),
    CPUM_GREG_EMIT_X4_X28(22),
    CPUM_GREG_EMIT_X4_X28(23),
    CPUM_GREG_EMIT_X4_X28(24),
    CPUM_GREG_EMIT_X4_X28(25),
    CPUM_GREG_EMIT_X4_X28(26),
    CPUM_GREG_EMIT_X4_X28(27),
    CPUM_GREG_EMIT_X4_X28(28),
    { HV_REG_FP,   CPUMCTX_EXTRN_FP,   RT_UOFFSETOF(CPUMCTX, aGRegs[29].x) },
    { HV_REG_LR,   CPUMCTX_EXTRN_LR,   RT_UOFFSETOF(CPUMCTX, aGRegs[30].x) },
    { HV_REG_PC,   CPUMCTX_EXTRN_PC,   RT_UOFFSETOF(CPUMCTX, Pc.u64) },
    { HV_REG_FPCR, CPUMCTX_EXTRN_FPCR, RT_UOFFSETOF(CPUMCTX, fpcr) },
    { HV_REG_FPSR, CPUMCTX_EXTRN_FPSR, RT_UOFFSETOF(CPUMCTX, fpsr) }
#undef CPUM_GREG_EMIT_X0_X3
#undef CPUM_GREG_EMIT_X4_X28
};
/** SIMD/FP registers. */
static const struct
{
    hv_simd_fp_reg_t enmHvReg;
    uint32_t offCpumCtx;
} s_aCpumFpRegs[] =
{
#define CPUM_VREG_EMIT(a_Idx) { HV_SIMD_FP_REG_Q ## a_Idx, RT_UOFFSETOF(CPUMCTX, aVRegs[a_Idx].v) }
    CPUM_VREG_EMIT(0),
    CPUM_VREG_EMIT(1),
    CPUM_VREG_EMIT(2),
    CPUM_VREG_EMIT(3),
    CPUM_VREG_EMIT(4),
    CPUM_VREG_EMIT(5),
    CPUM_VREG_EMIT(6),
    CPUM_VREG_EMIT(7),
    CPUM_VREG_EMIT(8),
    CPUM_VREG_EMIT(9),
    CPUM_VREG_EMIT(10),
    CPUM_VREG_EMIT(11),
    CPUM_VREG_EMIT(12),
    CPUM_VREG_EMIT(13),
    CPUM_VREG_EMIT(14),
    CPUM_VREG_EMIT(15),
    CPUM_VREG_EMIT(16),
    CPUM_VREG_EMIT(17),
    CPUM_VREG_EMIT(18),
    CPUM_VREG_EMIT(19),
    CPUM_VREG_EMIT(20),
    CPUM_VREG_EMIT(21),
    CPUM_VREG_EMIT(22),
    CPUM_VREG_EMIT(23),
    CPUM_VREG_EMIT(24),
    CPUM_VREG_EMIT(25),
    CPUM_VREG_EMIT(26),
    CPUM_VREG_EMIT(27),
    CPUM_VREG_EMIT(28),
    CPUM_VREG_EMIT(29),
    CPUM_VREG_EMIT(30),
    CPUM_VREG_EMIT(31)
#undef CPUM_VREG_EMIT
};
/** Debug system registers. */
static const struct
{
    hv_sys_reg_t enmHvReg;
    uint32_t offCpumCtx;
} s_aCpumDbgRegs[] =
{
#define CPUM_DBGREG_EMIT(a_BorW, a_Idx) \
    { HV_SYS_REG_DBG ## a_BorW ## CR ## a_Idx ## _EL1, RT_UOFFSETOF(CPUMCTX, a ## a_BorW ## p[a_Idx].Ctrl.u64) }, \
    { HV_SYS_REG_DBG ## a_BorW ## VR ## a_Idx ## _EL1, RT_UOFFSETOF(CPUMCTX, a ## a_BorW ## p[a_Idx].Value.u64) }
    /* Breakpoint registers. */
    CPUM_DBGREG_EMIT(B, 0),
    CPUM_DBGREG_EMIT(B, 1),
    CPUM_DBGREG_EMIT(B, 2),
    CPUM_DBGREG_EMIT(B, 3),
    CPUM_DBGREG_EMIT(B, 4),
    CPUM_DBGREG_EMIT(B, 5),
    CPUM_DBGREG_EMIT(B, 6),
    CPUM_DBGREG_EMIT(B, 7),
    CPUM_DBGREG_EMIT(B, 8),
    CPUM_DBGREG_EMIT(B, 9),
    CPUM_DBGREG_EMIT(B, 10),
    CPUM_DBGREG_EMIT(B, 11),
    CPUM_DBGREG_EMIT(B, 12),
    CPUM_DBGREG_EMIT(B, 13),
    CPUM_DBGREG_EMIT(B, 14),
    CPUM_DBGREG_EMIT(B, 15),
    /* Watchpoint registers. */
    CPUM_DBGREG_EMIT(W, 0),
    CPUM_DBGREG_EMIT(W, 1),
    CPUM_DBGREG_EMIT(W, 2),
    CPUM_DBGREG_EMIT(W, 3),
    CPUM_DBGREG_EMIT(W, 4),
    CPUM_DBGREG_EMIT(W, 5),
    CPUM_DBGREG_EMIT(W, 6),
    CPUM_DBGREG_EMIT(W, 7),
    CPUM_DBGREG_EMIT(W, 8),
    CPUM_DBGREG_EMIT(W, 9),
    CPUM_DBGREG_EMIT(W, 10),
    CPUM_DBGREG_EMIT(W, 11),
    CPUM_DBGREG_EMIT(W, 12),
    CPUM_DBGREG_EMIT(W, 13),
    CPUM_DBGREG_EMIT(W, 14),
    CPUM_DBGREG_EMIT(W, 15),
    { HV_SYS_REG_MDSCR_EL1, RT_UOFFSETOF(CPUMCTX, Mdscr.u64) }
#undef CPUM_DBGREG_EMIT
};
/** PAuth key system registers. */
static const struct
{
    hv_sys_reg_t enmHvReg;
    uint32_t offCpumCtx;
} s_aCpumPAuthKeyRegs[] =
{
    { HV_SYS_REG_APDAKEYLO_EL1, RT_UOFFSETOF(CPUMCTX, Apda.Low.u64) },
    { HV_SYS_REG_APDAKEYHI_EL1, RT_UOFFSETOF(CPUMCTX, Apda.High.u64) },
    { HV_SYS_REG_APDBKEYLO_EL1, RT_UOFFSETOF(CPUMCTX, Apdb.Low.u64) },
    { HV_SYS_REG_APDBKEYHI_EL1, RT_UOFFSETOF(CPUMCTX, Apdb.High.u64) },
    { HV_SYS_REG_APGAKEYLO_EL1, RT_UOFFSETOF(CPUMCTX, Apga.Low.u64) },
    { HV_SYS_REG_APGAKEYHI_EL1, RT_UOFFSETOF(CPUMCTX, Apga.High.u64) },
    { HV_SYS_REG_APIAKEYLO_EL1, RT_UOFFSETOF(CPUMCTX, Apia.Low.u64) },
    { HV_SYS_REG_APIAKEYHI_EL1, RT_UOFFSETOF(CPUMCTX, Apia.High.u64) },
    { HV_SYS_REG_APIBKEYLO_EL1, RT_UOFFSETOF(CPUMCTX, Apib.Low.u64) },
    { HV_SYS_REG_APIBKEYHI_EL1, RT_UOFFSETOF(CPUMCTX, Apib.High.u64) }
};
/** System registers. */
static const struct
{
    hv_sys_reg_t enmHvReg;
    uint32_t fCpumExtrn;
    uint32_t offCpumCtx;
} s_aCpumSysRegs[] =
{
    { HV_SYS_REG_SP_EL0, CPUMCTX_EXTRN_SP, RT_UOFFSETOF(CPUMCTX, aSpReg[0].u64) },
    { HV_SYS_REG_SP_EL1, CPUMCTX_EXTRN_SP, RT_UOFFSETOF(CPUMCTX, aSpReg[1].u64) },
    { HV_SYS_REG_SPSR_EL1, CPUMCTX_EXTRN_SPSR, RT_UOFFSETOF(CPUMCTX, Spsr.u64) },
    { HV_SYS_REG_ELR_EL1, CPUMCTX_EXTRN_ELR, RT_UOFFSETOF(CPUMCTX, Elr.u64) },
    { HV_SYS_REG_SCTLR_EL1, CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Sctlr.u64) },
    { HV_SYS_REG_TCR_EL1, CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Tcr.u64) },
    { HV_SYS_REG_TTBR0_EL1, CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Ttbr0.u64) },
    { HV_SYS_REG_TTBR1_EL1, CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Ttbr1.u64) },
    { HV_SYS_REG_VBAR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, VBar.u64) },
    { HV_SYS_REG_AFSR0_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Afsr0.u64) },
    { HV_SYS_REG_AFSR1_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Afsr1.u64) },
    { HV_SYS_REG_AMAIR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Amair.u64) },
    { HV_SYS_REG_CNTKCTL_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, CntKCtl.u64) },
    { HV_SYS_REG_CONTEXTIDR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, ContextIdr.u64) },
    { HV_SYS_REG_CPACR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Cpacr.u64) },
    { HV_SYS_REG_CSSELR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Csselr.u64) },
    { HV_SYS_REG_ESR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Esr.u64) },
    { HV_SYS_REG_FAR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Far.u64) },
    { HV_SYS_REG_MAIR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Mair.u64) },
    { HV_SYS_REG_PAR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Par.u64) },
    { HV_SYS_REG_TPIDRRO_EL0, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, TpIdrRoEl0.u64) },
    { HV_SYS_REG_TPIDR_EL0, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, aTpIdr[0].u64) },
    { HV_SYS_REG_TPIDR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, aTpIdr[1].u64) },
    { HV_SYS_REG_MDCCINT_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, MDccInt.u64) }

};
/** EL2 support system registers. */
static const struct
{
    uint16_t idSysReg;
    uint32_t offCpumCtx;
} s_aCpumEl2SysRegs[] =
{
    { ARMV8_AARCH64_SYSREG_CNTHCTL_EL2, RT_UOFFSETOF(CPUMCTX, CntHCtlEl2.u64) },
    { ARMV8_AARCH64_SYSREG_CNTHP_CTL_EL2, RT_UOFFSETOF(CPUMCTX, CntHpCtlEl2.u64) },
    { ARMV8_AARCH64_SYSREG_CNTHP_CVAL_EL2, RT_UOFFSETOF(CPUMCTX, CntHpCValEl2.u64) },
    { ARMV8_AARCH64_SYSREG_CNTHP_TVAL_EL2, RT_UOFFSETOF(CPUMCTX, CntHpTValEl2.u64) },
    { ARMV8_AARCH64_SYSREG_CNTVOFF_EL2, RT_UOFFSETOF(CPUMCTX, CntVOffEl2.u64) },
    { ARMV8_AARCH64_SYSREG_CPTR_EL2, RT_UOFFSETOF(CPUMCTX, CptrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_ELR_EL2, RT_UOFFSETOF(CPUMCTX, ElrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_ESR_EL2, RT_UOFFSETOF(CPUMCTX, EsrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_FAR_EL2, RT_UOFFSETOF(CPUMCTX, FarEl2.u64) },
    { ARMV8_AARCH64_SYSREG_HCR_EL2, RT_UOFFSETOF(CPUMCTX, HcrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_HPFAR_EL2, RT_UOFFSETOF(CPUMCTX, HpFarEl2.u64) },
    { ARMV8_AARCH64_SYSREG_MAIR_EL2, RT_UOFFSETOF(CPUMCTX, MairEl2.u64) },
    //{ ARMV8_AARCH64_SYSREG_MDCR_EL2, RT_UOFFSETOF(CPUMCTX, MdcrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_SCTLR_EL2, RT_UOFFSETOF(CPUMCTX, SctlrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_SPSR_EL2, RT_UOFFSETOF(CPUMCTX, SpsrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_SP_EL2, RT_UOFFSETOF(CPUMCTX, SpEl2.u64) },
    { ARMV8_AARCH64_SYSREG_TCR_EL2, RT_UOFFSETOF(CPUMCTX, TcrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_TPIDR_EL2, RT_UOFFSETOF(CPUMCTX, TpidrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_TTBR0_EL2, RT_UOFFSETOF(CPUMCTX, Ttbr0El2.u64) },
    { ARMV8_AARCH64_SYSREG_TTBR1_EL2, RT_UOFFSETOF(CPUMCTX, Ttbr1El2.u64) },
    { ARMV8_AARCH64_SYSREG_VBAR_EL2, RT_UOFFSETOF(CPUMCTX, VBarEl2.u64) },
    { ARMV8_AARCH64_SYSREG_VMPIDR_EL2, RT_UOFFSETOF(CPUMCTX, VMpidrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_VPIDR_EL2, RT_UOFFSETOF(CPUMCTX, VPidrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_VTCR_EL2, RT_UOFFSETOF(CPUMCTX, VTcrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_VTTBR_EL2, RT_UOFFSETOF(CPUMCTX, VTtbrEl2.u64) }
};
/** ID registers. */
static const struct
{
    hv_feature_reg_t enmHvReg;
    uint32_t offIdStruct;
} s_aIdRegs[] =
{
    { HV_FEATURE_REG_ID_AA64DFR0_EL1, RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Dfr0El1) },
    { HV_FEATURE_REG_ID_AA64DFR1_EL1, RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Dfr1El1) },
    { HV_FEATURE_REG_ID_AA64ISAR0_EL1, RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Isar0El1) },
    { HV_FEATURE_REG_ID_AA64ISAR1_EL1, RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Isar1El1) },
    { HV_FEATURE_REG_ID_AA64MMFR0_EL1, RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Mmfr0El1) },
    { HV_FEATURE_REG_ID_AA64MMFR1_EL1, RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Mmfr1El1) },
    { HV_FEATURE_REG_ID_AA64MMFR2_EL1, RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Mmfr2El1) },
    { HV_FEATURE_REG_ID_AA64PFR0_EL1, RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Pfr0El1) },
    { HV_FEATURE_REG_ID_AA64PFR1_EL1, RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Pfr1El1) },
    { HV_FEATURE_REG_CLIDR_EL1, RT_UOFFSETOF(CPUMIDREGS, u64RegClidrEl1) },
    { HV_FEATURE_REG_CTR_EL0, RT_UOFFSETOF(CPUMIDREGS, u64RegCtrEl0) },
    { HV_FEATURE_REG_DCZID_EL0, RT_UOFFSETOF(CPUMIDREGS, u64RegDczidEl0) }
};
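
/* These mapping tables drive the generic state-transfer loops below: each
 * offCpumCtx/offIdStruct value is added to the base of pVCpu->cpum.GstCtx (or a
 * CPUMIDREGS instance) to locate the field mirroring the corresponding HV register. */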


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/


/**
 * Converts a HV return code to a VBox status code.
 *
 * @returns VBox status code.
 * @param   hrc     The HV return code to convert.
 */
DECLINLINE(int) nemR3DarwinHvSts2Rc(hv_return_t hrc)
{
    if (hrc == HV_SUCCESS)
        return VINF_SUCCESS;

    switch (hrc)
    {
        case HV_ERROR:        return VERR_INVALID_STATE;
        case HV_BUSY:         return VERR_RESOURCE_BUSY;
        case HV_BAD_ARGUMENT: return VERR_INVALID_PARAMETER;
        case HV_NO_RESOURCES: return VERR_OUT_OF_RESOURCES;
        case HV_NO_DEVICE:    return VERR_NOT_FOUND;
        case HV_UNSUPPORTED:  return VERR_NOT_SUPPORTED;
    }

    return VERR_IPE_UNEXPECTED_STATUS;
}


/**
 * Returns a human readable string of the given exception class.
 *
 * @returns Pointer to the string matching the given EC.
 * @param   u32Ec   The exception class to return the string for.
 */
static const char *nemR3DarwinEsrEl2EcStringify(uint32_t u32Ec)
{
    switch (u32Ec)
    {
#define ARMV8_EC_CASE(a_Ec) case a_Ec: return #a_Ec
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_UNKNOWN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_TRAPPED_WFX);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCR_MRC_COPROC_15);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCRR_MRRC_COPROC15);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCR_MRC_COPROC_14);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_LDC_STC);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_SME_SVE_NEON);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_VMRS);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_PA_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_LS64_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MRRC_COPROC14);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_BTI_BRANCH_TARGET_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_ILLEGAL_EXECUTION_STATE);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_SVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_HVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_SMC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_SVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_HVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_SMC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_SYS_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_SVE_TRAPPED);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_PAUTH_NV_TRAPPED_ERET_ERETAA_ERETAB);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_TME_TSTART_INSN_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_FPAC_PA_INSN_FAILURE_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_SME_TRAPPED_SME_ACCESS);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_RME_GRANULE_PROT_CHECK_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_INSN_ABORT_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_INSN_ABORT_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_PC_ALIGNMENT_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_DATA_ABORT_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_DATA_ABORT_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SP_ALIGNMENT_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_MOPS_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_FP_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_FP_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SERROR_INTERRUPT);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_BKPT_EXCEPTION_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_BKPT_EXCEPTION_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SS_EXCEPTION_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SS_EXCEPTION_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_WATCHPOINT_EXCEPTION_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_WATCHPOINT_EXCEPTION_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_BKPT_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_VEC_CATCH_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_BRK_INSN);
#undef ARMV8_EC_CASE
        default:
            break;
    }

    return "<INVALID>";
}


/**
 * Resolves a NEM page state from the given protection flags.
 *
 * @returns NEM page state.
 * @param   fPageProt   The page protection flags.
 */
DECLINLINE(uint8_t) nemR3DarwinPageStateFromProt(uint32_t fPageProt)
{
    switch (fPageProt)
    {
        case NEM_PAGE_PROT_NONE:
            return NEM_DARWIN_PAGE_STATE_UNMAPPED;
        case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE:
            return NEM_DARWIN_PAGE_STATE_RX;
        case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE:
            return NEM_DARWIN_PAGE_STATE_RW;
        case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE:
            return NEM_DARWIN_PAGE_STATE_RWX;
        default:
            break;
    }

    AssertLogRelMsgFailed(("Invalid combination of page protection flags %#x, can't map to page state!\n", fPageProt));
    return NEM_DARWIN_PAGE_STATE_UNMAPPED;
}


/**
 * Unmaps the given guest physical address range (page aligned).
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The guest physical address to start unmapping at.
 * @param   cb          The size of the range to unmap in bytes.
 * @param   pu2State    Where to store the new state of the unmapped page, optional.
 */
DECLINLINE(int) nemR3DarwinUnmap(PVM pVM, RTGCPHYS GCPhys, size_t cb, uint8_t *pu2State)
{
    if (*pu2State <= NEM_DARWIN_PAGE_STATE_UNMAPPED)
    {
        Log5(("nemR3DarwinUnmap: %RGp == unmapped\n", GCPhys));
        *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
        return VINF_SUCCESS;
    }

    LogFlowFunc(("Unmapping %RGp LB %zu\n", GCPhys, cb));
    hv_return_t hrc = hv_vm_unmap(GCPhys, cb);
    if (RT_LIKELY(hrc == HV_SUCCESS))
    {
        STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
        if (pu2State)
            *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
        Log5(("nemR3DarwinUnmap: %RGp => unmapped\n", GCPhys));
        return VINF_SUCCESS;
    }

    STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
    LogRel(("nemR3DarwinUnmap(%RGp): failed! hrc=%#x\n",
            GCPhys, hrc));
    return VERR_NEM_IPE_6;
}


/**
 * Maps a given guest physical address range backed by the given memory with the given
 * protection flags.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The guest physical address to start mapping.
 * @param   pvRam       The R3 pointer of the memory to back the range with.
 * @param   cb          The size of the range, page aligned.
 * @param   fPageProt   The page protection flags to use for this range, combination of NEM_PAGE_PROT_XXX
 * @param   pu2State    Where to store the state for the new page, optional.
 */
DECLINLINE(int) nemR3DarwinMap(PVM pVM, RTGCPHYS GCPhys, const void *pvRam, size_t cb, uint32_t fPageProt, uint8_t *pu2State)
{
    LogFlowFunc(("Mapping %RGp LB %zu fProt=%#x\n", GCPhys, cb, fPageProt));

    Assert(fPageProt != NEM_PAGE_PROT_NONE);
    RT_NOREF(pVM);

    hv_memory_flags_t fHvMemProt = 0;
    if (fPageProt & NEM_PAGE_PROT_READ)
        fHvMemProt |= HV_MEMORY_READ;
    if (fPageProt & NEM_PAGE_PROT_WRITE)
        fHvMemProt |= HV_MEMORY_WRITE;
    if (fPageProt & NEM_PAGE_PROT_EXECUTE)
        fHvMemProt |= HV_MEMORY_EXEC;

    hv_return_t hrc = hv_vm_map((void *)pvRam, GCPhys, cb, fHvMemProt);
    if (hrc == HV_SUCCESS)
    {
        if (pu2State)
            *pu2State = nemR3DarwinPageStateFromProt(fPageProt);
        return VINF_SUCCESS;
    }

    return nemR3DarwinHvSts2Rc(hrc);
}


/**
 * Changes the protection flags for the given guest physical address range.
 *
 * @returns VBox status code.
 * @param   GCPhys      The guest physical address to start mapping.
 * @param   cb          The size of the range, page aligned.
 * @param   fPageProt   The page protection flags to use for this range, combination of NEM_PAGE_PROT_XXX
 * @param   pu2State    Where to store the state for the new page, optional.
 */
DECLINLINE(int) nemR3DarwinProtect(RTGCPHYS GCPhys, size_t cb, uint32_t fPageProt, uint8_t *pu2State)
{
    hv_memory_flags_t fHvMemProt = 0;
    if (fPageProt & NEM_PAGE_PROT_READ)
        fHvMemProt |= HV_MEMORY_READ;
    if (fPageProt & NEM_PAGE_PROT_WRITE)
        fHvMemProt |= HV_MEMORY_WRITE;
    if (fPageProt & NEM_PAGE_PROT_EXECUTE)
        fHvMemProt |= HV_MEMORY_EXEC;

    hv_return_t hrc = hv_vm_protect(GCPhys, cb, fHvMemProt);
    if (hrc == HV_SUCCESS)
    {
        if (pu2State)
            *pu2State = nemR3DarwinPageStateFromProt(fPageProt);
        return VINF_SUCCESS;
    }

    LogRel(("nemR3DarwinProtect(%RGp,%zu,%#x): failed! hrc=%#x\n",
            GCPhys, cb, fPageProt, hrc));
    return nemR3DarwinHvSts2Rc(hrc);
}


#ifdef LOG_ENABLED
/**
 * Logs the current CPU state.
 */
static void nemR3DarwinLogState(PVMCC pVM, PVMCPUCC pVCpu)
{
    if (LogIs3Enabled())
    {
        char szRegs[4096];
        DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
                        "x0=%016VR{x0} x1=%016VR{x1} x2=%016VR{x2} x3=%016VR{x3}\n"
                        "x4=%016VR{x4} x5=%016VR{x5} x6=%016VR{x6} x7=%016VR{x7}\n"
                        "x8=%016VR{x8} x9=%016VR{x9} x10=%016VR{x10} x11=%016VR{x11}\n"
                        "x12=%016VR{x12} x13=%016VR{x13} x14=%016VR{x14} x15=%016VR{x15}\n"
                        "x16=%016VR{x16} x17=%016VR{x17} x18=%016VR{x18} x19=%016VR{x19}\n"
                        "x20=%016VR{x20} x21=%016VR{x21} x22=%016VR{x22} x23=%016VR{x23}\n"
                        "x24=%016VR{x24} x25=%016VR{x25} x26=%016VR{x26} x27=%016VR{x27}\n"
                        "x28=%016VR{x28} x29=%016VR{x29} x30=%016VR{x30}\n"
                        "pc=%016VR{pc} pstate=%016VR{pstate}\n"
                        "sp_el0=%016VR{sp_el0} sp_el1=%016VR{sp_el1} elr_el1=%016VR{elr_el1}\n"
                        "sctlr_el1=%016VR{sctlr_el1} tcr_el1=%016VR{tcr_el1}\n"
                        "ttbr0_el1=%016VR{ttbr0_el1} ttbr1_el1=%016VR{ttbr1_el1}\n"
                        "vbar_el1=%016VR{vbar_el1}\n"
                        );
        if (pVM->nem.s.fEl2Enabled)
        {
            Log3(("%s\n", szRegs));
            DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
                            "sp_el2=%016VR{sp_el2} elr_el2=%016VR{elr_el2}\n"
                            "spsr_el2=%016VR{spsr_el2} tpidr_el2=%016VR{tpidr_el2}\n"
                            "sctlr_el2=%016VR{sctlr_el2} tcr_el2=%016VR{tcr_el2}\n"
                            "ttbr0_el2=%016VR{ttbr0_el2} ttbr1_el2=%016VR{ttbr1_el2}\n"
                            "esr_el2=%016VR{esr_el2} far_el2=%016VR{far_el2}\n"
                            "hcr_el2=%016VR{hcr_el2} tcr_el2=%016VR{tcr_el2}\n"
                            "vbar_el2=%016VR{vbar_el2} cptr_el2=%016VR{cptr_el2}\n"
                            );
        }
        char szInstr[256]; RT_ZERO(szInstr);
        DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
                           DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
                           szInstr, sizeof(szInstr), NULL);
        Log3(("%s%s\n", szRegs, szInstr));
    }
}
#endif /* LOG_ENABLED */


static int nemR3DarwinCopyStateFromHv(PVMCC pVM, PVMCPUCC pVCpu, uint64_t fWhat)
{
    RT_NOREF(pVM);

    hv_return_t hrc = hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CTL_EL0, &pVCpu->cpum.GstCtx.CntvCtlEl0);
    if (hrc == HV_SUCCESS)
        hrc = hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CVAL_EL0, &pVCpu->cpum.GstCtx.CntvCValEl0);

    if (   hrc == HV_SUCCESS
        && (fWhat & (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR)))
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumRegs); i++)
        {
            if (s_aCpumRegs[i].fCpumExtrn & fWhat)
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumRegs[i].offCpumCtx);
                hrc |= hv_vcpu_get_reg(pVCpu->nem.s.hVCpu, s_aCpumRegs[i].enmHvReg, pu64);
            }
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_V0_V31))
    {
        /* SIMD/FP registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumFpRegs); i++)
        {
            hv_simd_fp_uchar16_t *pu128 = (hv_simd_fp_uchar16_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumFpRegs[i].offCpumCtx);
            hrc |= hv_vcpu_get_simd_fp_reg(pVCpu->nem.s.hVCpu, s_aCpumFpRegs[i].enmHvReg, pu128);
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_SYSREG_DEBUG))
    {
        /* Debug registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumDbgRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumDbgRegs[i].offCpumCtx);
            hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumDbgRegs[i].enmHvReg, pu64);
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS))
    {
        /* PAuth key registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumPAuthKeyRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumPAuthKeyRegs[i].offCpumCtx);
            hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumPAuthKeyRegs[i].enmHvReg, pu64);
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR | CPUMCTX_EXTRN_SYSREG_MISC)))
    {
        /* System registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegs); i++)
        {
            if (s_aCpumSysRegs[i].fCpumExtrn & fWhat)
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegs[i].offCpumCtx);
                hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumSysRegs[i].enmHvReg, pu64);
            }
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_SYSREG_EL2)
        && pVM->nem.s.fEl2Enabled)
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumEl2SysRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumEl2SysRegs[i].offCpumCtx);
            hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, (hv_sys_reg_t)s_aCpumEl2SysRegs[i].idSysReg, pu64);
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_PSTATE))
    {
        uint64_t u64Tmp;
        hrc |= hv_vcpu_get_reg(pVCpu->nem.s.hVCpu, HV_REG_CPSR, &u64Tmp);
        if (hrc == HV_SUCCESS)
            pVCpu->cpum.GstCtx.fPState = (uint32_t)u64Tmp;
    }

    /* Almost done, just update extern flags. */
    pVCpu->cpum.GstCtx.fExtrn &= ~fWhat;
    if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
        pVCpu->cpum.GstCtx.fExtrn = 0;

    return nemR3DarwinHvSts2Rc(hrc);
}
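
/* Note: the 'hrc |=' accumulation above (and in the export path below) relies on
 * HV_SUCCESS being 0, so any failing call leaves a non-zero status that gets
 * converted at the end; it does not record which individual call failed. */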


/**
 * Exports the guest state to HV for execution.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling EMT.
 */
static int nemR3DarwinExportGuestState(PVMCC pVM, PVMCPUCC pVCpu)
{
    RT_NOREF(pVM);
    hv_return_t hrc = HV_SUCCESS;

    if (   (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR))
        != (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR))
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumRegs); i++)
        {
            if (!(s_aCpumRegs[i].fCpumExtrn & pVCpu->cpum.GstCtx.fExtrn))
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumRegs[i].offCpumCtx);
                hrc |= hv_vcpu_set_reg(pVCpu->nem.s.hVCpu, s_aCpumRegs[i].enmHvReg, *pu64);
            }
        }
    }

    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_V0_V31))
    {
        /* SIMD/FP registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumFpRegs); i++)
        {
            hv_simd_fp_uchar16_t *pu128 = (hv_simd_fp_uchar16_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumFpRegs[i].offCpumCtx);
            hrc |= hv_vcpu_set_simd_fp_reg(pVCpu->nem.s.hVCpu, s_aCpumFpRegs[i].enmHvReg, *pu128);
        }
    }

    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_SYSREG_DEBUG))
    {
        /* Debug registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumDbgRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumDbgRegs[i].offCpumCtx);
            hrc |= hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumDbgRegs[i].enmHvReg, *pu64);
        }
    }

    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS))
    {
        /* PAuth key registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumPAuthKeyRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumPAuthKeyRegs[i].offCpumCtx);
            hrc |= hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumPAuthKeyRegs[i].enmHvReg, *pu64);
        }
    }

    if (   hrc == HV_SUCCESS
        &&    (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR | CPUMCTX_EXTRN_SYSREG_MISC))
           != (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR | CPUMCTX_EXTRN_SYSREG_MISC))
    {
        /* System registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegs); i++)
        {
            if (!(s_aCpumSysRegs[i].fCpumExtrn & pVCpu->cpum.GstCtx.fExtrn))
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegs[i].offCpumCtx);
                hrc |= hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumSysRegs[i].enmHvReg, *pu64);
            }
        }
    }

    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_SYSREG_EL2)
        && pVM->nem.s.fEl2Enabled)
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumEl2SysRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumEl2SysRegs[i].offCpumCtx);
            hrc |= hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, (hv_sys_reg_t)s_aCpumEl2SysRegs[i].idSysReg, *pu64);
            Assert(hrc == HV_SUCCESS);
        }
    }

    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_PSTATE))
        hrc = hv_vcpu_set_reg(pVCpu->nem.s.hVCpu, HV_REG_CPSR, pVCpu->cpum.GstCtx.fPState);

    pVCpu->cpum.GstCtx.fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_NEM;
    return nemR3DarwinHvSts2Rc(hrc);
}


/**
 * Worker for nemR3NativeInit that loads the Hypervisor.framework shared library.
 *
 * @returns VBox status code.
 * @param   pErrInfo    Where to always return error info.
 */
static int nemR3DarwinLoadHv(PRTERRINFO pErrInfo)
{
    RTLDRMOD hMod = NIL_RTLDRMOD;
    static const char *s_pszHvPath = "/System/Library/Frameworks/Hypervisor.framework/Hypervisor";

    int rc = RTLdrLoadEx(s_pszHvPath, &hMod, RTLDRLOAD_FLAGS_NO_UNLOAD | RTLDRLOAD_FLAGS_NO_SUFFIX, pErrInfo);
    if (RT_SUCCESS(rc))
    {
        for (unsigned i = 0; i < RT_ELEMENTS(g_aImports); i++)
        {
            int rc2 = RTLdrGetSymbol(hMod, g_aImports[i].pszName, (void **)g_aImports[i].ppfn);
            if (RT_SUCCESS(rc2))
            {
                LogRel(("NEM: info: Found optional import Hypervisor!%s.\n",
                        g_aImports[i].pszName));
            }
            else
            {
                *g_aImports[i].ppfn = NULL;

                LogRel(("NEM: info: Failed to import Hypervisor!%s: %Rrc\n",
                        g_aImports[i].pszName, rc2));
            }
        }
        if (RT_SUCCESS(rc))
        {
            Assert(!RTErrInfoIsSet(pErrInfo));
        }

        RTLdrClose(hMod);
    }
    else
    {
        RTErrInfoAddF(pErrInfo, rc, "Failed to load Hypervisor.framework: %s: %Rrc", s_pszHvPath, rc);
        rc = VERR_NEM_INIT_FAILED;
    }

    return rc;
}


/**
 * Dumps some GIC information to the release log.
 */
static void nemR3DarwinDumpGicInfo(void)
{
    size_t val = 0;
    hv_return_t hrc = hv_gic_get_redistributor_size(&val);
    LogRel(("GICNem: hv_gic_get_redistributor_size() -> hrc=%#x / size=%zu\n", hrc, val));
    hrc = hv_gic_get_distributor_size(&val);
    LogRel(("GICNem: hv_gic_get_distributor_size() -> hrc=%#x / size=%zu\n", hrc, val));
    hrc = hv_gic_get_distributor_base_alignment(&val);
    LogRel(("GICNem: hv_gic_get_distributor_base_alignment() -> hrc=%#x / size=%zu\n", hrc, val));
    hrc = hv_gic_get_redistributor_base_alignment(&val);
    LogRel(("GICNem: hv_gic_get_redistributor_base_alignment() -> hrc=%#x / size=%zu\n", hrc, val));
    hrc = hv_gic_get_msi_region_base_alignment(&val);
    LogRel(("GICNem: hv_gic_get_msi_region_base_alignment() -> hrc=%#x / size=%zu\n", hrc, val));
    hrc = hv_gic_get_msi_region_size(&val);
    LogRel(("GICNem: hv_gic_get_msi_region_size() -> hrc=%#x / size=%zu\n", hrc, val));
    uint32_t u32SpiIntIdBase = 0;
    uint32_t cSpiIntIds = 0;
    hrc = hv_gic_get_spi_interrupt_range(&u32SpiIntIdBase, &cSpiIntIds);
    LogRel(("GICNem: hv_gic_get_spi_interrupt_range() -> hrc=%#x / SpiIntIdBase=%u, cSpiIntIds=%u\n", hrc, u32SpiIntIdBase, cSpiIntIds));

    uint32_t u32IntId = 0;
    hrc = hv_gic_get_intid(HV_GIC_INT_EL1_PHYSICAL_TIMER, &u32IntId);
    LogRel(("GICNem: hv_gic_get_intid(HV_GIC_INT_EL1_PHYSICAL_TIMER) -> hrc=%#x / IntId=%u\n", hrc, u32IntId));
    hrc = hv_gic_get_intid(HV_GIC_INT_EL1_VIRTUAL_TIMER, &u32IntId);
    LogRel(("GICNem: hv_gic_get_intid(HV_GIC_INT_EL1_VIRTUAL_TIMER) -> hrc=%#x / IntId=%u\n", hrc, u32IntId));
    hrc = hv_gic_get_intid(HV_GIC_INT_EL2_PHYSICAL_TIMER, &u32IntId);
    LogRel(("GICNem: hv_gic_get_intid(HV_GIC_INT_EL2_PHYSICAL_TIMER) -> hrc=%#x / IntId=%u\n", hrc, u32IntId));
    hrc = hv_gic_get_intid(HV_GIC_INT_MAINTENANCE, &u32IntId);
    LogRel(("GICNem: hv_gic_get_intid(HV_GIC_INT_MAINTENANCE) -> hrc=%#x / IntId=%u\n", hrc, u32IntId));
    hrc = hv_gic_get_intid(HV_GIC_INT_PERFORMANCE_MONITOR, &u32IntId);
    LogRel(("GICNem: hv_gic_get_intid(HV_GIC_INT_PERFORMANCE_MONITOR) -> hrc=%#x / IntId=%u\n", hrc, u32IntId));
}


/**
 * Sets the given SPI inside the GIC provided by Hypervisor.framework.
 *
 * @returns VBox status code.
 * @param   pVM         The VM instance.
 * @param   uIntId      The SPI ID to update.
 * @param   fAsserted   Flag whether the interrupt is asserted (true) or not (false).
 */
VMMR3_INT_DECL(int) GICR3NemSpiSet(PVMCC pVM, uint32_t uIntId, bool fAsserted)
{
    RT_NOREF(pVM);
    Assert(hv_gic_set_spi);

    hv_return_t hrc = hv_gic_set_spi(uIntId + GIC_INTID_RANGE_SPI_START, fAsserted);
    return nemR3DarwinHvSts2Rc(hrc);
}


/**
 * Sets the given PPI inside the GIC provided by Hypervisor.framework.
 *
 * @returns VBox status code.
 * @param   pVCpu       The vCPU for which the PPI state is updated.
 * @param   uIntId      The PPI ID to update.
 * @param   fAsserted   Flag whether the interrupt is asserted (true) or not (false).
 */
VMMR3_INT_DECL(int) GICR3NemPpiSet(PVMCPUCC pVCpu, uint32_t uIntId, bool fAsserted)
{
    RT_NOREF(pVCpu, uIntId, fAsserted);

    /* Should never be called as the PPIs are handled entirely in Hypervisor.framework/AppleHV. */
    AssertFailed();
    return VERR_NEM_IPE_9;
}


static int nemR3DarwinGicCreate(PVM pVM)
{
    nemR3DarwinDumpGicInfo();

    //PCFGMNODE pGicDev = CFGMR3GetChild(CFGMR3GetRoot(pVM), "Devices/gic/0");
    PCFGMNODE pGicCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "Devices/gic-nem/0/Config");
    AssertPtrReturn(pGicCfg, VERR_NEM_IPE_5);

    hv_gic_config_t hGicCfg = hv_gic_config_create();

    /*
     * Query the MMIO ranges.
     */
    RTGCPHYS GCPhysMmioBaseDist = 0;
    int rc = CFGMR3QueryU64(pGicCfg, "DistributorMmioBase", &GCPhysMmioBaseDist);
    if (RT_FAILURE(rc))
        return VMSetError(pVM, rc, RT_SRC_POS,
                          "Configuration error: Failed to get the \"DistributorMmioBase\" value\n");

    RTGCPHYS GCPhysMmioBaseReDist = 0;
    rc = CFGMR3QueryU64(pGicCfg, "RedistributorMmioBase", &GCPhysMmioBaseReDist);
    if (RT_FAILURE(rc))
        return VMSetError(pVM, rc, RT_SRC_POS,
                          "Configuration error: Failed to get the \"RedistributorMmioBase\" value\n");

    hv_return_t hrc = hv_gic_config_set_distributor_base(hGicCfg, GCPhysMmioBaseDist);
    if (hrc != HV_SUCCESS)
        return nemR3DarwinHvSts2Rc(hrc);

    hrc = hv_gic_config_set_redistributor_base(hGicCfg, GCPhysMmioBaseReDist);
    if (hrc != HV_SUCCESS)
        return nemR3DarwinHvSts2Rc(hrc);

    hrc = hv_gic_create(hGicCfg);
    os_release(hGicCfg);
    if (hrc != HV_SUCCESS)
        return nemR3DarwinHvSts2Rc(hrc);

    /* Make sure the device is not instantiated as Hypervisor.framework provides it. */
    //CFGMR3RemoveNode(pGicDev);
    return rc;
}
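
/* Note: hv_gic_create() has to happen after hv_vm_create() but before any vCPU
 * is created, which is why nemR3NativeInitAfterCPUM() below invokes this worker
 * ahead of the per-EMT hv_vcpu_create() calls (per the Apple docs remark there). */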


/**
 * Try initialize the native API.
 *
 * This may only do part of the job, more can be done in
 * nemR3NativeInitAfterCPUM() and nemR3NativeInitCompleted().
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   fFallback   Whether we're in fallback mode or use-NEM mode. In
 *                      the latter we'll fail if we cannot initialize.
 * @param   fForced     Whether the HMForced flag is set and we should
 *                      fail if we cannot initialize.
 */
int nemR3NativeInit(PVM pVM, bool fFallback, bool fForced)
{
    AssertReturn(!pVM->nem.s.fCreatedVm, VERR_WRONG_ORDER);

    /*
     * Some state init.
     */
    PCFGMNODE pCfgNem = CFGMR3GetChild(CFGMR3GetRoot(pVM), "NEM/");
    RT_NOREF(pCfgNem);

    /*
     * Error state.
     * The error message will be non-empty on failure and 'rc' will be set too.
     */
    RTERRINFOSTATIC ErrInfo;
    PRTERRINFO pErrInfo = RTErrInfoInitStatic(&ErrInfo);

    /* Resolve optional imports */
    int rc = nemR3DarwinLoadHv(pErrInfo);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Need to enable nested virt here if supported and reset the CFGM value to false
     * if not supported. This ASSUMES that NEM is initialized before CPUM.
     */
    PCFGMNODE pCfgCpum = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM/");
    hv_vm_config_t hVmCfg = NULL;

    if (   hv_vm_config_create
        && hv_vm_config_get_el2_supported)
    {
        hVmCfg = hv_vm_config_create();

        bool fHvEl2Supported = false;
        hv_return_t hrc = hv_vm_config_get_el2_supported(&fHvEl2Supported);
        if (   hrc == HV_SUCCESS
            && fHvEl2Supported)
        {
            /** @cfgm{/CPUM/NestedHWVirt, bool, false}
             * Whether to expose the hardware virtualization (EL2/VHE) feature to the guest.
             * The default is false. Only supported on M3 and later and macOS 15.0+ (Sequoia).
             */
            bool fNestedHWVirt = false;
            rc = CFGMR3QueryBoolDef(pCfgCpum, "NestedHWVirt", &fNestedHWVirt, false);
            AssertLogRelRCReturn(rc, rc);
            if (fNestedHWVirt)
            {
                hrc = hv_vm_config_set_el2_enabled(hVmCfg, fNestedHWVirt);
                if (hrc != HV_SUCCESS)
                    return VMSetError(pVM, VERR_CPUM_INVALID_HWVIRT_CONFIG, RT_SRC_POS,
                                      "Cannot enable nested virtualization (hrc=%#x)!\n", hrc);
                else
                {
                    pVM->nem.s.fEl2Enabled = true;
                    LogRel(("NEM: Enabled nested virtualization (EL2) support\n"));
                }
            }
        }
        else
        {
            /* Ensure nested virt is not set. */
            rc = CFGMR3RemoveValue(pCfgCpum, "NestedHWVirt");

            LogRel(("NEM: The host doesn't support nested virtualization! (hrc=%#x fHvEl2Supported=%RTbool)\n",
                    hrc, fHvEl2Supported));
        }
    }
    else
    {
        /* Ensure nested virt is not set. */
        rc = CFGMR3RemoveValue(pCfgCpum, "NestedHWVirt");
        LogRel(("NEM: Hypervisor.framework doesn't support nested virtualization!\n"));
    }

    hv_return_t hrc = hv_vm_create(hVmCfg);
    os_release(hVmCfg);
    if (hrc == HV_SUCCESS)
    {
        pVM->nem.s.fCreatedVm = true;
        pVM->nem.s.u64CntFrqHz = ASMReadCntFrqEl0();

        /* Will be initialized in NEMHCResumeCpuTickOnAll() before executing guest code. */
        pVM->nem.s.u64VTimerOff = 0;

        VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_NATIVE_API);
        Log(("NEM: Marked active!\n"));
        PGMR3EnableNemMode(pVM);
    }
    else
        rc = RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
                           "hv_vm_create() failed: %#x", hrc);

    /*
     * We only fail if in forced mode, otherwise just log the complaint and return.
     */
    Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API || RTErrInfoIsSet(pErrInfo));
    if (   (fForced || !fFallback)
        && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NATIVE_API)
        return VMSetError(pVM, RT_SUCCESS_NP(rc) ? VERR_NEM_NOT_AVAILABLE : rc, RT_SRC_POS, "%s", pErrInfo->pszMsg);

    if (RTErrInfoIsSet(pErrInfo))
        LogRel(("NEM: Not available: %s\n", pErrInfo->pszMsg));
    return VINF_SUCCESS;
}
1329
1330
1331/**
1332 * Worker to create the vCPU handle on the EMT running it later on (as required by HV).
1333 *
1334 * @returns VBox status code
1335 * @param pVM The VM handle.
1336 * @param pVCpu The vCPU handle.
1337 * @param idCpu ID of the CPU to create.
1338 */
1339static DECLCALLBACK(int) nemR3DarwinNativeInitVCpuOnEmt(PVM pVM, PVMCPU pVCpu, VMCPUID idCpu)
1340{
1341 if (idCpu == 0)
1342 {
1343 Assert(pVM->nem.s.hVCpuCfg == NULL);
1344
1345 /* Create a new vCPU config and query the ID registers. */
1346 pVM->nem.s.hVCpuCfg = hv_vcpu_config_create();
1347 if (!pVM->nem.s.hVCpuCfg)
1348 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
1349 "Call to hv_vcpu_config_create failed on vCPU %u", idCpu);
1350
1351 /* Query ID registers and hand them to CPUM. */
1352 CPUMIDREGS IdRegs; RT_ZERO(IdRegs);
1353 for (uint32_t i = 0; i < RT_ELEMENTS(s_aIdRegs); i++)
1354 {
1355 uint64_t *pu64 = (uint64_t *)((uint8_t *)&IdRegs + s_aIdRegs[i].offIdStruct);
1356 hv_return_t hrc = hv_vcpu_config_get_feature_reg(pVM->nem.s.hVCpuCfg, s_aIdRegs[i].enmHvReg, pu64);
1357 if (hrc != HV_SUCCESS)
1358 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
1359 "Call to hv_vcpu_get_feature_reg(, %#x, ) failed: %#x (%Rrc)", hrc, nemR3DarwinHvSts2Rc(hrc));
1360 }
1361
1362 int rc = CPUMR3PopulateFeaturesByIdRegisters(pVM, &IdRegs);
1363 if (RT_FAILURE(rc))
1364 return rc;
1365 }
1366
1367 hv_return_t hrc = hv_vcpu_create(&pVCpu->nem.s.hVCpu, &pVCpu->nem.s.pHvExit, pVM->nem.s.hVCpuCfg);
1368 if (hrc != HV_SUCCESS)
1369 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
1370 "Call to hv_vcpu_create failed on vCPU %u: %#x (%Rrc)", idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));
1371
1372 hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_MPIDR_EL1, idCpu);
1373 if (hrc != HV_SUCCESS)
1374 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
1375 "Setting MPIDR_EL1 failed on vCPU %u: %#x (%Rrc)", idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));
1376
1377 return VINF_SUCCESS;
1378}
1379
1380
1381/**
1382 * Worker to destroy the vCPU handle on the EMT running it later on (as required by HV).
1383 *
1384 * @returns VBox status code.
1385 * @param pVM The VM handle.
1386 * @param pVCpu The vCPU handle.
1387 */
1388static DECLCALLBACK(int) nemR3DarwinNativeTermVCpuOnEmt(PVM pVM, PVMCPU pVCpu)
1389{
1390 hv_return_t hrc = hv_vcpu_destroy(pVCpu->nem.s.hVCpu);
1391 Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);
1392
1393 if (pVCpu->idCpu == 0)
1394 {
1395 os_release(pVM->nem.s.hVCpuCfg);
1396 pVM->nem.s.hVCpuCfg = NULL;
1397 }
1398 return VINF_SUCCESS;
1399}
1400
1401
1402/**
1403 * This is called after CPUMR3Init is done.
1404 *
1405 * @returns VBox status code.
1406 * @param pVM The VM handle.
1407 */
1408int nemR3NativeInitAfterCPUM(PVM pVM)
1409{
1410 /*
1411 * Validate sanity.
1412 */
1413 AssertReturn(!pVM->nem.s.fCreatedEmts, VERR_WRONG_ORDER);
1414 AssertReturn(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API, VERR_WRONG_ORDER);
1415
1416 /*
1417 * Need to create the GIC here if the NEM variant is configured,
1418 * as according to the Apple docs it must exist before any vCPU is created.
1419 */
1420 if ( hv_gic_create
1421 && CFGMR3GetChild(CFGMR3GetRoot(pVM), "Devices/gic-nem/0"))
1422 {
1423 int rc = nemR3DarwinGicCreate(pVM);
1424 if (RT_FAILURE(rc))
1425 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS, "Creating the GIC failed: %Rrc", rc);
1426 }
1427
1428 /*
1429 * Setup the EMTs.
1430 */
1431 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1432 {
1433 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
1434
1435 int rc = VMR3ReqCallWait(pVM, idCpu, (PFNRT)nemR3DarwinNativeInitVCpuOnEmt, 3, pVM, pVCpu, idCpu);
1436 if (RT_FAILURE(rc))
1437 {
1438 /* Rollback. */
1439 while (idCpu--)
1440 VMR3ReqCallWait(pVM, idCpu, (PFNRT)nemR3DarwinNativeTermVCpuOnEmt, 2, pVM, pVM->apCpusR3[idCpu]);
1441
1442 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS, "Call to hv_vcpu_create failed: %Rrc", rc);
1443 }
1444 }
1445
1446 pVM->nem.s.fCreatedEmts = true;
1447 return VINF_SUCCESS;
1448}
1449
1450
1451int nemR3NativeInitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
1452{
1453 RT_NOREF(pVM, enmWhat);
1454 return VINF_SUCCESS;
1455}
1456
1457
1458int nemR3NativeTerm(PVM pVM)
1459{
1460 /*
1461 * Delete the VM.
1462 */
1463
1464 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1465 {
1466 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
1467
1468 /*
1469 * Apple's documentation states that the vCPU should be destroyed
1470 * on the thread running the vCPU but as all the other EMTs are gone
1471 * at this point, destroying the VM would hang.
1472 *
1473 * We seem to be in luck here though, as destroying apparently works
1474 * from EMT(0) as well.
1475 */
1476 hv_return_t hrc = hv_vcpu_destroy(pVCpu->nem.s.hVCpu);
1477 Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);
1478 }
1479
1480 pVM->nem.s.fCreatedEmts = false;
1481 if (pVM->nem.s.fCreatedVm)
1482 {
1483 hv_return_t hrc = hv_vm_destroy();
1484 if (hrc != HV_SUCCESS)
1485 LogRel(("NEM: hv_vm_destroy() failed with %#x\n", hrc));
1486
1487 pVM->nem.s.fCreatedVm = false;
1488 }
1489 return VINF_SUCCESS;
1490}
1491
1492
1493/**
1494 * VM reset notification.
1495 *
1496 * @param pVM The cross context VM structure.
1497 */
1498void nemR3NativeReset(PVM pVM)
1499{
1500 RT_NOREF(pVM);
1501}
1502
1503
1504/**
1505 * Reset CPU due to INIT IPI or hot (un)plugging.
1506 *
1507 * @param pVCpu The cross context virtual CPU structure of the CPU being
1508 * reset.
1509 * @param fInitIpi Whether this is the INIT IPI or hot (un)plugging case.
1510 */
1511void nemR3NativeResetCpu(PVMCPU pVCpu, bool fInitIpi)
1512{
1513 RT_NOREF(pVCpu, fInitIpi);
1514}
1515
1516
1517/**
1518 * Returns the byte size from the given access SAS value.
1519 *
1520 * @returns Number of bytes to transfer.
1521 * @param uSas The SAS value to convert.
1522 */
1523DECLINLINE(size_t) nemR3DarwinGetByteCountFromSas(uint8_t uSas)
1524{
1525 switch (uSas)
1526 {
1527 case ARMV8_EC_ISS_DATA_ABRT_SAS_BYTE: return sizeof(uint8_t);
1528 case ARMV8_EC_ISS_DATA_ABRT_SAS_HALFWORD: return sizeof(uint16_t);
1529 case ARMV8_EC_ISS_DATA_ABRT_SAS_WORD: return sizeof(uint32_t);
1530 case ARMV8_EC_ISS_DATA_ABRT_SAS_DWORD: return sizeof(uint64_t);
1531 default:
1532 AssertReleaseFailed();
1533 }
1534
1535 return 0;
1536}
1537
1538
1539/**
1540 * Sets the given general purpose register to the given value.
1541 *
1542 * @param pVCpu The cross context virtual CPU structure of the
1543 * calling EMT.
1544 * @param uReg The register index.
1545 * @param f64BitReg Flag whether to operate on a 64-bit or 32-bit register.
1546 * @param fSignExtend Flag whether to sign extend the value.
1547 * @param u64Val The value.
1548 */
1549DECLINLINE(void) nemR3DarwinSetGReg(PVMCPU pVCpu, uint8_t uReg, bool f64BitReg, bool fSignExtend, uint64_t u64Val)
1550{
1551 AssertReturnVoid(uReg < 31);
1552
1553 if (f64BitReg)
1554 pVCpu->cpum.GstCtx.aGRegs[uReg].x = fSignExtend ? (int64_t)u64Val : u64Val;
1555 else
1556 pVCpu->cpum.GstCtx.aGRegs[uReg].w = fSignExtend ? (int32_t)u64Val : u64Val; /** @todo Does this clear the upper half on real hardware? */
1557
1558 /* Mark the register as not extern anymore. */
1559 switch (uReg)
1560 {
1561 case 0:
1562 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X0;
1563 break;
1564 case 1:
1565 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X1;
1566 break;
1567 case 2:
1568 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X2;
1569 break;
1570 case 3:
1571 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X3;
1572 break;
1573 default:
1574 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_X4_X28));
1575 /** @todo We need to import all missing registers in order to clear this flag (or just set it in HV from here). */
1576 }
1577}
1578
1579
1580/**
1581 * Gets the given general purpose register and returns the value.
1582 *
1583 * @returns Value from the given register.
1584 * @param pVCpu The cross context virtual CPU structure of the
1585 * calling EMT.
1586 * @param uReg The register index.
1587 */
1588DECLINLINE(uint64_t) nemR3DarwinGetGReg(PVMCPU pVCpu, uint8_t uReg)
1589{
1590 AssertReturn(uReg <= ARMV8_AARCH64_REG_ZR, 0);
1591
1592 if (uReg == ARMV8_AARCH64_REG_ZR)
1593 return 0;
1594
1595 /** @todo Import the register if extern. */
1596 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_GPRS_MASK));
1597
1598 return pVCpu->cpum.GstCtx.aGRegs[uReg].x;
1599}
1600
1601
1602/**
1603 * Works on the data abort exception (which will be a MMIO access most of the time).
1604 *
1605 * @returns VBox strict status code.
1606 * @param pVM The cross context VM structure.
1607 * @param pVCpu The cross context virtual CPU structure of the
1608 * calling EMT.
1609 * @param uIss The instruction specific syndrome value.
1610 * @param fInsn32Bit Flag whether the exception was caused by a 32-bit or 16-bit instruction.
1611 * @param GCPtrDataAbrt The virtual GC address causing the data abort.
1612 * @param GCPhysDataAbrt The physical GC address which caused the data abort.
1613 */
1614static VBOXSTRICTRC nemR3DarwinHandleExitExceptionDataAbort(PVM pVM, PVMCPU pVCpu, uint32_t uIss, bool fInsn32Bit,
1615 RTGCPTR GCPtrDataAbrt, RTGCPHYS GCPhysDataAbrt)
1616{
1617 bool fIsv = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_ISV);
1618 bool fL2Fault = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_S1PTW);
1619 bool fWrite = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_WNR);
1620 bool f64BitReg = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SF);
1621 bool fSignExtend = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SSE);
1622 uint8_t uReg = ARMV8_EC_ISS_DATA_ABRT_SRT_GET(uIss);
1623 uint8_t uAcc = ARMV8_EC_ISS_DATA_ABRT_SAS_GET(uIss);
1624 size_t cbAcc = nemR3DarwinGetByteCountFromSas(uAcc);
1625 LogFlowFunc(("fIsv=%RTbool fL2Fault=%RTbool fWrite=%RTbool f64BitReg=%RTbool fSignExtend=%RTbool uReg=%u uAcc=%u GCPtrDataAbrt=%RGv GCPhysDataAbrt=%RGp\n",
1626 fIsv, fL2Fault, fWrite, f64BitReg, fSignExtend, uReg, uAcc, GCPtrDataAbrt, GCPhysDataAbrt));
1627
1628 RT_NOREF(fL2Fault, GCPtrDataAbrt);
1629
1630 if (fWrite)
1631 {
1632 /*
1633 * Check whether this is one of the dirty tracked regions, mark it as dirty
1634 * and enable write support for this region again.
1635 *
1636 * This is required for proper VRAM tracking, or the display might not get updated.
1637 * It is impossible to use the generic PGM facility here, as it operates on guest page sizes,
1638 * while setting protection flags with Hypervisor.framework only works on host page sized regions,
1639 * so we have to cook our own. Additionally the VRAM region is marked as prefetchable (write-back),
1640 * which doesn't produce a valid instruction syndrome, requiring us to restart the instruction after
1641 * enabling write access again (due to a missing interpreter right now).
1642 */
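/* Note that a write fault re-enables writes for the whole tracked region, which is then
 * reported dirty wholesale by NEMR3PhysMmio2QueryAndResetDirtyBitmap() further down. */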
1643 for (uint32_t idSlot = 0; idSlot < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking); idSlot++)
1644 {
1645 PNEMHVMMIO2REGION pMmio2Region = &pVM->nem.s.aMmio2DirtyTracking[idSlot];
1646
1647 if ( GCPhysDataAbrt >= pMmio2Region->GCPhysStart
1648 && GCPhysDataAbrt <= pMmio2Region->GCPhysLast)
1649 {
1650 pMmio2Region->fDirty = true;
1651
1652 uint8_t u2State;
1653 int rc = nemR3DarwinProtect(pMmio2Region->GCPhysStart, pMmio2Region->GCPhysLast - pMmio2Region->GCPhysStart + 1,
1654 NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE | NEM_PAGE_PROT_WRITE, &u2State);
1655
1656 /* Restart the instruction if there is no instruction syndrome available. */
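/* Returning success without advancing PC makes the vCPU re-execute the
 * faulting store, which then hits the now-writable mapping. */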
1657 if (RT_FAILURE(rc) || !fIsv)
1658 return rc;
1659 }
1660 }
1661 }
1662
1663 AssertReturn(fIsv, VERR_NOT_SUPPORTED); /** @todo Implement using IEM when this should occur. */
1664
1665 EMHistoryAddExit(pVCpu,
1666 fWrite
1667 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
1668 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
1669 pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());
1670
1671 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1672 uint64_t u64Val = 0;
1673 if (fWrite)
1674 {
1675 u64Val = nemR3DarwinGetGReg(pVCpu, uReg);
1676 rcStrict = PGMPhysWrite(pVM, GCPhysDataAbrt, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
1677 Log4(("MmioExit/%u: %08RX64: WRITE %#RGp LB %u, %.*Rhxs -> rcStrict=%Rrc\n",
1678 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhysDataAbrt, cbAcc, cbAcc,
1679 &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
1680 }
1681 else
1682 {
1683 rcStrict = PGMPhysRead(pVM, GCPhysDataAbrt, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
1684 Log4(("MmioExit/%u: %08RX64: READ %#RGp LB %u -> %.*Rhxs rcStrict=%Rrc\n",
1685 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhysDataAbrt, cbAcc, cbAcc,
1686 &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
1687 if (rcStrict == VINF_SUCCESS)
1688 nemR3DarwinSetGReg(pVCpu, uReg, f64BitReg, fSignExtend, u64Val);
1689 }
1690
1691 if (rcStrict == VINF_SUCCESS)
1692 pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);
1693
1694 return rcStrict;
1695}
1696
1697
1698/**
1699 * Works on the trapped MRS, MSR and system instruction exception.
1700 *
1701 * @returns VBox strict status code.
1702 * @param pVM The cross context VM structure.
1703 * @param pVCpu The cross context virtual CPU structure of the
1704 * calling EMT.
1705 * @param uIss The instruction specific syndrome value.
1706 * @param fInsn32Bit Flag whether the exception was caused by a 32-bit or 16-bit instruction.
1707 */
1708static VBOXSTRICTRC nemR3DarwinHandleExitExceptionTrappedSysInsn(PVM pVM, PVMCPU pVCpu, uint32_t uIss, bool fInsn32Bit)
1709{
1710 bool fRead = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_DIRECTION_IS_READ(uIss);
1711 uint8_t uCRm = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_CRM_GET(uIss);
1712 uint8_t uReg = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_RT_GET(uIss);
1713 uint8_t uCRn = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_CRN_GET(uIss);
1714 uint8_t uOp1 = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP1_GET(uIss);
1715 uint8_t uOp2 = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP2_GET(uIss);
1716 uint8_t uOp0 = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP0_GET(uIss);
1717 uint16_t idSysReg = ARMV8_AARCH64_SYSREG_ID_CREATE(uOp0, uOp1, uCRn, uCRm, uOp2);
1718 LogFlowFunc(("fRead=%RTbool uCRm=%u uReg=%u uCRn=%u uOp1=%u uOp2=%u uOp0=%u idSysReg=%#x\n",
1719 fRead, uCRm, uReg, uCRn, uOp1, uOp2, uOp0, idSysReg));
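/* idSysReg packs op0/op1/CRn/CRm/op2 just like the MRS/MSR operand encoding; e.g. PMCCNTR_EL0
 * (op0=3, op1=3, CRn=9, CRm=13, op2=0) produces the ID matched against below. */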
1720
1721 /** @todo EMEXITTYPE_MSR_READ/EMEXITTYPE_MSR_WRITE are misnomers. */
1722 EMHistoryAddExit(pVCpu,
1723 fRead
1724 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ)
1725 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE),
1726 pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());
1727
1728 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1729 uint64_t u64Val = 0;
1730 if (fRead)
1731 {
1732 RT_NOREF(pVM);
1733
1734 /** @todo Windows assumes a working PMU which is not available on AppleSilicon. It seems to use it
1735 * to determine the base frequency (because without the * 100 multiplier the frequency is
1736 * detected as 24MHz). This needs to be moved into a separate PMU emulation (to be created) for
1737 * all backends to share (Linux/KVM when running on AppleSilicon hardware). */
1738 if ( idSysReg == ARMV8_AARCH64_SYSREG_PMCR_EL0
1739 || idSysReg == ARMV8_AARCH64_SYSREG_PMCCNTR_EL0)
1740 {
1741 if (idSysReg == ARMV8_AARCH64_SYSREG_PMCCNTR_EL0)
1742 {
1743 u64Val = ASMReadTSC() * 100;
1744 }
1745 else
1746 u64Val = 0;
1747 }
1748 else
1749 rcStrict = CPUMQueryGuestSysReg(pVCpu, idSysReg, &u64Val);
1750 Log4(("SysInsnExit/%u: %08RX64: READ %u:%u:%u:%u:%u -> %#RX64 rcStrict=%Rrc\n",
1751 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, uOp0, uOp1, uCRn, uCRm, uOp2, u64Val,
1752 VBOXSTRICTRC_VAL(rcStrict) ));
1753 if (rcStrict == VINF_SUCCESS)
1754 nemR3DarwinSetGReg(pVCpu, uReg, true /*f64BitReg*/, false /*fSignExtend*/, u64Val);
1755 }
1756 else
1757 {
1758 u64Val = nemR3DarwinGetGReg(pVCpu, uReg);
1759 rcStrict = CPUMSetGuestSysReg(pVCpu, idSysReg, u64Val);
1760 Log4(("SysInsnExit/%u: %08RX64: WRITE %u:%u:%u:%u:%u %#RX64 -> rcStrict=%Rrc\n",
1761 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, uOp0, uOp1, uCRn, uCRm, uOp2, u64Val,
1762 VBOXSTRICTRC_VAL(rcStrict) ));
1763 }
1764
1765 if (rcStrict == VINF_SUCCESS)
1766 pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);
1767
1768 return rcStrict;
1769}
1770
1771
1772/**
1773 * Works on the trapped HVC instruction exception.
1774 *
1775 * @returns VBox strict status code.
1776 * @param pVM The cross context VM structure.
1777 * @param pVCpu The cross context virtual CPU structure of the
1778 * calling EMT.
1779 * @param uIss The instruction specific syndrome value.
1780 * @param fAdvancePc Flag whether to advance the guest program counter.
1781 */
1782static VBOXSTRICTRC nemR3DarwinHandleExitExceptionTrappedHvcInsn(PVM pVM, PVMCPU pVCpu, uint32_t uIss, bool fAdvancePc = false)
1783{
1784 uint16_t u16Imm = ARMV8_EC_ISS_AARCH64_TRAPPED_HVC_INSN_IMM_GET(uIss);
1785 LogFlowFunc(("u16Imm=%#RX16\n", u16Imm));
1786
1787#if 0 /** @todo For later */
1788 EMHistoryAddExit(pVCpu,
1789 fRead
1790 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ)
1791 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE),
1792 pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());
1793#endif
1794
1795 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1796 if (u16Imm == 0)
1797 {
1798 /** @todo Raise an exception to EL1 if PSCI is not configured. */
1799 /** @todo Need a generic mechanism to hand this off to, GIM maybe? */
1800 uint32_t uFunId = pVCpu->cpum.GstCtx.aGRegs[ARMV8_AARCH64_REG_X0].w;
1801 bool fHvc64 = RT_BOOL(uFunId & ARM_SMCCC_FUNC_ID_64BIT); RT_NOREF(fHvc64);
1802 uint32_t uEntity = ARM_SMCCC_FUNC_ID_ENTITY_GET(uFunId);
1803 uint32_t uFunNum = ARM_SMCCC_FUNC_ID_NUM_GET(uFunId);
1804 if (uEntity == ARM_SMCCC_FUNC_ID_ENTITY_STD_SEC_SERVICE)
1805 {
1806 switch (uFunNum)
1807 {
1808 case ARM_PSCI_FUNC_ID_PSCI_VERSION:
1809 nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_FUNC_ID_PSCI_VERSION_SET(1, 2));
1810 break;
1811 case ARM_PSCI_FUNC_ID_SYSTEM_OFF:
1812 rcStrict = VMR3PowerOff(pVM->pUVM);
1813 break;
1814 case ARM_PSCI_FUNC_ID_SYSTEM_RESET:
1815 case ARM_PSCI_FUNC_ID_SYSTEM_RESET2:
1816 {
1817 bool fHaltOnReset;
1818 int rc = CFGMR3QueryBool(CFGMR3GetChild(CFGMR3GetRoot(pVM), "PDM"), "HaltOnReset", &fHaltOnReset);
1819 if (RT_SUCCESS(rc) && fHaltOnReset)
1820 {
1821 Log(("nemR3DarwinHandleExitExceptionTrappedHvcInsn: Halt On Reset!\n"));
1822 rcStrict = VINF_EM_HALT;
1823 }
1824 else
1825 {
1826 /** @todo pVM->pdm.s.fResetFlags = fFlags; */
1827 VM_FF_SET(pVM, VM_FF_RESET);
1828 rcStrict = VINF_EM_RESET;
1829 }
1830 break;
1831 }
1832 case ARM_PSCI_FUNC_ID_CPU_ON:
1833 {
1834 uint64_t u64TgtCpu = nemR3DarwinGetGReg(pVCpu, ARMV8_AARCH64_REG_X1);
1835 RTGCPHYS GCPhysExecAddr = nemR3DarwinGetGReg(pVCpu, ARMV8_AARCH64_REG_X2);
1836 uint64_t u64CtxId = nemR3DarwinGetGReg(pVCpu, ARMV8_AARCH64_REG_X3);
1837 VMMR3CpuOn(pVM, u64TgtCpu & 0xff, GCPhysExecAddr, u64CtxId);
1838 nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, true /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_STS_SUCCESS);
1839 break;
1840 }
1841 case ARM_PSCI_FUNC_ID_PSCI_FEATURES:
1842 {
1843 uint32_t u32FunNum = (uint32_t)nemR3DarwinGetGReg(pVCpu, ARMV8_AARCH64_REG_X1);
1844 switch (u32FunNum)
1845 {
1846 case ARM_PSCI_FUNC_ID_PSCI_VERSION:
1847 case ARM_PSCI_FUNC_ID_SYSTEM_OFF:
1848 case ARM_PSCI_FUNC_ID_SYSTEM_RESET:
1849 case ARM_PSCI_FUNC_ID_SYSTEM_RESET2:
1850 case ARM_PSCI_FUNC_ID_CPU_ON:
1851 case ARM_PSCI_FUNC_ID_MIGRATE_INFO_TYPE:
1852 nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0,
1853 false /*f64BitReg*/, false /*fSignExtend*/,
1854 (uint64_t)ARM_PSCI_STS_SUCCESS);
1855 break;
1856 default:
1857 nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0,
1858 false /*f64BitReg*/, false /*fSignExtend*/,
1859 (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
1860 }
1861 break;
1862 }
1863 case ARM_PSCI_FUNC_ID_MIGRATE_INFO_TYPE:
1864 nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_MIGRATE_INFO_TYPE_TOS_NOT_PRESENT);
1865 break;
1866 default:
1867 nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
1868 }
1869 }
1870 else
1871 nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
1872 }
1873
1874 /** @todo What to do if immediate is != 0? */
1875
1876 if ( rcStrict == VINF_SUCCESS
1877 && fAdvancePc)
1878 pVCpu->cpum.GstCtx.Pc.u64 += sizeof(uint32_t);
1879
1880 return rcStrict;
1881}
1882
1883
1884/**
1885 * Handles an exception VM exit.
1886 *
1887 * @returns VBox strict status code.
1888 * @param pVM The cross context VM structure.
1889 * @param pVCpu The cross context virtual CPU structure of the
1890 * calling EMT.
1891 * @param pExit Pointer to the exit information.
1892 */
1893static VBOXSTRICTRC nemR3DarwinHandleExitException(PVM pVM, PVMCPU pVCpu, const hv_vcpu_exit_t *pExit)
1894{
1895 uint32_t uEc = ARMV8_ESR_EL2_EC_GET(pExit->exception.syndrome);
1896 uint32_t uIss = ARMV8_ESR_EL2_ISS_GET(pExit->exception.syndrome);
1897 bool fInsn32Bit = ARMV8_ESR_EL2_IL_IS_32BIT(pExit->exception.syndrome);
1898
1899 LogFlowFunc(("pVM=%p pVCpu=%p{.idCpu=%u} uEc=%u{%s} uIss=%#RX32 fInsn32Bit=%RTbool\n",
1900 pVM, pVCpu, pVCpu->idCpu, uEc, nemR3DarwinEsrEl2EcStringify(uEc), uIss, fInsn32Bit));
1901
1902 switch (uEc)
1903 {
1904 case ARMV8_ESR_EL2_DATA_ABORT_FROM_LOWER_EL:
1905 return nemR3DarwinHandleExitExceptionDataAbort(pVM, pVCpu, uIss, fInsn32Bit, pExit->exception.virtual_address,
1906 pExit->exception.physical_address);
1907 case ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_SYS_INSN:
1908 return nemR3DarwinHandleExitExceptionTrappedSysInsn(pVM, pVCpu, uIss, fInsn32Bit);
1909 case ARMV8_ESR_EL2_EC_AARCH64_HVC_INSN:
1910 return nemR3DarwinHandleExitExceptionTrappedHvcInsn(pVM, pVCpu, uIss);
1911 case ARMV8_ESR_EL2_EC_AARCH64_SMC_INSN:
1912 return nemR3DarwinHandleExitExceptionTrappedHvcInsn(pVM, pVCpu, uIss, true);
1913 case ARMV8_ESR_EL2_EC_TRAPPED_WFX:
1914 {
1915 /* No need to halt if there is an interrupt pending already. */
1916 if (VMCPU_FF_IS_ANY_SET(pVCpu, (VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ)))
1917 {
1918 LogFlowFunc(("IRQ | FIQ set => VINF_SUCCESS\n"));
1919 pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);
1920 return VINF_SUCCESS;
1921 }
1922
1923 /* Set the vTimer expiration in order to get out of the halt at the right point in time. */
1924 if ( (pVCpu->cpum.GstCtx.CntvCtlEl0 & ARMV8_CNTV_CTL_EL0_AARCH64_ENABLE)
1925 && !(pVCpu->cpum.GstCtx.CntvCtlEl0 & ARMV8_CNTV_CTL_EL0_AARCH64_IMASK))
1926 {
1927 uint64_t cTicksVTimer = mach_absolute_time() - pVM->nem.s.u64VTimerOff;
1928
1929 /* Check whether it expired and start executing guest code. */
1930 if (cTicksVTimer >= pVCpu->cpum.GstCtx.CntvCValEl0)
1931 {
1932 LogFlowFunc(("Guest timer expired (cTicksVTimer=%RU64 CntvCValEl0=%RU64) => VINF_SUCCESS\n",
1933 cTicksVTimer, pVCpu->cpum.GstCtx.CntvCValEl0));
1934 pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);
1935 return VINF_SUCCESS;
1936 }
1937
1938 uint64_t cTicksVTimerToExpire = pVCpu->cpum.GstCtx.CntvCValEl0 - cTicksVTimer;
1939 uint64_t cNanoSecsVTimerToExpire = ASMMultU64ByU32DivByU32(cTicksVTimerToExpire, RT_NS_1SEC, (uint32_t)pVM->nem.s.u64CntFrqHz);
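/* I.e. cNanoSecs = cTicks * 10^9 / CNTFRQ; with the 24MHz counter frequency typical for
 * Apple Silicon, 48000 remaining ticks work out to the 2ms threshold checked below. */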
1940
1941 /*
1942 * Our halt method doesn't work with sub-millisecond granularity at the moment, causing a huge slowdown
1943 * and scheduling overhead which would increase the wakeup latency.
1944 * So we only halt when the expiry is far enough away (this needs more experimentation, but the 2ms
1945 * threshold below turned out to be a good compromise between CPU load when the guest is idle and performance).
1946 */
1947 if (cNanoSecsVTimerToExpire < 2 * RT_NS_1MS)
1948 {
1949 LogFlowFunc(("Guest timer expiration < 2ms (cNanoSecsVTimerToExpire=%RU64) => VINF_SUCCESS\n",
1950 cNanoSecsVTimerToExpire));
1951 pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);
1952 return VINF_SUCCESS;
1953 }
1954
1955 LogFlowFunc(("Set vTimer activation to cNanoSecsVTimerToExpire=%#RX64 (CntvCValEl0=%#RX64, u64VTimerOff=%#RX64 cTicksVTimer=%#RX64 u64CntFrqHz=%#RX64)\n",
1956 cNanoSecsVTimerToExpire, pVCpu->cpum.GstCtx.CntvCValEl0, pVM->nem.s.u64VTimerOff, cTicksVTimer, pVM->nem.s.u64CntFrqHz));
1957 TMCpuSetVTimerNextActivation(pVCpu, cNanoSecsVTimerToExpire);
1958 }
1959 else
1960 TMCpuSetVTimerNextActivation(pVCpu, UINT64_MAX);
1961
1962 pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);
1963 return VINF_EM_HALT;
1964 }
1965 case ARMV8_ESR_EL2_EC_AARCH64_BRK_INSN:
1966 {
1967 VBOXSTRICTRC rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.GstCtx);
1968 /** @todo Forward genuine guest traps to the guest by either single stepping instruction with debug exception trapping turned off
1969 * or create instruction interpreter and inject exception ourselves. */
1970 Assert(rcStrict == VINF_EM_DBG_BREAKPOINT);
1971 return rcStrict;
1972 }
1973 case ARMV8_ESR_EL2_SS_EXCEPTION_FROM_LOWER_EL:
1974 return VINF_EM_DBG_STEPPED;
1975 case ARMV8_ESR_EL2_EC_UNKNOWN:
1976 default:
1977 LogRel(("NEM/Darwin: Unknown Exception Class in syndrome: uEc=%u{%s} uIss=%#RX32 fInsn32Bit=%RTbool\n",
1978 uEc, nemR3DarwinEsrEl2EcStringify(uEc), uIss, fInsn32Bit));
1979 AssertReleaseFailed();
1980 return VERR_NOT_IMPLEMENTED;
1981 }
1982
1983 return VINF_SUCCESS;
1984}
1985
1986
1987/**
1988 * Handles an exit from hv_vcpu_run().
1989 *
1990 * @returns VBox strict status code.
1991 * @param pVM The cross context VM structure.
1992 * @param pVCpu The cross context virtual CPU structure of the
1993 * calling EMT.
1994 */
1995static VBOXSTRICTRC nemR3DarwinHandleExit(PVM pVM, PVMCPU pVCpu)
1996{
1997 int rc = nemR3DarwinCopyStateFromHv(pVM, pVCpu, CPUMCTX_EXTRN_ALL);
1998 if (RT_FAILURE(rc))
1999 return rc;
2000
2001#ifdef LOG_ENABLED
2002 if (LogIs3Enabled())
2003 nemR3DarwinLogState(pVM, pVCpu);
2004#endif
2005
2006 hv_vcpu_exit_t *pExit = pVCpu->nem.s.pHvExit;
2007 switch (pExit->reason)
2008 {
2009 case HV_EXIT_REASON_CANCELED:
2010 return VINF_EM_RAW_INTERRUPT;
2011 case HV_EXIT_REASON_EXCEPTION:
2012 return nemR3DarwinHandleExitException(pVM, pVCpu, pExit);
2013 case HV_EXIT_REASON_VTIMER_ACTIVATED:
2014 {
2015 LogFlowFunc(("vTimer got activated\n"));
2016 TMCpuSetVTimerNextActivation(pVCpu, UINT64_MAX);
2017 pVCpu->nem.s.fVTimerActivated = true;
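/* Assert the virtual timer PPI (INTID 27 in the usual GIC assignment) so the guest sees the expiry. */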
2018 return GICPpiSet(pVCpu, pVM->nem.s.u32GicPpiVTimer, true /*fAsserted*/);
2019 }
2020 default:
2021 AssertReleaseFailed();
2022 break;
2023 }
2024
2025 return VERR_INVALID_STATE;
2026}
2027
2028
2029/**
2030 * Runs the guest once until an exit occurs.
2031 *
2032 * @returns HV status code.
2033 * @param pVM The cross context VM structure.
2034 * @param pVCpu The cross context virtual CPU structure.
2035 */
2036static hv_return_t nemR3DarwinRunGuest(PVM pVM, PVMCPU pVCpu)
2037{
2038 TMNotifyStartOfExecution(pVM, pVCpu);
2039
2040 hv_return_t hrc = hv_vcpu_run(pVCpu->nem.s.hVCpu);
2041
2042 TMNotifyEndOfExecution(pVM, pVCpu, ASMReadTSC());
2043
2044 return hrc;
2045}
2046
2047
2048/**
2049 * Prepares the VM to run the guest.
2050 *
2051 * @returns Strict VBox status code.
2052 * @param pVM The cross context VM structure.
2053 * @param pVCpu The cross context virtual CPU structure.
2054 * @param fSingleStepping Flag whether we run in single stepping mode.
2055 */
2056static VBOXSTRICTRC nemR3DarwinPreRunGuest(PVM pVM, PVMCPU pVCpu, bool fSingleStepping)
2057{
2058#ifdef LOG_ENABLED
2059 bool fIrq = false;
2060 bool fFiq = false;
2061
2062 if (LogIs3Enabled())
2063 nemR3DarwinLogState(pVM, pVCpu);
2064#endif
2065
2066 int rc = nemR3DarwinExportGuestState(pVM, pVCpu);
2067 AssertRCReturn(rc, rc);
2068
2069 /* In single stepping mode we will re-read SPSR and MDSCR and enable the software step bits. */
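/* Once both SPSR_EL2.SS and MDSCR_EL1.SS are set, the CPU takes a software step exception
 * after retiring a single instruction, which comes back to us as an exit. */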
2070 if (fSingleStepping)
2071 {
2072 uint64_t u64Tmp;
2073 hv_return_t hrc = hv_vcpu_get_reg(pVCpu->nem.s.hVCpu, HV_REG_CPSR, &u64Tmp);
2074 if (hrc == HV_SUCCESS)
2075 {
2076 u64Tmp |= ARMV8_SPSR_EL2_AARCH64_SS;
2077 hrc = hv_vcpu_set_reg(pVCpu->nem.s.hVCpu, HV_REG_CPSR, u64Tmp);
2078 }
2079
2080 hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_MDSCR_EL1, &u64Tmp);
2081 if (hrc == HV_SUCCESS)
2082 {
2083 u64Tmp |= ARMV8_MDSCR_EL1_AARCH64_SS;
2084 hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_MDSCR_EL1, u64Tmp);
2085 }
2086
2087 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
2088 }
2089
2090 /* Check whether the vTimer interrupt was handled by the guest and we can unmask the vTimer. */
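/* CNTV_CTL_EL0 reading ENABLE+ISTATUS without IMASK means the timer condition is still live;
 * anything else means the guest has handled or masked it and the PPI can be deasserted. */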
2091 if (pVCpu->nem.s.fVTimerActivated)
2092 {
2093 /* Read the CNTV_CTL_EL0 register. */
2094 uint64_t u64CntvCtl = 0;
2095
2096 hv_return_t hrc = hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CTL_EL0, &u64CntvCtl);
2097 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
2098
2099 if ( (u64CntvCtl & (ARMV8_CNTV_CTL_EL0_AARCH64_ENABLE | ARMV8_CNTV_CTL_EL0_AARCH64_IMASK | ARMV8_CNTV_CTL_EL0_AARCH64_ISTATUS))
2100 != (ARMV8_CNTV_CTL_EL0_AARCH64_ENABLE | ARMV8_CNTV_CTL_EL0_AARCH64_ISTATUS))
2101 {
2102 /* Clear the interrupt. */
2103 GICPpiSet(pVCpu, pVM->nem.s.u32GicPpiVTimer, false /*fAsserted*/);
2104
2105 pVCpu->nem.s.fVTimerActivated = false;
2106 hrc = hv_vcpu_set_vtimer_mask(pVCpu->nem.s.hVCpu, false /*vtimer_is_masked*/);
2107 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
2108 }
2109 }
2110
2111 /* Set the pending interrupt state. */
2112 hv_return_t hrc = HV_SUCCESS;
2113 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ))
2114 {
2115 hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_IRQ, true);
2116 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
2117#ifdef LOG_ENABLED
2118 fIrq = true;
2119#endif
2120 }
2121 else
2122 {
2123 hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_IRQ, false);
2124 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
2125 }
2126
2127 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_FIQ))
2128 {
2129 hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_FIQ, true);
2130 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
2131#ifdef LOG_ENABLED
2132 fFiq = true;
2133#endif
2134 }
2135 else
2136 {
2137 hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_FIQ, false);
2138 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
2139 }
2140
2141 LogFlowFunc(("Running vCPU [%s,%s]\n", fIrq ? "I" : "nI", fFiq ? "F" : "nF"));
2142 pVCpu->nem.s.fEventPending = false;
2143 return VINF_SUCCESS;
2144}
2145
2146
2147/**
2148 * The normal runloop (no debugging features enabled).
2149 *
2150 * @returns Strict VBox status code.
2151 * @param pVM The cross context VM structure.
2152 * @param pVCpu The cross context virtual CPU structure.
2153 */
2154static VBOXSTRICTRC nemR3DarwinRunGuestNormal(PVM pVM, PVMCPU pVCpu)
2155{
2156 /*
2157 * The run loop.
2158 *
2159 * The current approach to state updating is to use the sledgehammer and sync
2160 * everything every time. This will be optimized later.
2161 */
2162
2163 /* Update the vTimer offset after resuming if instructed. */
2164 if (pVCpu->nem.s.fVTimerOffUpdate)
2165 {
2166 hv_return_t hrc = hv_vcpu_set_vtimer_offset(pVCpu->nem.s.hVCpu, pVM->nem.s.u64VTimerOff);
2167 if (hrc != HV_SUCCESS)
2168 return nemR3DarwinHvSts2Rc(hrc);
2169
2170 pVCpu->nem.s.fVTimerOffUpdate = false;
2171
2172 hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CTL_EL0, pVCpu->cpum.GstCtx.CntvCtlEl0);
2173 if (hrc == HV_SUCCESS)
2174 hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CVAL_EL0, pVCpu->cpum.GstCtx.CntvCValEl0);
2175 if (hrc != HV_SUCCESS)
2176 return nemR3DarwinHvSts2Rc(hrc);
2177 }
2178
2179 /*
2180 * Poll timers and run for a bit.
2181 */
2182 /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
2183 * the whole polling job when timers have changed... */
2184 uint64_t offDeltaIgnored;
2185 uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
2186 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2187 for (unsigned iLoop = 0;; iLoop++)
2188 {
2189 rcStrict = nemR3DarwinPreRunGuest(pVM, pVCpu, false /* fSingleStepping */);
2190 if (rcStrict != VINF_SUCCESS)
2191 break;
2192
2193 hv_return_t hrc = nemR3DarwinRunGuest(pVM, pVCpu);
2194 if (hrc == HV_SUCCESS)
2195 {
2196 /*
2197 * Deal with the message.
2198 */
2199 rcStrict = nemR3DarwinHandleExit(pVM, pVCpu);
2200 if (rcStrict == VINF_SUCCESS)
2201 { /* hopefully likely */ }
2202 else
2203 {
2204 LogFlow(("NEM/%u: breaking: nemR3DarwinHandleExit -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
2205 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
2206 break;
2207 }
2208 }
2209 else
2210 {
2211 AssertLogRelMsgFailedReturn(("hv_vcpu_run() failed for CPU #%u: %#x\n",
2212 pVCpu->idCpu, hrc), VERR_NEM_IPE_0);
2213 }
2214 } /* the run loop */
2215
2216 return rcStrict;
2217}
2218
2219
2220/**
2221 * The debug runloop.
2222 *
2223 * @returns Strict VBox status code.
2224 * @param pVM The cross context VM structure.
2225 * @param pVCpu The cross context virtual CPU structure.
2226 */
2227static VBOXSTRICTRC nemR3DarwinRunGuestDebug(PVM pVM, PVMCPU pVCpu)
2228{
2229 /*
2230 * The run loop.
2231 *
2232 * The current approach to state updating is to use the sledgehammer and sync
2233 * everything every time. This will be optimized later.
2234 */
2235
2236 bool const fSavedSingleInstruction = pVCpu->nem.s.fSingleInstruction;
2237 pVCpu->nem.s.fSingleInstruction = pVCpu->nem.s.fSingleInstruction || DBGFIsStepping(pVCpu);
2238 pVCpu->nem.s.fUsingDebugLoop = true;
2239
2240 /* Trap any debug exceptions. */
2241 hv_return_t hrc = hv_vcpu_set_trap_debug_exceptions(pVCpu->nem.s.hVCpu, true);
2242 if (hrc != HV_SUCCESS)
2243 return VMSetError(pVM, VERR_NEM_SET_REGISTERS_FAILED, RT_SRC_POS,
2244 "Trapping debug exceptions on vCPU %u failed: %#x (%Rrc)", pVCpu->idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));
2245
2246 /* Update the vTimer offset after resuming if instructed. */
2247 if (pVCpu->nem.s.fVTimerOffUpdate)
2248 {
2249 hrc = hv_vcpu_set_vtimer_offset(pVCpu->nem.s.hVCpu, pVM->nem.s.u64VTimerOff);
2250 if (hrc != HV_SUCCESS)
2251 return nemR3DarwinHvSts2Rc(hrc);
2252
2253 pVCpu->nem.s.fVTimerOffUpdate = false;
2254
2255 hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CTL_EL0, pVCpu->cpum.GstCtx.CntvCtlEl0);
2256 if (hrc == HV_SUCCESS)
2257 hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CVAL_EL0, pVCpu->cpum.GstCtx.CntvCValEl0);
2258 if (hrc != HV_SUCCESS)
2259 return nemR3DarwinHvSts2Rc(hrc);
2260 }
2261
2262 /* Save the guest MDSCR_EL1 */
2263 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_SYSREG_DEBUG | CPUMCTX_EXTRN_PSTATE);
2264 uint64_t u64RegMdscrEl1 = pVCpu->cpum.GstCtx.Mdscr.u64;
2265
2266 /*
2267 * Poll timers and run for a bit.
2268 */
2269 /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
2270 * the whole polling job when timers have changed... */
2271 uint64_t offDeltaIgnored;
2272 uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
2273 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2274 for (unsigned iLoop = 0;; iLoop++)
2275 {
2276 bool const fStepping = pVCpu->nem.s.fSingleInstruction;
2277
2278 rcStrict = nemR3DarwinPreRunGuest(pVM, pVCpu, fStepping);
2279 if (rcStrict != VINF_SUCCESS)
2280 break;
2281
2282 hrc = nemR3DarwinRunGuest(pVM, pVCpu);
2283 if (hrc == HV_SUCCESS)
2284 {
2285 /*
2286 * Deal with the message.
2287 */
2288 rcStrict = nemR3DarwinHandleExit(pVM, pVCpu);
2289 if (rcStrict == VINF_SUCCESS)
2290 { /* hopefully likely */ }
2291 else
2292 {
2293 LogFlow(("NEM/%u: breaking: nemR3DarwinHandleExit -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
2294 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
2295 break;
2296 }
2297 }
2298 else
2299 {
2300 AssertLogRelMsgFailedReturn(("hv_vcpu_run() failed for CPU #%u: %#x\n",
2301 pVCpu->idCpu, hrc), VERR_NEM_IPE_0);
2302 }
2303 } /* the run loop */
2304
2305 /* Restore single stepping state. */
2306 if (pVCpu->nem.s.fSingleInstruction)
2307 {
2308 /** @todo This ASSUMES that guest code being single stepped is not modifying the MDSCR_EL1 register. */
2309 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_SYSREG_DEBUG | CPUMCTX_EXTRN_PSTATE);
2310 Assert(pVCpu->cpum.GstCtx.Mdscr.u64 & ARMV8_MDSCR_EL1_AARCH64_SS);
2311
2312 pVCpu->cpum.GstCtx.Mdscr.u64 = u64RegMdscrEl1;
2313 }
2314
2315 /* Restore debug exceptions trapping. */
2316 hrc = hv_vcpu_set_trap_debug_exceptions(pVCpu->nem.s.hVCpu, false);
2317 if (hrc != HV_SUCCESS)
2318 return VMSetError(pVM, VERR_NEM_SET_REGISTERS_FAILED, RT_SRC_POS,
2319 "Clearing trapping of debug exceptions on vCPU %u failed: %#x (%Rrc)", pVCpu->idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));
2320
2321 pVCpu->nem.s.fUsingDebugLoop = false;
2322 pVCpu->nem.s.fSingleInstruction = fSavedSingleInstruction;
2323
2324 return rcStrict;
2326}
2327
2328
2329VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu)
2330{
2331#ifdef LOG_ENABLED
2332 if (LogIs3Enabled())
2333 nemR3DarwinLogState(pVM, pVCpu);
2334#endif
2335
2336 AssertReturn(NEMR3CanExecuteGuest(pVM, pVCpu), VERR_NEM_IPE_9);
2337
2338 if (RT_UNLIKELY(!pVCpu->nem.s.fIdRegsSynced))
2339 {
2340 /*
2341 * Sync the guest ID registers, which are per-VM, once (they are readonly and stay constant for the VM's lifetime).
2342 * This needs to be done here and not during init because loading a saved state might change the ID registers from
2343 * what was done in the call to CPUMR3PopulateFeaturesByIdRegisters().
2344 */
2345 static const struct
2346 {
2347 const char *pszIdReg;
2348 hv_sys_reg_t enmHvReg;
2349 uint32_t offIdStruct;
2350 } s_aSysIdRegs[] =
2351 {
2352#define ID_SYS_REG_CREATE(a_IdReg, a_CpumIdReg) { #a_IdReg, HV_SYS_REG_##a_IdReg, RT_UOFFSETOF(CPUMIDREGS, a_CpumIdReg) }
2353 ID_SYS_REG_CREATE(ID_AA64DFR0_EL1, u64RegIdAa64Dfr0El1),
2354 ID_SYS_REG_CREATE(ID_AA64DFR1_EL1, u64RegIdAa64Dfr1El1),
2355 ID_SYS_REG_CREATE(ID_AA64ISAR0_EL1, u64RegIdAa64Isar0El1),
2356 ID_SYS_REG_CREATE(ID_AA64ISAR1_EL1, u64RegIdAa64Isar1El1),
2357 ID_SYS_REG_CREATE(ID_AA64MMFR0_EL1, u64RegIdAa64Mmfr0El1),
2358 ID_SYS_REG_CREATE(ID_AA64MMFR1_EL1, u64RegIdAa64Mmfr1El1),
2359 ID_SYS_REG_CREATE(ID_AA64MMFR2_EL1, u64RegIdAa64Mmfr2El1),
2360 ID_SYS_REG_CREATE(ID_AA64PFR0_EL1, u64RegIdAa64Pfr0El1),
2361 ID_SYS_REG_CREATE(ID_AA64PFR1_EL1, u64RegIdAa64Pfr1El1),
2362#undef ID_SYS_REG_CREATE
2363 };
2364
2365 PCCPUMIDREGS pIdRegsGst = NULL;
2366 int rc = CPUMR3QueryGuestIdRegs(pVM, &pIdRegsGst);
2367 AssertRCReturn(rc, rc);
2368
2369 for (uint32_t i = 0; i < RT_ELEMENTS(s_aSysIdRegs); i++)
2370 {
2371 uint64_t *pu64 = (uint64_t *)((uint8_t *)pIdRegsGst + s_aSysIdRegs[i].offIdStruct);
2372 hv_return_t hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, s_aSysIdRegs[i].enmHvReg, *pu64);
2373 if (hrc != HV_SUCCESS)
2374 return VMSetError(pVM, VERR_NEM_SET_REGISTERS_FAILED, RT_SRC_POS,
2375 "Setting %s failed on vCPU %u: %#x (%Rrc)", s_aSysIdRegs[i].pszIdReg, pVCpu->idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));
2376 }
2377
2378 pVCpu->nem.s.fIdRegsSynced = true;
2379 }
2380
2381 /*
2382 * Try switch to NEM runloop state.
2383 */
2384 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
2385 { /* likely */ }
2386 else
2387 {
2388 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
2389 LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
2390 return VINF_SUCCESS;
2391 }
2392
2393 VBOXSTRICTRC rcStrict;
2394 if ( !pVCpu->nem.s.fUseDebugLoop
2395 /*&& !nemR3DarwinAnyExpensiveProbesEnabled()*/
2396 && !DBGFIsStepping(pVCpu)
2397 && !pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledSwBreakpoints)
2398 rcStrict = nemR3DarwinRunGuestNormal(pVM, pVCpu);
2399 else
2400 rcStrict = nemR3DarwinRunGuestDebug(pVM, pVCpu);
2401
2402 if (rcStrict == VINF_EM_RAW_TO_R3)
2403 rcStrict = VINF_SUCCESS;
2404
2405 /*
2406 * Convert any pending HM events back to TRPM due to premature exits.
2407 *
2408 * This is because execution may continue from IEM and we would need to inject
2409 * the event from there (hence place it back in TRPM).
2410 */
2411 if (pVCpu->nem.s.fEventPending)
2412 {
2413 /** @todo */
2414 }
2415
2416
2417 if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
2418 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
2419
2420 if (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL))
2421 {
2422 /* Try anticipate what we might need. */
2423 uint64_t fImport = NEM_DARWIN_CPUMCTX_EXTRN_MASK_FOR_IEM;
2424 if ( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
2425 || RT_FAILURE(rcStrict))
2426 fImport = CPUMCTX_EXTRN_ALL;
2427 else if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ
2428 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
2429 fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;
2430
2431 if (pVCpu->cpum.GstCtx.fExtrn & fImport)
2432 {
2433 /* Only import what is external currently. */
2434 int rc2 = nemR3DarwinCopyStateFromHv(pVM, pVCpu, fImport);
2435 if (RT_SUCCESS(rc2))
2436 pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
2437 else if (RT_SUCCESS(rcStrict))
2438 rcStrict = rc2;
2439 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
2440 pVCpu->cpum.GstCtx.fExtrn = 0;
2441 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
2442 }
2443 else
2444 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
2445 }
2446 else
2447 {
2448 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
2449 pVCpu->cpum.GstCtx.fExtrn = 0;
2450 }
2451
2452 return rcStrict;
2453}
2454
2455
2456VMMR3_INT_DECL(bool) NEMR3CanExecuteGuest(PVM pVM, PVMCPU pVCpu)
2457{
2458 RT_NOREF(pVM, pVCpu);
2459 return true; /** @todo Are there any cases where we have to emulate? */
2460}
2461
2462
2463bool nemR3NativeSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
2464{
2465 VMCPU_ASSERT_EMT(pVCpu);
2466 bool fOld = pVCpu->nem.s.fSingleInstruction;
2467 pVCpu->nem.s.fSingleInstruction = fEnable;
2468 pVCpu->nem.s.fUseDebugLoop = fEnable || pVM->nem.s.fUseDebugLoop;
2469 return fOld;
2470}
2471
2472
2473void nemR3NativeNotifyFF(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
2474{
2475 LogFlowFunc(("pVM=%p pVCpu=%p fFlags=%#x\n", pVM, pVCpu, fFlags));
2476
2477 RT_NOREF(pVM, fFlags);
2478
2479 hv_return_t hrc = hv_vcpus_exit(&pVCpu->nem.s.hVCpu, 1);
2480 if (hrc != HV_SUCCESS)
2481 LogRel(("NEM: hv_vcpus_exit(%RU64, 1) failed with %#x\n", pVCpu->nem.s.hVCpu, hrc));
2482}
2483
2484
2485DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChanged(PVM pVM, bool fUseDebugLoop)
2486{
2487 RT_NOREF(pVM, fUseDebugLoop);
2488 //AssertReleaseFailed();
2489 return false;
2490}
2491
2492
2493DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu, bool fUseDebugLoop)
2494{
2495 RT_NOREF(pVM, pVCpu, fUseDebugLoop);
2496 return fUseDebugLoop;
2497}
2498
2499
2500VMMR3_INT_DECL(int) NEMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvR3,
2501 uint8_t *pu2State, uint32_t *puNemRange)
2502{
2503 RT_NOREF(pVM, puNemRange);
2504
2505 Log5(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p\n", GCPhys, cb, pvR3));
2506#if defined(VBOX_WITH_PGM_NEM_MODE)
2507 if (pvR3)
2508 {
2509 int rc = nemR3DarwinMap(pVM, GCPhys, pvR3, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
2510 if (RT_FAILURE(rc))
2511 {
2512 LogRel(("NEMR3NotifyPhysRamRegister: GCPhys=%RGp LB %RGp pvR3=%p rc=%Rrc\n", GCPhys, cb, pvR3, rc));
2513 return VERR_NEM_MAP_PAGES_FAILED;
2514 }
2515 }
2516 return VINF_SUCCESS;
2517#else
2518 RT_NOREF(pVM, GCPhys, cb, pvR3);
2519 return VERR_NEM_MAP_PAGES_FAILED;
2520#endif
2521}
2522
2523
2524VMMR3_INT_DECL(bool) NEMR3IsMmio2DirtyPageTrackingSupported(PVM pVM)
2525{
2526 RT_NOREF(pVM);
2527 return true;
2528}
2529
2530
2531VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
2532 void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
2533{
2534 RT_NOREF(pvRam);
2535
2536 Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p (%d)\n",
2537 GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, *pu2State));
2538
2539#if defined(VBOX_WITH_PGM_NEM_MODE)
2540 /*
2541 * Unmap the RAM we're replacing.
2542 */
2543 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
2544 {
2545 int rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
2546 if (RT_SUCCESS(rc))
2547 { /* likely */ }
2548 else if (pvMmio2)
2549 LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc (ignored)\n",
2550 GCPhys, cb, fFlags, rc));
2551 else
2552 {
2553 LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc\n",
2554 GCPhys, cb, fFlags, rc));
2555 return VERR_NEM_UNMAP_PAGES_FAILED;
2556 }
2557 }
2558
2559 /*
2560 * Map MMIO2 if any.
2561 */
2562 if (pvMmio2)
2563 {
2564 Assert(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2);
2565
2566 /* We need to set up our own dirty tracking due to Hypervisor.framework only working on host page sized aligned regions. */
2567 uint32_t fProt = NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
2568 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES)
2569 {
2570 /* Find a slot for dirty tracking. */
2571 PNEMHVMMIO2REGION pMmio2Region = NULL;
2572 uint32_t idSlot;
2573 for (idSlot = 0; idSlot < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking); idSlot++)
2574 {
2575 if ( pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysStart == 0
2576 && pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysLast == 0)
2577 {
2578 pMmio2Region = &pVM->nem.s.aMmio2DirtyTracking[idSlot];
2579 break;
2580 }
2581 }
2582
2583 if (!pMmio2Region)
2584 {
2585 LogRel(("NEMR3NotifyPhysMmioExMapEarly: Out of dirty tracking structures -> VERR_NEM_MAP_PAGES_FAILED\n"));
2586 return VERR_NEM_MAP_PAGES_FAILED;
2587 }
2588
2589 pMmio2Region->GCPhysStart = GCPhys;
2590 pMmio2Region->GCPhysLast = GCPhys + cb - 1;
2591 pMmio2Region->fDirty = false;
2592 *puNemRange = idSlot;
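/* The slot index is returned through puNemRange; both the dirty bitmap query and the
 * unmap path use it to locate this tracking entry again. */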
2593 }
2594 else
2595 fProt |= NEM_PAGE_PROT_WRITE;
2596
2597 int rc = nemR3DarwinMap(pVM, GCPhys, pvMmio2, cb, fProt, pu2State);
2598 if (RT_FAILURE(rc))
2599 {
2600 LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x pvMmio2=%p: Map -> rc=%Rrc\n",
2601 GCPhys, cb, fFlags, pvMmio2, rc));
2602 return VERR_NEM_MAP_PAGES_FAILED;
2603 }
2604 }
2605 else
2606 Assert(!(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2));
2607
2608#else
2609 RT_NOREF(pVM, GCPhys, cb, pvRam, pvMmio2);
2610 *pu2State = (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE) ? UINT8_MAX : NEM_DARWIN_PAGE_STATE_UNMAPPED;
2611#endif
2612 return VINF_SUCCESS;
2613}
2614
2615
2616VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
2617 void *pvRam, void *pvMmio2, uint32_t *puNemRange)
2618{
2619 RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, puNemRange);
2620 return VINF_SUCCESS;
2621}
2622
2623
2624VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExUnmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, void *pvRam,
2625 void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
2626{
2627 RT_NOREF(pVM, puNemRange);
2628
2629 Log5(("NEMR3NotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p uNemRange=%#x (%#x)\n",
2630 GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, puNemRange, *puNemRange));
2631
2632 int rc = VINF_SUCCESS;
2633#if defined(VBOX_WITH_PGM_NEM_MODE)
2634 /*
2635 * Unmap the MMIO2 pages.
2636 */
2637 /** @todo If we implement aliasing (MMIO2 page aliased into MMIO range),
2638 * we may have more stuff to unmap even in case of pure MMIO... */
2639 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2)
2640 {
2641 rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
2642 if (RT_FAILURE(rc))
2643 {
2644 LogRel2(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc\n",
2645 GCPhys, cb, fFlags, rc));
2646 rc = VERR_NEM_UNMAP_PAGES_FAILED;
2647 }
2648
2649 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES)
2650 {
2651 /* Reset tracking structure. */
2652 uint32_t idSlot = *puNemRange;
2653 *puNemRange = UINT32_MAX;
2654
2655 Assert(idSlot < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking));
2656 pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysStart = 0;
2657 pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysLast = 0;
2658 pVM->nem.s.aMmio2DirtyTracking[idSlot].fDirty = false;
2659 }
2660 }
2661
2662 /* Ensure the page is marked as unmapped if relevant. */
2663 Assert(!pu2State || *pu2State == NEM_DARWIN_PAGE_STATE_UNMAPPED);
2664
2665 /*
2666 * Restore the RAM we replaced.
2667 */
2668 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
2669 {
2670 AssertPtr(pvRam);
2671 rc = nemR3DarwinMap(pVM, GCPhys, pvRam, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
2672 if (RT_SUCCESS(rc))
2673 { /* likely */ }
2674 else
2675 {
2676 LogRel(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp pvMmio2=%p rc=%Rrc\n", GCPhys, cb, pvMmio2, rc));
2677 rc = VERR_NEM_MAP_PAGES_FAILED;
2678 }
2679 }
2680
2681 RT_NOREF(pvMmio2);
2682#else
2683 RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State);
2684 if (pu2State)
2685 *pu2State = UINT8_MAX;
2686 rc = VERR_NEM_UNMAP_PAGES_FAILED;
2687#endif
2688 return rc;
2689}
2690
2691
2692VMMR3_INT_DECL(int) NEMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t uNemRange,
2693 void *pvBitmap, size_t cbBitmap)
2694{
2695 LogFlowFunc(("NEMR3PhysMmio2QueryAndResetDirtyBitmap: %RGp LB %RGp uNemRange=%u\n", GCPhys, cb, uNemRange));
2696 Assert(uNemRange < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking));
2697
2698 /* Keep it simple for now and mark everything as dirty if it is. */
2699 int rc = VINF_SUCCESS;
2700 if (pVM->nem.s.aMmio2DirtyTracking[uNemRange].fDirty)
2701 {
2702 ASMBitSetRange(pvBitmap, 0, cbBitmap * 8);
2703
2704 pVM->nem.s.aMmio2DirtyTracking[uNemRange].fDirty = false;
2705 /* Restore as RX only. */
2706 uint8_t u2State;
2707 rc = nemR3DarwinProtect(GCPhys, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE, &u2State);
2708 }
2709 else
2710 ASMBitClearRange(pvBitmap, 0, cbBitmap * 8);
2711
2712 return rc;
2713}
2714
2715
2716VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages, uint32_t fFlags,
2717 uint8_t *pu2State, uint32_t *puNemRange)
2718{
2719 RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);
2720
2721 Log5(("NEMR3NotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags));
2722 *pu2State = UINT8_MAX;
2723 *puNemRange = 0;
2724 return VINF_SUCCESS;
2725}
2726
2727
2728VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages,
2729 uint32_t fFlags, uint8_t *pu2State, uint32_t *puNemRange)
2730{
2731 Log5(("NEMR3NotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n",
2732 GCPhys, cb, pvPages, fFlags, pu2State, *pu2State, puNemRange, *puNemRange));
2733 *pu2State = UINT8_MAX;
2734
2735#if defined(VBOX_WITH_PGM_NEM_MODE)
2736 /*
2737 * (Re-)map readonly.
2738 */
2739 AssertPtrReturn(pvPages, VERR_INVALID_POINTER);
2740
2741 int rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
2742 AssertRC(rc);
2743
2744 rc = nemR3DarwinMap(pVM, GCPhys, pvPages, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE, pu2State);
2745 if (RT_FAILURE(rc))
2746 {
2747 LogRel(("nemR3NativeNotifyPhysRomRegisterLate: GCPhys=%RGp LB %RGp pvPages=%p fFlags=%#x rc=%Rrc\n",
2748 GCPhys, cb, pvPages, fFlags, rc));
2749 return VERR_NEM_MAP_PAGES_FAILED;
2750 }
2751 RT_NOREF(fFlags, puNemRange);
2752 return VINF_SUCCESS;
2753#else
2754 RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);
2755 return VERR_NEM_MAP_PAGES_FAILED;
2756#endif
2757}
2758
2759
2760VMM_INT_DECL(void) NEMHCNotifyHandlerPhysicalDeregister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
2761 RTR3PTR pvMemR3, uint8_t *pu2State)
2762{
2763 Log5(("NEMHCNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d pvMemR3=%p pu2State=%p (%d)\n",
2764 GCPhys, cb, enmKind, pvMemR3, pu2State, *pu2State));
2765
2766 *pu2State = UINT8_MAX;
2767#if defined(VBOX_WITH_PGM_NEM_MODE)
2768 if (pvMemR3)
2769 {
2770 /* Unregister what was there before. */
2771 int rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
2772 AssertRC(rc);
2773
2774 rc = nemR3DarwinMap(pVM, GCPhys, pvMemR3, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
2775 AssertLogRelMsgRC(rc, ("NEMHCNotifyHandlerPhysicalDeregister: nemR3DarwinMap(,%p,%RGp,%RGp,) -> %Rrc\n",
2776 pvMemR3, GCPhys, cb, rc));
2777 }
2778 RT_NOREF(enmKind);
2779#else
2780 RT_NOREF(pVM, enmKind, GCPhys, cb, pvMemR3);
2781 AssertFailed();
2782#endif
2783}
2784
2785
2786VMMR3_INT_DECL(void) NEMR3NotifySetA20(PVMCPU pVCpu, bool fEnabled)
2787{
2788 Log(("NEMR3NotifySetA20: fEnabled=%RTbool\n", fEnabled));
2789 RT_NOREF(pVCpu, fEnabled);
2790}
2791
2792
2793void nemHCNativeNotifyHandlerPhysicalRegister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
2794{
2795 Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
2796 NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb);
2797}
2798
2799
2800void nemHCNativeNotifyHandlerPhysicalModify(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
2801 RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
2802{
2803 Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
2804 GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
2805 NOREF(pVM); NOREF(enmKind); NOREF(GCPhysOld); NOREF(GCPhysNew); NOREF(cb); NOREF(fRestoreAsRAM);
2806}
2807
2808
2809int nemHCNativeNotifyPhysPageAllocated(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
2810 PGMPAGETYPE enmType, uint8_t *pu2State)
2811{
2812 Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
2813 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
2814 RT_NOREF(pVM, GCPhys, HCPhys, fPageProt, enmType, pu2State);
2815
2816 AssertFailed();
2817 return VINF_SUCCESS;
2818}
2819
2820
2821VMM_INT_DECL(void) NEMHCNotifyPhysPageProtChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, RTR3PTR pvR3, uint32_t fPageProt,
2822 PGMPAGETYPE enmType, uint8_t *pu2State)
2823{
2824 Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
2825 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
2826 RT_NOREF(pVM, GCPhys, HCPhys, pvR3, fPageProt, enmType, pu2State);
2827}
2828
2829
2830VMM_INT_DECL(void) NEMHCNotifyPhysPageChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
2831 RTR3PTR pvNewR3, uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
2832{
2833 Log5(("NEMHCNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
2834 GCPhys, HCPhysPrev, HCPhysNew, fPageProt, enmType, *pu2State));
2835 RT_NOREF(pVM, GCPhys, HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType, pu2State);
2836
2837 AssertFailed();
2838}
2839
2840
2841/**
2842 * Interface for importing state on demand (used by IEM).
2843 *
2844 * @returns VBox status code.
2845 * @param pVCpu The cross context CPU structure.
2846 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
2847 */
2848VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
2849{
2850 LogFlowFunc(("pVCpu=%p fWhat=%RX64\n", pVCpu, fWhat));
2851 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);
2852
2853 return nemR3DarwinCopyStateFromHv(pVCpu->pVMR3, pVCpu, fWhat);
2854}
2855
2856
2857/**
2858 * Query the CPU tick counter and optionally the TSC_AUX MSR value.
2859 *
2860 * @returns VBox status code.
2861 * @param pVCpu The cross context CPU structure.
2862 * @param pcTicks Where to return the CPU tick count.
2863 * @param puAux Where to return the TSC_AUX register value.
2864 */
2865VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPUCC pVCpu, uint64_t *pcTicks, uint32_t *puAux)
2866{
2867 LogFlowFunc(("pVCpu=%p pcTicks=%RX64 puAux=%RX32\n", pVCpu, pcTicks, puAux));
2868 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick);
2869
2870 if (puAux)
2871 *puAux = 0;
2872 *pcTicks = mach_absolute_time() - pVCpu->pVMR3->nem.s.u64VTimerOff; /* This is the host timer minus the offset. */
2873 return VINF_SUCCESS;
2874}
2875
2876
2877/**
2878 * Resumes CPU clock (TSC) on all virtual CPUs.
2879 *
2880 * This is called by TM when the VM is started, restored, resumed or similar.
2881 *
2882 * @returns VBox status code.
2883 * @param pVM The cross context VM structure.
2884 * @param pVCpu The cross context CPU structure of the calling EMT.
2885 * @param uPausedTscValue The TSC value at the time of pausing.
2886 */
2887VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uPausedTscValue)
2888{
2889 LogFlowFunc(("pVM=%p pVCpu=%p uPausedTscValue=%RX64\n", pVM, pVCpu, uPausedTscValue));
2890 VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
2891 AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);
2892
2893 /*
2894 * Calculate the new offset: first get the new TSC value with the old vTimer offset, then adjust
2895 * the offset so that the guest doesn't notice the pause.
2896 */
2897 uint64_t u64TscNew = mach_absolute_time() - pVCpu->pVMR3->nem.s.u64VTimerOff;
2898 Assert(u64TscNew >= uPausedTscValue);
2899 LogFlowFunc(("u64VTimerOffOld=%#RX64 u64TscNew=%#RX64 u64VTimerValuePaused=%#RX64 -> u64VTimerOff=%#RX64\n",
2900 pVM->nem.s.u64VTimerOff, u64TscNew, uPausedTscValue,
2901 pVM->nem.s.u64VTimerOff + (u64TscNew - uPausedTscValue)));
2902
2903 pVM->nem.s.u64VTimerOff += u64TscNew - uPausedTscValue;
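/* Example: after a 3s pause, u64TscNew ends up roughly 3s worth of host ticks beyond uPausedTscValue,
 * so the offset grows by that amount and the guest's virtual counter resumes seamlessly. */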
2904
2905 /*
2906 * Set the flag to update the vTimer offset when the vCPU resumes for the first time
2907 * (needs to be done on the actual EMT).
2908 */
2909 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
2910 {
2911 PVMCPUCC pVCpuDst = pVM->apCpusR3[idCpu];
2912 pVCpuDst->nem.s.fVTimerOffUpdate = true;
2913 }
2914
2915 return VINF_SUCCESS;
2916}
2917
2918
2919/**
2920 * Returns features supported by the NEM backend.
2921 *
2922 * @returns Flags of features supported by the native NEM backend.
2923 * @param pVM The cross context VM structure.
2924 */
2925VMM_INT_DECL(uint32_t) NEMHCGetFeatures(PVMCC pVM)
2926{
2927 RT_NOREF(pVM);
2928 /*
2929 * Apple's Hypervisor.framework is not supported if the CPU doesn't support nested paging
2930 * and unrestricted guest execution, so we can always safely return these flags here.
2931 */
2932 return NEM_FEAT_F_NESTED_PAGING | NEM_FEAT_F_FULL_GST_EXEC | NEM_FEAT_F_XSAVE_XRSTOR;
2933}
2934
2935
2936/** @page pg_nem_darwin NEM/darwin - Native Execution Manager, macOS.
2937 *
2938 * @todo Add notes as the implementation progresses...
2939 */
2940