source: vbox/trunk/src/VBox/VMM/VMMR3/NEMR3Native-linux-armv8.cpp@104725

Last change on this file since 104725 was 104725, checked in by vboxsync, 8 months ago

VMM/NEM: Factor some bits common between x86 and arm64 KVM out into a template to reduce code duplication, bugref:10391

1/* $Id: NEMR3Native-linux-armv8.cpp 104725 2024-05-20 15:31:01Z vboxsync $ */
2/** @file
3 * NEM - Native execution manager, native ring-3 Linux backend arm64 version.
4 */
5
6/*
7 * Copyright (C) 2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_NEM
33#define VMCPU_INCL_CPUM_GST_CTX
34#include <VBox/vmm/nem.h>
35#include <VBox/vmm/iem.h>
36#include <VBox/vmm/em.h>
37#include <VBox/vmm/gic.h>
38#include <VBox/vmm/pdm.h>
39#include <VBox/vmm/trpm.h>
40#include "NEMInternal.h"
41#include <VBox/vmm/vmcc.h>
42
43#include <iprt/alloca.h>
44#include <iprt/string.h>
45#include <iprt/system.h>
46#include <iprt/armv8.h>
47
48#include <iprt/formats/arm-psci.h>
49
50#include <errno.h>
51#include <unistd.h>
52#include <sys/ioctl.h>
53#include <sys/fcntl.h>
54#include <sys/mman.h>
55#include <linux/kvm.h>
56
57
58/*********************************************************************************************************************************
59* Defined Constants And Macros *
60*********************************************************************************************************************************/
61
62/** Core register group. */
63#define KVM_ARM64_REG_CORE_GROUP UINT64_C(0x6030000000100000)
64/** System register group. */
65#define KVM_ARM64_REG_SYS_GROUP UINT64_C(0x6030000000130000)
66/** SIMD register group. */
67#define KVM_ARM64_REG_SIMD_GROUP UINT64_C(0x6040000000100050)
68/** FP register group. */
69#define KVM_ARM64_REG_FP_GROUP UINT64_C(0x6020000000100000)
70
71#define KVM_ARM64_REG_CORE_CREATE(a_idReg) (KVM_ARM64_REG_CORE_GROUP | ((uint64_t)(a_idReg) & 0xffff))
72#define KVM_ARM64_REG_GPR(a_iGpr) KVM_ARM64_REG_CORE_CREATE((a_iGpr) << 1)
73#define KVM_ARM64_REG_SP_EL0 KVM_ARM64_REG_CORE_CREATE(0x3e)
74#define KVM_ARM64_REG_PC KVM_ARM64_REG_CORE_CREATE(0x40)
75#define KVM_ARM64_REG_PSTATE KVM_ARM64_REG_CORE_CREATE(0x42)
76#define KVM_ARM64_REG_SP_EL1 KVM_ARM64_REG_CORE_CREATE(0x44)
77#define KVM_ARM64_REG_ELR_EL1 KVM_ARM64_REG_CORE_CREATE(0x46)
78#define KVM_ARM64_REG_SPSR_EL1 KVM_ARM64_REG_CORE_CREATE(0x48)
79#define KVM_ARM64_REG_SPSR_ABT KVM_ARM64_REG_CORE_CREATE(0x4a)
80#define KVM_ARM64_REG_SPSR_UND KVM_ARM64_REG_CORE_CREATE(0x4c)
81#define KVM_ARM64_REG_SPSR_IRQ KVM_ARM64_REG_CORE_CREATE(0x4e)
82#define KVM_ARM64_REG_SPSR_FIQ KVM_ARM64_REG_CORE_CREATE(0x50)
83
84/** This maps to our IPRT representation of system register IDs, yay! */
85#define KVM_ARM64_REG_SYS_CREATE(a_idSysReg) (KVM_ARM64_REG_SYS_GROUP | ((uint64_t)(a_idSysReg) & 0xffff))
86
87#define KVM_ARM64_REG_SIMD_CREATE(a_iVecReg) (KVM_ARM64_REG_SIMD_GROUP | (((uint64_t)(a_iVecReg) << 2) & 0xffff))
88
89#define KVM_ARM64_REG_FP_CREATE(a_idReg) (KVM_ARM64_REG_FP_GROUP | ((uint64_t)(a_idReg) & 0xffff))
90#define KVM_ARM64_REG_FP_FPSR KVM_ARM64_REG_FP_CREATE(0xd4)
91#define KVM_ARM64_REG_FP_FPCR KVM_ARM64_REG_FP_CREATE(0xd5)
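/*
 * Note: These constants follow the KVM_{GET,SET}_ONE_REG register ID scheme from
 *       the Linux UAPI headers (linux/kvm.h, asm/kvm.h).  Roughly: the top byte
 *       selects the architecture (0x60 = KVM_REG_ARM64), the next nibble the
 *       access size (0x2 = 32-bit, 0x3 = 64-bit, 0x4 = 128-bit), bits 31:16 the
 *       register class (0x0010 = core registers in struct kvm_regs, 0x0013 =
 *       system registers), and the low 16 bits the register index - for core
 *       registers the offset into struct kvm_regs in 32-bit words, for system
 *       registers the encoded Op0/Op1/CRn/CRm/Op2 value.
 */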
92
93
94/*********************************************************************************************************************************
95* Structures and Typedefs *
96*********************************************************************************************************************************/
97
98
99/*********************************************************************************************************************************
100* Global Variables *
101*********************************************************************************************************************************/
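/*
 * The tables below pair each KVM register ID with the corresponding
 * CPUMCTX_EXTRN_XXX flag (where applicable) and the offset of the register
 * inside CPUMCTX (or CPUMIDREGS), so the state import/export code further down
 * can simply iterate over them instead of handling every register explicitly.
 */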
102/** The general registers. */
103static const struct
104{
105 uint64_t idKvmReg;
106 uint32_t fCpumExtrn;
107 uint32_t offCpumCtx;
108} s_aCpumRegs[] =
109{
110#define CPUM_GREG_EMIT_X0_X3(a_Idx) { KVM_ARM64_REG_GPR(a_Idx), CPUMCTX_EXTRN_X ## a_Idx, RT_UOFFSETOF(CPUMCTX, aGRegs[a_Idx].x) }
111#define CPUM_GREG_EMIT_X4_X28(a_Idx) { KVM_ARM64_REG_GPR(a_Idx), CPUMCTX_EXTRN_X4_X28, RT_UOFFSETOF(CPUMCTX, aGRegs[a_Idx].x) }
112 CPUM_GREG_EMIT_X0_X3(0),
113 CPUM_GREG_EMIT_X0_X3(1),
114 CPUM_GREG_EMIT_X0_X3(2),
115 CPUM_GREG_EMIT_X0_X3(3),
116 CPUM_GREG_EMIT_X4_X28(4),
117 CPUM_GREG_EMIT_X4_X28(5),
118 CPUM_GREG_EMIT_X4_X28(6),
119 CPUM_GREG_EMIT_X4_X28(7),
120 CPUM_GREG_EMIT_X4_X28(8),
121 CPUM_GREG_EMIT_X4_X28(9),
122 CPUM_GREG_EMIT_X4_X28(10),
123 CPUM_GREG_EMIT_X4_X28(11),
124 CPUM_GREG_EMIT_X4_X28(12),
125 CPUM_GREG_EMIT_X4_X28(13),
126 CPUM_GREG_EMIT_X4_X28(14),
127 CPUM_GREG_EMIT_X4_X28(15),
128 CPUM_GREG_EMIT_X4_X28(16),
129 CPUM_GREG_EMIT_X4_X28(17),
130 CPUM_GREG_EMIT_X4_X28(18),
131 CPUM_GREG_EMIT_X4_X28(19),
132 CPUM_GREG_EMIT_X4_X28(20),
133 CPUM_GREG_EMIT_X4_X28(21),
134 CPUM_GREG_EMIT_X4_X28(22),
135 CPUM_GREG_EMIT_X4_X28(23),
136 CPUM_GREG_EMIT_X4_X28(24),
137 CPUM_GREG_EMIT_X4_X28(25),
138 CPUM_GREG_EMIT_X4_X28(26),
139 CPUM_GREG_EMIT_X4_X28(27),
140 CPUM_GREG_EMIT_X4_X28(28),
141 { KVM_ARM64_REG_GPR(29), CPUMCTX_EXTRN_FP, RT_UOFFSETOF(CPUMCTX, aGRegs[29].x) },
142 { KVM_ARM64_REG_GPR(30), CPUMCTX_EXTRN_LR, RT_UOFFSETOF(CPUMCTX, aGRegs[30].x) },
143 { KVM_ARM64_REG_PC, CPUMCTX_EXTRN_PC, RT_UOFFSETOF(CPUMCTX, Pc.u64) },
144#undef CPUM_GREG_EMIT_X0_X3
145#undef CPUM_GREG_EMIT_X4_X28
146};
147/** SIMD/FP registers. */
148static const struct
149{
150 uint64_t idKvmReg;
151 uint32_t offCpumCtx;
152} s_aCpumFpRegs[] =
153{
154#define CPUM_VREG_EMIT(a_Idx) { KVM_ARM64_REG_SIMD_CREATE(a_Idx), RT_UOFFSETOF(CPUMCTX, aVRegs[a_Idx].v) }
155 CPUM_VREG_EMIT(0),
156 CPUM_VREG_EMIT(1),
157 CPUM_VREG_EMIT(2),
158 CPUM_VREG_EMIT(3),
159 CPUM_VREG_EMIT(4),
160 CPUM_VREG_EMIT(5),
161 CPUM_VREG_EMIT(6),
162 CPUM_VREG_EMIT(7),
163 CPUM_VREG_EMIT(8),
164 CPUM_VREG_EMIT(9),
165 CPUM_VREG_EMIT(10),
166 CPUM_VREG_EMIT(11),
167 CPUM_VREG_EMIT(12),
168 CPUM_VREG_EMIT(13),
169 CPUM_VREG_EMIT(14),
170 CPUM_VREG_EMIT(15),
171 CPUM_VREG_EMIT(16),
172 CPUM_VREG_EMIT(17),
173 CPUM_VREG_EMIT(18),
174 CPUM_VREG_EMIT(19),
175 CPUM_VREG_EMIT(20),
176 CPUM_VREG_EMIT(21),
177 CPUM_VREG_EMIT(22),
178 CPUM_VREG_EMIT(23),
179 CPUM_VREG_EMIT(24),
180 CPUM_VREG_EMIT(25),
181 CPUM_VREG_EMIT(26),
182 CPUM_VREG_EMIT(27),
183 CPUM_VREG_EMIT(28),
184 CPUM_VREG_EMIT(29),
185 CPUM_VREG_EMIT(30),
186 CPUM_VREG_EMIT(31)
187#undef CPUM_VREG_EMIT
188};
189/** System registers. */
190static const struct
191{
192 uint64_t idKvmReg;
193 uint32_t fCpumExtrn;
194 uint32_t offCpumCtx;
195} s_aCpumSysRegs[] =
196{
197 { KVM_ARM64_REG_SP_EL0, CPUMCTX_EXTRN_SP, RT_UOFFSETOF(CPUMCTX, aSpReg[0].u64) },
198 { KVM_ARM64_REG_SP_EL1, CPUMCTX_EXTRN_SP, RT_UOFFSETOF(CPUMCTX, aSpReg[1].u64) },
199 { KVM_ARM64_REG_SPSR_EL1, CPUMCTX_EXTRN_SPSR, RT_UOFFSETOF(CPUMCTX, Spsr.u64) },
200 { KVM_ARM64_REG_ELR_EL1, CPUMCTX_EXTRN_ELR, RT_UOFFSETOF(CPUMCTX, Elr.u64) },
201 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_SCTRL_EL1), CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Sctlr.u64) },
202 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_TCR_EL1), CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Tcr.u64) },
203 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_TTBR0_EL1), CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Ttbr0.u64) },
204 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_TTBR1_EL1), CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Ttbr1.u64) },
205 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_VBAR_EL1), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, VBar.u64) },
206 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_AFSR0_EL1), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Afsr0.u64) },
207 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_AFSR1_EL1), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Afsr1.u64) },
208 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_AMAIR_EL1), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Amair.u64) },
209 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_CNTKCTL_EL1), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, CntKCtl.u64) },
210 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_CONTEXTIDR_EL1), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, ContextIdr.u64) },
211 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_CPACR_EL1), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Cpacr.u64) },
212 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_CSSELR_EL1), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Csselr.u64) },
213 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ESR_EL1), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Esr.u64) },
214 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_FAR_EL1), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Far.u64) },
215 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_MAIR_EL1), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Mair.u64) },
216 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_PAR_EL1), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Par.u64) },
217 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_TPIDRRO_EL0), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, TpIdrRoEl0.u64) },
218 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_TPIDR_EL0), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, aTpIdr[0].u64) },
219 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_TPIDR_EL1), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, aTpIdr[1].u64) },
220 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_MDCCINT_EL1), CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, MDccInt.u64) }
221};
222/** ID registers. */
223static const struct
224{
225 uint64_t idKvmReg;
226 uint32_t offIdStruct;
227} s_aIdRegs[] =
228{
229 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64DFR0_EL1), RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Dfr0El1) },
230 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64DFR1_EL1), RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Dfr1El1) },
231 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64ISAR0_EL1), RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Isar0El1) },
232 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64ISAR1_EL1), RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Isar1El1) },
233 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64MMFR0_EL1), RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Mmfr0El1) },
234 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64MMFR1_EL1), RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Mmfr1El1) },
235 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64MMFR2_EL1), RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Mmfr2El1) },
236 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64PFR0_EL1), RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Pfr0El1) },
237 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64PFR1_EL1), RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Pfr1El1) }
238};
239
240
241/* Forward declarations of things called by the template. */
242static int nemR3LnxInitSetupVm(PVM pVM, PRTERRINFO pErrInfo);
243
244
245/* Instantiate the common bits we share with the x86 KVM backend. */
246#include "NEMR3NativeTemplate-linux.cpp.h"
247
248
249/**
250 * Queries and logs the supported register list from KVM.
251 *
252 * @returns VBox status code.
253 * @param fdVCpu The file descriptor number of vCPU 0.
254 */
255static int nemR3LnxLogRegList(int fdVCpu)
256{
257 struct KVM_REG_LIST
258 {
259 uint64_t cRegs;
260 uint64_t aRegs[1024];
261 } RegList; RT_ZERO(RegList);
262
263 RegList.cRegs = RT_ELEMENTS(RegList.aRegs);
264 int rcLnx = ioctl(fdVCpu, KVM_GET_REG_LIST, &RegList);
265 if (rcLnx != 0)
266 return RTErrConvertFromErrno(errno);
267
268 LogRel(("NEM: KVM vCPU registers:\n"));
269
270 for (uint32_t i = 0; i < RegList.cRegs; i++)
271 LogRel(("NEM: %36s: %#RX64\n", "Unknown" /** @todo */, RegList.aRegs[i]));
272
273 return VINF_SUCCESS;
274}
275
276
277/**
278 * Sets the given attribute in KVM to the given value.
279 *
280 * @returns VBox status code.
281 * @param pVM The VM instance.
282 * @param u32Grp The device attribute group being set.
283 * @param u32Attr The actual attribute inside the group being set.
284 * @param pvAttrVal The attribute value to set.
285 * @param pszAttribute Attribute description for logging.
286 * @param pErrInfo Optional error information.
287 */
288static int nemR3LnxSetAttribute(PVM pVM, uint32_t u32Grp, uint32_t u32Attr, const void *pvAttrVal, const char *pszAttribute,
289 PRTERRINFO pErrInfo)
290{
291 struct kvm_device_attr DevAttr;
292
293 DevAttr.flags = 0;
294 DevAttr.group = u32Grp;
295 DevAttr.attr = u32Attr;
296 DevAttr.addr = (uintptr_t)pvAttrVal;
297 int rcLnx = ioctl(pVM->nem.s.fdVm, KVM_HAS_DEVICE_ATTR, &DevAttr);
298 if (rcLnx < 0)
299 return RTErrInfoSetF(pErrInfo, RTErrConvertFromErrno(errno),
300 N_("KVM error: KVM doesn't support setting the attribute \"%s\" (%d)"),
301 pszAttribute, errno);
302
303 rcLnx = ioctl(pVM->nem.s.fdVm, KVM_SET_DEVICE_ATTR, &DevAttr);
304 if (rcLnx < 0)
305 return RTErrInfoSetF(pErrInfo, RTErrConvertFromErrno(errno),
306 N_("KVM error: Setting the attribute \"%s\" for KVM failed (%d)"),
307 pszAttribute, errno);
308
309 return VINF_SUCCESS;
310}
311
312
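/**
 * Worker for getting or setting a single guest register through the KVM
 * KVM_GET_ONE_REG / KVM_SET_ONE_REG interface.
 *
 * @returns 0 on success, VBox error status on failure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   fQuery      Whether to query (true) or set (false) the register.
 * @param   idKvmReg    The KVM register ID (KVM_ARM64_REG_XXX).
 * @param   pv          The value buffer; read from or written to depending on
 *                      @a fQuery.
 */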
313DECL_FORCE_INLINE(int) nemR3LnxKvmSetQueryReg(PVMCPUCC pVCpu, bool fQuery, uint64_t idKvmReg, const void *pv)
314{
315 struct kvm_one_reg Reg;
316 Reg.id = idKvmReg;
317 Reg.addr = (uintptr_t)pv;
318
319 /*
320 * Who thought that this API was a good idea? Only being able to query/set one register
321 * at a time is horribly inefficient.
322 */
323 int rcLnx = ioctl(pVCpu->nem.s.fdVCpu, fQuery ? KVM_GET_ONE_REG : KVM_SET_ONE_REG, &Reg);
324 if (!rcLnx)
325 return 0;
326
327 return RTErrConvertFromErrno(-rcLnx);
328}
329
330DECLINLINE(int) nemR3LnxKvmQueryRegU64(PVMCPUCC pVCpu, uint64_t idKvmReg, uint64_t *pu64)
331{
332 return nemR3LnxKvmSetQueryReg(pVCpu, true /*fQuery*/, idKvmReg, pu64);
333}
334
335
336DECLINLINE(int) nemR3LnxKvmQueryRegU32(PVMCPUCC pVCpu, uint64_t idKvmReg, uint32_t *pu32)
337{
338 return nemR3LnxKvmSetQueryReg(pVCpu, true /*fQuery*/, idKvmReg, pu32);
339}
340
341
342DECLINLINE(int) nemR3LnxKvmQueryRegPV(PVMCPUCC pVCpu, uint64_t idKvmReg, void *pv)
343{
344 return nemR3LnxKvmSetQueryReg(pVCpu, true /*fQuery*/, idKvmReg, pv);
345}
346
347
348DECLINLINE(int) nemR3LnxKvmSetRegU64(PVMCPUCC pVCpu, uint64_t idKvmReg, const uint64_t *pu64)
349{
350 return nemR3LnxKvmSetQueryReg(pVCpu, false /*fQuery*/, idKvmReg, pu64);
351}
352
353
354DECLINLINE(int) nemR3LnxKvmSetRegU32(PVMCPUCC pVCpu, uint64_t idKvmReg, const uint32_t *pu32)
355{
356 return nemR3LnxKvmSetQueryReg(pVCpu, false /*fQuery*/, idKvmReg, pu32);
357}
358
359
360DECLINLINE(int) nemR3LnxKvmSetRegPV(PVMCPUCC pVCpu, uint64_t idKvmReg, const void *pv)
361{
362 return nemR3LnxKvmSetQueryReg(pVCpu, false /*fQuery*/, idKvmReg, pv);
363}
364
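/*
 * Illustrative example of the wrappers above: reading and writing back the
 * guest PC:
 *      uint64_t uPc = 0;
 *      int rc = nemR3LnxKvmQueryRegU64(pVCpu, KVM_ARM64_REG_PC, &uPc);
 *      if (RT_SUCCESS(rc))
 *          rc = nemR3LnxKvmSetRegU64(pVCpu, KVM_ARM64_REG_PC, &uPc);
 */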
365
366/**
367 * Does the early setup of a KVM VM.
368 *
369 * @returns VBox status code.
370 * @param pVM The cross context VM structure.
371 * @param pErrInfo Where to always return error info.
372 */
373static int nemR3LnxInitSetupVm(PVM pVM, PRTERRINFO pErrInfo)
374{
375 AssertReturn(pVM->nem.s.fdVm != -1, RTErrInfoSet(pErrInfo, VERR_WRONG_ORDER, "Wrong initialization order"));
376
377 /*
378 * Create the VCpus.
379 */
380 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
381 {
382 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
383
384 /* Create it. */
385 pVCpu->nem.s.fdVCpu = ioctl(pVM->nem.s.fdVm, KVM_CREATE_VCPU, (unsigned long)idCpu);
386 if (pVCpu->nem.s.fdVCpu < 0)
387 return RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED, "KVM_CREATE_VCPU failed for VCpu #%u: %d", idCpu, errno);
388
389 /* Map the KVM_RUN area. */
390 pVCpu->nem.s.pRun = (struct kvm_run *)mmap(NULL, pVM->nem.s.cbVCpuMmap, PROT_READ | PROT_WRITE, MAP_SHARED,
391 pVCpu->nem.s.fdVCpu, 0 /*offset*/);
392 if ((void *)pVCpu->nem.s.pRun == MAP_FAILED)
393 return RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED, "mmap failed for VCpu #%u: %d", idCpu, errno);
394
395 /* Initialize the vCPU. */
396 struct kvm_vcpu_init VCpuInit; RT_ZERO(VCpuInit);
397 VCpuInit.target = KVM_ARM_TARGET_GENERIC_V8;
398 /** @todo Enable features. */
399 if (ioctl(pVCpu->nem.s.fdVCpu, KVM_ARM_VCPU_INIT, &VCpuInit) != 0)
400 return RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED, "KVM_ARM_VCPU_INIT failed for VCpu #%u: %d", idCpu, errno);
401
402#if 0
403 uint32_t fFeatures = 0; /** @todo SVE */
404 if (ioctl(pVCpu->nem.s.fdVCpu, KVM_ARM_VCPU_FINALIZE, &fFeatures) != 0)
405 return RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED, "KVM_ARM_VCPU_FINALIZE failed for VCpu #%u: %d", idCpu, errno);
406#endif
407
408 if (idCpu == 0)
409 {
410 /* Query the supported register list and log it. */
411 int rc = nemR3LnxLogRegList(pVCpu->nem.s.fdVCpu);
412 if (RT_FAILURE(rc))
413 return RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED, "Querying the supported register list failed with %Rrc", rc);
414
415 /* Need to query the ID registers and populate CPUM. */
416 CPUMIDREGS IdRegs; RT_ZERO(IdRegs);
417 for (uint32_t i = 0; i < RT_ELEMENTS(s_aIdRegs); i++)
418 {
419 uint64_t *pu64 = (uint64_t *)((uint8_t *)&IdRegs + s_aIdRegs[i].offIdStruct);
420 rc = nemR3LnxKvmQueryRegU64(pVCpu, s_aIdRegs[i].idKvmReg, pu64);
421 if (RT_FAILURE(rc))
422 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
423 "Querying register %#x failed: %Rrc", s_aIdRegs[i].idKvmReg, rc);
424 }
425
426 rc = CPUMR3PopulateFeaturesByIdRegisters(pVM, &IdRegs);
427 if (RT_FAILURE(rc))
428 return rc;
429 }
430 }
431
432 /*
433 * Setup the SMCCC filter to get exits for PSCI related
434 * guest calls (to support SMP, power off and reset).
435 */
436 struct kvm_smccc_filter SmcccPsciFilter; RT_ZERO(SmcccPsciFilter);
437 SmcccPsciFilter.base = ARM_PSCI_FUNC_ID_CREATE_FAST_64(ARM_PSCI_FUNC_ID_PSCI_VERSION);
438 SmcccPsciFilter.nr_functions = ARM_PSCI_FUNC_ID_CREATE_FAST_64(ARM_PSCI_FUNC_ID_SYSTEM_RESET) - SmcccPsciFilter.base + 1;
439 SmcccPsciFilter.action = KVM_SMCCC_FILTER_FWD_TO_USER;
440 int rc = nemR3LnxSetAttribute(pVM, KVM_ARM_VM_SMCCC_CTRL, KVM_ARM_VM_SMCCC_FILTER, &SmcccPsciFilter,
441 "KVM_ARM_VM_SMCCC_FILTER", pErrInfo);
442 if (RT_FAILURE(rc))
443 return rc;
444
445 SmcccPsciFilter.base = ARM_PSCI_FUNC_ID_CREATE_FAST_32(ARM_PSCI_FUNC_ID_PSCI_VERSION);
446 SmcccPsciFilter.nr_functions = ARM_PSCI_FUNC_ID_CREATE_FAST_32(ARM_PSCI_FUNC_ID_SYSTEM_RESET) - SmcccPsciFilter.base + 1;
447 SmcccPsciFilter.action = KVM_SMCCC_FILTER_FWD_TO_USER;
448 rc = nemR3LnxSetAttribute(pVM, KVM_ARM_VM_SMCCC_CTRL, KVM_ARM_VM_SMCCC_FILTER, &SmcccPsciFilter,
449 "KVM_ARM_VM_SMCCC_FILTER", pErrInfo);
450 if (RT_FAILURE(rc))
451 return rc;
452
453 return VINF_SUCCESS;
454}
455
456
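/**
 * Handles the init-completed notifications, re-enabling RTThreadPoke once
 * ring-3 init has completed.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   enmWhat     The init phase that has completed.
 */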
457int nemR3NativeInitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
458{
459 /*
460 * Make RTThreadPoke work again (disabled to avoid unnecessary
461 * critical section issues in ring-0).
462 */
463 if (enmWhat == VMINITCOMPLETED_RING3)
464 VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, nemR3LnxFixThreadPoke, NULL);
465
466 return VINF_SUCCESS;
467}
468
469
470/*********************************************************************************************************************************
471* CPU State *
472*********************************************************************************************************************************/
473
474/**
475 * Sets the given general purpose register to the given value.
476 *
477 * @param pVCpu The cross context virtual CPU structure of the
478 * calling EMT.
479 * @param uReg The register index.
480 * @param f64BitReg Flag whether to operate on a 64-bit or 32-bit register.
481 * @param fSignExtend Flag whether to sign extend the value.
482 * @param u64Val The value.
483 */
484DECLINLINE(void) nemR3LnxSetGReg(PVMCPU pVCpu, uint8_t uReg, bool f64BitReg, bool fSignExtend, uint64_t u64Val)
485{
486 AssertReturnVoid(uReg < 31);
487
488 if (f64BitReg)
489 pVCpu->cpum.GstCtx.aGRegs[uReg].x = fSignExtend ? (int64_t)u64Val : u64Val;
490 else
491 pVCpu->cpum.GstCtx.aGRegs[uReg].w = fSignExtend ? (int32_t)u64Val : u64Val; /** @todo Does this clear the upper half on real hardware? */
492
493 /* Mark the register as not extern anymore. */
494 switch (uReg)
495 {
496 case 0:
497 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X0;
498 break;
499 case 1:
500 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X1;
501 break;
502 case 2:
503 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X2;
504 break;
505 case 3:
506 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X3;
507 break;
508 default:
509 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_X4_X28));
510 /** @todo We need to import all missing registers in order to clear this flag (or just set it in HV from here). */
511 }
512}
513
514
515/**
516 * Gets the given general purpose register and returns the value.
517 *
518 * @returns Value from the given register.
519 * @param pVCpu The cross context virtual CPU structure of the
520 * calling EMT.
521 * @param uReg The register index.
522 */
523DECLINLINE(uint64_t) nemR3LnxGetGReg(PVMCPU pVCpu, uint8_t uReg)
524{
525 AssertReturn(uReg <= ARMV8_AARCH64_REG_ZR, 0);
526
527 if (uReg == ARMV8_AARCH64_REG_ZR)
528 return 0;
529
530 /** @todo Import the register if extern. */
531 //AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_GPRS_MASK));
532
533 return pVCpu->cpum.GstCtx.aGRegs[uReg].x;
534}
535
536/**
537 * Worker that imports selected state from KVM.
538 */
539static int nemHCLnxImportState(PVMCPUCC pVCpu, uint64_t fWhat, PCPUMCTX pCtx)
540{
541 fWhat &= pVCpu->cpum.GstCtx.fExtrn;
542 if (!fWhat)
543 return VINF_SUCCESS;
544
545#if 0
546 hv_return_t hrc = hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CTL_EL0, &pVCpu->cpum.GstCtx.CntvCtlEl0);
547 if (hrc == HV_SUCCESS)
548 hrc = hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CVAL_EL0, &pVCpu->cpum.GstCtx.CntvCValEl0);
549#endif
550
551 int rc = VINF_SUCCESS;
552 if (fWhat & (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_FP | CPUMCTX_EXTRN_LR | CPUMCTX_EXTRN_PC))
553 {
554 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumRegs); i++)
555 {
556 if (s_aCpumRegs[i].fCpumExtrn & fWhat)
557 {
558 uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumRegs[i].offCpumCtx);
559 rc |= nemR3LnxKvmQueryRegU64(pVCpu, s_aCpumRegs[i].idKvmReg, pu64);
560 }
561 }
562 }
563
564 if ( rc == VINF_SUCCESS
565 && (fWhat & CPUMCTX_EXTRN_FPCR))
566 {
567 uint32_t u32Tmp;
568 rc |= nemR3LnxKvmQueryRegU32(pVCpu, KVM_ARM64_REG_FP_FPCR, &u32Tmp);
569 if (rc == VINF_SUCCESS)
570 pVCpu->cpum.GstCtx.fpcr = u32Tmp;
571 }
572
573 if ( rc == VINF_SUCCESS
574 && (fWhat & CPUMCTX_EXTRN_FPSR))
575 {
576 uint32_t u32Tmp;
577 rc |= nemR3LnxKvmQueryRegU32(pVCpu, KVM_ARM64_REG_FP_FPSR, &u32Tmp);
578 if (rc == VINF_SUCCESS)
579 pVCpu->cpum.GstCtx.fpsr = u32Tmp;
580 }
581
582 if ( rc == VINF_SUCCESS
583 && (fWhat & CPUMCTX_EXTRN_V0_V31))
584 {
585 /* SIMD/FP registers. */
586 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumFpRegs); i++)
587 {
588 void *pu128 = (void *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumFpRegs[i].offCpumCtx);
589 rc |= nemR3LnxKvmQueryRegPV(pVCpu, s_aCpumFpRegs[i].idKvmReg, pu128);
590 }
591 }
592
593 if ( rc == VINF_SUCCESS
594 && (fWhat & CPUMCTX_EXTRN_SYSREG_DEBUG))
595 {
596#if 0
597 /* Debug registers. */
598 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumDbgRegs); i++)
599 {
600 uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumDbgRegs[i].offCpumCtx);
601 rc |= nemR3LnxKvmQueryReg(pVCpu, s_aCpumDbgRegs[i].idKvmReg, pu64);
602 }
603#endif
604 }
605
606 if ( rc == VINF_SUCCESS
607 && (fWhat & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS))
608 {
609#if 0
610 /* PAuth registers. */
611 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumPAuthKeyRegs); i++)
612 {
613 uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumPAuthKeyRegs[i].offCpumCtx);
614 hrc |= nemR3LnxKvmQueryReg(pVCpu, s_aCpumPAuthKeyRegs[i].idKvmReg, pu64);
615 }
616#endif
617 }
618
619 if ( rc == VINF_SUCCESS
620 && (fWhat & (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR | CPUMCTX_EXTRN_SYSREG_MISC)))
621 {
622 /* System registers. */
623 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegs); i++)
624 {
625 if (s_aCpumSysRegs[i].fCpumExtrn & fWhat)
626 {
627 uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegs[i].offCpumCtx);
628 rc |= nemR3LnxKvmQueryRegU64(pVCpu, s_aCpumSysRegs[i].idKvmReg, pu64);
629 }
630 }
631 }
632
633 if ( rc == VINF_SUCCESS
634 && (fWhat & CPUMCTX_EXTRN_PSTATE))
635 {
636 uint64_t u64Tmp;
637 rc |= nemR3LnxKvmQueryRegU64(pVCpu, KVM_ARM64_REG_PSTATE, &u64Tmp);
638 if (rc == VINF_SUCCESS)
639 pVCpu->cpum.GstCtx.fPState = (uint32_t)u64Tmp;
640
641 }
642
643 /*
644 * Update the external mask.
645 */
646 pCtx->fExtrn &= ~fWhat;
647 pVCpu->cpum.GstCtx.fExtrn &= ~fWhat;
648 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
649 pVCpu->cpum.GstCtx.fExtrn = 0;
650
651 return VINF_SUCCESS;
652}
653
654
655/**
656 * Interface for importing state on demand (used by IEM).
657 *
658 * @returns VBox status code.
659 * @param pVCpu The cross context CPU structure.
660 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
661 */
662VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
663{
664 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);
665 return nemHCLnxImportState(pVCpu, fWhat, &pVCpu->cpum.GstCtx);
666}
667
668
669/**
670 * Exports state to KVM.
671 */
672static int nemHCLnxExportState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
673{
674 uint64_t const fExtrn = ~pCtx->fExtrn & CPUMCTX_EXTRN_ALL;
675 Assert((~fExtrn & CPUMCTX_EXTRN_ALL) != CPUMCTX_EXTRN_ALL); RT_NOREF(fExtrn);
676
677 RT_NOREF(pVM);
678 int rc = VINF_SUCCESS;
679 if ( (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_FP | CPUMCTX_EXTRN_LR | CPUMCTX_EXTRN_PC))
680 != (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_FP | CPUMCTX_EXTRN_LR | CPUMCTX_EXTRN_PC))
681 {
682 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumRegs); i++)
683 {
684 if (!(s_aCpumRegs[i].fCpumExtrn & pVCpu->cpum.GstCtx.fExtrn))
685 {
686 const uint64_t *pu64 = (const uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumRegs[i].offCpumCtx);
687 rc |= nemR3LnxKvmSetRegU64(pVCpu, s_aCpumRegs[i].idKvmReg, pu64);
688 }
689 }
690 }
691
692 if ( rc == VINF_SUCCESS
693 && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_FPCR))
694 {
695 uint32_t u32Tmp = pVCpu->cpum.GstCtx.fpcr;
696 rc |= nemR3LnxKvmSetRegU32(pVCpu, KVM_ARM64_REG_FP_FPCR, &u32Tmp);
697 }
698
699 if ( rc == VINF_SUCCESS
700 && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_FPSR))
701 {
702 uint32_t u32Tmp = pVCpu->cpum.GstCtx.fpsr;
703 rc |= nemR3LnxKvmSetRegU32(pVCpu, KVM_ARM64_REG_FP_FPSR, &u32Tmp);
704 }
705
706 if ( rc == VINF_SUCCESS
707 && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_V0_V31))
708 {
709 /* SIMD/FP registers. */
710 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumFpRegs); i++)
711 {
712 void *pu128 = (void *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumFpRegs[i].offCpumCtx);
713 rc |= nemR3LnxKvmSetRegPV(pVCpu, s_aCpumFpRegs[i].idKvmReg, pu128);
714 }
715 }
716
717 if ( rc == VINF_SUCCESS
718 && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_SYSREG_DEBUG))
719 {
720#if 0
721 /* Debug registers. */
722 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumDbgRegs); i++)
723 {
724 uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumDbgRegs[i].offCpumCtx);
725 hrc |= hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumDbgRegs[i].enmHvReg, *pu64);
726 }
727#endif
728 }
729
730 if ( rc == VINF_SUCCESS
731 && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS))
732 {
733#if 0
734 /* PAuth key registers. */
735 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumPAuthKeyRegs); i++)
736 {
737 uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumPAuthKeyRegs[i].offCpumCtx);
738 hrc |= hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumPAuthKeyRegs[i].enmHvReg, *pu64);
739 }
740#endif
741 }
742
743 if ( rc == VINF_SUCCESS
744 && (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR | CPUMCTX_EXTRN_SYSREG_MISC))
745 != (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR | CPUMCTX_EXTRN_SYSREG_MISC))
746 {
747 /* System registers. */
748 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegs); i++)
749 {
750 if (!(s_aCpumSysRegs[i].fCpumExtrn & pVCpu->cpum.GstCtx.fExtrn))
751 {
752 uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegs[i].offCpumCtx);
753 rc |= nemR3LnxKvmSetRegU64(pVCpu, s_aCpumSysRegs[i].idKvmReg, pu64);
754 }
755 }
756 }
757
758 if ( rc == VINF_SUCCESS
759 && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_PSTATE))
760 {
761 uint64_t u64Tmp = pVCpu->cpum.GstCtx.fPState;
762 rc = nemR3LnxKvmSetRegU64(pVCpu, KVM_ARM64_REG_PSTATE, &u64Tmp);
763 }
764
765 /*
766 * KVM now owns all the state.
767 */
768 pCtx->fExtrn = CPUMCTX_EXTRN_KEEPER_NEM | CPUMCTX_EXTRN_ALL;
769 return VINF_SUCCESS;
770}
771
772
773/**
774 * Query the CPU tick counter and optionally the TSC_AUX MSR value.
775 *
776 * @returns VBox status code.
777 * @param pVCpu The cross context CPU structure.
778 * @param pcTicks Where to return the CPU tick count.
779 * @param puAux Where to return the TSC_AUX register value.
780 */
781VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPUCC pVCpu, uint64_t *pcTicks, uint32_t *puAux)
782{
783 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick);
784 // KVM_GET_CLOCK?
785 RT_NOREF(pVCpu, pcTicks, puAux);
786 return VINF_SUCCESS;
787}
788
789
790/**
791 * Resumes CPU clock (TSC) on all virtual CPUs.
792 *
793 * This is called by TM when the VM is started, restored, resumed or similar.
794 *
795 * @returns VBox status code.
796 * @param pVM The cross context VM structure.
797 * @param pVCpu The cross context CPU structure of the calling EMT.
798 * @param uPausedTscValue The TSC value at the time of pausing.
799 */
800VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uPausedTscValue)
801{
802 // KVM_SET_CLOCK?
803 RT_NOREF(pVM, pVCpu, uPausedTscValue);
804 return VINF_SUCCESS;
805}
806
807
808VMM_INT_DECL(uint32_t) NEMHCGetFeatures(PVMCC pVM)
809{
810 RT_NOREF(pVM);
811 return NEM_FEAT_F_NESTED_PAGING
812 | NEM_FEAT_F_FULL_GST_EXEC;
813}
814
815
816
817/*********************************************************************************************************************************
818* Execution *
819*********************************************************************************************************************************/
820
821
822VMMR3_INT_DECL(bool) NEMR3CanExecuteGuest(PVM pVM, PVMCPU pVCpu)
823{
824 RT_NOREF(pVM, pVCpu);
825 Assert(VM_IS_NEM_ENABLED(pVM));
826 return true;
827}
828
829
830bool nemR3NativeSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
831{
832 NOREF(pVM); NOREF(pVCpu); NOREF(fEnable);
833 return false;
834}
835
836
837void nemR3NativeNotifyFF(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
838{
839 int rc = RTThreadPoke(pVCpu->hThread);
840 LogFlow(("nemR3NativeNotifyFF: #%u -> %Rrc\n", pVCpu->idCpu, rc));
841 AssertRC(rc);
842 RT_NOREF(pVM, fFlags);
843}
844
845
846DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChanged(PVM pVM, bool fUseDebugLoop)
847{
848 RT_NOREF(pVM, fUseDebugLoop);
849 return false;
850}
851
852
853DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu, bool fUseDebugLoop)
854{
855 RT_NOREF(pVM, pVCpu, fUseDebugLoop);
856 return false;
857}
858
859
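/**
 * Updates the IRQ or FIQ line level of the given vCPU via KVM_IRQ_LINE.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   fIrq        Whether to update the IRQ (true) or FIQ (false) line.
 * @param   fAsserted   Whether the line is asserted or de-asserted.
 */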
860DECL_FORCE_INLINE(int) nemR3LnxKvmUpdateIntrState(PVM pVM, PVMCPU pVCpu, bool fIrq, bool fAsserted)
861{
862 struct kvm_irq_level IrqLvl;
863
864 LogFlowFunc(("pVM=%p pVCpu=%p fIrq=%RTbool fAsserted=%RTbool\n",
865 pVM, pVCpu, fIrq, fAsserted));
866
867 IrqLvl.irq = ((uint32_t)KVM_ARM_IRQ_TYPE_CPU << 24) /* Directly drives CPU interrupt lines. */
868 | (pVCpu->idCpu & 0xff) << 16
869 | (fIrq ? 0 : 1);
870 IrqLvl.level = fAsserted ? 1 : 0;
871 int rcLnx = ioctl(pVM->nem.s.fdVm, KVM_IRQ_LINE, &IrqLvl);
872 AssertReturn(rcLnx == 0, VERR_NEM_IPE_9);
873
874 return VINF_SUCCESS;
875}
876
877
878/**
879 * Deals with pending interrupt FFs prior to executing guest code.
880 */
881static VBOXSTRICTRC nemHCLnxHandleInterruptFF(PVM pVM, PVMCPU pVCpu)
882{
883 LogFlowFunc(("pVCpu=%p{.idCpu=%u} fIrq=%RTbool fFiq=%RTbool\n",
884 pVCpu, pVCpu->idCpu,
885 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ),
886 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_FIQ)));
887
888 bool fIrq = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ);
889 bool fFiq = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_FIQ);
890
891 /* Update the pending interrupt state. */
892 if (fIrq != pVCpu->nem.s.fIrqLastSeen)
893 {
894 int rc = nemR3LnxKvmUpdateIntrState(pVM, pVCpu, true /*fIrq*/, fIrq);
895 AssertRCReturn(rc, VERR_NEM_IPE_9);
896 pVCpu->nem.s.fIrqLastSeen = fIrq;
897 }
898
899 if (fFiq != pVCpu->nem.s.fFiqLastSeen)
900 {
901 int rc = nemR3LnxKvmUpdateIntrState(pVM, pVCpu, false /*fIrq*/, fFiq);
902 AssertRCReturn(rc, VERR_NEM_IPE_9);
903 pVCpu->nem.s.fFiqLastSeen = fFiq;
904 }
905
906 return VINF_SUCCESS;
907}
908
909
910#if 0
911/**
912 * Handles KVM_EXIT_INTERNAL_ERROR.
913 */
914static VBOXSTRICTRC nemR3LnxHandleInternalError(PVMCPU pVCpu, struct kvm_run *pRun)
915{
916 Log(("NEM: KVM_EXIT_INTERNAL_ERROR! suberror=%#x (%d) ndata=%u data=%.*Rhxs\n", pRun->internal.suberror,
917 pRun->internal.suberror, pRun->internal.ndata, sizeof(pRun->internal.data), &pRun->internal.data[0]));
918
919 /*
920 * Deal with each suberror, returning if we don't want IEM to handle it.
921 */
922 switch (pRun->internal.suberror)
923 {
924 case KVM_INTERNAL_ERROR_EMULATION:
925 {
926 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_INTERNAL_ERROR_EMULATION),
927 pRun->s.regs.regs.rip + pRun->s.regs.sregs.cs.base, ASMReadTSC());
928 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitInternalErrorEmulation);
929 break;
930 }
931
932 case KVM_INTERNAL_ERROR_SIMUL_EX:
933 case KVM_INTERNAL_ERROR_DELIVERY_EV:
934 case KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON:
935 default:
936 {
937 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_INTERNAL_ERROR_FATAL),
938 pRun->s.regs.regs.rip + pRun->s.regs.sregs.cs.base, ASMReadTSC());
939 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitInternalErrorFatal);
940 const char *pszName;
941 switch (pRun->internal.suberror)
942 {
943 case KVM_INTERNAL_ERROR_EMULATION: pszName = "KVM_INTERNAL_ERROR_EMULATION"; break;
944 case KVM_INTERNAL_ERROR_SIMUL_EX: pszName = "KVM_INTERNAL_ERROR_SIMUL_EX"; break;
945 case KVM_INTERNAL_ERROR_DELIVERY_EV: pszName = "KVM_INTERNAL_ERROR_DELIVERY_EV"; break;
946 case KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON: pszName = "KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON"; break;
947 default: pszName = "unknown"; break;
948 }
949 LogRel(("NEM: KVM_EXIT_INTERNAL_ERROR! suberror=%#x (%s) ndata=%u data=%.*Rhxs\n", pRun->internal.suberror, pszName,
950 pRun->internal.ndata, sizeof(pRun->internal.data), &pRun->internal.data[0]));
951 return VERR_NEM_IPE_0;
952 }
953 }
954
955 /*
956 * Execute instruction in IEM and try get on with it.
957 */
958 Log2(("nemR3LnxHandleInternalError: Executing instruction at %04x:%08RX64 in IEM\n",
959 pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip));
960 VBOXSTRICTRC rcStrict = nemHCLnxImportState(pVCpu,
961 IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_INHIBIT_INT
962 | CPUMCTX_EXTRN_INHIBIT_NMI,
963 &pVCpu->cpum.GstCtx, pRun);
964 if (RT_SUCCESS(rcStrict))
965 rcStrict = IEMExecOne(pVCpu);
966 return rcStrict;
967}
968#endif
969
970
971/**
972 * Handles KVM_EXIT_MMIO.
973 */
974static VBOXSTRICTRC nemHCLnxHandleExitMmio(PVMCC pVM, PVMCPUCC pVCpu, struct kvm_run *pRun)
975{
976 /*
977 * Input validation.
978 */
979 Assert(pRun->mmio.len <= sizeof(pRun->mmio.data));
980 Assert(pRun->mmio.is_write <= 1);
981
982#if 0
983 /*
984 * We cannot easily act on the exit history here, because the MMIO port
985 * exit is stateful and the instruction will be completed in the next
986 * KVM_RUN call. There seems no way to circumvent this.
987 */
988 EMHistoryAddExit(pVCpu,
989 pRun->mmio.is_write
990 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
991 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
992 pRun->s.regs.regs.pc, ASMReadTSC());
993#else
994 RT_NOREF(pVCpu);
995#endif
996
997 /*
998 * Do the requested job.
999 */
1000 VBOXSTRICTRC rcStrict;
1001 if (pRun->mmio.is_write)
1002 {
1003 rcStrict = PGMPhysWrite(pVM, pRun->mmio.phys_addr, pRun->mmio.data, pRun->mmio.len, PGMACCESSORIGIN_HM);
1004 Log4(("MmioExit/%u:WRITE %#x LB %u, %.*Rhxs -> rcStrict=%Rrc\n",
1005 pVCpu->idCpu,
1006 pRun->mmio.phys_addr, pRun->mmio.len, pRun->mmio.len, pRun->mmio.data, VBOXSTRICTRC_VAL(rcStrict) ));
1007 }
1008 else
1009 {
1010 rcStrict = PGMPhysRead(pVM, pRun->mmio.phys_addr, pRun->mmio.data, pRun->mmio.len, PGMACCESSORIGIN_HM);
1011 Log4(("MmioExit/%u: READ %#x LB %u -> %.*Rhxs rcStrict=%Rrc\n",
1012 pVCpu->idCpu,
1013 pRun->mmio.phys_addr, pRun->mmio.len, pRun->mmio.len, pRun->mmio.data, VBOXSTRICTRC_VAL(rcStrict) ));
1014 }
1015 return rcStrict;
1016}
1017
1018
1019/**
1020 * Handles KVM_EXIT_HYPERCALL.
1021 */
1022static VBOXSTRICTRC nemHCLnxHandleExitHypercall(PVMCC pVM, PVMCPUCC pVCpu, struct kvm_run *pRun)
1023{
1024#if 0
1025 /*
1026 * We cannot easily act on the exit history here, because the MMIO port
1027 * exit is stateful and the instruction will be completed in the next
1028 * KVM_RUN call. There seems no way to circumvent this.
1029 */
1030 EMHistoryAddExit(pVCpu,
1031 pRun->mmio.is_write
1032 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
1033 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
1034 pRun->s.regs.regs.pc, ASMReadTSC());
1035#else
1036 RT_NOREF(pVCpu);
1037#endif
1038
1039 /*
1040 * Do the requested job.
1041 */
1042 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1043
1044 /** @todo Raise exception to EL1 if PSCI not configured. */
1045 /** @todo Need a generic mechanism to pass this on to, GIM maybe? */
1046 uint32_t uFunId = pRun->hypercall.nr;
1047 bool fHvc64 = RT_BOOL(uFunId & ARM_SMCCC_FUNC_ID_64BIT); RT_NOREF(fHvc64);
1048 uint32_t uEntity = ARM_SMCCC_FUNC_ID_ENTITY_GET(uFunId);
1049 uint32_t uFunNum = ARM_SMCCC_FUNC_ID_NUM_GET(uFunId);
1050 if (uEntity == ARM_SMCCC_FUNC_ID_ENTITY_STD_SEC_SERVICE)
1051 {
1052 rcStrict = nemHCLnxImportState(pVCpu, CPUMCTX_EXTRN_X0 | CPUMCTX_EXTRN_X1 | CPUMCTX_EXTRN_X2 | CPUMCTX_EXTRN_X3,
1053 &pVCpu->cpum.GstCtx);
1054 if (rcStrict != VINF_SUCCESS)
1055 return rcStrict;
1056
1057 switch (uFunNum)
1058 {
1059 case ARM_PSCI_FUNC_ID_PSCI_VERSION:
1060 nemR3LnxSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_FUNC_ID_PSCI_VERSION_SET(1, 2));
1061 break;
1062 case ARM_PSCI_FUNC_ID_SYSTEM_OFF:
1063 rcStrict = VMR3PowerOff(pVM->pUVM);
1064 break;
1065 case ARM_PSCI_FUNC_ID_SYSTEM_RESET:
1066 case ARM_PSCI_FUNC_ID_SYSTEM_RESET2:
1067 {
1068 bool fHaltOnReset;
1069 int rc = CFGMR3QueryBool(CFGMR3GetChild(CFGMR3GetRoot(pVM), "PDM"), "HaltOnReset", &fHaltOnReset);
1070 if (RT_SUCCESS(rc) && fHaltOnReset)
1071 {
1072 Log(("nemHCLnxHandleExitHypercall: Halt On Reset!\n"));
1073 rcStrict = VINF_EM_HALT;
1074 }
1075 else
1076 {
1077 /** @todo pVM->pdm.s.fResetFlags = fFlags; */
1078 VM_FF_SET(pVM, VM_FF_RESET);
1079 rcStrict = VINF_EM_RESET;
1080 }
1081 break;
1082 }
1083 case ARM_PSCI_FUNC_ID_CPU_ON:
1084 {
1085 uint64_t u64TgtCpu = nemR3LnxGetGReg(pVCpu, ARMV8_AARCH64_REG_X1);
1086 RTGCPHYS GCPhysExecAddr = nemR3LnxGetGReg(pVCpu, ARMV8_AARCH64_REG_X2);
1087 uint64_t u64CtxId = nemR3LnxGetGReg(pVCpu, ARMV8_AARCH64_REG_X3);
1088 VMMR3CpuOn(pVM, u64TgtCpu & 0xff, GCPhysExecAddr, u64CtxId);
1089 nemR3LnxSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, true /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_STS_SUCCESS);
1090 break;
1091 }
1092 case ARM_PSCI_FUNC_ID_PSCI_FEATURES:
1093 {
1094 uint32_t u32FunNum = (uint32_t)nemR3LnxGetGReg(pVCpu, ARMV8_AARCH64_REG_X1);
1095 switch (u32FunNum)
1096 {
1097 case ARM_PSCI_FUNC_ID_PSCI_VERSION:
1098 case ARM_PSCI_FUNC_ID_SYSTEM_OFF:
1099 case ARM_PSCI_FUNC_ID_SYSTEM_RESET:
1100 case ARM_PSCI_FUNC_ID_SYSTEM_RESET2:
1101 case ARM_PSCI_FUNC_ID_CPU_ON:
1102 nemR3LnxSetGReg(pVCpu, ARMV8_AARCH64_REG_X0,
1103 false /*f64BitReg*/, false /*fSignExtend*/,
1104 (uint64_t)ARM_PSCI_STS_SUCCESS);
1105 break;
1106 default:
1107 nemR3LnxSetGReg(pVCpu, ARMV8_AARCH64_REG_X0,
1108 false /*f64BitReg*/, false /*fSignExtend*/,
1109 (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
1110 }
1111 break;
1112 }
1113 default:
1114 nemR3LnxSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
1115 }
1116 }
1117 else
1118 nemR3LnxSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
1119
1120
1121 return rcStrict;
1122}
1123
1124
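/**
 * Handles an exit from KVM_RUN, dispatching on the exit reason.
 *
 * @returns Strict VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pRun            The KVM run structure of the vCPU.
 * @param   pfStatefulExit  Where to indicate whether the exit is stateful and
 *                          needs flushing before returning to the EM loop.
 */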
1125static VBOXSTRICTRC nemHCLnxHandleExit(PVMCC pVM, PVMCPUCC pVCpu, struct kvm_run *pRun, bool *pfStatefulExit)
1126{
1127 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitTotal);
1128
1129 if (pVCpu->nem.s.fIrqDeviceLvls != pRun->s.regs.device_irq_level)
1130 {
1131 uint64_t fChanged = pVCpu->nem.s.fIrqDeviceLvls ^ pRun->s.regs.device_irq_level;
1132
1133 if (fChanged & KVM_ARM_DEV_EL1_VTIMER)
1134 {
1135 TMCpuSetVTimerNextActivation(pVCpu, UINT64_MAX);
1136 GICPpiSet(pVCpu, pVM->nem.s.u32GicPpiVTimer, RT_BOOL(pRun->s.regs.device_irq_level & KVM_ARM_DEV_EL1_VTIMER));
1137 }
1138
1139 if (fChanged & KVM_ARM_DEV_EL1_PTIMER)
1140 {
1141 //TMCpuSetVTimerNextActivation(pVCpu, UINT64_MAX);
1142 GICPpiSet(pVCpu, pVM->nem.s.u32GicPpiVTimer, RT_BOOL(pRun->s.regs.device_irq_level & KVM_ARM_DEV_EL1_PTIMER));
1143 }
1144
1145 pVCpu->nem.s.fIrqDeviceLvls = pRun->s.regs.device_irq_level;
1146 }
1147
1148 switch (pRun->exit_reason)
1149 {
1150 case KVM_EXIT_EXCEPTION:
1151 AssertFailed();
1152 break;
1153
1154 case KVM_EXIT_MMIO:
1155 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMmio);
1156 *pfStatefulExit = true;
1157 return nemHCLnxHandleExitMmio(pVM, pVCpu, pRun);
1158
1159 case KVM_EXIT_INTR: /* EINTR */
1160 //EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_INTERRUPTED),
1161 // pRun->s.regs.regs.pc, ASMReadTSC());
1162 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitIntr);
1163 Log5(("Intr/%u\n", pVCpu->idCpu));
1164 return VINF_SUCCESS;
1165
1166 case KVM_EXIT_HYPERCALL:
1167 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitHypercall);
1168 return nemHCLnxHandleExitHypercall(pVM, pVCpu, pRun);
1169
1170#if 0
1171 case KVM_EXIT_DEBUG:
1172 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitDebug);
1173 AssertFailed();
1174 break;
1175
1176 case KVM_EXIT_SYSTEM_EVENT:
1177 AssertFailed();
1178 break;
1179
1180 case KVM_EXIT_DIRTY_RING_FULL:
1181 AssertFailed();
1182 break;
1183 case KVM_EXIT_AP_RESET_HOLD:
1184 AssertFailed();
1185 break;
1186
1187
1188 case KVM_EXIT_SHUTDOWN:
1189 AssertFailed();
1190 break;
1191
1192 case KVM_EXIT_FAIL_ENTRY:
1193 LogRel(("NEM: KVM_EXIT_FAIL_ENTRY! hardware_entry_failure_reason=%#x cpu=%#x\n",
1194 pRun->fail_entry.hardware_entry_failure_reason, pRun->fail_entry.cpu));
1195 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_FAILED_ENTRY),
1196 pRun->s.regs.regs.pc, ASMReadTSC());
1197 return VERR_NEM_IPE_1;
1198
1199 case KVM_EXIT_INTERNAL_ERROR:
1200 /* we're counting sub-reasons inside the function. */
1201 return nemR3LnxHandleInternalError(pVCpu, pRun);
1202#endif
1203
1204 /*
1205 * Foreign and unknowns.
1206 */
1207#if 0
1208 case KVM_EXIT_IO:
1209 AssertLogRelMsgFailedReturn(("KVM_EXIT_IO on VCpu #%u at %RX64!\n", pVCpu->idCpu, pRun->s.regs.pc), VERR_NEM_IPE_1);
1210 case KVM_EXIT_NMI:
1211 AssertLogRelMsgFailedReturn(("KVM_EXIT_NMI on VCpu #%u at %RX64!\n", pVCpu->idCpu, pRun->s.regs.pc), VERR_NEM_IPE_1);
1212 case KVM_EXIT_EPR:
1213 AssertLogRelMsgFailedReturn(("KVM_EXIT_EPR on VCpu #%u at %RX64!\n", pVCpu->idCpu, pRun->s.regs.pc), VERR_NEM_IPE_1);
1214 case KVM_EXIT_WATCHDOG:
1215 AssertLogRelMsgFailedReturn(("KVM_EXIT_WATCHDOG on VCpu #%u at %RX64!\n", pVCpu->idCpu, pRun->s.regs.pc), VERR_NEM_IPE_1);
1216 case KVM_EXIT_ARM_NISV:
1217 AssertLogRelMsgFailedReturn(("KVM_EXIT_ARM_NISV on VCpu #%u at %RX64!\n", pVCpu->idCpu, pRun->s.regs.pc), VERR_NEM_IPE_1);
1218 case KVM_EXIT_S390_STSI:
1219 AssertLogRelMsgFailedReturn(("KVM_EXIT_S390_STSI on VCpu #%u at %RX64!\n", pVCpu->idCpu, pRun->s.regs.pc), VERR_NEM_IPE_1);
1220 case KVM_EXIT_S390_TSCH:
1221 AssertLogRelMsgFailedReturn(("KVM_EXIT_S390_TSCH on VCpu #%u at %RX64!\n", pVCpu->idCpu, pRun->s.regs.pc), VERR_NEM_IPE_1);
1222 case KVM_EXIT_OSI:
1223 AssertLogRelMsgFailedReturn(("KVM_EXIT_OSI on VCpu #%u at %RX64!\n", pVCpu->idCpu, pRun->s.regs.pc), VERR_NEM_IPE_1);
1224 case KVM_EXIT_PAPR_HCALL:
1225 AssertLogRelMsgFailedReturn(("KVM_EXIT_PAPR_HCALL on VCpu #%u at %RX64!\n", pVCpu->idCpu, pRun->s.regs.pc), VERR_NEM_IPE_1);
1226 case KVM_EXIT_S390_UCONTROL:
1227 AssertLogRelMsgFailedReturn(("KVM_EXIT_S390_UCONTROL on VCpu #%u at %RX64!\n", pVCpu->idCpu, pRun->s.regs.pc), VERR_NEM_IPE_1);
1228 case KVM_EXIT_DCR:
1229 AssertLogRelMsgFailedReturn(("KVM_EXIT_DCR on VCpu #%u at %RX64!\n", pVCpu->idCpu, pRun->s.regs.pc), VERR_NEM_IPE_1);
1230 case KVM_EXIT_S390_SIEIC:
1231 AssertLogRelMsgFailedReturn(("KVM_EXIT_S390_SIEIC on VCpu #%u at %RX64!\n", pVCpu->idCpu, pRun->s.regs.pc), VERR_NEM_IPE_1);
1232 case KVM_EXIT_S390_RESET:
1233 AssertLogRelMsgFailedReturn(("KVM_EXIT_S390_RESET on VCpu #%u at %RX64!\n", pVCpu->idCpu, pRun->s.regs.pc), VERR_NEM_IPE_1);
1234 case KVM_EXIT_UNKNOWN:
1235 AssertLogRelMsgFailedReturn(("KVM_EXIT_UNKNOWN on VCpu #%u at %RX64!\n", pVCpu->idCpu, pRun->s.regs.pc), VERR_NEM_IPE_1);
1236 case KVM_EXIT_XEN:
1237 AssertLogRelMsgFailedReturn(("KVM_EXIT_XEN on VCpu #%u at %RX64!\n", pVCpu->idCpu, pRun->s.regs.pc), VERR_NEM_IPE_1);
1238#endif
1239 default:
1240 AssertLogRelMsgFailedReturn(("Unknown exit reason %u on VCpu #%u!\n", pRun->exit_reason, pVCpu->idCpu), VERR_NEM_IPE_1);
1241 }
1242 RT_NOREF(pVM, pVCpu);
1243 return VERR_NOT_IMPLEMENTED;
1244}
1245
1246
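/**
 * The native (KVM) run loop for the given vCPU.
 *
 * @returns Strict VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */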
1247VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu)
1248{
1249 /*
1250 * Try switch to NEM runloop state.
1251 */
1252 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
1253 { /* likely */ }
1254 else
1255 {
1256 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
1257 LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
1258 return VINF_SUCCESS;
1259 }
1260
1261 /*
1262 * The run loop.
1263 */
1264 struct kvm_run * const pRun = pVCpu->nem.s.pRun;
1265 const bool fSingleStepping = DBGFIsStepping(pVCpu);
1266 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1267 bool fStatefulExit = false; /* For MMIO and IO exits. */
1268 for (unsigned iLoop = 0;; iLoop++)
1269 {
1270 /*
1271 * Sync the interrupt state.
1272 */
1273 rcStrict = nemHCLnxHandleInterruptFF(pVM, pVCpu);
1274 if (rcStrict == VINF_SUCCESS)
1275 { /* likely */ }
1276 else
1277 {
1278 LogFlow(("NEM/%u: breaking: nemHCLnxHandleInterruptFF -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
1279 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
1280 break;
1281 }
1282
1283 /*
1284 * Ensure KVM has the whole state.
1285 */
1286 if ((pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL) != CPUMCTX_EXTRN_ALL)
1287 {
1288 int rc2 = nemHCLnxExportState(pVM, pVCpu, &pVCpu->cpum.GstCtx);
1289 AssertRCReturn(rc2, rc2);
1290 }
1291
1292 /*
1293 * Poll timers and run for a bit.
1294 *
1295 * With the VID approach (ring-0 or ring-3) we can specify a timeout here,
1296 * so we take the time of the next timer event and use that as a deadline.
1297 * The rounding heuristics are "tuned" so that rhel5 (1K timer) will boot fine.
1298 */
1299 /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
1300 * the whole polling job when timers have changed... */
1301 uint64_t offDeltaIgnored;
1302 uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
1303 if ( !VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
1304 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
1305 {
1306 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_WAIT, VMCPUSTATE_STARTED_EXEC_NEM))
1307 {
1308 //LogFlow(("NEM/%u: Entry @ %04x:%08RX64 IF=%d EFL=%#RX64 SS:RSP=%04x:%08RX64 cr0=%RX64\n",
1309 // pVCpu->idCpu, pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip,
1310 // !!(pRun->s.regs.regs.rflags & X86_EFL_IF), pRun->s.regs.regs.rflags,
1311 // pRun->s.regs.sregs.ss.selector, pRun->s.regs.regs.rsp, pRun->s.regs.sregs.cr0));
1312 TMNotifyStartOfExecution(pVM, pVCpu);
1313
1314 int rcLnx = ioctl(pVCpu->nem.s.fdVCpu, KVM_RUN, 0UL);
1315
1316 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
1317 TMNotifyEndOfExecution(pVM, pVCpu, ASMReadTSC());
1318
1319#if 0 //def LOG_ENABLED
1320 if (LogIsFlowEnabled())
1321 {
1322 struct kvm_mp_state MpState = {UINT32_MAX};
1323 ioctl(pVCpu->nem.s.fdVCpu, KVM_GET_MP_STATE, &MpState);
1324 LogFlow(("NEM/%u: Exit @ %04x:%08RX64 IF=%d EFL=%#RX64 CR8=%#x Reason=%#x IrqReady=%d Flags=%#x %#lx\n", pVCpu->idCpu,
1325 pRun->s.regs.sregs.cs.selector, pRun->s.regs.regs.rip, pRun->if_flag,
1326 pRun->s.regs.regs.rflags, pRun->s.regs.sregs.cr8, pRun->exit_reason,
1327 pRun->ready_for_interrupt_injection, pRun->flags, MpState.mp_state));
1328 }
1329#endif
1330 fStatefulExit = false;
1331 if (RT_LIKELY(rcLnx == 0 || errno == EINTR))
1332 {
1333 /*
1334 * Deal with the exit.
1335 */
1336 rcStrict = nemHCLnxHandleExit(pVM, pVCpu, pRun, &fStatefulExit);
1337 if (rcStrict == VINF_SUCCESS)
1338 { /* hopefully likely */ }
1339 else
1340 {
1341 LogFlow(("NEM/%u: breaking: nemHCLnxHandleExit -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
1342 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
1343 break;
1344 }
1345 }
1346 else
1347 {
1348 int rc2 = RTErrConvertFromErrno(errno);
1349 AssertLogRelMsgFailedReturn(("KVM_RUN failed: rcLnx=%d errno=%u rc=%Rrc\n", rcLnx, errno, rc2), rc2);
1350 }
1351
1352 /*
1353 * If no relevant FFs are pending, loop.
1354 */
1355 if ( !VM_FF_IS_ANY_SET( pVM, !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
1356 && !VMCPU_FF_IS_ANY_SET(pVCpu, !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
1357 { /* likely */ }
1358 else
1359 {
1360
1361 /** @todo Try handle pending flags, not just return to EM loops. Take care
1362 * not to set important RCs here unless we've handled an exit. */
1363 LogFlow(("NEM/%u: breaking: pending FF (%#x / %#RX64)\n",
1364 pVCpu->idCpu, pVM->fGlobalForcedActions, (uint64_t)pVCpu->fLocalForcedActions));
1365 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPost);
1366 break;
1367 }
1368 }
1369 else
1370 {
1371 LogFlow(("NEM/%u: breaking: canceled %d (pre exec)\n", pVCpu->idCpu, VMCPU_GET_STATE(pVCpu) ));
1372 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnCancel);
1373 break;
1374 }
1375 }
1376 else
1377 {
1378 LogFlow(("NEM/%u: breaking: pending FF (pre exec)\n", pVCpu->idCpu));
1379 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPre);
1380 break;
1381 }
1382 } /* the run loop */
1383
1384
1385 /*
1386 * If the last exit was stateful, commit the state we provided before
1387 * returning to the EM loop so we have a consistent state and can safely
1388 * be rescheduled and whatnot. This may require us to make multiple runs
1389 * for larger MMIO and I/O operations. Sigh^3.
1390 *
1391 * Note! There is no 'ing way to reset the kernel side completion callback
1392 * for these stateful i/o exits. Very annoying interface.
1393 */
1394 /** @todo check how this works with string I/O and string MMIO. */
1395 if (fStatefulExit && RT_SUCCESS(rcStrict))
1396 {
1397 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatFlushExitOnReturn);
1398 uint32_t const uOrgExit = pRun->exit_reason;
1399 for (uint32_t i = 0; ; i++)
1400 {
1401 pRun->immediate_exit = 1;
1402 int rcLnx = ioctl(pVCpu->nem.s.fdVCpu, KVM_RUN, 0UL);
1403 Log(("NEM/%u: Flushed stateful exit -> %d/%d exit_reason=%d\n", pVCpu->idCpu, rcLnx, errno, pRun->exit_reason));
1404 if (rcLnx == -1 && errno == EINTR)
1405 {
1406 switch (i)
1407 {
1408 case 0: STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatFlushExitOnReturn1Loop); break;
1409 case 1: STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatFlushExitOnReturn2Loops); break;
1410 case 2: STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatFlushExitOnReturn3Loops); break;
1411 default: STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatFlushExitOnReturn4PlusLoops); break;
1412 }
1413 break;
1414 }
1415 AssertLogRelMsgBreakStmt(rcLnx == 0 && pRun->exit_reason == uOrgExit,
1416 ("rcLnx=%d errno=%d exit_reason=%d uOrgExit=%d\n", rcLnx, errno, pRun->exit_reason, uOrgExit),
1417 rcStrict = VERR_NEM_IPE_6);
1418 VBOXSTRICTRC rcStrict2 = nemHCLnxHandleExit(pVM, pVCpu, pRun, &fStatefulExit);
1419 if (rcStrict2 == VINF_SUCCESS || rcStrict2 == rcStrict)
1420 { /* likely */ }
1421 else if (RT_FAILURE(rcStrict2))
1422 {
1423 rcStrict = rcStrict2;
1424 break;
1425 }
1426 else
1427 {
1428 AssertLogRelMsgBreakStmt(rcStrict == VINF_SUCCESS,
1429 ("rcStrict=%Rrc rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict), VBOXSTRICTRC_VAL(rcStrict2)),
1430 rcStrict = VERR_NEM_IPE_7);
1431 rcStrict = rcStrict2;
1432 }
1433 }
1434 pRun->immediate_exit = 0;
1435 }
1436
1437 /*
1438 * If the CPU is running, make sure to stop it before we try sync back the
1439 * state and return to EM. We don't sync back the whole state if we can help it.
1440 */
1441 if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
1442 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
1443
1444 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL)
1445 {
1446 /* Try anticipate what we might need. */
1447 uint64_t fImport = IEM_CPUMCTX_EXTRN_MUST_MASK /*?*/;
1448 if ( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
1449 || RT_FAILURE(rcStrict))
1450 fImport = CPUMCTX_EXTRN_ALL;
1451 else if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ))
1452 fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;
1453
1454 if (pVCpu->cpum.GstCtx.fExtrn & fImport)
1455 {
1456 int rc2 = nemHCLnxImportState(pVCpu, fImport, &pVCpu->cpum.GstCtx);
1457 if (RT_SUCCESS(rc2))
1458 pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
1459 else if (RT_SUCCESS(rcStrict))
1460 rcStrict = rc2;
1461 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
1462 pVCpu->cpum.GstCtx.fExtrn = 0;
1463 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
1464 }
1465 else
1466 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
1467 }
1468 else
1469 {
1470 pVCpu->cpum.GstCtx.fExtrn = 0;
1471 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
1472 }
1473
1474 LogFlow(("NEM/%u: %08RX64 => %Rrc\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc, VBOXSTRICTRC_VAL(rcStrict) ));
1475 return rcStrict;
1476}
1477
1478
1479/** @page pg_nem_linux_armv8 NEM/linux - Native Execution Manager, Linux.
1480 *
1481 * This is using KVM.
1482 *
1483 */
1484