Changeset 99051 in vbox
- Timestamp: Mar 19, 2023 4:40:06 PM (22 months ago)
- Location: trunk
- Files: 4 added, 35 edited
trunk/include/VBox/vmm/cpum-armv8.h (r99023 → r99051)

 typedef CPUMDBENTRY const *PCCPUMDBENTRY;
 
+
+/** @name Changed flags.
+ * These flags are used to keep track of which important register that
+ * have been changed since last they were reset.  The only one allowed
+ * to clear them is REM!
+ *
+ * @todo This is obsolete, but remains as it will be refactored for coordinating
+ *       IEM and NEM/HM later. Probably.
+ * @{
+ */
+#define CPUM_CHANGED_GLOBAL_TLB_FLUSH   RT_BIT(0)
+#define CPUM_CHANGED_ALL                ( CPUM_CHANGED_GLOBAL_TLB_FLUSH )
+/** @} */
+
 /** @} */
 RT_C_DECLS_END
trunk/include/VBox/vmm/cpum-x86-amd64.h (r99023 → r99051)

 /** @} */
 
-VMMDECL(void)       CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedAdd);
 VMMDECL(bool)       CPUMSupportsXSave(PVM pVM);
 VMMDECL(bool)       CPUMIsHostUsingSysEnter(PVM pVM);
…
 VMMDECL(bool)       CPUMIsHyperDebugStateActive(PVMCPU pVCpu);
 VMMDECL(uint32_t)   CPUMGetGuestCPL(PVMCPU pVCpu);
-VMMDECL(uint32_t)   CPUMGetGuestCodeBits(PVMCPU pVCpu);
-VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu);
 VMMDECL(uint32_t)   CPUMGetGuestMxCsrMask(PVM pVM);
 VMMDECL(uint64_t)   CPUMGetGuestScalableBusFrequency(PVM pVM);
trunk/include/VBox/vmm/cpum.h (r99025 → r99051)

 #ifndef VBOX_FOR_DTRACE_LIB
 
+VMMDECL(void)       CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedAdd);
 VMMDECL(PCPUMCTX)   CPUMQueryGuestCtxPtr(PVMCPU pVCpu);
 VMMDECL(CPUMMODE)   CPUMGetGuestMode(PVMCPU pVCpu);
+VMMDECL(uint32_t)   CPUMGetGuestCodeBits(PVMCPU pVCpu);
+VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu);
 
 /** @name Guest Register Getters.
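These getters move from the x86/AMD64-only header into the shared cpum.h so target-neutral code can call them on either architecture. A minimal sketch of such a caller, assuming only the declarations above; the helper name emDbgQueryDisMode and its logging are made up for illustration:

    #include <VBox/vmm/cpum.h>   /* now declares these for all targets */
    #include <VBox/log.h>

    /* Hypothetical target-neutral helper: picks the disassembler CPU mode
       from the shared CPUM API instead of x86-specific state. */
    static DISCPUMODE emDbgQueryDisMode(PVMCPU pVCpu)
    {
        DISCPUMODE const enmMode = CPUMGetGuestDisMode(pVCpu);
        Log2(("emDbgQueryDisMode: %u-bit guest code\n", CPUMGetGuestCodeBits(pVCpu)));
        return enmMode;
    }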
trunk/include/VBox/vmm/cpumctx-armv8.h (r98972 → r99051)

     /** Floating point status register. */
     uint64_t            fpsr;
-    /** The internal PSTATE value (accessible in CPSR with AARCH32 and through
-     * NZCV and DAIF special purpose registers. */
-    uint32_t            fPState;
+    /** The internal PSTATE state (as given from SPSR_EL2). */
+    uint64_t            fPState;
 
     uint32_t            fPadding0;
…
 #define CPUMCTX_EXTRN_FPCR                      UINT64_C(0x0000000000004000)
 /** The FPSR (Floating Point Status Register) is kept externally. */
-#define CPUMCTX_EXTRN_FCSR                      UINT64_C(0x0000000000008000)
+#define CPUMCTX_EXTRN_FPSR                      UINT64_C(0x0000000000008000)
 
 /** Mask of bits the keepers can use for state tracking. */
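The rename fixes a misnomer: the AArch64 floating-point status register is FPSR, so the externally-kept flag now matches the context field it covers. A sketch of how an execution-engine backend might consume the flag when importing state; nemHvImportFpsr and ReadFpsrFromHw are illustrative stand-ins, not real VBox or Hypervisor calls, and the fExtrn bookkeeping is assumed to work as on the x86 context:

    /* Hypothetical import helper: only fetch FPSR from the hardware vCPU
       while the context still marks it as externally kept. */
    static void nemHvImportFpsr(PVMCPU pVCpu, PCPUMCTX pCtx)
    {
        if (pCtx->fExtrn & CPUMCTX_EXTRN_FPSR)
        {
            pCtx->fpsr    = ReadFpsrFromHw(pVCpu);  /* stand-in for the backend read */
            pCtx->fExtrn &= ~CPUMCTX_EXTRN_FPSR;    /* value is now valid in the context */
        }
    }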
trunk/src/VBox/VMM/Makefile.kmk (r98980 → r99051)

 	VMMR3/DBGFR3Bp.cpp \
 	VMMR3/DBGFR3BugCheck.cpp \
+	VMMR3/DBGFCoreWrite.cpp \
 	VMMR3/DBGFCpu.cpp \
 	VMMR3/DBGFDisas.cpp \
…
 	VMMR3/DBGFStack.cpp \
 	VMMR3/DBGFR3Flow.cpp \
+	VMMR3/DBGFR3FlowTrace.cpp \
 	VMMR3/DBGFR3Trace.cpp \
 	VMMR3/DBGFR3SampleReport.cpp \
…
 	VMMR3/EMR3Dbg.cpp \
 	VMMR3/EMR3Nem.cpp \
-	VMMR3/GCM.cpp \
 	VMMR3/GIM.cpp \
 	VMMR3/IEMR3.cpp \
…
 	VMMR3/PDMR3Task.cpp \
 	VMMR3/PDMThread.cpp \
-	VMMR3/PGM.cpp \
+	VMMR3/PGM-armv8.cpp \
 	VMMR3/PGMDbg.cpp \
 	VMMR3/PGMHandler.cpp \
…
 	VMMR3/VMMGuruMeditation.cpp \
 	VMMR3/VMMTests.cpp \
-	VMMR3/HM.cpp \
-	VMMAll/APICAll.cpp \
-	VMMAll/CPUMAllCpuId.cpp \
-	VMMAll/CPUMAllRegs.cpp \
-	VMMAll/CPUMAllMsrs.cpp \
+	VMMR3/HM-armv8.cpp \
+	VMMAll/CPUMAllRegs-armv8.cpp \
 	VMMAll/DBGFAll.cpp \
-	VMMAll/DBGFAllBp.cpp \
 	$(if-expr defined(VBOX_WITH_DBGF_TRACING), VMMAll/DBGFAllTracer.cpp,) \
+	VMMAll/IEMAll-armv8.cpp \
 	VMMAll/IOMAll.cpp \
 	VMMAll/IOMAllMmioNew.cpp \
…
 	VMMAll/PDMAllQueue.cpp \
 	VMMAll/PDMAllTask.cpp \
-	VMMAll/PGMAll.cpp \
 	VMMAll/PGMAllHandler.cpp \
 	VMMAll/PGMAllPhys.cpp \
 	VMMAll/PGMAllPool.cpp \
 	VMMAll/EMAll.cpp \
-	VMMAll/GCMAll.cpp \
 	VMMAll/GIMAll.cpp \
-	VMMAll/GIMAllHv.cpp \
-	VMMAll/GIMAllKvm.cpp \
 	VMMAll/TMAll.cpp \
 	VMMAll/TMAllCpu.cpp \
…
 
 VBoxVMMArm_SOURCES.darwin.arm64 += \
-	VMMR3/NEMR3Native-darwin.cpp
+	VMMR3/NEMR3Native-darwin-armv8.cpp
 VBoxVMMArm_DEFS.darwin.arm64 += VBOX_WITH_NATIVE_NEM
+VBoxVMMArm_LDFLAGS.darwin.arm64 = -framework Hypervisor
 
 VBoxVMMArm_LIBS = \
 	$(PATH_STAGE_LIB)/DisasmR3$(VBOX_SUFF_LIB)
-#ifdef VBOX_WITH_DEBUGGER
-#VBoxVMMArm_LIBS += \
-#	$(PATH_STAGE_LIB)/Debugger$(VBOX_SUFF_LIB)
-#endif
+ifdef VBOX_WITH_DEBUGGER
+VBoxVMMArm_LIBS += \
+	$(PATH_STAGE_LIB)/Debugger-armv8$(VBOX_SUFF_LIB)
+endif
 VBoxVMMArm_LIBS += \
 	$(LIB_RUNTIME)
trunk/src/VBox/VMM/VMMAll/DBGFAll.cpp (r98103 → r99051)

 
 
+#if !defined(VBOX_VMM_TARGET_ARMV8)
 /**
  * Gets the hardware breakpoint configuration as DR7.
…
     return 0;
 }
+#endif /* VBOX_VMM_TARGET_ARMV8 */
 
 
…
      * Any events on the stack. Should the incoming event be ignored?
      */
+#if defined(VBOX_VMM_TARGET_ARMV8)
+    uint64_t const rip = CPUMGetGuestFlatPC(pVCpu); /* rip is a misnomer but saves us #ifdef's later on. */
+#else
     uint64_t const rip = CPUMGetGuestRIP(pVCpu);
+#endif
     uint32_t i = pVCpu->dbgf.s.cEvents;
     if (i > 0)
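This is the recurring pattern throughout the changeset: x86-only code (DR7, RIP) is fenced off behind VBOX_VMM_TARGET_ARMV8, and where a target-neutral value exists, here the flat program counter, both branches feed the same variable so the code below stays unconditional. A condensed sketch of the idiom; dbgfR3EventPc is a made-up name:

    /* One variable, two target-specific sources: the rest of the function
       can use the returned value without further #ifdef's. */
    static uint64_t dbgfR3EventPc(PVMCPU pVCpu)
    {
    #if defined(VBOX_VMM_TARGET_ARMV8)
        return CPUMGetGuestFlatPC(pVCpu);  /* PC on AArch64 */
    #else
        return CPUMGetGuestRIP(pVCpu);     /* RIP on x86/AMD64 */
    #endif
    }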
trunk/src/VBox/VMM/VMMAll/EMAll.cpp (r98103 → r99051)

 
 
+#if !defined(VBOX_VMM_TARGET_ARMV8)
 /**
  * Prepare an MWAIT - essentials of the MONITOR instruction.
…
     return false;
 }
+#endif
 
 
…
     if (RT_FAILURE(rc))
     {
+#if defined(VBOX_VMM_TARGET_ARMV8)
+        AssertReleaseFailed();
+#else
         /*
          * If we fail to find the page via the guest's page tables
…
             HMInvalidatePage(pVCpu, uSrcAddr + cbToRead - 1);
         }
+#endif
     }
 }
…
 VMM_INT_DECL(int) EMInterpretDisasCurrent(PVMCPUCC pVCpu, PDISCPUSTATE pDis, unsigned *pcbInstr)
 {
+#if defined(VBOX_VMM_TARGET_ARMV8)
+    return EMInterpretDisasOneEx(pVCpu, (RTGCUINTPTR)CPUMGetGuestFlatPC(pVCpu), pDis, pcbInstr);
+#else
     PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
     RTGCPTR GCPtrInstr;
-#if 0
+
+# if 0
     int rc = SELMToFlatEx(pVCpu, DISSELREG_CS, pCtx, pCtx->rip, 0, &GCPtrInstr);
-#else
+# else
     /** @todo Get the CPU mode as well while we're at it! */
     int rc = SELMValidateAndConvertCSAddr(pVCpu, pCtx->eflags.u, pCtx->ss.Sel, pCtx->cs.Sel, &pCtx->cs, pCtx->rip, &GCPtrInstr);
-#endif
+# endif
     if (RT_SUCCESS(rc))
         return EMInterpretDisasOneEx(pVCpu, (RTGCUINTPTR)GCPtrInstr, pDis, pcbInstr);
…
               pCtx->cs.Sel, (RTGCPTR)pCtx->rip, pCtx->ss.Sel & X86_SEL_RPL, rc));
     return rc;
+#endif
 }
…
 VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInstruction(PVMCPUCC pVCpu)
 {
+#if defined(VBOX_VMM_TARGET_ARMV8)
+    LogFlow(("EMInterpretInstruction %RGv\n", (RTGCPTR)CPUMGetGuestFlatPC(pVCpu)));
+#else
     LogFlow(("EMInterpretInstruction %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
+#endif
 
     VBOXSTRICTRC rc = IEMExecOneBypassEx(pVCpu, NULL /*pcbWritten*/);
trunk/src/VBox/VMM/VMMAll/GIMAll.cpp (r98103 → r99051)

     switch (pVM->gim.s.enmProviderId)
     {
+#if !defined(VBOX_VMM_TARGET_ARMV8)
         case GIMPROVIDERID_HYPERV:
             return gimHvGetMmio2Regions(pVM, pcRegions);
+#endif
         default:
             break;
…
     switch (pVM->gim.s.enmProviderId)
     {
+#if !defined(VBOX_VMM_TARGET_ARMV8)
         case GIMPROVIDERID_HYPERV:
             return gimHvAreHypercallsEnabled(pVM);
…
         case GIMPROVIDERID_KVM:
             return gimKvmAreHypercallsEnabled(pVCpu);
+#endif
         default:
             return false;
…
     switch (pVM->gim.s.enmProviderId)
     {
+#if !defined(VBOX_VMM_TARGET_ARMV8)
         case GIMPROVIDERID_HYPERV:
             return gimHvHypercall(pVCpu, pCtx);
…
         case GIMPROVIDERID_KVM:
             return gimKvmHypercall(pVCpu, pCtx);
+#endif
         default:
             AssertMsgFailed(("GIMHypercall: for provider %u not available/implemented\n", pVM->gim.s.enmProviderId));
…
     switch (pVM->gim.s.enmProviderId)
     {
+#if !defined(VBOX_VMM_TARGET_ARMV8)
         case GIMPROVIDERID_HYPERV:
             return gimHvHypercallEx(pVCpu, pCtx, uDisOpcode, cbInstr);
…
         case GIMPROVIDERID_KVM:
             return gimKvmHypercallEx(pVCpu, pCtx, uDisOpcode, cbInstr);
+#endif
         default:
             AssertMsgFailedReturn(("enmProviderId=%u\n", pVM->gim.s.enmProviderId), VERR_GIM_HYPERCALLS_NOT_AVAILABLE);
…
     switch (pVM->gim.s.enmProviderId)
     {
+#if !defined(VBOX_VMM_TARGET_ARMV8)
         case GIMPROVIDERID_HYPERV:
             return gimHvHypercallEx(pVCpu, pCtx, Dis.pCurInstr->uOpcode, Dis.cbInstr);
…
         case GIMPROVIDERID_KVM:
             return gimKvmHypercallEx(pVCpu, pCtx, Dis.pCurInstr->uOpcode, Dis.cbInstr);
+#endif
         default:
             AssertMsgFailed(("GIMExecHypercallInstr: for provider %u not available/implemented\n", pVM->gim.s.enmProviderId));
…
     }
 
+#if !defined(VBOX_VMM_TARGET_ARMV8)
     Log(("GIM: GIMExecHypercallInstr: Failed to disassemble CS:RIP=%04x:%08RX64. rc=%Rrc\n", pCtx->cs.Sel, pCtx->rip, rc));
+#endif
     return rc;
 }
…
     switch (pVM->gim.s.enmProviderId)
     {
+#if !defined(VBOX_VMM_TARGET_ARMV8)
         case GIMPROVIDERID_HYPERV:
             return gimHvIsParavirtTscEnabled(pVM);
…
         case GIMPROVIDERID_KVM:
             return gimKvmIsParavirtTscEnabled(pVM);
+#endif
         default:
             break;
…
     switch (pVM->gim.s.enmProviderId)
     {
+#if !defined(VBOX_VMM_TARGET_ARMV8)
         case GIMPROVIDERID_KVM:
             return gimKvmShouldTrapXcptUD(pVM);
…
         case GIMPROVIDERID_HYPERV:
             return gimHvShouldTrapXcptUD(pVCpu);
+#endif
         default:
             return false;
…
     switch (pVM->gim.s.enmProviderId)
     {
+#if !defined(VBOX_VMM_TARGET_ARMV8)
         case GIMPROVIDERID_KVM:
             return gimKvmXcptUD(pVM, pVCpu, pCtx, pDis, pcbInstr);
…
         case GIMPROVIDERID_HYPERV:
             return gimHvXcptUD(pVCpu, pCtx, pDis, pcbInstr);
+#endif
         default:
             return VERR_GIM_OPERATION_FAILED;
     }
 
 
+#if !defined(VBOX_VMM_TARGET_ARMV8)
 /**
  * Invokes the read-MSR handler for the GIM provider configured for the VM.
…
     return VERR_BUFFER_OVERFLOW;
 }
+#endif
trunk/src/VBox/VMM/VMMAll/IOMAllMmioNew.cpp (r98103 → r99051)

     AssertReturn(pDevIns, VERR_INVALID_POINTER);
 
+#if defined(VBOX_VMM_TARGET_ARMV8)
+    /** @todo NEM: MMIO page aliasing. */
+    return VINF_SUCCESS; /* ignore */ /** @todo return some indicator if we fail here */
+#else
     /** @todo Why is this restricted to protected mode??? Try it in all modes! */
     PVMCPUCC pVCpu = VMMGetCpu(pVM);
…
      */
     AssertReturn(hRegion < RT_MIN(pVM->iom.s.cMmioRegs, pVM->iom.s.cMmioAlloc), VERR_IOM_INVALID_MMIO_HANDLE);
-#ifdef IN_RING0
+# ifdef IN_RING0
     AssertReturn(hRegion < pVM->iomr0.s.cMmioAlloc, VERR_IOM_INVALID_MMIO_HANDLE);
     PIOMMMIOENTRYR3 const pRegEntry = &pVM->iomr0.s.paMmioRing3Regs[hRegion];
…
            || (   pVM->iomr0.s.paMmioRegs[hRegion].pDevIns == NULL
                && pRegEntry->pDevIns == pDevIns->pDevInsForR3), VERR_ACCESS_DENIED);
-#else
+# else
     PIOMMMIOENTRYR3 const pRegEntry = &pVM->iom.s.paMmioRegs[hRegion];
     AssertReturn(pRegEntry->cbRegion > 0, VERR_IOM_INVALID_MMIO_HANDLE);
     AssertReturn(pRegEntry->pDevIns == pDevIns, VERR_ACCESS_DENIED);
-#endif
+# endif
     AssertReturn(offRegion < pRegEntry->cbRegion, VERR_OUT_OF_RANGE);
     Assert((pRegEntry->cbRegion & GUEST_PAGE_OFFSET_MASK) == 0);
…
     /** @todo either ditch this or replace it with something that works in the
      *        nested case, since we really only care about nested paging! */
-#if 0
+# if 0
     /*
      * Modify the shadow page table. Since it's an MMIO page it won't be present and we
…
      * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
      */
-# if 0 /* The assertion is wrong for the PGM_SYNC_CLEAR_PGM_POOL and VINF_PGM_HANDLER_ALREADY_ALIASED cases. */
-#  ifdef VBOX_STRICT
+#  if 0 /* The assertion is wrong for the PGM_SYNC_CLEAR_PGM_POOL and VINF_PGM_HANDLER_ALREADY_ALIASED cases. */
+#   ifdef VBOX_STRICT
     uint64_t fFlags;
     RTHCPHYS HCPhys;
     rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
     Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
-#  endif
-# endif
+#   endif
+#  endif
     rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
     Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
-#endif
+# endif
     return rc;
+#endif
 }
…
     AssertReturn(pDevIns, VERR_INVALID_POINTER);
 
+#if defined(VBOX_VMM_TARGET_ARMV8)
+    /** @todo NEM: MMIO page aliasing. */
+    return VINF_SUCCESS; /* ignore */ /** @todo return some indicator if we fail here */
+#else
     /** @todo Get rid of this this real/protected or nested paging restriction,
      *        it probably shouldn't be here and would be nasty when the CPU
…
      */
     AssertReturn(hRegion < RT_MIN(pVM->iom.s.cMmioRegs, pVM->iom.s.cMmioAlloc), VERR_IOM_INVALID_MMIO_HANDLE);
-#ifdef IN_RING0
+# ifdef IN_RING0
     AssertReturn(hRegion < pVM->iomr0.s.cMmioAlloc, VERR_IOM_INVALID_MMIO_HANDLE);
     PIOMMMIOENTRYR3 const pRegEntry = &pVM->iomr0.s.paMmioRing3Regs[hRegion];
…
            || (   pVM->iomr0.s.paMmioRegs[hRegion].pDevIns == NULL
                && pRegEntry->pDevIns == pDevIns->pDevInsForR3), VERR_ACCESS_DENIED);
-#else
+# else
     PIOMMMIOENTRYR3 const pRegEntry = &pVM->iom.s.paMmioRegs[hRegion];
     AssertReturn(pRegEntry->cbRegion > 0, VERR_IOM_INVALID_MMIO_HANDLE);
     AssertReturn(pRegEntry->pDevIns == pDevIns, VERR_ACCESS_DENIED);
-#endif
+# endif
     Assert((pRegEntry->cbRegion & GUEST_PAGE_OFFSET_MASK) == 0);
 
…
 # endif
     return rc;
-}
-
+#endif
+}
+
trunk/src/VBox/VMM/VMMAll/PDMAll.cpp (r98103 → r99051)

     {
         VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_APIC);
+#if defined(VBOX_VMM_TARGET_ARMV8)
+        AssertReleaseFailed();
+#else
         uint32_t uTagSrc;
         rc = APICGetInterrupt(pVCpu, pu8Interrupt, &uTagSrc);
…
         /* else if it's masked by TPR/PPR/whatever, go ahead checking the PIC. Such masked
            interrupts shouldn't prevent ExtINT from being delivered. */
+#endif
     }
trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp (r98103 → r99051)

     int rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhysPage, pPage, true /*fFlushPTEs*/, &fFlushTLBs);
     AssertLogRelRCReturnVoid(rc);
+#if defined(VBOX_VMM_TARGET_ARMV8)
+    AssertReleaseFailed();
+#else
     HMFlushTlbOnAllVCpus(pVM);
+#endif
 
     /*
trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp (r98103 → r99051)

     if (pgmPoolIsPageLocked(pPage))
     {
+#if !defined(VBOX_VMM_TARGET_ARMV8)
         AssertMsg(   pPage->enmKind == PGMPOOLKIND_64BIT_PML4
                   || pPage->enmKind == PGMPOOLKIND_PAE_PDPT
…
                   || pPage->enmKind == PGMPOOLKIND_ROOT_NESTED,
                   ("Can't free the shadow CR3! (%RHp vs %RHp kind=%d\n", PGMGetHyperCR3(VMMGetCpu(pVM)), pPage->Core.Key, pPage->enmKind));
+#endif
         Log(("pgmPoolFlushPage: current active shadow CR3, rejected. enmKind=%s idx=%d\n", pgmPoolPoolKindToStr(pPage->enmKind), pPage->idx));
         PGM_UNLOCK(pVM);
trunk/src/VBox/VMM/VMMR3/CPUM-armv8.cpp (r99026 → r99051)

 #include <iprt/mp.h>
 #include <iprt/string.h>
+#include <iprt/armv8.h>
 
 
…
      */
     RT_BZERO(pCtx, sizeof(*pCtx));
+
+    /* Start in Supervisor mode. */
+    /** @todo Differentiate between Aarch64 and Aarch32 configuation. */
+    pCtx->fPState =   ARMV8_SPSR_EL2_AARCH64_SET_EL(ARMV8_AARCH64_EL_1)
+                    | ARMV8_SPSR_EL2_AARCH64_SP
+                    | ARMV8_SPSR_EL2_AARCH64_D
+                    | ARMV8_SPSR_EL2_AARCH64_A
+                    | ARMV8_SPSR_EL2_AARCH64_I
+                    | ARMV8_SPSR_EL2_AARCH64_F;
     /** @todo */
 }
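For reference, a sketch of what that initial value decodes to under the architectural SPSR_EL2 layout: M[3:0] selects EL1 with SP_EL1 (the "EL1h" mode), and the D/A/I/F bits at positions 9:6 mask all interrupt classes at reset. The plain-shift constants below are illustrative stand-ins, not the iprt/armv8.h definitions:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Architectural SPSR_EL2 bit positions (illustrative constants). */
        uint64_t const fEl1h = (1u << 2) | (1u << 0); /* M[3:2]=EL1, M[0]=use SP_ELx */
        uint64_t const fD    = 1u << 9;               /* debug exceptions masked */
        uint64_t const fA    = 1u << 8;               /* SError masked */
        uint64_t const fI    = 1u << 7;               /* IRQs masked */
        uint64_t const fF    = 1u << 6;               /* FIQs masked */

        printf("initial fPState = %#llx\n",
               (unsigned long long)(fEl1h | fD | fA | fI | fF)); /* prints 0x3c5 */
        return 0;
    }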
trunk/src/VBox/VMM/VMMR3/DBGFCoreWrite.cpp (r98103 → r99051)

     } while (0)
 
+#if defined(VBOX_VMM_TARGET_ARMV8)
+    AssertReleaseFailed();
+    RT_NOREF(pVCpu, pDbgfCpu);
+#else
     PVM pVM = pVCpu->CTX_SUFF(pVM);
     PCCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
…
     if (RT_LIKELY(pDbgfCpu->cbExt))
         memcpy(&pDbgfCpu->ext, &pCtx->XState, pDbgfCpu->cbExt);
+#endif
 
 #undef DBGFCOPYSEL
trunk/src/VBox/VMM/VMMR3/DBGFMem.cpp (r98972 → r99051)

 
 
+#if !defined(VBOX_VMM_TARGET_ARMV8)
 /**
  * Worker for DBGFR3SelQueryInfo that calls into SELM.
…
     return rc;
 }
+#endif
 
 
…
     memset(pSelInfo, 0, sizeof(*pSelInfo));
 
+#if defined(VBOX_VMM_TARGET_ARMV8)
+    RT_NOREF(Sel);
+    return VERR_NOT_SUPPORTED;
+#else
     /*
      * Dispatch the request to a worker running on the target CPU.
      */
     return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3SelQueryInfo, 5, pUVM, idCpu, Sel, fFlags, pSelInfo);
+#endif
 }
…
     }
 
+#if !defined(VBOX_VMM_TARGET_ARMV8)
     if (fFlags & DBGFPGDMP_FLAGS_CURRENT_CR3)
         cr3 = PGMGetHyperCR3(pVCpu);
+#endif
     if (fFlags & DBGFPGDMP_FLAGS_CURRENT_MODE)
         fFlags |= dbgfR3PagingDumpModeToFlags(PGMGetShadowMode(pVCpu));
trunk/src/VBox/VMM/VMMR3/EM.cpp (r98980 → r99051)

 
     int rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll,
-#if defined(RT_ARCH_ARM64) && defined(RT_OS_DARWIN)
+#if defined(RT_ARCH_ARM64) && defined(RT_OS_DARWIN) && !defined(VBOX_VMM_TARGET_ARMV8)
                               true
 #else
…
         || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
         AssertLogRelMsgFailedStmt(("Bad EM state."), VERR_EM_INTERNAL_ERROR);
+#if !defined(VBOX_VMM_TARGET_ARMV8)
     else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
         rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
+#endif
     else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_NEM)
         rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
…
         return EMSTATE_IEM;
 
+#if !defined(VBOX_VMM_TARGET_ARMV8)
     if (VM_IS_HM_ENABLED(pVM))
     {
…
         return EMSTATE_HM;
     }
-    else if (NEMR3CanExecuteGuest(pVM, pVCpu))
+    else
+#endif
+    if (NEMR3CanExecuteGuest(pVM, pVCpu))
         return EMSTATE_NEM;
 
…
         case VINF_EM_RESCHEDULE_HM:
             Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
+#if !defined(VBOX_VMM_TARGET_ARMV8)
             if (VM_IS_HM_ENABLED(pVM))
             {
…
                 }
             }
-            else if (VM_IS_NEM_ENABLED(pVM))
+            else
+#endif
+            if (VM_IS_NEM_ENABLED(pVM))
             {
                 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_NEM)\n", enmOldState, EMSTATE_NEM));
…
              */
             case EMSTATE_HM:
+#if defined(VBOX_VMM_TARGET_ARMV8)
+                AssertReleaseFailed(); /* Should never get here. */
+#else
                 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
+#endif
                 break;
 
…
             if (rc == VINF_SUCCESS)
             {
+#if defined(VBOX_VMM_TARGET_ARMV8)
+                AssertReleaseFailed();
+#else
                 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
                     APICUpdatePendingInterrupts(pVCpu);
+#endif
 
                 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
trunk/src/VBox/VMM/VMMR3/EMR3Dbg.cpp (r98103 → r99051)

             break;
 
+#if !defined(VBOX_VMM_TARGET_ARMV8)
         case EMEXIT_F_KIND_VMX:
             pszExitName = HMGetVmxExitName( uFlagsAndType & EMEXIT_F_TYPE_MASK);
…
             pszExitName = HMGetSvmExitName( uFlagsAndType & EMEXIT_F_TYPE_MASK);
             break;
+#endif
 
         case EMEXIT_F_KIND_NEM:
…
         case EMEXIT_F_KIND_XCPT:
+#if defined(VBOX_VMM_TARGET_ARMV8)
+            pszExitName = NULL;
+            AssertReleaseFailed();
+#else
             switch (uFlagsAndType & EMEXIT_F_TYPE_MASK)
             {
…
                     break;
             }
+#endif
             break;
trunk/src/VBox/VMM/VMMR3/GIM.cpp (r98103 → r99051)

 #include <iprt/string.h>
 
+#if !defined(VBOX_VMM_TARGET_ARMV8)
 /* Include all GIM providers. */
-#include "GIMMinimalInternal.h"
-#include "GIMHvInternal.h"
-#include "GIMKvmInternal.h"
+# include "GIMMinimalInternal.h"
+# include "GIMHvInternal.h"
+# include "GIMKvmInternal.h"
+#endif
 
 
…
          * 'most up-to-date implementation' version number when 0. Otherwise,
          * we'll have abiguities when loading the state of older VMs. */
+#if !defined(VBOX_VMM_TARGET_ARMV8)
         if (!RTStrCmp(szProvider, "Minimal"))
         {
…
         }
         else
+#endif
             rc = VMR3SetError(pVM->pUVM, VERR_GIM_INVALID_PROVIDER, RT_SRC_POS, "Provider '%s' unknown.", szProvider);
     }
…
     switch (pVM->gim.s.enmProviderId)
     {
+#if !defined(VBOX_VMM_TARGET_ARMV8)
         case GIMPROVIDERID_MINIMAL:
             return gimR3MinimalInitCompleted(pVM);
…
         case GIMPROVIDERID_KVM:
             return gimR3KvmInitCompleted(pVM);
+#endif
         default:
             break;
…
     switch (pVM->gim.s.enmProviderId)
     {
+#if !defined(VBOX_VMM_TARGET_ARMV8)
         case GIMPROVIDERID_HYPERV:
             rc = gimR3HvSave(pVM, pSSM);
…
             AssertRCReturn(rc, rc);
             break;
+#endif
         default:
             break;
…
     switch (pVM->gim.s.enmProviderId)
     {
+#if !defined(VBOX_VMM_TARGET_ARMV8)
         case GIMPROVIDERID_HYPERV:
             rc = gimR3HvLoad(pVM, pSSM);
…
             AssertRCReturn(rc, rc);
             break;
+#endif
         default:
             break;
…
     switch (pVM->gim.s.enmProviderId)
     {
+#if !defined(VBOX_VMM_TARGET_ARMV8)
         case GIMPROVIDERID_HYPERV:
             return gimR3HvLoadDone(pVM, pSSM);
+#endif
         default:
             return VINF_SUCCESS;
…
     switch (pVM->gim.s.enmProviderId)
     {
+#if !defined(VBOX_VMM_TARGET_ARMV8)
         case GIMPROVIDERID_HYPERV:
             return gimR3HvTerm(pVM);
 
         case GIMPROVIDERID_KVM:
             return gimR3KvmTerm(pVM);
+#endif
         default:
             break;
…
     switch (pVM->gim.s.enmProviderId)
     {
+#if !defined(VBOX_VMM_TARGET_ARMV8)
         case GIMPROVIDERID_HYPERV:
             gimR3HvRelocate(pVM, offDelta);
             break;
+#endif
         default:
             break;
…
     switch (pVM->gim.s.enmProviderId)
     {
+#if !defined(VBOX_VMM_TARGET_ARMV8)
         case GIMPROVIDERID_HYPERV:
             return gimR3HvReset(pVM);
 
         case GIMPROVIDERID_KVM:
             return gimR3KvmReset(pVM);
+#endif
         default:
             break;
…
     switch (pVM->gim.s.enmProviderId)
     {
+#if !defined(VBOX_VMM_TARGET_ARMV8)
         case GIMPROVIDERID_HYPERV:
             return gimR3HvGetDebugSetup(pVM, pDbgSetup);
+#endif
         default:
             break;
trunk/src/VBox/VMM/VMMR3/IOM.cpp (r98103 → r99051)

      * Info.
      */
+#if !defined(VBOX_VMM_TARGET_ARMV8)
     DBGFR3InfoRegisterInternal(pVM, "ioport", "Dumps all IOPort ranges. No arguments.", &iomR3IoPortInfo);
+#endif
     DBGFR3InfoRegisterInternal(pVM, "mmio", "Dumps all MMIO ranges. No arguments.", &iomR3MmioInfo);
 
…
      * regions and won't grow the table again.
      */
+# if !defined(VBOX_VMM_TARGET_ARMV8)
     for (uint32_t i = 0; i < pVM->iom.s.cIoPortRegs; i++)
     {
…
         iomR3IoPortRegStats(pVM, pRegEntry);
     }
+# endif
 
     for (uint32_t i = 0; i < pVM->iom.s.cMmioRegs; i++)
trunk/src/VBox/VMM/VMMR3/MM.cpp (r98103 → r99051)

     pVM->mm.s.cbRamAbove4GB = cbRam > offRamHole ? cbRam - offRamHole : 0;
 
+#if defined(VBOX_VMM_TARGET_ARMV8)
+    rc = PGMR3PhysRegisterRam(pVM, 0, cbRam, "Conventional RAM");
+#else
     /* First the conventional memory: */
     rc = PGMR3PhysRegisterRam(pVM, 0, RT_MIN(cbRam, 640*_1K), "Conventional RAM");
…
         }
     }
+#endif
 
     /*
…
     pVM->mm.s.fDoneMMR3InitPaging = true;
+#if !defined(VBOX_VMM_TARGET_ARMV8)
     AssertMsg(pVM->mm.s.cBasePages == cBasePages || RT_FAILURE(rc), ("%RX64 != %RX64\n", pVM->mm.s.cBasePages, cBasePages));
+#endif
 
     LogFlow(("MMR3InitPaging: returns %Rrc\n", rc));
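On x86 the RAM is split around the 32-bit MMIO hole, while the ARMv8 path registers one flat range starting at 0. A tiny worked example of the split computed by the ternary above; the sizes are made up for illustration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t const _1G        = UINT64_C(0x40000000);
        uint64_t const cbRam      = 6 * _1G;           /* configured guest RAM */
        uint64_t const offRamHole = 3 * _1G + _1G / 2; /* MMIO hole starts at 3.5 GiB */

        /* Same formula as pVM->mm.s.cbRamAbove4GB above. */
        uint64_t const cbAbove4GB = cbRam > offRamHole ? cbRam - offRamHole : 0;
        printf("RAM below the hole: %llu MiB, remapped above 4 GiB: %llu MiB\n",
               (unsigned long long)(offRamHole >> 20),
               (unsigned long long)(cbAbove4GB >> 20));
        return 0;
    }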
trunk/src/VBox/VMM/VMMR3/PDMDevHlp.cpp (r98993 → r99051)

     VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);
 
+#if defined(VBOX_VMM_TARGET_ARMV8)
+    int rc = VERR_NOT_SUPPORTED;
+    AssertReleaseFailed();
+#else
     int rc = IOMR3IoPortCreate(pVM, pDevIns, cPorts, fFlags, pPciDev, iPciRegion,
                                pfnOut, pfnIn, pfnOutStr, pfnInStr, pvUser, pszDesc, paExtDescs, phIoPorts);
+#endif
 
     LogFlow(("pdmR3DevHlp_IoPortCreateEx: caller='%s'/%d: returns %Rrc (*phIoPorts=%#x)\n",
…
     VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
 
+#if defined(VBOX_VMM_TARGET_ARMV8)
+    int rc = VERR_NOT_SUPPORTED;
+    AssertReleaseFailed();
+#else
     int rc = IOMR3IoPortMap(pVM, pDevIns, hIoPorts, Port);
+#endif
 
     LogFlow(("pdmR3DevHlp_IoPortMap: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
…
     VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
 
+#if defined(VBOX_VMM_TARGET_ARMV8)
+    int rc = VERR_NOT_SUPPORTED;
+    AssertReleaseFailed();
+#else
     int rc = IOMR3IoPortUnmap(pVM, pDevIns, hIoPorts);
+#endif
 
     LogFlow(("pdmR3DevHlp_IoPortMap: caller='%s'/%d: returns %Rrc\n", pDevIns->pReg->szName, pDevIns->iInstance, rc));
…
     LogFlow(("pdmR3DevHlp_IoPortGetMappingAddress: caller='%s'/%d: hIoPorts=%#x\n", pDevIns->pReg->szName, pDevIns->iInstance, hIoPorts));
 
+#if defined(VBOX_VMM_TARGET_ARMV8)
+    uint32_t uAddress = UINT32_MAX;
+    AssertReleaseFailed();
+#else
     uint32_t uAddress = IOMR3IoPortGetMappingAddress(pDevIns->Internal.s.pVMR3, pDevIns, hIoPorts);
+#endif
 
     LogFlow(("pdmR3DevHlp_IoPortGetMappingAddress: caller='%s'/%d: returns %#RX32\n", pDevIns->pReg->szName, pDevIns->iInstance, uAddress));
…
     VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
 
+#if defined(VBOX_VMM_TARGET_ARMV8)
+    RT_NOREF(Port, u32Value, cbValue);
+    VBOXSTRICTRC rcStrict = VERR_NOT_SUPPORTED;
+    AssertReleaseFailed();
+#else
     PVMCPU pVCpu = VMMGetCpu(pVM);
     AssertPtrReturn(pVCpu, VERR_ACCESS_DENIED);
 
     VBOXSTRICTRC rcStrict = IOMIOPortWrite(pVM, pVCpu, Port, u32Value, cbValue);
+#endif
 
     LogFlow(("pdmR3DevHlp_IoPortWrite: caller='%s'/%d: returns %Rrc\n",
…
         case PDMPCIDEV_IORGN_F_IOPORT_HANDLE:
             AssertReturn(enmType == PCI_ADDRESS_SPACE_IO, VERR_INVALID_FLAGS);
+#if defined(VBOX_VMM_TARGET_ARMV8)
+            rc = VERR_NOT_SUPPORTED;
+            AssertReleaseFailed();
+            AssertRCReturn(rc, rc);
+#else
             rc = IOMR3IoPortValidateHandle(pVM, pDevIns, (IOMIOPORTHANDLE)hHandle);
             AssertRCReturn(rc, rc);
+#endif
             break;
         case PDMPCIDEV_IORGN_F_MMIO_HANDLE:
…
     VM_ASSERT_EMT(pDevIns->Internal.s.pVMR3);
     LogFlow(("pdmR3DevHlp_A20Set: caller='%s'/%d: fEnable=%d\n", pDevIns->pReg->szName, pDevIns->iInstance, fEnable));
+#ifdef VBOX_VMM_TARGET_ARMV8
+    AssertReleaseFailed();
+#else
     PGMR3PhysSetA20(VMMGetCpu(pDevIns->Internal.s.pVMR3), fEnable);
+#endif
 }
trunk/src/VBox/VMM/VMMR3/PDMDevMiscHlp.cpp (r98993 → r99051)

     Assert(pVM->enmVMState != VMSTATE_LOADING || pVM->pdm.s.fStateLoaded);
 
+#if defined(VBOX_VMM_TARGET_ARMV8)
+    AssertReleaseFailed();
+#else
     APICLocalInterrupt(pVCpu, 0 /* u8Pin */, 1 /* u8Level */, VINF_SUCCESS /* rcRZ */);
+#endif
 }
…
     Assert(pVM->enmVMState != VMSTATE_LOADING || pVM->pdm.s.fStateLoaded);
 
+#if defined(VBOX_VMM_TARGET_ARMV8)
+    AssertReleaseFailed();
+#else
     APICLocalInterrupt(pVCpu, 0 /* u8Pin */, 0 /* u8Level */, VINF_SUCCESS /* rcRZ */);
+#endif
 }
…
     LogFlow(("pdmR3IoApicHlp_ApicBusDeliver: caller='%s'/%d: u8Dest=%RX8 u8DestMode=%RX8 u8DeliveryMode=%RX8 uVector=%RX8 u8Polarity=%RX8 u8TriggerMode=%RX8 uTagSrc=%#x\n",
              pDevIns->pReg->szName, pDevIns->iInstance, u8Dest, u8DestMode, u8DeliveryMode, uVector, u8Polarity, u8TriggerMode, uTagSrc));
+#if defined(VBOX_VMM_TARGET_ARMV8)
+    AssertReleaseFailed();
+    return VERR_NOT_IMPLEMENTED;
+#else
     return APICBusDeliver(pVM, u8Dest, u8DestMode, u8DeliveryMode, uVector, u8Polarity, u8TriggerMode, uTagSrc);
+#endif
 }
trunk/src/VBox/VMM/VMMR3/PDMDevice.cpp (r98103 → r99051)

     RegCB.pCfgNode = NULL;
 
+#if defined(VBOX_VMM_TARGET_ARMV8)
+    int rc;
+#else
     /*
      * Register the internal VMM APIC device.
      */
     int rc = pdmR3DevReg_Register(&RegCB.Core, &g_DeviceAPIC);
     AssertRCReturn(rc, rc);
+#endif
 
     /*
trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp (r98103 → r99051)

 *********************************************************************************************************************************/
 
+#if !defined(VBOX_VMM_TARGET_ARMV8)
 /**
  * Sets the Address Gate 20 state.
…
         }
     }
-
+#endif
+
trunk/src/VBox/VMM/VMMR3/PGMSavedState.cpp (r98103 → r99051)

             AssertLogRelRCReturn(rc, rc);
 
+#if !defined(VBOX_VMM_TARGET_ARMV8)
             /* Update the PSE, NX flags and validity masks. */
             pVCpu->pgm.s.fGst32BitPageSizeExtension = CPUMIsGuestPageSizeExtEnabled(pVCpu);
             PGMNotifyNxeChanged(pVCpu, CPUMIsGuestNXEnabled(pVCpu));
+#endif
         }
     }
trunk/src/VBox/VMM/VMMR3/TM.cpp (r98103 → r99051)

      */
     pVM->tm.s.enmOriginalTSCMode = pVM->tm.s.enmTSCMode;
+#if !defined(VBOX_VMM_TARGET_ARMV8)
     CPUMR3SetCR4Feature(pVM, X86_CR4_TSD, ~X86_CR4_TSD);
+#endif
     LogRel(("TM: cTSCTicksPerSecond=%'RU64 (%#RX64) enmTSCMode=%d (%s)\n"
             "TM: cTSCTicksPerSecondHost=%'RU64 (%#RX64)\n"
trunk/src/VBox/VMM/VMMR3/TRPM.cpp (r98103 → r99051)

 VMMR3DECL(int) TRPMR3InjectEvent(PVM pVM, PVMCPU pVCpu, TRPMEVENT enmEvent, bool *pfInjected)
 {
+#if defined(VBOX_VMM_TARGET_ARMV8)
+    RT_NOREF(pVM, pVCpu, enmEvent, pfInjected);
+    AssertReleaseFailed();
+    return VERR_NOT_IMPLEMENTED;
+#else
     PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
     Assert(!CPUMIsInInterruptShadow(pCtx));
…
     {
         *pfInjected = true;
-#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
         if (   CPUMIsGuestInVmxNonRootMode(pCtx)
             && CPUMIsGuestVmxInterceptEvents(pCtx)
…
             return VBOXSTRICTRC_VAL(rcStrict);
         }
-#endif
-#ifdef RT_OS_WINDOWS
+# endif
+# ifdef RT_OS_WINDOWS
         if (!VM_IS_NEM_ENABLED(pVM))
         {
-#endif
+# endif
             rc = TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT);
             AssertRC(rc);
-#ifdef RT_OS_WINDOWS
+# ifdef RT_OS_WINDOWS
         }
         else
…
             return VBOXSTRICTRC_TODO(rcStrict);
         }
-#endif
+# endif
         STAM_REL_COUNTER_INC(&pVM->trpm.s.aStatForwardedIRQ[u8Interrupt]);
     }
…
            : VM_IS_NEM_ENABLED(pVM) ? VINF_EM_RESCHEDULE
            : VINF_EM_RESCHEDULE_REM; /* (Heed the halted state if this is changed!) */
+#endif
 }
trunk/src/VBox/VMM/VMMR3/VM.cpp (r98644 → r99051)

     if (RT_SUCCESS(rc))
     {
+#if !defined(VBOX_VMM_TARGET_ARMV8)
         rc = SELMR3Init(pVM);
+#endif
         if (RT_SUCCESS(rc))
         {
…
             if (RT_SUCCESS(rc))
             {
+#if !defined(VBOX_VMM_TARGET_ARMV8)
                 rc = GCMR3Init(pVM);
+#endif
                 if (RT_SUCCESS(rc))
                 {
…
                     AssertRC(rc2);
                 }
+#if !defined(VBOX_VMM_TARGET_ARMV8)
                 int rc2 = GCMR3Term(pVM);
                 AssertRC(rc2);
+#endif
             }
             int rc2 = GIMR3Term(pVM);
…
             AssertRC(rc2);
         }
+#if !defined(VBOX_VMM_TARGET_ARMV8)
         int rc2 = SELMR3Term(pVM);
         AssertRC(rc2);
+#endif
     }
     int rc2 = VMMR3Term(pVM);
…
     CPUMR3Relocate(pVM);
     HMR3Relocate(pVM);
+#if !defined(VBOX_VMM_TARGET_ARMV8)
     SELMR3Relocate(pVM);
+#endif
     VMMR3Relocate(pVM, offDelta);
+#if !defined(VBOX_VMM_TARGET_ARMV8)
     SELMR3Relocate(pVM);        /* !hack! fix stack! */
+#endif
     TRPMR3Relocate(pVM, offDelta);
     IOMR3Relocate(pVM, offDelta);
…
     PDMR3Relocate(pVM, offDelta);
     GIMR3Relocate(pVM, offDelta);
+#if !defined(VBOX_VMM_TARGET_ARMV8)
     GCMR3Relocate(pVM, offDelta);
+#endif
 }
…
     rc = TRPMR3Term(pVM);
     AssertRC(rc);
+#if !defined(VBOX_VMM_TARGET_ARMV8)
     rc = SELMR3Term(pVM);
     AssertRC(rc);
+#endif
     rc = HMR3Term(pVM);
     AssertRC(rc);
…
     PDMR3Reset(pVM);
     PGMR3Reset(pVM);
+#if !defined(VBOX_VMM_TARGET_ARMV8)
     SELMR3Reset(pVM);
+#endif
     TRPMR3Reset(pVM);
     IOMR3Reset(pVM);
…
     switch (pVM->bMainExecutionEngine)
     {
+#if !defined(VBOX_VMM_TARGET_ARMV8)
         case VM_EXEC_ENGINE_HW_VIRT:
             return HMIsLongModeAllowed(pVM);
+#endif
 
         case VM_EXEC_ENGINE_NATIVE_API:
trunk/src/VBox/VMM/VMMR3/VMM.cpp (r98103 → r99051)

         case VMINITCOMPLETED_HM:
         {
+#if !defined(VBOX_VMM_TARGET_ARMV8)
             /*
              * Disable the periodic preemption timers if we can use the
…
                 pVM->vmm.s.fUsePeriodicPreemptionTimers = false;
             LogRel(("VMM: fUsePeriodicPreemptionTimers=%RTbool\n", pVM->vmm.s.fUsePeriodicPreemptionTimers));
+#endif
 
             /*
…
 VMMR3_INT_DECL(int) VMMR3HmRunGC(PVM pVM, PVMCPU pVCpu)
 {
+#if defined(VBOX_VMM_TARGET_ARMV8)
+    /* We should actually never get here as the only execution engine is NEM. */
+    RT_NOREF(pVM, pVCpu);
+    AssertReleaseFailed();
+    return VERR_NOT_SUPPORTED;
+#else
     Log2(("VMMR3HmRunGC: (cs:rip=%04x:%RX64)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
 
…
     do
     {
-#ifdef NO_SUPCALLR0VMM
+# ifdef NO_SUPCALLR0VMM
         rc = VERR_GENERAL_FAILURE;
-#else
+# else
         rc = SUPR3CallVMMR0Fast(VMCC_GET_VMR0_FOR_CALL(pVM), VMMR0_DO_HM_RUN, pVCpu->idCpu);
         if (RT_LIKELY(rc == VINF_SUCCESS))
             rc = pVCpu->vmm.s.iLastGZRc;
-#endif
+# endif
     } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
 
-#if 0 /** @todo triggers too often */
+# if 0 /** @todo triggers too often */
     Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3));
-#endif
+# endif
 
     /*
      * Flush the logs
      */
-#ifdef LOG_ENABLED
+# ifdef LOG_ENABLED
     VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.Logger, NULL);
-#endif
+# endif
     VMM_FLUSH_R0_LOG(pVM, pVCpu, &pVCpu->vmm.s.u.s.RelLogger, RTLogRelGetDefaultInstance());
     if (rc != VERR_VMM_RING0_ASSERTION)
…
     }
     return vmmR3HandleRing0Assert(pVM, pVCpu);
+#endif
 }
…
         return VINF_SUCCESS;
 
+#if defined(VBOX_VMM_TARGET_ARMV8)
+    AssertReleaseFailed(); /** @todo */
+#else
     PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
-#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     if (CPUMIsGuestInVmxRootMode(pCtx))
     {
…
         return VINF_SUCCESS;
     }
-#endif
+# endif
 
     pCtx->cs.Sel        = uVector << 8;
…
     pCtx->cs.u32Limit   = UINT32_C(0x0000ffff);
     pCtx->rip           = 0;
+#endif
 
     Log(("vmmR3SendSipi for VCPU %d with vector %x\n", idCpu, uVector));
…
     PGMR3ResetCpu(pVM, pVCpu);
     PDMR3ResetCpu(pVCpu);   /* Only clears pending interrupts force flags */
+#if !defined(VBOX_VMM_TARGET_ARMV8)
     APICR3InitIpi(pVCpu);
+#endif
     TRPMR3ResetCpu(pVCpu);
     CPUMR3ResetCpu(pVM, pVCpu);
trunk/src/VBox/VMM/VMMR3/VMMGuruMeditation.cpp (r98103 → r99051)

         case VERR_VMM_LONG_JMP_ERROR:
         {
+#if defined(VBOX_VMM_TARGET_ARMV8)
+            AssertReleaseFailed();
+#else
             /*
              * Active trap? This is only of partial interest when in hardware
…
                                 "!! Skipping ring-0 registers and stack, rcErr=%Rrc\n", rcErr);
             }
+#endif /* !VBOX_VMM_TARGET_ARMV8 */
             break;
         }
trunk/src/VBox/VMM/include/EMHandleRCTmpl.h (r98103 → r99051)

             break;
 
+#if !defined(VBOX_VMM_TARGET_ARMV8)
         /*
          * Execute pending I/O Port access.
…
             rc = VBOXSTRICTRC_TODO(emR3ExecutePendingIoPortRead(pVM, pVCpu));
             break;
+#endif
 
         /*
…
             break;
 
+#if !defined(VBOX_VMM_TARGET_ARMV8)
         case VINF_EM_EMULATE_SPLIT_LOCK:
             rc = VBOXSTRICTRC_TODO(emR3ExecuteSplitLockInstruction(pVM, pVCpu));
             break;
+#endif
 
trunk/src/VBox/VMM/include/IOMInternal.h (r98103 → r99051)

 #ifdef IN_RING3
+# if !defined(VBOX_VMM_TARGET_ARMV8)
 DECLCALLBACK(void) iomR3IoPortInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
 void               iomR3IoPortRegStats(PVM pVM, PIOMIOPORTENTRYR3 pRegEntry);
+# endif
 DECLCALLBACK(void) iomR3MmioInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
 void               iomR3MmioRegStats(PVM pVM, PIOMMMIOENTRYR3 pRegEntry);
trunk/src/VBox/VMM/include/NEMInternal.h (r98103 → r99051)

 # include <iprt/critsect.h>
 #elif defined(RT_OS_DARWIN)
-# include "VMXInternal.h"
+# if defined(VBOX_VMM_TARGET_ARMV8)
+#  include <Hypervisor/Hypervisor.h>
+# else
+#  include "VMXInternal.h"
+# endif
 #endif
 
…
 
 #ifdef RT_OS_DARWIN
+# if !defined(VBOX_VMM_TARGET_ARMV8)
 /** vCPU ID declaration to avoid dragging in HV headers here. */
 typedef unsigned hv_vcpuid_t;
 /** The HV VM memory space ID (ASID). */
 typedef unsigned hv_vm_space_t;
+# endif
 
…
 /** @} */
 
+# if defined(VBOX_VMM_TARGET_ARMV8)
+/** The CPUMCTX_EXTRN_XXX mask for IEM. */
+#  define NEM_DARWIN_CPUMCTX_EXTRN_MASK_FOR_IEM      (  IEM_CPUMCTX_EXTRN_MUST_MASK )
+# else
 /** The CPUMCTX_EXTRN_XXX mask for IEM. */
-# define NEM_DARWIN_CPUMCTX_EXTRN_MASK_FOR_IEM      (  IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_INHIBIT_INT \
-                                                      | CPUMCTX_EXTRN_INHIBIT_NMI )
+#  define NEM_DARWIN_CPUMCTX_EXTRN_MASK_FOR_IEM      (  IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_INHIBIT_INT \
+                                                       | CPUMCTX_EXTRN_INHIBIT_NMI )
+# endif
+
 /** The CPUMCTX_EXTRN_XXX mask for IEM when raising exceptions. */
 # define NEM_DARWIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT (IEM_CPUMCTX_EXTRN_XCPT_MASK | NEM_DARWIN_CPUMCTX_EXTRN_MASK_FOR_IEM)
…
     /** Set if hv_vm_create() was called successfully. */
     bool                        fCreatedVm : 1;
+# if defined(VBOX_VMM_TARGET_ARMV8)
+    /** @todo */
+# else
     /** Set if hv_vm_space_create() was called successfully. */
     bool                        fCreatedAsid : 1;
…
     /** The last valid host LBR info stack range. */
     uint32_t                    idLbrInfoMsrLast;
+# endif
 
     STAMCOUNTER                 StatMapPage;
…
 
 #elif defined(RT_OS_DARWIN)
+# if defined(VBOX_VMM_TARGET_ARMV8)
+    /** The vCPU handle associated with the EMT executing this vCPU. */
+    hv_vcpu_t                   hVCpu;
+    /** Pointer to the exit information structure. */
+    hv_vcpu_exit_t             *pHvExit;
+    /** Flag whether an event is pending. */
+    bool                        fEventPending;
+# else
     /** The vCPU handle associated with the EMT executing this vCPU. */
     hv_vcpuid_t                 hVCpuId;
…
     /** Pointer to the VMX statistics. */
     PVMXSTATISTICS              pVmxStats;
+# endif
 
     /** @name Statistics
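The ARMv8 Darwin backend keeps an hv_vcpu_t plus the hv_vcpu_exit_t pointer that Hypervisor.framework hands back when the vCPU is created. A minimal standalone sketch of that pairing, with error handling elided; it assumes macOS 11+ on Apple Silicon and the com.apple.security.hypervisor entitlement:

    #include <Hypervisor/Hypervisor.h>

    /* Create a VM and one vCPU; hv_vcpu_create() also returns the exit-info
       structure a runloop would inspect after each hv_vcpu_run(). */
    static hv_return_t createOneVCpu(hv_vcpu_t *phVCpu, hv_vcpu_exit_t **ppExit)
    {
        hv_return_t ret = hv_vm_create(NULL);
        if (ret != HV_SUCCESS)
            return ret;
        return hv_vcpu_create(phVCpu, ppExit, NULL);
    }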
trunk/src/VBox/VMM/include/PGMInline.h (r98103 → r99051)

 
 
+#ifndef VBOX_VMM_TARGET_ARMV8
 /**
  * Checks if the no-execute (NX) feature is active (EFER.NXE=1).
…
     return NULL;
 }
+#endif /* !VBOX_VMM_TARGET_ARMV8 */
 
 
trunk/src/VBox/VMM/include/PGMInternal.h (r98103 → r99051)

  * @param   GCVirt      The virtual address of the page to invalidate.
  */
-#ifdef IN_RING0
-# define PGM_INVL_PG_ALL_VCPU(pVM, GCVirt)      HMInvalidatePageOnAllVCpus(pVM, (RTGCPTR)(GCVirt))
+#if defined(VBOX_VMM_TARGET_ARMV8)
+# define PGM_INVL_PG_ALL_VCPU(pVM, GCVirt)      do { } while(0)
 #else
 # define PGM_INVL_PG_ALL_VCPU(pVM, GCVirt)      HMInvalidatePageOnAllVCpus(pVM, (RTGCPTR)(GCVirt))
…
  * @param   GCVirt      The virtual address within the page directory to invalidate.
  */
-#ifdef IN_RING0
-# define PGM_INVL_BIG_PG(pVCpu, GCVirt)         HMFlushTlb(pVCpu)
+#if defined(VBOX_VMM_TARGET_ARMV8)
+# define PGM_INVL_BIG_PG(pVCpu, GCVirt)         do { } while(0)
 #else
 # define PGM_INVL_BIG_PG(pVCpu, GCVirt)         HMFlushTlb(pVCpu)
…
  * @param   pVCpu       The cross context virtual CPU structure.
  */
-#ifdef IN_RING0
-# define PGM_INVL_VCPU_TLBS(pVCpu)              HMFlushTlb(pVCpu)
+#if defined(VBOX_VMM_TARGET_ARMV8)
+# define PGM_INVL_VCPU_TLBS(pVCpu)              do { } while(0)
 #else
 # define PGM_INVL_VCPU_TLBS(pVCpu)              HMFlushTlb(pVCpu)
…
  * @param   pVM         The cross context VM structure.
  */
-#ifdef IN_RING0
-# define PGM_INVL_ALL_VCPU_TLBS(pVM)            HMFlushTlbOnAllVCpus(pVM)
+#if defined(VBOX_VMM_TARGET_ARMV8)
+# define PGM_INVL_ALL_VCPU_TLBS(pVM)            do { } while(0)
 #else
 # define PGM_INVL_ALL_VCPU_TLBS(pVM)            HMFlushTlbOnAllVCpus(pVM)
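On the ARMv8 target these TLB-invalidation macros become no-ops, since there is no HM engine to notify. Expanding to `do { } while(0)` rather than to nothing keeps each macro a single statement that still requires its trailing semicolon, matching the non-empty variants and avoiding empty-body warnings at call sites. A small self-contained illustration of why that shape matters:

    #include <stdio.h>

    /* Statement-like no-op: parses exactly like one statement. */
    #define INVALIDATE_NOP(addr)  do { } while (0)

    int main(void)
    {
        int fDirty = 1;
        if (fDirty)
            INVALIDATE_NOP(0x1000);   /* fine even without braces */
        else
            printf("clean\n");        /* an empty #define would leave a bare ';'
                                         above, which -Wempty-body flags */
        return 0;
    }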