Changeset 107227 in vbox
- Timestamp: Dec 4, 2024, 3:20:14 PM (6 weeks ago)
- Location: trunk
- Files: 43 edited
trunk/include/VBox/vmm/dbgf.h
r106362 → r107227:

  typedef enum DBGFREG
  {
+     DBGFREG_X86_FIRST = 0,
      /* General purpose registers: */
-     DBGFREG_AL = 0,
+     DBGFREG_AL = DBGFREG_X86_FIRST,
      DBGFREG_AX = DBGFREG_AL,
      DBGFREG_EAX = DBGFREG_AL,
…
      DBGFREG_IDTR,

-     /** The end of the x86 registers. */
-     DBGFREG_X86_END = DBGFREG_IDTR,
+     /** The last of the x86 registers. */
+     DBGFREG_X86_LAST = DBGFREG_IDTR,
+     /* Misnomer. */
+     DBGFREG_X86_END = DBGFREG_X86_LAST,

      /** @name ARMv8 register identifiers.
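Note: with explicit DBGFREG_X86_FIRST/DBGFREG_X86_LAST bounds, the per-target blocks of the DBGFREG enum can be range-checked generically instead of assuming the x86 block starts at zero. A minimal sketch of the idea (the helper name is invented for illustration and is not part of the changeset):

    static bool isX86CpuReg(DBGFREG enmReg)
    {
        /* The x86 block of the enum is bounded by the new first/last markers. */
        return (unsigned)enmReg >= (unsigned)DBGFREG_X86_FIRST
            && (unsigned)enmReg <= (unsigned)DBGFREG_X86_LAST;
    }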
trunk/include/VBox/vmm/pgm.h
r106061 → r107227:

                                          const void *pvBinary, uint32_t cbBinary, uint8_t fFlags, const char *pszDesc);
  VMMR3DECL(int)      PGMR3PhysRomProtect(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, PGMROMPROT enmProt);
+ #if defined(VBOX_VMM_TARGET_X86) || defined(VBOX_VMM_TARGET_AGNOSTIC)
  VMMDECL(void)       PGMR3PhysSetA20(PVMCPU pVCpu, bool fEnable);
+ #endif

  VMMR3_INT_DECL(int) PGMR3HandlerPhysicalTypeRegister(PVM pVM, PGMPHYSHANDLERKIND enmKind, uint32_t fFlags,
trunk/include/VBox/vmm/tm.h
r106061 → r107227:

  VMM_INT_DECL(bool)     TMCpuTickIsTicking(PVMCPUCC pVCpu);

- #if defined(VBOX_VMM_TARGET_ARMV8)
+ #ifdef VBOX_VMM_TARGET_ARMV8
  VMM_INT_DECL(void)     TMCpuSetVTimerNextActivation(PVMCPUCC pVCpu, uint64_t cNanoSecs);
  VMM_INT_DECL(uint64_t) TMCpuGetVTimerActivationNano(PVMCPUCC pVCpu);
trunk/include/VBox/vmm/vm.h
r107113 → r107227:

  # pragma D depends_on library CPUMInternal.d
  # define VMM_INCLUDED_SRC_include_CPUMInternal_h
+ # define VBOX_VMM_TARGET_AGNOSTIC
+ #endif
+
+ #if !defined(VBOX_VMM_TARGET_AGNOSTIC) \
+  && !defined(VBOX_VMM_TARGET_X86) \
+  && !defined(VBOX_VMM_TARGET_ARMV8)
+ # error "VMM target not defined"
  #endif
…
      VMCPUSTATE volatile     enmState;

- #if defined(VBOX_VMM_TARGET_ARMV8)
+ #ifdef VBOX_VMM_TARGET_ARMV8
      uint32_t                u32Alignment0;
      /** The number of nano seconds when the vTimer of the associated vCPU is supposed to activate
…
      } gim;

- #if defined(VBOX_VMM_TARGET_ARMV8)
-     /** GIC part. */
-     union VMCPUUNIONGIC
-     {
+     /* Interrupt controller, target specific. */
+     RT_GCC_EXTENSION
+     union
+     {
+ #if defined(VBOX_VMM_TARGET_ARMV8) || defined(VBOX_VMM_TARGET_AGNOSTIC)
+         /** GIC part. */
+         union
+         {
  # ifdef VMM_INCLUDED_SRC_include_GICInternal_h
-         struct GICCPU       s;
+             struct GICCPU   s;
  # endif
-         uint8_t             padding[3840];      /* multiple of 64 */
-     } gic;
- #else
-     /** APIC part. */
-     union VMCPUUNIONAPIC
-     {
+             uint8_t         padding[3840];      /* multiple of 64 */
+         } gic;
+ #endif
+ #if defined(VBOX_VMM_TARGET_X86) || defined(VBOX_VMM_TARGET_AGNOSTIC)
+         /** APIC part. */
+         union
+         {
  # ifdef VMM_INCLUDED_SRC_include_APICInternal_h
-         struct APICCPU      s;
+             struct APICCPU  s;
  # endif
-         uint8_t             padding[3840];      /* multiple of 64 */
-     } apic;
- #endif
+             uint8_t         padding[3840];      /* multiple of 64 */
+         } apic;
+ #endif
+     };

      /*
…
- #if defined(VBOX_VMM_TARGET_ARMV8)
+ #if defined(VBOX_VMM_TARGET_ARMV8) || defined(VBOX_VMM_TARGET_AGNOSTIC)
  /** This action forces the VM to inject an IRQ into the guest. */
  # define VMCPU_FF_INTERRUPT_IRQ             RT_BIT_64(VMCPU_FF_INTERRUPT_IRQ_BIT)
…
  # define VMCPU_FF_INTERRUPT_FIQ             RT_BIT_64(VMCPU_FF_INTERRUPT_FIQ_BIT)
  # define VMCPU_FF_INTERRUPT_FIQ_BIT         1
- #else
+ #endif
+ #if defined(VBOX_VMM_TARGET_X86) || defined(VBOX_VMM_TARGET_AGNOSTIC)
  /** This action forces the VM to check any pending interrupts on the APIC. */
  # define VMCPU_FF_INTERRUPT_APIC            RT_BIT_64(VMCPU_FF_INTERRUPT_APIC_BIT)
…
  #define VMCPU_FF_HM_UPDATE_CR3              RT_BIT_64(VMCPU_FF_HM_UPDATE_CR3_BIT)
  #define VMCPU_FF_HM_UPDATE_CR3_BIT          12
- #if defined(VBOX_VMM_TARGET_ARMV8)
+ #if defined(VBOX_VMM_TARGET_ARMV8) || defined(VBOX_VMM_TARGET_AGNOSTIC)
  # define VMCPU_FF_VTIMER_ACTIVATED          RT_BIT_64(VMCPU_FF_VTIMER_ACTIVATED_BIT)
  # define VMCPU_FF_VTIMER_ACTIVATED_BIT      13
- #else
+ #endif
+ #if defined(VBOX_VMM_TARGET_X86) || defined(VBOX_VMM_TARGET_AGNOSTIC)
  /* Bit 13 used to be VMCPU_FF_HM_UPDATE_PAE_PDPES. */
  #endif
…
  #define VM_FF_EXTERNAL_HALTED_MASK          (  VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_REQUEST \
                                               | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_EMT_RENDEZVOUS )
+
+ #ifndef VBOX_VMM_TARGET_AGNOSTIC
  /** Externally forced VMCPU actions. Used to quit the idle/wait loop. */
- #if defined(VBOX_VMM_TARGET_ARMV8)
- # define VMCPU_FF_EXTERNAL_HALTED_MASK      (  VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ \
+ # if defined(VBOX_VMM_TARGET_ARMV8)
+ #  define VMCPU_FF_EXTERNAL_HALTED_MASK     (  VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ \
                                               | VMCPU_FF_REQUEST | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI \
                                               | VMCPU_FF_UNHALT | VMCPU_FF_TIMER | VMCPU_FF_DBGF \
                                               | VMCPU_FF_VTIMER_ACTIVATED)
- #else
- # define VMCPU_FF_EXTERNAL_HALTED_MASK      (  VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC \
+ # else
+ #  define VMCPU_FF_EXTERNAL_HALTED_MASK     (  VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC \
                                               | VMCPU_FF_REQUEST | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI \
                                               | VMCPU_FF_UNHALT | VMCPU_FF_TIMER | VMCPU_FF_DBGF \
                                               | VMCPU_FF_INTERRUPT_NESTED_GUEST)
+ # endif
  #endif
…
                                               | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY \
                                               | VM_FF_EMT_RENDEZVOUS )
+ #ifndef VBOX_VMM_TARGET_AGNOSTIC
  /** High priority VMCPU pre-execution actions. */
- #if defined(VBOX_VMM_TARGET_ARMV8)
- # define VMCPU_FF_HIGH_PRIORITY_PRE_MASK    (  VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ \
+ # if defined(VBOX_VMM_TARGET_ARMV8)
+ #  define VMCPU_FF_HIGH_PRIORITY_PRE_MASK   (  VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ \
                                               | VMCPU_FF_DBGF )
- #else
- # define VMCPU_FF_HIGH_PRIORITY_PRE_MASK    (  VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC \
+ # else
+ #  define VMCPU_FF_HIGH_PRIORITY_PRE_MASK   (  VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC \
                                               | VMCPU_FF_UPDATE_APIC | VMCPU_FF_DBGF \
                                               | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL \
                                               | VMCPU_FF_INTERRUPT_NESTED_GUEST | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_APIC_WRITE \
                                               | VMCPU_FF_VMX_PREEMPT_TIMER | VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW )
+ # endif
  #endif
…
  #endif

- #if !defined(VBOX_VMM_TARGET_ARMV8)
+ #if defined(VBOX_VMM_TARGET_X86) || defined(VBOX_VMM_TARGET_AGNOSTIC)
  /** VMCPU flags that cause the REP[|NE|E] STRINS loops to yield, interrupts
   * enabled. */
…
      } gim;

- #if defined(VBOX_VMM_TARGET_ARMV8)
-     union
-     {
+     /** Interrupt controller, target specific. */
+     RT_GCC_EXTENSION
+     union
+     {
+ #if defined(VBOX_VMM_TARGET_ARMV8) || defined(VBOX_VMM_TARGET_AGNOSTIC)
+         union
+         {
  # ifdef VMM_INCLUDED_SRC_include_GICInternal_h
-         struct GIC          s;
+             struct GIC      s;
  # endif
-         uint8_t             padding[128];       /* multiple of 8 */
-     } gic;
- #else
-     union
-     {
+             uint8_t         padding[128];       /* multiple of 8 */
+         } gic;
+ #endif
+ #if defined(VBOX_VMM_TARGET_X86) || defined(VBOX_VMM_TARGET_AGNOSTIC)
+         union
+         {
  # ifdef VMM_INCLUDED_SRC_include_APICInternal_h
-         struct APIC         s;
+             struct APIC     s;
  # endif
-         uint8_t             padding[128];       /* multiple of 8 */
-     } apic;
- #endif
+             uint8_t         padding[128];       /* multiple of 8 */
+         } apic;
+ #endif
+     };

      /* ---- begin small stuff ---- */
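Note: the change replaces the either/or #if around the interrupt-controller state with one anonymous union whose members are individually gated, so a target-agnostic build carries both. A self-contained sketch of the pattern (all names here are invented for illustration; the real code gates on VBOX_VMM_TARGET_X86/ARMV8/AGNOSTIC and pads to 3840 bytes):

    struct SketchCpu
    {
        unsigned uState;
        union   /* anonymous union: members are addressed as pCpu->apic / pCpu->gic */
        {
    #if defined(SKETCH_TARGET_X86) || defined(SKETCH_TARGET_AGNOSTIC)
            struct { unsigned char abPadding[64]; } apic;   /* x86 interrupt controller */
    #endif
    #if defined(SKETCH_TARGET_ARMV8) || defined(SKETCH_TARGET_AGNOSTIC)
            struct { unsigned char abPadding[64]; } gic;    /* ARMv8 interrupt controller */
    #endif
        };
    };

In the agnostic case both members exist and overlay each other, which is harmless because both are padded to the same size and only one is used per VM.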
trunk/src/VBox/HostDrivers/Support/testcase/Makefile.kmk
r106945 → r107227:

  tstInt_TEMPLATE = VBoxR3SignedExe
- tstInt_DEFS     = $(VMM_COMMON_DEFS)
+ tstInt_DEFS     = VBOX_VMM_TARGET_AGNOSTIC $(VMM_COMMON_DEFS)
  tstInt_SOURCES  = tstInt.cpp
  tstInt_LIBS     = $(LIB_RUNTIME)
trunk/src/VBox/HostDrivers/Support/testcase/tstInt.cpp
r106061 → r107227:

      if (RT_SUCCESS(rc))
      {
-         PVM pVM = CreateVMReq.pVMR3;
+         PVM const pVM = CreateVMReq.pVMR3;
          AssertRelease(RT_VALID_PTR(pVM));
          AssertRelease(pVM->pVMR0ForCall == CreateVMReq.pVMR0);
trunk/src/VBox/VMM/Makefile.kmk
r107194 → r107227:

  VBoxVMM_SONAME.linux = VBoxVMM.so

- VBoxVMM_DEFS = VBOX_IN_VMM IN_VMM_R3 IN_DIS IN_GMM_R3 IN_DBG $(VMM_COMMON_DEFS)
+ VBoxVMM_DEFS = VBOX_VMM_TARGET_X86 VBOX_IN_VMM IN_VMM_R3 IN_DIS IN_GMM_R3 IN_DBG $(VMM_COMMON_DEFS)
  ## @todo eliminate IN_GMM_R3
  ifdef VBOX_WITH_PREALLOC_RAM_BY_DEFAULT
…
  VMMR0_SYSSUFF = .r0

- VMMR0_DEFS = VBOX_IN_VMM IN_VMM_R0 IN_RT_R0 IN_DIS DIS_CORE_ONLY IN_GVMM_R0 IN_GMM_R0 IN_INTNET_R0 \
+ VMMR0_DEFS = VBOX_VMM_TARGET_AGNOSTIC VBOX_IN_VMM IN_VMM_R0 IN_RT_R0 IN_DIS DIS_CORE_ONLY IN_GVMM_R0 IN_GMM_R0 IN_INTNET_R0 \
  	$(VMM_COMMON_DEFS) RTASSERT_HAVE_SHOULD_PANIC
  ## @todo eliminate IN_GVMM_R0 IN_GMM_R0
…
  LIBRARIES += SSMStandalone
  SSMStandalone_TEMPLATE = VBoxR3Exe
- SSMStandalone_DEFS     = VBOX_IN_VMM IN_VMM_R3 IN_VMM_STATIC SSM_STANDALONE CPUM_DB_STANDALONE $(VMM_COMMON_DEFS)
+ SSMStandalone_DEFS     = VBOX_VMM_TARGET_AGNOSTIC VBOX_IN_VMM IN_VMM_R3 IN_VMM_STATIC SSM_STANDALONE CPUM_DB_STANDALONE $(VMM_COMMON_DEFS)
  SSMStandalone_INCS     = include
  SSMStandalone_SOURCES  = \
trunk/src/VBox/VMM/VMMAll/TMAllCpu.cpp
r106061 → r107227:

- #if defined(VBOX_VMM_TARGET_ARMV8)
+ #ifdef VBOX_VMM_TARGET_ARMV8
+
  /**
   * Sets the number of nanoseconds from now when the vTiemr is supposed to expire next.
…
      return pVCpu->cNsVTimerActivate;
  }
- #endif
+
+ #endif /* VBOX_VMM_TARGET_ARMV8 */
trunk/src/VBox/VMM/VMMR3/DBGF.cpp
r107030 → r107227:

      /*
       * Do minimal parsing. No real need to involve the disassembler here.
       */
-     if (   (u32Insn & 0xfffffc1f) == 0xd65f0000 /* RET */
-         || (u32Insn & 0xfffffc1f) == 0xd65f081f /* RETAA */
-         || (u32Insn & 0xfffffc1f) == 0xd65f0c1f /* RETAB */
-         || (u32Insn & 0xffffffff) == 0xd69f03e0 /* ERET */
-         || (u32Insn & 0xffffffff) == 0xd69f0bff /* ERETAA */
-         || (u32Insn & 0xffffffff) == 0xd69f0fff /* ERETAB */)
+     if (   (u32Insn & UINT32_C(0xfffffc1f)) == UINT32_C(0xd65f0000) /* RET */
+         || (u32Insn & UINT32_C(0xfffffc1f)) == UINT32_C(0xd65f081f) /* RETAA */
+         || (u32Insn & UINT32_C(0xfffffc1f)) == UINT32_C(0xd65f0c1f) /* RETAB */
+         || (u32Insn & UINT32_C(0xffffffff)) == UINT32_C(0xd69f03e0) /* ERET */
+         || (u32Insn & UINT32_C(0xffffffff)) == UINT32_C(0xd69f0bff) /* ERETAA */
+         || (u32Insn & UINT32_C(0xffffffff)) == UINT32_C(0xd69f0fff) /* ERETAB */)
          return DBGFSTEPINSTRTYPE_RET;
-     else if (   (u32Insn & 0xfffffc1f) == 0xd63f0000 /* BLR */
-              || (u32Insn & 0xfffffc1f) == 0xd63f081f /* BLRAAZ */
-              || (u32Insn & 0xfffffc1f) == 0xd63f0c1f /* BLRABZ */
-              || (u32Insn & 0xfffffc00) == 0xd73f0800 /* BLRAA */
-              || (u32Insn & 0xfffffc00) == 0xd73f0c00 /* BLRAB */
-              || (u32Insn & 0xfc000000) == 0x14000000 /* BL */
-              || (u32Insn & 0xffe0001f) == 0xd4000001 /* SVC */
-              || (u32Insn & 0xffe0001f) == 0xd4000002 /* HVC */
-              || (u32Insn & 0xffe0001f) == 0xd4000003 /* SMC */
-              || (u32Insn & 0xffe0001f) == 0xd4200000 /* BRK */
-              || (u32Insn & 0xffe0001f) == 0xd4400000 /* HLT */)
+     if (   (u32Insn & UINT32_C(0xfffffc1f)) == UINT32_C(0xd63f0000) /* BLR */
+         || (u32Insn & UINT32_C(0xfffffc1f)) == UINT32_C(0xd63f081f) /* BLRAAZ */
+         || (u32Insn & UINT32_C(0xfffffc1f)) == UINT32_C(0xd63f0c1f) /* BLRABZ */
+         || (u32Insn & UINT32_C(0xfffffc00)) == UINT32_C(0xd73f0800) /* BLRAA */
+         || (u32Insn & UINT32_C(0xfffffc00)) == UINT32_C(0xd73f0c00) /* BLRAB */
+         || (u32Insn & UINT32_C(0xfc000000)) == UINT32_C(0x14000000) /* BL */
+         || (u32Insn & UINT32_C(0xffe0001f)) == UINT32_C(0xd4000001) /* SVC */
+         || (u32Insn & UINT32_C(0xffe0001f)) == UINT32_C(0xd4000002) /* HVC */
+         || (u32Insn & UINT32_C(0xffe0001f)) == UINT32_C(0xd4000003) /* SMC */
+         || (u32Insn & UINT32_C(0xffe0001f)) == UINT32_C(0xd4200000) /* BRK */
+         || (u32Insn & UINT32_C(0xffe0001f)) == UINT32_C(0xd4400000) /* HLT */)
          return DBGFSTEPINSTRTYPE_CALL;
-     else
-         return DBGFSTEPINSTRTYPE_OTHER;
- }
- #else
+     return DBGFSTEPINSTRTYPE_OTHER;
+ }
+
+ #elif defined(VBOX_VMM_TARGET_X86)
      /*
       * Read the instruction.
…
      }
  }
+
+ #else
+ # error "port me"
  #endif
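Note: the UINT32_C() wrapping is type hardening rather than a behavioural change: it pins each literal to uint32_t so the mask-and-compare idiom behaves identically whatever the platform's int width. The idiom on its own, as a standalone sketch (the helper name is invented):

    #include <stdint.h>
    #include <stdbool.h>

    static bool isA64Ret(uint32_t u32Insn)
    {
        /* A64 RET Xn: match everything except the 5-bit register field in bits 5..9. */
        return (u32Insn & UINT32_C(0xfffffc1f)) == UINT32_C(0xd65f0000);
    }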
trunk/src/VBox/VMM/VMMR3/DBGFCoreWrite.cpp
r107113 → r107227:

  static void dbgfR3GetCoreCpu(PVMCPU pVCpu, PDBGFCORECPU pDbgfCpu)
  {
- #define DBGFCOPYSEL(a_dbgfsel, a_cpumselreg) \
+     PCCPUMCTX const pCtx = CPUMQueryGuestCtxPtr(pVCpu);
+
+ #ifdef VBOX_VMM_TARGET_X86
+ # define DBGFCOPYSEL(a_dbgfsel, a_cpumselreg) \
      do { \
          (a_dbgfsel).uBase  = (a_cpumselreg).u64Base; \
…
      } while (0)

- #if defined(VBOX_VMM_TARGET_ARMV8)
-     AssertReleaseFailed();
-     RT_NOREF(pVCpu, pDbgfCpu);
- #else
-     PVM       pVM  = pVCpu->CTX_SUFF(pVM);
-     PCCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
      pDbgfCpu->rax = pCtx->rax;
      pDbgfCpu->rbx = pCtx->rbx;
…
      pDbgfCpu->aXcr[1] = pCtx->aXcr[1];
      AssertCompile(sizeof(pDbgfCpu->ext) == sizeof(pCtx->XState));
+
+     PVM const pVM = pVCpu->CTX_SUFF(pVM);
      pDbgfCpu->cbExt = pVM->cpum.ro.GuestFeatures.cbMaxExtendedState;
      if (RT_LIKELY(pDbgfCpu->cbExt))
          memcpy(&pDbgfCpu->ext, &pCtx->XState, pDbgfCpu->cbExt);
+
+ # undef DBGFCOPYSEL
+
+ #elif defined(VBOX_VMM_TARGET_ARMV8)
+     RT_NOREF(pCtx, pDbgfCpu);
+     AssertReleaseFailed();
+
+ #else
+ # error "port me"
  #endif
-
- #undef DBGFCOPYSEL
  }
trunk/src/VBox/VMM/VMMR3/DBGFCpu.cpp
r106061 → r107227:

      Assert(idCpu == VMMGetCpuId(pVM));
      PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
- #if defined(VBOX_VMM_TARGET_ARMV8)
+ #ifdef VBOX_VMM_TARGET_ARMV8
      CPUM_IMPORT_EXTRN_RET(pVCpu, CPUMCTX_EXTRN_PSTATE);
+ #elif defined(VBOX_VMM_TARGET_X86)
+     CPUM_IMPORT_EXTRN_RET(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER);
  #else
-     CPUM_IMPORT_EXTRN_RET(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER);
+ # error "port me"
  #endif
      *penmMode = CPUMGetGuestMode(pVCpu);
…
      Assert(idCpu == VMMGetCpuId(pVM));
      PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
- #if defined(VBOX_VMM_TARGET_ARMV8)
+ #ifdef VBOX_VMM_TARGET_ARMV8
      CPUM_IMPORT_EXTRN_RET(pVCpu, CPUMCTX_EXTRN_PSTATE);
+ #elif defined(VBOX_VMM_TARGET_X86)
+     CPUM_IMPORT_EXTRN_RET(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER);
  #else
-     CPUM_IMPORT_EXTRN_RET(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
+ # error "port me"
  #endif
      *pfIn64BitCode = CPUMIsGuestIn64BitCode(pVCpu);
…
- #if !defined(VBOX_VMM_TARGET_ARMV8)
+ #ifdef VBOX_VMM_TARGET_X86
  /**
   * Wrapper around CPUMIsGuestInV86Code.
…
      AssertReturn(idCpu < pUVM->pVM->cCpus, false);

- #if defined(VBOX_VMM_TARGET_ARMV8)
-     /* This is a public visible API, so we need to fill in a stub. */
-     return false;
- #else
+ #ifdef VBOX_VMM_TARGET_X86
      bool fInV86Code;
      int rc = VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3CpuInV86Code, 3, pUVM->pVM, idCpu, &fInV86Code);
      if (RT_FAILURE(rc))
          return false;
      return fInV86Code;
+ #else
+     return false;
  #endif
  }
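Note: this file shows the dispatch idiom the whole changeset converges on. Every target-specific spot becomes an explicit #ifdef/#elif chain with a hard compile-time failure for unknown targets, instead of an #else branch that silently assumed x86:

    #ifdef VBOX_VMM_TARGET_ARMV8
        /* ARMv8 flavour */
    #elif defined(VBOX_VMM_TARGET_X86)
        /* x86 flavour */
    #else
    # error "port me"
    #endif

The design pay-off is that adding a third target turns every unported site into a build error rather than a subtle runtime misbehaviour.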
trunk/src/VBox/VMM/VMMR3/DBGFDisas.cpp
r106743 → r107227:

      /** The address space for resolving symbol. */
      RTDBGAS                 hDbgAs;
- #if !defined(VBOX_VMM_TARGET_ARMV8)
+ #ifdef VBOX_VMM_TARGET_X86
      /** Pointer to the first byte in the segment. */
      RTGCUINTPTR             GCPtrSegBase;
…
                                        RTGCPTR GCPtr, uint32_t fFlags, PDBGFDISASSTATE pState)
  {
- #if !defined(VBOX_VMM_TARGET_ARMV8)
+ #ifndef VBOX_VMM_TARGET_X86
+     RT_NOREF_PV(pSelInfo);
+     pState->f64Bits         = CPUMIsGuestIn64BitCode(pVCpu);
+ #else
      pState->GCPtrSegBase    = pSelInfo->GCPtrBase;
      pState->GCPtrSegEnd     = pSelInfo->cbLimit + 1 + (RTGCUINTPTR)pSelInfo->GCPtrBase;
      pState->cbSegLimit      = pSelInfo->cbLimit;
      pState->f64Bits         = enmMode >= PGMMODE_AMD64 && pSelInfo->u.Raw.Gen.u1Long;
- #else
-     RT_NOREF(pSelInfo);
-
-     pState->f64Bits         = CPUMGetGuestCodeBits(pVCpu) == 64;
  #endif
      pState->enmMode         = enmMode;
…
              enmCpuMode = CPUMGetGuestDisMode(pVCpu);
              break;
- #if !defined(VBOX_VMM_TARGET_ARMV8)
+ #ifdef VBOX_VMM_TARGET_X86
          case DBGF_DISAS_FLAGS_16BIT_MODE:
          case DBGF_DISAS_FLAGS_16BIT_REAL_MODE:
…
              enmCpuMode = DISCPUMODE_64BIT;
              break;
- #else
+ #elif defined(VBOX_VMM_TARGET_ARMV8)
          case DBGF_DISAS_FLAGS_16BIT_MODE: /** @todo r=aeichner This is a bit abusive... */
          case DBGF_DISAS_FLAGS_16BIT_REAL_MODE:
…
              enmCpuMode = DISCPUMODE_ARMV8_A64;
              break;
+ #else
+ # error "port me"
  #endif
      }
…
      for (;;)
      {
- #if !defined(VBOX_VMM_TARGET_ARMV8)
+ #ifdef VBOX_VMM_TARGET_X86
          RTGCUINTPTR GCPtr = pDis->uInstrAddr + offInstr + pState->GCPtrSegBase;
  #else
…
          }

-         uint32_t cb = GUEST_PAGE_SIZE - (GCPtr & GUEST_PAGE_OFFSET_MASK);
- #if !defined(VBOX_VMM_TARGET_ARMV8)
+ #ifdef VBOX_VMM_TARGET_X86
          /*
           * Check the segment limit.
           */
          if (!pState->f64Bits && pDis->uInstrAddr + offInstr > pState->cbSegLimit)
              return VERR_OUT_OF_SELECTOR_BOUNDS;
+ #endif

          /*
           * Calc how much we can read, maxing out the read.
           */
+         uint32_t cb = GUEST_PAGE_SIZE - (GCPtr & GUEST_PAGE_OFFSET_MASK);
+ #ifdef VBOX_VMM_TARGET_X86
          if (!pState->f64Bits)
          {
…
  {
      PDBGFDISASSTATE pState = (PDBGFDISASSTATE)pDis;
-     PCDBGFSELINFO   pSelInfo = (PCDBGFSELINFO)pvUser;

      /*
…
      DBGFADDRESS     Addr;
      int             rc;
- #if !defined(VBOX_VMM_TARGET_ARMV8)
+ #ifdef VBOX_VMM_TARGET_X86
+     PCDBGFSELINFO   pSelInfo = (PCDBGFSELINFO)pvUser;
+
      /* Start with CS. */
      if (   DIS_FMT_SEL_IS_REG(u32Sel)
…
          rc = VERR_SYMBOL_NOT_FOUND;
      }
+
  #else
-     RT_NOREF(pSelInfo, u32Sel);
-
+     RT_NOREF(pvUser, u32Sel);
      DBGFR3AddrFromFlat(pState->pVM->pUVM, &Addr, uAddress);
      rc = VINF_SUCCESS;
…
      int rc;

- #if defined(VBOX_VMM_TARGET_ARMV8)
-     DBGFSELINFO SelInfo; RT_ZERO(SelInfo);
-     const PGMMODE enmMode = PGMGetGuestMode(pVCpu);
-     const bool fRealModeAddress = false;
-     if (fFlags & DBGF_DISAS_FLAGS_CURRENT_GUEST)
-         GCPtr = CPUMGetGuestFlatPC(pVCpu);
- #else
+ #ifdef VBOX_VMM_TARGET_X86
      /*
       * Get the Sel and GCPtr if fFlags requests that.
…
          }
      }
- #endif
+
+ #else  /* !VBOX_VMM_TARGET_X86 */
+     const PGMMODE enmMode = PGMGetGuestMode(pVCpu);
+     const bool    fRealModeAddress = false;
+     if (fFlags & DBGF_DISAS_FLAGS_CURRENT_GUEST)
+         GCPtr = CPUMGetGuestFlatPC(pVCpu);
+ #endif /* !VBOX_VMM_TARGET_X86 */

      /*
…
       */
      DBGFDISASSTATE State;
+ #ifdef VBOX_VMM_TARGET_X86
      rc = dbgfR3DisasInstrFirst(pVM, pVCpu, &SelInfo, enmMode, GCPtr, fFlags, &State);
+ #else
+     rc = dbgfR3DisasInstrFirst(pVM, pVCpu, NULL, enmMode, GCPtr, fFlags, &State);
+ #endif
      if (RT_FAILURE(rc))
      {
…
      char szBuf[512];
- #if defined(VBOX_VMM_TARGET_ARMV8)
+ #ifdef VBOX_VMM_TARGET_ARMV8
      DISFormatArmV8Ex(&State.Dis, szBuf, sizeof(szBuf),
                       DIS_FMT_FLAGS_RELATIVE_BRANCH,
                       fFlags & DBGF_DISAS_FLAGS_NO_SYMBOLS ? NULL : dbgfR3DisasGetSymbol,
                       NULL);
- #else
+ #elif defined(VBOX_VMM_TARGET_X86)
      DISFormatYasmEx(&State.Dis, szBuf, sizeof(szBuf),
                      DIS_FMT_FLAGS_RELATIVE_BRANCH,
                      fFlags & DBGF_DISAS_FLAGS_NO_SYMBOLS ? NULL : dbgfR3DisasGetSymbol,
                      &SelInfo);
+ #else
+ # error "port me"
  #endif
…
      pDisState->Param3 = State.Dis.aParams[2];
      pDisState->Param4 = State.Dis.aParams[3];
- #if defined(VBOX_VMM_TARGET_ARMV8)
+ #ifdef VBOX_VMM_TARGET_ARMV8
      memcpy(&pDisState->armv8, &State.Dis.armv8, sizeof(State.Dis.armv8));
+ #elif defined(VBOX_VMM_TARGET_X86)
+     memcpy(&pDisState->x86, &State.Dis.x86, sizeof(State.Dis.x86));
  #else
-     memcpy(&pDisState->x86, &State.Dis.x86, sizeof(State.Dis.x86));
+ # error "port me"
  #endif
trunk/src/VBox/VMM/VMMR3/DBGFMem.cpp
r106061 → r107227:

- #if !defined(VBOX_VMM_TARGET_ARMV8)
+ #ifdef VBOX_VMM_TARGET_X86
  /**
   * Worker for DBGFR3SelQueryInfo that calls into SELM.
…
      return rc;
  }
- #endif
+ #endif /* VBOX_VMM_TARGET_X86 */
…
      memset(pSelInfo, 0, sizeof(*pSelInfo));

- #if defined(VBOX_VMM_TARGET_ARMV8)
+ #ifdef VBOX_VMM_TARGET_X86
+     /*
+      * Dispatch the request to a worker running on the target CPU.
+      */
+     return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3SelQueryInfo, 5, pUVM, idCpu, Sel, fFlags, pSelInfo);
+ #else
      RT_NOREF(Sel);
      return VERR_NOT_SUPPORTED;
- #else
-     /*
-      * Dispatch the request to a worker running on the target CPU.
-      */
-     return VMR3ReqPriorityCallWaitU(pUVM, idCpu, (PFNRT)dbgfR3SelQueryInfo, 5, pUVM, idCpu, Sel, fFlags, pSelInfo);
  #endif
  }
…
+ #ifdef VBOX_WITH_HWVIRT
  /**
   * Converts a PGM paging mode to a set of DBGFPGDMP_XXX flags.
…
      switch (enmMode)
      {
- #if !defined(VBOX_VMM_TARGET_ARMV8)
+ # ifdef VBOX_VMM_TARGET_X86
          case PGMMODE_32_BIT:
              return DBGFPGDMP_FLAGS_PSE;
…
          case PGMMODE_EPT:
              return DBGFPGDMP_FLAGS_EPT;
+
+ # elif defined(VBOX_VMM_TARGET_ARM64)
+         /** @todo arm64: dumping page tables. */
+ # else
+ #  error "port me"
+ # endif
          case PGMMODE_NONE:
              return 0;
          default:
              AssertFailedReturn(UINT32_MAX);
- #else
-         case PGMMODE_NONE:
-             return 0;
-         default:
-             AssertFailedReturn(UINT32_MAX);
- #endif
      }
  }
+ #endif /* VBOX_WITH_HWVIRT */
…
          return VINF_SUCCESS;
      }
-
- #if !defined(VBOX_VMM_TARGET_ARMV8)
+ #if defined(VBOX_WITH_HWVIRT)
      if (fFlags & DBGFPGDMP_FLAGS_CURRENT_CR3)
          cr3 = PGMGetHyperCR3(pVCpu);
- #endif
      if (fFlags & DBGFPGDMP_FLAGS_CURRENT_MODE)
          fFlags |= dbgfR3PagingDumpModeToFlags(PGMGetShadowMode(pVCpu));
+ #else
+     pHlp->pfnPrintf(pHlp, "Expected shadowing mode PGMMODE_NONE, found %d!\n", PGMGetShadowMode(pVCpu));
+     return VINF_SUCCESS;
+ #endif
  }
  else
  {
- #if defined(VBOX_VMM_TARGET_ARMV8)
-     AssertReleaseFailed();
- #else
+ #ifdef VBOX_VMM_TARGET_X86
      if (fFlags & DBGFPGDMP_FLAGS_CURRENT_CR3)
          cr3 = CPUMGetGuestCR3(pVCpu);
…
          fFlags |= CPUMGetGuestEFER(pVCpu) & (MSR_K6_EFER_LME | MSR_K6_EFER_NXE);
      }
+ #elif defined(VBOX_VMM_TARGET_ARMV8)
+     /** @todo arm64: port me */
+     AssertReleaseFailed();
+     return VERR_NOT_IMPLEMENTED;
+ #else
+ # error "port me"
  #endif
  }
…
                 uint64_t u64LastAddr, uint32_t cMaxDepth, PCDBGFINFOHLP pHlp)
  {
+     /** @todo adjust this for ARMv8. Probably need two root parameters (instead of
+      *        cr3) as well as a bunch new flags. */
      /*
       * Input validation.
trunk/src/VBox/VMM/VMMR3/DBGFR3Bp.cpp
r107194 → r107227:

  #include <iprt/assert.h>
  #include <iprt/mem.h>
- #if defined(VBOX_VMM_TARGET_ARMV8)
+ #ifdef VBOX_VMM_TARGET_ARMV8
  # include <iprt/armv8.h>
  #endif
…
      RT_NOREF(pvUser);

- #if defined(VBOX_VMM_TARGET_ARMV8)
-     RT_NOREF(pVM, pVCpu);
-     AssertReleaseFailed();
-     return VERR_NOT_IMPLEMENTED;
- #else
+ #ifdef VBOX_VMM_TARGET_X86
      /*
       * CPU 0 updates the enabled hardware breakpoint counts.
…
      return CPUMRecalcHyperDRx(pVCpu, UINT8_MAX);
+
+ #else
+     /** @todo arm64: hardware breakpoints. */
+     RT_NOREF(pVM, pVCpu);
+     AssertReleaseFailed();
+     return VERR_NOT_IMPLEMENTED;
  #endif
  }
…
       * as the VMX code intercepts #BP only when at least one int3 breakpoint is enabled.
       * A racing vCPU might trigger it and forward it to the guest causing panics/crashes/havoc. */
+             /*
+              * Save original instruction and replace a breakpoint instruction.
+              */
  #ifdef VBOX_VMM_TARGET_ARMV8
-             /*
-              * Save original instruction and replace with brk
-              */
-             rc = PGMPhysSimpleReadGCPhys(pVM, &pBp->Pub.u.Sw.Arch.armv8.u32Org, pBp->Pub.u.Sw.PhysAddr, sizeof(pBp->Pub.u.Sw.Arch.armv8.u32Org));
-             if (RT_SUCCESS(rc))
-             {
-                 static const uint32_t s_u32Brk = Armv8A64MkInstrBrk(0xc0de);
-                 rc = PGMPhysSimpleWriteGCPhys(pVM, pBp->Pub.u.Sw.PhysAddr, &s_u32Brk, sizeof(s_u32Brk));
-             }
+             static const uint32_t s_BreakpointInstr = Armv8A64MkInstrBrk(0xc0de);
+             rc = PGMPhysSimpleReadGCPhys(pVM, &pBp->Pub.u.Sw.Arch.armv8.u32Org, pBp->Pub.u.Sw.PhysAddr,
+                                          sizeof(pBp->Pub.u.Sw.Arch.armv8.u32Org));
+ #elif defined(VBOX_VMM_TARGET_X86)
+             static const uint8_t  s_BreakpointInstr = 0xcc;
+             rc = PGMPhysSimpleReadGCPhys(pVM, &pBp->Pub.u.Sw.Arch.x86.bOrg, pBp->Pub.u.Sw.PhysAddr,
+                                          sizeof(pBp->Pub.u.Sw.Arch.x86.bOrg));
  #else
-             /*
-              * Save current byte and write the int3 instruction byte.
-              */
-             rc = PGMPhysSimpleReadGCPhys(pVM, &pBp->Pub.u.Sw.Arch.x86.bOrg, pBp->Pub.u.Sw.PhysAddr, sizeof(pBp->Pub.u.Sw.Arch.x86.bOrg));
-             if (RT_SUCCESS(rc))
-             {
-                 static const uint8_t s_bInt3 = 0xcc;
-                 rc = PGMPhysSimpleWriteGCPhys(pVM, pBp->Pub.u.Sw.PhysAddr, &s_bInt3, sizeof(s_bInt3));
-             }
+ # error "port me"
  #endif
              if (RT_SUCCESS(rc))
              {
-                 ASMAtomicIncU32(&pVM->dbgf.s.cEnabledSwBreakpoints);
-                 Log(("DBGF: Set breakpoint at %RGv (Phys %RGp)\n", pBp->Pub.u.Sw.GCPtr, pBp->Pub.u.Sw.PhysAddr));
-             }
-
-             if (RT_FAILURE(rc))
-                 dbgfR3BpSetEnabled(pBp, false /*fEnabled*/);
-
+                 rc = PGMPhysSimpleWriteGCPhys(pVM, pBp->Pub.u.Sw.PhysAddr, &s_BreakpointInstr, sizeof(s_BreakpointInstr));
+                 if (RT_SUCCESS(rc))
+                 {
+                     ASMAtomicIncU32(&pVM->dbgf.s.cEnabledSwBreakpoints);
+                     Log(("DBGF: Set breakpoint at %RGv (Phys %RGp)\n", pBp->Pub.u.Sw.GCPtr, pBp->Pub.u.Sw.PhysAddr));
+                     break;
+                 }
+             }
+             dbgfR3BpSetEnabled(pBp, false /*fEnabled*/);
              break;
          }
…
              if (   RT_SUCCESS(rc)
                  && u32Current == Armv8A64MkInstrBrk(0xc0de))
-                 rc = PGMPhysSimpleWriteGCPhys(pVM, pBp->Pub.u.Sw.PhysAddr, &pBp->Pub.u.Sw.Arch.armv8.u32Org, sizeof(pBp->Pub.u.Sw.Arch.armv8.u32Org));
+                 rc = PGMPhysSimpleWriteGCPhys(pVM, pBp->Pub.u.Sw.PhysAddr, &pBp->Pub.u.Sw.Arch.armv8.u32Org,
+                                               sizeof(pBp->Pub.u.Sw.Arch.armv8.u32Org));
  #else
              uint8_t bCurrent = 0;
…
              if (   RT_SUCCESS(rc)
                  && bCurrent == 0xcc)
-                 rc = PGMPhysSimpleWriteGCPhys(pVM, pBp->Pub.u.Sw.PhysAddr, &pBp->Pub.u.Sw.Arch.x86.bOrg, sizeof(pBp->Pub.u.Sw.Arch.x86.bOrg));
+                 rc = PGMPhysSimpleWriteGCPhys(pVM, pBp->Pub.u.Sw.PhysAddr, &pBp->Pub.u.Sw.Arch.x86.bOrg,
+                                               sizeof(pBp->Pub.u.Sw.Arch.x86.bOrg));
  #endif
-
              if (RT_SUCCESS(rc))
              {
…
  /**
-  * Worker for DBGFR3BpHit() differnetiating on the breakpoint type.
+  * Worker for DBGFR3BpHit() differentiating on the breakpoint type.
   *
   * @returns Strict VBox status code.
…
          if (rcStrict == VINF_SUCCESS)
          {
+             /** @todo Need to take more care with the reading there if the breakpoint is
+              *        on the edge of a page. */
              uint8_t abInstr[DBGF_BP_INSN_MAX];
              RTGCPTR const GCPtrInstr = CPUMGetGuestFlatPC(pVCpu);
…
              if (rcStrict == VINF_SUCCESS)
              {
- #ifdef VBOX_VMM_TARGET_ARMV8
-                 AssertFailed();
-                 rcStrict = VERR_NOT_IMPLEMENTED;
- #else
+ #ifdef VBOX_VMM_TARGET_X86
                  /* Replace the int3 with the original instruction byte. */
                  abInstr[0] = pBp->Pub.u.Sw.Arch.x86.bOrg;
                  rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, GCPtrInstr, &abInstr[0], sizeof(abInstr));
+ #else
+                 /** @todo arm64: implement stepping over breakpoint. Fix unnecessary opcode reading. */
+                 AssertFailed();
+                 rcStrict = VERR_NOT_IMPLEMENTED;
  #endif
                  if (   rcStrict == VINF_SUCCESS
trunk/src/VBox/VMM/VMMR3/DBGFR3Flow.cpp
r106784 → r107227:

      if (   uOpc == OP_ARMV8_A64_B
          || uOpc == OP_ARMV8_A64_BC)
-     {
-         return    pDis->armv8.enmCond == kDisArmv8InstrCond_Al
-                || pDis->armv8.enmCond == kDisArmv8InstrCond_Al1;
-     }
+         return    pDis->armv8.enmCond == kDisArmv8InstrCond_Al
+                || pDis->armv8.enmCond == kDisArmv8InstrCond_Al1;

      return false;
+
+ #elif defined(VBOX_VMM_TARGET_X86)
+     RT_NOREF_PV(pDis);
+     return uOpc == OP_JMP;
+
  #else
-     RT_NOREF(pDis);
-
-     return uOpc == OP_JMP;
+ # error "port me"
  #endif
  }
…
      return false;
+
+ #elif defined(VBOX_VMM_TARGET_X86)
+     RT_NOREF_PV(fOpType);
+     return uOpc == OP_CALL;
+
  #else
-     RT_NOREF(fOpType);
-     return uOpc == OP_CALL;
+ # error "port me"
  #endif
  }
…
  {
  #ifdef VBOX_VMM_TARGET_ARMV8
-     if (   uOpc == OP_ARMV8_A64_RET
+     return    uOpc == OP_ARMV8_A64_RET
             || uOpc == OP_ARMV8_A64_RETAA
             || uOpc == OP_ARMV8_A64_RETAB
             || uOpc == OP_ARMV8_A64_ERET
             || uOpc == OP_ARMV8_A64_ERETAA
-            || uOpc == OP_ARMV8_A64_ERETAB)
-         return true;
-
-     return false;
- #else
-     if (   uOpc == OP_RETN
+            || uOpc == OP_ARMV8_A64_ERETAB;
+
+ #elif defined(VBOX_VMM_TARGET_X86)
+     return    uOpc == OP_RETN
             || uOpc == OP_RETF
             || uOpc == OP_IRET
             || uOpc == OP_SYSEXIT
-            || uOpc == OP_SYSRET)
-         return true;
-
-     return false;
+            || uOpc == OP_SYSRET;
+
+ #else
+ # error "port me"
  #endif
  }
…
  static bool dbgfR3FlowAddrEqual(PDBGFADDRESS pAddr1, PDBGFADDRESS pAddr2)
  {
-     return pAddr1->Sel == pAddr2->Sel
-         && pAddr1->off == pAddr2->off;
+     return    pAddr1->Sel == pAddr2->Sel
+            && pAddr1->off == pAddr2->off;
  }
…
  static bool dbgfR3FlowAddrLower(PDBGFADDRESS pAddr1, PDBGFADDRESS pAddr2)
  {
-     return pAddr1->Sel == pAddr2->Sel
-         && pAddr1->off < pAddr2->off;
+     return    pAddr1->Sel == pAddr2->Sel
+            && pAddr1->off < pAddr2->off;
  }
…
  static bool dbgfR3FlowAddrIntersect(PDBGFFLOWBBINT pFlowBb, PDBGFADDRESS pAddr)
  {
-     return (pFlowBb->AddrStart.Sel == pAddr->Sel)
-         && (pFlowBb->AddrStart.off <= pAddr->off)
-         && (pFlowBb->AddrEnd.off >= pAddr->off);
+     return    pFlowBb->AddrStart.Sel == pAddr->Sel
+            && pFlowBb->AddrStart.off <= pAddr->off
+            && pFlowBb->AddrEnd.off   >= pAddr->off;
  }
…
          if (pAddr1->off >= pAddr2->off)
              return pAddr1->off - pAddr2->off;
-         else
-             return pAddr2->off - pAddr1->off;
-     }
-     else
-         AssertFailed();
-
+         return pAddr2->off - pAddr1->off;
+     }
+     AssertFailed();
      return 0;
  }
…
                                            uint32_t cbInstr, bool fRelJmp, PDBGFADDRESS pAddrJmpTarget)
  {
-     int rc = VINF_SUCCESS;
-
      Assert(!dbgfR3FlowBranchTargetIsIndirect(pDisParam));

-     /* Relative jumps are always from the beginning of the next instruction. */
      *pAddrJmpTarget = *pAddrInstr;
- #ifdef VBOX_VMM_TARGET_ARMV8
-     /* On ARM relative jumps are always from the beginning of the curent instruction (b #0 will jump to itself for instance). */
-     RT_NOREF(cbInstr);
- #else
+ #ifdef VBOX_VMM_TARGET_X86
+     /* Relative to the next instruction. */
      DBGFR3AddrAdd(pAddrJmpTarget, cbInstr);
+ #elif defined(VBOX_VMM_TARGET_ARMV8)
+     /* Relative to the start of the instruction. */
+     RT_NOREF(cbInstr);
+ #else
+ # error "port me"
  #endif
…
          iRel = (int64_t)pDisParam->uValue;
      else
-         AssertFailedStmt(rc = VERR_NOT_SUPPORTED);
+         AssertFailedReturn(VERR_NOT_SUPPORTED);

      if (iRel < 0)
…
      }
      else
-         AssertFailedStmt(rc = VERR_INVALID_STATE);
-
-     return rc;
+         AssertFailedReturn(VERR_INVALID_STATE);
+
+     return VINF_SUCCESS;
  }
…
  #ifdef VBOX_VMM_TARGET_ARMV8
-     PDISOPPARAM pParam =   uOpc == OP_ARMV8_A64_B || uOpc == OP_ARMV8_A64_BC
-                          ? &DisState.Param1
-                          :   uOpc == OP_ARMV8_A64_CBZ || uOpc == OP_ARMV8_A64_CBNZ
-                            ? &DisState.Param2 /* cbz/cbnz. */
-                            : &DisState.Param3; /* tbz/tbnz. */
+     PDISOPPARAM pParam = uOpc == OP_ARMV8_A64_B || uOpc == OP_ARMV8_A64_BC
+                        ? &DisState.Param1
+                        : uOpc == OP_ARMV8_A64_CBZ || uOpc == OP_ARMV8_A64_CBNZ
+                        ? &DisState.Param2  /* cbz/cbnz. */
+                        : &DisState.Param3; /* tbz/tbnz. */
  #else
      PDISOPPARAM pParam = &DisState.Param1;
…
  static int dbgfR3FlowPopulate(PUVM pUVM, VMCPUID idCpu, PDBGFFLOWINT pThis, uint32_t cbDisasmMax, uint32_t fFlags)
  {
-     int rc = VINF_SUCCESS;
      PDBGFFLOWBBINT pFlowBb = dbgfR3FlowGetUnpopulatedBb(pThis);
-
      while (pFlowBb != NULL)
      {
-         rc = dbgfR3FlowBbProcess(pUVM, idCpu, pThis, pFlowBb, cbDisasmMax, fFlags);
-         if (RT_FAILURE(rc))
-             break;
-
-         pFlowBb = dbgfR3FlowGetUnpopulatedBb(pThis);
-     }
-
-     return rc;
+         int rc = dbgfR3FlowBbProcess(pUVM, idCpu, pThis, pFlowBb, cbDisasmMax, fFlags);
+         if (RT_SUCCESS(rc))
+             pFlowBb = dbgfR3FlowGetUnpopulatedBb(pThis);
+         else
+             return rc;
+     }
+
+     return VINF_SUCCESS;
  }
…
      /* Create the control flow graph container. */
-     int rc = VINF_SUCCESS;
+     int rc;
      PDBGFFLOWINT pThis = (PDBGFFLOWINT)RTMemAllocZ(sizeof(DBGFFLOWINT));
      if (RT_LIKELY(pThis))
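Note: the rewritten comments pin down the differing PC-relative semantics: x86 branch displacements are taken from the end of the instruction, A64 displacements from the instruction itself (so a B with displacement 0 branches to itself, per the old comment). Reduced to two hypothetical helpers:

    #include <stdint.h>

    static uint64_t branchTargetX86(uint64_t uInstrAddr, unsigned cbInstr, int64_t iRel)
    {
        return uInstrAddr + cbInstr + iRel; /* relative to the next instruction */
    }

    static uint64_t branchTargetA64(uint64_t uInstrAddr, int64_t iRel)
    {
        return uInstrAddr + iRel;           /* relative to the branch itself */
    }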
trunk/src/VBox/VMM/VMMR3/DBGFReg.cpp
r106365 → r107227:

      /* The descriptors. */
-     uint32_t cLookupRecs = 0;
-     uint32_t iDesc;
+ #ifdef VBOX_VMM_TARGET_X86
+     DBGFREG const   enmCpuFirst = DBGFREG_X86_FIRST;
+     DBGFREG const   enmCpuLast  = DBGFREG_X86_LAST;
+ #elif defined(VBOX_VMM_TARGET_ARMV8)
+     DBGFREG const   enmCpuFirst = DBGFREG_ARMV8_FIRST;
+     DBGFREG const   enmCpuLast  = DBGFREG_ARMV8_LAST;
+ #else
+ # error "port me"
+ #endif
+     unsigned const  cCpuDescs   = (unsigned)enmCpuLast - (unsigned)enmCpuFirst + 1;
+     uint32_t        cLookupRecs = 0;
+     uint32_t        iDesc;
      for (iDesc = 0; paRegisters[iDesc].pszName != NULL; iDesc++)
      {
          AssertMsgReturn(dbgfR3RegIsNameValid(paRegisters[iDesc].pszName, 0), ("%s (#%u)\n", paRegisters[iDesc].pszName, iDesc), VERR_INVALID_NAME);

-         if (enmType == DBGFREGSETTYPE_CPU)
- #if defined(VBOX_VMM_TARGET_ARMV8)
-             /** @todo This needs a general solution to avoid architecture dependent stuff here. */
-             AssertMsgReturn(iDesc < (unsigned)DBGFREG_END,
-                             ("%d iDesc=%d\n", paRegisters[iDesc].enmReg, iDesc),
+         if (enmType == DBGFREGSETTYPE_CPU) /* The CPU descriptors must be in enum order. */
+             AssertMsgReturn(iDesc < cCpuDescs && (unsigned)paRegisters[iDesc].enmReg == iDesc + (unsigned)enmCpuFirst,
+                             ("%d iDesc=%u+%d=%u\n", paRegisters[iDesc].enmReg, iDesc, enmCpuFirst, iDesc + (unsigned)enmCpuFirst),
                              VERR_INVALID_PARAMETER);
- #else
-             AssertMsgReturn(iDesc < (unsigned)DBGFREG_END && (unsigned)paRegisters[iDesc].enmReg == iDesc,
-                             ("%d iDesc=%d\n", paRegisters[iDesc].enmReg, iDesc),
-                             VERR_INVALID_PARAMETER);
- #endif
          else
              AssertReturn(paRegisters[iDesc].enmReg == DBGFREG_END, VERR_INVALID_PARAMETER);
…
       * Look up the register and get the register value.
       */
- #ifndef VBOX_VMM_TARGET_ARMV8
-     if (RT_LIKELY(pSet->cDescs > (size_t)enmReg))
-     {
-         PCDBGFREGDESC pDesc = &pSet->paDescs[enmReg];
+ #ifdef VBOX_VMM_TARGET_X86
+     DBGFREG const enmCpuFirst = DBGFREG_X86_FIRST;
+ #elif defined(VBOX_VMM_TARGET_ARMV8)
+     DBGFREG const enmCpuFirst = DBGFREG_ARMV8_FIRST;
  #else
-     if (RT_LIKELY(pSet->cDescs > (size_t)(enmReg - DBGFREG_ARMV8_FIRST)))
-     {
-         PCDBGFREGDESC pDesc = &pSet->paDescs[enmReg - DBGFREG_ARMV8_FIRST];
- #endif
+ # error "port me"
+ #endif
+     uint32_t const idxDesc = (uint32_t)enmReg - (uint32_t)enmCpuFirst;
+     if (RT_LIKELY(idxDesc < pSet->cDescs))
+     {
+         PCDBGFREGDESC const pDesc = &pSet->paDescs[idxDesc];

          pValue->au64[0] = pValue->au64[1] = 0;
trunk/src/VBox/VMM/VMMR3/DBGFStack.cpp
r106383 → r107227:

  {
      m_State.u32Magic     = RTDBGUNWINDSTATE_MAGIC;
- #if defined(VBOX_VMM_TARGET_ARMV8)
+ #ifdef VBOX_VMM_TARGET_ARMV8
      m_State.enmArch      = RTLDRARCH_ARM64;
+ #elif defined(VBOX_VMM_TARGET_X86)
+     m_State.enmArch      = RTLDRARCH_AMD64;
  #else
-     m_State.enmArch      = RTLDRARCH_AMD64;
+ # error "port me"
  #endif
      m_State.pfnReadStack = dbgfR3StackReadCallback;
…
      if (pInitialCtx)
      {
- #if defined(VBOX_VMM_TARGET_ARMV8)
+ #ifdef VBOX_VMM_TARGET_ARMV8
          AssertCompile(RT_ELEMENTS(m_State.u.armv8.auGprs) == RT_ELEMENTS(pInitialCtx->aGRegs));

…
          for (uint32_t i = 0; i < RT_ELEMENTS(m_State.u.armv8.auGprs); i++)
              m_State.u.armv8.auGprs[i] = pInitialCtx->aGRegs[i].x;
- #else
+
+ #elif defined(VBOX_VMM_TARGET_X86)
          m_State.u.x86.auRegs[X86_GREG_xAX] = pInitialCtx->rax;
          m_State.u.x86.auRegs[X86_GREG_xCX] = pInitialCtx->rcx;
…
  static DECLCALLBACK(int) dbgfR3StackReadCallback(PRTDBGUNWINDSTATE pThis, RTUINTPTR uSp, size_t cbToRead, void *pvDst)
  {
- #if defined(VBOX_VMM_TARGET_ARMV8)
+ #ifdef VBOX_VMM_TARGET_ARMV8
      Assert(pThis->enmArch == RTLDRARCH_ARM64);
- #else
+ #elif defined(VBOX_VMM_TARGET_X86)
      Assert(   pThis->enmArch == RTLDRARCH_AMD64
             || pThis->enmArch == RTLDRARCH_X86_32);
+ #else
+ # error "port me"
  #endif
…
      else
      {
- #if defined(VBOX_VMM_TARGET_ARMV8)
-         DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &SrcAddr, uSp);
- #else
+ #ifdef VBOX_VMM_TARGET_X86
          if (   pThis->enmArch == RTLDRARCH_X86_32
              || pThis->enmArch == RTLDRARCH_X86_16)
…
          else
              DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &SrcAddr, uSp);
+ #else
+         DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &SrcAddr, uSp);
  #endif
      }
…
  static bool dbgfR3UnwindCtxSetPcAndSp(PDBGFUNWINDCTX pUnwindCtx, PCDBGFADDRESS pAddrPC, PCDBGFADDRESS pAddrStack)
  {
- #if defined(VBOX_VMM_TARGET_ARMV8)
+ #ifdef VBOX_VMM_TARGET_ARMV8
      Assert(pUnwindCtx->m_State.enmArch == RTLDRARCH_ARM64);

…
      Assert(!DBGFADDRESS_IS_FAR(pAddrStack));
      pUnwindCtx->m_State.u.armv8.uSpEl1 = pAddrStack->FlatPtr; /** @todo EL0 stack pointer. */
- #else
+
+ #elif defined(VBOX_VMM_TARGET_X86)
      Assert(   pUnwindCtx->m_State.enmArch == RTLDRARCH_AMD64
             || pUnwindCtx->m_State.enmArch == RTLDRARCH_X86_32);
…
          pUnwindCtx->m_State.u.x86.auSegs[X86_SREG_SS] = pAddrStack->Sel;
      }
+
+ #else
+ # error "port me"
  #endif
…
 
 
- #if defined(VBOX_VMM_TARGET_ARMV8)
  /**
   * Internal worker routine.
   *
   * On ARMv8 the typical stack frame layout is like this:
   *     ..  ..
   *      4  return address
   *      0  old fp; current fp points here
+  *
+  * On x86 the typical stack frame layout is like this:
+  *     ..  ..
+  *     16  parameter 2
+  *     12  parameter 1
+  *      8  parameter 0
+  *      4  return address
+  *      0  old ebp; current ebp points here
   */
  DECL_NO_INLINE(static, int) dbgfR3StackWalk(PDBGFUNWINDCTX pUnwindCtx, PDBGFSTACKFRAME pFrame, bool fFirst)
  {
…
       * Figure the return address size and use the old PC to guess stack item size.
       */
+ #ifdef VBOX_VMM_TARGET_ARMV8
      unsigned const cbRetAddr   = 8;
      unsigned const cbStackItem = 8; /** @todo AARCH32. */
      PVMCPUCC const pVCpu       = pUnwindCtx->m_pUVM->pVM->apCpusR3[pUnwindCtx->m_idCpu];

-     /*
-      * Read the raw frame data.
-      * We double cbRetAddr in case we have a far return.
-      */
-     union
-     {
-         uint64_t *pu64;
-         uint32_t *pu32;
-         uint8_t  *pb;
-         void     *pv;
-     } u, uRet, uArgs, uBp;
-     size_t cbRead = cbRetAddr * 2 + cbStackItem + sizeof(pFrame->Args);
-     u.pv = alloca(cbRead);
-     uBp = u;
-     uRet.pb = u.pb + cbStackItem;
-     uArgs.pb = u.pb + cbStackItem + cbRetAddr;
-
-     Assert(DBGFADDRESS_IS_VALID(&pFrame->AddrFrame));
-     int rc = dbgfR3StackRead(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, u.pv, &pFrame->AddrFrame, cbRead, &cbRead);
-     if (   RT_FAILURE(rc)
-         || cbRead < cbRetAddr + cbStackItem)
-         pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_LAST;
-
-     /*
-      * Return Frame address.
-      *
-      * If we used unwind info to get here, the unwind register context will be
-      * positioned after the return instruction has been executed.  We start by
-      * picking up the rBP register here for return frame and will try improve
-      * on it further down by using unwind info.
-      */
-     pFrame->AddrReturnFrame = pFrame->AddrFrame;
-     if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO)
-     {
-         AssertFailed(); /** @todo */
-     }
-     else
-     {
-         switch (cbStackItem)
-         {
-             case 4:     pFrame->AddrReturnFrame.off = *uBp.pu32; break;
-             case 8:     pFrame->AddrReturnFrame.off = CPUMGetGCPtrPacStripped(pVCpu, *uBp.pu64); break;
-             default:    AssertMsgFailedReturn(("cbStackItem=%d\n", cbStackItem), VERR_DBGF_STACK_IPE_1);
-         }
-
-         pFrame->AddrReturnFrame.FlatPtr += pFrame->AddrReturnFrame.off - pFrame->AddrFrame.off;
-     }
-
-     /*
-      * Return Stack Address.
-      */
-     pFrame->AddrReturnStack = pFrame->AddrReturnFrame;
-     if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO)
-     {
-         AssertFailed();
-     }
-     else
-     {
-         pFrame->AddrReturnStack.off     += cbStackItem + cbRetAddr;
-         pFrame->AddrReturnStack.FlatPtr += cbStackItem + cbRetAddr;
-     }
-
-     /*
-      * Return PC.
-      */
-     pFrame->AddrReturnPC = pFrame->AddrPC;
-     if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO)
-     {
-         AssertFailed();
-     }
-     else
-     {
-         switch (pFrame->enmReturnType)
-         {
-             case RTDBGRETURNTYPE_NEAR64:
-                 if (DBGFADDRESS_IS_VALID(&pFrame->AddrReturnPC))
-                 {
-                     pFrame->AddrReturnPC.FlatPtr += CPUMGetGCPtrPacStripped(pVCpu, *uRet.pu64) - pFrame->AddrReturnPC.off;
-                     pFrame->AddrReturnPC.off      = CPUMGetGCPtrPacStripped(pVCpu, *uRet.pu64);
-                 }
-                 else
-                     DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &pFrame->AddrReturnPC, CPUMGetGCPtrPacStripped(pVCpu, *uRet.pu64));
-                 break;
-             default:
-                 AssertMsgFailed(("enmReturnType=%d\n", pFrame->enmReturnType));
-                 return VERR_INVALID_PARAMETER;
-         }
-     }
-
-
-     pFrame->pSymReturnPC  = DBGFR3AsSymbolByAddrA(pUnwindCtx->m_pUVM, pUnwindCtx->m_hAs, &pFrame->AddrReturnPC,
-                                                   RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL | RTDBGSYMADDR_FLAGS_SKIP_ABS_IN_DEFERRED,
-                                                   NULL /*poffDisp*/, NULL /*phMod*/);
-     pFrame->pLineReturnPC = DBGFR3AsLineByAddrA(pUnwindCtx->m_pUVM, pUnwindCtx->m_hAs, &pFrame->AddrReturnPC,
-                                                 NULL /*poffDisp*/, NULL /*phMod*/);
-
-     /*
-      * Frame bitness flag.
-      */
-     /** @todo use previous return type for this? */
-     pFrame->fFlags &= ~(DBGFSTACKFRAME_FLAGS_32BIT | DBGFSTACKFRAME_FLAGS_64BIT);
-     switch (cbStackItem)
-     {
-         case 4:     pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_32BIT; break;
-         case 8:     pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_64BIT; break;
-         default:    AssertMsgFailedReturn(("cbStackItem=%d\n", cbStackItem), VERR_DBGF_STACK_IPE_2);
-     }
-
-     /*
-      * The arguments.
-      */
-     memcpy(&pFrame->Args, uArgs.pv, sizeof(pFrame->Args));
-
-     /*
-      * Collect register changes.
-      * Then call the OS layer to assist us (e.g. NT trap frames).
-      */
-     if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO)
-     {
-         AssertFailed();
-     }
-
-     /*
-      * Try use unwind information to locate the return frame pointer (for the
-      * next loop iteration).
-      */
-     Assert(!(pFrame->fFlags & DBGFSTACKFRAME_FLAGS_UNWIND_INFO_RET));
-     pFrame->enmReturnFrameReturnType = RTDBGRETURNTYPE_INVALID;
-     if (!(pFrame->fFlags & DBGFSTACKFRAME_FLAGS_LAST))
-     {
-         /* Set PC and SP if we didn't unwind our way here (context will then point
-            and the return PC and SP already). */
-         if (!(pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO))
-         {
-             dbgfR3UnwindCtxSetPcAndSp(pUnwindCtx, &pFrame->AddrReturnPC, &pFrame->AddrReturnStack);
-             pUnwindCtx->m_State.u.armv8.auGprs[ARMV8_A64_REG_BP] = pFrame->AddrReturnFrame.off;
-         }
-         if (pUnwindCtx->m_State.enmArch == RTLDRARCH_ARM64)
-             pUnwindCtx->m_State.u.armv8.Loaded.fAll = 0;
-         else
-             AssertFailed();
-         if (dbgfR3UnwindCtxDoOneFrame(pUnwindCtx))
-         {
-             Assert(!pUnwindCtx->m_fIsHostRing0);
-
-             DBGFADDRESS AddrReturnFrame = pFrame->AddrReturnFrame;
-             DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &AddrReturnFrame, pUnwindCtx->m_State.u.armv8.FrameAddr);
-             pFrame->AddrReturnFrame     = AddrReturnFrame;
-
-             pFrame->enmReturnFrameReturnType = pUnwindCtx->m_State.enmRetType;
-             pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_UNWIND_INFO_RET;
-         }
-     }
-
-     return VINF_SUCCESS;
- }
- #else
- /**
-  * Internal worker routine.
-  *
-  * On x86 the typical stack frame layout is like this:
-  *     ..  ..
-  *     16  parameter 2
-  *     12  parameter 1
-  *      8  parameter 0
-  *      4  return address
-  *      0  old ebp; current ebp points here
-  */
- DECL_NO_INLINE(static, int) dbgfR3StackWalk(PDBGFUNWINDCTX pUnwindCtx, PDBGFSTACKFRAME pFrame, bool fFirst)
- {
-     /*
-      * Stop if we got a read error in the previous run.
-      */
-     if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_LAST)
-         return VERR_NO_MORE_FILES;
-
-     /*
-      * Advance the frame (except for the first).
-      */
-     if (!fFirst) /** @todo we can probably eliminate this fFirst business... */
-     {
-         /* frame, pc and stack is taken from the existing frames return members. */
-         pFrame->AddrFrame = pFrame->AddrReturnFrame;
-         pFrame->AddrPC    = pFrame->AddrReturnPC;
-         pFrame->pSymPC    = pFrame->pSymReturnPC;
-         pFrame->pLinePC   = pFrame->pLineReturnPC;
-
-         /* increment the frame number. */
-         pFrame->iFrame++;
-
-         /* UNWIND_INFO_RET -> USED_UNWIND; return type */
-         if (!(pFrame->fFlags & DBGFSTACKFRAME_FLAGS_UNWIND_INFO_RET))
-             pFrame->fFlags &= ~DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO;
-         else
-         {
-             pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO;
-             pFrame->fFlags &= ~DBGFSTACKFRAME_FLAGS_UNWIND_INFO_RET;
-             if (pFrame->enmReturnFrameReturnType != RTDBGRETURNTYPE_INVALID)
-             {
-                 pFrame->enmReturnType = pFrame->enmReturnFrameReturnType;
-                 pFrame->enmReturnFrameReturnType = RTDBGRETURNTYPE_INVALID;
-             }
-         }
-         pFrame->fFlags &= ~DBGFSTACKFRAME_FLAGS_TRAP_FRAME;
-     }
-
-     /*
-      * Figure the return address size and use the old PC to guess stack item size.
-      */
+ #elif defined(VBOX_VMM_TARGET_X86)
      /** @todo this is bogus... */
      unsigned cbRetAddr = RTDbgReturnTypeSize(pFrame->enmReturnType);
…
          }
      }
+ #endif

      /*
…
          void     *pv;
      } u, uRet, uArgs, uBp;
-     size_t cbRead = cbRetAddr*2 + cbStackItem + sizeof(pFrame->Args);
+     size_t cbRead = cbRetAddr * 2 + cbStackItem + sizeof(pFrame->Args);
      u.pv = alloca(cbRead);
      uBp = u;
…
      if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO)
      {
+ #ifdef VBOX_VMM_TARGET_ARMV8
+         AssertFailed(); /** @todo */
+
+ #elif defined(VBOX_VMM_TARGET_X86)
          if (   pFrame->enmReturnType == RTDBGRETURNTYPE_IRET32_PRIV
              || pFrame->enmReturnType == RTDBGRETURNTYPE_IRET64)
…
              pFrame->AddrReturnFrame.FlatPtr += pFrame->AddrReturnFrame.off - pFrame->AddrFrame.off;
          }
+ #endif /* VBOX_VMM_TARGET_X86 */
      }
      else
      {
          switch (cbStackItem)
          {
+ #ifdef VBOX_VMM_TARGET_ARMV8
+             case 8:     pFrame->AddrReturnFrame.off = CPUMGetGCPtrPacStripped(pVCpu, *uBp.pu64); break;
+ #else
+             case 8:     pFrame->AddrReturnFrame.off = *uBp.pu64; break;
+ #endif
+             case 4:     pFrame->AddrReturnFrame.off = *uBp.pu32; break;
+ #ifdef VBOX_VMM_TARGET_X86
              case 2:     pFrame->AddrReturnFrame.off = *uBp.pu16; break;
-             case 4:     pFrame->AddrReturnFrame.off = *uBp.pu32; break;
-             case 8:     pFrame->AddrReturnFrame.off = *uBp.pu64; break;
+ #endif
              default:    AssertMsgFailedReturn(("cbStackItem=%d\n", cbStackItem), VERR_DBGF_STACK_IPE_1);
          }

+ #ifdef VBOX_VMM_TARGET_X86
          /* Watcom tries to keep the frame pointer odd for far returns. */
          if (   cbStackItem <= 4
…
          else if (pFrame->enmReturnType == RTDBGRETURNTYPE_NEAR32)
          {
- #if 1
+ # if 1
              /* Assumes returning 32-bit code. */
              pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_USED_ODD_EVEN;
              pFrame->enmReturnType = RTDBGRETURNTYPE_FAR32;
              cbRetAddr = 8;
- #else
+ # else
              /* Assumes returning 16-bit code. */
              pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_USED_ODD_EVEN;
              pFrame->enmReturnType = RTDBGRETURNTYPE_FAR16;
              cbRetAddr = 4;
- #endif
+ # endif
          }
…
              uArgs.pb = u.pb + cbStackItem + cbRetAddr;
          }
+ #endif /* VBOX_VMM_TARGET_X86 */

          pFrame->AddrReturnFrame.FlatPtr += pFrame->AddrReturnFrame.off - pFrame->AddrFrame.off;
…
      if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO)
      {
+ #ifdef VBOX_VMM_TARGET_ARMV8
+         AssertFailed();
+
+ #elif defined(VBOX_VMM_TARGET_X86)
          if (   pFrame->enmReturnType == RTDBGRETURNTYPE_IRET32_PRIV
              || pFrame->enmReturnType == RTDBGRETURNTYPE_IRET64)
…
              pFrame->AddrReturnStack.FlatPtr += pFrame->AddrReturnStack.off - pFrame->AddrStack.off;
          }
+ #endif /* VBOX_VMM_TARGET_X86 */
      }
…
      if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO)
      {
+ #ifdef VBOX_VMM_TARGET_ARMV8
+         AssertFailed();
+
+ #elif defined(VBOX_VMM_TARGET_X86)
          if (RTDbgReturnTypeIsNear(pFrame->enmReturnType))
          {
…
              DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &pFrame->AddrReturnPC,
                                   pUnwindCtx->m_State.u.x86.auSegs[X86_SREG_CS], pUnwindCtx->m_State.uPc);
+ #endif
      }
      else
      {
+ #ifdef VBOX_VMM_TARGET_ARMV8
+         switch (pFrame->enmReturnType)
+         {
+             case RTDBGRETURNTYPE_NEAR64:
+                 if (DBGFADDRESS_IS_VALID(&pFrame->AddrReturnPC))
+                 {
+                     pFrame->AddrReturnPC.FlatPtr += CPUMGetGCPtrPacStripped(pVCpu, *uRet.pu64) - pFrame->AddrReturnPC.off;
+                     pFrame->AddrReturnPC.off      = CPUMGetGCPtrPacStripped(pVCpu, *uRet.pu64);
+                 }
+                 else
+                     DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &pFrame->AddrReturnPC, CPUMGetGCPtrPacStripped(pVCpu, *uRet.pu64));
+                 break;
+             default:
+                 AssertMsgFailed(("enmReturnType=%d\n", pFrame->enmReturnType));
+                 return VERR_INVALID_PARAMETER;
+         }
+
+ #elif defined(VBOX_VMM_TARGET_X86)
          int rc2;
          switch (pFrame->enmReturnType)
…
                  return VERR_INVALID_PARAMETER;
          }
+ #endif /* VBOX_VMM_TARGET_X86 */
      }
…
      switch (cbStackItem)
      {
-         case 2:     pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_16BIT; break;
          case 4:     pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_32BIT; break;
          case 8:     pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_64BIT; break;
+ #ifdef VBOX_VMM_TARGET_X86
+         case 2:     pFrame->fFlags |= DBGFSTACKFRAME_FLAGS_16BIT; break;
+ #endif
          default:    AssertMsgFailedReturn(("cbStackItem=%d\n", cbStackItem), VERR_DBGF_STACK_IPE_2);
      }
…
      if (pFrame->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO)
      {
+ #if defined(VBOX_VMM_TARGET_X86)
          rc = dbgfR3StackWalkCollectRegisterChanges(pUnwindCtx->m_pUVM, pFrame, &pUnwindCtx->m_State);
          if (RT_FAILURE(rc))
…
              return rc;
          }
+ #else
+         AssertFailed();
+ #endif
      }
…
      {
          dbgfR3UnwindCtxSetPcAndSp(pUnwindCtx, &pFrame->AddrReturnPC, &pFrame->AddrReturnStack);
+ #ifdef VBOX_VMM_TARGET_ARMV8
+         pUnwindCtx->m_State.u.armv8.auGprs[ARMV8_A64_REG_BP] = pFrame->AddrReturnFrame.off;
+ #elif defined(VBOX_VMM_TARGET_X86)
          pUnwindCtx->m_State.u.x86.auRegs[X86_GREG_xBP] = pFrame->AddrReturnFrame.off;
-     }
+ #endif
+     }
+
+ #ifdef VBOX_VMM_TARGET_ARMV8
+     if (pUnwindCtx->m_State.enmArch == RTLDRARCH_ARM64)
+         pUnwindCtx->m_State.u.armv8.Loaded.fAll = 0;
+     else
+         AssertFailed();
+
+ #elif defined(VBOX_VMM_TARGET_X86)
      /** @todo Reevaluate CS if the previous frame return type isn't near. */
      if (   pUnwindCtx->m_State.enmArch == RTLDRARCH_AMD64
…
      else
          AssertFailed();
+ #endif
+
      if (dbgfR3UnwindCtxDoOneFrame(pUnwindCtx))
      {
+ #ifdef VBOX_VMM_TARGET_ARMV8
+         Assert(!pUnwindCtx->m_fIsHostRing0);
+ #elif defined(VBOX_VMM_TARGET_X86)
          if (pUnwindCtx->m_fIsHostRing0)
              DBGFR3AddrFromHostR0(&pFrame->AddrReturnFrame, pUnwindCtx->m_State.u.x86.FrameAddr.off);
          else
+ #endif
          {
              DBGFADDRESS AddrReturnFrame = pFrame->AddrReturnFrame;
+ #ifdef VBOX_VMM_TARGET_ARMV8
+             DBGFR3AddrFromFlat(pUnwindCtx->m_pUVM, &AddrReturnFrame, pUnwindCtx->m_State.u.armv8.FrameAddr);
+ #elif defined(VBOX_VMM_TARGET_X86)
              rc = DBGFR3AddrFromSelOff(pUnwindCtx->m_pUVM, pUnwindCtx->m_idCpu, &AddrReturnFrame,
                                        pUnwindCtx->m_State.u.x86.FrameAddr.sel, pUnwindCtx->m_State.u.x86.FrameAddr.off);
              if (RT_SUCCESS(rc))
+ #endif
                  pFrame->AddrReturnFrame = AddrReturnFrame;
          }
…
      return VINF_SUCCESS;
  }
- #endif
…
      int rc = VINF_SUCCESS;
- #if defined(VBOX_VMM_TARGET_ARMV8)
      if (pAddrPC)
          pCur->AddrPC = *pAddrPC;
+ #ifdef VBOX_VMM_TARGET_ARMV8
      else
          DBGFR3AddrFromFlat(pUVM, &pCur->AddrPC, pCtx->Pc.u64);
- #else
-     if (pAddrPC)
-         pCur->AddrPC = *pAddrPC;
+ #elif defined(VBOX_VMM_TARGET_X86)
      else if (enmCodeType != DBGFCODETYPE_GUEST)
          DBGFR3AddrFromFlat(pUVM, &pCur->AddrPC, pCtx->rip);
…
      PVMCPU const   pVCpu      = pUVM->pVM->apCpusR3[idCpu];
      CPUMMODE const enmCpuMode = CPUMGetGuestMode(pVCpu);
+
- #if defined(VBOX_VMM_TARGET_ARMV8)
+ #ifdef VBOX_VMM_TARGET_ARMV8
      /** @todo */
      Assert(enmCpuMode == CPUMMODE_ARMV8_AARCH64); RT_NOREF(enmCpuMode);
…
      if (enmReturnType == RTDBGRETURNTYPE_INVALID)
          pCur->enmReturnType = RTDBGRETURNTYPE_NEAR64;
- #else
+
+ #elif defined(VBOX_VMM_TARGET_X86)
      if (enmCpuMode == CPUMMODE_REAL)
      {
…
      }

- #if !defined(VBOX_VMM_TARGET_ARMV8)
+ #ifdef VBOX_VMM_TARGET_X86
      if (enmReturnType == RTDBGRETURNTYPE_INVALID)
          switch (pCur->AddrPC.fFlags & DBGFADDRESS_FLAGS_TYPE_MASK)
…
- #if defined(VBOX_VMM_TARGET_ARMV8)
      if (pAddrStack)
          pCur->AddrStack = *pAddrStack;
+ #ifdef VBOX_VMM_TARGET_ARMV8
      else
          DBGFR3AddrFromFlat(pUVM, &pCur->AddrStack, pCtx->aSpReg[1].u64 & fAddrMask); /** @todo EL0 stack. */
-
-     Assert(!(pCur->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO));
-     if (pAddrFrame)
-         pCur->AddrFrame = *pAddrFrame;
-     else
-         DBGFR3AddrFromFlat(pUVM, &pCur->AddrFrame, pCtx->aGRegs[ARMV8_A64_REG_BP].x & fAddrMask);
- #else
-     if (pAddrStack)
-         pCur->AddrStack = *pAddrStack;
+ #elif defined(VBOX_VMM_TARGET_X86)
      else if (enmCodeType != DBGFCODETYPE_GUEST)
          DBGFR3AddrFromFlat(pUVM, &pCur->AddrStack, pCtx->rsp & fAddrMask);
      else
          rc = DBGFR3AddrFromSelOff(pUVM, idCpu, &pCur->AddrStack, pCtx->ss.Sel, pCtx->rsp & fAddrMask);
+ #endif

      Assert(!(pCur->fFlags & DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO));
      if (pAddrFrame)
          pCur->AddrFrame = *pAddrFrame;
+ #ifdef VBOX_VMM_TARGET_ARMV8
+     else
+         DBGFR3AddrFromFlat(pUVM, &pCur->AddrFrame, pCtx->aGRegs[ARMV8_A64_REG_BP].x & fAddrMask);
+ #elif defined(VBOX_VMM_TARGET_X86)
      else if (enmCodeType != DBGFCODETYPE_GUEST)
          DBGFR3AddrFromFlat(pUVM, &pCur->AddrFrame, pCtx->rbp & fAddrMask);
…
          pCur->enmReturnType = UnwindCtx.m_State.enmRetType;
          pCur->fFlags |= DBGFSTACKFRAME_FLAGS_USED_UNWIND_INFO;
- #if defined(VBOX_VMM_TARGET_ARMV8)
+ #ifdef VBOX_VMM_TARGET_ARMV8
          DBGFR3AddrFromFlat(pUVM, &pCur->AddrFrame, UnwindCtx.m_State.u.armv8.FrameAddr);
- #else
+ #elif defined(VBOX_VMM_TARGET_X86)
          if (!UnwindCtx.m_fIsHostRing0)
              rc = DBGFR3AddrFromSelOff(UnwindCtx.m_pUVM, UnwindCtx.m_idCpu, &pCur->AddrFrame,
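Note: the merged dbgfR3StackWalk keeps the classic frame-pointer chain at its core on both targets: the saved frame pointer sits at offset 0 of the frame and the return address right above it, exactly as the merged doc comment describes. Reduced to its essence (a sketch assuming flat addresses and intact frame linkage; on ARMv8 the real code additionally strips pointer-authentication bits via CPUMGetGCPtrPacStripped):

    #include <stdint.h>

    typedef struct SketchFrame
    {
        uint64_t uSavedFp;  /* offset 0: caller's frame pointer */
        uint64_t uRetAddr;  /* offset 8 (4 on 32-bit x86): return address */
    } SketchFrame;

    static void sketchWalk(const SketchFrame *pFrame)
    {
        while (pFrame && pFrame->uSavedFp)
        {
            /* ... record pFrame->uRetAddr for this frame ... */
            pFrame = (const SketchFrame *)(uintptr_t)pFrame->uSavedFp;
        }
    }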
trunk/src/VBox/VMM/VMMR3/EM.cpp
r107194 → r107227:

      int rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll,
- #if defined(RT_ARCH_ARM64) && defined(RT_OS_DARWIN) && !defined(VBOX_VMM_TARGET_ARMV8)
+ #if (defined(VBOX_VMM_TARGET_X86) && !defined(RT_ARCH_X86) && !defined(RT_ARCH_AMD64)) \
+  || (defined(VBOX_VMM_TARGET_ARMV8) && !defined(RT_ARCH_ARM64)) /** @todo not main exec engine = iem/recomp would be better... */
                                  true
  #else
…
  #endif /* LOG_ENABLED || VBOX_STRICT */

- #if !defined(VBOX_VMM_TARGET_ARMV8)
+ #ifdef VBOX_VMM_TARGET_X86

  /**
…
  }

- #endif /* VBOX_VMM_TARGET_ARMV8 */
+ #endif /* VBOX_VMM_TARGET_X86 */
…
          rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
  #endif
- #if !defined(VBOX_VMM_TARGET_ARMV8)
      else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_NEM)
          rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
      else
      {
+ #if defined(VBOX_VMM_TARGET_X86) /** @todo IEM/arm64 */
          rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
          if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
              rc = VINF_EM_DBG_STEPPED;
+ #else
+         AssertFailed();
+         rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
+ #endif
      }

+ #ifdef VBOX_VMM_TARGET_X86
      if (rc != VINF_EM_EMULATE_SPLIT_LOCK)
      { /* likely */ }
…
          rc = VINF_EM_DBG_STEPPED;
      }
- #else
-     AssertMsg(pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_NEM,
-               ("%u\n", pVCpu->em.s.enmState));
-     rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
  #endif
      break;
…
  #ifdef VBOX_VMM_TARGET_ARMV8
      LogFlow(("emR3RecompilerExecute/%u: (pc=%RGv)\n", pVCpu->idCpu, (RTGCPTR)pVCpu->cpum.GstCtx.Pc.u64));
+ #elif defined(VBOX_VMM_TARGET_X86)
+     LogFlow(("emR3RecompilerExecute/%u: (cs:eip=%04x:%RGv)\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, (RTGCPTR)pVCpu->cpum.GstCtx.rip));
  #else
-     LogFlow(("emR3RecompilerExecute/%u: (cs:eip=%04x:%RGv)\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, (RTGCPTR)pVCpu->cpum.GstCtx.rip));
+ # error "port me"
  #endif
…
  #ifdef LOG_ENABLED
  # if defined(VBOX_VMM_TARGET_ARMV8)
-         Log3(("EM: pc=%08x\n", CPUMGetGuestFlatPC(pVCpu)));
- # else
+         Log3(("EM: pc=%08RX64\n", CPUMGetGuestFlatPC(pVCpu)));
+ # elif defined(VBOX_VMM_TARGET_X86)
          if (!pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
              Log(("EMR%d: %04X:%08RX64 RSP=%08RX64 IF=%d CR0=%x eflags=%x\n", CPUMGetGuestCPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel,
…
          else
              Log(("EMV86: %04X:%08X IF=%d\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.Bits.u1IF));
+ # else
+ #  error "port me"
  # endif
  #endif
…
       * Check if we can switch back to the main execution engine now.
       */
- #if !defined(VBOX_VMM_TARGET_ARMV8)
+ #ifdef VBOX_WITH_HWVIRT
      if (VM_IS_HM_ENABLED(pVM))
      {
…
          PDMCritSectBothFF(pVM, pVCpu);

- #if !defined(VBOX_VMM_TARGET_ARMV8)
+ #ifdef VBOX_VMM_TARGET_X86
      /* Update CR3 (Nested Paging case for HM). */
      if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
…
      }

-
- #if !defined(VBOX_VMM_TARGET_ARMV8)
+ #ifdef VBOX_VMM_TARGET_X86
+
  /**
   * Helper for emR3ForcedActions() for VMX external interrupt VM-exit.
…
  static int emR3VmxNstGstIntrIntercept(PVMCPU pVCpu)
  {
- #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+ # ifdef VBOX_WITH_NESTED_HWVIRT_VMX
      /* Handle the "external interrupt" VM-exit intercept. */
      if (   CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_EXT_INT_EXIT)
…
          return VBOXSTRICTRC_VAL(rcStrict);
      }
- #else
+ # else
      RT_NOREF(pVCpu);
- #endif
+ # endif
      return VINF_NO_CHANGE;
  }
…
  static int emR3SvmNstGstIntrIntercept(PVMCPU pVCpu)
  {
- #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
+ # ifdef VBOX_WITH_NESTED_HWVIRT_SVM
      /* Handle the physical interrupt intercept (can be masked by the nested hypervisor). */
      if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_INTR))
…
          return VINF_EM_TRIPLE_FAULT;
      }
- #else
+ # else
      NOREF(pVCpu);
- #endif
+ # endif
      return VINF_NO_CHANGE;
  }
…
  static int emR3SvmNstGstVirtIntrIntercept(PVMCPU pVCpu)
  {
- #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
+ # ifdef VBOX_WITH_NESTED_HWVIRT_SVM
      if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_VINTR))
      {
…
          return VINF_EM_TRIPLE_FAULT;
      }
- #else
+ # else
      NOREF(pVCpu);
- #endif
+ # endif
      return VINF_NO_CHANGE;
  }
- #endif
-
+
+ #endif /* VBOX_VMM_TARGET_X86 */

  /**
…
          TMR3TimerQueuesDo(pVM);

- #if !defined(VBOX_VMM_TARGET_ARMV8)
+ #ifdef VBOX_VMM_TARGET_X86
      /*
       * Pick up asynchronously posted interrupts into the APIC.
…
              Assert(!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER));
          }
- # endif
+ # endif /* VBOX_WITH_NESTED_HWVIRT_VMX */

      /*
…
          }

- #else  /* VBOX_VMM_TARGET_ARMV8 */
+ #else /* VBOX_VMM_TARGET_ARMV8 */
          bool fWakeupPending = false;
-
          if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VTIMER_ACTIVATED))
          {
…
          /* check that we got them all */
          AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
- #if defined(VBOX_VMM_TARGET_ARMV8)
+ #ifdef VBOX_VMM_TARGET_ARMV8
          AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ | VMCPU_FF_DBGF));
+ #elif defined(VBOX_VMM_TARGET_X86)
+         AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_DBGF | VMCPU_FF_INTERRUPT_NESTED_GUEST | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_PREEMPT_TIMER | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW));
  #else
-         AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_DBGF | VMCPU_FF_INTERRUPT_NESTED_GUEST | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_PREEMPT_TIMER | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW));
+ # error "port me"
  #endif
      }
…
      fFFDone = false;

- #if defined(VBOX_STRICT) && !defined(VBOX_VMM_TARGET_ARMV8)
+ #if defined(VBOX_STRICT) && defined(VBOX_VMM_TARGET_X86)
      CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
  #endif
…
          if (!pVM->em.s.fIemExecutesAll)
          {
- #if !defined(VBOX_VMM_TARGET_ARMV8)
+ #ifdef VBOX_WITH_HWVIRT
              if (VM_IS_HM_ENABLED(pVM))
              {
…
                  else if (rc == VINF_SUCCESS)
                      rc = VINF_EM_RESCHEDULE; /* Need to check whether we can run in HM or NEM again. */
- #ifndef VBOX_VMM_TARGET_ARMV8
+ #ifdef VBOX_VMM_TARGET_X86
                  if (rc != VINF_EM_EMULATE_SPLIT_LOCK)
                  { /* likely */ }
…
                  if (TRPMHasTrap(pVCpu))
                      rc = VINF_EM_RESCHEDULE;
- #if !defined(VBOX_VMM_TARGET_ARMV8)
+ #ifdef VBOX_VMM_TARGET_X86
                  /* MWAIT has a special extension where it's woken up when
                     an interrupt is pending even when IF=0. */
…
                  else
                  {
- #if defined(VBOX_VMM_TARGET_ARMV8)
+ #ifdef VBOX_VMM_TARGET_ARMV8
                      const uint32_t fWaitHalted = 0; /* WFI/WFE always return when an interrupt happens. */
- #else
+ #elif defined(VBOX_VMM_TARGET_X86)
                      const uint32_t fWaitHalted = (CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF) ? 0 : VMWAITHALTED_F_IGNORE_IRQS;
  #endif
                      rc = VMR3WaitHalted(pVM, pVCpu, fWaitHalted);
+
                      /* We're only interested in NMI/SMIs here which have their own FFs, so we don't need to
                         check VMCPU_FF_UPDATE_APIC here.
*/ 2659 2667 if ( rc == VINF_SUCCESS 2660 #if defined(VBOX_VMM_TARGET_ARMV8) 2661 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_VTIMER_ACTIVATED 2662 | VMCPU_FF_INTERRUPT_FIQ | VMCPU_FF_INTERRUPT_IRQ) 2668 #ifdef VBOX_VMM_TARGET_ARMV8 2669 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI 2670 | VMCPU_FF_INTERRUPT_FIQ | VMCPU_FF_INTERRUPT_IRQ 2671 | VMCPU_FF_VTIMER_ACTIVATED) 2672 #elif defined(VBOX_VMM_TARGET_X86) 2673 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT) 2663 2674 #else 2664 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT) 2675 # error "port me" 2665 2676 #endif 2666 2677 ) -
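Throughout EM.cpp, and across the rest of the changeset, the old negative test "#if !defined(VBOX_VMM_TARGET_ARMV8)" is replaced by a positive dispatch on the target, so that a future third target fails loudly at compile time instead of silently taking the x86 path. The canonical shape, exactly as the hunks above use it:

    #ifdef VBOX_VMM_TARGET_ARMV8
        /* ARMv8-specific code */
    #elif defined(VBOX_VMM_TARGET_X86)
        /* x86-specific code */
    #else
    # error "port me"
    #endif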
trunk/src/VBox/VMM/VMMR3/EMR3Dbg.cpp
r106895 r107227 169 169 break; 170 170 171 #if !defined(VBOX_VMM_TARGET_ARMV8)171 #if defined(VBOX_VMM_TARGET_X86) && defined(VBOX_WITH_HWVIRT) 172 172 case EMEXIT_F_KIND_VMX: 173 173 pszExitName = HMGetVmxExitName(uFlagsAndType & EMEXIT_F_TYPE_MASK); … … 187 187 break; 188 188 189 #ifdef VBOX_VMM_TARGET_X86 189 190 case EMEXIT_F_KIND_XCPT: 190 #if defined(VBOX_VMM_TARGET_ARMV8)191 pszExitName = NULL;192 AssertReleaseFailed();193 #else194 191 switch (uFlagsAndType & EMEXIT_F_TYPE_MASK) 195 192 { … … 232 229 break; 233 230 } 231 break; 234 232 #endif 235 break;236 233 237 234 default: -
trunk/src/VBox/VMM/VMMR3/EMR3Nem.cpp
r107194 r107227 97 97 return VINF_EM_RESCHEDULE; 98 98 99 #if defined(VBOX_VMM_TARGET_ARMV8) 100 uint64_t const uOldPc = pVCpu->cpum.GstCtx.Pc.u64; 99 #ifdef VBOX_VMM_TARGET_ARMV8 100 uint64_t const uOldPc = pVCpu->cpum.GstCtx.Pc.u64; 101 #elif defined(VBOX_VMM_TARGET_X86) 102 uint64_t const uOldRip = pVCpu->cpum.GstCtx.rip; 101 103 #else 102 uint64_t const uOldRip = pVCpu->cpum.GstCtx.rip; 104 # error "port me" 103 105 #endif 104 106 for (;;) … … 147 149 * Done? 148 150 */ 149 #if defined(VBOX_VMM_TARGET_ARMV8)151 #ifdef VBOX_VMM_TARGET_ARMV8 150 152 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_PC); 151 153 if ( (rcStrict != VINF_SUCCESS && rcStrict != VINF_EM_DBG_STEPPED) … … 160 162 return rcStrict; 161 163 } 162 #else 164 165 #elif defined(VBOX_VMM_TARGET_X86) 163 166 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP); 164 167 if ( (rcStrict != VINF_SUCCESS && rcStrict != VINF_EM_DBG_STEPPED) … … 173 176 return rcStrict; 174 177 } 178 179 #else 180 # error "port me" 175 181 #endif 176 182 } … … 201 207 * Log it. 202 208 */ 203 # ifdef VBOX_VMM_TARGET_ARMV8209 # ifdef VBOX_VMM_TARGET_ARMV8 204 210 Log(("EMINS: %RGv SP_EL0=%RGv SP_EL1=%RGv\n", (RTGCPTR)pVCpu->cpum.GstCtx.Pc.u64, 205 (RTGCPTR)pVCpu->cpum.GstCtx.aSpReg[0].u64, 206 (RTGCPTR)pVCpu->cpum.GstCtx.aSpReg[1].u64)); 211 (RTGCPTR)pVCpu->cpum.GstCtx.aSpReg[0].u64, (RTGCPTR)pVCpu->cpum.GstCtx.aSpReg[1].u64)); 207 212 if (pszPrefix) 208 213 { … … 210 215 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, pszPrefix); 211 216 } 212 # el se217 # elif defined(VBOX_VMM_TARGET_X86) 213 218 Log(("EMINS: %04x:%RGv RSP=%RGv\n", pVCpu->cpum.GstCtx.cs.Sel, (RTGCPTR)pVCpu->cpum.GstCtx.rip, (RTGCPTR)pVCpu->cpum.GstCtx.rsp)); 214 219 if (pszPrefix) … … 217 222 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, pszPrefix); 218 223 } 224 # else 225 # error "port me" 219 226 # endif 220 227 #endif … … 375 382 #ifdef VBOX_VMM_TARGET_ARMV8 376 383 LogFlow(("emR3NemExecute%d: (pc=%RGv)\n", pVCpu->idCpu, (RTGCPTR)pVCpu->cpum.GstCtx.Pc.u64)); 384 #elif defined(VBOX_VMM_TARGET_X86) 385 LogFlow(("emR3NemExecute%d: (cs:eip=%04x:%RGv)\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, (RTGCPTR)pVCpu->cpum.GstCtx.rip)); 377 386 #else 378 LogFlow(("emR3NemExecute%d: (cs:eip=%04x:%RGv)\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, (RTGCPTR)pVCpu->cpum.GstCtx.rip)); 387 # error "port me" 379 388 #endif 380 389 *pfFFDone = false; … … 411 420 } 412 421 413 #if defined(LOG_ENABLED) && !defined(VBOX_VMM_TARGET_ARMV8)422 #ifdef LOG_ENABLED 414 423 /* 415 424 * Log important stuff before entering GC. 416 425 */ 426 # ifdef VBOX_VMM_TARGET_X86 417 427 if (TRPMHasTrap(pVCpu)) 418 428 Log(("CPU%d: Pending hardware interrupt=0x%x cs:rip=%04X:%RGv\n", pVCpu->idCpu, TRPMGetTrapNo(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, (RTGCPTR)pVCpu->cpum.GstCtx.rip)); … … 441 451 } 442 452 } 453 # elif defined(VBOX_VMM_TARGET_ARMV8) 454 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_PC)) 455 { 456 /** @todo better logging */ 457 if (pVM->cCpus == 1) 458 Log(("NEM: %RX64\n", pVCpu->cpum.GstCtx.Pc.u64)); 459 else 460 Log(("NEM-CPU%d: %RX64\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64)); 461 } 462 # else 463 # error "port me" 464 # endif 443 465 else if (pVM->cCpus == 1) 444 466 Log(("NEMRx: -> NEMR3RunGC\n")); 445 467 else 446 Log(("NEMRx-CPU%u: -> NEMR3RunGC\n", 468 Log(("NEMRx-CPU%u: -> NEMR3RunGC\n", pVCpu->idCpu)); 447 469 #endif /* LOG_ENABLED */ 448 470 -
trunk/src/VBox/VMM/VMMR3/GIM.cpp
r106061 r107227 74 74 #include <iprt/string.h> 75 75 76 #if !defined(VBOX_VMM_TARGET_ARMV8)76 #if defined(VBOX_VMM_TARGET_X86) 77 77 /* Include all GIM providers. */ 78 78 # include "GIMMinimalInternal.h" … … 163 163 * 'most up-to-date implementation' version number when 0. Otherwise, 164 164 * we'll have abiguities when loading the state of older VMs. */ 165 #if !defined(VBOX_VMM_TARGET_ARMV8)165 #if defined(VBOX_VMM_TARGET_X86) 166 166 if (!RTStrCmp(szProvider, "Minimal")) 167 167 { … … 210 210 switch (pVM->gim.s.enmProviderId) 211 211 { 212 #if !defined(VBOX_VMM_TARGET_ARMV8)212 #if defined(VBOX_VMM_TARGET_X86) 213 213 case GIMPROVIDERID_MINIMAL: 214 214 return gimR3MinimalInitCompleted(pVM); … … 261 261 switch (pVM->gim.s.enmProviderId) 262 262 { 263 #if !defined(VBOX_VMM_TARGET_ARMV8)263 #if defined(VBOX_VMM_TARGET_X86) 264 264 case GIMPROVIDERID_HYPERV: 265 265 rc = gimR3HvSave(pVM, pSSM); … … 326 326 switch (pVM->gim.s.enmProviderId) 327 327 { 328 #if !defined(VBOX_VMM_TARGET_ARMV8)328 #if defined(VBOX_VMM_TARGET_X86) 329 329 case GIMPROVIDERID_HYPERV: 330 330 rc = gimR3HvLoad(pVM, pSSM); … … 350 350 static DECLCALLBACK(int) gimR3LoadDone(PVM pVM, PSSMHANDLE pSSM) 351 351 { 352 #if defined(VBOX_VMM_TARGET_ARMV8)353 352 RT_NOREF(pSSM); 354 #endif355 353 356 354 switch (pVM->gim.s.enmProviderId) 357 355 { 358 #if !defined(VBOX_VMM_TARGET_ARMV8)356 #if defined(VBOX_VMM_TARGET_X86) 359 357 case GIMPROVIDERID_HYPERV: 360 358 return gimR3HvLoadDone(pVM, pSSM); … … 379 377 switch (pVM->gim.s.enmProviderId) 380 378 { 381 #if !defined(VBOX_VMM_TARGET_ARMV8)379 #if defined(VBOX_VMM_TARGET_X86) 382 380 case GIMPROVIDERID_HYPERV: 383 381 return gimR3HvTerm(pVM); … … 387 385 #endif 388 386 default: 389 break; 390 } 391 return VINF_SUCCESS; 387 return VINF_SUCCESS; 388 } 392 389 } 393 390 … … 403 400 VMMR3_INT_DECL(void) GIMR3Relocate(PVM pVM, RTGCINTPTR offDelta) 404 401 { 405 #if defined(VBOX_VMM_TARGET_ARMV8)406 402 RT_NOREF(offDelta); 407 #endif408 403 409 404 switch (pVM->gim.s.enmProviderId) 410 405 { 411 #if !defined(VBOX_VMM_TARGET_ARMV8)406 #if defined(VBOX_VMM_TARGET_X86) 412 407 case GIMPROVIDERID_HYPERV: 413 408 gimR3HvRelocate(pVM, offDelta); … … 432 427 switch (pVM->gim.s.enmProviderId) 433 428 { 434 #if !defined(VBOX_VMM_TARGET_ARMV8)429 #if defined(VBOX_VMM_TARGET_X86) 435 430 case GIMPROVIDERID_HYPERV: 436 return gimR3HvReset(pVM); 431 gimR3HvReset(pVM); 432 break; 437 433 438 434 case GIMPROVIDERID_KVM: 439 return gimR3KvmReset(pVM); 435 gimR3KvmReset(pVM); 436 break; 440 437 #endif 441 438 default: … … 474 471 switch (pVM->gim.s.enmProviderId) 475 472 { 476 #if !defined(VBOX_VMM_TARGET_ARMV8)473 #if defined(VBOX_VMM_TARGET_X86) 477 474 case GIMPROVIDERID_HYPERV: 478 475 return gimR3HvGetDebugSetup(pVM, pDbgSetup); 479 476 #endif 480 477 default: 481 break; 482 } 483 return VERR_GIM_NO_DEBUG_CONNECTION; 478 return VERR_GIM_NO_DEBUG_CONNECTION; 479 } 484 480 } 485 481 -
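Besides swapping the guards, the GIM hunks tighten the provider dispatch: for functions that always fail on non-x86 builds, the default case now returns the status directly instead of falling through to code after the switch. A sketch of the reshaped GIMR3GetDebugSetup dispatch from the hunk above:

    switch (pVM->gim.s.enmProviderId)
    {
    #if defined(VBOX_VMM_TARGET_X86)
        case GIMPROVIDERID_HYPERV:
            return gimR3HvGetDebugSetup(pVM, pDbgSetup);
    #endif
        default:
            return VERR_GIM_NO_DEBUG_CONNECTION;
    }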
trunk/src/VBox/VMM/VMMR3/IEMR3.cpp
r106731 r107227 37 37 #include <VBox/vmm/mm.h> 38 38 #include <VBox/vmm/ssm.h> 39 /** @todo this isn't sustainable. */ 39 40 #if defined(VBOX_VMM_TARGET_ARMV8) 40 41 # include "IEMInternal-armv8.h" -
trunk/src/VBox/VMM/VMMR3/MM.cpp
r106061 r107227 171 171 #include <iprt/assert.h> 172 172 #include <iprt/string.h> 173 #if defined(VBOX_VMM_TARGET_ARMV8)173 #ifdef VBOX_VMM_TARGET_ARMV8 174 174 # include <iprt/file.h> 175 175 #endif … … 271 271 } 272 272 273 274 #if defined(VBOX_VMM_TARGET_ARMV8) 273 #ifdef VBOX_VMM_TARGET_ARMV8 274 275 275 /** 276 276 * Initializes the given RAM range with data from the given file. … … 287 287 if (RT_SUCCESS(rc)) 288 288 { 289 uint8_t abRead[GUEST_PAGE_SIZE];290 289 RTGCPHYS GCPhys = GCPhysStart; 291 290 292 291 for (;;) 293 292 { 294 size_t cbThisRead = 0; 293 uint8_t abRead[GUEST_PAGE_SIZE]; 294 size_t cbThisRead = 0; 295 295 rc = RTFileRead(hFile, &abRead[0], sizeof(abRead), &cbThisRead); 296 296 if (RT_FAILURE(rc)) … … 391 391 if (u64GCPhysStart >= _4G) 392 392 pVM->mm.s.cbRamAbove4GB += u64MemSize; 393 else if (u64GCPhysStart + u64MemSize > _4G) 393 else if (u64GCPhysStart + u64MemSize <= _4G) 394 pVM->mm.s.cbRamBelow4GB += (uint32_t)u64MemSize; 395 else 394 396 { 395 uint64_t c bRamAbove4GB = (u64GCPhysStart + u64MemSize)- _4G;397 uint64_t const cbRamAbove4GB = u64GCPhysStart + u64MemSize - _4G; 396 398 pVM->mm.s.cbRamAbove4GB += cbRamAbove4GB; 397 pVM->mm.s.cbRamBelow4GB += (u64MemSize - cbRamAbove4GB);399 pVM->mm.s.cbRamBelow4GB += u64MemSize - cbRamAbove4GB; 398 400 } 399 else 400 pVM->mm.s.cbRamBelow4GB += (uint32_t)u64MemSize; 401 } 402 403 return rc; 404 } 405 #endif 406 407 408 /** 409 * Initializes the MM parts which depends on PGM being initialized. 410 * 411 * @returns VBox status code. 412 * @param pVM The cross context VM structure. 413 * @remark No cleanup necessary since MMR3Term() will be called on failure. 414 */ 415 VMMR3DECL(int) MMR3InitPaging(PVM pVM) 416 { 417 LogFlow(("MMR3InitPaging:\n")); 418 419 /* 420 * Query the CFGM values. 421 */ 422 int rc; 423 PCFGMNODE pMMCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "MM"); 424 if (!pMMCfg) 425 { 426 rc = CFGMR3InsertNode(CFGMR3GetRoot(pVM), "MM", &pMMCfg); 427 AssertRCReturn(rc, rc); 428 } 429 430 #if defined(VBOX_VMM_TARGET_ARMV8) 431 rc = mmR3InitRamArmV8(pVM, pMMCfg); 432 #else 401 } 402 403 return rc; 404 } 405 406 #elif defined(VBOX_VMM_TARGET_X86) 407 408 /** 409 * RAM setup function for X86. 410 */ 411 static int mmR3InitRamX86(PVM pVM, PCFGMNODE pMMCfg) 412 { 413 RT_NOREF(pMMCfg); 414 433 415 /** @cfgm{/RamSize, uint64_t, 0, 16TB, 0} 434 416 * Specifies the size of the base RAM that is to be set up during 435 417 * VM initialization. 436 418 */ 437 uint64_t cbRam ;438 rc = CFGMR3QueryU64(CFGMR3GetRoot(pVM), "RamSize", &cbRam);419 uint64_t cbRam = 0; 420 int rc = CFGMR3QueryU64(CFGMR3GetRoot(pVM), "RamSize", &cbRam); 439 421 if (rc == VERR_CFGM_VALUE_NOT_FOUND) 440 422 cbRam = 0; … … 450 432 * to avoid mapping RAM to the range normally used for PCI memory regions. 451 433 * Must be aligned on a 4MB boundary. */ 452 uint32_t cbRamHole ;434 uint32_t cbRamHole = 0; 453 435 rc = CFGMR3QueryU32Def(CFGMR3GetRoot(pVM), "RamHoleSize", &cbRamHole, MM_RAM_HOLE_SIZE_DEFAULT); 454 436 AssertLogRelMsgRCReturn(rc, ("Configuration error: Failed to query integer \"RamHoleSize\", rc=%Rrc.\n", rc), rc); … … 561 543 } 562 544 } 563 #endif /* !VBOX_VMM_TARGET_ARMV8 */ 545 546 AssertMsg(pVM->mm.s.cBasePages == cBasePages || RT_FAILURE(rc), ("%RX64 != %RX64\n", pVM->mm.s.cBasePages, cBasePages)); 547 return rc; 548 } 549 550 #endif /* VBOX_VMM_TARGET_X86 */ 551 552 /** 553 * Initializes the MM parts which depends on PGM being initialized. 554 * 555 * @returns VBox status code. 556 * @param pVM The cross context VM structure. 
557 * @remark No cleanup necessary since MMR3Term() will be called on failure. 558 */ 559 VMMR3DECL(int) MMR3InitPaging(PVM pVM) 560 { 561 LogFlow(("MMR3InitPaging:\n")); 562 563 /* 564 * Query the CFGM values. 565 */ 566 int rc; 567 PCFGMNODE pMMCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "MM"); 568 if (!pMMCfg) 569 { 570 rc = CFGMR3InsertNode(CFGMR3GetRoot(pVM), "MM", &pMMCfg); 571 AssertRCReturn(rc, rc); 572 } 573 574 #ifdef VBOX_VMM_TARGET_ARMV8 575 rc = mmR3InitRamArmV8(pVM, pMMCfg); 576 #elif defined(VBOX_VMM_TARGET_X86) 577 rc = mmR3InitRamX86(pVM, pMMCfg); 578 #else 579 # error "port me" 580 #endif 564 581 565 582 /* … … 568 585 */ 569 586 pVM->mm.s.fDoneMMR3InitPaging = true; 570 #if !defined(VBOX_VMM_TARGET_ARMV8)571 AssertMsg(pVM->mm.s.cBasePages == cBasePages || RT_FAILURE(rc), ("%RX64 != %RX64\n", pVM->mm.s.cBasePages, cBasePages));572 #endif573 587 574 588 LogFlow(("MMR3InitPaging: returns %Rrc\n", rc)); -
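The mmR3InitRamArmV8 hunk above also reorders the below/above-4GiB RAM accounting so the two non-straddling cases come first and the split amount becomes const; the behaviour is unchanged. The resulting arithmetic, as in the diff:

    if (u64GCPhysStart >= _4G)                      /* entirely above 4GiB */
        pVM->mm.s.cbRamAbove4GB += u64MemSize;
    else if (u64GCPhysStart + u64MemSize <= _4G)    /* entirely below 4GiB */
        pVM->mm.s.cbRamBelow4GB += (uint32_t)u64MemSize;
    else                                            /* straddles the boundary */
    {
        uint64_t const cbRamAbove4GB = u64GCPhysStart + u64MemSize - _4G;
        pVM->mm.s.cbRamAbove4GB += cbRamAbove4GB;
        pVM->mm.s.cbRamBelow4GB += u64MemSize - cbRamAbove4GB;
    }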
trunk/src/VBox/VMM/VMMR3/NEMR3.cpp
r106951 r107227 103 103 "|VmxLbr" 104 104 #endif 105 #if defined(VBOX_VMM_TARGET_ARMV8)105 #ifdef VBOX_VMM_TARGET_ARMV8 106 106 "|VTimerInterrupt" 107 107 #endif … … 140 140 } 141 141 142 #if defined(VBOX_VMM_TARGET_ARMV8)142 #ifdef VBOX_VMM_TARGET_ARMV8 143 143 /** @cfgm{/NEM/VTimerInterrupt, uint32_t} 144 144 * Specifies the interrupt identifier for the VTimer. */ … … 182 182 if (pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API) 183 183 { 184 #ifndef VBOX_VMM_TARGET_ARMV8 /* NEM is the only option on ARM for now, so calling it turtle and snail mode 185 * is a bit unfair as long as we don't have a native hypervisor to compare against :). */ 184 #ifndef VBOX_WITH_HWVIRT /* Don't complain if there are no other alternatives. */ 186 185 # ifdef RT_OS_WINDOWS /* The WHv* API is extremely slow at handling VM exits. The AppleHv and 187 186 KVM APIs are much faster, thus the different mode name. :-) */ -
trunk/src/VBox/VMM/VMMR3/NEMR3NativeTemplate-linux.cpp.h
r104840 r107227 66 66 67 67 CAP_ENTRY__L(KVM_CAP_IRQCHIP), /* 0 */ 68 #ifdef VBOX_VMM_TARGET_ARMV8 68 #ifdef VBOX_VMM_TARGET_X86 69 CAP_ENTRY_ML(KVM_CAP_HLT), 70 #else 69 71 CAP_ENTRY__L(KVM_CAP_HLT), 70 #else71 CAP_ENTRY_ML(KVM_CAP_HLT),72 72 #endif 73 73 CAP_ENTRY__L(KVM_CAP_MMU_SHADOW_CACHE_CONTROL), … … 120 120 CAP_ENTRY__L(KVM_CAP_XEN_HVM), 121 121 #endif 122 #ifdef VBOX_VMM_TARGET_ARMV8 122 #ifdef VBOX_VMM_TARGET_X86 123 CAP_ENTRY_ML(KVM_CAP_ADJUST_CLOCK), 124 #else 123 125 CAP_ENTRY__L(KVM_CAP_ADJUST_CLOCK), 124 #else125 CAP_ENTRY_ML(KVM_CAP_ADJUST_CLOCK),126 126 #endif 127 127 CAP_ENTRY__L(KVM_CAP_INTERNAL_ERROR_DATA), /* 40 */ … … 142 142 CAP_ENTRY__L(KVM_CAP_DEBUGREGS), /* 50 */ 143 143 #endif 144 #ifdef VBOX_VMM_TARGET_ARMV8 144 #ifdef VBOX_VMM_TARGET_X86 145 CAP_ENTRY__S(KVM_CAP_X86_ROBUST_SINGLESTEP, fRobustSingleStep), 146 #else 145 147 CAP_ENTRY__L(KVM_CAP_X86_ROBUST_SINGLESTEP), 146 #else147 CAP_ENTRY__S(KVM_CAP_X86_ROBUST_SINGLESTEP, fRobustSingleStep),148 148 #endif 149 149 CAP_ENTRY__L(KVM_CAP_PPC_OSI), … … 305 305 CAP_ENTRY__L(KVM_CAP_S390_DIAG318), 306 306 CAP_ENTRY__L(KVM_CAP_STEAL_TIME), 307 #ifdef VBOX_VMM_TARGET_ARMV8 307 #ifdef VBOX_VMM_TARGET_X86 308 CAP_ENTRY_ML(KVM_CAP_X86_USER_SPACE_MSR), /* (since 5.10) */ 309 CAP_ENTRY_ML(KVM_CAP_X86_MSR_FILTER), 310 #else 308 311 CAP_ENTRY__L(KVM_CAP_X86_USER_SPACE_MSR), /* (since 5.10) */ 309 312 CAP_ENTRY__L(KVM_CAP_X86_MSR_FILTER), 310 #else311 CAP_ENTRY_ML(KVM_CAP_X86_USER_SPACE_MSR), /* (since 5.10) */312 CAP_ENTRY_ML(KVM_CAP_X86_MSR_FILTER),313 313 #endif 314 314 CAP_ENTRY__L(KVM_CAP_ENFORCE_PV_FEATURE_CPUID), /* 190 */ -
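In the KVM capability table the same inversion is applied entry by entry: capabilities that only matter for x86 guests are now keyed on VBOX_VMM_TARGET_X86 rather than "not ARMv8". A representative pair from the hunk; note that the CAP_ENTRY_ML/CAP_ENTRY__L macro semantics are VBox-internal, and reading them as "mandatory + logged" versus "logged only" is an assumption here:

    #ifdef VBOX_VMM_TARGET_X86
        CAP_ENTRY_ML(KVM_CAP_ADJUST_CLOCK),   /* required on x86 (assumed: ML = mandatory, logged) */
    #else
        CAP_ENTRY__L(KVM_CAP_ADJUST_CLOCK),   /* merely logged on other targets */
    #endif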
trunk/src/VBox/VMM/VMMR3/PDM.cpp
r107113 r107227 1031 1031 { 1032 1032 PVMCPU pVCpu = pVM->apCpusR3[idCpu]; 1033 #if defined(VBOX_VMM_TARGET_ARMV8)1033 #ifdef VBOX_VMM_TARGET_ARMV8 1034 1034 SSMR3PutU32(pSSM, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ)); 1035 1035 SSMR3PutU32(pSSM, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_FIQ)); 1036 #el se1036 #elif defined(VBOX_VMM_TARGET_X86) 1037 1037 SSMR3PutU32(pSSM, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC)); 1038 1038 SSMR3PutU32(pSSM, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC)); 1039 #else 1040 # error "port me" 1039 1041 #endif 1040 1042 SSMR3PutU32(pSSM, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)); … … 1066 1068 { 1067 1069 PVMCPU pVCpu = pVM->apCpusR3[idCpu]; 1068 # if defined(VBOX_VMM_TARGET_ARMV8)1070 # ifdef VBOX_VMM_TARGET_ARMV8 1069 1071 LogFlow(("pdmR3LoadPrep: VCPU %u %s%s\n", idCpu, 1070 1072 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ) ? " VMCPU_FF_INTERRUPT_IRQ" : "", 1071 1073 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_FIQ) ? " VMCPU_FF_INTERRUPT_FIQ" : "")); 1072 # else1074 # elif defined(VBOX_VMM_TARGET_X86) 1073 1075 LogFlow(("pdmR3LoadPrep: VCPU %u %s%s\n", idCpu, 1074 1076 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC) ? " VMCPU_FF_INTERRUPT_APIC" : "", 1075 1077 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC) ? " VMCPU_FF_INTERRUPT_PIC" : "")); 1076 #endif 1078 # else 1079 # error "port me" 1080 # endif 1077 1081 } 1078 1082 #endif … … 1090 1094 { 1091 1095 PVMCPU pVCpu = pVM->apCpusR3[idCpu]; 1092 #if defined(VBOX_VMM_TARGET_ARMV8)1096 #ifdef VBOX_VMM_TARGET_ARMV8 1093 1097 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_IRQ); 1094 1098 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_FIQ); 1095 #el se1099 #elif defined(VBOX_VMM_TARGET_X86) 1096 1100 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_APIC); 1097 1101 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_PIC); 1102 #else 1103 # error "port me" 1098 1104 #endif 1099 1105 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI); … … 1157 1163 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED; 1158 1164 } 1159 #if defined(VBOX_VMM_TARGET_ARMV8)1165 #ifdef VBOX_VMM_TARGET_ARMV8 1160 1166 AssertLogRelMsg(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ), 1161 1167 ("VCPU%03u: VMCPU_FF_INTERRUPT_IRQ set! Devices shouldn't set interrupts during state restore...\n", idCpu)); 1162 1168 if (fInterruptPending) 1163 1169 VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ); 1164 #else 1170 1171 #elif defined(VBOX_VMM_TARGET_X86) 1165 1172 AssertLogRelMsg(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC), 1166 1173 ("VCPU%03u: VMCPU_FF_INTERRUPT_APIC set! Devices shouldn't set interrupts during state restore...\n", idCpu)); 1167 1174 if (fInterruptPending) 1168 1175 VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC); 1176 #else 1177 # error "port me" 1169 1178 #endif 1170 1179 … … 1179 1188 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED; 1180 1189 } 1181 #if defined(VBOX_VMM_TARGET_ARMV8)1190 #ifdef VBOX_VMM_TARGET_ARMV8 1182 1191 AssertLogRelMsg(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_FIQ), 1183 1192 ("VCPU%03u: VMCPU_FF_INTERRUPT_FIQ set! Devices shouldn't set interrupts during state restore...\n", idCpu)); 1184 1193 if (fInterruptPending) 1185 1194 VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_FIQ); 1186 #else 1195 1196 #elif defined(VBOX_VMM_TARGET_X86) 1187 1197 AssertLogRelMsg(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC), 1188 1198 ("VCPU%03u: VMCPU_FF_INTERRUPT_PIC set! 
Devices shouldn't set interrupts during state restore...\n", idCpu)); 1189 1199 if (fInterruptPending) 1190 1200 VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC); 1201 #else 1202 # error "port me" 1191 1203 #endif 1192 1204 … … 1730 1742 VMMR3_INT_DECL(void) PDMR3ResetCpu(PVMCPU pVCpu) 1731 1743 { 1732 #if defined(VBOX_VMM_TARGET_ARMV8)1744 #ifdef VBOX_VMM_TARGET_ARMV8 1733 1745 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_IRQ); 1734 1746 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_FIQ); 1735 #el se1747 #elif defined(VBOX_VMM_TARGET_X86) 1736 1748 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_APIC); 1737 1749 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_PIC); 1750 #else 1751 # error "port me" 1738 1752 #endif 1739 1753 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI); -
trunk/src/VBox/VMM/VMMR3/PDMDevMiscHlp.cpp
r107113 r107227 35 35 #include <VBox/vmm/pgm.h> 36 36 #include <VBox/vmm/hm.h> 37 #if ndef VBOX_VMM_TARGET_ARMV837 #ifdef VBOX_VMM_TARGET_X86 38 38 # include <VBox/vmm/pdmapic.h> 39 39 #endif … … 67 67 Assert(pVM->enmVMState != VMSTATE_LOADING || pVM->pdm.s.fStateLoaded); 68 68 69 #if defined(VBOX_VMM_TARGET_ARMV8) 69 #ifdef VBOX_VMM_TARGET_X86 70 PVMCPU pVCpu = pVM->apCpusR3[0]; /* for PIC we always deliver to CPU 0, SMP uses APIC */ 71 PDMApicSetLocalInterrupt(pVCpu, 0 /* u8Pin */, 1 /* u8Level */, VINF_SUCCESS /* rcRZ */); 72 #else 70 73 AssertReleaseFailed(); 71 74 RT_NOREF(pVM); 72 #else73 PVMCPU pVCpu = pVM->apCpusR3[0]; /* for PIC we always deliver to CPU 0, SMP uses APIC */74 PDMApicSetLocalInterrupt(pVCpu, 0 /* u8Pin */, 1 /* u8Level */, VINF_SUCCESS /* rcRZ */);75 75 #endif 76 76 } … … 86 86 Assert(pVM->enmVMState != VMSTATE_LOADING || pVM->pdm.s.fStateLoaded); 87 87 88 #if defined(VBOX_VMM_TARGET_ARMV8) 88 #ifdef VBOX_VMM_TARGET_X86 89 PVMCPU pVCpu = pVM->apCpusR3[0]; /* for PIC we always deliver to CPU 0, SMP uses APIC */ 90 PDMApicSetLocalInterrupt(pVCpu, 0 /* u8Pin */, 0 /* u8Level */, VINF_SUCCESS /* rcRZ */); 91 #else 89 92 AssertReleaseFailed(); 90 93 RT_NOREF(pVM); 91 #else92 PVMCPU pVCpu = pVM->apCpusR3[0]; /* for PIC we always deliver to CPU 0, SMP uses APIC */93 PDMApicSetLocalInterrupt(pVCpu, 0 /* u8Pin */, 0 /* u8Level */, VINF_SUCCESS /* rcRZ */);94 94 #endif 95 95 } … … 140 140 LogFlow(("pdmR3IoApicHlp_ApicBusDeliver: caller='%s'/%d: u8Dest=%RX8 u8DestMode=%RX8 u8DeliveryMode=%RX8 uVector=%RX8 u8Polarity=%RX8 u8TriggerMode=%RX8 uTagSrc=%#x\n", 141 141 pDevIns->pReg->szName, pDevIns->iInstance, u8Dest, u8DestMode, u8DeliveryMode, uVector, u8Polarity, u8TriggerMode, uTagSrc)); 142 #if defined(VBOX_VMM_TARGET_ARMV8) 142 #ifdef VBOX_VMM_TARGET_X86 143 PVM pVM = pDevIns->Internal.s.pVMR3; 144 return PDMApicBusDeliver(pVM, u8Dest, u8DestMode, u8DeliveryMode, uVector, u8Polarity, u8TriggerMode, uTagSrc); 145 #else 143 146 AssertReleaseFailed(); 144 147 RT_NOREF(pDevIns, u8Dest, u8DestMode, u8DeliveryMode, uVector, u8Polarity, u8TriggerMode, uTagSrc); 145 148 return VERR_NOT_IMPLEMENTED; 146 #else147 PVM pVM = pDevIns->Internal.s.pVMR3;148 return PDMApicBusDeliver(pVM, u8Dest, u8DestMode, u8DeliveryMode, uVector, u8Polarity, u8TriggerMode, uTagSrc);149 149 #endif 150 150 } -
trunk/src/VBox/VMM/VMMR3/PDMDevice.cpp
r107194 r107227 34 34 #include "PDMInternal.h" 35 35 #include <VBox/vmm/pdm.h> 36 #if defined(VBOX_VMM_TARGET_ARMV8)36 #ifdef VBOX_VMM_TARGET_ARMV8 37 37 # include <VBox/vmm/gic.h> 38 38 # include <VBox/vmm/pmu.h> 39 #el se39 #elif defined(VBOX_VMM_TARGET_X86) 40 40 # include <VBox/vmm/pdmapic.h> 41 41 #endif … … 700 700 RegCB.pCfgNode = NULL; 701 701 702 #if defined(VBOX_VMM_TARGET_ARMV8) 703 /* 704 * Register the internal VMM GIC device. 705 */ 702 /* 703 * Register internal VMM devices. 704 */ 705 #ifdef VBOX_VMM_TARGET_ARMV8 706 /* Register the internal VMM GIC device. */ 706 707 int rc = pdmR3DevReg_Register(&RegCB.Core, &g_DeviceGIC); 707 708 AssertRCReturn(rc, rc); 708 709 709 /* 710 * Register the internal VMM GIC device, NEM variant. 711 */ 710 /* Register the internal VMM GIC device, NEM variant. */ 712 711 rc = pdmR3DevReg_Register(&RegCB.Core, &g_DeviceGICNem); 713 712 AssertRCReturn(rc, rc); 714 713 715 /* 716 * Register the internal VMM PMU device. 717 */ 714 /* Register the internal VMM PMU device. */ 718 715 rc = pdmR3DevReg_Register(&RegCB.Core, &g_DevicePMU); 719 716 AssertRCReturn(rc, rc); 720 #e lse721 /* 722 * Register the internal VMM APIC device. 723 */717 #endif 718 719 #ifdef VBOX_VMM_TARGET_X86 720 /* Register the internal VMM APIC device. */ 724 721 int rc = pdmR3DevReg_Register(&RegCB.Core, &g_DeviceAPIC); 725 722 AssertRCReturn(rc, rc); -
trunk/src/VBox/VMM/VMMR3/PGM.cpp
r107194 r107227 2000 2000 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL); 2001 2001 2002 #if !defined(VBOX_VMM_TARGET_ARMV8)2002 #ifdef VBOX_VMM_TARGET_X86 2003 2003 if (!pVCpu->pgm.s.fA20Enabled) 2004 2004 { -
trunk/src/VBox/VMM/VMMR3/PGMDbg.cpp
r107171 r107227 1084 1084 else 1085 1085 { 1086 #if ndef VBOX_VMM_TARGET_ARMV81086 #ifdef VBOX_VMM_TARGET_X86 1087 1087 Assert(WalkGst.enmType != PGMPTWALKGSTTYPE_INVALID); 1088 1088 #endif … … 1095 1095 */ 1096 1096 uint64_t cPagesCanSkip; 1097 #if ndef VBOX_VMM_TARGET_ARMV81097 #ifdef VBOX_VMM_TARGET_X86 1098 1098 switch (Walk.uLevel) 1099 1099 { … … 1144 1144 continue; 1145 1145 } 1146 #else 1146 1147 #elif defined(VBOX_VMM_TARGET_ARMV8) 1148 1147 1149 /** @todo Sketch, needs creating proper defines for constants in armv8.h and using these 1148 1150 * instead of hardcoding these here. */ … … 1178 1180 continue; 1179 1181 } 1182 #else 1183 # error "port me" 1180 1184 #endif 1181 1185 } … … 1185 1189 break; 1186 1190 cPages -= cIncPages; 1187 #if ndef VBOX_VMM_TARGET_ARMV81191 #ifdef VBOX_VMM_TARGET_X86 1188 1192 GCPtr += (RTGCPTR)cIncPages << X86_PT_PAE_SHIFT; 1193 #elif defined(VBOX_VMM_TARGET_ARMV8) 1194 GCPtr += (RTGCPTR)cIncPages << 12; 1189 1195 #else 1190 GCPtr += (RTGCPTR)cIncPages << 12; 1196 # error "port me" 1191 1197 #endif 1192 1198 -
trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp
r107194 r107227 6322 6322 *********************************************************************************************************************************/ 6323 6323 6324 #if !defined(VBOX_VMM_TARGET_ARMV8)6324 #ifdef VBOX_VMM_TARGET_X86 6325 6325 /** 6326 6326 * Sets the Address Gate 20 state. … … 6335 6335 if (pVCpu->pgm.s.fA20Enabled != fEnable) 6336 6336 { 6337 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX6337 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX 6338 6338 PCCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu); 6339 6339 if ( CPUMIsGuestInVmxRootMode(pCtx) … … 6343 6343 return; 6344 6344 } 6345 # endif6345 # endif 6346 6346 pVCpu->pgm.s.fA20Enabled = fEnable; 6347 6347 pVCpu->pgm.s.GCPhysA20Mask = ~((RTGCPHYS)!fEnable << 20); 6348 6348 if (VM_IS_NEM_ENABLED(pVCpu->CTX_SUFF(pVM))) 6349 6349 NEMR3NotifySetA20(pVCpu, fEnable); 6350 # ifdef PGM_WITH_A206350 # ifdef PGM_WITH_A20 6351 6351 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); 6352 6352 pgmR3RefreshShadowModeAfterA20Change(pVCpu); 6353 6353 HMFlushTlb(pVCpu); 6354 # endif6355 # if 0 /* PGMGetPage will apply the A20 mask to the GCPhys it returns, so we must invalid both sides of the TLB. */6354 # endif 6355 # if 0 /* PGMGetPage will apply the A20 mask to the GCPhys it returns, so we must invalid both sides of the TLB. */ 6356 6356 IEMTlbInvalidateAllPhysical(pVCpu); 6357 # else6357 # else 6358 6358 IEMTlbInvalidateAllGlobal(pVCpu); 6359 # endif6359 # endif 6360 6360 STAM_REL_COUNTER_INC(&pVCpu->pgm.s.cA20Changes); 6361 6361 } 6362 6362 } 6363 #endif 6364 6363 #endif /* VBOX_VMM_TARGET_X86 */ 6364 -
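PGMR3PhysSetA20, together with its declaration in pgm.h earlier in the changeset, is now compiled for x86 targets only, which is the only place an A20 gate exists. The mask update it performs is terse enough to be worth spelling out:

    pVCpu->pgm.s.GCPhysA20Mask = ~((RTGCPHYS)!fEnable << 20);
    /* fEnable == true:  !fEnable == 0 -> mask = ~0, addresses pass through unchanged.
       fEnable == false: !fEnable == 1 -> mask has all bits set except bit 20, so
       bit 20 of every guest-physical address is forced to zero, emulating the
       gated A20 line. */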
trunk/src/VBox/VMM/VMMR3/PGMSavedState.cpp
r107171 r107227 3331 3331 AssertLogRelRCReturn(rc, rc); 3332 3332 3333 #if !defined(VBOX_VMM_TARGET_ARMV8)3333 #ifdef VBOX_VMM_TARGET_X86 3334 3334 /* Update the PSE, NX flags and validity masks. */ 3335 3335 pVCpu->pgm.s.fGst32BitPageSizeExtension = CPUMIsGuestPageSizeExtEnabled(pVCpu); -
trunk/src/VBox/VMM/VMMR3/TM.cpp
r107220 r107227 818 818 PVMCPU pVCpu = pVM->apCpusR3[i]; 819 819 820 #if defined(VBOX_VMM_TARGET_ARMV8)820 #ifdef VBOX_VMM_TARGET_ARMV8 821 821 pVCpu->cNsVTimerActivate = UINT64_MAX; 822 822 #endif … … 1315 1315 pVCpu->tm.s.u64TSC = 0; 1316 1316 pVCpu->tm.s.u64TSCLastSeen = 0; 1317 #if defined(VBOX_VMM_TARGET_ARMV8)1317 #ifdef VBOX_VMM_TARGET_ARMV8 1318 1318 pVCpu->cNsVTimerActivate = UINT64_MAX; 1319 1319 #endif -
trunk/src/VBox/VMM/VMMR3/TRPM.cpp
r106061 r107227 387 387 VMMR3DECL(int) TRPMR3InjectEvent(PVM pVM, PVMCPU pVCpu, TRPMEVENT enmEvent, bool *pfInjected) 388 388 { 389 #if defined(VBOX_VMM_TARGET_ARMV8) 390 RT_NOREF(pVM, pVCpu, enmEvent, pfInjected); 391 AssertReleaseFailed(); 392 return VERR_NOT_IMPLEMENTED; 393 #else 389 #ifdef VBOX_VMM_TARGET_X86 394 390 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu); 395 391 Assert(!CPUMIsInInterruptShadow(pCtx)); … … 453 449 return VINF_EM_RESCHEDULE; 454 450 # endif 455 #endif 451 452 #else /* !VBOX_VMM_TARGET_X86 */ 453 RT_NOREF(pVM, pVCpu, enmEvent, pfInjected); 454 AssertReleaseFailed(); 455 return VERR_NOT_IMPLEMENTED; 456 #endif /* !VBOX_VMM_TARGET_X86 */ 456 457 } 457 458 -
trunk/src/VBox/VMM/VMMR3/VM.cpp
r107194 r107227 875 875 if (RT_SUCCESS(rc)) 876 876 { 877 #if !defined(VBOX_VMM_TARGET_ARMV8)877 #ifdef VBOX_VMM_TARGET_X86 878 878 rc = SELMR3Init(pVM); 879 if (RT_SUCCESS(rc)) 879 880 #endif 880 if (RT_SUCCESS(rc))881 881 { 882 882 rc = TRPMR3Init(pVM); … … 906 906 if (RT_SUCCESS(rc)) 907 907 { 908 #if !defined(VBOX_VMM_TARGET_ARMV8)908 #ifdef VBOX_VMM_TARGET_X86 909 909 rc = GCMR3Init(pVM); 910 if (RT_SUCCESS(rc)) 910 911 #endif 911 if (RT_SUCCESS(rc))912 912 { 913 913 rc = PDMR3Init(pVM); … … 933 933 AssertRC(rc2); 934 934 } 935 #if !defined(VBOX_VMM_TARGET_ARMV8)935 #ifdef VBOX_VMM_TARGET_X86 936 936 int rc2 = GCMR3Term(pVM); 937 937 AssertRC(rc2); … … 958 958 AssertRC(rc2); 959 959 } 960 #if !defined(VBOX_VMM_TARGET_ARMV8)960 #ifdef VBOX_VMM_TARGET_X86 961 961 int rc2 = SELMR3Term(pVM); 962 962 AssertRC(rc2); … … 1081 1081 CPUMR3Relocate(pVM); 1082 1082 HMR3Relocate(pVM); 1083 #if !defined(VBOX_VMM_TARGET_ARMV8)1083 #ifdef VBOX_VMM_TARGET_X86 1084 1084 SELMR3Relocate(pVM); 1085 1085 #endif 1086 1086 VMMR3Relocate(pVM, offDelta); 1087 #if !defined(VBOX_VMM_TARGET_ARMV8)1087 #ifdef VBOX_VMM_TARGET_X86 1088 1088 SELMR3Relocate(pVM); /* !hack! fix stack! */ 1089 1089 #endif … … 2237 2237 rc = TRPMR3Term(pVM); 2238 2238 AssertRC(rc); 2239 #if !defined(VBOX_VMM_TARGET_ARMV8)2239 #ifdef VBOX_VMM_TARGET_X86 2240 2240 rc = SELMR3Term(pVM); 2241 2241 AssertRC(rc); … … 2621 2621 PDMR3Reset(pVM); 2622 2622 PGMR3Reset(pVM); 2623 #if !defined(VBOX_VMM_TARGET_ARMV8)2623 #ifdef VBOX_VMM_TARGET_X86 2624 2624 SELMR3Reset(pVM); 2625 2625 #endif … … 4241 4241 switch (pVM->bMainExecutionEngine) 4242 4242 { 4243 #if !defined(VBOX_VMM_TARGET_ARMV8)4243 #ifdef VBOX_WITH_HWVIRT 4244 4244 case VM_EXEC_ENGINE_HW_VIRT: 4245 4245 return HMIsLongModeAllowed(pVM); … … 4248 4248 case VM_EXEC_ENGINE_NATIVE_API: 4249 4249 return NEMHCIsLongModeAllowed(pVM); 4250 4251 case VM_EXEC_ENGINE_IEM: 4252 return true; 4250 4253 4251 4254 case VM_EXEC_ENGINE_NOT_SET: -
trunk/src/VBox/VMM/VMMR3/VMEmt.cpp
r106589 r107227 568 568 } 569 569 570 #if defined(VBOX_VMM_TARGET_ARMV8)571 uint64_t cNsVTimerActivate = TMCpuGetVTimerActivationNano(pVCpu);572 const bool fVTimerActive = cNsVTimerActivate != UINT64_MAX;570 #ifdef VBOX_VMM_TARGET_ARMV8 571 uint64_t cNsVTimerActivate = TMCpuGetVTimerActivationNano(pVCpu); 572 const bool fVTimerActive = cNsVTimerActivate != UINT64_MAX; 573 573 #endif 574 574 … … 590 590 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_EXTERNAL_HALTED_MASK) 591 591 || VMCPU_FF_IS_ANY_SET(pVCpu, fMask) 592 #if defined(VBOX_VMM_TARGET_ARMV8)592 #ifdef VBOX_VMM_TARGET_ARMV8 593 593 || cNsElapsedTimers >= cNsVTimerActivate 594 594 #endif 595 595 ) 596 596 { 597 #if defined(VBOX_VMM_TARGET_ARMV8)597 #ifdef VBOX_VMM_TARGET_ARMV8 598 598 cNsVTimerActivate = 0; 599 599 #endif … … 601 601 } 602 602 603 #if defined(VBOX_VMM_TARGET_ARMV8)603 #ifdef VBOX_VMM_TARGET_ARMV8 604 604 cNsVTimerActivate -= cNsElapsedTimers; 605 605 #endif … … 614 614 break; 615 615 616 #if defined(VBOX_VMM_TARGET_ARMV8)616 #ifdef VBOX_VMM_TARGET_ARMV8 617 617 u64NanoTS = RT_MIN(cNsVTimerActivate, u64NanoTS); 618 618 #endif … … 679 679 fBlockOnce = false; 680 680 681 #if defined(VBOX_VMM_TARGET_ARMV8)681 #ifdef VBOX_VMM_TARGET_ARMV8 682 682 cNsVTimerActivate -= RT_MIN(cNsVTimerActivate, Elapsed); 683 683 /* Did the vTimer expire? */ … … 689 689 //if (fSpinning) RTLogRelPrintf("spun for %RU64 ns %u loops; lag=%RU64 pct=%d\n", RTTimeNanoTS() - u64Now, cLoops, TMVirtualSyncGetLag(pVM), u32CatchUpPct); 690 690 691 #if defined(VBOX_VMM_TARGET_ARMV8)691 #ifdef VBOX_VMM_TARGET_ARMV8 692 692 if (fVTimerActive) 693 693 { … … 1157 1157 * Check Relevant FFs. 1158 1158 */ 1159 #if defined(VBOX_VMM_TARGET_ARMV8)1160 const uint64_t fMaskI nterrupts = ((fFlags & VMWAITHALTED_F_IGNORE_IRQS) ? VMCPU_FF_INTERRUPT_IRQ : 0)1159 #ifdef VBOX_VMM_TARGET_ARMV8 1160 const uint64_t fMaskIrqs = ((fFlags & VMWAITHALTED_F_IGNORE_IRQS) ? VMCPU_FF_INTERRUPT_IRQ : 0) 1161 1161 | ((fFlags & VMWAITHALTED_F_IGNORE_FIQS) ? VMCPU_FF_INTERRUPT_FIQ : 0); 1162 const uint64_t fMask = VMCPU_FF_EXTERNAL_HALTED_MASK & ~fMaskInterrupts;1162 const uint64_t fMask = VMCPU_FF_EXTERNAL_HALTED_MASK & ~fMaskIrqs; 1163 1163 #else 1164 const uint64_t fMask = !(fFlags & VMWAITHALTED_F_IGNORE_IRQS)1164 const uint64_t fMask = !(fFlags & VMWAITHALTED_F_IGNORE_IRQS) 1165 1165 ? VMCPU_FF_EXTERNAL_HALTED_MASK 1166 1166 : VMCPU_FF_EXTERNAL_HALTED_MASK & ~(VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC); -
trunk/src/VBox/VMM/VMMR3/VMM.cpp
r107194 r107227 130 130 #include <VBox/sup.h> 131 131 #include <VBox/vmm/dbgf.h> 132 #if defined(VBOX_VMM_TARGET_ARMV8)132 #ifdef VBOX_VMM_TARGET_ARMV8 133 133 # include <VBox/vmm/gic.h> 134 #el se134 #elif defined(VBOX_VMM_TARGET_X86) 135 135 # include <VBox/vmm/pdmapic.h> 136 136 #endif … … 146 146 #include <iprt/assert.h> 147 147 #include <iprt/alloc.h> 148 #if defined(VBOX_VMM_TARGET_ARMV8)148 #ifdef VBOX_VMM_TARGET_ARMV8 149 149 # include <iprt/armv8.h> 150 150 #endif … … 580 580 case VMINITCOMPLETED_HM: 581 581 { 582 #if !defined(VBOX_VMM_TARGET_ARMV8)582 #ifdef VBOX_VMM_TARGET_X86 583 583 /* 584 584 * Disable the periodic preemption timers if we can use the … … 1214 1214 1215 1215 #ifdef VBOX_WITH_HWVIRT 1216 1216 # ifndef VBOX_VMM_TARGET_X86 1217 # error "config error" 1218 # endif 1217 1219 /** 1218 1220 * Executes guest code (Intel VT-x and AMD-V). … … 1223 1225 VMMR3_INT_DECL(int) VMMR3HmRunGC(PVM pVM, PVMCPU pVCpu) 1224 1226 { 1225 # if defined(VBOX_VMM_TARGET_ARMV8)1226 /* We should actually never get here as the only execution engine is NEM. */1227 RT_NOREF(pVM, pVCpu);1228 AssertReleaseFailed();1229 return VERR_NOT_SUPPORTED;1230 # else1231 1227 Log2(("VMMR3HmRunGC: (cs:rip=%04x:%RX64)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu))); 1232 1228 … … 1234 1230 do 1235 1231 { 1236 # 1232 # ifdef NO_SUPCALLR0VMM 1237 1233 rc = VERR_GENERAL_FAILURE; 1238 # 1234 # else 1239 1235 rc = SUPR3CallVMMR0Fast(VMCC_GET_VMR0_FOR_CALL(pVM), VMMR0_DO_HM_RUN, pVCpu->idCpu); 1240 1236 if (RT_LIKELY(rc == VINF_SUCCESS)) 1241 1237 rc = pVCpu->vmm.s.iLastGZRc; 1242 # 1238 # endif 1243 1239 } while (rc == VINF_EM_RAW_INTERRUPT_HYPER); 1244 1240 1245 # 1241 # if 0 /** @todo triggers too often */ 1246 1242 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3)); 1247 # 1243 # endif 1248 1244 1249 1245 /* … … 1260 1256 } 1261 1257 return vmmR3HandleRing0Assert(pVM, pVCpu); 1262 # endif1263 1258 } 1264 1259 #endif /* VBOX_WITH_HWVIRT */ 1265 1260 1266 1261 1267 #if defined(VBOX_VMM_TARGET_ARMV8)1262 #ifdef VBOX_VMM_TARGET_ARMV8 1268 1263 1269 1264 /** … … 1318 1313 } 1319 1314 1320 #el se /* !VBOX_VMM_TARGET_ARMV8 */1315 #elif defined(VBOX_VMM_TARGET_X86) 1321 1316 1322 1317 /** … … 1404 1399 PGMR3ResetCpu(pVM, pVCpu); 1405 1400 PDMR3ResetCpu(pVCpu); /* Only clears pending interrupts force flags */ 1406 # if !defined(VBOX_VMM_TARGET_ARMV8)1407 1401 PDMR3ApicInitIpi(pVCpu); 1408 # endif1409 1402 TRPMR3ResetCpu(pVCpu); 1410 1403 CPUMR3ResetCpu(pVM, pVCpu); … … 1449 1442 } 1450 1443 1451 #endif /* !VBOX_VMM_TARGET_ARMV8*/1444 #endif /* VBOX_VMM_TARGET_X86 */ 1452 1445 1453 1446 /** … … 2582 2575 c = 0; 2583 2576 f = fLocalForcedActions; 2584 #if defined(VBOX_VMM_TARGET_ARMV8)2577 #ifdef VBOX_VMM_TARGET_ARMV8 2585 2578 PRINT_FLAG(VMCPU_FF_,INTERRUPT_IRQ); 2586 2579 PRINT_FLAG(VMCPU_FF_,INTERRUPT_FIQ); 2587 #el se2580 #elif defined(VBOX_VMM_TARGET_X86) 2588 2581 PRINT_FLAG(VMCPU_FF_,INTERRUPT_APIC); 2589 2582 PRINT_FLAG(VMCPU_FF_,INTERRUPT_PIC); 2583 #else 2584 # error "port me" 2590 2585 #endif 2591 2586 PRINT_FLAG(VMCPU_FF_,TIMER); -
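The VMM.cpp hunk also adds a build-time consistency check: VMMR3HmRunGC is only compiled with VBOX_WITH_HWVIRT, and since hardware-assisted execution is x86-only, a non-x86 target requesting it is now rejected when compiling rather than via the old runtime AssertReleaseFailed() stub:

    #ifdef VBOX_WITH_HWVIRT
    # ifndef VBOX_VMM_TARGET_X86
    #  error "config error"
    # endif
    #endif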
trunk/src/VBox/VMM/VMMR3/VMMGuruMeditation.cpp
r106061 r107227 374 374 case VERR_VMM_LONG_JMP_ERROR: 375 375 { 376 #if defined(VBOX_VMM_TARGET_ARMV8)377 AssertReleaseFailed();378 #else379 376 /* 380 377 * Active trap? This is only of partial interest when in hardware … … 390 387 if (RT_SUCCESS(rc2)) 391 388 pHlp->pfnPrintf(pHlp, 392 "!! ACTIVE TRAP=%02x ERRCD=%RX32 CR2=%RGv PC=%RGr Type=%d cbInstr=%02x fIcebp=%RTbool (Guest!)\n", 393 u8TrapNo, uErrorCode, uCR2, CPUMGetGuestRIP(pVCpu), enmType, cbInstr, fIcebp); 394 389 "!! ACTIVE TRAP=%02x ERRCD=%RX32 CR2=%RGv FlatPC=%RGr Type=%d cbInstr=%02x fIcebp=%RTbool (Guest!)\n", 390 u8TrapNo, uErrorCode, uCR2, CPUMGetGuestFlatPC(pVCpu), enmType, cbInstr, fIcebp); 391 392 #if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) 395 393 /* 396 394 * Dump the relevant hypervisor registers and stack. … … 422 420 PRTHCUINTPTR const pBP = (PRTHCUINTPTR)&pVCpu->vmm.s.abAssertStack[ pVCpu->vmm.s.AssertJmpBuf.UnwindBp 423 421 - pVCpu->vmm.s.AssertJmpBuf.UnwindSp]; 424 # if HC_ARCH_BITS == 32422 # if HC_ARCH_BITS == 32 425 423 pHlp->pfnPrintf(pHlp, 426 424 "eax=volatile ebx=%08x ecx=volatile edx=volatile esi=%08x edi=%08x\n" … … 429 427 pBP[-3], pBP[-2], pBP[-1], 430 428 pBP[1], pVCpu->vmm.s.AssertJmpBuf.SavedEbp - 8, pBP[0], pBP[-4]); 431 # else432 # ifdef RT_OS_WINDOWS429 # else 430 # ifdef RT_OS_WINDOWS 433 431 pHlp->pfnPrintf(pHlp, 434 432 "rax=volatile rbx=%016RX64 rcx=volatile rdx=volatile\n" … … 443 441 pBP[-2], pBP[-1], 444 442 pBP[1], pVCpu->vmm.s.AssertJmpBuf.UnwindRetSp, pBP[0], pBP[-8]); 445 # else443 # else 446 444 pHlp->pfnPrintf(pHlp, 447 445 "rax=volatile rbx=%016RX64 rcx=volatile rdx=volatile\n" … … 455 453 pBP[-2], pBP[-1], 456 454 pBP[1], pVCpu->vmm.s.AssertJmpBuf.UnwindRetSp, pBP[0], pBP[-6]); 455 # endif 457 456 # endif 458 #endif459 457 460 458 /* Callstack. */ … … 472 470 "!! Call Stack:\n" 473 471 "!!\n"); 474 # if HC_ARCH_BITS == 32472 # if HC_ARCH_BITS == 32 475 473 pHlp->pfnPrintf(pHlp, "EBP Ret EBP Ret CS:EIP Arg0 Arg1 Arg2 Arg3 CS:EIP Symbol [line]\n"); 476 # else474 # else 477 475 pHlp->pfnPrintf(pHlp, "RBP Ret RBP Ret RIP RIP Symbol [line]\n"); 478 # endif476 # endif 479 477 for (PCDBGFSTACKFRAME pFrame = pFirstFrame; 480 478 pFrame; 481 479 pFrame = DBGFR3StackWalkNext(pFrame)) 482 480 { 483 # if HC_ARCH_BITS == 32481 # if HC_ARCH_BITS == 32 484 482 pHlp->pfnPrintf(pHlp, 485 483 "%RHv %RHv %04RX32:%RHv %RHv %RHv %RHv %RHv", … … 493 491 pFrame->Args.au32[3]); 494 492 pHlp->pfnPrintf(pHlp, " %RTsel:%08RHv", pFrame->AddrPC.Sel, pFrame->AddrPC.off); 495 # else493 # else 496 494 pHlp->pfnPrintf(pHlp, 497 495 "%RHv %RHv %RHv %RHv", … … 500 498 (RTHCUINTPTR)pFrame->AddrReturnPC.off, 501 499 (RTHCUINTPTR)pFrame->AddrPC.off); 502 # endif500 # endif 503 501 if (pFrame->pSymPC) 504 502 { … … 588 586 "!! Skipping ring-0 registers and stack, rcErr=%Rrc\n", rcErr); 589 587 } 590 #endif /* !VBOX_VMM_TARGET_ARMV8*/588 #endif /* RT_ARCH_AMD64 || RT_ARCH_X86 */ 591 589 break; 592 590 } -
trunk/src/VBox/VMM/dwarfdump-to-offsets.sed
r104367 r107227 75 75 x 76 76 H 77 x78 s/\(_OFF_.*\)[\n]\(.*\)$/#define \2\1 \\/79 p80 77 } 81 78 /DW_AT_data_member_location/ { 82 s/^[[:space:]]*DW_AT_data_member_location[[:space:]]*/ / 83 p 79 s/^[[:space:]]*DW_AT_data_member_location[[:space:]]*// 80 s/[[:space:]]//g 81 x 82 H 83 x 84 s/\([(]0x[0-9a-zA-F]*[)]\)[\n]\(_OFF_.*\)[\n]\(.*\)$/#define \3\2 \1/p 85 } 86 # cleanup hold space. 87 /^[[:space:]]*$/ { 88 x 89 s/\([(]0x[0-9a-zA-F]*[)]\)[\n]\(_OFF_.*\)[\n]\(.*\)$/\3/ 90 s/\(_OFF_.*\)[\n]\(.*\)$/\2/ 91 x 92 d 84 93 } 85 94 } -
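The dwarfdump-to-offsets.sed rework moves the "#define" emission from the DW_AT_name handler to the DW_AT_data_member_location handler, pairing the two records via the hold space so the numeric offset can be printed on the same output line (and adding a hold-space cleanup on blank lines). A hedged illustration of the effect; the member and macro names below are made up, and the exact macro prefix depends on earlier substitutions in the script that are not shown here, but the "(0xNN)" shape follows from the s/.../#define \3\2 \1/p substitution:

    DW_AT_name ("cbRam")                   <- remembered in the hold space
    DW_AT_data_member_location (0x0140)    <- triggers the combined emit, e.g.:
    #define VM_OFF_cbRam (0x0140)          /* hypothetical output line */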
trunk/src/VBox/VMM/include/EMHandleRCTmpl.h
r106061 r107227 112 112 break; 113 113 114 #if !defined(VBOX_VMM_TARGET_ARMV8)114 #ifdef VBOX_VMM_TARGET_X86 115 115 /* 116 116 * Execute pending I/O Port access. … … 168 168 break; 169 169 170 #if !defined(VBOX_VMM_TARGET_ARMV8)170 #ifdef VBOX_VMM_TARGET_X86 171 171 case VINF_EM_EMULATE_SPLIT_LOCK: 172 172 rc = VBOXSTRICTRC_TODO(emR3ExecuteSplitLockInstruction(pVM, pVCpu)); -
trunk/src/VBox/VMM/include/NEMInternal.h
r106988 r107227 39 39 #include <VBox/vmm/vmapi.h> 40 40 #ifdef RT_OS_WINDOWS 41 # include <iprt/nt/hyperv.h>42 # include <iprt/critsect.h>41 # include <iprt/nt/hyperv.h> 42 # include <iprt/critsect.h> 43 43 #elif defined(RT_OS_DARWIN) 44 # if defined(VBOX_VMM_TARGET_ARMV8)44 # ifdef VBOX_VMM_TARGET_ARMV8 45 45 # include <Hypervisor/Hypervisor.h> 46 46 # else … … 112 112 113 113 #ifdef RT_OS_DARWIN 114 # if !defined(VBOX_VMM_TARGET_ARMV8)114 # ifndef VBOX_VMM_TARGET_ARMV8 115 115 /** vCPU ID declaration to avoid dragging in HV headers here. */ 116 116 typedef unsigned hv_vcpuid_t; … … 128 128 /** @} */ 129 129 130 # if defined(VBOX_VMM_TARGET_ARMV8)131 130 /** The CPUMCTX_EXTRN_XXX mask for IEM. */ 132 # define NEM_DARWIN_CPUMCTX_EXTRN_MASK_FOR_IEM ( IEM_CPUMCTX_EXTRN_MUST_MASK ) 131 # ifdef VBOX_VMM_TARGET_ARMV8 132 # define NEM_DARWIN_CPUMCTX_EXTRN_MASK_FOR_IEM ( IEM_CPUMCTX_EXTRN_MUST_MASK ) 133 133 # else 134 /** The CPUMCTX_EXTRN_XXX mask for IEM. */ 135 # define NEM_DARWIN_CPUMCTX_EXTRN_MASK_FOR_IEM ( IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_INHIBIT_INT \ 136 | CPUMCTX_EXTRN_INHIBIT_NMI ) 137 #endif 134 # define NEM_DARWIN_CPUMCTX_EXTRN_MASK_FOR_IEM ( IEM_CPUMCTX_EXTRN_MUST_MASK \ 135 | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI ) 136 # endif 138 137 139 138 /** The CPUMCTX_EXTRN_XXX mask for IEM when raising exceptions. */ … … 141 140 142 141 143 # if defined(VBOX_VMM_TARGET_ARMV8)142 # ifdef VBOX_VMM_TARGET_ARMV8 144 143 /** 145 144 * MMIO2 tracking region. … … 221 220 bool fUseDebugLoop; 222 221 223 #if defined(VBOX_VMM_TARGET_ARMV8)222 #ifdef VBOX_VMM_TARGET_ARMV8 224 223 /** The PPI interrupt number of the vTimer. */ 225 224 uint32_t u32GicPpiVTimer; … … 252 251 /** Set if we've created the EMTs. */ 253 252 bool fCreatedEmts : 1; 254 # if defined(VBOX_VMM_TARGET_ARMV8)253 # ifdef VBOX_VMM_TARGET_ARMV8 255 254 bool fHypercallExit : 1; 256 255 bool fGpaAccessFaultExit : 1; 257 256 /** Cache line flush size as a power of two. */ 258 257 uint8_t cPhysicalAddressWidth; 259 # el se258 # elif defined(VBOX_VMM_TARGET_X86) 260 259 /** WHvRunVpExitReasonX64Cpuid is supported. */ 261 260 bool fExtendedMsrExit : 1; … … 316 315 } R0Stats; 317 316 318 # if defined(VBOX_VMM_TARGET_ARMV8)317 # ifdef VBOX_VMM_TARGET_ARMV8 319 318 /** Re-distributor memory region for all vCPUs. */ 320 319 RTGCPHYS GCPhysMmioBaseReDist; … … 332 331 /** Set if EL2 is enabled. */ 333 332 bool fEl2Enabled : 1; 334 # if defined(VBOX_VMM_TARGET_ARMV8)333 # ifdef VBOX_VMM_TARGET_ARMV8 335 334 /** @name vTimer related state. 336 335 * @{ */ … … 344 343 hv_vcpu_config_t hVCpuCfg; 345 344 /** @} */ 346 # el se345 # elif defined(VBOX_VMM_TARGET_X86) 347 346 /** Set if hv_vm_space_create() was called successfully. */ 348 347 bool fCreatedAsid : 1; … … 381 380 /** The last valid host LBR info stack range. */ 382 381 uint32_t idLbrInfoMsrLast; 383 # endif 382 # endif /* VBOX_VMM_TARGET_X86 */ 384 383 385 384 STAMCOUNTER StatMapPage; … … 387 386 STAMCOUNTER StatMapPageFailed; 388 387 STAMCOUNTER StatUnmapPageFailed; 389 #endif /* RT_OS_ WINDOWS*/388 #endif /* RT_OS_DARWIN */ 390 389 } NEM; 391 390 /** Pointer to NEM VM instance data. */ … … 432 431 /** Pointer to the KVM_RUN data exchange region. */ 433 432 R3PTRTYPE(struct kvm_run *) pRun; 434 # if defined(VBOX_VMM_TARGET_ARMV8)433 # ifdef VBOX_VMM_TARGET_ARMV8 435 434 /** The IRQ device levels from device_irq_level. */ 436 435 uint64_t fIrqDeviceLvls; … … 439 438 /** Status of the FIQ line when last seen. 
*/ 440 439 bool fFiqLastSeen; 441 # el se440 # elif defined(VBOX_VMM_TARGET_X86) 442 441 /** The MSR_IA32_APICBASE value known to KVM. */ 443 442 uint64_t uKvmApicBase; 444 # endif443 # endif 445 444 446 445 /** @name Statistics … … 491 490 492 491 #elif defined(RT_OS_WINDOWS) 493 # if ndef VBOX_VMM_TARGET_ARMV8492 # ifdef VBOX_VMM_TARGET_X86 494 493 /** The current state of the interrupt windows (NEM_WIN_INTW_F_XXX). */ 495 494 uint8_t fCurrentInterruptWindows; … … 543 542 544 543 #elif defined(RT_OS_DARWIN) 545 # if defined(VBOX_VMM_TARGET_ARMV8)544 # ifdef VBOX_VMM_TARGET_ARMV8 546 545 /** The vCPU handle associated with the EMT executing this vCPU. */ 547 546 hv_vcpu_t hVCpu; … … 557 556 * (for first guest exec call on the EMT after loading the saved state). */ 558 557 bool fIdRegsSynced; 559 # else 558 559 # elif defined(VBOX_VMM_TARGET_X86) 560 560 /** The vCPU handle associated with the EMT executing this vCPU. */ 561 561 hv_vcpuid_t hVCpuId; … … 628 628 /** Pointer to the VMX statistics. */ 629 629 PVMXSTATISTICS pVmxStats; 630 # endif 630 # endif /* VBOX_VMM_TARGET_X86 */ 631 631 632 632 /** @name Statistics … … 641 641 STAMCOUNTER StatImportOnReturnSkipped; 642 642 STAMCOUNTER StatQueryCpuTick; 643 # ifdef VBOX_WITH_STATISTICS643 # ifdef VBOX_WITH_STATISTICS 644 644 STAMPROFILEADV StatProfGstStateImport; 645 645 STAMPROFILEADV StatProfGstStateExport; 646 # endif646 # endif 647 647 /** @} */ 648 648 -
trunk/src/VBox/VMM/testcase/Makefile.kmk
r106945 r107227 156 156 ifdef VBOX_WITH_RAW_MODE 157 157 tstVMStructRC_TEMPLATE = VBoxRcExe 158 tstVMStructRC_DEFS = VBOX_ IN_VMM IN_VMM_RC IN_DIS IN_RT_RC VBOX_WITH_RAW_MODE $(VMM_COMMON_DEFS)158 tstVMStructRC_DEFS = VBOX_VMM_TARGET_AGNOSTIC VBOX_IN_VMM IN_VMM_RC IN_DIS IN_RT_RC VBOX_WITH_RAW_MODE $(VMM_COMMON_DEFS) 159 159 ifdef VBOX_WITH_R0_LOGGING 160 160 tstVMStructRC_DEFS += VBOX_WITH_R0_LOGGING … … 170 170 tstVMStructSize_CXXFLAGS += $(VBOX_GCC_Wno-invalid-offsetof) 171 171 endif 172 tstVMStructSize_DEFS = VBOX_ IN_VMM IN_VMM_R3 IN_DIS $(VMM_COMMON_DEFS)172 tstVMStructSize_DEFS = VBOX_VMM_TARGET_AGNOSTIC VBOX_IN_VMM IN_VMM_R3 IN_DIS $(VMM_COMMON_DEFS) 173 173 ifdef VBOX_WITH_RAW_MODE 174 174 tstVMStructSize_DEFS += VBOX_WITH_RAW_MODE … … 192 192 tstAsmStructSize_CXXFLAGS += $(VBOX_GCC_Wno-invalid-offsetof) 193 193 endif 194 tstAsmStructs_DEFS = VBOX_ IN_VMM IN_VMM_R3 IN_DIS $(VMM_COMMON_DEFS)194 tstAsmStructs_DEFS = VBOX_VMM_TARGET_AGNOSTIC VBOX_IN_VMM IN_VMM_R3 IN_DIS $(VMM_COMMON_DEFS) 195 195 ifdef VBOX_WITH_RAW_MODE 196 196 tstAsmStructs_DEFS += VBOX_WITH_RAW_MODE … … 207 207 ifdef VBOX_WITH_RAW_MODE 208 208 tstAsmStructsRC_TEMPLATE = VBoxRcExe 209 tstAsmStructsRC_DEFS = VBOX_ IN_VMM IN_VMM_RC IN_DIS IN_RT_RC VBOX_WITH_RAW_MODE $(VMM_COMMON_DEFS)209 tstAsmStructsRC_DEFS = VBOX_VMM_TARGET_AGNOSTIC VBOX_IN_VMM IN_VMM_RC IN_DIS IN_RT_RC VBOX_WITH_RAW_MODE $(VMM_COMMON_DEFS) 210 210 ifdef VBOX_WITH_R0_LOGGING 211 211 tstAsmStructsRC_DEFS += VBOX_WITH_R0_LOGGING … … 655 655 656 656 tstIEMAImpl_TEMPLATE = VBoxR3TstExe 657 tstIEMAImpl_DEFS = $(VMM_COMMON_DEFS) IEM_WITHOUT_ASSEMBLY IEM_WITHOUT_INSTRUCTION_STATS657 tstIEMAImpl_DEFS = VBOX_VMM_TARGET_X86 $(VMM_COMMON_DEFS) IEM_WITHOUT_ASSEMBLY IEM_WITHOUT_INSTRUCTION_STATS 658 658 tstIEMAImpl_SDKS = VBoxSoftFloatR3Shared 659 659 tstIEMAImpl_INCS = ../include . 
… … 677 677 tstIEMAImplAsm_TEMPLATE := VBoxR3TstExe 678 678 tstIEMAImplAsm_SDKS := VBoxSoftFloatR3Shared 679 tstIEMAImplAsm_DEFS = $(VMM_COMMON_DEFS) IEM_WITH_ASSEMBLY IEM_WITHOUT_INSTRUCTION_STATS TSTIEMAIMPL_WITH_GENERATOR679 tstIEMAImplAsm_DEFS = VBOX_VMM_TARGET_X86 $(VMM_COMMON_DEFS) IEM_WITH_ASSEMBLY IEM_WITHOUT_INSTRUCTION_STATS TSTIEMAIMPL_WITH_GENERATOR 680 680 tstIEMAImplAsm_ASFLAGS.amd64 := -Werror 681 681 tstIEMAImplAsm_ASFLAGS.x86 := -Werror … … 701 701 tstIEMCheckMc_TEMPLATE = VBoxR3TstExe 702 702 tstIEMCheckMc_SOURCES = tstIEMCheckMc.cpp 703 tstIEMCheckMc_DEFS = $(VMM_COMMON_DEFS) IEM_WITHOUT_INSTRUCTION_STATS703 tstIEMCheckMc_DEFS = VBOX_VMM_TARGET_X86 $(VMM_COMMON_DEFS) IEM_WITHOUT_INSTRUCTION_STATS 704 704 tstIEMCheckMc_LIBS = $(LIB_RUNTIME) 705 705 ifeq ($(KBUILD_TARGET),win) … … 713 713 # 714 714 tstIEMN8veProfiling_TEMPLATE := VBoxR3Exe 715 tstIEMN8veProfiling_DEFS = $(VMM_COMMON_DEFS)715 tstIEMN8veProfiling_DEFS = VBOX_VMM_TARGET_AGNOSTIC $(VMM_COMMON_DEFS) 716 716 tstIEMN8veProfiling_SOURCES := tstIEMN8veProfiling.cpp 717 717 tstIEMN8veProfiling_LIBS = $(LIB_VMM) $(LIB_RUNTIME) … … 722 722 tstSSM_TEMPLATE = VBoxR3TstExe 723 723 tstSSM_INCS = $(VBOX_PATH_VMM_SRC)/include 724 tstSSM_DEFS = $(VMM_COMMON_DEFS)724 tstSSM_DEFS = VBOX_VMM_TARGET_AGNOSTIC $(VMM_COMMON_DEFS) 725 725 tstSSM_SOURCES = tstSSM.cpp 726 726 tstSSM_LIBS = $(LIB_VMM) $(LIB_REM) $(LIB_RUNTIME) … … 742 742 tstCFGM_TEMPLATE = VBoxR3SignedTstExe 743 743 endif 744 tstCFGM_DEFS = $(VMM_COMMON_DEFS)744 tstCFGM_DEFS = VBOX_VMM_TARGET_AGNOSTIC $(VMM_COMMON_DEFS) 745 745 tstCFGM_SOURCES = tstCFGM.cpp 746 746 tstCFGM_LIBS = $(LIB_VMM) $(LIB_REM) $(LIB_RUNTIME) … … 781 781 tstVMREQ_TEMPLATE = VBoxR3SignedExe 782 782 endif 783 tstVMREQ_DEFS = $(VMM_COMMON_DEFS)783 tstVMREQ_DEFS = VBOX_VMM_TARGET_AGNOSTIC $(VMM_COMMON_DEFS) 784 784 tstVMREQ_SOURCES = tstVMREQ.cpp 785 785 tstVMREQ_LIBS = $(LIB_VMM) $(LIB_REM) $(LIB_RUNTIME) … … 801 801 tstAnimate_TEMPLATE = VBoxR3SignedExe 802 802 endif 803 tstAnimate_DEFS = $(VMM_COMMON_DEFS)803 tstAnimate_DEFS = VBOX_VMM_TARGET_X86 $(VMM_COMMON_DEFS) 804 804 tstAnimate_SOURCES = tstAnimate.cpp 805 805 tstAnimate_LIBS = $(LIB_VMM) $(LIB_REM) $(LIB_RUNTIME) … … 918 918 # 919 919 tstPDMQueue_TEMPLATE := VBoxR3Exe 920 tstPDMQueue_DEFS =$(VMM_COMMON_DEFS)920 tstPDMQueue_DEFS = VBOX_VMM_TARGET_AGNOSTIC $(VMM_COMMON_DEFS) 921 921 tstPDMQueue_SOURCES := tstPDMQueue.cpp 922 922 tstPDMQueue_LIBS := $(LIB_VMM) $(LIB_RUNTIME) … … 940 940 tstPDMAsyncCompletion_TEMPLATE = VBoxR3SignedExe 941 941 endif 942 tstPDMAsyncCompletion_DEFS = $(VMM_COMMON_DEFS)942 tstPDMAsyncCompletion_DEFS = VBOX_VMM_TARGET_AGNOSTIC $(VMM_COMMON_DEFS) 943 943 tstPDMAsyncCompletion_INCS = $(VBOX_PATH_VMM_SRC)/include 944 944 tstPDMAsyncCompletion_SOURCES = tstPDMAsyncCompletion.cpp … … 961 961 tstPDMAsyncCompletionStress_TEMPLATE = VBoxR3SignedExe 962 962 endif 963 tstPDMAsyncCompletionStress_DEFS = $(VMM_COMMON_DEFS)963 tstPDMAsyncCompletionStress_DEFS = VBOX_VMM_TARGET_AGNOSTIC $(VMM_COMMON_DEFS) 964 964 tstPDMAsyncCompletionStress_INCS = $(VBOX_PATH_VMM_SRC)/include 965 965 tstPDMAsyncCompletionStress_SOURCES = tstPDMAsyncCompletionStress.cpp … … 970 970 PROGRAMS += tstSSM-2 971 971 tstSSM-2_TEMPLATE = VBoxR3TstExe 972 tstSSM-2_DEFS = IN_VMM_STATIC972 tstSSM-2_DEFS = VBOX_VMM_TARGET_AGNOSTIC IN_VMM_STATIC 973 973 tstSSM-2_SOURCES = tstSSM-2.cpp 974 974 tstSSM-2_LIBS = $(PATH_STAGE_LIB)/SSMStandalone$(VBOX_SUFF_LIB) … … 983 983 || defined(VBOX_WITH_DTRACE_RC)) 984 984 tstVMStructDTrace_TEMPLATE = VBoxR3AutoTest 985 
tstVMStructDTrace_DEFS = VBOX_ IN_VMM IN_VMM_R3 IN_DIS $(VMM_COMMON_DEFS) IEM_WITHOUT_INSTRUCTION_STATS985 tstVMStructDTrace_DEFS = VBOX_VMM_TARGET_AGNOSTIC VBOX_IN_VMM IN_VMM_R3 IN_DIS $(VMM_COMMON_DEFS) IEM_WITHOUT_INSTRUCTION_STATS 986 986 ifdef VBOX_WITH_RAW_MODE 987 987 tstVMStructDTrace_DEFS += VBOX_WITH_RAW_MODE … … 1025 1025 $(DEFS.$(KBUILD_TARGET_ARCH)) \ 1026 1026 $(DEFS.$(KBUILD_TARGET).$(KBUILD_TARGET_ARCH)) \ 1027 $(VMM_COMMON_DEFS) \1027 VBOX_VMM_TARGET_X86 $(VMM_COMMON_DEFS) \ 1028 1028 ) \ 1029 1029 -f $(if $(eq $(KBUILD_TARGET),darwin),macho,$(if $(eq $(KBUILD_TARGET),win),coff,elf)) \ -
trunk/src/VBox/VMM/tools/Makefile.kmk
r106945 r107227 67 67 endif 68 68 VBoxCpuReport_TEMPLATE := VBoxR3Static 69 VBoxCpuReport_DEFS = VBOX_ IN_VMM IN_VMM_R3 IN_VBOX_CPU_REPORT $(VMM_COMMON_DEFS)69 VBoxCpuReport_DEFS = VBOX_VMM_TARGET_X86 VBOX_IN_VMM IN_VMM_R3 IN_VBOX_CPU_REPORT $(VMM_COMMON_DEFS) 70 70 VBoxCpuReport_INCS = ../include 71 71 VBoxCpuReport_SOURCES = \