VirtualBox

Ignore:
Timestamp:
May 20, 2024 3:31:01 PM (9 months ago)
Author:
vboxsync
Message:

VMM/NEM: Factor some bits common between x86 and arm64 KVM out into a template to reduce code duplication, bugref:10391

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMR3/NEMR3Native-linux-armv8.cpp

    r104399 r104725  
    5454#include <sys/mman.h>
    5555#include <linux/kvm.h>
    56 
    57 /** @note This is an experiment right now and therefore is separate from the amd64 KVM NEM backend
    58  *        We'll see whether it would make sense to merge things later on when things have settled.
    59  */
    6056
    6157
     
    243239
    244240
    245 /**
    246  * Worker for nemR3NativeInit that gets the hypervisor capabilities.
    247  *
    248  * @returns VBox status code.
    249  * @param   pVM                 The cross context VM structure.
    250  * @param   pErrInfo            Where to always return error info.
    251  */
    252 static int nemR3LnxInitCheckCapabilities(PVM pVM, PRTERRINFO pErrInfo)
    253 {
    254     AssertReturn(pVM->nem.s.fdKvm != -1, RTErrInfoSet(pErrInfo, VERR_WRONG_ORDER, "Wrong initalization order"));
    255 
    256     /*
    257      * Capabilities.
    258      */
    259     static const struct
    260     {
    261         const char *pszName;
    262         int         iCap;
    263         uint32_t    offNem      : 24;
    264         uint32_t    cbNem       : 3;
    265         uint32_t    fReqNonZero : 1;
    266         uint32_t    uReserved   : 4;
    267     } s_aCaps[] =
    268     {
    269 #define CAP_ENTRY__L(a_Define)           { #a_Define, a_Define,            UINT32_C(0x00ffffff), 0, 0, 0 }
    270 #define CAP_ENTRY__S(a_Define, a_Member) { #a_Define, a_Define, RT_UOFFSETOF(NEM, a_Member), RT_SIZEOFMEMB(NEM, a_Member), 0, 0 }
    271 #define CAP_ENTRY_MS(a_Define, a_Member) { #a_Define, a_Define, RT_UOFFSETOF(NEM, a_Member), RT_SIZEOFMEMB(NEM, a_Member), 1, 0 }
    272 #define CAP_ENTRY__U(a_Number)           { "KVM_CAP_" #a_Number, a_Number, UINT32_C(0x00ffffff), 0, 0, 0 }
    273 #define CAP_ENTRY_ML(a_Number)           { "KVM_CAP_" #a_Number, a_Number, UINT32_C(0x00ffffff), 0, 1, 0 }
    274 
    275         CAP_ENTRY__L(KVM_CAP_IRQCHIP),                       /* 0 */
    276         CAP_ENTRY__L(KVM_CAP_HLT),
    277         CAP_ENTRY__L(KVM_CAP_MMU_SHADOW_CACHE_CONTROL),
    278         CAP_ENTRY_ML(KVM_CAP_USER_MEMORY),
    279         CAP_ENTRY__L(KVM_CAP_SET_TSS_ADDR),
    280         CAP_ENTRY__U(5),
    281         CAP_ENTRY__L(KVM_CAP_VAPIC),
    282         CAP_ENTRY__L(KVM_CAP_EXT_CPUID),
    283         CAP_ENTRY__L(KVM_CAP_CLOCKSOURCE),
    284         CAP_ENTRY__L(KVM_CAP_NR_VCPUS),
    285         CAP_ENTRY_MS(KVM_CAP_NR_MEMSLOTS, cMaxMemSlots),     /* 10 */
    286         CAP_ENTRY__L(KVM_CAP_PIT),
    287         CAP_ENTRY__L(KVM_CAP_NOP_IO_DELAY),
    288         CAP_ENTRY__L(KVM_CAP_PV_MMU),
    289         CAP_ENTRY__L(KVM_CAP_MP_STATE),
    290         CAP_ENTRY__L(KVM_CAP_COALESCED_MMIO),
    291         CAP_ENTRY__L(KVM_CAP_SYNC_MMU),
    292         CAP_ENTRY__U(17),
    293         CAP_ENTRY__L(KVM_CAP_IOMMU),
    294         CAP_ENTRY__U(19), /* Buggy KVM_CAP_JOIN_MEMORY_REGIONS? */
    295         CAP_ENTRY__U(20), /* Mon-working KVM_CAP_DESTROY_MEMORY_REGION? */
    296         CAP_ENTRY__L(KVM_CAP_DESTROY_MEMORY_REGION_WORKS),   /* 21 */
    297         CAP_ENTRY__L(KVM_CAP_USER_NMI),
    298 #ifdef __KVM_HAVE_GUEST_DEBUG
    299         CAP_ENTRY__L(KVM_CAP_SET_GUEST_DEBUG),
    300 #endif
    301 #ifdef __KVM_HAVE_PIT
    302         CAP_ENTRY__L(KVM_CAP_REINJECT_CONTROL),
    303 #endif
    304         CAP_ENTRY__L(KVM_CAP_IRQ_ROUTING),
    305         CAP_ENTRY__L(KVM_CAP_IRQ_INJECT_STATUS),
    306         CAP_ENTRY__U(27),
    307         CAP_ENTRY__U(28),
    308         CAP_ENTRY__L(KVM_CAP_ASSIGN_DEV_IRQ),
    309         CAP_ENTRY__L(KVM_CAP_JOIN_MEMORY_REGIONS_WORKS),     /* 30 */
    310 #ifdef __KVM_HAVE_MCE
    311         CAP_ENTRY__L(KVM_CAP_MCE),
    312 #endif
    313         CAP_ENTRY__L(KVM_CAP_IRQFD),
    314 #ifdef __KVM_HAVE_PIT
    315         CAP_ENTRY__L(KVM_CAP_PIT2),
    316 #endif
    317         CAP_ENTRY__L(KVM_CAP_SET_BOOT_CPU_ID),
    318 #ifdef __KVM_HAVE_PIT_STATE2
    319         CAP_ENTRY__L(KVM_CAP_PIT_STATE2),
    320 #endif
    321         CAP_ENTRY__L(KVM_CAP_IOEVENTFD),
    322         CAP_ENTRY__L(KVM_CAP_SET_IDENTITY_MAP_ADDR),
    323 #ifdef __KVM_HAVE_XEN_HVM
    324         CAP_ENTRY__L(KVM_CAP_XEN_HVM),
    325 #endif
    326         CAP_ENTRY__L(KVM_CAP_ADJUST_CLOCK),
    327         CAP_ENTRY__L(KVM_CAP_INTERNAL_ERROR_DATA),           /* 40 */
    328 #ifdef __KVM_HAVE_VCPU_EVENTS
    329         CAP_ENTRY_ML(KVM_CAP_VCPU_EVENTS),
    330 #else
    331         CAP_ENTRY_MU(41),
    332 #endif
    333         CAP_ENTRY__L(KVM_CAP_S390_PSW),
    334         CAP_ENTRY__L(KVM_CAP_PPC_SEGSTATE),
    335         CAP_ENTRY__L(KVM_CAP_HYPERV),
    336         CAP_ENTRY__L(KVM_CAP_HYPERV_VAPIC),
    337         CAP_ENTRY__L(KVM_CAP_HYPERV_SPIN),
    338         CAP_ENTRY__L(KVM_CAP_PCI_SEGMENT),
    339         CAP_ENTRY__L(KVM_CAP_PPC_PAIRED_SINGLES),
    340         CAP_ENTRY__L(KVM_CAP_INTR_SHADOW),
    341 #ifdef __KVM_HAVE_DEBUGREGS
    342         CAP_ENTRY__L(KVM_CAP_DEBUGREGS),                     /* 50 */
    343 #endif
    344         CAP_ENTRY__L(KVM_CAP_X86_ROBUST_SINGLESTEP),
    345         CAP_ENTRY__L(KVM_CAP_PPC_OSI),
    346         CAP_ENTRY__L(KVM_CAP_PPC_UNSET_IRQ),
    347         CAP_ENTRY__L(KVM_CAP_ENABLE_CAP),
    348         CAP_ENTRY__L(KVM_CAP_PPC_GET_PVINFO),
    349         CAP_ENTRY__L(KVM_CAP_PPC_IRQ_LEVEL),
    350         CAP_ENTRY__L(KVM_CAP_ASYNC_PF),
    351         CAP_ENTRY__L(KVM_CAP_TSC_CONTROL),                   /* 60 */
    352         CAP_ENTRY__L(KVM_CAP_GET_TSC_KHZ),
    353         CAP_ENTRY__L(KVM_CAP_PPC_BOOKE_SREGS),
    354         CAP_ENTRY__L(KVM_CAP_SPAPR_TCE),
    355         CAP_ENTRY__L(KVM_CAP_PPC_SMT),
    356         CAP_ENTRY__L(KVM_CAP_PPC_RMA),
    357         CAP_ENTRY__L(KVM_CAP_MAX_VCPUS),
    358         CAP_ENTRY__L(KVM_CAP_PPC_HIOR),
    359         CAP_ENTRY__L(KVM_CAP_PPC_PAPR),
    360         CAP_ENTRY__L(KVM_CAP_SW_TLB),
    361         CAP_ENTRY__L(KVM_CAP_ONE_REG),                       /* 70 */
    362         CAP_ENTRY__L(KVM_CAP_S390_GMAP),
    363         CAP_ENTRY__L(KVM_CAP_TSC_DEADLINE_TIMER),
    364         CAP_ENTRY__L(KVM_CAP_S390_UCONTROL),
    365         CAP_ENTRY__L(KVM_CAP_SYNC_REGS),
    366         CAP_ENTRY__L(KVM_CAP_PCI_2_3),
    367         CAP_ENTRY__L(KVM_CAP_KVMCLOCK_CTRL),
    368         CAP_ENTRY__L(KVM_CAP_SIGNAL_MSI),
    369         CAP_ENTRY__L(KVM_CAP_PPC_GET_SMMU_INFO),
    370         CAP_ENTRY__L(KVM_CAP_S390_COW),
    371         CAP_ENTRY__L(KVM_CAP_PPC_ALLOC_HTAB),                /* 80 */
    372         CAP_ENTRY__L(KVM_CAP_READONLY_MEM),
    373         CAP_ENTRY__L(KVM_CAP_IRQFD_RESAMPLE),
    374         CAP_ENTRY__L(KVM_CAP_PPC_BOOKE_WATCHDOG),
    375         CAP_ENTRY__L(KVM_CAP_PPC_HTAB_FD),
    376         CAP_ENTRY__L(KVM_CAP_S390_CSS_SUPPORT),
    377         CAP_ENTRY__L(KVM_CAP_PPC_EPR),
    378         CAP_ENTRY_ML(KVM_CAP_ARM_PSCI),
    379         CAP_ENTRY_ML(KVM_CAP_ARM_SET_DEVICE_ADDR),
    380         CAP_ENTRY_ML(KVM_CAP_DEVICE_CTRL),
    381         CAP_ENTRY__L(KVM_CAP_IRQ_MPIC),                      /* 90 */
    382         CAP_ENTRY__L(KVM_CAP_PPC_RTAS),
    383         CAP_ENTRY__L(KVM_CAP_IRQ_XICS),
    384         CAP_ENTRY__L(KVM_CAP_ARM_EL1_32BIT),
    385         CAP_ENTRY__L(KVM_CAP_SPAPR_MULTITCE),
    386         CAP_ENTRY__L(KVM_CAP_EXT_EMUL_CPUID),
    387         CAP_ENTRY__L(KVM_CAP_HYPERV_TIME),
    388         CAP_ENTRY__L(KVM_CAP_IOAPIC_POLARITY_IGNORED),
    389         CAP_ENTRY__L(KVM_CAP_ENABLE_CAP_VM),
    390         CAP_ENTRY__L(KVM_CAP_S390_IRQCHIP),
    391         CAP_ENTRY__L(KVM_CAP_IOEVENTFD_NO_LENGTH),           /* 100 */
    392         CAP_ENTRY__L(KVM_CAP_VM_ATTRIBUTES),
    393         CAP_ENTRY_ML(KVM_CAP_ARM_PSCI_0_2),
    394         CAP_ENTRY__L(KVM_CAP_PPC_FIXUP_HCALL),
    395         CAP_ENTRY__L(KVM_CAP_PPC_ENABLE_HCALL),
    396         CAP_ENTRY__L(KVM_CAP_CHECK_EXTENSION_VM),
    397         CAP_ENTRY__L(KVM_CAP_S390_USER_SIGP),
    398         CAP_ENTRY__L(KVM_CAP_S390_VECTOR_REGISTERS),
    399         CAP_ENTRY__L(KVM_CAP_S390_MEM_OP),
    400         CAP_ENTRY__L(KVM_CAP_S390_USER_STSI),
    401         CAP_ENTRY__L(KVM_CAP_S390_SKEYS),                    /* 110 */
    402         CAP_ENTRY__L(KVM_CAP_MIPS_FPU),
    403         CAP_ENTRY__L(KVM_CAP_MIPS_MSA),
    404         CAP_ENTRY__L(KVM_CAP_S390_INJECT_IRQ),
    405         CAP_ENTRY__L(KVM_CAP_S390_IRQ_STATE),
    406         CAP_ENTRY__L(KVM_CAP_PPC_HWRNG),
    407         CAP_ENTRY__L(KVM_CAP_DISABLE_QUIRKS),
    408         CAP_ENTRY__L(KVM_CAP_X86_SMM),
    409         CAP_ENTRY__L(KVM_CAP_MULTI_ADDRESS_SPACE),
    410         CAP_ENTRY__L(KVM_CAP_GUEST_DEBUG_HW_BPS),
    411         CAP_ENTRY__L(KVM_CAP_GUEST_DEBUG_HW_WPS),            /* 120 */
    412         CAP_ENTRY__L(KVM_CAP_SPLIT_IRQCHIP),
    413         CAP_ENTRY__L(KVM_CAP_IOEVENTFD_ANY_LENGTH),
    414         CAP_ENTRY__L(KVM_CAP_HYPERV_SYNIC),
    415         CAP_ENTRY__L(KVM_CAP_S390_RI),
    416         CAP_ENTRY__L(KVM_CAP_SPAPR_TCE_64),
    417         CAP_ENTRY__L(KVM_CAP_ARM_PMU_V3),
    418         CAP_ENTRY__L(KVM_CAP_VCPU_ATTRIBUTES),
    419         CAP_ENTRY__L(KVM_CAP_MAX_VCPU_ID),
    420         CAP_ENTRY__L(KVM_CAP_X2APIC_API),
    421         CAP_ENTRY__L(KVM_CAP_S390_USER_INSTR0),              /* 130 */
    422         CAP_ENTRY__L(KVM_CAP_MSI_DEVID),
    423         CAP_ENTRY__L(KVM_CAP_PPC_HTM),
    424         CAP_ENTRY__L(KVM_CAP_SPAPR_RESIZE_HPT),
    425         CAP_ENTRY__L(KVM_CAP_PPC_MMU_RADIX),
    426         CAP_ENTRY__L(KVM_CAP_PPC_MMU_HASH_V3),
    427         CAP_ENTRY__L(KVM_CAP_IMMEDIATE_EXIT),
    428         CAP_ENTRY__L(KVM_CAP_MIPS_VZ),
    429         CAP_ENTRY__L(KVM_CAP_MIPS_TE),
    430         CAP_ENTRY__L(KVM_CAP_MIPS_64BIT),
    431         CAP_ENTRY__L(KVM_CAP_S390_GS),                       /* 140 */
    432         CAP_ENTRY__L(KVM_CAP_S390_AIS),
    433         CAP_ENTRY__L(KVM_CAP_SPAPR_TCE_VFIO),
    434         CAP_ENTRY__L(KVM_CAP_X86_DISABLE_EXITS),
    435         CAP_ENTRY_ML(KVM_CAP_ARM_USER_IRQ),
    436         CAP_ENTRY__L(KVM_CAP_S390_CMMA_MIGRATION),
    437         CAP_ENTRY__L(KVM_CAP_PPC_FWNMI),
    438         CAP_ENTRY__L(KVM_CAP_PPC_SMT_POSSIBLE),
    439         CAP_ENTRY__L(KVM_CAP_HYPERV_SYNIC2),
    440         CAP_ENTRY__L(KVM_CAP_HYPERV_VP_INDEX),
    441         CAP_ENTRY__L(KVM_CAP_S390_AIS_MIGRATION),            /* 150 */
    442         CAP_ENTRY__L(KVM_CAP_PPC_GET_CPU_CHAR),
    443         CAP_ENTRY__L(KVM_CAP_S390_BPB),
    444         CAP_ENTRY__L(KVM_CAP_GET_MSR_FEATURES),
    445         CAP_ENTRY__L(KVM_CAP_HYPERV_EVENTFD),
    446         CAP_ENTRY__L(KVM_CAP_HYPERV_TLBFLUSH),
    447         CAP_ENTRY__L(KVM_CAP_S390_HPAGE_1M),
    448         CAP_ENTRY__L(KVM_CAP_NESTED_STATE),
    449         CAP_ENTRY__L(KVM_CAP_ARM_INJECT_SERROR_ESR),
    450         CAP_ENTRY__L(KVM_CAP_MSR_PLATFORM_INFO),
    451         CAP_ENTRY__L(KVM_CAP_PPC_NESTED_HV),                 /* 160 */
    452         CAP_ENTRY__L(KVM_CAP_HYPERV_SEND_IPI),
    453         CAP_ENTRY__L(KVM_CAP_COALESCED_PIO),
    454         CAP_ENTRY__L(KVM_CAP_HYPERV_ENLIGHTENED_VMCS),
    455         CAP_ENTRY__L(KVM_CAP_EXCEPTION_PAYLOAD),
    456         CAP_ENTRY_MS(KVM_CAP_ARM_VM_IPA_SIZE, cIpaBits),
    457         CAP_ENTRY__L(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT),
    458         CAP_ENTRY__L(KVM_CAP_HYPERV_CPUID),
    459         CAP_ENTRY__L(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2),
    460         CAP_ENTRY__L(KVM_CAP_PPC_IRQ_XIVE),
    461         CAP_ENTRY__L(KVM_CAP_ARM_SVE),                       /* 170 */
    462         CAP_ENTRY__L(KVM_CAP_ARM_PTRAUTH_ADDRESS),
    463         CAP_ENTRY__L(KVM_CAP_ARM_PTRAUTH_GENERIC),
    464         CAP_ENTRY__L(KVM_CAP_PMU_EVENT_FILTER),
    465         CAP_ENTRY__L(KVM_CAP_ARM_IRQ_LINE_LAYOUT_2),
    466         CAP_ENTRY__L(KVM_CAP_HYPERV_DIRECT_TLBFLUSH),
    467         CAP_ENTRY__L(KVM_CAP_PPC_GUEST_DEBUG_SSTEP),
    468         CAP_ENTRY__L(KVM_CAP_ARM_NISV_TO_USER),
    469         CAP_ENTRY__L(KVM_CAP_ARM_INJECT_EXT_DABT),
    470         CAP_ENTRY__L(KVM_CAP_S390_VCPU_RESETS),
    471         CAP_ENTRY__L(KVM_CAP_S390_PROTECTED),                /* 180 */
    472         CAP_ENTRY__L(KVM_CAP_PPC_SECURE_GUEST),
    473         CAP_ENTRY__L(KVM_CAP_HALT_POLL),
    474         CAP_ENTRY__L(KVM_CAP_ASYNC_PF_INT),
    475         CAP_ENTRY__L(KVM_CAP_LAST_CPU),
    476         CAP_ENTRY__L(KVM_CAP_SMALLER_MAXPHYADDR),
    477         CAP_ENTRY__L(KVM_CAP_S390_DIAG318),
    478         CAP_ENTRY__L(KVM_CAP_STEAL_TIME),
    479         CAP_ENTRY__L(KVM_CAP_X86_USER_SPACE_MSR),            /* (since 5.10) */
    480         CAP_ENTRY__L(KVM_CAP_X86_MSR_FILTER),
    481         CAP_ENTRY__L(KVM_CAP_ENFORCE_PV_FEATURE_CPUID),      /* 190 */
    482         CAP_ENTRY__L(KVM_CAP_SYS_HYPERV_CPUID),
    483         CAP_ENTRY__L(KVM_CAP_DIRTY_LOG_RING),
    484         CAP_ENTRY__L(KVM_CAP_X86_BUS_LOCK_EXIT),
    485         CAP_ENTRY__L(KVM_CAP_PPC_DAWR1),
    486         CAP_ENTRY__L(KVM_CAP_SET_GUEST_DEBUG2),
    487         CAP_ENTRY__L(KVM_CAP_SGX_ATTRIBUTE),
    488         CAP_ENTRY__L(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM),
    489         CAP_ENTRY__L(KVM_CAP_PTP_KVM),
    490         CAP_ENTRY__U(199),
    491         CAP_ENTRY__U(200),
    492         CAP_ENTRY__U(201),
    493         CAP_ENTRY__U(202),
    494         CAP_ENTRY__U(203),
    495         CAP_ENTRY__U(204),
    496         CAP_ENTRY__U(205),
    497         CAP_ENTRY__U(206),
    498         CAP_ENTRY__U(207),
    499         CAP_ENTRY__U(208),
    500         CAP_ENTRY__U(209),
    501         CAP_ENTRY__U(210),
    502         CAP_ENTRY__U(211),
    503         CAP_ENTRY__U(212),
    504         CAP_ENTRY__U(213),
    505         CAP_ENTRY__U(214),
    506         CAP_ENTRY__U(215),
    507         CAP_ENTRY__U(216),
    508     };
    509 
    510     LogRel(("NEM: KVM capabilities (system):\n"));
    511     int rcRet = VINF_SUCCESS;
    512     for (unsigned i = 0; i < RT_ELEMENTS(s_aCaps); i++)
    513     {
    514         int rc = ioctl(pVM->nem.s.fdKvm, KVM_CHECK_EXTENSION, s_aCaps[i].iCap);
    515         if (rc >= 10)
    516             LogRel(("NEM:   %36s: %#x (%d)\n", s_aCaps[i].pszName, rc, rc));
    517         else if (rc >= 0)
    518             LogRel(("NEM:   %36s: %d\n", s_aCaps[i].pszName, rc));
    519         else
    520             LogRel(("NEM:   %s failed: %d/%d\n", s_aCaps[i].pszName, rc, errno));
    521         switch (s_aCaps[i].cbNem)
    522         {
    523             case 0:
    524                 break;
    525             case 1:
    526             {
    527                 uint8_t *puValue = (uint8_t *)&pVM->nem.padding[s_aCaps[i].offNem];
    528                 AssertReturn(s_aCaps[i].offNem <= sizeof(NEM) - sizeof(*puValue), VERR_NEM_IPE_0);
    529                 *puValue = (uint8_t)rc;
    530                 AssertLogRelMsg((int)*puValue == rc, ("%s: %#x\n", s_aCaps[i].pszName, rc));
    531                 break;
    532             }
    533             case 2:
    534             {
    535                 uint16_t *puValue = (uint16_t *)&pVM->nem.padding[s_aCaps[i].offNem];
    536                 AssertReturn(s_aCaps[i].offNem <= sizeof(NEM) - sizeof(*puValue), VERR_NEM_IPE_0);
    537                 *puValue = (uint16_t)rc;
    538                 AssertLogRelMsg((int)*puValue == rc, ("%s: %#x\n", s_aCaps[i].pszName, rc));
    539                 break;
    540             }
    541             case 4:
    542             {
    543                 uint32_t *puValue = (uint32_t *)&pVM->nem.padding[s_aCaps[i].offNem];
    544                 AssertReturn(s_aCaps[i].offNem <= sizeof(NEM) - sizeof(*puValue), VERR_NEM_IPE_0);
    545                 *puValue = (uint32_t)rc;
    546                 AssertLogRelMsg((int)*puValue == rc, ("%s: %#x\n", s_aCaps[i].pszName, rc));
    547                 break;
    548             }
    549             default:
    550                 rcRet = RTErrInfoSetF(pErrInfo, VERR_NEM_IPE_0, "s_aCaps[%u] is bad: cbNem=%#x - %s",
    551                                       i, s_aCaps[i].pszName, s_aCaps[i].cbNem);
    552                 AssertFailedReturn(rcRet);
    553         }
    554 
    555         /*
    556          * Is a require non-zero entry zero or failing?
    557          */
    558         if (s_aCaps[i].fReqNonZero && rc <= 0)
    559             rcRet = RTERRINFO_LOG_REL_ADD_F(pErrInfo, VERR_NEM_MISSING_FEATURE,
    560                                             "Required capability '%s' is missing!", s_aCaps[i].pszName);
    561     }
    562 
    563     /*
    564      * Get per VCpu KVM_RUN MMAP area size.
    565      */
    566     int rc = ioctl(pVM->nem.s.fdKvm, KVM_GET_VCPU_MMAP_SIZE, 0UL);
    567     if ((unsigned)rc < _64M)
    568     {
    569         pVM->nem.s.cbVCpuMmap = (uint32_t)rc;
    570         LogRel(("NEM:   %36s: %#x (%d)\n", "KVM_GET_VCPU_MMAP_SIZE", rc, rc));
    571     }
    572     else if (rc < 0)
    573         rcRet = RTERRINFO_LOG_REL_ADD_F(pErrInfo, VERR_NEM_MISSING_FEATURE, "KVM_GET_VCPU_MMAP_SIZE failed: %d", errno);
    574     else
    575         rcRet = RTERRINFO_LOG_REL_ADD_F(pErrInfo, VERR_NEM_INIT_FAILED, "Odd KVM_GET_VCPU_MMAP_SIZE value: %#x (%d)", rc, rc);
    576 
    577     /*
    578      * Init the slot ID bitmap.
    579      */
    580     ASMBitSet(&pVM->nem.s.bmSlotIds[0], 0);         /* don't use slot 0 */
    581     if (pVM->nem.s.cMaxMemSlots < _32K)
    582         ASMBitSetRange(&pVM->nem.s.bmSlotIds[0], pVM->nem.s.cMaxMemSlots, _32K);
    583     ASMBitSet(&pVM->nem.s.bmSlotIds[0], _32K - 1);  /* don't use the last slot */
    584 
    585     return rcRet;
    586 }
     241/* Forward declarations of things called by the template. */
     242static int nemR3LnxInitSetupVm(PVM pVM, PRTERRINFO pErrInfo);
     243
     244
     245/* Instantiate the common bits we share with the x86 KVM backend. */
     246#include "NEMR3NativeTemplate-linux.cpp.h"
    587247
    588248
     
    795455
    796456
    797 /** @callback_method_impl{FNVMMEMTRENDEZVOUS}   */
    798 static DECLCALLBACK(VBOXSTRICTRC) nemR3LnxFixThreadPoke(PVM pVM, PVMCPU pVCpu, void *pvUser)
    799 {
    800     RT_NOREF(pVM, pvUser);
    801     int rc = RTThreadControlPokeSignal(pVCpu->hThread, true /*fEnable*/);
    802     AssertLogRelRC(rc);
    803     return VINF_SUCCESS;
    804 }
    805 
    806 
    807 /**
    808  * Try initialize the native API.
    809  *
    810  * This may only do part of the job, more can be done in
    811  * nemR3NativeInitAfterCPUM() and nemR3NativeInitCompleted().
    812  *
    813  * @returns VBox status code.
    814  * @param   pVM             The cross context VM structure.
    815  * @param   fFallback       Whether we're in fallback mode or use-NEM mode. In
    816  *                          the latter we'll fail if we cannot initialize.
    817  * @param   fForced         Whether the HMForced flag is set and we should
    818  *                          fail if we cannot initialize.
    819  */
    820 int nemR3NativeInit(PVM pVM, bool fFallback, bool fForced)
    821 {
    822     RT_NOREF(pVM, fFallback, fForced);
    823     /*
    824      * Some state init.
    825      */
    826     pVM->nem.s.fdKvm = -1;
    827     pVM->nem.s.fdVm  = -1;
    828     for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    829     {
    830         PNEMCPU pNemCpu = &pVM->apCpusR3[idCpu]->nem.s;
    831         pNemCpu->fdVCpu = -1;
    832     }
    833 
    834     /*
    835      * Error state.
    836      * The error message will be non-empty on failure and 'rc' will be set too.
    837      */
    838     RTERRINFOSTATIC ErrInfo;
    839     PRTERRINFO pErrInfo = RTErrInfoInitStatic(&ErrInfo);
    840 
    841     /*
    842      * Open kvm subsystem so we can issue system ioctls.
    843      */
    844     int rc;
    845     int fdKvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
    846     if (fdKvm >= 0)
    847     {
    848         pVM->nem.s.fdKvm = fdKvm;
    849 
    850         /*
    851          * Check capabilities.
    852          */
    853         rc = nemR3LnxInitCheckCapabilities(pVM, pErrInfo);
    854         if (RT_SUCCESS(rc))
    855         {
    856             /*
    857              * Create an empty VM since it is recommended we check capabilities on
    858              * the VM rather than the system descriptor.
    859              */
    860             int fdVm = ioctl(fdKvm, KVM_CREATE_VM, pVM->nem.s.cIpaBits);
    861             if (fdVm >= 0)
    862             {
    863                 pVM->nem.s.fdVm = fdVm;
    864 
    865                 /*
    866                  * Set up the VM (more on this later).
    867                  */
    868                 rc = nemR3LnxInitSetupVm(pVM, pErrInfo);
    869                 if (RT_SUCCESS(rc))
    870                 {
    871                     /*
    872                      * Set ourselves as the execution engine and make config adjustments.
    873                      */
    874                     VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_NATIVE_API);
    875                     Log(("NEM: Marked active!\n"));
    876                     PGMR3EnableNemMode(pVM);
    877 
    878                     /*
    879                      * Register release statistics
    880                      */
    881                     for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    882                     {
    883                         PNEMCPU pNemCpu = &pVM->apCpusR3[idCpu]->nem.s;
    884                         STAMR3RegisterF(pVM, &pNemCpu->StatImportOnDemand,      STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of on-demand state imports",      "/NEM/CPU%u/ImportOnDemand", idCpu);
    885                         STAMR3RegisterF(pVM, &pNemCpu->StatImportOnReturn,      STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of state imports on loop return", "/NEM/CPU%u/ImportOnReturn", idCpu);
    886                         STAMR3RegisterF(pVM, &pNemCpu->StatImportOnReturnSkipped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of skipped state imports on loop return", "/NEM/CPU%u/ImportOnReturnSkipped", idCpu);
    887                         STAMR3RegisterF(pVM, &pNemCpu->StatImportPendingInterrupt, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of times an interrupt was pending when importing from KVM", "/NEM/CPU%u/ImportPendingInterrupt", idCpu);
    888                         STAMR3RegisterF(pVM, &pNemCpu->StatExportPendingInterrupt, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of times an interrupt was pending when exporting to KVM", "/NEM/CPU%u/ExportPendingInterrupt", idCpu);
    889                         STAMR3RegisterF(pVM, &pNemCpu->StatFlushExitOnReturn,   STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of times a KVM_EXIT_IO or KVM_EXIT_MMIO was flushed before returning to EM", "/NEM/CPU%u/FlushExitOnReturn", idCpu);
    890                         STAMR3RegisterF(pVM, &pNemCpu->StatFlushExitOnReturn1Loop, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of times a KVM_EXIT_IO or KVM_EXIT_MMIO was flushed before returning to EM", "/NEM/CPU%u/FlushExitOnReturn-01-loop", idCpu);
    891                         STAMR3RegisterF(pVM, &pNemCpu->StatFlushExitOnReturn2Loops, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of times a KVM_EXIT_IO or KVM_EXIT_MMIO was flushed before returning to EM", "/NEM/CPU%u/FlushExitOnReturn-02-loops", idCpu);
    892                         STAMR3RegisterF(pVM, &pNemCpu->StatFlushExitOnReturn3Loops, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of times a KVM_EXIT_IO or KVM_EXIT_MMIO was flushed before returning to EM", "/NEM/CPU%u/FlushExitOnReturn-03-loops", idCpu);
    893                         STAMR3RegisterF(pVM, &pNemCpu->StatFlushExitOnReturn4PlusLoops, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of times a KVM_EXIT_IO or KVM_EXIT_MMIO was flushed before returning to EM", "/NEM/CPU%u/FlushExitOnReturn-04-to-7-loops", idCpu);
    894                         STAMR3RegisterF(pVM, &pNemCpu->StatQueryCpuTick,        STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TSC queries",                  "/NEM/CPU%u/QueryCpuTick", idCpu);
    895                         STAMR3RegisterF(pVM, &pNemCpu->StatExitTotal,           STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "All exits",                  "/NEM/CPU%u/Exit", idCpu);
    896                         STAMR3RegisterF(pVM, &pNemCpu->StatExitIo,              STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_IO",                "/NEM/CPU%u/Exit/Io", idCpu);
    897                         STAMR3RegisterF(pVM, &pNemCpu->StatExitMmio,            STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_MMIO",              "/NEM/CPU%u/Exit/Mmio", idCpu);
    898                         STAMR3RegisterF(pVM, &pNemCpu->StatExitIntr,            STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_INTR",              "/NEM/CPU%u/Exit/Intr", idCpu);
    899                         STAMR3RegisterF(pVM, &pNemCpu->StatExitHypercall,       STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_HYPERCALL",         "/NEM/CPU%u/Exit/Hypercall", idCpu);
    900                         STAMR3RegisterF(pVM, &pNemCpu->StatExitDebug,           STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_DEBUG",             "/NEM/CPU%u/Exit/Debug", idCpu);
    901                         STAMR3RegisterF(pVM, &pNemCpu->StatExitBusLock,         STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_BUS_LOCK",          "/NEM/CPU%u/Exit/BusLock", idCpu);
    902                         STAMR3RegisterF(pVM, &pNemCpu->StatExitInternalErrorEmulation, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_INTERNAL_ERROR/EMULATION", "/NEM/CPU%u/Exit/InternalErrorEmulation", idCpu);
    903                         STAMR3RegisterF(pVM, &pNemCpu->StatExitInternalErrorFatal,     STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_INTERNAL_ERROR/*", "/NEM/CPU%u/Exit/InternalErrorFatal", idCpu);
    904                     }
    905 
    906                     /*
    907                      * Success.
    908                      */
    909                     return VINF_SUCCESS;
    910                 }
    911                 close(fdVm);
    912                 pVM->nem.s.fdVm = -1;
    913             }
    914             else
    915                 rc = RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED, "KVM_CREATE_VM failed: %u", errno);
    916         }
    917         close(fdKvm);
    918         pVM->nem.s.fdKvm = -1;
    919     }
    920     else if (errno == EACCES)
    921         rc = RTErrInfoSet(pErrInfo, VERR_ACCESS_DENIED, "Do not have access to open /dev/kvm for reading & writing.");
    922     else if (errno == ENOENT)
    923         rc = RTErrInfoSet(pErrInfo, VERR_NOT_SUPPORTED, "KVM is not availble (/dev/kvm does not exist)");
    924     else
    925         rc = RTErrInfoSetF(pErrInfo, RTErrConvertFromErrno(errno), "Failed to open '/dev/kvm': %u", errno);
    926 
    927     /*
    928      * We only fail if in forced mode, otherwise just log the complaint and return.
    929      */
    930     Assert(RTErrInfoIsSet(pErrInfo));
    931     if (   (fForced || !fFallback)
    932         && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NATIVE_API)
    933         return VMSetError(pVM, RT_SUCCESS_NP(rc) ? VERR_NEM_NOT_AVAILABLE : rc, RT_SRC_POS, "%s", pErrInfo->pszMsg);
    934     LogRel(("NEM: Not available: %s\n", pErrInfo->pszMsg));
    935     return VINF_SUCCESS;
    936 }
    937 
    938 
    939 /**
    940  * This is called after CPUMR3Init is done.
    941  *
    942  * @returns VBox status code.
    943  * @param   pVM                 The VM handle..
    944  */
    945 int nemR3NativeInitAfterCPUM(PVM pVM)
    946 {
    947     /*
    948      * Validate sanity.
    949      */
    950     AssertReturn(pVM->nem.s.fdKvm >= 0, VERR_WRONG_ORDER);
    951     AssertReturn(pVM->nem.s.fdVm >= 0, VERR_WRONG_ORDER);
    952     AssertReturn(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API, VERR_WRONG_ORDER);
    953 
    954     /** @todo */
    955 
    956     return VINF_SUCCESS;
    957 }
    958 
    959 
    960457int nemR3NativeInitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
    961458{
     
    968465
    969466    return VINF_SUCCESS;
    970 }
    971 
    972 
    973 int nemR3NativeTerm(PVM pVM)
    974 {
    975     /*
    976      * Per-cpu data
    977      */
    978     for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    979     {
    980         PVMCPU pVCpu = pVM->apCpusR3[idCpu];
    981 
    982         if (pVCpu->nem.s.fdVCpu != -1)
    983         {
    984             close(pVCpu->nem.s.fdVCpu);
    985             pVCpu->nem.s.fdVCpu = -1;
    986         }
    987         if (pVCpu->nem.s.pRun)
    988         {
    989             munmap(pVCpu->nem.s.pRun, pVM->nem.s.cbVCpuMmap);
    990             pVCpu->nem.s.pRun = NULL;
    991         }
    992     }
    993 
    994     /*
    995      * Global data.
    996      */
    997     if (pVM->nem.s.fdVm != -1)
    998     {
    999         close(pVM->nem.s.fdVm);
    1000         pVM->nem.s.fdVm = -1;
    1001     }
    1002 
    1003     if (pVM->nem.s.fdKvm != -1)
    1004     {
    1005         close(pVM->nem.s.fdKvm);
    1006         pVM->nem.s.fdKvm = -1;
    1007     }
    1008     return VINF_SUCCESS;
    1009 }
    1010 
    1011 
    1012 /**
    1013  * VM reset notification.
    1014  *
    1015  * @param   pVM         The cross context VM structure.
    1016  */
    1017 void nemR3NativeReset(PVM pVM)
    1018 {
    1019     RT_NOREF(pVM);
    1020 }
    1021 
    1022 
    1023 /**
    1024  * Reset CPU due to INIT IPI or hot (un)plugging.
    1025  *
    1026  * @param   pVCpu       The cross context virtual CPU structure of the CPU being
    1027  *                      reset.
    1028  * @param   fInitIpi    Whether this is the INIT IPI or hot (un)plugging case.
    1029  */
    1030 void nemR3NativeResetCpu(PVMCPU pVCpu, bool fInitIpi)
    1031 {
    1032     RT_NOREF(pVCpu, fInitIpi);
    1033 }
    1034 
    1035 
    1036 /*********************************************************************************************************************************
    1037 *   Memory management                                                                                                            *
    1038 *********************************************************************************************************************************/
    1039 
    1040 
/**
 * Allocates a memory slot ID.
 *
 * Slot IDs live in the pVM->nem.s.bmSlotIds bitmap (set bit = in use).  The
 * 'idx > 0' checks mean slot 0 is never handed out - callers such as
 * NEMR3NotifyPhysMmioExUnmap assert idSlot > 0, i.e. 0 is treated as invalid.
 * Allocation is lock free: a clear bit is located first, then claimed with an
 * atomic test-and-set, so a concurrent claimant simply makes us retry/fail.
 *
 * @returns Slot ID on success, UINT16_MAX on failure.
 * @param   pVM     The cross context VM structure.
 */
static uint16_t nemR3LnxMemSlotIdAlloc(PVM pVM)
{
    /* Use the hint first. */
    uint16_t idHint = pVM->nem.s.idPrevSlot;
    if (idHint < _32K - 1)
    {
        int32_t idx = ASMBitNextClear(&pVM->nem.s.bmSlotIds, _32K, idHint);
        Assert(idx < _32K);
        /* Claim the bit atomically; if someone else got it first, fall through
           to the full scan below. */
        if (idx > 0 && !ASMAtomicBitTestAndSet(&pVM->nem.s.bmSlotIds, idx))
            return pVM->nem.s.idPrevSlot = (uint16_t)idx;
    }

    /*
     * Search the whole map from the start.
     */
    int32_t idx = ASMBitFirstClear(&pVM->nem.s.bmSlotIds, _32K);
    Assert(idx < _32K);
    if (idx > 0 && !ASMAtomicBitTestAndSet(&pVM->nem.s.bmSlotIds, idx))
        return pVM->nem.s.idPrevSlot = (uint16_t)idx;

    Assert(idx < 0 /*shouldn't trigger unless there is a race */);
    return UINT16_MAX; /* caller is expected to assert. */
}
    1069 
    1070 
    1071 /**
    1072  * Frees a memory slot ID
    1073  */
    1074 static void nemR3LnxMemSlotIdFree(PVM pVM, uint16_t idSlot)
    1075 {
    1076     if (RT_LIKELY(idSlot < _32K && ASMAtomicBitTestAndClear(&pVM->nem.s.bmSlotIds, idSlot)))
    1077     { /*likely*/ }
    1078     else
    1079         AssertMsgFailed(("idSlot=%u (%#x)\n", idSlot, idSlot));
    1080 }
    1081 
    1082 
    1083 
    1084 VMMR3_INT_DECL(int) NEMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvR3,
    1085                                                uint8_t *pu2State, uint32_t *puNemRange)
    1086 {
    1087     uint16_t idSlot = nemR3LnxMemSlotIdAlloc(pVM);
    1088     AssertLogRelReturn(idSlot < _32K, VERR_NEM_MAP_PAGES_FAILED);
    1089 
    1090     Log5(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p pu2State=%p (%d) puNemRange=%p (%d) - idSlot=%#x\n",
    1091           GCPhys, cb, pvR3, pu2State, pu2State, puNemRange, *puNemRange, idSlot));
    1092 
    1093     struct kvm_userspace_memory_region Region;
    1094     Region.slot             = idSlot;
    1095     Region.flags            = 0;
    1096     Region.guest_phys_addr  = GCPhys;
    1097     Region.memory_size      = cb;
    1098     Region.userspace_addr   = (uintptr_t)pvR3;
    1099 
    1100     int rc = ioctl(pVM->nem.s.fdVm, KVM_SET_USER_MEMORY_REGION, &Region);
    1101     if (rc == 0)
    1102     {
    1103         *pu2State   = 0;
    1104         *puNemRange = idSlot;
    1105         return VINF_SUCCESS;
    1106     }
    1107 
    1108     LogRel(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p, idSlot=%#x failed: %u/%u\n", GCPhys, cb, pvR3, idSlot, rc, errno));
    1109     nemR3LnxMemSlotIdFree(pVM, idSlot);
    1110     return VERR_NEM_MAP_PAGES_FAILED;
    1111 }
    1112 
    1113 
    1114 VMMR3_INT_DECL(bool) NEMR3IsMmio2DirtyPageTrackingSupported(PVM pVM)
    1115 {
    1116     RT_NOREF(pVM);
    1117     return true;
    1118 }
    1119 
    1120 
/**
 * Early MMIO / MMIO2 mapping notification.
 *
 * MMIO2 ranges (NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2, i.e. with a ring-3 backing)
 * are registered with KVM as a user memory slot, optionally with dirty page
 * logging.  No KVM slot is registered for plain MMIO.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      Start of the guest physical range.
 * @param   cb          Size of the range in bytes.
 * @param   fFlags      NEM_NOTIFY_PHYS_MMIO_EX_F_XXX.
 * @param   pvRam       RAM being replaced, if F_REPLACE (currently unsupported).
 * @param   pvMmio2     Ring-3 mapping of the MMIO2 backing, if any.
 * @param   pu2State    Where to return the NEM page state (set to 0).
 * @param   puNemRange  Where to return the KVM slot ID (UINT32_MAX for plain
 *                      MMIO).
 */
VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
                                                  void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
{
    Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p (%d) puNemRange=%p (%#x)\n",
          GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, *pu2State, puNemRange, puNemRange ? *puNemRange : UINT32_MAX));
    RT_NOREF(pvRam);

    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
    {
        /** @todo implement splitting and whatnot of ranges if we want to be 100%
         *        conforming (just modify RAM registrations in MM.cpp to test). */
        AssertLogRelMsgFailedReturn(("%RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p\n", GCPhys, cb, fFlags, pvRam, pvMmio2),
                                    VERR_NEM_MAP_PAGES_FAILED);
    }

    /*
     * Register MMIO2.
     */
    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2)
    {
        AssertReturn(pvMmio2, VERR_NEM_MAP_PAGES_FAILED);
        AssertReturn(puNemRange, VERR_NEM_MAP_PAGES_FAILED);

        uint16_t idSlot = nemR3LnxMemSlotIdAlloc(pVM);
        AssertLogRelReturn(idSlot < _32K, VERR_NEM_MAP_PAGES_FAILED);

        struct kvm_userspace_memory_region Region;
        Region.slot             = idSlot;
        /* KVM_MEM_LOG_DIRTY_PAGES makes KVM maintain the dirty bitmap queried
           via KVM_GET_DIRTY_LOG (see NEMR3PhysMmio2QueryAndResetDirtyBitmap). */
        Region.flags            = fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES ? KVM_MEM_LOG_DIRTY_PAGES : 0;
        Region.guest_phys_addr  = GCPhys;
        Region.memory_size      = cb;
        Region.userspace_addr   = (uintptr_t)pvMmio2;

        int rc = ioctl(pVM->nem.s.fdVm, KVM_SET_USER_MEMORY_REGION, &Region);
        if (rc == 0)
        {
            *pu2State   = 0;
            *puNemRange = idSlot;
            Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvMmio2=%p - idSlot=%#x\n",
                  GCPhys, cb, fFlags, pvMmio2, idSlot));
            return VINF_SUCCESS;
        }

        nemR3LnxMemSlotIdFree(pVM, idSlot);
        /* NOTE(review): 'errno, rc' here is the reverse order of the otherwise
           identical '%u/%u' message in NEMR3NotifyPhysRamRegister - confirm
           which order is intended and make them consistent. */
        AssertLogRelMsgFailedReturn(("%RGp LB %RGp fFlags=%#x, pvMmio2=%p, idSlot=%#x failed: %u/%u\n",
                                     GCPhys, cb, fFlags, pvMmio2, idSlot, errno, rc),
                                    VERR_NEM_MAP_PAGES_FAILED);
    }

    /* MMIO, don't care. */
    *pu2State   = 0;
    *puNemRange = UINT32_MAX;
    return VINF_SUCCESS;
}
    1175 
    1176 
    1177 VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
    1178                                                  void *pvRam, void *pvMmio2, uint32_t *puNemRange)
    1179 {
    1180     RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, puNemRange);
    1181     return VINF_SUCCESS;
    1182 }
    1183 
    1184 
    1185 VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExUnmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, void *pvRam,
    1186                                                void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
    1187 {
    1188     Log5(("NEMR3NotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p puNemRange=%p (%#x)\n",
    1189           GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, puNemRange, *puNemRange));
    1190     RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State);
    1191 
    1192     if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
    1193     {
    1194         /** @todo implement splitting and whatnot of ranges if we want to be 100%
    1195          *        conforming (just modify RAM registrations in MM.cpp to test). */
    1196         AssertLogRelMsgFailedReturn(("%RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p\n", GCPhys, cb, fFlags, pvRam, pvMmio2),
    1197                                     VERR_NEM_UNMAP_PAGES_FAILED);
    1198     }
    1199 
    1200     if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2)
    1201     {
    1202         uint32_t const idSlot = *puNemRange;
    1203         AssertReturn(idSlot > 0 && idSlot < _32K, VERR_NEM_IPE_4);
    1204         AssertReturn(ASMBitTest(pVM->nem.s.bmSlotIds, idSlot), VERR_NEM_IPE_4);
    1205 
    1206         struct kvm_userspace_memory_region Region;
    1207         Region.slot             = idSlot;
    1208         Region.flags            = 0;
    1209         Region.guest_phys_addr  = GCPhys;
    1210         Region.memory_size      = 0;    /* this deregisters it. */
    1211         Region.userspace_addr   = (uintptr_t)pvMmio2;
    1212 
    1213         int rc = ioctl(pVM->nem.s.fdVm, KVM_SET_USER_MEMORY_REGION, &Region);
    1214         if (rc == 0)
    1215         {
    1216             if (pu2State)
    1217                 *pu2State = 0;
    1218             *puNemRange = UINT32_MAX;
    1219             nemR3LnxMemSlotIdFree(pVM, idSlot);
    1220             return VINF_SUCCESS;
    1221         }
    1222 
    1223         AssertLogRelMsgFailedReturn(("%RGp LB %RGp fFlags=%#x, pvMmio2=%p, idSlot=%#x failed: %u/%u\n",
    1224                                      GCPhys, cb, fFlags, pvMmio2, idSlot, errno, rc),
    1225                                     VERR_NEM_UNMAP_PAGES_FAILED);
    1226     }
    1227 
    1228     if (pu2State)
    1229         *pu2State = UINT8_MAX;
    1230     return VINF_SUCCESS;
    1231 }
    1232 
    1233 
/**
 * Queries and resets the dirty page bitmap of an MMIO2 range.
 *
 * Thin wrapper around the KVM_GET_DIRTY_LOG ioctl, which fills in the bitmap
 * for the slot and resets the kernel-side dirty state.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      Start of the range (unused; the slot ID identifies it).
 * @param   cb          Size of the range (only used in the failure message).
 * @param   uNemRange   The KVM slot ID returned at registration time.
 * @param   pvBitmap    Where KVM writes the dirty bitmap.
 * @param   cbBitmap    Size of the bitmap.  NOTE(review): not validated here;
 *                      KVM writes based on the slot size - confirm callers
 *                      always supply a large enough buffer.
 */
VMMR3_INT_DECL(int) NEMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t uNemRange,
                                                           void *pvBitmap, size_t cbBitmap)
{
    /* Slot 0 is never allocated, so a zero range value is invalid here. */
    AssertReturn(uNemRange > 0 && uNemRange < _32K, VERR_NEM_IPE_4);
    AssertReturn(ASMBitTest(pVM->nem.s.bmSlotIds, uNemRange), VERR_NEM_IPE_4);

    RT_NOREF(GCPhys, cbBitmap);

    struct kvm_dirty_log DirtyLog;
    DirtyLog.slot         = uNemRange;
    DirtyLog.padding1     = 0;
    DirtyLog.dirty_bitmap = pvBitmap;

    int rc = ioctl(pVM->nem.s.fdVm, KVM_GET_DIRTY_LOG, &DirtyLog);
    AssertLogRelMsgReturn(rc == 0, ("%RGp LB %RGp idSlot=%#x failed: %u/%u\n", GCPhys, cb, uNemRange, errno, rc),
                          VERR_NEM_QUERY_DIRTY_BITMAP_FAILED);

    return VINF_SUCCESS;
}
    1253 
    1254 
/**
 * Early ROM registration notification.
 *
 * Only reserves a KVM slot ID at this point; the actual
 * KVM_SET_USER_MEMORY_REGION call happens in NEMR3NotifyPhysRomRegisterLate
 * once the page backing (pvPages) is valid.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      Start of the guest physical range.
 * @param   cb          Size of the range in bytes.
 * @param   pvPages     Ring-3 mapping (typically not yet valid here).
 * @param   fFlags      NEM_NOTIFY_PHYS_ROM_F_XXX.
 * @param   pu2State    Where to return the NEM page state.
 * @param   puNemRange  Where to return the reserved slot ID.
 */
VMMR3_INT_DECL(int)  NEMR3NotifyPhysRomRegisterEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages, uint32_t fFlags,
                                                     uint8_t *pu2State, uint32_t *puNemRange)
{
    Log5(("NEMR3NotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags));
    /* Mark invalid up front so the early-return paths below leave a defined state. */
    *pu2State = UINT8_MAX;

    /* Don't support putting ROM where there is already RAM.  For
       now just shuffle the registrations till it works... */
    AssertLogRelMsgReturn(!(fFlags & NEM_NOTIFY_PHYS_ROM_F_REPLACE), ("%RGp LB %RGp fFlags=%#x\n", GCPhys, cb, fFlags),
                          VERR_NEM_MAP_PAGES_FAILED);

    /** @todo figure out how to do shadow ROMs.   */

    /*
     * We only allocate a slot number here in case we need to use it to
     * fend off physical handler fun.
     */
    uint16_t idSlot = nemR3LnxMemSlotIdAlloc(pVM);
    AssertLogRelReturn(idSlot < _32K, VERR_NEM_MAP_PAGES_FAILED);

    *pu2State   = 0;
    *puNemRange = idSlot;
    Log5(("NEMR3NotifyPhysRomRegisterEarly: %RGp LB %RGp fFlags=%#x pvPages=%p - idSlot=%#x\n",
          GCPhys, cb, fFlags, pvPages, idSlot));
    RT_NOREF(GCPhys, cb, fFlags, pvPages);
    return VINF_SUCCESS;
}
    1282 
    1283 
    1284 VMMR3_INT_DECL(int)  NEMR3NotifyPhysRomRegisterLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages,
    1285                                                     uint32_t fFlags, uint8_t *pu2State, uint32_t *puNemRange)
    1286 {
    1287     Log5(("NEMR3NotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n",
    1288           GCPhys, cb, pvPages, fFlags, pu2State, *pu2State, puNemRange, *puNemRange));
    1289 
    1290     AssertPtrReturn(pvPages, VERR_NEM_IPE_5);
    1291 
    1292     uint32_t const idSlot = *puNemRange;
    1293     AssertReturn(idSlot > 0 && idSlot < _32K, VERR_NEM_IPE_4);
    1294     AssertReturn(ASMBitTest(pVM->nem.s.bmSlotIds, idSlot), VERR_NEM_IPE_4);
    1295 
    1296     *pu2State = UINT8_MAX;
    1297 
    1298     /*
    1299      * Do the actual setting of the user pages here now that we've
    1300      * got a valid pvPages (typically isn't available during the early
    1301      * notification, unless we're replacing RAM).
    1302      */
    1303     struct kvm_userspace_memory_region Region;
    1304     Region.slot             = idSlot;
    1305     Region.flags            = 0;
    1306     Region.guest_phys_addr  = GCPhys;
    1307     Region.memory_size      = cb;
    1308     Region.userspace_addr   = (uintptr_t)pvPages;
    1309 
    1310     int rc = ioctl(pVM->nem.s.fdVm, KVM_SET_USER_MEMORY_REGION, &Region);
    1311     if (rc == 0)
    1312     {
    1313         *pu2State   = 0;
    1314         Log5(("NEMR3NotifyPhysRomRegisterEarly: %RGp LB %RGp fFlags=%#x pvPages=%p - idSlot=%#x\n",
    1315               GCPhys, cb, fFlags, pvPages, idSlot));
    1316         return VINF_SUCCESS;
    1317     }
    1318     AssertLogRelMsgFailedReturn(("%RGp LB %RGp fFlags=%#x, pvPages=%p, idSlot=%#x failed: %u/%u\n",
    1319                                  GCPhys, cb, fFlags, pvPages, idSlot, errno, rc),
    1320                                 VERR_NEM_MAP_PAGES_FAILED);
    1321 }
    1322 
    1323 
    1324 VMMR3_INT_DECL(void) NEMR3NotifySetA20(PVMCPU pVCpu, bool fEnabled)
    1325 {
    1326     Log(("nemR3NativeNotifySetA20: fEnabled=%RTbool\n", fEnabled));
    1327     Assert(VM_IS_NEM_ENABLED(pVCpu->CTX_SUFF(pVM)));
    1328     RT_NOREF(pVCpu, fEnabled);
    1329 }
    1330 
    1331 
    1332 VMM_INT_DECL(void) NEMHCNotifyHandlerPhysicalDeregister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
    1333                                                         RTR3PTR pvMemR3, uint8_t *pu2State)
    1334 {
    1335     Log5(("NEMHCNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d pvMemR3=%p pu2State=%p (%d)\n",
    1336           GCPhys, cb, enmKind, pvMemR3, pu2State, *pu2State));
    1337 
    1338     *pu2State = UINT8_MAX;
    1339     RT_NOREF(pVM, enmKind, GCPhys, cb, pvMemR3);
    1340 }
    1341 
    1342 
    1343 void nemHCNativeNotifyHandlerPhysicalRegister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
    1344 {
    1345     Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
    1346     RT_NOREF(pVM, enmKind, GCPhys, cb);
    1347 }
    1348 
    1349 
    1350 void nemHCNativeNotifyHandlerPhysicalModify(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
    1351                                             RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
    1352 {
    1353     Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
    1354           GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
    1355     RT_NOREF(pVM, enmKind, GCPhysOld, GCPhysNew, cb, fRestoreAsRAM);
    1356 }
    1357 
    1358 
    1359 int nemHCNativeNotifyPhysPageAllocated(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
    1360                                        PGMPAGETYPE enmType, uint8_t *pu2State)
    1361 {
    1362     Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
    1363           GCPhys, HCPhys, fPageProt, enmType, *pu2State));
    1364     RT_NOREF(pVM, GCPhys, HCPhys, fPageProt, enmType, pu2State);
    1365     return VINF_SUCCESS;
    1366 }
    1367 
    1368 
    1369 VMM_INT_DECL(void) NEMHCNotifyPhysPageProtChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, RTR3PTR pvR3, uint32_t fPageProt,
    1370                                                   PGMPAGETYPE enmType, uint8_t *pu2State)
    1371 {
    1372     Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
    1373           GCPhys, HCPhys, fPageProt, enmType, *pu2State));
    1374     Assert(VM_IS_NEM_ENABLED(pVM));
    1375     RT_NOREF(pVM, GCPhys, HCPhys, pvR3, fPageProt, enmType, pu2State);
    1376 
    1377 }
    1378 
    1379 
    1380 VMM_INT_DECL(void) NEMHCNotifyPhysPageChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
    1381                                               RTR3PTR pvNewR3, uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
    1382 {
    1383     Log5(("nemHCNativeNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp pvNewR3=%p fPageProt=%#x enmType=%d *pu2State=%d\n",
    1384           GCPhys, HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType, *pu2State));
    1385     Assert(VM_IS_NEM_ENABLED(pVM));
    1386     RT_NOREF(pVM, GCPhys, HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType, pu2State);
    1387467}
    1388468
     
    23971477
    23981478
    2399 /** @page pg_nem_linux NEM/linux - Native Execution Manager, Linux.
     1479/** @page pg_nem_linux_armv8 NEM/linux - Native Execution Manager, Linux.
    24001480 *
    24011481 * This is using KVM.
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette