Changeset 104740 in vbox for trunk/src/VBox/VMM/VMMR3

- Timestamp:
  May 20, 2024 6:33:12 PM (7 months ago)
- File:
  1 edited

Legend:
- Unmodified
- Added
- Removed
trunk/src/VBox/VMM/VMMR3/NEMR3Native-linux.cpp (r104739 → r104740)

Unmodified (old 53-54 / new 53-54):

#include <linux/kvm.h>

Removed (old 55-415):

/*
 * Supply stuff missing from the kvm.h on the build box.
 */
#ifndef KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON /* since 5.4 */
# define KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON 4
#endif


/**
 * Worker for nemR3NativeInit that gets the hypervisor capabilities.
 *
 * @returns VBox status code.
 * @param   pVM       The cross context VM structure.
 * @param   pErrInfo  Where to always return error info.
 */
static int nemR3LnxInitCheckCapabilities(PVM pVM, PRTERRINFO pErrInfo)
{
    AssertReturn(pVM->nem.s.fdVm != -1, RTErrInfoSet(pErrInfo, VERR_WRONG_ORDER, "Wrong initialization order"));

    /*
     * Capabilities.
     */
    static const struct
    {
        const char *pszName;
        int         iCap;
        uint32_t    offNem      : 24;
        uint32_t    cbNem       : 3;
        uint32_t    fReqNonZero : 1;
        uint32_t    uReserved   : 4;
    } s_aCaps[] =
    {
#define CAP_ENTRY__L(a_Define)           { #a_Define,            a_Define,            UINT32_C(0x00ffffff), 0, 0, 0 }
#define CAP_ENTRY__S(a_Define, a_Member) { #a_Define,            a_Define, RT_UOFFSETOF(NEM, a_Member), RT_SIZEOFMEMB(NEM, a_Member), 0, 0 }
#define CAP_ENTRY_MS(a_Define, a_Member) { #a_Define,            a_Define, RT_UOFFSETOF(NEM, a_Member), RT_SIZEOFMEMB(NEM, a_Member), 1, 0 }
#define CAP_ENTRY_ML(a_Define)           { #a_Define,            a_Define,            UINT32_C(0x00ffffff), 0, 1, 0 }
#define CAP_ENTRY__U(a_Number)           { "KVM_CAP_" #a_Number, a_Number,            UINT32_C(0x00ffffff), 0, 0, 0 }
#define CAP_ENTRY_MU(a_Number)           { "KVM_CAP_" #a_Number, a_Number,            UINT32_C(0x00ffffff), 0, 1, 0 }

        CAP_ENTRY__L(KVM_CAP_IRQCHIP),                      /* 0 */
        CAP_ENTRY_ML(KVM_CAP_HLT),
        CAP_ENTRY__L(KVM_CAP_MMU_SHADOW_CACHE_CONTROL),
        CAP_ENTRY_ML(KVM_CAP_USER_MEMORY),
        CAP_ENTRY__L(KVM_CAP_SET_TSS_ADDR),
        CAP_ENTRY__U(5),
        CAP_ENTRY__L(KVM_CAP_VAPIC),
        CAP_ENTRY__L(KVM_CAP_EXT_CPUID),
        CAP_ENTRY__L(KVM_CAP_CLOCKSOURCE),
        CAP_ENTRY__L(KVM_CAP_NR_VCPUS),
        CAP_ENTRY_MS(KVM_CAP_NR_MEMSLOTS, cMaxMemSlots),    /* 10 */
        CAP_ENTRY__L(KVM_CAP_PIT),
        CAP_ENTRY__L(KVM_CAP_NOP_IO_DELAY),
        CAP_ENTRY__L(KVM_CAP_PV_MMU),
        CAP_ENTRY__L(KVM_CAP_MP_STATE),
        CAP_ENTRY__L(KVM_CAP_COALESCED_MMIO),
        CAP_ENTRY__L(KVM_CAP_SYNC_MMU),
        CAP_ENTRY__U(17),
        CAP_ENTRY__L(KVM_CAP_IOMMU),
        CAP_ENTRY__U(19),                                   /* Buggy KVM_CAP_JOIN_MEMORY_REGIONS? */
        CAP_ENTRY__U(20),                                   /* Non-working KVM_CAP_DESTROY_MEMORY_REGION? */
        CAP_ENTRY__L(KVM_CAP_DESTROY_MEMORY_REGION_WORKS),  /* 21 */
        CAP_ENTRY__L(KVM_CAP_USER_NMI),
#ifdef __KVM_HAVE_GUEST_DEBUG
        CAP_ENTRY__L(KVM_CAP_SET_GUEST_DEBUG),
#endif
#ifdef __KVM_HAVE_PIT
        CAP_ENTRY__L(KVM_CAP_REINJECT_CONTROL),
#endif
        CAP_ENTRY__L(KVM_CAP_IRQ_ROUTING),
        CAP_ENTRY__L(KVM_CAP_IRQ_INJECT_STATUS),
        CAP_ENTRY__U(27),
        CAP_ENTRY__U(28),
        CAP_ENTRY__L(KVM_CAP_ASSIGN_DEV_IRQ),
        CAP_ENTRY__L(KVM_CAP_JOIN_MEMORY_REGIONS_WORKS),    /* 30 */
#ifdef __KVM_HAVE_MCE
        CAP_ENTRY__L(KVM_CAP_MCE),
#endif
        CAP_ENTRY__L(KVM_CAP_IRQFD),
#ifdef __KVM_HAVE_PIT
        CAP_ENTRY__L(KVM_CAP_PIT2),
#endif
        CAP_ENTRY__L(KVM_CAP_SET_BOOT_CPU_ID),
#ifdef __KVM_HAVE_PIT_STATE2
        CAP_ENTRY__L(KVM_CAP_PIT_STATE2),
#endif
        CAP_ENTRY__L(KVM_CAP_IOEVENTFD),
        CAP_ENTRY__L(KVM_CAP_SET_IDENTITY_MAP_ADDR),
#ifdef __KVM_HAVE_XEN_HVM
        CAP_ENTRY__L(KVM_CAP_XEN_HVM),
#endif
        CAP_ENTRY_ML(KVM_CAP_ADJUST_CLOCK),
        CAP_ENTRY__L(KVM_CAP_INTERNAL_ERROR_DATA),          /* 40 */
#ifdef __KVM_HAVE_VCPU_EVENTS
        CAP_ENTRY_ML(KVM_CAP_VCPU_EVENTS),
#else
        CAP_ENTRY_MU(41),
#endif
        CAP_ENTRY__L(KVM_CAP_S390_PSW),
        CAP_ENTRY__L(KVM_CAP_PPC_SEGSTATE),
        CAP_ENTRY__L(KVM_CAP_HYPERV),
        CAP_ENTRY__L(KVM_CAP_HYPERV_VAPIC),
        CAP_ENTRY__L(KVM_CAP_HYPERV_SPIN),
        CAP_ENTRY__L(KVM_CAP_PCI_SEGMENT),
        CAP_ENTRY__L(KVM_CAP_PPC_PAIRED_SINGLES),
        CAP_ENTRY__L(KVM_CAP_INTR_SHADOW),
#ifdef __KVM_HAVE_DEBUGREGS
        CAP_ENTRY__L(KVM_CAP_DEBUGREGS),                    /* 50 */
#endif
        CAP_ENTRY__S(KVM_CAP_X86_ROBUST_SINGLESTEP, fRobustSingleStep),
        CAP_ENTRY__L(KVM_CAP_PPC_OSI),
        CAP_ENTRY__L(KVM_CAP_PPC_UNSET_IRQ),
        CAP_ENTRY__L(KVM_CAP_ENABLE_CAP),
#ifdef __KVM_HAVE_XSAVE
        CAP_ENTRY_ML(KVM_CAP_XSAVE),
#else
        CAP_ENTRY_MU(55),
#endif
#ifdef __KVM_HAVE_XCRS
        CAP_ENTRY_ML(KVM_CAP_XCRS),
#else
        CAP_ENTRY_MU(56),
#endif
        CAP_ENTRY__L(KVM_CAP_PPC_GET_PVINFO),
        CAP_ENTRY__L(KVM_CAP_PPC_IRQ_LEVEL),
        CAP_ENTRY__L(KVM_CAP_ASYNC_PF),
        CAP_ENTRY__L(KVM_CAP_TSC_CONTROL),                  /* 60 */
        CAP_ENTRY__L(KVM_CAP_GET_TSC_KHZ),
        CAP_ENTRY__L(KVM_CAP_PPC_BOOKE_SREGS),
        CAP_ENTRY__L(KVM_CAP_SPAPR_TCE),
        CAP_ENTRY__L(KVM_CAP_PPC_SMT),
        CAP_ENTRY__L(KVM_CAP_PPC_RMA),
        CAP_ENTRY__L(KVM_CAP_MAX_VCPUS),
        CAP_ENTRY__L(KVM_CAP_PPC_HIOR),
        CAP_ENTRY__L(KVM_CAP_PPC_PAPR),
        CAP_ENTRY__L(KVM_CAP_SW_TLB),
        CAP_ENTRY__L(KVM_CAP_ONE_REG),                      /* 70 */
        CAP_ENTRY__L(KVM_CAP_S390_GMAP),
        CAP_ENTRY__L(KVM_CAP_TSC_DEADLINE_TIMER),
        CAP_ENTRY__L(KVM_CAP_S390_UCONTROL),
        CAP_ENTRY__L(KVM_CAP_SYNC_REGS),
        CAP_ENTRY__L(KVM_CAP_PCI_2_3),
        CAP_ENTRY__L(KVM_CAP_KVMCLOCK_CTRL),
        CAP_ENTRY__L(KVM_CAP_SIGNAL_MSI),
        CAP_ENTRY__L(KVM_CAP_PPC_GET_SMMU_INFO),
        CAP_ENTRY__L(KVM_CAP_S390_COW),
        CAP_ENTRY__L(KVM_CAP_PPC_ALLOC_HTAB),               /* 80 */
        CAP_ENTRY__L(KVM_CAP_READONLY_MEM),
        CAP_ENTRY__L(KVM_CAP_IRQFD_RESAMPLE),
        CAP_ENTRY__L(KVM_CAP_PPC_BOOKE_WATCHDOG),
        CAP_ENTRY__L(KVM_CAP_PPC_HTAB_FD),
        CAP_ENTRY__L(KVM_CAP_S390_CSS_SUPPORT),
        CAP_ENTRY__L(KVM_CAP_PPC_EPR),
        CAP_ENTRY__L(KVM_CAP_ARM_PSCI),
        CAP_ENTRY__L(KVM_CAP_ARM_SET_DEVICE_ADDR),
        CAP_ENTRY__L(KVM_CAP_DEVICE_CTRL),
        CAP_ENTRY__L(KVM_CAP_IRQ_MPIC),                     /* 90 */
        CAP_ENTRY__L(KVM_CAP_PPC_RTAS),
        CAP_ENTRY__L(KVM_CAP_IRQ_XICS),
        CAP_ENTRY__L(KVM_CAP_ARM_EL1_32BIT),
        CAP_ENTRY__L(KVM_CAP_SPAPR_MULTITCE),
        CAP_ENTRY__L(KVM_CAP_EXT_EMUL_CPUID),
        CAP_ENTRY__L(KVM_CAP_HYPERV_TIME),
        CAP_ENTRY__L(KVM_CAP_IOAPIC_POLARITY_IGNORED),
        CAP_ENTRY__L(KVM_CAP_ENABLE_CAP_VM),
        CAP_ENTRY__L(KVM_CAP_S390_IRQCHIP),
        CAP_ENTRY__L(KVM_CAP_IOEVENTFD_NO_LENGTH),          /* 100 */
        CAP_ENTRY__L(KVM_CAP_VM_ATTRIBUTES),
        CAP_ENTRY__L(KVM_CAP_ARM_PSCI_0_2),
        CAP_ENTRY__L(KVM_CAP_PPC_FIXUP_HCALL),
        CAP_ENTRY__L(KVM_CAP_PPC_ENABLE_HCALL),
        CAP_ENTRY__L(KVM_CAP_CHECK_EXTENSION_VM),
        CAP_ENTRY__L(KVM_CAP_S390_USER_SIGP),
        CAP_ENTRY__L(KVM_CAP_S390_VECTOR_REGISTERS),
        CAP_ENTRY__L(KVM_CAP_S390_MEM_OP),
        CAP_ENTRY__L(KVM_CAP_S390_USER_STSI),
        CAP_ENTRY__L(KVM_CAP_S390_SKEYS),                   /* 110 */
        CAP_ENTRY__L(KVM_CAP_MIPS_FPU),
        CAP_ENTRY__L(KVM_CAP_MIPS_MSA),
        CAP_ENTRY__L(KVM_CAP_S390_INJECT_IRQ),
        CAP_ENTRY__L(KVM_CAP_S390_IRQ_STATE),
        CAP_ENTRY__L(KVM_CAP_PPC_HWRNG),
        CAP_ENTRY__L(KVM_CAP_DISABLE_QUIRKS),
        CAP_ENTRY__L(KVM_CAP_X86_SMM),
        CAP_ENTRY__L(KVM_CAP_MULTI_ADDRESS_SPACE),
        CAP_ENTRY__L(KVM_CAP_GUEST_DEBUG_HW_BPS),
        CAP_ENTRY__L(KVM_CAP_GUEST_DEBUG_HW_WPS),           /* 120 */
        CAP_ENTRY__L(KVM_CAP_SPLIT_IRQCHIP),
        CAP_ENTRY__L(KVM_CAP_IOEVENTFD_ANY_LENGTH),
        CAP_ENTRY__L(KVM_CAP_HYPERV_SYNIC),
        CAP_ENTRY__L(KVM_CAP_S390_RI),
        CAP_ENTRY__L(KVM_CAP_SPAPR_TCE_64),
        CAP_ENTRY__L(KVM_CAP_ARM_PMU_V3),
        CAP_ENTRY__L(KVM_CAP_VCPU_ATTRIBUTES),
        CAP_ENTRY__L(KVM_CAP_MAX_VCPU_ID),
        CAP_ENTRY__L(KVM_CAP_X2APIC_API),
        CAP_ENTRY__L(KVM_CAP_S390_USER_INSTR0),             /* 130 */
        CAP_ENTRY__L(KVM_CAP_MSI_DEVID),
        CAP_ENTRY__L(KVM_CAP_PPC_HTM),
        CAP_ENTRY__L(KVM_CAP_SPAPR_RESIZE_HPT),
        CAP_ENTRY__L(KVM_CAP_PPC_MMU_RADIX),
        CAP_ENTRY__L(KVM_CAP_PPC_MMU_HASH_V3),
        CAP_ENTRY__L(KVM_CAP_IMMEDIATE_EXIT),
        CAP_ENTRY__L(KVM_CAP_MIPS_VZ),
        CAP_ENTRY__L(KVM_CAP_MIPS_TE),
        CAP_ENTRY__L(KVM_CAP_MIPS_64BIT),
        CAP_ENTRY__L(KVM_CAP_S390_GS),                      /* 140 */
        CAP_ENTRY__L(KVM_CAP_S390_AIS),
        CAP_ENTRY__L(KVM_CAP_SPAPR_TCE_VFIO),
        CAP_ENTRY__L(KVM_CAP_X86_DISABLE_EXITS),
        CAP_ENTRY__L(KVM_CAP_ARM_USER_IRQ),
        CAP_ENTRY__L(KVM_CAP_S390_CMMA_MIGRATION),
        CAP_ENTRY__L(KVM_CAP_PPC_FWNMI),
        CAP_ENTRY__L(KVM_CAP_PPC_SMT_POSSIBLE),
        CAP_ENTRY__L(KVM_CAP_HYPERV_SYNIC2),
        CAP_ENTRY__L(KVM_CAP_HYPERV_VP_INDEX),
        CAP_ENTRY__L(KVM_CAP_S390_AIS_MIGRATION),           /* 150 */
        CAP_ENTRY__L(KVM_CAP_PPC_GET_CPU_CHAR),
        CAP_ENTRY__L(KVM_CAP_S390_BPB),
        CAP_ENTRY__L(KVM_CAP_GET_MSR_FEATURES),
        CAP_ENTRY__L(KVM_CAP_HYPERV_EVENTFD),
        CAP_ENTRY__L(KVM_CAP_HYPERV_TLBFLUSH),
        CAP_ENTRY__L(KVM_CAP_S390_HPAGE_1M),
        CAP_ENTRY__L(KVM_CAP_NESTED_STATE),
        CAP_ENTRY__L(KVM_CAP_ARM_INJECT_SERROR_ESR),
        CAP_ENTRY__L(KVM_CAP_MSR_PLATFORM_INFO),
        CAP_ENTRY__L(KVM_CAP_PPC_NESTED_HV),                /* 160 */
        CAP_ENTRY__L(KVM_CAP_HYPERV_SEND_IPI),
        CAP_ENTRY__L(KVM_CAP_COALESCED_PIO),
        CAP_ENTRY__L(KVM_CAP_HYPERV_ENLIGHTENED_VMCS),
        CAP_ENTRY__L(KVM_CAP_EXCEPTION_PAYLOAD),
        CAP_ENTRY__L(KVM_CAP_ARM_VM_IPA_SIZE),
        CAP_ENTRY__L(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT),
        CAP_ENTRY__L(KVM_CAP_HYPERV_CPUID),
        CAP_ENTRY__L(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2),
        CAP_ENTRY__L(KVM_CAP_PPC_IRQ_XIVE),
        CAP_ENTRY__L(KVM_CAP_ARM_SVE),                      /* 170 */
        CAP_ENTRY__L(KVM_CAP_ARM_PTRAUTH_ADDRESS),
        CAP_ENTRY__L(KVM_CAP_ARM_PTRAUTH_GENERIC),
        CAP_ENTRY__L(KVM_CAP_PMU_EVENT_FILTER),
        CAP_ENTRY__L(KVM_CAP_ARM_IRQ_LINE_LAYOUT_2),
        CAP_ENTRY__L(KVM_CAP_HYPERV_DIRECT_TLBFLUSH),
        CAP_ENTRY__L(KVM_CAP_PPC_GUEST_DEBUG_SSTEP),
        CAP_ENTRY__L(KVM_CAP_ARM_NISV_TO_USER),
        CAP_ENTRY__L(KVM_CAP_ARM_INJECT_EXT_DABT),
        CAP_ENTRY__L(KVM_CAP_S390_VCPU_RESETS),
        CAP_ENTRY__L(KVM_CAP_S390_PROTECTED),               /* 180 */
        CAP_ENTRY__L(KVM_CAP_PPC_SECURE_GUEST),
        CAP_ENTRY__L(KVM_CAP_HALT_POLL),
        CAP_ENTRY__L(KVM_CAP_ASYNC_PF_INT),
        CAP_ENTRY__L(KVM_CAP_LAST_CPU),
        CAP_ENTRY__L(KVM_CAP_SMALLER_MAXPHYADDR),
        CAP_ENTRY__L(KVM_CAP_S390_DIAG318),
        CAP_ENTRY__L(KVM_CAP_STEAL_TIME),
        CAP_ENTRY_ML(KVM_CAP_X86_USER_SPACE_MSR),           /* (since 5.10) */
        CAP_ENTRY_ML(KVM_CAP_X86_MSR_FILTER),
        CAP_ENTRY__L(KVM_CAP_ENFORCE_PV_FEATURE_CPUID),     /* 190 */
        CAP_ENTRY__L(KVM_CAP_SYS_HYPERV_CPUID),
        CAP_ENTRY__L(KVM_CAP_DIRTY_LOG_RING),
        CAP_ENTRY__L(KVM_CAP_X86_BUS_LOCK_EXIT),
        CAP_ENTRY__L(KVM_CAP_PPC_DAWR1),
        CAP_ENTRY__L(KVM_CAP_SET_GUEST_DEBUG2),
        CAP_ENTRY__L(KVM_CAP_SGX_ATTRIBUTE),
        CAP_ENTRY__L(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM),
        CAP_ENTRY__L(KVM_CAP_PTP_KVM),
        CAP_ENTRY__U(199),
        CAP_ENTRY__U(200),
        CAP_ENTRY__U(201),
        CAP_ENTRY__U(202),
        CAP_ENTRY__U(203),
        CAP_ENTRY__U(204),
        CAP_ENTRY__U(205),
        CAP_ENTRY__U(206),
        CAP_ENTRY__U(207),
        CAP_ENTRY__U(208),
        CAP_ENTRY__U(209),
        CAP_ENTRY__U(210),
        CAP_ENTRY__U(211),
        CAP_ENTRY__U(212),
        CAP_ENTRY__U(213),
        CAP_ENTRY__U(214),
        CAP_ENTRY__U(215),
        CAP_ENTRY__U(216),
    };

    LogRel(("NEM: KVM capabilities (system):\n"));
    int rcRet = VINF_SUCCESS;
    for (unsigned i = 0; i < RT_ELEMENTS(s_aCaps); i++)
    {
        int rc = ioctl(pVM->nem.s.fdVm, KVM_CHECK_EXTENSION, s_aCaps[i].iCap);
        if (rc >= 10)
            LogRel(("NEM: %36s: %#x (%d)\n", s_aCaps[i].pszName, rc, rc));
        else if (rc >= 0)
            LogRel(("NEM: %36s: %d\n", s_aCaps[i].pszName, rc));
        else
            LogRel(("NEM: %s failed: %d/%d\n", s_aCaps[i].pszName, rc, errno));
        switch (s_aCaps[i].cbNem)
        {
            case 0:
                break;
            case 1:
            {
                uint8_t *puValue = (uint8_t *)&pVM->nem.padding[s_aCaps[i].offNem];
                AssertReturn(s_aCaps[i].offNem <= sizeof(NEM) - sizeof(*puValue), VERR_NEM_IPE_0);
                *puValue = (uint8_t)rc;
                AssertLogRelMsg((int)*puValue == rc, ("%s: %#x\n", s_aCaps[i].pszName, rc));
                break;
            }
            case 2:
            {
                uint16_t *puValue = (uint16_t *)&pVM->nem.padding[s_aCaps[i].offNem];
                AssertReturn(s_aCaps[i].offNem <= sizeof(NEM) - sizeof(*puValue), VERR_NEM_IPE_0);
                *puValue = (uint16_t)rc;
                AssertLogRelMsg((int)*puValue == rc, ("%s: %#x\n", s_aCaps[i].pszName, rc));
                break;
            }
            case 4:
            {
                uint32_t *puValue = (uint32_t *)&pVM->nem.padding[s_aCaps[i].offNem];
                AssertReturn(s_aCaps[i].offNem <= sizeof(NEM) - sizeof(*puValue), VERR_NEM_IPE_0);
                *puValue = (uint32_t)rc;
                AssertLogRelMsg((int)*puValue == rc, ("%s: %#x\n", s_aCaps[i].pszName, rc));
                break;
            }
            default:
                rcRet = RTErrInfoSetF(pErrInfo, VERR_NEM_IPE_0, "s_aCaps[%u] is bad: cbNem=%#x - %s",
                                      i, s_aCaps[i].cbNem, s_aCaps[i].pszName);
                AssertFailedReturn(rcRet);
        }

        /*
         * Is a required non-zero entry zero or failing?
         */
        if (s_aCaps[i].fReqNonZero && rc <= 0)
            rcRet = RTERRINFO_LOG_REL_ADD_F(pErrInfo, VERR_NEM_MISSING_FEATURE,
                                            "Required capability '%s' is missing!", s_aCaps[i].pszName);
    }

    /*
     * Get the per-VCpu KVM_RUN mmap area size.
     */
    int rc = ioctl(pVM->nem.s.fdKvm, KVM_GET_VCPU_MMAP_SIZE, 0UL);
    if ((unsigned)rc < _64M)
    {
        pVM->nem.s.cbVCpuMmap = (uint32_t)rc;
        LogRel(("NEM: %36s: %#x (%d)\n", "KVM_GET_VCPU_MMAP_SIZE", rc, rc));
    }
    else if (rc < 0)
        rcRet = RTERRINFO_LOG_REL_ADD_F(pErrInfo, VERR_NEM_MISSING_FEATURE, "KVM_GET_VCPU_MMAP_SIZE failed: %d", errno);
    else
        rcRet = RTERRINFO_LOG_REL_ADD_F(pErrInfo, VERR_NEM_INIT_FAILED, "Odd KVM_GET_VCPU_MMAP_SIZE value: %#x (%d)", rc, rc);

    /*
     * Init the slot ID bitmap.
     */
    ASMBitSet(&pVM->nem.s.bmSlotIds[0], 0);         /* don't use slot 0 */
    if (pVM->nem.s.cMaxMemSlots < _32K)
        ASMBitSetRange(&pVM->nem.s.bmSlotIds[0], pVM->nem.s.cMaxMemSlots, _32K);
    ASMBitSet(&pVM->nem.s.bmSlotIds[0], _32K - 1);  /* don't use the last slot */

    return rcRet;
}
Added (new 55-62):

/* Forward declarations of things called by the template. */
static int nemR3LnxInitSetupVm(PVM pVM, PRTERRINFO pErrInfo);


/* Instantiate the common bits we share with the ARMv8 KVM backend. */
#include "NEMR3NativeTemplate-linux.cpp.h"

…

Unmodified (old 461-462 / new 108-109):

        pVCpu->nem.s.pRun->kvm_valid_regs = KVM_SYNC_X86_REGS | KVM_SYNC_X86_SREGS | KVM_SYNC_X86_EVENTS;
    }

Removed (old 463-635):

    return VINF_SUCCESS;
}


/** @callback_method_impl{FNVMMEMTRENDEZVOUS} */
static DECLCALLBACK(VBOXSTRICTRC) nemR3LnxFixThreadPoke(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    RT_NOREF(pVM, pvUser);
    int rc = RTThreadControlPokeSignal(pVCpu->hThread, true /*fEnable*/);
    AssertLogRelRC(rc);
    return VINF_SUCCESS;
}


/**
 * Try initialize the native API.
 *
 * This may only do part of the job, more can be done in
 * nemR3NativeInitAfterCPUM() and nemR3NativeInitCompleted().
 *
 * @returns VBox status code.
 * @param   pVM       The cross context VM structure.
 * @param   fFallback Whether we're in fallback mode or use-NEM mode.  In
 *                    the latter we'll fail if we cannot initialize.
 * @param   fForced   Whether the HMForced flag is set and we should
 *                    fail if we cannot initialize.
 */
int nemR3NativeInit(PVM pVM, bool fFallback, bool fForced)
{
    RT_NOREF(pVM, fFallback, fForced);
    /*
     * Some state init.
     */
    pVM->nem.s.fdKvm = -1;
    pVM->nem.s.fdVm  = -1;
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PNEMCPU pNemCpu = &pVM->apCpusR3[idCpu]->nem.s;
        pNemCpu->fdVCpu = -1;
    }

    /*
     * Error state.
     * The error message will be non-empty on failure and 'rc' will be set too.
     */
    RTERRINFOSTATIC ErrInfo;
    PRTERRINFO pErrInfo = RTErrInfoInitStatic(&ErrInfo);

    /*
     * Open kvm subsystem so we can issue system ioctls.
     */
    int rc;
    int fdKvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
    if (fdKvm >= 0)
    {
        pVM->nem.s.fdKvm = fdKvm;

        /*
         * Create an empty VM since it is recommended we check capabilities on
         * the VM rather than the system descriptor.
         */
        int fdVm = ioctl(fdKvm, KVM_CREATE_VM, 0UL /* Type must be zero on x86 */);
        if (fdVm >= 0)
        {
            pVM->nem.s.fdVm = fdVm;

            /*
             * Check capabilities.
             */
            rc = nemR3LnxInitCheckCapabilities(pVM, pErrInfo);
            if (RT_SUCCESS(rc))
            {
                /*
                 * Set up the VM (more on this later).
                 */
                rc = nemR3LnxInitSetupVm(pVM, pErrInfo);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Set ourselves as the execution engine and make config adjustments.
                     */
                    VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_NATIVE_API);
                    Log(("NEM: Marked active!\n"));
                    PGMR3EnableNemMode(pVM);

                    /*
                     * Register release statistics.
                     */
                    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
                    {
                        PNEMCPU pNemCpu = &pVM->apCpusR3[idCpu]->nem.s;
                        STAMR3RegisterF(pVM, &pNemCpu->StatImportOnDemand, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of on-demand state imports", "/NEM/CPU%u/ImportOnDemand", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatImportOnReturn, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of state imports on loop return", "/NEM/CPU%u/ImportOnReturn", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatImportOnReturnSkipped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of skipped state imports on loop return", "/NEM/CPU%u/ImportOnReturnSkipped", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatImportPendingInterrupt, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of times an interrupt was pending when importing from KVM", "/NEM/CPU%u/ImportPendingInterrupt", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExportPendingInterrupt, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of times an interrupt was pending when exporting to KVM", "/NEM/CPU%u/ExportPendingInterrupt", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatFlushExitOnReturn, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of times a KVM_EXIT_IO or KVM_EXIT_MMIO was flushed before returning to EM", "/NEM/CPU%u/FlushExitOnReturn", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatFlushExitOnReturn1Loop, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of times a KVM_EXIT_IO or KVM_EXIT_MMIO was flushed before returning to EM", "/NEM/CPU%u/FlushExitOnReturn-01-loop", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatFlushExitOnReturn2Loops, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of times a KVM_EXIT_IO or KVM_EXIT_MMIO was flushed before returning to EM", "/NEM/CPU%u/FlushExitOnReturn-02-loops", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatFlushExitOnReturn3Loops, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of times a KVM_EXIT_IO or KVM_EXIT_MMIO was flushed before returning to EM", "/NEM/CPU%u/FlushExitOnReturn-03-loops", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatFlushExitOnReturn4PlusLoops, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of times a KVM_EXIT_IO or KVM_EXIT_MMIO was flushed before returning to EM", "/NEM/CPU%u/FlushExitOnReturn-04-to-7-loops", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatQueryCpuTick, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TSC queries", "/NEM/CPU%u/QueryCpuTick", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitTotal, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "All exits", "/NEM/CPU%u/Exit", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitIo, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_IO", "/NEM/CPU%u/Exit/Io", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitMmio, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_MMIO", "/NEM/CPU%u/Exit/Mmio", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitSetTpr, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_SET_TPR", "/NEM/CPU%u/Exit/SetTpr", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitTprAccess, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_TPR_ACCESS", "/NEM/CPU%u/Exit/TprAccess", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitRdMsr, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_RDMSR", "/NEM/CPU%u/Exit/RdMsr", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitWrMsr, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_WRMSR", "/NEM/CPU%u/Exit/WrMsr", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitIrqWindowOpen, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_IRQ_WINDOW_OPEN", "/NEM/CPU%u/Exit/IrqWindowOpen", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitHalt, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_HLT", "/NEM/CPU%u/Exit/Hlt", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitIntr, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_INTR", "/NEM/CPU%u/Exit/Intr", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitHypercall, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_HYPERCALL", "/NEM/CPU%u/Exit/Hypercall", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitDebug, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_DEBUG", "/NEM/CPU%u/Exit/Debug", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitBusLock, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_BUS_LOCK", "/NEM/CPU%u/Exit/BusLock", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitInternalErrorEmulation, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_INTERNAL_ERROR/EMULATION", "/NEM/CPU%u/Exit/InternalErrorEmulation", idCpu);
                        STAMR3RegisterF(pVM, &pNemCpu->StatExitInternalErrorFatal, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "KVM_EXIT_INTERNAL_ERROR/*", "/NEM/CPU%u/Exit/InternalErrorFatal", idCpu);
                    }

                    /*
                     * Success.
                     */
                    return VINF_SUCCESS;
                }

                /*
                 * Bail out.
                 */
            }
            close(fdVm);
            pVM->nem.s.fdVm = -1;
        }
        else
            rc = RTErrInfoSetF(pErrInfo, VERR_NEM_VM_CREATE_FAILED, "KVM_CREATE_VM failed: %u", errno);
        close(fdKvm);
        pVM->nem.s.fdKvm = -1;
    }
    else if (errno == EACCES)
        rc = RTErrInfoSet(pErrInfo, VERR_ACCESS_DENIED, "Do not have access to open /dev/kvm for reading & writing.");
    else if (errno == ENOENT)
        rc = RTErrInfoSet(pErrInfo, VERR_NOT_SUPPORTED, "KVM is not available (/dev/kvm does not exist)");
    else
        rc = RTErrInfoSetF(pErrInfo, RTErrConvertFromErrno(errno), "Failed to open '/dev/kvm': %u", errno);

    /*
     * We only fail if in forced mode, otherwise just log the complaint and return.
     */
    Assert(RTErrInfoIsSet(pErrInfo));
    if (   (fForced || !fFallback)
        && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NATIVE_API)
        return VMSetError(pVM, RT_SUCCESS_NP(rc) ? VERR_NEM_NOT_AVAILABLE : rc, RT_SRC_POS, "%s", pErrInfo->pszMsg);
    LogRel(("NEM: Not available: %s\n", pErrInfo->pszMsg));
    return VINF_SUCCESS;
}
Removed (old 463-635, continued):

/**
 * This is called after CPUMR3Init is done.
 *
 * @returns VBox status code.
 * @param   pVM The VM handle.
 */
int nemR3NativeInitAfterCPUM(PVM pVM)
{
    /*
     * Validate sanity.
     */
    AssertReturn(pVM->nem.s.fdKvm >= 0, VERR_WRONG_ORDER);
    AssertReturn(pVM->nem.s.fdVm >= 0, VERR_WRONG_ORDER);
    AssertReturn(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API, VERR_WRONG_ORDER);

    /** @todo */

Unmodified (old 636-637 / new 110-111):

    return VINF_SUCCESS;
}

…

Unmodified (old 768-769 / new 242-243):

    return VINF_SUCCESS;

Removed (old 770-1186):

}


int nemR3NativeTerm(PVM pVM)
{
    /*
     * Per-cpu data.
     */
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = pVM->apCpusR3[idCpu];

        if (pVCpu->nem.s.fdVCpu != -1)
        {
            close(pVCpu->nem.s.fdVCpu);
            pVCpu->nem.s.fdVCpu = -1;
        }
        if (pVCpu->nem.s.pRun)
        {
            munmap(pVCpu->nem.s.pRun, pVM->nem.s.cbVCpuMmap);
            pVCpu->nem.s.pRun = NULL;
        }
    }

    /*
     * Global data.
     */
    if (pVM->nem.s.fdVm != -1)
    {
        close(pVM->nem.s.fdVm);
        pVM->nem.s.fdVm = -1;
    }

    if (pVM->nem.s.fdKvm != -1)
    {
        close(pVM->nem.s.fdKvm);
        pVM->nem.s.fdKvm = -1;
    }
    return VINF_SUCCESS;
}


/**
 * VM reset notification.
 *
 * @param   pVM The cross context VM structure.
 */
void nemR3NativeReset(PVM pVM)
{
    RT_NOREF(pVM);
}


/**
 * Reset CPU due to INIT IPI or hot (un)plugging.
 *
 * @param   pVCpu    The cross context virtual CPU structure of the CPU being
 *                   reset.
 * @param   fInitIpi Whether this is the INIT IPI or hot (un)plugging case.
 */
void nemR3NativeResetCpu(PVMCPU pVCpu, bool fInitIpi)
{
    RT_NOREF(pVCpu, fInitIpi);
}


/*********************************************************************************************************************************
*   Memory management                                                                                                            *
*********************************************************************************************************************************/


/**
 * Allocates a memory slot ID.
 *
 * @returns Slot ID on success, UINT16_MAX on failure.
 */
static uint16_t nemR3LnxMemSlotIdAlloc(PVM pVM)
{
    /* Use the hint first. */
    uint16_t idHint = pVM->nem.s.idPrevSlot;
    if (idHint < _32K - 1)
    {
        int32_t idx = ASMBitNextClear(&pVM->nem.s.bmSlotIds, _32K, idHint);
        Assert(idx < _32K);
        if (idx > 0 && !ASMAtomicBitTestAndSet(&pVM->nem.s.bmSlotIds, idx))
            return pVM->nem.s.idPrevSlot = (uint16_t)idx;
    }

    /*
     * Search the whole map from the start.
     */
    int32_t idx = ASMBitFirstClear(&pVM->nem.s.bmSlotIds, _32K);
    Assert(idx < _32K);
    if (idx > 0 && !ASMAtomicBitTestAndSet(&pVM->nem.s.bmSlotIds, idx))
        return pVM->nem.s.idPrevSlot = (uint16_t)idx;

    Assert(idx < 0 /*shouldn't trigger unless there is a race */);
    return UINT16_MAX; /* caller is expected to assert. */
}


/**
 * Frees a memory slot ID.
 */
static void nemR3LnxMemSlotIdFree(PVM pVM, uint16_t idSlot)
{
    if (RT_LIKELY(idSlot < _32K && ASMAtomicBitTestAndClear(&pVM->nem.s.bmSlotIds, idSlot)))
    { /*likely*/ }
    else
        AssertMsgFailed(("idSlot=%u (%#x)\n", idSlot, idSlot));
}
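The allocator above leans on IPRT's bitmap helpers (ASMBitNextClear, ASMAtomicBitTestAndSet and friends); the underlying idea is a first-clear-bit scan over a 32K-bit map with slot 0 and the last slot reserved, as done in the bitmap init earlier. A hypothetical plain-C equivalent, minus the atomics and the previous-slot hint:

#include <stdint.h>
#include <string.h>

#define NUM_SLOTS 32768

static uint64_t g_bmSlotIds[NUM_SLOTS / 64];

static void slotIdInit(void)
{
    memset(g_bmSlotIds, 0, sizeof(g_bmSlotIds));
    g_bmSlotIds[0] |= 1;                                                        /* never hand out slot 0 */
    g_bmSlotIds[(NUM_SLOTS - 1) / 64] |= (uint64_t)1 << ((NUM_SLOTS - 1) % 64); /* nor the last slot */
}

static int32_t slotIdAlloc(void)
{
    for (uint32_t i = 0; i < NUM_SLOTS; i++)
        if (!(g_bmSlotIds[i / 64] & ((uint64_t)1 << (i % 64))))
        {
            g_bmSlotIds[i / 64] |= (uint64_t)1 << (i % 64);
            return (int32_t)i;
        }
    return -1; /* all slots taken */
}

static void slotIdFree(uint32_t idSlot)
{
    g_bmSlotIds[idSlot / 64] &= ~((uint64_t)1 << (idSlot % 64));
}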
Removed (old 770-1186, continued):

VMMR3_INT_DECL(int) NEMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvR3,
                                               uint8_t *pu2State, uint32_t *puNemRange)
{
    uint16_t idSlot = nemR3LnxMemSlotIdAlloc(pVM);
    AssertLogRelReturn(idSlot < _32K, VERR_NEM_MAP_PAGES_FAILED);

    Log5(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p pu2State=%p (%d) puNemRange=%p (%d) - idSlot=%#x\n",
          GCPhys, cb, pvR3, pu2State, *pu2State, puNemRange, *puNemRange, idSlot));

    struct kvm_userspace_memory_region Region;
    Region.slot             = idSlot;
    Region.flags            = 0;
    Region.guest_phys_addr  = GCPhys;
    Region.memory_size      = cb;
    Region.userspace_addr   = (uintptr_t)pvR3;

    int rc = ioctl(pVM->nem.s.fdVm, KVM_SET_USER_MEMORY_REGION, &Region);
    if (rc == 0)
    {
        *pu2State   = 0;
        *puNemRange = idSlot;
        return VINF_SUCCESS;
    }

    LogRel(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p, idSlot=%#x failed: %u/%u\n", GCPhys, cb, pvR3, idSlot, rc, errno));
    nemR3LnxMemSlotIdFree(pVM, idSlot);
    return VERR_NEM_MAP_PAGES_FAILED;
}


VMMR3_INT_DECL(bool) NEMR3IsMmio2DirtyPageTrackingSupported(PVM pVM)
{
    RT_NOREF(pVM);
    return true;
}


VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
                                                  void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
{
    Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p (%d) puNemRange=%p (%#x)\n",
          GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, *pu2State, puNemRange, puNemRange ? *puNemRange : UINT32_MAX));
    RT_NOREF(pvRam);

    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
    {
        /** @todo implement splitting and whatnot of ranges if we want to be 100%
         *        conforming (just modify RAM registrations in MM.cpp to test). */
        AssertLogRelMsgFailedReturn(("%RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p\n", GCPhys, cb, fFlags, pvRam, pvMmio2),
                                    VERR_NEM_MAP_PAGES_FAILED);
    }

    /*
     * Register MMIO2.
     */
    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2)
    {
        AssertReturn(pvMmio2, VERR_NEM_MAP_PAGES_FAILED);
        AssertReturn(puNemRange, VERR_NEM_MAP_PAGES_FAILED);

        uint16_t idSlot = nemR3LnxMemSlotIdAlloc(pVM);
        AssertLogRelReturn(idSlot < _32K, VERR_NEM_MAP_PAGES_FAILED);

        struct kvm_userspace_memory_region Region;
        Region.slot             = idSlot;
        Region.flags            = fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES ? KVM_MEM_LOG_DIRTY_PAGES : 0;
        Region.guest_phys_addr  = GCPhys;
        Region.memory_size      = cb;
        Region.userspace_addr   = (uintptr_t)pvMmio2;

        int rc = ioctl(pVM->nem.s.fdVm, KVM_SET_USER_MEMORY_REGION, &Region);
        if (rc == 0)
        {
            *pu2State   = 0;
            *puNemRange = idSlot;
            Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvMmio2=%p - idSlot=%#x\n",
                  GCPhys, cb, fFlags, pvMmio2, idSlot));
            return VINF_SUCCESS;
        }

        nemR3LnxMemSlotIdFree(pVM, idSlot);
        AssertLogRelMsgFailedReturn(("%RGp LB %RGp fFlags=%#x, pvMmio2=%p, idSlot=%#x failed: %u/%u\n",
                                     GCPhys, cb, fFlags, pvMmio2, idSlot, errno, rc),
                                    VERR_NEM_MAP_PAGES_FAILED);
    }

    /* MMIO, don't care. */
    *pu2State   = 0;
    *puNemRange = UINT32_MAX;
    return VINF_SUCCESS;
}


VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
                                                 void *pvRam, void *pvMmio2, uint32_t *puNemRange)
{
    RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, puNemRange);
    return VINF_SUCCESS;
}


VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExUnmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, void *pvRam,
                                               void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
{
    Log5(("NEMR3NotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p puNemRange=%p (%#x)\n",
          GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, puNemRange, *puNemRange));
    RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State);

    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
    {
        /** @todo implement splitting and whatnot of ranges if we want to be 100%
         *        conforming (just modify RAM registrations in MM.cpp to test). */
        AssertLogRelMsgFailedReturn(("%RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p\n", GCPhys, cb, fFlags, pvRam, pvMmio2),
                                    VERR_NEM_UNMAP_PAGES_FAILED);
    }

    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2)
    {
        uint32_t const idSlot = *puNemRange;
        AssertReturn(idSlot > 0 && idSlot < _32K, VERR_NEM_IPE_4);
        AssertReturn(ASMBitTest(pVM->nem.s.bmSlotIds, idSlot), VERR_NEM_IPE_4);

        struct kvm_userspace_memory_region Region;
        Region.slot             = idSlot;
        Region.flags            = 0;
        Region.guest_phys_addr  = GCPhys;
        Region.memory_size      = 0;    /* this deregisters it. */
        Region.userspace_addr   = (uintptr_t)pvMmio2;

        int rc = ioctl(pVM->nem.s.fdVm, KVM_SET_USER_MEMORY_REGION, &Region);
        if (rc == 0)
        {
            if (pu2State)
                *pu2State = 0;
            *puNemRange = UINT32_MAX;
            nemR3LnxMemSlotIdFree(pVM, idSlot);
            return VINF_SUCCESS;
        }

        AssertLogRelMsgFailedReturn(("%RGp LB %RGp fFlags=%#x, pvMmio2=%p, idSlot=%#x failed: %u/%u\n",
                                     GCPhys, cb, fFlags, pvMmio2, idSlot, errno, rc),
                                    VERR_NEM_UNMAP_PAGES_FAILED);
    }

    if (pu2State)
        *pu2State = UINT8_MAX;
    return VINF_SUCCESS;
}
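All of the mapping paths above funnel into the same KVM_SET_USER_MEMORY_REGION ioctl; registering a slot, enabling dirty tracking, and deleting a slot differ only in the flags and memory_size fields. A sketch of the call shape (hypothetical helper, not VirtualBox code; pvBacking must be page-aligned user memory):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int registerSlot(int fdVm, uint32_t idSlot, uint64_t GCPhys, uint64_t cb, void *pvBacking, int fTrackDirty)
{
    struct kvm_userspace_memory_region Region;
    Region.slot            = idSlot;
    Region.flags           = fTrackDirty ? KVM_MEM_LOG_DIRTY_PAGES : 0;
    Region.guest_phys_addr = GCPhys;
    Region.memory_size     = cb;                /* passing zero deletes the slot */
    Region.userspace_addr  = (uintptr_t)pvBacking;
    return ioctl(fdVm, KVM_SET_USER_MEMORY_REGION, &Region); /* 0 on success, -1/errno on failure */
}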
Removed (old 770-1186, continued):

VMMR3_INT_DECL(int) NEMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t uNemRange,
                                                           void *pvBitmap, size_t cbBitmap)
{
    AssertReturn(uNemRange > 0 && uNemRange < _32K, VERR_NEM_IPE_4);
    AssertReturn(ASMBitTest(pVM->nem.s.bmSlotIds, uNemRange), VERR_NEM_IPE_4);

    RT_NOREF(GCPhys, cbBitmap);

    struct kvm_dirty_log DirtyLog;
    DirtyLog.slot         = uNemRange;
    DirtyLog.padding1     = 0;
    DirtyLog.dirty_bitmap = pvBitmap;

    int rc = ioctl(pVM->nem.s.fdVm, KVM_GET_DIRTY_LOG, &DirtyLog);
    AssertLogRelMsgReturn(rc == 0, ("%RGp LB %RGp idSlot=%#x failed: %u/%u\n", GCPhys, cb, uNemRange, errno, rc),
                          VERR_NEM_QUERY_DIRTY_BITMAP_FAILED);

    return VINF_SUCCESS;
}
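KVM_GET_DIRTY_LOG both fetches and clears the per-slot dirty bitmap, which is why the function above can implement query-and-reset with a single ioctl. A sketch of the call (hypothetical helper; the caller must size the bitmap at one bit per page of the slot):

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int queryAndResetDirtyLog(int fdVm, uint32_t idSlot, void *pvBitmap)
{
    struct kvm_dirty_log DirtyLog;
    DirtyLog.slot         = idSlot;     /* slot the bitmap is requested for */
    DirtyLog.padding1     = 0;
    DirtyLog.dirty_bitmap = pvBitmap;   /* filled by KVM; its internal copy is reset */
    return ioctl(fdVm, KVM_GET_DIRTY_LOG, &DirtyLog);
}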
Removed (old 770-1186, continued):

VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages, uint32_t fFlags,
                                                    uint8_t *pu2State, uint32_t *puNemRange)
{
    Log5(("NEMR3NotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags));
    *pu2State = UINT8_MAX;

    /* Don't support putting ROM where there is already RAM.  For
       now just shuffle the registrations till it works... */
    AssertLogRelMsgReturn(!(fFlags & NEM_NOTIFY_PHYS_ROM_F_REPLACE), ("%RGp LB %RGp fFlags=%#x\n", GCPhys, cb, fFlags),
                          VERR_NEM_MAP_PAGES_FAILED);

    /** @todo figure out how to do shadow ROMs. */

    /*
     * We only allocate a slot number here in case we need to use it to
     * fend off physical handler fun.
     */
    uint16_t idSlot = nemR3LnxMemSlotIdAlloc(pVM);
    AssertLogRelReturn(idSlot < _32K, VERR_NEM_MAP_PAGES_FAILED);

    *pu2State   = 0;
    *puNemRange = idSlot;
    Log5(("NEMR3NotifyPhysRomRegisterEarly: %RGp LB %RGp fFlags=%#x pvPages=%p - idSlot=%#x\n",
          GCPhys, cb, fFlags, pvPages, idSlot));
    RT_NOREF(GCPhys, cb, fFlags, pvPages);
    return VINF_SUCCESS;
}


VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages,
                                                   uint32_t fFlags, uint8_t *pu2State, uint32_t *puNemRange)
{
    Log5(("NEMR3NotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n",
          GCPhys, cb, pvPages, fFlags, pu2State, *pu2State, puNemRange, *puNemRange));

    AssertPtrReturn(pvPages, VERR_NEM_IPE_5);

    uint32_t const idSlot = *puNemRange;
    AssertReturn(idSlot > 0 && idSlot < _32K, VERR_NEM_IPE_4);
    AssertReturn(ASMBitTest(pVM->nem.s.bmSlotIds, idSlot), VERR_NEM_IPE_4);

    *pu2State = UINT8_MAX;

    /*
     * Do the actual setting of the user pages here now that we've
     * got a valid pvPages (typically isn't available during the early
     * notification, unless we're replacing RAM).
     */
    struct kvm_userspace_memory_region Region;
    Region.slot             = idSlot;
    Region.flags            = 0;
    Region.guest_phys_addr  = GCPhys;
    Region.memory_size      = cb;
    Region.userspace_addr   = (uintptr_t)pvPages;

    int rc = ioctl(pVM->nem.s.fdVm, KVM_SET_USER_MEMORY_REGION, &Region);
    if (rc == 0)
    {
        *pu2State = 0;
        Log5(("NEMR3NotifyPhysRomRegisterLate: %RGp LB %RGp fFlags=%#x pvPages=%p - idSlot=%#x\n",
              GCPhys, cb, fFlags, pvPages, idSlot));
        return VINF_SUCCESS;
    }
    AssertLogRelMsgFailedReturn(("%RGp LB %RGp fFlags=%#x, pvPages=%p, idSlot=%#x failed: %u/%u\n",
                                 GCPhys, cb, fFlags, pvPages, idSlot, errno, rc),
                                VERR_NEM_MAP_PAGES_FAILED);
}


VMMR3_INT_DECL(void) NEMR3NotifySetA20(PVMCPU pVCpu, bool fEnabled)
{
    Log(("nemR3NativeNotifySetA20: fEnabled=%RTbool\n", fEnabled));
    Assert(VM_IS_NEM_ENABLED(pVCpu->CTX_SUFF(pVM)));
    RT_NOREF(pVCpu, fEnabled);
}


VMM_INT_DECL(void) NEMHCNotifyHandlerPhysicalDeregister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
                                                        RTR3PTR pvMemR3, uint8_t *pu2State)
{
    Log5(("NEMHCNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d pvMemR3=%p pu2State=%p (%d)\n",
          GCPhys, cb, enmKind, pvMemR3, pu2State, *pu2State));

    *pu2State = UINT8_MAX;
    RT_NOREF(pVM, enmKind, GCPhys, cb, pvMemR3);
}


void nemHCNativeNotifyHandlerPhysicalRegister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
{
    Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
    RT_NOREF(pVM, enmKind, GCPhys, cb);
}


void nemHCNativeNotifyHandlerPhysicalModify(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
                                            RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
{
    Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
          GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
    RT_NOREF(pVM, enmKind, GCPhysOld, GCPhysNew, cb, fRestoreAsRAM);
}


int nemHCNativeNotifyPhysPageAllocated(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
                                       PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhys, fPageProt, enmType, *pu2State));
    RT_NOREF(pVM, GCPhys, HCPhys, fPageProt, enmType, pu2State);
    return VINF_SUCCESS;
}


VMM_INT_DECL(void) NEMHCNotifyPhysPageProtChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, RTR3PTR pvR3, uint32_t fPageProt,
                                                  PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhys, fPageProt, enmType, *pu2State));
    Assert(VM_IS_NEM_ENABLED(pVM));
    RT_NOREF(pVM, GCPhys, HCPhys, pvR3, fPageProt, enmType, pu2State);
}


VMM_INT_DECL(void) NEMHCNotifyPhysPageChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
                                              RTR3PTR pvNewR3, uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("nemHCNativeNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp pvNewR3=%p fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType, *pu2State));
    Assert(VM_IS_NEM_ENABLED(pVM));
    RT_NOREF(pVM, GCPhys, HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType, pu2State);

Unmodified (old 1187-1188 / new 244-245):

}