Changeset 78438 in vbox for trunk/src/VBox

Timestamp: May 7, 2019, 3:57:37 PM
Location:  trunk/src/VBox/VMM
Files:     6 edited
Legend: lines prefixed with '+' were added, '-' were removed, ' ' are unchanged context; '…' separates hunks.
trunk/src/VBox/VMM/Config.kmk (r78431 → r78438)

  VMM_COMMON_DEFS += VBOX_WITH_MORE_RING0_MEM_MAPPINGS
 endif
+ifdef VBOX_WITH_NATIVE_NEM
+ if1of ($(KBUILD_TARGET).$(KBUILD_TARGET_ARCH), win.amd64)
+  VMM_COMMON_DEFS += VBOX_WITH_NATIVE_NEM VBOX_WITH_NEM_R0
+ endif
+endif
 ifdef VBOX_BUGREF_9217
  VMM_COMMON_DEFS += VBOX_BUGREF_9217
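The added block switches on the native NEM (Hyper-V) backend, including its ring-0 half, for Windows/amd64 builds only; the rest of the VMM then consumes these defines as compile-time guards. A minimal, hedged sketch of what such a guard looks like on the C side (demoNemBackendName() is invented for illustration and is not part of the VirtualBox sources):

#include <stdio.h>

/* Sketch only: in a real build these defines come from Config.kmk via the
   compiler command line (e.g. -DVBOX_WITH_NATIVE_NEM -DVBOX_WITH_NEM_R0). */
static const char *demoNemBackendName(void)
{
#if defined(VBOX_WITH_NATIVE_NEM) && defined(VBOX_WITH_NEM_R0)
    return "native NEM with ring-0 support";   /* win.amd64 builds */
#elif defined(VBOX_WITH_NATIVE_NEM)
    return "native NEM, ring-3 only";
#else
    return "no native NEM backend";
#endif
}

int main(void)
{
    printf("This build: %s\n", demoNemBackendName());
    return 0;
}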
trunk/src/VBox/VMM/Makefile.kmk (r76553 → r78438)

 	\
 	-e '/^g_VM\>/d'\
+	-e '/^g_VCpu0\>/d'\
 	-e '/^g_CPUM\>/d'\
 	-e '/^g_Logger\>/d'\
trunk/src/VBox/VMM/VMMAll/VMMAll.cpp (r76553 → r78438)

 #include "VMMInternal.h"
 #include <VBox/vmm/vm.h>
+#ifdef IN_RING0
+# include <VBox/vmm/gvm.h>
+#endif
 #include <VBox/vmm/hm.h>
 #include <VBox/vmm/vmcpuset.h>
…
     if (pVM->cCpus == 1)
         return 0;
+# ifdef VBOX_BUGREF_9217
+    PGVM pGVM = (PGVM)pVM;
+    VMCPUID const cCpus = pGVM->cCpusSafe;
+# else
+    VMCPUID const cCpus = pVM->cCpus;
+# endif

     /* Search first by host cpu id (most common case)
…
     /** @todo optimize for large number of VCPUs when that becomes more common. */
-    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+    for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
     {
+# ifdef VBOX_BUGREF_9217
+        PVMCPU pVCpu = &pGVM->aCpus[idCpu];
+# else
         PVMCPU pVCpu = &pVM->aCpus[idCpu];
+# endif

         if (pVCpu->idHostCpu == idHostCpu)
…
     /** @todo optimize for large number of VCPUs when that becomes more common. */
-    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
-    {
+    for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
+    {
+# ifdef VBOX_BUGREF_9217
+        PVMCPU pVCpu = &pGVM->aCpus[idCpu];
+# else
         PVMCPU pVCpu = &pVM->aCpus[idCpu];
+# endif

         if (pVCpu->hNativeThreadR0 == hThread)
…
         return NULL;
     Assert(idCpu < pVM->cCpus);
+# ifdef VBOX_BUGREF_9217
+    return pVM->apCpus[idCpu];
+# else
     return &pVM->aCpus[idCpu];
+# endif

 #elif defined(IN_RING0)
+# ifdef VBOX_BUGREF_9217
+    PGVM pGVM = (PGVM)pVM;
+    VMCPUID const cCpus = pGVM->cCpusSafe;
+# else
+    VMCPUID const cCpus = pVM->cCpus;
+# endif
     if (pVM->cCpus == 1)
+# ifdef VBOX_BUGREF_9217
+        return &pGVM->aCpus[0];
+# else
         return &pVM->aCpus[0];
+# endif

     /*
…
     /** @todo optimize for large number of VCPUs when that becomes more common. */
-    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
+    for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
     {
+# ifdef VBOX_BUGREF_9217
+        PVMCPU pVCpu = &pGVM->aCpus[idCpu];
+# else
         PVMCPU pVCpu = &pVM->aCpus[idCpu];
-
+# endif
         if (pVCpu->idHostCpu == idHostCpu)
             return pVCpu;
…
     /** @todo optimize for large number of VCPUs when that becomes more common.
      *  Use a map like GIP does that's indexed by the host CPU index. */
-    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
-    {
+    for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
+    {
+# ifdef VBOX_BUGREF_9217
+        PVMCPU pVCpu = &pGVM->aCpus[idCpu];
+# else
         PVMCPU pVCpu = &pVM->aCpus[idCpu];
-
+# endif
         if (pVCpu->hNativeThreadR0 == hThread)
             return pVCpu;
…
 #else /* RC: Always EMT(0) */
-    return &pVM->aCpus[0];
+    RT_NOREF(pVM);
+    return &g_VCpu0;
 #endif /* IN_RING0 */
 }
…
     Assert(pVM->cCpus == 1);
+#ifdef VBOX_BUGREF_9217
+# ifdef IN_RING3
+    return pVM->apCpus[0];
+# elif defined(IN_RING0)
+    return &((PGVM)pVM)->aCpus[0];
+# else /* RC */
+    RT_NOREF(pVM);
+    return &g_VCpu0;
+# endif
+#else
     return &pVM->aCpus[0];
+#endif
 }
…
     AssertReturn(idCpu < pVM->cCpus, NULL);
+#ifdef VBOX_BUGREF_9217
+# ifdef IN_RING3
+    return pVM->apCpus[idCpu];
+# elif defined(IN_RING0)
+    return &((PGVM)pVM)->aCpus[0];
+# else /* RC */
+    RT_NOREF(pVM, idCpu);
+    Assert(idCpu == 0);
+    return &g_VCpu0;
+# endif
+#else
     return &pVM->aCpus[idCpu];
+#endif
 }
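The thread running through these hunks: under VBOX_BUGREF_9217 the per-CPU structures are no longer reached the same way in every context. Ring-3 goes through an array of pointers (pVM->apCpus), ring-0 goes through the global VM structure's embedded array (pGVM->aCpus) together with ring-0's own copy of the CPU count (cCpusSafe), and raw-mode context falls back to the single static g_VCpu0. Below is a self-contained sketch of that dispatch shape with simplified stand-in types (DEMOVM, DEMOVMCPU, DemoGetCpuById and the DEMO_IN_* macros are invented for illustration; they are not the VirtualBox API):

#include <assert.h>
#include <stdio.h>

/* Pick exactly one context, the way IN_RING3 / IN_RING0 / raw-mode
   normally come from the build system. */
#define DEMO_IN_RING3 1

typedef unsigned VMCPUID;
typedef struct DEMOVMCPU { VMCPUID idCpu; } DEMOVMCPU;

#if defined(DEMO_IN_RING3)
/* Ring-3 view: array of pointers to per-CPU structures. */
typedef struct DEMOVM { VMCPUID cCpus; DEMOVMCPU *apCpus[8]; } DEMOVM;
#elif defined(DEMO_IN_RING0)
/* Ring-0 view: per-CPU structures embedded in the global VM structure,
   plus a ring-0-private copy of the CPU count. */
typedef struct DEMOVM { VMCPUID cCpus; VMCPUID cCpusSafe; DEMOVMCPU aCpus[8]; } DEMOVM;
#else
/* Raw-mode view: a single statically allocated EMT(0). */
typedef struct DEMOVM { VMCPUID cCpus; } DEMOVM;
static DEMOVMCPU g_VCpu0 = { 0 };
#endif

/* Context-dependent lookup, mirroring the shape of the patched code. */
static DEMOVMCPU *DemoGetCpuById(DEMOVM *pVM, VMCPUID idCpu)
{
#if defined(DEMO_IN_RING3)
    assert(idCpu < pVM->cCpus);
    return pVM->apCpus[idCpu];
#elif defined(DEMO_IN_RING0)
    assert(idCpu < pVM->cCpusSafe);    /* trust only the ring-0 copy */
    return &pVM->aCpus[idCpu];
#else
    (void)pVM;
    assert(idCpu == 0);                /* raw-mode always runs on EMT(0) */
    return &g_VCpu0;
#endif
}

int main(void)
{
#if defined(DEMO_IN_RING3)
    static DEMOVMCPU cpu0 = { 0 }, cpu1 = { 1 };
    DEMOVM vm = { 2, { &cpu0, &cpu1 } };
#elif defined(DEMO_IN_RING0)
    DEMOVM vm = { 2, 2, { { 0 }, { 1 } } };
#else
    DEMOVM vm = { 1 };
#endif
    printf("CPU 0 id: %u\n", DemoGetCpuById(&vm, 0)->idCpu);
    return 0;
}

(The changeset's ring-0 branch of VMMGetCpuById() hard-codes aCpus[0]; the sketch indexes with idCpu, which is the shape the ring-3 branch uses.)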
trunk/src/VBox/VMM/VMMR3/PDMLdr.cpp (r76553 → r78438)

     if (!strcmp(pszSymbol, "g_VM"))
         *pValue = pVM->pVMRC;
+    else if (!strcmp(pszSymbol, "g_VCpu0"))
+        *pValue = pVM->pVMRC + pVM->offVMCPU;
     else if (!strcmp(pszSymbol, "g_CPUM"))
         *pValue = VM_RC_ADDR(pVM, &pVM->cpum);
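This hunk teaches the raw-mode import resolver in PDMLdr.cpp about the new g_VCpu0 symbol: the RC address of VMCPU 0 is the VM structure's RC base address plus the stored VMCPU offset. A small sketch of that base-plus-offset resolution, with invented names (DemoResolveSymbol and the DEMOVM fields stand in for the real PDM structures):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Invented stand-ins for the VM structure fields the resolver uses. */
typedef struct DEMOVM
{
    uint32_t pVMRC;     /* raw-mode context address of the VM structure */
    uint32_t offVMCPU;  /* byte offset from the VM structure to VMCPU 0 */
} DEMOVM;

/* Resolve a raw-mode import: known symbols map to fixed-up RC addresses. */
static int DemoResolveSymbol(const DEMOVM *pVM, const char *pszSymbol, uint32_t *pValue)
{
    if (!strcmp(pszSymbol, "g_VM"))
        *pValue = pVM->pVMRC;
    else if (!strcmp(pszSymbol, "g_VCpu0"))
        *pValue = pVM->pVMRC + pVM->offVMCPU;   /* base + offset, as in the hunk */
    else
        return -1;  /* unknown symbol */
    return 0;
}

int main(void)
{
    DEMOVM vm = { 0xa0000000u, 0x1000u };
    uint32_t uAddr = 0;
    if (DemoResolveSymbol(&vm, "g_VCpu0", &uAddr) == 0)
        printf("g_VCpu0 -> 0x%08x\n", (unsigned)uAddr);   /* prints 0xa0001000 */
    return 0;
}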
trunk/src/VBox/VMM/VMMRC/PDMRCDevice.cpp (r76553 → r78438)

 {
     PDMDEV_ASSERT_DEVINS(pDevIns);
+#ifdef VBOX_BUGREF_9217
+    PVMCPU pVCpu = &g_VCpu0; /* for PIC we always deliver to CPU 0, MP use APIC */
+#else
     PVM pVM = pDevIns->Internal.s.pVMRC;
     PVMCPU pVCpu = &pVM->aCpus[0]; /* for PIC we always deliver to CPU 0, MP use APIC */
+#endif
     /** @todo r=ramshankar: Propagating rcRZ and make all callers handle it? */
     APICLocalInterrupt(pVCpu, 0 /* u8Pin */, 1 /* u8Level */, VINF_SUCCESS /* rcRZ */);
…
 {
     PDMDEV_ASSERT_DEVINS(pDevIns);
+#ifdef VBOX_BUGREF_9217
+    PVMCPU pVCpu = &g_VCpu0; /* for PIC we always deliver to CPU 0, MP use APIC */
+#else
     PVM pVM = pDevIns->Internal.s.CTX_SUFF(pVM);
     PVMCPU pVCpu = &pVM->aCpus[0]; /* for PIC we always deliver to CPU 0, MP use APIC */
+#endif
     /** @todo r=ramshankar: Propagating rcRZ and make all callers handle it? */
     APICLocalInterrupt(pVCpu, 0 /* u8Pin */, 0 /* u8Level */, VINF_SUCCESS /* rcRZ */);
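The two hunks are the set/clear halves of the raw-mode PIC-to-local-APIC bridge: raising the local interrupt pin with u8Level 1 and dropping it with u8Level 0, always on virtual CPU 0 (SMP guests use the APIC instead). A tiny sketch of that level-signal pairing (DemoLocalInterrupt and its state are invented; the real call is APICLocalInterrupt()):

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in for the per-CPU LINT0 pin state. */
static bool g_fLint0Asserted;

/* Sketch of a level-triggered pin: u8Level 1 asserts, 0 deasserts. */
static void DemoLocalInterrupt(unsigned idCpu, unsigned u8Pin, unsigned u8Level)
{
    (void)u8Pin;                       /* the PIC drives pin 0 (LINT0) only */
    g_fLint0Asserted = (u8Level != 0);
    printf("CPU%u LINT0 %s\n", idCpu, g_fLint0Asserted ? "asserted" : "deasserted");
}

int main(void)
{
    DemoLocalInterrupt(0, 0 /* u8Pin */, 1 /* u8Level */);   /* SetIrq path   */
    DemoLocalInterrupt(0, 0 /* u8Pin */, 0 /* u8Level */);   /* ClearIrq path */
    return 0;
}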
trunk/src/VBox/VMM/VMMRC/VMMRCBuiltin.def (r76553 → r78438)

 ; data
 g_VM DATA
+g_VCpu0 DATA
 g_CPUM DATA
 g_TRPM DATA