Changeset 90597 in vbox
Timestamp: Aug 10, 2021 1:08:35 PM (3 years ago)
Location:  trunk
Files:     11 edited
trunk/include/VBox/vmm/gvm.h
r90380 → r90597

@@ -193 +193 @@
         struct GVMMPERVM    s;
 #endif
-        uint8_t             padding[256];
+        uint8_t             padding[4352];
     } gvmm;
trunk/include/VBox/vmm/gvm.mac
r90380 → r90597

@@ -67 +67 @@
 
     alignb 64
-    .gvmm                   resb 256
+    .gvmm                   resb 4352
     alignb 64
     .gmm                    resb 1024
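For context on the new sizes: the gvmm reservation grows by 4352 − 256 = 4096 bytes, which is exactly what the EMT lookup hash table added in GVMMR0Internal.h (further down) needs on a 64-bit host. Each GVMMEMTHASHENTRY is two pointers, i.e. 16 bytes, and the table holds VMM_MAX_CPU_COUNT * 4 entries; assuming VMM_MAX_CPU_COUNT is 64, that comes to 256 × 16 = 4096 bytes.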
trunk/include/VBox/vmm/gvmm.h
r85121 → r90597

@@ -188 +188 @@
 GVMMR0DECL(PVMCC)   GVMMR0GetVMByEMT(RTNATIVETHREAD hEMT);
 GVMMR0DECL(PGVMCPU) GVMMR0GetGVCpuByEMT(RTNATIVETHREAD hEMT);
+GVMMR0DECL(PGVMCPU) GVMMR0GetGVCpuByGVMandEMT(PGVM pGVM, RTNATIVETHREAD hEMT);
 GVMMR0DECL(int)     GVMMR0SchedHalt(PGVM pGVM, PGVMCPU pGVCpu, uint64_t u64ExpireGipTime);
 GVMMR0DECL(int)     GVMMR0SchedHaltReq(PGVM pGVM, VMCPUID idCpu, uint64_t u64ExpireGipTime);
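The new lookup is intended for ring-0 callers that hold a PGVM and want the GVMCPU of a given EMT; passing NIL_RTNATIVETHREAD resolves the calling thread. A minimal, hypothetical caller (not part of this changeset) might look like this:

    /* Hypothetical ring-0 helper: resolve the calling EMT to its GVMCPU. */
    static int myR0WorkerOnSelf(PGVM pGVM)
    {
        /* NIL_RTNATIVETHREAD means "the current thread", see GVMMR0GetGVCpuByGVMandEMT below. */
        PGVMCPU pGVCpu = GVMMR0GetGVCpuByGVMandEMT(pGVM, NIL_RTNATIVETHREAD);
        if (!pGVCpu)
            return VERR_VM_THREAD_NOT_EMT;  /* caller is not an EMT of this VM */
        /* ... do per-VCpu work on pGVCpu ... */
        return VINF_SUCCESS;
    }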
trunk/include/VBox/vmm/vm.h
r90500 → r90597

@@ -275 +275 @@
     /** Trace groups enable flags. */
     uint32_t                fTraceGroups;                   /* 64 / 44 */
-    /** State data for use by ad hoc profiling. */
-    uint32_t                uAdHoc;
+    /** Number of collisions hashing the ring-0 EMT handle. */
+    uint8_t                 cEmtHashCollisions;
+    uint8_t                 abAdHoc[3];
     /** Profiling samples for use by ad hoc profiling. */
     STAMPROFILEADV          aStatAdHoc[8];                  /* size: 40*8 = 320 */
@@ -1240 +1241 @@
     /** @} */
 
+    /** Max EMT hash lookup collisions (in GVMM). */
+    uint8_t                     cMaxEmtHashCollisions;
+
     /** Padding - the unions must be aligned on a 64 bytes boundary. */
-    uint8_t                     abAlignment3[HC_ARCH_BITS == 64 ? 24 : 52];
+    uint8_t                     abAlignment3[HC_ARCH_BITS == 64 ? 23 : 51];
 
     /** CPUM part. */
trunk/include/VBox/vmm/vm.mac
r90500 → r90597

@@ -83 +83 @@
     alignb 64
     .fTraceGroups           resd 1
-    .uAdHoc                 resd 1
+    .cEmtHashCollisions     resb 1
+    .abAdHoc                resb 3
     alignb 8
     .aStatAdHoc             resb STAMPROFILEADV_size * 8
trunk/src/VBox/VMM/VMMAll/VMMAll.cpp
r90533 → r90597

@@ -247 +247 @@
 
 #elif defined(IN_RING0)
-    VMCPUID const cCpus = pVM->cCpus;
-    if (pVM->cCpus == 1)
-    {
-        PVMCPUCC pVCpu = VMCC_GET_CPU_0(pVM);
-        Assert(pVCpu->hNativeThreadR0 == RTThreadNativeSelf());
-        return pVCpu;
-    }
-
-    /*
-     * Search first by host cpu id (most common case)
-     * and then by native thread id (page fusion case).
-     */
-    if (!RTThreadPreemptIsEnabled(NIL_RTTHREAD))
-    {
-        /** @todo r=ramshankar: This doesn't buy us anything in terms of performance
-         *        leaving it here for hysterical raisins and as a reference if we
-         *        implemented a hashing approach in the future. */
-        RTCPUID idHostCpu = RTMpCpuId();
-
-        /** @todo optimize for large number of VCPUs when that becomes more common. */
-        for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
-        {
-            PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
-            if (pVCpu->idHostCpu == idHostCpu)
-            {
-                Assert(pVCpu->hNativeThreadR0 == RTThreadNativeSelf());
-                return pVCpu;
-            }
-        }
-    }
-
-    /* RTThreadGetNativeSelf had better be cheap. */
-    RTNATIVETHREAD hThread = RTThreadNativeSelf();
-
-    /** @todo optimize for large number of VCPUs when that becomes more common.
-     *        Use a map like GIP does that's indexed by the host CPU index. */
-    for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
-    {
-        PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
-        if (pVCpu->hNativeThreadR0 == hThread)
-            return pVCpu;
-    }
-    return NULL;
+    return GVMMR0GetGVCpuByGVMandEMT(pVM, NIL_RTNATIVETHREAD);
 
 #else /* RC: Always EMT(0) */
trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp
r90379 → r90597

@@ -143 +143 @@
 #endif
 
+/** Special value that GVMMR0DeregisterVCpu sets. */
+#define GVMM_RTNATIVETHREAD_DESTROYED   (~(RTNATIVETHREAD)1)
+AssertCompile(GVMM_RTNATIVETHREAD_DESTROYED != NIL_RTNATIVETHREAD);
 
 
@@ -974 +977 @@
         AssertRC(rc);
 
-        pHandle->pGVM     = pGVM;
-        pHandle->hEMT0    = hEMT0;
-        pHandle->ProcId   = ProcId;
-        pGVM->pVMR3       = pVMR3;
-        pGVM->pVMR3Unsafe = pVMR3;
-        pGVM->aCpus[0].hEMT            = hEMT0;
-        pGVM->aCpus[0].hNativeThreadR0 = hEMT0;
+        pHandle->pGVM                       = pGVM;
+        pHandle->hEMT0                      = hEMT0;
+        pHandle->ProcId                     = ProcId;
+        pGVM->pVMR3                         = pVMR3;
+        pGVM->pVMR3Unsafe                   = pVMR3;
+        pGVM->aCpus[0].hEMT                 = hEMT0;
+        pGVM->aCpus[0].hNativeThreadR0      = hEMT0;
+        pGVM->aCpus[0].cEmtHashCollisions   = 0;
+        uint32_t const idxHash = GVMM_EMT_HASH_1(hEMT0);
+        pGVM->aCpus[0].gvmm.s.idxEmtHash    = (uint16_t)idxHash;
+        pGVM->gvmm.s.aEmtHash[idxHash].hNativeEmt = hEMT0;
+        pGVM->gvmm.s.aEmtHash[idxHash].idVCpu     = 0;
         pGVMM->cEMTs += cCpus;
@@ -1102 +1110 @@
     pGVM->gvmm.s.fDoneVMMR0Init = false;
     pGVM->gvmm.s.fDoneVMMR0Term = false;
+    for (size_t i = 0; i < RT_ELEMENTS(pGVM->gvmm.s.aEmtHash); i++)
+    {
+        pGVM->gvmm.s.aEmtHash[i].hNativeEmt = NIL_RTNATIVETHREAD;
+        pGVM->gvmm.s.aEmtHash[i].idVCpu     = NIL_VMCPUID;
+    }
 
     /*
@@ -1112 +1125 @@
         pGVM->aCpus[i].gvmm.s.HaltEventMulti = NIL_RTSEMEVENTMULTI;
         pGVM->aCpus[i].gvmm.s.VMCpuMapObj    = NIL_RTR0MEMOBJ;
+        pGVM->aCpus[i].gvmm.s.idxEmtHash     = UINT16_MAX;
         pGVM->aCpus[i].hEMT                  = NIL_RTNATIVETHREAD;
         pGVM->aCpus[i].pGVM                  = pGVM;
@@ -1252 +1266 @@
         uint32_t cNotDeregistered = 0;
         for (VMCPUID idCpu = 1; idCpu < pGVM->cCpus; idCpu++)
-            cNotDeregistered += pGVM->aCpus[idCpu].hEMT != ~(RTNATIVETHREAD)1; /* see GVMMR0DeregisterVCpu for the value */
+            cNotDeregistered += pGVM->aCpus[idCpu].hEMT != GVMM_RTNATIVETHREAD_DESTROYED;
         if (cNotDeregistered == 0)
         {
@@ -1503 +1517 @@
      */
     PGVMM pGVMM;
-    int rc = gvmmR0ByGVM(pGVM, &pGVMM, false /* fTakeUsedLock */); /** @todo take lock here. */
+    int rc = gvmmR0ByGVM(pGVM, &pGVMM, false /* fTakeUsedLock */);
     if (RT_SUCCESS(rc))
     {
         if (idCpu < pGVM->cCpus)
         {
+            RTNATIVETHREAD const hNativeSelf = RTThreadNativeSelf();
+
+            gvmmR0CreateDestroyLock(pGVMM); /** @todo per-VM lock? */
+
             /* Check that the EMT isn't already assigned to a thread. */
             if (pGVM->aCpus[idCpu].hEMT == NIL_RTNATIVETHREAD)
…
                 Assert(pGVM->aCpus[idCpu].hNativeThreadR0 == NIL_RTNATIVETHREAD);
 
-                /* A thread may only be one EMT. */
-                RTNATIVETHREAD const hNativeSelf = RTThreadNativeSelf();
+                /* A thread may only be one EMT (this makes sure hNativeSelf isn't NIL). */
                 for (VMCPUID iCpu = 0; iCpu < pGVM->cCpus; iCpu++)
                     AssertBreakStmt(pGVM->aCpus[iCpu].hEMT != hNativeSelf, rc = VERR_INVALID_PARAMETER);
…
                      * Do the assignment, then try setup the hook. Undo if that fails.
                      */
-                    pGVM->aCpus[idCpu].hNativeThreadR0 = pGVM->aCpus[idCpu].hEMT = RTThreadNativeSelf();
+                    unsigned cCollisions = 0;
+                    uint32_t idxHash = GVMM_EMT_HASH_1(hNativeSelf);
+                    if (pGVM->gvmm.s.aEmtHash[idxHash].hNativeEmt != NIL_RTNATIVETHREAD)
+                    {
+                        uint32_t const idxHash2 = GVMM_EMT_HASH_2(hNativeSelf);
+                        do
+                        {
+                            cCollisions++;
+                            Assert(cCollisions < GVMM_EMT_HASH_SIZE);
+                            idxHash = (idxHash + idxHash2) % GVMM_EMT_HASH_SIZE;
+                        } while (pGVM->gvmm.s.aEmtHash[idxHash].hNativeEmt != NIL_RTNATIVETHREAD);
+                    }
+                    pGVM->gvmm.s.aEmtHash[idxHash].hNativeEmt = hNativeSelf;
+                    pGVM->gvmm.s.aEmtHash[idxHash].idVCpu     = idCpu;
+                    pGVM->aCpus[idCpu].hNativeThreadR0        = hNativeSelf;
+                    pGVM->aCpus[idCpu].hEMT                   = hNativeSelf;
+                    pGVM->aCpus[idCpu].cEmtHashCollisions     = (uint8_t)cCollisions;
+                    pGVM->aCpus[idCpu].gvmm.s.idxEmtHash      = (uint16_t)idxHash;
 
                     rc = VMMR0ThreadCtxHookCreateForEmt(&pGVM->aCpus[idCpu]);
…
                         CPUMR0RegisterVCpuThread(&pGVM->aCpus[idCpu]);
                     else
-                        pGVM->aCpus[idCpu].hNativeThreadR0 = pGVM->aCpus[idCpu].hEMT = NIL_RTNATIVETHREAD;
+                    {
+                        pGVM->aCpus[idCpu].hNativeThreadR0        = NIL_RTNATIVETHREAD;
+                        pGVM->aCpus[idCpu].hEMT                   = NIL_RTNATIVETHREAD;
+                        pGVM->gvmm.s.aEmtHash[idxHash].hNativeEmt = NIL_RTNATIVETHREAD;
+                        pGVM->gvmm.s.aEmtHash[idxHash].idVCpu     = NIL_VMCPUID;
+                        pGVM->aCpus[idCpu].gvmm.s.idxEmtHash      = UINT16_MAX;
+                    }
                 }
             }
             else
                 rc = VERR_ACCESS_DENIED;
+
+            gvmmR0CreateDestroyUnlock(pGVMM);
         }
         else
@@ -1566 +1608 @@
      */
     gvmmR0CreateDestroyLock(pGVMM);
+
     uint32_t hSelf = pGVM->hSelf;
     ASMCompilerBarrier();
…
              * GVMMR0RegisterVCpu to be called again, and we don't want that.
              */
-            AssertCompile(~(RTNATIVETHREAD)1 != NIL_RTNATIVETHREAD);
-            pGVM->aCpus[idCpu].hEMT = ~(RTNATIVETHREAD)1;
+            pGVM->aCpus[idCpu].hEMT            = GVMM_RTNATIVETHREAD_DESTROYED;
             pGVM->aCpus[idCpu].hNativeThreadR0 = NIL_RTNATIVETHREAD;
+
+            uint32_t const idxHash = pGVM->aCpus[idCpu].gvmm.s.idxEmtHash;
+            if (idxHash < RT_ELEMENTS(pGVM->gvmm.s.aEmtHash))
+                pGVM->gvmm.s.aEmtHash[idxHash].hNativeEmt = GVMM_RTNATIVETHREAD_DESTROYED;
         }
 
@@ -1900 +1946 @@
     }
     return NULL;
+}
+
+
+/**
+ * Get the GVMCPU structure for the given EMT.
+ *
+ * @returns The VCpu structure for @a hEMT, NULL if not an EMT.
+ * @param   pGVM    The global (ring-0) VM structure.
+ * @param   hEMT    The native thread handle of the EMT.
+ *                  NIL_RTNATIVETHREAD means the current thread
+ */
+GVMMR0DECL(PGVMCPU) GVMMR0GetGVCpuByGVMandEMT(PGVM pGVM, RTNATIVETHREAD hEMT)
+{
+    /*
+     * Validate & adjust input.
+     */
+    AssertPtr(pGVM);
+    Assert(pGVM->u32Magic == GVM_MAGIC);
+    if (hEMT == NIL_RTNATIVETHREAD /* likely */)
+    {
+        hEMT = RTThreadNativeSelf();
+        AssertReturn(hEMT != NIL_RTNATIVETHREAD, NULL);
+    }
+
+    /*
+     * Find the matching hash table entry.
+     */
+    uint32_t idxHash = GVMM_EMT_HASH_1(hEMT);
+    if (pGVM->gvmm.s.aEmtHash[idxHash].hNativeEmt == hEMT)
+    { /* likely */ }
+    else
+    {
+#ifdef VBOX_STRICT
+        unsigned       cCollisions = 0;
+#endif
+        uint32_t const idxHash2 = GVMM_EMT_HASH_2(hEMT);
+        for (;;)
+        {
+            Assert(cCollisions++ < GVMM_EMT_HASH_SIZE);
+            idxHash = (idxHash + idxHash2) % GVMM_EMT_HASH_SIZE;
+            if (pGVM->gvmm.s.aEmtHash[idxHash].hNativeEmt == hEMT)
+                break;
+            if (pGVM->gvmm.s.aEmtHash[idxHash].hNativeEmt == NIL_RTNATIVETHREAD)
+            {
+#ifdef VBOX_STRICT
+                uint32_t idxCpu = pGVM->cCpus;
+                AssertStmt(idxCpu < VMM_MAX_CPU_COUNT, idxCpu = VMM_MAX_CPU_COUNT);
+                while (idxCpu-- > 0)
+                    Assert(pGVM->aCpus[idxCpu].hNativeThreadR0 != hEMT);
+#endif
+                return NULL;
+            }
+        }
+    }
+
+    /*
+     * Validate the VCpu number and translate it into a pointer.
+     */
+    VMCPUID const idCpu = pGVM->gvmm.s.aEmtHash[idxHash].idVCpu;
+    AssertReturn(idCpu < pGVM->cCpus, NULL);
+    PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
+    Assert(pGVCpu->hNativeThreadR0 == hEMT);
+    Assert(pGVCpu->gvmm.s.idxEmtHash == idxHash);
+    return pGVCpu;
 }
 
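The registration and lookup above are plain open addressing with double hashing: GVMM_EMT_HASH_1 picks the initial slot, and on a collision the odd GVMM_EMT_HASH_2 step is added modulo the table size until a free slot (insert) or the matching key / an empty slot (lookup) is reached. Below is a self-contained sketch of the same probing scheme with simplified types and a made-up table size; it is an illustration, not the changeset's code.

    #include <stdint.h>

    #define HASH_SIZE    256                          /* stand-in for GVMM_EMT_HASH_SIZE */
    #define HASH_CORE(h) ((uintptr_t)(h) >> 10)       /* thread handles point at >= 1 KiB structures */
    #define HASH_1(h)    (HASH_CORE(h) % HASH_SIZE)
    #define HASH_2(h)    (((HASH_CORE(h) / HASH_SIZE) | 1) % HASH_SIZE)   /* odd step */

    typedef struct { uintptr_t hKey; uint32_t idValue; } ENTRY;
    static ENTRY g_aHash[HASH_SIZE];   /* hKey == 0 plays the role of NIL_RTNATIVETHREAD here */

    /* Insert: probe with the odd step until a free slot turns up; the real table has
       four slots per possible VCpu, so it never fills up. */
    static void hashInsert(uintptr_t hKey, uint32_t idValue)
    {
        uint32_t idxHash = (uint32_t)HASH_1(hKey);
        if (g_aHash[idxHash].hKey != 0)
        {
            uint32_t const idxHash2 = (uint32_t)HASH_2(hKey);
            do
                idxHash = (idxHash + idxHash2) % HASH_SIZE;
            while (g_aHash[idxHash].hKey != 0);
        }
        g_aHash[idxHash].hKey    = hKey;
        g_aHash[idxHash].idValue = idValue;
    }

    /* Lookup: walk the same probe sequence; hitting a free slot means the key is absent. */
    static int hashLookup(uintptr_t hKey, uint32_t *pidValue)
    {
        uint32_t idxHash = (uint32_t)HASH_1(hKey);
        while (g_aHash[idxHash].hKey != hKey)
        {
            if (g_aHash[idxHash].hKey == 0)
                return 0;
            idxHash = (idxHash + (uint32_t)HASH_2(hKey)) % HASH_SIZE;
        }
        *pidValue = g_aHash[idxHash].idValue;
        return 1;
    }

Because the step is odd and the sketch's table size is a power of two, the two are coprime, so the probe sequence visits every slot before repeating and an insert into a not-completely-full table always terminates.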
trunk/src/VBox/VMM/VMMR0/GVMMR0Internal.h
r82968 → r90597

@@ -40 +40 @@
      *          start using this for something sensible... */
     uint8_t             iCpuEmt;
+    uint8_t             bPadding;
+    /** The EMT hash table index for this VCpu. */
+    uint16_t            idxEmtHash;
 } GVMMPERVCPU;
 /** Pointer to the GVMM per VCPU data. */
 typedef GVMMPERVCPU *PGVMMPERVCPU;
+
+
+/**
+ * EMT hash table entry.
+ */
+typedef struct GVMMEMTHASHENTRY
+{
+    /** The key. */
+    RTNATIVETHREAD      hNativeEmt;
+    /** The VCpu index. */
+    VMCPUID             idVCpu;
+#if HC_ARCH_BITS == 64
+    uint32_t            u32Padding;
+#endif
+} GVMMEMTHASHENTRY;
+AssertCompileSize(GVMMEMTHASHENTRY, sizeof(void *) * 2);
+
+/** The EMT hash table size. */
+#define GVMM_EMT_HASH_SIZE                  (VMM_MAX_CPU_COUNT * 4)
+/** Primary EMT hash table hash function, sans range limit.
+ * @note We assume the native ring-0 thread handle is a pointer to a pretty big
+ *       structure of at least 1 KiB.
+ *          - NT AMD64 6.0 ETHREAD: 0x450.  See
+ *            https://www.geoffchappell.com/studies/windows/km/ntoskrnl/inc/ntos/ps/ethread/index.htm
+ *            for more details.
+ *          - Solaris kthread_t is at least 0x370 in Solaris 10.
+ *          - Linux task_struct looks pretty big too.
+ *          - As does struct thread in xnu.
+ * @todo Make platform specific adjustment as needed. */
+#define GVMM_EMT_HASH_CORE(a_hNativeSelf)   ( (uintptr_t)(a_hNativeSelf) >> 10 )
+/** Primary EMT hash table function. */
+#define GVMM_EMT_HASH_1(a_hNativeSelf)      ( GVMM_EMT_HASH_CORE(a_hNativeSelf) % GVMM_EMT_HASH_SIZE )
+/** Secondary EMT hash table function, added to the primary one on collision.
+ * This uses the bits above the primary hash.
+ * @note It is always odd, which guarantees that we'll visit all hash table
+ *       entries in case of a collision. */
+#define GVMM_EMT_HASH_2(a_hNativeSelf)      ( ((GVMM_EMT_HASH_CORE(a_hNativeSelf) / GVMM_EMT_HASH_SIZE) | 1) % GVMM_EMT_HASH_SIZE )
 
 /**
@@ -65 +105 @@
     /** Whether the per-VM ring-0 termination is being or has been performed. */
     bool                fDoneVMMR0Term;
+    bool                afPadding[6];
+
+    /** EMT lookup hash table. */
+    GVMMEMTHASHENTRY    aEmtHash[GVMM_EMT_HASH_SIZE];
 } GVMMPERVM;
 /** Pointer to the GVMM per VM data. */
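The @note on GVMM_EMT_HASH_2 rests on a simple number-theory fact: an odd increment is coprime to an even table size only when that size has no odd prime factors, i.e. when it is a power of two, and in that case the probe sequence visits every slot before repeating. GVMM_EMT_HASH_SIZE is VMM_MAX_CPU_COUNT * 4, so the guarantee holds whenever VMM_MAX_CPU_COUNT is itself a power of two. A tiny, hypothetical self-check (not part of the tree) demonstrating the property:

    #include <stdbool.h>
    #include <stdint.h>

    /* Returns true if repeatedly adding uOddStep modulo cSlots visits every slot
       exactly once per cycle.  cSlots must be <= 4096 for this simple check. */
    static bool probeSequenceCoversTable(uint32_t cSlots, uint32_t uOddStep)
    {
        bool     afSeen[4096] = { false };
        uint32_t idx          = 0;
        for (uint32_t i = 0; i < cSlots; i++)
        {
            idx = (idx + uOddStep) % cSlots;
            if (afSeen[idx])
                return false;       /* revisited a slot before covering the table */
            afSeen[idx] = true;
        }
        return true;                /* every one of the cSlots slots was visited */
    }

For a power-of-two cSlots this returns true for every odd uOddStep; for an odd step sharing a factor with a non-power-of-two size (say step 3 with 384 slots) it returns false, which is why the power-of-two table size matters.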
trunk/src/VBox/VMM/VMMR3/VMM.cpp
r90379 → r90597

@@ -550 +550 @@
         STAMR3RegisterF(pVM, &pVCpu->vmm.s.cR0HaltsSucceeded, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltHistorySucceeded", i);
         STAMR3RegisterF(pVM, &pVCpu->vmm.s.cR0HaltsToRing3, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/PROF/CPU%u/VM/Halt/R0HaltHistoryToRing3", i);
+
+        STAMR3RegisterF(pVM, &pVCpu->cEmtHashCollisions, STAMTYPE_U8, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "", "/VMM/EmtHashCollisions/Emt%02u", i);
     }
 }
trunk/src/VBox/VMM/include/VMMInternal.h
r90380 → r90597

@@ -463 +463 @@
     bool volatile                   fInHmContext;
 
-    bool                            afPadding[7];
+    bool                            afPadding[5];
+    /** The EMT hash table index. */
+    uint16_t                        idxEmtHash;
     /** Pointer to the VMMR0EntryFast preemption state structure.
      * This is used to temporarily restore preemption before blocking. */
trunk/src/VBox/VMM/testcase/tstVMStruct.h
r90380 → r90597

@@ -1459 +1459 @@
     GEN_CHECK_OFF(VMCPU, hNativeThreadR0);
     GEN_CHECK_OFF(VMCPU, fTraceGroups);
-    GEN_CHECK_OFF(VMCPU, uAdHoc);
+    GEN_CHECK_OFF(VMCPU, abAdHoc);
     GEN_CHECK_OFF(VMCPU, aStatAdHoc);
     GEN_CHECK_OFF(VMCPU, hm);