Changeset 90380 in vbox
Timestamp: Jul 28, 2021 9:38:23 PM (3 years ago)
Location: trunk
Files: 8 edited
Legend:
- Unmodified
- Added
- Removed
trunk/include/VBox/vmm/gvm.h
r90379 r90380 57 57 58 58 /** VCPU id (0 - (pVM->cCpus - 1). */ 59 VMCPUID idCpu;59 VMCPUID idCpu; 60 60 /** Padding. */ 61 uint32_t uPadding;61 uint32_t uPadding0; 62 62 63 63 /** Handle to the EMT thread. */ 64 RTNATIVETHREAD hEMT;64 RTNATIVETHREAD hEMT; 65 65 66 66 /** Pointer to the global (ring-0) VM structure this CPU belongs to. */ 67 R0PTRTYPE(PGVM) pGVM;67 R0PTRTYPE(PGVM) pGVM; 68 68 /** Pointer to the GVM structure, for CTX_SUFF use in VMMAll code. */ 69 PGVM pVMR0;69 PGVM pVMR0; 70 70 /** The ring-3 address of this structure (only VMCPU part). */ 71 PVMCPUR3 pVCpuR3; 71 PVMCPUR3 pVCpuR3; 72 73 /** Padding so the noisy stuff on a 64 byte boundrary. 74 * @note Keeping this working for 32-bit header syntax checking. */ 75 uint8_t abPadding1[HC_ARCH_BITS == 32 ? 40 : 24]; 76 77 /** Which host CPU ID is this EMT running on. 78 * Only valid when in RC or HMR0 with scheduling disabled. */ 79 RTCPUID volatile idHostCpu; 80 /** The CPU set index corresponding to idHostCpu, UINT32_MAX if not valid. 81 * @remarks Best to make sure iHostCpuSet shares cache line with idHostCpu! */ 82 uint32_t volatile iHostCpuSet; 72 83 73 84 /** Padding so gvmm starts on a 64 byte boundrary. 74 85 * @note Keeping this working for 32-bit header syntax checking. */ 75 uint8_t abPadding[HC_ARCH_BITS == 32 ? 40 : 24];86 uint8_t abPadding2[56]; 76 87 77 88 /** The GVMM per vcpu data. */ … … 109 120 struct VMMR0PERVCPU s; 110 121 #endif 111 uint8_t padding[ 128];122 uint8_t padding[64]; 112 123 } vmmr0; 113 124 114 125 /** Padding the structure size to page boundrary. */ 115 126 #ifdef VBOX_WITH_NEM_R0 116 uint8_t abPadding 2[4096 - 64 - 64 - 1024 - 64 - 128];127 uint8_t abPadding3[4096 - 64*2 - 64 - 1024 - 64 - 64]; 117 128 #else 118 uint8_t abPadding 2[4096 - 64 - 64 - 1024 - 128];129 uint8_t abPadding3[4096 - 64*2 - 64 - 1024 - 64]; 119 130 #endif 120 131 } GVMCPU; -
trunk/include/VBox/vmm/gvm.mac
r90379 r90380 40 40 41 41 alignb 64 42 .idHostCpu resd 1 43 .iHostCpuSet resd 1 44 45 alignb 64 42 46 .gvmm resb 64 43 47 alignb 64 … … 47 51 %endif 48 52 alignb 64 49 .vmmr0 resb 12853 .vmmr0 resb 64 50 54 alignb 4096 51 55 endstruc -
trunk/include/VBox/vmm/vm.h
r88348 r90380 120 120 VMCPUSTATE volatile enmState; 121 121 122 /** Which host CPU ID is this EMT running on.123 * Only valid when in RC or HMR0 with scheduling disabled. */124 RTCPUID volatile idHostCpu;125 /** The CPU set index corresponding to idHostCpu, UINT32_MAX if not valid.126 * @remarks Best to make sure iHostCpuSet shares cache line with idHostCpu! */127 uint32_t volatile iHostCpuSet;128 122 /** Padding up to 64 bytes. */ 129 uint8_t abAlignment0[64 - 20];123 uint8_t abAlignment0[64 - 12]; 130 124 /** @} */ 131 125 -
trunk/include/VBox/vmm/vm.mac
r88348 r90380 46 46 alignb 8 47 47 .enmState resd 1 48 .idHostCpu resd 149 .iHostCpuSet resd 150 48 51 49 alignb 64 -
trunk/src/VBox/VMM/VMMAll/TMAllCpu.cpp
r89087 r90380 368 368 * sync deadline. 369 369 */ 370 DECLINLINE(uint64_t) tmCpuCalcTicksToDeadline(PVMCPU pVCpu, uint64_t cNsToDeadline)370 DECLINLINE(uint64_t) tmCpuCalcTicksToDeadline(PVMCPUCC pVCpu, uint64_t cNsToDeadline) 371 371 { 372 372 AssertCompile(TMCLOCK_FREQ_VIRTUAL <= _4G); -
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
r90379 r90380 368 368 { 369 369 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu]; 370 pGVCpu->vmmr0.s.idHostCpu = NIL_RTCPUID;371 pGVCpu->vmmr0.s.iHostCpuSet = UINT32_MAX;370 Assert(pGVCpu->idHostCpu == NIL_RTCPUID); 371 Assert(pGVCpu->iHostCpuSet == UINT32_MAX); 372 372 pGVCpu->vmmr0.s.fInHmContext = false; 373 373 pGVCpu->vmmr0.s.pPreemptState = NULL; … … 1006 1006 RTCPUID idHostCpu; 1007 1007 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu); 1008 pVCpu->vmmr0.s.iHostCpuSet = iHostCpuSet;1009 ASMAtomicWriteU32(&pVCpu->vmmr0.s.idHostCpu, idHostCpu);1010 1008 pVCpu->iHostCpuSet = iHostCpuSet; 1011 1009 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu); … … 1036 1034 * have the same host CPU associated with it. 1037 1035 */ 1038 pVCpu->vmmr0.s.iHostCpuSet = UINT32_MAX;1039 ASMAtomicWriteU32(&pVCpu->vmmr0.s.idHostCpu, NIL_RTCPUID);1040 1036 pVCpu->iHostCpuSet = UINT32_MAX; 1041 1037 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID); … … 1109 1105 * @thread EMT(pVCpu) 1110 1106 * 1111 * @remarks This also clears VMCPU::idHostCpu, so the mapping is invalid after1107 * @remarks This also clears GVMCPU::idHostCpu, so the mapping is invalid after 1112 1108 * this call. This means you have to be careful with what you do! 1113 1109 */ … … 1121 1117 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disables context hooks during 1122 1118 * longjmp & normal return to ring-3, which opens a window where we may be 1123 * rescheduled without changing VMCPUID::idHostCpu and cause confusion if1119 * rescheduled without changing GVMCPUID::idHostCpu and cause confusion if 1124 1120 * the CPU starts executing a different EMT. Both functions first disables 1125 1121 * preemption and then calls HMR0LeaveCpu which invalids idHostCpu, leaving … … 1128 1124 /** @todo Make HM not need this API! Then we could leave the hooks enabled 1129 1125 * all the time. */ 1130 /** @todo move this into the context hook disabling if(). 
*/1131 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);1132 1126 1133 1127 /* … … 1137 1131 { 1138 1132 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 1139 ASMAtomicWriteU32(&pVCpu-> vmmr0.s.idHostCpu, NIL_RTCPUID);1133 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID); 1140 1134 int rc = RTThreadCtxHookDisable(pVCpu->vmmr0.s.hCtxHook); 1141 1135 AssertRC(rc); … … 1458 1452 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet))) 1459 1453 { 1460 pGVCpu->vmmr0.s.iHostCpuSet = iHostCpuSet;1461 ASMAtomicWriteU32(&pGVCpu->vmmr0.s.idHostCpu, idHostCpu);1462 1463 1454 pGVCpu->iHostCpuSet = iHostCpuSet; 1464 1455 ASMAtomicWriteU32(&pGVCpu->idHostCpu, idHostCpu); … … 1552 1543 * hook / restore preemption. 1553 1544 */ 1554 pGVCpu->vmmr0.s.iHostCpuSet = UINT32_MAX;1555 ASMAtomicWriteU32(&pGVCpu->vmmr0.s.idHostCpu, NIL_RTCPUID);1556 1557 1545 pGVCpu->iHostCpuSet = UINT32_MAX; 1558 1546 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID); … … 1566 1554 */ 1567 1555 if (pGVCpu->vmmr0.s.hCtxHook != NIL_RTTHREADCTXHOOK) 1568 {1569 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);1570 1556 RTThreadCtxHookDisable(pGVCpu->vmmr0.s.hCtxHook); 1571 }1572 1557 } 1573 1558 /* … … 1576 1561 else 1577 1562 { 1578 rc = VINF_EM_RAW_INTERRUPT;1579 1563 pGVCpu->iHostCpuSet = UINT32_MAX; 1580 1564 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID); 1565 rc = VINF_EM_RAW_INTERRUPT; 1581 1566 } 1582 1567 … … 1618 1603 { 1619 1604 pGVCpu->vmmr0.s.pPreemptState = NULL; 1620 pGVCpu->vmmr0.s.iHostCpuSet = UINT32_MAX;1621 ASMAtomicWriteU32(&pGVCpu->vmmr0.s.idHostCpu, NIL_RTCPUID);1622 1605 pGVCpu->iHostCpuSet = UINT32_MAX; 1623 1606 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID); … … 2635 2618 * Are we in HM context and w/o a context hook? If so work the context hook. 
2636 2619 */ 2637 if (pVCpu-> vmmr0.s.idHostCpu != NIL_RTCPUID)2638 { 2639 Assert(pVCpu-> vmmr0.s.iHostCpuSet != UINT32_MAX);2620 if (pVCpu->idHostCpu != NIL_RTCPUID) 2621 { 2622 Assert(pVCpu->iHostCpuSet != UINT32_MAX); 2640 2623 Assert(pVCpu->vmmr0.s.fInHmContext); 2641 2624 -
trunk/src/VBox/VMM/include/VMMInternal.h
r90379 r90380 460 460 typedef struct VMMR0PERVCPU 461 461 { 462 /** Which host CPU ID is this EMT running on.463 * Only valid when in RC or HMR0 with scheduling disabled. */464 RTCPUID volatile idHostCpu;465 /** The CPU set index corresponding to idHostCpu, UINT32_MAX if not valid.466 * @remarks Best to make sure iHostCpuSet shares cache line with idHostCpu! */467 uint32_t volatile iHostCpuSet;468 462 /** Set if we've entered HM context. */ 469 463 bool volatile fInHmContext; -
trunk/src/VBox/VMM/testcase/tstVMStruct.h
r89088 r90380 1458 1458 GEN_CHECK_OFF(VMCPU, hNativeThread); 1459 1459 GEN_CHECK_OFF(VMCPU, hNativeThreadR0); 1460 GEN_CHECK_OFF(VMCPU, idHostCpu);1461 1460 GEN_CHECK_OFF(VMCPU, fTraceGroups); 1462 1461 GEN_CHECK_OFF(VMCPU, uAdHoc);
Note: See TracChangeset for help on using the changeset viewer.