Changeset 93655 in vbox
- Timestamp: Feb 8, 2022 1:56:01 PM (3 years ago)
- svn:sync-xref-src-repo-rev: 149810
- Location: trunk
- Files: 10 edited
trunk/include/VBox/vmm/gvm.h
r93650 → r93655

287  287          TMR0PERVM   s;
288  288      #endif
289       -       uint8_t     padding[128];
     289  +       uint8_t     padding[192];
290  290      } tmr0;
…
300  300      /** Padding so aCpus starts on a page boundrary. */
301  301  #ifdef VBOX_WITH_NEM_R0
302       -   uint8_t abPadding2[16384 - 64 - 4352 - 1024 - 256 - 256 - 64 - 3008 - 1920 - 512 - 64 - 1024 - 128 - 704 - sizeof(PGVMCPU) * VMM_MAX_CPU_COUNT];
     302  +   uint8_t abPadding2[16384 - 64 - 4352 - 1024 - 256 - 256 - 64 - 3008 - 1920 - 512 - 64 - 1024 - 192 - 704 - sizeof(PGVMCPU) * VMM_MAX_CPU_COUNT];
303  303  #else
304       -   uint8_t abPadding2[16384 - 64 - 4352 - 1024 - 256 - 64 - 3008 - 1920 - 512 - 64 - 1024 - 128 - 704 - sizeof(PGVMCPU) * VMM_MAX_CPU_COUNT];
     304  +   uint8_t abPadding2[16384 - 64 - 4352 - 1024 - 256 - 64 - 3008 - 1920 - 512 - 64 - 1024 - 192 - 704 - sizeof(PGVMCPU) * VMM_MAX_CPU_COUNT];
305  305  #endif
…
340  340  #endif /* !VBOX_INCLUDED_vmm_gvm_h */
341  341
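The padding arm of each per-component union fixes that union's size, so when TMR0PERVM grows past 128 bytes the padding must be bumped (here to 192) and the page-alignment arithmetic for abPadding2 shrunk by the same amount. Below is a minimal, self-contained sketch of this padded-union pattern; the MYCOMPR0PERVM/MYGVM names and the sizes are illustrative, not taken from the VirtualBox headers.

#include <stdint.h>
#include <stddef.h>

/* Illustrative per-VM component data (a stand-in for something like TMR0PERVM). */
typedef struct MYCOMPR0PERVM
{
    uint64_t au64Queues[20];                /* 160 bytes of real payload */
} MYCOMPR0PERVM;

typedef struct MYGVM
{
    /* The component sits in a union with a fixed-size padding arm, so the
       offsets of everything after it stay put when the payload grows.     */
    union
    {
        MYCOMPR0PERVM s;
        uint8_t       padding[192];         /* must stay >= sizeof(MYCOMPR0PERVM) */
    } mycompr0;

    /* Filler sized so the next member lands on a fixed boundary; it shrinks by
       exactly as much as the unions above grow (cf. abPadding2 in gvm.h).      */
    uint8_t abPadding[4096 - 192];
    uint8_t aFixedOffsetArea[4096];         /* e.g. per-CPU data that must stay aligned */
} MYGVM;

/* Compile-time layout checks in the spirit of AssertCompile(). */
_Static_assert(sizeof(MYCOMPR0PERVM) <= 192, "payload must fit the padding arm");
_Static_assert(offsetof(MYGVM, aFixedOffsetArea) == 4096, "alignment contract broken");

int main(void) { return 0; }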
trunk/include/VBox/vmm/vmcc.h
r93115 → r93655

122  122
123  123
     124  +  /**
     125  +   * Used to pick ring-0 or ring-3 VM component data.
     126  +   *
     127  +   * @code{.cpp}
     128  +   *      pVM->VMCC_CTX(pdm).s.pfnWorker
     129  +   * @endcode
     130  +   */
     131  +  #ifdef IN_RING0
     132  +  # define VMCC_CTX(a_Name)   a_Name ## r0
     133  +  #else
     134  +  # define VMCC_CTX(a_Name)   a_Name
     135  +  #endif
     136  +
124  137  #endif /* !VBOX_INCLUDED_vmm_vmcc_h */
125  138
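The new VMCC_CTX() macro selects the ring-0 member (name suffixed "r0") or the shared/ring-3 member by token pasting, so code compiled into both contexts can name whichever copy it is allowed to touch. A standalone sketch of the same idea follows; the MY_CTX macro and the tm/tmr0 stand-ins are illustrative only.

#include <stdio.h>

/* Define IN_RING0 on the compiler command line (e.g. cc -DIN_RING0 ...) to
   emulate a ring-0 build of this translation unit.                        */
#ifdef IN_RING0
# define MY_CTX(a_Name)  a_Name ## r0      /* tm  ->  tmr0 */
#else
# define MY_CTX(a_Name)  a_Name            /* tm  ->  tm   */
#endif

/* Stand-ins for the shared and the ring-0-only per-VM component data. */
static struct { const char *pszWho; } tm   = { "ring-3/shared worker" };
static struct { const char *pszWho; } tmr0 = { "ring-0 worker" };

int main(void)
{
    /* Expands to tm.pszWho in a ring-3 build and tmr0.pszWho in a ring-0 build. */
    printf("%s\n", MY_CTX(tm).pszWho);
    return 0;
}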
trunk/src/VBox/VMM/VMMAll/APICAll.cpp
r93115 → r93655

612  612                               uint32_t uSrcTag, int rcRZ)
613  613  {
     614  +     AssertCompile(sizeof(pVM->apic.s) <= sizeof(pVM->apic.padding));
     615  +     AssertCompile(sizeof(pVCpu->apic.s) <= sizeof(pVCpu->apic.padding));
     616  +  #ifdef IN_RING0
     617  +     AssertCompile(sizeof(pVM->apicr0.s) <= sizeof(pVM->apicr0.padding));
     618  +  #endif
614  619      VBOXSTRICTRC rcStrict = VINF_SUCCESS;
615  620      VMCPUID const cCpus = pVM->cCpus;
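The added AssertCompile() statements are compile-time guards that the component data still fits its padding arm in every build of this shared file; the third check is wrapped in #ifdef IN_RING0 because the apicr0 member only exists in ring-0 builds. A small sketch of function-scope static assertions used the same way; the types, member names and sizes here are invented for illustration.

#include <assert.h>     /* static_assert (C11) */

/* Invented component types and padding sizes. */
typedef struct { char payload[40]; } APICSHAREDDATA;
typedef struct { char payload[24]; } APICR0DATA;

typedef struct MYVM
{
    union { APICSHAREDDATA s; char padding[64]; } apic;
#ifdef IN_RING0
    union { APICR0DATA s; char padding[32]; } apicr0;   /* visible to ring-0 builds only */
#endif
} MYVM;

static int doWork(MYVM *pVM)
{
    /* sizeof() never evaluates its operand, so these are pure compile-time
       checks; the ring-0 one is only compiled where the member exists.    */
    static_assert(sizeof(pVM->apic.s) <= sizeof(pVM->apic.padding), "apic data must fit");
#ifdef IN_RING0
    static_assert(sizeof(pVM->apicr0.s) <= sizeof(pVM->apicr0.padding), "apicr0 data must fit");
#endif
    (void)pVM;
    return 0;
}

int main(void) { MYVM Vm = {0}; return doWork(&Vm); }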
trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp
r93115 → r93655

46   46                                                uint64_t u64PrevNanoTS)
47   47   {
48        -     PVM pVM = RT_FROM_MEMBER(pData, VM, CTX_SUFF(tm.s.VirtualGetRawData));
     48   +     PVMCC pVM = RT_FROM_MEMBER(pData, VMCC, VMCC_CTX(tm).s.VirtualGetRawData);
49   49       pData->cBadPrev++;
50   50       if ((int64_t)u64DeltaPrev < 0)
…
81   81   DECLCALLBACK(DECLEXPORT(uint64_t)) tmVirtualNanoTSRediscover(PRTTIMENANOTSDATA pData, PRTTIMENANOTSEXTRA pExtra)
82   82   {
83        -     PVM pVM = RT_FROM_MEMBER(pData, VM, CTX_SUFF(tm.s.VirtualGetRawData));
     83   +     PVMCC pVM = RT_FROM_MEMBER(pData, VMCC, VMCC_CTX(tm).s.VirtualGetRawData);
84   84       PFNTIMENANOTSINTERNAL pfnWorker;
85   85
…
166  166       * Update the pfnVirtualGetRaw pointer and call the worker we selected.
167  167       */
168       -     ASMAtomicWritePtr((void * volatile *)&CTX_SUFF(pVM->tm.s.pfnVirtualGetRaw), (void *)(uintptr_t)pfnWorker);
     168  +     ASMAtomicWritePtr((void * volatile *)&pVM->VMCC_CTX(tm).s.pfnVirtualGetRaw, (void *)(uintptr_t)pfnWorker);
169  169      return pfnWorker(pData, pExtra);
170  170   }
…
177  177                                                      uint16_t idApic, uint16_t iCpuSet, uint16_t iGipCpu)
178  178   {
179       -     PVM pVM = RT_FROM_MEMBER(pData, VM, CTX_SUFF(tm.s.VirtualGetRawData));
     179  +     PVMCC pVM = RT_FROM_MEMBER(pData, VMCC, VMCC_CTX(tm).s.VirtualGetRawData);
180  180       AssertFatalMsgFailed(("pVM=%p idApic=%#x iCpuSet=%#x iGipCpu=%#x pExtra=%p\n", pVM, idApic, iCpuSet, iGipCpu, pExtra));
181  181   #ifndef _MSC_VER
…
190  190   DECLINLINE(uint64_t) tmVirtualGetRawNanoTS(PVMCC pVM)
191  191   {
192       -  # ifdef IN_RING3
193       -     uint64_t u64 = CTXALLSUFF(pVM->tm.s.pfnVirtualGetRaw)(&CTXALLSUFF(pVM->tm.s.VirtualGetRawData), NULL /*pExtra*/);
194       -  # else /* !IN_RING3 */
195       -     uint32_t cPrevSteps = pVM->tm.s.CTX_SUFF(VirtualGetRawData).c1nsSteps;
196       -     uint64_t u64 = pVM->tm.s.CTX_SUFF(pfnVirtualGetRaw)(&pVM->tm.s.CTX_SUFF(VirtualGetRawData), NULL /*pExtra*/);
197       -     if (cPrevSteps != pVM->tm.s.CTX_SUFF(VirtualGetRawData).c1nsSteps)
     192  +  #ifdef IN_RING3
     193  +     uint64_t u64 = pVM->tm.s.pfnVirtualGetRaw(&pVM->tm.s.VirtualGetRawData, NULL /*pExtra*/);
     194  +  #elif defined(IN_RING0)
     195  +     uint32_t cPrevSteps = pVM->tmr0.s.VirtualGetRawData.c1nsSteps;
     196  +     uint64_t u64 = pVM->tmr0.s.pfnVirtualGetRaw(&pVM->tmr0.s.VirtualGetRawData, NULL /*pExtra*/);
     197  +     if (cPrevSteps != pVM->tmr0.s.VirtualGetRawData.c1nsSteps)
198  198          VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
199       -  # endif /* !IN_RING3 */
     199  +  #else
     200  +  # error "unsupported context"
     201  +  #endif
200  202      /*DBGFTRACE_POS_U64(pVM, u64);*/
201  203      return u64;
…
209  211   {
210  212       RTTIMENANOTSEXTRA Extra;
211       -  # ifdef IN_RING3
212       -     uint64_t u64 = CTXALLSUFF(pVM->tm.s.pfnVirtualGetRaw)(&CTXALLSUFF(pVM->tm.s.VirtualGetRawData), &Extra);
213       -  # else /* !IN_RING3 */
214       -     uint32_t cPrevSteps = pVM->tm.s.CTX_SUFF(VirtualGetRawData).c1nsSteps;
215       -     uint64_t u64 = pVM->tm.s.CTX_SUFF(pfnVirtualGetRaw)(&pVM->tm.s.CTX_SUFF(VirtualGetRawData), &Extra);
216       -     if (cPrevSteps != pVM->tm.s.CTX_SUFF(VirtualGetRawData).c1nsSteps)
     213  +  #ifdef IN_RING3
     214  +     uint64_t u64 = pVM->tm.s.pfnVirtualGetRaw(&pVM->tm.s.VirtualGetRawData, &Extra);
     215  +  #elif defined(IN_RING0)
     216  +     uint32_t cPrevSteps = pVM->tmr0.s.VirtualGetRawData.c1nsSteps;
     217  +     uint64_t u64 = pVM->tmr0.s.pfnVirtualGetRaw(&pVM->tmr0.s.VirtualGetRawData, &Extra);
     218  +     if (cPrevSteps != pVM->tmr0.s.VirtualGetRawData.c1nsSteps)
217  219          VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
218       -  # endif /* !IN_RING3 */
     220  +  #else
     221  +  # error "unsupported context"
     222  +  #endif
219  223      if (puTscNow)
220  224          *puTscNow = Extra.uTSCValue;
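Two patterns show up in these hunks: the worker callbacks recover the owning VM structure from the embedded RTTIMENANOTSDATA member via RT_FROM_MEMBER(), and the getters now pick the ring-3 or ring-0 copy of that data with plain #ifdef blocks instead of the old CTX_SUFF()/CTXALLSUFF() name suffixing. A self-contained sketch of the RT_FROM_MEMBER idea (the classic containerof calculation) follows; all names are illustrative, not the real VirtualBox types.

#include <stddef.h>
#include <stdio.h>

typedef struct TIMEDATA { unsigned cBadPrev; } TIMEDATA;

typedef struct MYVM
{
    int      iVmId;
    TIMEDATA VirtualGetRawData;   /* embedded member handed out to callbacks */
} MYVM;

/* Same trick as RT_FROM_MEMBER()/containerof(): subtract the member's offset
   from the member's address to get back to the enclosing structure.        */
#define MY_FROM_MEMBER(a_pMember, a_Type, a_Member) \
    ((a_Type *)((char *)(a_pMember) - offsetof(a_Type, a_Member)))

/* A callback that only receives the embedded member, like tmVirtualNanoTSBad(). */
static void badPrevCallback(TIMEDATA *pData)
{
    MYVM *pVM = MY_FROM_MEMBER(pData, MYVM, VirtualGetRawData);
    pData->cBadPrev++;
    printf("bad previous timestamp on VM %d (count %u)\n", pVM->iVmId, pData->cBadPrev);
}

int main(void)
{
    MYVM Vm = { 42, { 0 } };
    badPrevCallback(&Vm.VirtualGetRawData);   /* prints: ... VM 42 (count 1) */
    return 0;
}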
trunk/src/VBox/VMM/VMMR0/DBGFR0.cpp
r93115 → r93655

51   51   VMMR0_INT_DECL(void) DBGFR0InitPerVMData(PGVM pGVM)
52   52   {
     53   +     AssertCompile(sizeof(pGVM->dbgfr0.s) <= sizeof(pGVM->dbgfr0.padding));
53   54       pGVM->dbgfr0.s.pTracerR0 = NULL;
54   55
trunk/src/VBox/VMM/VMMR0/HMR0.cpp
r93557 → r93655

1160  1160   VMMR0_INT_DECL(int) HMR0InitVM(PVMCC pVM)
1161  1161   {
      1162   +     AssertCompile(sizeof(pVM->hm.s) <= sizeof(pVM->hm.padding));
      1163   +     AssertCompile(sizeof(pVM->hmr0.s) <= sizeof(pVM->hmr0.padding));
      1164   +     AssertCompile(sizeof(pVM->aCpus[0].hm.s) <= sizeof(pVM->aCpus[0].hm.padding));
      1165   +     AssertCompile(sizeof(pVM->aCpus[0].hmr0.s) <= sizeof(pVM->aCpus[0].hmr0.padding));
1162  1166       AssertReturn(pVM, VERR_INVALID_PARAMETER);
1163  1167
trunk/src/VBox/VMM/VMMR0/IEMR0.cpp
r93650 → r93655

34   34   VMMR0_INT_DECL(int) IEMR0InitVM(PGVM pGVM)
35   35   {
     36   +     AssertCompile(sizeof(pGVM->iem.s) <= sizeof(pGVM->iem.padding));
     37   +     AssertCompile(sizeof(pGVM->aCpus[0].iem.s) <= sizeof(pGVM->aCpus[0].iem.padding));
     38   +
36   39   #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
37   40       /*
trunk/src/VBox/VMM/VMMR0/TMR0.cpp
r93654 → r93655

46   46   VMMR0_INT_DECL(void) TMR0InitPerVMData(PGVM pGVM)
47   47   {
     48   +     AssertCompile(sizeof(pGVM->tmr0.padding) >= sizeof(pGVM->tmr0.s));
     49   +
48   50       for (uint32_t idxQueue = 0; idxQueue < RT_ELEMENTS(pGVM->tmr0.s.aTimerQueues); idxQueue++)
49   51       {
…
52   54       }
53   55
54        -     pGVM->tm.s.VirtualGetRawDataR0.pu64Prev       = &pGVM->tm.s.u64VirtualRawPrev;
55        -     pGVM->tm.s.VirtualGetRawDataR0.pfnBad         = tmVirtualNanoTSBad;
56        -     pGVM->tm.s.VirtualGetRawDataR0.pfnBadCpuIndex = tmVirtualNanoTSBadCpuIndex;
57        -     pGVM->tm.s.VirtualGetRawDataR0.pfnRediscover  = tmVirtualNanoTSRediscover;
58        -     pGVM->tm.s.pfnVirtualGetRawR0                 = tmVirtualNanoTSRediscover;
     56   +     pGVM->tmr0.s.VirtualGetRawData.pu64Prev       = &pGVM->tm.s.u64VirtualRawPrev;
     57   +     pGVM->tmr0.s.VirtualGetRawData.pfnBad         = tmVirtualNanoTSBad;
     58   +     pGVM->tmr0.s.VirtualGetRawData.pfnBadCpuIndex = tmVirtualNanoTSBadCpuIndex;
     59   +     pGVM->tmr0.s.VirtualGetRawData.pfnRediscover  = tmVirtualNanoTSRediscover;
     60   +     pGVM->tmr0.s.pfnVirtualGetRaw                 = tmVirtualNanoTSRediscover;
59   61   }
60   62
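TMR0InitPerVMData() seeds every callback, including pfnVirtualGetRaw itself, with tmVirtualNanoTSRediscover: the first caller therefore runs the rediscover routine, which picks a suitable worker and publishes it so later calls go straight to it. A minimal sketch of that lazy, self-replacing function-pointer pattern; the names and the capability probe are invented, and the real code does the publish with ASMAtomicWritePtr() rather than a plain store.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t (*PFNGETNANOTS)(void);

static uint64_t getNanoTsRediscover(void);

/* Everyone calls through this pointer; it starts out pointing at the
   rediscover routine, mirroring how pfnVirtualGetRaw is initialised. */
static PFNGETNANOTS g_pfnGetNanoTs = getNanoTsRediscover;

static uint64_t getNanoTsInvariantTsc(void) { return 1000; /* stand-in worker A */ }
static uint64_t getNanoTsFallback(void)     { return 2000; /* stand-in worker B */ }

/* First call lands here: probe the host, install the matching worker,
   then service this first call through it as well.                    */
static uint64_t getNanoTsRediscover(void)
{
    int fHasInvariantTsc = 1;   /* stand-in for the real capability checks */
    PFNGETNANOTS pfnWorker = fHasInvariantTsc ? getNanoTsInvariantTsc : getNanoTsFallback;
    g_pfnGetNanoTs = pfnWorker;
    return pfnWorker();
}

int main(void)
{
    printf("%llu\n", (unsigned long long)g_pfnGetNanoTs());   /* goes via rediscover */
    printf("%llu\n", (unsigned long long)g_pfnGetNanoTs());   /* straight to the worker */
    return 0;
}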
trunk/src/VBox/VMM/VMMR3/TM.cpp
r93654 → r93655

287  287       * Setup the VirtualGetRaw backend.
288  288       */
289       -     pVM->tm.s.pfnVirtualGetRawR3                 = tmVirtualNanoTSRediscover;
290       -     pVM->tm.s.VirtualGetRawDataR3.pfnRediscover  = tmVirtualNanoTSRediscover;
291       -     pVM->tm.s.VirtualGetRawDataR3.pfnBad         = tmVirtualNanoTSBad;
292       -     pVM->tm.s.VirtualGetRawDataR3.pfnBadCpuIndex = tmVirtualNanoTSBadCpuIndex;
293       -     pVM->tm.s.VirtualGetRawDataR3.pu64Prev       = &pVM->tm.s.u64VirtualRawPrev;
     289  +     pVM->tm.s.pfnVirtualGetRaw                 = tmVirtualNanoTSRediscover;
     290  +     pVM->tm.s.VirtualGetRawData.pfnRediscover  = tmVirtualNanoTSRediscover;
     291  +     pVM->tm.s.VirtualGetRawData.pfnBad         = tmVirtualNanoTSBad;
     292  +     pVM->tm.s.VirtualGetRawData.pfnBadCpuIndex = tmVirtualNanoTSBadCpuIndex;
     293  +     pVM->tm.s.VirtualGetRawData.pu64Prev       = &pVM->tm.s.u64VirtualRawPrev;
294  294
295  295       /*
…
671  671       * Register statistics.
672  672       */
673       -     STAM_REL_REG_USED(pVM,(void*)&pVM->tm.s.VirtualGetRawDataR3.c1nsSteps,STAMTYPE_U32, "/TM/R3/1nsSteps", STAMUNIT_OCCURENCES, "Virtual time 1ns steps (due to TSC / GIP variations).");
674       -     STAM_REL_REG_USED(pVM,(void*)&pVM->tm.s.VirtualGetRawDataR3.cBadPrev, STAMTYPE_U32, "/TM/R3/cBadPrev", STAMUNIT_OCCURENCES, "Times the previous virtual time was considered erratic (shouldn't ever happen).");
     673  +     STAM_REL_REG_USED(pVM,(void*)&pVM->tm.s.VirtualGetRawData.c1nsSteps,STAMTYPE_U32, "/TM/R3/1nsSteps", STAMUNIT_OCCURENCES, "Virtual time 1ns steps (due to TSC / GIP variations).");
     674  +     STAM_REL_REG_USED(pVM,(void*)&pVM->tm.s.VirtualGetRawData.cBadPrev, STAMTYPE_U32, "/TM/R3/cBadPrev", STAMUNIT_OCCURENCES, "Times the previous virtual time was considered erratic (shouldn't ever happen).");
     675  +  #if 0 /** @todo retreive from ring-0 */
675  676       STAM_REL_REG_USED(pVM,(void*)&pVM->tm.s.VirtualGetRawDataR0.c1nsSteps,STAMTYPE_U32, "/TM/R0/1nsSteps", STAMUNIT_OCCURENCES, "Virtual time 1ns steps (due to TSC / GIP variations).");
676  677       STAM_REL_REG_USED(pVM,(void*)&pVM->tm.s.VirtualGetRawDataR0.cBadPrev, STAMTYPE_U32, "/TM/R0/cBadPrev", STAMUNIT_OCCURENCES, "Times the previous virtual time was considered erratic (shouldn't ever happen).");
     678  +  #endif
677  679       STAM_REL_REG( pVM,(void*)&pVM->tm.s.offVirtualSync, STAMTYPE_U64, "/TM/VirtualSync/CurrentOffset", STAMUNIT_NS, "The current offset. \n(subtract GivenUp to get the lag)");
678  680       STAM_REL_REG_USED(pVM,(void*)&pVM->tm.s.offVirtualSyncGivenUp, STAMTYPE_U64, "/TM/VirtualSync/GivenUp", STAMUNIT_NS, "Nanoseconds of the 'CurrentOffset' that's been given up and won't ever be attempted caught up with.");
…
686  688
687  689   #ifdef VBOX_WITH_STATISTICS
688       -     STAM_REG_USED(pVM,(void *)&pVM->tm.s.VirtualGetRawDataR3.cExpired, STAMTYPE_U32, "/TM/R3/cExpired", STAMUNIT_OCCURENCES, "Times the TSC interval expired (overlaps 1ns steps).");
689       -     STAM_REG_USED(pVM,(void *)&pVM->tm.s.VirtualGetRawDataR3.cUpdateRaces,STAMTYPE_U32, "/TM/R3/cUpdateRaces", STAMUNIT_OCCURENCES, "Thread races when updating the previous timestamp.");
     690  +     STAM_REG_USED(pVM,(void *)&pVM->tm.s.VirtualGetRawData.cExpired, STAMTYPE_U32, "/TM/R3/cExpired", STAMUNIT_OCCURENCES, "Times the TSC interval expired (overlaps 1ns steps).");
     691  +     STAM_REG_USED(pVM,(void *)&pVM->tm.s.VirtualGetRawData.cUpdateRaces,STAMTYPE_U32, "/TM/R3/cUpdateRaces", STAMUNIT_OCCURENCES, "Thread races when updating the previous timestamp.");
     692  +  # if 0 /** @todo retreive from ring-0 */
690  693       STAM_REG_USED(pVM,(void *)&pVM->tm.s.VirtualGetRawDataR0.cExpired, STAMTYPE_U32, "/TM/R0/cExpired", STAMUNIT_OCCURENCES, "Times the TSC interval expired (overlaps 1ns steps).");
691  694       STAM_REG_USED(pVM,(void *)&pVM->tm.s.VirtualGetRawDataR0.cUpdateRaces,STAMTYPE_U32, "/TM/R0/cUpdateRaces", STAMUNIT_OCCURENCES, "Thread races when updating the previous timestamp.");
     695  +  # endif
692  696       STAM_REG(pVM, &pVM->tm.s.StatDoQueues, STAMTYPE_PROFILE, "/TM/DoQueues", STAMUNIT_TICKS_PER_CALL, "Profiling timer TMR3TimerQueuesDo.");
693  697       STAM_REG(pVM, &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].StatDo, STAMTYPE_PROFILE, "/TM/DoQueues/Virtual", STAMUNIT_TICKS_PER_CALL, "Time spent on the virtual clock queue.");
trunk/src/VBox/VMM/include/TMInternal.h
r93115 → r93655

489  489       uint64_t volatile           u64VirtualRawPrev;
490  490       /** The ring-3 data structure for the RTTimeNanoTS workers used by tmVirtualGetRawNanoTS. */
491       -     RTTIMENANOTSDATAR3          VirtualGetRawDataR3;
492       -     /** The ring-0 data structure for the RTTimeNanoTS workers used by tmVirtualGetRawNanoTS. */
493       -     RTTIMENANOTSDATAR0          VirtualGetRawDataR0;
     491  +     RTTIMENANOTSDATAR3          VirtualGetRawData;
494  492       /** Pointer to the ring-3 tmVirtualGetRawNanoTS worker function. */
495       -     R3PTRTYPE(PFNTIMENANOTSINTERNAL) pfnVirtualGetRawR3;
496       -     /** Pointer to the ring-0 tmVirtualGetRawNanoTS worker function. */
497       -     R0PTRTYPE(PFNTIMENANOTSINTERNAL) pfnVirtualGetRawR0;
     493  +     R3PTRTYPE(PFNTIMENANOTSINTERNAL) pfnVirtualGetRaw;
498  494       /** The guest virtual timer synchronous time when fVirtualSyncTicking is cleared.
499  495        * When fVirtualSyncTicking is set it holds the last time returned to
…
846  842       /** Timer queues for the different clock types. */
847  843       TMTIMERQUEUER0              aTimerQueues[TMCLOCK_MAX];
     844  +
     845  +     /** The ring-0 data structure for the RTTimeNanoTS workers used by tmVirtualGetRawNanoTS. */
     846  +     RTTIMENANOTSDATAR0          VirtualGetRawData;
     847  +     /** Pointer to the ring-0 tmVirtualGetRawNanoTS worker function. */
     848  +     R0PTRTYPE(PFNTIMENANOTSINTERNAL) pfnVirtualGetRaw;
848  849   } TMR0PERVM;
849  850
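The net effect of the header change is that the ring-0 copies of VirtualGetRawData and pfnVirtualGetRaw leave the shared TM structure (where they carried R0 suffixes) and move into TMR0PERVM, so each context owns exactly one unsuffixed instance. A compilable before/after sketch of that split, with invented member types standing in for the real RTTIMENANOTSDATA and worker-pointer types:

#include <stdint.h>

typedef uint64_t (*PFNWORKER)(void);
typedef struct { PFNWORKER pfnBad; uint32_t c1nsSteps; } NANOTSDATA;   /* stand-in */

/* Before: one shared structure carries both contexts' state, R3/R0 suffixed. */
typedef struct TM_OLD
{
    NANOTSDATA VirtualGetRawDataR3;
    NANOTSDATA VirtualGetRawDataR0;   /* ring-0 state exposed to ring-3 as well */
    PFNWORKER  pfnVirtualGetRawR3;
    PFNWORKER  pfnVirtualGetRawR0;
} TM_OLD;

/* After: the shared structure keeps only the ring-3 state ... */
typedef struct TM_NEW
{
    NANOTSDATA VirtualGetRawData;
    PFNWORKER  pfnVirtualGetRaw;
} TM_NEW;

/* ... and the ring-0-only structure, visible to ring-0 builds alone, gets its own copy. */
typedef struct TMR0PERVM_NEW
{
    NANOTSDATA VirtualGetRawData;
    PFNWORKER  pfnVirtualGetRaw;
} TMR0PERVM_NEW;

int main(void) { return 0; }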