Changeset 19360 in vbox
- Timestamp:
- May 5, 2009 10:14:15 AM (16 years ago)
- Location:
- trunk
- Files:
-
- 7 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/include/VBox/gvm.h
r8155 r19360 36 36 #include <iprt/thread.h> 37 37 38 /** @defgroup grp_gvm GVMCPU - The Global VMCPU Data 39 * @{ 40 */ 41 42 typedef struct GVMCPU 43 { 44 /* VCPU id (0 - (pVM->cCPUs - 1) */ 45 uint32_t idCpu; 46 47 /** The GVMM per vcpu data. */ 48 struct 49 { 50 #ifdef ___GVMMR0Internal_h 51 struct GVMMPERVCPU s; 52 #endif 53 uint8_t padding[64]; 54 } gvmm; 55 } GVMCPU; 56 /** Pointer to the GVMCPU data. */ 57 typedef GVMCPU *PGVMCPU; 58 59 /** @} */ 38 60 39 61 /** @defgroup grp_gvm GVM - The Global VM Data … … 60 82 /** The ring-0 mapping of the VM structure. */ 61 83 PVM pVM; 84 /** Number of VCPUs (same as pVM->cCPUs) */ 85 uint32_t cCPUs; 86 uint32_t padding; 62 87 63 88 /** The GVMM per vm data. */ … … 79 104 } gmm; 80 105 106 /** GVMCPU array for the configured number of virtual CPUs. */ 107 GVMCPU aCpus[1]; 81 108 } GVM; 82 109 -
trunk/include/VBox/gvmm.h
r14811 r19360 135 135 GVMMR0DECL(PVM) GVMMR0GetVMByHandle(uint32_t hGVM); 136 136 GVMMR0DECL(PVM) GVMMR0GetVMByEMT(RTNATIVETHREAD hEMT); 137 GVMMR0DECL(int) GVMMR0SchedHalt(PVM pVM, u int64_t u64ExpireGipTime);138 GVMMR0DECL(int) GVMMR0SchedWakeUp(PVM pVM );137 GVMMR0DECL(int) GVMMR0SchedHalt(PVM pVM, unsigned idCpu, uint64_t u64ExpireGipTime); 138 GVMMR0DECL(int) GVMMR0SchedWakeUp(PVM pVM, unsigned idCpu); 139 139 GVMMR0DECL(int) GVMMR0SchedPoll(PVM pVM, bool fYield); 140 140 GVMMR0DECL(int) GVMMR0QueryStatistics(PGVMMSTATS pStats, PSUPDRVSESSION pSession, PVM pVM); -
trunk/src/VBox/VMM/VMMR0/GMMR0.cpp
r18870 r19360 725 725 { 726 726 AssertCompile(RT_SIZEOFMEMB(GVM,gmm.s) <= RT_SIZEOFMEMB(GVM,gmm.padding)); 727 AssertRelease(RT_SIZEOFMEMB(GVM,gmm.s) <= RT_SIZEOFMEMB(GVM,gmm.padding));728 727 729 728 pGVM->gmm.s.enmPolicy = GMMOCPOLICY_INVALID; -
trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp
r19238 r19360 571 571 * Allocate the global VM structure (GVM) and initialize it. 572 572 */ 573 PGVM pGVM = (PGVM)RTMemAllocZ( sizeof(*pGVM));573 PGVM pGVM = (PGVM)RTMemAllocZ(RT_UOFFSETOF(GVM, aCpus[cCPUs])); 574 574 if (pGVM) 575 575 { 576 pGVM->u32Magic = GVM_MAGIC; 577 pGVM->hSelf = iHandle; 578 pGVM->hEMT = NIL_RTNATIVETHREAD; 579 pGVM->pVM = NULL; 576 pGVM->u32Magic = GVM_MAGIC; 577 pGVM->hSelf = iHandle; 578 pGVM->hEMT = NIL_RTNATIVETHREAD; 579 pGVM->pVM = NULL; 580 pGVM->cCPUs = cCPUs; 580 581 581 582 gvmmR0InitPerVMData(pGVM); … … 701 702 { 702 703 AssertCompile(RT_SIZEOFMEMB(GVM,gvmm.s) <= RT_SIZEOFMEMB(GVM,gvmm.padding)); 703 Assert(RT_SIZEOFMEMB(GVM,gvmm.s) <= RT_SIZEOFMEMB(GVM,gvmm.padding));704 704 pGVM->gvmm.s.VMMemObj = NIL_RTR0MEMOBJ; 705 705 pGVM->gvmm.s.VMMapObj = NIL_RTR0MEMOBJ; 706 706 pGVM->gvmm.s.VMPagesMemObj = NIL_RTR0MEMOBJ; 707 707 pGVM->gvmm.s.VMPagesMapObj = NIL_RTR0MEMOBJ; 708 pGVM->gvmm.s.HaltEventMulti = NIL_RTSEMEVENTMULTI;709 708 pGVM->gvmm.s.fDoneVMMR0Init = false; 710 709 pGVM->gvmm.s.fDoneVMMR0Term = false; 710 711 for (unsigned i=0; i< pGVM->cCPUs; i++) 712 pGVM->aCpus[i].gvmm.s.HaltEventMulti = NIL_RTSEMEVENTMULTI; 711 713 } 712 714 … … 731 733 { 732 734 if ( !pGVM->gvmm.s.fDoneVMMR0Init 733 && pGVM->gvmm.s.HaltEventMulti == NIL_RTSEMEVENTMULTI) 734 { 735 rc = RTSemEventMultiCreate(&pGVM->gvmm.s.HaltEventMulti); 736 if (RT_FAILURE(rc)) 737 pGVM->gvmm.s.HaltEventMulti = NIL_RTSEMEVENTMULTI; 735 && pGVM->aCpus[0].gvmm.s.HaltEventMulti == NIL_RTSEMEVENTMULTI) 736 { 737 for (unsigned i=0; i < pGVM->cCPUs; i++) 738 { 739 rc = RTSemEventMultiCreate(&pGVM->aCpus[i].gvmm.s.HaltEventMulti); 740 if (RT_FAILURE(rc)) 741 { 742 pGVM->aCpus[i].gvmm.s.HaltEventMulti = NIL_RTSEMEVENTMULTI; 743 break; 744 } 745 } 738 746 } 739 747 else … … 1008 1016 } 1009 1017 1010 if (pGVM->gvmm.s.HaltEventMulti != NIL_RTSEMEVENTMULTI) 1011 { 1012 rc = RTSemEventMultiDestroy(pGVM->gvmm.s.HaltEventMulti); AssertRC(rc); 1013 pGVM->gvmm.s.HaltEventMulti = 
NIL_RTSEMEVENTMULTI; 1018 for (unsigned i=0; i< pGVM->cCPUs; i++) 1019 { 1020 if (pGVM->aCpus[i].gvmm.s.HaltEventMulti != NIL_RTSEMEVENTMULTI) 1021 { 1022 rc = RTSemEventMultiDestroy(pGVM->aCpus[i].gvmm.s.HaltEventMulti); AssertRC(rc); 1023 pGVM->aCpus[i].gvmm.s.HaltEventMulti = NIL_RTSEMEVENTMULTI; 1024 } 1014 1025 } 1015 1026 … … 1307 1318 && pCurGVM->u32Magic == GVM_MAGIC) 1308 1319 { 1309 uint64_t u64 = pCurGVM->gvmm.s.u64HaltExpire; 1310 if (u64) 1320 for (unsigned idCpu = 0; idCpu < pCurGVM->cCPUs; idCpu++) 1311 1321 { 1312 if (u64 <= u64Now) 1322 PGVMCPU pCurGVCpu = &pCurGVM->aCpus[idCpu]; 1323 1324 uint64_t u64 = pCurGVCpu->gvmm.s.u64HaltExpire; 1325 if (u64) 1313 1326 { 1314 if ( ASMAtomicXchgU64(&pCurGVM->gvmm.s.u64HaltExpire, 0))1327 if (u64 <= u64Now) 1315 1328 { 1316 int rc = RTSemEventMultiSignal(pCurGVM->gvmm.s.HaltEventMulti); 1317 AssertRC(rc); 1318 cWoken++; 1329 if (ASMAtomicXchgU64(&pCurGVCpu->gvmm.s.u64HaltExpire, 0)) 1330 { 1331 int rc = RTSemEventMultiSignal(pCurGVCpu->gvmm.s.HaltEventMulti); 1332 AssertRC(rc); 1333 cWoken++; 1334 } 1319 1335 } 1320 }1321 else1322 {1323 cHalted++;1324 if (u64 <= u64Now + pGVMM->nsEarlyWakeUp1)1325 cTodo2nd++;1326 else if (u64 <= u64Now + pGVMM->nsEarlyWakeUp2)1327 cTodo3rd++;1336 else 1337 { 1338 cHalted++; 1339 if (u64 <= u64Now + pGVMM->nsEarlyWakeUp1) 1340 cTodo2nd++; 1341 else if (u64 <= u64Now + pGVMM->nsEarlyWakeUp2) 1342 cTodo3rd++; 1343 } 1328 1344 } 1329 1345 } … … 1340 1356 PGVM pCurGVM = pGVMM->aHandles[i].pGVM; 1341 1357 if ( VALID_PTR(pCurGVM) 1342 && pCurGVM->u32Magic == GVM_MAGIC 1343 && pCurGVM->gvmm.s.u64HaltExpire 1344 && pCurGVM->gvmm.s.u64HaltExpire <= u64Now + pGVMM->nsEarlyWakeUp1) 1358 && pCurGVM->u32Magic == GVM_MAGIC) 1345 1359 { 1346 if (ASMAtomicXchgU64(&pCurGVM->gvmm.s.u64HaltExpire, 0))1360 for (unsigned idCpu = 0; idCpu < pCurGVM->cCPUs; idCpu++) 1347 1361 { 1348 int rc = RTSemEventMultiSignal(pCurGVM->gvmm.s.HaltEventMulti); 1349 AssertRC(rc); 1350 cWoken++; 1362 PGVMCPU 
pCurGVCpu = &pCurGVM->aCpus[idCpu]; 1363 1364 if ( pCurGVCpu->gvmm.s.u64HaltExpire 1365 && pCurGVCpu->gvmm.s.u64HaltExpire <= u64Now + pGVMM->nsEarlyWakeUp1) 1366 { 1367 if (ASMAtomicXchgU64(&pCurGVCpu->gvmm.s.u64HaltExpire, 0)) 1368 { 1369 int rc = RTSemEventMultiSignal(pCurGVCpu->gvmm.s.HaltEventMulti); 1370 AssertRC(rc); 1371 cWoken++; 1372 } 1373 } 1351 1374 } 1352 1375 } … … 1363 1386 PGVM pCurGVM = pGVMM->aHandles[i].pGVM; 1364 1387 if ( VALID_PTR(pCurGVM) 1365 && pCurGVM->u32Magic == GVM_MAGIC 1366 && pCurGVM->gvmm.s.u64HaltExpire 1367 && pCurGVM->gvmm.s.u64HaltExpire <= u64Now + pGVMM->nsEarlyWakeUp2) 1388 && pCurGVM->u32Magic == GVM_MAGIC) 1368 1389 { 1369 if (ASMAtomicXchgU64(&pCurGVM->gvmm.s.u64HaltExpire, 0))1390 for (unsigned idCpu = 0; idCpu < pCurGVM->cCPUs; idCpu++) 1370 1391 { 1371 int rc = RTSemEventMultiSignal(pCurGVM->gvmm.s.HaltEventMulti); 1372 AssertRC(rc); 1373 cWoken++; 1392 PGVMCPU pCurGVCpu = &pCurGVM->aCpus[idCpu]; 1393 1394 if ( pCurGVCpu->gvmm.s.u64HaltExpire 1395 && pCurGVCpu->gvmm.s.u64HaltExpire <= u64Now + pGVMM->nsEarlyWakeUp2) 1396 { 1397 if (ASMAtomicXchgU64(&pCurGVCpu->gvmm.s.u64HaltExpire, 0)) 1398 { 1399 int rc = RTSemEventMultiSignal(pCurGVCpu->gvmm.s.HaltEventMulti); 1400 AssertRC(rc); 1401 cWoken++; 1402 } 1403 } 1374 1404 } 1375 1405 } … … 1388 1418 * VERR_INTERRUPTED if a signal was scheduled for the thread. 1389 1419 * @param pVM Pointer to the shared VM structure. 1420 * @param idCpu VCPU id 1390 1421 * @param u64ExpireGipTime The time for the sleep to expire expressed as GIP time. 1391 1422 * @thread EMT. 1392 1423 */ 1393 GVMMR0DECL(int) GVMMR0SchedHalt(PVM pVM, u int64_t u64ExpireGipTime)1424 GVMMR0DECL(int) GVMMR0SchedHalt(PVM pVM, unsigned idCpu, uint64_t u64ExpireGipTime) 1394 1425 { 1395 1426 LogFlow(("GVMMR0SchedHalt: pVM=%p\n", pVM)); … … 1398 1429 * Validate the VM structure, state and handle. 
1399 1430 */ 1400 PGVMM pGVMM; 1401 PGVM pGVM; 1431 PGVMM pGVMM; 1432 PGVM pGVM; 1433 PGVMCPU pCurGVCpu; 1434 1402 1435 int rc = gvmmR0ByVMAndEMT(pVM, &pGVM, &pGVMM); 1403 1436 if (RT_FAILURE(rc)) 1404 1437 return rc; 1438 1405 1439 pGVM->gvmm.s.StatsSched.cHaltCalls++; 1406 1440 1407 Assert(!pGVM->gvmm.s.u64HaltExpire); 1441 pCurGVCpu = &pGVM->aCpus[idCpu]; 1442 Assert(idCpu < pGVM->cCPUs); 1443 Assert(!pCurGVCpu->gvmm.s.u64HaltExpire); 1408 1444 1409 1445 /* … … 1415 1451 AssertRC(rc); 1416 1452 1417 p GVM->gvmm.s.iCpuEmt = ASMGetApicId();1453 pCurGVCpu->gvmm.s.iCpuEmt = ASMGetApicId(); 1418 1454 1419 1455 Assert(ASMGetFlags() & X86_EFL_IF); … … 1430 1466 { 1431 1467 pGVM->gvmm.s.StatsSched.cHaltBlocking++; 1432 ASMAtomicXchgU64(&p GVM->gvmm.s.u64HaltExpire, u64ExpireGipTime);1468 ASMAtomicXchgU64(&pCurGVCpu->gvmm.s.u64HaltExpire, u64ExpireGipTime); 1433 1469 gvmmR0UsedUnlock(pGVMM); 1434 1470 1435 1471 uint32_t cMillies = (u64ExpireGipTime - u64Now) / 1000000; 1436 rc = RTSemEventMultiWaitNoResume(p GVM->gvmm.s.HaltEventMulti, cMillies ? cMillies : 1);1437 ASMAtomicXchgU64(&p GVM->gvmm.s.u64HaltExpire, 0);1472 rc = RTSemEventMultiWaitNoResume(pCurGVCpu->gvmm.s.HaltEventMulti, cMillies ? cMillies : 1); 1473 ASMAtomicXchgU64(&pCurGVCpu->gvmm.s.u64HaltExpire, 0); 1438 1474 if (rc == VERR_TIMEOUT) 1439 1475 { … … 1449 1485 1450 1486 /* Make sure false wake up calls (gvmmR0SchedDoWakeUps) cause us to spin. */ 1451 RTSemEventMultiReset(p GVM->gvmm.s.HaltEventMulti);1487 RTSemEventMultiReset(pCurGVCpu->gvmm.s.HaltEventMulti); 1452 1488 1453 1489 return rc; … … 1461 1497 * VINF_GVM_NOT_BLOCKED if the EMT thread wasn't blocked. 1462 1498 * @param pVM Pointer to the shared VM structure. 1499 * @param idCpu VCPU id 1463 1500 * @thread Any but EMT. 1464 1501 */ 1465 GVMMR0DECL(int) GVMMR0SchedWakeUp(PVM pVM )1502 GVMMR0DECL(int) GVMMR0SchedWakeUp(PVM pVM, unsigned idCpu) 1466 1503 { 1467 1504 /* 1468 1505 * Validate input and take the UsedLock. 
1469 1506 */ 1470 PGVM pGVM; 1471 PGVMM pGVMM; 1507 PGVMM pGVMM; 1508 PGVM pGVM; 1509 PGVMCPU pCurGVCpu; 1510 1472 1511 int rc = gvmmR0ByVM(pVM, &pGVM, &pGVMM, true /* fTakeUsedLock */); 1473 1512 if (RT_SUCCESS(rc)) 1474 1513 { 1514 Assert(idCpu < pGVM->cCPUs); 1515 1516 pCurGVCpu = &pGVM->aCpus[idCpu]; 1517 1475 1518 pGVM->gvmm.s.StatsSched.cWakeUpCalls++; 1476 1519 … … 1483 1526 * the it is flagged as halted in the VMM. 1484 1527 */ 1485 if (p GVM->gvmm.s.u64HaltExpire)1528 if (pCurGVCpu->gvmm.s.u64HaltExpire) 1486 1529 { 1487 1530 rc = VINF_SUCCESS; 1488 ASMAtomicXchgU64(&p GVM->gvmm.s.u64HaltExpire, 0);1531 ASMAtomicXchgU64(&pCurGVCpu->gvmm.s.u64HaltExpire, 0); 1489 1532 } 1490 1533 else … … 1494 1537 } 1495 1538 1496 int rc2 = RTSemEventMultiSignal(p GVM->gvmm.s.HaltEventMulti);1539 int rc2 = RTSemEventMultiSignal(pCurGVCpu->gvmm.s.HaltEventMulti); 1497 1540 AssertRC(rc2); 1498 1541 -
trunk/src/VBox/VMM/VMMR0/GVMMR0Internal.h
r14811 r19360 25 25 #include <iprt/mem.h> 26 26 27 /** 28 * The GVMM per VCPU data. 29 */ 30 typedef struct GVMMPERVCPU 31 { 32 /** The time the halted EMT thread expires. 33 * 0 if the EMT thread is blocked here. */ 34 uint64_t volatile u64HaltExpire; 35 /** The event semaphore the EMT thread is blocking on. */ 36 RTSEMEVENTMULTI HaltEventMulti; 37 /** The APIC ID of the CPU that EMT was scheduled on the last time we checked. */ 38 uint8_t iCpuEmt; 39 } GVMMPERVCPU; 40 /** Pointer to the GVMM per VCPU data. */ 41 typedef GVMMPERVCPU *PGVMMPERVCPU; 27 42 28 43 /** … … 40 55 RTR0MEMOBJ VMPagesMapObj; 41 56 42 /** The time the halted EMT thread expires.43 * 0 if the EMT thread is blocked here. */44 uint64_t volatile u64HaltExpire;45 /** The event semaphore the EMT thread is blocking on. */46 RTSEMEVENTMULTI HaltEventMulti;47 /** The APIC ID of the CPU that EMT was scheduled on the last time we checked. */48 uint8_t iCpuEmt;49 50 57 /** The scheduler statistics. */ 51 58 GVMMSTATSSCHED StatsSched; -
trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp
r19227 r19360 1192 1192 return pVCpu; 1193 1193 } 1194 AssertReleaseFailed(); 1195 return 0; 1194 return NULL; 1196 1195 } 1197 1196 … … 1207 1206 return pVCpu->idCpu; 1208 1207 1209 AssertReleaseFailed();1210 1208 return 0; 1211 1209 } -
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
r19262 r19360 746 746 if (pReqHdr) 747 747 return VERR_INVALID_PARAMETER; 748 return GVMMR0SchedHalt(pVM, u64Arg);748 return GVMMR0SchedHalt(pVM, idCpu, u64Arg); 749 749 750 750 case VMMR0_DO_GVMM_SCHED_WAKE_UP: 751 751 if (pReqHdr || u64Arg) 752 752 return VERR_INVALID_PARAMETER; 753 return GVMMR0SchedWakeUp(pVM );753 return GVMMR0SchedWakeUp(pVM, idCpu); 754 754 755 755 case VMMR0_DO_GVMM_SCHED_POLL:
Note:
See TracChangeset
for help on using the changeset viewer.