Changeset 19434 in vbox
- Timestamp: May 6, 2009, 1:58:35 PM
- Location: trunk
- Files: 10 edited
trunk/include/VBox/vm.h
r19423 → r19434

     } tm;

-    /** VMM part.
-     * @todo Combine this with other tiny structures. */
+    /** VMM part. */
     union
     {
…
         struct VMMCPU       s;
 #endif
-        char                padding[64];        /* multiple of 64 */
+        char                padding[256];       /* multiple of 64 */
     } vmm;
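The padding growth from 64 to 256 bytes is what makes room for the state this changeset moves into VMMCPU (the EMT stack pointers and the CallHost block, see the VMMInternal.h hunks below). The union trick keeps sizeof(VMCPU) and every member offset stable for code that cannot see VMMInternal.h: such code only ever sees the padding arm. A minimal sketch of the pattern, with hypothetical names (IN_VMM, Private, SAMPLEBLOB) and assuming a C11 compiler for static_assert:

    #include <assert.h>    /* C11 static_assert */
    #include <stdint.h>

    /* Only code that includes the internal header sees the real layout. */
    struct Private
    {
        uint8_t  *pbStackR3;
        int32_t   rcCallHost;
        uint64_t  u64Arg;
    };

    /* Everyone else sees an opaque, fixed-size blob, so the enclosing
     * struct's size and member offsets stay identical everywhere. */
    typedef union SAMPLEBLOB
    {
    #ifdef IN_VMM                      /* hypothetical visibility guard */
        struct Private s;
    #endif
        char padding[256];             /* multiple of 64 */
    } SAMPLEBLOB;

    /* The compile-time check that replaced the old runtime AssertMsg: if the
     * private part outgrows the padding, the build breaks instead of the VM. */
    #ifdef IN_VMM
    static_assert(sizeof(struct Private) <= sizeof(SAMPLEBLOB), "grow the padding");
    #endif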
trunk/src/VBox/VMM/VMM.cpp
r19366 → r19434

 static DECLCALLBACK(int)  vmmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
 static DECLCALLBACK(void) vmmR3YieldEMT(PVM pVM, PTMTIMER pTimer, void *pvUser);
-static int                vmmR3ServiceCallHostRequest(PVM pVM);
+static int                vmmR3ServiceCallHostRequest(PVM pVM, PVMCPU pVCpu);
 static DECLCALLBACK(void) vmmR3InfoFF(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
…
      */
     AssertMsg(pVM->vmm.s.offVM == 0, ("Already initialized!\n"));
-    AssertMsg(sizeof(pVM->vmm.padding) >= sizeof(pVM->vmm.s),
-              ("pVM->vmm.padding is too small! vmm.padding %d while vmm.s is %d\n",
-               sizeof(pVM->vmm.padding), sizeof(pVM->vmm.s)));
+    AssertCompile(sizeof(pVM->vmm.s) <= sizeof(pVM->vmm.padding));
+    AssertCompile(sizeof(pVM->aCpus[0].vmm.s) <= sizeof(pVM->aCpus[0].vmm.padding));

     /*
…
 static int vmmR3InitStacks(PVM pVM)
 {
-    /** @todo SMP: One stack per vCPU. */
+    int rc = VINF_SUCCESS;
+
+    for (unsigned idCpu = 0; idCpu < pVM->cCPUs; idCpu++)
+    {
+        PVMCPU pVCpu = &pVM->aCpus[idCpu];
+
 #ifdef VBOX_STRICT_VMM_STACK
-    int rc = MMR3HyperAllocOnceNoRel(pVM, VMM_STACK_SIZE + PAGE_SIZE + PAGE_SIZE, PAGE_SIZE, MM_TAG_VMM, (void **)&pVM->vmm.s.pbEMTStackR3);
+        rc = MMR3HyperAllocOnceNoRel(pVM, VMM_STACK_SIZE + PAGE_SIZE + PAGE_SIZE, PAGE_SIZE, MM_TAG_VMM, (void **)&pVCpu->vmm.s.pbEMTStackR3);
 #else
-    int rc = MMR3HyperAllocOnceNoRel(pVM, VMM_STACK_SIZE, PAGE_SIZE, MM_TAG_VMM, (void **)&pVM->vmm.s.pbEMTStackR3);
+        rc = MMR3HyperAllocOnceNoRel(pVM, VMM_STACK_SIZE, PAGE_SIZE, MM_TAG_VMM, (void **)&pVCpu->vmm.s.pbEMTStackR3);
 #endif
-    if (RT_SUCCESS(rc))
-    {
+        if (RT_SUCCESS(rc))
+        {
 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
-        /* MMHyperR3ToR0 returns R3 when not doing hardware assisted virtualization. */
-        if (!VMMIsHwVirtExtForced(pVM))
-            pVM->vmm.s.CallHostR0JmpBuf.pvSavedStack = NIL_RTR0PTR;
-        else
-#endif
-            pVM->vmm.s.CallHostR0JmpBuf.pvSavedStack = MMHyperR3ToR0(pVM, pVM->vmm.s.pbEMTStackR3);
-        pVM->vmm.s.pbEMTStackRC       = MMHyperR3ToRC(pVM, pVM->vmm.s.pbEMTStackR3);
-        pVM->vmm.s.pbEMTStackBottomRC = pVM->vmm.s.pbEMTStackRC + VMM_STACK_SIZE;
-        AssertRelease(pVM->vmm.s.pbEMTStackRC);
-
-        for (unsigned i=0;i<pVM->cCPUs;i++)
-        {
-            PVMCPU pVCpu = &pVM->aCpus[i];
-            CPUMSetHyperESP(pVCpu, pVM->vmm.s.pbEMTStackBottomRC);
+            /* MMHyperR3ToR0 returns R3 when not doing hardware assisted virtualization. */
+            if (!VMMIsHwVirtExtForced(pVM))
+                pVCpu->vmm.s.CallHostR0JmpBuf.pvSavedStack = NIL_RTR0PTR;
+            else
+#endif
+                pVCpu->vmm.s.CallHostR0JmpBuf.pvSavedStack = MMHyperR3ToR0(pVM, pVCpu->vmm.s.pbEMTStackR3);
+            pVCpu->vmm.s.pbEMTStackRC       = MMHyperR3ToRC(pVM, pVCpu->vmm.s.pbEMTStackR3);
+            pVCpu->vmm.s.pbEMTStackBottomRC = pVCpu->vmm.s.pbEMTStackRC + VMM_STACK_SIZE;
+            AssertRelease(pVCpu->vmm.s.pbEMTStackRC);
+
+            CPUMSetHyperESP(pVCpu, pVCpu->vmm.s.pbEMTStackBottomRC);
         }
     }
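The vmmR3InitStacks() rewrite above is the heart of the changeset: instead of a single EMT stack hanging off the VM, every virtual CPU gets its own hypervisor stack, and each vCPU's hyper ESP is pointed at its own stack bottom (x86 stacks grow downwards, so the initial stack pointer is base + size). The same shape in isolation, a sketch with plain C11 aligned_alloc standing in for the hypervisor-heap allocator MMR3HyperAllocOnceNoRel, and with an assumed 16 KiB stack size:

    #include <stdint.h>
    #include <stdlib.h>

    #define SAMPLE_STACK_SIZE 0x4000    /* illustrative sizing, not VMM_STACK_SIZE */

    typedef struct CPUSTACK
    {
        uint8_t *pbBase;    /* lowest address of the stack */
        uint8_t *pbBottom;  /* initial SP: stacks grow down, so base + size */
    } CPUSTACK;

    /* One stack per virtual CPU, mirroring the new vmmR3InitStacks() loop. */
    int initStacks(CPUSTACK *paStacks, unsigned cCpus)
    {
        for (unsigned idCpu = 0; idCpu < cCpus; idCpu++)
        {
            uint8_t *pb = aligned_alloc(0x1000, SAMPLE_STACK_SIZE); /* page aligned */
            if (!pb)
                return -1;
            paStacks[idCpu].pbBase   = pb;
            paStacks[idCpu].pbBottom = pb + SAMPLE_STACK_SIZE;
        }
        return 0;
    }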
…
 VMMR3DECL(int) VMMR3InitFinalize(PVM pVM)
 {
+    int rc = VINF_SUCCESS;
+
+    for (unsigned idCpu = 0; idCpu < pVM->cCPUs; idCpu++)
+    {
+        PVMCPU pVCpu = &pVM->aCpus[idCpu];
+
 #ifdef VBOX_STRICT_VMM_STACK
-    /*
-     * Two inaccessible pages at each sides of the stack to catch over/under-flows.
-     */
-    memset(pVM->vmm.s.pbEMTStackR3 - PAGE_SIZE, 0xcc, PAGE_SIZE);
-    PGMMapSetPage(pVM, MMHyperR3ToRC(pVM, pVM->vmm.s.pbEMTStackR3 - PAGE_SIZE), PAGE_SIZE, 0);
-    RTMemProtect(pVM->vmm.s.pbEMTStackR3 - PAGE_SIZE, PAGE_SIZE, RTMEM_PROT_NONE);
-
-    memset(pVM->vmm.s.pbEMTStackR3 + VMM_STACK_SIZE, 0xcc, PAGE_SIZE);
-    PGMMapSetPage(pVM, MMHyperR3ToRC(pVM, pVM->vmm.s.pbEMTStackR3 + VMM_STACK_SIZE), PAGE_SIZE, 0);
-    RTMemProtect(pVM->vmm.s.pbEMTStackR3 + VMM_STACK_SIZE, PAGE_SIZE, RTMEM_PROT_NONE);
-#endif
-
-    /*
-     * Set page attributes to r/w for stack pages.
-     */
-    int rc = PGMMapSetPage(pVM, pVM->vmm.s.pbEMTStackRC, VMM_STACK_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
-    AssertRC(rc);
+        /*
+         * Two inaccessible pages at each sides of the stack to catch over/under-flows.
+         */
+        memset(pVCpu->vmm.s.pbEMTStackR3 - PAGE_SIZE, 0xcc, PAGE_SIZE);
+        PGMMapSetPage(pVM, MMHyperR3ToRC(pVM, pVCpu->vmm.s.pbEMTStackR3 - PAGE_SIZE), PAGE_SIZE, 0);
+        RTMemProtect(pVCpu->vmm.s.pbEMTStackR3 - PAGE_SIZE, PAGE_SIZE, RTMEM_PROT_NONE);
+
+        memset(pVCpu->vmm.s.pbEMTStackR3 + VMM_STACK_SIZE, 0xcc, PAGE_SIZE);
+        PGMMapSetPage(pVM, MMHyperR3ToRC(pVM, pVCpu->vmm.s.pbEMTStackR3 + VMM_STACK_SIZE), PAGE_SIZE, 0);
+        RTMemProtect(pVCpu->vmm.s.pbEMTStackR3 + VMM_STACK_SIZE, PAGE_SIZE, RTMEM_PROT_NONE);
+#endif
+
+        /*
+         * Set page attributes to r/w for stack pages.
+         */
+        rc = PGMMapSetPage(pVM, pVCpu->vmm.s.pbEMTStackRC, VMM_STACK_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
+        AssertRC(rc);
+        if (RT_FAILURE(rc))
+            break;
+    }
     if (RT_SUCCESS(rc))
     {
…
 VMMR3DECL(int) VMMR3InitR0(PVM pVM)
 {
-    int rc;
+    int    rc;
+    PVMCPU pVCpu = VMMGetCpu(pVM);
+    Assert(pVCpu && pVCpu->idCpu == 0);

     /*
…
         if (rc != VINF_VMM_CALL_HOST)
             break;
-        rc = vmmR3ServiceCallHostRequest(pVM);
+        rc = vmmR3ServiceCallHostRequest(pVM, pVCpu);
         if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
             break;
…
 {
     PVMCPU pVCpu = VMMGetCpu(pVM);
-    Assert(pVCpu);
+    Assert(pVCpu && pVCpu->idCpu == 0);

     /* In VMX mode, there's no need to init RC. */
…
     {
         CPUMHyperSetCtxCore(pVCpu, NULL);
-        CPUMSetHyperESP(pVCpu, pVM->vmm.s.pbEMTStackBottomRC);   /* Clear the stack. */
+        CPUMSetHyperESP(pVCpu, pVCpu->vmm.s.pbEMTStackBottomRC); /* Clear the stack. */
         uint64_t u64TS = RTTimeProgramStartNanoTS();
         CPUMPushHyper(pVCpu, (uint32_t)(u64TS >> 32));    /* Param 3: The program startup TS - Hi. */
…
             if (rc != VINF_VMM_CALL_HOST)
                 break;
-            rc = vmmR3ServiceCallHostRequest(pVM);
+            rc = vmmR3ServiceCallHostRequest(pVM, pVCpu);
             if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
                 break;
…
 VMMR3DECL(int) VMMR3Term(PVM pVM)
 {
+    PVMCPU pVCpu = VMMGetCpu(pVM);
+    Assert(pVCpu && pVCpu->idCpu == 0);
+
     /*
      * Call Ring-0 entry with termination code.
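With VBOX_STRICT_VMM_STACK, each per-CPU stack is now individually bracketed by a poisoned, inaccessible page on either side, so an overflow or underflow faults immediately instead of silently corrupting a neighbouring vCPU's stack. The same idea in rough POSIX terms, a sketch with mmap/mprotect standing in for the PGMMapSetPage/RTMemProtect calls and cbStack assumed to be a multiple of the page size:

    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* Poison + guard-page pattern: [guard][usable stack][guard]. */
    unsigned char *allocGuardedStack(size_t cbStack)
    {
        size_t cbPage = (size_t)sysconf(_SC_PAGESIZE);
        unsigned char *pb = mmap(NULL, cbStack + 2 * cbPage, PROT_READ | PROT_WRITE,
                                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (pb == MAP_FAILED)
            return NULL;
        memset(pb, 0xcc, cbStack + 2 * cbPage);                      /* poison fill */
        if (   mprotect(pb, cbPage, PROT_NONE)                       /* low guard: overflow */
            || mprotect(pb + cbPage + cbStack, cbPage, PROT_NONE))   /* high guard: underflow */
            return NULL;
        return pb + cbPage;     /* the usable, writable stack starts here */
    }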
…
         if (rc != VINF_VMM_CALL_HOST)
             break;
-        rc = vmmR3ServiceCallHostRequest(pVM);
+        rc = vmmR3ServiceCallHostRequest(pVM, pVCpu);
         if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
             break;
…
         CPUMSetHyperESP(pVCpu, CPUMGetHyperESP(pVCpu) + offDelta);
-    }
-
-    pVM->vmm.s.pbEMTStackRC       = MMHyperR3ToRC(pVM, pVM->vmm.s.pbEMTStackR3);
-    pVM->vmm.s.pbEMTStackBottomRC = pVM->vmm.s.pbEMTStackRC + VMM_STACK_SIZE;
+
+        pVCpu->vmm.s.pbEMTStackRC       = MMHyperR3ToRC(pVM, pVCpu->vmm.s.pbEMTStackR3);
+        pVCpu->vmm.s.pbEMTStackBottomRC = pVCpu->vmm.s.pbEMTStackRC + VMM_STACK_SIZE;
+    }

     /*
…
     LogFlow(("vmmR3Save:\n"));

-    /*
-     * The hypervisor stack.
-     * Note! See not in vmmR3Load.
-     */
-    SSMR3PutRCPtr(pSSM, pVM->vmm.s.pbEMTStackBottomRC);
-
     for (unsigned i=0;i<pVM->cCPUs;i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];

+        /*
+         * The hypervisor stack.
+         * Note! See not in vmmR3Load.
+         */
+        SSMR3PutRCPtr(pSSM, pVCpu->vmm.s.pbEMTStackBottomRC);
+
         RTRCPTR RCPtrESP = CPUMGetHyperESP(pVCpu);
-        AssertMsg(pVM->vmm.s.pbEMTStackBottomRC - RCPtrESP <= VMM_STACK_SIZE, ("Bottom %RRv ESP=%RRv\n", pVM->vmm.s.pbEMTStackBottomRC, RCPtrESP));
+        AssertMsg(pVCpu->vmm.s.pbEMTStackBottomRC - RCPtrESP <= VMM_STACK_SIZE, ("Bottom %RRv ESP=%RRv\n", pVCpu->vmm.s.pbEMTStackBottomRC, RCPtrESP));
         SSMR3PutRCPtr(pSSM, RCPtrESP);
-    }
-    SSMR3PutMem(pSSM, pVM->vmm.s.pbEMTStackR3, VMM_STACK_SIZE);
+
+        SSMR3PutMem(pSSM, pVCpu->vmm.s.pbEMTStackR3, VMM_STACK_SIZE);
+    }
     return SSMR3PutU32(pSSM, ~0); /* terminator */
 }
…
     /* restore the stack. */
-    SSMR3GetMem(pSSM, pVM->vmm.s.pbEMTStackR3, VMM_STACK_SIZE);
+    for (unsigned i=0;i<pVM->cCPUs;i++)
+    {
+        PVMCPU pVCpu = &pVM->aCpus[i];
+
+        SSMR3GetMem(pSSM, pVCpu->vmm.s.pbEMTStackR3, VMM_STACK_SIZE);
+    }

     /* terminator */
…
                         ? pVM->vmm.s.pfnCPUMRCResumeGuestV86
                         : pVM->vmm.s.pfnCPUMRCResumeGuest);
-    CPUMSetHyperESP(pVCpu, pVM->vmm.s.pbEMTStackBottomRC);
+    CPUMSetHyperESP(pVCpu, pVCpu->vmm.s.pbEMTStackBottomRC);

     /*
…
             return rc;
         }
-        rc = vmmR3ServiceCallHostRequest(pVM);
+        rc = vmmR3ServiceCallHostRequest(pVM, pVCpu);
         if (RT_FAILURE(rc))
             return rc;
…
             return rc;
         }
-        rc = vmmR3ServiceCallHostRequest(pVM);
+        rc = vmmR3ServiceCallHostRequest(pVM, pVCpu);
         if (RT_FAILURE(rc))
             return rc;
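vmmR3Save previously wrote one stack for the whole VM; now each vCPU's saved ESP and raw stack contents are emitted inside the per-CPU loop, with the ~0 terminator written once after all records so vmmR3Load can tell a clean stream from a truncated or mismatched one. The shape of that record format, sketched with fwrite standing in for the SSMR3Put* family:

    #include <stdint.h>
    #include <stdio.h>

    /* One record per vCPU (saved stack pointer plus the raw stack bytes),
     * then a ~0 terminator so the loader can detect truncation. */
    int saveStacks(FILE *pFile, uint8_t **papbStacks, const uint32_t *pauESP,
                   unsigned cCpus, size_t cbStack)
    {
        for (unsigned i = 0; i < cCpus; i++)
        {
            if (   fwrite(&pauESP[i], sizeof(uint32_t), 1, pFile) != 1
                || fwrite(papbStacks[i], 1, cbStack, pFile) != cbStack)
                return -1;
        }
        uint32_t u32End = ~0U;                       /* terminator record */
        return fwrite(&u32End, sizeof(u32End), 1, pFile) == 1 ? 0 : -1;
    }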
…
     CPUMHyperSetCtxCore(pVCpu, NULL);
-    memset(pVM->vmm.s.pbEMTStackR3, 0xaa, VMM_STACK_SIZE); /* Clear the stack. */
-    CPUMSetHyperESP(pVCpu, pVM->vmm.s.pbEMTStackBottomRC - cArgs * sizeof(RTGCUINTPTR32));
-    PRTGCUINTPTR32 pFrame = (PRTGCUINTPTR32)(pVM->vmm.s.pbEMTStackR3 + VMM_STACK_SIZE) - cArgs;
+    memset(pVCpu->vmm.s.pbEMTStackR3, 0xaa, VMM_STACK_SIZE); /* Clear the stack. */
+    CPUMSetHyperESP(pVCpu, pVCpu->vmm.s.pbEMTStackBottomRC - cArgs * sizeof(RTGCUINTPTR32));
+    PRTGCUINTPTR32 pFrame = (PRTGCUINTPTR32)(pVCpu->vmm.s.pbEMTStackR3 + VMM_STACK_SIZE) - cArgs;
     int i = cArgs;
     while (i-- > 0)
…
             return rc;
         }
-        rc = vmmR3ServiceCallHostRequest(pVM);
+        rc = vmmR3ServiceCallHostRequest(pVM, pVCpu);
         if (RT_FAILURE(rc))
             return rc;
…
         if (rc != VINF_VMM_CALL_HOST)
             break;
-        rc = vmmR3ServiceCallHostRequest(pVM);
+        rc = vmmR3ServiceCallHostRequest(pVM, pVCpu);
         if (RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST))
             break;
…
             return rc;
         }
-        rc = vmmR3ServiceCallHostRequest(pVM);
+        rc = vmmR3ServiceCallHostRequest(pVM, pVCpu);
         if (RT_FAILURE(rc))
             return rc;
…
  * @returns VBox status code.
  * @param   pVM     VM handle.
+ * @param   pVCpu   VMCPU handle
  * @remark  Careful with critsects.
  */
-static int vmmR3ServiceCallHostRequest(PVM pVM)
-{
-    switch (pVM->vmm.s.enmCallHostOperation)
+static int vmmR3ServiceCallHostRequest(PVM pVM, PVMCPU pVCpu)
+{
+    switch (pVCpu->vmm.s.enmCallHostOperation)
     {
…
         case VMMCALLHOST_PDM_LOCK:
         {
-            pVM->vmm.s.rcCallHost = PDMR3LockCall(pVM);
+            pVCpu->vmm.s.rcCallHost = PDMR3LockCall(pVM);
             break;
         }
…
         {
             PDMR3QueueFlushWorker(pVM, NULL);
-            pVM->vmm.s.rcCallHost = VINF_SUCCESS;
+            pVCpu->vmm.s.rcCallHost = VINF_SUCCESS;
             break;
         }
…
         case VMMCALLHOST_PGM_POOL_GROW:
         {
-            pVM->vmm.s.rcCallHost = PGMR3PoolGrow(pVM);
+            pVCpu->vmm.s.rcCallHost = PGMR3PoolGrow(pVM);
             break;
         }
…
         case VMMCALLHOST_PGM_MAP_CHUNK:
         {
-            pVM->vmm.s.rcCallHost = PGMR3PhysChunkMap(pVM, pVM->vmm.s.u64CallHostArg);
+            pVCpu->vmm.s.rcCallHost = PGMR3PhysChunkMap(pVM, pVCpu->vmm.s.u64CallHostArg);
             break;
         }
…
         case VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES:
         {
-            pVM->vmm.s.rcCallHost = PGMR3PhysAllocateHandyPages(pVM);
+            pVCpu->vmm.s.rcCallHost = PGMR3PhysAllocateHandyPages(pVM);
             break;
         }
…
         case VMMCALLHOST_PGM_LOCK:
         {
-            pVM->vmm.s.rcCallHost = PGMR3LockCall(pVM);
+            pVCpu->vmm.s.rcCallHost = PGMR3LockCall(pVM);
             break;
         }
…
         {
             REMR3ReplayHandlerNotifications(pVM);
-            pVM->vmm.s.rcCallHost = VINF_SUCCESS;
+            pVCpu->vmm.s.rcCallHost = VINF_SUCCESS;
             break;
         }
…
         case VMMCALLHOST_VMM_LOGGER_FLUSH:
-            pVM->vmm.s.rcCallHost = VINF_SUCCESS;
+            pVCpu->vmm.s.rcCallHost = VINF_SUCCESS;
             LogAlways(("*FLUSH*\n"));
             break;
…
         case VMMCALLHOST_VM_SET_ERROR:
             VMR3SetErrorWorker(pVM);
-            pVM->vmm.s.rcCallHost = VINF_SUCCESS;
+            pVCpu->vmm.s.rcCallHost = VINF_SUCCESS;
             break;
…
         case VMMCALLHOST_VM_SET_RUNTIME_ERROR:
-            pVM->vmm.s.rcCallHost = VMR3SetRuntimeErrorWorker(pVM);
+            pVCpu->vmm.s.rcCallHost = VMR3SetRuntimeErrorWorker(pVM);
             break;
…
         case VMMCALLHOST_VM_R0_ASSERTION:
-            pVM->vmm.s.enmCallHostOperation = VMMCALLHOST_INVALID;
-            pVM->vmm.s.CallHostR0JmpBuf.fInRing3Call = false;
+            pVCpu->vmm.s.enmCallHostOperation = VMMCALLHOST_INVALID;
+            pVCpu->vmm.s.CallHostR0JmpBuf.fInRing3Call = false;
 #ifdef RT_ARCH_X86
-            pVM->vmm.s.CallHostR0JmpBuf.eip = 0;
+            pVCpu->vmm.s.CallHostR0JmpBuf.eip = 0;
 #else
-            pVM->vmm.s.CallHostR0JmpBuf.rip = 0;
+            pVCpu->vmm.s.CallHostR0JmpBuf.rip = 0;
 #endif
             LogRel((pVM->vmm.s.szRing0AssertMsg1));
…
         case VMMCALLHOST_VM_R0_PREEMPT:
-            pVM->vmm.s.rcCallHost = VINF_SUCCESS;
+            pVCpu->vmm.s.rcCallHost = VINF_SUCCESS;
             break;

         default:
-            AssertMsgFailed(("enmCallHostOperation=%d\n", pVM->vmm.s.enmCallHostOperation));
+            AssertMsgFailed(("enmCallHostOperation=%d\n", pVCpu->vmm.s.enmCallHostOperation));
             return VERR_INTERNAL_ERROR;
     }

-    pVM->vmm.s.enmCallHostOperation = VMMCALLHOST_INVALID;
+    pVCpu->vmm.s.enmCallHostOperation = VMMCALLHOST_INVALID;
     return VINF_SUCCESS;
 }
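A pattern that recurs through all of the VMM.cpp hunks is the fixed contract between contexts: ring-0/RC code returns VINF_VMM_CALL_HOST whenever it needs something only ring-3 can do, and the ring-3 caller services the pended request and re-enters, looping until a real status comes back. Reduced to a self-contained toy (every name below is a stand-in, and the counter merely simulates ring-0 asking for service twice):

    #include <stdio.h>

    enum { VINF_SUCCESS = 0, VINF_CALL_HOST = 1 };

    /* Toy "ring-0" body: asks for host service twice, then finishes. */
    static int runRing0(int *pcPending)
    {
        return (*pcPending)-- > 0 ? VINF_CALL_HOST : VINF_SUCCESS;
    }

    /* Toy stand-in for vmmR3ServiceCallHostRequest(). */
    static int serviceRequest(void)
    {
        printf("servicing pended ring-3 request\n");
        return VINF_SUCCESS;
    }

    /* The loop VMM.cpp repeats around every ring-0/RC entry point. */
    static int runUntilDone(void)
    {
        int cPending = 2;
        for (;;)
        {
            int rc = runRing0(&cPending);
            if (rc != VINF_CALL_HOST)
                return rc;              /* finished, or a real error */
            rc = serviceRequest();      /* e.g. grow a pool, flush a queue */
            if (rc < 0)
                return rc;              /* servicing failed: don't resume */
        }
    }

    int main(void) { return runUntilDone(); }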
trunk/src/VBox/VMM/VMMAll/VMMAll.cpp
r19358 → r19434

 #include "VMMInternal.h"
 #include <VBox/vm.h>
+#include <VBox/vmm.h>
 #include <VBox/param.h>
 #include <VBox/hwaccm.h>
…
 VMMDECL(RTRCPTR) VMMGetStackRC(PVM pVM)
 {
-    return (RTRCPTR)pVM->vmm.s.pbEMTStackBottomRC;
+    PVMCPU pVCpu = VMMGetCpu(pVM);
+    Assert(pVCpu);
+
+    return (RTRCPTR)pVCpu->vmm.s.pbEMTStackBottomRC;
 }
trunk/src/VBox/VMM/VMMGC/VMMGC.cpp
r17422 → r19434

 VMMRCDECL(int) VMMGCCallHost(PVM pVM, VMMCALLHOST enmOperation, uint64_t uArg)
 {
+    PVMCPU pVCpu = VMMGetCpu0(pVM);
+
     /** @todo profile this! */
-    pVM->vmm.s.enmCallHostOperation = enmOperation;
-    pVM->vmm.s.u64CallHostArg = uArg;
-    pVM->vmm.s.rcCallHost = VERR_INTERNAL_ERROR;
+    pVCpu->vmm.s.enmCallHostOperation = enmOperation;
+    pVCpu->vmm.s.u64CallHostArg = uArg;
+    pVCpu->vmm.s.rcCallHost = VERR_INTERNAL_ERROR;
     pVM->vmm.s.pfnGuestToHostRC(VINF_VMM_CALL_HOST);
-    return pVM->vmm.s.rcCallHost;
+    return pVCpu->vmm.s.rcCallHost;
 }
trunk/src/VBox/VMM/VMMGuruMeditation.cpp
r19293 → r19434

                     "!!\n"
                     "%.*Rhxd\n",
-                    pVM->vmm.s.pbEMTStackRC, pVM->vmm.s.pbEMTStackBottomRC,
-                    VMM_STACK_SIZE, pVM->vmm.s.pbEMTStackR3);
+                    pVCpu->vmm.s.pbEMTStackRC, pVCpu->vmm.s.pbEMTStackBottomRC,
+                    VMM_STACK_SIZE, pVCpu->vmm.s.pbEMTStackR3);
             } /* !HWACCMR3IsActive */
             break;
trunk/src/VBox/VMM/VMMInternal.h
r19366 → r19434

     R0PTRTYPE(PFNVMMSWITCHERHC) pfnHostToGuestR0;
     /** @} */
-
-    /** VMM stack, pointer to the top of the stack in R3.
-     * Stack is allocated from the hypervisor heap and is page aligned
-     * and always writable in RC. */
-    R3PTRTYPE(uint8_t *)        pbEMTStackR3;
-    /** Pointer to the bottom of the stack - needed for doing relocations. */
-    RCPTRTYPE(uint8_t *)        pbEMTStackRC;
-    /** Pointer to the bottom of the stack - needed for doing relocations. */
-    RCPTRTYPE(uint8_t *)        pbEMTStackBottomRC;

     /** @name Logging
…
     /** The timestamp of the previous yield. (nano) */
     uint64_t                    u64LastYield;
-
-    /** @name CallHost
-     * @todo SMP: per vCPU
-     * @{ */
-    /** The pending operation. */
-    VMMCALLHOST                 enmCallHostOperation;
-    /** The result of the last operation. */
-    int32_t                     rcCallHost;
-    /** The argument to the operation. */
-    uint64_t                    u64CallHostArg;
-    /** The Ring-0 jmp buffer. */
-    VMMR0JMPBUF                 CallHostR0JmpBuf;
-    /** @} */

     /** Buffer for storing the standard assertion message for a ring-0 assertion.
…
      * See VMM2VMCPU(). */
     RTINT                       offVMCPU;
+
+    /** VMM stack, pointer to the top of the stack in R3.
+     * Stack is allocated from the hypervisor heap and is page aligned
+     * and always writable in RC. */
+    R3PTRTYPE(uint8_t *)        pbEMTStackR3;
+    /** Pointer to the bottom of the stack - needed for doing relocations. */
+    RCPTRTYPE(uint8_t *)        pbEMTStackRC;
+    /** Pointer to the bottom of the stack - needed for doing relocations. */
+    RCPTRTYPE(uint8_t *)        pbEMTStackBottomRC;
+
+    /** @name CallHost
+     * @{ */
+    /** The pending operation. */
+    VMMCALLHOST                 enmCallHostOperation;
+    /** The result of the last operation. */
+    int32_t                     rcCallHost;
+    /** The argument to the operation. */
+    uint64_t                    u64CallHostArg;
+    /** The Ring-0 jmp buffer. */
+    VMMR0JMPBUF                 CallHostR0JmpBuf;
+    /** @} */
+
 } VMMCPU;
 /** Pointer to VMMCPU. */
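This is the structural core of the change: everything inherently tied to one EMT (its stack, its pending call-host request, its ring-0 jump buffer) moves from the shared VMM structure into VMMCPU, so two EMTs can take the call-host path concurrently without clobbering each other's state. In outline, with illustrative field names rather than the full definitions:

    #include <stdint.h>

    /* Per-EMT state: each virtual CPU's execution thread gets its own copy,
     * so concurrent call-host requests cannot race. Illustrative fields. */
    typedef struct SAMPLEVMMCPU
    {
        uint8_t *pbEMTStackR3;      /* this EMT's hypervisor stack */
        int32_t  enmCallHostOp;     /* pending host operation */
        int32_t  rcCallHost;        /* its result */
        uint64_t u64CallHostArg;    /* its argument */
    } SAMPLEVMMCPU;

    /* Truly shared state stays per-VM: switcher entry points, loggers,
     * the yield timer, statistics. */
    typedef struct SAMPLEVMM
    {
        uint64_t     u64LastYield;
        unsigned     cCpus;
        SAMPLEVMMCPU aCpus[16];     /* one slot per EMT; 16 is arbitrary here */
    } SAMPLEVMM;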
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
r19406 → r19434

 VMMR0DECL(int) VMMR0CallHost(PVM pVM, VMMCALLHOST enmOperation, uint64_t uArg)
 {
+    PVMCPU pVCpu = VMMGetCpu(pVM);
+
     /** @todo profile this! */
-    pVM->vmm.s.enmCallHostOperation = enmOperation;
-    pVM->vmm.s.u64CallHostArg = uArg;
-    pVM->vmm.s.rcCallHost = VERR_INTERNAL_ERROR;
-    int rc = vmmR0CallHostLongJmp(&pVM->vmm.s.CallHostR0JmpBuf, VINF_VMM_CALL_HOST);
+    pVCpu->vmm.s.enmCallHostOperation = enmOperation;
+    pVCpu->vmm.s.u64CallHostArg = uArg;
+    pVCpu->vmm.s.rcCallHost = VERR_INTERNAL_ERROR;
+    int rc = vmmR0CallHostLongJmp(&pVCpu->vmm.s.CallHostR0JmpBuf, VINF_VMM_CALL_HOST);
     if (rc == VINF_SUCCESS)
-        rc = pVM->vmm.s.rcCallHost;
+        rc = pVCpu->vmm.s.rcCallHost;
     return rc;
 }
…
  * Record return code statistics
  * @param   pVM     The VM handle.
+ * @param   pVCpu   The VMCPU handle.
  * @param   rc      The status code.
  */
-static void vmmR0RecordRC(PVM pVM, int rc)
+static void vmmR0RecordRC(PVM pVM, PVMCPU pVCpu, int rc)
 {
     /*
…
             break;
         case VINF_VMM_CALL_HOST:
-            switch (pVM->vmm.s.enmCallHostOperation)
+            switch (pVCpu->vmm.s.enmCallHostOperation)
             {
                 case VMMCALLHOST_PDM_LOCK:
…
 #ifdef VBOX_WITH_STATISTICS
         STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
-        vmmR0RecordRC(pVM, rc);
+        vmmR0RecordRC(pVM, pVCpu, rc);
 #endif
     }
…
         if (RT_SUCCESS(rc))
         {
-            rc = vmmR0CallHostSetJmp(&pVM->vmm.s.CallHostR0JmpBuf, HWACCMR0RunGuestCode, pVM, pVCpu); /* this may resume code. */
+            rc = vmmR0CallHostSetJmp(&pVCpu->vmm.s.CallHostR0JmpBuf, HWACCMR0RunGuestCode, pVM, pVCpu); /* this may resume code. */
             int rc2 = HWACCMR0Leave(pVM, pVCpu);
             AssertRC(rc2);
…
 #ifdef VBOX_WITH_STATISTICS
-        vmmR0RecordRC(pVM, rc);
+        vmmR0RecordRC(pVM, pVCpu, rc);
 #endif
         /* No special action required for external interrupts, just return. */
…
         case VMMR0_DO_VMMR0_TERM:
         {
-            if (!pVM->vmm.s.CallHostR0JmpBuf.pvSavedStack)
+            PVMCPU pVCpu = &pVM->aCpus[idCpu];
+
+            if (!pVCpu->vmm.s.CallHostR0JmpBuf.pvSavedStack)
                 break;
…
             Args.u64Arg = u64Arg;
             Args.pSession = pSession;
-            return vmmR0CallHostSetJmpEx(&pVM->vmm.s.CallHostR0JmpBuf, vmmR0EntryExWrapper, &Args);
+            return vmmR0CallHostSetJmpEx(&pVCpu->vmm.s.CallHostR0JmpBuf, vmmR0EntryExWrapper, &Args);
         }
…
     }

+    PVMCPU pVCpu = VMMGetCpu(pVM);
+
     /*
      * Check that the jump buffer is armed.
      */
 #ifdef RT_ARCH_X86
-    if (    !pVM->vmm.s.CallHostR0JmpBuf.eip
-        ||  pVM->vmm.s.CallHostR0JmpBuf.fInRing3Call)
+    if (    !pVCpu->vmm.s.CallHostR0JmpBuf.eip
+        ||  pVCpu->vmm.s.CallHostR0JmpBuf.fInRing3Call)
 #else
-    if (    !pVM->vmm.s.CallHostR0JmpBuf.rip
-        ||  pVM->vmm.s.CallHostR0JmpBuf.fInRing3Call)
+    if (    !pVCpu->vmm.s.CallHostR0JmpBuf.rip
+        ||  pVCpu->vmm.s.CallHostR0JmpBuf.fInRing3Call)
 #endif
     {
…
     if (pVM)
     {
+        PVMCPU pVCpu = VMMGetCpu(pVM);
+
 #ifdef RT_ARCH_X86
-        if (    pVM->vmm.s.CallHostR0JmpBuf.eip
-            &&  !pVM->vmm.s.CallHostR0JmpBuf.fInRing3Call)
+        if (    pVCpu->vmm.s.CallHostR0JmpBuf.eip
+            &&  !pVCpu->vmm.s.CallHostR0JmpBuf.fInRing3Call)
 #else
-        if (    pVM->vmm.s.CallHostR0JmpBuf.rip
-            &&  !pVM->vmm.s.CallHostR0JmpBuf.fInRing3Call)
+        if (    pVCpu->vmm.s.CallHostR0JmpBuf.rip
+            &&  !pVCpu->vmm.s.CallHostR0JmpBuf.fInRing3Call)
 #endif
         {
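The jump buffer checked and disarmed here is what carries a ring-0 thread back to its ring-3 caller; with one buffer per vCPU, each EMT can escape independently. A minimal model of the escape using standard setjmp/longjmp (the real vmmR0CallHostSetJmp/LongJmp pair also saves and restores the stack so execution can later resume where it left off, which plain longjmp cannot express):

    #include <setjmp.h>
    #include <stdio.h>

    /* One buffer and one pending-operation slot per vCPU in the real code;
     * globals keep this sketch short. */
    static jmp_buf g_JmpBuf;
    static int     g_enmPendingOp;

    /* Deep "ring-0" code escapes to the caller that armed the buffer. */
    static void deepRing0Work(void)
    {
        g_enmPendingOp = 42;        /* pend a request for "ring-3" */
        longjmp(g_JmpBuf, 1);
    }

    int main(void)
    {
        if (setjmp(g_JmpBuf) == 0)  /* arm the buffer, then enter "ring-0" */
            deepRing0Work();
        else
            printf("servicing pended op %d in ring-3\n", g_enmPendingOp);
        return 0;
    }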
trunk/src/VBox/VMM/VMMTests.cpp
r19334 → r19434

     CPUMHyperSetCtxCore(pVCpu, NULL);
-    memset(pVM->vmm.s.pbEMTStackR3, 0xaa, VMM_STACK_SIZE);
-    CPUMSetHyperESP(pVCpu, pVM->vmm.s.pbEMTStackBottomRC); /* Clear the stack. */
+    memset(pVCpu->vmm.s.pbEMTStackR3, 0xaa, VMM_STACK_SIZE);
+    CPUMSetHyperESP(pVCpu, pVCpu->vmm.s.pbEMTStackBottomRC); /* Clear the stack. */
     CPUMPushHyper(pVCpu, uVariation);
     CPUMPushHyper(pVCpu, enmTestcase);
…
     CPUMHyperSetCtxCore(pVCpu, NULL);
-    memset(pVM->vmm.s.pbEMTStackR3, 0xaa, VMM_STACK_SIZE);
-    CPUMSetHyperESP(pVCpu, pVM->vmm.s.pbEMTStackBottomRC); /* Clear the stack. */
+    memset(pVCpu->vmm.s.pbEMTStackR3, 0xaa, VMM_STACK_SIZE);
+    CPUMSetHyperESP(pVCpu, pVCpu->vmm.s.pbEMTStackBottomRC); /* Clear the stack. */
     CPUMPushHyper(pVCpu, uVariation);
     CPUMPushHyper(pVCpu, u8Trap + VMMGC_DO_TESTCASE_TRAP_FIRST);
…
     CPUMHyperSetCtxCore(pVCpu, NULL);
-    CPUMSetHyperESP(pVCpu, pVM->vmm.s.pbEMTStackBottomRC); /* Clear the stack. */
+    CPUMSetHyperESP(pVCpu, pVCpu->vmm.s.pbEMTStackBottomRC); /* Clear the stack. */
     CPUMPushHyper(pVCpu, 0);
     CPUMPushHyper(pVCpu, VMMGC_DO_TESTCASE_HYPER_INTERRUPT);
…
     {
         CPUMHyperSetCtxCore(pVCpu, NULL);
-        CPUMSetHyperESP(pVCpu, pVM->vmm.s.pbEMTStackBottomRC); /* Clear the stack. */
+        CPUMSetHyperESP(pVCpu, pVCpu->vmm.s.pbEMTStackBottomRC); /* Clear the stack. */
         CPUMPushHyper(pVCpu, 0);
         CPUMPushHyper(pVCpu, VMMGC_DO_TESTCASE_NOP);
…
     CPUMHyperSetCtxCore(pVCpu, NULL);

-    CPUMSetHyperESP(pVCpu, pVM->vmm.s.pbEMTStackBottomRC); /* Clear the stack. */
+    CPUMSetHyperESP(pVCpu, pVCpu->vmm.s.pbEMTStackBottomRC); /* Clear the stack. */
     CPUMPushHyper(pVCpu, 0);
     CPUMPushHyper(pVCpu, VMMGC_DO_TESTCASE_HWACCM_NOP);
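The testcases fill the stack with 0xAA before pointing ESP at its bottom. Beyond giving each run a known initial state, a recognisable fill pattern like this makes unused stack visibly untouched (for instance in the guru-meditation hex dump above) and makes a high-water scan trivial. A sketch of that measurement (my reading of why the pattern is useful, not something this changeset adds):

    #include <stddef.h>
    #include <stdint.h>

    /* Scan from the top of the allocation (the unused end of a
     * downward-growing stack) for the first byte that was overwritten;
     * everything from there to the bottom was actually used. */
    size_t stackHighWater(const uint8_t *pbStack, size_t cbStack)
    {
        size_t off = 0;
        while (off < cbStack && pbStack[off] == 0xAA)
            off++;
        return cbStack - off;   /* bytes used, measured from the bottom */
    }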
trunk/src/VBox/VMM/testcase/tstVMStructGC.cpp
r19366 → r19434

     GEN_CHECK_OFF(VMM, pfnCPUMRCResumeGuestV86);
     GEN_CHECK_OFF(VMM, iLastGZRc);
-    GEN_CHECK_OFF(VMM, pbEMTStackR3);
-    GEN_CHECK_OFF(VMM, pbEMTStackRC);
-    GEN_CHECK_OFF(VMM, pbEMTStackBottomRC);
+    GEN_CHECK_OFF(VMMCPU, pbEMTStackR3);
+    GEN_CHECK_OFF(VMMCPU, pbEMTStackRC);
+    GEN_CHECK_OFF(VMMCPU, pbEMTStackBottomRC);
     GEN_CHECK_OFF(VMM, pRCLoggerRC);
     GEN_CHECK_OFF(VMM, pRCLoggerR3);
…
     GEN_CHECK_OFF(VMM, cYieldResumeMillies);
     GEN_CHECK_OFF(VMM, cYieldEveryMillies);
-    GEN_CHECK_OFF(VMM, enmCallHostOperation);
-    GEN_CHECK_OFF(VMM, rcCallHost);
-    GEN_CHECK_OFF(VMM, u64CallHostArg);
-    GEN_CHECK_OFF(VMM, CallHostR0JmpBuf);
-    GEN_CHECK_OFF(VMM, CallHostR0JmpBuf.SpCheck);
-    GEN_CHECK_OFF(VMM, CallHostR0JmpBuf.SpResume);
+    GEN_CHECK_OFF(VMMCPU, enmCallHostOperation);
+    GEN_CHECK_OFF(VMMCPU, rcCallHost);
+    GEN_CHECK_OFF(VMMCPU, u64CallHostArg);
+    GEN_CHECK_OFF(VMMCPU, CallHostR0JmpBuf);
+    GEN_CHECK_OFF(VMMCPU, CallHostR0JmpBuf.SpCheck);
+    GEN_CHECK_OFF(VMMCPU, CallHostR0JmpBuf.SpResume);
     GEN_CHECK_OFF(VMM, StatRunRC);
     GEN_CHECK_OFF(VMM, StatRZCallPGMLock);
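tstVMStructGC exists because these structures are shared between 32-bit raw-mode context and 64-bit host code, so every member offset must agree across two differently targeted compilers; the moved members now have to be checked against VMMCPU rather than VMM. Roughly what GEN_CHECK_OFF boils down to is emitting each offset so the outputs of the two builds can be compared, sketched here with an illustrative stand-in struct:

    #include <stddef.h>
    #include <stdio.h>

    /* Illustrative stand-in; not the real VMMCPU. */
    typedef struct SAMPLECPU { char achPad[8]; int rcCallHost; } SAMPLECPU;

    /* Print "type.member = offset" so the 32-bit and 64-bit build outputs
     * can be diffed against each other to catch layout drift. */
    #define GEN_CHECK_OFF(type, member) \
        printf("%s.%s = %zu\n", #type, #member, offsetof(type, member))

    int main(void)
    {
        GEN_CHECK_OFF(SAMPLECPU, rcCallHost);
        return 0;
    }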
trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp
r19405 → r19434

     CHECK_MEMBER_ALIGNMENT(VM, cpum.s.GuestEntry, 64);

-    CHECK_MEMBER_ALIGNMENT(VM, vmm.s.CallHostR0JmpBuf, 8);
+    CHECK_MEMBER_ALIGNMENT(VMCPU, vmm.s.CallHostR0JmpBuf, 8);
     CHECK_MEMBER_ALIGNMENT(VM, vmm.s.StatRunRC, 8);
     CHECK_MEMBER_ALIGNMENT(VM, StatTotalQemuToGC, 8);
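tstVMStructSize guards alignment the same way: the jump buffer stores 64-bit values, so now that it lives in the per-CPU structure its offset inside VMCPU must stay 8-byte aligned on all hosts. The equivalent compile-time check in plain C11, with illustrative types only:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef struct JMPBUFLIKE { uint64_t au64[8]; } JMPBUFLIKE;
    typedef struct PERCPULIKE
    {
        uint32_t   u32Something;
        uint32_t   u32Padding;   /* keeps the jump buffer 8-byte aligned */
        JMPBUFLIKE CallHostR0JmpBuf;
    } PERCPULIKE;

    static_assert(offsetof(PERCPULIKE, CallHostR0JmpBuf) % 8 == 0,
                  "CallHostR0JmpBuf must be 8-byte aligned");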