Changeset 13796 in vbox

- Timestamp: Nov 4, 2008 6:37:33 PM (16 years ago)
- svn:sync-xref-src-repo-rev: 38784
- Location: trunk
- Files: 17 edited
Legend (diff notation used below):
- unchanged context lines carry a leading space
- added lines are prefixed with +
- removed lines are prefixed with -
- lines of "..." mark elided stretches between hunks
trunk/include/VBox/uvm.h (r13782 → r13796)

 #define ___VBox_uvm_h
 
-
 #include <VBox/types.h>
 
-/* Forward decl. */
-struct UVM;
 
 /**
...
 typedef struct UVMCPU
 {
-    struct UVM *pUVM;
+    /** Pointer to the UVM structure. */
+    PUVM pUVM;
+    /** The virtual CPU ID. */
     RTCPUID idCPU;
...
         uint8_t padding[768];
     } vm;
-} UVMCPU, *PUVMCPU;
+} UVMCPU;
+/** Pointer to the per virtual CPU ring-3 (user mode) data. */
+typedef UVMCPU *PUVMCPU;
+
...
     } stam;
 
-    /* Per virtual CPU data. */
-    UVMCPU aCpu[1];
+    /** Per virtual CPU data. */
+    UVMCPU aCpus[1];
 } UVM;
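
The trailing aCpus[1] member is the classic size-for-N trick: the structure is declared with one array element and allocated with room for all of them. A minimal sketch of that pattern, using simplified stand-in types (the real UVM/UVMCPU carry far more state) and portable offsetof arithmetic in place of RT_OFFSETOF(UVM, aCpus[cCPUs]):

    #include <stddef.h>
    #include <stdlib.h>

    typedef unsigned RTCPUID;

    struct UVM;                          /* forward declaration for the back pointer */
    typedef struct UVMCPU
    {
        struct UVM *pUVM;                /* back pointer to the owning UVM */
        RTCPUID     idCPU;               /* virtual CPU id */
    } UVMCPU;

    typedef struct UVM
    {
        unsigned cCPUs;
        UVMCPU   aCpus[1];               /* variable-size tail; at least one entry */
    } UVM;

    /* Allocate a UVM sized for cCPUs tail entries; mirrors the
     * RTMemAllocZ(RT_OFFSETOF(UVM, aCpus[cCPUs])) call in vmR3CreateUVM. */
    static UVM *AllocUVM(unsigned cCPUs)
    {
        size_t cb = offsetof(UVM, aCpus) + cCPUs * sizeof(UVMCPU);
        UVM *pUVM = (UVM *)calloc(1, cb);    /* zeroed, like RTMemAllocZ */
        if (pUVM)
        {
            pUVM->cCPUs = cCPUs;
            for (unsigned i = 0; i < cCPUs; i++)
            {
                pUVM->aCpus[i].pUVM  = pUVM;
                pUVM->aCpus[i].idCPU = i;
            }
        }
        return pUVM;
    }
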
trunk/include/VBox/vm.h (r13791 → r13796)

     uint32_t u32Reserved2[8];
 
-    /** VMCPU array for the configured number of virtual CPUs. */
-    VMCPU aCpu[1];
+    /** VMCPU array for the configured number of virtual CPUs.
+     * Must be aligned on a 64-byte boundary. */
+    VMCPU aCpus[1];
 } VM;
trunk/src/VBox/VMM/CPUM.cpp (r13778 → r13796)

Whitespace only: a second blank line is inserted between function bodies around the per-VCPU init/term functions ("Initializes the per-VCPU CPUM" / "Terminates the per-VCPU CPUM"), matching the two-blank-line function separation used elsewhere in the VMM.
trunk/src/VBox/VMM/EM.cpp (r13782 → r13796)

Whitespace only: blank lines are added around the per-VCPU init function ("Initializes the per-VCPU EM") for the same two-blank-line function separation.
trunk/src/VBox/VMM/MMHyper.cpp (r13767 → r13796)

      * Map the VM structure into the hypervisor space.
      */
-    AssertRelease(pVM->cbSelf == RT_UOFFSETOF(VM, aCpu[pVM->cCPUs]));
-
+    AssertRelease(pVM->cbSelf == RT_UOFFSETOF(VM, aCpus[pVM->cCPUs]));
     RTGCPTR GCPtr;
     rc = MMR3HyperMapPages(pVM, pVM, pVM->pVMR0, RT_ALIGN_Z(pVM->cbSelf, PAGE_SIZE) >> PAGE_SHIFT, pVM->paVMPagesR3, "VM", &GCPtr);
...
     pVM->pVMGC = pVM->pVMRC;
     for (uint32_t i = 0; i < pVM->cCPUs; i++)
-        pVM->aCpu[i].pVMRC = pVM->pVMRC;
+        pVM->aCpus[i].pVMRC = pVM->pVMRC;
 
     /* Reserve a page for fencing. */
...
     pVM->pVMGC = pVM->pVMRC;
     for (uint32_t i = 0; i < pVM->cCPUs; i++)
-        pVM->aCpu[i].pVMRC = pVM->pVMRC;
+        pVM->aCpus[i].pVMRC = pVM->pVMRC;
 
     pVM->mm.s.pvHyperAreaGC += offDelta;
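
The mapping size in the MMR3HyperMapPages call comes from rounding cbSelf up to whole pages. A small sketch of that arithmetic, with RT_ALIGN_Z spelled out in plain C and 4K x86 pages assumed:

    #include <stddef.h>
    #include <stdio.h>

    #define PAGE_SIZE  0x1000   /* 4K pages, as on x86 */
    #define PAGE_SHIFT 12

    /* Round a byte count up to whole pages and convert to a page count:
     * the RT_ALIGN_Z(cb, PAGE_SIZE) >> PAGE_SHIFT idiom from the diff. */
    static size_t cbToPages(size_t cb)
    {
        size_t cbAligned = (cb + PAGE_SIZE - 1) & ~(size_t)(PAGE_SIZE - 1);
        return cbAligned >> PAGE_SHIFT;
    }

    int main(void)
    {
        printf("%zu\n", cbToPages(0x1001));  /* prints 2 */
        return 0;
    }
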
trunk/src/VBox/VMM/PGM.cpp (r13778 → r13796)

Whitespace only: blank lines are added around the per-VCPU init/term functions ("Initializes the per-VCPU PGM" / "Terminates the per-VCPU PGM").
trunk/src/VBox/VMM/PGMInternal.h (r13742 → r13796)

     STAMPROFILE StatR3GstModifyPage; /**< R3: Profiling of the PGMGstModifyPage() body */
 #endif /* VBOX_WITH_STATISTICS */
-} PGM, *PPGM;
+} PGM;
+/** Pointer to the PGM instance data. */
+typedef PGM *PPGM;
 
 
 /**
- * PGMCPU Data (part of VMCPU)
+ * PGMCPU Data (part of VMCPU).
  */
 typedef struct PGMCPU
 {
     /** Offset to the VMCPU structure. */
     RTINT offVMCPU;
-} PGMCPU, *PPGMCPU;
+} PGMCPU;
+/** Pointer to the per-cpu PGM data. */
+typedef PGMCPU *PPGMCPU;
+
 
 /** @name PGM::fSyncFlags Flags
trunk/src/VBox/VMM/TM.cpp (r13778 → r13796)

Whitespace only: blank lines are added around the per-VCPU init/term functions ("Initializes the per-VCPU TM" / "Terminates the per-VCPU TM").
trunk/src/VBox/VMM/VM.cpp (r13791 → r13796)

 VMMR3DECL(int) VMR3Create(uint32_t cCPUs, PFNVMATERROR pfnVMAtError, void *pvUserVM, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM, PVM *ppVM)
 {
-    LogFlow(("VMR3Create: cCPUs=%d pfnVMAtError=%p pvUserVM=%p pfnCFGMConstructor=%p pvUserCFGM=%p ppVM=%p\n", cCPUs, pfnVMAtError, pvUserVM, pfnCFGMConstructor, pvUserCFGM, ppVM));
+    LogFlow(("VMR3Create: cCPUs=%RU32 pfnVMAtError=%p pvUserVM=%p pfnCFGMConstructor=%p pvUserCFGM=%p ppVM=%p\n", cCPUs, pfnVMAtError, pvUserVM, pfnCFGMConstructor, pvUserCFGM, ppVM));
...
     PVMREQ pReq;
+    /** @todo SMP: VMREQDEST_ANY -> VMREQDEST_CPU0 */
     rc = VMR3ReqCallU(pUVM, VMREQDEST_ANY, &pReq, RT_INDEFINITE_WAIT, 0, (PFNRT)vmR3CreateU,
                       4, pUVM, cCPUs, pfnCFGMConstructor, pvUserCFGM);
...
 static int vmR3CreateUVM(uint32_t cCPUs, PUVM *ppUVM)
 {
-    /*
-     * Create the UVM, initialize the fundamental stuff (VM+MMR3Heap+STAM)
-     * and start the emulation thread (EMT).
-     */
-    PUVM pUVM = (PUVM)RTMemAllocZ(RT_OFFSETOF(UVM, aCpu[cCPUs]));
+    uint32_t i;
+
+    /*
+     * Create and initialize the UVM.
+     */
+    PUVM pUVM = (PUVM)RTMemAllocZ(RT_OFFSETOF(UVM, aCpus[cCPUs]));
     AssertReturn(pUVM, VERR_NO_MEMORY);
     pUVM->u32Magic = UVM_MAGIC;
...
     pUVM->vm.s.enmHaltMethod = VMHALTMETHOD_BOOTSTRAP;
 
+    /* Initialize the VMCPU array in the UVM. */
+    for (i = 0; i < cCPUs; i++)
+    {
+        pUVM->aCpus[i].pUVM = pUVM;
+        pUVM->aCpus[i].idCPU = i;
+    }
+
     /* Allocate a TLS entry to store the VMINTUSERPERVMCPU pointer. */
     int rc = RTTlsAllocEx(&pUVM->vm.s.idxTLS, NULL);
     AssertRC(rc);
-    if (RT_FAILURE(rc))
-    {
-        RTMemFree(pUVM);
-        return rc;
-    }
-
-    /* Initialize the VMCPU array in the UVM. */
-    for (unsigned i=0;i<cCPUs;i++)
-    {
-        pUVM->aCpu[i].pUVM = pUVM;
-        pUVM->aCpu[i].idCPU = i;
-    }
-
-    rc = RTSemEventCreate(&pUVM->vm.s.EventSemWait);
     if (RT_SUCCESS(rc))
     {
-        rc = STAMR3InitUVM(pUVM);
+        rc = RTSemEventCreate(&pUVM->vm.s.EventSemWait);
         if (RT_SUCCESS(rc))
         {
-            rc = MMR3InitUVM(pUVM);
+            /*
+             * Init fundamental (sub-)components - STAM, MMR3Heap and PDMLdr.
+             */
+            rc = STAMR3InitUVM(pUVM);
             if (RT_SUCCESS(rc))
             {
-                rc = PDMR3InitUVM(pUVM);
+                rc = MMR3InitUVM(pUVM);
                 if (RT_SUCCESS(rc))
                 {
-                    /* Start the emulation threads for all VMCPUs. */
-                    for (unsigned i=0;i<cCPUs;i++)
-                    {
-                        rc = RTThreadCreate(&pUVM->aCpu[i].vm.s.ThreadEMT, vmR3EmulationThread, &pUVM->aCpu[i], _1M,
-                                            RTTHREADTYPE_EMULATION, RTTHREADFLAGS_WAITABLE, "EMT");
-                        if (RT_FAILURE(rc))
-                            break;
-
-                        pUVM->aCpu[i].vm.s.NativeThreadEMT = RTThreadGetNative(pUVM->aCpu[i].vm.s.ThreadEMT);
-                    }
-
+                    rc = PDMR3InitUVM(pUVM);
                     if (RT_SUCCESS(rc))
                     {
-                        *ppUVM = pUVM;
-                        return VINF_SUCCESS;
+                        /*
+                         * Start the emulation threads for all VMCPUs.
+                         */
+                        for (i = 0; i < cCPUs; i++)
+                        {
+                            rc = RTThreadCreate(&pUVM->aCpus[i].vm.s.ThreadEMT, vmR3EmulationThread, &pUVM->aCpus[i], _1M,
+                                                RTTHREADTYPE_EMULATION, RTTHREADFLAGS_WAITABLE, "EMT");
+                            if (RT_FAILURE(rc))
+                                break;
+
+                            pUVM->aCpus[i].vm.s.NativeThreadEMT = RTThreadGetNative(pUVM->aCpus[i].vm.s.ThreadEMT);
+                        }
+
+                        if (RT_SUCCESS(rc))
+                        {
+                            *ppUVM = pUVM;
+                            return VINF_SUCCESS;
+                        }
+
+                        /* bail out. */
+                        while (i-- > 0)
+                        {
+                            /** @todo rainy day: terminate the EMTs. */
+                        }
+                        PDMR3TermUVM(pUVM);
                     }
-
-                    /* bail out. */
-                    PDMR3TermUVM(pUVM);
+                    MMR3TermUVM(pUVM);
                 }
-                MMR3TermUVM(pUVM);
+                STAMR3TermUVM(pUVM);
             }
-            STAMR3TermUVM(pUVM);
+            RTSemEventDestroy(pUVM->vm.s.EventSemWait);
         }
-        RTSemEventDestroy(pUVM->vm.s.EventSemWait);
+        RTTlsFree(pUVM->vm.s.idxTLS);
     }
-    RTTlsFree(pUVM->vm.s.idxTLS);
     RTMemFree(pUVM);
     return rc;
...
     AssertRelease(pVM->pSession == pUVM->vm.s.pSession);
     AssertRelease(pVM->cCPUs == cCPUs);
+    AssertRelease(pVM->offVMCPU == RT_UOFFSETOF(VM, aCpus));
 
     Log(("VMR3Create: Created pUVM=%p pVM=%p pVMR0=%p hSelf=%#x cCPUs=%RU32\n", ...
...
     pVM->pUVM = pUVM;
-    pVM->offVMCPU = RT_OFFSETOF(VM, aCpu);
 
     for (uint32_t i = 0; i < pVM->cCPUs; i++)
     {
-        pVM->aCpu[i].hNativeThread = pUVM->aCpu[i].vm.s.NativeThreadEMT;
-        Assert(pVM->aCpu[i].hNativeThread != NIL_RTNATIVETHREAD);
+        pVM->aCpus[i].hNativeThread = pUVM->aCpus[i].vm.s.NativeThreadEMT;
+        Assert(pVM->aCpus[i].hNativeThread != NIL_RTNATIVETHREAD);
     }
...
     rc = EMR3InitCPU(pVM);
     if (VBOX_SUCCESS(rc))
+    {
+        LogFlow(("vmR3InitVMCpu: returns %Rrc\n", VINF_SUCCESS));
         return VINF_SUCCESS;
+    }
 
     rc2 = VMMR3TermCPU(pVM);
...
     Assert(pUVM->vm.s.fTerminateEMT);
     /** @todo SMP */
-    rc = RTThreadWait(pUVM->aCpu[0].vm.s.ThreadEMT, 30000, NULL);
+    rc = RTThreadWait(pUVM->aCpus[0].vm.s.ThreadEMT, 30000, NULL);
     AssertMsgRC(rc, ("EMT thread wait failed, rc=%Rrc\n", rc));
...
     /** @todo SMP */
-    int rc2 = RTThreadWait(pUVM->aCpu[0].vm.s.ThreadEMT, 2000, NULL);
+    int rc2 = RTThreadWait(pUVM->aCpus[0].vm.s.ThreadEMT, 2000, NULL);
     AssertRC(rc2);

The remaining hunks in this file are blank-line spacing changes around "Initializes all VM CPU components of the VM" and the VMCPU id/thread-handle getters at the end of the file.
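
vmR3CreateUVM drops the early return after RTTlsAllocEx in favour of one nested success path, so every acquired resource has exactly one release site, executed in reverse order on failure. A compact, self-contained sketch of the idiom, with hypothetical Step1/Step2 stand-ins rather than real VMM APIs:

    #include <stdlib.h>

    typedef struct Thing { int a, b; } Thing;
    #define VINF_SUCCESS    0
    #define VERR_NO_MEMORY  (-8)
    #define RT_SUCCESS(rc)  ((rc) >= 0)

    /* Stub init/term steps standing in for STAMR3InitUVM/MMR3InitUVM/... */
    static int  Step1Init(Thing *p) { p->a = 1; return VINF_SUCCESS; }
    static void Step1Term(Thing *p) { p->a = 0; }
    static int  Step2Init(Thing *p) { p->b = 2; return VINF_SUCCESS; }
    static void Step2Term(Thing *p) { p->b = 0; }

    static int CreateThing(Thing **ppThing)
    {
        Thing *p = (Thing *)calloc(1, sizeof(*p));
        if (!p)
            return VERR_NO_MEMORY;

        int rc = Step1Init(p);
        if (RT_SUCCESS(rc))
        {
            rc = Step2Init(p);
            if (RT_SUCCESS(rc))
            {
                *ppThing = p;
                return VINF_SUCCESS;    /* fully constructed */
            }
            Step1Term(p);               /* unwind in reverse order */
        }
        free(p);
        return rc;                      /* first failure status */
    }

The payoff over the early-return style it replaces: adding a new init step means adding one nesting level and one term call, and no path can leak a partially constructed object.
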
trunk/src/VBox/VMM/VMEmt.cpp (r13791 → r13796)

     PUVM pUVM = pUVMCPU->pUVM;
     RTCPUID idCPU = pUVMCPU->idCPU;
-    int rc = VINF_SUCCESS;
+    int rc;
 
     AssertReleaseMsg(VALID_PTR(pUVM) && pUVM->u32Magic == UVM_MAGIC,
...
     rc = RTTlsSet(pUVM->vm.s.idxTLS, pUVMCPU);
-    AssertReleaseMsgReturn(RT_SUCCESS(rc), ("RTTlsSet %x failed with %Rrc\n", pUVM->vm.s.idxTLS, rc), rc);
+    AssertReleaseMsgRCReturn(rc, ("RTTlsSet %x failed with %Rrc\n", pUVM->vm.s.idxTLS, rc), rc);
 
     /*
      * The request loop.
      */
+    rc = VINF_SUCCESS;
     volatile VMSTATE enmBefore = VMSTATE_CREATING; /* volatile because of setjmp */
     Log(("vmR3EmulationThread: Emulation thread starting the days work... Thread=%#x pUVM=%p\n", ThreadSelf, pUVM));
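
The RTTlsSet at thread entry is what later lets any code running on an EMT recover its own per-CPU context without parameter plumbing. A sketch of the same pattern with POSIX TLS keys standing in for the RTTls API (all names here are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    typedef struct EmtCtx { unsigned idCPU; } EmtCtx;

    static pthread_key_t g_Key;                 /* ~ pUVM->vm.s.idxTLS */

    static void *EmtThread(void *pvArg)
    {
        /* Stash our per-CPU context first thing, like RTTlsSet(idxTLS, pUVMCPU). */
        pthread_setspecific(g_Key, pvArg);

        /* Any code on this thread can now look itself up. */
        EmtCtx *pCtx = (EmtCtx *)pthread_getspecific(g_Key);
        printf("EMT for VCPU %u running\n", pCtx->idCPU);
        return NULL;
    }

    int main(void)
    {
        pthread_key_create(&g_Key, NULL);       /* ~ RTTlsAllocEx */

        EmtCtx aCtx[2] = { { 0 }, { 1 } };
        pthread_t aThreads[2];
        for (unsigned i = 0; i < 2; i++)
            pthread_create(&aThreads[i], NULL, EmtThread, &aCtx[i]);
        for (unsigned i = 0; i < 2; i++)
            pthread_join(aThreads[i], NULL);
        return 0;
    }
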
trunk/src/VBox/VMM/VMM.cpp (r13778 → r13796)

 VMMR3DECL(int) VMMR3TermCPU(PVM pVM)
 {
-    return 0;
+    return VINF_SUCCESS;
 }

Plus blank-line spacing changes around the per-VCPU init/term functions ("Terminates the per-VCPU VMM").
trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp (r13791 → r13796)

     {
         pCritSect->s.Core.cNestings = 1;
-        Assert(pVM->aCpu[idCPU].hNativeThread);
-        ASMAtomicXchgSize(&pCritSect->s.Core.NativeThreadOwner, pVM->aCpu[idCPU].hNativeThread);
+        Assert(pVM->aCpus[idCPU].hNativeThread);
+        ASMAtomicXchgSize(&pCritSect->s.Core.NativeThreadOwner, pVM->aCpus[idCPU].hNativeThread);
         STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
         return VINF_SUCCESS;
     }
...
     /*
      * Nested?
      */
-    if (pCritSect->s.Core.NativeThreadOwner == pVM->aCpu[idCPU].hNativeThread)
+    if (pCritSect->s.Core.NativeThreadOwner == pVM->aCpus[idCPU].hNativeThread)
     {
         pCritSect->s.Core.cNestings++;
...
     PVM pVM = pCritSect->s.CTX_SUFF(pVM);
     Assert(pVM);
-    AssertMsg(pCritSect->s.Core.NativeThreadOwner == pVM->aCpu[VM_GET_VMCPUID(pVM)].hNativeThread, ("Owner %RX64 emt=%RX64\n", pCritSect->s.Core.NativeThreadOwner, pVM->aCpu[VM_GET_VMCPUID(pVM)].hNativeThread));
+    AssertMsg(pCritSect->s.Core.NativeThreadOwner == pVM->aCpus[VM_GET_VMCPUID(pVM)].hNativeThread, ("Owner %RX64 emt=%RX64\n", pCritSect->s.Core.NativeThreadOwner, pVM->aCpus[VM_GET_VMCPUID(pVM)].hNativeThread));
...
     /* darn, someone raced in on us. */
-    Assert(pVM->aCpu[VM_GET_VMCPUID(pVM)].hNativeThread);
-    ASMAtomicXchgSize(&pCritSect->s.Core.NativeThreadOwner, pVM->aCpu[VM_GET_VMCPUID(pVM)].hNativeThread);
+    Assert(pVM->aCpus[VM_GET_VMCPUID(pVM)].hNativeThread);
+    ASMAtomicXchgSize(&pCritSect->s.Core.NativeThreadOwner, pVM->aCpus[VM_GET_VMCPUID(pVM)].hNativeThread);
     STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
...
     PVM pVM = pCritSect->s.CTX_SUFF(pVM);
     Assert(pVM);
-    return pCritSect->s.Core.NativeThreadOwner == pVM->aCpu[VM_GET_VMCPUID(pVM)].hNativeThread;
+    return pCritSect->s.Core.NativeThreadOwner == pVM->aCpus[VM_GET_VMCPUID(pVM)].hNativeThread;
 #endif
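
The ownership bookkeeping above records the winner's native thread handle and treats re-entry by that same thread as a nesting rather than contention. A simplified sketch of that logic; note the real PDMCritSectEnter wins the lock through a semaphore plus an atomic lock count and only then records the owner, whereas this sketch claims ownership directly with a compare-and-swap (GCC builtin standing in for the ASMAtomic* API):

    #include <stdint.h>
    #include <pthread.h>

    typedef struct MiniCritSect
    {
        volatile intptr_t NativeThreadOwner;   /* 0 when unowned */
        int32_t           cNestings;           /* only touched by the owner */
    } MiniCritSect;

    static int MiniCritSectTryEnter(MiniCritSect *pCritSect)
    {
        /* pthread_t cast to an integer for illustration; not portable everywhere. */
        intptr_t hSelf = (intptr_t)pthread_self();

        /* Nested? The owner field can only equal hSelf if we set it ourselves. */
        if (pCritSect->NativeThreadOwner == hSelf)
        {
            pCritSect->cNestings++;
            return 0;
        }

        /* Try to claim it: 0 (unowned) -> our handle, atomically. */
        if (__sync_bool_compare_and_swap(&pCritSect->NativeThreadOwner, 0, hSelf))
        {
            pCritSect->cNestings = 1;
            return 0;
        }
        return -1;   /* busy: owned by another thread (the real code blocks here) */
    }
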
trunk/src/VBox/VMM/VMMInternal.h (r13742 → r13796)

Whitespace only: an extra blank line after "typedef VMM *PVMM;", before the "VMMCPU Data (part of VMCPU)" comment.
trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp (r13789 → r13796)

     if (!VALID_PTR(pReq->pSession))
         return VERR_INVALID_POINTER;
-    if (    pReq->cCPUs == 0
-        ||  pReq->cCPUs > VMCPU_MAX_CPU_COUNT)
-        return VERR_INVALID_PARAMETER;
 
     /*
...
     AssertPtrReturn(ppVM, VERR_INVALID_POINTER);
     *ppVM = NULL;
+
+    if (    cCPUs == 0
+        ||  cCPUs > VMCPU_MAX_CPU_COUNT)
+        return VERR_INVALID_PARAMETER;
 
     RTNATIVETHREAD hEMT = RTThreadNativeSelf();
...
     /*
      * Allocate the shared VM structure and associated page array.
      */
-    const size_t cbVM = RT_OFFSETOF(VM, aCpu[cCPUs]);
+    const size_t cbVM = RT_UOFFSETOF(VM, aCpus[cCPUs]);
     const size_t cPages = RT_ALIGN(cbVM, PAGE_SIZE) >> PAGE_SHIFT;
     rc = RTR0MemObjAllocLow(&pGVM->gvmm.s.VMMemObj, cPages << PAGE_SHIFT, false /* fExecutable */);
...
     pVM->cbSelf = cbVM;
     pVM->cCPUs = cCPUs;
+    pVM->offVMCPU = RT_UOFFSETOF(VM, aCpus);
 
     rc = RTR0MemObjAllocPage(&pGVM->gvmm.s.VMPagesMemObj, cPages * sizeof(SUPPAGE), false /* fExecutable */);
...
     for (uint32_t i = 0; i < cCPUs; i++)
     {
-        pVM->aCpu[i].pVMR0 = pVM;
-        pVM->aCpu[i].pVMR3 = pVM->pVMR3;
+        pVM->aCpus[i].pVMR0 = pVM;
+        pVM->aCpus[i].pVMR3 = pVM->pVMR3;
     }

The cCPUs range check thus moves from the request wrapper into GVMMR0CreateVM itself, so direct callers get validated too.
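
Storing offVMCPU in the shared structure, set here in ring-0 where it is allocated and cross-checked by the new AssertRelease in VM.cpp above, lets code locate the VMCPU array from a raw pointer plus an offset without both contexts agreeing on the full layout at compile time. A toy illustration with simplified stand-in types:

    #include <stddef.h>
    #include <stdio.h>

    typedef struct MiniVMCPU { unsigned id; } MiniVMCPU;
    typedef struct MiniVM
    {
        unsigned  cCPUs;
        unsigned  offVMCPU;    /* byte offset of aCpus from the start of MiniVM */
        MiniVMCPU aCpus[1];
    } MiniVM;

    /* Find a VMCPU using only the base pointer and the stored offset. */
    static MiniVMCPU *GetVMCPU(MiniVM *pVM, unsigned idCPU)
    {
        return (MiniVMCPU *)((char *)pVM + pVM->offVMCPU) + idCPU;
    }

    int main(void)
    {
        MiniVM VM = { 1, (unsigned)offsetof(MiniVM, aCpus), { { 42 } } };
        printf("VCPU0 id=%u\n", GetVMCPU(&VM, 0)->id);   /* prints 42 */
        return 0;
    }
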
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp (r13542 → r13796)

One functional change; the rest of the hunks in this file only strip trailing whitespace from otherwise untouched lines:

     RTGCUINTPTR intInfo;
 
-    intInfo = (iGate == X86_XCPT_GP) ? X86_XCPT_DF : iGate;
+    intInfo = (iGate == X86_XCPT_GP) ? (uint32_t)X86_XCPT_DF : iGate;
     intInfo |= (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
     intInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
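
Why the lone (uint32_t) cast: in a ?: expression both arms are converted to a common type, and mixing an enum constant like X86_XCPT_DF with a differently typed iGate can change the result type or, in C++, draw an "enumeral and non-enumeral type in conditional expression" warning. Casting one arm pins the common type explicitly. A tiny illustration with assumed types (the real iGate/intInfo declarations live in HWVMXR0.cpp):

    #include <stdint.h>
    #include <stdio.h>

    enum { XCPT_DF = 8 };   /* stand-in for X86_XCPT_DF */

    int main(void)
    {
        uint32_t iGate = 13;
        /* Both arms are balanced to a common type; casting the enum constant
         * makes that type explicit and keeps C++ compilers quiet. */
        uint64_t intInfo = (iGate == 13) ? (uint32_t)XCPT_DF : iGate;
        printf("%llu\n", (unsigned long long)intInfo);   /* prints 8 */
        return 0;
    }
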
trunk/src/VBox/VMM/VMReq.cpp (r13782 → r13796)

     do
     {
-        pNext = pUVM->aCpu[i].vm.s.pReqs;
+        pNext = pUVM->aCpus[i].vm.s.pReqs;
         pReq->pNext = pNext;
-    } while (!ASMAtomicCmpXchgPtr((void * volatile *)&pUVM->aCpu[i].vm.s.pReqs, (void *)pReq, (void *)pNext));
+    } while (!ASMAtomicCmpXchgPtr((void * volatile *)&pUVM->aCpus[i].vm.s.pReqs, (void *)pReq, (void *)pNext));
...
     do
     {
-        pNext = pUVM->aCpu[idTarget].vm.s.pReqs;
+        pNext = pUVM->aCpus[idTarget].vm.s.pReqs;
         pReq->pNext = pNext;
-    } while (!ASMAtomicCmpXchgPtr((void * volatile *)&pUVM->aCpu[idTarget].vm.s.pReqs, (void *)pReq, (void *)pNext));
+    } while (!ASMAtomicCmpXchgPtr((void * volatile *)&pUVM->aCpus[idTarget].vm.s.pReqs, (void *)pReq, (void *)pNext));
...
         LogFlow(("VMR3ReqQueue: returns %Vrc\n", rc));
     }
-    else
-    if (    pReq->enmDest == VMREQDEST_ANY
-        &&  !pUVMCPU /* only EMT threads have a valid pointer stored in the TLS slot. */)
+    else if (    pReq->enmDest == VMREQDEST_ANY
+             &&  !pUVMCPU /* only EMT threads have a valid pointer stored in the TLS slot. */)
     {
         unsigned fFlags = ((VMREQ volatile *)pReq)->fFlags; /* volatile paranoia */
...
     while (rc <= VINF_SUCCESS)
     {
-        void *volatile *ppReqs;
-
         /*
          * Get pending requests.
          */
+        void *volatile *ppReqs;
         if (enmDest == VMREQDEST_ANY)
         {
...
         else
         {
-            ppReqs = (void * volatile *)&pUVM->aCpu[enmDest].vm.s.pReqs;
+            ppReqs = (void * volatile *)&pUVM->aCpus[enmDest].vm.s.pReqs;
             if (RT_LIKELY(pUVM->pVM))
                 VMCPU_FF_CLEAR(pUVM->pVM, enmDest, VM_FF_REQUEST);
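
The pReqs queues touched above are lock-free LIFO lists: read the current head, link the new request in front of it, and publish with a compare-and-swap, retrying if another thread won the race. The same push distilled into a self-contained sketch, with a GCC builtin standing in for ASMAtomicCmpXchgPtr:

    #include <stddef.h>

    typedef struct Req
    {
        struct Req *pNext;
        int         iWhat;
    } Req;

    /* Push pReq onto the list at *ppHead; safe against concurrent pushers. */
    static void ReqPush(Req *volatile *ppHead, Req *pReq)
    {
        Req *pNext;
        do
        {
            pNext = *ppHead;        /* snapshot the current head */
            pReq->pNext = pNext;    /* link ourselves in front of it */
        } while (!__sync_bool_compare_and_swap(ppHead, pNext, pReq));
    }

    int main(void)
    {
        static Req *volatile s_pHead = NULL;
        Req r1 = { NULL, 1 }, r2 = { NULL, 2 };
        ReqPush(&s_pHead, &r1);
        ReqPush(&s_pHead, &r2);     /* list is now r2 -> r1 */
        return 0;
    }

If the CAS fails, some other thread changed the head between the snapshot and the publish; the loop simply re-reads and retries, so no lock is ever taken.
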
trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp (r13751 → r13796)

     CHECK_MEMBER_ALIGNMENT(VM, rem.s.StatsInQEMU, 8);
     CHECK_MEMBER_ALIGNMENT(VM, rem.s.Env, 32);
-    CHECK_MEMBER_ALIGNMENT(VM, aCpu, 64);
+    CHECK_MEMBER_ALIGNMENT(VM, aCpus, 64);
 
     /* vmcpu */
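
The 64 here matches the x86 cache-line size, so each VMCPU starts on its own line and the EMTs don't false-share. A stripped-down version of the testcase's alignment check, with a simplified stand-in layout (the real CHECK_MEMBER_ALIGNMENT macro differs in detail):

    #include <stddef.h>
    #include <stdio.h>

    /* Flag members whose offset is not a multiple of the required alignment;
     * silent on success, like the real testcase. */
    #define CHECK_MEMBER_ALIGNMENT(type, member, align) \
        do { \
            if (offsetof(type, member) & ((align) - 1)) \
                printf("tst: error! %s::%s offset=%zu, expected %u-byte alignment\n", \
                       #type, #member, offsetof(type, member), (unsigned)(align)); \
        } while (0)

    typedef struct TstVM
    {
        char   abHeader[128];           /* multiple of 64, so aCpus is aligned */
        struct { char ab[64]; } aCpus[1];
    } TstVM;

    int main(void)
    {
        CHECK_MEMBER_ALIGNMENT(TstVM, aCpus, 64);
        return 0;
    }
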