Changeset 106920 in vbox for trunk/src/VBox/VMM

- Timestamp: Nov 11, 2024, 1:09:38 AM
- svn:sync-xref-src-repo-rev: 165846
- Location: trunk/src/VBox/VMM
- Files: 8 edited
trunk/src/VBox/VMM/Config.kmk (r106061 -> r106920)

@@ -42 +42 @@
 # Can be prepended to by setting it in LocalConfig.kmk
 VMM_COMMON_DEFS := USING_VMM_COMMON_DEFS
+ifdef VBOX_WITH_R0_MODULES
+ VMM_COMMON_DEFS += VBOX_WITH_R0_MODULES
+ ifdef VBOX_WITH_MINIMAL_R0
+  VMM_COMMON_DEFS += VBOX_WITH_MINIMAL_R0
+ endif
+endif
 ifdef VBOX_WITH_HWVIRT
 VMM_COMMON_DEFS += VBOX_WITH_HWVIRT
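The hunk above only adds the new symbols to the VMM_COMMON_DEFS list; how a build target consumes that list is not part of this changeset. As a rough, hypothetical sketch (the target name below is made up, only VMM_COMMON_DEFS and the VBOX_WITH_* names come from the hunk above):

    # Illustration only: a consuming kmk target forwards the list as
    # preprocessor defines, so C/C++ sources can test the new option.
    MyVMMComponent_DEFS += $(VMM_COMMON_DEFS)
    # With VBOX_WITH_MINIMAL_R0 in the list, sources compile out the full
    # ring-0 VMM using the guard pattern seen throughout this changeset:
    #     #ifndef VBOX_WITH_MINIMAL_R0
    #     ...full ring-0 code...
    #     #endif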
trunk/src/VBox/VMM/Makefile.kmk (r106894 -> r106920)

@@ -808 +808 @@
 $(VMMR0Imp_0_OUTDIR)/VMMR0.def: $(VMMR0Imp_DEFPATH)/VMMR0/VMMR0.def | $$(dir $$@)
 	$(call KB_FN_AUTO_CMD_DEPS_COMMANDS)
-ifeq ($(VBOX_LDR_FMT),lx)
-	$(SED) \
-		-e '/not-os2/d' \
-		-e '/not-amd64/d' \
-		-e 's/^[ \t][ \t]*\([a-zA-Z]\)/ _\1/' \
-		-e 's/[ \t]DATA[ \t]*/ /' \
-		--output $@ $(VMMR0Imp_DEFPATH)/VMMR0/VMMR0.def
-	$(APPEND) "$@" ""
-	$(APPEND) "$@" " ___ehInit"
-else
 	$(SED) \
 		-e '/not-win/d' \
 		-e '/not-$(KBUILD_TARGET_ARCH)/d' \
 		--output $@ $(VMMR0Imp_DEFPATH)/VMMR0/VMMR0.def
-endif
 endif # R0: pe + lx

@@ -875 +864 @@
 VMMR0_ASFLAGS.x86 := -Werror

-VMMR0_SDKS        = VBoxSoftFloatR0
+VMMR0_SDKS.amd64  = VBoxSoftFloatR0

 VMMR0_INCS = \
@@ -883 +872 @@
 VMMR0_SOURCES = \
 	VBoxVMM.d \
-	VMMR0/CPUMR0.cpp \
-	VMMR0/CPUMR0A.asm \
-	VMMR0/DBGFR0.cpp \
-	VMMR0/DBGFR0Bp.cpp \
-	$(if-expr defined(VBOX_WITH_DBGF_TRACING), VMMR0/DBGFR0Tracer.cpp,) \
-	VMMR0/GIMR0.cpp \
-	VMMR0/GIMR0Hv.cpp \
-	VMMR0/GMMR0.cpp \
 	VMMR0/GVMMR0.cpp \
-	VMMR0/EMR0.cpp \
-	VMMR0/HMR0.cpp \
-	VMMR0/HMR0A.asm \
-	VMMR0/HMR0UtilA.asm \
-	VMMR0/HMVMXR0.cpp \
-	VMMR0/HMSVMR0.cpp \
-	VMMR0/IEMR0.cpp \
-	VMMR0/IOMR0.cpp \
-	VMMR0/IOMR0IoPort.cpp \
-	VMMR0/IOMR0Mmio.cpp \
-	VMMR0/PDMR0Device.cpp \
-	VMMR0/PDMR0DevHlp.cpp \
-	$(if-expr defined(VBOX_WITH_DBGF_TRACING), VMMR0/PDMR0DevHlpTracing.cpp,) \
-	VMMR0/PDMR0Driver.cpp \
-	VMMR0/PDMR0Queue.cpp \
-	VMMR0/PGMR0.cpp \
-	VMMR0/PGMR0Pool.cpp \
-	VMMR0/PGMR0SharedPage.cpp \
-	VMMR0/TMR0.cpp \
 	VMMR0/VMMR0.cpp \
-	VMMRZ/CPUMRZ.cpp \
-	VMMRZ/CPUMRZA.asm \
-	VMMRZ/VMMRZ.cpp \
-	VMMAll/APICAll.cpp \
-	VMMAll/CPUMAllCpuId.cpp \
-	VMMAll/CPUMAllRegs.cpp \
-	VMMAll/CPUMAllMsrs.cpp \
-	VMMAll/DBGFAll.cpp \
-	VMMAll/DBGFAllBp.cpp \
-	$(if-expr defined(VBOX_WITH_DBGF_TRACING), VMMAll/DBGFAllTracer.cpp,) \
-	VMMAll/EMAll.cpp \
-	VMMAll/GCMAll.cpp \
-	VMMAll/GIMAll.cpp \
-	VMMAll/GIMAllHv.cpp \
-	VMMAll/GIMAllKvm.cpp \
-	VMMAll/HMAll.cpp \
-	VMMAll/HMSVMAll.cpp \
-	VMMAll/HMVMXAll.cpp \
-	VMMAll/IEMAll.cpp \
-	VMMAll/IEMAllIntprTables1.cpp \
-	VMMAll/IEMAllIntprTables2.cpp \
-	VMMAll/IEMAllIntprTables3.cpp \
-	VMMAll/IEMAllIntprTables4.cpp \
-	$(if-expr !defined(IEM_WITHOUT_ASSEMBLY),VMMAll/IEMAllAImpl.asm,) \
-	VMMAll/IEMAllAImplC.cpp \
-	VMMAll/IEMAllCImpl.cpp \
-	VMMAll/IEMAllCImplSvmInstr.cpp \
-	VMMAll/IEMAllCImplVmxInstr.cpp \
-	VMMAll/IEMAllDbg.cpp \
-	VMMAll/IOMAll.cpp \
-	VMMAll/IOMAllMmioNew.cpp \
-	VMMAll/MMAll.cpp \
-	VMMAll/NEMAll.cpp \
-	VMMAll/PDMAll.cpp \
-	VMMAll/PDMAllCritSect.cpp \
-	VMMAll/PDMAllCritSectRw.cpp \
-	VMMAll/PDMAllCritSectBoth.cpp \
-	$(if-expr defined(VBOX_WITH_IOMMU_AMD) || defined(VBOX_WITH_IOMMU_INTEL), VMMAll/PDMAllIommu.cpp,) \
-	VMMAll/PDMAllQueue.cpp \
-	VMMAll/PDMAllTask.cpp \
-	VMMAll/PGMAll.cpp \
-	VMMAll/PGMAllHandler.cpp \
-	VMMAll/PGMAllPhys.cpp \
-	VMMAll/PGMAllPool.cpp \
-	VMMAll/SELMAll.cpp \
-	VMMAll/TMAll.cpp \
-	VMMAll/TMAllCpu.cpp \
-	VMMAll/TMAllReal.cpp \
-	VMMAll/TMAllVirtual.cpp \
-	VMMAll/TRPMAll.cpp \
-	VMMAll/VMAll.cpp \
 	VMMAll/VMMAll.cpp \
-	VMMAll/VMMAllA.asm
-if1of ($(VBOX_LDR_FMT), pe lx)
-	VMMR0_SOURCES += $(VMMR0Imp_0_OUTDIR)/VMMR0.def
-endif
-ifdef VBOX_WITH_TRIPLE_FAULT_HACK
-	VMMR0_SOURCES += \
-		VMMR0/VMMR0TripleFaultHack.cpp \
-		VMMR0/VMMR0TripleFaultHackA.asm
-endif
-ifdef VBOX_WITH_NETSHAPER
-	VMMR0_SOURCES += \
-		VMMAll/PDMAllNetShaper.cpp
-endif
+	VMMRZ/VMMRZ.cpp
+
 VMMR0_SOURCES.amd64 = \
 	VMMR0/VMMR0JmpA-amd64.asm
 VMMR0_SOURCES.x86 = \
 	VMMR0/VMMR0JmpA-x86.asm
+
+VMMR0_SOURCES.win = \
+	$(VMMR0Imp_0_OUTDIR)/VMMR0.def
+
+ifndef VBOX_WITH_MINIMAL_R0
+VMMR0_SOURCES += \
+	VMMR0/CPUMR0.cpp \
+	VMMR0/DBGFR0.cpp \
+	VMMR0/DBGFR0Bp.cpp \
+	$(if-expr defined(VBOX_WITH_DBGF_TRACING), VMMR0/DBGFR0Tracer.cpp,) \
+	VMMR0/GIMR0.cpp \
+	VMMR0/GIMR0Hv.cpp \
+	VMMR0/GMMR0.cpp \
+	VMMR0/EMR0.cpp \
+	VMMR0/HMR0.cpp \
+	VMMR0/HMVMXR0.cpp \
+	VMMR0/HMSVMR0.cpp \
+	VMMR0/IEMR0.cpp \
+	VMMR0/IOMR0.cpp \
+	VMMR0/IOMR0IoPort.cpp \
+	VMMR0/IOMR0Mmio.cpp \
+	VMMR0/PDMR0Device.cpp \
+	VMMR0/PDMR0DevHlp.cpp \
+	$(if-expr defined(VBOX_WITH_DBGF_TRACING), VMMR0/PDMR0DevHlpTracing.cpp,) \
+	VMMR0/PDMR0Driver.cpp \
+	VMMR0/PDMR0Queue.cpp \
+	VMMR0/PGMR0.cpp \
+	VMMR0/PGMR0Pool.cpp \
+	VMMR0/PGMR0SharedPage.cpp \
+	VMMR0/TMR0.cpp \
+	VMMRZ/CPUMRZ.cpp \
+	VMMAll/APICAll.cpp \
+	VMMAll/CPUMAllCpuId.cpp \
+	VMMAll/CPUMAllRegs.cpp \
+	VMMAll/CPUMAllMsrs.cpp \
+	VMMAll/DBGFAll.cpp \
+	VMMAll/DBGFAllBp.cpp \
+	$(if-expr defined(VBOX_WITH_DBGF_TRACING), VMMAll/DBGFAllTracer.cpp,) \
+	VMMAll/EMAll.cpp \
+	VMMAll/GCMAll.cpp \
+	VMMAll/GIMAll.cpp \
+	VMMAll/GIMAllHv.cpp \
+	VMMAll/GIMAllKvm.cpp \
+	VMMAll/HMAll.cpp \
+	VMMAll/HMSVMAll.cpp \
+	VMMAll/HMVMXAll.cpp \
+	VMMAll/IEMAll.cpp \
+	VMMAll/IEMAllIntprTables1.cpp \
+	VMMAll/IEMAllIntprTables2.cpp \
+	VMMAll/IEMAllIntprTables3.cpp \
+	VMMAll/IEMAllIntprTables4.cpp \
+	VMMAll/IEMAllAImplC.cpp \
+	VMMAll/IEMAllCImpl.cpp \
+	VMMAll/IEMAllCImplSvmInstr.cpp \
+	VMMAll/IEMAllCImplVmxInstr.cpp \
+	VMMAll/IEMAllDbg.cpp \
+	VMMAll/IOMAll.cpp \
+	VMMAll/IOMAllMmioNew.cpp \
+	VMMAll/MMAll.cpp \
+	VMMAll/NEMAll.cpp \
+	VMMAll/PDMAll.cpp \
+	VMMAll/PDMAllCritSect.cpp \
+	VMMAll/PDMAllCritSectRw.cpp \
+	VMMAll/PDMAllCritSectBoth.cpp \
+	$(if-expr defined(VBOX_WITH_IOMMU_AMD) || defined(VBOX_WITH_IOMMU_INTEL), VMMAll/PDMAllIommu.cpp,) \
+	VMMAll/PDMAllQueue.cpp \
+	VMMAll/PDMAllTask.cpp \
+	VMMAll/PGMAll.cpp \
+	VMMAll/PGMAllHandler.cpp \
+	VMMAll/PGMAllPhys.cpp \
+	VMMAll/PGMAllPool.cpp \
+	VMMAll/SELMAll.cpp \
+	VMMAll/TMAll.cpp \
+	VMMAll/TMAllCpu.cpp \
+	VMMAll/TMAllReal.cpp \
+	VMMAll/TMAllVirtual.cpp \
+	VMMAll/TRPMAll.cpp \
+	VMMAll/VMAll.cpp
+
+VMMR0_SOURCES.amd64 += \
+	VMMR0/CPUMR0A.asm \
+	VMMRZ/CPUMRZA.asm \
+	VMMR0/HMR0A.asm \
+	VMMR0/HMR0UtilA.asm \
+	$(if-expr !defined(IEM_WITHOUT_ASSEMBLY),VMMAll/IEMAllAImpl.asm,) \
+	VMMAll/VMMAllA.asm
+
+ifdef VBOX_WITH_TRIPLE_FAULT_HACK
+VMMR0_SOURCES.amd64 += \
+	VMMR0/VMMR0TripleFaultHack.cpp \
+	VMMR0/VMMR0TripleFaultHackA.asm
+endif
+ifdef VBOX_WITH_NETSHAPER
+VMMR0_SOURCES += \
+	VMMAll/PDMAllNetShaper.cpp
+endif
+endif # !VBOX_WITH_MINIMAL_R0

 VMMR0_LIBS = \
trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp (r106061 -> r106920)

@@ -78 +78 @@

 #include <iprt/asm.h>
-#include <iprt/asm-amd64-x86.h>
+#ifdef RT_ARCH_AMD64
+# include <iprt/asm-amd64-x86.h>
+#endif
 #include <iprt/critsect.h>
 #include <iprt/mem.h>
@@ -102 +104 @@
 *   Defined Constants And Macros                                            *
 *****************************************************************************/
-#if defined(RT_OS_LINUX) || defined(RT_OS_SOLARIS) || defined(RT_OS_WINDOWS) || defined(DOXYGEN_RUNNING)
+#if (defined(RT_OS_LINUX) || defined(RT_OS_SOLARIS) || defined(RT_OS_WINDOWS) || defined(DOXYGEN_RUNNING)) \
+ && !defined(VBOX_WITH_MINIMAL_R0)
 /** Define this to enable the periodic preemption timer. */
 # define GVMM_SCHED_WITH_PPT
@@ -914 +917 @@
                 gvmmR0InitPerVMData(pGVM, iHandle, cCpus, pSession);
                 pGVM->gvmm.s.VMMemObj = hVMMemObj;
+#ifndef VBOX_WITH_MINIMAL_R0
                 rc = GMMR0InitPerVMData(pGVM);
                 int rc2 = PGMR0InitPerVMData(pGVM, hVMMemObj);
+#else
+                int rc2 = VINF_SUCCESS;
+#endif
                 int rc3 = VMMR0InitPerVMData(pGVM);
+#ifndef VBOX_WITH_MINIMAL_R0
                 CPUMR0InitPerVMData(pGVM);
                 DBGFR0InitPerVMData(pGVM);
@@ -922 +930 @@
                 IOMR0InitPerVMData(pGVM);
                 TMR0InitPerVMData(pGVM);
+#endif
                 if (RT_SUCCESS(rc) && RT_SUCCESS(rc2) && RT_SUCCESS(rc3))
                 {
@@ -1030 +1039 @@
                         gvmmR0CreateDestroyUnlock(pGVMM);

+#ifndef VBOX_WITH_MINIMAL_R0
                         CPUMR0RegisterVCpuThread(&pGVM->aCpus[0]);
+#endif

                         *ppGVM = pGVM;
@@ -1353 +1364 @@
         }

+#ifndef VBOX_WITH_MINIMAL_R0
         GMMR0CleanupVM(pGVM);
-#ifdef VBOX_WITH_NEM_R0
+# ifdef VBOX_WITH_NEM_R0
         NEMR0CleanupVM(pGVM);
-#endif
+# endif
         PDMR0CleanupVM(pGVM);
         IOMR0CleanupVM(pGVM);
@@ -1362 +1374 @@
         PGMR0CleanupVM(pGVM);
         TMR0CleanupVM(pGVM);
+#endif
         VMMR0CleanupVM(pGVM);
     }
@@ -1600 +1613 @@
     if (RT_SUCCESS(rc))
     {
+#ifndef VBOX_WITH_MINIMAL_R0
         CPUMR0RegisterVCpuThread(pGVCpu);
+#endif

 #ifdef GVMM_SCHED_WITH_HR_WAKE_UP_TIMER
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp (r106061 -> r106920)

@@ -63 +63 @@
 #include <VBox/log.h>

-#include <iprt/asm-amd64-x86.h>
+#ifdef RT_ARCH_AMD64
+# include <iprt/asm-amd64-x86.h>
+#endif
 #include <iprt/assert.h>
 #include <iprt/crc.h>
@@ -139 +141 @@
 DECLEXPORT(int) ModuleInit(void *hMod)
 {
+    RT_NOREF_PV(hMod);
+
 #ifdef VBOX_WITH_DTRACE_R0
     /*
@@ -169 +173 @@
     if (RT_SUCCESS(rc))
     {
+#ifndef VBOX_WITH_MINIMAL_R0
         rc = GMMR0Init();
         if (RT_SUCCESS(rc))
@@ -180 +185 @@
             if (RT_SUCCESS(rc))
             {
+#endif /* !VBOX_WITH_MINIMAL_R0 */
                 rc = IntNetR0Init();
                 if (RT_SUCCESS(rc))
                 {
-#ifdef VBOX_WITH_PCI_PASSTHROUGH
+#ifndef VBOX_WITH_MINIMAL_R0
+# ifdef VBOX_WITH_PCI_PASSTHROUGH
                     rc = PciRawR0Init();
-#endif
+# endif
                     if (RT_SUCCESS(rc))
                     {
@@ -191 +198 @@
                         if (RT_SUCCESS(rc))
                         {
-#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
+# ifdef VBOX_WITH_TRIPLE_FAULT_HACK
                             rc = vmmR0TripleFaultHackInit();
                             if (RT_SUCCESS(rc))
-#endif
+# endif
                             {
-#ifdef VBOX_WITH_NEM_R0
+# ifdef VBOX_WITH_NEM_R0
                                 rc = NEMR0Init();
                                 if (RT_SUCCESS(rc))
-#endif
+# endif
+#endif /* !VBOX_WITH_MINIMAL_R0 */
                                 {
                                     LogFlow(("ModuleInit: returns success\n"));
                                     return VINF_SUCCESS;
                                 }
+
+                                /*
+                                 * Bail out.
+                                 */
+#ifndef VBOX_WITH_MINIMAL_R0
                             }
-
-                            /*
-                             * Bail out.
-                             */
-#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
+# ifdef VBOX_WITH_TRIPLE_FAULT_HACK
                             vmmR0TripleFaultHackTerm();
-#endif
+# endif
                         }
                         else
                             LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
-#ifdef VBOX_WITH_PCI_PASSTHROUGH
+# ifdef VBOX_WITH_PCI_PASSTHROUGH
                         PciRawR0Term();
-#endif
+# endif
                     }
                     else
                         LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
                     IntNetR0Term();
+#endif /* !VBOX_WITH_MINIMAL_R0 */
                 }
                 else
                     LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
+#ifndef VBOX_WITH_MINIMAL_R0
                 PGMDeregisterStringFormatTypes();
             }
@@ -237 +248 @@
         else
             LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
+#endif /* !VBOX_WITH_MINIMAL_R0 */
         GVMMR0Term();
     }
@@ -259 +271 @@
 DECLEXPORT(void) ModuleTerm(void *hMod)
 {
-    NOREF(hMod);
     LogFlow(("ModuleTerm:\n"));
+    RT_NOREF_PV(hMod);

+#ifndef VBOX_WITH_MINIMAL_R0
     /*
      * Terminate the CPUM module (Local APIC cleanup).
      */
     CPUMR0ModuleTerm();
+#endif

     /*
@@ -272 +286 @@
     IntNetR0Term();

+#ifndef VBOX_WITH_MINIMAL_R0
     /*
      * PGM (Darwin), HM and PciRaw global cleanup.
      */
-#ifdef VBOX_WITH_PCI_PASSTHROUGH
+# ifdef VBOX_WITH_PCI_PASSTHROUGH
     PciRawR0Term();
-#endif
+# endif
     PGMDeregisterStringFormatTypes();
     HMR0Term();
-#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
+# ifdef VBOX_WITH_TRIPLE_FAULT_HACK
     vmmR0TripleFaultHackTerm();
+# endif
+# ifdef VBOX_WITH_NEM_R0
+    NEMR0Term();
+# endif
+#endif /* !VBOX_WITH_MINIMAL_R0 */
+
+    /*
+     * Destroy the GMM and GVMM instances.
+     */
+#ifndef VBOX_WITH_MINIMAL_R0
+    GMMR0Term();
 #endif
-#ifdef VBOX_WITH_NEM_R0
-    NEMR0Term();
-#endif
-
-    /*
-     * Destroy the GMM and GVMM instances.
-     */
-    GMMR0Term();
     GVMMR0Term();

@@ -448 +466 @@
     if (RT_SUCCESS(rc))
     {
+#ifndef VBOX_WITH_MINIMAL_R0
         /*
          * Init HM, CPUM and PGM.
@@ -469 +488 @@
         if (RT_SUCCESS(rc))
         {
-#ifdef VBOX_WITH_PCI_PASSTHROUGH
+# ifdef VBOX_WITH_PCI_PASSTHROUGH
             rc = PciRawR0InitVM(pGVM);
-#endif
+# endif
             if (RT_SUCCESS(rc))
             {
@@ -477 +496 @@
                 if (RT_SUCCESS(rc))
                 {
+#endif /* !VBOX_WITH_MINIMAL_R0 */
                     GVMMR0DoneInitVM(pGVM);
+#ifndef VBOX_WITH_MINIMAL_R0
                     PGMR0DoneInitVM(pGVM);
+#endif

                     /*
@@ -488 +510 @@

                     /* bail out*/
+#ifndef VBOX_WITH_MINIMAL_R0
                     //GIMR0TermVM(pGVM);
                 }
-#ifdef VBOX_WITH_PCI_PASSTHROUGH
+# ifdef VBOX_WITH_PCI_PASSTHROUGH
                 PciRawR0TermVM(pGVM);
-#endif
+# endif
             }
         }
@@ -501 +524 @@
             HMR0TermVM(pGVM);
         }
+#endif /* !VBOX_WITH_MINIMAL_R0 */
     }
@@ -566 +590 @@
     }

-#ifdef VBOX_WITH_PCI_PASSTHROUGH
+#ifndef VBOX_WITH_MINIMAL_R0
+# ifdef VBOX_WITH_PCI_PASSTHROUGH
     PciRawR0TermVM(pGVM);
+# endif
 #endif

@@ -575 +601 @@
     if (GVMMR0DoingTermVM(pGVM))
     {
+#ifndef VBOX_WITH_MINIMAL_R0
         GIMR0TermVM(pGVM);
@@ -580 +607 @@
          * here to make sure we don't leak any shared pages if we crash... */
         HMR0TermVM(pGVM);
+#endif
     }

@@ -620 +648 @@
 }

+
+#ifndef VBOX_WITH_MINIMAL_R0

 /**
@@ -966 +996 @@
 }

+#endif /* !VBOX_WITH_MINIMAL_R0 */

 /**
@@ -1017 +1048 @@
             VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);

+#ifndef VBOX_WITH_MINIMAL_R0
             /* Invoke the HM-specific thread-context callback. */
             HMR0ThreadCtxCallback(enmEvent, pvUser);
+#endif

             /* Restore preemption. */
@@ -1027 +1060 @@
         case RTTHREADCTXEVENT_OUT:
         {
+#ifndef VBOX_WITH_MINIMAL_R0
             /* Invoke the HM-specific thread-context callback. */
             HMR0ThreadCtxCallback(enmEvent, pvUser);
+#endif

             /*
@@ -1040 +1075 @@

         default:
+#ifndef VBOX_WITH_MINIMAL_R0
             /* Invoke the HM-specific thread-context callback. */
             HMR0ThreadCtxCallback(enmEvent, pvUser);
+#endif
             break;
     }
@@ -1063 +1100 @@
     Assert(pVCpu->vmmr0.s.hCtxHook == NIL_RTTHREADCTXHOOK);

-#if 1 /* To disable this stuff change to zero. */
+#ifndef VBOX_WITH_MINIMAL_R0
+
+# if 1 /* To disable this stuff change to zero. */
     int rc = RTThreadCtxHookCreate(&pVCpu->vmmr0.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
     if (RT_SUCCESS(rc))
@@ -1070 +1109 @@
         return rc;
     }
-#else
+# else
     RT_NOREF(vmmR0ThreadCtxCallback);
     int rc = VERR_NOT_SUPPORTED;
+# endif
 #endif

     pVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK;
     pVCpu->pGVM->vmm.s.fIsUsingContextHooks = false;
-    if (rc == VERR_NOT_SUPPORTED)
-        return VINF_SUCCESS;
-
-    LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
-    return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
+#ifndef VBOX_WITH_MINIMAL_R0
+    if (rc != VERR_NOT_SUPPORTED) /* Just ignore it, we can live without context hooks. */
+        LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
+#endif
+    return VINF_SUCCESS;
 }
@@ -1093 +1133 @@
 VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPUCC pVCpu)
 {
+#ifndef VBOX_WITH_MINIMAL_R0
     int rc = RTThreadCtxHookDestroy(pVCpu->vmmr0.s.hCtxHook);
     AssertRC(rc);
+#endif
     pVCpu->vmmr0.s.hCtxHook = NIL_RTTHREADCTXHOOK;
 }

+#ifndef VBOX_WITH_MINIMAL_R0

 /**
@@ -1161 +1204 @@
 }

+#endif /* !VBOX_WITH_MINIMAL_R0 */

 /**
@@ -1380 +1424 @@
     switch (enmOperation)
     {
+#ifndef VBOX_WITH_MINIMAL_R0
         /*
         * Run guest code using the available hardware acceleration technology.
@@ -1420 +1465 @@
             GVMMR0SchedUpdatePeriodicPreemptionTimer(pGVM, pGVCpu->idHostCpu, TMCalcHostTimerFrequency(pGVM, pGVCpu));

-#ifdef VMM_R0_TOUCH_FPU
+# ifdef VMM_R0_TOUCH_FPU
             /*
              * Make sure we've got the FPU state loaded so and we don't need to clear
@@ -1427 +1472 @@
              */
             CPUMR0TouchHostFpu();
-#endif
+# endif
             int rc;
             bool fPreemptRestored = false;
@@ -1479 +1524 @@
                 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
             }
-#if 0
+# if 0
             /** @todo Get rid of this. HM shouldn't disable the context hook. */
             else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pGVCpu)))
@@ -1488 +1533 @@
                 rc = VERR_VMM_CONTEXT_HOOK_STILL_ENABLED;
             }
-#endif
+# endif

             VMMRZCallRing3Disable(pGVCpu); /* Lazy bird: Simpler just disabling it again... */
@@ -1534 +1579 @@
             /* Fire dtrace probe and collect statistics. */
             VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
-#ifdef VBOX_WITH_STATISTICS
+# ifdef VBOX_WITH_STATISTICS
             vmmR0RecordRC(pGVM, pGVCpu, rc);
-#endif
+# endif
             VMMRZCallRing3Enable(pGVCpu);
@@ -1585 +1630 @@
         }

-#ifdef VBOX_WITH_NEM_R0
-# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
+# ifdef VBOX_WITH_NEM_R0
+#  if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
         case VMMR0_DO_NEM_RUN:
         {
@@ -1592 +1637 @@
              * Setup the longjmp machinery and execute guest code (calls NEMR0RunGuestCode).
              */
-#  ifdef VBOXSTRICTRC_STRICT_ENABLED
+#   ifdef VBOXSTRICTRC_STRICT_ENABLED
             int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmmr0.s.AssertJmpBuf, (PFNVMMR0SETJMP2)NEMR0RunGuestCode, pGVM, idCpu);
-#  else
+#   else
             int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmmr0.s.AssertJmpBuf, NEMR0RunGuestCode, pGVM, idCpu);
-#  endif
+#   endif
             STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
@@ -1605 +1650 @@
              */
             VBOXVMM_R0_VMM_RETURN_TO_RING3_NEM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
-#  ifdef VBOX_WITH_STATISTICS
+#   ifdef VBOX_WITH_STATISTICS
             vmmR0RecordRC(pGVM, pGVCpu, rc);
+#   endif
+            break;
+        }
 #  endif
-            break;
-        }
 # endif
-#endif
+
+#endif /* !VBOX_WITH_MINIMAL_R0 */

         /*
@@ -1867 +1914 @@
             break;

+#ifndef VBOX_WITH_MINIMAL_R0
+
         /*
         * Attempt to enable hm mode and check the current setting.
@@ -2028 +2077 @@
             break;

-#ifdef VBOX_WITH_PAGE_SHARING
+# ifdef VBOX_WITH_PAGE_SHARING
         case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
         {
@@ -2039 +2088 @@
             break;
         }
-#endif
+# endif

-#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
+# if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
         case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
             if (u64Arg)
@@ -2047 +2096 @@
                 rc = GMMR0FindDuplicatePageReq(pGVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
             break;
-#endif
+# endif

         case VMMR0_DO_GMM_QUERY_STATISTICS:
@@ -2060 +2109 @@
             rc = GMMR0ResetStatisticsReq(pGVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
             break;
+
+#endif /* !VBOX_WITH_MINIMAL_R0 */

         /*
@@ -2088 +2139 @@
         }

+#ifndef VBOX_WITH_MINIMAL_R0
         /*
         * PDM Wrappers.
@@ -2131 +2183 @@
             break;
         }
+#endif /* !VBOX_WITH_MINIMAL_R0 */

         /*
@@ -2193 +2246 @@
             break;

-#if 0 //def VBOX_WITH_PCI_PASSTHROUGH
+#if 0 //defined(VBOX_WITH_PCI_PASSTHROUGH) && !defined(VBOX_WITH_MINIMAL_R0)
         /*
         * Requests to host PCI driver service.
@@ -2204 +2257 @@
 #endif

+#ifndef VBOX_WITH_MINIMAL_R0
+
         /*
         * NEM requests.
         */
-#ifdef VBOX_WITH_NEM_R0
-# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
+# ifdef VBOX_WITH_NEM_R0
+#  if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
         case VMMR0_DO_NEM_INIT_VM:
             if (u64Arg || pReqHdr || idCpu != 0)
@@ -2263 +2318 @@
             break;

-# if 1 && defined(DEBUG_bird)
+#   if 1 && defined(DEBUG_bird)
         case VMMR0_DO_NEM_EXPERIMENT:
             if (pReqHdr)
@@ -2269 +2324 @@
             rc = NEMR0DoExperiment(pGVM, idCpu, u64Arg);
             break;
-#  endif
-# endif
-#endif
+#   endif
+#  endif
+# endif

         /*
@@ -2321 +2376 @@
         * DBGF requests.
         */
-#ifdef VBOX_WITH_DBGF_TRACING
+# ifdef VBOX_WITH_DBGF_TRACING
         case VMMR0_DO_DBGF_TRACER_CREATE:
         {
@@ -2334 +2389 @@
             if (!pReqHdr || u64Arg)
                 return VERR_INVALID_PARAMETER;
-# if 0 /** @todo */
+#  if 0 /** @todo */
             rc = DBGFR0TracerGenCallReqHandler(pGVM, (PDBGFTRACERGENCALLREQ)pReqHdr, idCpu);
-# else
+#  else
             rc = VERR_NOT_IMPLEMENTED;
+#  endif
+            break;
+        }
 # endif
-            break;
-        }
-#endif

         case VMMR0_DO_DBGF_BP_INIT:
@@ -2394 +2449 @@
             break;
         }
+
+#endif /* n!VBOX_WITH_MINIMAL_R0 */

         /*
@@ -2421 +2478 @@


+#ifndef RT_ARCH_ARM64 /** @todo port vmmR0CallRing3SetJmpEx to ARM64 */
 /**
 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
@@ -2437 +2495 @@
                                pGVCpu->vmmr0.s.pSession);
 }
+#endif

@@ -2456 +2515 @@
                               PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
 {
+#ifndef RT_ARCH_ARM64 /** @todo port vmmR0CallRing3SetJmpEx to ARM64 - see RTAssertShouldPanic */
     /*
     * Requests that should only happen on the EMT thread will be
@@ -2488 +2548 @@
         return VERR_VM_THREAD_NOT_EMT;
     }
+#else
+    RT_NOREF(pVM);
+#endif
     return vmmR0EntryExWorker(pGVM, idCpu, enmOperation, pReq, u64Arg, pSession);
 }
@@ -3191 +3254 @@
 static bool vmmR0LoggerFlushInnerToParent(PVMMR0PERVCPULOGGER pR0Log, PRTLOGBUFFERDESC pBufDesc)
 {
+#ifdef RT_ARCH_AMD64
     uint32_t const cbToFlush = pBufDesc->offBuf;
     if (pR0Log->fFlushToParentVmmDbg)
@@ -3196 +3260 @@
     if (pR0Log->fFlushToParentVmmRel)
         RTLogWriteVmm(pBufDesc->pchBuf, cbToFlush, true /*fRelease*/);
+#else
+    RT_NOREF(pR0Log, pBufDesc);
+#endif
     return true;
 }
@@ -3622 +3689 @@
 DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
 {
-#if 0
+#ifdef RT_ARCH_ARM64 /** @todo port vmmR0CallRing3SetJmpEx/vmmR0CallRing3LongJmp to ARM64 */
     return true;
 #else
trunk/src/VBox/VMM/VMMR0/VMMR0.def (r106061 -> r106920)

@@ -30 +30 @@

 ; code
-    GIMGetMmio2Regions
-    PDMQueueAlloc
-    PDMQueueInsert
-    PGMHandlerPhysicalPageTempOff
-    PGMShwMakePageWritable
-    PGMPhysSimpleWriteGCPhys
-    PGMPhysSimpleReadGCPtr
-    PGMPhysSimpleWriteGCPtr
-    PGMPhysReadGCPtr
-    PGMPhysWriteGCPtr
-    PGMPhysSimpleDirtyWriteGCPtr
-    PDMR0DeviceRegisterModule
-    PDMR0DeviceDeregisterModule
-    IOMMmioResetRegion
-    IOMMmioMapMmio2Page
+    GIMGetMmio2Regions                   ; not-arm64
+    PDMQueueAlloc                        ; not-arm64
+    PDMQueueInsert                       ; not-arm64
+    PGMHandlerPhysicalPageTempOff        ; not-arm64
+    PGMShwMakePageWritable               ; not-arm64
+    PGMPhysSimpleWriteGCPhys             ; not-arm64
+    PGMPhysSimpleReadGCPtr               ; not-arm64
+    PGMPhysSimpleWriteGCPtr              ; not-arm64
+    PGMPhysReadGCPtr                     ; not-arm64
+    PGMPhysWriteGCPtr                    ; not-arm64
+    PGMPhysSimpleDirtyWriteGCPtr         ; not-arm64
+    PDMR0DeviceRegisterModule            ; not-arm64
+    PDMR0DeviceDeregisterModule          ; not-arm64
+    IOMMmioResetRegion                   ; not-arm64
+    IOMMmioMapMmio2Page                  ; not-arm64
     RTLogDefaultInstance
     RTLogDefaultInstanceEx
     RTLogGetDefaultInstanceEx
     RTLogRelGetDefaultInstance
     RTLogRelGetDefaultInstanceEx
     RTLogLogger
     RTLogLoggerEx
     RTLogLoggerExV
     RTStrPrintf
     RTTimeMilliTS
     RTTraceBufAddMsgF
     RTTraceBufAddPos
     RTTraceBufAddPosMsgF
-    TMTimerFromMilli
-    TMTimerFromMicro
-    TMTimerFromNano
-    TMTimerGet
-    TMTimerGetFreq
-    TMTimerIsActive
-    TMTimerIsLockOwner
-    TMTimerLock
-    TMTimerSet
-    TMTimerSetRelative
-    TMTimerSetMillies
-    TMTimerSetMicro
-    TMTimerSetNano
-    TMTimerSetFrequencyHint
-    TMTimerStop
-    TMTimerUnlock
-    VMMGetSvnRev
+    TMTimerFromMilli                     ; not-arm64
+    TMTimerFromMicro                     ; not-arm64
+    TMTimerFromNano                      ; not-arm64
+    TMTimerGet                           ; not-arm64
+    TMTimerGetFreq                       ; not-arm64
+    TMTimerIsActive                      ; not-arm64
+    TMTimerIsLockOwner                   ; not-arm64
+    TMTimerLock                          ; not-arm64
+    TMTimerSet                           ; not-arm64
+    TMTimerSetRelative                   ; not-arm64
+    TMTimerSetMillies                    ; not-arm64
+    TMTimerSetMicro                      ; not-arm64
+    TMTimerSetNano                       ; not-arm64
+    TMTimerSetFrequencyHint              ; not-arm64
+    TMTimerStop                          ; not-arm64
+    TMTimerUnlock                        ; not-arm64
+    VMMGetSvnRev                         ; not-arm64

 ; Internal Networking
@@ -90 +90 @@
     RTAssertMsg2Weak
     RTAssertShouldPanic
-    RTCrc32
+    RTCrc32                              ; not-arm64
     RTOnceSlow
-    RTTimeNanoTSLegacySyncInvarNoDelta
-    RTTimeNanoTSLegacySyncInvarWithDelta
-    RTTimeNanoTSLegacyAsync
-    RTTimeNanoTSLFenceSyncInvarNoDelta
-    RTTimeNanoTSLFenceSyncInvarWithDelta
-    RTTimeNanoTSLFenceAsync
+    RTTimeNanoTSLegacySyncInvarNoDelta   ; not-arm64
+    RTTimeNanoTSLegacySyncInvarWithDelta ; not-arm64
+    RTTimeNanoTSLegacyAsync              ; not-arm64
+    RTTimeNanoTSLFenceSyncInvarNoDelta   ; not-arm64
+    RTTimeNanoTSLFenceSyncInvarWithDelta ; not-arm64
+    RTTimeNanoTSLFenceAsync              ; not-arm64
     RTTimeSystemNanoTS
     RTTimeNanoTS
-    ASMMultU64ByU32DivByU32              ; not-os2
-    ASMAtomicXchgU8                      ; not-x86
-    ASMAtomicXchgU16                     ; not-x86
-    ASMBitFirstSet                       ; not-x86
-    ASMNopPause                          ; not-x86
-    nocrt_memchr
-    nocrt_memcmp
-    nocrt_memcpy
-    memcpy=nocrt_memcpy                  ; not-os2
-    nocrt_memmove
-    nocrt_memset
-    memset=nocrt_memset                  ; not-os2
-    nocrt_strcpy
-    nocrt_strcmp
-    nocrt_strchr
-    nocrt_strlen
+    ASMAtomicXchgU8                      ; not-arm64
+    ASMAtomicXchgU16                     ; not-arm64
+    ASMBitFirstSet                       ; not-arm64
+    ASMNopPause                          ; not-arm64
+    nocrt_memchr                         ; not-arm64
+    nocrt_memcmp                         ; not-arm64
+    nocrt_memcpy                         ; not-arm64
+    memcpy=nocrt_memcpy                  ; not-arm64
+    nocrt_memmove                        ; not-arm64
+    nocrt_memset                         ; not-arm64
+    memset=nocrt_memset                  ; not-arm64
+    nocrt_strcpy                         ; not-arm64
+    nocrt_strcmp                         ; not-arm64
+    nocrt_strchr                         ; not-arm64
+    nocrt_strlen                         ; not-arm64
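The "; not-arm64" tags are not module-definition syntax seen by the linker; they are markers consumed by the $(SED) rule kept in the Makefile.kmk hunk above, whose '-e /not-$(KBUILD_TARGET_ARCH)/d' expression drops the tagged exports for the current target architecture. A rough sketch of what that rule boils down to on an arm64 target (illustrative only; the real recipe is the one shown above):

    # Hypothetical expansion for KBUILD_TARGET_ARCH=arm64: lines tagged
    # '; not-win' or '; not-arm64' are deleted before the stripped VMMR0.def
    # is passed on to the import-library/linker step.
    $(VMMR0Imp_0_OUTDIR)/VMMR0.def: $(VMMR0Imp_DEFPATH)/VMMR0/VMMR0.def
    	$(SED) -e '/not-win/d' -e '/not-arm64/d' \
    		--output $@ $(VMMR0Imp_DEFPATH)/VMMR0/VMMR0.def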
trunk/src/VBox/VMM/VMMRZ/VMMRZ.cpp (r106061 -> r106920)

@@ -36 +36 @@

 #include <iprt/assert.h>
-#include <iprt/asm-amd64-x86.h>
 #include <iprt/errcore.h>
 #include <iprt/string.h>
trunk/src/VBox/VMM/testcase/Makefile.kmk (r106212 -> r106920)

@@ -39 +39 @@
 # Target lists.
 #
-PROGRAMS += tstVMStructSize tstAsmStructs
+PROGRAMS += tstVMStructSize
+PROGRAMS.amd64 += tstAsmStructs
 ifdef VBOX_WITH_RAW_MODE
-PROGRAMS += tstVMStructRC tstAsmStructsRC
+PROGRAMS += tstVMStructRC
+PROGRAMS.amd64 += tstAsmStructsRC
 endif
 if !defined(VBOX_ONLY_EXTPACKS) \
@@ -123 +125 @@
 # validation testcases. Perhaps a bit hackish, but extremely useful.
 #
-ifeq ($(KBUILD_TARGET),$(KBUILD_HOST))
- ifeq ($(filter-out x86.x86 amd64.amd64 x86.amd64, $(KBUILD_TARGET_ARCH).$(KBUILD_HOST_ARCH)),)
-  OTHERS += \
-  	$(VBOX_VMM_TESTCASE_OUT_DIR)/tstAsmStructs.run \
-  	$(VBOX_VMM_TESTCASE_OUT_DIR)/tstVMStructSize.run
- endif
+ifeq ($(KBUILD_TARGET).$(KBUILD_TARGET_ARCH),$(KBUILD_HOST).$(KBUILD_HOST_ARCH))
+ OTHERS += $(VBOX_VMM_TESTCASE_OUT_DIR)/tstVMStructSize.run
+ OTHERS.amd64 += $(VBOX_VMM_TESTCASE_OUT_DIR)/tstAsmStructs.run
 endif

 # The normal testing pass.
-TESTING += \
-	$(VBOX_VMM_TESTCASE_OUT_DIR)/tstAsmStructs.run \
-	$(VBOX_VMM_TESTCASE_OUT_DIR)/tstVMStructSize.run
+TESTING += $(VBOX_VMM_TESTCASE_OUT_DIR)/tstVMStructSize.run
+TESTING.amd64 += $(VBOX_VMM_TESTCASE_OUT_DIR)/tstAsmStructs.run

 OTHER_CLEAN += \
trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp (r106061 -> r106920)

@@ -386 +386 @@

     CHECK_PADDING_GVM(4, gvmm);
+#ifndef VBOX_WITH_MINIMAL_R0
     CHECK_PADDING_GVM(4, gmm);
+#endif
     CHECK_PADDING_GVMCPU(4, gvmm);