Timestamp:
    Jul 18, 2007 5:00:33 PM (18 years ago)
Location:
    trunk/src/VBox/VMM
Files:
    20 edited

trunk/src/VBox/VMM/DBGFSym.cpp
    r2981 → r3696: the Windows-only debug include guard on line 27, #if defined(__WIN__) && defined(DEBUG_bird), now tests RT_OS_WINDOWS instead of __WIN__; the guarded <Windows.h>/_IMAGEHLP64 block is otherwise unchanged.
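The same mechanical substitution recurs in every file below. A minimal generic sketch of the pattern, assembled for illustration rather than copied from any single file:

    /* Old-style guards, as found before this changeset: */
    #if defined(__WIN__) && defined(__AMD64__)
    /* Windows, AMD64-only code */
    #endif

    /* Equivalent IPRT-style guards after the rename: */
    #if defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64)
    /* Windows, AMD64-only code */
    #endif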
trunk/src/VBox/VMM/PATM/PATM.cpp
    r3073 → r3696: __AMD64__ becomes RT_ARCH_AMD64 in the #ifdef around the PATMEnabled CFGM query (line 150) and in the #ifndef guarding the fPATMEnabled default (line 316); trailing whitespace is trimmed on a few otherwise unchanged lines (the PATMFL_MUST_INSTALL_PATCHJMP flag check and the CPUMR3DisasmInstrCPU failure test).
trunk/src/VBox/VMM/PATM/PATMA.asm
    r2988 → r3696: %ifdef __AMD64__ around the BITS 32 switch (line 48) becomes %ifdef RT_ARCH_AMD64; trailing whitespace is trimmed in the iret pending-interrupt dispatch block (PATM_ACTION_PENDING_IRQ_AFTER_IRET) and in the patch fixup table of DD entries.
trunk/src/VBox/VMM/PGM.cpp
    r2981 → r3696: __AMD64__ becomes RT_ARCH_AMD64 in the "#if 1 /// @todo ndef" comment opening the 32-bit shadow-mode block and in the matching #endif comment.
trunk/src/VBox/VMM/TRPM.cpp
    r2981 → r3696: the host-specific trap-gate check now reads #if defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86) and #elif defined(RT_OS_LINUX) instead of using __WIN__/__X86__/__LINUX__; trailing whitespace is trimmed in the comment explaining why gates 0x2E and 0x80 are safe to replace on 32-bit Windows and 0x80 on Linux, and around the EMR3CheckRawForcedActions call.
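A hedged sketch of the host-specific gate selection this hunk renames, pieced together from the diff above; trpmCanReplaceGate is a hypothetical helper name, and the fallback for other hosts is an assumption since that branch is elided in the excerpt:

    #include <stdbool.h>

    /* Hypothetical helper, not the actual TRPM code path. */
    static bool trpmCanReplaceGate(unsigned iTrap)
    {
    #if defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
        return iTrap == 0x2E || iTrap == 0x80;  /* NT system service gate + syscall gate */
    #elif defined(RT_OS_LINUX)
        return iTrap == 0x80;                   /* int 80h syscall gate */
    #else
        (void)iTrap;                            /* PORTME: assume no gates are safe elsewhere */
        return false;
    #endif
    }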
trunk/src/VBox/VMM/VM.cpp
    r2984 → r3696: the two #ifdef __LINUX__ guards around the Linux-specific error texts for VERR_VM_DRIVER_NOT_ACCESSIBLE and VERR_VM_DRIVER_NOT_INSTALLED become #ifdef RT_OS_LINUX.
trunk/src/VBox/VMM/VMM.cpp
    r3324 → r3696: __AMD64__ becomes RT_ARCH_AMD64 in the #ifndef around the switcher table and in the 64-bit HC pointer fixup guard (paired with VBOX_WITH_HYBIRD_32BIT_KERNEL); __DARWIN__ becomes RT_OS_DARWIN in the KERNEL64_CS fixup; trailing whitespace is trimmed after a CPUMPushHyper #endif and in the yield-interval tuning comment.
trunk/src/VBox/VMM/VMMAll/CPUMAllA.asm
    r2988 → r3696: every %ifdef/%ifndef __AMD64__ in the lazy-FPU code (CPUM_REG selection, calling-convention argument shuffling, cr0 handling, the fxsave vs. legacy fnsave/frstor paths) becomes RT_ARCH_AMD64, and %ifdef __WIN__ becomes RT_OS_WINDOWS in the parameter loading of the handler and of CPUMRestoreHostFPUStateAsm.
trunk/src/VBox/VMM/VMMAll/EMAllA.asm
    r2988 → r3696: every %ifdef __AMD64__ / %else ; !__AMD64__ pair in the EMEmulate* helpers (Cmp, And, Or, Xor, Inc, Dec, Add, AdcWithCarrySet, Sub, Btr, Btc, Bts) becomes RT_ARCH_AMD64, covering the MY_PTR_REG/MY_RET_REG definitions, the parameter loading, the size-switch dispatch and the 8-byte workers; the __WIN64__ guards are left untouched and the indentation of the size-switch blocks is normalised.
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
    r2981 → r3696: same rename as in PGM.cpp, __AMD64__ becomes RT_ARCH_AMD64 in the "#if 1 ///@todo ndef" comment and the #endif comment around the 32-bit shadow-mode block.
trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp
    r3393 → r3696: the guard #if !defined(_MSC_VER) || defined(__AMD64__) around the ASMMult2xU32RetU64/ASMDivU64ByU32RetU32 path now tests RT_ARCH_AMD64.
trunk/src/VBox/VMM/VMMGC/PGMGC.cpp
    r2981 → r3696: #ifndef __AMD64__ and its #endif comment around the 32-bit shadow-mode block become RT_ARCH_AMD64.
trunk/src/VBox/VMM/VMMInternal.h
    r2981 → r3696: # ifdef __WIN__ inside the HC_ARCH_BITS == 64 jump-buffer layout (the rsi/rdi members) becomes # ifdef RT_OS_WINDOWS.
trunk/src/VBox/VMM/VMMInternal.mac
    r2988 → r3696: in the VMMR0JMPBUF struc, %ifdef __X86__ becomes RT_ARCH_X86, %ifdef __AMD64__ becomes RT_ARCH_AMD64, and %ifdef __WIN__ becomes RT_OS_WINDOWS for the rsi/rdi members.
trunk/src/VBox/VMM/VMMR0/HWACCMR0A.asm
    r3151 → r3696: %ifdef __OS2__ (the vmwrite int3 workaround) becomes RT_OS_OS2; the %ifdef __AMD64__ / %else ; __X86__ guards in the MYPUSHAD/MYPOPAD/MYPUSHSEGS/MYPOPSEGS macros, the VMX launch/resume paths, the VMWRITE/VMXEnable/VMXClearVMCS/VMXActivateVMCS helpers and SVMVMRun/SVMInvlpgA become RT_ARCH_AMD64 / RT_ARCH_X86, with %ifdef __WIN__ becoming RT_OS_WINDOWS; trailing whitespace is trimmed in the macro bodies and around the pCtx save/restore.
trunk/src/VBox/VMM/VMMR0/TRPMR0A.asm
    r2988 → r3696: %ifdef __AMD64__ in the trap-handler prologue and in trpmR0InterruptDispatcher becomes RT_ARCH_AMD64 (including the %else/%endif comments), and %ifdef __X86__ in trpmR0DispatchHostInterruptSimple becomes RT_ARCH_X86; trailing whitespace is trimmed.
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
    r3312 → r3696: defined(__AMD64__) becomes defined(RT_ARCH_AMD64) in the _AddressOfReturnAddress pragma and intrinsic guards, defined(__X86__) becomes defined(RT_ARCH_X86) in the stack-address fallback, and #ifdef __X86__ becomes RT_ARCH_X86 in the check that the CallHostR0JmpBuf is armed (eip on x86, otherwise rip); trailing whitespace is trimmed around the g_pIntNet declaration.
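VMMR0.cpp mixes compiler and architecture tests (for example _MSC_VER together with RT_ARCH_AMD64, and the RT_ARCH_X86 eip-vs-rip jump-buffer check). A self-contained sketch of that kind of compile-time dispatch, assuming only standard compiler-defined macros; in the real tree RT_ARCH_* is supplied by the build system or iprt/cdefs.h rather than being defined locally:

    #include <stdio.h>

    /* Local fallback detection, for this sketch only. */
    #if !defined(RT_ARCH_AMD64) && !defined(RT_ARCH_X86)
    # if defined(__x86_64__) || defined(_M_X64)
    #  define RT_ARCH_AMD64
    # elif defined(__i386__) || defined(_M_IX86)
    #  define RT_ARCH_X86
    # endif
    #endif

    int main(void)
    {
    #if defined(RT_ARCH_AMD64)
        printf("64-bit build: a jump buffer would store rip\n");
    #elif defined(RT_ARCH_X86)
        printf("32-bit build: a jump buffer would store eip\n");
    #else
        printf("unknown architecture\n");
    #endif
        return 0;
    }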
trunk/src/VBox/VMM/VMMR0/VMMR0A.asm
    r2988 → r3696: %ifdef __X86__ becomes RT_ARCH_X86 and %ifdef __AMD64__ becomes RT_ARCH_AMD64 throughout vmmR0CallHostSetJmp, vmmR0CallHostLongJmp and the vmmR0LoggerWrapper (including the %endif comments); trailing whitespace is trimmed on the r12-r15 restores.
trunk/src/VBox/VMM/VMReq.cpp
    r2981 → r3696: the #ifdef __AMD64__ around the internal-request argument-count dispatch becomes RT_ARCH_AMD64.
trunk/src/VBox/VMM/testcase/tstAsmStructsAsm.asm
    r2988 → r3696: the %ifdef __AMD64__ around BITS 64 becomes RT_ARCH_AMD64.