Changeset 57446 in vbox
Timestamp: Aug 18, 2015 5:33:53 PM (10 years ago)
svn:sync-xref-src-repo-rev: 102213
Location: trunk
Files: 21 edited
Legend: '-' = removed line, '+' = added line, other lines are unchanged context;
'@@ -N +M @@' gives a hunk's starting line in the old and new revision, and '…'
marks unchanged lines elided by the viewer.
trunk/include/VBox/vmm/pgm.h
(r56627 → r57446)

@@ -738 +738 @@
 VMMR3DECL(int)  PGMR3MappingsUnfix(PVM pVM);
 VMMR3DECL(bool) PGMR3MappingsNeedReFixing(PVM pVM);
-#if defined(VBOX_WITH_RAW_MODE) || (HC_ARCH_BITS != 64 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL))
+#if defined(VBOX_WITH_RAW_MODE) || HC_ARCH_BITS == 32 /* (latter for 64-bit guests on 32-bit hosts) */
 VMMR3DECL(int)  PGMR3MapIntermediate(PVM pVM, RTUINTPTR Addr, RTHCPHYS HCPhys, unsigned cbPages);
 #endif
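The guard change above is a pure simplification: once VBOX_WITH_HYBRID_32BIT_KERNEL can no longer be defined (this changeset removes the hybrid 32-bit kernel configuration), the term !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) is always true, and for the remaining supported values of HC_ARCH_BITS (32 or 64) the test HC_ARCH_BITS != 64 is exactly HC_ARCH_BITS == 32. A minimal compilable sketch of that equivalence; the fixed HC_ARCH_BITS value below is illustrative only (in VirtualBox the build system defines it):

    /* Sketch: old and new preprocessor guards select the same code once
     * VBOX_WITH_HYBRID_32BIT_KERNEL can no longer be defined. */
    #include <stdio.h>

    #define HC_ARCH_BITS 32                 /* illustrative: 32 or 64 */

    #if HC_ARCH_BITS != 64 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    # define F_OLD 1                        /* old guard (raw-mode term left out) */
    #else
    # define F_OLD 0
    #endif
    #if HC_ARCH_BITS == 32
    # define F_NEW 1                        /* new guard */
    #else
    # define F_NEW 0
    #endif

    int main(void)
    {
        printf("old=%d new=%d\n", F_OLD, F_NEW); /* always prints equal values */
        return 0;
    }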
trunk/src/VBox/Devices/Makefile.kmk
(r57296 → r57446)

@@ -1018 +1018 @@
 #
 VBoxDDR0_TEMPLATE = VBoxR0
-VBoxDDR0_DEFS.darwin.x86 = VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 VBOX_WITH_2X_4GB_ADDR_SPACE
 VBoxDDR0_DEFS = IN_RT_R0 $(if $(VBOX_WITH_HGCM),VBOX_WITH_HGCM,) # - WTF is IN_RT_R0 doing here?
 VBoxDDR0_DEFS.win += VBOX_WITH_WIN_PARPORT_SUP
trunk/src/VBox/VMM/Makefile.kmk
(r57126 → r57446)

@@ -26 +26 @@
 
 
+# Fail on unsupported hosts.
+ifeq ($(KBUILD_TARGET_ARCH),x86)
+ ifeq ($(KBUILD_TARGET),darwin)
+  $(error 32-bit darwin is no longer a supported VirtualBox host. Go back to 4.3 or older for 32-bit support.)
+ else ifeq ($(KBUILD_TARGET),solaris)
+  $(error 32-bit solaris is no longer a supported VirtualBox host. Go back to x.y or older for 32-bit support.)
+ endif
+endif
+
+
 #
 # Globals

@@ -120 +130 @@
 endif
 VBoxVMM_DEFS.darwin = VMM_R0_SWITCH_STACK
-VBoxVMM_DEFS.darwin.x86 = \
-	VBOX_WITH_2X_4GB_ADDR_SPACE VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R3 \
-	VBOX_WITH_HYBRID_32BIT_KERNEL VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R3
 
 VBoxVMM_INCS = \

@@ -277 +284 @@
 	VMMSwitcher/AMD64To32Bit.asm \
 	VMMSwitcher/AMD64ToPAE.asm
-VBoxVMM_SOURCES.darwin.x86 += \
-	VMMSwitcher/AMD64ToPAE.asm
 endif
 VBoxVMM_SOURCES.x86 += \

@@ -423 +428 @@
 ifdef VBOX_WITH_VMM_R0_SWITCH_STACK
  VMMRC_DEFS += VMM_R0_SWITCH_STACK
-endif
-ifeq ($(KBUILD_TARGET_ARCH),x86)
- VMMRC_DEFS.darwin = VMM_R0_SWITCH_STACK
- VMMRC_DEFS.darwin = \
-	VBOX_WITH_2X_4GB_ADDR_SPACE VBOX_WITH_2X_4GB_ADDR_SPACE_IN_RC \
-	VBOX_WITH_HYBRID_32BIT_KERNEL VBOX_WITH_HYBRID_32BIT_KERNEL_IN_RC
 endif

@@ -551 +550 @@
 endif
 VMMR0_DEFS.darwin = VMM_R0_SWITCH_STACK
-VMMR0_DEFS.darwin.x86 = \
-	VBOX_WITH_2X_4GB_ADDR_SPACE VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 \
-	VBOX_WITH_HYBRID_32BIT_KERNEL VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
 VMMR0_DEFS.win.amd64 = VBOX_WITH_KERNEL_USING_XMM

@@ -637 +633 @@
 VMMR0_SOURCES.x86 = \
 	VMMR0/VMMR0JmpA-x86.asm
-VMMR0_SOURCES.darwin.x86 = \
-	VMMRZ/PGMRZDynMap.cpp
 
 VMMR0_LIBS = \
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
(r57358 → r57446)

@@ -325 +325 @@
 #ifdef IN_RING0
 # if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
-#  ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
-#   define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
+#  define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
     do { \
         if (!CPUMIsGuestInLongModeEx(&(a_pVCpu)->cpum.s.Guest)) \
…
             (a_pVCpu)->cpum.s.fUseFlags |= CPUM_SYNC_DEBUG_REGS_HYPER; \
     } while (0)
-#  else
-#   define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
-    do { \
-        /** @todo we're not loading the correct guest value here! */ \
-        a_fnLoad(a_uValue); \
-    } while (0)
-#  endif
 # else
-#  define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
+# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
     do { \
         a_fnLoad(a_uValue); \
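The surviving 32-bit-host variant of MAYBE_LOAD_DRx encodes one idea: 32-bit ring-0 code cannot write the upper half of a 64-bit guest debug register, so while the guest is in long mode the load is postponed to the 64-bit world switcher via the CPUM_SYNC_DEBUG_REGS_HYPER flag. A self-contained sketch of that deferral pattern; the types, flag value and load helper below are illustrative stand-ins, not the CPUM definitions:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the real VBox types and flags (assumptions). */
    #define CPUM_SYNC_DEBUG_REGS_HYPER  (1u << 0)

    typedef struct VCPUSKETCH
    {
        uint32_t fUseFlags;          /* sync flags, consumed by the world switcher */
        int      fGuestInLongMode;   /* stand-in for CPUMIsGuestInLongModeEx()     */
    } VCPUSKETCH;

    static void LoadDr0(uint64_t uValue)   /* would be 'mov dr0, ...' in ring-0 */
    {
        printf("loading DR0 = %#llx now\n", (unsigned long long)uValue);
    }

    /* The deferral pattern of the kept MAYBE_LOAD_DRx: load immediately when the
     * value fits the current CPU mode, otherwise flag it for the switcher. */
    static void MaybeLoadDr0(VCPUSKETCH *pVCpu, uint64_t uValue)
    {
        if (!pVCpu->fGuestInLongMode)
            LoadDr0(uValue);                                 /* safe 32-bit load */
        else
            pVCpu->fUseFlags |= CPUM_SYNC_DEBUG_REGS_HYPER;  /* postponed        */
    }

    int main(void)
    {
        VCPUSKETCH VCpu = { 0, 1 };          /* guest in long mode */
        MaybeLoadDr0(&VCpu, UINT64_C(0x8000000000000000));
        printf("fUseFlags=%#x (load postponed)\n", VCpu.fUseFlags);
        return 0;
    }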
trunk/src/VBox/VMM/VMMAll/EMAll.cpp
(r57358 → r57446)

@@ -2383 +2383 @@
     NOREF(pvFault);
 
-#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0)
+#if HC_ARCH_BITS == 32
     Assert(pDis->Param1.cb <= 4);
 #endif

@@ -3015 +3015 @@
     NOREF(pvFault);
 
-#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0)
+#if HC_ARCH_BITS == 32
     Assert(pDis->Param1.cb <= 4);
 #endif

@@ -3729 +3729 @@
         && uOpCode != OP_BTR
         && uOpCode != OP_BTC
-# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
-        && uOpCode != OP_CMPXCHG /* solaris */
-        && uOpCode != OP_AND     /* windows */
-        && uOpCode != OP_OR      /* windows */
-        && uOpCode != OP_XOR     /* because we can */
-        && uOpCode != OP_ADD     /* windows (dripple) */
-        && uOpCode != OP_ADC     /* because we can */
-        && uOpCode != OP_SUB     /* because we can */
-        /** @todo OP_BTS or is that a different kind of failure? */
-# endif
        )
     {
trunk/src/VBox/VMM/VMMAll/EMAllA.asm
(r56287 → r57446)

@@ -44 +44 @@
  %define CAN_DO_8_BYTE_OP 1
 %endif
-%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
- %define CAN_DO_8_BYTE_OP 1
- %define MY_PTR_REG64 rcx
-%endif
-
-
-;*******************************************************************************
-;* External Symbols
-;*******************************************************************************
-%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
-extern NAME(SUPR0Abs64bitKernelCS)
-extern NAME(SUPR0AbsKernelCS)
-%endif
 

@@ -188 +175 @@ EMEmulateAnd
        pop     MY_RET_REG
        retn
-
-%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
-.do_qword:
-       db      0xea                    ; jmp far .sixtyfourbit_mode
-       dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
-BITS 64
-.sixtyfourbit_mode:
-       and     esp, 0ffffffffh
-       and     MY_PTR_REG, 0ffffffffh
-       mov     rdx, qword [rsp + 08h]  ; rdx = second parameter
-       and     [MY_PTR_REG64], rdx     ; do 8 bytes AND
-       jmp far [.fpret wrt rip]
-.fpret:                                ; 16:32 Pointer to .done.
-       dd      .done, NAME(SUPR0AbsKernelCS)
-BITS 32
-%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
 ENDPROC EMEmulateAnd

@@ -277 +248 @@ EMEmulateLockAnd
        mov     eax, VINF_SUCCESS
        retn
-
-%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
-.do_qword:
-       db      0xea                    ; jmp far .sixtyfourbit_mode
-       dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
-BITS 64
-.sixtyfourbit_mode:
-       and     esp, 0ffffffffh
-       and     MY_PTR_REG, 0ffffffffh
-       mov     rdx, qword [rsp + 08h]  ; rdx = second parameter
-       lock and [MY_PTR_REG64], rdx    ; do 8 bytes OR
-       jmp far [.fpret wrt rip]
-.fpret:                                ; 16:32 Pointer to .done.
-       dd      .done, NAME(SUPR0AbsKernelCS)
-BITS 32
-%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
 ENDPROC EMEmulateLockAnd

@@ -357 +312 @@ EMEmulateOr
        pop     MY_RET_REG
        retn
-
-%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
-.do_qword:
-       db      0xea                    ; jmp far .sixtyfourbit_mode
-       dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
-BITS 64
-.sixtyfourbit_mode:
-       and     esp, 0ffffffffh
-       and     MY_PTR_REG, 0ffffffffh
-       mov     rdx, qword [rsp + 08h]  ; rdx = second parameter
-       or      [MY_PTR_REG64], rdx     ; do 8 bytes OR
-       jmp far [.fpret wrt rip]
-.fpret:                                ; 16:32 Pointer to .done.
-       dd      .done, NAME(SUPR0AbsKernelCS)
-BITS 32
-%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
 ENDPROC EMEmulateOr

@@ -446 +385 @@ EMEmulateLockOr
        mov     eax, VINF_SUCCESS
        retn
-
-%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
-.do_qword:
-       db      0xea                    ; jmp far .sixtyfourbit_mode
-       dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
-BITS 64
-.sixtyfourbit_mode:
-       and     esp, 0ffffffffh
-       and     MY_PTR_REG, 0ffffffffh
-       mov     rdx, qword [rsp + 08h]  ; rdx = second parameter
-       lock or [MY_PTR_REG64], rdx     ; do 8 bytes OR
-       jmp far [.fpret wrt rip]
-.fpret:                                ; 16:32 Pointer to .done.
-       dd      .done, NAME(SUPR0AbsKernelCS)
-BITS 32
-%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
 ENDPROC EMEmulateLockOr

@@ -527 +450 @@ EMEmulateXor
        pop     MY_RET_REG
        retn
-
-%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
-.do_qword:
-       db      0xea                    ; jmp far .sixtyfourbit_mode
-       dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
-BITS 64
-.sixtyfourbit_mode:
-       and     esp, 0ffffffffh
-       and     MY_PTR_REG, 0ffffffffh
-       mov     rdx, qword [rsp + 08h]  ; rdx = second parameter
-       xor     [MY_PTR_REG64], rdx     ; do 8 bytes XOR
-       jmp far [.fpret wrt rip]
-.fpret:                                ; 16:32 Pointer to .done.
-       dd      .done, NAME(SUPR0AbsKernelCS)
-BITS 32
-%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
 ENDPROC EMEmulateXor

@@ -615 +522 @@ EMEmulateLockXor
        mov     eax, VINF_SUCCESS
        retn
-
-%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
-.do_qword:
-       db      0xea                    ; jmp far .sixtyfourbit_mode
-       dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
-BITS 64
-.sixtyfourbit_mode:
-       and     esp, 0ffffffffh
-       and     MY_PTR_REG, 0ffffffffh
-       mov     rdx, qword [rsp + 08h]  ; rdx = second parameter
-       lock xor [MY_PTR_REG64], rdx    ; do 8 bytes OR
-       jmp far [.fpret wrt rip]
-.fpret:                                ; 16:32 Pointer to .done.
-       dd      .done, NAME(SUPR0AbsKernelCS)
-BITS 32
-%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
 ENDPROC EMEmulateLockXor

@@ -821 +712 @@ EMEmulateAdd
        pop     MY_RET_REG
        retn
-
-%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
-.do_qword:
-       db      0xea                    ; jmp far .sixtyfourbit_mode
-       dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
-BITS 64
-.sixtyfourbit_mode:
-       and     esp, 0ffffffffh
-       and     MY_PTR_REG, 0ffffffffh
-       mov     rdx, qword [rsp + 08h]  ; rdx = second parameter
-       add     [MY_PTR_REG64], rdx     ; do 8 bytes ADD
-       jmp far [.fpret wrt rip]
-.fpret:                                ; 16:32 Pointer to .done.
-       dd      .done, NAME(SUPR0AbsKernelCS)
-BITS 32
-%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
 ENDPROC EMEmulateAdd

@@ -906 +781 @@ EMEmulateAdcWithCarrySet
        pop     MY_RET_REG
        retn
-
-%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
-.do_qword:
-       db      0xea                    ; jmp far .sixtyfourbit_mode
-       dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
-BITS 64
-.sixtyfourbit_mode:
-       and     esp, 0ffffffffh
-       and     MY_PTR_REG, 0ffffffffh
-       mov     rdx, qword [rsp + 08h]  ; rdx = second parameter
-       stc                             ; set carry flag
-       adc     [MY_PTR_REG64], rdx     ; do 8 bytes ADC
-       jmp far [.fpret wrt rip]
-.fpret:                                ; 16:32 Pointer to .done.
-       dd      .done, NAME(SUPR0AbsKernelCS)
-BITS 32
-%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
 ENDPROC EMEmulateAdcWithCarrySet

@@ -988 +846 @@ EMEmulateSub
        pop     MY_RET_REG
        retn
-
-%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
-.do_qword:
-       db      0xea                    ; jmp far .sixtyfourbit_mode
-       dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
-BITS 64
-.sixtyfourbit_mode:
-       and     esp, 0ffffffffh
-       and     MY_PTR_REG, 0ffffffffh
-       mov     rdx, qword [rsp + 08h]  ; rdx = second parameter
-       sub     [MY_PTR_REG64], rdx     ; do 8 bytes SUB
-       jmp far [.fpret wrt rip]
-.fpret:                                ; 16:32 Pointer to .done.
-       dd      .done, NAME(SUPR0AbsKernelCS)
-BITS 32
-%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
 ENDPROC EMEmulateSub

@@ -1220 +1062 @@ EMEmulateLockCmpXchg
        pop     xBX
        retn
-
-%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
-.do_qword:
-       db      0xea                    ; jmp far .sixtyfourbit_mode
-       dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
-BITS 64
-.sixtyfourbit_mode:
-       and     ebx, 0ffffffffh
-       and     esp, 0ffffffffh
-       and     ecx, 0ffffffffh
-       mov     rax, qword [rbx]        ; load 2nd parameter's value
-       mov     rdx, qword [rsp + 0ch + 4] ; rdx = third parameter
-
-       lock cmpxchg qword [rcx], rdx   ; do 8 byte CMPXCHG
-       mov     qword [rbx], rax
-       jmp far [.fpret wrt rip]
-.fpret:                                ; 16:32 Pointer to .done.
-       dd      .done, NAME(SUPR0AbsKernelCS)
-BITS 32
-%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
 ENDPROC EMEmulateLockCmpXchg

@@ -1328 +1150 @@ EMEmulateCmpXchg
        pop     xBX
        retn
-
-%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
-.do_qword:
-       db      0xea                    ; jmp far .sixtyfourbit_mode
-       dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
-BITS 64
-.sixtyfourbit_mode:
-       and     ebx, 0ffffffffh
-       and     esp, 0ffffffffh
-       and     ecx, 0ffffffffh
-       mov     rax, qword [rbx]        ; load 2nd parameter's value
-       mov     rdx, qword [rsp + 0ch + 4] ; rdx = third parameter
-
-       cmpxchg qword [rcx], rdx        ; do 8 byte CMPXCHG
-       mov     qword [rbx], rax
-       jmp far [.fpret wrt rip]
-.fpret:                                ; 16:32 Pointer to .done.
-       dd      .done, NAME(SUPR0AbsKernelCS)
-BITS 32
-%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
 ENDPROC EMEmulateCmpXchg

@@ -1565 +1367 @@ EMEmulateLockXAdd
 
        retn
-
-%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
-.do_qword:
-       db      0xea                    ; jmp far .sixtyfourbit_mode
-       dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
-BITS 64
-.sixtyfourbit_mode:
-       and     esp, 0ffffffffh
-       and     edx, 0ffffffffh
-       and     MY_PTR_REG, 0ffffffffh
-       mov     rax, qword [rdx]        ; load 2nd parameter's value
-       and     [MY_PTR_REG64], rax     ; do 8 bytes XADD
-       jmp far [.fpret wrt rip]
-.fpret:                                ; 16:32 Pointer to .done.
-       dd      .done, NAME(SUPR0AbsKernelCS)
-BITS 32
-%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
 ENDPROC EMEmulateLockXAdd

@@ -1654 +1439 @@ EMEmulateXAdd
 
        retn
-
-%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
-.do_qword:
-       db      0xea                    ; jmp far .sixtyfourbit_mode
-       dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
-BITS 64
-.sixtyfourbit_mode:
-       and     esp, 0ffffffffh
-       and     edx, 0ffffffffh
-       and     MY_PTR_REG, 0ffffffffh
-       mov     rax, qword [rdx]        ; load 2nd parameter's value
-       and     [MY_PTR_REG64], rax     ; do 8 bytes XADD
-       jmp far [.fpret wrt rip]
-.fpret:                                ; 16:32 Pointer to .done.
-       dd      .done, NAME(SUPR0AbsKernelCS)
-BITS 32
-%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
 ENDPROC EMEmulateXAdd
trunk/src/VBox/VMM/VMMAll/SELMAll.cpp
(r57358 → r57446)

@@ -1044 +1044 @@
 
 
-#if defined(VBOX_WITH_RAW_MODE) || (HC_ARCH_BITS != 64 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL))
+#if defined(VBOX_WITH_RAW_MODE) || (HC_ARCH_BITS != 64 && defined(VBOX_WITH_64_BITS_GUESTS))
 
 /**

@@ -1118 +1118 @@
 }
 
-#endif /* defined(VBOX_WITH_RAW_MODE) || (HC_ARCH_BITS != 64 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)) */
+#endif /* defined(VBOX_WITH_RAW_MODE) || (HC_ARCH_BITS != 64 && defined(VBOX_WITH_64_BITS_GUESTS)) */
 
 /**
trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp
(r57358 → r57446)

@@ -256 +256 @@
     {
 #ifdef RT_ARCH_X86
-# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
-        if (fExtFeaturesEDX & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
-# else
         if (!ASMIsIntelCpu())
-# endif
 #endif
         {

@@ -400 +396 @@
 {
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
-#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
     if (CPUMIsGuestInLongModeEx(pCtx))
     {

@@ -464 +460 @@
     NOREF(pVM); NOREF(pCtx);
 
-#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
     if (CPUMIsGuestInLongModeEx(pCtx))
     {

@@ -531 +527 @@
      * Save the host state.
      */
-#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
-    AssertCompile((uintptr_t)&pVCpu->cpum.s.Host.dr3 - (uintptr_t)&pVCpu->cpum.s.Host.dr0 == sizeof(uint64_t) * 3);
-    cpumR0SaveDRx(&pVCpu->cpum.s.Host.dr0);
-#else
     pVCpu->cpum.s.Host.dr0 = ASMGetDR0();
     pVCpu->cpum.s.Host.dr1 = ASMGetDR1();
     pVCpu->cpum.s.Host.dr2 = ASMGetDR2();
     pVCpu->cpum.s.Host.dr3 = ASMGetDR3();
-#endif
     pVCpu->cpum.s.Host.dr6 = ASMGetDR6();
     /** @todo dr7 might already have been changed to 0x400; don't care right now as it's harmless. */

@@ -583 +574 @@
     if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST)
     {
-#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
         if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
         {

@@ -594 +585 @@
 #endif
         {
-#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
-            cpumR0SaveDRx(&pVCpu->cpum.s.Guest.dr[0]);
-#else
             pVCpu->cpum.s.Guest.dr[0] = ASMGetDR0();
             pVCpu->cpum.s.Guest.dr[1] = ASMGetDR1();
             pVCpu->cpum.s.Guest.dr[2] = ASMGetDR2();
             pVCpu->cpum.s.Guest.dr[3] = ASMGetDR3();
-#endif
             if (fDr6)
                 pVCpu->cpum.s.Guest.dr[6] = ASMGetDR6();

@@ -619 +606 @@
         ASMSetDR7(X86_DR7_INIT_VAL);
 
-#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
-        AssertCompile((uintptr_t)&pVCpu->cpum.s.Host.dr3 - (uintptr_t)&pVCpu->cpum.s.Host.dr0 == sizeof(uint64_t) * 3);
-        cpumR0LoadDRx(&pVCpu->cpum.s.Host.dr0);
-#else
         ASMSetDR0(pVCpu->cpum.s.Host.dr0);
         ASMSetDR1(pVCpu->cpum.s.Host.dr1);
         ASMSetDR2(pVCpu->cpum.s.Host.dr2);
         ASMSetDR3(pVCpu->cpum.s.Host.dr3);
-#endif
     /** @todo consider only updating if they differ, esp. DR6. Need to figure how
      *        expensive DRx reads are over DRx writes. */

@@ -660 +642 @@
     if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST)
     {
-#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
         if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
         {

@@ -671 +653 @@
 #endif
         {
-#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
-            cpumR0SaveDRx(&pVCpu->cpum.s.Guest.dr[0]);
-#else
             pVCpu->cpum.s.Guest.dr[0] = ASMGetDR0();
             pVCpu->cpum.s.Guest.dr[1] = ASMGetDR1();
             pVCpu->cpum.s.Guest.dr[2] = ASMGetDR2();
             pVCpu->cpum.s.Guest.dr[3] = ASMGetDR3();
-#endif
             if (fDr6)
                 pVCpu->cpum.s.Guest.dr[6] = ASMGetDR6();

@@ -707 +685 @@
  * DR7 and DR6 (if fDr6 is true) are left to the caller.
  */
-#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
     if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
         ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_SYNC_DEBUG_REGS_GUEST); /* Postpone it to the world switch. */
…
 #endif
     {
-#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
-        cpumR0LoadDRx(&pVCpu->cpum.s.Guest.dr[0]);
-#else
         ASMSetDR0(pVCpu->cpum.s.Guest.dr[0]);
         ASMSetDR1(pVCpu->cpum.s.Guest.dr[1]);
         ASMSetDR2(pVCpu->cpum.s.Guest.dr[2]);
         ASMSetDR3(pVCpu->cpum.s.Guest.dr[3]);
-#endif
         if (fDr6)
             ASMSetDR6(pVCpu->cpum.s.Guest.dr[6]);

@@ -754 +728 @@
  * DR7 and DR6 (if fDr6 is true) are left to the caller.
  */
-#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
     if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
         ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_SYNC_DEBUG_REGS_HYPER); /* Postpone it. */
…
 #endif
     {
-#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
-        cpumR0LoadDRx(&pVCpu->cpum.s.Hyper.dr[0]);
-#else
         ASMSetDR0(pVCpu->cpum.s.Hyper.dr[0]);
         ASMSetDR1(pVCpu->cpum.s.Hyper.dr[1]);
         ASMSetDR2(pVCpu->cpum.s.Hyper.dr[2]);
         ASMSetDR3(pVCpu->cpum.s.Hyper.dr[3]);
-#endif
         if (fDr6)
             ASMSetDR6(X86_DR6_INIT_VAL);
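With cpumR0SaveDRx/cpumR0LoadDRx gone, every path above uses the plain IPRT debug-register wrappers (ASMGetDR0..ASMGetDR3 and ASMSetDR0..ASMSetDR3 from iprt/asm-amd64-x86.h). A minimal sketch of the save/load pairing as it now reads; the structure below is an abbreviated stand-in for the CPUM host context, not the real layout, and actually executing these reads/writes requires ring-0:

    #include <iprt/asm-amd64-x86.h>  /* ASMGetDRx / ASMSetDRx */
    #include <iprt/types.h>

    /* Abbreviated stand-in for the per-VCPU debug register block (assumption). */
    typedef struct DRXSKETCH
    {
        uint64_t dr0, dr1, dr2, dr3;
    } DRXSKETCH;

    /* Save the host debug registers the way the rewritten hunks do it:
     * one ASMGetDRx call per register, no 32/64-bit mode-switch thunk needed. */
    static void drxSketchSaveHost(DRXSKETCH *pHost)
    {
        pHost->dr0 = ASMGetDR0();
        pHost->dr1 = ASMGetDR1();
        pHost->dr2 = ASMGetDR2();
        pHost->dr3 = ASMGetDR3();
    }

    /* ...and the matching restore, mirroring the ASMSetDRx sequences above. */
    static void drxSketchLoadHost(const DRXSKETCH *pHost)
    {
        ASMSetDR0(pHost->dr0);
        ASMSetDR1(pHost->dr1);
        ASMSetDR2(pHost->dr2);
        ASMSetDR3(pHost->dr3);
    }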
trunk/src/VBox/VMM/VMMR0/CPUMR0A.asm
(r55738 → r57446)

@@ -49 +49 @@
 
 
-;*******************************************************************************
-;* External Symbols
-;*******************************************************************************
-%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
-extern NAME(SUPR0AbsIs64bit)
-extern NAME(SUPR0Abs64bitKernelCS)
-extern NAME(SUPR0Abs64bitKernelSS)
-extern NAME(SUPR0Abs64bitKernelDS)
-extern NAME(SUPR0AbsKernelCS)
-%endif
-
-
-;*******************************************************************************
-;* Global Variables
-;*******************************************************************************
-%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
-BEGINDATA
-%if 0 ; Currently not used.
-g_r32_Zero: dd 0.0
-%endif
-
-;;
-; Store the SUPR0AbsIs64bit absolute value here so we can cmp/test without
-; needing to clobber a register. (This trick doesn't quite work for PE btw.
-; but that's not relevant atm.)
-GLOBALNAME g_fCPUMIs64bitHost
-       dd NAME(SUPR0AbsIs64bit)
-%endif
-
 
 BEGINCODE

@@ -161 +132 @@
        mov     eax, [pCpumCpu + CPUMCPU.Host.fXStateMask]
 
-%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
-       ; The joy of 32-bit darwin kernels that runs the CPU in 64-bit mode.
-       cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
-       jz      %%host_legacy_mode
-       db      0xea                    ; jmp far .sixtyfourbit_mode
-       dd      %%host_sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
-BITS 64
-%%host_sixtyfourbit_mode:
-       or      eax, eax
-       jz      %%host_sixtyfourbit_fxsave
-
-       ; XSAVE
-       mov     edx, [pCpumCpu + CPUMCPU.Host.fXStateMask + 4]
-       o64 xsave [pXState]
-       jmp     %%host_sixtyfourbit_done
-
-       ; FXSAVE
-%%host_sixtyfourbit_fxsave:
-       o64 fxsave [pXState]
-
-%%host_sixtyfourbit_done:
-       jmp far [%%host_fpret wrt rip]
-%%host_fpret:                          ; 16:32 Pointer to %%host_done.
-       dd      %%host_done, NAME(SUPR0AbsKernelCS)
-BITS 32
-
-%%host_legacy_mode:
-%endif
-
        ;
        ; XSAVE or FXSAVE?

@@ -230 +172 @@
        mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]
        mov     eax, [pCpumCpu + CPUMCPU.Host.fXStateMask]
-
-%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
-       ; The joy of 32-bit darwin kernels that runs the CPU in 64-bit mode.
-       cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
-       jz      %%host_legacy_mode
-       db      0xea                    ; jmp far .sixtyfourbit_mode
-       dd      %%host_sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
-BITS 64
-%%host_sixtyfourbit_mode:
-       or      eax, eax
-       jz      %%host_sixtyfourbit_fxrstor
-
-       ; XRSTOR
-       mov     edx, [pCpumCpu + CPUMCPU.Host.fXStateMask + 4]
-       o64 xrstor [pXState]
-       jmp     %%host_sixtyfourbit_done
-
-       ; FXRSTOR
-%%host_sixtyfourbit_fxrstor:
-       o64 fxrstor [pXState]
-
-%%host_sixtyfourbit_done:
-       jmp far [%%host_fpret wrt rip]
-%%host_fpret:                          ; 16:32 Pointer to %%host_done.
-       dd      %%host_done, NAME(SUPR0AbsKernelCS)
-BITS 32
-
-%%host_legacy_mode:
-%endif
 
        ;

@@ -341 +254 @@
        mov     pXState, [pCpumCpu + CPUMCPU.Guest.pXStateR0]
        mov     eax, [pCpumCpu + CPUMCPU.Guest.fXStateMask]
-
-%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
-       ; The joy of 32-bit darwin kernels that runs the CPU in 64-bit mode.
-       cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
-       jz      %%guest_legacy_mode
-       db      0xea                    ; jmp far .sixtyfourbit_mode
-       dd      %%guest_sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
-BITS 64
-%%guest_sixtyfourbit_mode:
-       or      eax, eax
-       jz      %%guest_sixtyfourbit_fxsave
-
-       ; XSAVE
-       mov     edx, [pCpumCpu + CPUMCPU.Guest.fXStateMask + 4]
-       o64 xsave [pXState]
-       jmp     %%guest_sixtyfourbit_done
-
-       ; FXSAVE
-%%guest_sixtyfourbit_fxsave:
-       SAVE_32_OR_64_FPU pCpumCpu, pXState, 1
-
-%%guest_sixtyfourbit_done:
-       jmp far [%%guest_fpret wrt rip]
-%%guest_fpret:                         ; 16:32 Pointer to %%guest_done.
-       dd      %%guest_done, NAME(SUPR0AbsKernelCS)
-BITS 32
-
-%%guest_legacy_mode:
-%endif
 
        ;

@@ -438 +322 @@
        mov     pXState, [pCpumCpu + CPUMCPU.Guest.pXStateR0]
        mov     eax, [pCpumCpu + CPUMCPU.Guest.fXStateMask]
-
-%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
-       ; The joy of 32-bit darwin kernels that runs the CPU in 64-bit mode.
-       cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
-       jz      %%guest_legacy_mode
-       db      0xea                    ; jmp far .sixtyfourbit_mode
-       dd      %%guest_sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
-BITS 64
-%%guest_sixtyfourbit_mode:
-       or      eax, eax
-       jz      %%guest_sixtyfourbit_fxrstor
-
-       ; XRSTOR
-       mov     edx, [pCpumCpu + CPUMCPU.Guest.fXStateMask + 4]
-       o64 xrstor [pXState]
-       jmp     %%guest_sixtyfourbit_done
-
-       ; FXRSTOR
-%%guest_sixtyfourbit_fxrstor:
-       RESTORE_32_OR_64_FPU pCpumCpu, pXState, 1
-
-%%guest_sixtyfourbit_done:
-       jmp far [%%guest_fpret wrt rip]
-%%guest_fpret:                         ; 16:32 Pointer to %%guest_done.
-       dd      %%guest_done, NAME(SUPR0AbsKernelCS)
-BITS 32
-
-%%guest_legacy_mode:
-%endif
 
        ;

@@ -561 +416 @@
 
 %ifndef RT_ARCH_AMD64
- %ifdef VBOX_WITH_64_BITS_GUESTS
-  %ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
+ %ifdef VBOX_WITH_64_BITS_GUESTS
 ;;
 ; Saves the host FPU/SSE/AVX state.
…
 align 16
 BEGINPROC cpumR0SaveHostFPUState
+       ;
+       ; Prologue - xAX+xDX must be free for XSAVE/XRSTOR input.
+       ;
+%ifdef RT_ARCH_AMD64
+ %ifdef RT_OS_WINDOWS
+       mov     r11, rcx
+ %else
+       mov     r11, rdi
+ %endif
+ %define pCpumCpu r11
+ %define pXState  r10
+%else
+       push    ebp
+       mov     ebp, esp
+       push    ebx
+       push    esi
+       mov     ebx, dword [ebp + 8]
+ %define pCpumCpu ebx
+ %define pXState  esi
+%endif
+
+       pushf                           ; The darwin kernel can get upset or upset things if an
+       cli                             ; interrupt occurs while we're doing fxsave/fxrstor/cr0.
+       SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX ; xCX is now old CR0 value, don't use!
+
+       CPUMR0_SAVE_HOST
+
+       RESTORE_CR0 xCX
+       or      dword [pCpumCpu + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)
+       popf
+
+%ifdef RT_ARCH_X86
+       pop     esi
+       pop     ebx
+       leave
+%endif
+       xor     eax, eax
+       ret
+%undef pCpumCpu
+%undef pXState
+ENDPROC cpumR0SaveHostFPUState
+ %endif
+%endif
+
+
+;;
+; Saves the guest FPU/SSE/AVX state and restores the host FPU/SSE/AVX state.
+;
+; @returns VINF_SUCCESS (0) in eax.
+; @param   pCpumCpu  x86:[ebp+8] gcc:rdi msc:rcx  CPUMCPU pointer
+;
+align 16
+BEGINPROC cpumR0SaveGuestRestoreHostFPUState
        ;
        ; Prologue - xAX+xDX must be free for XSAVE/XRSTOR input.
…
 %endif
 
+       ;
+       ; Only restore FPU if guest has used it.
+       ;
+       test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USED_FPU
+       jz      .fpu_not_used
+
        pushf                           ; The darwin kernel can get upset or upset things if an
        cli                             ; interrupt occurs while we're doing fxsave/fxrstor/cr0.
        SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX ; xCX is now old CR0 value, don't use!
 
-       CPUMR0_SAVE_HOST
+       CPUMR0_SAVE_GUEST
+       CPUMR0_LOAD_HOST
 
        RESTORE_CR0 xCX
-       or      dword [pCpumCpu + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)
+       and     dword [pCpumCpu + CPUMCPU.fUseFlags], ~CPUM_USED_FPU
        popf
 
+.fpu_not_used:
 %ifdef RT_ARCH_X86
        pop     esi
…
        xor     eax, eax
        ret
-
-%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
-ALIGNCODE(16)
-BITS 64
-.sixtyfourbit_mode:
-       ; Save the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.
-       o64 fxsave [pXstate]
-       jmp far [.fpret wrt rip]
-.fpret:                                ; 16:32 Pointer to .the_end.
-       dd      .done, NAME(SUPR0AbsKernelCS)
-BITS 32
-%endif
 %undef pCpumCpu
 %undef pXState
-ENDPROC cpumR0SaveHostFPUState
-%endif
-%endif
-
-
-;;
-; Saves the guest FPU/SSE/AVX state and restores the host FPU/SSE/AVX state.
-;
-; @returns VINF_SUCCESS (0) in eax.
+ENDPROC cpumR0SaveGuestRestoreHostFPUState
+
+
+;;
+; Restores the host's FPU/SSE/AVX state from pCpumCpu->Host.
+;
+; @returns 0
 ; @param   pCpumCpu  x86:[ebp+8] gcc:rdi msc:rcx  CPUMCPU pointer
 ;
 align 16
-BEGINPROC cpumR0SaveGuestRestoreHostFPUState
+BEGINPROC cpumR0RestoreHostFPUState
        ;
        ; Prologue - xAX+xDX must be free for XSAVE/XRSTOR input.
…
 
        ;
-       ; Only restore FPU if guest has used it.
+       ; Restore FPU if guest has used it.
        ;
        test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USED_FPU
-       jz      .fpu_not_used
+       jz short .fpu_not_used
 
        pushf                           ; The darwin kernel can get upset or upset things if an
        cli                             ; interrupt occurs while we're doing fxsave/fxrstor/cr0.
        SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX ; xCX is now old CR0 value, don't use!
 
-       CPUMR0_SAVE_GUEST
        CPUMR0_LOAD_HOST
 
        RESTORE_CR0 xCX
        and     dword [pCpumCpu + CPUMCPU.fUseFlags], ~CPUM_USED_FPU
        popf
 
 .fpu_not_used:
 %ifdef RT_ARCH_X86
        pop     esi
        pop     ebx
        leave
 %endif
        xor     eax, eax
        ret
-%undef pCpumCpu
-%undef pXState
-ENDPROC cpumR0SaveGuestRestoreHostFPUState
-
-
-;;
-; Restores the host's FPU/SSE/AVX state from pCpumCpu->Host.
-;
-; @returns 0
-; @param   pCpumCpu  x86:[ebp+8] gcc:rdi msc:rcx  CPUMCPU pointer
-;
-align 16
-BEGINPROC cpumR0RestoreHostFPUState
-       ;
-       ; Prologue - xAX+xDX must be free for XSAVE/XRSTOR input.
-       ;
-%ifdef RT_ARCH_AMD64
- %ifdef RT_OS_WINDOWS
-       mov     r11, rcx
- %else
-       mov     r11, rdi
- %endif
- %define pCpumCpu r11
- %define pXState  r10
-%else
-       push    ebp
-       mov     ebp, esp
-       push    ebx
-       push    esi
-       mov     ebx, dword [ebp + 8]
- %define pCpumCpu ebx
- %define pXState  esi
-%endif
-
-       ;
-       ; Restore FPU if guest has used it.
-       ;
-       test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USED_FPU
-       jz short .fpu_not_used
-
-       pushf                           ; The darwin kernel can get upset or upset things if an
-       cli                             ; interrupt occurs while we're doing fxsave/fxrstor/cr0.
-       SAVE_CR0_CLEAR_FPU_TRAPS xCX, xAX ; xCX is now old CR0 value, don't use!
-
-       CPUMR0_LOAD_HOST
-
-       RESTORE_CR0 xCX
-       and     dword [pCpumCpu + CPUMCPU.fUseFlags], ~CPUM_USED_FPU
-       popf
-
-.fpu_not_used:
-%ifdef RT_ARCH_X86
-       pop     esi
-       pop     ebx
-       leave
-%endif
-       xor     eax, eax
-       ret
 %undef pCpumCPu
 %undef pXState
 ENDPROC cpumR0RestoreHostFPUState
 
-
-%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
-;;
-; DECLASM(void) cpumR0SaveDRx(uint64_t *pa4Regs);
-;
-ALIGNCODE(16)
-BEGINPROC cpumR0SaveDRx
-%ifdef RT_ARCH_AMD64
- %ifdef ASM_CALL64_GCC
-       mov     xCX, rdi
- %endif
-%else
-       mov     xCX, dword [esp + 4]
-%endif
-       pushf                           ; Just to be on the safe side.
-       cli
-%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
-       cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
-       jz      .legacy_mode
-       db      0xea                    ; jmp far .sixtyfourbit_mode
-       dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
-.legacy_mode:
-%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
-
-       ;
-       ; Do the job.
-       ;
-       mov     xAX, dr0
-       mov     xDX, dr1
-       mov     [xCX],         xAX
-       mov     [xCX + 8 * 1], xDX
-       mov     xAX, dr2
-       mov     xDX, dr3
-       mov     [xCX + 8 * 2], xAX
-       mov     [xCX + 8 * 3], xDX
-
-.done:
-       popf
-       ret
-
-%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
-ALIGNCODE(16)
-BITS 64
-.sixtyfourbit_mode:
-       and     ecx, 0ffffffffh
-
-       mov     rax, dr0
-       mov     rdx, dr1
-       mov     r8,  dr2
-       mov     r9,  dr3
-       mov     [rcx],         rax
-       mov     [rcx + 8 * 1], rdx
-       mov     [rcx + 8 * 2], r8
-       mov     [rcx + 8 * 3], r9
-       jmp far [.fpret wrt rip]
-.fpret:                                ; 16:32 Pointer to .the_end.
-       dd      .done, NAME(SUPR0AbsKernelCS)
-BITS 32
-%endif
-ENDPROC cpumR0SaveDRx
-
-
-;;
-; DECLASM(void) cpumR0LoadDRx(uint64_t const *pa4Regs);
-;
-ALIGNCODE(16)
-BEGINPROC cpumR0LoadDRx
-%ifdef RT_ARCH_AMD64
- %ifdef ASM_CALL64_GCC
-       mov     xCX, rdi
- %endif
-%else
-       mov     xCX, dword [esp + 4]
-%endif
-       pushf                           ; Just to be on the safe side.
-       cli
-%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
-       cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
-       jz      .legacy_mode
-       db      0xea                    ; jmp far .sixtyfourbit_mode
-       dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
-.legacy_mode:
-%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
-
-       ;
-       ; Do the job.
-       ;
-       mov     xAX, [xCX]
-       mov     xDX, [xCX + 8 * 1]
-       mov     dr0, xAX
-       mov     dr1, xDX
-       mov     xAX, [xCX + 8 * 2]
-       mov     xDX, [xCX + 8 * 3]
-       mov     dr2, xAX
-       mov     dr3, xDX
-
-.done:
-       popf
-       ret
-
-%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
-ALIGNCODE(16)
-BITS 64
-.sixtyfourbit_mode:
-       and     ecx, 0ffffffffh
-
-       mov     rax, [rcx]
-       mov     rdx, [rcx + 8 * 1]
-       mov     r8,  [rcx + 8 * 2]
-       mov     r9,  [rcx + 8 * 3]
-       mov     dr0, rax
-       mov     dr1, rdx
-       mov     dr2, r8
-       mov     dr3, r9
-       jmp far [.fpret wrt rip]
-.fpret:                                ; 16:32 Pointer to .the_end.
-       dd      .done, NAME(SUPR0AbsKernelCS)
-BITS 32
-%endif
-ENDPROC cpumR0LoadDRx
-
-%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
trunk/src/VBox/VMM/VMMR0/CPUMR0UnusedA.asm
(r54674 → r57446)

@@ -32 +32 @@
 
 
-;*******************************************************************************
-;* External Symbols
-;*******************************************************************************
-%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
-extern NAME(SUPR0AbsIs64bit)
-extern NAME(SUPR0Abs64bitKernelCS)
-extern NAME(SUPR0Abs64bitKernelSS)
-extern NAME(SUPR0Abs64bitKernelDS)
-extern NAME(SUPR0AbsKernelCS)
-extern NAME(g_fCPUMIs64bitHost)
-%endif
-
 
 ;;

@@ -63 +51 @@ cpumR0LoadFPU
        mov     xDX, dword [esp + 4]
 %endif
-%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
-       cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
-       jz      .legacy_mode
-       db      0xea                    ; jmp far .sixtyfourbit_mode
-       dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
-.legacy_mode:
-%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
 
        fxrstor [xDX + CPUMCTX.fpu]
 .done:
        ret
-
-%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
-ALIGNCODE(16)
-BITS 64
-.sixtyfourbit_mode:
-       and     edx, 0ffffffffh
-       fxrstor [rdx + CPUMCTX.fpu]
-       jmp far [.fpret wrt rip]
-.fpret:                                ; 16:32 Pointer to .the_end.
-       dd      .done, NAME(SUPR0AbsKernelCS)
-BITS 32
-%endif
 ENDPROC cpumR0LoadFPU

@@ -107 +76 @@ cpumR0SaveFPU
        mov     xDX, dword [esp + 4]
 %endif
-%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
-       cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
-       jz      .legacy_mode
-       db      0xea                    ; jmp far .sixtyfourbit_mode
-       dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
-.legacy_mode:
-%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
        fxsave  [xDX + CPUMCTX.fpu]
 .done:
        ret
-
-%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
-ALIGNCODE(16)
-BITS 64
-.sixtyfourbit_mode:
-       and     edx, 0ffffffffh
-       fxsave  [rdx + CPUMCTX.fpu]
-       jmp far [.fpret wrt rip]
-.fpret:                                ; 16:32 Pointer to .the_end.
-       dd      .done, NAME(SUPR0AbsKernelCS)
-BITS 32
-%endif
 ENDPROC cpumR0SaveFPU

@@ -150 +100 @@ cpumR0LoadXMM
        mov     xDX, dword [esp + 4]
 %endif
-%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
-       cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
-       jz      .legacy_mode
-       db      0xea                    ; jmp far .sixtyfourbit_mode
-       dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
-.legacy_mode:
-%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
 
        movdqa  xmm0, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0]
…
        movdqa  xmm15, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15]
 %endif
-.done:
-       ret
-
-%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
-ALIGNCODE(16)
-BITS 64
-.sixtyfourbit_mode:
-       and     edx, 0ffffffffh
-
-       movdqa  xmm0,  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0]
-       movdqa  xmm1,  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1]
-       movdqa  xmm2,  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*2]
-       movdqa  xmm3,  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*3]
-       movdqa  xmm4,  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*4]
-       movdqa  xmm5,  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*5]
-       movdqa  xmm6,  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*6]
-       movdqa  xmm7,  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*7]
-
-       test    qword [rdx + CPUMCTX.msrEFER], MSR_K6_EFER_LMA
-       jz      .sixtyfourbit_done
-
-       movdqa  xmm8,  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8]
-       movdqa  xmm9,  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*9]
-       movdqa  xmm10, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*10]
-       movdqa  xmm11, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*11]
-       movdqa  xmm12, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*12]
-       movdqa  xmm13, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*13]
-       movdqa  xmm14, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14]
-       movdqa  xmm15, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15]
-
-.sixtyfourbit_done:
-       jmp far [.fpret wrt rip]
-.fpret:                                ; 16:32 Pointer to .the_end.
-       dd      .done, NAME(SUPR0AbsKernelCS)
-BITS 32
-%endif
+
+.done:
+       ret
 ENDPROC cpumR0LoadXMM

@@ -236 +147 @@ cpumR0SaveXMM
        mov     xDX, dword [esp + 4]
 %endif
-%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
-       cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
-       jz      .legacy_mode
-       db      0xea                    ; jmp far .sixtyfourbit_mode
-       dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
-.legacy_mode:
-%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
 
        movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0], xmm0
…
        movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14], xmm14
        movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15], xmm15
-
-%endif
-.done:
-       ret
-
-%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
-ALIGNCODE(16)
-BITS 64
-.sixtyfourbit_mode:
-       and     edx, 0ffffffffh
-
-       movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0], xmm0
-       movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1], xmm1
-       movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*2], xmm2
-       movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*3], xmm3
-       movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*4], xmm4
-       movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*5], xmm5
-       movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*6], xmm6
-       movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*7], xmm7
-
-       test    qword [rdx + CPUMCTX.msrEFER], MSR_K6_EFER_LMA
-       jz      .sixtyfourbit_done
-
-       movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8], xmm8
-       movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*9], xmm9
-       movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*10], xmm10
-       movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*11], xmm11
-       movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*12], xmm12
-       movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*13], xmm13
-       movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14], xmm14
-       movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15], xmm15
-
-.sixtyfourbit_done:
-       jmp far [.fpret wrt rip]
-.fpret:                                ; 16:32 Pointer to .the_end.
-       dd      .done, NAME(SUPR0AbsKernelCS)
-BITS 32
-%endif
+%endif
+
+.done:
+       ret
 ENDPROC cpumR0SaveXMM
trunk/src/VBox/VMM/VMMR0/TRPMR0.cpp
(r57358 → r57446)

@@ -31 +31 @@
 
 
+#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
+# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
+#endif
+
+
 /**
  * Dispatches an interrupt that arrived while we were in the guest context.

@@ -57 +62 @@
 
 #else /* The complicated way: */
-
-# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
-    /*
-     * Check if we're in long mode or not.
-     */
-    if (   (ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
-        && (ASMRdMsr(MSR_K6_EFER) & MSR_K6_EFER_LMA))
-    {
-        trpmR0DispatchHostInterruptSimple(uActiveVector);
-        return;
-    }
-# endif
 
     /*
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
(r57429 → r57446)

@@ -61 +61 @@
 # pragma intrinsic(_AddressOfReturnAddress)
 #endif
+
+#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
+# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
+#endif
+
 
 
trunk/src/VBox/VMM/VMMR3/CPUM.cpp
(r57358 → r57446)

@@ -2077 +2077 @@
      */
 #if HC_ARCH_BITS == 32
-# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
-    if (!(pCtx->efer & MSR_K6_EFER_LMA))
-# endif
-    {
-        pHlp->pfnPrintf(pHlp,
-            "eax=xxxxxxxx ebx=%08x ecx=xxxxxxxx edx=xxxxxxxx esi=%08x edi=%08x\n"
-            "eip=xxxxxxxx esp=%08x ebp=%08x iopl=%d %31s\n"
-            "cs=%04x ds=%04x es=%04x fs=%04x gs=%04x eflags=%08x\n"
-            "cr0=%08RX64 cr2=xxxxxxxx cr3=%08RX64 cr4=%08RX64 gdtr=%08x:%04x ldtr=%04x\n"
-            "dr[0]=%08RX64 dr[1]=%08RX64x dr[2]=%08RX64 dr[3]=%08RX64x dr[6]=%08RX64 dr[7]=%08RX64\n"
-            "SysEnter={cs=%04x eip=%08x esp=%08x}\n"
-            ,
-            /*pCtx->eax,*/ pCtx->ebx, /*pCtx->ecx, pCtx->edx,*/ pCtx->esi, pCtx->edi,
-            /*pCtx->eip,*/ pCtx->esp, pCtx->ebp, X86_EFL_GET_IOPL(efl), szEFlags,
-            pCtx->cs, pCtx->ds, pCtx->es, pCtx->fs, pCtx->gs, efl,
-            pCtx->cr0, /*pCtx->cr2,*/ pCtx->cr3, pCtx->cr4,
-            pCtx->dr0, pCtx->dr1, pCtx->dr2, pCtx->dr3, pCtx->dr6, pCtx->dr7,
-            (uint32_t)pCtx->gdtr.uAddr, pCtx->gdtr.cb, pCtx->ldtr,
-            pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp);
-    }
-# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
-    else
-# endif
-#endif
-#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
-    {
-        pHlp->pfnPrintf(pHlp,
-            "rax=xxxxxxxxxxxxxxxx rbx=%016RX64 rcx=xxxxxxxxxxxxxxxx\n"
-            "rdx=xxxxxxxxxxxxxxxx rsi=%016RX64 rdi=%016RX64\n"
-            "rip=xxxxxxxxxxxxxxxx rsp=%016RX64 rbp=%016RX64\n"
-            " r8=xxxxxxxxxxxxxxxx  r9=xxxxxxxxxxxxxxxx r10=%016RX64\n"
-            "r11=%016RX64 r12=%016RX64 r13=%016RX64\n"
-            "r14=%016RX64 r15=%016RX64\n"
-            "iopl=%d %31s\n"
-            "cs=%04x ds=%04x es=%04x fs=%04x gs=%04x eflags=%08RX64\n"
-            "cr0=%016RX64 cr2=xxxxxxxxxxxxxxxx cr3=%016RX64\n"
-            "cr4=%016RX64 ldtr=%04x tr=%04x\n"
-            "dr[0]=%016RX64 dr[1]=%016RX64 dr[2]=%016RX64\n"
-            "dr[3]=%016RX64 dr[6]=%016RX64 dr[7]=%016RX64\n"
-            "gdtr=%016RX64:%04x idtr=%016RX64:%04x\n"
-            "SysEnter={cs=%04x eip=%08x esp=%08x}\n"
-            "FSbase=%016RX64 GSbase=%016RX64 efer=%08RX64\n"
-            ,
-            /*pCtx->rax,*/ pCtx->rbx, /*pCtx->rcx,
-            pCtx->rdx,*/ pCtx->rsi, pCtx->rdi,
-            /*pCtx->rip,*/ pCtx->rsp, pCtx->rbp,
-            /*pCtx->r8, pCtx->r9,*/ pCtx->r10,
-            pCtx->r11, pCtx->r12, pCtx->r13,
-            pCtx->r14, pCtx->r15,
-            X86_EFL_GET_IOPL(efl), szEFlags,
-            pCtx->cs, pCtx->ds, pCtx->es, pCtx->fs, pCtx->gs, efl,
-            pCtx->cr0, /*pCtx->cr2,*/ pCtx->cr3,
-            pCtx->cr4, pCtx->ldtr, pCtx->tr,
-            pCtx->dr0, pCtx->dr1, pCtx->dr2,
-            pCtx->dr3, pCtx->dr6, pCtx->dr7,
-            pCtx->gdtr.uAddr, pCtx->gdtr.cb, pCtx->idtr.uAddr, pCtx->idtr.cb,
-            pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp,
-            pCtx->FSbase, pCtx->GSbase, pCtx->efer);
-    }
+    pHlp->pfnPrintf(pHlp,
+        "eax=xxxxxxxx ebx=%08x ecx=xxxxxxxx edx=xxxxxxxx esi=%08x edi=%08x\n"
+        "eip=xxxxxxxx esp=%08x ebp=%08x iopl=%d %31s\n"
+        "cs=%04x ds=%04x es=%04x fs=%04x gs=%04x eflags=%08x\n"
+        "cr0=%08RX64 cr2=xxxxxxxx cr3=%08RX64 cr4=%08RX64 gdtr=%08x:%04x ldtr=%04x\n"
+        "dr[0]=%08RX64 dr[1]=%08RX64x dr[2]=%08RX64 dr[3]=%08RX64x dr[6]=%08RX64 dr[7]=%08RX64\n"
+        "SysEnter={cs=%04x eip=%08x esp=%08x}\n"
+        ,
+        /*pCtx->eax,*/ pCtx->ebx, /*pCtx->ecx, pCtx->edx,*/ pCtx->esi, pCtx->edi,
+        /*pCtx->eip,*/ pCtx->esp, pCtx->ebp, X86_EFL_GET_IOPL(efl), szEFlags,
+        pCtx->cs, pCtx->ds, pCtx->es, pCtx->fs, pCtx->gs, efl,
+        pCtx->cr0, /*pCtx->cr2,*/ pCtx->cr3, pCtx->cr4,
+        pCtx->dr0, pCtx->dr1, pCtx->dr2, pCtx->dr3, pCtx->dr6, pCtx->dr7,
+        (uint32_t)pCtx->gdtr.uAddr, pCtx->gdtr.cb, pCtx->ldtr,
+        pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp);
+#else
+    pHlp->pfnPrintf(pHlp,
+        "rax=xxxxxxxxxxxxxxxx rbx=%016RX64 rcx=xxxxxxxxxxxxxxxx\n"
+        "rdx=xxxxxxxxxxxxxxxx rsi=%016RX64 rdi=%016RX64\n"
+        "rip=xxxxxxxxxxxxxxxx rsp=%016RX64 rbp=%016RX64\n"
+        " r8=xxxxxxxxxxxxxxxx  r9=xxxxxxxxxxxxxxxx r10=%016RX64\n"
+        "r11=%016RX64 r12=%016RX64 r13=%016RX64\n"
+        "r14=%016RX64 r15=%016RX64\n"
+        "iopl=%d %31s\n"
+        "cs=%04x ds=%04x es=%04x fs=%04x gs=%04x eflags=%08RX64\n"
+        "cr0=%016RX64 cr2=xxxxxxxxxxxxxxxx cr3=%016RX64\n"
+        "cr4=%016RX64 ldtr=%04x tr=%04x\n"
+        "dr[0]=%016RX64 dr[1]=%016RX64 dr[2]=%016RX64\n"
+        "dr[3]=%016RX64 dr[6]=%016RX64 dr[7]=%016RX64\n"
+        "gdtr=%016RX64:%04x idtr=%016RX64:%04x\n"
+        "SysEnter={cs=%04x eip=%08x esp=%08x}\n"
+        "FSbase=%016RX64 GSbase=%016RX64 efer=%08RX64\n"
+        ,
+        /*pCtx->rax,*/ pCtx->rbx, /*pCtx->rcx,
+        pCtx->rdx,*/ pCtx->rsi, pCtx->rdi,
+        /*pCtx->rip,*/ pCtx->rsp, pCtx->rbp,
+        /*pCtx->r8, pCtx->r9,*/ pCtx->r10,
+        pCtx->r11, pCtx->r12, pCtx->r13,
+        pCtx->r14, pCtx->r15,
+        X86_EFL_GET_IOPL(efl), szEFlags,
+        pCtx->cs, pCtx->ds, pCtx->es, pCtx->fs, pCtx->gs, efl,
+        pCtx->cr0, /*pCtx->cr2,*/ pCtx->cr3,
+        pCtx->cr4, pCtx->ldtr, pCtx->tr,
+        pCtx->dr0, pCtx->dr1, pCtx->dr2,
+        pCtx->dr3, pCtx->dr6, pCtx->dr7,
+        pCtx->gdtr.uAddr, pCtx->gdtr.cb, pCtx->idtr.uAddr, pCtx->idtr.cb,
+        pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp,
+        pCtx->FSbase, pCtx->GSbase, pCtx->efer);
 #endif
 }
trunk/src/VBox/VMM/VMMR3/PGM.cpp
(r57358 → r57446)

@@ -1654 +1654 @@
         case SUPPAGINGMODE_AMD64_NX:
         case SUPPAGINGMODE_AMD64_GLOBAL_NX:
-#ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
             if (ARCH_BITS != 64)
             {
…
                 return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;
             }
-#endif
             break;
         default:
trunk/src/VBox/VMM/include/CPUMInternal.h
(r55466 → r57446)

@@ -111 +111 @@
 #define CPUM_USE_SUPPORTS_LONGMODE      RT_BIT(20)
 /** @} */
-
-/* Sanity check. */
-#ifndef VBOX_FOR_DTRACE_LIB
-#if defined(VBOX_WITH_HYBRID_32BIT_KERNEL) && (HC_ARCH_BITS != 32 || R0_ARCH_BITS != 32)
-# error "VBOX_WITH_HYBRID_32BIT_KERNEL is only for 32 bit builds."
-#endif
-#endif
 
 

@@ -202 +195 @@
 /**
  * The saved host CPU state.
- *
- * @remark  The special VBOX_WITH_HYBRID_32BIT_KERNEL checks here are for the 10.4.x series
- *          of Mac OS X where the OS is essentially 32-bit but the cpu mode can be 64-bit.
  */
 typedef struct CPUMHOSTCTX

@@ -210 +200 @@
     /** General purpose register, selectors, flags and more
      * @{ */
-#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+#if HC_ARCH_BITS == 64
     /** General purpose register ++
      * { */

@@ -264 +254 @@
     /** @} */
 
-#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+#if HC_ARCH_BITS == 32
     /** Control registers.
      * @{ */

@@ -309 +299 @@
     uint8_t         auPadding[20];
 
-#elif HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+#elif HC_ARCH_BITS == 64
 
     /** Control registers.

@@ -352 +342 @@
 
     /* padding to get 32byte aligned size */
-# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
-    uint8_t         auPadding[52];
-# else
     uint8_t         auPadding[4];
-# endif
 
 #else
-# error HC_ARCH_BITS not defined
+# error HC_ARCH_BITS not defined or unsupported
 #endif

@@ -558 +544 @@
 DECLASM(void)       cpumR0SetMXCSR(uint32_t u32MXCSR);
 DECLASM(uint32_t)   cpumR0GetMXCSR(void);
-DECLASM(void)       cpumR0LoadDRx(uint64_t const *pa4Regs);
-DECLASM(void)       cpumR0SaveDRx(uint64_t *pa4Regs);
 #endif
trunk/src/VBox/VMM/include/CPUMInternal.mac
(r56286 → r57446)

@@ -63 +63 @@
 %define CPUM_HANDLER_CTXCORE_IN_EBP RT_BIT(31)
 
-
-;; if anyone figures how to do %if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) in
-; nasm please tell / fix this hack.
-%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
- %define fVBOX_WITH_HYBRID_32BIT_KERNEL 1
-%else
- %define fVBOX_WITH_HYBRID_32BIT_KERNEL 0
-%endif
 
 struc CPUM

@@ -249 +241 @@
     alignb 64
     .Host               resb 0
-%if HC_ARCH_BITS == 64 || fVBOX_WITH_HYBRID_32BIT_KERNEL
+%if HC_ARCH_BITS == 64
     ;.Host.rax          resq 1 - scratch
     .Host.rbx           resq 1

@@ -295 +287 @@
     .Host.csPadding     resw 1
 
-%if HC_ARCH_BITS == 32 && fVBOX_WITH_HYBRID_32BIT_KERNEL == 0
+%if HC_ARCH_BITS == 32
     .Host.cr0           resd 1
     ;.Host.cr2          resd 1 - scratch

@@ -354 +346 @@
     .Host.GSbase        resq 1
     .Host.efer          resq 1
-%if fVBOX_WITH_HYBRID_32BIT_KERNEL
-    .Host.auPadding     resb 54
-%else
     .Host.auPadding     resb 4
-%endif
 %endif ; 64-bit
     .Host.pXStateRC     RTRCPTR_RES 1
trunk/src/VBox/VMM/include/HMInternal.h
(r57429 → r57446)

@@ -33 +33 @@
 #include <iprt/avl.h>
 #include <iprt/string.h>
+
+#if defined(RT_OS_DARWIN) && HC_ARCH_BITS == 32
+# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
+#endif
 
 #if HC_ARCH_BITS == 64 || defined (VBOX_WITH_64_BITS_GUESTS)
trunk/src/VBox/VMM/include/HMInternal.mac
(r56287 → r57446)

@@ -3 +3 @@
 ; HM - Internal header file.
 ;
 
+;
 ; Copyright (C) 2006-2015 Oracle Corporation

@@ -17 +18 @@
 %if HC_ARCH_BITS == 32
- %ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
-  %define VMX_USE_CACHED_VMCS_ACCESSES
- %endif
+ %define VMX_USE_CACHED_VMCS_ACCESSES
 %endif
trunk/src/VBox/VMM/include/PGMInternal.h
(r56384 → r57446)

@@ -68 +68 @@
  * manages the page tables for intermediate switcher context, that's all done in
  * ring-3.
- *
- * On 32-bit darwin (hybrid kernel) we do 64-bit guest support differently, so
- * there we can safely work without mappings if we don't compile in raw-mode.
  */
 #if defined(IN_RING0) \
   || (   !defined(VBOX_WITH_RAW_MODE) \
       && (   HC_ARCH_BITS != 32 \
-          || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) \
           || !defined(VBOX_WITH_64_BITS_GUESTS) \
          ) \
trunk/src/VBox/VMM/include/VMMInternal.h
(r56287 → r57446)

@@ -29 +29 @@
 # error "Not in VMM! This is an internal header!"
 #endif
+#if defined(RT_OS_DARWIN) && HC_ARCH_BITS == 32
+# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
+#endif
+
 
 
trunk/src/VBox/VMM/testcase/Makefile.kmk
(r56749 → r57446)

@@ -146 +146 @@
 tstVMStructRC_TEMPLATE = VBoxRcExe
 tstVMStructRC_DEFS = IN_VMM_RC IN_DIS IN_RT_RC VBOX_WITH_RAW_MODE
-ifeq ($(KBUILD_TARGET).$(KBUILD_TARGET_ARCH),darwin.x86)
- tstVMStructRC_DEFS += \
-	VBOX_WITH_2X_4GB_ADDR_SPACE VBOX_WITH_2X_4GB_ADDR_SPACE_IN_RC \
-	VBOX_WITH_HYBRID_32BIT_KERNEL VBOX_WITH_HYBRID_32BIT_KERNEL_IN_RC
-endif
 ifdef VBOX_WITH_R0_LOGGING
  tstVMStructRC_DEFS += VBOX_WITH_R0_LOGGING

@@ -165 +160 @@
 tstVMStructSize_TEMPLATE= VBOXR3AUTOTST
 tstVMStructSize_DEFS = IN_VMM_R3 IN_DIS
-ifeq ($(KBUILD_TARGET).$(KBUILD_TARGET_ARCH),darwin.x86)
- tstVMStructSize_DEFS += \
-	VBOX_WITH_2X_4GB_ADDR_SPACE VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R3 \
-	VBOX_WITH_HYBRID_32BIT_KERNEL VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R3
-endif
 ifdef VBOX_WITH_RAW_MODE
  tstVMStructSize_DEFS += VBOX_WITH_RAW_MODE

@@ -478 +468 @@
 tstVMStructDTrace_TEMPLATE= VBOXR3AUTOTST
 tstVMStructDTrace_DEFS = IN_VMM_R3 IN_DIS
-ifeq ($(KBUILD_TARGET).$(KBUILD_TARGET_ARCH),darwin.x86)
- tstVMStructDTrace_DEFS += \
-	VBOX_WITH_2X_4GB_ADDR_SPACE VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R3 \
-	VBOX_WITH_HYBRID_32BIT_KERNEL VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R3
-endif
 ifdef VBOX_WITH_RAW_MODE
  tstVMStructDTrace_DEFS += VBOX_WITH_RAW_MODE