Timestamp:
- Apr 30, 2007 12:03:47 PM (18 years ago)
Location:
- trunk/src
Files:
- 3 deleted
- 32 edited
- 12 copied
Legend:
- Unmodified (no prefix)
- Added (+)
- Removed (-)
trunk/src/Makefile
r1697 → r2422:

 endif

-SUBDIRS += bldprogs libs VBox
-#ifeq ($(filter linux.x86 l4.x86 win.x86 os2.x86,$(BUILD_TARGET).$(BUILD_TARGET_ARCH)),)
-SUBDIRS += recompiler/new
-#else
-# SUBDIRS += recompiler
-#endif
+SUBDIRS += bldprogs libs VBox recompiler
 ifneq ($(wildcard apps),)
 SUBDIRS += apps
trunk/src/recompiler/InnoTek/Makefile.kmk
r1 → r2422:

 DEPTH = ../../..
 include $(PATH_KBUILD)/up.kmk
-
trunk/src/recompiler/InnoTek/config-host.h
r1 → r2422:

+/* $Id$ */
 /** @file
  * Innotek Host Config - Maintained by hand
…
  */

-#define HOST_I386 1
-#ifdef __WIN32__
-# define CONFIG_WIN32 1
-#elif defined(__OS2__)
-# define CONFIG_OS2
-#elif defined(__DARWIN__)
-# define CONFIG_DARWIN
+
+#if defined(__amd64__) || defined(HOST_X86_64) /* latter, for dyngen on win64. */
+# define HOST_X86_64 1
+# define HOST_LONG_BITS 64
 #else
-# define HAVE_BYTESWAP_H 1
+# define HOST_I386 1
+# define HOST_LONG_BITS 32
+# ifdef __WIN32__
+#  define CONFIG_WIN32 1
+# elif defined(__OS2__)
+#  define CONFIG_OS2
+# elif defined(__DARWIN__)
+#  define CONFIG_DARWIN
+# elif defined(__FREEBSD__) || defined(__NETBSD__) || defined(__OPENBSD__)
+/*#  define CONFIG_BSD*/
+# elif defined(__SOLARIS__)
+/*#  define CONFIG_SUN*/
+# elif !defined(IPRT_NO_CRT)
+#  define HAVE_BYTESWAP_H 1
+# endif
 #endif
-#define CONFIG_SOFTMMU 1
+#define QEMU_VERSION "0.8.1"
+#define CONFIG_UNAME_RELEASE ""
+#define CONFIG_QEMU_SHAREDIR "."

-#define CONFIG_SDL 1
-#define CONFIG_SLIRP 1
-
-#ifdef __LINUX__
-#define CONFIG_GDBSTUB 1
-#endif
-/* #define HAVE_GPROF 1 */
-/* #define CONFIG_STATIC 1 */
-#define QEMU_VERSION "0.6.1"
-#define CONFIG_QEMU_SHAREDIR "."
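The reworked config-host.h now picks the host word size at preprocess time (HOST_LONG_BITS is 64 on amd64 hosts, 32 everywhere else) instead of hard-coding HOST_I386. A minimal sketch of how such a define can be sanity-checked at compile time, treating it as the host register width the way this header does (the check itself is mine, not part of the changeset):

    /* Sketch (not from the changeset): verify HOST_LONG_BITS matches the ABI.
     * A negative array size forces a compile error when the guess is wrong. */
    #include "config-host.h"   /* assumed to define HOST_LONG_BITS as above */

    typedef char host_long_bits_check[
        (HOST_LONG_BITS == 8 * (int)sizeof(void *)) ? 1 : -1];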
trunk/src/recompiler/InnoTek/config.h
r1 → r2422:

+/* $Id$ */
 /** @file
  * Innotek Config - Maintained by hand
…
 #define TARGET_I386 1
 #define CONFIG_SOFTMMU 1
+
trunk/src/recompiler/Makefile.kmk
r1 → r2422:

+# $Id$
 ## @file
-#
-# !GNU MAKE!
+# The Recompiler Makefile.
+#
+# There are a few of complicating factors here, esp. on AMD64 systems:
+#
+#  * op.c doesn't compile work correctly with gcc 4. For this we've
+#    checked in op.S, which is the reason why we don't compile op.c
+#    directly but always compile via the assembly file.s
+#  * On 64-bit Windows we lack a compiler and have to resort to a
+#    linux cross compiler building an ELF relocatable module which
+#    we then load using a wrapper module. Thus the REM_MOD mess.
+#  * On platforms using the 64-bit GCC ABI, we're not allowed to
+#    generate non-PIC shared objects, and op.c requires the code
+#    to be non-PIC. We apply the same trick as we developed for
+#    64-bit windows.
+#
+
 #
 # Copyright (C) 2006 InnoTek Systemberatung GmbH
…
 include $(PATH_KBUILD)/header.kmk

+
 # todo this is a BUILD_PLATFORM binary, to a target binary!
-ifeq ($(filter darwin,$(BUILD_TARGET)),)
-ifeq ($(VBOX_USING_GCC4),)
-BLDPROGS = dyngen
-endif
-DLLS = VBoxREM
-ifeq ($(BUILD_TARGET_ARCH),amd64)
-SYSMODS = VBoxREM2
-REM_MOD = VBoxREM2
-else
-REM_MOD = VBoxREM
-endif
-endif # !darwin
+BLDPROGS = dyngen
+ifeq ($(BUILD_TARGET_ARCH),amd64)
+SYSMODS = VBoxREM2
+REM_MOD = VBoxREM2
+else
+REM_MOD = VBoxREM
+endif
+DLLS = VBoxREM
 IMPORT_LIBS = VBoxREMImp
-GPLEXPORTS = qemu-source-drop

 OTHER_CLEAN = \
…
    $(PATH_$(REM_MOD))/opc.h \
    $(PATH_$(REM_MOD))/gen-op.h \
-   $(PATH_$(REM_MOD))/opc.h \
-   $(PATH_TARGET)/VBoxREMImp.c
-
-#DEFS += DEBUG_DISAS
-
-# private hack for gcc 4.1
-ifeq ($(USERNAME).$(BUILD_TARGET),bird.linux)
-TOOL_GCC3_CC = gcc-3.4.6
-override VBOX_GCC_Wno-variadic-macros=
-override VBOX_USING_GCC4=
-endif
-
+   $(PATH_$(REM_MOD))/opc.h
+
+DEFS.amd64 += REM_PHYS_ADDR_IN_TLB
+
+#
+# L4 must use the no-crt path because it's lacking math stuff it seems...
+# Darwin must use the non-crt path because it can't compile op.c nativly.
+# All the AMD64 target must use the no-crt path because ELF doesn't like op.c
+# when stuffed into a shared library and windows doesn't have 64-bit gcc (yet).
+#
+ifeq ($(filter-out l4 darwin freebsd,$(BUILD_TARGET)),)
+REM_USE_NOCRT := 1
+endif
+ifeq ($(BUILD_TARGET_ARCH),amd64)
+REM_USE_NOCRT := 1
+endif
+
+
+#
+# The dyngen build tool.
+#
 ifeq ($(BUILD_PLATFORM),win)
 dyngen_TOOL = MINGW32
…
 dyngen_BLD_TRG_ARCH = x86
 dyngen_BLD_TRG_CPU = i386
-dyngen_CFLAGS = -Wall -g -fno-strict-aliasing
+dyngen_CFLAGS = -Wall -g -fno-strict-aliasing
+ifeq ($(BUILD_TARGET_ARCH),amd64)
+dyngen_DEFS += HOST_X86_64=1
+endif
 else
-dyngen_TEMPLATE = VBOXBLDPRO
-endif
+dyngen_TEMPLATE = VBOXBLDPROG
+endif
+dyngen_CFLAGS += -Wno-missing-prototypes -Wno-missing-declarations
 dyngen_INCS = \
    InnoTek \
    target-i386 \
-   fpu \
-   $(PATH_ROOT)/ ## @todo what is $(PATH_ROOT) doing here?
+   fpu
 dyngen_SOURCES = dyngen.c


 #
-# The VBoxREM or VBoxREM2 DLL/SO.
-#
-$(REM_MOD)_TOOL = GCC3
-$(REM_MOD)_TOOL.win.x86 = MINGW32
-$(REM_MOD)_TOOL.win.amd64= XGCCAMD64LINUX
-$(REM_MOD)_SDKS.win.x86 = W32API ## @todo do we really need this now?
-$(REM_MOD)_ASFLAGS = -x assembler-with-cpp ## @todo didn't I make this default already?
-$(REM_MOD)_SYSSUFF = .rel # amd64
-
-$(REM_MOD)_CFLAGS = -Wall -g
-ifdef ($(BUILD_TARGET),win64) # with -O1 and higher, it generates incorrect code for double and long double constants. ## @todo didn't I fix this yet?
-REMNoCRT_CFLAGS.release = -O0
-$(REM_MOD)_CFLAGS.amd64 = -mcmodel=medium -fno-common -O0 -fno-strict-aliasing -fno-math-errno -fno-peephole2
-else
-$(REM_MOD)_CFLAGS.amd64 = -mcmodel=medium -fno-common -O2 -fno-strict-aliasing
-endif
-$(REM_MOD)_CFLAGS.debug = -O0
-ifdef ($(BUILD_TARGET_ARCH),x86)
-$(REM_MOD)_CFLAGS.release += -fomit-frame-pointer -fno-gcse
-endif
-$(REM_MOD)_CFLAGS.profile = $($(REM_MOD)_CFLAGS.release)
-$(REM_MOD)_CFLAGS.kprofile = $($(REM_MOD)_CFLAGS.release)
-$(REM_MOD)_CFLAGS.l4 = -nostdinc
-$(REM_MOD)_INCS = \
+# The VBoxREM.[dll|so|..] or VBoxREM2.rel.
+#
+$(REM_MOD)_DEFS = IN_REM_R3 REM_INCLUDE_CPU_H
+#$(REM_MOD)_DEFS += DEBUG_ALL_LOGGING DEBUG_DISAS DEBUG_PCALL DEBUG_EXEC DEBUG_FLUSH DEBUG_IOPORT DEBUG_SIGNAL DEBUG_TLB_CHECK DEBUG_TB_INVALIDATE DEBUG_TLB # Enables huge amounts of debug logging.
+
+$(REM_MOD)_INCS = \
    InnoTek \
-   InnoTek/crt
+   InnoTek/crt \
    target-i386 \
    fpu \
    $(PATH_$(REM_MOD)) \
    $(PATH_ROOT)/src/VBox/VMM
-ifeq ($(BUILD_TARGET),l4)
-$(REM_MOD)_INCS += $(VBOX_L4_GCC3_INCS) $(L4_INCDIR)
-endif
-$(REM_MOD)_DEFS = IN_RING3 IN_REM_R3 REM_INCLUDE_CPU_H #LOG_USE_C99
-#$(REM_MOD)_DEFS += DEBUG_PCALL DEBUG_EXEC DEBUG_FLUSH DEBUG_IOPORT DEBUG_SIGNAL DEBUG_TLB_CHECK DEBUG_TB_INVALIDATE DEBUG_TLB # Enables huge amounts of debug logging.
-# these defines are probably all irrelevant now:
-$(REM_MOD)_DEFS += _GNU_SOURCE _FILE_OFFSET_BITS=64 _LARGEFILE_SOURCE _REENTRANT
-ifeq ($(VBOX_USING_GCC4),)
-$(REM_MOD)_SOURCES = \
+
+$(REM_MOD)_SOURCES = \
    VBoxRecompiler.c \
    cpu-exec.c \
    exec.c \
    translate-all.c \
+   translate-op.c \
+   fpu/softfloat-native.c \
    target-i386/helper.c \
    target-i386/helper2.c \
-   target-i386/translate.c
-#  translate-op.c \
-#  fpu/softfloat-native.c \
-#  InnoTek/testmath.c
-
-# ***hacking***
-# ifeq ($(filter-out win os2,$(BUILD_TARGET)),)
-$(REM_MOD)_SOURCES += target-i386/op.c
-FILE_OP_OBJ = $(PATH_$(REM_MOD)_target-i386/op.c)/op.o
-# else # The remaining targets can be using gcc-4 and needs checking.
-# $(REM_MOD)_SOURCES += $(PATH_$(REM_MOD))/op.S
-# FILE_OP_OBJ = $(PATH_$(REM_MOD)_$(PATH_$(REM_MOD))/op.S)/op.o
-# $(REM_MOD)_CLEAN = $(FILE_OP_OBJ)
-# endif
-$(REM_MOD)_SOURCES += InnoTek/loghack.c # this will be obsoleted soon.
-else
-$(REM_MOD)_SOURCES = \
-   precompiled/VBoxRecompiler.o \
-   precompiled/cpu-exec.o \
-   precompiled/exec.o \
-   precompiled/translate-all.o \
-   precompiled/op.o \
-   precompiled/helper.o \
-   precompiled/helper2.o \
-   precompiled/translate.o \
-   precompiled/loghack.o
-FILE_OP_OBJ = precompiled/op.o
-endif
-$(REM_MOD)_DEFS += fprintf=hacked_fprintf printf=hacked_printf # ditto
-ifneq ($(BUILD_TYPE),debug)
-$(REM_MOD)_SOURCES.win.x86 = $(REM_MOD).def
-else
-$(REM_MOD)_LDFLAGS.win.x86 = --export-all-symbols --output-def $(PATH_TARGET)/$(REM_MOD)-new.def \
-   --exclude-symbols=console_main --exclude-symbols=WinMain@16
-endif
-$(REM_MOD)_SOURCES.os2 = $(PATH_TARGET)/VBoxREMOS2.def
-$(REM_MOD)_LDFLAGS.linux = -Wl,--no-undefined
-$(REM_MOD)_LDFLAGS.l4 = -T$(L4_LIBDIR)/../main_rel.ld -nostdlib -Wl,--no-undefined
-
-ifeq ($(BUILD_TARGET_ARCH),amd64)
-$(REM_MOD)_LIBS = \
-   $(LIB_RUNTIME_NOCRT_GCC64) # !create this!
-#  $(VBOX_GCC_LIBGCC) - fix this
-else # x86
-$(REM_MOD)_LIBS = \
-   $(LIB_VMM) \
-   $(LIB_RUNTIME)
-$(REM_MOD)_LIBS.win = \
-   mingw32 \
-   user32 gdi32 winmm ws2_32 iphlpapi dxguid
-$(REM_MOD)_LIBS.linux = \
-   $(LIB_UUID) \
-   m \
-   util \
-   rt \
-   $(LIB_PTHREAD)
-$(REM_MOD)_LIBS.l4 = \
-   gcc \
-   $(L4_LIBDIR)/libvboxserver.s.so \
-   $(L4_LIBDIR)/libdl.s.so \
-   $(L4_LIBDIR)/libuc.0.s.so
-endif # x86
+   target-i386/translate.c \
+   InnoTek/testmath.c
+ifeq ($(filter-out win os2,$(BUILD_TARGET)),)
+$(REM_MOD)_SOURCES += target-i386/op.c
+FILE_OP_OBJ = $(PATH_$(REM_MOD)_target-i386/op.c)/op.o
+else # The remaining targets can be using gcc-4 and needs checking.
+$(REM_MOD)_SOURCES += $(PATH_$(REM_MOD))/op.S
+FILE_OP_OBJ = $(PATH_$(REM_MOD)_$(PATH_$(REM_MOD))/op.S)/op.o
+$(REM_MOD)_CLEAN = $(FILE_OP_OBJ) $(PATH_$(REM_MOD))/op.S.dep
+endif
+#$(REM_MOD)_SOURCES.os2 = $(PATH_TARGET)/$(REM_MOD).def
+$(REM_MOD)_SOURCES.win.x86 = $(REM_MOD).def
+
+
+ifdef REM_USE_NOCRT
+$(REM_MOD)_TEMPLATE = VBOXNOCRTGAS
+$(REM_MOD)_DEFS += LOG_USE_C99
+$(REM_MOD)_CFLAGS.amd64 = -O2
+$(REM_MOD)_CFLAGS.debug = -O0
+$(REM_MOD)_CFLAGS.darwin = -fno-common -mdynamic-no-pic
+ifdef ($(BUILD_TARGET_ARCH),x86)
+$(REM_MOD)_CFLAGS.release += -fomit-frame-pointer -fno-gcse
+endif
+
+# This doesn't fit in IPRT because it requires GAS and is LGPL.
+$(REM_MOD)_SOURCES += \
+   InnoTek/e_powl-$(BUILD_TARGET_ARCH).S
+
+ifeq ($(REM_MOD),VBoxREM)
+$(REM_MOD)_LIBS = \
+   $(PATH_LIB)/RuntimeR3NoCRTGCC$(VBOX_SUFF_LIB) \
+   $(LIB_VMM) \
+   $(LIB_RUNTIME)
+$(REM_MOD)_LIBS.darwin = \
+   $(TARGET_VBoxREMImp)
+#  $(PATH_BIN)/VBoxREMImp.dylib
+$(REM_MOD)_LDFLAGS.darwin = -read_only_relocs suppress -multiply_defined warning #-install_name @executable_path/$(REM_MOD).dylib#
+else
+$(REM_MOD)_LIBS = \
+   $(PATH_LIB)/RuntimeR3NoCRTGCC$(VBOX_SUFF_LIB)
+$(REM_MOD)_SYSSUFF = .rel
+endif
+
+else # !REM_USE_NOCRT
+
+$(REM_MOD)_TOOL = GCC3
+$(REM_MOD)_TOOL.win.x86 = MINGW32
+$(REM_MOD)_TOOL.win.amd64 = XGCCAMD64LINUX
+$(REM_MOD)_SDKS.win.x86 = W32API ## @todo do we really need this now?
+$(REM_MOD)_ASFLAGS = -x assembler-with-cpp ## @todo didn't I make this default already?
+$(REM_MOD)_CFLAGS = -Wall -g
+$(REM_MOD)_CFLAGS.debug = -O0
+$(REM_MOD)_CFLAGS.release += -fomit-frame-pointer -fno-gcse
+$(REM_MOD)_CFLAGS.profile = $($(REM_MOD)_CFLAGS.release)
+$(REM_MOD)_CFLAGS.kprofile = $($(REM_MOD)_CFLAGS.release)
+$(REM_MOD)_CFLAGS.l4 = -nostdinc
+ifeq ($(BUILD_TARGET),l4)
+$(REM_MOD)_INCS += $(VBOX_L4_GCC3_INCS) $(L4_INCDIR)
+endif
+
+$(REM_MOD)_DEFS += IN_RING3 LOG_USE_C99
+#$(REM_MOD)_DEFS += DEBUG_DISAS DEBUG_PCALL DEBUG_EXEC DEBUG_FLUSH DEBUG_IOPORT DEBUG_SIGNAL DEBUG_TLB_CHECK DEBUG_TB_INVALIDATE DEBUG_TLB # Enables huge amounts of debug logging.
+# these defines are probably all irrelevant now:
+$(REM_MOD)_DEFS += _GNU_SOURCE _FILE_OFFSET_BITS=64 _LARGEFILE_SOURCE _REENTRANT
+
+$(REM_MOD)_LDFLAGS.darwin = -read_only_relocs suppress -install_name @executable_path/$(REM_MOD).dylib -multiple_defined warning
+$(REM_MOD)_LDFLAGS.l4 = -T$(L4_LIBDIR)/../main_rel.ld -nostdlib -Wl,--no-undefined
+$(REM_MOD)_LDFLAGS.os2 = -Zomf
+$(REM_MOD)_LDFLAGS.debug = -g
+ifeq ($(BUILD_TARGET_ARCH),amd64)
+$(REM_MOD)_LIBS = $(FILE_TOOL_GCC3_LIBGCC)
+else # x86
+$(REM_MOD)_LIBS = \
+   $(LIB_VMM) \
+   $(LIB_RUNTIME)
+$(REM_MOD)_LIBS.win.x86 = \
+   mingw32 \
+   user32 gdi32 winmm ws2_32 iphlpapi dxguid
+$(REM_MOD)_LIBS.linux = \
+   $(LIB_UUID) \
+   m \
+   util \
+   rt \
+   $(LIB_PTHREAD)
+$(REM_MOD)_LIBS.l4 = \
+   gcc \
+   $(L4_LIBDIR)/libvboxserver.s.so \
+   $(L4_LIBDIR)/libdl.s.so \
+   $(L4_LIBDIR)/libuc.0.s.so
+endif # x86
+
+endif # !REM_USE_NOCRT

 # Extra flags for these source modules.
 target-i386/op.c_CFLAGS = -O2 -fno-strict-aliasing -fomit-frame-pointer -falign-functions=0 -fno-reorder-blocks -fno-optimize-sibling-calls
 target-i386/op.c_CFLAGS.x86 = -fno-gcse -fno-instrument-functions -mpreferred-stack-boundary=2
+target-i386/op.c_CFLAGS.darwin.x86 = -m128bit-long-double -mpreferred-stack-boundary=4 ## @todo This means we can't use staged/op-elf-x86.s...
 target-i386/helper.c_CFLAGS.x86 = -O2 -fomit-frame-pointer -fno-strict-aliasing -fno-gcse
 cpu-exec.c_CFLAGS.x86 = -O2 -fomit-frame-pointer -fno-strict-aliasing -fno-gcse
+
+
+#
+# The math testcase as a standalone program for testing and debugging purposes.
+#
+## @todo This is a bit messy because of MINGW32.
+#BLDPROGS += testmath
+testmath_TOOL = GCC3
+testmath_TOOL.win.x86 = MINGW32
+testmath_SDKS.win.x86 = W32API
+ifeq ($(BUILD_PLATFORM).$(BUILD_PLATFORM_ARCH),win.amd64)
+# 64-bit windows: Pretend to be 32-bit.
+testmath_BLD_TRG = win32
+testmath_BLD_TRG_ARCH = x86
+testmath_BLD_TRG_CPU = i386
+endif
+testmath_ASTOOL = $(VBOX_ASTOOL)
+ifeq ($(filter-out win32 win64,$(BUILD_PLATFORM)),)
+testmath_ASFLAGS = -f win32 -DNASM_FORMAT_PE $(VBOX_ASFLAGS) -w+orphan-labels
+else
+testmath_ASFLAGS = -f elf -DNASM_FORMAT_ELF $(VBOX_ASFLAGS) -w+orphan-labels
+endif
+testmath_ASFLAGS.amd64 = -m amd64
+testmath_CFLAGS = -Wall -g
+testmath_CFLAGS.release = -O3
+testmath_LDFLAGS = -g
+testmath_DEFS = MATHTEST_STANDALONE
+testmath_SOURCES = InnoTek/testmath.c
+#testmath_SOURCES += $(PATH_LIB)/RuntimeR3NoCRTGCC$(VBOX_SUFF_LIB)

…
    VBoxREMWrapper.cpp \
    VBoxREMWrapperA.asm
+VBoxREM_LDFLAGS.darwin = -install_name @executable_path/VBoxREM.dylib
 VBoxREM_LIBS = \
    $(LIB_VMM) \
…
 #
 VBoxREMImp_TEMPLATE = VBOXR3
+ifeq ($(BUILD_TARGET),darwin)
+VBoxREMImp_INST = $(INST_LIB)
+endif
 VBoxREMImp_SOURCES.win = VBoxREM.def
 VBoxREMImp_SOURCES.os2 = $(PATH_TARGET)/VBoxREMOS2.def
 ifeq ($(filter win os2,$(BUILD_TARGET)),)
 VBoxREMImp_SOURCES = $(PATH_TARGET)/VBoxREMImp.c
+VBoxREMImp_CLEAN = $(PATH_TARGET)/VBoxREMImp.c
 endif
 VBoxREMImp_SONAME.linux = VBoxREM.so
 VBoxREMImp_SONAME.l4 = VBoxREM.s.so
-VBoxREMImp_LDFLAGS.darwin = -install_name VBoxREM.dylib
+VBoxREMImp_LDFLAGS.darwin = -install_name @executable_path/VBoxREM.dylib
+#VBoxREMImp_LDFLAGS.darwin = -install_name VBoxREM.dylib
 VBoxREMImp_LDFLAGS.l4 = -T$(L4_LIBDIR)/../main_rel.ld -nostdlib

…
 # Generate the op.S file somehow...
 #
-# Gathering the flags, defines and include dirs for the command is a lot
-# of work. Unfortunately, there is only a highly specialized kBuild function
+# Gathering the flags, defines and include dirs for the command is a lot
+# of work. Unfortunately, there is only a highly specialized kBuild function
 # for doing this, so we're currently left to our own devices here.
 #
 # Add something like VBOX_RECOMPILER_OP_GCC = gcc-3.4.6 to LocalConfig.kmk
+# to be 100% sure that you get a working op.S. My gcc 4.1.1 seems to work
+# fine, so feel free to try VBOX_RECOMPILER_OP_GCC = gcc.
+#
+# The op-undefined.lst is generated by finding all the undefined symbols
+# in one (or more) ELF op.o files using nm.
+#
 ifndef VBOX_RECOMPILER_OP_GCC
 ifeq ($(BUILD_TARGET).$(BUILD_TARGET_ARCH),darwin.x86)
-VBOX_RECOMPILER_OP_GCC ?= gcc-elf-something
-endif
-ifeq ($(USERNAME).$(BUILD_TARGET),bird.linux)
-VBOX_RECOMPILER_OP_GCC ?= gcc-3.4.6
-endif
-VBOX_RECOMPILER_OP_GCC ?= $(TOOL_$(VBOX_GCC_TOOL)_CC)
-VBOX_RECOMPILER_OP_GCC ?= false
-endif
-
-## @todo Check gcc version if plain gcc, gcc32 or gcc64.
-## @todo minimal dependencies.
-
-$(PATH_$(REM_MOD))/op.S: target-i386/op.c staged/op-elf-$(BUILD_TARGET_ARCH).S | $(call DIRDEP,$(PATH_$(REM_MOD)))
-   $(RM) -f $@ [email protected] [email protected]
-   $(VBOX_RECOMPILER_OP_GCC) $(addsuffix $(SP)\$(NL)$(TAB),\
-   -S -s \
-   $(filter-out -g -O0, \
-   $($(REM_MOD)_CFLAGS) $($(REM_MOD)_CFLAGS.$(BUILD_TYPE)) $($(REM_MOD)_CFLAGS.$(BUILD_TARGET_ARCH)) \
-   $(target-i386/op.c_CFLAGS) $(target-i386/op.c_CFLAGS.$(BUILD_TARGET_ARCH)) \
-   ) \
-   $(addprefix -I, \
-   $($(REM_MOD)_CINCS.$(BUILD_TARGET_ARCH)) $($(REM_MOD)_CINCS.$(BUILD_TARGET)) $($(REM_MOD)_CINCS) $(CINCS) \
-   $($(REM_MOD)_INCS.$(BUILD_TARGET_ARCH)) $($(REM_MOD)_INCS.$(BUILD_TARGET)) $($(REM_MOD)_INCS) $(INCS) \
-   ) \
-   $(addprefix -D, \
-   $($(REM_MOD)_CDEFS.$(BUILD_TARGET_ARCH)) $($(REM_MOD)_CDEFS.$(BUILD_TARGET)) $($(REM_MOD)_CDEFS) $(CDEFS.$(BUILD_TARGET)) $(CDEFS.release) $(CDEFS) \
-   $($(REM_MOD)_DEFS.$(BUILD_TARGET_ARCH)) $($(REM_MOD)_DEFS.$(BUILD_TARGET)) $($(REM_MOD)_DEFS) $(DEFS.$(BUILD_TARGET)) $(DEFS.release) $(DEFS) \
-   ) \
+VBOX_RECOMPILER_OP_GCC ?= i386-elf-gcc-3.4.3 # (port install i386-gcc-elf)
+VBOX_RECOMPILER_OP_GCC_OK := yes
+VBOX_RECOMPILER_OP_GCC_INCS ?= $(abspath $(dir $(shell LC_ALL=C $(VBOX_RECOMPILER_OP_GCC) -print-libgcc-file-name)))/include
+endif
+ifndef VBOX_RECOMPILER_OP_GCC
+VBOX_RECOMPILER_OP_GCC := $(TOOL_$(VBOX_GCC_TOOL)_CC)
+VBOX_RECOMPILER_OP_GCC_OK := dunno
+endif
+else
+# If set, assume it's an OK compiler.
+VBOX_RECOMPILER_OP_GCC_OK := yes
+endif
+
+
+# The command sans -o op.S.tmp.
+COMPILE_OP_CMDS = $(VBOX_RECOMPILER_OP_GCC) \
+   -S -s \
+   $(filter-out -g -O0, \
+   $($(REM_MOD)_CFLAGS) $($(REM_MOD)_CFLAGS.$(BUILD_TYPE)) $($(REM_MOD)_CFLAGS.$(BUILD_TARGET)) $($(REM_MOD)_CFLAGS.$(BUILD_TARGET_ARCH)) $($(REM_MOD)_CFLAGS.$(BUILD_TARGET).$(BUILD_TARGET_ARCH)) \
+   $(target-i386/op.c_CFLAGS) $(target-i386/op.c_CFLAGS.$(BUILD_TARGET)) $(target-i386/op.c_CFLAGS.$(BUILD_TARGET_ARCH)) $(target-i386/op.c_CFLAGS.$(BUILD_TARGET).$(BUILD_TARGET_ARCH)) \
+   ) \
+   $(addprefix -I, \
+   $($(REM_MOD)_CINCS.$(BUILD_TARGET_ARCH)) $($(REM_MOD)_CINCS.$(BUILD_TARGET)) $($(REM_MOD)_CINCS) $(CINCS) \
+   $($(REM_MOD)_INCS.$(BUILD_TARGET_ARCH)) $($(REM_MOD)_INCS.$(BUILD_TARGET)) $($(REM_MOD)_INCS) $(INCS) \
+   ) \
+   $(addprefix -D, \
+   $($(REM_MOD)_CDEFS.$(BUILD_TARGET_ARCH)) $($(REM_MOD)_CDEFS.$(BUILD_TARGET)) $($(REM_MOD)_CDEFS) $(CDEFS.$(BUILD_TARGET)) $(CDEFS.$(BUILD_TARGET_ARCH)) $(CDEFS.$(BUILD_TYPE)) $(CDEFS) \
+   $($(REM_MOD)_DEFS.$(BUILD_TARGET_ARCH)) $($(REM_MOD)_DEFS.$(BUILD_TARGET)) $($(REM_MOD)_DEFS) $(DEFS.$(BUILD_TARGET)) $(DEFS.$(BUILD_TARGET_ARCH)) $(DEFS.$(BUILD_TYPE)) $(DEFS) \
+   ) \
+   -Wp,-MD,$(PATH_$(REM_MOD))/op.S.dep \
+   -Wp,-MT,$(PATH_$(REM_MOD))/op.S \
+   -Wp,-MP \
+   target-i386/op.c
+
+# Use the right GCC includes.
+ifdef VBOX_RECOMPILER_OP_GCC_INCS
+COMPILE_OP_CMDS := $(subst $(VBOX_PATH_GCC_INCS),$(VBOX_RECOMPILER_OP_GCC_INCS),$(COMPILE_OP_CMDS))
+endif
+
+# Drop incompatible options when using the cross-compiler on darwin.
+ifeq ($(BUILD_TARGET),darwin)
+ifeq ($(filter-out i386-elf-gcc%, $(VBOX_RECOMPILER_OP_GCC)),)
+COMPILE_OP_CMDS := $(filter-out -mdynamic-no-pic, $(COMPILE_OP_CMDS))
+endif
+endif
+
+# include the dependencies
+-include $(PATH_$(REM_MOD))/op.S.dep
+
+# The rule.
+$(PATH_$(REM_MOD))/op.S: \
    target-i386/op.c \
-   ) -o [email protected] \
-   || $(CP) staged/op-elf-$(BUILD_TARGET_ARCH).S [email protected] # @todo only do this with gcc-4.
-   $(SED) -f op-validate.sed [email protected] || $(CP) staged/op-elf-$(BUILD_TARGET_ARCH).S [email protected] # This isn't good enough yet.
+   staged/op-elf-$(BUILD_TARGET_ARCH).S \
+   op-validate.sed \
+   op-darwin.sed \
+   op-undefined.lst \
+   Makefile.kmk \
+   $(comp-cmds COMPILE_OP_CMDS,COMPILE_OP_CMDS_PREV,FORCE) \
+   | $(call DIRDEP,$(PATH_$(REM_MOD)))
+   $(RM) -f $@ [email protected] [email protected] [email protected]
+ifeq ($(VBOX_RECOMPILER_OP_GCC_OK),yes)
+   $(call MSG_COMPILE,VBoxREM,$<,$@,AS)
+   $(addsuffix $(SP)\$(NL)$(TAB) ,$(COMPILE_OP_CMDS)) -o [email protected]
+else ifeq ($(VBOX_RECOMPILER_OP_GCC_OK),dunno) # (permit 3.x.x and 4.1.x+ for now)
+   major_ver=`$(VBOX_RECOMPILER_OP_GCC) -dumpversion | $(SED) -e 's/^\([2-9]\)\..*$$/\1/'`; \
+   minor_ver=`$(VBOX_RECOMPILER_OP_GCC) -dumpversion | $(SED) -e 's/^[2-9]\.\([0-9]\)\..*$$/\1/'`; \
+   bugfix_ver=`$(VBOX_RECOMPILER_OP_GCC) -dumpversion | $(SED) -e 's/^[2-9]\.[0-9]\.\([0-9]\).*$$/\1/'`; \
+   if test "$$major_ver" = "3" -o "(" "$$major_ver" = "4" -a "$$minor_ver" != "0" ")"; then \
+       $(ECHO_EXT) "Compiling $< => $@ [gcc v$${major_ver}.$${minor_ver}.$${bugfix_ver}]" && \
+       $(addsuffix $(SP)\$(NL)$(TAB)$(TAB) ,$(COMPILE_OP_CMDS)) -o [email protected]; \
+   else \
+       $(ECHO_EXT) "Using staged op.S [gcc v$${major_ver}.$${minor_ver}.$${bugfix_ver}]" && \
+       $(CP_EXT) -f staged/op-elf-$(BUILD_TARGET_ARCH).S [email protected]; \
+   fi
+else
+   $(CP) staged/op-elf-$(BUILD_TARGET_ARCH).S [email protected]
+endif
+   $(SED) -f op-validate.sed [email protected]
 ifeq ($(BUILD_TARGET),darwin)
    $(SED) -f op-darwin.sed [email protected] > [email protected]
-   $(MV) -f [email protected] [email protected]
+   $(SED) -e 's/^\(.*\)$$/#define \1 _\1/' op-undefined.lst > [email protected]
+   $(CAT_EXT) [email protected] >> [email protected]
 endif
    $(MV) -f [email protected] $@
-
-
-# predefined dependencies to the headers are generated.
-translate-all.c: $(PATH_$(REM_MOD))/op.h $(PATH_$(REM_MOD))/opc.h
-target-i386/translate.c: $(PATH_$(REM_MOD))/gen-op.h $(PATH_$(REM_MOD))/opc.h
-
-# atm this will be build because of the direct dependency.
+   $(QUIET2)$(APPEND) "[email protected]"
+   $(QUIET2)$(APPEND) "[email protected]" 'define COMPILE_OP_CMDS_PREV'
+   $(QUIET2)$(APPEND) "[email protected]" '$(subst $(NL),'$(NL)$(TAB)@$(APPEND) "[email protected]" ',$(COMPILE_OP_CMDS))'
+   $(QUIET2)$(APPEND) "[email protected]" 'endef'
+
+
+# Hack for crosscompiling.
 DYNGEN = $(PATH_dyngen)/dyngen$(HOSTSUFF_EXE)
 DYNGEN_EXEC = $(DYNGEN)
…
 endif

 # The dyngen rules.
 $(PATH_$(REM_MOD))/op.h: $(FILE_OP_OBJ) $(DYNGEN)
    $(call MSG_L1,dyngen => $@)
…
    $(QUIET)$(DYNGEN_EXEC) -g -o $@ $<

+# Dyngen dependants (sp?).
+translate-all.c \
+translate-op.c \
+target-i386/translate.c \
+   : $(PATH_$(REM_MOD))/op.h $(PATH_$(REM_MOD))/opc.h $(PATH_$(REM_MOD))/gen-op.h
+
+
+# Some aliases
+do_dyngen: $(PATH_$(REM_MOD))/gen-op.h $(PATH_$(REM_MOD))/opc.h $(PATH_$(REM_MOD))/op.h
 importlib: $(LIB_REM)
-
-
-#
-# Phony rule for making the qemu source drop.
-# This is just an incomplete EXAMPLE. It does NOT include all we have to ship!
-#
-.PHONY: qemu-source-drop
-qemu-source-drop:
-   $(RM) -f $(PATH_BIN)/qemu-source.zip
-   zip -9 $(PATH_BIN)/qemu-source.zip \
-   target-i386/op.c \
-   target-i386/helper.c \
-   target-i386/ops_template_mem.h \
-   target-i386/ops_sse.h \
-   target-i386/helper2.c \
-   target-i386/ops_template.h \
-   target-i386/ops_mem.h \
-   target-i386/translate-copy.c \
-   target-i386/exec.h \
-   target-i386/cpu.h \
-   target-i386/opreg_template.h \
-   target-i386/translate.c \
-   \
-   a.out.h \
-   COPYING.LIB \
-   cpu-defs.h \
-   dyngen.c \
-   dyngen.h \
-   elf.h \
-   exec.c \
-   softmmu_header.h \
-   translate-all.c \
-   bswap.h \
-   cpu-all.h \
-   cpu-exec.c \
-   disas.h \
-   dyngen-exec.h \
-   dyngen-op.h \
-   exec-all.h \
-   osdep.h \
-   softmmu_template.h \
-   vl.h \
-   \
-   tests/hello-arm.c \
-   tests/hello-i386.c \
-   tests/linux-test.c \
-   tests/Makefile \
-   tests/pi_10.com \
-   tests/qruncom.c \
-   tests/runcom.c \
-   tests/sha1.c \
-   tests/test-i386.c \
-   tests/test-i386-code16.S \
-   tests/test-i386.h \
-   tests/test-i386-muldiv.h \
-   tests/test-i386-shift.h \
-   tests/test-i386-vm86.S \
-   tests/test_path.c \
-   tests/testthread.c
+op.S: $(PATH_$(REM_MOD))/op.S
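The op.S rule above only trusts the compiler when its reported version is 3.x.x, or 4.1.x and later (the `VBOX_RECOMPILER_OP_GCC_OK = dunno` branch parses `gcc -dumpversion` with sed and falls back to the staged op.S otherwise). A minimal C sketch of the same gate, assuming a dumpversion-style string as input (the function name is mine, not from the changeset):

    #include <stdio.h>

    /* Sketch: accept gcc 3.x.x, or 4.x.x with minor >= 1, mirroring the
     * "permit 3.x.x and 4.1.x+ for now" test in the op.S recipe. */
    static int op_gcc_is_ok(const char *dumpversion)
    {
        unsigned major = 0, minor = 0;
        if (sscanf(dumpversion, "%u.%u", &major, &minor) < 2)
            return 0;                     /* unparsable: use the staged op.S */
        return major == 3 || (major == 4 && minor >= 1);
    }

    int main(void)
    {
        printf("3.4.6 -> %d\n", op_gcc_is_ok("3.4.6"));  /* 1: compile op.S */
        printf("4.0.3 -> %d\n", op_gcc_is_ok("4.0.3"));  /* 0: staged copy  */
        printf("4.1.1 -> %d\n", op_gcc_is_ok("4.1.1"));  /* 1: compile op.S */
        return 0;
    }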
trunk/src/recompiler/VBoxREM.def
r1 → r2422:

+; $Id$
 ;; @file
 ;
trunk/src/recompiler/VBoxRecompiler.c
r2223 → r2422:

+/* $Id$ */
 /** @file
- *
  * VBox Recompiler - QEMU.
  */
…
 * Header Files *
 *******************************************************************************/
+#define LOG_GROUP LOG_GROUP_REM
 #include "vl.h"
 #include "exec-all.h"
…
 #include <VBox/err.h>

-#define LOG_GROUP LOG_GROUP_REM
 #include <VBox/log.h>
 #include <iprt/semaphore.h>
…
 #include <iprt/thread.h>
 #include <iprt/string.h>
-
-
+
 /* Don't wanna include everything. */
…
 static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
 static void remR3StateUpdate(PVM pVM);
+
+#if defined(PGM_DYNAMIC_RAM_ALLOC) && !defined(REM_PHYS_ADDR_IN_TLB)
+DECLINLINE(target_ulong) remR3HCVirt2GCPhysInlined(PVM pVM, void *addr);
+DECLINLINE(void *) remR3GCPhys2HCVirtInlined(PVM pVM, target_ulong addr);
+#endif
+
 static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
…
 * Global Variables *
 *******************************************************************************/
-
-/** The log level of the recompiler. */
-#if 1
-extern int loglevel;
-#else
-int loglevel = ~0;
-FILE *logfile = NULL;
-#endif
-
-
+
 /** @todo Move stats to REM::s some rainy day we have nothing do to. */
…
 static STAMPROFILEADV gStatMemRead;
 static STAMPROFILEADV gStatMemWrite;
+#ifndef REM_PHYS_ADDR_IN_TLB
+static STAMPROFILEADV gStatMemReadHCPtr;
+static STAMPROFILEADV gStatMemWriteHCPtr;
+#endif
+#ifdef PGM_DYNAMIC_RAM_ALLOC
+static STAMPROFILE gStatGCPhys2HCVirt;
+static STAMPROFILE gStatHCVirt2GCPhys;
+#endif
+static STAMCOUNTER gStatCpuGetTSC;
 static STAMCOUNTER gStatRefuseTFInhibit;
 static STAMCOUNTER gStatRefuseVM86;
…
 };

-#ifndef PGM_DYNAMIC_RAM_ALLOC
-/* Guest physical RAM base. Not to be used in external code. */
-static uint8_t *phys_ram_base;
-#endif
-
-/*
- * Instance stuff.
- */
-/** Pointer to the cpu state. */
-CPUState *cpu_single_env;
-
-
+
 #ifdef VBOX_WITH_DEBUGGER
…


+/* Instantiate the structure signatures. */
+#define REM_STRUCT_OP 0
+#include "InnoTek/structs.h"
+
+
+
 /*******************************************************************************
 * Internal Functions *
 *******************************************************************************/
 static void remAbort(int rc, const char *pszTip);
 extern int testmath(void);

 /* Put them here to avoid unused variable warning. */
 AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
-//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
-//AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
+#if !defined(IPRT_NO_CRT) && (defined(__LINUX__) || defined(__DARWIN__) || defined(__WIN__))
+AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
+#else
+AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
+#endif
+

 /**
…
 {
     uint32_t u32Dummy;
+    unsigned i;
+
+    /*
+     * Assert sanity.
+     */
     AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
-    //AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
+    AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
     AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
-#if 0 /* not merged yet */
     Assert(!testmath());
-#endif
+    ASSERT_STRUCT_TABLE(Misc);
+    ASSERT_STRUCT_TABLE(TLB);
+    ASSERT_STRUCT_TABLE(SegmentCache);
+    ASSERT_STRUCT_TABLE(XMMReg);
+    ASSERT_STRUCT_TABLE(MMXReg);
+    ASSERT_STRUCT_TABLE(float_status);
+    ASSERT_STRUCT_TABLE(float32u);
+    ASSERT_STRUCT_TABLE(float64u);
+    ASSERT_STRUCT_TABLE(floatx80u);
+    ASSERT_STRUCT_TABLE(CPUState);

     /*
…
     AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order have changed! REM depends on notification about ALL physical memory registrations\n"));

+    /* ignore all notifications */
+    pVM->rem.s.fIgnoreAll = true;
+
     /*
      * Init the recompiler.
…
         return VERR_GENERAL_FAILURE;
     }
-    CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
+    CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
+    CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

     /* allocate code buffer for single instruction emulation. */
…
     pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;

-#ifdef DEBUG_bird
-    //cpu_breakpoint_insert(&pVM->rem.s.Env, some-address);
-#endif
-
     /*
      * Register ram types.
      */
-    pVM->rem.s.iMMIOMemType = cpu_register_io_memory( 0, g_apfnMMIORead, g_apfnMMIOWrite, pVM);
-    AssertReleaseMsg(pVM->rem.s.iMMIOMemType > 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
-    pVM->rem.s.iHandlerMemType = cpu_register_io_memory( 0, g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
-    AssertReleaseMsg(pVM->rem.s.iHandlerMemType > 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
+    pVM->rem.s.iMMIOMemType = cpu_register_io_memory(-1, g_apfnMMIORead, g_apfnMMIOWrite, pVM);
+    AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
+    pVM->rem.s.iHandlerMemType = cpu_register_io_memory(-1, g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
+    AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
     Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));
+
+    /* stop ignoring. */
+    pVM->rem.s.fIgnoreAll = false;

     /*
…
     STAM_REG(pVM, &gStatMemRead, STAMTYPE_PROFILE, "/PROF/REM/MemRead", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
     STAM_REG(pVM, &gStatMemWrite, STAMTYPE_PROFILE, "/PROF/REM/MemWrite", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
+#ifndef REM_PHYS_ADDR_IN_TLB
+    STAM_REG(pVM, &gStatMemReadHCPtr, STAMTYPE_PROFILE, "/PROF/REM/MemReadHCPtr", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
+    STAM_REG(pVM, &gStatMemWriteHCPtr, STAMTYPE_PROFILE, "/PROF/REM/MemWriteHCPtr", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
+#endif
+#ifdef PGM_DYNAMIC_RAM_ALLOC
+    STAM_REG(pVM, &gStatHCVirt2GCPhys, STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
+    STAM_REG(pVM, &gStatGCPhys2HCVirt, STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
+#endif
+
+    STAM_REG(pVM, &gStatCpuGetTSC, STAMTYPE_COUNTER, "/REM/CpuGetTSC", STAMUNIT_OCCURENCES, "cpu_get_tsc calls");

     STAM_REG(pVM, &gStatRefuseTFInhibit, STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
…
     STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

+
 #endif
+
+#ifdef DEBUG_ALL_LOGGING
+    loglevel = ~0;
+#endif
+
     return rc;
 }
…
 REMR3DECL(void) REMR3Reset(PVM pVM)
 {
-    pVM->rem.s.fIgnoreCR3Load = true;
-    pVM->rem.s.fIgnoreInvlPg = true;
-    pVM->rem.s.fIgnoreCpuMode = true;
-
     /*
      * Reset the REM cpu.
      */
+    pVM->rem.s.fIgnoreAll = true;
     cpu_reset(&pVM->rem.s.Env);
     pVM->rem.s.cInvalidatedPages = 0;
-
-    pVM->rem.s.fIgnoreCR3Load = false;
-    pVM->rem.s.fIgnoreInvlPg = false;
-    pVM->rem.s.fIgnoreCpuMode = false;
+    pVM->rem.s.fIgnoreAll = false;
 }
…
     /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
-    SSMR3PutUInt(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
+    SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));

     /*
…
     uint32_t u32Dummy;
     uint32_t fRawRing0 = false;
-
     LogFlow(("remR3Load:\n"));
…
     /*
      * Ignore all ignorable notifications.
-     * Not doing this will cause big trouble.
+     * (Not doing this will cause serious trouble.)
      */
-    pVM->rem.s.fIgnoreCR3Load = true;
-    pVM->rem.s.fIgnoreInvlPg = true;
-    pVM->rem.s.fIgnoreCpuMode = true;
+    pVM->rem.s.fIgnoreAll = true;

     /*
…
     /*
      * Get the CPUID features.
      */
-    CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
+    CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
+    CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

     /*
…
     /*
      * Stop ignoring ignornable notifications.
      */
-    pVM->rem.s.fIgnoreCpuMode = false;
-    pVM->rem.s.fIgnoreInvlPg = false;
-    pVM->rem.s.fIgnoreCR3Load = false;
+    pVM->rem.s.fIgnoreAll = false;

     return VINF_SUCCESS;
…
     {
         case EXCP_INTERRUPT: rc = VINF_SUCCESS; break;
-        case EXCP_HLT: rc = VINF_EM_HALT; break;
+        case EXCP_HLT:
+        case EXCP_HALTED: rc = VINF_EM_HALT; break;
         case EXCP_RC:
             rc = pVM->rem.s.rc;
…
 REMR3DECL(int) REMR3EmulateInstruction(PVM pVM)
 {
-    Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", pVM->rem.s.Env.segs[R_CS].selector, pVM->rem.s.Env.eip));
+    Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", pVM->rem.s.pCtx->cs, pVM->rem.s.pCtx->eip));

     /*
…

         /*
+         * The VM has halted.
+         */
+        case EXCP_HALTED:
+            Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
+            rc = VINF_EM_HALT;
+            break;
+
+        /*
          * Switch to RAW-mode.
          */
…
          * right way in will cause serious trouble if a longjmp was attempted.)
          */
-#ifdef DEBUG_bird
+# ifdef DEBUG_bird
         remR3DisasInstr(&pVM->rem.s.Env, 1, "REMR3EmulateInstruction");
-#endif
+# endif
         int cTimesMax = 16384;
         uint32_t eip = pVM->rem.s.Env.eip;
…
         {
             rc = cpu_exec(&pVM->rem.s.Env);
+
         } while (   eip == pVM->rem.s.Env.eip
                  && (rc == EXCP_DEBUG || rc == EXCP_EXECUTE_RAW)
…
             case EXCP_HLT:
                 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
+                rc = VINF_EM_HALT;
+                break;
+
+            /*
+             * The VM has halted.
+             */
+            case EXCP_HALTED:
+                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
                 rc = VINF_EM_HALT;
                 break;
…
         case EXCP_HLT:
             Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
+            rc = VINF_EM_HALT;
+            break;
+
+        /*
+         * The VM has halted.
+         */
+        case EXCP_HALTED:
+            Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
             rc = VINF_EM_HALT;
             break;
…
          */
         case EXCP_EXECUTE_HWACC:
-            Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
+            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
             rc = VINF_EM_RESCHEDULE_HWACC;
             break;
…
      * state we disable this path.
      */
-    if (pVM->rem.s.fIgnoreInvlPg)
+    if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.fIgnoreAll)
         return;
     Log(("remR3FlushPage: GCPtr=%VGv\n", GCPtr));
+    Assert(pVM->rem.s.fInREM);

     //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);
…
  * @param env Pointer to cpu environment.
  */
-void remR3SetPage(CPUState *env, CPUTLBEntry *pRead, CPUTLBEntry *pWrite, int prot, int is_user)
-{
-    uint32_t virt_addr, addend;
-
-    Log2(("tlb_set_page_raw read (%x-%x) write (%x-%x) prot %x is_user %d\n", pRead->address, pRead->addend, pWrite->address, pWrite->addend, prot, is_user));
-
+void remR3SetPage(CPUState *env, CPUTLBEntry *pTLBEntry, CPUTLBEntry *pTLBEntryIgnored, int prot, int is_user)
+{
+    target_ulong virt_addr;
+    if (env->pVM->rem.s.fIgnoreSetPage || env->pVM->rem.s.fIgnoreAll)
+        return;
+    Assert(env->pVM->rem.s.fInREM || env->pVM->rem.s.fInStateSync);
+
+#ifndef PGM_DYNAMIC_RAM_ALLOC
+    if(!is_user && !(env->state & CPU_RAW_RING0))
+        return; /* We are currently not interested in kernel pages */
+#endif
+
+#if !defined(PGM_DYNAMIC_RAM_ALLOC) && !defined(REM_PHYS_ADDR_IN_TLB)
+    Log2(("tlb_set_page_raw (r=%x|w=%x)-%x prot %x is_user %d phys base %x\n",
+          pTLBEntry->addr_read, pTLBEntry->addr_write, pTLBEntry->addend, prot, is_user, phys_ram_base));
+#else /* PGM_DYNAMIC_RAM_ALLOC */
+    Log2(("tlb_set_page_raw (r=%x|w=%x)-%x prot %x is_user %d\n",
+          pTLBEntry->addr_read, pTLBEntry->addr_write, pTLBEntry->addend, prot, is_user));
+#endif /* PGM_DYNAMIC_RAM_ALLOC */
+
+    /*
+     * Extract the virtual address.
+     */
     if (prot & PAGE_WRITE)
-    {
-        addend = pWrite->addend;
-        virt_addr = pWrite->address;
-    }
+        virt_addr = pTLBEntry->addr_write;
+    else if (prot & PAGE_READ)
+        virt_addr = pTLBEntry->addr_read;
     else
-    if (prot & PAGE_READ)
-    {
-        addend = pRead->addend;
-        virt_addr = pRead->address;
-    }
-    else
-    {
-        // Should never happen!
-        AssertMsgFailed(("tlb_set_page_raw unexpected protection flags %x\n", prot));
-        return;
-    }
-
-    // Clear IO_* flags (TODO: are they actually useful for us??)
-    virt_addr &= ~0xFFF;
+        AssertMsgFailedReturnVoid(("tlb_set_page_raw unexpected protection flags %x\n", prot));
+    virt_addr &= TARGET_PAGE_MASK;

     /*
…
     if (VBOX_FAILURE(rc))
     {
-        AssertMsgFailed(("RAWEx_SetPageEntry %x %x %d failed!!\n", virt_addr, prot, is_user));
+#ifdef VBOX_STRICT
+        target_ulong addend = pTLBEntry->addend;
+        target_ulong phys_addr;
+
+        if (!(addend & IO_MEM_ROM))
+# ifdef REM_PHYS_ADDR_IN_TLB
+            phys_addr = virt_addr + addend;
+# elif defined(PGM_DYNAMIC_RAM_ALLOC)
+            phys_addr = remR3HCVirt2GCPhysInlined(env->pVM, (void *)(virt_addr + addend));
+# else
+            phys_addr = virt_addr - (uintptr_t)phys_ram_base + addend;
+# endif
+        else
+            phys_addr = addend;
+        AssertMsgFailed(("RAWEx_SetPageEntry %x %x %x %d failed!!\n", virt_addr, phys_addr, prot, is_user));
+#endif /* VBOX_STRICT */
         VM_FF_SET(env->pVM, VM_FF_PGM_SYNC_CR3);
     }
…
 void remR3ProtectCode(CPUState *env, RTGCPTR GCPtr)
 {
+    Assert(env->pVM->rem.s.fInREM);
     if (     (env->cr[0] & X86_CR0_PG)                      /* paging must be enabled */
         &&  !(env->state & CPU_EMULATE_SINGLE_INSTR)        /* ignore during single instruction execution */
…
      * state we disable this path.
      */
-    if (pVM->rem.s.fIgnoreCR3Load)
+    if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.fIgnoreAll)
         return;
+    Assert(pVM->rem.s.fInREM);

     /*
…
      * state this path is disabled.
      */
-    if (pVM->rem.s.fIgnoreCpuMode)
+    if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.fIgnoreAll)
         return;
+    Assert(pVM->rem.s.fInREM);

     /*
…
     if(uTrap < 0x20)
     {
+#ifdef DEBUG
         remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
+#endif
         if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 128)
         {
…
 REMR3DECL(int) REMR3State(PVM pVM)
 {
-    Assert(!pVM->rem.s.fInREM);
     Log2(("REMR3State:\n"));
     STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
…
     register unsigned fFlags;
     bool fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVM);
+
+    Assert(!pVM->rem.s.fInREM);
+    pVM->rem.s.fInStateSync = true;

     /*
…

     /*
+     * Clear the halted hidden flag (the interrupt waking up the CPU can
+     * have been dispatched in raw mode).
+     */
+    pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
+
+    /*
      * Replay invlpg?
      */
…

     /*
-     * Registers which are seldomly changed and require special handling / order when changed.
+     * Registers which are rarely changed and require special handling / order when changed.
      */
     fFlags = CPUMGetAndClearChangedFlagsREM(pVM);
…
      */
     pVM->rem.s.fInREM = true;
+    pVM->rem.s.fInStateSync = false;
     pVM->rem.s.cCanExecuteRaw = 0;
     STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
…
 REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvRam, unsigned fFlags)
 {
-    Log(("REMR3NotifyPhysRamRegister: GCPhys=%VGp cb=%d pvRam=%p fFlags=%d\n", GCPhys, cb, pvRam, fFlags));
+    LogFlow(("REMR3NotifyPhysRamRegister: GCPhys=%VGp cb=%d pvRam=%p fFlags=%d\n", GCPhys, cb, pvRam, fFlags));
     VM_ASSERT_EMT(pVM);
…
     if (!GCPhys)
     {
-#ifndef PGM_DYNAMIC_RAM_ALLOC
+#if !defined(PGM_DYNAMIC_RAM_ALLOC) && !defined(REM_PHYS_ADDR_IN_TLB)
         AssertRelease(!phys_ram_base);
         phys_ram_base = pvRam;
 #endif
         phys_ram_size = cb;
-        phys_ram_dirty = MMR3HeapAllocZ(pVM, MM_TAG_REM, cb >> PAGE_SHIFT);
-        AssertReleaseMsg(phys_ram_dirty, ("failed to allocate %d bytes of dirty bytes\n", cb >> PAGE_SHIFT));
-    }
-#ifndef PGM_DYNAMIC_RAM_ALLOC
-    AssertRelease(phys_ram_base);
+        phys_ram_dirty_size = cb >> PAGE_SHIFT;
+#ifndef VBOX_STRICT
+        phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
+        AssertReleaseMsg(phys_ram_dirty, ("failed to allocate %d bytes of dirty bytes\n", phys_ram_dirty_size));
+#else /* VBOX_STRICT: allocate a full map and make the out of bounds pages invalid. */
+        phys_ram_dirty = RTMemPageAlloc(_4G >> PAGE_SHIFT);
+        AssertReleaseMsg(phys_ram_dirty, ("failed to allocate %d bytes of dirty bytes\n", _4G >> PAGE_SHIFT));
+        uint32_t cbBitmap = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
+        int rc = RTMemProtect(phys_ram_dirty + cbBitmap, (_4G >> PAGE_SHIFT) - cbBitmap, RTMEM_PROT_NONE);
+        AssertRC(rc);
+        phys_ram_dirty += cbBitmap - phys_ram_dirty_size;
+#endif
+        memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
+    }

     /*
      * Register the ram.
      */
+    Assert(!pVM->rem.s.fIgnoreAll);
+    pVM->rem.s.fIgnoreAll = true;
+
 #ifdef PGM_DYNAMIC_RAM_ALLOC
     if (!GCPhys)
…
     else
     {
+# ifndef REM_PHYS_ADDR_IN_TLB
         uint32_t i;
+# endif

         cpu_register_physical_memory(GCPhys, cb, GCPhys | (fFlags & MM_RAM_FLAGS_RESERVED ? IO_MEM_UNASSIGNED : 0));

+# ifndef REM_PHYS_ADDR_IN_TLB
         AssertRelease(pVM->rem.s.cPhysRegistrations < REM_MAX_PHYS_REGISTRATIONS);
-        for (i=0;i<pVM->rem.s.cPhysRegistrations;i++)
+        for (i = 0; i < pVM->rem.s.cPhysRegistrations; i++)
         {
             if (pVM->rem.s.aPhysReg[i].GCPhys == GCPhys)
…
             pVM->rem.s.cPhysRegistrations++;
         }
-    }
+# endif /* !REM_PHYS_ADDR_IN_TLB */
+    }
+#elif defined(REM_PHYS_ADDR_IN_TLB)
+    cpu_register_physical_memory(GCPhys, cb, GCPhys | (fFlags & MM_RAM_FLAGS_RESERVED ? IO_MEM_UNASSIGNED : 0));
 #else
+    AssertRelease(phys_ram_base);
     cpu_register_physical_memory(GCPhys, cb, ((uintptr_t)pvRam - (uintptr_t)phys_ram_base)
                                              | (fFlags & MM_RAM_FLAGS_RESERVED ? IO_MEM_UNASSIGNED : 0));
 #endif
+    Assert(pVM->rem.s.fIgnoreAll);
+    pVM->rem.s.fIgnoreAll = false;
 }
…
 REMR3DECL(void) REMR3NotifyPhysRamChunkRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, RTHCUINTPTR pvRam, unsigned fFlags)
 {
+#ifdef PGM_DYNAMIC_RAM_ALLOC
+# ifndef REM_PHYS_ADDR_IN_TLB
     uint32_t idx;
+#endif

     Log(("REMR3NotifyPhysRamChunkRegister: GCPhys=%VGp cb=%d pvRam=%p fFlags=%d\n", GCPhys, cb, pvRam, fFlags));
…
     Assert(fFlags == 0 /* normal RAM */);

+# ifndef REM_PHYS_ADDR_IN_TLB
     if (!pVM->rem.s.paHCVirtToGCPhys)
     {
…
         }
     }
+# endif /* !REM_PHYS_ADDR_IN_TLB */
+
+    Assert(!pVM->rem.s.fIgnoreAll);
+    pVM->rem.s.fIgnoreAll = true;
+
     cpu_register_physical_memory(GCPhys, cb, GCPhys);
-}
+
+    Assert(pVM->rem.s.fIgnoreAll);
+    pVM->rem.s.fIgnoreAll = false;
+
+#else
+    AssertReleaseFailed();
+#endif
+}
+
+
+#ifdef PGM_DYNAMIC_RAM_ALLOC
+# ifndef REM_PHYS_ADDR_IN_TLB
+#if 0
+static const uint8_t gabZeroPage[PAGE_SIZE];
+#endif

…
 /**
  * Convert GC physical address to HC virt
  *
  * @returns The HC virt address corresponding to addr.
  * @param env The cpu environment.
  * @param addr The physical address.
  */
-void *remR3GCPhys2HCVirt(void *env, target_ulong addr)
-{
-#ifdef PGM_DYNAMIC_RAM_ALLOC
-    PVM pVM = ((CPUState *)env)->pVM;
+DECLINLINE(void *) remR3GCPhys2HCVirtInlined(PVM pVM, target_ulong addr)
+{
     uint32_t i;
     void *pv;
+    STAM_PROFILE_START(&gStatGCPhys2HCVirt, a);
+
+#if 1
     /* lookup in pVM->rem.s.aPhysReg array first (for ROM range(s) inside the guest's RAM) */
-    for (i=0;i<pVM->rem.s.cPhysRegistrations;i++)
+    for (i = 0; i < pVM->rem.s.cPhysRegistrations; i++)
     {
-        uint32_t off = addr - pVM->rem.s.aPhysReg[i].GCPhys;
+        RTGCPHYS off = addr - pVM->rem.s.aPhysReg[i].GCPhys;
         if (off < pVM->rem.s.aPhysReg[i].cb)
         {
-            Log2(("remR3GCPhys2HCVirt: %x -> %x\n", addr, pVM->rem.s.aPhysReg[i].HCVirt + off));
-            return (void *)(pVM->rem.s.aPhysReg[i].HCVirt + off);
+            pv = (void *)(pVM->rem.s.aPhysReg[i].HCVirt + off);
+            Log2(("remR3GCPhys2HCVirt: %x -> %x\n", addr, pv));
+            STAM_PROFILE_STOP(&gStatGCPhys2HCVirt, a);
+            return pv;
         }
     }
     AssertMsg(addr < phys_ram_size, ("remR3GCPhys2HCVirt: unknown physical address %x\n", addr));
-    Log2(("remR3GCPhys2HCVirt: %x -> %x\n", addr, pVM->rem.s.paGCPhysToHCVirt[addr >> PGM_DYNAMIC_CHUNK_SHIFT] + (addr & PGM_DYNAMIC_CHUNK_OFFSET_MASK)));
-    return (void *)(pVM->rem.s.paGCPhysToHCVirt[addr >> PGM_DYNAMIC_CHUNK_SHIFT] + (addr & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
+    pv = (void *)(pVM->rem.s.paGCPhysToHCVirt[addr >> PGM_DYNAMIC_CHUNK_SHIFT] + (addr & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
+    Log2(("remR3GCPhys2HCVirt: %x -> %x\n", addr, pv));
 #else
-    return phys_ram_base + addr;
-#endif
-}
+    /** @todo figure out why this is faster than the above code. */
+    int rc = PGMPhysGCPhys2HCPtr(pVM, addr & X86_PTE_PAE_PG_MASK, PAGE_SIZE, &pv);
+    if (RT_FAILURE(rc))
+    {
+        AssertMsgFailed(("remR3GCPhys2HCVirt: unknown physical address %x\n", addr));
+        pv = gabZeroPage;
+    }
+    pv = (void *)((uintptr_t)pv | (addr & PAGE_OFFSET_MASK));
+#endif
+    return pv;
+}
+

 /**
…
  * @param addr The physical address.
  */
-target_ulong remR3HCVirt2GCPhys(void *env, void *addr)
-{
-#ifdef PGM_DYNAMIC_RAM_ALLOC
-    PVM pVM = ((CPUState *)env)->pVM;
+DECLINLINE(target_ulong) remR3HCVirt2GCPhysInlined(PVM pVM, void *addr)
+{
     RTHCUINTPTR HCVirt = (RTHCUINTPTR)addr;
     uint32_t idx = (HCVirt >> PGM_DYNAMIC_CHUNK_SHIFT);
     RTHCUINTPTR off;
     RTUINT i;
+    target_ulong GCPhys;

     off = HCVirt - pVM->rem.s.paHCVirtToGCPhys[idx].pChunk1;

-    if (    pVM->rem.s.paHCVirtToGCPhys[idx].pChunk1
+    if (    pVM->rem.s.paHCVirtToGCPhys[idx].pChunk1
         &&  off < PGM_DYNAMIC_CHUNK_SIZE)
     {
-        Log2(("remR3HCVirt2GCPhys %x -> %x\n", addr, pVM->rem.s.paHCVirtToGCPhys[idx].GCPhys1 + off));
-        return pVM->rem.s.paHCVirtToGCPhys[idx].GCPhys1 + off;
+        GCPhys = pVM->rem.s.paHCVirtToGCPhys[idx].GCPhys1 + off;
+        Log2(("remR3HCVirt2GCPhys %x -> %x\n", addr, GCPhys));
+        return GCPhys;
     }

     off = HCVirt - pVM->rem.s.paHCVirtToGCPhys[idx].pChunk2;
-    if (    pVM->rem.s.paHCVirtToGCPhys[idx].pChunk2
+    if (    pVM->rem.s.paHCVirtToGCPhys[idx].pChunk2
         &&  off < PGM_DYNAMIC_CHUNK_SIZE)
     {
-        Log2(("remR3HCVirt2GCPhys %x -> %x\n", addr, pVM->rem.s.paHCVirtToGCPhys[idx].GCPhys2 + off));
-        return pVM->rem.s.paHCVirtToGCPhys[idx].GCPhys2 + off;
+        GCPhys = pVM->rem.s.paHCVirtToGCPhys[idx].GCPhys2 + off;
+        Log2(("remR3HCVirt2GCPhys %x -> %x\n", addr, GCPhys));
+        return GCPhys;
     }

     /* Must be externally registered RAM/ROM range */
-    for (i=0;i<pVM->rem.s.cPhysRegistrations;i++)
+    for (i = 0; i < pVM->rem.s.cPhysRegistrations; i++)
     {
         uint32_t off = HCVirt - pVM->rem.s.aPhysReg[i].HCVirt;
         if (off < pVM->rem.s.aPhysReg[i].cb)
         {
-            Log2(("remR3HCVirt2GCPhys %x -> %x\n", addr, pVM->rem.s.aPhysReg[i].GCPhys + off));
-            return pVM->rem.s.aPhysReg[i].GCPhys + off;
+            GCPhys = pVM->rem.s.aPhysReg[i].GCPhys + off;
+            Log2(("remR3HCVirt2GCPhys %x -> %x\n", addr, GCPhys));
+            return GCPhys;
         }
     }
     AssertReleaseMsgFailed(("No translation for physical address %VHv???\n", addr));
     return 0;
-#else
-    return (target_ulong)addr - (target_ulong)phys_ram_base;
-#endif
-}
+}
+
+/**
+ * Convert GC physical address to HC virt
+ *
+ * @returns The HC virt address corresponding to addr.
+ * @param env The cpu environment.
+ * @param addr The physical address.
+ */
+void *remR3GCPhys2HCVirt(void *env, target_ulong addr)
+{
+    PVM pVM = ((CPUState *)env)->pVM;
+    void *pv;
+    STAM_PROFILE_START(&gStatGCPhys2HCVirt, a);
+    pv = remR3GCPhys2HCVirtInlined(pVM, addr);
+    STAM_PROFILE_STOP(&gStatGCPhys2HCVirt, a);
+    return pv;
+}
+
+
+/**
+ * Convert GC physical address to HC virt
+ *
+ * @returns The HC virt address corresponding to addr.
+ * @param env The cpu environment.
+ * @param addr The physical address.
+ */
+target_ulong remR3HCVirt2GCPhys(void *env, void *addr)
+{
+    PVM pVM = ((CPUState *)env)->pVM;
+    target_ulong GCPhys;
+    STAM_PROFILE_START(&gStatHCVirt2GCPhys, a);
+    GCPhys = remR3HCVirt2GCPhysInlined(pVM, addr);
+    STAM_PROFILE_STOP(&gStatHCVirt2GCPhys, a);
+    return GCPhys;
+}
+
+# endif /* !REM_PHYS_ADDR_IN_TLB */

 /**
…
 }

+#endif /* PGM_DYNAMIC_RAM_ALLOC */
+
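remR3HCVirt2GCPhysInlined above resolves a host pointer by indexing a per-chunk table with the pointer's top bits, probing both chunk slots, and only then falling back to the registration array. A minimal standalone sketch of that lookup scheme, with simplified stand-in types rather than the real VBox structures:

    #include <stdint.h>

    /* Simplified stand-ins for the PGM chunk table (not the VBox types). */
    #define CHUNK_SHIFT 22                     /* e.g. 4MB chunks */
    #define CHUNK_SIZE  (1u << CHUNK_SHIFT)

    typedef struct { uintptr_t pChunk; uint32_t GCPhys; } CHUNKSLOT;
    static CHUNKSLOT g_aChunks[1u << (32 - CHUNK_SHIFT)]; /* indexed by host addr >> CHUNK_SHIFT */

    /* Translate host pointer -> guest physical, mirroring the inlined lookup. */
    static int hcvirt2gcphys(uintptr_t HCVirt, uint32_t *pGCPhys)
    {
        uintptr_t idx = HCVirt >> CHUNK_SHIFT;
        uintptr_t off = HCVirt - g_aChunks[idx].pChunk;
        if (g_aChunks[idx].pChunk && off < CHUNK_SIZE)
        {
            *pGCPhys = g_aChunks[idx].GCPhys + (uint32_t)off;
            return 0;
        }
        return -1; /* fall back to the registration array, as the real code does */
    }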
*/ 2819 3022 Assert(cb == PAGE_SIZE); 2820 3023 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys); 2821 Assert(remR3HCVirt2GCPhys(cpu_single_env, pvHCPtr) < MMR3PhysGetRamSize(pVM)); 2822 #ifdef PGM_DYNAMIC_RAM_ALLOC 3024 #ifdef REM_PHYS_ADDR_IN_TLB 3025 cpu_register_physical_memory(GCPhys, cb, GCPhys); 3026 #elif defined(PGM_DYNAMIC_RAM_ALLOC) 3027 Assert(remR3HCVirt2GCPhysInlined(pVM, pvHCPtr) < MMR3PhysGetRamSize(pVM)); 2823 3028 cpu_register_physical_memory(GCPhys, cb, GCPhys); 2824 3029 #else 2825 cpu_register_physical_memory(GCPhys, cb, remR3HCVirt2GCPhys(cpu_single_env, pvHCPtr)); 3030 Assert((uintptr_t)pvHCPtr - (uintptr_t)phys_ram_base < MMR3PhysGetRamSize(pVM)); 3031 cpu_register_physical_memory(GCPhys, cb, (uintptr_t)pvHCPtr - (uintptr_t)phys_ram_base); 2826 3032 #endif 2827 3033 } 2828 3034 } 3035 3036 Assert(pVM->rem.s.fIgnoreAll); 3037 pVM->rem.s.fIgnoreAll = false; 2829 3038 } 2830 3039 … … 2853 3062 if (fHasHCHandler) 2854 3063 { 3064 Assert(!pVM->rem.s.fIgnoreAll); 3065 pVM->rem.s.fIgnoreAll = true; 3066 2855 3067 /* 2856 3068 * Reset the old page. … … 2860 3072 else 2861 3073 { 2862 /* This is not p refect, but it'll do for PD monitoring... */3074 /* This is not perfect, but it'll do for PD monitoring... */ 2863 3075 Assert(cb == PAGE_SIZE); 2864 3076 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld); 2865 Assert(remR3HCVirt2GCPhys(cpu_single_env, pvHCPtr) < MMR3PhysGetRamSize(pVM)); 2866 #ifdef PGM_DYNAMIC_RAM_ALLOC 3077 #ifdef REM_PHYS_ADDR_IN_TLB 3078 cpu_register_physical_memory(GCPhysOld, cb, GCPhysOld); 3079 #elif defined(PGM_DYNAMIC_RAM_ALLOC) 3080 Assert(remR3HCVirt2GCPhysInlined(pVM, pvHCPtr) < MMR3PhysGetRamSize(pVM)); 2867 3081 cpu_register_physical_memory(GCPhysOld, cb, GCPhysOld); 2868 3082 #else 2869 cpu_register_physical_memory(GCPhysOld, cb, remR3HCVirt2GCPhys(cpu_single_env, pvHCPtr)); 3083 AssertMsg((uintptr_t)pvHCPtr - (uintptr_t)phys_ram_base < MMR3PhysGetRamSize(pVM), 3084 ("pvHCPtr=%p phys_ram_base=%p size=%RX64 cb=%RGp\n", pvHCPtr, phys_ram_base, MMR3PhysGetRamSize(pVM), cb)); 3085 cpu_register_physical_memory(GCPhysOld, cb, (uintptr_t)pvHCPtr - (uintptr_t)phys_ram_base); 2870 3086 #endif 2871 3087 } … … 2877 3093 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb); 2878 3094 cpu_register_physical_memory(GCPhysNew, cb, pVM->rem.s.iHandlerMemType); 3095 3096 Assert(pVM->rem.s.fIgnoreAll); 3097 pVM->rem.s.fIgnoreAll = false; 2879 3098 } 2880 3099 } … … 2921 3140 { 2922 3141 PVM pVM = env->pVM; 2923 if ((pTLBEntry->addr ess& ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)3142 if ((pTLBEntry->addr_code & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType) 2924 3143 { 2925 3144 target_ulong ret = pTLBEntry->addend + addr; 2926 AssertMsg2("remR3PhysGetPhysicalAddressCode: addr=%VGv addr ess=%VGv addend=%VGp ret=%VGp\n",2927 (RTGCPTR)addr, (RTGCPTR)pTLBEntry->address, (RTGCPHYS)pTLBEntry->addend, ret);3145 AssertMsg2("remR3PhysGetPhysicalAddressCode: addr=%VGv addr_code=%VGv addend=%VGp ret=%VGp\n", 3146 (RTGCPTR)addr, (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, ret); 2928 3147 return ret; 2929 3148 } 2930 LogRel(("\nTrying to execute code with memory type addr ess=%VGv addend=%VGp at %VGv! (iHandlerMemType=%#x iMMIOMemType=%#x)\n"3149 LogRel(("\nTrying to execute code with memory type addr_code=%VGv addend=%VGp at %VGv! 
(iHandlerMemType=%#x iMMIOMemType=%#x)\n" 3149 LogRel(("\nTrying to execute code with memory type addr_code=%VGv addend=%VGp at %VGv! (iHandlerMemType=%#x iMMIOMemType=%#x)\n" 2931 3150 "*** handlers\n", 2932 (RTGCPTR)pTLBEntry->address, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType)); 3151 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType)); 2933 3152 DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp()); 2934 3153 LogRel(("*** mmio\n")); … 2936 3155 LogRel(("*** phys\n")); 2937 3156 DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp()); 2938 cpu_abort(env, "Trying to execute code with memory type address=%VGv addend=%VGp at %VGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n", 3157 cpu_abort(env, "Trying to execute code with memory type addr_code=%VGv addend=%VGp at %VGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n", 2939 3158 (RTGCPTR)pTLBEntry->address, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType); 3159 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType); 2940 3160 AssertFatalFailed(); 2941 3161 } 3162 3163 3164 /** Validate the physical address passed to the read functions. 3165 * Useful for finding non-guest-ram reads/writes. */ 3166 #if 1 /* disable if it becomes bothersome... */ 3167 # define VBOX_CHECK_ADDR(GCPhys) AssertMsg(PGMPhysIsGCPhysValid(cpu_single_env->pVM, (GCPhys)), ("%VGp\n", (GCPhys))) 3168 #else 3169 # define VBOX_CHECK_ADDR(GCPhys) do { } while (0) 3170 #endif 3171 3172 /** 3173 * Read guest RAM and ROM. 3174 * 3175 * @param SrcGCPhys The source address (guest physical). 3176 * @param pvDst The destination address. 3177 * @param cb Number of bytes 3178 */ 3179 void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb) 3180 { 3181 STAM_PROFILE_ADV_START(&gStatMemRead, a); 3182 VBOX_CHECK_ADDR(SrcGCPhys); 3183 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb); 3184 STAM_PROFILE_ADV_STOP(&gStatMemRead, a); 3185 } 3186 3187 3188 /** 3189 * Read guest RAM and ROM, unsigned 8-bit. 3190 * 3191 * @param SrcGCPhys The source address (guest physical). 3192 */ 3193 uint8_t remR3PhysReadU8(RTGCPHYS SrcGCPhys) 3194 { 3195 uint8_t val; 3196 STAM_PROFILE_ADV_START(&gStatMemRead, a); 3197 VBOX_CHECK_ADDR(SrcGCPhys); 3198 val = PGMR3PhysReadByte(cpu_single_env->pVM, SrcGCPhys); 3199 STAM_PROFILE_ADV_STOP(&gStatMemRead, a); 3200 return val; 3201 } 3202 3203 3204 /** 3205 * Read guest RAM and ROM, signed 8-bit. 3206 * 3207 * @param SrcGCPhys The source address (guest physical). 3208 */ 3209 int8_t remR3PhysReadS8(RTGCPHYS SrcGCPhys) 3210 { 3211 int8_t val; 3212 STAM_PROFILE_ADV_START(&gStatMemRead, a); 3213 VBOX_CHECK_ADDR(SrcGCPhys); 3214 val = PGMR3PhysReadByte(cpu_single_env->pVM, SrcGCPhys); 3215 STAM_PROFILE_ADV_STOP(&gStatMemRead, a); 3216 return val; 3217 } 3218 3219 3220 /** 3221 * Read guest RAM and ROM, unsigned 16-bit. 3222 * 3223 * @param SrcGCPhys The source address (guest physical). 3224 */ 3225 uint16_t remR3PhysReadU16(RTGCPHYS SrcGCPhys) 3226 { 3227 uint16_t val; 3228 STAM_PROFILE_ADV_START(&gStatMemRead, a); 3229 VBOX_CHECK_ADDR(SrcGCPhys); 3230 val = PGMR3PhysReadWord(cpu_single_env->pVM, SrcGCPhys); 3231 STAM_PROFILE_ADV_STOP(&gStatMemRead, a); 3232 return val; 3233 } 3234 3235 3236 /** 3237 * Read guest RAM and ROM, signed 16-bit. 3238 * 3239 * @param SrcGCPhys The source address (guest physical).
3239 */ 3240 int16_t remR3PhysReadS16(RTGCPHYS SrcGCPhys) 3241 { 3242 uint16_t val; 3243 STAM_PROFILE_ADV_START(&gStatMemRead, a); 3244 VBOX_CHECK_ADDR(SrcGCPhys); 3245 val = PGMR3PhysReadWord(cpu_single_env->pVM, SrcGCPhys); 3246 STAM_PROFILE_ADV_STOP(&gStatMemRead, a); 3247 return val; 3248 } 3249 3250 3251 /** 3252 * Read guest RAM and ROM, unsigned 32-bit. 3253 * 3254 * @param SrcGCPhys The source address (guest physical). 3255 */ 3256 uint32_t remR3PhysReadU32(RTGCPHYS SrcGCPhys) 3257 { 3258 uint32_t val; 3259 STAM_PROFILE_ADV_START(&gStatMemRead, a); 3260 VBOX_CHECK_ADDR(SrcGCPhys); 3261 val = PGMR3PhysReadDword(cpu_single_env->pVM, SrcGCPhys); 3262 STAM_PROFILE_ADV_STOP(&gStatMemRead, a); 3263 return val; 3264 } 3265 3266 3267 /** 3268 * Read guest RAM and ROM, signed 32-bit. 3269 * 3270 * @param SrcGCPhys The source address (guest physical). 3271 */ 3272 int32_t remR3PhysReadS32(RTGCPHYS SrcGCPhys) 3273 { 3274 int32_t val; 3275 STAM_PROFILE_ADV_START(&gStatMemRead, a); 3276 VBOX_CHECK_ADDR(SrcGCPhys); 3277 val = PGMR3PhysReadDword(cpu_single_env->pVM, SrcGCPhys); 3278 STAM_PROFILE_ADV_STOP(&gStatMemRead, a); 3279 return val; 3280 } 3281 3282 3283 /** 3284 * Read guest RAM and ROM, unsigned 64-bit. 3285 * 3286 * @param SrcGCPhys The source address (guest physical). 3287 */ 3288 uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys) 3289 { 3290 uint64_t val; 3291 STAM_PROFILE_ADV_START(&gStatMemRead, a); 3292 VBOX_CHECK_ADDR(SrcGCPhys); 3293 val = PGMR3PhysReadDword(cpu_single_env->pVM, SrcGCPhys) 3294 | ((uint64_t)PGMR3PhysReadDword(cpu_single_env->pVM, SrcGCPhys + 4) << 32); /** @todo fix me! */ 3295 STAM_PROFILE_ADV_STOP(&gStatMemRead, a); 3296 return val; 3297 } 3298 3299 3300 /** 3301 * Write guest RAM. 3302 * 3303 * @param DstGCPhys The destination address (guest physical). 3304 * @param pvSrc The source address. 3305 * @param cb Number of bytes to write 3306 */ 3307 void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb) 3308 { 3309 STAM_PROFILE_ADV_START(&gStatMemWrite, a); 3310 VBOX_CHECK_ADDR(DstGCPhys); 3311 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb); 3312 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a); 3313 } 3314 3315 3316 /** 3317 * Write guest RAM, unsigned 8-bit. 3318 * 3319 * @param DstGCPhys The destination address (guest physical). 3320 * @param val Value 3321 */ 3322 void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val) 3323 { 3324 STAM_PROFILE_ADV_START(&gStatMemWrite, a); 3325 VBOX_CHECK_ADDR(DstGCPhys); 3326 PGMR3PhysWriteByte(cpu_single_env->pVM, DstGCPhys, val); 3327 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a); 3328 } 3329 3330 3331 /** 3332 * Write guest RAM, unsigned 8-bit. 3333 * 3334 * @param DstGCPhys The destination address (guest physical). 3335 * @param val Value 3336 */ 3337 void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val) 3338 { 3339 STAM_PROFILE_ADV_START(&gStatMemWrite, a); 3340 VBOX_CHECK_ADDR(DstGCPhys); 3341 PGMR3PhysWriteWord(cpu_single_env->pVM, DstGCPhys, val); 3342 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a); 3343 } 3344 3345 3346 /** 3347 * Write guest RAM, unsigned 32-bit. 3348 * 3349 * @param DstGCPhys The destination address (guest physical). 3350 * @param val Value 3351 */ 3352 void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val) 3353 { 3354 STAM_PROFILE_ADV_START(&gStatMemWrite, a); 3355 VBOX_CHECK_ADDR(DstGCPhys); 3356 PGMR3PhysWriteDword(cpu_single_env->pVM, DstGCPhys, val); 3357 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a); 3358 } 3359 3360 3361 /** 3362 * Write guest RAM, unsigned 64-bit. 
3363 * 3364 * @param DstGCPhys The destination address (guest physical). 3365 * @param val Value 3366 */ 3367 void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val) 3368 { 3369 STAM_PROFILE_ADV_START(&gStatMemWrite, a); 3370 VBOX_CHECK_ADDR(DstGCPhys); 3371 PGMR3PhysWriteDword(cpu_single_env->pVM, DstGCPhys, (uint32_t)val); /** @todo add U64 interface. */ 3372 PGMR3PhysWriteDword(cpu_single_env->pVM, DstGCPhys + 4, val >> 32); 3373 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a); 3374 } 3375 3376 3377 #ifndef REM_PHYS_ADDR_IN_TLB 2942 3378 2943 3379 /** … … 2948 3384 * @param cb Number of bytes 2949 3385 */ 2950 void remR3PhysRead Bytes(uint8_t *pbSrcPhys, void *pvDst, unsigned cb)2951 { 2952 STAM_PROFILE_ADV_START(&gStatMemRead , a);3386 void remR3PhysReadHCPtr(uint8_t *pbSrcPhys, void *pvDst, unsigned cb) 3387 { 3388 STAM_PROFILE_ADV_START(&gStatMemReadHCPtr, a); 2953 3389 2954 3390 /* … … 2956 3392 * ROM is accessed this way, even if it's not part of the RAM. 2957 3393 */ 2958 /** @todo This is rather ugly, but there's no other way when we don't wish to touch *many* other files. */ 2959 uintptr_t off = remR3HCVirt2GCPhys(cpu_single_env, pbSrcPhys); 2960 if (off < (uintptr_t)phys_ram_size) 2961 PGMPhysRead(cpu_single_env->pVM, (RTGCPHYS)off, pvDst, cb); 2962 else 2963 { 2964 /* ROM range outside physical RAM, HC address passed directly */ 2965 Log4(("remR3PhysReadBytes ROM: %p\n", pbSrcPhys)); 2966 memcpy(pvDst, pbSrcPhys, cb); 2967 } 2968 STAM_PROFILE_ADV_STOP(&gStatMemRead, a); 2969 } 2970 2971 /** @todo r=bird: s/Byte/U8/ s/Word/U16/ s/Dword/U32/, see MMIO and other functions. 2972 * It could be an idea to inline these wrapper functions... */ 2973 2974 /** 2975 * Read guest RAM and ROM. 3394 #ifdef PGM_DYNAMIC_RAM_ALLOC 3395 uintptr_t off = remR3HCVirt2GCPhysInlined(cpu_single_env->pVM, pbSrcPhys); 3396 #else 3397 uintptr_t off = pbSrcPhys - phys_ram_base; 3398 #endif 3399 PGMPhysRead(cpu_single_env->pVM, (RTGCPHYS)off, pvDst, cb); 3400 STAM_PROFILE_ADV_STOP(&gStatMemReadHCPtr, a); 3401 } 3402 3403 3404 /** 3405 * Read guest RAM and ROM, unsigned 8-bit. 2976 3406 * 2977 3407 * @param pbSrcPhys The source address. Relative to guest RAM. 2978 3408 */ 2979 uint8_t remR3PhysRead UByte(uint8_t *pbSrcPhys)3409 uint8_t remR3PhysReadHCPtrU8(uint8_t *pbSrcPhys) 2980 3410 { 2981 3411 uint8_t val; 2982 3412 2983 STAM_PROFILE_ADV_START(&gStatMemRead , a);3413 STAM_PROFILE_ADV_START(&gStatMemReadHCPtr, a); 2984 3414 2985 3415 /* … … 2987 3417 * ROM is accessed this way, even if it's not part of the RAM. 2988 3418 */ 2989 /** @todo This is rather ugly, but there's no other way when we don't wish to touch *many* other files. */ 2990 uintptr_t off = remR3HCVirt2GCPhys(cpu_single_env, pbSrcPhys); 2991 if (off < (uintptr_t)phys_ram_size) 2992 val = PGMR3PhysReadByte(cpu_single_env->pVM, (RTGCPHYS)off); 2993 else 2994 { 2995 /* ROM range outside physical RAM, HC address passed directly */ 2996 Log4(("remR3PhysReadBytes ROM: %p\n", pbSrcPhys)); 2997 val = *pbSrcPhys; 2998 } 2999 STAM_PROFILE_ADV_STOP(&gStatMemRead, a); 3419 #ifdef PGM_DYNAMIC_RAM_ALLOC 3420 uintptr_t off = remR3HCVirt2GCPhysInlined(cpu_single_env->pVM, pbSrcPhys); 3421 #else 3422 uintptr_t off = pbSrcPhys - phys_ram_base; 3423 #endif 3424 val = PGMR3PhysReadByte(cpu_single_env->pVM, (RTGCPHYS)off); 3425 STAM_PROFILE_ADV_STOP(&gStatMemReadHCPtr, a); 3000 3426 return val; 3001 3427 } 3002 3428 3003 /** 3004 * Read guest RAM and ROM. 3429 3430 /** 3431 * Read guest RAM and ROM, signed 8-bit. 
3005 3432 * 3006 3433 * @param pbSrcPhys The source address. Relative to guest RAM. 3007 3434 */ 3008 int8_t remR3PhysRead SByte(uint8_t *pbSrcPhys)3435 int8_t remR3PhysReadHCPtrS8(uint8_t *pbSrcPhys) 3009 3436 { 3010 3437 int8_t val; 3011 3438 3012 STAM_PROFILE_ADV_START(&gStatMemRead , a);3439 STAM_PROFILE_ADV_START(&gStatMemReadHCPtr, a); 3013 3440 3014 3441 /* … … 3016 3443 * ROM is accessed this way, even if it's not part of the RAM. 3017 3444 */ 3018 /** @todo This is rather ugly, but there's no other way when we don't wish to touch *many* other files. */ 3019 uintptr_t off = remR3HCVirt2GCPhys(cpu_single_env, pbSrcPhys); 3020 if (off < (uintptr_t)phys_ram_size) 3021 val = PGMR3PhysReadByte(cpu_single_env->pVM, (RTGCPHYS)off); 3022 else 3023 { 3024 /* ROM range outside physical RAM, HC address passed directly */ 3025 Log4(("remR3PhysReadBytes ROM: %p\n", pbSrcPhys)); 3026 val = *(int8_t *)pbSrcPhys; 3027 } 3028 STAM_PROFILE_ADV_STOP(&gStatMemRead, a); 3445 #ifdef PGM_DYNAMIC_RAM_ALLOC 3446 uintptr_t off = remR3HCVirt2GCPhysInlined(cpu_single_env->pVM, pbSrcPhys); 3447 #else 3448 uintptr_t off = pbSrcPhys - phys_ram_base; 3449 #endif 3450 val = PGMR3PhysReadByte(cpu_single_env->pVM, (RTGCPHYS)off); 3451 STAM_PROFILE_ADV_STOP(&gStatMemReadHCPtr, a); 3029 3452 return val; 3030 3453 } 3031 3454 3032 /** 3033 * Read guest RAM and ROM. 3455 3456 /** 3457 * Read guest RAM and ROM, unsigned 16-bit. 3034 3458 * 3035 3459 * @param pbSrcPhys The source address. Relative to guest RAM. 3036 3460 */ 3037 uint16_t remR3PhysRead UWord(uint8_t *pbSrcPhys)3461 uint16_t remR3PhysReadHCPtrU16(uint8_t *pbSrcPhys) 3038 3462 { 3039 3463 uint16_t val; 3040 3464 3041 STAM_PROFILE_ADV_START(&gStatMemRead , a);3465 STAM_PROFILE_ADV_START(&gStatMemReadHCPtr, a); 3042 3466 3043 3467 /* … … 3045 3469 * ROM is accessed this way, even if it's not part of the RAM. 3046 3470 */ 3047 /** @todo This is rather ugly, but there's no other way when we don't wish to touch *many* other files. */ 3048 uintptr_t off = remR3HCVirt2GCPhys(cpu_single_env, pbSrcPhys); 3049 if (off < (uintptr_t)phys_ram_size) 3050 val = PGMR3PhysReadWord(cpu_single_env->pVM, (RTGCPHYS)off); 3051 else 3052 { 3053 /* ROM range outside physical RAM, HC address passed directly */ 3054 Log4(("remR3PhysReadBytes ROM: %p\n", pbSrcPhys)); 3055 val = *(uint16_t *)pbSrcPhys; 3056 } 3057 STAM_PROFILE_ADV_STOP(&gStatMemRead, a); 3471 #ifdef PGM_DYNAMIC_RAM_ALLOC 3472 uintptr_t off = remR3HCVirt2GCPhysInlined(cpu_single_env->pVM, pbSrcPhys); 3473 #else 3474 uintptr_t off = pbSrcPhys - phys_ram_base; 3475 #endif 3476 val = PGMR3PhysReadWord(cpu_single_env->pVM, (RTGCPHYS)off); 3477 STAM_PROFILE_ADV_STOP(&gStatMemReadHCPtr, a); 3058 3478 return val; 3059 3479 } 3060 3480 3061 /** 3062 * Read guest RAM and ROM. 3481 3482 /** 3483 * Read guest RAM and ROM, signed 16-bit. 3063 3484 * 3064 3485 * @param pbSrcPhys The source address. Relative to guest RAM. 3065 3486 */ 3066 int16_t remR3PhysRead SWord(uint8_t *pbSrcPhys)3487 int16_t remR3PhysReadHCPtrS16(uint8_t *pbSrcPhys) 3067 3488 { 3068 3489 int16_t val; 3069 3490 3070 STAM_PROFILE_ADV_START(&gStatMemRead , a);3491 STAM_PROFILE_ADV_START(&gStatMemReadHCPtr, a); 3071 3492 3072 3493 /* … … 3075 3496 */ 3076 3497 /** @todo This is rather ugly, but there's no other way when we don't wish to touch *many* other files. 
*/ 3077 uintptr_t off = remR3HCVirt2GCPhys(cpu_single_env, pbSrcPhys); 3078 if (off < (uintptr_t)phys_ram_size) 3079 val = PGMR3PhysReadWord(cpu_single_env->pVM, (RTGCPHYS)off); 3080 else 3081 { 3082 /* ROM range outside physical RAM, HC address passed directly */ 3083 Log4(("remR3PhysReadBytes ROM: %p\n", pbSrcPhys)); 3084 val = *(int16_t *)pbSrcPhys; 3085 } 3086 STAM_PROFILE_ADV_STOP(&gStatMemRead, a); 3498 #ifdef PGM_DYNAMIC_RAM_ALLOC 3499 uintptr_t off = remR3HCVirt2GCPhysInlined(cpu_single_env->pVM, pbSrcPhys); 3500 #else 3501 uintptr_t off = pbSrcPhys - phys_ram_base; 3502 #endif 3503 val = PGMR3PhysReadWord(cpu_single_env->pVM, (RTGCPHYS)off); 3504 STAM_PROFILE_ADV_STOP(&gStatMemReadHCPtr, a); 3087 3505 return val; 3088 3506 } 3089 3507 3090 /** 3091 * Read guest RAM and ROM. 3508 3509 /** 3510 * Read guest RAM and ROM, unsigned 32-bit. 3092 3511 * 3093 3512 * @param pbSrcPhys The source address. Relative to guest RAM. 3094 3513 */ 3095 uint32_t remR3PhysRead ULong(uint8_t *pbSrcPhys)3514 uint32_t remR3PhysReadHCPtrU32(uint8_t *pbSrcPhys) 3096 3515 { 3097 3516 uint32_t val; 3098 3517 3099 STAM_PROFILE_ADV_START(&gStatMemRead , a);3518 STAM_PROFILE_ADV_START(&gStatMemReadHCPtr, a); 3100 3519 3101 3520 /* … … 3103 3522 * ROM is accessed this way, even if it's not part of the RAM. 3104 3523 */ 3105 /** @todo This is rather ugly, but there's no other way when we don't wish to touch *many* other files. */ 3106 uintptr_t off = remR3HCVirt2GCPhys(cpu_single_env, pbSrcPhys); 3107 if (off < (uintptr_t)phys_ram_size) 3108 val = PGMR3PhysReadDword(cpu_single_env->pVM, (RTGCPHYS)off); 3109 else 3110 { 3111 /* ROM range outside physical RAM, HC address passed directly */ 3112 Log4(("remR3PhysReadBytes ROM: %p\n", pbSrcPhys)); 3113 val = *(uint32_t *)pbSrcPhys; 3114 } 3115 STAM_PROFILE_ADV_STOP(&gStatMemRead, a); 3524 #ifdef PGM_DYNAMIC_RAM_ALLOC 3525 uintptr_t off = remR3HCVirt2GCPhysInlined(cpu_single_env->pVM, pbSrcPhys); 3526 #else 3527 uintptr_t off = pbSrcPhys - phys_ram_base; 3528 #endif 3529 val = PGMR3PhysReadDword(cpu_single_env->pVM, (RTGCPHYS)off); 3530 STAM_PROFILE_ADV_STOP(&gStatMemReadHCPtr, a); 3116 3531 return val; 3117 3532 } 3118 3533 3119 /** 3120 * Read guest RAM and ROM. 3534 3535 /** 3536 * Read guest RAM and ROM, signed 32-bit. 3121 3537 * 3122 3538 * @param pbSrcPhys The source address. Relative to guest RAM. 3123 3539 */ 3124 int32_t remR3PhysRead SLong(uint8_t *pbSrcPhys)3540 int32_t remR3PhysReadHCPtrS32(uint8_t *pbSrcPhys) 3125 3541 { 3126 3542 int32_t val; 3127 3543 3128 STAM_PROFILE_ADV_START(&gStatMemRead , a);3544 STAM_PROFILE_ADV_START(&gStatMemReadHCPtr, a); 3129 3545 3130 3546 /* … … 3132 3548 * ROM is accessed this way, even if it's not part of the RAM. 3133 3549 */ 3134 /** @todo This is rather ugly, but there's no other way when we don't wish to touch *many* other files. 
*/ 3135 uintptr_t off = remR3HCVirt2GCPhys(cpu_single_env, pbSrcPhys); 3136 if (off < (uintptr_t)phys_ram_size) 3137 val = PGMR3PhysReadDword(cpu_single_env->pVM, (RTGCPHYS)off); 3138 else 3139 { 3140 /* ROM range outside physical RAM, HC address passed directly */ 3141 Log4(("remR3PhysReadBytes ROM: %p\n", pbSrcPhys)); 3142 val = *(int32_t *)pbSrcPhys; 3143 } 3144 STAM_PROFILE_ADV_STOP(&gStatMemRead, a); 3550 #ifdef PGM_DYNAMIC_RAM_ALLOC 3551 uintptr_t off = remR3HCVirt2GCPhysInlined(cpu_single_env->pVM, pbSrcPhys); 3552 #else 3553 uintptr_t off = pbSrcPhys - phys_ram_base; 3554 #endif 3555 val = PGMR3PhysReadDword(cpu_single_env->pVM, (RTGCPHYS)off); 3556 STAM_PROFILE_ADV_STOP(&gStatMemReadHCPtr, a); 3145 3557 return val; 3146 3558 } 3559 3560 3561 /** 3562 * Read guest RAM and ROM, unsigned 64-bit. 3563 * 3564 * @param pbSrcPhys The source address. Relative to guest RAM. 3565 */ 3566 uint64_t remR3PhysReadHCPtrU64(uint8_t *pbSrcPhys) 3567 { 3568 uint64_t val; 3569 3570 STAM_PROFILE_ADV_START(&gStatMemReadHCPtr, a); 3571 3572 /* 3573 * Calc the physical address ('off') and check that it's within the RAM. 3574 * ROM is accessed this way, even if it's not part of the RAM. 3575 */ 3576 #ifdef PGM_DYNAMIC_RAM_ALLOC 3577 uintptr_t off = remR3HCVirt2GCPhysInlined(cpu_single_env->pVM, pbSrcPhys); 3578 #else 3579 uintptr_t off = pbSrcPhys - phys_ram_base; 3580 #endif 3581 val = PGMR3PhysReadDword(cpu_single_env->pVM, (RTGCPHYS)off) 3582 | ((uint64_t)PGMR3PhysReadDword(cpu_single_env->pVM, (RTGCPHYS)off + 4) << 32); /** @todo fix me! */ 3583 STAM_PROFILE_ADV_STOP(&gStatMemReadHCPtr, a); 3584 return val; 3585 } 3586 3147 3587 3148 3588 /** … … 3153 3593 * @param cb Number of bytes to write 3154 3594 */ 3155 void remR3PhysWrite Bytes(uint8_t *pbDstPhys, const void *pvSrc, unsigned cb)3156 { 3157 STAM_PROFILE_ADV_START(&gStatMemWrite , a);3595 void remR3PhysWriteHCPtr(uint8_t *pbDstPhys, const void *pvSrc, unsigned cb) 3596 { 3597 STAM_PROFILE_ADV_START(&gStatMemWriteHCPtr, a); 3158 3598 /* 3159 3599 * Calc the physical address ('off') and check that it's within the RAM. 3160 3600 */ 3161 uintptr_t off = remR3HCVirt2GCPhys(cpu_single_env, pbDstPhys); 3162 if (off < (uintptr_t)phys_ram_size) 3163 PGMPhysWrite(cpu_single_env->pVM, (RTGCPHYS)off, pvSrc, cb); 3164 else 3165 AssertMsgFailed(("pbDstPhys=%p off=%p cb=%d\n", pbDstPhys, off, cb)); 3166 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a); 3167 } 3168 3169 3170 /** 3171 * Write guest RAM. 3601 #ifdef PGM_DYNAMIC_RAM_ALLOC 3602 uintptr_t off = remR3HCVirt2GCPhysInlined(cpu_single_env->pVM, pbDstPhys); 3603 #else 3604 uintptr_t off = pbDstPhys - phys_ram_base; 3605 #endif 3606 PGMPhysWrite(cpu_single_env->pVM, (RTGCPHYS)off, pvSrc, cb); 3607 STAM_PROFILE_ADV_STOP(&gStatMemWriteHCPtr, a); 3608 } 3609 3610 3611 /** 3612 * Write guest RAM, unsigned 8-bit. 3172 3613 * 3173 3614 * @param pbDstPhys The destination address. Relative to guest RAM. 3174 3615 * @param val Value 3175 3616 */ 3176 void remR3PhysWrite Byte(uint8_t *pbDstPhys, uint8_t val)3177 { 3178 STAM_PROFILE_ADV_START(&gStatMemWrite , a);3617 void remR3PhysWriteHCPtrU8(uint8_t *pbDstPhys, uint8_t val) 3618 { 3619 STAM_PROFILE_ADV_START(&gStatMemWriteHCPtr, a); 3179 3620 /* 3180 3621 * Calc the physical address ('off') and check that it's within the RAM. 
3181 3622 */ 3182 uintptr_t off = remR3HCVirt2GCPhys(cpu_single_env, pbDstPhys); 3183 if (off < (uintptr_t)phys_ram_size) 3184 PGMR3PhysWriteByte(cpu_single_env->pVM, (RTGCPHYS)off, val); 3185 else 3186 AssertMsgFailed(("pbDstPhys=%p off=%p cb=%d\n", pbDstPhys, off, 1)); 3187 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a); 3188 } 3189 3190 /** 3191 * Write guest RAM. 3623 #ifdef PGM_DYNAMIC_RAM_ALLOC 3624 uintptr_t off = remR3HCVirt2GCPhysInlined(cpu_single_env->pVM, pbDstPhys); 3625 #else 3626 uintptr_t off = pbDstPhys - phys_ram_base; 3627 #endif 3628 PGMR3PhysWriteByte(cpu_single_env->pVM, (RTGCPHYS)off, val); 3629 STAM_PROFILE_ADV_STOP(&gStatMemWriteHCPtr, a); 3630 } 3631 3632 3633 /** 3634 * Write guest RAM, unsigned 16-bit. 3192 3635 * 3193 3636 * @param pbDstPhys The destination address. Relative to guest RAM. 3194 3637 * @param val Value 3195 3638 */ 3196 void remR3PhysWrite Word(uint8_t *pbDstPhys, uint16_t val)3197 { 3198 STAM_PROFILE_ADV_START(&gStatMemWrite , a);3639 void remR3PhysWriteHCPtrU16(uint8_t *pbDstPhys, uint16_t val) 3640 { 3641 STAM_PROFILE_ADV_START(&gStatMemWriteHCPtr, a); 3199 3642 /* 3200 3643 * Calc the physical address ('off') and check that it's within the RAM. 3201 3644 */ 3202 uintptr_t off = remR3HCVirt2GCPhys(cpu_single_env, pbDstPhys); 3203 if (off < (uintptr_t)phys_ram_size) 3204 PGMR3PhysWriteWord(cpu_single_env->pVM, (RTGCPHYS)off, val); 3205 else 3206 AssertMsgFailed(("pbDstPhys=%p off=%p cb=%d\n", pbDstPhys, off, 2)); 3207 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a); 3208 } 3209 3210 /** 3211 * Write guest RAM. 3645 #ifdef PGM_DYNAMIC_RAM_ALLOC 3646 uintptr_t off = remR3HCVirt2GCPhysInlined(cpu_single_env->pVM, pbDstPhys); 3647 #else 3648 uintptr_t off = pbDstPhys - phys_ram_base; 3649 #endif 3650 PGMR3PhysWriteWord(cpu_single_env->pVM, (RTGCPHYS)off, val); 3651 STAM_PROFILE_ADV_STOP(&gStatMemWriteHCPtr, a); 3652 } 3653 3654 3655 /** 3656 * Write guest RAM, unsigned 32-bit. 3212 3657 * 3213 3658 * @param pbDstPhys The destination address. Relative to guest RAM. 3214 3659 * @param val Value 3215 3660 */ 3216 void remR3PhysWrite Dword(uint8_t *pbDstPhys, uint32_t val)3217 { 3218 STAM_PROFILE_ADV_START(&gStatMemWrite , a);3661 void remR3PhysWriteHCPtrU32(uint8_t *pbDstPhys, uint32_t val) 3662 { 3663 STAM_PROFILE_ADV_START(&gStatMemWriteHCPtr, a); 3219 3664 /* 3220 3665 * Calc the physical address ('off') and check that it's within the RAM. 3221 3666 */ 3222 uintptr_t off = remR3HCVirt2GCPhys(cpu_single_env, pbDstPhys); 3223 if (off < (uintptr_t)phys_ram_size) 3224 PGMR3PhysWriteDword(cpu_single_env->pVM, (RTGCPHYS)off, val); 3225 else 3226 AssertMsgFailed(("pbDstPhys=%p off=%p cb=%d\n", pbDstPhys, off, 4)); 3227 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a); 3228 } 3229 3667 #ifdef PGM_DYNAMIC_RAM_ALLOC 3668 uintptr_t off = remR3HCVirt2GCPhysInlined(cpu_single_env->pVM, pbDstPhys); 3669 #else 3670 uintptr_t off = pbDstPhys - phys_ram_base; 3671 #endif 3672 PGMR3PhysWriteDword(cpu_single_env->pVM, (RTGCPHYS)off, val); 3673 STAM_PROFILE_ADV_STOP(&gStatMemWriteHCPtr, a); 3674 } 3675 3676 3677 /** 3678 * Write guest RAM, unsigned 64-bit. 3679 * 3680 * @param pbDstPhys The destination address. Relative to guest RAM. 3681 * @param val Value 3682 */ 3683 void remR3PhysWriteHCPtrU64(uint8_t *pbDstPhys, uint64_t val) 3684 { 3685 STAM_PROFILE_ADV_START(&gStatMemWriteHCPtr, a); 3686 /* 3687 * Calc the physical address ('off') and check that it's within the RAM. 
*/ 3689 #ifdef PGM_DYNAMIC_RAM_ALLOC 3690 uintptr_t off = remR3HCVirt2GCPhysInlined(cpu_single_env->pVM, pbDstPhys); 3691 #else 3692 uintptr_t off = pbDstPhys - phys_ram_base; 3693 #endif 3694 PGMR3PhysWriteDword(cpu_single_env->pVM, (RTGCPHYS)off, (uint32_t)val); /** @todo add U64 interface. */ 3695 PGMR3PhysWriteDword(cpu_single_env->pVM, (RTGCPHYS)off + 4, val >> 32); 3696 STAM_PROFILE_ADV_STOP(&gStatMemWriteHCPtr, a); 3697 } 3698 3699 #endif /* !REM_PHYS_ADDR_IN_TLB */ 3230 3700 3231 3701 … 3459 3929 { 3460 3930 /* physical address */ 3461 int rc = PGMPhysGCPhys2HCPtr(env->pVM, (RTGCPHYS)GCPtrPC, nrInstructions*16, &pvPC); 3931 int rc = PGMPhysGCPhys2HCPtr(env->pVM, (RTGCPHYS)GCPtrPC, nrInstructions * 16, &pvPC); 3462 3932 if (VBOX_FAILURE(rc)) 3463 3933 return false; … 3682 4152 for (;;) 3683 4153 { 3684 char szBuf[256]; 3685 size_t cbInstr; 4154 char szBuf[256]; 4155 uint32_t cbInstr; 3686 4156 int rc = DBGFR3DisasInstrEx(pVM, 3687 4157 cs, … 3978 4448 uint64_t cpu_get_tsc(CPUX86State *env) 3979 4449 { 4450 STAM_COUNTER_INC(&gStatCpuGetTSC); 3980 4451 return TMCpuTickGet(env->pVM); 3981 4452 } … 4898 5369 }; 4899 5370 uint32_t uEAX; 4900 #ifndef DEBUG_bird 4901 5371 if (!LogIsEnabled()) 4902 5372 return; 4903 #endif 4904 5373 uEAX = CPUMGetGuestEAX(pVM); 4905 5374 switch (uEAX) … 4919 5388 } 4920 5389 } 5390 5391 5392 #if defined(IPRT_NO_CRT) && defined(__WIN__) && defined(__X86__) 5393 /** 5394 * The Dll main entry point (stub). 5395 */ 5396 bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved) 5397 { 5398 return true; 5399 } 5400 5401 void *memcpy(void *dst, const void *src, size_t size) 5402 { 5403 uint8_t *pbDst = dst; const uint8_t *pbSrc = src; 5404 while (size-- > 0) 5405 *pbDst++ = *pbSrc++; 5406 return dst; 5407 } 5408 5409 #endif 5410
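A note on the 64-bit accessors in the hunks above: PGM only exposes byte/word/dword primitives here, so remR3PhysReadU64() and remR3PhysWriteU64() are composed from two dword operations (hence the /** @todo add U64 interface. */ markers). A minimal sketch of the equivalent read, assuming the little-endian guest layout used above; pgmSketchPhysReadU64 is a hypothetical helper name, not an interface added by this changeset:

    /* Sketch only: mirrors the two 32-bit PGM reads done by remR3PhysReadU64(). */
    static uint64_t pgmSketchPhysReadU64(PVM pVM, RTGCPHYS GCPhys)
    {
        uint64_t u64 = PGMR3PhysReadDword(pVM, GCPhys);               /* low dword  */
        u64 |= (uint64_t)PGMR3PhysReadDword(pVM, GCPhys + 4) << 32;   /* high dword */
        return u64;
    }

A native U64 interface would also resolve the guest physical address only once instead of twice.
-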
trunk/src/recompiler/a.out.h
r1 r2422 152 152 #define E_DIMNUM 4 /* # array dimensions in auxiliary entry */ 153 153 154 #pragma pack(1) 155 156 struct external_syment 154 struct __attribute__((packed)) external_syment 157 155 { 158 156 union { … … 169 167 char e_numaux[1]; 170 168 }; 171 172 #pragma pack()173 169 174 170 #define N_BTMASK (0xf) -
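The change above swaps the #pragma pack(1)/#pragma pack() bracket for a per-struct attribute. Both pin the layout; the attribute form just keeps the packing local to the one declaration instead of toggling global compiler state. A reduced illustration (hypothetical struct names, gcc-compatible compilers assumed):

    #include <stdint.h>

    #pragma pack(1)
    struct demo_pragma { char c; uint32_t v; };   /* sizeof() == 5, no padding */
    #pragma pack()

    struct __attribute__((packed)) demo_attr { char c; uint32_t v; };   /* sizeof() == 5 as well */

Packing matters here because these structs are overlaid directly on on-disk COFF symbol records, which contain no alignment padding.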
trunk/src/recompiler/bswap.h
r1 r2422 43 43 #else 44 44 45 #define bswap_16(x) \45 #define bswap_16(x) __extension__ /* <- VBOX */ \ 46 46 ({ \ 47 47 uint16_t __x = (x); \ … … 51 51 }) 52 52 53 #define bswap_32(x) \53 #define bswap_32(x) __extension__ /* <- VBOX */ \ 54 54 ({ \ 55 55 uint32_t __x = (x); \ … … 61 61 }) 62 62 63 #define bswap_64(x) \63 #define bswap_64(x) __extension__ /* <- VBOX */ \ 64 64 ({ \ 65 65 uint64_t __x = (x); \ -
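The __extension__ marker added above matters because the ({ ... }) macro bodies are GNU C statement expressions: they let the macro evaluate its argument exactly once, but gcc flags them under -pedantic unless the use is declared a deliberate extension. A reduced sketch of the same pattern (hypothetical macro name):

    #include <stdint.h>

    /* Statement expression: x is evaluated once, the block yields the swapped value. */
    #define swap16_sketch(x) __extension__ ({ \
        uint16_t x_ = (x);                    \
        (uint16_t)((x_ << 8) | (x_ >> 8));    \
    })

Single evaluation is what keeps a call like bswap_16(*p++) safe, which a naive parenthesized macro would break.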
trunk/src/recompiler/cpu-all.h
r55 r2422 21 21 #define CPU_ALL_H 22 22 23 #ifdef VBOX 24 # ifndef LOG_GROUP 25 # include <VBox/log.h> 26 # define LOG_GROUP LOG_GROUP_REM 27 # endif 28 # include <VBox/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */ 29 #endif 30 23 31 #if defined(__arm__) || defined(__sparc__) 24 32 #define WORDS_ALIGNED … … 110 118 #define tswapl(s) tswap32(s) 111 119 #define tswapls(s) tswap32s((uint32_t *)(s)) 120 #define bswaptls(s) bswap32s(s) 112 121 #else 113 122 #define tswapl(s) tswap64(s) 114 123 #define tswapls(s) tswap64s((uint64_t *)(s)) 124 #define bswaptls(s) bswap64s(s) 115 125 #endif 116 126 … … 118 128 endian ! */ 119 129 typedef union { 120 double d; 121 #if defined(WORDS_BIGENDIAN) || (defined(__arm__) && !defined(__VFP_FP__)) 130 float64 d; 131 #if defined(WORDS_BIGENDIAN) \ 132 || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT)) 122 133 struct { 123 134 uint32_t upper; … … 170 181 #ifdef VBOX 171 182 172 #if !defined(REMR3PHYSREADWRITE_DEFINED) 173 #define REMR3PHYSREADWRITE_DEFINED 174 /* Header sharing between vbox & qemu is rather ugly. */ 175 void remR3PhysReadBytes(uint8_t *pbSrcPhys, void *pvDst, unsigned cb); 176 uint8_t remR3PhysReadUByte(uint8_t *pbSrcPhys); 177 int8_t remR3PhysReadSByte(uint8_t *pbSrcPhys); 178 uint16_t remR3PhysReadUWord(uint8_t *pbSrcPhys); 179 int16_t remR3PhysReadSWord(uint8_t *pbSrcPhys); 180 uint32_t remR3PhysReadULong(uint8_t *pbSrcPhys); 181 int32_t remR3PhysReadSLong(uint8_t *pbSrcPhys); 182 void remR3PhysWriteBytes(uint8_t *pbDstPhys, const void *pvSrc, unsigned cb); 183 void remR3PhysWriteByte(uint8_t *pbDstPhys, uint8_t val); 184 void remR3PhysWriteWord(uint8_t *pbDstPhys, uint16_t val); 185 void remR3PhysWriteDword(uint8_t *pbDstPhys, uint32_t val); 183 void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb); 184 uint8_t remR3PhysReadU8(RTGCPHYS SrcGCPhys); 185 int8_t remR3PhysReadS8(RTGCPHYS SrcGCPhys); 186 uint16_t remR3PhysReadU16(RTGCPHYS SrcGCPhys); 187 int16_t remR3PhysReadS16(RTGCPHYS SrcGCPhys); 188 uint32_t remR3PhysReadU32(RTGCPHYS SrcGCPhys); 189 int32_t remR3PhysReadS32(RTGCPHYS SrcGCPhys); 190 uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys); 191 int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys); 192 void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb); 193 void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val); 194 void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val); 195 void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val); 196 void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val); 197 198 #ifndef REM_PHYS_ADDR_IN_TLB 199 void remR3PhysReadHCPtr(uint8_t *pbSrcPhys, void *pvDst, unsigned cb); 200 uint8_t remR3PhysReadHCPtrU8(uint8_t *pbSrcPhys); 201 int8_t remR3PhysReadHCPtrS8(uint8_t *pbSrcPhys); 202 uint16_t remR3PhysReadHCPtrU16(uint8_t *pbSrcPhys); 203 int16_t remR3PhysReadHCPtrS16(uint8_t *pbSrcPhys); 204 uint32_t remR3PhysReadHCPtrU32(uint8_t *pbSrcPhys); 205 int32_t remR3PhysReadHCPtrS32(uint8_t *pbSrcPhys); 206 uint64_t remR3PhysReadHCPtrU64(uint8_t *pbSrcPhys); 207 int64_t remR3PhysReadHCPtrS64(uint8_t *pbSrcPhys); 208 void remR3PhysWriteHCPtr(uint8_t *pbDstPhys, const void *pvSrc, unsigned cb); 209 void remR3PhysWriteHCPtrU8(uint8_t *pbDstPhys, uint8_t val); 210 void remR3PhysWriteHCPtrU16(uint8_t *pbDstPhys, uint16_t val); 211 void remR3PhysWriteHCPtrU32(uint8_t *pbDstPhys, uint32_t val); 212 void remR3PhysWriteHCPtrU64(uint8_t *pbDstPhys, uint64_t val); 213 #endif 214 215 #ifdef PGM_DYNAMIC_RAM_ALLOC 216 # ifndef REM_PHYS_ADDR_IN_TLB 186 217 void *remR3GCPhys2HCVirt(void *env, 
target_ulong addr); 187 218 target_ulong remR3HCVirt2GCPhys(void *env, void *addr); 219 # endif 188 220 void remR3GrowDynRange(unsigned long physaddr); 189 221 #endif 222 #if 0 /*defined(__AMD64__) && defined(VBOX_STRICT)*/ 223 # define VBOX_CHECK_ADDR(ptr) do { if ((uintptr_t)(ptr) >= _4G) __asm__("int3"); } while (0) 224 #else 225 # define VBOX_CHECK_ADDR(ptr) do { } while (0) 226 #endif 190 227 191 228 static inline int ldub_p(void *ptr) 192 229 { 193 return remR3PhysReadUByte(ptr); 230 #ifdef REM_PHYS_ADDR_IN_TLB 231 VBOX_CHECK_ADDR(ptr); 232 return remR3PhysReadU8((uintptr_t)ptr); 233 #else 234 return remR3PhysReadHCPtrU8(ptr); 235 #endif 194 236 } 195 237 196 238 static inline int ldsb_p(void *ptr) 197 239 { 198 return remR3PhysReadSByte(ptr); 240 #ifdef REM_PHYS_ADDR_IN_TLB 241 VBOX_CHECK_ADDR(ptr); 242 return remR3PhysReadS8((uintptr_t)ptr); 243 #else 244 return remR3PhysReadHCPtrS8(ptr); 245 #endif 199 246 } 200 247 201 248 static inline void stb_p(void *ptr, int v) 202 249 { 203 remR3PhysWriteByte(ptr, v); 204 } 205 206 #else 250 #ifdef REM_PHYS_ADDR_IN_TLB 251 VBOX_CHECK_ADDR(ptr); 252 remR3PhysWriteU8((uintptr_t)ptr, v); 253 #else 254 remR3PhysWriteHCPtrU8(ptr, v); 255 #endif 256 } 257 258 static inline int lduw_le_p(void *ptr) 259 { 260 #ifdef REM_PHYS_ADDR_IN_TLB 261 VBOX_CHECK_ADDR(ptr); 262 return remR3PhysReadU16((uintptr_t)ptr); 263 #else 264 return remR3PhysReadHCPtrU16(ptr); 265 #endif 266 } 267 268 static inline int ldsw_le_p(void *ptr) 269 { 270 #ifdef REM_PHYS_ADDR_IN_TLB 271 VBOX_CHECK_ADDR(ptr); 272 return remR3PhysReadS16((uintptr_t)ptr); 273 #else 274 return remR3PhysReadHCPtrS16(ptr); 275 #endif 276 } 277 278 static inline void stw_le_p(void *ptr, int v) 279 { 280 #ifdef REM_PHYS_ADDR_IN_TLB 281 VBOX_CHECK_ADDR(ptr); 282 remR3PhysWriteU16((uintptr_t)ptr, v); 283 #else 284 remR3PhysWriteHCPtrU16(ptr, v); 285 #endif 286 } 287 288 static inline int ldl_le_p(void *ptr) 289 { 290 #ifdef REM_PHYS_ADDR_IN_TLB 291 VBOX_CHECK_ADDR(ptr); 292 return remR3PhysReadU32((uintptr_t)ptr); 293 #else 294 return remR3PhysReadHCPtrU32(ptr); 295 #endif 296 } 297 298 static inline void stl_le_p(void *ptr, int v) 299 { 300 #ifdef REM_PHYS_ADDR_IN_TLB 301 VBOX_CHECK_ADDR(ptr); 302 remR3PhysWriteU32((uintptr_t)ptr, v); 303 #else 304 remR3PhysWriteHCPtrU32(ptr, v); 305 #endif 306 } 307 308 static inline void stq_le_p(void *ptr, uint64_t v) 309 { 310 #ifdef REM_PHYS_ADDR_IN_TLB 311 VBOX_CHECK_ADDR(ptr); 312 remR3PhysWriteU64((uintptr_t)ptr, v); 313 #else 314 remR3PhysWriteHCPtrU64(ptr, v); 315 #endif 316 } 317 318 static inline uint64_t ldq_le_p(void *ptr) 319 { 320 #ifdef REM_PHYS_ADDR_IN_TLB 321 VBOX_CHECK_ADDR(ptr); 322 return remR3PhysReadU64((uintptr_t)ptr); 323 #else 324 return remR3PhysReadHCPtrU64(ptr); 325 #endif 326 } 327 328 #undef VBOX_CHECK_ADDR 329 330 /* float access */ 331 332 static inline float32 ldfl_le_p(void *ptr) 333 { 334 union { 335 float32 f; 336 uint32_t i; 337 } u; 338 u.i = ldl_le_p(ptr); 339 return u.f; 340 } 341 342 static inline void stfl_le_p(void *ptr, float32 v) 343 { 344 union { 345 float32 f; 346 uint32_t i; 347 } u; 348 u.f = v; 349 stl_le_p(ptr, u.i); 350 } 351 352 static inline float64 ldfq_le_p(void *ptr) 353 { 354 CPU_DoubleU u; 355 u.l.lower = ldl_le_p(ptr); 356 u.l.upper = ldl_le_p(ptr + 4); 357 return u.d; 358 } 359 360 static inline void stfq_le_p(void *ptr, float64 v) 361 { 362 CPU_DoubleU u; 363 u.d = v; 364 stl_le_p(ptr, u.l.lower); 365 stl_le_p(ptr + 4, u.l.upper); 366 } 367 368 #else /* !VBOX */ 369 207 370 static inline int ldub_p(void 
*ptr) 208 371 { … … 219 382 *(uint8_t *)ptr = v; 220 383 } 221 #endif222 384 223 385 /* NOTE: on arm, putting 2 in /proc/sys/debug/alignment so that the 224 386 kernel handles unaligned load/stores may give better results, but 225 387 it is a system wide setting : bad */ 226 #if !defined(TARGET_WORDS_BIGENDIAN) && (defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)) 388 #if defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED) 389 227 390 /* conservative code for little endian unaligned accesses */ 228 static inline int lduw_ p(void *ptr)391 static inline int lduw_le_p(void *ptr) 229 392 { 230 393 #ifdef __powerpc__ … … 238 401 } 239 402 240 static inline int ldsw_ p(void *ptr)403 static inline int ldsw_le_p(void *ptr) 241 404 { 242 405 #ifdef __powerpc__ … … 250 413 } 251 414 252 static inline int ldl_ p(void *ptr)415 static inline int ldl_le_p(void *ptr) 253 416 { 254 417 #ifdef __powerpc__ … … 262 425 } 263 426 264 static inline uint64_t ldq_ p(void *ptr)427 static inline uint64_t ldq_le_p(void *ptr) 265 428 { 266 429 uint8_t *p = ptr; 267 430 uint32_t v1, v2; 268 v1 = ldl_ p(p);269 v2 = ldl_ p(p + 4);431 v1 = ldl_le_p(p); 432 v2 = ldl_le_p(p + 4); 270 433 return v1 | ((uint64_t)v2 << 32); 271 434 } 272 435 273 static inline void stw_ p(void *ptr, int v)436 static inline void stw_le_p(void *ptr, int v) 274 437 { 275 438 #ifdef __powerpc__ … … 282 445 } 283 446 284 static inline void stl_ p(void *ptr, int v)447 static inline void stl_le_p(void *ptr, int v) 285 448 { 286 449 #ifdef __powerpc__ … … 295 458 } 296 459 297 static inline void stq_ p(void *ptr, uint64_t v)460 static inline void stq_le_p(void *ptr, uint64_t v) 298 461 { 299 462 uint8_t *p = ptr; 300 stl_ p(p, (uint32_t)v);301 stl_ p(p + 4, v >> 32);463 stl_le_p(p, (uint32_t)v); 464 stl_le_p(p + 4, v >> 32); 302 465 } 303 466 304 467 /* float access */ 305 468 306 static inline float ldfl_p(void *ptr)469 static inline float32 ldfl_le_p(void *ptr) 307 470 { 308 471 union { 309 float f;472 float32 f; 310 473 uint32_t i; 311 474 } u; 312 u.i = ldl_ p(ptr);475 u.i = ldl_le_p(ptr); 313 476 return u.f; 314 477 } 315 478 316 static inline void stfl_ p(void *ptr, floatv)479 static inline void stfl_le_p(void *ptr, float32 v) 317 480 { 318 481 union { 319 float f;482 float32 f; 320 483 uint32_t i; 321 484 } u; 322 485 u.f = v; 323 stl_ p(ptr, u.i);324 } 325 326 static inline double ldfq_p(void *ptr)486 stl_le_p(ptr, u.i); 487 } 488 489 static inline float64 ldfq_le_p(void *ptr) 327 490 { 328 491 CPU_DoubleU u; 329 u.l.lower = ldl_ p(ptr);330 u.l.upper = ldl_ p(ptr + 4);492 u.l.lower = ldl_le_p(ptr); 493 u.l.upper = ldl_le_p(ptr + 4); 331 494 return u.d; 332 495 } 333 496 334 static inline void stfq_ p(void *ptr, doublev)497 static inline void stfq_le_p(void *ptr, float64 v) 335 498 { 336 499 CPU_DoubleU u; 337 500 u.d = v; 338 stl_p(ptr, u.l.lower); 339 stl_p(ptr + 4, u.l.upper); 340 } 341 342 #elif defined(TARGET_WORDS_BIGENDIAN) && (!defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)) 343 static inline int lduw_p(void *ptr) 501 stl_le_p(ptr, u.l.lower); 502 stl_le_p(ptr + 4, u.l.upper); 503 } 504 505 #else 506 507 static inline int lduw_le_p(void *ptr) 508 { 509 return *(uint16_t *)ptr; 510 } 511 512 static inline int ldsw_le_p(void *ptr) 513 { 514 return *(int16_t *)ptr; 515 } 516 517 static inline int ldl_le_p(void *ptr) 518 { 519 return *(uint32_t *)ptr; 520 } 521 522 static inline uint64_t ldq_le_p(void *ptr) 523 { 524 return *(uint64_t *)ptr; 525 } 526 527 static inline void stw_le_p(void *ptr, int v) 528 { 529 *(uint16_t *)ptr = v; 
530 } 531 532 static inline void stl_le_p(void *ptr, int v) 533 { 534 *(uint32_t *)ptr = v; 535 } 536 537 static inline void stq_le_p(void *ptr, uint64_t v) 538 { 539 *(uint64_t *)ptr = v; 540 } 541 542 /* float access */ 543 544 static inline float32 ldfl_le_p(void *ptr) 545 { 546 return *(float32 *)ptr; 547 } 548 549 static inline float64 ldfq_le_p(void *ptr) 550 { 551 return *(float64 *)ptr; 552 } 553 554 static inline void stfl_le_p(void *ptr, float32 v) 555 { 556 *(float32 *)ptr = v; 557 } 558 559 static inline void stfq_le_p(void *ptr, float64 v) 560 { 561 *(float64 *)ptr = v; 562 } 563 #endif 564 #endif /* !VBOX */ 565 566 #if !defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED) 567 568 static inline int lduw_be_p(void *ptr) 344 569 { 345 570 #if defined(__i386__) … … 356 581 } 357 582 358 static inline int ldsw_ p(void *ptr)583 static inline int ldsw_be_p(void *ptr) 359 584 { 360 585 #if defined(__i386__) … … 371 596 } 372 597 373 static inline int ldl_ p(void *ptr)598 static inline int ldl_be_p(void *ptr) 374 599 { 375 600 #if defined(__i386__) || defined(__x86_64__) … … 386 611 } 387 612 388 static inline uint64_t ldq_ p(void *ptr)613 static inline uint64_t ldq_be_p(void *ptr) 389 614 { 390 615 uint32_t a,b; 391 a = ldl_ p(ptr);392 b = ldl_ p(ptr+4);616 a = ldl_be_p(ptr); 617 b = ldl_be_p(ptr+4); 393 618 return (((uint64_t)a<<32)|b); 394 619 } 395 620 396 static inline void stw_ p(void *ptr, int v)621 static inline void stw_be_p(void *ptr, int v) 397 622 { 398 623 #if defined(__i386__) … … 408 633 } 409 634 410 static inline void stl_ p(void *ptr, int v)635 static inline void stl_be_p(void *ptr, int v) 411 636 { 412 637 #if defined(__i386__) || defined(__x86_64__) … … 424 649 } 425 650 426 static inline void stq_ p(void *ptr, uint64_t v)427 { 428 stl_ p(ptr, v >> 32);429 stl_ p(ptr + 4, v);651 static inline void stq_be_p(void *ptr, uint64_t v) 652 { 653 stl_be_p(ptr, v >> 32); 654 stl_be_p(ptr + 4, v); 430 655 } 431 656 432 657 /* float access */ 433 658 434 static inline float ldfl_p(void *ptr)659 static inline float32 ldfl_be_p(void *ptr) 435 660 { 436 661 union { 437 float f;662 float32 f; 438 663 uint32_t i; 439 664 } u; 440 u.i = ldl_ p(ptr);665 u.i = ldl_be_p(ptr); 441 666 return u.f; 442 667 } 443 668 444 static inline void stfl_ p(void *ptr, floatv)669 static inline void stfl_be_p(void *ptr, float32 v) 445 670 { 446 671 union { 447 float f;672 float32 f; 448 673 uint32_t i; 449 674 } u; 450 675 u.f = v; 451 stl_ p(ptr, u.i);452 } 453 454 static inline double ldfq_p(void *ptr)676 stl_be_p(ptr, u.i); 677 } 678 679 static inline float64 ldfq_be_p(void *ptr) 455 680 { 456 681 CPU_DoubleU u; 457 u.l.upper = ldl_ p(ptr);458 u.l.lower = ldl_ p(ptr + 4);682 u.l.upper = ldl_be_p(ptr); 683 u.l.lower = ldl_be_p(ptr + 4); 459 684 return u.d; 460 685 } 461 686 462 static inline void stfq_ p(void *ptr, doublev)687 static inline void stfq_be_p(void *ptr, float64 v) 463 688 { 464 689 CPU_DoubleU u; 465 690 u.d = v; 466 stl_p(ptr, u.l.upper); 467 stl_p(ptr + 4, u.l.lower); 468 } 469 470 #else 471 472 #ifdef VBOX 473 static inline int lduw_p(void *ptr) 474 { 475 return remR3PhysReadUWord(ptr); 476 } 477 478 static inline int ldsw_p(void *ptr) 479 { 480 return remR3PhysReadSWord(ptr); 481 } 482 483 static inline int ldl_p(void *ptr) 484 { 485 return remR3PhysReadULong(ptr); 486 } 487 488 static inline uint64_t ldq_p(void *ptr) 489 { 490 uint64_t val; 491 492 remR3PhysReadBytes(ptr, &val, sizeof(val)); 493 return val; 494 } 495 496 static inline void stw_p(void *ptr, int v) 497 { 498 
remR3PhysWriteWord(ptr, (uint16_t)v); 499 } 500 501 static inline void stl_p(void *ptr, int v) 502 { 503 remR3PhysWriteDword(ptr, (uint32_t)v); 504 } 505 506 static inline void stq_p(void *ptr, uint64_t v) 507 { 508 remR3PhysWriteBytes(ptr, &v, sizeof(v)); 691 stl_be_p(ptr, u.l.upper); 692 stl_be_p(ptr + 4, u.l.lower); 693 } 694 695 #else 696 697 static inline int lduw_be_p(void *ptr) 698 { 699 return *(uint16_t *)ptr; 700 } 701 702 static inline int ldsw_be_p(void *ptr) 703 { 704 return *(int16_t *)ptr; 705 } 706 707 static inline int ldl_be_p(void *ptr) 708 { 709 return *(uint32_t *)ptr; 710 } 711 712 static inline uint64_t ldq_be_p(void *ptr) 713 { 714 return *(uint64_t *)ptr; 715 } 716 717 static inline void stw_be_p(void *ptr, int v) 718 { 719 *(uint16_t *)ptr = v; 720 } 721 722 static inline void stl_be_p(void *ptr, int v) 723 { 724 *(uint32_t *)ptr = v; 725 } 726 727 static inline void stq_be_p(void *ptr, uint64_t v) 728 { 729 *(uint64_t *)ptr = v; 509 730 } 510 731 511 732 /* float access */ 512 733 513 static inline float ldfl_p(void *ptr) 514 { 515 float val; 516 517 remR3PhysReadBytes(ptr, &val, sizeof(val)); 518 return val; 519 } 520 521 static inline double ldfq_p(void *ptr) 522 { 523 double val; 524 525 remR3PhysReadBytes(ptr, &val, sizeof(val)); 526 return val; 527 } 528 529 static inline void stfl_p(void *ptr, float v) 530 { 531 remR3PhysWriteBytes(ptr, &v, sizeof(v)); 532 } 533 534 static inline void stfq_p(void *ptr, double v) 535 { 536 remR3PhysWriteBytes(ptr, &v, sizeof(v)); 537 } 538 #else 539 static inline int lduw_p(void *ptr) 540 { 541 return *(uint16_t *)ptr; 542 } 543 544 static inline int ldsw_p(void *ptr) 545 { 546 return *(int16_t *)ptr; 547 } 548 549 static inline int ldl_p(void *ptr) 550 { 551 return *(uint32_t *)ptr; 552 } 553 554 static inline uint64_t ldq_p(void *ptr) 555 { 556 return *(uint64_t *)ptr; 557 } 558 559 static inline void stw_p(void *ptr, int v) 560 { 561 *(uint16_t *)ptr = v; 562 } 563 564 static inline void stl_p(void *ptr, int v) 565 { 566 *(uint32_t *)ptr = v; 567 } 568 569 static inline void stq_p(void *ptr, uint64_t v) 570 { 571 *(uint64_t *)ptr = v; 572 } 573 574 /* float access */ 575 576 static inline float ldfl_p(void *ptr) 577 { 578 return *(float *)ptr; 579 } 580 581 static inline double ldfq_p(void *ptr) 582 { 583 return *(double *)ptr; 584 } 585 586 static inline void stfl_p(void *ptr, float v) 587 { 588 *(float *)ptr = v; 589 } 590 591 static inline void stfq_p(void *ptr, double v) 592 { 593 *(double *)ptr = v; 594 } 595 #endif /* VBOX */ 596 734 static inline float32 ldfl_be_p(void *ptr) 735 { 736 return *(float32 *)ptr; 737 } 738 739 static inline float64 ldfq_be_p(void *ptr) 740 { 741 return *(float64 *)ptr; 742 } 743 744 static inline void stfl_be_p(void *ptr, float32 v) 745 { 746 *(float32 *)ptr = v; 747 } 748 749 static inline void stfq_be_p(void *ptr, float64 v) 750 { 751 *(float64 *)ptr = v; 752 } 753 754 #endif 755 756 /* target CPU memory access functions */ 757 #if defined(TARGET_WORDS_BIGENDIAN) 758 #define lduw_p(p) lduw_be_p(p) 759 #define ldsw_p(p) ldsw_be_p(p) 760 #define ldl_p(p) ldl_be_p(p) 761 #define ldq_p(p) ldq_be_p(p) 762 #define ldfl_p(p) ldfl_be_p(p) 763 #define ldfq_p(p) ldfq_be_p(p) 764 #define stw_p(p, v) stw_be_p(p, v) 765 #define stl_p(p, v) stl_be_p(p, v) 766 #define stq_p(p, v) stq_be_p(p, v) 767 #define stfl_p(p, v) stfl_be_p(p, v) 768 #define stfq_p(p, v) stfq_be_p(p, v) 769 #else 770 #define lduw_p(p) lduw_le_p(p) 771 #define ldsw_p(p) ldsw_le_p(p) 772 #define ldl_p(p) ldl_le_p(p) 773 #define 
ldq_p(p) ldq_le_p(p) 774 #define ldfl_p(p) ldfl_le_p(p) 775 #define ldfq_p(p) ldfq_le_p(p) 776 #define stw_p(p, v) stw_le_p(p, v) 777 #define stl_p(p, v) stl_le_p(p, v) 778 #define stq_p(p, v) stq_le_p(p, v) 779 #define stfl_p(p, v) stfl_le_p(p, v) 780 #define stfq_p(p, v) stfq_le_p(p, v) 597 781 #endif 598 782 599 783 /* MMU memory access macros */ 600 784 785 #if defined(CONFIG_USER_ONLY) 786 /* On some host systems the guest address space is reserved on the host. 787 * This allows the guest address space to be offset to a convenient location. 788 */ 789 //#define GUEST_BASE 0x20000000 790 #define GUEST_BASE 0 791 792 /* All direct uses of g2h and h2g need to go away for usermode softmmu. */ 793 #define g2h(x) ((void *)((unsigned long)(x) + GUEST_BASE)) 794 #define h2g(x) ((target_ulong)(x - GUEST_BASE)) 795 796 #define saddr(x) g2h(x) 797 #define laddr(x) g2h(x) 798 799 #else /* !CONFIG_USER_ONLY */ 601 800 /* NOTE: we use double casts if pointers and target_ulong have 602 801 different sizes */ 603 #define ldub_raw(p) ldub_p((uint8_t *)(long)(p)) 604 #define ldsb_raw(p) ldsb_p((uint8_t *)(long)(p)) 605 #define lduw_raw(p) lduw_p((uint8_t *)(long)(p)) 606 #define ldsw_raw(p) ldsw_p((uint8_t *)(long)(p)) 607 #define ldl_raw(p) ldl_p((uint8_t *)(long)(p)) 608 #define ldq_raw(p) ldq_p((uint8_t *)(long)(p)) 609 #define ldfl_raw(p) ldfl_p((uint8_t *)(long)(p)) 610 #define ldfq_raw(p) ldfq_p((uint8_t *)(long)(p)) 611 #define stb_raw(p, v) stb_p((uint8_t *)(long)(p), v) 612 #define stw_raw(p, v) stw_p((uint8_t *)(long)(p), v) 613 #define stl_raw(p, v) stl_p((uint8_t *)(long)(p), v) 614 #define stq_raw(p, v) stq_p((uint8_t *)(long)(p), v) 615 #define stfl_raw(p, v) stfl_p((uint8_t *)(long)(p), v) 616 #define stfq_raw(p, v) stfq_p((uint8_t *)(long)(p), v) 802 #define saddr(x) (uint8_t *)(long)(x) 803 #define laddr(x) (uint8_t *)(long)(x) 804 #endif 805 806 #define ldub_raw(p) ldub_p(laddr((p))) 807 #define ldsb_raw(p) ldsb_p(laddr((p))) 808 #define lduw_raw(p) lduw_p(laddr((p))) 809 #define ldsw_raw(p) ldsw_p(laddr((p))) 810 #define ldl_raw(p) ldl_p(laddr((p))) 811 #define ldq_raw(p) ldq_p(laddr((p))) 812 #define ldfl_raw(p) ldfl_p(laddr((p))) 813 #define ldfq_raw(p) ldfq_p(laddr((p))) 814 #define stb_raw(p, v) stb_p(saddr((p)), v) 815 #define stw_raw(p, v) stw_p(saddr((p)), v) 816 #define stl_raw(p, v) stl_p(saddr((p)), v) 817 #define stq_raw(p, v) stq_p(saddr((p)), v) 818 #define stfl_raw(p, v) stfl_p(saddr((p)), v) 819 #define stfq_raw(p, v) stfq_p(saddr((p)), v) 617 820 618 821 … … 663 866 #define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK) 664 867 868 /* ??? These should be the larger of unsigned long and target_ulong. 
*/ 665 870 extern unsigned long qemu_real_host_page_size; 666 871 extern unsigned long qemu_host_page_bits; … 681 885 682 886 void page_dump(FILE *f); 683 int page_get_flags(unsigned long address); 887 int page_get_flags(target_ulong address); 684 void page_set_flags(unsigned long start, unsigned long end, int flags); 888 void page_set_flags(target_ulong start, target_ulong end, int flags); 685 void page_unprotect_range(uint8_t *data, unsigned long data_size); 889 void page_unprotect_range(target_ulong data, target_ulong data_size); 686 890 687 891 #define SINGLE_CPU_DEFINES … 720 924 #define cpu_signal_handler cpu_ppc_signal_handler 721 926 #elif defined(TARGET_M68K) 927 #define CPUState CPUM68KState 928 #define cpu_init cpu_m68k_init 929 #define cpu_exec cpu_m68k_exec 930 #define cpu_gen_code cpu_m68k_gen_code 931 #define cpu_signal_handler cpu_m68k_signal_handler 932 933 #elif defined(TARGET_MIPS) 934 #define CPUState CPUMIPSState 935 #define cpu_init cpu_mips_init 936 #define cpu_exec cpu_mips_exec 937 #define cpu_gen_code cpu_mips_gen_code 938 #define cpu_signal_handler cpu_mips_signal_handler 939 940 #elif defined(TARGET_SH4) 941 #define CPUState CPUSH4State 942 #define cpu_init cpu_sh4_init 943 #define cpu_exec cpu_sh4_exec 944 #define cpu_gen_code cpu_sh4_gen_code 945 #define cpu_signal_handler cpu_sh4_signal_handler 946 722 947 #else 723 948 … 733 958 734 959 void cpu_abort(CPUState *env, const char *fmt, ...); 960 extern CPUState *first_cpu; 735 961 extern CPUState *cpu_single_env; 736 962 extern int code_copy_enabled; … 740 966 #define CPU_INTERRUPT_EXITTB 0x04 /* exit the current TB (use for x86 a20 case) */ 741 967 #define CPU_INTERRUPT_TIMER 0x08 /* internal timer exception pending */ 968 #define CPU_INTERRUPT_FIQ 0x10 /* Fast interrupt pending. */ 969 #define CPU_INTERRUPT_HALT 0x20 /* CPU halt wanted */ 970 #define CPU_INTERRUPT_SMI 0x40 /* (x86 only) SMI interrupt pending */ 971 742 972 #ifdef VBOX 743 973 /** Executes a single instruction. cpu_exec() will normally return EXCP_SINGLE_INSTR. */ 744 #define CPU_INTERRUPT_SINGLE_INSTR 0x0040 974 #define CPU_INTERRUPT_SINGLE_INSTR 0x0200 745 975 /** Executing a CPU_INTERRUPT_SINGLE_INSTR request, quit the cpu_loop. (for exceptions and suchlike) */ 746 #define CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT 0x0080 976 #define CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT 0x0400 747 977 /** VM execution was interrupted by VMR3Reset, VMR3Suspend or VMR3PowerOff. */ 748 #define CPU_INTERRUPT_RC 0x0100 978 #define CPU_INTERRUPT_RC 0x0800 749 979 /** Exit current TB to process an external interrupt request (also in op.c!!) */ 750 #define CPU_INTERRUPT_EXTERNAL_EXIT 0x0200 980 #define CPU_INTERRUPT_EXTERNAL_EXIT 0x1000 751 981 /** Exit current TB to process an external interrupt request (also in op.c!!) */ 752 #define CPU_INTERRUPT_EXTERNAL_HARD 0x0400 982 #define CPU_INTERRUPT_EXTERNAL_HARD 0x2000 753 983 /** Exit current TB to process an external interrupt request (also in op.c!!) */ 754 #define CPU_INTERRUPT_EXTERNAL_TIMER 0x0800 984 #define CPU_INTERRUPT_EXTERNAL_TIMER 0x4000 755 985 /** Exit current TB to process an external interrupt request (also in op.c!!)
*/ 756 #define CPU_INTERRUPT_EXTERNAL_DMA 0x1000 986 #define CPU_INTERRUPT_EXTERNAL_DMA 0x8000 757 987 #endif /* VBOX */ 758 988 void cpu_interrupt(CPUState *s, int mask); … 806 1036 807 1037 /* memory API */ 808 extern uint32_t phys_ram_size; 1038 809 1039 #ifndef VBOX 1040 extern int phys_ram_size; 810 1041 extern int phys_ram_fd; 811 1042 extern int phys_ram_size; 1043 #else /* VBOX */ 1044 extern RTGCPHYS phys_ram_size; 1045 /** This is required for bounds checking the phys_ram_dirty accesses. */ 1046 extern uint32_t phys_ram_dirty_size; 1047 #endif /* VBOX */ 1048 #if !defined(VBOX) || !(defined(PGM_DYNAMIC_RAM_ALLOC) || defined(REM_PHYS_ADDR_IN_TLB)) 812 1049 extern uint8_t *phys_ram_base; 813 1050 #endif … 815 1052 /* physical memory access */ 817 #define IO_MEM_NB_ENTRIES 256 818 1054 #define TLB_INVALID_MASK (1 << 3) 819 1055 #define IO_MEM_SHIFT 4 1056 #define IO_MEM_NB_ENTRIES (1 << (TARGET_PAGE_BITS - IO_MEM_SHIFT)) 820 1057 821 1058 #define IO_MEM_RAM (0 << IO_MEM_SHIFT) /* hardcoded offset */ 822 1059 #define IO_MEM_ROM (1 << IO_MEM_SHIFT) /* hardcoded offset */ 823 1060 #define IO_MEM_UNASSIGNED (2 << IO_MEM_SHIFT) 824 #define IO_MEM_CODE (3 << IO_MEM_SHIFT) /* used internally, never use directly */ 825 1061 #define IO_MEM_NOTDIRTY (4 << IO_MEM_SHIFT) /* used internally, never use directly */ 826 #ifdef VBOX 1062 #if defined(VBOX) && defined(PGM_DYNAMIC_RAM_ALLOC) 827 1063 #define IO_MEM_RAM_MISSING (5 << IO_MEM_SHIFT) /* used internally, never use directly */ 828 1064 #endif 1065 /* acts like a ROM when read and like a device when written. As an 1066 exception, the write memory callback gets the ram offset instead of 1067 the physical address */ 1068 #define IO_MEM_ROMD (1) 829 1069 830 1070 typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr, uint32_t value); … 834 1074 unsigned long size, 835 1075 unsigned long phys_offset); 1076 uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr); 836 1077 int cpu_register_io_memory(int io_index, 837 1078 CPUReadMemoryFunc **mem_read, … 853 1094 cpu_physical_memory_rw(addr, (uint8_t *)buf, len, 1); 854 1095 } 1096 uint32_t ldub_phys(target_phys_addr_t addr); 1097 uint32_t lduw_phys(target_phys_addr_t addr); 855 1098 uint32_t ldl_phys(target_phys_addr_t addr); 1099 uint64_t ldq_phys(target_phys_addr_t addr); 856 1100 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val); 1101 void stb_phys(target_phys_addr_t addr, uint32_t val); 1102 void stw_phys(target_phys_addr_t addr, uint32_t val); 857 1103 void stl_phys(target_phys_addr_t addr, uint32_t val); 858 1104 void stq_phys(target_phys_addr_t addr, uint64_t val); 1105 1106 void cpu_physical_memory_write_rom(target_phys_addr_t addr, 1107 const uint8_t *buf, int len); 859 1108 int cpu_memory_rw_debug(CPUState *env, target_ulong addr, 860 1109 uint8_t *buf, int len, int is_write); 861 1110 1111 #define VGA_DIRTY_FLAG 0x01 1112 #define CODE_DIRTY_FLAG 0x02 1113 862 1114 /* read dirty bit (return 0 or 1) */ 863 static inline int cpu_physical_memory_is_dirty(target_ulong addr) 864 { 865 return phys_ram_dirty[addr >> TARGET_PAGE_BITS]; 866 } 867 868 static inline void cpu_physical_memory_set_dirty(target_ulong addr) 869 { 870 phys_ram_dirty[addr >> TARGET_PAGE_BITS] = 1; 871 } 872 873 void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end); 1115 static inline int cpu_physical_memory_is_dirty(ram_addr_t addr) 1116 { 1117 #ifdef VBOX 1118 if (RT_UNLIKELY((addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size)) 1119 {
Log(("cpu_physical_memory_is_dirty: %VGp\n", (RTGCPHYS)addr)); 1121 /*AssertMsgFailed(("cpu_physical_memory_is_dirty: %VGp\n", (RTGCPHYS)addr));*/ 1122 return 0; 1123 } 1124 #endif 1125 return phys_ram_dirty[addr >> TARGET_PAGE_BITS] == 0xff; 1126 } 1127 1128 static inline int cpu_physical_memory_get_dirty(ram_addr_t addr, 1129 int dirty_flags) 1130 { 1131 #ifdef VBOX 1132 if (RT_UNLIKELY((addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size)) 1133 { 1134 Log(("cpu_physical_memory_is_dirty: %VGp\n", (RTGCPHYS)addr)); 1135 /*AssertMsgFailed(("cpu_physical_memory_is_dirty: %VGp\n", (RTGCPHYS)addr));*/ 1136 return 0xff & dirty_flags; /** @todo I don't think this is the right thing to return, fix! */ 1137 } 1138 #endif 1139 return phys_ram_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags; 1140 } 1141 1142 static inline void cpu_physical_memory_set_dirty(ram_addr_t addr) 1143 { 1144 #ifdef VBOX 1145 if (RT_UNLIKELY((addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size)) 1146 { 1147 Log(("cpu_physical_memory_is_dirty: %VGp\n", (RTGCPHYS)addr)); 1148 /*AssertMsgFailed(("cpu_physical_memory_is_dirty: %VGp\n", (RTGCPHYS)addr));*/ 1149 return; 1150 } 1151 #endif 1152 phys_ram_dirty[addr >> TARGET_PAGE_BITS] = 0xff; 1153 } 1154 1155 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end, 1156 int dirty_flags); 1157 void cpu_tlb_update_dirty(CPUState *env); 874 1158 875 1159 void dump_exec_info(FILE *f, 876 1160 int (*cpu_fprintf)(FILE *f, const char *fmt, ...)); 877 1161 1162 /*******************************************/ 1163 /* host CPU ticks (if available) */ 1164 1165 #if defined(__powerpc__) 1166 1167 static inline uint32_t get_tbl(void) 1168 { 1169 uint32_t tbl; 1170 asm volatile("mftb %0" : "=r" (tbl)); 1171 return tbl; 1172 } 1173 1174 static inline uint32_t get_tbu(void) 1175 { 1176 uint32_t tbl; 1177 asm volatile("mftbu %0" : "=r" (tbl)); 1178 return tbl; 1179 } 1180 1181 static inline int64_t cpu_get_real_ticks(void) 1182 { 1183 uint32_t l, h, h1; 1184 /* NOTE: we test if wrapping has occurred */ 1185 do { 1186 h = get_tbu(); 1187 l = get_tbl(); 1188 h1 = get_tbu(); 1189 } while (h != h1); 1190 return ((int64_t)h << 32) | l; 1191 } 1192 1193 #elif defined(__i386__) 1194 1195 static inline int64_t cpu_get_real_ticks(void) 1196 { 1197 int64_t val; 1198 asm volatile ("rdtsc" : "=A" (val)); 1199 return val; 1200 } 1201 1202 #elif defined(__x86_64__) 1203 1204 static inline int64_t cpu_get_real_ticks(void) 1205 { 1206 uint32_t low,high; 1207 int64_t val; 1208 asm volatile("rdtsc" : "=a" (low), "=d" (high)); 1209 val = high; 1210 val <<= 32; 1211 val |= low; 1212 return val; 1213 } 1214 1215 #elif defined(__ia64) 1216 1217 static inline int64_t cpu_get_real_ticks(void) 1218 { 1219 int64_t val; 1220 asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory"); 1221 return val; 1222 } 1223 1224 #elif defined(__s390__) 1225 1226 static inline int64_t cpu_get_real_ticks(void) 1227 { 1228 int64_t val; 1229 asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc"); 1230 return val; 1231 } 1232 1233 #elif defined(__sparc_v9__) 1234 1235 static inline int64_t cpu_get_real_ticks (void) 1236 { 1237 #if defined(_LP64) 1238 uint64_t rval; 1239 asm volatile("rd %%tick,%0" : "=r"(rval)); 1240 return rval; 1241 #else 1242 union { 1243 uint64_t i64; 1244 struct { 1245 uint32_t high; 1246 uint32_t low; 1247 } i32; 1248 } rval; 1249 asm volatile("rd %%tick,%1; srlx %1,32,%0" 1250 : "=r"(rval.i32.high), "=r"(rval.i32.low)); 1251 return rval.i64; 1252 #endif 1253 } 1254 #else 1255 /* The host CPU doesn't have an 
easily accessible cycle counter. 1256 Just return a monotonically increasing value. This will be totally wrong, 1257 but hopefully better than nothing. */ 1258 static inline int64_t cpu_get_real_ticks (void) 1259 { 1260 static int64_t ticks = 0; 1261 return ticks++; 1262 } 1263 #endif 1264 1265 /* profiling */ 1266 #ifdef CONFIG_PROFILER 1267 static inline int64_t profile_getclock(void) 1268 { 1269 return cpu_get_real_ticks(); 1270 } 1271 1272 extern int64_t kqemu_time, kqemu_time_start; 1273 extern int64_t qemu_time, qemu_time_start; 1274 extern int64_t tlb_flush_time; 1275 extern int64_t kqemu_exec_count; 1276 extern int64_t dev_time; 1277 extern int64_t kqemu_ret_int_count; 1278 extern int64_t kqemu_ret_excp_count; 1279 extern int64_t kqemu_ret_intr_count; 1280 1281 #endif 878 1282 879 1283 #ifdef VBOX 880 1284 void tb_invalidate_virt(CPUState *env, uint32_t eip); 881 #endif 1285 #endif /* VBOX */ 882 1286 883 1287 #endif /* CPU_ALL_H */
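The dirty byte kept per RAM page is now a bitmask rather than a plain boolean: each client owns a flag (VGA_DIRTY_FLAG for display refresh, CODE_DIRTY_FLAG for translated-code tracking), 0xff marks a page dirty for everyone, and cpu_physical_memory_reset_dirty() clears only the requested bits. A sketch of the intended consumption pattern, assuming a hypothetical display refresh loop:

    /* Sketch only: redraw pages the guest wrote since the last scan, then
       clear just the VGA bit so other clients keep their own dirty view. */
    static void vgaSketchRefreshPage(ram_addr_t addr)
    {
        if (cpu_physical_memory_get_dirty(addr, VGA_DIRTY_FLAG))
        {
            /* ... redraw the scanlines backed by this page ... */
            cpu_physical_memory_reset_dirty(addr, addr + TARGET_PAGE_SIZE,
                                            VGA_DIRTY_FLAG);
        }
    }
-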
trunk/src/recompiler/cpu-defs.h
r1 r2422 30 30 #endif 31 31 32 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) 33 #define HOST_LONG_BITS 64 34 #else 35 #define HOST_LONG_BITS 32 36 #endif 37 38 32 #ifndef TARGET_PHYS_ADDR_BITS 39 33 #if TARGET_LONG_BITS >= HOST_LONG_BITS … 54 48 typedef int64_t target_long; 55 49 typedef uint64_t target_ulong; 56 #define TARGET_FMT_lx "%016llx" 50 #define TARGET_FMT_lx "%016" PRIx64 57 51 #else 58 52 #error TARGET_LONG_SIZE undefined … 73 67 #endif 74 68 69 /* address in the RAM (different from a physical address) */ 70 typedef unsigned long ram_addr_t; 71 75 72 #define HOST_LONG_SIZE (HOST_LONG_BITS / 8) 76 73 77 #define EXCP_INTERRUPT 256 /* async interruption */ 78 #define EXCP_HLT 257 /* hlt instruction reached */ 79 #define EXCP_DEBUG 258 /* cpu stopped after a breakpoint or singlestep */ 74 #define EXCP_INTERRUPT 0x10000 /* async interruption */ 75 #define EXCP_HLT 0x10001 /* hlt instruction reached */ 76 #define EXCP_DEBUG 0x10002 /* cpu stopped after a breakpoint or singlestep */ 77 #define EXCP_HALTED 0x10003 /* cpu is halted (waiting for external event) */ 80 78 #if defined(VBOX) 81 #define EXCP_EXECUTE_RAW 1024 /* execute raw mode. */ 82 #define EXCP_EXECUTE_HWACC 1025 /* execute hardware accelerated raw mode. */ 83 #define EXCP_SINGLE_INSTR 1026 /* executed single instruction. */ 84 #define EXCP_RC 1027 /* a EM rc was raised (VMR3Reset/Suspend/PowerOff). */ 79 #define EXCP_EXECUTE_RAW 0x11024 /* execute raw mode. */ 80 #define EXCP_EXECUTE_HWACC 0x11025 /* execute hardware accelerated raw mode. */ 81 #define EXCP_SINGLE_INSTR 0x11026 /* executed single instruction. */ 82 #define EXCP_RC 0x11027 /* a EM rc was raised (VMR3Reset/Suspend/PowerOff). */ 85 83 #endif /* VBOX */ 86 87 84 #define MAX_BREAKPOINTS 32 88 85 89 #define CPU_TLB_SIZE 256 86 #define TB_JMP_CACHE_BITS 12 87 #define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS) 88 89 /* Only the bottom TB_JMP_PAGE_BITS of the jump cache hash bits vary for 90 addresses on the same page. The top bits are the same. This allows 91 TLB invalidation to quickly clear a subset of the hash table.
*/ 92 #define TB_JMP_PAGE_BITS (TB_JMP_CACHE_BITS / 2) 93 #define TB_JMP_PAGE_SIZE (1 << TB_JMP_PAGE_BITS) 94 #define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1) 95 #define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE) 96 97 #define CPU_TLB_BITS 8 98 #define CPU_TLB_SIZE (1 << CPU_TLB_BITS) 90 99 91 100 typedef struct CPUTLBEntry { … … 96 105 bit 2..0 : zero 97 106 */ 98 target_ulong address; 107 target_ulong addr_read; 108 target_ulong addr_write; 109 target_ulong addr_code; 99 110 /* addend to virtual address to get physical address */ 100 111 target_phys_addr_t addend; 101 112 } CPUTLBEntry; 102 113 114 #define CPU_COMMON \ 115 struct TranslationBlock *current_tb; /* currently executing TB */ \ 116 /* soft mmu support */ \ 117 /* in order to avoid passing too many arguments to the memory \ 118 write helpers, we store some rarely used information in the CPU \ 119 context) */ \ 120 unsigned long mem_write_pc; /* host pc at which the memory was \ 121 written */ \ 122 target_ulong mem_write_vaddr; /* target virtual addr at which the \ 123 memory was written */ \ 124 /* 0 = kernel, 1 = user */ \ 125 CPUTLBEntry tlb_table[2][CPU_TLB_SIZE]; \ 126 struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE]; \ 127 \ 128 /* from this point: preserved by CPU reset */ \ 129 /* ice debug support */ \ 130 target_ulong breakpoints[MAX_BREAKPOINTS]; \ 131 int nb_breakpoints; \ 132 int singlestep_enabled; \ 133 \ 134 void *next_cpu; /* next CPU sharing TB cache */ \ 135 int cpu_index; /* CPU index (informative) */ \ 136 /* user data */ \ 137 void *opaque; 138 103 139 #endif -
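The TB_JMP_* constants just added split the 12-bit jump cache index in two: the top TB_JMP_PAGE_BITS come from the page, the bottom TB_JMP_PAGE_BITS from the offset within it, which is what lets the TLB code clear every cached TB of one page with a single masked sweep. The sketch below is a simplified model of an index function with that property and of the per-page clear; TARGET_PAGE_BITS = 12 is an assumption, and this is not the exact tb_jmp_cache_hash_func the recompiler uses:

#include <stdint.h>

#define TARGET_PAGE_BITS   12                 /* assumed 4K target pages */
#define TB_JMP_CACHE_BITS  12
#define TB_JMP_CACHE_SIZE  (1 << TB_JMP_CACHE_BITS)
#define TB_JMP_PAGE_BITS   (TB_JMP_CACHE_BITS / 2)
#define TB_JMP_PAGE_SIZE   (1 << TB_JMP_PAGE_BITS)
#define TB_JMP_ADDR_MASK   (TB_JMP_PAGE_SIZE - 1)
#define TB_JMP_PAGE_MASK   (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)

/* top bits depend only on the page number, bottom bits only on the offset */
static inline unsigned int jmp_cache_index(uint32_t pc)
{
    return (((pc >> TARGET_PAGE_BITS) << TB_JMP_PAGE_BITS) & TB_JMP_PAGE_MASK)
         | (pc & TB_JMP_ADDR_MASK);
}

/* invalidating one page touches exactly TB_JMP_PAGE_SIZE (64) slots */
static void jmp_cache_clear_page(void *cache[TB_JMP_CACHE_SIZE], uint32_t pc)
{
    unsigned int base = ((pc >> TARGET_PAGE_BITS) << TB_JMP_PAGE_BITS)
                      & TB_JMP_PAGE_MASK;
    unsigned int i;
    for (i = 0; i < TB_JMP_PAGE_SIZE; i++)
        cache[base | i] = 0;
}

Any hash with the same top/bottom split works; the point of the layout is only that all entries for a page share their top bits.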
trunk/src/recompiler/cpu-exec.c
r1147 r2422 1 1 /* 2 2 * i386 emulator main execution loop 3 * 4 * Copyright (c) 2003 Fabrice Bellard3 * 4 * Copyright (c) 2003-2005 Fabrice Bellard 5 5 * 6 6 * This library is free software; you can redistribute it and/or … … 41 41 //#define DEBUG_SIGNAL 42 42 43 #if defined(TARGET_ARM) || defined(TARGET_SPARC) 43 #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_M68K) 44 44 /* XXX: unify with i386 target */ 45 45 void cpu_loop_exit(void) … … 47 47 longjmp(env->jmp_env, 1); 48 48 } 49 #endif 50 #if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K)) 51 #define reg_T2 49 52 #endif 50 53 … … 52 55 restored in a state compatible with the CPU emulator 53 56 */ 54 void cpu_resume_from_signal(CPUState *env1, void *puc) 57 void cpu_resume_from_signal(CPUState *env1, void *puc) 55 58 { 56 59 #if !defined(CONFIG_SOFTMMU) … … 71 74 } 72 75 76 77 static TranslationBlock *tb_find_slow(target_ulong pc, 78 target_ulong cs_base, 79 unsigned int flags) 80 { 81 TranslationBlock *tb, **ptb1; 82 int code_gen_size; 83 unsigned int h; 84 target_ulong phys_pc, phys_page1, phys_page2, virt_page2; 85 uint8_t *tc_ptr; 86 87 spin_lock(&tb_lock); 88 89 tb_invalidated_flag = 0; 90 91 regs_to_env(); /* XXX: do it just before cpu_gen_code() */ 92 93 /* find translated block using physical mappings */ 94 phys_pc = get_phys_addr_code(env, pc); 95 phys_page1 = phys_pc & TARGET_PAGE_MASK; 96 phys_page2 = -1; 97 h = tb_phys_hash_func(phys_pc); 98 ptb1 = &tb_phys_hash[h]; 99 for(;;) { 100 tb = *ptb1; 101 if (!tb) 102 goto not_found; 103 if (tb->pc == pc && 104 tb->page_addr[0] == phys_page1 && 105 tb->cs_base == cs_base && 106 tb->flags == flags) { 107 /* check next page if needed */ 108 if (tb->page_addr[1] != -1) { 109 virt_page2 = (pc & TARGET_PAGE_MASK) + 110 TARGET_PAGE_SIZE; 111 phys_page2 = get_phys_addr_code(env, virt_page2); 112 if (tb->page_addr[1] == phys_page2) 113 goto found; 114 } else { 115 goto found; 116 } 117 } 118 ptb1 = &tb->phys_hash_next; 119 } 120 not_found: 121 /* if no translated code available, then translate it now */ 122 tb = tb_alloc(pc); 123 if (!tb) { 124 /* flush must be done */ 125 tb_flush(env); 126 /* cannot fail at this point */ 127 tb = tb_alloc(pc); 128 /* don't forget to invalidate previous TB info */ 129 tb_invalidated_flag = 1; 130 } 131 tc_ptr = code_gen_ptr; 132 tb->tc_ptr = tc_ptr; 133 tb->cs_base = cs_base; 134 tb->flags = flags; 135 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size); 136 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1)); 137 138 /* check next page if needed */ 139 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK; 140 phys_page2 = -1; 141 if ((pc & TARGET_PAGE_MASK) != virt_page2) { 142 phys_page2 = get_phys_addr_code(env, virt_page2); 143 } 144 tb_link_phys(tb, phys_pc, phys_page2); 145 146 found: 147 /* we add the TB in the virtual pc hash table */ 148 env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb; 149 spin_unlock(&tb_lock); 150 return tb; 151 } 152 153 static inline TranslationBlock *tb_find_fast(void) 154 { 155 TranslationBlock *tb; 156 target_ulong cs_base, pc; 157 unsigned int flags; 158 159 /* we record a subset of the CPU state. It will 160 always be the same before a given translated block 161 is executed. 
*/ 162 #if defined(TARGET_I386) 163 flags = env->hflags; 164 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)); 165 cs_base = env->segs[R_CS].base; 166 pc = cs_base + env->eip; 167 #elif defined(TARGET_ARM) 168 flags = env->thumb | (env->vfp.vec_len << 1) 169 | (env->vfp.vec_stride << 4); 170 if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) 171 flags |= (1 << 6); 172 if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) 173 flags |= (1 << 7); 174 cs_base = 0; 175 pc = env->regs[15]; 176 #elif defined(TARGET_SPARC) 177 #ifdef TARGET_SPARC64 178 // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled 179 flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2)) 180 | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2); 181 #else 182 // FPU enable . MMU enabled . MMU no-fault . Supervisor 183 flags = (env->psref << 3) | ((env->mmuregs[0] & (MMU_E | MMU_NF)) << 1) 184 | env->psrs; 185 #endif 186 cs_base = env->npc; 187 pc = env->pc; 188 #elif defined(TARGET_PPC) 189 flags = (msr_pr << MSR_PR) | (msr_fp << MSR_FP) | 190 (msr_se << MSR_SE) | (msr_le << MSR_LE); 191 cs_base = 0; 192 pc = env->nip; 193 #elif defined(TARGET_MIPS) 194 flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK); 195 cs_base = 0; 196 pc = env->PC; 197 #elif defined(TARGET_M68K) 198 flags = env->fpcr & M68K_FPCR_PREC; 199 cs_base = 0; 200 pc = env->pc; 201 #elif defined(TARGET_SH4) 202 flags = env->sr & (SR_MD | SR_RB); 203 cs_base = 0; /* XXXXX */ 204 pc = env->pc; 205 #else 206 #error unsupported CPU 207 #endif 208 tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)]; 209 if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base || 210 tb->flags != flags, 0)) { 211 tb = tb_find_slow(pc, cs_base, flags); 212 /* Note: we do it here to avoid a gcc bug on Mac OS X when 213 doing it in tb_find_slow */ 214 if (tb_invalidated_flag) { 215 /* as some TB could have been invalidated because 216 of memory exceptions while generating the code, we 217 must recompute the hash index here */ 218 T0 = 0; 219 } 220 } 221 return tb; 222 } 223 224 73 225 /* main execution loop */ 74 226 … … 77 229 int cpu_exec(CPUState *env1) 78 230 { 79 int saved_T0, saved_T1, saved_T2; 80 CPUState *saved_env; 81 #ifdef reg_EAX 82 int saved_EAX; 83 #endif 84 #ifdef reg_ECX 85 int saved_ECX; 86 #endif 87 #ifdef reg_EDX 88 int saved_EDX; 89 #endif 90 #ifdef reg_EBX 91 int saved_EBX; 92 #endif 93 #ifdef reg_ESP 94 int saved_ESP; 95 #endif 96 #ifdef reg_EBP 97 int saved_EBP; 98 #endif 99 #ifdef reg_ESI 100 int saved_ESI; 101 #endif 102 #ifdef reg_EDI 103 int saved_EDI; 104 #endif 105 int code_gen_size, ret, interrupt_request; 231 #define DECLARE_HOST_REGS 1 232 #include "hostregs_helper.h" 233 int ret, interrupt_request; 106 234 void (*gen_func)(void); 107 TranslationBlock *tb, **ptb; 108 target_ulong cs_base, pc; 235 TranslationBlock *tb; 109 236 uint8_t *tc_ptr; 110 unsigned int flags; 237 238 #if defined(TARGET_I386) 239 /* handle exit of HALTED state */ 240 if (env1->hflags & HF_HALTED_MASK) { 241 /* disable halt condition */ 242 if ((env1->interrupt_request & CPU_INTERRUPT_HARD) && 243 (env1->eflags & IF_MASK)) { 244 env1->hflags &= ~HF_HALTED_MASK; 245 } else { 246 return EXCP_HALTED; 247 } 248 } 249 #elif defined(TARGET_PPC) 250 if (env1->halted) { 251 if (env1->msr[MSR_EE] && 252 (env1->interrupt_request & 253 (CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER))) { 254 env1->halted = 0; 255 } else { 256 return EXCP_HALTED; 257 } 258 } 259 #elif defined(TARGET_SPARC) 260 if (env1->halted) { 261 if 
((env1->interrupt_request & CPU_INTERRUPT_HARD) && 262 (env1->psret != 0)) { 263 env1->halted = 0; 264 } else { 265 return EXCP_HALTED; 266 } 267 } 268 #elif defined(TARGET_ARM) 269 if (env1->halted) { 270 /* An interrupt wakes the CPU even if the I and F CPSR bits are 271 set. */ 272 if (env1->interrupt_request 273 & (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD)) { 274 env1->halted = 0; 275 } else { 276 return EXCP_HALTED; 277 } 278 } 279 #elif defined(TARGET_MIPS) 280 if (env1->halted) { 281 if (env1->interrupt_request & 282 (CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER)) { 283 env1->halted = 0; 284 } else { 285 return EXCP_HALTED; 286 } 287 } 288 #endif 289 290 cpu_single_env = env1; 111 291 112 292 /* first we save global registers */ 113 saved_env = env; 293 #define SAVE_HOST_REGS 1 294 #include "hostregs_helper.h" 114 295 env = env1; 115 saved_T0 = T0; 116 saved_T1 = T1; 117 saved_T2 = T2; 118 119 #ifdef reg_EAX 120 saved_EAX = EAX; 121 #endif 122 #ifdef reg_ECX 123 saved_ECX = ECX; 124 #endif 125 #ifdef reg_EDX 126 saved_EDX = EDX; 127 #endif 128 #ifdef reg_EBX 129 saved_EBX = EBX; 130 #endif 131 #ifdef reg_ESP 132 saved_ESP = ESP; 133 #endif 134 #ifdef reg_EBP 135 saved_EBP = EBP; 136 #endif 137 #ifdef reg_ESI 138 saved_ESI = ESI; 139 #endif 140 #ifdef reg_EDI 141 saved_EDI = EDI; 142 #endif 296 #if defined(__sparc__) && !defined(HOST_SOLARIS) 297 /* we also save i7 because longjmp may not restore it */ 298 asm volatile ("mov %%i7, %0" : "=r" (saved_i7)); 299 #endif 300 301 #if defined(TARGET_I386) 143 302 144 303 env_to_regs(); … … 148 307 CC_OP = CC_OP_EFLAGS; 149 308 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); 150 309 #elif defined(TARGET_ARM) 310 #elif defined(TARGET_SPARC) 311 #if defined(reg_REGWPTR) 312 saved_regwptr = REGWPTR; 313 #endif 314 #elif defined(TARGET_PPC) 315 #elif defined(TARGET_MIPS) 316 #elif defined(TARGET_SH4) 317 /* XXXXX */ 318 #else 319 #error unsupported target CPU 320 #endif 151 321 #ifndef VBOX /* VBOX: We need to raise traps and suchlike from the outside. */ 152 322 env->exception_index = -1; 153 #endif 323 #endif 154 324 155 325 /* prepare setjmp context for exception handling */ 156 326 for(;;) { 157 if (setjmp(env->jmp_env) == 0) 327 if (setjmp(env->jmp_env) == 0) 158 328 { 159 329 env->current_tb = NULL; … … 164 334 * Check for fatal errors first 165 335 */ 166 if (env->interrupt_request & CPU_INTERRUPT_RC) 167 { 336 if (env->interrupt_request & CPU_INTERRUPT_RC) { 168 337 env->exception_index = EXCP_RC; 169 338 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_RC); … … 173 342 174 343 /* if an exception is pending, we execute it here */ 175 if (env->exception_index >= 0) 176 { 344 if (env->exception_index >= 0) { 177 345 Assert(!env->user_mode_only); 178 if (env->exception_index >= EXCP_INTERRUPT) 179 { 346 if (env->exception_index >= EXCP_INTERRUPT) { 180 347 /* exit request from the cpu execution loop */ 181 348 ret = env->exception_index; 182 349 break; 183 } 184 else 185 { 350 } else { 186 351 /* simulate a real cpu exception. 
On i386, it can 187 352 trigger new exceptions, but we do not handle … … 189 354 RAWEx_ProfileStart(env, STATS_IRQ_HANDLING); 190 355 Log(("do_interrupt %d %d %08x\n", env->exception_index, env->exception_is_int, env->exception_next_eip)); 191 do_interrupt(env->exception_index, 192 env->exception_is_int, 193 env->error_code, 356 do_interrupt(env->exception_index, 357 env->exception_is_int, 358 env->error_code, 194 359 env->exception_next_eip, 0); 195 360 RAWEx_ProfileStop(env, STATS_IRQ_HANDLING); … … 235 400 236 401 RAWEx_ProfileStart(env, STATS_IRQ_HANDLING); 237 /* if hardware interrupt pending, we execute it */ 238 if ((interrupt_request & CPU_INTERRUPT_HARD) && 239 (env->eflags & IF_MASK) && 240 !(env->hflags & HF_INHIBIT_IRQ_MASK)) 402 if ((interrupt_request & CPU_INTERRUPT_SMI) && 403 !(env->hflags & HF_SMM_MASK)) { 404 env->interrupt_request &= ~CPU_INTERRUPT_SMI; 405 do_smm_enter(); 406 T0 = 0; 407 } 408 else if ((interrupt_request & CPU_INTERRUPT_HARD) && 409 (env->eflags & IF_MASK) && 410 !(env->hflags & HF_INHIBIT_IRQ_MASK)) 241 411 { 412 /* if hardware interrupt pending, we execute it */ 242 413 int intno; 243 414 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_HARD); … … 252 423 T0 = 0; 253 424 } 254 if ( interrupt_request & CPU_INTERRUPT_EXITTB)425 if (env->interrupt_request & CPU_INTERRUPT_EXITTB) 255 426 { 256 427 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXITTB); … … 260 431 } 261 432 RAWEx_ProfileStop(env, STATS_IRQ_HANDLING); 262 if (interrupt_request & CPU_INTERRUPT_EXIT) 433 if (interrupt_request & CPU_INTERRUPT_EXIT) 263 434 { 264 435 env->exception_index = EXCP_INTERRUPT; … … 267 438 cpu_loop_exit(); 268 439 } 269 if (interrupt_request & CPU_INTERRUPT_RC) 440 if (interrupt_request & CPU_INTERRUPT_RC) 270 441 { 271 442 env->exception_index = EXCP_RC; … … 275 446 } 276 447 } 277 /* we record a subset of the CPU state. It will 278 always be the same before a given translated block 279 is executed. */ 280 flags = env->hflags; 281 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)); 282 cs_base = env->segs[R_CS].base; 283 pc = cs_base + env->eip; 284 448 449 /* 450 * Check if we the CPU state allows us to execute the code in raw-mode. 451 */ 285 452 RAWEx_ProfileStart(env, STATS_RAW_CHECK); 286 if (remR3CanExecuteRaw(env, pc, flags, &env->exception_index)) 453 if (remR3CanExecuteRaw(env, 454 env->eip + env->segs[R_CS].base, 455 env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)), 456 &env->exception_index)) 287 457 { 288 458 RAWEx_ProfileStop(env, STATS_RAW_CHECK); … … 293 463 294 464 RAWEx_ProfileStart(env, STATS_TLB_LOOKUP); 295 tb = tb_find(&ptb, pc, cs_base, 296 flags); 297 if (!tb) 465 tb = tb_find_fast(); 466 467 /* see if we can patch the calling TB. When the TB 468 spans two pages, we cannot safely do a direct 469 jump. 
*/ 470 if (T0 != 0 471 && !(tb->cflags & CF_RAW_MODE) 472 && tb->page_addr[1] == -1) 298 473 { 299 TranslationBlock **ptb1;300 unsigned int h;301 target_ulong phys_pc, phys_page1, phys_page2, virt_page2;302 303 spin_lock(&tb_lock);304 305 tb_invalidated_flag = 0;306 307 regs_to_env(); /* XXX: do it just before cpu_gen_code() */308 309 /* find translated block using physical mappings */310 phys_pc = get_phys_addr_code(env, pc);311 phys_page1 = phys_pc & TARGET_PAGE_MASK;312 phys_page2 = -1;313 h = tb_phys_hash_func(phys_pc);314 ptb1 = &tb_phys_hash[h];315 for(;;) {316 tb = *ptb1;317 if (!tb)318 goto not_found;319 if (tb->pc == pc &&320 tb->page_addr[0] == phys_page1 &&321 tb->cs_base == cs_base &&322 tb->flags == flags) {323 /* check next page if needed */324 if (tb->page_addr[1] != -1) {325 virt_page2 = (pc & TARGET_PAGE_MASK) +326 TARGET_PAGE_SIZE;327 phys_page2 = get_phys_addr_code(env, virt_page2);328 if (tb->page_addr[1] == phys_page2)329 goto found;330 } else {331 goto found;332 }333 }334 ptb1 = &tb->phys_hash_next;335 }336 not_found:337 /* if no translated code available, then translate it now */338 tb = tb_alloc(pc);339 if (!tb) {340 /* flush must be done */341 tb_flush(env);342 /* cannot fail at this point */343 tb = tb_alloc(pc);344 /* don't forget to invalidate previous TB info */345 ptb = &tb_hash[tb_hash_func(pc)];346 T0 = 0;347 }348 tc_ptr = code_gen_ptr;349 tb->tc_ptr = tc_ptr;350 tb->cs_base = cs_base;351 tb->flags = flags;352 RAWEx_ProfileStop(env, STATS_TLB_LOOKUP);353 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);354 RAWEx_ProfileStart(env, STATS_TLB_LOOKUP);355 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));356 357 /* check next page if needed */358 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;359 phys_page2 = -1;360 if ((pc & TARGET_PAGE_MASK) != virt_page2) {361 phys_page2 = get_phys_addr_code(env, virt_page2);362 }363 tb_link_phys(tb, phys_pc, phys_page2);364 365 found:366 if (tb_invalidated_flag) {367 /* as some TB could have been invalidated because368 of memory exceptions while generating the code, we369 must recompute the hash index here */370 ptb = &tb_hash[tb_hash_func(pc)];371 while (*ptb != NULL)372 ptb = &(*ptb)->hash_next;373 T0 = 0;374 }375 /* we add the TB in the virtual pc hash table */376 *ptb = tb;377 tb->hash_next = NULL;378 tb_link(tb);379 spin_unlock(&tb_lock);380 }381 /* see if we can patch the calling TB. 
*/382 {383 if (T0 != 0384 && !(tb->cflags & CF_RAW_MODE)385 ) {386 474 spin_lock(&tb_lock); 387 475 tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb); 388 476 spin_unlock(&tb_lock); 389 }390 477 } 391 478 tc_ptr = tb->tc_ptr; … … 397 484 #if defined(DEBUG) && defined(VBOX) && !defined(DEBUG_dmik) 398 485 #if !defined(DEBUG_bird) 399 if (((env->hflags >> HF_CPL_SHIFT) & 3) == 0 && (env->hflags & HF_PE_MASK) && (env->cr[0] & CR0_PG_MASK)) 486 if (((env->hflags >> HF_CPL_SHIFT) & 3) == 0 && (env->hflags & HF_PE_MASK) && (env->cr[0] & CR0_PG_MASK)) 400 487 { 401 488 if(!(env->state & CPU_EMULATE_SINGLE_STEP)) … … 405 492 } 406 493 else 407 if (((env->hflags >> HF_CPL_SHIFT) & 3) == 3 && (env->hflags & HF_PE_MASK) && (env->cr[0] & CR0_PG_MASK)) 494 if (((env->hflags >> HF_CPL_SHIFT) & 3) == 3 && (env->hflags & HF_PE_MASK) && (env->cr[0] & CR0_PG_MASK)) 408 495 { 409 496 if(!(env->state & CPU_EMULATE_SINGLE_STEP)) … … 411 498 if(env->eflags & VM_MASK) 412 499 { 413 Log(("EMV86: %04X:%04X IF=%d TF=%d CPL=%d flags=%08X CR0=%08X\n", env->segs[R_CS].selector, env->eip, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3, flags, env->cr[0]));500 Log(("EMV86: %04X:%04X IF=%d TF=%d CPL=%d CR0=%08X\n", env->segs[R_CS].selector, env->eip, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3, env->cr[0])); 414 501 } 415 502 else 416 503 { 417 Log(("EMR3: %08X ESP=%08X IF=%d TF=%d CPL=%d IOPL=%d flags=%08X CR0=%08X\n", env->eip, ESP, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3, ((env->eflags >> IOPL_SHIFT) & 3), flags, env->cr[0]));504 Log(("EMR3: %08X ESP=%08X IF=%d TF=%d CPL=%d IOPL=%d CR0=%08X\n", env->eip, ESP, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3, ((env->eflags >> IOPL_SHIFT) & 3), env->cr[0])); 418 505 } 419 506 } 420 507 } 421 else 508 else 422 509 { 423 510 Log(("EMRM: %04X:%08X SS:ESP=%04X:%08X IF=%d TF=%d CPL=%d PE=%d PG=%d\n", env->segs[R_CS].selector, env->eip, env->segs[R_SS].selector, ESP, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3, env->cr[0] & X86_CR0_PE, env->cr[0] & X86_CR0_PG)); … … 437 524 #else 438 525 env->state &= ~CPU_EMULATE_SINGLE_STEP; 439 #endif 440 } 441 #endif 442 526 #endif 527 } 528 #endif 443 529 TMCpuTickPause(env->pVM); 444 530 remR3DisasInstr(env, -1, NULL); … … 449 535 } 450 536 } 451 else 537 else 452 538 { 453 539 RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE); … … 477 563 } 478 564 #ifdef VBOX_HIGH_RES_TIMERS_HACK 479 /* NULL the current_tb here so cpu_interrupt() doesn't do 565 /* NULL the current_tb here so cpu_interrupt() doesn't do 480 566 anything unnecessary (like crashing during emulate single instruction). 
*/ 481 567 env->current_tb = NULL; … … 487 573 /* restore flags in standard format */ 488 574 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK); 489 490 /* restore global registers */491 #ifdef reg_EAX492 EAX = saved_EAX;493 #endif494 #ifdef reg_ECX495 ECX = saved_ECX;496 #endif497 #ifdef reg_EDX498 EDX = saved_EDX;499 #endif500 #ifdef reg_EBX501 EBX = saved_EBX;502 #endif503 #ifdef reg_ESP504 ESP = saved_ESP;505 #endif506 #ifdef reg_EBP507 EBP = saved_EBP;508 #endif509 #ifdef reg_ESI510 ESI = saved_ESI;511 #endif512 #ifdef reg_EDI513 EDI = saved_EDI;514 #endif515 575 #else 516 576 #error unsupported target CPU 517 577 #endif 518 T0 = saved_T0; 519 T1 = saved_T1; 520 T2 = saved_T2; 521 env = saved_env; 578 #include "hostregs_helper.h" 522 579 return ret; 523 580 } … … 529 586 int cpu_exec(CPUState *env1) 530 587 { 531 int saved_T0, saved_T1, saved_T2; 532 CPUState *saved_env; 533 #ifdef reg_EAX 534 int saved_EAX; 535 #endif 536 #ifdef reg_ECX 537 int saved_ECX; 538 #endif 539 #ifdef reg_EDX 540 int saved_EDX; 541 #endif 542 #ifdef reg_EBX 543 int saved_EBX; 544 #endif 545 #ifdef reg_ESP 546 int saved_ESP; 547 #endif 548 #ifdef reg_EBP 549 int saved_EBP; 550 #endif 551 #ifdef reg_ESI 552 int saved_ESI; 553 #endif 554 #ifdef reg_EDI 555 int saved_EDI; 556 #endif 557 #ifdef __sparc__ 558 int saved_i7, tmp_T0; 559 #endif 560 int code_gen_size, ret, interrupt_request; 588 #define DECLARE_HOST_REGS 1 589 #include "hostregs_helper.h" 590 #if defined(__sparc__) && !defined(HOST_SOLARIS) 591 int saved_i7; 592 target_ulong tmp_T0; 593 #endif 594 int ret, interrupt_request; 561 595 void (*gen_func)(void); 562 TranslationBlock *tb, **ptb; 563 target_ulong cs_base, pc; 596 TranslationBlock *tb; 564 597 uint8_t *tc_ptr; 565 unsigned int flags; 598 599 #if defined(TARGET_I386) 600 /* handle exit of HALTED state */ 601 if (env1->hflags & HF_HALTED_MASK) { 602 /* disable halt condition */ 603 if ((env1->interrupt_request & CPU_INTERRUPT_HARD) && 604 (env1->eflags & IF_MASK)) { 605 env1->hflags &= ~HF_HALTED_MASK; 606 } else { 607 return EXCP_HALTED; 608 } 609 } 610 #elif defined(TARGET_PPC) 611 if (env1->halted) { 612 if (env1->msr[MSR_EE] && 613 (env1->interrupt_request & 614 (CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER))) { 615 env1->halted = 0; 616 } else { 617 return EXCP_HALTED; 618 } 619 } 620 #elif defined(TARGET_SPARC) 621 if (env1->halted) { 622 if ((env1->interrupt_request & CPU_INTERRUPT_HARD) && 623 (env1->psret != 0)) { 624 env1->halted = 0; 625 } else { 626 return EXCP_HALTED; 627 } 628 } 629 #elif defined(TARGET_ARM) 630 if (env1->halted) { 631 /* An interrupt wakes the CPU even if the I and F CPSR bits are 632 set. 
*/ 633 if (env1->interrupt_request 634 & (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD)) { 635 env1->halted = 0; 636 } else { 637 return EXCP_HALTED; 638 } 639 } 640 #elif defined(TARGET_MIPS) 641 if (env1->halted) { 642 if (env1->interrupt_request & 643 (CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER)) { 644 env1->halted = 0; 645 } else { 646 return EXCP_HALTED; 647 } 648 } 649 #endif 650 651 cpu_single_env = env1; 566 652 567 653 /* first we save global registers */ 568 saved_env = env; 654 #define SAVE_HOST_REGS 1 655 #include "hostregs_helper.h" 569 656 env = env1; 570 saved_T0 = T0; 571 saved_T1 = T1; 572 saved_T2 = T2; 573 #ifdef __sparc__ 657 #if defined(__sparc__) && !defined(HOST_SOLARIS) 574 658 /* we also save i7 because longjmp may not restore it */ 575 659 asm volatile ("mov %%i7, %0" : "=r" (saved_i7)); … … 577 661 578 662 #if defined(TARGET_I386) 579 #ifdef reg_EAX580 saved_EAX = EAX;581 #endif582 #ifdef reg_ECX583 saved_ECX = ECX;584 #endif585 #ifdef reg_EDX586 saved_EDX = EDX;587 #endif588 #ifdef reg_EBX589 saved_EBX = EBX;590 #endif591 #ifdef reg_ESP592 saved_ESP = ESP;593 #endif594 #ifdef reg_EBP595 saved_EBP = EBP;596 #endif597 #ifdef reg_ESI598 saved_ESI = ESI;599 #endif600 #ifdef reg_EDI601 saved_EDI = EDI;602 #endif603 604 663 env_to_regs(); 605 664 /* put eflags in CPU temporary format */ … … 609 668 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); 610 669 #elif defined(TARGET_ARM) 611 {612 unsigned int psr;613 psr = env->cpsr;614 env->CF = (psr >> 29) & 1;615 env->NZF = (psr & 0xc0000000) ^ 0x40000000;616 env->VF = (psr << 3) & 0x80000000;617 env->QF = (psr >> 27) & 1;618 env->cpsr = psr & ~CACHED_CPSR_BITS;619 }620 670 #elif defined(TARGET_SPARC) 671 #if defined(reg_REGWPTR) 672 saved_regwptr = REGWPTR; 673 #endif 621 674 #elif defined(TARGET_PPC) 675 #elif defined(TARGET_M68K) 676 env->cc_op = CC_OP_FLAGS; 677 env->cc_dest = env->sr & 0xf; 678 env->cc_x = (env->sr >> 4) & 1; 679 #elif defined(TARGET_MIPS) 680 #elif defined(TARGET_SH4) 681 /* XXXXX */ 622 682 #else 623 683 #error unsupported target CPU … … 625 685 #ifndef VBOX /* VBOX: We need to raise traps and suchlike from the outside. */ 626 686 env->exception_index = -1; 627 #endif 687 #endif 628 688 629 689 /* prepare setjmp context for exception handling */ … … 631 691 if (setjmp(env->jmp_env) == 0) { 632 692 env->current_tb = NULL; 633 #ifdef VBOX 693 #ifdef VBOX 634 694 VMMR3Unlock(env->pVM); 635 695 VMMR3Lock(env->pVM); 636 696 637 /* Check for high priority requests first (like fatal 697 /* Check for high priority requests first (like fatal 638 698 errors). */ 639 699 if (env->interrupt_request & CPU_INTERRUPT_RC) { 640 700 env->exception_index = EXCP_RC; 641 701 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_RC); 702 ret = env->exception_index; 642 703 cpu_loop_exit(); 643 704 } … … 653 714 } else if (env->user_mode_only) { 654 715 /* if user mode only, we simulate a fake exception 655 which will be han lded outside the cpu execution716 which will be handled outside the cpu execution 656 717 loop */ 657 718 #if defined(TARGET_I386) 658 do_interrupt_user(env->exception_index, 659 env->exception_is_int, 660 env->error_code, 719 do_interrupt_user(env->exception_index, 720 env->exception_is_int, 721 env->error_code, 661 722 env->exception_next_eip); 662 723 #endif … … 668 729 trigger new exceptions, but we do not handle 669 730 double or triple faults yet. 
*/ 670 do_interrupt(env->exception_index, 671 env->exception_is_int, 672 env->error_code, 731 do_interrupt(env->exception_index, 732 env->exception_is_int, 733 env->error_code, 673 734 env->exception_next_eip, 0); 674 735 #elif defined(TARGET_PPC) 675 736 do_interrupt(env); 737 #elif defined(TARGET_MIPS) 738 do_interrupt(env); 676 739 #elif defined(TARGET_SPARC) 677 do_interrupt(env->exception_index, 678 env->error_code); 740 do_interrupt(env->exception_index); 741 #elif defined(TARGET_ARM) 742 do_interrupt(env); 743 #elif defined(TARGET_SH4) 744 do_interrupt(env); 679 745 #endif 680 746 } 681 747 env->exception_index = -1; 748 } 749 #ifdef USE_KQEMU 750 if (kqemu_is_ok(env) && env->interrupt_request == 0) { 751 int ret; 752 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK); 753 ret = kqemu_cpu_exec(env); 754 /* put eflags in CPU temporary format */ 755 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); 756 DF = 1 - (2 * ((env->eflags >> 10) & 1)); 757 CC_OP = CC_OP_EFLAGS; 758 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); 759 if (ret == 1) { 760 /* exception */ 761 longjmp(env->jmp_env, 1); 762 } else if (ret == 2) { 763 /* softmmu execution needed */ 764 } else { 765 if (env->interrupt_request != 0) { 766 /* hardware interrupt will be executed just after */ 767 } else { 768 /* otherwise, we restart */ 769 longjmp(env->jmp_env, 1); 770 } 771 } 682 772 } 773 #endif 683 774 684 775 T0 = 0; /* force lookup of first TB */ 685 776 for(;;) { 686 #if def __sparc__687 /* g1 can be modified by some libc? functions */ 777 #if defined(__sparc__) && !defined(HOST_SOLARIS) 778 /* g1 can be modified by some libc? functions */ 688 779 tmp_T0 = T0; 689 #endif 780 #endif 690 781 interrupt_request = env->interrupt_request; 691 782 if (__builtin_expect(interrupt_request, 0)) { 692 783 #ifdef VBOX 693 784 /* Single instruction exec request, we execute it and return (one way or the other). 694 The caller will cleans the request if it's one of the other ways... the caller695 also locks everything so no atomic and/or required. */696 if (interrupt_request & CPU_INTERRUPT_SINGLE_INSTR){785 The caller will always reschedule after doing this operation! */ 786 if (interrupt_request & CPU_INTERRUPT_SINGLE_INSTR) 787 { 697 788 /* not in flight are we? */ 698 if (!(env->interrupt_request & CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT)) { 699 env->interrupt_request |= CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT; 789 if (!(env->interrupt_request & CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT)) 790 { 791 ASMAtomicOrS32(&env->interrupt_request, CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT); 700 792 env->exception_index = EXCP_SINGLE_INSTR; 701 793 if (emulate_single_instr(env) == -1) 702 794 AssertMsgFailed(("REM: emulate_single_instr failed for EIP=%08x!!\n", env->eip)); 795 796 /* When we receive an external interrupt during execution of this single 797 instruction, then we should stay here. We will leave when we're ready 798 for raw-mode or when interrupted by pending EMT requests. */ 799 interrupt_request = env->interrupt_request; /* reload this! 
*/ 800 if ( !(interrupt_request & CPU_INTERRUPT_HARD) 801 || !(env->eflags & IF_MASK) 802 || (env->hflags & HF_INHIBIT_IRQ_MASK) 803 ) 804 { 805 env->exception_index = ret = EXCP_SINGLE_INSTR; 806 cpu_loop_exit(); 807 } 703 808 } 704 809 env->exception_index = EXCP_SINGLE_INSTR; 705 810 cpu_loop_exit(); 706 811 } 812 813 RAWEx_ProfileStart(env, STATS_IRQ_HANDLING); 707 814 #endif /* VBOX */ 708 815 #if defined(TARGET_I386) 709 /* if hardware interrupt pending, we execute it */ 710 if ((interrupt_request & CPU_INTERRUPT_HARD) && 711 (env->eflags & IF_MASK) && 816 if ((interrupt_request & CPU_INTERRUPT_SMI) && 817 !(env->hflags & HF_SMM_MASK)) { 818 env->interrupt_request &= ~CPU_INTERRUPT_SMI; 819 do_smm_enter(); 820 #if defined(__sparc__) && !defined(HOST_SOLARIS) 821 tmp_T0 = 0; 822 #else 823 T0 = 0; 824 #endif 825 } else if ((interrupt_request & CPU_INTERRUPT_HARD) && 826 (env->eflags & IF_MASK) && 712 827 !(env->hflags & HF_INHIBIT_IRQ_MASK)) { 713 828 int intno; … … 716 831 #else 717 832 env->interrupt_request &= ~CPU_INTERRUPT_HARD; 718 #endif 833 #endif 719 834 intno = cpu_get_pic_interrupt(env); 720 835 if (loglevel & CPU_LOG_TB_IN_ASM) { … … 727 842 /* ensure that no TB jump will be modified as 728 843 the program flow was changed */ 729 #if def __sparc__844 #if defined(__sparc__) && !defined(HOST_SOLARIS) 730 845 tmp_T0 = 0; 731 846 #else … … 740 855 #endif 741 856 if (msr_ee != 0) { 742 if ((interrupt_request & CPU_INTERRUPT_HARD)) {857 if ((interrupt_request & CPU_INTERRUPT_HARD)) { 743 858 /* Raise it */ 744 859 env->exception_index = EXCP_EXTERNAL; 745 860 env->error_code = 0; 746 861 do_interrupt(env); 747 env->interrupt_request &= ~CPU_INTERRUPT_HARD; 748 } else if ((interrupt_request & CPU_INTERRUPT_TIMER)) { 749 /* Raise it */ 750 env->exception_index = EXCP_DECR; 751 env->error_code = 0; 752 do_interrupt(env); 862 env->interrupt_request &= ~CPU_INTERRUPT_HARD; 863 #if defined(__sparc__) && !defined(HOST_SOLARIS) 864 tmp_T0 = 0; 865 #else 866 T0 = 0; 867 #endif 868 } else if ((interrupt_request & CPU_INTERRUPT_TIMER)) { 869 /* Raise it */ 870 env->exception_index = EXCP_DECR; 871 env->error_code = 0; 872 do_interrupt(env); 753 873 env->interrupt_request &= ~CPU_INTERRUPT_TIMER; 874 #if defined(__sparc__) && !defined(HOST_SOLARIS) 875 tmp_T0 = 0; 876 #else 877 T0 = 0; 878 #endif 879 } 880 } 881 #elif defined(TARGET_MIPS) 882 if ((interrupt_request & CPU_INTERRUPT_HARD) && 883 (env->CP0_Status & (1 << CP0St_IE)) && 884 (env->CP0_Status & env->CP0_Cause & 0x0000FF00) && 885 !(env->hflags & MIPS_HFLAG_EXL) && 886 !(env->hflags & MIPS_HFLAG_ERL) && 887 !(env->hflags & MIPS_HFLAG_DM)) { 888 /* Raise it */ 889 env->exception_index = EXCP_EXT_INTERRUPT; 890 env->error_code = 0; 891 do_interrupt(env); 892 #if defined(__sparc__) && !defined(HOST_SOLARIS) 893 tmp_T0 = 0; 894 #else 895 T0 = 0; 896 #endif 897 } 898 #elif defined(TARGET_SPARC) 899 if ((interrupt_request & CPU_INTERRUPT_HARD) && 900 (env->psret != 0)) { 901 int pil = env->interrupt_index & 15; 902 int type = env->interrupt_index & 0xf0; 903 904 if (((type == TT_EXTINT) && 905 (pil == 15 || pil > env->psrpil)) || 906 type != TT_EXTINT) { 907 env->interrupt_request &= ~CPU_INTERRUPT_HARD; 908 do_interrupt(env->interrupt_index); 909 env->interrupt_index = 0; 910 #if defined(__sparc__) && !defined(HOST_SOLARIS) 911 tmp_T0 = 0; 912 #else 913 T0 = 0; 914 #endif 754 915 } 755 }756 #elif defined(TARGET_SPARC)757 if (interrupt_request & CPU_INTERRUPT_HARD) {758 do_interrupt(env->interrupt_index, 0);759 env->interrupt_request &= 
~CPU_INTERRUPT_HARD;760 916 } else if (interrupt_request & CPU_INTERRUPT_TIMER) { 761 917 //do_interrupt(0, 0, 0, 0, 0); 762 918 env->interrupt_request &= ~CPU_INTERRUPT_TIMER; 763 } 764 #endif 765 if (interrupt_request & CPU_INTERRUPT_EXITTB) { 919 } else if (interrupt_request & CPU_INTERRUPT_HALT) { 920 env->interrupt_request &= ~CPU_INTERRUPT_HALT; 921 env->halted = 1; 922 env->exception_index = EXCP_HLT; 923 cpu_loop_exit(); 924 } 925 #elif defined(TARGET_ARM) 926 if (interrupt_request & CPU_INTERRUPT_FIQ 927 && !(env->uncached_cpsr & CPSR_F)) { 928 env->exception_index = EXCP_FIQ; 929 do_interrupt(env); 930 } 931 if (interrupt_request & CPU_INTERRUPT_HARD 932 && !(env->uncached_cpsr & CPSR_I)) { 933 env->exception_index = EXCP_IRQ; 934 do_interrupt(env); 935 } 936 #elif defined(TARGET_SH4) 937 /* XXXXX */ 938 #endif 939 /* Don't use the cached interupt_request value, 940 do_interrupt may have updated the EXITTB flag. */ 941 if (env->interrupt_request & CPU_INTERRUPT_EXITTB) { 766 942 #if defined(VBOX) 767 943 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXITTB); 768 944 #else 769 945 env->interrupt_request &= ~CPU_INTERRUPT_EXITTB; 770 #endif 946 #endif 771 947 /* ensure that no TB jump will be modified as 772 948 the program flow was changed */ 773 #if def __sparc__949 #if defined(__sparc__) && !defined(HOST_SOLARIS) 774 950 tmp_T0 = 0; 775 951 #else … … 777 953 #endif 778 954 } 955 #ifdef VBOX 956 RAWEx_ProfileStop(env, STATS_IRQ_HANDLING); 957 #endif 779 958 if (interrupt_request & CPU_INTERRUPT_EXIT) { 780 959 #if defined(VBOX) … … 784 963 env->interrupt_request &= ~CPU_INTERRUPT_EXIT; 785 964 env->exception_index = EXCP_INTERRUPT; 786 #endif 965 #endif 787 966 cpu_loop_exit(); 788 967 } … … 796 975 } 797 976 #ifdef DEBUG_EXEC 798 if ((loglevel & CPU_LOG_ EXEC)) {977 if ((loglevel & CPU_LOG_TB_CPU)) { 799 978 #if defined(TARGET_I386) 800 979 /* restore flags in standard format */ 980 #ifdef reg_EAX 801 981 env->regs[R_EAX] = EAX; 982 #endif 983 #ifdef reg_EBX 802 984 env->regs[R_EBX] = EBX; 985 #endif 986 #ifdef reg_ECX 803 987 env->regs[R_ECX] = ECX; 988 #endif 989 #ifdef reg_EDX 804 990 env->regs[R_EDX] = EDX; 991 #endif 992 #ifdef reg_ESI 805 993 env->regs[R_ESI] = ESI; 994 #endif 995 #ifdef reg_EDI 806 996 env->regs[R_EDI] = EDI; 997 #endif 998 #ifdef reg_EBP 807 999 env->regs[R_EBP] = EBP; 1000 #endif 1001 #ifdef reg_ESP 808 1002 env->regs[R_ESP] = ESP; 1003 #endif 809 1004 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK); 810 1005 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP); 811 1006 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); 812 1007 #elif defined(TARGET_ARM) 813 env->cpsr = compute_cpsr();814 1008 cpu_dump_state(env, logfile, fprintf, 0); 815 env->cpsr &= ~CACHED_CPSR_BITS;816 1009 #elif defined(TARGET_SPARC) 817 cpu_dump_state (env, logfile, fprintf, 0); 1010 REGWPTR = env->regbase + (env->cwp * 16); 1011 env->regwptr = REGWPTR; 1012 cpu_dump_state(env, logfile, fprintf, 0); 818 1013 #elif defined(TARGET_PPC) 819 1014 cpu_dump_state(env, logfile, fprintf, 0); 820 #else 821 #error unsupported target CPU 822 #endif 823 } 824 #endif 825 /* we record a subset of the CPU state. It will 826 always be the same before a given translated block 827 is executed. 
*/ 828 #if defined(TARGET_I386) 829 flags = env->hflags; 830 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)); 831 cs_base = env->segs[R_CS].base; 832 pc = cs_base + env->eip; 833 #elif defined(TARGET_ARM) 834 flags = env->thumb; 835 cs_base = 0; 836 pc = env->regs[15]; 837 #elif defined(TARGET_SPARC) 838 flags = 0; 839 cs_base = env->npc; 840 pc = env->pc; 841 #elif defined(TARGET_PPC) 842 flags = 0; 843 cs_base = 0; 844 pc = env->nip; 845 #else 846 #error unsupported CPU 847 #endif 848 1015 #elif defined(TARGET_M68K) 1016 cpu_m68k_flush_flags(env, env->cc_op); 1017 env->cc_op = CC_OP_FLAGS; 1018 env->sr = (env->sr & 0xffe0) 1019 | env->cc_dest | (env->cc_x << 4); 1020 cpu_dump_state(env, logfile, fprintf, 0); 1021 #elif defined(TARGET_MIPS) 1022 cpu_dump_state(env, logfile, fprintf, 0); 1023 #elif defined(TARGET_SH4) 1024 cpu_dump_state(env, logfile, fprintf, 0); 1025 #else 1026 #error unsupported target CPU 1027 #endif 1028 } 1029 #endif 849 1030 #ifdef VBOX 850 if (remR3CanExecuteRaw(env, pc, flags, &env->exception_index)) 1031 /* 1032 * Check if we the CPU state allows us to execute the code in raw-mode. 1033 */ 1034 RAWEx_ProfileStart(env, STATS_RAW_CHECK); 1035 if (remR3CanExecuteRaw(env, 1036 env->eip + env->segs[R_CS].base, 1037 env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)) 1038 flags, &env->exception_index)) 1039 { 1040 RAWEx_ProfileStop(env, STATS_RAW_CHECK); 1041 ret = env->exception_index; 851 1042 cpu_loop_exit(); 1043 } 1044 RAWEx_ProfileStop(env, STATS_RAW_CHECK); 852 1045 #endif /* VBOX */ 853 854 tb = tb_find(&ptb, pc, cs_base, 855 flags); 856 if (!tb) { 857 TranslationBlock **ptb1; 858 unsigned int h; 859 target_ulong phys_pc, phys_page1, phys_page2, virt_page2; 860 861 862 spin_lock(&tb_lock); 863 864 tb_invalidated_flag = 0; 865 866 regs_to_env(); /* XXX: do it just before cpu_gen_code() */ 867 868 /* find translated block using physical mappings */ 869 phys_pc = get_phys_addr_code(env, pc); 870 phys_page1 = phys_pc & TARGET_PAGE_MASK; 871 phys_page2 = -1; 872 h = tb_phys_hash_func(phys_pc); 873 ptb1 = &tb_phys_hash[h]; 874 for(;;) { 875 tb = *ptb1; 876 if (!tb) 877 goto not_found; 878 if (tb->pc == pc && 879 tb->page_addr[0] == phys_page1 && 880 tb->cs_base == cs_base && 881 tb->flags == flags) { 882 /* check next page if needed */ 883 if (tb->page_addr[1] != -1) { 884 virt_page2 = (pc & TARGET_PAGE_MASK) + 885 TARGET_PAGE_SIZE; 886 phys_page2 = get_phys_addr_code(env, virt_page2); 887 if (tb->page_addr[1] == phys_page2) 888 goto found; 889 } else { 890 goto found; 891 } 892 } 893 ptb1 = &tb->phys_hash_next; 894 } 895 not_found: 896 /* if no translated code available, then translate it now */ 897 tb = tb_alloc(pc); 898 if (!tb) { 899 /* flush must be done */ 900 tb_flush(env); 901 /* cannot fail at this point */ 902 tb = tb_alloc(pc); 903 /* don't forget to invalidate previous TB info */ 904 ptb = &tb_hash[tb_hash_func(pc)]; 905 T0 = 0; 906 } 907 tc_ptr = code_gen_ptr; 908 tb->tc_ptr = tc_ptr; 909 tb->cs_base = cs_base; 910 tb->flags = flags; 911 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size); 912 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1)); 913 914 /* check next page if needed */ 915 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK; 916 phys_page2 = -1; 917 if ((pc & TARGET_PAGE_MASK) != virt_page2) { 918 phys_page2 = get_phys_addr_code(env, virt_page2); 919 } 920 tb_link_phys(tb, phys_pc, phys_page2); 921 922 found: 923 if (tb_invalidated_flag) { 924 /* as 
some TB could have been invalidated because 925 of memory exceptions while generating the code, we 926 must recompute the hash index here */ 927 ptb = &tb_hash[tb_hash_func(pc)]; 928 while (*ptb != NULL) 929 ptb = &(*ptb)->hash_next; 930 T0 = 0; 931 } 932 /* we add the TB in the virtual pc hash table */ 933 *ptb = tb; 934 tb->hash_next = NULL; 935 tb_link(tb); 936 spin_unlock(&tb_lock); 937 } 1046 tb = tb_find_fast(); 938 1047 #ifdef DEBUG_EXEC 939 1048 if ((loglevel & CPU_LOG_EXEC)) { … … 943 1052 } 944 1053 #endif 945 #if def __sparc__1054 #if defined(__sparc__) && !defined(HOST_SOLARIS) 946 1055 T0 = tmp_T0; 947 #endif 948 /* see if we can patch the calling TB. */ 1056 #endif 1057 /* see if we can patch the calling TB. When the TB 1058 spans two pages, we cannot safely do a direct 1059 jump. */ 949 1060 { 950 if (T0 != 0 1061 if (T0 != 0 && 1062 #if USE_KQEMU 1063 (env->kqemu_enabled != 2) && 1064 #endif 951 1065 #ifdef VBOX 952 && !(tb->cflags & CF_RAW_MODE) 953 #endif 1066 !(tb->cflags & CF_RAW_MODE) && 1067 #endif 1068 tb->page_addr[1] == -1 954 1069 #if defined(TARGET_I386) && defined(USE_CODE_COPY) 955 && (tb->cflags & CF_CODE_COPY) == 1070 && (tb->cflags & CF_CODE_COPY) == 956 1071 (((TranslationBlock *)(T0 & ~3))->cflags & CF_CODE_COPY) 957 1072 #endif … … 961 1076 #if defined(USE_CODE_COPY) 962 1077 /* propagates the FP use info */ 963 ((TranslationBlock *)(T0 & ~3))->cflags |= 1078 ((TranslationBlock *)(T0 & ~3))->cflags |= 964 1079 (tb->cflags & CF_FP_USED); 965 1080 #endif … … 975 1090 "mov %%o7,%%i0" 976 1091 : /* no outputs */ 977 : "r" (gen_func) 978 : "i0", "i1", "i2", "i3", "i4", "i5"); 1092 : "r" (gen_func) 1093 : "i0", "i1", "i2", "i3", "i4", "i5", 1094 "l0", "l1", "l2", "l3", "l4", "l5", 1095 "l6", "l7"); 979 1096 #elif defined(__arm__) 980 1097 asm volatile ("mov pc, %0\n\t" … … 1058 1175 } 1059 1176 } 1177 #elif defined(__ia64) 1178 struct fptr { 1179 void *ip; 1180 void *gp; 1181 } fp; 1182 1183 fp.ip = tc_ptr; 1184 fp.gp = code_gen_buffer + 2 * (1 << 20); 1185 (*(void (*)(void)) &fp)(); 1060 1186 #else 1061 1187 #if defined(DEBUG) && defined(VBOX) && !defined(DEBUG_dmik) 1062 1188 #if !defined(DEBUG_bird) 1063 if (((env->hflags >> HF_CPL_SHIFT) & 3) == 0 && (env->hflags & HF_PE_MASK) && (env->cr[0] & CR0_PG_MASK)) 1189 if (((env->hflags >> HF_CPL_SHIFT) & 3) == 0 && (env->hflags & HF_PE_MASK) && (env->cr[0] & CR0_PG_MASK)) 1064 1190 { 1065 1191 if(!(env->state & CPU_EMULATE_SINGLE_STEP)) … … 1069 1195 } 1070 1196 else 1071 if (((env->hflags >> HF_CPL_SHIFT) & 3) == 3 && (env->hflags & HF_PE_MASK) && (env->cr[0] & CR0_PG_MASK)) 1197 if (((env->hflags >> HF_CPL_SHIFT) & 3) == 3 && (env->hflags & HF_PE_MASK) && (env->cr[0] & CR0_PG_MASK)) 1072 1198 { 1073 1199 if(!(env->state & CPU_EMULATE_SINGLE_STEP)) … … 1096 1222 #else 1097 1223 env->state &= ~CPU_EMULATE_SINGLE_STEP; 1098 #endif 1099 } 1100 #endif 1224 #endif 1225 } 1226 #endif 1101 1227 TMCpuTickPause(env->pVM); 1102 1228 remR3DisasInstr(env, -1, NULL); … … 1107 1233 } 1108 1234 } 1109 else 1235 else 1110 1236 { 1111 1237 RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE); … … 1135 1261 } 1136 1262 #endif 1263 #if defined(USE_KQEMU) 1264 #define MIN_CYCLE_BEFORE_SWITCH (100 * 1000) 1265 if (kqemu_is_ok(env) && 1266 (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) { 1267 cpu_loop_exit(); 1268 } 1269 #endif 1137 1270 } 1138 1271 } else { … … 1150 1283 /* restore flags in standard format */ 1151 1284 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK); 1152 1153 /* 
restore global registers */1154 #ifdef reg_EAX1155 EAX = saved_EAX;1156 #endif1157 #ifdef reg_ECX1158 ECX = saved_ECX;1159 #endif1160 #ifdef reg_EDX1161 EDX = saved_EDX;1162 #endif1163 #ifdef reg_EBX1164 EBX = saved_EBX;1165 #endif1166 #ifdef reg_ESP1167 ESP = saved_ESP;1168 #endif1169 #ifdef reg_EBP1170 EBP = saved_EBP;1171 #endif1172 #ifdef reg_ESI1173 ESI = saved_ESI;1174 #endif1175 #ifdef reg_EDI1176 EDI = saved_EDI;1177 #endif1178 1285 #elif defined(TARGET_ARM) 1179 env->cpsr = compute_cpsr();1286 /* XXX: Save/restore host fpu exception state?. */ 1180 1287 #elif defined(TARGET_SPARC) 1288 #if defined(reg_REGWPTR) 1289 REGWPTR = saved_regwptr; 1290 #endif 1181 1291 #elif defined(TARGET_PPC) 1292 #elif defined(TARGET_M68K) 1293 cpu_m68k_flush_flags(env, env->cc_op); 1294 env->cc_op = CC_OP_FLAGS; 1295 env->sr = (env->sr & 0xffe0) 1296 | env->cc_dest | (env->cc_x << 4); 1297 #elif defined(TARGET_MIPS) 1298 #elif defined(TARGET_SH4) 1299 /* XXXXX */ 1182 1300 #else 1183 1301 #error unsupported target CPU 1184 1302 #endif 1185 #if def __sparc__1303 #if defined(__sparc__) && !defined(HOST_SOLARIS) 1186 1304 asm volatile ("mov %0, %%i7" : : "r" (saved_i7)); 1187 1305 #endif 1188 T0 = saved_T0; 1189 T1 = saved_T1; 1190 T2 = saved_T2;1191 env = saved_env;1306 #include "hostregs_helper.h" 1307 1308 /* fail safe : never use cpu_single_env outside cpu_exec() */ 1309 cpu_single_env = NULL; 1192 1310 return ret; 1193 1311 } … … 1218 1336 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) { 1219 1337 selector &= 0xffff; 1220 cpu_x86_load_seg_cache(env, seg_reg, selector, 1338 cpu_x86_load_seg_cache(env, seg_reg, selector, 1221 1339 (selector << 4), 0xffff, 0); 1222 1340 } else { … … 1232 1350 saved_env = env; 1233 1351 env = s; 1234 1352 1235 1353 helper_fsave((target_ulong)ptr, data32); 1236 1354 … … 1244 1362 saved_env = env; 1245 1363 env = s; 1246 1364 1247 1365 helper_frstor((target_ulong)ptr, data32); 1248 1366 … … 1261 1379 signal set which should be restored */ 1262 1380 static inline int handle_cpu_signal(unsigned long pc, unsigned long address, 1263 int is_write, sigset_t *old_set, 1381 int is_write, sigset_t *old_set, 1264 1382 void *puc) 1265 1383 { … … 1270 1388 env = cpu_single_env; /* XXX: find a correct solution for multithread */ 1271 1389 #if defined(DEBUG_SIGNAL) 1272 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n", 1390 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n", 1273 1391 pc, address, is_write, *(unsigned long *)old_set); 1274 1392 #endif 1275 1393 /* XXX: locking issue */ 1276 if (is_write && page_unprotect( address, pc, puc)) {1394 if (is_write && page_unprotect(h2g(address), pc, puc)) { 1277 1395 return 1; 1278 1396 } 1279 1397 1280 1398 /* see if it is an MMU fault */ 1281 ret = cpu_x86_handle_mmu_fault(env, address, is_write, 1399 ret = cpu_x86_handle_mmu_fault(env, address, is_write, 1282 1400 ((env->hflags & HF_CPL_MASK) == 3), 0); 1283 1401 if (ret < 0) … … 1294 1412 if (ret == 1) { 1295 1413 #if 0 1296 printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n", 1414 printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n", 1297 1415 env->eip, env->cr[2], env->error_code); 1298 1416 #endif … … 1300 1418 do it (XXX: use sigsetjmp) */ 1301 1419 sigprocmask(SIG_SETMASK, old_set, NULL); 1302 raise_exception_err( EXCP0E_PAGE, env->error_code);1420 raise_exception_err(env->exception_index, env->error_code); 1303 1421 } else { 1304 1422 /* activate soft MMU for this block */ … … 1321 1439 env = cpu_single_env; 
/* XXX: find a correct solution for multithread */ 1322 1440 #if defined(DEBUG_SIGNAL) 1323 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n", 1441 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n", 1324 1442 pc, address, is_write, *(unsigned long *)old_set); 1325 1443 #endif 1326 1444 /* XXX: locking issue */ 1327 if (is_write && page_unprotect( address, pc, puc)) {1445 if (is_write && page_unprotect(h2g(address), pc, puc)) { 1328 1446 return 1; 1329 1447 } … … 1357 1475 env = cpu_single_env; /* XXX: find a correct solution for multithread */ 1358 1476 #if defined(DEBUG_SIGNAL) 1359 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n", 1477 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n", 1360 1478 pc, address, is_write, *(unsigned long *)old_set); 1361 1479 #endif 1362 1480 /* XXX: locking issue */ 1363 if (is_write && page_unprotect( address, pc, puc)) {1481 if (is_write && page_unprotect(h2g(address), pc, puc)) { 1364 1482 return 1; 1365 1483 } … … 1389 1507 TranslationBlock *tb; 1390 1508 int ret; 1391 1509 1392 1510 if (cpu_single_env) 1393 1511 env = cpu_single_env; /* XXX: find a correct solution for multithread */ 1394 1512 #if defined(DEBUG_SIGNAL) 1395 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n", 1513 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n", 1396 1514 pc, address, is_write, *(unsigned long *)old_set); 1397 1515 #endif 1398 1516 /* XXX: locking issue */ 1399 if (is_write && page_unprotect( address, pc, puc)) {1517 if (is_write && page_unprotect(h2g(address), pc, puc)) { 1400 1518 return 1; 1401 1519 } … … 1417 1535 if (ret == 1) { 1418 1536 #if 0 1419 printf("PF exception: NIP=0x%08x error=0x%x %p\n", 1537 printf("PF exception: NIP=0x%08x error=0x%x %p\n", 1420 1538 env->nip, env->error_code, tb); 1421 1539 #endif … … 1431 1549 return 1; 1432 1550 } 1551 1552 #elif defined(TARGET_M68K) 1553 static inline int handle_cpu_signal(unsigned long pc, unsigned long address, 1554 int is_write, sigset_t *old_set, 1555 void *puc) 1556 { 1557 TranslationBlock *tb; 1558 int ret; 1559 1560 if (cpu_single_env) 1561 env = cpu_single_env; /* XXX: find a correct solution for multithread */ 1562 #if defined(DEBUG_SIGNAL) 1563 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n", 1564 pc, address, is_write, *(unsigned long *)old_set); 1565 #endif 1566 /* XXX: locking issue */ 1567 if (is_write && page_unprotect(address, pc, puc)) { 1568 return 1; 1569 } 1570 /* see if it is an MMU fault */ 1571 ret = cpu_m68k_handle_mmu_fault(env, address, is_write, 1, 0); 1572 if (ret < 0) 1573 return 0; /* not an MMU fault */ 1574 if (ret == 0) 1575 return 1; /* the MMU fault was handled without causing real CPU fault */ 1576 /* now we have a real cpu fault */ 1577 tb = tb_find_pc(pc); 1578 if (tb) { 1579 /* the PC is inside the translated code. 
It means that we have 1580 a virtual CPU fault */ 1581 cpu_restore_state(tb, env, pc, puc); 1582 } 1583 /* we restore the process signal mask as the sigreturn should 1584 do it (XXX: use sigsetjmp) */ 1585 sigprocmask(SIG_SETMASK, old_set, NULL); 1586 cpu_loop_exit(); 1587 /* never comes here */ 1588 return 1; 1589 } 1590 1591 #elif defined (TARGET_MIPS) 1592 static inline int handle_cpu_signal(unsigned long pc, unsigned long address, 1593 int is_write, sigset_t *old_set, 1594 void *puc) 1595 { 1596 TranslationBlock *tb; 1597 int ret; 1598 1599 if (cpu_single_env) 1600 env = cpu_single_env; /* XXX: find a correct solution for multithread */ 1601 #if defined(DEBUG_SIGNAL) 1602 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n", 1603 pc, address, is_write, *(unsigned long *)old_set); 1604 #endif 1605 /* XXX: locking issue */ 1606 if (is_write && page_unprotect(h2g(address), pc, puc)) { 1607 return 1; 1608 } 1609 1610 /* see if it is an MMU fault */ 1611 ret = cpu_mips_handle_mmu_fault(env, address, is_write, 1, 0); 1612 if (ret < 0) 1613 return 0; /* not an MMU fault */ 1614 if (ret == 0) 1615 return 1; /* the MMU fault was handled without causing real CPU fault */ 1616 1617 /* now we have a real cpu fault */ 1618 tb = tb_find_pc(pc); 1619 if (tb) { 1620 /* the PC is inside the translated code. It means that we have 1621 a virtual CPU fault */ 1622 cpu_restore_state(tb, env, pc, puc); 1623 } 1624 if (ret == 1) { 1625 #if 0 1626 printf("PF exception: NIP=0x%08x error=0x%x %p\n", 1627 env->nip, env->error_code, tb); 1628 #endif 1629 /* we restore the process signal mask as the sigreturn should 1630 do it (XXX: use sigsetjmp) */ 1631 sigprocmask(SIG_SETMASK, old_set, NULL); 1632 do_raise_exception_err(env->exception_index, env->error_code); 1633 } else { 1634 /* activate soft MMU for this block */ 1635 cpu_resume_from_signal(env, puc); 1636 } 1637 /* never comes here */ 1638 return 1; 1639 } 1640 1641 #elif defined (TARGET_SH4) 1642 static inline int handle_cpu_signal(unsigned long pc, unsigned long address, 1643 int is_write, sigset_t *old_set, 1644 void *puc) 1645 { 1646 TranslationBlock *tb; 1647 int ret; 1648 1649 if (cpu_single_env) 1650 env = cpu_single_env; /* XXX: find a correct solution for multithread */ 1651 #if defined(DEBUG_SIGNAL) 1652 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n", 1653 pc, address, is_write, *(unsigned long *)old_set); 1654 #endif 1655 /* XXX: locking issue */ 1656 if (is_write && page_unprotect(h2g(address), pc, puc)) { 1657 return 1; 1658 } 1659 1660 /* see if it is an MMU fault */ 1661 ret = cpu_sh4_handle_mmu_fault(env, address, is_write, 1, 0); 1662 if (ret < 0) 1663 return 0; /* not an MMU fault */ 1664 if (ret == 0) 1665 return 1; /* the MMU fault was handled without causing real CPU fault */ 1666 1667 /* now we have a real cpu fault */ 1668 tb = tb_find_pc(pc); 1669 if (tb) { 1670 /* the PC is inside the translated code. 
It means that we have 1671 a virtual CPU fault */ 1672 cpu_restore_state(tb, env, pc, puc); 1673 } 1674 #if 0 1675 printf("PF exception: NIP=0x%08x error=0x%x %p\n", 1676 env->nip, env->error_code, tb); 1677 #endif 1678 /* we restore the process signal mask as the sigreturn should 1679 do it (XXX: use sigsetjmp) */ 1680 sigprocmask(SIG_SETMASK, old_set, NULL); 1681 cpu_loop_exit(); 1682 /* never comes here */ 1683 return 1; 1684 } 1433 1685 #else 1434 1686 #error unsupported target CPU … … 1438 1690 1439 1691 #if defined(USE_CODE_COPY) 1440 static void cpu_send_trap(unsigned long pc, int trap, 1692 static void cpu_send_trap(unsigned long pc, int trap, 1441 1693 struct ucontext *uc) 1442 1694 { … … 1457 1709 #endif 1458 1710 1459 int cpu_signal_handler(int host_signum, struct siginfo *info,1711 int cpu_signal_handler(int host_signum, void *pinfo, 1460 1712 void *puc) 1461 1713 { 1714 siginfo_t *info = pinfo; 1462 1715 struct ucontext *uc = puc; 1463 1716 unsigned long pc; … … 1479 1732 } else 1480 1733 #endif 1481 return handle_cpu_signal(pc, (unsigned long)info->si_addr, 1482 trapno == 0xe ? 1734 return handle_cpu_signal(pc, (unsigned long)info->si_addr, 1735 trapno == 0xe ? 1483 1736 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0, 1484 1737 &uc->uc_sigmask, puc); … … 1487 1740 #elif defined(__x86_64__) 1488 1741 1489 int cpu_signal_handler(int host_signum, struct siginfo *info,1742 int cpu_signal_handler(int host_signum, void *pinfo, 1490 1743 void *puc) 1491 1744 { 1745 siginfo_t *info = pinfo; 1492 1746 struct ucontext *uc = puc; 1493 1747 unsigned long pc; 1494 1748 1495 1749 pc = uc->uc_mcontext.gregs[REG_RIP]; 1496 return handle_cpu_signal(pc, (unsigned long)info->si_addr, 1497 uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ? 1750 return handle_cpu_signal(pc, (unsigned long)info->si_addr, 1751 uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ? 1498 1752 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0, 1499 1753 &uc->uc_sigmask, puc); … … 1551 1805 #endif /* __APPLE__ */ 1552 1806 1553 int cpu_signal_handler(int host_signum, struct siginfo *info,1807 int cpu_signal_handler(int host_signum, void *pinfo, 1554 1808 void *puc) 1555 1809 { 1810 siginfo_t *info = pinfo; 1556 1811 struct ucontext *uc = puc; 1557 1812 unsigned long pc; … … 1568 1823 is_write = 1; 1569 1824 #endif 1570 return handle_cpu_signal(pc, (unsigned long)info->si_addr, 1825 return handle_cpu_signal(pc, (unsigned long)info->si_addr, 1571 1826 is_write, &uc->uc_sigmask, puc); 1572 1827 } … … 1574 1829 #elif defined(__alpha__) 1575 1830 1576 int cpu_signal_handler(int host_signum, struct siginfo *info,1831 int cpu_signal_handler(int host_signum, void *pinfo, 1577 1832 void *puc) 1578 1833 { 1834 siginfo_t *info = pinfo; 1579 1835 struct ucontext *uc = puc; 1580 1836 uint32_t *pc = uc->uc_mcontext.sc_pc; … … 1598 1854 } 1599 1855 1600 return handle_cpu_signal(pc, (unsigned long)info->si_addr, 1856 return handle_cpu_signal(pc, (unsigned long)info->si_addr, 1601 1857 is_write, &uc->uc_sigmask, puc); 1602 1858 } 1603 1859 #elif defined(__sparc__) 1604 1860 1605 int cpu_signal_handler(int host_signum, struct siginfo *info,1861 int cpu_signal_handler(int host_signum, void *pinfo, 1606 1862 void *puc) 1607 1863 { 1864 siginfo_t *info = pinfo; 1608 1865 uint32_t *regs = (uint32_t *)(info + 1); 1609 1866 void *sigmask = (regs + 20); … … 1611 1868 int is_write; 1612 1869 uint32_t insn; 1613 1870 1614 1871 /* XXX: is there a standard glibc define ? 
*/ 1615 1872 pc = regs[1]; … … 1630 1887 } 1631 1888 } 1632 return handle_cpu_signal(pc, (unsigned long)info->si_addr, 1889 return handle_cpu_signal(pc, (unsigned long)info->si_addr, 1633 1890 is_write, sigmask, NULL); 1634 1891 } … … 1636 1893 #elif defined(__arm__) 1637 1894 1638 int cpu_signal_handler(int host_signum, struct siginfo *info,1895 int cpu_signal_handler(int host_signum, void *pinfo, 1639 1896 void *puc) 1640 1897 { 1898 siginfo_t *info = pinfo; 1641 1899 struct ucontext *uc = puc; 1642 1900 unsigned long pc; 1643 1901 int is_write; 1644 1902 1645 1903 pc = uc->uc_mcontext.gregs[R15]; 1646 1904 /* XXX: compute is_write */ 1647 1905 is_write = 0; 1648 return handle_cpu_signal(pc, (unsigned long)info->si_addr, 1906 return handle_cpu_signal(pc, (unsigned long)info->si_addr, 1649 1907 is_write, 1650 &uc->uc_sigmask );1908 &uc->uc_sigmask, puc); 1651 1909 } 1652 1910 1653 1911 #elif defined(__mc68000) 1654 1912 1655 int cpu_signal_handler(int host_signum, struct siginfo *info,1913 int cpu_signal_handler(int host_signum, void *pinfo, 1656 1914 void *puc) 1657 1915 { 1916 siginfo_t *info = pinfo; 1658 1917 struct ucontext *uc = puc; 1659 1918 unsigned long pc; 1660 1919 int is_write; 1661 1920 1662 1921 pc = uc->uc_mcontext.gregs[16]; 1663 1922 /* XXX: compute is_write */ 1664 1923 is_write = 0; 1665 return handle_cpu_signal(pc, (unsigned long)info->si_addr, 1924 return handle_cpu_signal(pc, (unsigned long)info->si_addr, 1666 1925 is_write, 1667 1926 &uc->uc_sigmask, puc); 1668 1927 } 1669 1928 1929 #elif defined(__ia64) 1930 1931 #ifndef __ISR_VALID 1932 /* This ought to be in <bits/siginfo.h>... */ 1933 # define __ISR_VALID 1 1934 #endif 1935 1936 int cpu_signal_handler(int host_signum, void *pinfo, void *puc) 1937 { 1938 siginfo_t *info = pinfo; 1939 struct ucontext *uc = puc; 1940 unsigned long ip; 1941 int is_write = 0; 1942 1943 ip = uc->uc_mcontext.sc_ip; 1944 switch (host_signum) { 1945 case SIGILL: 1946 case SIGFPE: 1947 case SIGSEGV: 1948 case SIGBUS: 1949 case SIGTRAP: 1950 if (info->si_code && (info->si_segvflags & __ISR_VALID)) 1951 /* ISR.W (write-access) is bit 33: */ 1952 is_write = (info->si_isr >> 33) & 1; 1953 break; 1954 1955 default: 1956 break; 1957 } 1958 return handle_cpu_signal(ip, (unsigned long)info->si_addr, 1959 is_write, 1960 &uc->uc_sigmask, puc); 1961 } 1962 1963 #elif defined(__s390__) 1964 1965 int cpu_signal_handler(int host_signum, void *pinfo, 1966 void *puc) 1967 { 1968 siginfo_t *info = pinfo; 1969 struct ucontext *uc = puc; 1970 unsigned long pc; 1971 int is_write; 1972 1973 pc = uc->uc_mcontext.psw.addr; 1974 /* XXX: compute is_write */ 1975 is_write = 0; 1976 return handle_cpu_signal(pc, (unsigned long)info->si_addr, 1977 is_write, 1978 &uc->uc_sigmask, puc); 1979 } 1980 1670 1981 #else 1671 1982 -
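The hunks above switch every host's cpu_signal_handler() from `struct siginfo *info` to `void *pinfo` with an internal `siginfo_t *info = pinfo;` cast, which sidesteps the glibc `struct siginfo` versus `siginfo_t` type clash. As a rough, hedged sketch of how such a handler is typically hooked up under plain POSIX (the wrapper and its registration point are invented for illustration; the real registration code lives elsewhere in the tree):

    #include <signal.h>
    #include <stdlib.h>
    #include <string.h>

    extern int cpu_signal_handler(int host_signum, void *pinfo, void *puc);

    static void host_segv_handler(int sig, siginfo_t *info, void *puc)
    {
        /* forward as void pointers; cpu_signal_handler() casts back */
        if (!cpu_signal_handler(sig, info, puc))
            abort(); /* not a guest-induced fault: give up */
    }

    static void install_fault_handler(void)
    {
        struct sigaction act;
        memset(&act, 0, sizeof(act));
        sigemptyset(&act.sa_mask);
        act.sa_flags = SA_SIGINFO;          /* deliver siginfo_t and ucontext */
        act.sa_sigaction = host_segv_handler;
        sigaction(SIGSEGV, &act, NULL);
    }

Since sa_sigaction already delivers the siginfo as `siginfo_t *` and the ucontext as `void *`, the `void *pinfo` prototype compiles even on hosts where `struct siginfo` is not a complete type.
-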
trunk/src/recompiler/disas.h
r1 r2422
  void disas(FILE *out, void *code, unsigned long size);
  void target_disas(FILE *out, target_ulong code, target_ulong size, int flags);
- void monitor_disas(target_ulong pc, int nb_insn, int is_physical, int flags);
+ void monitor_disas(CPUState *env,
+                    target_ulong pc, int nb_insn, int is_physical, int flags);
  
  /* Look up symbol for debugging purpose. Returns "" if unknown. */
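monitor_disas() now takes the CPU whose context should be used, rather than relying on an implicit global. A minimal sketch of an adjusted call site; the wrapper name and the headers pulled in are assumptions for illustration only:

    #include "cpu.h"     /* CPUState, target_ulong (tree headers, assumed) */
    #include "disas.h"

    /* hypothetical monitor command: dump 'count' instructions at guest
       virtual address 'pc', resolved through this CPU's MMU state */
    static void do_info_disas(CPUState *env, target_ulong pc, int count)
    {
        monitor_disas(env, pc, count, 0 /* !physical */, 0 /* flags */);
    }
-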
trunk/src/recompiler/dyngen-exec.h
r1 r2422 21 21 #define __DYNGEN_EXEC_H__ 22 22 23 /* prevent Solaris from trying to typedef FILE in gcc's 24 include/floatingpoint.h which will conflict with the 25 definition down below */ 26 #ifdef __sun__ 27 #define _FILEDEFED 28 #endif 29 23 30 /* NOTE: standard headers should be used with special care at this 24 31 point because host CPU registers are used as global variables. Some … … 26 33 #include <stddef.h> 27 34 28 /* There are some conflicts with the uClibc headers. I'm _very_ amazed 29 that we don't get the same conflicts when compiling against glibc ... */ 30 #if !defined(__L4ENV__) && !defined(__MINGW32__) /* VBOX: mingw 3.4.x got into trouble here. */ 35 #ifndef VBOX 31 36 32 37 typedef unsigned char uint8_t; 33 38 typedef unsigned short uint16_t; 34 39 typedef unsigned int uint32_t; 40 // Linux/Sparc64 defines uint64_t 41 #if !(defined (__sparc_v9__) && defined(__linux__)) 35 42 /* XXX may be done for all 64 bits targets ? */ 36 #if defined (__x86_64__) 43 #if defined (__x86_64__) || defined(__ia64) 37 44 typedef unsigned long uint64_t; 38 45 #else 39 46 typedef unsigned long long uint64_t; 40 47 #endif 41 48 #endif 49 50 /* if Solaris/__sun__, don't typedef int8_t, as it will be typedef'd 51 prior to this and will cause an error in compliation, conflicting 52 with /usr/include/sys/int_types.h, line 75 */ 53 #ifndef __sun__ 42 54 typedef signed char int8_t; 55 #endif 43 56 typedef signed short int16_t; 44 57 typedef signed int int32_t; 45 #if defined (__x86_64__) 58 // Linux/Sparc64 defines int64_t 59 #if !(defined (__sparc_v9__) && defined(__linux__)) 60 #if defined (__x86_64__) || defined(__ia64) 46 61 typedef signed long int64_t; 47 62 #else 48 63 typedef signed long long int64_t; 49 64 #endif 65 #endif 66 67 /* XXX: This may be wrong for 64-bit ILP32 hosts. */ 68 typedef void * host_reg_t; 50 69 51 70 #define INT8_MIN (-128) … … 62 81 #define UINT64_MAX ((uint64_t)(18446744073709551615)) 63 82 64 #else /* __L4ENV__ */65 66 #include <stdint.h>67 68 #endif /* __L4ENV__ */69 70 83 typedef struct FILE FILE; 71 84 extern int fprintf(FILE *, const char *, ...); … … 73 86 #undef NULL 74 87 #define NULL 0 75 #if defined(_BSD) && !defined(__APPLE__) 76 #include <ieeefp.h> 77 78 #define FE_TONEAREST FP_RN 79 #define FE_DOWNWARD FP_RM 80 #define FE_UPWARD FP_RP 81 #define FE_TOWARDZERO FP_RZ 82 #define fesetround(x) fpsetround(x) 83 #else 84 #include <fenv.h> 85 #endif 88 89 #else /* VBOX */ 90 91 /* XXX: This may be wrong for 64-bit ILP32 hosts. 
*/ 92 typedef void * host_reg_t; 93 94 #include <iprt/stdint.h> 95 #include <stdio.h> 96 97 #endif /* VBOX */ 86 98 87 99 #ifdef __i386__ … … 96 108 #define AREG2 "r12" 97 109 #define AREG3 "r13" 98 #define AREG4 "r14"99 #define AREG5 "r15"110 //#define AREG4 "r14" 111 //#define AREG5 "r15" 100 112 #endif 101 113 #ifdef __powerpc__ … … 131 143 #endif 132 144 #ifdef __sparc__ 145 #ifdef HOST_SOLARIS 146 #define AREG0 "g2" 147 #define AREG1 "g3" 148 #define AREG2 "g4" 149 #define AREG3 "g5" 150 #define AREG4 "g6" 151 #else 152 #ifdef __sparc_v9__ 153 #define AREG0 "g1" 154 #define AREG1 "g4" 155 #define AREG2 "g5" 156 #define AREG3 "g7" 157 #else 133 158 #define AREG0 "g6" 134 159 #define AREG1 "g1" … … 143 168 #define AREG10 "l6" 144 169 #define AREG11 "l7" 170 #endif 171 #endif 145 172 #define USE_FP_CONVERT 146 173 #endif … … 170 197 #endif 171 198 #ifdef __ia64__ 172 #define AREG0 "r 27"173 #define AREG1 "r 24"174 #define AREG2 "r 25"175 #define AREG3 "r 26"199 #define AREG0 "r7" 200 #define AREG1 "r4" 201 #define AREG2 "r5" 202 #define AREG3 "r6" 176 203 #endif 177 204 178 205 /* force GCC to generate only one epilog at the end of the function */ 179 #define FORCE_RET() asm volatile ("");206 #define FORCE_RET() __asm__ __volatile__("" : : : "memory"); 180 207 181 208 #ifndef OPPROTO … … 192 219 #define __hidden __attribute__((visibility("hidden"))) 193 220 #else 194 #define __hidden 221 #define __hidden 195 222 #endif 196 223 … … 232 259 #ifdef __x86_64__ 233 260 #define EXIT_TB() asm volatile ("ret") 261 #define GOTO_LABEL_PARAM(n) asm volatile ("jmp " ASM_NAME(__op_gen_label) #n) 234 262 #endif 235 263 #ifdef __powerpc__ … … 239 267 #ifdef __s390__ 240 268 #define EXIT_TB() asm volatile ("br %r14") 269 #define GOTO_LABEL_PARAM(n) asm volatile ("b " ASM_NAME(__op_gen_label) #n) 241 270 #endif 242 271 #ifdef __alpha__ … … 245 274 #ifdef __ia64__ 246 275 #define EXIT_TB() asm volatile ("br.ret.sptk.many b0;;") 276 #define GOTO_LABEL_PARAM(n) asm volatile ("br.sptk.many " \ 277 ASM_NAME(__op_gen_label) #n) 247 278 #endif 248 279 #ifdef __sparc__ 249 #define EXIT_TB() asm volatile ("jmpl %i0 + 8, %g0 \n" \250 "nop")280 #define EXIT_TB() asm volatile ("jmpl %i0 + 8, %g0; nop") 281 #define GOTO_LABEL_PARAM(n) asm volatile ("ba " ASM_NAME(__op_gen_label) #n ";nop") 251 282 #endif 252 283 #ifdef __arm__ 253 284 #define EXIT_TB() asm volatile ("b exec_loop") 285 #define GOTO_LABEL_PARAM(n) asm volatile ("b " ASM_NAME(__op_gen_label) #n) 254 286 #endif 255 287 #ifdef __mc68000 -
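The AREG* names above are consumed as GCC global register variables in each target's exec.h, and the new host_reg_t typedef is what such declarations are meant to use so that 64-bit hosts get full-width temporaries. A minimal sketch of the consuming side, using the i386 target's variables as the example (the actual declarations live in target-i386/exec.h, so treat this as illustrative):

    #include "dyngen-exec.h"   /* AREG0..AREG2, host_reg_t */

    struct CPUX86State;

    /* pin hot interpreter state into fixed host registers so every
       generated op sees it without explicit loads and stores */
    register struct CPUX86State *env asm(AREG0);
    register host_reg_t T0 asm(AREG1);
    register host_reg_t T1 asm(AREG2);

Because AREG0 expands to a host register name string ("ebp", "r14", ...), the same declarations work across hosts as long as the reserved registers stay out of the compiler's allocator.
-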
trunk/src/recompiler/dyngen.c
r1 r2422 130 130 typedef uint32_t host_ulong; 131 131 #define swabls(x) swab32s(x) 132 #define swablss(x) swab32ss(x) 132 133 #else 133 134 typedef int64_t host_long; 134 135 typedef uint64_t host_ulong; 135 136 #define swabls(x) swab64s(x) 137 #define swablss(x) swab64ss(x) 136 138 #endif 137 139 … … 181 183 #include <mach-o/nlist.h> 182 184 #include <mach-o/reloc.h> 185 #if !defined(HOST_I386) 183 186 #include <mach-o/ppc/reloc.h> 187 #endif 184 188 185 189 # define check_mach_header(x) (x.magic == MH_MAGIC) … … 202 206 #define EXE_RELOC struct relocation_info 203 207 #define EXE_SYM struct nlist_extended 208 #if defined(HOST_I386) 209 # define r_offset r_address 210 #endif 204 211 205 212 #endif /* CONFIG_FORMAT_MACH */ … … 222 229 char n_other; 223 230 short n_desc; 224 unsigned long st_value; / / n_value -> st_value225 unsigned long st_size; / / added231 unsigned long st_value; /* n_value -> st_value */ 232 unsigned long st_size; /* added */ 226 233 }; 227 234 … … 237 244 OUT_GEN_OP, 238 245 OUT_CODE, 239 OUT_INDEX_OP ,246 OUT_INDEX_OP 240 247 }; 241 248 … … 314 321 } 315 322 323 void swab32ss(int32_t *p) 324 { 325 *p = bswap32(*p); 326 } 327 316 328 void swab64s(uint64_t *p) 329 { 330 *p = bswap64(*p); 331 } 332 333 void swab64ss(int64_t *p) 317 334 { 318 335 *p = bswap64(*p); … … 427 444 swabls(&rel->r_info); 428 445 #ifdef ELF_USES_RELOCA 429 swabls (&rel->r_addend);446 swablss(&rel->r_addend); 430 447 #endif 431 448 } … … 487 504 ELF_RELOC *rel; 488 505 489 fd = open(filename, O_RDONLY); 506 fd = open(filename, O_RDONLY 507 #ifdef O_BINARY 508 | O_BINARY 509 #endif 510 ); 490 511 if (fd < 0) 491 512 error("can't open file '%s'", filename); … … 508 529 elf_swap_ehdr(&ehdr); 509 530 if (ehdr.e_ident[EI_CLASS] != ELF_CLASS) 510 error("Unsupported ELF class ");531 error("Unsupported ELF class (%#x)", ehdr.e_ident[EI_CLASS]); 511 532 if (ehdr.e_type != ET_REL) 512 533 error("ELF object file expected"); … … 535 556 536 557 sec = &shdr[ehdr.e_shstrndx]; 537 shstr = sdata[ehdr.e_shstrndx];558 shstr = (char *)sdata[ehdr.e_shstrndx]; 538 559 539 560 /* swap relocations */ … … 571 592 572 593 symtab = (ElfW(Sym) *)sdata[symtab_sec - shdr]; 573 strtab = sdata[symtab_sec->sh_link];594 strtab = (char *)sdata[symtab_sec->sh_link]; 574 595 575 596 nb_syms = symtab_sec->sh_size / sizeof(ElfW(Sym)); … … 665 686 if (!strcmp(name, ".data")) 666 687 name = name_for_dotdata(rel); 688 if (name[0] == '.') 689 return NULL; 667 690 return name; 668 691 } … … 704 727 705 728 fd = open(filename, O_RDONLY 706 #ifdef _WIN32729 #ifdef O_BINARY 707 730 | O_BINARY 708 731 #endif … … 907 930 908 931 932 #if defined(HOST_PPC) 909 933 static inline void fetch_next_pair_value(struct relocation_info * rel, unsigned int *value) 910 934 { … … 922 946 } 923 947 } 948 #endif 924 949 925 950 /* find a sym name given its value, in a section number */ … … 1014 1039 } 1015 1040 1041 #if defined(HOST_I386) 1042 /* ignore internal pc relative fixups where both ends are in the text section. 
*/ 1043 if (rel->r_pcrel && !rel->r_extern && rel->r_symbolnum == 1 /* ASSUMES text */) 1044 return NULL; 1045 #endif 1046 1016 1047 /* Intruction contains an offset to the symbols pointed to, in the rel->r_symbolnum section */ 1017 1048 sectoffset = *(uint32_t *)(text + rel->r_address) & 0xffff; … … 1024 1055 error("sectnum > segment->nsects"); 1025 1056 1057 #if defined(HOST_PPC) 1026 1058 switch(rel->r_type) 1027 1059 { 1028 case PPC_RELOC_LO16: fetch_next_pair_value(rel+1, &other_half); sectoffset = (sectoffset & 0xffff);1060 case PPC_RELOC_LO16: fetch_next_pair_value(rel+1, &other_half); sectoffset |= (other_half << 16); 1029 1061 break; 1030 case PPC_RELOC_HI16: fetch_next_pair_value(rel+1, &other_half); sectoffset = ( other_half & 0xffff);1062 case PPC_RELOC_HI16: fetch_next_pair_value(rel+1, &other_half); sectoffset = (sectoffset << 16) | (uint16_t)(other_half & 0xffff); 1031 1063 break; 1032 case PPC_RELOC_HA16: fetch_next_pair_value(rel+1, &other_half); sectoffset = ( other_half & 0xffff);1064 case PPC_RELOC_HA16: fetch_next_pair_value(rel+1, &other_half); sectoffset = (sectoffset << 16) + (int16_t)(other_half & 0xffff); 1033 1065 break; 1034 1066 case PPC_RELOC_BR24: … … 1039 1071 error("switch(rel->type) not found"); 1040 1072 } 1073 #elif defined(HOST_I386) 1074 /* The intruction contains the addend. */ 1075 sectoffset = *(uint32_t *)(text + rel->r_address); 1076 #else 1077 #error unsupported mach-o host 1078 #endif 1041 1079 1042 1080 if(rel->r_pcrel) 1043 1081 sectoffset += rel->r_address; 1044 1082 1083 #if defined(HOST_PPC) 1045 1084 if (rel->r_type == PPC_RELOC_BR24) 1046 1085 name = (char *)find_reloc_name_in_sec_ptr((int)sectoffset, §ion_hdr[sectnum-1]); 1086 #endif 1047 1087 1048 1088 /* search it in the full symbol list, if not found */ … … 1053 1093 } 1054 1094 1095 #if defined(HOST_I386) 1096 static const char *get_rel_sym_name_and_addend(EXE_RELOC *rel, int *addend) 1097 { 1098 const char *name = NULL; 1099 1100 if (R_SCATTERED & rel->r_address) { 1101 unsigned int i; 1102 struct scattered_relocation_info * sca_rel = (struct scattered_relocation_info*)rel; 1103 if (sca_rel->r_length != 2 || rel->r_pcrel) { 1104 error("Fully implement R_SCATTERED! r_address=%#x r_type=%#x r_length=%d r_pcrel=%d r_value=%#x\n", 1105 (int)sca_rel->r_address, sca_rel->r_type, sca_rel->r_length, sca_rel->r_pcrel, sca_rel->r_value); 1106 } 1107 1108 /* this seems to be the way to calc the addend. */ 1109 *addend = *(int32_t *)(text + sca_rel->r_address) - sca_rel->r_value; 1110 1111 /* todo: do we need to ignore internal relocations? */ 1112 #if 0 1113 if (sca_rel->r_pcrel ...) 1114 return NULL; 1115 #endif 1116 1117 /* find_reloc_name_given_its_address doesn't do the right thing here, so 1118 we locate the section and use find_sym_with_value_and_sec_number */ 1119 for (i = 0; i < segment->nsects ; i++) { 1120 if ((uintptr_t)sca_rel->r_value - section_hdr[i].addr < section_hdr[i].size) { 1121 int off = 0; 1122 name = find_sym_with_value_and_sec_number(sca_rel->r_value, i + 1, &off); 1123 if (name) { 1124 *addend += off; 1125 break; 1126 } 1127 } 1128 } 1129 if (!name) 1130 error("Fully implement R_SCATTERED! r_address=%#x r_type=%#x r_length=%d r_pcrel=%d r_value=%#x\n", 1131 (int)sca_rel->r_address, sca_rel->r_type, sca_rel->r_length, sca_rel->r_pcrel, sca_rel->r_value); 1132 } 1133 else 1134 { 1135 /* ignore debug syms (paranoia). 
*/ 1136 if (symtab[rel->r_symbolnum].n_type & N_STAB) 1137 return NULL; 1138 1139 /* ignore internal pc relative fixups where both ends are in the text section. */ 1140 if (rel->r_pcrel && !rel->r_extern && rel->r_symbolnum == 1 /* ASSUMES text */) 1141 return NULL; 1142 1143 /* get the addend, it is in the instruction stream. */ 1144 *addend = *(int32_t *)(text + rel->r_address); 1145 if (rel->r_pcrel) 1146 *addend += rel->r_address; 1147 1148 /* external fixups are easy. */ 1149 if (rel->r_extern) 1150 { 1151 if (rel->r_symbolnum >= nb_syms) 1152 error("rel->r_symbolnum (%d) >= nb_syms (%d)", rel->r_symbolnum, nb_syms); 1153 name = get_sym_name(&symtab[rel->r_symbolnum]); 1154 } 1155 else 1156 { 1157 /* sanity checks. */ 1158 if (rel->r_symbolnum == 0xffffff) 1159 return NULL; 1160 if (rel->r_symbolnum > segment->nsects) 1161 error("sectnum (%d) > segment->nsects (%d)", rel->r_symbolnum, segment->nsects); 1162 if (rel->r_pcrel) 1163 error("internal pcrel fixups not implemented"); 1164 1165 /* search for the symbol. */ 1166 name = find_sym_with_value_and_sec_number(*addend, rel->r_symbolnum, addend); 1167 } 1168 } 1169 return name; 1170 } 1171 #endif /* HOST_I386 */ 1172 1055 1173 /* Used by dyngen common code */ 1056 1174 static const char * get_rel_sym_name(EXE_RELOC * rel) 1057 1175 { 1058 1176 int sslide; 1177 #if defined(HOST_I386) 1178 return get_rel_sym_name_and_addend(rel, &sslide); 1179 #else 1059 1180 return get_reloc_name( rel, &sslide); 1181 #endif 1060 1182 } 1061 1183 … … 1082 1204 struct nlist *syment; 1083 1205 1084 fd = open(filename, O_RDONLY); 1206 fd = open(filename, O_RDONLY 1207 #ifdef O_BINARY 1208 | O_BINARY 1209 #endif 1210 ); 1085 1211 if (fd < 0) 1086 1212 error("can't open file '%s'", filename); … … 1094 1220 error("bad Mach header"); 1095 1221 } 1096 1222 1223 #if defined(HOST_PPC) 1097 1224 if (mach_hdr.cputype != CPU_TYPE_POWERPC) 1225 #elif defined(HOST_I386) 1226 if (mach_hdr.cputype != CPU_TYPE_X86) 1227 #else 1228 #error unsupported host 1229 #endif 1098 1230 error("Unsupported CPU"); 1099 1231 … … 1176 1308 /* Now transform the symtab, to an extended version, with the sym size, and the C name */ 1177 1309 for(i = 0, sym = symtab, syment = symtab_std; i < nb_syms; i++, sym++, syment++) { 1178 const char *name; 1179 struct nlist *sym_follow, *sym_next = 0; 1310 struct nlist *sym_cur, *sym_next = 0; 1180 1311 unsigned int j; 1181 name = find_str_by_index(sym->n_un.n_strx);1182 1312 memset(sym, 0, sizeof(*sym)); 1183 1313 1184 if ( sym ->n_type & N_STAB ) /* Debug symbols are skipped */1314 if ( syment->n_type & N_STAB ) /* Debug symbols are skipped */ 1185 1315 continue; 1186 1316 1187 1317 memcpy(sym, syment, sizeof(*syment)); 1318 1319 #if defined(VBOX) 1320 /* don't bother calcing size of internal symbol local symbols. 
*/ 1321 if (strstart(find_str_by_index(sym->n_un.n_strx), ".L", NULL)) { 1322 sym->st_size = 0; 1323 continue; 1324 } 1325 #endif 1188 1326 1189 1327 /* Find the following symbol in order to get the current symbol size */ 1190 for(j = 0, sym_follow = symtab_std; j < nb_syms; j++, sym_follow++) { 1191 if ( sym_follow->n_sect != 1 || sym_follow->n_type & N_STAB || !(sym_follow->n_value > sym->st_value)) 1328 for (j = 0, sym_cur = symtab_std; j < nb_syms; j++, sym_cur++) { 1329 if ( sym_cur->n_sect != /*syment->n_sect*/ 1 1330 || (sym_cur->n_type & N_STAB) 1331 || sym_cur->n_value <= syment->n_value) 1192 1332 continue; 1193 if (!sym_next) {1194 sym_next = sym_follow;1333 if ( sym_next 1334 && sym_next->n_value <= sym_cur->n_value) 1195 1335 continue; 1196 } 1197 if(!(sym_next->n_value > sym_follow->n_value)) 1336 #if defined(HOST_I386) 1337 /* Ignore local labels (.Lxxx). */ 1338 if (strstart(find_str_by_index(sym_cur->n_un.n_strx), ".L", NULL)) 1198 1339 continue; 1199 sym_next = sym_follow; 1340 #endif 1341 /* a good one */ 1342 sym_next = sym_cur; 1200 1343 } 1201 1344 if(sym_next) … … 1334 1477 } 1335 1478 1336 /* load a a.out object file */1479 /* load an a.out object file */ 1337 1480 int load_object(const char *filename) 1338 1481 { … … 1438 1581 #ifdef HOST_SPARC 1439 1582 if (sym_name[0] == '.') 1440 snprintf(name, sizeof(name),1583 snprintf(name, name_size, 1441 1584 "(long)(&__dot_%s)", 1442 1585 sym_name + 1); … … 1447 1590 } 1448 1591 1592 #ifdef HOST_IA64 1593 1594 #define PLT_ENTRY_SIZE 16 /* 1 bundle containing "brl" */ 1595 1596 struct plt_entry { 1597 struct plt_entry *next; 1598 const char *name; 1599 unsigned long addend; 1600 } *plt_list; 1601 1602 static int 1603 get_plt_index (const char *name, unsigned long addend) 1604 { 1605 struct plt_entry *plt, *prev= NULL; 1606 int index = 0; 1607 1608 /* see if we already have an entry for this target: */ 1609 for (plt = plt_list; plt; ++index, prev = plt, plt = plt->next) 1610 if (strcmp(plt->name, name) == 0 && plt->addend == addend) 1611 return index; 1612 1613 /* nope; create a new PLT entry: */ 1614 1615 plt = malloc(sizeof(*plt)); 1616 if (!plt) { 1617 perror("malloc"); 1618 exit(1); 1619 } 1620 memset(plt, 0, sizeof(*plt)); 1621 plt->name = strdup(name); 1622 plt->addend = addend; 1623 1624 /* append to plt-list: */ 1625 if (prev) 1626 prev->next = plt; 1627 else 1628 plt_list = plt; 1629 return index; 1630 } 1631 1632 #endif 1633 1449 1634 #ifdef HOST_ARM 1450 1635 … … 1455 1640 uint8_t *p; 1456 1641 uint32_t insn; 1457 int offset, min_offset, pc_offset, data_size ;1642 int offset, min_offset, pc_offset, data_size, spare, max_pool; 1458 1643 uint8_t data_allocated[1024]; 1459 1644 unsigned int data_index; 1645 int type; 1460 1646 1461 1647 memset(data_allocated, 0, sizeof(data_allocated)); … … 1463 1649 p = p_start; 1464 1650 min_offset = p_end - p_start; 1651 spare = 0x7fffffff; 1465 1652 while (p < p_start + min_offset) { 1466 1653 insn = get32((uint32_t *)p); 1654 /* TODO: Armv5e ldrd. */ 1655 /* TODO: VFP load. */ 1467 1656 if ((insn & 0x0d5f0000) == 0x051f0000) { 1468 1657 /* ldr reg, [pc, #im] */ 1469 1658 offset = insn & 0xfff; 1470 1659 if (!(insn & 0x00800000)) 1471 offset = -offset; 1660 offset = -offset; 1661 max_pool = 4096; 1662 type = 0; 1663 } else if ((insn & 0x0e5f0f00) == 0x0c1f0100) { 1664 /* FPA ldf. 
*/ 1665 offset = (insn & 0xff) << 2; 1666 if (!(insn & 0x00800000)) 1667 offset = -offset; 1668 max_pool = 1024; 1669 type = 1; 1670 } else if ((insn & 0x0fff0000) == 0x028f0000) { 1671 /* Some gcc load a doubleword immediate with 1672 add regN, pc, #imm 1673 ldmia regN, {regN, regM} 1674 Hope and pray the compiler never generates somethin like 1675 add reg, pc, #imm1; ldr reg, [reg, #-imm2]; */ 1676 int r; 1677 1678 r = (insn & 0xf00) >> 7; 1679 offset = ((insn & 0xff) >> r) | ((insn & 0xff) << (32 - r)); 1680 max_pool = 1024; 1681 type = 2; 1682 } else { 1683 max_pool = 0; 1684 type = -1; 1685 } 1686 if (type >= 0) { 1687 /* PC-relative load needs fixing up. */ 1688 if (spare > max_pool - offset) 1689 spare = max_pool - offset; 1472 1690 if ((offset & 3) !=0) 1473 error("%s:%04x: ldr pc offset must be 32 bit aligned", 1691 error("%s:%04x: pc offset must be 32 bit aligned", 1692 name, start_offset + p - p_start); 1693 if (offset < 0) 1694 error("%s:%04x: Embedded literal value", 1474 1695 name, start_offset + p - p_start); 1475 1696 pc_offset = p - p_start + offset + 8; 1476 1697 if (pc_offset <= (p - p_start) || 1477 1698 pc_offset >= (p_end - p_start)) 1478 error("%s:%04x: ldrpc offset must point inside the function code",1699 error("%s:%04x: pc offset must point inside the function code", 1479 1700 name, start_offset + p - p_start); 1480 1701 if (pc_offset < min_offset) 1481 1702 min_offset = pc_offset; 1482 1703 if (outfile) { 1483 /* ldrposition */1704 /* The intruction position */ 1484 1705 fprintf(outfile, " arm_ldr_ptr->ptr = gen_code_ptr + %d;\n", 1485 1706 p - p_start); 1486 /* ldr data index*/1487 data_index = ((p_end - p_start) - pc_offset - 4) >> 2;1488 fprintf(outfile, " arm_ldr_ptr->data_ptr = arm_data_ptr +%d;\n",1707 /* The position of the constant pool data. */ 1708 data_index = ((p_end - p_start) - pc_offset) >> 2; 1709 fprintf(outfile, " arm_ldr_ptr->data_ptr = arm_data_ptr - %d;\n", 1489 1710 data_index); 1711 fprintf(outfile, " arm_ldr_ptr->type = %d;\n", type); 1490 1712 fprintf(outfile, " arm_ldr_ptr++;\n"); 1491 if (data_index >= sizeof(data_allocated))1492 error("%s: too many data", name);1493 if (!data_allocated[data_index]) {1494 ELF_RELOC *rel;1495 int i, addend, type;1496 const char *sym_name, *p;1497 char relname[1024];1498 1499 data_allocated[data_index] = 1;1500 1501 /* data value */1502 addend = get32((uint32_t *)(p_start + pc_offset));1503 relname[0] = '\0';1504 for(i = 0, rel = relocs;i < nb_relocs; i++, rel++) {1505 if (rel->r_offset == (pc_offset + start_offset)) {1506 sym_name = get_rel_sym_name(rel);1507 /* the compiler leave some unnecessary references to the code */1508 get_reloc_expr(relname, sizeof(relname), sym_name);1509 type = ELF32_R_TYPE(rel->r_info);1510 if (type != R_ARM_ABS32)1511 error("%s: unsupported data relocation", name);1512 break;1513 }1514 }1515 fprintf(outfile, " arm_data_ptr[%d] = 0x%x",1516 data_index, addend);1517 if (relname[0] != '\0')1518 fprintf(outfile, " + %s", relname);1519 fprintf(outfile, ";\n");1520 }1521 1713 } 1522 1714 } 1523 1715 p += 4; 1524 1716 } 1717 1718 /* Copy and relocate the constant pool data. 
*/ 1525 1719 data_size = (p_end - p_start) - min_offset; 1526 1720 if (data_size > 0 && outfile) { 1527 fprintf(outfile, " arm_data_ptr += %d;\n", data_size >> 2); 1528 } 1529 1530 /* the last instruction must be a mov pc, lr */ 1721 spare += min_offset; 1722 fprintf(outfile, " arm_data_ptr -= %d;\n", data_size >> 2); 1723 fprintf(outfile, " arm_pool_ptr -= %d;\n", data_size); 1724 fprintf(outfile, " if (arm_pool_ptr > gen_code_ptr + %d)\n" 1725 " arm_pool_ptr = gen_code_ptr + %d;\n", 1726 spare, spare); 1727 1728 data_index = 0; 1729 for (pc_offset = min_offset; 1730 pc_offset < p_end - p_start; 1731 pc_offset += 4) { 1732 1733 ELF_RELOC *rel; 1734 int i, addend, type; 1735 const char *sym_name; 1736 char relname[1024]; 1737 1738 /* data value */ 1739 addend = get32((uint32_t *)(p_start + pc_offset)); 1740 relname[0] = '\0'; 1741 for(i = 0, rel = relocs;i < nb_relocs; i++, rel++) { 1742 if (rel->r_offset == (pc_offset + start_offset)) { 1743 sym_name = get_rel_sym_name(rel); 1744 /* the compiler leave some unnecessary references to the code */ 1745 get_reloc_expr(relname, sizeof(relname), sym_name); 1746 type = ELF32_R_TYPE(rel->r_info); 1747 if (type != R_ARM_ABS32) 1748 error("%s: unsupported data relocation", name); 1749 break; 1750 } 1751 } 1752 fprintf(outfile, " arm_data_ptr[%d] = 0x%x", 1753 data_index, addend); 1754 if (relname[0] != '\0') 1755 fprintf(outfile, " + %s", relname); 1756 fprintf(outfile, ";\n"); 1757 1758 data_index++; 1759 } 1760 } 1761 1531 1762 if (p == p_start) 1532 1763 goto arm_ret_error; 1533 1764 p -= 4; 1534 1765 insn = get32((uint32_t *)p); 1535 if ((insn & 0xffff0000) != 0xe91b0000) { 1766 /* The last instruction must be an ldm instruction. There are several 1767 forms generated by gcc: 1768 ldmib sp, {..., pc} (implies a sp adjustment of +4) 1769 ldmia sp, {..., pc} 1770 ldmea fp, {..., pc} */ 1771 if ((insn & 0xffff8000) == 0xe99d8000) { 1772 if (outfile) { 1773 fprintf(outfile, 1774 " *(uint32_t *)(gen_code_ptr + %d) = 0xe28dd004;\n", 1775 p - p_start); 1776 } 1777 p += 4; 1778 } else if ((insn & 0xffff8000) != 0xe89d8000 1779 && (insn & 0xffff8000) != 0xe91b8000) { 1536 1780 arm_ret_error: 1537 1781 if (!outfile) 1538 1782 printf("%s: invalid epilog\n", name); 1539 1783 } 1540 return p - p_start; 1784 return p - p_start; 1541 1785 } 1542 1786 #endif … … 1566 1810 start_offset = offset; 1567 1811 #if defined(HOST_I386) || defined(HOST_X86_64) 1568 #if defined(CONFIG_FORMAT_COFF) || defined(CONFIG_FORMAT_AOUT) 1812 #if defined(CONFIG_FORMAT_COFF) || defined(CONFIG_FORMAT_AOUT) || defined(CONFIG_FORMAT_MACH) 1569 1813 { 1570 1814 uint8_t *p; … … 1636 1880 if (get32((uint32_t *)p) != 0x00840008) 1637 1881 error("br.ret.sptk.many b0;; expected at the end of %s", name); 1638 copy_size = p- p_start;1882 copy_size = p_end - p_start; 1639 1883 } 1640 1884 #elif defined(HOST_SPARC) 1641 1885 { 1886 #define INSN_SAVE 0x9de3a000 1887 #define INSN_RET 0x81c7e008 1888 #define INSN_RETL 0x81c3e008 1889 #define INSN_RESTORE 0x81e80000 1890 #define INSN_RETURN 0x81cfe008 1891 #define INSN_NOP 0x01000000 1892 #define INSN_ADD_SP 0x9c03a000 /* add %sp, nn, %sp */ 1893 #define INSN_SUB_SP 0x9c23a000 /* sub %sp, nn, %sp */ 1894 1642 1895 uint32_t start_insn, end_insn1, end_insn2; 1643 1896 uint8_t *p; … … 1648 1901 end_insn1 = get32((uint32_t *)(p + 0x0)); 1649 1902 end_insn2 = get32((uint32_t *)(p + 0x4)); 1650 if ((start_insn & ~0x1fff) == 0x9de3a000) { 1903 if (((start_insn & ~0x1fff) == INSN_SAVE) || 1904 (start_insn & ~0x1fff) == INSN_ADD_SP) { 1651 1905 p_start += 
0x4; 1652 1906 start_offset += 0x4; 1653 if ((int)(start_insn | ~0x1fff) < -128) 1654 error("Found bogus save at the start of %s", name); 1655 if (end_insn1 != 0x81c7e008 || end_insn2 != 0x81e80000) 1907 if (end_insn1 == INSN_RET && end_insn2 == INSN_RESTORE) 1908 /* SPARC v7: ret; restore; */ ; 1909 else if (end_insn1 == INSN_RETURN && end_insn2 == INSN_NOP) 1910 /* SPARC v9: return; nop; */ ; 1911 else if (end_insn1 == INSN_RETL && (end_insn2 & ~0x1fff) == INSN_SUB_SP) 1912 /* SPARC v7: retl; sub %sp, nn, %sp; */ ; 1913 else 1914 1656 1915 error("ret; restore; not found at end of %s", name); 1916 } else if (end_insn1 == INSN_RETL && end_insn2 == INSN_NOP) { 1917 ; 1657 1918 } else { 1658 1919 error("No save at the beginning of %s", name); … … 1662 1923 if (p > p_start) { 1663 1924 skip_insn = get32((uint32_t *)(p - 0x4)); 1664 if (skip_insn == 0x01000000)1925 if (skip_insn == INSN_NOP) 1665 1926 p -= 4; 1666 1927 } … … 1670 1931 #elif defined(HOST_SPARC64) 1671 1932 { 1933 #define INSN_SAVE 0x9de3a000 1934 #define INSN_RET 0x81c7e008 1935 #define INSN_RETL 0x81c3e008 1936 #define INSN_RESTORE 0x81e80000 1937 #define INSN_RETURN 0x81cfe008 1938 #define INSN_NOP 0x01000000 1939 #define INSN_ADD_SP 0x9c03a000 /* add %sp, nn, %sp */ 1940 #define INSN_SUB_SP 0x9c23a000 /* sub %sp, nn, %sp */ 1941 1672 1942 uint32_t start_insn, end_insn1, end_insn2, skip_insn; 1673 1943 uint8_t *p; 1674 1944 p = (void *)(p_end - 8); 1945 #if 0 1946 /* XXX: check why it occurs */ 1675 1947 if (p <= p_start) 1676 1948 error("empty code for %s", name); 1949 #endif 1677 1950 start_insn = get32((uint32_t *)(p_start + 0x0)); 1678 1951 end_insn1 = get32((uint32_t *)(p + 0x0)); 1679 1952 end_insn2 = get32((uint32_t *)(p + 0x4)); 1680 if ((start_insn & ~0x1fff) == 0x9de3a000) { 1953 if (((start_insn & ~0x1fff) == INSN_SAVE) || 1954 (start_insn & ~0x1fff) == INSN_ADD_SP) { 1681 1955 p_start += 0x4; 1682 1956 start_offset += 0x4; 1683 if ((int)(start_insn | ~0x1fff) < -256) 1684 error("Found bogus save at the start of %s", name); 1685 if (end_insn1 != 0x81c7e008 || end_insn2 != 0x81e80000) 1957 if (end_insn1 == INSN_RET && end_insn2 == INSN_RESTORE) 1958 /* SPARC v7: ret; restore; */ ; 1959 else if (end_insn1 == INSN_RETURN && end_insn2 == INSN_NOP) 1960 /* SPARC v9: return; nop; */ ; 1961 else if (end_insn1 == INSN_RETL && (end_insn2 & ~0x1fff) == INSN_SUB_SP) 1962 /* SPARC v7: retl; sub %sp, nn, %sp; */ ; 1963 else 1964 1686 1965 error("ret; restore; not found at end of %s", name); 1966 } else if (end_insn1 == INSN_RETL && end_insn2 == INSN_NOP) { 1967 ; 1687 1968 } else { 1688 1969 error("No save at the beginning of %s", name); … … 1700 1981 #elif defined(HOST_ARM) 1701 1982 { 1983 uint32_t insn; 1984 1702 1985 if ((p_end - p_start) <= 16) 1703 1986 error("%s: function too small", name); … … 1708 1991 p_start += 12; 1709 1992 start_offset += 12; 1993 insn = get32((uint32_t *)p_start); 1994 if ((insn & 0xffffff00) == 0xe24dd000) { 1995 /* Stack adjustment. Assume op uses the frame pointer. 
*/ 1996 p_start -= 4; 1997 start_offset -= 4; 1998 } 1710 1999 copy_size = arm_emit_ldr_info(name, start_offset, NULL, p_start, p_end, 1711 2000 relocs, nb_relocs); … … 1717 2006 if (p == p_start) 1718 2007 error("empty code for %s", name); 1719 / / remove NOP's, probably added for alignment2008 /* remove NOP's, probably added for alignment */ 1720 2009 while ((get16((uint16_t *)p) == 0x4e71) && 1721 2010 (p>p_start)) … … 1773 2062 fprintf(outfile, ";\n"); 1774 2063 } 2064 #if defined(HOST_IA64) 2065 fprintf(outfile, " extern char %s;\n", name); 2066 #else 1775 2067 fprintf(outfile, " extern void %s();\n", name); 2068 #endif 1776 2069 1777 2070 for(i = 0, rel = relocs;i < nb_relocs; i++, rel++) { … … 1783 2076 continue; 1784 2077 if (*sym_name && 2078 #ifdef VBOX 2079 !strstart(sym_name, "remR3PhysWrite", NULL) && 2080 !strstart(sym_name, "remR3PhysRead", NULL) && 2081 #endif 1785 2082 !strstart(sym_name, "__op_param", NULL) && 1786 2083 !strstart(sym_name, "__op_jmp", NULL) && … … 1794 2091 } 1795 2092 #endif 1796 #ifdef VBOX 1797 if ( strcmp(sym_name, "remR3PhysWriteBytes") 1798 && strcmp(sym_name, "remR3PhysReadBytes") 1799 && strcmp(sym_name, "remR3PhysReadUByte") 1800 && strcmp(sym_name, "remR3PhysReadSByte") 1801 && strcmp(sym_name, "remR3PhysReadUWord") 1802 && strcmp(sym_name, "remR3PhysReadSWord") 1803 && strcmp(sym_name, "remR3PhysReadULong") 1804 && strcmp(sym_name, "remR3PhysReadSLong") 1805 && strcmp(sym_name, "remR3PhysWriteByte") 1806 && strcmp(sym_name, "remR3PhysWriteWord") 1807 && strcmp(sym_name, "remR3PhysWriteDword")) 1808 #endif /* VBOX */ 1809 #ifdef __APPLE__ 2093 #if defined(__APPLE__) 1810 2094 /* set __attribute((unused)) on darwin because we wan't to avoid warning when we don't use the symbol */ 1811 2095 fprintf(outfile, "extern char %s __attribute__((unused));\n", sym_name); 2096 #elif defined(HOST_IA64) 2097 if (ELF64_R_TYPE(rel->r_info) != R_IA64_PCREL21B) 2098 /* 2099 * PCREL21 br.call targets generally 2100 * are out of range and need to go 2101 * through an "import stub". 
2102 */ 2103 fprintf(outfile, " extern char %s;\n", 2104 sym_name); 1812 2105 #else 1813 2106 fprintf(outfile, "extern char %s;\n", sym_name); … … 1889 2182 if (val >= start_offset && val <= start_offset + copy_size) { 1890 2183 n = strtol(p, NULL, 10); 1891 fprintf(outfile, " label_offsets[%d] = %ld + (gen_code_ptr - gen_code_buf);\n", n, val - start_offset);2184 fprintf(outfile, " label_offsets[%d] = %ld + (gen_code_ptr - gen_code_buf);\n", n, (long)(val - start_offset)); 1892 2185 } 1893 2186 } … … 1901 2194 1902 2195 /* patch relocations */ 1903 #if defined(HOST_I386) 2196 #if defined(HOST_I386) 1904 2197 { 1905 2198 char name[256]; 1906 2199 int type; 1907 2200 int addend; 2201 int reloc_offset; 1908 2202 for(i = 0, rel = relocs;i < nb_relocs; i++, rel++) { 1909 if (rel->r_offset >= start_offset && 1910 rel->r_offset < start_offset + copy_size) { 1911 #ifdef CONFIG_FORMAT_AOUT 2203 host_ulong offset = get_rel_offset(rel); 2204 if (offset >= start_offset && 2205 offset < start_offset + copy_size) { 2206 #if defined(CONFIG_FORMAT_AOUT) || defined(CONFIG_FORMAT_MACH) 1912 2207 sym_name = get_rel_sym_name_and_addend(rel, &addend); 1913 2208 #else 1914 2209 sym_name = get_rel_sym_name(rel); 1915 2210 #endif 2211 if (!sym_name) 2212 continue; 2213 reloc_offset = offset - start_offset; 1916 2214 if (strstart(sym_name, "__op_jmp", &p)) { 1917 2215 int n; … … 1922 2220 needs to be stored */ 1923 2221 fprintf(outfile, " jmp_offsets[%d] = %d + (gen_code_ptr - gen_code_buf);\n", 1924 n, rel ->r_offset - start_offset);2222 n, reloc_offset); 1925 2223 continue; 1926 2224 } 1927 2225 1928 2226 get_reloc_expr(name, sizeof(name), sym_name); 1929 #if ndef CONFIG_FORMAT_AOUT1930 addend = get32((uint32_t *)(text + rel->r_offset));2227 #if !defined(CONFIG_FORMAT_AOUT) && !defined(CONFIG_FORMAT_MACH) 2228 addend = get32((uint32_t *)(text + offset)); 1931 2229 #endif 1932 2230 #ifdef CONFIG_FORMAT_ELF … … 1935 2233 case R_386_32: 1936 2234 fprintf(outfile, " *(uint32_t *)(gen_code_ptr + %d) = %s + %d;\n", 1937 rel ->r_offset - start_offset, name, addend);2235 reloc_offset, name, addend); 1938 2236 break; 1939 2237 case R_386_PC32: 1940 2238 fprintf(outfile, " *(uint32_t *)(gen_code_ptr + %d) = %s - (long)(gen_code_ptr + %d) + %d;\n", 1941 rel ->r_offset - start_offset, name, rel->r_offset - start_offset, addend);2239 reloc_offset, name, reloc_offset, addend); 1942 2240 break; 1943 2241 default: … … 1962 2260 case DIR32: 1963 2261 fprintf(outfile, " *(uint32_t *)(gen_code_ptr + %d) = %s + %d;\n", 1964 rel ->r_offset - start_offset, name, addend);2262 reloc_offset, name, addend); 1965 2263 break; 1966 2264 case DISP32: 1967 2265 fprintf(outfile, " *(uint32_t *)(gen_code_ptr + %d) = %s - (long)(gen_code_ptr + %d) + %d -4;\n", 1968 rel ->r_offset - start_offset, name, rel->r_offset - start_offset, addend);2266 reloc_offset, name, reloc_offset, addend); 1969 2267 break; 1970 2268 default: 1971 2269 error("unsupported i386 relocation (%d)", type); 1972 2270 } 1973 #elif defined(CONFIG_FORMAT_AOUT) 2271 #elif defined(CONFIG_FORMAT_AOUT) || defined(CONFIG_FORMAT_MACH) 1974 2272 if (rel->r_pcrel) { 1975 2273 fprintf(outfile, " *(uint32_t *)(gen_code_ptr + %d) = %s - (long)(gen_code_ptr + %d) + %d;\n", 1976 rel->r_offset - start_offset, name, rel->r_offset - start_offset, addend);2274 offset - start_offset, name, offset - start_offset, addend); 1977 2275 } else { 1978 2276 fprintf(outfile, " *(uint32_t *)(gen_code_ptr + %d) = %s + %d;\n", 1979 rel->r_offset - start_offset, name, addend);2277 offset - start_offset, 
name, addend); 1980 2278 } 1981 2279 (void)type; … … 1991 2289 int type; 1992 2290 int addend; 2291 int reloc_offset; 1993 2292 for(i = 0, rel = relocs;i < nb_relocs; i++, rel++) { 1994 2293 if (rel->r_offset >= start_offset && … … 1998 2297 type = ELF32_R_TYPE(rel->r_info); 1999 2298 addend = rel->r_addend; 2299 reloc_offset = rel->r_offset - start_offset; 2000 2300 switch(type) { 2001 2301 case R_X86_64_32: 2002 2302 fprintf(outfile, " *(uint32_t *)(gen_code_ptr + %d) = (uint32_t)%s + %d;\n", 2003 rel ->r_offset - start_offset, name, addend);2303 reloc_offset, name, addend); 2004 2304 break; 2005 2305 case R_X86_64_32S: 2006 2306 fprintf(outfile, " *(uint32_t *)(gen_code_ptr + %d) = (int32_t)%s + %d;\n", 2007 rel ->r_offset - start_offset, name, addend);2307 reloc_offset, name, addend); 2008 2308 break; 2009 2309 case R_X86_64_PC32: 2010 2310 fprintf(outfile, " *(uint32_t *)(gen_code_ptr + %d) = %s - (long)(gen_code_ptr + %d) + %d;\n", 2011 rel ->r_offset - start_offset, name, rel->r_offset - start_offset, addend);2311 reloc_offset, name, reloc_offset, addend); 2012 2312 break; 2313 #ifdef VBOX /** @todo Re-check the sanity of this */ 2314 case R_X86_64_64: 2315 fprintf(outfile, " *(uint64_t *)(gen_code_ptr + %d) = (uint64_t)%s + %d;\n", 2316 reloc_offset, name, addend); 2317 break; 2318 #endif 2013 2319 default: 2014 2320 error("unsupported X86_64 relocation (%d)", type); … … 2023 2329 int type; 2024 2330 int addend; 2331 int reloc_offset; 2025 2332 for(i = 0, rel = relocs;i < nb_relocs; i++, rel++) { 2026 2333 if (rel->r_offset >= start_offset && 2027 2334 rel->r_offset < start_offset + copy_size) { 2028 2335 sym_name = strtab + symtab[ELFW(R_SYM)(rel->r_info)].st_name; 2336 reloc_offset = rel->r_offset - start_offset; 2029 2337 if (strstart(sym_name, "__op_jmp", &p)) { 2030 2338 int n; … … 2035 2343 needs to be stored */ 2036 2344 fprintf(outfile, " jmp_offsets[%d] = %d + (gen_code_ptr - gen_code_buf);\n", 2037 n, rel ->r_offset - start_offset);2345 n, reloc_offset); 2038 2346 continue; 2039 2347 } … … 2045 2353 case R_PPC_ADDR32: 2046 2354 fprintf(outfile, " *(uint32_t *)(gen_code_ptr + %d) = %s + %d;\n", 2047 rel ->r_offset - start_offset, name, addend);2355 reloc_offset, name, addend); 2048 2356 break; 2049 2357 case R_PPC_ADDR16_LO: 2050 2358 fprintf(outfile, " *(uint16_t *)(gen_code_ptr + %d) = (%s + %d);\n", 2051 rel ->r_offset - start_offset, name, addend);2359 reloc_offset, name, addend); 2052 2360 break; 2053 2361 case R_PPC_ADDR16_HI: 2054 2362 fprintf(outfile, " *(uint16_t *)(gen_code_ptr + %d) = (%s + %d) >> 16;\n", 2055 rel ->r_offset - start_offset, name, addend);2363 reloc_offset, name, addend); 2056 2364 break; 2057 2365 case R_PPC_ADDR16_HA: 2058 2366 fprintf(outfile, " *(uint16_t *)(gen_code_ptr + %d) = (%s + %d + 0x8000) >> 16;\n", 2059 rel ->r_offset - start_offset, name, addend);2367 reloc_offset, name, addend); 2060 2368 break; 2061 2369 case R_PPC_REL24: 2062 2370 /* warning: must be at 32 MB distancy */ 2063 2371 fprintf(outfile, " *(uint32_t *)(gen_code_ptr + %d) = (*(uint32_t *)(gen_code_ptr + %d) & ~0x03fffffc) | ((%s - (long)(gen_code_ptr + %d) + %d) & 0x03fffffc);\n", 2064 rel ->r_offset - start_offset, rel->r_offset - start_offset, name, rel->r_offset - start_offset, addend);2372 reloc_offset, reloc_offset, name, reloc_offset, addend); 2065 2373 break; 2066 2374 default: … … 2128 2436 switch(type) { 2129 2437 case PPC_RELOC_BR24: 2130 fprintf(outfile, "{\n"); 2131 fprintf(outfile, " uint32_t imm = *(uint32_t *)(gen_code_ptr + %d) & 0x3fffffc;\n", slide); 
2132 fprintf(outfile, " *(uint32_t *)(gen_code_ptr + %d) = (*(uint32_t *)(gen_code_ptr + %d) & ~0x03fffffc) | ((imm + ((long)%s - (long)gen_code_ptr) + %d) & 0x03fffffc);\n", 2438 if (!strstart(sym_name,"__op_gen_label",&p)) { 2439 fprintf(outfile, "{\n"); 2440 fprintf(outfile, " uint32_t imm = *(uint32_t *)(gen_code_ptr + %d) & 0x3fffffc;\n", slide); 2441 fprintf(outfile, " *(uint32_t *)(gen_code_ptr + %d) = (*(uint32_t *)(gen_code_ptr + %d) & ~0x03fffffc) | ((imm + ((long)%s - (long)gen_code_ptr) + %d) & 0x03fffffc);\n", 2133 2442 slide, slide, name, sslide ); 2134 fprintf(outfile, "}\n"); 2443 fprintf(outfile, "}\n"); 2444 } else { 2445 fprintf(outfile, " *(uint32_t *)(gen_code_ptr + %d) = (*(uint32_t *)(gen_code_ptr + %d) & ~0x03fffffc) | (((long)%s - (long)gen_code_ptr - %d) & 0x03fffffc);\n", 2446 slide, slide, final_sym_name, slide); 2447 } 2135 2448 break; 2136 2449 case PPC_RELOC_HI16: … … 2159 2472 int type; 2160 2473 int addend; 2474 int reloc_offset; 2161 2475 for(i = 0, rel = relocs;i < nb_relocs; i++, rel++) { 2162 2476 if (rel->r_offset >= start_offset && … … 2166 2480 type = ELF32_R_TYPE(rel->r_info); 2167 2481 addend = rel->r_addend; 2482 reloc_offset = rel->r_offset - start_offset; 2168 2483 switch(type) { 2169 2484 case R_390_32: 2170 2485 fprintf(outfile, " *(uint32_t *)(gen_code_ptr + %d) = %s + %d;\n", 2171 rel ->r_offset - start_offset, name, addend);2486 reloc_offset, name, addend); 2172 2487 break; 2173 2488 case R_390_16: 2174 2489 fprintf(outfile, " *(uint16_t *)(gen_code_ptr + %d) = %s + %d;\n", 2175 rel ->r_offset - start_offset, name, addend);2490 reloc_offset, name, addend); 2176 2491 break; 2177 2492 case R_390_8: 2178 2493 fprintf(outfile, " *(uint8_t *)(gen_code_ptr + %d) = %s + %d;\n", 2179 rel ->r_offset - start_offset, name, addend);2494 reloc_offset, name, addend); 2180 2495 break; 2181 2496 default: … … 2190 2505 if (rel->r_offset >= start_offset && rel->r_offset < start_offset + copy_size) { 2191 2506 int type; 2507 long reloc_offset; 2192 2508 2193 2509 type = ELF64_R_TYPE(rel->r_info); 2194 2510 sym_name = strtab + symtab[ELF64_R_SYM(rel->r_info)].st_name; 2511 reloc_offset = rel->r_offset - start_offset; 2195 2512 switch (type) { 2196 2513 case R_ALPHA_GPDISP: … … 2198 2515 as an immediate instead of constructing it from the pv or ra. */ 2199 2516 fprintf(outfile, " immediate_ldah(gen_code_ptr + %ld, gp);\n", 2200 rel ->r_offset - start_offset);2517 reloc_offset); 2201 2518 fprintf(outfile, " immediate_lda(gen_code_ptr + %ld, gp);\n", 2202 rel ->r_offset - start_offset +rel->r_addend);2519 reloc_offset + (int)rel->r_addend); 2203 2520 break; 2204 2521 case R_ALPHA_LITUSE: … … 2220 2537 if (strstart(sym_name, "__op_param", &p)) 2221 2538 fprintf(outfile, " immediate_ldah(gen_code_ptr + %ld, param%s);\n", 2222 rel ->r_offset - start_offset, p);2539 reloc_offset, p); 2223 2540 break; 2224 2541 case R_ALPHA_GPRELLOW: 2225 2542 if (strstart(sym_name, "__op_param", &p)) 2226 2543 fprintf(outfile, " immediate_lda(gen_code_ptr + %ld, param%s);\n", 2227 rel ->r_offset - start_offset, p);2544 reloc_offset, p); 2228 2545 break; 2229 2546 case R_ALPHA_BRSGP: … … 2231 2548 set up the gp from the pv. 
*/ 2232 2549 fprintf(outfile, " fix_bsr(gen_code_ptr + %ld, (uint8_t *) &%s - (gen_code_ptr + %ld + 4) + 8);\n", 2233 rel ->r_offset - start_offset, sym_name, rel->r_offset - start_offset);2550 reloc_offset, sym_name, reloc_offset); 2234 2551 break; 2235 2552 default: … … 2241 2558 #elif defined(HOST_IA64) 2242 2559 { 2560 unsigned long sym_idx; 2561 long code_offset; 2243 2562 char name[256]; 2244 2563 int type; 2245 int addend; 2564 long addend; 2565 2246 2566 for(i = 0, rel = relocs;i < nb_relocs; i++, rel++) { 2247 if (rel->r_offset >= start_offset && rel->r_offset < start_offset + copy_size) { 2248 sym_name = strtab + symtab[ELF64_R_SYM(rel->r_info)].st_name; 2249 get_reloc_expr(name, sizeof(name), sym_name); 2250 type = ELF64_R_TYPE(rel->r_info); 2251 addend = rel->r_addend; 2252 switch(type) { 2253 case R_IA64_LTOFF22: 2254 error("must implemnt R_IA64_LTOFF22 relocation"); 2255 case R_IA64_PCREL21B: 2256 error("must implemnt R_IA64_PCREL21B relocation"); 2257 default: 2258 error("unsupported ia64 relocation (%d)", type); 2259 } 2260 } 2567 sym_idx = ELF64_R_SYM(rel->r_info); 2568 if (rel->r_offset < start_offset 2569 || rel->r_offset >= start_offset + copy_size) 2570 continue; 2571 sym_name = (strtab + symtab[sym_idx].st_name); 2572 code_offset = rel->r_offset - start_offset; 2573 if (strstart(sym_name, "__op_jmp", &p)) { 2574 int n; 2575 n = strtol(p, NULL, 10); 2576 /* __op_jmp relocations are done at 2577 runtime to do translated block 2578 chaining: the offset of the instruction 2579 needs to be stored */ 2580 fprintf(outfile, " jmp_offsets[%d] =" 2581 "%ld + (gen_code_ptr - gen_code_buf);\n", 2582 n, code_offset); 2583 continue; 2584 } 2585 get_reloc_expr(name, sizeof(name), sym_name); 2586 type = ELF64_R_TYPE(rel->r_info); 2587 addend = rel->r_addend; 2588 switch(type) { 2589 case R_IA64_IMM64: 2590 fprintf(outfile, 2591 " ia64_imm64(gen_code_ptr + %ld, " 2592 "%s + %ld);\n", 2593 code_offset, name, addend); 2594 break; 2595 case R_IA64_LTOFF22X: 2596 case R_IA64_LTOFF22: 2597 fprintf(outfile, " IA64_LTOFF(gen_code_ptr + %ld," 2598 " %s + %ld, %d);\n", 2599 code_offset, name, addend, 2600 (type == R_IA64_LTOFF22X)); 2601 break; 2602 case R_IA64_LDXMOV: 2603 fprintf(outfile, 2604 " ia64_ldxmov(gen_code_ptr + %ld," 2605 " %s + %ld);\n", code_offset, name, addend); 2606 break; 2607 2608 case R_IA64_PCREL21B: 2609 if (strstart(sym_name, "__op_gen_label", NULL)) { 2610 fprintf(outfile, 2611 " ia64_imm21b(gen_code_ptr + %ld," 2612 " (long) (%s + %ld -\n\t\t" 2613 "((long) gen_code_ptr + %ld)) >> 4);\n", 2614 code_offset, name, addend, 2615 code_offset & ~0xfUL); 2616 } else { 2617 fprintf(outfile, 2618 " IA64_PLT(gen_code_ptr + %ld, " 2619 "%d);\t/* %s + %ld */\n", 2620 code_offset, 2621 get_plt_index(sym_name, addend), 2622 sym_name, addend); 2623 } 2624 break; 2625 default: 2626 error("unsupported ia64 relocation (0x%x)", 2627 type); 2628 } 2261 2629 } 2630 fprintf(outfile, " ia64_nop_b(gen_code_ptr + %d);\n", 2631 copy_size - 16 + 2); 2262 2632 } 2263 2633 #elif defined(HOST_SPARC) … … 2266 2636 int type; 2267 2637 int addend; 2638 int reloc_offset; 2268 2639 for(i = 0, rel = relocs;i < nb_relocs; i++, rel++) { 2269 2640 if (rel->r_offset >= start_offset && … … 2273 2644 type = ELF32_R_TYPE(rel->r_info); 2274 2645 addend = rel->r_addend; 2646 reloc_offset = rel->r_offset - start_offset; 2275 2647 switch(type) { 2276 2648 case R_SPARC_32: 2277 2649 fprintf(outfile, " *(uint32_t *)(gen_code_ptr + %d) = %s + %d;\n", 2278 rel ->r_offset - start_offset, name, addend);2650 
reloc_offset, name, addend); 2279 2651 break; 2280 2652 case R_SPARC_HI22: … … 2284 2656 " & ~0x3fffff) " 2285 2657 " | (((%s + %d) >> 10) & 0x3fffff);\n", 2286 rel->r_offset - start_offset, 2287 rel->r_offset - start_offset, 2288 name, addend); 2658 reloc_offset, reloc_offset, name, addend); 2289 2659 break; 2290 2660 case R_SPARC_LO10: … … 2294 2664 " & ~0x3ff) " 2295 2665 " | ((%s + %d) & 0x3ff);\n", 2296 rel->r_offset - start_offset, 2297 rel->r_offset - start_offset, 2298 name, addend); 2666 reloc_offset, reloc_offset, name, addend); 2299 2667 break; 2300 2668 case R_SPARC_WDISP30: … … 2305 2673 " | ((((%s + %d) - (long)(gen_code_ptr + %d))>>2) " 2306 2674 " & 0x3fffffff);\n", 2307 rel->r_offset - start_offset, 2308 rel->r_offset - start_offset, 2309 name, addend, 2310 rel->r_offset - start_offset); 2675 reloc_offset, reloc_offset, name, addend, 2676 reloc_offset); 2311 2677 break; 2678 case R_SPARC_WDISP22: 2679 fprintf(outfile, 2680 " *(uint32_t *)(gen_code_ptr + %d) = " 2681 "((*(uint32_t *)(gen_code_ptr + %d)) " 2682 " & ~0x3fffff) " 2683 " | ((((%s + %d) - (long)(gen_code_ptr + %d))>>2) " 2684 " & 0x3fffff);\n", 2685 rel->r_offset - start_offset, 2686 rel->r_offset - start_offset, 2687 name, addend, 2688 rel->r_offset - start_offset); 2689 break; 2312 2690 default: 2313 2691 error("unsupported sparc relocation (%d)", type); … … 2321 2699 int type; 2322 2700 int addend; 2701 int reloc_offset; 2323 2702 for(i = 0, rel = relocs;i < nb_relocs; i++, rel++) { 2324 2703 if (rel->r_offset >= start_offset && … … 2326 2705 sym_name = strtab + symtab[ELF64_R_SYM(rel->r_info)].st_name; 2327 2706 get_reloc_expr(name, sizeof(name), sym_name); 2328 type = ELF 64_R_TYPE(rel->r_info);2707 type = ELF32_R_TYPE(rel->r_info); 2329 2708 addend = rel->r_addend; 2709 reloc_offset = rel->r_offset - start_offset; 2330 2710 switch(type) { 2331 2711 case R_SPARC_32: 2332 2712 fprintf(outfile, " *(uint32_t *)(gen_code_ptr + %d) = %s + %d;\n", 2333 rel ->r_offset - start_offset, name, addend);2713 reloc_offset, name, addend); 2334 2714 break; 2335 2715 case R_SPARC_HI22: … … 2339 2719 " & ~0x3fffff) " 2340 2720 " | (((%s + %d) >> 10) & 0x3fffff);\n", 2341 rel->r_offset - start_offset, 2342 rel->r_offset - start_offset, 2343 name, addend); 2721 reloc_offset, reloc_offset, name, addend); 2344 2722 break; 2345 2723 case R_SPARC_LO10: … … 2349 2727 " & ~0x3ff) " 2350 2728 " | ((%s + %d) & 0x3ff);\n", 2351 rel->r_offset - start_offset, 2352 rel->r_offset - start_offset, 2353 name, addend); 2729 reloc_offset, reloc_offset, name, addend); 2730 break; 2731 case R_SPARC_OLO10: 2732 addend += ELF64_R_TYPE_DATA (rel->r_info); 2733 fprintf(outfile, 2734 " *(uint32_t *)(gen_code_ptr + %d) = " 2735 "((*(uint32_t *)(gen_code_ptr + %d)) " 2736 " & ~0x3ff) " 2737 " | ((%s + %d) & 0x3ff);\n", 2738 reloc_offset, reloc_offset, name, addend); 2354 2739 break; 2355 2740 case R_SPARC_WDISP30: … … 2360 2745 " | ((((%s + %d) - (long)(gen_code_ptr + %d))>>2) " 2361 2746 " & 0x3fffffff);\n", 2362 rel->r_offset - start_offset, 2363 rel->r_offset - start_offset, 2364 name, addend, 2365 rel->r_offset - start_offset); 2747 reloc_offset, reloc_offset, name, addend, 2748 reloc_offset); 2366 2749 break; 2750 case R_SPARC_WDISP22: 2751 fprintf(outfile, 2752 " *(uint32_t *)(gen_code_ptr + %d) = " 2753 "((*(uint32_t *)(gen_code_ptr + %d)) " 2754 " & ~0x3fffff) " 2755 " | ((((%s + %d) - (long)(gen_code_ptr + %d))>>2) " 2756 " & 0x3fffff);\n", 2757 reloc_offset, reloc_offset, name, addend, 2758 reloc_offset); 2759 break; 2367 2760 default: 2368 
error("unsupported sparc64 relocation (%d) ", type);2761 error("unsupported sparc64 relocation (%d) for symbol %s", type, name); 2369 2762 } 2370 2763 } … … 2376 2769 int type; 2377 2770 int addend; 2378 2771 int reloc_offset; 2772 uint32_t insn; 2773 2774 insn = get32((uint32_t *)(p_start + 4)); 2775 /* If prologue ends in sub sp, sp, #const then assume 2776 op has a stack frame and needs the frame pointer. */ 2777 if ((insn & 0xffffff00) == 0xe24dd000) { 2778 int i; 2779 uint32_t opcode; 2780 opcode = 0xe28db000; /* add fp, sp, #0. */ 2781 #if 0 2782 /* ??? Need to undo the extra stack adjustment at the end of the op. 2783 For now just leave the stack misaligned and hope it doesn't break anything 2784 too important. */ 2785 if ((insn & 4) != 0) { 2786 /* Preserve doubleword stack alignment. */ 2787 fprintf(outfile, 2788 " *(uint32_t *)(gen_code_ptr + 4)= 0x%x;\n", 2789 insn + 4); 2790 opcode -= 4; 2791 } 2792 #endif 2793 insn = get32((uint32_t *)(p_start - 4)); 2794 /* Calculate the size of the saved registers, 2795 excluding pc. */ 2796 for (i = 0; i < 15; i++) { 2797 if (insn & (1 << i)) 2798 opcode += 4; 2799 } 2800 fprintf(outfile, 2801 " *(uint32_t *)gen_code_ptr = 0x%x;\n", opcode); 2802 } 2379 2803 arm_emit_ldr_info(name, start_offset, outfile, p_start, p_end, 2380 2804 relocs, nb_relocs); … … 2390 2814 type = ELF32_R_TYPE(rel->r_info); 2391 2815 addend = get32((uint32_t *)(text + rel->r_offset)); 2816 reloc_offset = rel->r_offset - start_offset; 2392 2817 switch(type) { 2393 2818 case R_ARM_ABS32: 2394 2819 fprintf(outfile, " *(uint32_t *)(gen_code_ptr + %d) = %s + %d;\n", 2395 rel ->r_offset - start_offset, name, addend);2820 reloc_offset, name, addend); 2396 2821 break; 2397 2822 case R_ARM_PC24: 2823 case R_ARM_JUMP24: 2824 case R_ARM_CALL: 2398 2825 fprintf(outfile, " arm_reloc_pc24((uint32_t *)(gen_code_ptr + %d), 0x%x, %s);\n", 2399 rel ->r_offset - start_offset, addend, name);2826 reloc_offset, addend, name); 2400 2827 break; 2401 2828 default: … … 2410 2837 int type; 2411 2838 int addend; 2839 int reloc_offset; 2412 2840 Elf32_Sym *sym; 2413 2841 for(i = 0, rel = relocs;i < nb_relocs; i++, rel++) { … … 2419 2847 type = ELF32_R_TYPE(rel->r_info); 2420 2848 addend = get32((uint32_t *)(text + rel->r_offset)) + rel->r_addend; 2849 reloc_offset = rel->r_offset - start_offset; 2421 2850 switch(type) { 2422 2851 case R_68K_32: 2423 2852 fprintf(outfile, " /* R_68K_32 RELOC, offset %x */\n", rel->r_offset) ; 2424 2853 fprintf(outfile, " *(uint32_t *)(gen_code_ptr + %d) = %s + %#x;\n", 2425 rel ->r_offset - start_offset, name, addend );2854 reloc_offset, name, addend ); 2426 2855 break; 2427 2856 case R_68K_PC32: 2428 2857 fprintf(outfile, " /* R_68K_PC32 RELOC, offset %x */\n", rel->r_offset); 2429 2858 fprintf(outfile, " *(uint32_t *)(gen_code_ptr + %d) = %s - (long)(gen_code_ptr + %#x) + %#x;\n", 2430 rel ->r_offset - start_offset, name, rel->r_offset - start_offset, /*sym->st_value+*/ addend);2859 reloc_offset, name, reloc_offset, /*sym->st_value+*/ addend); 2431 2860 break; 2432 2861 default: … … 2498 2927 } else { 2499 2928 /* generate big code generation switch */ 2929 2930 #ifdef HOST_ARM 2931 /* We need to know the size of all the ops so we can figure out when 2932 to emit constant pools. This must be consistent with opc.h. 
*/
+ fprintf(outfile,
+         "static const uint32_t arm_opc_size[] = {\n"
+         "  0,\n" /* end */
+         "  0,\n" /* nop */
+         "  0,\n" /* nop1 */
+         "  0,\n" /* nop2 */
+         "  0,\n"); /* nop3 */
+ for(i = 0, sym = symtab; i < nb_syms; i++, sym++) {
+     const char *name;
+     name = get_sym_name(sym);
+     if (strstart(name, OP_PREFIX, NULL)) {
+         fprintf(outfile, "  %d,\n", sym->st_size);
+     }
+ }
+ fprintf(outfile,
+         "};\n");
+ #endif
  
  fprintf(outfile,
          "int dyngen_code(uint8_t *gen_code_buf,\n"
…
  
  #ifdef HOST_ARM
+ /* Arm is tricky because it uses constant pools for loading immediate values.
+    We assume (and require) each function is code followed by a constant pool.
+    All the ops are small so this should be ok.  For each op we figure out how
+    much "spare" range we have in the load instructions.  This allows us to
+    insert subsequent ops in between the op and the constant pool, eliminating
+    the need to jump around the pool.
+ 
+    We currently generate:
+ 
+    [ For this example we assume merging would move op1_pool out of range.
+      In practice we should be able to combine many ops before the offset
+      limits are reached. ]
+        op1_code;
+        op2_code;
+        goto op3;
+        op2_pool;
+        op1_pool;
+    op3:
+        op3_code;
+        ret;
+        op3_pool;
+ 
+    Ideally we'd put op1_pool before op2_pool, but that requires two passes.
+  */
  fprintf(outfile,
          " uint8_t *last_gen_code_ptr = gen_code_buf;\n"
          " LDREntry *arm_ldr_ptr = arm_ldr_table;\n"
-         " uint32_t *arm_data_ptr = arm_data_table;\n");
+         " uint32_t *arm_data_ptr = arm_data_table + ARM_LDR_TABLE_SIZE;\n"
+         /* Initialise the permissible pool offset to an arbitrary large value.
*/ 2990 " uint8_t *arm_pool_ptr = gen_code_buf + 0x1000000;\n"); 2991 #endif 2992 #ifdef HOST_IA64 2993 { 2994 long addend, not_first = 0; 2995 unsigned long sym_idx; 2996 int index, max_index; 2997 const char *sym_name; 2998 EXE_RELOC *rel; 2999 3000 max_index = -1; 3001 for (i = 0, rel = relocs;i < nb_relocs; i++, rel++) { 3002 sym_idx = ELF64_R_SYM(rel->r_info); 3003 sym_name = (strtab + symtab[sym_idx].st_name); 3004 if (strstart(sym_name, "__op_gen_label", NULL)) 3005 continue; 3006 if (ELF64_R_TYPE(rel->r_info) != R_IA64_PCREL21B) 3007 continue; 3008 3009 addend = rel->r_addend; 3010 index = get_plt_index(sym_name, addend); 3011 if (index <= max_index) 3012 continue; 3013 max_index = index; 3014 fprintf(outfile, " extern void %s(void);\n", sym_name); 3015 } 3016 3017 fprintf(outfile, 3018 " struct ia64_fixup *plt_fixes = NULL, " 3019 "*ltoff_fixes = NULL;\n" 3020 " static long plt_target[] = {\n\t"); 3021 3022 max_index = -1; 3023 for (i = 0, rel = relocs;i < nb_relocs; i++, rel++) { 3024 sym_idx = ELF64_R_SYM(rel->r_info); 3025 sym_name = (strtab + symtab[sym_idx].st_name); 3026 if (strstart(sym_name, "__op_gen_label", NULL)) 3027 continue; 3028 if (ELF64_R_TYPE(rel->r_info) != R_IA64_PCREL21B) 3029 continue; 3030 3031 addend = rel->r_addend; 3032 index = get_plt_index(sym_name, addend); 3033 if (index <= max_index) 3034 continue; 3035 max_index = index; 3036 3037 if (not_first) 3038 fprintf(outfile, ",\n\t"); 3039 not_first = 1; 3040 if (addend) 3041 fprintf(outfile, "(long) &%s + %ld", sym_name, addend); 3042 else 3043 fprintf(outfile, "(long) &%s", sym_name); 3044 } 3045 fprintf(outfile, "\n };\n" 3046 " unsigned int plt_offset[%u] = { 0 };\n", max_index + 1); 3047 } 2514 3048 #endif 2515 3049 … … 2523 3057 2524 3058 fprintf(outfile, 2525 " for(;;) {\n" 2526 " switch(*opc_ptr++) {\n" 2527 ); 3059 " for(;;) {\n"); 3060 3061 #ifdef HOST_ARM 3062 /* Generate constant pool if needed */ 3063 fprintf(outfile, 3064 " if (gen_code_ptr + arm_opc_size[*opc_ptr] >= arm_pool_ptr) {\n" 3065 " gen_code_ptr = arm_flush_ldr(gen_code_ptr, arm_ldr_table, " 3066 "arm_ldr_ptr, arm_data_ptr, arm_data_table + ARM_LDR_TABLE_SIZE, 1);\n" 3067 " last_gen_code_ptr = gen_code_ptr;\n" 3068 " arm_ldr_ptr = arm_ldr_table;\n" 3069 " arm_data_ptr = arm_data_table + ARM_LDR_TABLE_SIZE;\n" 3070 " arm_pool_ptr = gen_code_ptr + 0x1000000;\n" 3071 " }\n"); 3072 #endif 3073 3074 fprintf(outfile, 3075 " switch(*opc_ptr++) {\n"); 2528 3076 2529 3077 for(i = 0, sym = symtab; i < nb_syms; i++, sym++) { … … 2559 3107 " }\n"); 2560 3108 2561 #ifdef HOST_ARM2562 /* generate constant table if needed */2563 fprintf(outfile,2564 " if ((gen_code_ptr - last_gen_code_ptr) >= (MAX_FRAG_SIZE - MAX_OP_SIZE)) {\n"2565 " gen_code_ptr = arm_flush_ldr(gen_code_ptr, arm_ldr_table, arm_ldr_ptr, arm_data_table, arm_data_ptr, 1);\n"2566 " last_gen_code_ptr = gen_code_ptr;\n"2567 " arm_ldr_ptr = arm_ldr_table;\n"2568 " arm_data_ptr = arm_data_table;\n"2569 " }\n");2570 #endif2571 2572 3109 2573 3110 fprintf(outfile, … … 2575 3112 " the_end:\n" 2576 3113 ); 3114 #ifdef HOST_IA64 3115 fprintf(outfile, 3116 " {\n" 3117 " extern char code_gen_buffer[];\n" 3118 " ia64_apply_fixes(&gen_code_ptr, ltoff_fixes, " 3119 "(uint64_t) code_gen_buffer + 2*(1<<20), plt_fixes,\n\t\t\t" 3120 "sizeof(plt_target)/sizeof(plt_target[0]),\n\t\t\t" 3121 "plt_target, plt_offset);\n }\n"); 3122 #endif 2577 3123 2578 3124 /* generate some code patching */ 2579 3125 #ifdef HOST_ARM 2580 fprintf(outfile, "gen_code_ptr = arm_flush_ldr(gen_code_ptr, arm_ldr_table, 
arm_ldr_ptr, arm_data_table, arm_data_ptr, 0);\n"); 3126 fprintf(outfile, 3127 "if (arm_data_ptr != arm_data_table + ARM_LDR_TABLE_SIZE)\n" 3128 " gen_code_ptr = arm_flush_ldr(gen_code_ptr, arm_ldr_table, " 3129 "arm_ldr_ptr, arm_data_ptr, arm_data_table + ARM_LDR_TABLE_SIZE, 0);\n"); 2581 3130 #endif 2582 3131 /* flush instruction cache */ … … 2641 3190 return 0; 2642 3191 } 3192 3193 /* bird added: */ 3194 /* 3195 * Local Variables: 3196 * mode: c 3197 * c-file-style: k&r 3198 * c-basic-offset: 4 3199 * tab-width: 4 3200 * indent-tabs-mode: t 3201 * End: 3202 */ 3203 -
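The HOST_ARM comment near the top of this hunk describes the new constant-pool strategy: ops are packed back to back, pending literal loads accumulate, and arm_flush_ldr() dumps the pool as soon as the next op could push the oldest pending load out of reach. A rough standalone sketch of that flush-before-overflow pattern (all names and sizes here are invented for illustration, including the one-literal-per-op assumption; only the 4096-byte ldr range matches the fixup code above):

#include <stdio.h>

#define POOL_RANGE 4096            /* an ARM ldr immediate reaches +/-4095 bytes */

static int code_ptr;               /* current end of generated code */
static int pool_limit;             /* position of oldest pending load + POOL_RANGE */
static int pending;                /* literals waiting for a pool */

static void emit_op(int size)
{
    if (pending && code_ptr + size >= pool_limit) {
        /* like arm_flush_ldr(): emit the pool here and patch the loads */
        printf("  flush %d literal(s) at offset %d\n", pending, code_ptr);
        code_ptr += pending * 4;   /* one 32-bit pool slot per literal */
        pending = 0;
    }
    if (!pending)
        pool_limit = code_ptr + POOL_RANGE;  /* a new pool window opens */
    code_ptr += size;
    pending++;                     /* pretend each op loads one literal */
}

int main(void)
{
    int i;
    for (i = 0; i < 1000; i++)
        emit_op(16);
    printf("generated %d bytes\n", code_ptr);
    return 0;
}

The real generator does the same test per opcode, checking gen_code_ptr + arm_opc_size[*opc_ptr] >= arm_pool_ptr before dispatching each op, which is why the arm_opc_size[] table is emitted at all.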
trunk/src/recompiler/dyngen.h
r1 r2422 20 20 21 21 int __op_param1, __op_param2, __op_param3; 22 int __op_gen_label1, __op_gen_label2, __op_gen_label3; 22 #if defined(__sparc__) || defined(__arm__) 23 void __op_gen_label1(){} 24 void __op_gen_label2(){} 25 void __op_gen_label3(){} 26 #else 27 int __op_gen_label1, __op_gen_label2, __op_gen_label3; 28 #endif 23 29 int __op_jmp0, __op_jmp1, __op_jmp2, __op_jmp3; 24 30 … … 44 50 static inline void flush_icache_range(unsigned long start, unsigned long stop) 45 51 { 52 while (start < stop) { 53 asm volatile ("fc %0" :: "r"(start)); 54 start += 32; 55 } 56 asm volatile (";;sync.i;;srlz.i;;"); 46 57 } 47 58 #endif … … 55 66 unsigned long p; 56 67 57 p = start &~(MIN_CACHE_LINE_SIZE - 1);68 start &= ~(MIN_CACHE_LINE_SIZE - 1); 58 69 stop = (stop + MIN_CACHE_LINE_SIZE - 1) & ~(MIN_CACHE_LINE_SIZE - 1); 59 70 … … 135 146 #ifdef __arm__ 136 147 137 #define MAX_OP_SIZE (128 * 4) /* in bytes */ 138 /* max size of the code that can be generated without calling arm_flush_ldr */ 139 #define MAX_FRAG_SIZE (1024 * 4) 140 //#define MAX_FRAG_SIZE (135 * 4) /* for testing */ 148 #define ARM_LDR_TABLE_SIZE 1024 141 149 142 150 typedef struct LDREntry { 143 151 uint8_t *ptr; 144 152 uint32_t *data_ptr; 153 unsigned type:2; 145 154 } LDREntry; 146 155 147 156 static LDREntry arm_ldr_table[1024]; 148 static uint32_t arm_data_table[ 1024];157 static uint32_t arm_data_table[ARM_LDR_TABLE_SIZE]; 149 158 150 159 extern char exec_loop; … … 165 174 uint8_t *data_ptr; 166 175 uint32_t insn; 176 uint32_t mask; 167 177 168 data_size = ( uint8_t *)data_end - (uint8_t *)data_start;178 data_size = (data_end - data_start) << 2; 169 179 170 180 if (gen_jmp) { … … 188 198 (unsigned long)data_ptr - 189 199 (unsigned long)ptr - 8; 190 insn = *ptr & ~(0xfff | 0x00800000);191 200 if (offset < 0) { 192 offset = - offset; 193 } else { 194 insn |= 0x00800000; 195 } 196 if (offset > 0xfff) { 197 fprintf(stderr, "Error ldr offset\n"); 201 fprintf(stderr, "Negative constant pool offset\n"); 198 202 abort(); 199 203 } 200 insn |= offset; 204 switch (le->type) { 205 case 0: /* ldr */ 206 mask = ~0x00800fff; 207 if (offset >= 4096) { 208 fprintf(stderr, "Bad ldr offset\n"); 209 abort(); 210 } 211 break; 212 case 1: /* ldc */ 213 mask = ~0x008000ff; 214 if (offset >= 1024 ) { 215 fprintf(stderr, "Bad ldc offset\n"); 216 abort(); 217 } 218 break; 219 case 2: /* add */ 220 mask = ~0xfff; 221 if (offset >= 1024 ) { 222 fprintf(stderr, "Bad add offset\n"); 223 abort(); 224 } 225 break; 226 default: 227 fprintf(stderr, "Bad pc relative fixup\n"); 228 abort(); 229 } 230 insn = *ptr & mask; 231 switch (le->type) { 232 case 0: /* ldr */ 233 insn |= offset | 0x00800000; 234 break; 235 case 1: /* ldc */ 236 insn |= (offset >> 2) | 0x00800000; 237 break; 238 case 2: /* add */ 239 insn |= (offset >> 2) | 0xf00; 240 break; 241 } 201 242 *ptr = insn; 202 243 } … … 205 246 206 247 #endif /* __arm__ */ 248 249 #ifdef __ia64 250 251 252 /* Patch instruction with "val" where "mask" has 1 bits. 
*/ 253 static inline void ia64_patch (uint64_t insn_addr, uint64_t mask, uint64_t val) 254 { 255 uint64_t m0, m1, v0, v1, b0, b1, *b = (uint64_t *) (insn_addr & -16); 256 # define insn_mask ((1UL << 41) - 1) 257 unsigned long shift; 258 259 b0 = b[0]; b1 = b[1]; 260 shift = 5 + 41 * (insn_addr % 16); /* 5 template, 3 x 41-bit insns */ 261 if (shift >= 64) { 262 m1 = mask << (shift - 64); 263 v1 = val << (shift - 64); 264 } else { 265 m0 = mask << shift; m1 = mask >> (64 - shift); 266 v0 = val << shift; v1 = val >> (64 - shift); 267 b[0] = (b0 & ~m0) | (v0 & m0); 268 } 269 b[1] = (b1 & ~m1) | (v1 & m1); 270 } 271 272 static inline void ia64_patch_imm60 (uint64_t insn_addr, uint64_t val) 273 { 274 ia64_patch(insn_addr, 275 0x011ffffe000UL, 276 ( ((val & 0x0800000000000000UL) >> 23) /* bit 59 -> 36 */ 277 | ((val & 0x00000000000fffffUL) << 13) /* bit 0 -> 13 */)); 278 ia64_patch(insn_addr - 1, 0x1fffffffffcUL, val >> 18); 279 } 280 281 static inline void ia64_imm64 (void *insn, uint64_t val) 282 { 283 /* Ignore the slot number of the relocation; GCC and Intel 284 toolchains differed for some time on whether IMM64 relocs are 285 against slot 1 (Intel) or slot 2 (GCC). */ 286 uint64_t insn_addr = (uint64_t) insn & ~3UL; 287 288 ia64_patch(insn_addr + 2, 289 0x01fffefe000UL, 290 ( ((val & 0x8000000000000000UL) >> 27) /* bit 63 -> 36 */ 291 | ((val & 0x0000000000200000UL) << 0) /* bit 21 -> 21 */ 292 | ((val & 0x00000000001f0000UL) << 6) /* bit 16 -> 22 */ 293 | ((val & 0x000000000000ff80UL) << 20) /* bit 7 -> 27 */ 294 | ((val & 0x000000000000007fUL) << 13) /* bit 0 -> 13 */) 295 ); 296 ia64_patch(insn_addr + 1, 0x1ffffffffffUL, val >> 22); 297 } 298 299 static inline void ia64_imm60b (void *insn, uint64_t val) 300 { 301 /* Ignore the slot number of the relocation; GCC and Intel 302 toolchains differed for some time on whether IMM64 relocs are 303 against slot 1 (Intel) or slot 2 (GCC). */ 304 uint64_t insn_addr = (uint64_t) insn & ~3UL; 305 306 if (val + ((uint64_t) 1 << 59) >= (1UL << 60)) 307 fprintf(stderr, "%s: value %ld out of IMM60 range\n", 308 __FUNCTION__, (int64_t) val); 309 ia64_patch_imm60(insn_addr + 2, val); 310 } 311 312 static inline void ia64_imm22 (void *insn, uint64_t val) 313 { 314 if (val + (1 << 21) >= (1 << 22)) 315 fprintf(stderr, "%s: value %li out of IMM22 range\n", 316 __FUNCTION__, (int64_t)val); 317 ia64_patch((uint64_t) insn, 0x01fffcfe000UL, 318 ( ((val & 0x200000UL) << 15) /* bit 21 -> 36 */ 319 | ((val & 0x1f0000UL) << 6) /* bit 16 -> 22 */ 320 | ((val & 0x00ff80UL) << 20) /* bit 7 -> 27 */ 321 | ((val & 0x00007fUL) << 13) /* bit 0 -> 13 */)); 322 } 323 324 /* Like ia64_imm22(), but also clear bits 20-21. For addl, this has 325 the effect of turning "addl rX=imm22,rY" into "addl 326 rX=imm22,r0". 
*/ 327 static inline void ia64_imm22_r0 (void *insn, uint64_t val) 328 { 329 if (val + (1 << 21) >= (1 << 22)) 330 fprintf(stderr, "%s: value %li out of IMM22 range\n", 331 __FUNCTION__, (int64_t)val); 332 ia64_patch((uint64_t) insn, 0x01fffcfe000UL | (0x3UL << 20), 333 ( ((val & 0x200000UL) << 15) /* bit 21 -> 36 */ 334 | ((val & 0x1f0000UL) << 6) /* bit 16 -> 22 */ 335 | ((val & 0x00ff80UL) << 20) /* bit 7 -> 27 */ 336 | ((val & 0x00007fUL) << 13) /* bit 0 -> 13 */)); 337 } 338 339 static inline void ia64_imm21b (void *insn, uint64_t val) 340 { 341 if (val + (1 << 20) >= (1 << 21)) 342 fprintf(stderr, "%s: value %li out of IMM21b range\n", 343 __FUNCTION__, (int64_t)val); 344 ia64_patch((uint64_t) insn, 0x11ffffe000UL, 345 ( ((val & 0x100000UL) << 16) /* bit 20 -> 36 */ 346 | ((val & 0x0fffffUL) << 13) /* bit 0 -> 13 */)); 347 } 348 349 static inline void ia64_nop_b (void *insn) 350 { 351 ia64_patch((uint64_t) insn, (1UL << 41) - 1, 2UL << 37); 352 } 353 354 static inline void ia64_ldxmov(void *insn, uint64_t val) 355 { 356 if (val + (1 << 21) < (1 << 22)) 357 ia64_patch((uint64_t) insn, 0x1fff80fe000UL, 8UL << 37); 358 } 359 360 static inline int ia64_patch_ltoff(void *insn, uint64_t val, 361 int relaxable) 362 { 363 if (relaxable && (val + (1 << 21) < (1 << 22))) { 364 ia64_imm22_r0(insn, val); 365 return 0; 366 } 367 return 1; 368 } 369 370 struct ia64_fixup { 371 struct ia64_fixup *next; 372 void *addr; /* address that needs to be patched */ 373 long value; 374 }; 375 376 #define IA64_PLT(insn, plt_index) \ 377 do { \ 378 struct ia64_fixup *fixup = alloca(sizeof(*fixup)); \ 379 fixup->next = plt_fixes; \ 380 plt_fixes = fixup; \ 381 fixup->addr = (insn); \ 382 fixup->value = (plt_index); \ 383 plt_offset[(plt_index)] = 1; \ 384 } while (0) 385 386 #define IA64_LTOFF(insn, val, relaxable) \ 387 do { \ 388 if (ia64_patch_ltoff(insn, val, relaxable)) { \ 389 struct ia64_fixup *fixup = alloca(sizeof(*fixup)); \ 390 fixup->next = ltoff_fixes; \ 391 ltoff_fixes = fixup; \ 392 fixup->addr = (insn); \ 393 fixup->value = (val); \ 394 } \ 395 } while (0) 396 397 static inline void ia64_apply_fixes (uint8_t **gen_code_pp, 398 struct ia64_fixup *ltoff_fixes, 399 uint64_t gp, 400 struct ia64_fixup *plt_fixes, 401 int num_plts, 402 unsigned long *plt_target, 403 unsigned int *plt_offset) 404 { 405 static const uint8_t plt_bundle[] = { 406 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, /* nop 0; movl r1=GP */ 407 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x60, 408 409 0x05, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, /* nop 0; brl IP */ 410 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0 411 }; 412 uint8_t *gen_code_ptr = *gen_code_pp, *plt_start, *got_start, *vp; 413 struct ia64_fixup *fixup; 414 unsigned int offset = 0; 415 struct fdesc { 416 long ip; 417 long gp; 418 } *fdesc; 419 int i; 420 421 if (plt_fixes) { 422 plt_start = gen_code_ptr; 423 424 for (i = 0; i < num_plts; ++i) { 425 if (plt_offset[i]) { 426 plt_offset[i] = offset; 427 offset += sizeof(plt_bundle); 428 429 fdesc = (struct fdesc *) plt_target[i]; 430 memcpy(gen_code_ptr, plt_bundle, sizeof(plt_bundle)); 431 ia64_imm64 (gen_code_ptr + 0x02, fdesc->gp); 432 ia64_imm60b(gen_code_ptr + 0x12, 433 (fdesc->ip - (long) (gen_code_ptr + 0x10)) >> 4); 434 gen_code_ptr += sizeof(plt_bundle); 435 } 436 } 437 438 for (fixup = plt_fixes; fixup; fixup = fixup->next) 439 ia64_imm21b(fixup->addr, 440 ((long) plt_start + plt_offset[fixup->value] 441 - ((long) fixup->addr & ~0xf)) >> 4); 442 } 443 444 got_start = gen_code_ptr; 445 446 /* First, 
create the GOT: */ 447 for (fixup = ltoff_fixes; fixup; fixup = fixup->next) { 448 /* first check if we already have this value in the GOT: */ 449 for (vp = got_start; vp < gen_code_ptr; ++vp) 450 if (*(uint64_t *) vp == fixup->value) 451 break; 452 if (vp == gen_code_ptr) { 453 /* Nope, we need to put the value in the GOT: */ 454 *(uint64_t *) vp = fixup->value; 455 gen_code_ptr += 8; 456 } 457 ia64_imm22(fixup->addr, (long) vp - gp); 458 } 459 /* Keep code ptr aligned. */ 460 if ((long) gen_code_ptr & 15) 461 gen_code_ptr += 8; 462 *gen_code_pp = gen_code_ptr; 463 } 464 465 #endif -
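ia64_patch() above relies on the IA64 bundle layout: 128 bits holding a 5-bit template followed by three 41-bit instruction slots, so slot 1 straddles the two 64-bit words and slot 2 lives entirely in the second. A self-contained sketch of the same masked read-modify-write (the shift arithmetic mirrors the 5 + 41 * slot expression above; the test value is made up):

#include <stdio.h>
#include <stdint.h>

static void patch_slot(uint64_t b[2], int slot, uint64_t mask, uint64_t val)
{
    unsigned shift = 5 + 41 * slot;            /* skip the 5-bit template */

    if (shift >= 64) {                         /* slot 2: word 1 only */
        uint64_t m1 = mask << (shift - 64);
        uint64_t v1 = val << (shift - 64);
        b[1] = (b[1] & ~m1) | (v1 & m1);
        return;
    }
    b[0] = (b[0] & ~(mask << shift)) | ((val & mask) << shift);
    if (shift + 41 > 64) {                     /* slot 1: spills into word 1 */
        uint64_t m1 = mask >> (64 - shift);
        uint64_t v1 = (val & mask) >> (64 - shift);
        b[1] = (b[1] & ~m1) | (v1 & m1);
    }
}

int main(void)
{
    uint64_t bundle[2] = { 0, 0 };
    uint64_t insn_mask = (1ULL << 41) - 1;     /* a whole slot, as in ia64_patch */

    patch_slot(bundle, 1, insn_mask, 0x123456789ULL);
    printf("%016llx %016llx\n",
           (unsigned long long)bundle[1], (unsigned long long)bundle[0]);
    return 0;
}

The version above adds one quirk the sketch omits: the slot number rides in the low bits of insn_addr (insn_addr % 16), with addr & -16 recovering the bundle base, which is why ia64_imm64() masks the address before calling it.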
trunk/src/recompiler/elf.h
r1 r2422 32 32 #define PT_HIPROC 0x7fffffff 33 33 #define PT_MIPS_REGINFO 0x70000000 34 #define PT_MIPS_OPTIONS 0x70000001 34 35 35 36 /* Flags in the e_flags field of the header */ 37 /* MIPS architecture level. */ 38 #define EF_MIPS_ARCH_1 0x00000000 /* -mips1 code. */ 39 #define EF_MIPS_ARCH_2 0x10000000 /* -mips2 code. */ 40 #define EF_MIPS_ARCH_3 0x20000000 /* -mips3 code. */ 41 #define EF_MIPS_ARCH_4 0x30000000 /* -mips4 code. */ 42 #define EF_MIPS_ARCH_5 0x40000000 /* -mips5 code. */ 43 #define EF_MIPS_ARCH_32 0x50000000 /* MIPS32 code. */ 44 #define EF_MIPS_ARCH_64 0x60000000 /* MIPS64 code. */ 45 46 /* The ABI of a file. */ 47 #define EF_MIPS_ABI_O32 0x00001000 /* O32 ABI. */ 48 #define EF_MIPS_ABI_O64 0x00002000 /* O32 extended for 64 bit. */ 49 36 50 #define EF_MIPS_NOREORDER 0x00000001 37 51 #define EF_MIPS_PIC 0x00000002 38 52 #define EF_MIPS_CPIC 0x00000004 53 #define EF_MIPS_ABI2 0x00000020 54 #define EF_MIPS_OPTIONS_FIRST 0x00000080 55 #define EF_MIPS_32BITMODE 0x00000100 56 #define EF_MIPS_ABI 0x0000f000 39 57 #define EF_MIPS_ARCH 0xf0000000 40 58 … … 210 228 #define ELF64_R_SYM(i) ((i) >> 32) 211 229 #define ELF64_R_TYPE(i) ((i) & 0xffffffff) 230 #define ELF64_R_TYPE_DATA(i) (((ELF64_R_TYPE(i) >> 8) ^ 0x00800000) - 0x00800000) 212 231 213 232 #define R_386_NONE 0 … … 309 328 #define R_SPARC_11 31 310 329 #define R_SPARC_64 32 330 #define R_SPARC_OLO10 33 311 331 #define R_SPARC_WDISP16 40 312 332 #define R_SPARC_WDISP19 41 … … 483 503 #define R_ARM_GOT32 26 /* 32 bit GOT entry */ 484 504 #define R_ARM_PLT32 27 /* 32 bit PLT address */ 505 #define R_ARM_CALL 28 506 #define R_ARM_JUMP24 29 485 507 #define R_ARM_GNU_VTENTRY 100 486 508 #define R_ARM_GNU_VTINHERIT 101 -
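The 64-bit relocation macros kept here are what the new HOST_IA64 loop in dyngen.c consumes (sym_idx = ELF64_R_SYM(rel->r_info), then a type check against R_IA64_PCREL21B): r_info packs the symbol-table index into the high 32 bits and the relocation type into the low 32. A toy round-trip with invented index/type values (ELF64_R_INFO is the standard companion packing macro, not part of this diff):

#include <stdio.h>
#include <stdint.h>

#define ELF64_R_SYM(i)     ((i) >> 32)
#define ELF64_R_TYPE(i)    ((i) & 0xffffffff)
#define ELF64_R_INFO(s, t) (((uint64_t)(s) << 32) | ((uint32_t)(t)))

int main(void)
{
    uint64_t r_info = ELF64_R_INFO(42, 7);   /* symbol #42, type 7: both arbitrary */

    printf("sym=%llu type=%llu\n",
           (unsigned long long)ELF64_R_SYM(r_info),
           (unsigned long long)ELF64_R_TYPE(r_info));
    return 0;
}

The newly added ELF64_R_TYPE_DATA then takes the bits above the low byte of the type word and sign-extends them (the xor/subtract pair is a 24-bit sign extension), for targets such as SPARC64 that fold an extra addend into r_info.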
trunk/src/recompiler/exec-all.h
r1 r2422 25 25 26 26 #ifdef VBOX 27 #include <VBox/tm.h> 28 #ifndef LOG_GROUP 29 #define LOG_GROUP LOG_GROUP_REM 30 #endif 31 #include <VBox/log.h> 32 #include "REMInternal.h" 27 # include <VBox/tm.h> 28 # include <VBox/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */ 29 # ifndef LOG_GROUP 30 # define LOG_GROUP LOG_GROUP_REM 31 # endif 32 # include <VBox/log.h> 33 # include "REMInternal.h" 34 # include <VBox/vm.h> 33 35 #endif /* VBOX */ 34 36 … … 40 42 #endif 41 43 42 #if GCC_MAJOR< 344 #if __GNUC__ < 3 43 45 #define __builtin_expect(x, n) (x) 44 46 #endif … … 73 75 extern uint8_t gen_opc_cc_op[OPC_BUF_SIZE]; 74 76 extern uint8_t gen_opc_instr_start[OPC_BUF_SIZE]; 77 extern target_ulong gen_opc_jump_pc[2]; 78 extern uint32_t gen_opc_hflags[OPC_BUF_SIZE]; 75 79 76 80 typedef void (GenOpFunc)(void); … … 102 106 void *puc); 103 107 void cpu_resume_from_signal(CPUState *env1, void *puc); 104 void cpu_exec_init( void);105 int page_unprotect( unsignedlong address, unsigned long pc, void *puc);108 void cpu_exec_init(CPUState *env); 109 int page_unprotect(target_ulong address, unsigned long pc, void *puc); 106 110 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end, 107 111 int is_cpu_write_access); … … 109 113 void tlb_flush_page(CPUState *env, target_ulong addr); 110 114 void tlb_flush(CPUState *env, int flush_global); 111 int tlb_set_page(CPUState *env, target_ulong vaddr, 112 target_phys_addr_t paddr, int prot, 113 int is_user, int is_softmmu); 114 115 int tlb_set_page_exec(CPUState *env, target_ulong vaddr, 116 target_phys_addr_t paddr, int prot, 117 int is_user, int is_softmmu); 118 static inline int tlb_set_page(CPUState *env, target_ulong vaddr, 119 target_phys_addr_t paddr, int prot, 120 int is_user, int is_softmmu) 121 { 122 if (prot & PAGE_READ) 123 prot |= PAGE_EXEC; 124 return tlb_set_page_exec(env, vaddr, paddr, prot, is_user, is_softmmu); 125 } 115 126 116 127 #define CODE_GEN_MAX_SIZE 65536 117 128 #define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */ 118 119 #define CODE_GEN_HASH_BITS 15120 #define CODE_GEN_HASH_SIZE (1 << CODE_GEN_HASH_BITS)121 129 122 130 #define CODE_GEN_PHYS_HASH_BITS 15 … … 138 146 #if defined(__alpha__) 139 147 #define CODE_GEN_BUFFER_SIZE (2 * 1024 * 1024) 148 #elif defined(__ia64) 149 #define CODE_GEN_BUFFER_SIZE (4 * 1024 * 1024) /* range of addl */ 140 150 #elif defined(__powerpc__) 141 151 #define CODE_GEN_BUFFER_SIZE (6 * 1024 * 1024) 142 152 #else 143 #define CODE_GEN_BUFFER_SIZE ( 8* 1024 * 1024)153 #define CODE_GEN_BUFFER_SIZE (16 * 1024 * 1024) 144 154 #endif 145 155 … … 183 193 184 194 uint8_t *tc_ptr; /* pointer to the translated code */ 185 struct TranslationBlock *hash_next; /* next matching tb for virtual address */186 195 /* next matching tb for physical address. */ 187 196 struct TranslationBlock *phys_hash_next; … … 197 206 uint16_t tb_jmp_offset[4]; /* offset of jump instruction */ 198 207 #else 208 # if defined(VBOX) && defined(__DARWIN__) && defined(__AMD64__) 209 # error "First 4GB aren't reachable. jmp dword [tb_next] wont work." 
210 # endif 199 211 uint32_t tb_next[2]; /* address of jump generated code */ 200 212 #endif … … 207 219 } TranslationBlock; 208 220 209 static inline unsigned int tb_hash_func(target_ulong pc) 210 { 211 return pc & (CODE_GEN_HASH_SIZE - 1); 221 static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc) 222 { 223 target_ulong tmp; 224 tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)); 225 return (tmp >> TB_JMP_PAGE_BITS) & TB_JMP_PAGE_MASK; 226 } 227 228 static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc) 229 { 230 target_ulong tmp; 231 tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)); 232 return (((tmp >> TB_JMP_PAGE_BITS) & TB_JMP_PAGE_MASK) | 233 (tmp & TB_JMP_ADDR_MASK)); 212 234 } 213 235 … … 219 241 TranslationBlock *tb_alloc(target_ulong pc); 220 242 void tb_flush(CPUState *env); 221 void tb_link(TranslationBlock *tb);222 243 void tb_link_phys(TranslationBlock *tb, 223 244 target_ulong phys_pc, target_ulong phys_page2); 224 245 225 extern TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];226 246 extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE]; 227 247 228 248 extern uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE]; 229 249 extern uint8_t *code_gen_ptr; 230 231 /* find a translation block in the translation cache. If not found,232 return NULL and the pointer to the last element of the list in pptb */233 static inline TranslationBlock *tb_find(TranslationBlock ***pptb,234 target_ulong pc,235 target_ulong cs_base,236 unsigned int flags)237 {238 TranslationBlock **ptb, *tb;239 unsigned int h;240 241 h = tb_hash_func(pc);242 ptb = &tb_hash[h];243 for(;;) {244 tb = *ptb;245 if (!tb)246 break;247 if (tb->pc == pc && tb->cs_base == cs_base && tb->flags == flags)248 return tb;249 ptb = &tb->hash_next;250 }251 *pptb = ptb;252 return NULL;253 }254 255 250 256 251 #if defined(USE_DIRECT_JUMP) … … 336 331 #endif 337 332 333 #define ASM_OP_LABEL_NAME(n, opname) \ 334 ASM_NAME(__op_label) #n "." ASM_NAME(opname) 335 338 336 #if defined(__powerpc__) 339 337 … … 342 340 do {\ 343 341 asm volatile (ASM_DATA_SECTION\ 344 ASM_ NAME(__op_label) #n "." ASM_NAME(opname) ":\n"\342 ASM_OP_LABEL_NAME(n, opname) ":\n"\ 345 343 ".long 1f\n"\ 346 344 ASM_PREVIOUS_SECTION \ … … 355 353 do {\ 356 354 asm volatile (".section .data\n"\ 357 ASM_ NAME(__op_label) #n "." ASM_NAME(opname) ":\n"\355 ASM_OP_LABEL_NAME(n, opname) ":\n"\ 358 356 ".long 1f\n"\ 359 357 ASM_PREVIOUS_SECTION \ … … 366 364 /* jump to next block operations (more portable code, does not need 367 365 cache flushing, but slower because of indirect jump) */ 366 # ifdef VBOX /* bird: GCC4 (and Ming 3.4.x?) will remove the two unused static 367 variables. I've added a dummy __asm__ statement which reference 368 the two variables to prevent this. 
*/ 369 # if __GNUC__ >= 4 370 # define GOTO_TB(opname, tbparam, n)\ 371 do {\ 372 static void __attribute__((unused)) *dummy ## n = &&dummy_label ## n;\ 373 static void __attribute__((unused)) *__op_label ## n \ 374 __asm__(ASM_OP_LABEL_NAME(n, opname)) = &&label ## n;\ 375 __asm__ ("" : : "m" (__op_label ## n), "m" (dummy ## n));\ 376 goto *(void *)(uintptr_t)(((TranslationBlock *)tbparam)->tb_next[n]);\ 377 label ## n: ;\ 378 dummy_label ## n: ;\ 379 } while (0) 380 # else 381 # define GOTO_TB(opname, tbparam, n)\ 382 do {\ 383 static void __attribute__((unused)) *dummy ## n = &&dummy_label ## n;\ 384 static void __attribute__((unused)) *__op_label ## n \ 385 __asm__(ASM_OP_LABEL_NAME(n, opname)) = &&label ## n;\ 386 goto *(void *)(uintptr_t)(((TranslationBlock *)tbparam)->tb_next[n]);\ 387 label ## n: ;\ 388 dummy_label ## n: ;\ 389 } while (0) 390 # endif 391 # else /* !VBOX */ 368 392 #define GOTO_TB(opname, tbparam, n)\ 369 393 do {\ 370 394 static void __attribute__((unused)) *dummy ## n = &&dummy_label ## n;\ 371 static void __attribute__((unused)) *__op_label ## n = &&label ## n;\ 395 static void __attribute__((unused)) *__op_label ## n \ 396 __asm__(ASM_OP_LABEL_NAME(n, opname)) = &&label ## n;\ 372 397 goto *(void *)(((TranslationBlock *)tbparam)->tb_next[n]);\ 373 398 label ## n: ;\ 374 399 dummy_label ## n: ;\ 375 400 } while (0) 376 377 #endif 378 379 /* XXX: will be suppressed */ 380 #define JUMP_TB(opname, tbparam, n, eip)\ 381 do {\ 382 GOTO_TB(opname, tbparam, n);\ 383 T0 = (long)(tbparam) + (n);\ 384 EIP = (int32_t)eip;\ 385 EXIT_TB();\ 386 } while (0) 401 # endif /* !VBOX */ 402 403 #endif 387 404 388 405 extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4]; … … 505 522 #endif 506 523 524 #ifdef __ia64 525 #include <ia64intrin.h> 526 527 static inline int testandset (int *p) 528 { 529 return __sync_lock_test_and_set (p, 1); 530 } 531 #endif 532 507 533 typedef int spinlock_t; 508 534 … … 578 604 # ifdef VBOX 579 605 target_ulong remR3PhysGetPhysicalAddressCode(CPUState *env, target_ulong addr, CPUTLBEntry *pTLBEntry); 606 # if defined(PGM_DYNAMIC_RAM_ALLOC) && !defined(REM_PHYS_ADDR_IN_TLB) 580 607 target_ulong remR3HCVirt2GCPhys(void *env, void *addr); 581 # endif 608 # endif 609 # endif 582 610 /* NOTE: this function can trigger an exception */ 583 611 /* NOTE2: the returned address is not exactly the physical address: it 584 612 is the offset relative to phys_ram_base */ 585 /* XXX: i386 target specific */586 613 static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr) 587 614 { … … 593 620 #elif defined (TARGET_PPC) 594 621 is_user = msr_pr; 622 #elif defined (TARGET_MIPS) 623 is_user = ((env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_UM); 595 624 #elif defined (TARGET_SPARC) 596 625 is_user = (env->psrs == 0); 597 #else 598 #error "Unimplemented !" 
599 #endif 600 if (__builtin_expect(env->tlb_read[is_user][index].address != 626 #elif defined (TARGET_ARM) 627 is_user = ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR); 628 #elif defined (TARGET_SH4) 629 is_user = ((env->sr & SR_MD) == 0); 630 #else 631 #error unimplemented CPU 632 #endif 633 if (__builtin_expect(env->tlb_table[is_user][index].addr_code != 601 634 (addr & TARGET_PAGE_MASK), 0)) { 602 635 ldub_code(addr); 603 636 } 604 pd = env->tlb_ read[is_user][index].address& ~TARGET_PAGE_MASK;605 if (pd > IO_MEM_ROM ) {606 # ifdef VBOX637 pd = env->tlb_table[is_user][index].addr_code & ~TARGET_PAGE_MASK; 638 if (pd > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) { 639 # ifdef VBOX 607 640 /* deal with non-MMIO access handlers. */ 608 return remR3PhysGetPhysicalAddressCode(env, addr, &env->tlb_ read[is_user][index]);609 # else641 return remR3PhysGetPhysicalAddressCode(env, addr, &env->tlb_table[is_user][index]); 642 # else 610 643 cpu_abort(env, "Trying to execute code outside RAM or ROM at 0x%08lx\n", addr); 611 # endif644 # endif 612 645 } 613 #ifdef VBOX 614 return remR3HCVirt2GCPhys(env, (void *)(addr + env->tlb_read[is_user][index].addend)); 615 #else 616 return addr + env->tlb_read[is_user][index].addend - (unsigned long)phys_ram_base; 617 #endif 618 } 619 #endif 646 # if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB) 647 return addr + env->tlb_table[is_user][index].addend; 648 # elif defined(VBOX) && defined(PGM_DYNAMIC_RAM_ALLOC) 649 return remR3HCVirt2GCPhys(env, (void *)(addr + env->tlb_table[is_user][index].addend)); 650 # else 651 return addr + env->tlb_table[is_user][index].addend - (unsigned long)phys_ram_base; 652 # endif 653 } 654 #endif 655 656 657 #ifdef USE_KQEMU 658 #define KQEMU_MODIFY_PAGE_MASK (0xff & ~(VGA_DIRTY_FLAG | CODE_DIRTY_FLAG)) 659 660 int kqemu_init(CPUState *env); 661 int kqemu_cpu_exec(CPUState *env); 662 void kqemu_flush_page(CPUState *env, target_ulong addr); 663 void kqemu_flush(CPUState *env, int global); 664 void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr); 665 void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr); 666 void kqemu_cpu_interrupt(CPUState *env); 667 void kqemu_record_dump(void); 668 669 static inline int kqemu_is_ok(CPUState *env) 670 { 671 return(env->kqemu_enabled && 672 (env->cr[0] & CR0_PE_MASK) && 673 !(env->hflags & HF_INHIBIT_IRQ_MASK) && 674 (env->eflags & IF_MASK) && 675 !(env->eflags & VM_MASK) && 676 (env->kqemu_enabled == 2 || 677 ((env->hflags & HF_CPL_MASK) == 3 && 678 (env->eflags & IOPL_MASK) != IOPL_MASK))); 679 } 680 681 #endif -
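The jump-cache hashes introduced in this file split the cache index into a page part and an address part, which is what lets the reworked tlb_flush_page() wipe exactly the blocks that can hold TBs from one guest page. A runnable sketch follows; the size constants mirror the usual qemu cpu-defs.h values (a 4096-entry cache in 64-entry page blocks) and are assumptions here, not visible in this diff:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t target_ulong;

#define TARGET_PAGE_BITS  12
#define TB_JMP_CACHE_BITS 12                         /* 4096 cache entries */
#define TB_JMP_PAGE_BITS  (TB_JMP_CACHE_BITS / 2)
#define TB_JMP_PAGE_SIZE  (1 << TB_JMP_PAGE_BITS)    /* 64-entry blocks */
#define TB_JMP_ADDR_MASK  (TB_JMP_PAGE_SIZE - 1)
#define TB_JMP_PAGE_MASK  (((1 << TB_JMP_CACHE_BITS) - 1) & ~TB_JMP_ADDR_MASK)

static unsigned int tb_jmp_cache_hash_func(target_ulong pc)
{
    target_ulong tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return ((tmp >> TB_JMP_PAGE_BITS) & TB_JMP_PAGE_MASK) | (tmp & TB_JMP_ADDR_MASK);
}

int main(void)
{
    /* the first two PCs share a guest page, so they share the upper
       (page) index bits and land in the same 64-entry block; the
       third PC is on another page and lands in a different block */
    printf("%03x %03x %03x\n",
           tb_jmp_cache_hash_func(0x08048000),
           tb_jmp_cache_hash_func(0x08048010),
           tb_jmp_cache_hash_func(0x08049000));
    return 0;
}

Note that tlb_flush_page() above memsets both the block for addr and the block for addr - TARGET_PAGE_SIZE, since a TB that starts on the preceding page can overlap the flushed one.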
trunk/src/recompiler/exec.c
r55 r2422 19 19 */ 20 20 #include "config.h" 21 #ifndef VBOX 21 22 #ifdef _WIN32 22 23 #include <windows.h> … … 32 33 #include <unistd.h> 33 34 #include <inttypes.h> 35 #else /* VBOX */ 36 # include <stdlib.h> 37 # include <stdio.h> 38 # include <inttypes.h> 39 # include <iprt/alloc.h> 40 # include <iprt/string.h> 41 # include <iprt/param.h> 42 # include <VBox/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */ 43 #endif /* VBOX */ 34 44 35 45 #include "cpu.h" 36 46 #include "exec-all.h" 37 38 #ifdef VBOX 39 #include <VBox/vm.h> 47 #if defined(CONFIG_USER_ONLY) 48 #include <qemu.h> 40 49 #endif 41 50 … … 43 52 //#define DEBUG_FLUSH 44 53 //#define DEBUG_TLB 54 //#define DEBUG_UNASSIGNED 45 55 46 56 /* make various TB consistency checks */ … … 48 58 //#define DEBUG_TLB_CHECK 49 59 60 #if !defined(CONFIG_USER_ONLY) 61 /* TB consistency checks only implemented for usermode emulation. */ 62 #undef DEBUG_TB_CHECK 63 #endif 64 50 65 /* threshold to flush the translated code buffer */ 51 66 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE) … … 56 71 #define MMAP_AREA_END 0xa8000000 57 72 73 #if defined(TARGET_SPARC64) 74 #define TARGET_PHYS_ADDR_SPACE_BITS 41 75 #elif defined(TARGET_PPC64) 76 #define TARGET_PHYS_ADDR_SPACE_BITS 42 77 #else 78 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */ 79 #define TARGET_PHYS_ADDR_SPACE_BITS 32 80 #endif 81 58 82 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS]; 59 TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];60 83 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE]; 61 84 int nb_tbs; … … 63 86 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED; 64 87 65 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE]; 88 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] 89 #if defined(__MINGW32__) 90 __attribute__((aligned (16))); 91 #else 92 __attribute__((aligned (32))); 93 #endif 66 94 uint8_t *code_gen_ptr; 67 95 68 #if !defined(VBOX) 96 #ifndef VBOX 97 int phys_ram_size; 69 98 int phys_ram_fd; 70 #endif /* !VBOX */ 71 uint32_t phys_ram_size; 99 int phys_ram_size; 100 #else /* VBOX */ 101 RTGCPHYS phys_ram_size; 102 /* we have memory ranges (the high PC-BIOS mapping) which 103 causes some pages to fall outside the dirty map here. */ 104 uint32_t phys_ram_dirty_size; 105 #endif /* VBOX */ 106 #if !defined(VBOX) || !(defined(PGM_DYNAMIC_RAM_ALLOC) || defined(REM_PHYS_ADDR_IN_TLB)) 72 107 uint8_t *phys_ram_base; 108 #endif 73 109 uint8_t *phys_ram_dirty; 110 111 CPUState *first_cpu; 112 /* current CPU in the current thread. It is only valid inside 113 cpu_exec() */ 114 CPUState *cpu_single_env; 74 115 75 116 typedef struct PageDesc { … … 87 128 typedef struct PhysPageDesc { 88 129 /* offset in host memory of the page + io_index in the low 12 bits */ 89 u nsigned longphys_offset;130 uint32_t phys_offset; 90 131 } PhysPageDesc; 91 92 typedef struct VirtPageDesc {93 /* physical address of code page. It is valid only if 'valid_tag'94 matches 'virt_valid_tag' */95 target_ulong phys_addr;96 unsigned int valid_tag;97 #if !defined(CONFIG_SOFTMMU)98 /* original page access rights. 
It is valid only if 'valid_tag'99 matches 'virt_valid_tag' */100 unsigned int prot;101 #endif102 } VirtPageDesc;103 132 104 133 #define L2_BITS 10 … … 117 146 /* XXX: for system emulation, it could just be an array */ 118 147 static PageDesc *l1_map[L1_SIZE]; 119 static PhysPageDesc *l1_phys_map[L1_SIZE]; 120 121 #if !defined(CONFIG_USER_ONLY) 122 static VirtPageDesc *l1_virt_map[L1_SIZE]; 123 static unsigned int virt_valid_tag; 124 #endif 148 PhysPageDesc **l1_phys_map; 125 149 126 150 /* io memory support */ … … 133 157 /* log support */ 134 158 char *logfilename = "/tmp/qemu.log"; 159 #endif /* !VBOX */ 135 160 FILE *logfile; 136 161 int loglevel; 137 #endif138 162 139 163 /* statistics */ 140 164 static int tlb_flush_count; 141 165 static int tb_flush_count; 166 #ifndef VBOX 142 167 static int tb_phys_invalidate_count; 168 #endif /* !VBOX */ 143 169 144 170 static void page_init(void) … … 146 172 /* NOTE: we can always suppose that qemu_host_page_size >= 147 173 TARGET_PAGE_SIZE */ 174 #ifdef VBOX 175 RTMemProtect(code_gen_buffer, sizeof(code_gen_buffer), 176 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE); 177 qemu_real_host_page_size = PAGE_SIZE; 178 #else /* !VBOX */ 148 179 #ifdef _WIN32 149 180 { … … 173 204 } 174 205 #endif 206 #endif /* !VBOX */ 175 207 176 208 if (qemu_host_page_size == 0) … … 182 214 qemu_host_page_bits++; 183 215 qemu_host_page_mask = ~(qemu_host_page_size - 1); 184 #if !defined(CONFIG_USER_ONLY) 185 virt_valid_tag = 1; 186 #endif 216 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *)); 217 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *)); 187 218 } 188 219 … … 212 243 } 213 244 214 static inline PhysPageDesc *phys_page_find_alloc(unsigned int index) 215 { 216 PhysPageDesc **lp, *p; 217 218 lp = &l1_phys_map[index >> L2_BITS]; 245 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc) 246 { 247 void **lp, **p; 248 PhysPageDesc *pd; 249 250 p = (void **)l1_phys_map; 251 #if TARGET_PHYS_ADDR_SPACE_BITS > 32 252 253 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS) 254 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS 255 #endif 256 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1)); 219 257 p = *lp; 220 258 if (!p) { 221 259 /* allocate if not found */ 222 p = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE); 223 memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE); 260 if (!alloc) 261 return NULL; 262 p = qemu_vmalloc(sizeof(void *) * L1_SIZE); 263 memset(p, 0, sizeof(void *) * L1_SIZE); 224 264 *lp = p; 225 265 } 226 return p + (index & (L2_SIZE - 1)); 227 } 228 229 static inline PhysPageDesc *phys_page_find(unsigned int index) 230 { 231 PhysPageDesc *p; 232 233 p = l1_phys_map[index >> L2_BITS]; 234 if (!p) 235 return 0; 236 #ifdef VBOX 237 p = p + (index & (L2_SIZE - 1)); 238 if ((p->phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING) 239 remR3GrowDynRange(p->phys_offset & TARGET_PAGE_MASK); 240 return p; 241 #else 242 return p + (index & (L2_SIZE - 1)); 243 #endif 266 #endif 267 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1)); 268 pd = *lp; 269 if (!pd) { 270 int i; 271 /* allocate if not found */ 272 if (!alloc) 273 return NULL; 274 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE); 275 *lp = pd; 276 for (i = 0; i < L2_SIZE; i++) 277 pd[i].phys_offset = IO_MEM_UNASSIGNED; 278 } 279 #if defined(VBOX) && defined(PGM_DYNAMIC_RAM_ALLOC) 280 pd = ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1)); 281 if (RT_UNLIKELY((pd->phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING)) 282 remR3GrowDynRange(pd->phys_offset & TARGET_PAGE_MASK); 283 
return pd; 284 #else 285 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1)); 286 #endif 287 } 288 289 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index) 290 { 291 return phys_page_find_alloc(index, 0); 244 292 } 245 293 246 294 #if !defined(CONFIG_USER_ONLY) 247 static void tlb_protect_code(CPUState *env, target_ulong addr); 248 static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr); 249 250 static inline VirtPageDesc *virt_page_find_alloc(unsigned int index) 251 { 252 VirtPageDesc **lp, *p; 253 254 /* XXX: should not truncate for 64 bit addresses */ 255 #if TARGET_LONG_BITS > 32 256 index &= (L1_SIZE - 1); 257 #endif 258 lp = &l1_virt_map[index >> L2_BITS]; 259 p = *lp; 260 if (!p) { 261 /* allocate if not found */ 262 p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE); 263 memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE); 264 *lp = p; 265 } 266 return p + (index & (L2_SIZE - 1)); 267 } 268 269 static inline VirtPageDesc *virt_page_find(unsigned int index) 270 { 271 VirtPageDesc *p; 272 273 p = l1_virt_map[index >> L2_BITS]; 274 if (!p) 275 return 0; 276 return p + (index & (L2_SIZE - 1)); 277 } 278 279 static void virt_page_flush(void) 280 { 281 int i, j; 282 VirtPageDesc *p; 283 284 virt_valid_tag++; 285 286 if (virt_valid_tag == 0) { 287 virt_valid_tag = 1; 288 for(i = 0; i < L1_SIZE; i++) { 289 p = l1_virt_map[i]; 290 if (p) { 291 for(j = 0; j < L2_SIZE; j++) 292 p[j].valid_tag = 0; 293 } 294 } 295 } 296 } 297 #else 298 static void virt_page_flush(void) 299 { 300 } 301 #endif 302 303 void cpu_exec_init(void) 304 { 295 static void tlb_protect_code(ram_addr_t ram_addr); 296 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr, 297 target_ulong vaddr); 298 #endif 299 300 void cpu_exec_init(CPUState *env) 301 { 302 CPUState **penv; 303 int cpu_index; 304 305 305 if (!code_gen_ptr) { 306 306 code_gen_ptr = code_gen_buffer; … … 308 308 io_mem_init(); 309 309 } 310 env->next_cpu = NULL; 311 penv = &first_cpu; 312 cpu_index = 0; 313 while (*penv != NULL) { 314 penv = (CPUState **)&(*penv)->next_cpu; 315 cpu_index++; 316 } 317 env->cpu_index = cpu_index; 318 *penv = env; 310 319 } 311 320 … … 339 348 /* flush all the translation blocks */ 340 349 /* XXX: tb_flush is currently not thread safe */ 341 void tb_flush(CPUState *env) 342 { 350 void tb_flush(CPUState *env1) 351 { 352 CPUState *env; 343 353 #if defined(DEBUG_FLUSH) 344 354 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n", … … 348 358 #endif 349 359 nb_tbs = 0; 350 memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *)); 351 virt_page_flush(); 360 361 for(env = first_cpu; env != NULL; env = env->next_cpu) { 362 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *)); 363 } 352 364 353 365 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *)); … … 367 379 int i; 368 380 address &= TARGET_PAGE_MASK; 369 for(i = 0;i < CODE_GEN_ HASH_SIZE; i++) {370 for(tb = tb_ hash[i]; tb != NULL; tb = tb->hash_next) {381 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) { 382 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) { 371 383 if (!(address + TARGET_PAGE_SIZE <= tb->pc || 372 384 address >= tb->pc + tb->size)) { 373 385 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n", 374 address, tb->pc, tb->size);386 address, (long)tb->pc, tb->size); 375 387 } 376 388 } … … 384 396 int i, flags1, flags2; 385 397 386 for(i = 0;i < CODE_GEN_ HASH_SIZE; i++) {387 for(tb = tb_ hash[i]; tb != NULL; tb = tb->hash_next) {398 for(i = 0;i 
< CODE_GEN_PHYS_HASH_SIZE; i++) { 399 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) { 388 400 flags1 = page_get_flags(tb->pc); 389 401 flags2 = page_get_flags(tb->pc + tb->size - 1); 390 402 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) { 391 403 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n", 392 tb->pc, tb->size, flags1, flags2);404 (long)tb->pc, tb->size, flags1, flags2); 393 405 } 394 406 } … … 485 497 } 486 498 487 static inline void tb_invalidate(TranslationBlock *tb) 488 { 499 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr) 500 { 501 CPUState *env; 502 PageDesc *p; 489 503 unsigned int h, n1; 490 TranslationBlock *tb1, *tb2, **ptb; 504 target_ulong phys_pc; 505 TranslationBlock *tb1, *tb2; 491 506 507 /* remove the TB from the hash list */ 508 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK); 509 h = tb_phys_hash_func(phys_pc); 510 tb_remove(&tb_phys_hash[h], tb, 511 offsetof(TranslationBlock, phys_hash_next)); 512 513 /* remove the TB from the page list */ 514 if (tb->page_addr[0] != page_addr) { 515 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS); 516 tb_page_remove(&p->first_tb, tb); 517 invalidate_page_bitmap(p); 518 } 519 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) { 520 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS); 521 tb_page_remove(&p->first_tb, tb); 522 invalidate_page_bitmap(p); 523 } 524 492 525 tb_invalidated_flag = 1; 493 526 494 527 /* remove the TB from the hash list */ 495 h = tb_hash_func(tb->pc); 496 ptb = &tb_hash[h]; 497 for(;;) { 498 tb1 = *ptb; 499 /* NOTE: the TB is not necessarily linked in the hash. It 500 indicates that it is not currently used */ 501 if (tb1 == NULL) 502 return; 503 if (tb1 == tb) { 504 *ptb = tb1->hash_next; 505 break; 506 } 507 ptb = &tb1->hash_next; 528 h = tb_jmp_cache_hash_func(tb->pc); 529 for(env = first_cpu; env != NULL; env = env->next_cpu) { 530 if (env->tb_jmp_cache[h] == tb) 531 env->tb_jmp_cache[h] = NULL; 508 532 } 509 533 … … 525 549 } 526 550 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */ 551 552 #ifndef VBOX 553 tb_phys_invalidate_count++; 554 #endif /* !VBOX */ 527 555 } 528 556 … … 530 558 void tb_invalidate_virt(CPUState *env, uint32_t eip) 531 559 { 532 # if 1560 # if 1 533 561 tb_flush(env); 534 # else562 # else 535 563 uint8_t *cs_base, *pc; 536 564 unsigned int flags, h, phys_pc; … … 547 575 if(tb) 548 576 { 549 # ifdef DEBUG577 # ifdef DEBUG 550 578 printf("invalidating TB (%08X) at %08X\n", tb, eip); 551 # endif579 # endif 552 580 tb_invalidate(tb); 553 581 //Note: this will leak TBs, but the whole cache will be flushed … … 557 585 tb->flags = 0; 558 586 } 559 # endif587 # endif 560 588 } 561 589 … … 571 599 # endif /* VBOX_STRICT */ 572 600 #endif /* VBOX */ 573 574 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)575 {576 PageDesc *p;577 unsigned int h;578 target_ulong phys_pc;579 580 /* remove the TB from the hash list */581 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);582 h = tb_phys_hash_func(phys_pc);583 tb_remove(&tb_phys_hash[h], tb,584 offsetof(TranslationBlock, phys_hash_next));585 586 /* remove the TB from the page list */587 if (tb->page_addr[0] != page_addr) {588 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);589 tb_page_remove(&p->first_tb, tb);590 invalidate_page_bitmap(p);591 }592 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {593 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);594 
tb_page_remove(&p->first_tb, tb);595 invalidate_page_bitmap(p);596 }597 598 tb_invalidate(tb);599 tb_phys_invalidate_count++;600 }601 601 602 602 static inline void set_bits(uint8_t *tab, int start, int len) … … 774 774 } 775 775 #endif /* TARGET_HAS_PRECISE_SMC */ 776 saved_tb = env->current_tb; 777 env->current_tb = NULL; 776 /* we need to do that to handle the case where a signal 777 occurs while doing tb_phys_invalidate() */ 778 saved_tb = NULL; 779 if (env) { 780 saved_tb = env->current_tb; 781 env->current_tb = NULL; 782 } 778 783 tb_phys_invalidate(tb, -1); 779 env->current_tb = saved_tb; 780 if (env->interrupt_request && env->current_tb) 781 cpu_interrupt(env, env->interrupt_request); 784 if (env) { 785 env->current_tb = saved_tb; 786 if (env->interrupt_request && env->current_tb) 787 cpu_interrupt(env, env->interrupt_request); 788 } 782 789 } 783 790 tb = tb_next; … … 905 912 /* add the tb in the target page and protect it if necessary */ 906 913 static inline void tb_alloc_page(TranslationBlock *tb, 907 unsigned int n, unsigned intpage_addr)914 unsigned int n, target_ulong page_addr) 908 915 { 909 916 PageDesc *p; … … 911 918 912 919 tb->page_addr[n] = page_addr; 913 p = page_find (page_addr >> TARGET_PAGE_BITS);920 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS); 914 921 tb->page_next[n] = p->first_tb; 915 922 last_first_tb = p->first_tb; … … 921 928 #if defined(CONFIG_USER_ONLY) 922 929 if (p->flags & PAGE_WRITE) { 923 unsigned long host_start, host_end, addr; 930 target_ulong addr; 931 PageDesc *p2; 924 932 int prot; 925 933 926 934 /* force the host page as non writable (writes will have a 927 935 page fault + mprotect overhead) */ 928 host_start = page_addr & qemu_host_page_mask; 929 host_end = host_start + qemu_host_page_size; 936 page_addr &= qemu_host_page_mask; 930 937 prot = 0; 931 for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) 932 prot |= page_get_flags(addr); 933 mprotect((void *)host_start, qemu_host_page_size, 938 for(addr = page_addr; addr < page_addr + qemu_host_page_size; 939 addr += TARGET_PAGE_SIZE) { 940 941 p2 = page_find (addr >> TARGET_PAGE_BITS); 942 if (!p2) 943 continue; 944 prot |= p2->flags; 945 p2->flags &= ~PAGE_WRITE; 946 page_get_flags(addr); 947 } 948 mprotect(g2h(page_addr), qemu_host_page_size, 934 949 (prot & PAGE_BITS) & ~PAGE_WRITE); 935 950 #ifdef DEBUG_TB_INVALIDATE 936 951 printf("protecting code page: 0x%08lx\n", 937 host_start); 938 #endif 939 p->flags &= ~PAGE_WRITE; 952 page_addr); 953 #endif 940 954 } 941 955 #else … … 944 958 allocated in a physical page */ 945 959 if (!last_first_tb) { 946 target_ulong virt_addr; 947 948 virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS); 949 tlb_protect_code(cpu_single_env, virt_addr); 960 tlb_protect_code(page_addr); 950 961 } 951 962 #endif … … 989 1000 else 990 1001 tb->page_addr[1] = -1; 991 #ifdef DEBUG_TB_CHECK992 tb_page_check();993 #endif994 }995 996 /* link the tb with the other TBs */997 void tb_link(TranslationBlock *tb)998 {999 #if !defined(CONFIG_USER_ONLY)1000 {1001 VirtPageDesc *vp;1002 target_ulong addr;1003 1004 /* save the code memory mappings (needed to invalidate the code) */1005 addr = tb->pc & TARGET_PAGE_MASK;1006 vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);1007 #ifdef DEBUG_TLB_CHECK1008 if (vp->valid_tag == virt_valid_tag &&1009 vp->phys_addr != tb->page_addr[0]) {1010 printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",1011 addr, tb->page_addr[0], vp->phys_addr);1012 }1013 #endif1014 vp->phys_addr = tb->page_addr[0];1015 
if (vp->valid_tag != virt_valid_tag) {1016 vp->valid_tag = virt_valid_tag;1017 #if !defined(CONFIG_SOFTMMU)1018 vp->prot = 0;1019 #endif1020 }1021 1022 if (tb->page_addr[1] != -1) {1023 addr += TARGET_PAGE_SIZE;1024 vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);1025 #ifdef DEBUG_TLB_CHECK1026 if (vp->valid_tag == virt_valid_tag &&1027 vp->phys_addr != tb->page_addr[1]) {1028 printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",1029 addr, tb->page_addr[1], vp->phys_addr);1030 }1031 #endif1032 vp->phys_addr = tb->page_addr[1];1033 if (vp->valid_tag != virt_valid_tag) {1034 vp->valid_tag = virt_valid_tag;1035 #if !defined(CONFIG_SOFTMMU)1036 vp->prot = 0;1037 #endif1038 }1039 }1040 }1041 #endif1042 1002 1043 1003 tb->jmp_first = (TranslationBlock *)((long)tb | 2); … … 1055 1015 if (tb->tb_next_offset[1] != 0xffff) 1056 1016 tb_reset_jump(tb, 1); 1017 1018 #ifdef DEBUG_TB_CHECK 1019 tb_page_check(); 1020 #endif 1057 1021 } 1058 1022 … … 1135 1099 } 1136 1100 1137 #if defined(TARGET_ I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)1101 #if defined(TARGET_HAS_ICE) 1138 1102 static void breakpoint_invalidate(CPUState *env, target_ulong pc) 1139 1103 { 1140 target_ulong phys_addr; 1141 1142 phys_addr = cpu_get_phys_page_debug(env, pc); 1143 tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0); 1104 target_ulong addr, pd; 1105 ram_addr_t ram_addr; 1106 PhysPageDesc *p; 1107 1108 addr = cpu_get_phys_page_debug(env, pc); 1109 p = phys_page_find(addr >> TARGET_PAGE_BITS); 1110 if (!p) { 1111 pd = IO_MEM_UNASSIGNED; 1112 } else { 1113 pd = p->phys_offset; 1114 } 1115 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK); 1116 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0); 1144 1117 } 1145 1118 #endif … … 1149 1122 int cpu_breakpoint_insert(CPUState *env, target_ulong pc) 1150 1123 { 1151 #if defined(TARGET_ I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)1124 #if defined(TARGET_HAS_ICE) 1152 1125 int i; 1153 1126 … … 1171 1144 int cpu_breakpoint_remove(CPUState *env, target_ulong pc) 1172 1145 { 1173 #if defined(TARGET_ I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)1146 #if defined(TARGET_HAS_ICE) 1174 1147 int i; 1175 1148 for(i = 0; i < env->nb_breakpoints; i++) { … … 1179 1152 return -1; 1180 1153 found: 1181 memmove(&env->breakpoints[i], &env->breakpoints[i + 1],1182 (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));1183 1154 env->nb_breakpoints--; 1155 if (i < env->nb_breakpoints) 1156 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints]; 1184 1157 1185 1158 breakpoint_invalidate(env, pc); … … 1194 1167 void cpu_single_step(CPUState *env, int enabled) 1195 1168 { 1196 #if defined(TARGET_ I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)1169 #if defined(TARGET_HAS_ICE) 1197 1170 if (env->singlestep_enabled != enabled) { 1198 1171 env->singlestep_enabled = enabled; … … 1231 1204 logfilename = strdup(filename); 1232 1205 } 1233 #endif 1206 #endif /* !VBOX */ 1234 1207 1235 1208 /* mask must never be zero, except for A20 change call */ … … 1239 1212 static int interrupt_lock; 1240 1213 1241 #if defined(VBOX)1214 #ifdef VBOX 1242 1215 VM_ASSERT_EMT(env->pVM); 1243 1216 ASMAtomicOrS32(&env->interrupt_request, mask); 1244 #else /* VBOX */1217 #else /* !VBOX */ 1245 1218 env->interrupt_request |= mask; 1246 #endif /* VBOX */1219 #endif /* !VBOX */ 1247 1220 /* if the cpu is currently executing code, we must unlink it and 1248 1221 all the potentially executing TB */ … … 1257 1230 void cpu_reset_interrupt(CPUState *env, int mask) 
1258 1231 { 1259 #if defined(VBOX)1232 #ifdef VBOX 1260 1233 /* 1261 1234 * Note: the current implementation can be executed by another thread without problems; make sure this remains true … … 1338 1311 #endif /* !VBOX */ 1339 1312 1340 #if !defined(VBOX)/* VBOX: we have our own routine. */1313 #ifndef VBOX /* VBOX: we have our own routine. */ 1341 1314 void cpu_abort(CPUState *env, const char *fmt, ...) 1342 1315 { … … 1373 1346 1374 1347 for(i = 0; i < CPU_TLB_SIZE; i++) { 1375 env->tlb_read[0][i].address = -1; 1376 env->tlb_write[0][i].address = -1; 1377 env->tlb_read[1][i].address = -1; 1378 env->tlb_write[1][i].address = -1; 1379 } 1380 1381 virt_page_flush(); 1382 memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *)); 1348 env->tlb_table[0][i].addr_read = -1; 1349 env->tlb_table[0][i].addr_write = -1; 1350 env->tlb_table[0][i].addr_code = -1; 1351 env->tlb_table[1][i].addr_read = -1; 1352 env->tlb_table[1][i].addr_write = -1; 1353 env->tlb_table[1][i].addr_code = -1; 1354 } 1355 1356 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *)); 1383 1357 1384 1358 #if !defined(CONFIG_SOFTMMU) 1385 1359 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START); 1386 #elif defined(VBOX) 1360 #endif 1361 #ifdef VBOX 1387 1362 /* inform raw mode about TLB flush */ 1388 1363 remR3FlushTLB(env, flush_global); 1389 1364 #endif 1365 #ifdef USE_KQEMU 1366 if (env->kqemu_enabled) { 1367 kqemu_flush(env, flush_global); 1368 } 1369 #endif 1390 1370 tlb_flush_count++; 1391 1371 } … … 1393 1373 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr) 1394 1374 { 1395 if (addr == (tlb_entry->address & 1396 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) 1397 tlb_entry->address = -1; 1375 if (addr == (tlb_entry->addr_read & 1376 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) || 1377 addr == (tlb_entry->addr_write & 1378 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) || 1379 addr == (tlb_entry->addr_code & 1380 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { 1381 tlb_entry->addr_read = -1; 1382 tlb_entry->addr_write = -1; 1383 tlb_entry->addr_code = -1; 1384 } 1398 1385 } 1399 1386 1400 1387 void tlb_flush_page(CPUState *env, target_ulong addr) 1401 1388 { 1402 int i, n; 1403 VirtPageDesc *vp; 1404 PageDesc *p; 1389 int i; 1405 1390 TranslationBlock *tb; 1406 1391 1407 1392 #if defined(DEBUG_TLB) 1408 printf("tlb_flush_page: 0x%08x\n", addr);1393 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr); 1409 1394 #endif 1410 1395 /* must reset current TB so that interrupts cannot modify the … … 1414 1399 addr &= TARGET_PAGE_MASK; 1415 1400 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 1416 tlb_flush_entry(&env->tlb_read[0][i], addr); 1417 tlb_flush_entry(&env->tlb_write[0][i], addr); 1418 tlb_flush_entry(&env->tlb_read[1][i], addr); 1419 tlb_flush_entry(&env->tlb_write[1][i], addr); 1420 1421 /* remove from the virtual pc hash table all the TB at this 1422 virtual address */ 1423 1424 vp = virt_page_find(addr >> TARGET_PAGE_BITS); 1425 if (vp && vp->valid_tag == virt_valid_tag) { 1426 p = page_find(vp->phys_addr >> TARGET_PAGE_BITS); 1427 if (p) { 1428 /* we remove all the links to the TBs in this virtual page */ 1429 tb = p->first_tb; 1430 while (tb != NULL) { 1431 n = (long)tb & 3; 1432 tb = (TranslationBlock *)((long)tb & ~3); 1433 if ((tb->pc & TARGET_PAGE_MASK) == addr || 1434 ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) { 1435 tb_invalidate(tb); 1436 } 1437 tb = tb->page_next[n]; 1438 } 1439 } 1440 vp->valid_tag = 0; 1441 } 1401 tlb_flush_entry(&env->tlb_table[0][i], addr); 
1402 tlb_flush_entry(&env->tlb_table[1][i], addr); 1403 1404 /* Discard jump cache entries for any tb which might potentially 1405 overlap the flushed page. */ 1406 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE); 1407 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb)); 1408 1409 i = tb_jmp_cache_hash_page(addr); 1410 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb)); 1442 1411 1443 1412 #if !defined(CONFIG_SOFTMMU) 1444 1413 if (addr < MMAP_AREA_END) 1445 1414 munmap((void *)addr, TARGET_PAGE_SIZE); 1446 #elif defined(VBOX) 1415 #endif 1416 #ifdef VBOX 1447 1417 /* inform raw mode about TLB page flush */ 1448 1418 remR3FlushPage(env, addr); 1449 #endif 1450 } 1451 1452 static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr) 1453 { 1454 if (addr == (tlb_entry->address & 1455 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) && 1456 (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE && 1457 (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) { 1458 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE; 1459 } 1419 #endif /* VBOX */ 1420 #ifdef USE_KQEMU 1421 if (env->kqemu_enabled) { 1422 kqemu_flush_page(env, addr); 1423 } 1424 #endif 1460 1425 } 1461 1426 1462 1427 /* update the TLBs so that writes to code in the virtual page 'addr' 1463 1428 can be detected */ 1464 static void tlb_protect_code(CPUState *env, target_ulong addr) 1465 { 1466 int i; 1467 1468 addr &= TARGET_PAGE_MASK; 1469 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 1470 tlb_protect_code1(&env->tlb_write[0][i], addr); 1471 tlb_protect_code1(&env->tlb_write[1][i], addr); 1472 #if !defined(CONFIG_SOFTMMU) 1473 /* NOTE: as we generated the code for this page, it is already at 1474 least readable */ 1475 if (addr < MMAP_AREA_END) 1476 mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ); 1477 #endif 1478 1429 static void tlb_protect_code(ram_addr_t ram_addr) 1430 { 1431 cpu_physical_memory_reset_dirty(ram_addr, 1432 ram_addr + TARGET_PAGE_SIZE, 1433 CODE_DIRTY_FLAG); 1479 1434 #if defined(VBOX) && defined(REM_MONITOR_CODE_PAGES) 1480 remR3ProtectCode(env, addr); 1481 #endif 1482 } 1483 1484 static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry, 1485 unsigned long phys_addr) 1486 { 1487 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE && 1488 ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) { 1489 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY; 1490 } 1435 /** @todo Retest this? This function has changed... 
*/ 1436 remR3ProtectCode(cpu_single_env, ram_addr); 1437 #endif 1491 1438 } 1492 1439 1493 1440 /* update the TLB so that writes in physical page 'phys_addr' are no longer 1494 tested self modifying code */ 1495 static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr) 1496 { 1497 int i; 1498 1499 phys_addr &= TARGET_PAGE_MASK; 1441 tested for self modifying code */ 1442 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr, 1443 target_ulong vaddr) 1444 { 1500 1445 #ifdef VBOX 1501 phys_addr = (unsigned long)remR3GCPhys2HCVirt(env, phys_addr); 1502 #else 1503 phys_addr += (long)phys_ram_base; 1504 #endif 1505 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 1506 tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr); 1507 tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr); 1446 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size)) 1447 #endif 1448 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG; 1508 1449 } 1509 1450 … … 1512 1453 { 1513 1454 unsigned long addr; 1514 if ((tlb_entry->addr ess& ~TARGET_PAGE_MASK) == IO_MEM_RAM) {1515 addr = (tlb_entry->addr ess& TARGET_PAGE_MASK) + tlb_entry->addend;1455 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) { 1456 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend; 1516 1457 if ((addr - start) < length) { 1517 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY; 1518 } 1519 } 1520 } 1521 1522 void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end) 1458 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY; 1459 } 1460 } 1461 } 1462 1463 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end, 1464 int dirty_flags) 1523 1465 { 1524 1466 CPUState *env; 1525 1467 unsigned long length, start1; 1526 int i; 1468 int i, mask, len; 1469 uint8_t *p; 1527 1470 1528 1471 start &= TARGET_PAGE_MASK; … … 1532 1475 if (length == 0) 1533 1476 return; 1534 memset(phys_ram_dirty + (start >> TARGET_PAGE_BITS), 0, length >> TARGET_PAGE_BITS); 1535 1536 env = cpu_single_env; 1477 len = length >> TARGET_PAGE_BITS; 1478 #ifdef USE_KQEMU 1479 /* XXX: should not depend on cpu context */ 1480 env = first_cpu; 1481 if (env->kqemu_enabled) { 1482 ram_addr_t addr; 1483 addr = start; 1484 for(i = 0; i < len; i++) { 1485 kqemu_set_notdirty(env, addr); 1486 addr += TARGET_PAGE_SIZE; 1487 } 1488 } 1489 #endif 1490 mask = ~dirty_flags; 1491 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS); 1492 #ifdef VBOX 1493 if (RT_LIKELY((start >> TARGET_PAGE_BITS) < phys_ram_dirty_size)) 1494 #endif 1495 for(i = 0; i < len; i++) 1496 p[i] &= mask; 1497 1537 1498 /* we modify the TLB cache so that the dirty bit will be set again 1538 1499 when accessing the range */ 1539 #if def VBOX1540 start1 = (unsigned long)remR3GCPhys2HCVirt(env, start);1541 #el se1500 #if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB) 1501 start1 = start; 1502 #elif !defined(VBOX) || !defined(PGM_DYNAMIC_RAM_ALLOC) 1542 1503 start1 = start + (unsigned long)phys_ram_base; 1543 #endif 1544 for(i = 0; i < CPU_TLB_SIZE; i++) 1545 tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length); 1546 for(i = 0; i < CPU_TLB_SIZE; i++) 1547 tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length); 1548 1549 #if !defined(CONFIG_SOFTMMU) && !defined(VBOX) 1504 #else 1505 start1 = (unsigned long)remR3GCPhys2HCVirt(first_cpu, start); 1506 #endif 1507 for(env = first_cpu; env != NULL; env = env->next_cpu) { 1508 for(i 
= 0; i < CPU_TLB_SIZE; i++) 1509 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length); 1510 for(i = 0; i < CPU_TLB_SIZE; i++) 1511 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length); 1512 } 1513 1514 #if !defined(CONFIG_SOFTMMU) 1515 #ifdef VBOX /**@todo remove this check */ 1516 # error "We shouldn't get here..." 1517 #endif 1550 1518 /* XXX: this is expensive */ 1551 1519 { … … 1576 1544 } 1577 1545 1546 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry) 1547 { 1548 ram_addr_t ram_addr; 1549 1550 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) { 1551 /* RAM case */ 1552 #if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB) 1553 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend; 1554 #elif !defined(VBOX) || !defined(PGM_DYNAMIC_RAM_ALLOC) 1555 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + 1556 tlb_entry->addend - (unsigned long)phys_ram_base; 1557 #else 1558 ram_addr = remR3HCVirt2GCPhys(first_cpu, (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend); 1559 #endif 1560 if (!cpu_physical_memory_is_dirty(ram_addr)) { 1561 tlb_entry->addr_write |= IO_MEM_NOTDIRTY; 1562 } 1563 } 1564 } 1565 1566 /* update the TLB according to the current state of the dirty bits */ 1567 void cpu_tlb_update_dirty(CPUState *env) 1568 { 1569 int i; 1570 for(i = 0; i < CPU_TLB_SIZE; i++) 1571 tlb_update_dirty(&env->tlb_table[0][i]); 1572 for(i = 0; i < CPU_TLB_SIZE; i++) 1573 tlb_update_dirty(&env->tlb_table[1][i]); 1574 } 1575 1578 1576 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, 1579 1577 unsigned long start) 1580 1578 { 1581 1579 unsigned long addr; 1582 if ((tlb_entry->addr ess& ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {1583 addr = (tlb_entry->addr ess& TARGET_PAGE_MASK) + tlb_entry->addend;1580 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) { 1581 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend; 1584 1582 if (addr == start) { 1585 tlb_entry->addr ess = (tlb_entry->address& TARGET_PAGE_MASK) | IO_MEM_RAM;1583 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM; 1586 1584 } 1587 1585 } … … 1590 1588 /* update the TLB corresponding to virtual page vaddr and phys addr 1591 1589 addr so that it is no longer dirty */ 1592 static inline void tlb_set_dirty( unsigned long addr, target_ulong vaddr)1593 { 1594 CPUState *env = cpu_single_env; 1590 static inline void tlb_set_dirty(CPUState *env, 1591 unsigned long addr, target_ulong vaddr) 1592 { 1595 1593 int i; 1596 1597 #ifdef VBOX1598 if (remR3HCVirt2GCPhys(env, (void *)addr) > phys_ram_size)1599 {1600 Log(("phys_ram_dirty exceeded at address %VGp, ignoring\n",1601 (RTGCPHYS)(addr - (uintptr_t)phys_ram_base)));1602 return;1603 }1604 phys_ram_dirty[(unsigned long)remR3HCVirt2GCPhys(env, (void *)addr) >> TARGET_PAGE_BITS] = 1;1605 #else1606 phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 1;1607 #endif1608 1609 1594 1610 1595 addr &= TARGET_PAGE_MASK; 1611 1596 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 1612 tlb_set_dirty1(&env->tlb_ write[0][i], addr);1613 tlb_set_dirty1(&env->tlb_ write[1][i], addr);1597 tlb_set_dirty1(&env->tlb_table[0][i], addr); 1598 tlb_set_dirty1(&env->tlb_table[1][i], addr); 1614 1599 } 1615 1600 … … 1618 1603 (can only happen in non SOFTMMU mode for I/O pages or pages 1619 1604 conflicting with the host address space). 
*/ 1620 int tlb_set_page (CPUState *env, target_ulong vaddr,1621 target_phys_addr_t paddr, int prot,1622 int is_user, int is_softmmu)1605 int tlb_set_page_exec(CPUState *env, target_ulong vaddr, 1606 target_phys_addr_t paddr, int prot, 1607 int is_user, int is_softmmu) 1623 1608 { 1624 1609 PhysPageDesc *p; 1625 1610 unsigned long pd; 1626 TranslationBlock *first_tb;1627 1611 unsigned int index; 1628 1612 target_ulong address; 1629 unsigned longaddend;1613 target_phys_addr_t addend; 1630 1614 int ret; 1615 CPUTLBEntry *te; 1631 1616 1632 1617 p = phys_page_find(paddr >> TARGET_PAGE_BITS); 1633 first_tb = NULL;1634 1618 if (!p) { 1635 1619 pd = IO_MEM_UNASSIGNED; 1636 1620 } else { 1637 PageDesc *p1;1638 1621 pd = p->phys_offset; 1639 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {1640 /* NOTE: we also allocate the page at this stage */1641 p1 = page_find_alloc(pd >> TARGET_PAGE_BITS);1642 first_tb = p1->first_tb;1643 }1644 1622 } 1645 1623 #if defined(DEBUG_TLB) 1646 printf("tlb_set_page: vaddr= 0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",1647 vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);1624 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n", 1625 vaddr, (int)paddr, prot, is_user, is_softmmu, pd); 1648 1626 #endif 1649 1627 … … 1653 1631 #endif 1654 1632 { 1655 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM ) {1633 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) { 1656 1634 /* IO memory case */ 1657 1635 address = vaddr | pd; … … 1660 1638 /* standard memory */ 1661 1639 address = vaddr; 1662 #ifdef VBOX 1640 #if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB) 1641 addend = pd & TARGET_PAGE_MASK; 1642 #elif !defined(VBOX) || !defined(PGM_DYNAMIC_RAM_ALLOC) 1643 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK); 1644 #else 1663 1645 addend = (unsigned long)remR3GCPhys2HCVirt(env, pd & TARGET_PAGE_MASK); 1664 #else 1665 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK); 1666 #endif 1646 #endif 1667 1647 } 1668 1648 1669 index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);1649 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 1670 1650 addend -= vaddr; 1651 te = &env->tlb_table[is_user][index]; 1652 te->addend = addend; 1671 1653 if (prot & PAGE_READ) { 1672 env->tlb_read[is_user][index].address = address; 1673 env->tlb_read[is_user][index].addend = addend; 1654 te->addr_read = address; 1674 1655 } else { 1675 env->tlb_read[is_user][index].address = -1; 1676 env->tlb_read[is_user][index].addend = -1; 1656 te->addr_read = -1; 1657 } 1658 if (prot & PAGE_EXEC) { 1659 te->addr_code = address; 1660 } else { 1661 te->addr_code = -1; 1677 1662 } 1678 1663 if (prot & PAGE_WRITE) { 1679 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) { 1680 /* ROM: access is ignored (same as unassigned) */ 1681 env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM; 1682 env->tlb_write[is_user][index].addend = addend; 1683 } else 1684 /* XXX: the PowerPC code seems not ready to handle 1685 self modifying code with DCBI */ 1686 #if defined(TARGET_HAS_SMC) || 1 1687 if (first_tb) { 1688 /* if code is present, we use a specific memory 1689 handler. 
It works only for physical memory access */ 1690 env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE; 1691 env->tlb_write[is_user][index].addend = addend; 1692 } else 1693 #endif 1694 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM && 1664 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM || 1665 (pd & IO_MEM_ROMD)) { 1666 /* write access calls the I/O callback */ 1667 te->addr_write = vaddr | 1668 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD)); 1669 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM && 1695 1670 !cpu_physical_memory_is_dirty(pd)) { 1696 env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY; 1697 env->tlb_write[is_user][index].addend = addend; 1671 te->addr_write = vaddr | IO_MEM_NOTDIRTY; 1698 1672 } else { 1699 env->tlb_write[is_user][index].address = address; 1700 env->tlb_write[is_user][index].addend = addend; 1673 te->addr_write = address; 1701 1674 } 1702 1675 } else { 1703 env->tlb_write[is_user][index].address = -1; 1704 env->tlb_write[is_user][index].addend = -1; 1676 te->addr_write = -1; 1705 1677 } 1706 1678 #ifdef VBOX 1707 1679 /* inform raw mode about TLB page change */ 1708 remR3SetPage(env, &env->tlb_read[is_user][index], &env->tlb_write[is_user][index], prot, is_user); 1680 /** @todo double check and fix this interface. OLD: remR3SetPage(env, &env->tlb_read[is_user][index], &env->tlb_write[is_user][index], prot, is_user); */ 1681 remR3SetPage(env, te, te, prot, is_user); 1709 1682 #endif 1710 1683 } … … 1734 1707 VirtPageDesc *vp; 1735 1708 1736 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS );1709 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1); 1737 1710 vp->phys_addr = pd; 1738 1711 vp->prot = prot; … … 1756 1729 /* called from signal handler: invalidate the code and unprotect the 1757 1730 page. Return TRUE if the fault was succesfully handled. */ 1758 int page_unprotect( unsignedlong addr, unsigned long pc, void *puc)1731 int page_unprotect(target_ulong addr, unsigned long pc, void *puc) 1759 1732 { 1760 1733 #if !defined(CONFIG_SOFTMMU) … … 1786 1759 (unsigned long)addr, vp->prot); 1787 1760 /* set the dirty bit */ 1788 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1;1761 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff; 1789 1762 /* flush the code inside */ 1790 1763 tb_invalidate_phys_page(vp->phys_addr, pc, puc); … … 1812 1785 } 1813 1786 1814 int tlb_set_page (CPUState *env, target_ulong vaddr,1815 target_phys_addr_t paddr, int prot,1816 int is_user, int is_softmmu)1787 int tlb_set_page_exec(CPUState *env, target_ulong vaddr, 1788 target_phys_addr_t paddr, int prot, 1789 int is_user, int is_softmmu) 1817 1790 { 1818 1791 return 0; … … 1864 1837 #endif /* !VBOX */ 1865 1838 1866 int page_get_flags( unsignedlong address)1839 int page_get_flags(target_ulong address) 1867 1840 { 1868 1841 PageDesc *p; … … 1877 1850 necessary. 
The flag PAGE_WRITE_ORG is positionned automatically 1878 1851 depending on PAGE_WRITE */ 1879 void page_set_flags( unsigned long start, unsignedlong end, int flags)1852 void page_set_flags(target_ulong start, target_ulong end, int flags) 1880 1853 { 1881 1854 PageDesc *p; 1882 unsignedlong addr;1855 target_ulong addr; 1883 1856 1884 1857 start = start & TARGET_PAGE_MASK; … … 1886 1859 if (flags & PAGE_WRITE) 1887 1860 flags |= PAGE_WRITE_ORG; 1888 #if defined(VBOX)1861 #ifdef VBOX 1889 1862 AssertMsgFailed(("We shouldn't be here, and if we should, we must have an env to do the proper locking!\n")); 1890 1863 #endif … … 1906 1879 /* called from signal handler: invalidate the code and unprotect the 1907 1880 page. Return TRUE if the fault was succesfully handled. */ 1908 int page_unprotect( unsignedlong address, unsigned long pc, void *puc)1881 int page_unprotect(target_ulong address, unsigned long pc, void *puc) 1909 1882 { 1910 1883 unsigned int page_index, prot, pindex; 1911 1884 PageDesc *p, *p1; 1912 unsignedlong host_start, host_end, addr;1885 target_ulong host_start, host_end, addr; 1913 1886 1914 1887 host_start = address & qemu_host_page_mask; … … 1929 1902 pindex = (address - host_start) >> TARGET_PAGE_BITS; 1930 1903 if (!(p1[pindex].flags & PAGE_WRITE)) { 1931 mprotect((void *) host_start, qemu_host_page_size,1904 mprotect((void *)g2h(host_start), qemu_host_page_size, 1932 1905 (prot & PAGE_BITS) | PAGE_WRITE); 1933 1906 p1[pindex].flags |= PAGE_WRITE; … … 1945 1918 1946 1919 /* call this function when system calls directly modify a memory area */ 1947 void page_unprotect_range(uint8_t *data, unsigned long data_size) 1948 { 1949 unsigned long start, end, addr; 1950 1951 start = (unsigned long)data; 1920 /* ??? This should be redundant now we have lock_user. */ 1921 void page_unprotect_range(target_ulong data, target_ulong data_size) 1922 { 1923 target_ulong start, end, addr; 1924 1925 start = data; 1952 1926 end = start + data_size; 1953 1927 start &= TARGET_PAGE_MASK; … … 1958 1932 } 1959 1933 1960 static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr) 1934 static inline void tlb_set_dirty(CPUState *env, 1935 unsigned long addr, target_ulong vaddr) 1961 1936 { 1962 1937 } … … 1970 1945 unsigned long phys_offset) 1971 1946 { 1972 unsigned longaddr, end_addr;1947 target_phys_addr_t addr, end_addr; 1973 1948 PhysPageDesc *p; 1949 CPUState *env; 1974 1950 1975 1951 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK; 1976 1952 end_addr = start_addr + size; 1977 1953 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) { 1978 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS );1954 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1); 1979 1955 p->phys_offset = phys_offset; 1980 #ifdef VBOX 1956 #if !defined(VBOX) || !defined(PGM_DYNAMIC_RAM_ALLOC) 1957 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM || 1958 (phys_offset & IO_MEM_ROMD)) 1959 #else 1981 1960 if ( (phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM 1961 || (phys_offset & IO_MEM_ROMD) 1982 1962 || (phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING) 1983 #else 1984 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) 1985 #endif 1963 #endif 1964 1986 1965 phys_offset += TARGET_PAGE_SIZE; 1987 1966 } 1967 1968 /* since each CPU stores ram addresses in its TLB cache, we must 1969 reset the modified entries */ 1970 /* XXX: slow ! 
*/ 1971 for(env = first_cpu; env != NULL; env = env->next_cpu) { 1972 tlb_flush(env, 1); 1973 } 1974 } 1975 1976 /* XXX: temporary until new memory mapping API */ 1977 uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr) 1978 { 1979 PhysPageDesc *p; 1980 1981 p = phys_page_find(addr >> TARGET_PAGE_BITS); 1982 if (!p) 1983 return IO_MEM_UNASSIGNED; 1984 return p->phys_offset; 1988 1985 } 1989 1986 1990 1987 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr) 1991 1988 { 1989 #ifdef DEBUG_UNASSIGNED 1990 printf("Unassigned mem read 0x%08x\n", (int)addr); 1991 #endif 1992 1992 return 0; 1993 1993 } … … 1995 1995 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val) 1996 1996 { 1997 #ifdef DEBUG_UNASSIGNED 1998 printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val); 1999 #endif 1997 2000 } 1998 2001 … … 2009 2012 }; 2010 2013 2011 /* self modifying code support in soft mmu mode : writing to a page 2012 containing code comes to these functions */ 2013 2014 static void code_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val) 2015 { 2016 unsigned long phys_addr; 2017 2014 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val) 2015 { 2016 unsigned long ram_addr; 2017 int dirty_flags; 2018 #if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB) 2019 ram_addr = addr; 2020 #elif !defined(VBOX) || !defined(PGM_DYNAMIC_RAM_ALLOC) 2021 ram_addr = addr - (unsigned long)phys_ram_base; 2022 #else 2023 ram_addr = remR3HCVirt2GCPhys(first_cpu, (void *)addr); 2024 #endif 2018 2025 #ifdef VBOX 2019 phys_addr = remR3HCVirt2GCPhys(cpu_single_env, (void *)addr); 2020 #else 2021 phys_addr = addr - (unsigned long)phys_ram_base; 2022 #endif 2026 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size)) 2027 dirty_flags = 0xff; 2028 else 2029 #endif /* VBOX */ 2030 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; 2031 if (!(dirty_flags & CODE_DIRTY_FLAG)) { 2023 2032 #if !defined(CONFIG_USER_ONLY) 2024 tb_invalidate_phys_page_fast(phys_addr, 1); 2025 #endif 2033 tb_invalidate_phys_page_fast(ram_addr, 1); 2034 # ifdef VBOX 2035 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size)) 2036 dirty_flags = 0xff; 2037 else 2038 # endif /* VBOX */ 2039 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; 2040 #endif 2041 } 2026 2042 stb_p((uint8_t *)(long)addr, val); 2027 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1; 2028 } 2029 2030 static void code_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val) 2031 { 2032 unsigned long phys_addr; 2033 2043 #ifdef USE_KQEMU 2044 if (cpu_single_env->kqemu_enabled && 2045 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK) 2046 kqemu_modify_page(cpu_single_env, ram_addr); 2047 #endif 2048 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG); 2034 2049 #ifdef VBOX 2035 phys_addr = remR3HCVirt2GCPhys(cpu_single_env, (void *)addr); 2036 #else 2037 phys_addr = addr - (unsigned long)phys_ram_base; 2038 #endif 2050 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size)) 2051 #endif /* !VBOX */ 2052 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags; 2053 /* we remove the notdirty callback only if the code has been 2054 flushed */ 2055 if (dirty_flags == 0xff) 2056 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr); 2057 } 2058 2059 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val) 2060 { 2061 unsigned long ram_addr; 2062 int dirty_flags; 2063 #if 
defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB) 2064 ram_addr = addr; 2065 #elif !defined(VBOX) || !defined(PGM_DYNAMIC_RAM_ALLOC) 2066 ram_addr = addr - (unsigned long)phys_ram_base; 2067 #else 2068 ram_addr = remR3HCVirt2GCPhys(first_cpu, (void *)addr); 2069 #endif 2070 #ifdef VBOX 2071 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size)) 2072 dirty_flags = 0xff; 2073 else 2074 #endif /* VBOX */ 2075 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; 2076 if (!(dirty_flags & CODE_DIRTY_FLAG)) { 2039 2077 #if !defined(CONFIG_USER_ONLY) 2040 tb_invalidate_phys_page_fast(phys_addr, 2); 2041 #endif 2078 tb_invalidate_phys_page_fast(ram_addr, 2); 2079 # ifdef VBOX 2080 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size)) 2081 dirty_flags = 0xff; 2082 else 2083 # endif /* VBOX */ 2084 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; 2085 #endif 2086 } 2042 2087 stw_p((uint8_t *)(long)addr, val); 2043 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1; 2044 } 2045 2046 static void code_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val) 2047 { 2048 unsigned long phys_addr; 2049 2088 #ifdef USE_KQEMU 2089 if (cpu_single_env->kqemu_enabled && 2090 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK) 2091 kqemu_modify_page(cpu_single_env, ram_addr); 2092 #endif 2093 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG); 2050 2094 #ifdef VBOX 2051 phys_addr = remR3HCVirt2GCPhys(cpu_single_env, (void *)addr); 2052 #else 2053 phys_addr = addr - (unsigned long)phys_ram_base; 2054 #endif 2095 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size)) 2096 #endif 2097 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags; 2098 /* we remove the notdirty callback only if the code has been 2099 flushed */ 2100 if (dirty_flags == 0xff) 2101 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr); 2102 } 2103 2104 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val) 2105 { 2106 unsigned long ram_addr; 2107 int dirty_flags; 2108 #if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB) 2109 ram_addr = addr; 2110 #elif !defined(VBOX) || !defined(PGM_DYNAMIC_RAM_ALLOC) 2111 ram_addr = addr - (unsigned long)phys_ram_base; 2112 #else 2113 ram_addr = remR3HCVirt2GCPhys(first_cpu, (void *)addr); 2114 #endif 2115 #ifdef VBOX 2116 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size)) 2117 dirty_flags = 0xff; 2118 else 2119 #endif /* VBOX */ 2120 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; 2121 if (!(dirty_flags & CODE_DIRTY_FLAG)) { 2055 2122 #if !defined(CONFIG_USER_ONLY) 2056 tb_invalidate_phys_page_fast(phys_addr, 4); 2057 #endif 2123 tb_invalidate_phys_page_fast(ram_addr, 4); 2124 # ifdef VBOX 2125 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size)) 2126 dirty_flags = 0xff; 2127 else 2128 # endif /* VBOX */ 2129 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; 2130 #endif 2131 } 2058 2132 stl_p((uint8_t *)(long)addr, val); 2059 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1; 2060 } 2061 2062 static CPUReadMemoryFunc *code_mem_read[3] = { 2133 #ifdef USE_KQEMU 2134 if (cpu_single_env->kqemu_enabled && 2135 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK) 2136 kqemu_modify_page(cpu_single_env, ram_addr); 2137 #endif 2138 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG); 2139 #ifdef VBOX 2140 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size)) 2141 #endif 2142 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] 
= dirty_flags; 2143 /* we remove the notdirty callback only if the code has been 2144 flushed */ 2145 if (dirty_flags == 0xff) 2146 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr); 2147 } 2148 2149 static CPUReadMemoryFunc *error_mem_read[3] = { 2063 2150 NULL, /* never used */ 2064 2151 NULL, /* never used */ 2065 2152 NULL, /* never used */ 2066 2153 }; 2067 2068 static CPUWriteMemoryFunc *code_mem_write[3] = {2069 code_mem_writeb,2070 code_mem_writew,2071 code_mem_writel,2072 };2073 2074 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)2075 {2076 stb_p((uint8_t *)(long)addr, val);2077 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);2078 }2079 2080 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)2081 {2082 stw_p((uint8_t *)(long)addr, val);2083 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);2084 }2085 2086 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)2087 {2088 stl_p((uint8_t *)(long)addr, val);2089 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);2090 }2091 2154 2092 2155 static CPUWriteMemoryFunc *notdirty_mem_write[3] = { … … 2098 2161 static void io_mem_init(void) 2099 2162 { 2100 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write, NULL);2163 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL); 2101 2164 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL); 2102 cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write, NULL); 2103 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write, NULL); 2104 #ifdef VBOX 2165 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL); 2166 #if defined(VBOX) && defined(PGM_DYNAMIC_RAM_ALLOC) 2105 2167 cpu_register_io_memory(IO_MEM_RAM_MISSING >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL); 2106 2168 io_mem_nb = 6; … … 2109 2171 #endif 2110 2172 2111 #if !defined(VBOX)/* VBOX: we do this later when the RAM is allocated. */2173 #ifndef VBOX /* VBOX: we do this later when the RAM is allocated. 
*/ 2112 2174 /* alloc dirty bits array */ 2113 phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS); 2175 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS); 2176 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS); 2114 2177 #endif /* !VBOX */ 2115 2178 } … … 2129 2192 2130 2193 if (io_index <= 0) { 2131 if (io_ index>= IO_MEM_NB_ENTRIES)2194 if (io_mem_nb >= IO_MEM_NB_ENTRIES) 2132 2195 return -1; 2133 2196 io_index = io_mem_nb++; … … 2136 2199 return -1; 2137 2200 } 2138 2201 2139 2202 for(i = 0;i < 3; i++) { 2140 2203 io_mem_read[io_index][i] = mem_read[i]; … … 2162 2225 int l, flags; 2163 2226 target_ulong page; 2227 void * p; 2164 2228 2165 2229 while (len > 0) { … … 2174 2238 if (!(flags & PAGE_WRITE)) 2175 2239 return; 2176 memcpy((uint8_t *)addr, buf, len); 2240 p = lock_user(addr, len, 0); 2241 memcpy(p, buf, len); 2242 unlock_user(p, addr, len); 2177 2243 } else { 2178 2244 if (!(flags & PAGE_READ)) 2179 2245 return; 2180 memcpy(buf, (uint8_t *)addr, len); 2246 p = lock_user(addr, len, 1); 2247 memcpy(buf, p, len); 2248 unlock_user(p, addr, 0); 2181 2249 } 2182 2250 len -= l; … … 2184 2252 addr += l; 2185 2253 } 2186 }2187 2188 /* never used */2189 uint32_t ldl_phys(target_phys_addr_t addr)2190 {2191 return 0;2192 }2193 2194 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)2195 {2196 }2197 2198 void stl_phys(target_phys_addr_t addr, uint32_t val)2199 {2200 2254 } 2201 2255 … … 2224 2278 2225 2279 if (is_write) { 2226 if ((pd & ~TARGET_PAGE_MASK) != 0) {2280 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { 2227 2281 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); 2282 /* XXX: could force cpu_single_env to NULL to avoid 2283 potential bugs */ 2228 2284 if (l >= 4 && ((addr & 3) == 0)) { 2229 /* 32 bit read access */ 2285 /* 32 bit write access */ 2286 #if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB) 2230 2287 val = ldl_p(buf); 2288 #else 2289 val = *(const uint32_t *)buf; 2290 #endif 2231 2291 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val); 2232 2292 l = 4; 2233 2293 } else if (l >= 2 && ((addr & 1) == 0)) { 2234 /* 16 bit read access */ 2294 /* 16 bit write access */ 2295 #if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB) 2235 2296 val = lduw_p(buf); 2297 #else 2298 val = *(const uint16_t *)buf; 2299 #endif 2236 2300 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val); 2237 2301 l = 2; 2238 2302 } else { 2239 /* 8 bit access */ 2303 /* 8 bit write access */ 2304 #if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB) 2240 2305 val = ldub_p(buf); 2306 #else 2307 val = *(const uint8_t *)buf; 2308 #endif 2241 2309 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val); 2242 2310 l = 1; … … 2247 2315 /* RAM case */ 2248 2316 #ifdef VBOX 2249 ptr = remR3GCPhys2HCVirt(cpu_single_env, addr1);; 2250 remR3PhysWriteBytes(ptr, buf, l); 2317 remR3PhysWrite(addr1, buf, l); NOREF(ptr); 2251 2318 #else 2252 2319 ptr = phys_ram_base + addr1; 2253 2320 memcpy(ptr, buf, l); 2254 2321 #endif 2255 /* invalidate code */ 2256 tb_invalidate_phys_page_range(addr1, addr1 + l, 0); 2257 /* set dirty bit */ 2258 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 1; 2322 if (!cpu_physical_memory_is_dirty(addr1)) { 2323 /* invalidate code */ 2324 tb_invalidate_phys_page_range(addr1, addr1 + l, 0); 2325 /* set dirty bit */ 2326 #ifdef VBOX 2327 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size)) 2328 #endif 2329 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |= 2330 (0xff & ~CODE_DIRTY_FLAG); 2331 } 2259 2332 } 2260 
2333 } else { 2261 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && 2262 (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {2334 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && 2335 !(pd & IO_MEM_ROMD)) { 2263 2336 /* I/O case */ 2264 2337 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); … … 2266 2339 /* 32 bit read access */ 2267 2340 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr); 2341 #if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB) 2268 2342 stl_p(buf, val); 2343 #else 2344 *(uint32_t *)buf = val; 2345 #endif 2269 2346 l = 4; 2270 2347 } else if (l >= 2 && ((addr & 1) == 0)) { 2271 2348 /* 16 bit read access */ 2272 2349 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr); 2350 #if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB) 2273 2351 stw_p(buf, val); 2352 #else 2353 *(uint16_t *)buf = val; 2354 #endif 2274 2355 l = 2; 2275 2356 } else { 2276 /* 8 bit access */2357 /* 8 bit read access */ 2277 2358 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr); 2359 #if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB) 2278 2360 stb_p(buf, val); 2361 #else 2362 *(uint8_t *)buf = val; 2363 #endif 2279 2364 l = 1; 2280 2365 } … … 2282 2367 /* RAM case */ 2283 2368 #ifdef VBOX 2284 ptr = remR3GCPhys2HCVirt(cpu_single_env, (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); 2285 remR3PhysReadBytes(ptr, buf, l); 2369 remR3PhysRead((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), buf, l); NOREF(ptr); 2286 2370 #else 2287 2371 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + … … 2296 2380 } 2297 2381 } 2382 2383 #ifndef VBOX 2384 /* used for ROM loading : can write in RAM and ROM */ 2385 void cpu_physical_memory_write_rom(target_phys_addr_t addr, 2386 const uint8_t *buf, int len) 2387 { 2388 int l; 2389 uint8_t *ptr; 2390 target_phys_addr_t page; 2391 unsigned long pd; 2392 PhysPageDesc *p; 2393 2394 while (len > 0) { 2395 page = addr & TARGET_PAGE_MASK; 2396 l = (page + TARGET_PAGE_SIZE) - addr; 2397 if (l > len) 2398 l = len; 2399 p = phys_page_find(page >> TARGET_PAGE_BITS); 2400 if (!p) { 2401 pd = IO_MEM_UNASSIGNED; 2402 } else { 2403 pd = p->phys_offset; 2404 } 2405 2406 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM && 2407 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM && 2408 !(pd & IO_MEM_ROMD)) { 2409 /* do nothing */ 2410 } else { 2411 unsigned long addr1; 2412 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK); 2413 /* ROM/RAM case */ 2414 ptr = phys_ram_base + addr1; 2415 memcpy(ptr, buf, l); 2416 } 2417 len -= l; 2418 buf += l; 2419 addr += l; 2420 } 2421 } 2422 #endif /* !VBOX */ 2423 2298 2424 2299 2425 /* warning: addr must be aligned */ … … 2313 2439 } 2314 2440 2315 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && 2316 (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {2441 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && 2442 !(pd & IO_MEM_ROMD)) { 2317 2443 /* I/O case */ 2318 2444 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); … … 2320 2446 } else { 2321 2447 /* RAM case */ 2322 #ifdef VBOX 2323 ptr = remR3GCPhys2HCVirt(cpu_single_env, (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); 2324 #else 2448 #ifndef VBOX 2325 2449 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + 2326 2450 (addr & ~TARGET_PAGE_MASK); 2327 #endif2328 2451 val = ldl_p(ptr); 2452 #else 2453 val = remR3PhysReadU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr); 2454 #endif 2329 2455 } 2330 2456 return val; 2331 2457 } 2332 2458 2333 /* warning: addr must be aligned. The ram page is not masked as dirty 2334 and the code inside is not invalidated. 
It is useful if the dirty 2335 bits are used to track modified PTEs */ 2336 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val) 2459 /* warning: addr must be aligned */ 2460 uint64_t ldq_phys(target_phys_addr_t addr) 2337 2461 { 2338 2462 int io_index; 2339 2463 uint8_t *ptr; 2464 uint64_t val; 2340 2465 unsigned long pd; 2341 2466 PhysPageDesc *p; … … 2348 2473 } 2349 2474 2350 if ((pd & ~TARGET_PAGE_MASK) != 0) { 2475 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && 2476 !(pd & IO_MEM_ROMD)) { 2477 /* I/O case */ 2351 2478 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); 2352 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val); 2479 #ifdef TARGET_WORDS_BIGENDIAN 2480 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32; 2481 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4); 2482 #else 2483 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr); 2484 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32; 2485 #endif 2353 2486 } else { 2354 #ifdef VBOX 2355 ptr = remR3GCPhys2HCVirt(cpu_single_env, (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); 2356 #else 2487 /* RAM case */ 2488 #ifndef VBOX 2357 2489 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + 2358 2490 (addr & ~TARGET_PAGE_MASK); 2359 #endif 2360 stl_p(ptr, val); 2361 } 2362 } 2363 2364 /* warning: addr must be aligned */ 2365 /* XXX: optimize code invalidation test */ 2366 void stl_phys(target_phys_addr_t addr, uint32_t val) 2491 val = ldq_p(ptr); 2492 #else 2493 val = remR3PhysReadU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr); 2494 #endif 2495 } 2496 return val; 2497 } 2498 2499 /* XXX: optimize */ 2500 uint32_t ldub_phys(target_phys_addr_t addr) 2501 { 2502 uint8_t val; 2503 cpu_physical_memory_read(addr, &val, 1); 2504 return val; 2505 } 2506 2507 /* XXX: optimize */ 2508 uint32_t lduw_phys(target_phys_addr_t addr) 2509 { 2510 uint16_t val; 2511 cpu_physical_memory_read(addr, (uint8_t *)&val, 2); 2512 return tswap16(val); 2513 } 2514 2515 /* warning: addr must be aligned. The ram page is not masked as dirty 2516 and the code inside is not invalidated. 
It is useful if the dirty 2517 bits are used to track modified PTEs */ 2518 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val) 2367 2519 { 2368 2520 int io_index; … … 2378 2530 } 2379 2531 2380 if ((pd & ~TARGET_PAGE_MASK) != 0) { 2532 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { 2533 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); 2534 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val); 2535 } else { 2536 #ifndef VBOX 2537 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + 2538 (addr & ~TARGET_PAGE_MASK); 2539 stl_p(ptr, val); 2540 #else 2541 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr); 2542 #endif 2543 } 2544 } 2545 2546 /* warning: addr must be aligned */ 2547 void stl_phys(target_phys_addr_t addr, uint32_t val) 2548 { 2549 int io_index; 2550 uint8_t *ptr; 2551 unsigned long pd; 2552 PhysPageDesc *p; 2553 2554 p = phys_page_find(addr >> TARGET_PAGE_BITS); 2555 if (!p) { 2556 pd = IO_MEM_UNASSIGNED; 2557 } else { 2558 pd = p->phys_offset; 2559 } 2560 2561 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { 2381 2562 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); 2382 2563 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val); … … 2385 2566 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK); 2386 2567 /* RAM case */ 2568 #ifndef VBOX 2569 ptr = phys_ram_base + addr1; 2570 stl_p(ptr, val); 2571 #else 2572 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr); 2573 #endif 2574 if (!cpu_physical_memory_is_dirty(addr1)) { 2575 /* invalidate code */ 2576 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0); 2577 /* set dirty bit */ 2387 2578 #ifdef VBOX 2388 ptr = remR3GCPhys2HCVirt(cpu_single_env, addr1); 2389 #else 2390 ptr = phys_ram_base + addr1; 2391 #endif 2392 stl_p(ptr, val); 2393 /* invalidate code */ 2394 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0); 2395 /* set dirty bit */ 2396 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 1; 2397 } 2398 } 2399 2400 #endif 2401 2579 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size)) 2580 #endif 2581 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |= 2582 (0xff & ~CODE_DIRTY_FLAG); 2583 } 2584 } 2585 } 2586 2587 /* XXX: optimize */ 2588 void stb_phys(target_phys_addr_t addr, uint32_t val) 2589 { 2590 uint8_t v = val; 2591 cpu_physical_memory_write(addr, &v, 1); 2592 } 2593 2594 /* XXX: optimize */ 2595 void stw_phys(target_phys_addr_t addr, uint32_t val) 2596 { 2597 uint16_t v = tswap16(val); 2598 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2); 2599 } 2600 2601 /* XXX: optimize */ 2602 void stq_phys(target_phys_addr_t addr, uint64_t val) 2603 { 2604 val = tswap64(val); 2605 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8); 2606 } 2607 2608 #endif 2609 2610 #ifndef VBOX 2402 2611 /* virtual memory access for debug */ 2403 2612 int cpu_memory_rw_debug(CPUState *env, target_ulong addr, … … 2425 2634 } 2426 2635 2427 #ifndef VBOX2428 2636 void dump_exec_info(FILE *f, 2429 2637 int (*cpu_fprintf)(FILE *f, const char *fmt, ...)) -
trunk/src/recompiler/osdep.h
r1 r2422 2 2 #define QEMU_OSDEP_H 3 3 4 #if defined(VBOX)4 #ifdef VBOX 5 5 6 6 #include <iprt/alloc.h> 7 #include <iprt/stdarg.h> 7 8 #include <iprt/string.h> 8 9 9 #define qemu_vsnprintf(pszBuf, cchBuf, pszFormat, args) \ 10 RTStrPrintfV((pszBuf), (cchBuf), (pszFormat), (args)) 10 #define qemu_snprintf(pszBuf, cbBuf, ...) RTStrPrintf((pszBuf), (cbBuf), __VA_ARGS__) 11 #define qemu_vsnprintf(pszBuf, cbBuf, pszFormat, args) \ 12 RTStrPrintfV((pszBuf), (cbBuf), (pszFormat), (args)) 11 13 #define qemu_vprintf(pszFormat, args) \ 12 14 RTLogPrintfV((pszFormat), (args)) … … 17 19 #define qemu_strdup(psz) RTStrDup(psz) 18 20 21 #define qemu_vmalloc(cb) RTMemPageAlloc(cb) 22 #define qemu_vfree(pv) RTMemPageFree(pv) 23 24 #ifndef NULL 25 # define NULL 0 26 #endif 19 27 20 28 #else /* !VBOX */ … … 22 30 #include <stdarg.h> 23 31 24 int qemu_vsnprintf(char *buf, int buflen, const char *fmt, va_list args); 25 void qemu_vprintf(const char *fmt, va_list ap); 26 void qemu_printf(const char *fmt, ...); 32 #define qemu_snprintf snprintf /* bird */ 33 #define qemu_vsnprintf vsnprintf /* bird */ 34 #define qemu_vprintf vprintf /* bird */ 35 36 #define qemu_printf printf 27 37 28 38 void *qemu_malloc(size_t size); … … 31 41 char *qemu_strdup(const char *str); 32 42 43 void *qemu_vmalloc(size_t size); 44 void qemu_vfree(void *ptr); 45 33 46 void *get_mmap_addr(unsigned long size); 34 35 /* specific kludges for OS compatibility (should be moved elsewhere) */36 #if defined(__i386__) && !defined(CONFIG_SOFTMMU) && !defined(CONFIG_USER_ONLY)37 38 /* disabled pthread version of longjmp which prevent us from using an39 alternative signal stack */40 extern void __longjmp(jmp_buf env, int val);41 #define longjmp __longjmp42 43 #include <signal.h>44 45 /* NOTE: it works only because the glibc sigset_t is >= kernel sigset_t */46 struct qemu_sigaction {47 union {48 void (*_sa_handler)(int);49 void (*_sa_sigaction)(int, struct siginfo *, void *);50 } _u;51 unsigned long sa_flags;52 void (*sa_restorer)(void);53 sigset_t sa_mask; /* mask last for extensibility */54 };55 56 int qemu_sigaction(int signum, const struct qemu_sigaction *act,57 struct qemu_sigaction *oldact);58 59 #undef sigaction60 #undef sa_handler61 #undef sa_sigaction62 #define sigaction qemu_sigaction63 #define sa_handler _u._sa_handler64 #define sa_sigaction _u._sa_sigaction65 66 #endif67 47 68 48 #endif /* !VBOX */ -
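A note on the osdep.h hunks above: VBox builds now map qemu_vmalloc/qemu_vfree to RTMemPageAlloc/RTMemPageFree so that tables such as the dirty bitmap get page-granular, page-aligned memory (exec.c above switched the phys_ram_dirty allocation from qemu_malloc to qemu_vmalloc accordingly). For the plain declarations on the !VBOX side, one plausible POSIX implementation is sketched below; this is an assumption for illustration, not the actual osdep.c code, which varies per host OS.

    #include <stdlib.h>
    #include <unistd.h>

    /* page-aligned allocation matching the void *qemu_vmalloc(size_t)
       declaration above; assumes a POSIX host */
    void *qemu_vmalloc(size_t size)
    {
        void *p = NULL;
        long pagesize = sysconf(_SC_PAGESIZE);
        if (pagesize <= 0 || posix_memalign(&p, (size_t)pagesize, size) != 0)
            return NULL;
        return p;
    }

    void qemu_vfree(void *ptr)
    {
        free(ptr);   /* valid because the memory came from posix_memalign */
    }

Note that qemu_vfree takes only the pointer, matching the single-argument RTMemPageFree mapping in the hunk.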
trunk/src/recompiler/softmmu_header.h
r1 r2422 18 18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 19 19 */ 20 21 20 #if DATA_SIZE == 8 22 21 #define SUFFIX q … … 57 56 #elif defined (TARGET_PPC) 58 57 #define CPU_MEM_INDEX (msr_pr) 58 #elif defined (TARGET_MIPS) 59 #define CPU_MEM_INDEX ((env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_UM) 59 60 #elif defined (TARGET_SPARC) 60 61 #define CPU_MEM_INDEX ((env->psrs) == 0) 62 #elif defined (TARGET_ARM) 63 #define CPU_MEM_INDEX ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) 64 #elif defined (TARGET_SH4) 65 #define CPU_MEM_INDEX ((env->sr & SR_MD) == 0) 66 #else 67 #error unsupported CPU 61 68 #endif 62 69 #define MMUSUFFIX _mmu … … 68 75 #elif defined (TARGET_PPC) 69 76 #define CPU_MEM_INDEX (msr_pr) 77 #elif defined (TARGET_MIPS) 78 #define CPU_MEM_INDEX ((env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_UM) 70 79 #elif defined (TARGET_SPARC) 71 80 #define CPU_MEM_INDEX ((env->psrs) == 0) 81 #elif defined (TARGET_ARM) 82 #define CPU_MEM_INDEX ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) 83 #elif defined (TARGET_SH4) 84 #define CPU_MEM_INDEX ((env->sr & SR_MD) == 0) 85 #else 86 #error unsupported CPU 72 87 #endif 73 88 #define MMUSUFFIX _cmmu … … 83 98 #endif 84 99 100 #if ACCESS_TYPE == 3 101 #define ADDR_READ addr_code 102 #else 103 #define ADDR_READ addr_read 104 #endif 85 105 86 106 DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr, … … 89 109 90 110 #if (DATA_SIZE <= 4) && (TARGET_LONG_BITS == 32) && defined(__i386__) && \ 91 (ACCESS_TYPE <= 1) && defined(ASM_SOFTMMU) 111 (ACCESS_TYPE <= 1) && defined(ASM_SOFTMMU) && (!defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)) 112 113 #define CPU_TLB_ENTRY_BITS 4 92 114 93 115 static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(target_ulong ptr) … … 110 132 "jmp 2f\n" 111 133 "1:\n" 112 "addl 4(%%edx), %%eax\n"134 "addl 12(%%edx), %%eax\n" 113 135 #if DATA_SIZE == 1 114 136 "movzbl (%%eax), %0\n" … … 123 145 : "=r" (res) 124 146 : "r" (ptr), 125 "i" ((CPU_TLB_SIZE - 1) << 3),126 "i" (TARGET_PAGE_BITS - 3),147 "i" ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS), 148 "i" (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS), 127 149 "i" (TARGET_PAGE_MASK | (DATA_SIZE - 1)), 128 "m" (*(uint32_t *)offsetof(CPUState, tlb_ read[CPU_MEM_INDEX][0].address)),150 "m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MEM_INDEX][0].addr_read)), 129 151 "i" (CPU_MEM_INDEX), 130 152 "m" (*(uint8_t *)&glue(glue(__ld, SUFFIX), MMUSUFFIX)) … … 159 181 "jmp 2f\n" 160 182 "1:\n" 161 "addl 4(%%edx), %%eax\n"183 "addl 12(%%edx), %%eax\n" 162 184 #if DATA_SIZE == 1 163 185 "movsbl (%%eax), %0\n" … … 170 192 : "=r" (res) 171 193 : "r" (ptr), 172 "i" ((CPU_TLB_SIZE - 1) << 3),173 "i" (TARGET_PAGE_BITS - 3),194 "i" ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS), 195 "i" (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS), 174 196 "i" (TARGET_PAGE_MASK | (DATA_SIZE - 1)), 175 "m" (*(uint32_t *)offsetof(CPUState, tlb_ read[CPU_MEM_INDEX][0].address)),197 "m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MEM_INDEX][0].addr_read)), 176 198 "i" (CPU_MEM_INDEX), 177 199 "m" (*(uint8_t *)&glue(glue(__ld, SUFFIX), MMUSUFFIX)) … … 194 216 index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 195 217 is_user = CPU_MEM_INDEX; 196 if (__builtin_expect(env->tlb_ write[is_user][index].address!=218 if (__builtin_expect(env->tlb_table[is_user][index].addr_write != 197 219 (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))), 0)) { 198 220 glue(glue(__st, SUFFIX), MMUSUFFIX)(addr, v, is_user); 199 221 } else { 200 physaddr = addr + env->tlb_ 
write[is_user][index].addend;222 physaddr = addr + env->tlb_table[is_user][index].addend; 201 223 glue(glue(st, SUFFIX), _raw)((uint8_t *)physaddr, v); 202 224 } 203 225 } 204 226 205 #else 227 #else /* !VBOX */ 206 228 207 229 static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(target_ulong ptr, RES_TYPE v) … … 230 252 "jmp 2f\n" 231 253 "1:\n" 232 "addl 4(%%edx), %%eax\n"254 "addl 8(%%edx), %%eax\n" 233 255 #if DATA_SIZE == 1 234 256 "movb %b1, (%%eax)\n" … … 246 268 with T1 ! */ 247 269 "r" (v), 248 "i" ((CPU_TLB_SIZE - 1) << 3),249 "i" (TARGET_PAGE_BITS - 3),270 "i" ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS), 271 "i" (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS), 250 272 "i" (TARGET_PAGE_MASK | (DATA_SIZE - 1)), 251 "m" (*(uint32_t *)offsetof(CPUState, tlb_ write[CPU_MEM_INDEX][0].address)),273 "m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MEM_INDEX][0].addr_write)), 252 274 "i" (CPU_MEM_INDEX), 253 275 "m" (*(uint8_t *)&glue(glue(__st, SUFFIX), MMUSUFFIX)) 254 276 : "%eax", "%ecx", "%edx", "memory", "cc"); 255 277 } 256 #endif /* VBOX */278 #endif /* !VBOX */ 257 279 258 280 #else … … 271 293 index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 272 294 is_user = CPU_MEM_INDEX; 273 if (__builtin_expect(env->tlb_ read[is_user][index].address!=295 if (__builtin_expect(env->tlb_table[is_user][index].ADDR_READ != 274 296 (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))), 0)) { 275 297 res = glue(glue(__ld, SUFFIX), MMUSUFFIX)(addr, is_user); 276 298 } else { 277 physaddr = addr + env->tlb_ read[is_user][index].addend;299 physaddr = addr + env->tlb_table[is_user][index].addend; 278 300 res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)physaddr); 279 301 } … … 292 314 index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 293 315 is_user = CPU_MEM_INDEX; 294 if (__builtin_expect(env->tlb_ read[is_user][index].address!=316 if (__builtin_expect(env->tlb_table[is_user][index].ADDR_READ != 295 317 (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))), 0)) { 296 318 res = (DATA_STYPE)glue(glue(__ld, SUFFIX), MMUSUFFIX)(addr, is_user); 297 319 } else { 298 physaddr = addr + env->tlb_ read[is_user][index].addend;320 physaddr = addr + env->tlb_table[is_user][index].addend; 299 321 res = glue(glue(lds, SUFFIX), _raw)((uint8_t *)physaddr); 300 322 } … … 302 324 } 303 325 #endif 326 327 #if ACCESS_TYPE != 3 304 328 305 329 /* generic store macro */ … … 315 339 index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 316 340 is_user = CPU_MEM_INDEX; 317 if (__builtin_expect(env->tlb_ write[is_user][index].address!=341 if (__builtin_expect(env->tlb_table[is_user][index].addr_write != 318 342 (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))), 0)) { 319 343 glue(glue(__st, SUFFIX), MMUSUFFIX)(addr, v, is_user); 320 344 } else { 321 physaddr = addr + env->tlb_ write[is_user][index].addend;345 physaddr = addr + env->tlb_table[is_user][index].addend; 322 346 glue(glue(st, SUFFIX), _raw)((uint8_t *)physaddr, v); 323 347 } 324 348 } 325 349 326 #endif 350 #endif /* ACCESS_TYPE != 3 */ 351 352 #endif /* !asm */ 353 354 #if ACCESS_TYPE != 3 327 355 328 356 #if DATA_SIZE == 8 329 static inline doubleglue(ldfq, MEMSUFFIX)(target_ulong ptr)357 static inline float64 glue(ldfq, MEMSUFFIX)(target_ulong ptr) 330 358 { 331 359 union { 332 doubled;360 float64 d; 333 361 uint64_t i; 334 362 } u; … … 337 365 } 338 366 339 static inline void glue(stfq, MEMSUFFIX)(target_ulong ptr, doublev)367 static inline void glue(stfq, MEMSUFFIX)(target_ulong ptr, float64 v) 340 368 { 341 369 union { 342 doubled;370 float64 d; 343 371 uint64_t i; 344 
372 } u; … … 349 377 350 378 #if DATA_SIZE == 4 351 static inline float glue(ldfl, MEMSUFFIX)(target_ulong ptr)379 static inline float32 glue(ldfl, MEMSUFFIX)(target_ulong ptr) 352 380 { 353 381 union { 354 float f;382 float32 f; 355 383 uint32_t i; 356 384 } u; … … 359 387 } 360 388 361 static inline void glue(stfl, MEMSUFFIX)(target_ulong ptr, float v)389 static inline void glue(stfl, MEMSUFFIX)(target_ulong ptr, float32 v) 362 390 { 363 391 union { 364 float f;392 float32 f; 365 393 uint32_t i; 366 394 } u; … … 369 397 } 370 398 #endif /* DATA_SIZE == 4 */ 399 400 #endif /* ACCESS_TYPE != 3 */ 371 401 372 402 #undef RES_TYPE … … 378 408 #undef CPU_MEM_INDEX 379 409 #undef MMUSUFFIX 410 #undef ADDR_READ -
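A note on the softmmu_header.h hunks above: the rewrite follows the new CPUTLBEntry layout, in which one entry carries three guest-address tags (addr_read, addr_write, addr_code, selected through ADDR_READ) plus a host addend; that is why CPU_TLB_ENTRY_BITS is now 4 and why the inline assembly loads the addend from offset 8 or 12 instead of 4. The generic (non-assembly) read probe reduces to the following sketch; the TLB size, page size and slow-path hook are invented for the example.

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_BITS 12
    #define PAGE_MASK (~((1u << PAGE_BITS) - 1))
    #define TLB_SIZE  256                    /* CPU_TLB_SIZE stand-in */

    typedef struct {
        uint32_t  addr_read;                 /* tag checked by loads */
        uint32_t  addr_write;                /* tag checked by stores */
        uint32_t  addr_code;                 /* tag checked by instruction fetch */
        uintptr_t addend;                    /* guest vaddr -> host pointer delta */
    } ToyTLBEntry;

    static ToyTLBEntry tlb[TLB_SIZE];

    static uint8_t slow_ldb(uint32_t addr)   /* stands in for __ldb_mmu() */
    {
        printf("miss at %#x: refill TLB or do I/O\n", addr);
        return 0;
    }

    static uint8_t ldub_fast(uint32_t addr)
    {
        unsigned idx = (addr >> PAGE_BITS) & (TLB_SIZE - 1);
        ToyTLBEntry *te = &tlb[idx];
        /* low tag bits encode I/O and invalid entries, so a page-masked
           compare rejects those too (byte access: no alignment bits) */
        if (te->addr_read != (addr & PAGE_MASK))
            return slow_ldb(addr);
        return *(uint8_t *)(addr + te->addend);
    }

    int main(void)
    {
        static uint8_t page[1 << PAGE_BITS];
        uint32_t vaddr = 0x3000;
        page[5] = 42;
        tlb[(vaddr >> PAGE_BITS) & (TLB_SIZE - 1)] = (ToyTLBEntry){
            .addr_read = vaddr & PAGE_MASK,
            .addend    = (uintptr_t)page - vaddr,
        };
        printf("%d\n", ldub_fast(vaddr + 5));   /* hit: prints 42 */
        printf("%d\n", ldub_fast(0x8000));      /* miss: slow path, prints 0 */
        return 0;
    }

Keeping all three ways of touching a page in one entry is what lets tlb_set_page_exec() grant read and code access while leaving addr_write pointed at IO_MEM_NOTDIRTY.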
trunk/src/recompiler/softmmu_template.h
r1 r2422 42 42 #ifdef SOFTMMU_CODE_ACCESS 43 43 #define READ_ACCESS_TYPE 2 44 #define ADDR_READ addr_code 44 45 #else 45 46 #define READ_ACCESS_TYPE 0 47 #define ADDR_READ addr_read 46 48 #endif 47 49 … … 49 51 int is_user, 50 52 void *retaddr); 51 static inline DATA_TYPE glue(io_read, SUFFIX)( unsigned longphysaddr,53 static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr, 52 54 target_ulong tlb_addr) 53 55 { … … 67 69 #endif 68 70 #endif /* SHIFT > 2 */ 71 #ifdef USE_KQEMU 72 env->last_io_time = cpu_get_time_fast(); 73 #endif 69 74 return res; 70 75 } … … 77 82 int index; 78 83 target_ulong tlb_addr; 79 unsigned longphysaddr;84 target_phys_addr_t physaddr; 80 85 void *retaddr; 81 86 … … 84 89 index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 85 90 redo: 86 tlb_addr = env->tlb_ read[is_user][index].address;91 tlb_addr = env->tlb_table[is_user][index].ADDR_READ; 87 92 if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { 88 physaddr = addr + env->tlb_ read[is_user][index].addend;93 physaddr = addr + env->tlb_table[is_user][index].addend; 89 94 if (tlb_addr & ~TARGET_PAGE_MASK) { 90 95 /* IO access */ … … 92 97 goto do_unaligned_access; 93 98 res = glue(io_read, SUFFIX)(physaddr, tlb_addr); 94 } else if (((addr & 0xfff) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {99 } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) { 95 100 /* slow unaligned access (it spans two pages or IO) */ 96 101 do_unaligned_access: 97 102 retaddr = GETPC(); 103 #ifdef ALIGNED_ONLY 104 do_unaligned_access(addr, READ_ACCESS_TYPE, is_user, retaddr); 105 #endif 98 106 res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr, 99 107 is_user, retaddr); 100 108 } else { 101 /* unaligned access in the same page */ 102 res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)physaddr); 109 /* unaligned/aligned access in the same page */ 110 #ifdef ALIGNED_ONLY 111 if ((addr & (DATA_SIZE - 1)) != 0) { 112 retaddr = GETPC(); 113 do_unaligned_access(addr, READ_ACCESS_TYPE, is_user, retaddr); 114 } 115 #endif 116 res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)physaddr); 103 117 } 104 118 } else { 105 119 /* the page is not in the TLB : fill it */ 106 120 retaddr = GETPC(); 121 #ifdef ALIGNED_ONLY 122 if ((addr & (DATA_SIZE - 1)) != 0) 123 do_unaligned_access(addr, READ_ACCESS_TYPE, is_user, retaddr); 124 #endif 107 125 tlb_fill(addr, READ_ACCESS_TYPE, is_user, retaddr); 108 126 goto redo; … … 118 136 DATA_TYPE res, res1, res2; 119 137 int index, shift; 120 unsigned longphysaddr;138 target_phys_addr_t physaddr; 121 139 target_ulong tlb_addr, addr1, addr2; 122 140 123 141 index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 124 142 redo: 125 tlb_addr = env->tlb_ read[is_user][index].address;143 tlb_addr = env->tlb_table[is_user][index].ADDR_READ; 126 144 if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { 127 physaddr = addr + env->tlb_ read[is_user][index].addend;145 physaddr = addr + env->tlb_table[is_user][index].addend; 128 146 if (tlb_addr & ~TARGET_PAGE_MASK) { 129 147 /* IO access */ … … 131 149 goto do_unaligned_access; 132 150 res = glue(io_read, SUFFIX)(physaddr, tlb_addr); 133 } else if (((addr & 0xfff) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {151 } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) { 134 152 do_unaligned_access: 135 153 /* slow unaligned access (it spans two pages) */ … … 149 167 } else { 150 168 /* unaligned/aligned access in the same page */ 151 res = glue(glue(ld, 
USUFFIX), _raw)((uint8_t *) physaddr);169 res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)physaddr); 152 170 } 153 171 } else { … … 166 184 void *retaddr); 167 185 168 static inline void glue(io_write, SUFFIX)( unsigned longphysaddr,186 static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr, 169 187 DATA_TYPE val, 170 188 target_ulong tlb_addr, … … 187 205 #endif 188 206 #endif /* SHIFT > 2 */ 207 #ifdef USE_KQEMU 208 env->last_io_time = cpu_get_time_fast(); 209 #endif 189 210 } 190 211 … … 193 214 int is_user) 194 215 { 195 unsigned longphysaddr;216 target_phys_addr_t physaddr; 196 217 target_ulong tlb_addr; 197 218 void *retaddr; … … 200 221 index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 201 222 redo: 202 tlb_addr = env->tlb_ write[is_user][index].address;223 tlb_addr = env->tlb_table[is_user][index].addr_write; 203 224 if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { 204 physaddr = addr + env->tlb_ write[is_user][index].addend;225 physaddr = addr + env->tlb_table[is_user][index].addend; 205 226 if (tlb_addr & ~TARGET_PAGE_MASK) { 206 227 /* IO access */ … … 209 230 retaddr = GETPC(); 210 231 glue(io_write, SUFFIX)(physaddr, val, tlb_addr, retaddr); 211 } else if (((addr & 0xfff) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {232 } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) { 212 233 do_unaligned_access: 213 234 retaddr = GETPC(); 235 #ifdef ALIGNED_ONLY 236 do_unaligned_access(addr, 1, is_user, retaddr); 237 #endif 214 238 glue(glue(slow_st, SUFFIX), MMUSUFFIX)(addr, val, 215 239 is_user, retaddr); 216 240 } else { 217 241 /* aligned/unaligned access in the same page */ 218 glue(glue(st, SUFFIX), _raw)((uint8_t *)physaddr, val); 242 #ifdef ALIGNED_ONLY 243 if ((addr & (DATA_SIZE - 1)) != 0) { 244 retaddr = GETPC(); 245 do_unaligned_access(addr, 1, is_user, retaddr); 246 } 247 #endif 248 glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)physaddr, val); 219 249 } 220 250 } else { 221 251 /* the page is not in the TLB : fill it */ 222 252 retaddr = GETPC(); 253 #ifdef ALIGNED_ONLY 254 if ((addr & (DATA_SIZE - 1)) != 0) 255 do_unaligned_access(addr, 1, is_user, retaddr); 256 #endif 223 257 tlb_fill(addr, 1, is_user, retaddr); 224 258 goto redo; … … 232 266 void *retaddr) 233 267 { 234 unsigned longphysaddr;268 target_phys_addr_t physaddr; 235 269 target_ulong tlb_addr; 236 270 int index, i; … … 238 272 index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 239 273 redo: 240 tlb_addr = env->tlb_ write[is_user][index].address;274 tlb_addr = env->tlb_table[is_user][index].addr_write; 241 275 if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { 242 physaddr = addr + env->tlb_ write[is_user][index].addend;276 physaddr = addr + env->tlb_table[is_user][index].addend; 243 277 if (tlb_addr & ~TARGET_PAGE_MASK) { 244 278 /* IO access */ … … 246 280 goto do_unaligned_access; 247 281 glue(io_write, SUFFIX)(physaddr, val, tlb_addr, retaddr); 248 } else if (((addr & 0xfff) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {282 } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) { 249 283 do_unaligned_access: 250 284 /* XXX: not efficient, but simple */ … … 260 294 } else { 261 295 /* aligned/unaligned access in the same page */ 262 glue(glue(st, SUFFIX), _raw)((uint8_t *) physaddr, val);296 glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)physaddr, val); 263 297 } 264 298 } else { … … 277 311 #undef USUFFIX 278 312 #undef DATA_SIZE 313 #undef ADDR_READ -
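A note on the softmmu_template.h hunks above: two small details are easy to miss. First, the page-straddle test loses its hard-coded 4K constant, (addr & 0xfff) + DATA_SIZE - 1 becoming (addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1, which stays correct for any TARGET_PAGE_BITS. Second, a straddling store is still forwarded byte by byte by the slow path ("XXX: not efficient, but simple"). Both are condensed below; stb() is a placeholder, and the byte order is written little-endian here while the real template shifts according to target endianness.

    #include <stdio.h>

    #define TARGET_PAGE_BITS 12
    #define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
    #define TARGET_PAGE_MASK (~(TARGET_PAGE_SIZE - 1))

    /* does an access of 'size' bytes at 'addr' cross into the next page? */
    static int straddles_page(unsigned long addr, int size)
    {
        return ((addr & ~TARGET_PAGE_MASK) + size - 1) >= TARGET_PAGE_SIZE;
    }

    static void stb(unsigned long addr, unsigned char v)  /* byte-store stub */
    {
        printf("  stb %#lx <- %02x\n", addr, v);
    }

    /* forward a straddling store one byte at a time so each byte takes the
       normal (possibly I/O) store path */
    static void slow_st(unsigned long addr, unsigned long val, int size)
    {
        int i;
        for (i = 0; i < size; i++)            /* little-endian order here */
            stb(addr + i, (unsigned char)(val >> (i * 8)));
    }

    int main(void)
    {
        printf("straddles(0xffd,4)=%d straddles(0xff8,4)=%d\n",
               straddles_page(0xffd, 4), straddles_page(0xff8, 4));
        if (straddles_page(0xffd, 4))
            slow_st(0xffd, 0x11223344ul, 4);
        return 0;
    }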
trunk/src/recompiler/target-i386/cpu.h
r1 r2422 35 35 #define TARGET_HAS_PRECISE_SMC 36 36 37 #define TARGET_HAS_ICE 1 38 39 #ifdef TARGET_X86_64 40 #define ELF_MACHINE EM_X86_64 41 #else 42 #define ELF_MACHINE EM_386 43 #endif 44 37 45 #include "cpu-defs.h" 38 46 47 #include "softfloat.h" 48 39 49 #if defined(VBOX) 40 # include <iprt/critsect.h>41 # include <iprt/thread.h>42 # include <iprt/assert.h>43 # include <iprt/asm.h>44 # include <VBox/vmm.h>50 # include <iprt/critsect.h> 51 # include <iprt/thread.h> 52 # include <iprt/assert.h> 53 # include <iprt/asm.h> 54 # include <VBox/vmm.h> 45 55 #endif /* VBOX */ 46 56 … … 121 131 122 132 /* hidden flags - used internally by qemu to represent additionnal cpu 123 states. Only the CPL and INHIBIT_IRQare not redundant. We avoid133 states. Only the CPL, INHIBIT_IRQ and HALTED are not redundant. We avoid 124 134 using the IOPL_MASK, TF_MASK and VM_MASK bit position to ease oring 125 135 with eflags. */ … … 146 156 #define HF_OSFXSR_SHIFT 16 /* CR4.OSFXSR */ 147 157 #define HF_VM_SHIFT 17 /* must be same as eflags */ 158 #define HF_HALTED_SHIFT 18 /* CPU halted */ 159 #define HF_SMM_SHIFT 19 /* CPU in SMM mode */ 148 160 149 161 #define HF_CPL_MASK (3 << HF_CPL_SHIFT) … … 161 173 #define HF_CS64_MASK (1 << HF_CS64_SHIFT) 162 174 #define HF_OSFXSR_MASK (1 << HF_OSFXSR_SHIFT) 175 #define HF_HALTED_MASK (1 << HF_HALTED_SHIFT) 176 #define HF_SMM_MASK (1 << HF_SMM_SHIFT) 163 177 164 178 #define CR0_PE_MASK (1 << 0) … … 192 206 #define PG_PSE_BIT 7 193 207 #define PG_GLOBAL_BIT 8 208 #define PG_NX_BIT 63 194 209 195 210 #define PG_PRESENT_MASK (1 << PG_PRESENT_BIT) … … 202 217 #define PG_PSE_MASK (1 << PG_PSE_BIT) 203 218 #define PG_GLOBAL_MASK (1 << PG_GLOBAL_BIT) 219 #define PG_NX_MASK (1LL << PG_NX_BIT) 204 220 205 221 #define PG_ERROR_W_BIT 1 … … 209 225 #define PG_ERROR_U_MASK 0x04 210 226 #define PG_ERROR_RSVD_MASK 0x08 227 #define PG_ERROR_I_D_MASK 0x10 211 228 212 229 #define MSR_IA32_APICBASE 0x1b … … 220 237 #define MSR_IA32_SYSENTER_EIP 0x176 221 238 #endif 239 240 #define MSR_MCG_CAP 0x179 241 #define MSR_MCG_STATUS 0x17a 242 #define MSR_MCG_CTL 0x17b 243 244 #define MSR_PAT 0x277 222 245 223 246 #define MSR_EFER 0xc0000080 … … 254 277 #define CPUID_CMOV (1 << 15) 255 278 #define CPUID_PAT (1 << 16) 279 #define CPUID_PSE36 (1 << 17) 256 280 #define CPUID_CLFLUSH (1 << 19) 257 281 /* ... */ … … 262 286 263 287 #ifdef VBOX 264 #define CPUID_PSE36 (1 << 17)265 288 #define CPUID_HTT (1 << 28) 266 289 #endif 267 290 268 #define CPUID_EXT_SS 3(1 << 0)291 #define CPUID_EXT_SSE3 (1 << 0) 269 292 #define CPUID_EXT_MONITOR (1 << 3) 270 293 #define CPUID_EXT_CX16 (1 << 13) … … 272 295 #define CPUID_EXT2_SYSCALL (1 << 11) 273 296 #define CPUID_EXT2_NX (1 << 20) 297 #define CPUID_EXT2_FFXSR (1 << 25) 274 298 #define CPUID_EXT2_LM (1 << 29) 275 299 … … 350 374 }; 351 375 352 #if (defined(__i386__) || defined(__x86_64__)) && !defined(_BSD)376 #ifdef FLOATX80 353 377 #define USE_X86LDOUBLE 354 378 #endif 355 379 356 380 #ifdef USE_X86LDOUBLE 357 typedef long doubleCPU86_LDouble;381 typedef floatx80 CPU86_LDouble; 358 382 #else 359 typedef doubleCPU86_LDouble;383 typedef float64 CPU86_LDouble; 360 384 #endif 361 385 … … 376 400 uint32_t _l[4]; 377 401 uint64_t _q[2]; 378 float _s[4];379 double_d[2];402 float32 _s[4]; 403 float64 _d[2]; 380 404 } XMMReg; 381 405 … … 463 487 464 488 /* emulator internal variables */ 489 float_status fp_status; 490 #ifdef VBOX 491 uint32_t alignment3[3]; /* force the long double to start a 16 byte line. 
*/ 492 #endif 465 493 CPU86_LDouble ft0; 494 #if defined(VBOX) && defined(__X86__) && !defined(__DARWIN__) 495 uint32_t alignment4; /* long double is 12 byte, pad it to 16. */ 496 #endif 466 497 union { 467 498 float f; … … 471 502 } fp_convert; 472 503 504 float_status sse_status; 473 505 uint32_t mxcsr; 474 506 XMMReg xmm_regs[CPU_NB_REGS]; … … 480 512 uint32_t sysenter_esp; 481 513 uint32_t sysenter_eip; 514 #ifdef VBOX 515 uint32_t alignment0; 516 #endif 482 517 uint64_t efer; 483 518 uint64_t star; … … 489 524 #endif 490 525 526 uint64_t pat; 527 491 528 /* temporary data for USE_CODE_COPY mode */ 492 529 #ifdef USE_CODE_COPY … … 498 535 /* exception/interrupt handling */ 499 536 jmp_buf jmp_env; 537 #if defined(VBOX) && defined(__WIN__) && defined(__X86__) 538 /* This will be removed when switching to the no-crt code everywhere. */ 539 uint32_t alignment1[23]; 540 #endif 500 541 int exception_index; 501 542 int error_code; 502 543 int exception_is_int; 503 544 target_ulong exception_next_eip; 504 #if defined(VBOX)505 struct TranslationBlock * volatile current_tb; /* currently executing TB */506 #else507 struct TranslationBlock *current_tb; /* currently executing TB */508 #endif509 545 target_ulong dr[8]; /* debug registers */ 510 #if defined(VBOX) 511 volatile int32_t interrupt_request; 512 #else 546 uint32_t smbase; 513 547 int interrupt_request; 514 #endif515 548 int user_mode_only; /* user mode only simulation */ 516 549 517 /* soft mmu support */ 518 /* in order to avoid passing too many arguments to the memory 519 write helpers, we store some rarely used information in the CPU 520 context) */ 521 unsigned long mem_write_pc; /* host pc at which the memory was 522 written */ 523 target_ulong mem_write_vaddr; /* target virtual addr at which the 524 memory was written */ 525 /* 0 = kernel, 1 = user */ 526 CPUTLBEntry tlb_read[2][CPU_TLB_SIZE]; 527 CPUTLBEntry tlb_write[2][CPU_TLB_SIZE]; 528 529 /* from this point: preserved by CPU reset */ 530 /* ice debug support */ 531 target_ulong breakpoints[MAX_BREAKPOINTS]; 532 int nb_breakpoints; 533 int singlestep_enabled; 550 CPU_COMMON 534 551 535 552 #ifdef VBOX … … 546 563 /* processor features (e.g. for CPUID insn) */ 547 564 #ifndef VBOX /* remR3CpuId deals with these */ 565 uint32_t cpuid_level; 548 566 uint32_t cpuid_vendor1; 549 567 uint32_t cpuid_vendor2; … … 551 569 uint32_t cpuid_version; 552 570 #endif /* !VBOX */ 571 uint32_t cpuid_features; 553 572 uint32_t cpuid_ext_features; 554 uint32_t cpuid_features;555 556 573 #ifndef VBOX 574 uint32_t cpuid_xlevel; 575 uint32_t cpuid_model[12]; 576 #endif /* !VBOX */ 577 uint32_t cpuid_ext2_features; 578 579 #ifndef VBOX 580 #ifdef USE_KQEMU 581 int kqemu_enabled; 582 int last_io_time; 583 #endif 557 584 /* in order to simplify APIC support, we leave this pointer to the 558 585 user */ 559 586 struct APICState *apic_state; 560 /* user data */ 561 void *opaque;587 #else 588 uint32_t alignment2[3]; 562 589 #endif 563 590 } CPUX86State; … … 588 615 static inline void cpu_x86_load_seg_cache(CPUX86State *env, 589 616 int seg_reg, unsigned int selector, 590 uint32_t base, unsigned int limit, 617 target_ulong base, 618 unsigned int limit, 591 619 unsigned int flags) 592 620 { … … 668 696 signal handlers to inform the virtual CPU of exceptions. non zero 669 697 is returned if the signal was handled by the virtual CPU. 
*/ 670 struct siginfo; 671 int cpu_x86_signal_handler(int host_signum, struct siginfo *info, 698 int cpu_x86_signal_handler(int host_signum, void *pinfo, 672 699 void *puc); 673 700 void cpu_x86_set_a20(CPUX86State *env, int a20_state); … … 681 708 uint8_t cpu_get_apic_tpr(CPUX86State *env); 682 709 #endif 710 void cpu_smm_update(CPUX86State *env); 683 711 684 712 /* will be suppressed */ … … 688 716 #define X86_DUMP_FPU 0x0001 /* dump FPU state too */ 689 717 #define X86_DUMP_CCOP 0x0002 /* dump qemu flag cache */ 718 719 #ifdef USE_KQEMU 720 static inline int cpu_get_time_fast(void) 721 { 722 int low, high; 723 asm volatile("rdtsc" : "=a" (low), "=d" (high)); 724 return low; 725 } 726 #endif 690 727 691 728 #ifdef VBOX -
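A note on the cpu.h hunks above: the new alignment0/alignment3[3]/alignment4 members are manual padding so that the FPU temporary (and with it the XMM state further down) starts on a 16-byte line in both the 32-bit and 64-bit layouts, as the "force the long double to start a 16 byte line" comment says. A reduced model with invented field names, plus the offsetof() check one would use to verify such padding:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        uint32_t fpstt;            /* three 32-bit members... */
        uint32_t fpus;
        uint32_t fpuc;
        uint32_t alignment3;       /* ...plus one pad word = 16 bytes */
        long double ft0;           /* 12 bytes on x86-32, 16 on x86-64 */
    } ToyFPUState;

    int main(void)
    {
        size_t off = offsetof(ToyFPUState, ft0);
        printf("ft0 at offset %zu, 16-byte aligned: %s\n",
               off, off % 16 == 0 ? "yes" : "no");
        return 0;
    }

The alignment1[23] pad after jmp_env looks like the same idea applied to differing CRT jmp_buf sizes on Windows/x86, though the hunk's own comment only promises it goes away once the no-CRT setjmp is used everywhere.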
trunk/src/recompiler/target-i386/exec.h
r1478 r2422 1 1 /* 2 * i386 execution defines 2 * i386 execution defines 3 3 * 4 4 * Copyright (c) 2003 Fabrice Bellard … … 22 22 23 23 /* XXX: factorize this mess */ 24 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__)25 #define HOST_LONG_BITS 6426 #else27 #define HOST_LONG_BITS 3228 #endif29 30 24 #ifdef TARGET_X86_64 31 25 #define TARGET_LONG_BITS 64 … … 34 28 #endif 35 29 30 #include "cpu-defs.h" 31 36 32 /* at least 4 register variables are defined */ 37 33 register struct CPUX86State *env asm(AREG0); 38 34 39 /* XXX: use 64 bit regs if HOST_LONG_BITS == 64 */ 40 #if TARGET_LONG_BITS == 32 41 42 register uint32_t T0 asm(AREG1); 43 register uint32_t T1 asm(AREG2); 44 register uint32_t T2 asm(AREG3); 45 46 /* if more registers are available, we define some registers too */ 47 #ifdef AREG4 48 register uint32_t EAX asm(AREG4); 49 #define reg_EAX 50 #endif 51 52 #ifdef AREG5 53 register uint32_t ESP asm(AREG5); 54 #define reg_ESP 55 #endif 56 57 #ifdef AREG6 58 register uint32_t EBP asm(AREG6); 59 #define reg_EBP 60 #endif 61 62 #ifdef AREG7 63 register uint32_t ECX asm(AREG7); 64 #define reg_ECX 65 #endif 66 67 #ifdef AREG8 68 register uint32_t EDX asm(AREG8); 69 #define reg_EDX 70 #endif 71 72 #ifdef AREG9 73 register uint32_t EBX asm(AREG9); 74 #define reg_EBX 75 #endif 76 77 #ifdef AREG10 78 register uint32_t ESI asm(AREG10); 79 #define reg_ESI 80 #endif 81 82 #ifdef AREG11 83 register uint32_t EDI asm(AREG11); 84 #define reg_EDI 85 #endif 86 87 #else 35 #if TARGET_LONG_BITS > HOST_LONG_BITS 88 36 89 37 /* no registers can be used */ … … 92 40 #define T2 (env->t2) 93 41 94 #endif 42 #else 43 44 /* XXX: use unsigned long instead of target_ulong - better code will 45 be generated for 64 bit CPUs */ 46 register target_ulong T0 asm(AREG1); 47 register target_ulong T1 asm(AREG2); 48 register target_ulong T2 asm(AREG3); 49 50 /* if more registers are available, we define some registers too */ 51 #ifdef AREG4 52 register target_ulong EAX asm(AREG4); 53 #define reg_EAX 54 #endif 55 56 #ifdef AREG5 57 register target_ulong ESP asm(AREG5); 58 #define reg_ESP 59 #endif 60 61 #ifdef AREG6 62 register target_ulong EBP asm(AREG6); 63 #define reg_EBP 64 #endif 65 66 #ifdef AREG7 67 register target_ulong ECX asm(AREG7); 68 #define reg_ECX 69 #endif 70 71 #ifdef AREG8 72 register target_ulong EDX asm(AREG8); 73 #define reg_EDX 74 #endif 75 76 #ifdef AREG9 77 register target_ulong EBX asm(AREG9); 78 #define reg_EBX 79 #endif 80 81 #ifdef AREG10 82 register target_ulong ESI asm(AREG10); 83 #define reg_ESI 84 #endif 85 86 #ifdef AREG11 87 register target_ulong EDI asm(AREG11); 88 #define reg_EDI 89 #endif 90 91 #endif /* ! (TARGET_LONG_BITS > HOST_LONG_BITS) */ 95 92 96 93 #define A0 T2 … … 140 137 #endif 141 138 142 #if defined(VBOX) && !defined(REMR3PHYSREADWRITE_DEFINED)143 #define REMR3PHYSREADWRITE_DEFINED144 /* Header sharing between vbox & qemu is rather ugly. 
*/145 void remR3PhysReadBytes(uint8_t *pbSrcPhys, void *pvDst, unsigned cb);146 uint8_t remR3PhysReadUByte(uint8_t *pbSrcPhys);147 uint8_t remR3PhysReadSByte(uint8_t *pbSrcPhys);148 uint16_t remR3PhysReadUWord(uint8_t *pbSrcPhys);149 int16_t remR3PhysReadSWord(uint8_t *pbSrcPhys);150 uint32_t remR3PhysReadULong(uint8_t *pbSrcPhys);151 uint32_t remR3PhysReadSLong(uint8_t *pbSrcPhys);152 void remR3PhysWriteBytes(uint8_t *pbDstPhys, const void *pvSrc, unsigned cb);153 void remR3PhysWriteByte(uint8_t *pbDstPhys, uint8_t val);154 void remR3PhysWriteWord(uint8_t *pbDstPhys, uint16_t val);155 void remR3PhysWriteDword(uint8_t *pbDstPhys, uint32_t val);156 #endif157 158 139 #include "cpu.h" 159 140 #include "exec-all.h" 160 161 /* XXX: add a generic FPU library */162 163 static inline double float32_to_float64(float a)164 {165 return a;166 }167 168 static inline float float64_to_float32(double a)169 {170 return a;171 }172 173 #if defined(__powerpc__)174 /* better to call an helper on ppc */175 float int32_to_float32(int32_t a);176 double int32_to_float64(int32_t a);177 #else178 static inline float int32_to_float32(int32_t a)179 {180 return (float)a;181 }182 183 static inline double int32_to_float64(int32_t a)184 {185 return (double)a;186 }187 #endif188 189 static inline float int64_to_float32(int64_t a)190 {191 return (float)a;192 }193 194 static inline double int64_to_float64(int64_t a)195 {196 return (double)a;197 }198 141 199 142 typedef struct CCTable { … … 215 158 void helper_movl_crN_T0(int reg); 216 159 void helper_movl_drN_T0(int reg); 217 void helper_invlpg( unsigned intaddr);160 void helper_invlpg(target_ulong addr); 218 161 void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0); 219 162 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3); 220 163 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4); 221 void cpu_x86_flush_tlb(CPUX86State *env, uint32_taddr);164 void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr); 222 165 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr, 223 166 int is_write, int is_user, int is_softmmu); … … 234 177 void raise_exception_err(int exception_index, int error_code); 235 178 void raise_exception(int exception_index); 179 void do_smm_enter(void); 236 180 void __hidden cpu_loop_exit(void); 237 181 … … 251 195 void helper_divq_EAX_T0(void); 252 196 void helper_idivq_EAX_T0(void); 197 void helper_bswapq_T0(void); 253 198 void helper_cmpxchg8b(void); 254 199 void helper_cpuid(void); 255 200 void helper_enter_level(int level, int data32); 201 void helper_enter64_level(int level, int data64); 256 202 void helper_sysenter(void); 257 203 void helper_sysexit(void); … … 265 211 void helper_verr(void); 266 212 void helper_verw(void); 213 void helper_rsm(void); 267 214 268 215 #ifdef VBOX 269 216 void helper_external_event(void); 270 void helper_hlt(void);271 void helper_monitor(void);272 void helper_mwait(void);273 217 274 218 /* in helper.c */ … … 286 230 void check_iol_DX(void); 287 231 288 /* XXX: move that to a generic header */289 232 #if !defined(CONFIG_USER_ONLY) 290 233 291 #define ldul_user ldl_user 292 #define ldul_kernel ldl_kernel 293 294 #define ACCESS_TYPE 0 295 #define MEMSUFFIX _kernel 296 #define DATA_SIZE 1 297 #include "softmmu_header.h" 298 299 #define DATA_SIZE 2 300 #include "softmmu_header.h" 301 302 #define DATA_SIZE 4 303 #include "softmmu_header.h" 304 305 #define DATA_SIZE 8 306 #include "softmmu_header.h" 307 #undef ACCESS_TYPE 308 #undef MEMSUFFIX 309 310 #define ACCESS_TYPE 1 311 #define 
MEMSUFFIX _user 312 #define DATA_SIZE 1 313 #include "softmmu_header.h" 314 315 #define DATA_SIZE 2 316 #include "softmmu_header.h" 317 318 #define DATA_SIZE 4 319 #include "softmmu_header.h" 320 321 #define DATA_SIZE 8 322 #include "softmmu_header.h" 323 #undef ACCESS_TYPE 324 #undef MEMSUFFIX 325 326 /* these access are slower, they must be as rare as possible */ 327 #define ACCESS_TYPE 2 328 #define MEMSUFFIX _data 329 #define DATA_SIZE 1 330 #include "softmmu_header.h" 331 332 #define DATA_SIZE 2 333 #include "softmmu_header.h" 334 335 #define DATA_SIZE 4 336 #include "softmmu_header.h" 337 338 #define DATA_SIZE 8 339 #include "softmmu_header.h" 340 #undef ACCESS_TYPE 341 #undef MEMSUFFIX 342 343 #define ldub(p) ldub_data(p) 344 #define ldsb(p) ldsb_data(p) 345 #define lduw(p) lduw_data(p) 346 #define ldsw(p) ldsw_data(p) 347 #define ldl(p) ldl_data(p) 348 #define ldq(p) ldq_data(p) 349 350 #define stb(p, v) stb_data(p, v) 351 #define stw(p, v) stw_data(p, v) 352 #define stl(p, v) stl_data(p, v) 353 #define stq(p, v) stq_data(p, v) 234 #include "softmmu_exec.h" 354 235 355 236 static inline double ldfq(target_ulong ptr) … … 397 278 #ifdef USE_X86LDOUBLE 398 279 /* use long double functions */ 399 #define lrint lrintl 400 #define llrint llrintl 401 #define fabs fabsl 280 #define floatx_to_int32 floatx80_to_int32 281 #define floatx_to_int64 floatx80_to_int64 282 #define floatx_to_int32_round_to_zero floatx80_to_int32_round_to_zero 283 #define floatx_to_int64_round_to_zero floatx80_to_int64_round_to_zero 284 #define floatx_abs floatx80_abs 285 #define floatx_chs floatx80_chs 286 #define floatx_round_to_int floatx80_round_to_int 287 #define floatx_compare floatx80_compare 288 #define floatx_compare_quiet floatx80_compare_quiet 289 #ifdef VBOX 290 #undef sin 291 #undef cos 292 #undef sqrt 293 #undef pow 294 #undef log 295 #undef tan 296 #undef atan2 297 #undef floor 298 #undef ceil 299 #undef ldexp 300 #endif /* !VBOX */ 402 301 #define sin sinl 403 302 #define cos cosl … … 409 308 #define floor floorl 410 309 #define ceil ceill 411 #define rint rintl 412 #endif 413 414 #if !defined(_BSD) 415 extern int lrint(CPU86_LDouble x); 416 extern int64_t llrint(CPU86_LDouble x); 417 #else 418 #define lrint(d) ((int)rint(d)) 419 #define llrint(d) ((int)rint(d)) 420 #endif 421 extern CPU86_LDouble fabs(CPU86_LDouble x); 310 #define ldexp ldexpl 311 #else 312 #define floatx_to_int32 float64_to_int32 313 #define floatx_to_int64 float64_to_int64 314 #define floatx_to_int32_round_to_zero float64_to_int32_round_to_zero 315 #define floatx_to_int64_round_to_zero float64_to_int64_round_to_zero 316 #define floatx_abs float64_abs 317 #define floatx_chs float64_chs 318 #define floatx_round_to_int float64_round_to_int 319 #define floatx_compare float64_compare 320 #define floatx_compare_quiet float64_compare_quiet 321 #endif 322 422 323 extern CPU86_LDouble sin(CPU86_LDouble x); 423 324 extern CPU86_LDouble cos(CPU86_LDouble x); … … 429 330 extern CPU86_LDouble floor(CPU86_LDouble x); 430 331 extern CPU86_LDouble ceil(CPU86_LDouble x); 431 extern CPU86_LDouble rint(CPU86_LDouble x);432 332 433 333 #define RC_MASK 0xc00 … … 438 338 439 339 #define MAXTAN 9223372036854775808.0 440 441 #ifdef __arm__442 /* we have no way to do correct rounding - a FPU emulator is needed */443 #define FE_DOWNWARD FE_TONEAREST444 #define FE_UPWARD FE_TONEAREST445 #define FE_TOWARDZERO FE_TONEAREST446 #endif447 340 448 341 #ifdef USE_X86LDOUBLE … … 633 526 float approx_rsqrt(float a); 634 527 float approx_rcp(float a); 635 double 
helper_sqrt(double a); 636 int fpu_isnan(double a); 528 void update_fp_status(void); 529 void helper_hlt(void); 530 void helper_monitor(void); 531 void helper_mwait(void); 637 532 638 533 extern const uint8_t parity_table[256]; -
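The net effect of the exec.h rewrite is that every float-to-int conversion now goes through softfloat with env->fp_status, replacing the old host-libm lrint/llrint/fesetround path, so guest rounding no longer depends on the host FPU mode; USE_X86LDOUBLE merely selects the 80-bit or 64-bit flavour behind the same floatx_* names. A minimal sketch of the pattern, assuming the softfloat headers and CPUX86State are in scope; the wrapper name is invented:

    /* Sketch: conversions obey the rounding mode held in env->fp_status,
       which update_fp_status() derives from the guest FPU control word,
       rather than whatever mode the host FPU happens to be in. */
    static int32_t guest_float_to_int32(CPUX86State *env, float64 v)
    {
        return float64_to_int32(v, &env->fp_status);
    }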
trunk/src/recompiler/target-i386/helper2.c
r1 r2422 23 23 #include <string.h> 24 24 #include <inttypes.h> 25 #ifndef VBOX 25 26 #include <signal.h> 26 27 #include <assert.h> 28 #else 29 # include <VBox/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */ 30 #endif 27 31 28 32 #include "cpu.h" … … 36 40 #include <linux/version.h> 37 41 38 _syscall3(int, modify_ldt, int, func, void *, ptr, unsigned long, bytecount) 42 int modify_ldt(int func, void *ptr, unsigned long bytecount) 43 { 44 return syscall(__NR_modify_ldt, func, ptr, bytecount); 45 } 39 46 40 47 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66) … … 53 60 static int inited; 54 61 55 cpu_exec_init();56 57 62 #ifndef VBOX 58 env = malloc(sizeof(CPUX86State));63 env = qemu_mallocz(sizeof(CPUX86State)); 59 64 if (!env) 60 65 return NULL; 61 memset(env, 0, sizeof(CPUX86State));62 66 #endif /* !VBOX */ 67 cpu_exec_init(env); 68 63 69 /* init various static tables */ 64 70 if (!inited) { … … 111 117 #endif 112 118 #endif 119 env->cpuid_level = 2; 113 120 env->cpuid_version = (family << 8) | (model << 4) | stepping; 114 121 env->cpuid_features = (CPUID_FP87 | CPUID_DE | CPUID_PSE | 115 122 CPUID_TSC | CPUID_MSR | CPUID_MCE | 116 CPUID_CX8 | CPUID_PGE | CPUID_CMOV); 117 env->cpuid_ext_features = 0; 118 123 CPUID_CX8 | CPUID_PGE | CPUID_CMOV | 124 CPUID_PAT); 125 env->pat = 0x0007040600070406ULL; 126 env->cpuid_ext_features = CPUID_EXT_SSE3; 119 127 env->cpuid_features |= CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | CPUID_PAE | CPUID_SEP; 128 env->cpuid_features |= CPUID_APIC; 129 env->cpuid_xlevel = 0; 130 { 131 const char *model_id = "QEMU Virtual CPU version " QEMU_VERSION; 132 int c, len, i; 133 len = strlen(model_id); 134 for(i = 0; i < 48; i++) { 135 if (i >= len) 136 c = '\0'; 137 else 138 c = model_id[i]; 139 env->cpuid_model[i >> 2] |= c << (8 * (i & 3)); 140 } 141 } 120 142 #ifdef TARGET_X86_64 121 143 /* currently not enabled for std i386 because not fully tested */ 122 env->cpuid_features |= CPUID_APIC; 144 env->cpuid_ext2_features = (env->cpuid_features & 0x0183F3FF); 145 env->cpuid_ext2_features |= CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX; 146 env->cpuid_xlevel = 0x80000008; 147 148 /* these features are needed for Win64 and aren't fully implemented */ 149 env->cpuid_features |= CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA; 150 /* this feature is needed for Solaris and isn't fully implemented */ 151 env->cpuid_features |= CPUID_PSE36; 123 152 #endif 124 153 } 125 154 #endif /* VBOX */ 126 cpu_single_env = env;127 155 cpu_reset(env); 156 #ifdef USE_KQEMU 157 kqemu_init(env); 158 #endif 128 159 return env; 129 160 } … … 146 177 cpu_x86_update_cr0(env, 0x60000010); 147 178 env->a20_mask = 0xffffffff; 148 179 env->smbase = 0x30000; 180 149 181 env->idt.limit = 0xffff; 150 182 env->gdt.limit = 0xffff; … … 243 275 int flags) 244 276 { 245 int eflags, i ;277 int eflags, i, nb; 246 278 char cc_op_name[32]; 247 279 static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" }; … … 251 283 if (env->hflags & HF_CS64_MASK) { 252 284 cpu_fprintf(f, 253 "RAX=%016 llx RBX=%016llx RCX=%016llx RDX=%016llx\n"254 "RSI=%016 llx RDI=%016llx RBP=%016llx RSP=%016llx\n"255 "R8 =%016 llx R9 =%016llx R10=%016llx R11=%016llx\n"256 "R12=%016 llx R13=%016llx R14=%016llx R15=%016llx\n"257 "RIP=%016 llx RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d\n",285 "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n" 286 "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n" 287 "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n" 
288 "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n" 289 "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n", 258 290 env->regs[R_EAX], 259 291 env->regs[R_EBX], … … 282 314 env->hflags & HF_CPL_MASK, 283 315 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1, 284 (env->a20_mask >> 20) & 1); 316 (env->a20_mask >> 20) & 1, 317 (env->hflags >> HF_SMM_SHIFT) & 1, 318 (env->hflags >> HF_HALTED_SHIFT) & 1); 285 319 } else 286 320 #endif … … 288 322 cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n" 289 323 "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n" 290 "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d\n",324 "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n", 291 325 (uint32_t)env->regs[R_EAX], 292 326 (uint32_t)env->regs[R_EBX], … … 307 341 env->hflags & HF_CPL_MASK, 308 342 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1, 309 (env->a20_mask >> 20) & 1); 343 (env->a20_mask >> 20) & 1, 344 (env->hflags >> HF_SMM_SHIFT) & 1, 345 (env->hflags >> HF_HALTED_SHIFT) & 1); 310 346 } 311 347 … … 314 350 for(i = 0; i < 6; i++) { 315 351 SegmentCache *sc = &env->segs[i]; 316 cpu_fprintf(f, "%s =%04x %016 llx%08x %08x\n",352 cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n", 317 353 seg_name[i], 318 354 sc->selector, … … 321 357 sc->flags); 322 358 } 323 cpu_fprintf(f, "LDT=%04x %016 llx%08x %08x\n",359 cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n", 324 360 env->ldt.selector, 325 361 env->ldt.base, 326 362 env->ldt.limit, 327 363 env->ldt.flags); 328 cpu_fprintf(f, "TR =%04x %016 llx%08x %08x\n",364 cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n", 329 365 env->tr.selector, 330 366 env->tr.base, 331 367 env->tr.limit, 332 368 env->tr.flags); 333 cpu_fprintf(f, "GDT= %016 llx%08x\n",369 cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n", 334 370 env->gdt.base, env->gdt.limit); 335 cpu_fprintf(f, "IDT= %016 llx%08x\n",371 cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n", 336 372 env->idt.base, env->idt.limit); 337 cpu_fprintf(f, "CR0=%08x CR2=%016 llx CR3=%016llxCR4=%08x\n",373 cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n", 338 374 (uint32_t)env->cr[0], 339 375 env->cr[2], … … 374 410 if (flags & X86_DUMP_CCOP) { 375 411 if ((unsigned)env->cc_op < CC_OP_NB) 376 snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);412 qemu_snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]); 377 413 else 378 snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);414 qemu_snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op); 379 415 #ifdef TARGET_X86_64 380 416 if (env->hflags & HF_CS64_MASK) { 381 cpu_fprintf(f, "CCS=%016 llx CCD=%016llxCCO=%-8s\n",417 cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n", 382 418 env->cc_src, env->cc_dst, 383 419 cc_op_name); … … 391 427 } 392 428 if (flags & X86_DUMP_FPU) { 393 cpu_fprintf(f, "ST0=%f ST1=%f ST2=%f ST3=%f\n", 394 (double)env->fpregs[0].d, 395 (double)env->fpregs[1].d, 396 (double)env->fpregs[2].d, 397 (double)env->fpregs[3].d); 398 cpu_fprintf(f, "ST4=%f ST5=%f ST6=%f ST7=%f\n", 399 (double)env->fpregs[4].d, 400 (double)env->fpregs[5].d, 401 (double)env->fpregs[7].d, 402 (double)env->fpregs[8].d); 429 int fptag; 430 fptag = 0; 431 for(i = 0; i < 8; i++) { 432 fptag |= ((!env->fptags[i]) << i); 433 } 434 cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n", 435 env->fpuc, 436 (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11, 437 env->fpstt, 438 fptag, 439 env->mxcsr); 440 for(i=0;i<8;i++) { 441 #if 
defined(USE_X86LDOUBLE) 442 union { 443 long double d; 444 struct { 445 uint64_t lower; 446 uint16_t upper; 447 } l; 448 } tmp; 449 tmp.d = env->fpregs[i].d; 450 cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x", 451 i, tmp.l.lower, tmp.l.upper); 452 #else 453 cpu_fprintf(f, "FPR%d=%016" PRIx64, 454 i, env->fpregs[i].mmx.q); 455 #endif 456 if ((i & 1) == 1) 457 cpu_fprintf(f, "\n"); 458 else 459 cpu_fprintf(f, " "); 460 } 461 if (env->hflags & HF_CS64_MASK) 462 nb = 16; 463 else 464 nb = 8; 465 for(i=0;i<nb;i++) { 466 cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x", 467 i, 468 env->xmm_regs[i].XMM_L(3), 469 env->xmm_regs[i].XMM_L(2), 470 env->xmm_regs[i].XMM_L(1), 471 env->xmm_regs[i].XMM_L(0)); 472 if ((i & 1) == 1) 473 cpu_fprintf(f, "\n"); 474 else 475 cpu_fprintf(f, " "); 476 } 403 477 } 404 478 } … … 467 541 #ifdef VBOX 468 542 remR3ChangeCpuMode(env); 469 #endif 543 #endif 470 544 } 471 545 … … 503 577 #ifdef VBOX 504 578 remR3ChangeCpuMode(env); 505 #endif 579 #endif 506 580 } 507 581 508 582 /* XXX: also flush 4MB pages */ 509 void cpu_x86_flush_tlb(CPUX86State *env, uint32_taddr)583 void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr) 510 584 { 511 585 #if defined(DEBUG) && defined(VBOX) 512 586 uint32_t pde; 513 uint8_t *pde_ptr;514 587 515 588 /* page directory entry */ 516 pde _ptr = remR3GCPhys2HCVirt(env, (((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & env->a20_mask));517 pde = ldl_raw(pde_ptr); 589 pde = remR3PhysReadU32(((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & env->a20_mask); 590 518 591 /* if PSE bit is set, then we use a 4MB page */ 519 592 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) { … … 534 607 env->error_code = (is_write << PG_ERROR_W_BIT); 535 608 env->error_code |= PG_ERROR_U_MASK; 609 env->exception_index = EXCP0E_PAGE; 536 610 return 1; 537 611 } … … 543 617 544 618 #else 619 620 #define PHYS_ADDR_MASK 0xfffff000 545 621 546 622 /* return value: … … 551 627 */ 552 628 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr, 553 int is_write, int is_user, int is_softmmu) 554 { 629 int is_write1, int is_user, int is_softmmu) 630 { 631 uint64_t ptep, pte; 555 632 uint32_t pdpe_addr, pde_addr, pte_addr; 556 uint32_t pde, pte, ptep, pdpe; 557 int error_code, is_dirty, prot, page_size, ret; 633 int error_code, is_dirty, prot, page_size, ret, is_write; 558 634 unsigned long paddr, page_offset; 559 635 target_ulong vaddr, virt_addr; … … 561 637 #if defined(DEBUG_MMU) 562 638 printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n", 563 addr, is_write , is_user, env->eip);564 #endif 565 is_write &=1;639 addr, is_write1, is_user, env->eip); 640 #endif 641 is_write = is_write1 & 1; 566 642 567 643 if (!(env->cr[0] & CR0_PG_MASK)) { 568 644 pte = addr; 569 645 virt_addr = addr & TARGET_PAGE_MASK; 570 prot = PAGE_READ | PAGE_WRITE ;646 prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 571 647 page_size = 4096; 572 648 goto do_mapping; … … 574 650 575 651 if (env->cr[4] & CR4_PAE_MASK) { 652 uint64_t pde, pdpe; 653 576 654 /* XXX: we only use 32 bit physical addresses */ 577 655 #ifdef TARGET_X86_64 578 656 if (env->hflags & HF_LMA_MASK) { 579 uint32_t pml4e_addr, pml4e; 657 uint32_t pml4e_addr; 658 uint64_t pml4e; 580 659 int32_t sext; 581 660 582 /* XXX: handle user + rw rights */583 /* XXX: handle NX flag */584 661 /* test virtual address sign extension */ 585 662 sext = (int64_t)addr >> 47; 586 663 if (sext != 0 && sext != -1) { 587 error_code = 0; 588 goto do_fault; 664 env->error_code = 0; 665 env->exception_index = EXCP0D_GPF; 666 
return 1; 589 667 } 590 668 591 669 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) & 592 670 env->a20_mask; 593 pml4e = ld l_phys(pml4e_addr);671 pml4e = ldq_phys(pml4e_addr); 594 672 if (!(pml4e & PG_PRESENT_MASK)) { 595 673 error_code = 0; 596 674 goto do_fault; 597 675 } 676 if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) { 677 error_code = PG_ERROR_RSVD_MASK; 678 goto do_fault; 679 } 598 680 if (!(pml4e & PG_ACCESSED_MASK)) { 599 681 pml4e |= PG_ACCESSED_MASK; 600 682 stl_phys_notdirty(pml4e_addr, pml4e); 601 683 } 602 603 pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &684 ptep = pml4e ^ PG_NX_MASK; 685 pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) & 604 686 env->a20_mask; 605 pdpe = ld l_phys(pdpe_addr);687 pdpe = ldq_phys(pdpe_addr); 606 688 if (!(pdpe & PG_PRESENT_MASK)) { 607 689 error_code = 0; 608 690 goto do_fault; 609 691 } 692 if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) { 693 error_code = PG_ERROR_RSVD_MASK; 694 goto do_fault; 695 } 696 ptep &= pdpe ^ PG_NX_MASK; 610 697 if (!(pdpe & PG_ACCESSED_MASK)) { 611 698 pdpe |= PG_ACCESSED_MASK; 612 699 stl_phys_notdirty(pdpe_addr, pdpe); 613 700 } 614 } else 701 } else 615 702 #endif 616 703 { 704 /* XXX: load them when cr3 is loaded ? */ 617 705 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) & 618 706 env->a20_mask; 619 pdpe = ld l_phys(pdpe_addr);707 pdpe = ldq_phys(pdpe_addr); 620 708 if (!(pdpe & PG_PRESENT_MASK)) { 621 709 error_code = 0; 622 710 goto do_fault; 623 711 } 624 } 625 626 pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) & 712 ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK; 713 } 714 715 pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) & 627 716 env->a20_mask; 628 pde = ld l_phys(pde_addr);717 pde = ldq_phys(pde_addr); 629 718 if (!(pde & PG_PRESENT_MASK)) { 630 719 error_code = 0; 631 720 goto do_fault; 632 721 } 722 if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) { 723 error_code = PG_ERROR_RSVD_MASK; 724 goto do_fault; 725 } 726 ptep &= pde ^ PG_NX_MASK; 633 727 if (pde & PG_PSE_MASK) { 634 728 /* 2 MB page */ 635 729 page_size = 2048 * 1024; 636 goto handle_big_page; 730 ptep ^= PG_NX_MASK; 731 if ((ptep & PG_NX_MASK) && is_write1 == 2) 732 goto do_fault_protect; 733 if (is_user) { 734 if (!(ptep & PG_USER_MASK)) 735 goto do_fault_protect; 736 if (is_write && !(ptep & PG_RW_MASK)) 737 goto do_fault_protect; 738 } else { 739 if ((env->cr[0] & CR0_WP_MASK) && 740 is_write && !(ptep & PG_RW_MASK)) 741 goto do_fault_protect; 742 } 743 is_dirty = is_write && !(pde & PG_DIRTY_MASK); 744 if (!(pde & PG_ACCESSED_MASK) || is_dirty) { 745 pde |= PG_ACCESSED_MASK; 746 if (is_dirty) 747 pde |= PG_DIRTY_MASK; 748 stl_phys_notdirty(pde_addr, pde); 749 } 750 /* align to page_size */ 751 pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff); 752 virt_addr = addr & ~(page_size - 1); 637 753 } else { 638 754 /* 4 KB page */ … … 641 757 stl_phys_notdirty(pde_addr, pde); 642 758 } 643 pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &759 pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) & 644 760 env->a20_mask; 645 goto handle_4k_page; 761 pte = ldq_phys(pte_addr); 762 if (!(pte & PG_PRESENT_MASK)) { 763 error_code = 0; 764 goto do_fault; 765 } 766 if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) { 767 error_code = PG_ERROR_RSVD_MASK; 768 goto do_fault; 769 } 770 /* combine pde and pte nx, user and rw protections */ 771 ptep &= pte ^ PG_NX_MASK; 772 ptep ^= 
PG_NX_MASK; 773 if ((ptep & PG_NX_MASK) && is_write1 == 2) 774 goto do_fault_protect; 775 if (is_user) { 776 if (!(ptep & PG_USER_MASK)) 777 goto do_fault_protect; 778 if (is_write && !(ptep & PG_RW_MASK)) 779 goto do_fault_protect; 780 } else { 781 if ((env->cr[0] & CR0_WP_MASK) && 782 is_write && !(ptep & PG_RW_MASK)) 783 goto do_fault_protect; 784 } 785 is_dirty = is_write && !(pte & PG_DIRTY_MASK); 786 if (!(pte & PG_ACCESSED_MASK) || is_dirty) { 787 pte |= PG_ACCESSED_MASK; 788 if (is_dirty) 789 pte |= PG_DIRTY_MASK; 790 stl_phys_notdirty(pte_addr, pte); 791 } 792 page_size = 4096; 793 virt_addr = addr & ~0xfff; 794 pte = pte & (PHYS_ADDR_MASK | 0xfff); 646 795 } 647 796 } else { 797 uint32_t pde; 798 648 799 /* page directory entry */ 649 800 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & … … 657 808 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) { 658 809 page_size = 4096 * 1024; 659 handle_big_page:660 810 if (is_user) { 661 811 if (!(pde & PG_USER_MASK)) … … 688 838 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & 689 839 env->a20_mask; 690 handle_4k_page:691 840 pte = ldl_phys(pte_addr); 692 841 if (!(pte & PG_PRESENT_MASK)) { … … 716 865 virt_addr = addr & ~0xfff; 717 866 } 718 719 /* the page can be put in the TLB */ 720 prot = PAGE_READ; 721 if (pte & PG_DIRTY_MASK) { 722 /* only set write access if already dirty... otherwise wait 723 for dirty access */ 724 if (is_user) { 725 if (ptep & PG_RW_MASK) 726 prot |= PAGE_WRITE; 727 } else { 728 if (!(env->cr[0] & CR0_WP_MASK) || 729 (ptep & PG_RW_MASK)) 730 prot |= PAGE_WRITE; 731 } 867 } 868 /* the page can be put in the TLB */ 869 prot = PAGE_READ; 870 if (!(ptep & PG_NX_MASK)) 871 prot |= PAGE_EXEC; 872 if (pte & PG_DIRTY_MASK) { 873 /* only set write access if already dirty... otherwise wait 874 for dirty access */ 875 if (is_user) { 876 if (ptep & PG_RW_MASK) 877 prot |= PAGE_WRITE; 878 } else { 879 if (!(env->cr[0] & CR0_WP_MASK) || 880 (ptep & PG_RW_MASK)) 881 prot |= PAGE_WRITE; 732 882 } 733 883 } … … 741 891 vaddr = virt_addr + page_offset; 742 892 743 ret = tlb_set_page (env, vaddr, paddr, prot, is_user, is_softmmu);893 ret = tlb_set_page_exec(env, vaddr, paddr, prot, is_user, is_softmmu); 744 894 return ret; 745 895 do_fault_protect: … … 747 897 do_fault: 748 898 env->cr[2] = addr; 749 e nv->error_code = (is_write << PG_ERROR_W_BIT) | error_code;899 error_code |= (is_write << PG_ERROR_W_BIT); 750 900 if (is_user) 751 env->error_code |= PG_ERROR_U_MASK; 901 error_code |= PG_ERROR_U_MASK; 902 if (is_write1 == 2 && 903 (env->efer & MSR_EFER_NXE) && 904 (env->cr[4] & CR4_PAE_MASK)) 905 error_code |= PG_ERROR_I_D_MASK; 906 env->error_code = error_code; 907 env->exception_index = EXCP0E_PAGE; 752 908 return 1; 753 909 } -
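Two details of the helper2.c hunk deserve a closer look. First, the brand-string loop packs 48 ASCII bytes into twelve 32-bit cpuid_model words, byte i landing in word i>>2 at bit offset 8*(i&3). A self-contained restatement, with invented function and parameter names:

    #include <stdint.h>
    #include <string.h>

    /* Sketch of the CPUID model-string packing used above:
       little-endian within each word, NUL-padded to 48 bytes. */
    static void pack_model_id(uint32_t model[12], const char *id)
    {
        size_t len = strlen(id);
        int i;

        memset(model, 0, 12 * sizeof(uint32_t));
        for (i = 0; i < 48; i++) {
            uint32_t c = (i < (int)len) ? (uint8_t)id[i] : 0;
            model[i >> 2] |= c << (8 * (i & 3));
        }
    }

Second, the NX-aware permission accumulation in cpu_x86_handle_mmu_fault(): since the architectural NX bit means "no execute", each level is XORed with PG_NX_MASK to flip it into a "may execute" bit, all levels are ANDed together exactly like the USER and RW bits, and a final XOR restores the architectural polarity. A reduced demonstration with a 32-bit stand-in constant (the real PG_NX_MASK is bit 63):

    #define DEMO_NX (1u << 31)            /* stands in for PG_NX_MASK */

    /* Sketch of the ptep accumulation in the 4-level walk above. */
    static uint32_t combine_walk(uint32_t pml4e, uint32_t pdpe,
                                 uint32_t pde, uint32_t pte)
    {
        uint32_t ptep = pml4e ^ DEMO_NX;  /* 1 now means "may execute" */
        ptep &= pdpe ^ DEMO_NX;
        ptep &= pde  ^ DEMO_NX;
        ptep &= pte  ^ DEMO_NX;
        return ptep ^ DEMO_NX;            /* back to "1 means no-execute" */
    }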
trunk/src/recompiler/target-i386/op.c
r1514 r2422 226 226 void OPPROTO op_bswapq_T0(void) 227 227 { 228 T0 = bswap64(T0);228 helper_bswapq_T0(); 229 229 } 230 230 #endif … … 492 492 uint32_t idx = (PARAM1 - offsetof(CPUX86State,segs[0].base)) / sizeof(SegmentCache); 493 493 494 if (env->segs[idx].newselector && !(env->eflags & VM_MASK)) 495 { 494 if (env->segs[idx].newselector && !(env->eflags & VM_MASK)) { 496 495 sync_seg(env, idx, env->segs[idx].newselector); 497 496 } … … 499 498 if ( (env->cr[0] & (CR0_PE_MASK|CR0_PG_MASK)) == (CR0_PE_MASK|CR0_PG_MASK) 500 499 && !(env->eflags & VM_MASK) 501 && env->segs[idx].selector == 0 502 ) 503 { 500 && env->segs[idx].selector == 0) { 504 501 raise_exception(EXCP0D_GPF); 505 502 } 506 503 A0 = (uint32_t)env->segs[idx].base; 507 504 FORCE_RET(); 508 #else 505 #else /* !VBOX */ 509 506 A0 = (uint32_t)*(target_ulong *)((char *)env + PARAM1); 510 #endif 507 #endif /* !VBOX */ 511 508 } 512 509 … … 516 513 uint32_t idx = (PARAM1 - offsetof(CPUX86State,segs[0].base)) / sizeof(SegmentCache); 517 514 518 if (env->segs[idx].newselector && !(env->eflags & VM_MASK)) 519 { 515 if (env->segs[idx].newselector && !(env->eflags & VM_MASK)) { 520 516 sync_seg(env, idx, env->segs[idx].newselector); 521 517 } … … 523 519 if ( (env->cr[0] & (CR0_PE_MASK|CR0_PG_MASK)) == (CR0_PE_MASK|CR0_PG_MASK) 524 520 && !(env->eflags & VM_MASK) 525 && env->segs[idx].selector == 0 526 ) 527 { 521 && env->segs[idx].selector == 0) { 528 522 raise_exception(EXCP0D_GPF); 529 523 } 530 524 A0 = (uint32_t)(A0 + env->segs[idx].base); 531 525 FORCE_RET(); 532 #else 526 #else /* !VBOX */ 533 527 A0 = (uint32_t)(A0 + *(target_ulong *)((char *)env + PARAM1)); 534 #endif 528 #endif /* !VBOX */ 535 529 } 536 530 … … 600 594 601 595 if (env->segs[idx].newselector && !(env->eflags & VM_MASK)) 602 {603 596 sync_seg(env, idx, env->segs[idx].newselector); 604 }605 597 A0 = (target_ulong)env->segs[idx].base; 606 #else 598 #else /* !VBOX */ 607 599 A0 = *(target_ulong *)((char *)env + PARAM1); 608 #endif 600 #endif /* !VBOX */ 609 601 } 610 602 … … 615 607 616 608 if (env->segs[idx].newselector && !(env->eflags & VM_MASK)) 617 {618 609 sync_seg(env, idx, env->segs[idx].newselector); 619 }620 610 A0 += (target_ulong)env->segs[idx].base; 621 #else 611 #else /* !VBOX */ 622 612 A0 += *(target_ulong *)((char *)env + PARAM1); 623 #endif 613 #endif /* !VBOX */ 624 614 } 625 615 … … 739 729 } 740 730 741 #ifdef VBOX 731 void OPPROTO op_rsm(void) 732 { 733 helper_rsm(); 734 } 735 736 #ifndef VBOX 737 #if 0 742 738 /* vm86plus instructions */ 739 void OPPROTO op_cli_vm(void) 740 { 741 env->eflags &= ~VIF_MASK; 742 } 743 744 void OPPROTO op_sti_vm(void) 745 { 746 env->eflags |= VIF_MASK; 747 if (env->eflags & VIP_MASK) { 748 EIP = PARAM1; 749 raise_exception(EXCP0D_GPF); 750 } 751 FORCE_RET(); 752 } 753 #endif 754 755 #else /* VBOX */ 743 756 void OPPROTO op_cli_vme(void) 744 757 { … … 755 768 FORCE_RET(); 756 769 } 757 #endif 770 #endif /* VBOX */ 758 771 759 772 void OPPROTO op_boundw(void) … … 792 805 793 806 #ifdef VBOX 794 795 /** @todo Ugly: Exit current TB to process an external interrupt request */796 #define CPU_INTERRUPT_EXTERNAL_EXIT 0x0200 /* also defined in cpu-all.h!! */797 #define CPU_INTERRUPT_EXTERNAL_HARD 0x0400 /* also defined in cpu-all.h!! */798 #define CPU_INTERRUPT_EXTERNAL_TIMER 0x0800 /* also defined in cpu-all.h!! */799 #define CPU_INTERRUPT_EXTERNAL_DMA 0x1000 /* also defined in cpu-all.h!! 
*/800 801 807 void OPPROTO op_check_external_event(void) 802 808 { … … 866 872 void OPPROTO op_movswl_EAX_AX(void) 867 873 { 868 EAX = ( int16_t)EAX;874 EAX = (uint32_t)((int16_t)EAX); 869 875 } 870 876 … … 888 894 void OPPROTO op_movslq_EDX_EAX(void) 889 895 { 890 EDX = ( int32_t)EAX >> 31;896 EDX = (uint32_t)((int32_t)EAX >> 31); 891 897 } 892 898 … … 956 962 void op_addl_A0_SS(void) 957 963 { 958 A0 += (long)env->segs[R_SS].base;964 A0 = (uint32_t)(A0 + env->segs[R_SS].base); 959 965 } 960 966 … … 1000 1006 1001 1007 #ifdef TARGET_X86_64 1008 void op_subq_A0_2(void) 1009 { 1010 A0 -= 2; 1011 } 1012 1002 1013 void op_subq_A0_8(void) 1003 1014 { … … 1030 1041 helper_enter_level(PARAM1, PARAM2); 1031 1042 } 1043 1044 #ifdef TARGET_X86_64 1045 void OPPROTO op_enter64_level(void) 1046 { 1047 helper_enter64_level(PARAM1, PARAM2); 1048 } 1049 #endif 1032 1050 1033 1051 void OPPROTO op_sysenter(void) … … 1515 1533 } 1516 1534 1535 #ifndef VBOX 1536 #if 0 1517 1537 /* vm86plus version */ 1518 #ifdef VBOX 1519 /* IOPL != 3, CR4.VME=1 */ 1520 void OPPROTO op_movw_eflags_T0_vme(void) 1521 { 1522 unsigned int new_eflags = T0; 1523 1524 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */ 1525 /* if TF will be set -> #GP */ 1526 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK)) 1527 || (new_eflags & TF_MASK)) 1528 { 1529 raise_exception(EXCP0D_GPF); 1538 void OPPROTO op_movw_eflags_T0_vm(void) 1539 { 1540 int eflags; 1541 eflags = T0; 1542 CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); 1543 DF = 1 - (2 * ((eflags >> 10) & 1)); 1544 /* we also update some system flags as in user mode */ 1545 env->eflags = (env->eflags & ~(FL_UPDATE_MASK16 | VIF_MASK)) | 1546 (eflags & FL_UPDATE_MASK16); 1547 if (eflags & IF_MASK) { 1548 env->eflags |= VIF_MASK; 1549 if (env->eflags & VIP_MASK) { 1550 EIP = PARAM1; 1551 raise_exception(EXCP0D_GPF); 1552 } 1530 1553 } 1531 else 1532 { 1533 load_eflags(new_eflags, (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff); 1534 1535 if (new_eflags & IF_MASK) 1536 env->eflags |= VIF_MASK; 1537 else 1538 env->eflags &= ~VIF_MASK; 1539 } 1540 1541 FORCE_RET(); 1542 } 1543 #endif 1544 1545 #if 0 1554 FORCE_RET(); 1555 } 1556 1546 1557 void OPPROTO op_movl_eflags_T0_vm(void) 1547 1558 { … … 1564 1575 #endif 1565 1576 1577 #else /* VBOX */ 1578 /* IOPL != 3, CR4.VME=1 */ 1579 void OPPROTO op_movw_eflags_T0_vme(void) 1580 { 1581 unsigned int new_eflags = T0; 1582 1583 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */ 1584 /* if TF will be set -> #GP */ 1585 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK)) 1586 || (new_eflags & TF_MASK)) { 1587 raise_exception(EXCP0D_GPF); 1588 } else { 1589 load_eflags(new_eflags, (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff); 1590 1591 if (new_eflags & IF_MASK) { 1592 env->eflags |= VIF_MASK; 1593 } else { 1594 env->eflags &= ~VIF_MASK; 1595 } 1596 } 1597 1598 FORCE_RET(); 1599 } 1600 #endif /* VBOX */ 1601 1566 1602 /* XXX: compute only O flag */ 1567 1603 void OPPROTO op_movb_eflags_T0(void) … … 1582 1618 1583 1619 /* vm86plus version */ 1584 #ifdef VBOX 1620 #ifdef VBOX /* #if 0 */ 1585 1621 void OPPROTO op_movl_T0_eflags_vme(void) 1586 1622 { … … 1593 1629 T0 = eflags; 1594 1630 } 1595 #endif 1631 #endif /* VBOX / 0 */ 1596 1632 1597 1633 void OPPROTO op_cld(void) … … 1966 2002 1967 2003 d = ST0; 1968 val = lrint(d);2004 val = floatx_to_int32(d, &env->fp_status); 1969 2005 if (val != (int16_t)val) 1970 2006 val = -32768; … … 1983 2019 1984 2020 d = 
ST0; 1985 val = lrint(d);2021 val = floatx_to_int32(d, &env->fp_status); 1986 2022 stl(A0, val); 1987 2023 FORCE_RET(); … … 1998 2034 1999 2035 d = ST0; 2000 val = llrint(d); 2036 val = floatx_to_int64(d, &env->fp_status); 2037 stq(A0, val); 2038 FORCE_RET(); 2039 } 2040 2041 void OPPROTO op_fistt_ST0_A0(void) 2042 { 2043 #if defined(__sparc__) && !defined(__sparc_v9__) 2044 register CPU86_LDouble d asm("o0"); 2045 #else 2046 CPU86_LDouble d; 2047 #endif 2048 int val; 2049 2050 d = ST0; 2051 val = floatx_to_int32_round_to_zero(d, &env->fp_status); 2052 if (val != (int16_t)val) 2053 val = -32768; 2054 stw(A0, val); 2055 FORCE_RET(); 2056 } 2057 2058 void OPPROTO op_fisttl_ST0_A0(void) 2059 { 2060 #if defined(__sparc__) && !defined(__sparc_v9__) 2061 register CPU86_LDouble d asm("o0"); 2062 #else 2063 CPU86_LDouble d; 2064 #endif 2065 int val; 2066 2067 d = ST0; 2068 val = floatx_to_int32_round_to_zero(d, &env->fp_status); 2069 stl(A0, val); 2070 FORCE_RET(); 2071 } 2072 2073 void OPPROTO op_fisttll_ST0_A0(void) 2074 { 2075 #if defined(__sparc__) && !defined(__sparc_v9__) 2076 register CPU86_LDouble d asm("o0"); 2077 #else 2078 CPU86_LDouble d; 2079 #endif 2080 int64_t val; 2081 2082 d = ST0; 2083 val = floatx_to_int64_round_to_zero(d, &env->fp_status); 2001 2084 stq(A0, val); 2002 2085 FORCE_RET(); … … 2072 2155 /* FPU operations */ 2073 2156 2074 /* XXX: handle nans */ 2157 const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500}; 2158 2075 2159 void OPPROTO op_fcom_ST0_FT0(void) 2076 2160 { 2077 env->fpus &= (~0x4500); /* (C3,C2,C0) <-- 000 */ 2078 if (ST0 < FT0) 2079 env->fpus |= 0x100; /* (C3,C2,C0) <-- 001 */ 2080 else if (ST0 == FT0) 2081 env->fpus |= 0x4000; /* (C3,C2,C0) <-- 100 */ 2082 FORCE_RET(); 2083 } 2084 2085 /* XXX: handle nans */ 2161 int ret; 2162 2163 ret = floatx_compare(ST0, FT0, &env->fp_status); 2164 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1]; 2165 FORCE_RET(); 2166 } 2167 2086 2168 void OPPROTO op_fucom_ST0_FT0(void) 2087 2169 { 2088 env->fpus &= (~0x4500); /* (C3,C2,C0) <-- 000 */2089 if (ST0 < FT0) 2090 env->fpus |= 0x100; /* (C3,C2,C0) <-- 001 */2091 e lse if (ST0 == FT0)2092 env->fpus |= 0x4000; /* (C3,C2,C0) <-- 100 */2093 FORCE_RET(); 2094 } 2095 2096 /* XXX: handle nans */ 2170 int ret; 2171 2172 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status); 2173 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret+ 1]; 2174 FORCE_RET(); 2175 } 2176 2177 const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C}; 2178 2097 2179 void OPPROTO op_fcomi_ST0_FT0(void) 2098 2180 { 2099 2181 int eflags; 2182 int ret; 2183 2184 ret = floatx_compare(ST0, FT0, &env->fp_status); 2100 2185 eflags = cc_table[CC_OP].compute_all(); 2101 eflags &= ~(CC_Z | CC_P | CC_C); 2102 if (ST0 < FT0) 2103 eflags |= CC_C; 2104 else if (ST0 == FT0) 2105 eflags |= CC_Z; 2186 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1]; 2106 2187 CC_SRC = eflags; 2107 2188 FORCE_RET(); 2108 2189 } 2109 2190 2110 /* XXX: handle nans */2111 2191 void OPPROTO op_fucomi_ST0_FT0(void) 2112 2192 { 2113 2193 int eflags; 2194 int ret; 2195 2196 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status); 2114 2197 eflags = cc_table[CC_OP].compute_all(); 2115 eflags &= ~(CC_Z | CC_P | CC_C); 2116 if (ST0 < FT0) 2117 eflags |= CC_C; 2118 else if (ST0 == FT0) 2119 eflags |= CC_Z; 2198 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1]; 2120 2199 CC_SRC = eflags; 2121 2200 FORCE_RET(); … … 2201 2280 void OPPROTO op_fchs_ST0(void) 2202 2281 { 2203 ST0 = -ST0;2282 ST0 = 
floatx_chs(ST0); 2204 2283 } 2205 2284 2206 2285 void OPPROTO op_fabs_ST0(void) 2207 2286 { 2208 ST0 = f abs(ST0);2287 ST0 = floatx_abs(ST0); 2209 2288 } 2210 2289 … … 2351 2430 void OPPROTO op_fldcw_A0(void) 2352 2431 { 2353 int rnd_type;2354 2432 env->fpuc = lduw(A0); 2355 /* set rounding mode */ 2356 switch(env->fpuc & RC_MASK) { 2357 default: 2358 case RC_NEAR: 2359 rnd_type = FE_TONEAREST; 2360 break; 2361 case RC_DOWN: 2362 rnd_type = FE_DOWNWARD; 2363 break; 2364 case RC_UP: 2365 rnd_type = FE_UPWARD; 2366 break; 2367 case RC_CHOP: 2368 rnd_type = FE_TOWARDZERO; 2369 break; 2370 } 2371 fesetround(rnd_type); 2433 update_fp_status(); 2372 2434 } 2373 2435 … … 2501 2563 #define SHIFT 1 2502 2564 #include "ops_sse.h" 2565 2566 #ifdef VBOX 2567 /* Instantiate the structure signatures. */ 2568 # define REM_STRUCT_OP 1 2569 # include "../InnoTek/structs.h" 2570 #endif 2571 -
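The compare rewrites in the op.c hunk are more than cleanup: the old < and == sequences silently misclassified NaN operands, while floatx_compare()/floatx_compare_quiet() return -1, 0, 1 or 2 for less, equal, greater and unordered, so ret + 1 indexes a four-entry table and unordered operands now set C3, C2 and C0 (or ZF, PF and CF for FCOMI) as real hardware does. The mapping restated compactly; the _demo names are invented:

    /* x87 status-word bits after FCOM, indexed by softfloat
       compare result + 1: less, equal, greater, unordered. */
    static const int fcom_ccval_demo[4] = { 0x0100, 0x4000, 0x0000, 0x4500 };

    static int fpus_after_fcom(int fpus, int cmp /* -1, 0, 1 or 2 */)
    {
        return (fpus & ~0x4500) | fcom_ccval_demo[cmp + 1];
    }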
trunk/src/recompiler/target-i386/ops_sse.h
r1 r2422 559 559 } 560 560 561 #ifdef TARGET_X86_64 562 void OPPROTO glue(op_movq_mm_T0, SUFFIX) (void) 563 { 564 Reg *d; 565 d = (Reg *)((char *)env + PARAM1); 566 d->Q(0) = T0; 567 #if SHIFT == 1 568 d->Q(1) = 0; 569 #endif 570 } 571 572 void OPPROTO glue(op_movq_T0_mm, SUFFIX) (void) 573 { 574 Reg *s; 575 s = (Reg *)((char *)env + PARAM1); 576 T0 = s->Q(0); 577 } 578 #endif 579 561 580 #if SHIFT == 0 562 581 void OPPROTO glue(op_pshufw, SUFFIX) (void) 563 582 { 583 #if __GCC__ == 3 || defined(__AMD64__) 564 584 Reg r, *d, *s; 565 585 int order; … … 572 592 r.W(3) = s->W((order >> 6) & 3); 573 593 *d = r; 594 #else 595 Reg *s; 596 int order; 597 uint32_t l0, l1; 598 s = (Reg *)((char *)env + PARAM2); 599 order = PARAM3; 600 l0 = s->W(order & 3); 601 l0 |= (uint32_t)s->W((order >> 2) & 3) << 16; 602 l1 = s->W((order >> 4) & 3); 603 l1 |= (uint32_t)s->W((order >> 6) & 3) << 16; 604 605 s = (Reg *)((char *)env + PARAM1); 606 s->_l[0] = l0; 607 s->_l[1] = l1; 608 #endif 574 609 } 575 610 #else … … 655 690 d = (Reg *)((char *)env + PARAM1);\ 656 691 s = (Reg *)((char *)env + PARAM2);\ 657 d->XMM_S(0) = F( d->XMM_S(0), s->XMM_S(0));\658 d->XMM_S(1) = F( d->XMM_S(1), s->XMM_S(1));\659 d->XMM_S(2) = F( d->XMM_S(2), s->XMM_S(2));\660 d->XMM_S(3) = F( d->XMM_S(3), s->XMM_S(3));\692 d->XMM_S(0) = F(32, d->XMM_S(0), s->XMM_S(0));\ 693 d->XMM_S(1) = F(32, d->XMM_S(1), s->XMM_S(1));\ 694 d->XMM_S(2) = F(32, d->XMM_S(2), s->XMM_S(2));\ 695 d->XMM_S(3) = F(32, d->XMM_S(3), s->XMM_S(3));\ 661 696 }\ 662 697 \ … … 666 701 d = (Reg *)((char *)env + PARAM1);\ 667 702 s = (Reg *)((char *)env + PARAM2);\ 668 d->XMM_S(0) = F( d->XMM_S(0), s->XMM_S(0));\703 d->XMM_S(0) = F(32, d->XMM_S(0), s->XMM_S(0));\ 669 704 }\ 670 705 void OPPROTO op_ ## name ## pd (void)\ … … 673 708 d = (Reg *)((char *)env + PARAM1);\ 674 709 s = (Reg *)((char *)env + PARAM2);\ 675 d->XMM_D(0) = F( d->XMM_D(0), s->XMM_D(0));\676 d->XMM_D(1) = F( d->XMM_D(1), s->XMM_D(1));\710 d->XMM_D(0) = F(64, d->XMM_D(0), s->XMM_D(0));\ 711 d->XMM_D(1) = F(64, d->XMM_D(1), s->XMM_D(1));\ 677 712 }\ 678 713 \ … … 682 717 d = (Reg *)((char *)env + PARAM1);\ 683 718 s = (Reg *)((char *)env + PARAM2);\ 684 d->XMM_D(0) = F( d->XMM_D(0), s->XMM_D(0));\685 } 686 687 #define FPU_ADD( a, b) (a) + (b)688 #define FPU_SUB( a, b) (a) - (b)689 #define FPU_MUL( a, b) (a) * (b)690 #define FPU_DIV( a, b) (a) / (b)691 #define FPU_MIN( a, b) (a) < (b) ? (a) : (b)692 #define FPU_MAX( a, b) (a) > (b) ? (a) : (b)693 #define FPU_SQRT( a, b) helper_sqrt(b)719 d->XMM_D(0) = F(64, d->XMM_D(0), s->XMM_D(0));\ 720 } 721 722 #define FPU_ADD(size, a, b) float ## size ## _add(a, b, &env->sse_status) 723 #define FPU_SUB(size, a, b) float ## size ## _sub(a, b, &env->sse_status) 724 #define FPU_MUL(size, a, b) float ## size ## _mul(a, b, &env->sse_status) 725 #define FPU_DIV(size, a, b) float ## size ## _div(a, b, &env->sse_status) 726 #define FPU_MIN(size, a, b) (a) < (b) ? (a) : (b) 727 #define FPU_MAX(size, a, b) (a) > (b) ? 
(a) : (b) 728 #define FPU_SQRT(size, a, b) float ## size ## _sqrt(b, &env->sse_status) 694 729 695 730 SSE_OP_S(add, FPU_ADD) … … 705 740 void OPPROTO op_cvtps2pd(void) 706 741 { 707 float s0, s1;742 float32 s0, s1; 708 743 Reg *d, *s; 709 744 d = (Reg *)((char *)env + PARAM1); … … 711 746 s0 = s->XMM_S(0); 712 747 s1 = s->XMM_S(1); 713 d->XMM_D(0) = float32_to_float64(s0 );714 d->XMM_D(1) = float32_to_float64(s1 );748 d->XMM_D(0) = float32_to_float64(s0, &env->sse_status); 749 d->XMM_D(1) = float32_to_float64(s1, &env->sse_status); 715 750 } 716 751 … … 720 755 d = (Reg *)((char *)env + PARAM1); 721 756 s = (Reg *)((char *)env + PARAM2); 722 d->XMM_S(0) = float64_to_float32(s->XMM_D(0) );723 d->XMM_S(1) = float64_to_float32(s->XMM_D(1) );757 d->XMM_S(0) = float64_to_float32(s->XMM_D(0), &env->sse_status); 758 d->XMM_S(1) = float64_to_float32(s->XMM_D(1), &env->sse_status); 724 759 d->Q(1) = 0; 725 760 } … … 730 765 d = (Reg *)((char *)env + PARAM1); 731 766 s = (Reg *)((char *)env + PARAM2); 732 d->XMM_D(0) = float32_to_float64(s->XMM_S(0) );767 d->XMM_D(0) = float32_to_float64(s->XMM_S(0), &env->sse_status); 733 768 } 734 769 … … 738 773 d = (Reg *)((char *)env + PARAM1); 739 774 s = (Reg *)((char *)env + PARAM2); 740 d->XMM_S(0) = float64_to_float32(s->XMM_D(0) );775 d->XMM_S(0) = float64_to_float32(s->XMM_D(0), &env->sse_status); 741 776 } 742 777 … … 746 781 XMMReg *d = (XMMReg *)((char *)env + PARAM1); 747 782 XMMReg *s = (XMMReg *)((char *)env + PARAM2); 748 d->XMM_S(0) = int32_to_float32(s->XMM_L(0) );749 d->XMM_S(1) = int32_to_float32(s->XMM_L(1) );750 d->XMM_S(2) = int32_to_float32(s->XMM_L(2) );751 d->XMM_S(3) = int32_to_float32(s->XMM_L(3) );783 d->XMM_S(0) = int32_to_float32(s->XMM_L(0), &env->sse_status); 784 d->XMM_S(1) = int32_to_float32(s->XMM_L(1), &env->sse_status); 785 d->XMM_S(2) = int32_to_float32(s->XMM_L(2), &env->sse_status); 786 d->XMM_S(3) = int32_to_float32(s->XMM_L(3), &env->sse_status); 752 787 } 753 788 … … 759 794 l0 = (int32_t)s->XMM_L(0); 760 795 l1 = (int32_t)s->XMM_L(1); 761 d->XMM_D(0) = int32_to_float64(l0 );762 d->XMM_D(1) = int32_to_float64(l1 );796 d->XMM_D(0) = int32_to_float64(l0, &env->sse_status); 797 d->XMM_D(1) = int32_to_float64(l1, &env->sse_status); 763 798 } 764 799 … … 767 802 XMMReg *d = (Reg *)((char *)env + PARAM1); 768 803 MMXReg *s = (MMXReg *)((char *)env + PARAM2); 769 d->XMM_S(0) = int32_to_float32(s->MMX_L(0) );770 d->XMM_S(1) = int32_to_float32(s->MMX_L(1) );804 d->XMM_S(0) = int32_to_float32(s->MMX_L(0), &env->sse_status); 805 d->XMM_S(1) = int32_to_float32(s->MMX_L(1), &env->sse_status); 771 806 } 772 807 … … 775 810 XMMReg *d = (Reg *)((char *)env + PARAM1); 776 811 MMXReg *s = (MMXReg *)((char *)env + PARAM2); 777 d->XMM_D(0) = int32_to_float64(s->MMX_L(0) );778 d->XMM_D(1) = int32_to_float64(s->MMX_L(1) );812 d->XMM_D(0) = int32_to_float64(s->MMX_L(0), &env->sse_status); 813 d->XMM_D(1) = int32_to_float64(s->MMX_L(1), &env->sse_status); 779 814 } 780 815 … … 782 817 { 783 818 XMMReg *d = (Reg *)((char *)env + PARAM1); 784 d->XMM_S(0) = int32_to_float32(T0 );819 d->XMM_S(0) = int32_to_float32(T0, &env->sse_status); 785 820 } 786 821 … … 788 823 { 789 824 XMMReg *d = (Reg *)((char *)env + PARAM1); 790 d->XMM_D(0) = int32_to_float64(T0 );825 d->XMM_D(0) = int32_to_float64(T0, &env->sse_status); 791 826 } 792 827 … … 795 830 { 796 831 XMMReg *d = (Reg *)((char *)env + PARAM1); 797 d->XMM_S(0) = int64_to_float32(T0 );832 d->XMM_S(0) = int64_to_float32(T0, &env->sse_status); 798 833 } 799 834 … … 801 836 { 802 837 XMMReg *d = 
(Reg *)((char *)env + PARAM1); 803 d->XMM_D(0) = int64_to_float64(T0 );838 d->XMM_D(0) = int64_to_float64(T0, &env->sse_status); 804 839 } 805 840 #endif … … 810 845 XMMReg *d = (XMMReg *)((char *)env + PARAM1); 811 846 XMMReg *s = (XMMReg *)((char *)env + PARAM2); 812 d->XMM_L(0) = lrint(s->XMM_S(0));813 d->XMM_L(1) = lrint(s->XMM_S(1));814 d->XMM_L(2) = lrint(s->XMM_S(2));815 d->XMM_L(3) = lrint(s->XMM_S(3));847 d->XMM_L(0) = float32_to_int32(s->XMM_S(0), &env->sse_status); 848 d->XMM_L(1) = float32_to_int32(s->XMM_S(1), &env->sse_status); 849 d->XMM_L(2) = float32_to_int32(s->XMM_S(2), &env->sse_status); 850 d->XMM_L(3) = float32_to_int32(s->XMM_S(3), &env->sse_status); 816 851 } 817 852 … … 820 855 XMMReg *d = (XMMReg *)((char *)env + PARAM1); 821 856 XMMReg *s = (XMMReg *)((char *)env + PARAM2); 822 d->XMM_L(0) = lrint(s->XMM_D(0));823 d->XMM_L(1) = lrint(s->XMM_D(1));857 d->XMM_L(0) = float64_to_int32(s->XMM_D(0), &env->sse_status); 858 d->XMM_L(1) = float64_to_int32(s->XMM_D(1), &env->sse_status); 824 859 d->XMM_Q(1) = 0; 825 860 } … … 829 864 MMXReg *d = (MMXReg *)((char *)env + PARAM1); 830 865 XMMReg *s = (XMMReg *)((char *)env + PARAM2); 831 d->MMX_L(0) = lrint(s->XMM_S(0));832 d->MMX_L(1) = lrint(s->XMM_S(1));866 d->MMX_L(0) = float32_to_int32(s->XMM_S(0), &env->sse_status); 867 d->MMX_L(1) = float32_to_int32(s->XMM_S(1), &env->sse_status); 833 868 } 834 869 … … 837 872 MMXReg *d = (MMXReg *)((char *)env + PARAM1); 838 873 XMMReg *s = (XMMReg *)((char *)env + PARAM2); 839 d->MMX_L(0) = lrint(s->XMM_D(0));840 d->MMX_L(1) = lrint(s->XMM_D(1));874 d->MMX_L(0) = float64_to_int32(s->XMM_D(0), &env->sse_status); 875 d->MMX_L(1) = float64_to_int32(s->XMM_D(1), &env->sse_status); 841 876 } 842 877 … … 844 879 { 845 880 XMMReg *s = (XMMReg *)((char *)env + PARAM1); 846 T0 = (int32_t)lrint(s->XMM_S(0));881 T0 = float32_to_int32(s->XMM_S(0), &env->sse_status); 847 882 } 848 883 … … 850 885 { 851 886 XMMReg *s = (XMMReg *)((char *)env + PARAM1); 852 T0 = (int32_t)lrint(s->XMM_D(0));887 T0 = float64_to_int32(s->XMM_D(0), &env->sse_status); 853 888 } 854 889 … … 857 892 { 858 893 XMMReg *s = (XMMReg *)((char *)env + PARAM1); 859 T0 = llrint(s->XMM_S(0));894 T0 = float32_to_int64(s->XMM_S(0), &env->sse_status); 860 895 } 861 896 … … 863 898 { 864 899 XMMReg *s = (XMMReg *)((char *)env + PARAM1); 865 T0 = llrint(s->XMM_D(0));900 T0 = float64_to_int64(s->XMM_D(0), &env->sse_status); 866 901 } 867 902 #endif … … 872 907 XMMReg *d = (XMMReg *)((char *)env + PARAM1); 873 908 XMMReg *s = (XMMReg *)((char *)env + PARAM2); 874 d->XMM_L(0) = (int32_t)s->XMM_S(0);875 d->XMM_L(1) = (int32_t)s->XMM_S(1);876 d->XMM_L(2) = (int32_t)s->XMM_S(2);877 d->XMM_L(3) = (int32_t)s->XMM_S(3);909 d->XMM_L(0) = float32_to_int32_round_to_zero(s->XMM_S(0), &env->sse_status); 910 d->XMM_L(1) = float32_to_int32_round_to_zero(s->XMM_S(1), &env->sse_status); 911 d->XMM_L(2) = float32_to_int32_round_to_zero(s->XMM_S(2), &env->sse_status); 912 d->XMM_L(3) = float32_to_int32_round_to_zero(s->XMM_S(3), &env->sse_status); 878 913 } 879 914 … … 882 917 XMMReg *d = (XMMReg *)((char *)env + PARAM1); 883 918 XMMReg *s = (XMMReg *)((char *)env + PARAM2); 884 d->XMM_L(0) = (int32_t)s->XMM_D(0);885 d->XMM_L(1) = (int32_t)s->XMM_D(1);919 d->XMM_L(0) = float64_to_int32_round_to_zero(s->XMM_D(0), &env->sse_status); 920 d->XMM_L(1) = float64_to_int32_round_to_zero(s->XMM_D(1), &env->sse_status); 886 921 d->XMM_Q(1) = 0; 887 922 } … … 891 926 MMXReg *d = (MMXReg *)((char *)env + PARAM1); 892 927 XMMReg *s = (XMMReg *)((char *)env + PARAM2); 
893 d->MMX_L(0) = (int32_t)(s->XMM_S(0));894 d->MMX_L(1) = (int32_t)(s->XMM_S(1));928 d->MMX_L(0) = float32_to_int32_round_to_zero(s->XMM_S(0), &env->sse_status); 929 d->MMX_L(1) = float32_to_int32_round_to_zero(s->XMM_S(1), &env->sse_status); 895 930 } 896 931 … … 899 934 MMXReg *d = (MMXReg *)((char *)env + PARAM1); 900 935 XMMReg *s = (XMMReg *)((char *)env + PARAM2); 901 d->MMX_L(0) = (int32_t)(s->XMM_D(0));902 d->MMX_L(1) = (int32_t)(s->XMM_D(1));936 d->MMX_L(0) = float64_to_int32_round_to_zero(s->XMM_D(0), &env->sse_status); 937 d->MMX_L(1) = float64_to_int32_round_to_zero(s->XMM_D(1), &env->sse_status); 903 938 } 904 939 … … 906 941 { 907 942 XMMReg *s = (XMMReg *)((char *)env + PARAM1); 908 T0 = (int32_t)(s->XMM_S(0));943 T0 = float32_to_int32_round_to_zero(s->XMM_S(0), &env->sse_status); 909 944 } 910 945 … … 912 947 { 913 948 XMMReg *s = (XMMReg *)((char *)env + PARAM1); 914 T0 = (int32_t)(s->XMM_D(0));949 T0 = float64_to_int32_round_to_zero(s->XMM_D(0), &env->sse_status); 915 950 } 916 951 … … 919 954 { 920 955 XMMReg *s = (XMMReg *)((char *)env + PARAM1); 921 T0 = (int64_t)(s->XMM_S(0));956 T0 = float32_to_int64_round_to_zero(s->XMM_S(0), &env->sse_status); 922 957 } 923 958 … … 925 960 { 926 961 XMMReg *s = (XMMReg *)((char *)env + PARAM1); 927 T0 = (int64_t)(s->XMM_D(0));962 T0 = float64_to_int64_round_to_zero(s->XMM_D(0), &env->sse_status); 928 963 } 929 964 #endif … … 1032 1067 d = (Reg *)((char *)env + PARAM1);\ 1033 1068 s = (Reg *)((char *)env + PARAM2);\ 1034 d->XMM_L(0) = F( d->XMM_S(0), s->XMM_S(0));\1035 d->XMM_L(1) = F( d->XMM_S(1), s->XMM_S(1));\1036 d->XMM_L(2) = F( d->XMM_S(2), s->XMM_S(2));\1037 d->XMM_L(3) = F( d->XMM_S(3), s->XMM_S(3));\1069 d->XMM_L(0) = F(32, d->XMM_S(0), s->XMM_S(0));\ 1070 d->XMM_L(1) = F(32, d->XMM_S(1), s->XMM_S(1));\ 1071 d->XMM_L(2) = F(32, d->XMM_S(2), s->XMM_S(2));\ 1072 d->XMM_L(3) = F(32, d->XMM_S(3), s->XMM_S(3));\ 1038 1073 }\ 1039 1074 \ … … 1043 1078 d = (Reg *)((char *)env + PARAM1);\ 1044 1079 s = (Reg *)((char *)env + PARAM2);\ 1045 d->XMM_L(0) = F( d->XMM_S(0), s->XMM_S(0));\1080 d->XMM_L(0) = F(32, d->XMM_S(0), s->XMM_S(0));\ 1046 1081 }\ 1047 1082 void OPPROTO op_ ## name ## pd (void)\ … … 1050 1085 d = (Reg *)((char *)env + PARAM1);\ 1051 1086 s = (Reg *)((char *)env + PARAM2);\ 1052 d->XMM_Q(0) = F( d->XMM_D(0), s->XMM_D(0));\1053 d->XMM_Q(1) = F( d->XMM_D(1), s->XMM_D(1));\1087 d->XMM_Q(0) = F(64, d->XMM_D(0), s->XMM_D(0));\ 1088 d->XMM_Q(1) = F(64, d->XMM_D(1), s->XMM_D(1));\ 1054 1089 }\ 1055 1090 \ … … 1059 1094 d = (Reg *)((char *)env + PARAM1);\ 1060 1095 s = (Reg *)((char *)env + PARAM2);\ 1061 d->XMM_Q(0) = F( d->XMM_D(0), s->XMM_D(0));\1062 } 1063 1064 #define FPU_CMPEQ( a, b) (a) == (b) ? -1 : 01065 #define FPU_CMPLT( a, b) (a) < (b) ? -1 : 01066 #define FPU_CMPLE( a, b) (a) <= (b) ? -1 : 01067 #define FPU_CMPUNORD( a, b) (fpu_isnan(a) || fpu_isnan(b)) ? - 1 : 01068 #define FPU_CMPNEQ( a, b) (a) == (b) ? 0 : -11069 #define FPU_CMPNLT( a, b) (a) < (b) ? 0 : -11070 #define FPU_CMPNLE( a, b) (a) <= (b) ? 0 : -11071 #define FPU_CMPORD( a, b) (!fpu_isnan(a) && !fpu_isnan(b)) ? - 1 : 01096 d->XMM_Q(0) = F(64, d->XMM_D(0), s->XMM_D(0));\ 1097 } 1098 1099 #define FPU_CMPEQ(size, a, b) float ## size ## _eq(a, b, &env->sse_status) ? -1 : 0 1100 #define FPU_CMPLT(size, a, b) float ## size ## _lt(a, b, &env->sse_status) ? -1 : 0 1101 #define FPU_CMPLE(size, a, b) float ## size ## _le(a, b, &env->sse_status) ? -1 : 0 1102 #define FPU_CMPUNORD(size, a, b) float ## size ## _unordered(a, b, &env->sse_status) ? 
- 1 : 0 1103 #define FPU_CMPNEQ(size, a, b) float ## size ## _eq(a, b, &env->sse_status) ? 0 : -1 1104 #define FPU_CMPNLT(size, a, b) float ## size ## _lt(a, b, &env->sse_status) ? 0 : -1 1105 #define FPU_CMPNLE(size, a, b) float ## size ## _le(a, b, &env->sse_status) ? 0 : -1 1106 #define FPU_CMPORD(size, a, b) float ## size ## _unordered(a, b, &env->sse_status) ? 0 : -1 1072 1107 1073 1108 SSE_OP_CMP(cmpeq, FPU_CMPEQ) … … 1080 1115 SSE_OP_CMP(cmpord, FPU_CMPORD) 1081 1116 1117 const int comis_eflags[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C}; 1118 1082 1119 void OPPROTO op_ucomiss(void) 1083 1120 { 1084 int eflags;1085 float s0, s1;1121 int ret; 1122 float32 s0, s1; 1086 1123 Reg *d, *s; 1087 1124 d = (Reg *)((char *)env + PARAM1); … … 1090 1127 s0 = d->XMM_S(0); 1091 1128 s1 = s->XMM_S(0); 1092 if (s0 < s1) 1093 eflags = CC_C; 1094 else if (s0 == s1) 1095 eflags = CC_Z; 1096 else 1097 eflags = 0; 1098 CC_SRC = eflags; 1129 ret = float32_compare_quiet(s0, s1, &env->sse_status); 1130 CC_SRC = comis_eflags[ret + 1]; 1099 1131 FORCE_RET(); 1100 1132 } … … 1102 1134 void OPPROTO op_comiss(void) 1103 1135 { 1104 int eflags;1105 float s0, s1;1136 int ret; 1137 float32 s0, s1; 1106 1138 Reg *d, *s; 1107 1139 d = (Reg *)((char *)env + PARAM1); … … 1110 1142 s0 = d->XMM_S(0); 1111 1143 s1 = s->XMM_S(0); 1112 if (s0 < s1) 1113 eflags = CC_C; 1114 else if (s0 == s1) 1115 eflags = CC_Z; 1116 else 1117 eflags = 0; 1118 CC_SRC = eflags; 1144 ret = float32_compare(s0, s1, &env->sse_status); 1145 CC_SRC = comis_eflags[ret + 1]; 1119 1146 FORCE_RET(); 1120 1147 } … … 1122 1149 void OPPROTO op_ucomisd(void) 1123 1150 { 1124 int eflags;1125 doubled0, d1;1151 int ret; 1152 float64 d0, d1; 1126 1153 Reg *d, *s; 1127 1154 d = (Reg *)((char *)env + PARAM1); … … 1130 1157 d0 = d->XMM_D(0); 1131 1158 d1 = s->XMM_D(0); 1132 if (d0 < d1) 1133 eflags = CC_C; 1134 else if (d0 == d1) 1135 eflags = CC_Z; 1136 else 1137 eflags = 0; 1138 CC_SRC = eflags; 1159 ret = float64_compare_quiet(d0, d1, &env->sse_status); 1160 CC_SRC = comis_eflags[ret + 1]; 1139 1161 FORCE_RET(); 1140 1162 } … … 1142 1164 void OPPROTO op_comisd(void) 1143 1165 { 1144 int eflags;1145 doubled0, d1;1166 int ret; 1167 float64 d0, d1; 1146 1168 Reg *d, *s; 1147 1169 d = (Reg *)((char *)env + PARAM1); … … 1150 1172 d0 = d->XMM_D(0); 1151 1173 d1 = s->XMM_D(0); 1152 if (d0 < d1) 1153 eflags = CC_C; 1154 else if (d0 == d1) 1155 eflags = CC_Z; 1156 else 1157 eflags = 0; 1158 CC_SRC = eflags; 1174 ret = float64_compare(d0, d1, &env->sse_status); 1175 CC_SRC = comis_eflags[ret + 1]; 1159 1176 FORCE_RET(); 1160 1177 } -
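The ops_sse.h changes follow the same softfloat conversion, and the macro families gain a leading size argument so one body serves both the ps and pd forms through token pasting; comis_eflags then plays the same role for COMISS/UCOMISS that fcom_ccval plays for FCOM. How the expansion works, shown on an invented _DEMO copy of the add macro:

    /* FPU_ADD_DEMO(32, a, b) expands to float32_add(a, b, &env->sse_status);
       FPU_ADD_DEMO(64, a, b) expands to float64_add(a, b, &env->sse_status).
       The CMP family pastes size into float##size##_eq/_lt/_le/_unordered
       the same way. */
    #define FPU_ADD_DEMO(size, a, b) float ## size ## _add(a, b, &env->sse_status)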
trunk/src/recompiler/target-i386/ops_template_mem.h
r1 r2422 75 75 76 76 if (T1 & SHIFT1_MASK) { 77 count = T1 & SHIFT_MASK; 77 count = T1 & SHIFT_MASK; 78 78 src = T0; 79 79 T0 &= DATA_MASK; -
trunk/src/recompiler/target-i386/translate.c
r1514 r2422 23 23 #include <string.h> 24 24 #include <inttypes.h> 25 #ifndef VBOX 25 26 #include <signal.h> 26 27 #include <assert.h> 28 #endif /* !VBOX */ 27 29 28 30 #include "cpu.h" … … 634 636 gen_op_shldw ## SUFFIX ## _T0_T1_ ## op ## _cc,\ 635 637 gen_op_shrdw ## SUFFIX ## _T0_T1_ ## op ## _cc,\ 636 },\638 },\ 637 639 {\ 638 640 gen_op_shldl ## SUFFIX ## _T0_T1_ ## op ## _cc,\ … … 855 857 #ifdef VBOX 856 858 gen_check_external_event(); 857 #endif 859 #endif /* VBOX */ 858 860 #ifdef TARGET_X86_64 859 861 if (pc == (uint32_t)pc) { … … 1672 1674 *reg_ptr = opreg; 1673 1675 *offset_ptr = disp; 1676 } 1677 1678 static void gen_nop_modrm(DisasContext *s, int modrm) 1679 { 1680 int mod, rm, base, code; 1681 1682 mod = (modrm >> 6) & 3; 1683 if (mod == 3) 1684 return; 1685 rm = modrm & 7; 1686 1687 if (s->aflag) { 1688 1689 base = rm; 1690 1691 if (base == 4) { 1692 code = ldub_code(s->pc++); 1693 base = (code & 7); 1694 } 1695 1696 switch (mod) { 1697 case 0: 1698 if (base == 5) { 1699 s->pc += 4; 1700 } 1701 break; 1702 case 1: 1703 s->pc++; 1704 break; 1705 default: 1706 case 2: 1707 s->pc += 4; 1708 break; 1709 } 1710 } else { 1711 switch (mod) { 1712 case 0: 1713 if (rm == 6) { 1714 s->pc += 2; 1715 } 1716 break; 1717 case 1: 1718 s->pc++; 1719 break; 1720 default: 1721 case 2: 1722 s->pc += 2; 1723 break; 1724 } 1725 } 1674 1726 } 1675 1727 … … 2630 2682 case 0x12b: /* movntps */ 2631 2683 case 0x3f0: /* lddqu */ 2632 if (mod == 3) 2684 if (mod == 3) 2633 2685 goto illegal_op; 2634 2686 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); … … 2636 2688 break; 2637 2689 case 0x6e: /* movd mm, ea */ 2638 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0); 2639 gen_op_movl_mm_T0_mmx(offsetof(CPUX86State,fpregs[reg].mmx)); 2690 #ifdef TARGET_X86_64 2691 if (s->dflag == 2) { 2692 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0); 2693 gen_op_movq_mm_T0_mmx(offsetof(CPUX86State,fpregs[reg].mmx)); 2694 } else 2695 #endif 2696 { 2697 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0); 2698 gen_op_movl_mm_T0_mmx(offsetof(CPUX86State,fpregs[reg].mmx)); 2699 } 2640 2700 break; 2641 2701 case 0x16e: /* movd xmm, ea */ 2642 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0); 2643 gen_op_movl_mm_T0_xmm(offsetof(CPUX86State,xmm_regs[reg])); 2702 #ifdef TARGET_X86_64 2703 if (s->dflag == 2) { 2704 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0); 2705 gen_op_movq_mm_T0_xmm(offsetof(CPUX86State,xmm_regs[reg])); 2706 } else 2707 #endif 2708 { 2709 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0); 2710 gen_op_movl_mm_T0_xmm(offsetof(CPUX86State,xmm_regs[reg])); 2711 } 2644 2712 break; 2645 2713 case 0x6f: /* movq mm, ea */ … … 2765 2833 break; 2766 2834 case 0x7e: /* movd ea, mm */ 2767 gen_op_movl_T0_mm_mmx(offsetof(CPUX86State,fpregs[reg].mmx)); 2768 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1); 2835 #ifdef TARGET_X86_64 2836 if (s->dflag == 2) { 2837 gen_op_movq_T0_mm_mmx(offsetof(CPUX86State,fpregs[reg].mmx)); 2838 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1); 2839 } else 2840 #endif 2841 { 2842 gen_op_movl_T0_mm_mmx(offsetof(CPUX86State,fpregs[reg].mmx)); 2843 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1); 2844 } 2769 2845 break; 2770 2846 case 0x17e: /* movd ea, xmm */ 2771 gen_op_movl_T0_mm_xmm(offsetof(CPUX86State,xmm_regs[reg])); 2772 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1); 2847 #ifdef TARGET_X86_64 2848 if (s->dflag == 2) { 2849 gen_op_movq_T0_mm_xmm(offsetof(CPUX86State,xmm_regs[reg])); 2850 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1); 2851 } else 2852 #endif 2853 { 2854 
gen_op_movl_T0_mm_xmm(offsetof(CPUX86State,xmm_regs[reg])); 2855 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1); 2856 } 2773 2857 break; 2774 2858 case 0x27e: /* movq xmm, ea */ … … 3012 3096 case 0x2d6: /* movq2dq */ 3013 3097 gen_op_enter_mmx(); 3014 rm = (modrm & 7) | REX_B(s);3015 gen_op_movq(offsetof(CPUX86State,xmm_regs[r m].XMM_Q(0)),3016 offsetof(CPUX86State,fpregs[r eg & 7].mmx));3017 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[r m].XMM_Q(1)));3098 rm = (modrm & 7); 3099 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)), 3100 offsetof(CPUX86State,fpregs[rm].mmx)); 3101 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1))); 3018 3102 break; 3019 3103 case 0x3d6: /* movdq2q */ 3020 3104 gen_op_enter_mmx(); 3021 rm = (modrm & 7) ;3022 gen_op_movq(offsetof(CPUX86State,fpregs[r m].mmx),3023 offsetof(CPUX86State,xmm_regs[r eg].XMM_Q(0)));3105 rm = (modrm & 7) | REX_B(s); 3106 gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx), 3107 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0))); 3024 3108 break; 3025 3109 case 0xd7: /* pmovmskb */ … … 3072 3156 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 3073 3157 op2_offset = offsetof(CPUX86State,xmm_t0); 3074 if (b1 >= 2 && ((b >= 0x50 && b <= 0x5f ) ||3158 if (b1 >= 2 && ((b >= 0x50 && b <= 0x5f && b != 0x5b) || 3075 3159 b == 0xc2)) { 3076 3160 /* specific case for SSE single instructions */ … … 4388 4472 case 0x0a: /* fsts */ 4389 4473 case 0x0b: /* fstps */ 4390 case 0x18: /* fildl */ 4391 case 0x1a: /* fistl */ 4392 case 0x1b: /* fistpl */ 4393 case 0x28: /* fldl */ 4394 case 0x2a: /* fstl */ 4395 case 0x2b: /* fstpl */ 4396 case 0x38: /* filds */ 4397 case 0x3a: /* fists */ 4398 case 0x3b: /* fistps */ 4399 4474 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */ 4475 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */ 4476 case 0x38 ... 
0x3b: /* filds, fisttps, fists, fistps */ 4400 4477 switch(op & 7) { 4401 4478 case 0: … … 4415 4492 break; 4416 4493 } 4494 break; 4495 case 1: 4496 switch(op >> 4) { 4497 case 1: 4498 gen_op_fisttl_ST0_A0(); 4499 break; 4500 case 2: 4501 gen_op_fisttll_ST0_A0(); 4502 break; 4503 case 3: 4504 default: 4505 gen_op_fistt_ST0_A0(); 4506 } 4507 gen_op_fpop(); 4417 4508 break; 4418 4509 default: … … 5367 5458 if (s->vm86 && s->iopl != 3 && !s->vme) { 5368 5459 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); 5369 } 5370 else 5460 } else 5371 5461 #endif 5372 5462 gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base); … … 5393 5483 break; 5394 5484 case 0xf1: /* icebp (undocumented, exits to external debugger) */ 5485 #if 1 5395 5486 gen_debug(s, pc_start - s->cs_base); 5487 #else 5488 /* start debug */ 5489 tb_flush(cpu_single_env); 5490 cpu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM); 5491 #endif 5396 5492 break; 5397 5493 case 0xfa: /* cli */ … … 5406 5502 gen_op_cli(); 5407 5503 #ifdef VBOX 5408 } else 5409 if (s->iopl != 3 && s->vme) { 5504 } else if (s->iopl != 3 && s->vme) { 5410 5505 gen_op_cli_vme(); 5411 5506 #endif … … 5435 5530 goto gen_sti; 5436 5531 #ifdef VBOX 5437 } else 5438 if (s->iopl != 3 && s->vme) { 5532 } else if (s->iopl != 3 && s->vme) { 5439 5533 gen_op_sti_vme(); 5440 5534 /* give a chance to handle pending irqs */ … … 5780 5874 if (mod == 3) { 5781 5875 #ifdef TARGET_X86_64 5782 if (CODE64(s) && (modrm & 7)== 0) {5876 if (CODE64(s) && rm == 0) { 5783 5877 /* swapgs */ 5784 5878 gen_op_movtl_T0_env(offsetof(CPUX86State,segs[R_GS].base)); … … 5898 5992 /* nothing more to do */ 5899 5993 break; 5900 default: 5901 goto illegal_op; 5902 } 5994 default: /* nop (multi byte) */ 5995 gen_nop_modrm(s, modrm); 5996 break; 5997 } 5998 break; 5999 case 0x119 ... 0x11f: /* nop (multi byte) */ 6000 modrm = ldub_code(s->pc++); 6001 gen_nop_modrm(s, modrm); 5903 6002 break; 5904 6003 case 0x120: /* mov reg, crN */ … … 6063 6162 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 6064 6163 /* ignore for now */ 6164 break; 6165 case 0x1aa: /* rsm */ 6166 if (!(s->flags & HF_SMM_MASK)) 6167 goto illegal_op; 6168 if (s->cc_op != CC_OP_DYNAMIC) { 6169 gen_op_set_cc_op(s->cc_op); 6170 s->cc_op = CC_OP_DYNAMIC; 6171 } 6172 gen_jmp_im(s->pc - s->cs_base); 6173 gen_op_rsm(); 6174 gen_eob(s); 6065 6175 break; 6066 6176 case 0x110 ... 0x117: … … 6571 6681 break; 6572 6682 } 6573 #endif 6683 #endif /* VBOX */ 6574 6684 6575 6685 /* if single step mode, we generate only one instruction and -
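A side note on the translate.c hunks above: the new gen_nop_modrm() helper lets the decoder treat the 0F 19..0F 1F hint opcodes as multi-byte NOPs by consuming the ModRM, SIB and displacement bytes without emitting any ops, where the old code went to illegal_op. Below is a minimal standalone sketch of the same byte-counting rule for the 32-bit addressing case; the function name and layout are illustrative, not from the changeset:

    #include <stdint.h>
    #include <stddef.h>

    /* How many bytes does a ModRM memory form consume (32-bit addressing)?
     * Mirrors the skipping done by gen_nop_modrm() when aflag is set. */
    static size_t modrm_skip_bytes32(const uint8_t *insn)
    {
        uint8_t modrm = insn[0];
        int mod  = (modrm >> 6) & 3;
        int base = modrm & 7;
        size_t len = 1;                 /* the ModRM byte itself */

        if (mod == 3)
            return len;                 /* register form: no address bytes */

        if (base == 4) {                /* rm == 4: a SIB byte follows */
            base = insn[len] & 7;       /* the real base lives in the SIB */
            len += 1;
        }
        switch (mod) {
        case 0:
            if (base == 5)              /* no base register: disp32 */
                len += 4;
            break;
        case 1:
            len += 1;                   /* disp8 */
            break;
        default:                        /* mod == 2: disp32 */
            len += 4;
            break;
        }
        return len;
    }

For example, the 5-byte NOP 0F 1F 44 00 00 has modrm 0x44 (mod=1, rm=4), giving 1 (ModRM) + 1 (SIB) + 1 (disp8) = 3 bytes to consume after the 0F 1F opcode.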
trunk/src/recompiler/translate-all.c
r1 r2422 31 31 #include "disas.h" 32 32 33 extern int dyngen_code(uint8_t *gen_code_buf, 34 uint16_t *label_offsets, uint16_t *jmp_offsets, 35 const uint16_t *opc_buf, const uint32_t *opparam_buf, const long *gen_labels); 36 33 37 enum { 34 38 #define DEF(s, n, copy_size) INDEX_op_ ## s, … … 38 42 }; 39 43 40 #include "dyngen.h"41 #include "op.h"42 43 44 uint16_t gen_opc_buf[OPC_BUF_SIZE]; 44 45 uint32_t gen_opparam_buf[OPPARAM_BUF_SIZE]; … … 52 53 #elif defined(TARGET_SPARC) 53 54 target_ulong gen_opc_npc[OPC_BUF_SIZE]; 55 target_ulong gen_opc_jump_pc[2]; 56 #elif defined(TARGET_MIPS) 57 uint32_t gen_opc_hflags[OPC_BUF_SIZE]; 54 58 #endif 55 59 … … 157 161 return -1; 158 162 } 159 #else 163 #else /* !VBOX */ 160 164 if (gen_intermediate_code(env, tb) < 0) 161 165 return -1; 162 #endif 166 #endif /* !VBOX */ 167 163 168 /* generate machine code */ 164 169 tb->tb_next_offset[0] = 0xffff; … … 179 184 #endif 180 185 gen_opc_buf, gen_opparam_buf, gen_labels); 181 182 186 #ifdef VBOX 183 187 RAWEx_ProfileStop(env, STATS_QEMU_COMPILATION); … … 258 262 env->regs[15] = gen_opc_pc[j]; 259 263 #elif defined(TARGET_SPARC) 260 /* XXX: restore npc too */ 261 env->pc = gen_opc_pc[j]; 262 env->npc = gen_opc_npc[j]; 264 { 265 target_ulong npc; 266 env->pc = gen_opc_pc[j]; 267 npc = gen_opc_npc[j]; 268 if (npc == 1) { 269 /* dynamic NPC: already stored */ 270 } else if (npc == 2) { 271 target_ulong t2 = (target_ulong)puc; 272 /* jump PC: use T2 and the jump targets of the translation */ 273 if (t2) 274 env->npc = gen_opc_jump_pc[0]; 275 else 276 env->npc = gen_opc_jump_pc[1]; 277 } else { 278 env->npc = npc; 279 } 280 } 263 281 #elif defined(TARGET_PPC) 264 282 { … … 299 317 env->access_type = type; 300 318 } 319 #elif defined(TARGET_M68K) 320 env->pc = gen_opc_pc[j]; 321 #elif defined(TARGET_MIPS) 322 env->PC = gen_opc_pc[j]; 323 env->hflags &= ~MIPS_HFLAG_BMASK; 324 env->hflags |= gen_opc_hflags[j]; 301 325 #endif 302 326 return 0; -
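The translate-all.c hunk above extends exception-time state restore: besides gen_opc_pc[], SPARC now records a per-op npc (with sentinel values, 1 meaning the dynamic NPC is already stored and 2 meaning a jump target selected via T2), and MIPS records per-op hflags. The underlying idea, reduced to a hedged sketch with hypothetical demo_* names standing in for the real cpu_restore_state() machinery:

    #include <stdint.h>

    #define OPC_BUF_SIZE 512

    /* Per-micro-op side tables filled during translation, in the spirit
     * of the gen_opc_* arrays above. demo_opc_host_off[] is a
     * hypothetical table of host-code offsets, one entry per micro-op. */
    static uint32_t demo_opc_pc[OPC_BUF_SIZE];
    static uint16_t demo_opc_host_off[OPC_BUF_SIZE];

    /* Map the host-code offset where a fault hit back to the guest PC:
     * take the last micro-op whose generated code starts at or before
     * the faulting offset, and return its recorded PC. */
    static uint32_t demo_restore_pc(uint16_t fault_off, int n_ops)
    {
        int j = 0;
        while (j + 1 < n_ops && demo_opc_host_off[j + 1] <= fault_off)
            j++;
        return demo_opc_pc[j];
    }

The extra per-target tables (npc, hflags) are then just parallel arrays read with the same index j.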
trunk/src/recompiler/vl.h
r1 r2422 31 31 #include <string.h> 32 32 #include <inttypes.h> 33 #ifndef VBOX 33 34 #include <limits.h> 34 35 #include <time.h> … … 38 39 #include <fcntl.h> 39 40 #include <sys/stat.h> 40 #ifndef VBOX41 41 #include "audio/audio.h" 42 #endif 42 #endif /* !VBOX */ 43 43 44 44 #ifndef O_LARGEFILE … … 49 49 #endif 50 50 51 #ifndef ENOMEDIUM 52 #define ENOMEDIUM ENODEV 53 #endif 54 51 55 #ifdef _WIN32 56 #ifndef VBOX 57 #include <windows.h> 58 #define fsync _commit 52 59 #define lseek _lseeki64 53 60 #define ENOTSUP 4096 54 /* XXX: find 64 bit version */ 55 #define ftruncate chsize 61 extern int qemu_ftruncate64(int, int64_t); 62 #define ftruncate qemu_ftruncate64 63 56 64 57 65 static inline char *realpath(const char *path, char *resolved_path) … … 60 68 return resolved_path; 61 69 } 70 71 #define PRId64 "I64d" 72 #define PRIx64 "I64x" 73 #define PRIu64 "I64u" 74 #define PRIo64 "I64o" 75 #endif /* !VBOX */ 62 76 #endif 63 77 … … 73 87 #else 74 88 89 #ifndef VBOX 90 #include "audio/audio.h" 91 #endif /* !VBOX */ 75 92 #include "cpu.h" 76 93 … … 80 97 # include <VBox/types.h> 81 98 # include "REMInternal.h" 82 # undef MIN83 # undef MAX84 99 #endif /* VBOX */ 85 100 … … 91 106 #endif 92 107 93 /* vl.c */ 94 uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c); 95 96 void hw_error(const char *fmt, ...); 97 98 int get_image_size(const char *filename); 99 int load_image(const char *filename, uint8_t *addr); 100 extern const char *bios_dir; 101 108 #ifndef MIN 109 #define MIN(a, b) (((a) < (b)) ? (a) : (b)) 110 #endif 111 #ifndef MAX 112 #define MAX(a, b) (((a) > (b)) ? (a) : (b)) 113 #endif 114 115 /* cutils.c */ 102 116 void pstrcpy(char *buf, int buf_size, const char *str); 103 117 char *pstrcat(char *buf, int buf_size, const char *s); 104 118 int strstart(const char *str, const char *val, const char **ptr); 119 int stristart(const char *str, const char *val, const char **ptr); 120 121 /* vl.c */ 122 uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c); 123 124 void hw_error(const char *fmt, ...); 125 126 extern const char *bios_dir; 105 127 106 128 extern int vm_running; 107 129 130 typedef struct vm_change_state_entry VMChangeStateEntry; 131 typedef void VMChangeStateHandler(void *opaque, int running); 108 132 typedef void VMStopHandler(void *opaque, int reason); 133 134 VMChangeStateEntry *qemu_add_vm_change_state_handler(VMChangeStateHandler *cb, 135 void *opaque); 136 void qemu_del_vm_change_state_handler(VMChangeStateEntry *e); 109 137 110 138 int qemu_add_vm_stop_handler(VMStopHandler *cb, void *opaque); … … 119 147 void qemu_system_reset_request(void); 120 148 void qemu_system_shutdown_request(void); 149 void qemu_system_powerdown_request(void); 150 #if !defined(TARGET_SPARC) 151 // Please implement a power failure function to signal the OS 152 #define qemu_system_powerdown() do{}while(0) 153 #else 154 void qemu_system_powerdown(void); 155 #endif 121 156 122 157 void main_loop_wait(int timeout); 123 158 124 extern int audio_enabled;125 extern int sb16_enabled;126 extern int adlib_enabled;127 extern int gus_enabled;128 159 extern int ram_size; 129 160 extern int bios_size; 130 161 extern int rtc_utc; 131 #ifndef VBOX132 162 extern int cirrus_vga_enabled; 133 #endif134 163 extern int graphic_width; 135 164 extern int graphic_height; 136 165 extern int graphic_depth; 137 166 extern const char *keyboard_layout; 167 extern int kqemu_allowed; 168 extern int win2k_install_hack; 169 extern int usb_enabled; 170 extern int smp_cpus; 171 extern int no_quit; 172 extern int semihosting_enabled; 173 
extern int autostart; 174 175 #ifndef VBOX 176 #define MAX_OPTION_ROMS 16 177 extern const char *option_rom[MAX_OPTION_ROMS]; 178 extern int nb_option_roms; 138 179 139 180 /* XXX: make it dynamic */ 140 #if defined (TARGET_PPC) 141 #define BIOS_SIZE (512 * 1024) 181 #if defined (TARGET_PPC) || defined (TARGET_SPARC64) 182 #define BIOS_SIZE ((512 + 32) * 1024) 183 #elif defined(TARGET_MIPS) 184 #define BIOS_SIZE (4 * 1024 * 1024) 142 185 #else 143 186 #define BIOS_SIZE ((256 + 64) * 1024) … … 153 196 typedef void QEMUPutMouseEvent(void *opaque, int dx, int dy, int dz, int buttons_state); 154 197 198 typedef struct QEMUPutMouseEntry { 199 QEMUPutMouseEvent *qemu_put_mouse_event; 200 void *qemu_put_mouse_event_opaque; 201 int qemu_put_mouse_event_absolute; 202 char *qemu_put_mouse_event_name; 203 204 /* used internally by qemu for handling mice */ 205 struct QEMUPutMouseEntry *next; 206 } QEMUPutMouseEntry; 207 155 208 void qemu_add_kbd_event_handler(QEMUPutKBDEvent *func, void *opaque); 156 void qemu_add_mouse_event_handler(QEMUPutMouseEvent *func, void *opaque); 209 QEMUPutMouseEntry *qemu_add_mouse_event_handler(QEMUPutMouseEvent *func, 210 void *opaque, int absolute, 211 const char *name); 212 void qemu_remove_mouse_event_handler(QEMUPutMouseEntry *entry); 157 213 158 214 void kbd_put_keycode(int keycode); 159 215 void kbd_mouse_event(int dx, int dy, int dz, int buttons_state); 216 int kbd_mouse_is_absolute(void); 217 218 void do_info_mice(void); 219 void do_mouse_set(int index); 160 220 161 221 /* keysym is a unicode code except for special keys (see QEMU_KEY_xxx … … 188 248 typedef void IOReadHandler(void *opaque, const uint8_t *buf, int size); 189 249 typedef int IOCanRWHandler(void *opaque); 190 191 int qemu_add_fd_read_handler(int fd, IOCanRWHandler *fd_can_read, 192 IOReadHandler *fd_read, void *opaque); 193 void qemu_del_fd_read_handler(int fd); 250 typedef void IOHandler(void *opaque); 251 252 int qemu_set_fd_handler2(int fd, 253 IOCanRWHandler *fd_read_poll, 254 IOHandler *fd_read, 255 IOHandler *fd_write, 256 void *opaque); 257 int qemu_set_fd_handler(int fd, 258 IOHandler *fd_read, 259 IOHandler *fd_write, 260 void *opaque); 261 262 /* Polling handling */ 263 264 /* return TRUE if no sleep should be done afterwards */ 265 typedef int PollingFunc(void *opaque); 266 267 int qemu_add_polling_cb(PollingFunc *func, void *opaque); 268 void qemu_del_polling_cb(PollingFunc *func, void *opaque); 269 270 #ifdef _WIN32 271 /* Wait objects handling */ 272 typedef void WaitObjectFunc(void *opaque); 273 274 int qemu_add_wait_object(HANDLE handle, WaitObjectFunc *func, void *opaque); 275 void qemu_del_wait_object(HANDLE handle, WaitObjectFunc *func, void *opaque); 276 #endif 277 278 typedef struct QEMUBH QEMUBH; 194 279 195 280 /* character device */ … … 197 282 #define CHR_EVENT_BREAK 0 /* serial break char */ 198 283 #define CHR_EVENT_FOCUS 1 /* focus to this terminal (modal input needed) */ 284 #define CHR_EVENT_RESET 2 /* new connection established */ 285 286 287 #define CHR_IOCTL_SERIAL_SET_PARAMS 1 288 typedef struct { 289 int speed; 290 int parity; 291 int data_bits; 292 int stop_bits; 293 } QEMUSerialSetParams; 294 295 #define CHR_IOCTL_SERIAL_SET_BREAK 2 296 297 #define CHR_IOCTL_PP_READ_DATA 3 298 #define CHR_IOCTL_PP_WRITE_DATA 4 299 #define CHR_IOCTL_PP_READ_CONTROL 5 300 #define CHR_IOCTL_PP_WRITE_CONTROL 6 301 #define CHR_IOCTL_PP_READ_STATUS 7 199 302 200 303 typedef void IOEventHandler(void *opaque, int event); … … 202 305 typedef struct CharDriverState { 203 306 int 
(*chr_write)(struct CharDriverState *s, const uint8_t *buf, int len); 204 void (*chr_add_read_handler)(struct CharDriverState *s, 205 IOCanRWHandler *fd_can_read, 206 IOReadHandler *fd_read, void *opaque); 307 void (*chr_update_read_handler)(struct CharDriverState *s); 308 int (*chr_ioctl)(struct CharDriverState *s, int cmd, void *arg); 207 309 IOEventHandler *chr_event; 310 IOCanRWHandler *chr_can_read; 311 IOReadHandler *chr_read; 312 void *handler_opaque; 208 313 void (*chr_send_event)(struct CharDriverState *chr, int event); 314 void (*chr_close)(struct CharDriverState *chr); 209 315 void *opaque; 316 QEMUBH *bh; 210 317 } CharDriverState; 211 318 319 CharDriverState *qemu_chr_open(const char *filename); 212 320 void qemu_chr_printf(CharDriverState *s, const char *fmt, ...); 213 321 int qemu_chr_write(CharDriverState *s, const uint8_t *buf, int len); 214 322 void qemu_chr_send_event(CharDriverState *s, int event); 215 void qemu_chr_add_read_handler(CharDriverState *s, 216 IOCanRWHandler *fd_can_read, 217 IOReadHandler *fd_read, void *opaque); 218 void qemu_chr_add_event_handler(CharDriverState *s, IOEventHandler *chr_event); 219 323 void qemu_chr_add_handlers(CharDriverState *s, 324 IOCanRWHandler *fd_can_read, 325 IOReadHandler *fd_read, 326 IOEventHandler *fd_event, 327 void *opaque); 328 int qemu_chr_ioctl(CharDriverState *s, int cmd, void *arg); 329 void qemu_chr_reset(CharDriverState *s); 330 int qemu_chr_can_read(CharDriverState *s); 331 void qemu_chr_read(CharDriverState *s, uint8_t *buf, int len); 332 220 333 /* consoles */ 221 334 … … 223 336 typedef struct TextConsole TextConsole; 224 337 225 extern TextConsole *vga_console; 226 227 TextConsole *graphic_console_init(DisplayState *ds); 228 int is_active_console(TextConsole *s); 338 typedef void (*vga_hw_update_ptr)(void *); 339 typedef void (*vga_hw_invalidate_ptr)(void *); 340 typedef void (*vga_hw_screen_dump_ptr)(void *, const char *); 341 342 TextConsole *graphic_console_init(DisplayState *ds, vga_hw_update_ptr update, 343 vga_hw_invalidate_ptr invalidate, 344 vga_hw_screen_dump_ptr screen_dump, 345 void *opaque); 346 void vga_hw_update(void); 347 void vga_hw_invalidate(void); 348 void vga_hw_screen_dump(const char *filename); 349 350 int is_graphic_console(void); 229 351 CharDriverState *text_console_init(DisplayState *ds); 230 352 void console_select(unsigned int index); 231 353 232 #ifdef VBOX233 void console_init(void);234 #endif235 236 354 /* serial ports */ 237 355 … … 246 364 extern CharDriverState *parallel_hds[MAX_PARALLEL_PORTS]; 247 365 248 /* network redirectors support */ 366 /* VLANs support */ 367 368 typedef struct VLANClientState VLANClientState; 369 370 struct VLANClientState { 371 IOReadHandler *fd_read; 372 /* Packets may still be sent if this returns zero. It's used to 373 rate-limit the slirp code. 
*/ 374 IOCanRWHandler *fd_can_read; 375 void *opaque; 376 struct VLANClientState *next; 377 struct VLANState *vlan; 378 char info_str[256]; 379 }; 380 381 typedef struct VLANState { 382 int id; 383 VLANClientState *first_client; 384 struct VLANState *next; 385 } VLANState; 386 387 VLANState *qemu_find_vlan(int id); 388 VLANClientState *qemu_new_vlan_client(VLANState *vlan, 389 IOReadHandler *fd_read, 390 IOCanRWHandler *fd_can_read, 391 void *opaque); 392 int qemu_can_send_packet(VLANClientState *vc); 393 void qemu_send_packet(VLANClientState *vc, const uint8_t *buf, int size); 394 void qemu_handler_true(void *opaque); 395 396 void do_info_network(void); 397 398 /* TAP win32 */ 399 int tap_win32_init(VLANState *vlan, const char *ifname); 400 401 /* NIC info */ 249 402 250 403 #define MAX_NICS 8 251 404 252 typedef struct NetDriverState { 253 int index; /* index number in QEMU */ 405 typedef struct NICInfo { 254 406 uint8_t macaddr[6]; 255 char ifname[16]; 256 void (*send_packet)(struct NetDriverState *nd, 257 const uint8_t *buf, int size); 258 void (*add_read_packet)(struct NetDriverState *nd, 259 IOCanRWHandler *fd_can_read, 260 IOReadHandler *fd_read, void *opaque); 261 /* tun specific data */ 262 int fd; 263 /* slirp specific data */ 264 } NetDriverState; 407 const char *model; 408 VLANState *vlan; 409 } NICInfo; 265 410 266 411 extern int nb_nics; 267 extern NetDriverState nd_table[MAX_NICS]; 268 269 void qemu_send_packet(NetDriverState *nd, const uint8_t *buf, int size); 270 void qemu_add_read_packet(NetDriverState *nd, IOCanRWHandler *fd_can_read, 271 IOReadHandler *fd_read, void *opaque); 412 extern NICInfo nd_table[MAX_NICS]; 272 413 273 414 /* timers */ 274 275 #if defined(VBOX)276 277 #include <VBox/tm.h>278 279 /* VBox wrappers */280 #define QEMUTimerCB FNTMTIMERQEMU281 typedef struct TMTIMER QEMUTimer;282 #define rt_clock THIS_SHALL_NOT_BE_USED//TMCLOCK_REAL283 #define vm_clock THIS_SHALL_NOT_BE_USED//TMCLOCK_VIRTUAL284 #define ticks_per_sec THIS_SHALL_NOT_BE_USED//TMCpuTicksPerSecond((PVM)cpu_single_env->pVM)285 #define qemu_get_clock(enmClock) THIS_SHALL_NOT_BE_USED//TMR3Clock((PVM)cpu_single_env->pVM, enmClock)286 #define qemu_new_timer(clock, callback, user) THIS_SHALL_NOT_BE_USED//(QEMUTimer *)TMR3TimerCreateExternal((PVM)cpu_single_env->pVM, clock, callback, user, __FUNCTION__ )287 #define qemu_free_timer(timer) THIS_SHALL_NOT_BE_USED//TMTimerDestroy(timer)288 #define qemu_del_timer(timer) THIS_SHALL_NOT_BE_USED//TMTimerStop(timer)289 #define qemu_mod_timer(timer, expire) THIS_SHALL_NOT_BE_USED//TMTimerSet(timer, (uint64_t)expire)290 #define qemu_timer_pending(timer) THIS_SHALL_NOT_BE_USED//TMTimerIsActive(timer)291 #define cpu_disable_ticks() THIS_SHALL_NOT_BE_USED//TMCpuTickPause((PVM)cpu_single_env->pVM)292 #define cpu_enable_ticks() THIS_SHALL_NOT_BE_USED//TMCpuTickResume((PVM)cpu_single_env->pVM)293 #define cpu_calibrate_ticks() THIS_SHALL_NOT_BE_USED//do {} while (0)294 #define init_timers() THIS_SHALL_NOT_BE_USED//do {} while (0)295 #define quit_timers() THIS_SHALL_NOT_BE_USED//do {} while (0)296 297 #else /* !VBOX */298 415 299 416 typedef struct QEMUClock QEMUClock; … … 323 440 extern int pit_min_timer_count; 324 441 442 int64_t cpu_get_ticks(void); 325 443 void cpu_enable_ticks(void); 326 444 void cpu_disable_ticks(void); 327 #endif /* !VBOX */328 445 329 446 /* VM Load/Save */ 330 447 331 #if defined(VBOX) 332 333 #include <VBox/ssm.h> 334 #include <VBox/err.h> 335 336 typedef struct SSMHANDLE QEMUFile; 337 338 #define qemu_put_buffer(f, pv, cb) 
THIS_SHALL_NOT_BE_USED//SSMR3PutMem((f), (pv), (cb)) 339 #define qemu_put_byte(f, u8) THIS_SHALL_NOT_BE_USED//SSMR3PutU8((f), (uint8_t)(u8)) 340 #define qemu_put_8s(f, pu8) THIS_SHALL_NOT_BE_USED//SSMR3PutU8((f), *(pu8)) 341 #define qemu_put_be16s(f, pu16) THIS_SHALL_NOT_BE_USED//SSMR3PutU32((f), *(pu16)) 342 #define qemu_put_be32s(f, pu32) THIS_SHALL_NOT_BE_USED//SSMR3PutU32((f), *(pu32)) 343 #define qemu_put_be64s(f, pu64) THIS_SHALL_NOT_BE_USED//SSMR3PutU64((f), *(pu64)) 344 #define qemu_put_be16(f, u16) THIS_SHALL_NOT_BE_USED//SSMR3PutU16((f), (uint16_t)(u16)) 345 #define qemu_put_be32(f, u32) THIS_SHALL_NOT_BE_USED//SSMR3PutU32((f), (uint32_t)(u32)) 346 #define qemu_put_be64(f, u64) THIS_SHALL_NOT_BE_USED//SSMR3PutU64((f), (uint64_t)(u64)) 347 348 #define qemu_get_8s(f, pu8) THIS_SHALL_NOT_BE_USED//SSMR3GetU8((f), (pu8)) 349 #define qemu_get_be16s(f, pu16) THIS_SHALL_NOT_BE_USED//SSMR3GetU16((f), (pu16)) 350 #define qemu_get_be32s(f, pu32) THIS_SHALL_NOT_BE_USED//SSMR3GetU32((f), (pu32)) 351 #define qemu_get_be64s(f, pu64) THIS_SHALL_NOT_BE_USED//SSMR3GetU64((f), (pu64)) 352 353 #if 0 354 static inline int qemu_get_buffer(QEMUFile *f, uint8_t *buf, int size) 355 { 356 int rc = SSMR3GetMem(f, buf, size); 357 return VBOX_SUCCESS(rc) ? size : 0; 358 } 359 360 static inline int qemu_get_byte(QEMUFile *f) 361 { 362 uint8_t u8; 363 int rc = SSMR3GetU8(f, &u8); 364 return VBOX_SUCCESS(rc) ? (int)u8 : -1; 365 } 366 367 static inline unsigned int qemu_get_be16(QEMUFile *f) 368 { 369 uint16_t u16; 370 int rc = SSMR3GetU16(f, &u16); 371 return VBOX_SUCCESS(rc) ? u16 : ~0; 372 } 373 374 static inline unsigned int qemu_get_be32(QEMUFile *f) 375 { 376 uint32_t u32; 377 int rc = SSMR3GetU32(f, &u32); 378 return VBOX_SUCCESS(rc) ? u32 : ~0; 379 } 380 381 static inline uint64_t qemu_get_be64(QEMUFile *f) 382 { 383 uint64_t u64; 384 int rc = SSMR3GetU64(f, &u64); 385 return VBOX_SUCCESS(rc) ? 
u64 : ~0; 386 } 387 388 #define qemu_put_timer(f, ts) TMR3TimerSave((ts), (f)) 389 #define qemu_get_timer(f, ts) TMR3TimerLoad((ts), (f)) 390 391 typedef void SaveStateHandler(QEMUFile *f, void *opaque); 392 typedef int LoadStateHandler(QEMUFile *f, void *opaque, int version_id); 393 394 int register_savevm(const char *idstr, 395 int instance_id, 396 int version_id, 397 SaveStateHandler *save_state, 398 LoadStateHandler *load_state, 399 void *opaque); 400 #endif /* not used */ 401 402 #else /* !VBOX */ 403 404 typedef FILE QEMUFile; 405 448 typedef struct QEMUFile QEMUFile; 449 450 QEMUFile *qemu_fopen(const char *filename, const char *mode); 451 void qemu_fflush(QEMUFile *f); 452 void qemu_fclose(QEMUFile *f); 406 453 void qemu_put_buffer(QEMUFile *f, const uint8_t *buf, int size); 407 454 void qemu_put_byte(QEMUFile *f, int v); … … 473 520 typedef int LoadStateHandler(QEMUFile *f, void *opaque, int version_id); 474 521 475 int qemu_loadvm(const char *filename);476 int qemu_savevm(const char *filename);477 522 int register_savevm(const char *idstr, 478 523 int instance_id, … … 483 528 void qemu_get_timer(QEMUFile *f, QEMUTimer *ts); 484 529 void qemu_put_timer(QEMUFile *f, QEMUTimer *ts); 485 #endif /* !VBOX */ 530 531 void cpu_save(QEMUFile *f, void *opaque); 532 int cpu_load(QEMUFile *f, void *opaque, int version_id); 533 534 void do_savevm(const char *name); 535 void do_loadvm(const char *name); 536 void do_delvm(const char *name); 537 void do_info_snapshots(void); 538 539 /* bottom halves */ 540 typedef void QEMUBHFunc(void *opaque); 541 542 QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque); 543 void qemu_bh_schedule(QEMUBH *bh); 544 void qemu_bh_cancel(QEMUBH *bh); 545 void qemu_bh_delete(QEMUBH *bh); 546 int qemu_bh_poll(void); 486 547 487 548 /* block.c */ … … 489 550 typedef struct BlockDriver BlockDriver; 490 551 491 #ifdef VBOX492 extern BlockDriver bdrv_vbox;493 /* @@@AH remove later */494 extern BlockDriver bdrv_qcow;495 552 extern BlockDriver bdrv_raw; 496 #else /* !VBOX */ 497 extern BlockDriver bdrv_raw; 553 extern BlockDriver bdrv_host_device; 498 554 extern BlockDriver bdrv_cow; 499 555 extern BlockDriver bdrv_qcow; … … 501 557 extern BlockDriver bdrv_cloop; 502 558 extern BlockDriver bdrv_dmg; 503 #endif /* !VBOX */ 559 extern BlockDriver bdrv_bochs; 560 extern BlockDriver bdrv_vpc; 561 extern BlockDriver bdrv_vvfat; 562 extern BlockDriver bdrv_qcow2; 563 564 typedef struct BlockDriverInfo { 565 /* in bytes, 0 if irrelevant */ 566 int cluster_size; 567 /* offset at which the VM state can be saved (0 if not possible) */ 568 int64_t vm_state_offset; 569 } BlockDriverInfo; 570 571 typedef struct QEMUSnapshotInfo { 572 char id_str[128]; /* unique snapshot id */ 573 /* the following fields are informative. 
They are not needed for 574 the consistency of the snapshot */ 575 char name[256]; /* user choosen name */ 576 uint32_t vm_state_size; /* VM state info size */ 577 uint32_t date_sec; /* UTC date of the snapshot */ 578 uint32_t date_nsec; 579 uint64_t vm_clock_nsec; /* VM clock relative to boot */ 580 } QEMUSnapshotInfo; 581 582 #define BDRV_O_RDONLY 0x0000 583 #define BDRV_O_RDWR 0x0002 584 #define BDRV_O_ACCESS 0x0003 585 #define BDRV_O_CREAT 0x0004 /* create an empty file */ 586 #define BDRV_O_SNAPSHOT 0x0008 /* open the file read only and save writes in a snapshot */ 587 #define BDRV_O_FILE 0x0010 /* open as a raw file (do not try to 588 use a disk image format on top of 589 it (default for 590 bdrv_file_open()) */ 504 591 505 592 void bdrv_init(void); … … 510 597 BlockDriverState *bdrv_new(const char *device_name); 511 598 void bdrv_delete(BlockDriverState *bs); 512 int bdrv_open(BlockDriverState *bs, const char *filename, int snapshot); 513 int bdrv_open2(BlockDriverState *bs, const char *filename, int snapshot, 599 int bdrv_file_open(BlockDriverState **pbs, const char *filename, int flags); 600 int bdrv_open(BlockDriverState *bs, const char *filename, int flags); 601 int bdrv_open2(BlockDriverState *bs, const char *filename, int flags, 514 602 BlockDriver *drv); 515 603 void bdrv_close(BlockDriverState *bs); … … 518 606 int bdrv_write(BlockDriverState *bs, int64_t sector_num, 519 607 const uint8_t *buf, int nb_sectors); 608 int bdrv_pread(BlockDriverState *bs, int64_t offset, 609 void *buf, int count); 610 int bdrv_pwrite(BlockDriverState *bs, int64_t offset, 611 const void *buf, int count); 612 int bdrv_truncate(BlockDriverState *bs, int64_t offset); 613 int64_t bdrv_getlength(BlockDriverState *bs); 520 614 void bdrv_get_geometry(BlockDriverState *bs, int64_t *nb_sectors_ptr); 521 615 int bdrv_commit(BlockDriverState *bs); 522 616 void bdrv_set_boot_sector(BlockDriverState *bs, const uint8_t *data, int size); 617 /* async block I/O */ 618 typedef struct BlockDriverAIOCB BlockDriverAIOCB; 619 typedef void BlockDriverCompletionFunc(void *opaque, int ret); 620 621 BlockDriverAIOCB *bdrv_aio_read(BlockDriverState *bs, int64_t sector_num, 622 uint8_t *buf, int nb_sectors, 623 BlockDriverCompletionFunc *cb, void *opaque); 624 BlockDriverAIOCB *bdrv_aio_write(BlockDriverState *bs, int64_t sector_num, 625 const uint8_t *buf, int nb_sectors, 626 BlockDriverCompletionFunc *cb, void *opaque); 627 void bdrv_aio_cancel(BlockDriverAIOCB *acb); 628 629 void qemu_aio_init(void); 630 void qemu_aio_poll(void); 631 void qemu_aio_flush(void); 632 void qemu_aio_wait_start(void); 633 void qemu_aio_wait(void); 634 void qemu_aio_wait_end(void); 635 636 /* Ensure contents are flushed to disk. 
*/ 637 void bdrv_flush(BlockDriverState *bs); 523 638 524 639 #define BDRV_TYPE_HD 0 525 640 #define BDRV_TYPE_CDROM 1 526 641 #define BDRV_TYPE_FLOPPY 2 527 #define BIOS_ATA_TRANSLATION_AUTO 0 528 #define BIOS_ATA_TRANSLATION_NONE 1 529 #define BIOS_ATA_TRANSLATION_LBA 2 642 #define BIOS_ATA_TRANSLATION_AUTO 0 643 #define BIOS_ATA_TRANSLATION_NONE 1 644 #define BIOS_ATA_TRANSLATION_LBA 2 645 #define BIOS_ATA_TRANSLATION_LARGE 3 646 #define BIOS_ATA_TRANSLATION_RECHS 4 530 647 531 648 void bdrv_set_geometry_hint(BlockDriverState *bs, … … 540 657 int bdrv_is_read_only(BlockDriverState *bs); 541 658 int bdrv_is_inserted(BlockDriverState *bs); 659 int bdrv_media_changed(BlockDriverState *bs); 542 660 int bdrv_is_locked(BlockDriverState *bs); 543 661 void bdrv_set_locked(BlockDriverState *bs, int locked); 662 void bdrv_eject(BlockDriverState *bs, int eject_flag); 544 663 void bdrv_set_change_cb(BlockDriverState *bs, 545 664 void (*change_cb)(void *opaque), void *opaque); … … 553 672 void *opaque); 554 673 const char *bdrv_get_device_name(BlockDriverState *bs); 555 556 int qcow_get_cluster_size(BlockDriverState *bs); 557 int qcow_compress_cluster(BlockDriverState *bs, int64_t sector_num, 558 const uint8_t *buf); 674 int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num, 675 const uint8_t *buf, int nb_sectors); 676 int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi); 677 678 void bdrv_get_backing_filename(BlockDriverState *bs, 679 char *filename, int filename_size); 680 int bdrv_snapshot_create(BlockDriverState *bs, 681 QEMUSnapshotInfo *sn_info); 682 int bdrv_snapshot_goto(BlockDriverState *bs, 683 const char *snapshot_id); 684 int bdrv_snapshot_delete(BlockDriverState *bs, const char *snapshot_id); 685 int bdrv_snapshot_list(BlockDriverState *bs, 686 QEMUSnapshotInfo **psn_info); 687 char *bdrv_snapshot_dump(char *buf, int buf_size, QEMUSnapshotInfo *sn); 688 689 char *get_human_readable_size(char *buf, int buf_size, int64_t size); 690 int path_is_absolute(const char *path); 691 void path_combine(char *dest, int dest_size, 692 const char *base_path, 693 const char *filename); 559 694 560 695 #ifndef QEMU_TOOL 696 697 typedef void QEMUMachineInitFunc(int ram_size, int vga_ram_size, 698 int boot_device, 699 DisplayState *ds, const char **fd_filename, int snapshot, 700 const char *kernel_filename, const char *kernel_cmdline, 701 const char *initrd_filename); 702 703 typedef struct QEMUMachine { 704 const char *name; 705 const char *desc; 706 QEMUMachineInitFunc *init; 707 struct QEMUMachine *next; 708 } QEMUMachine; 709 710 int qemu_register_machine(QEMUMachine *m); 711 712 typedef void SetIRQFunc(void *opaque, int irq_num, int level); 713 typedef void IRQRequestFunc(void *opaque, int level); 714 561 715 /* ISA bus */ 562 563 #if defined(VBOX)564 #define isa_mem_base THIS_SHALL_NOT_BE_USED//0565 #else /* !VBOX */566 716 567 717 extern target_phys_addr_t isa_mem_base; … … 575 725 IOPortWriteFunc *func, void *opaque); 576 726 void isa_unassign_ioport(int start, int length); 577 #endif /* !VBOX */ 727 728 void isa_mmio_init(target_phys_addr_t base, target_phys_addr_t size); 578 729 579 730 /* PCI bus */ 580 581 #if defined(VBOX)582 typedef struct PCIBus PCIBus;583 typedef struct PCIDevice PCIDevice;584 typedef struct openpic_t openpic_t;585 #else /* !VBOX */586 extern int pci_enabled;587 731 588 732 extern target_phys_addr_t pci_mem_base; … … 611 755 #define PCI_ROM_SLOT 6 612 756 #define PCI_NUM_REGIONS 7 757 758 #define PCI_DEVICES_MAX 64 759 760 #define PCI_VENDOR_ID 0x00 
/* 16 bits */ 761 #define PCI_DEVICE_ID 0x02 /* 16 bits */ 762 #define PCI_COMMAND 0x04 /* 16 bits */ 763 #define PCI_COMMAND_IO 0x1 /* Enable response in I/O space */ 764 #define PCI_COMMAND_MEMORY 0x2 /* Enable response in Memory space */ 765 #define PCI_CLASS_DEVICE 0x0a /* Device class */ 766 #define PCI_INTERRUPT_LINE 0x3c /* 8 bits */ 767 #define PCI_INTERRUPT_PIN 0x3d /* 8 bits */ 768 #define PCI_MIN_GNT 0x3e /* 8 bits */ 769 #define PCI_MAX_LAT 0x3f /* 8 bits */ 770 613 771 struct PCIDevice { 614 772 /* PCI config space */ … … 624 782 PCIConfigReadFunc *config_read; 625 783 PCIConfigWriteFunc *config_write; 784 /* ??? This is a PC-specific hack, and should be removed. */ 626 785 int irq_index; 786 787 /* Current IRQ levels. Used internally by the generic PCI code. */ 788 int irq_state[4]; 627 789 }; 628 790 … … 642 804 void pci_default_write_config(PCIDevice *d, 643 805 uint32_t address, uint32_t val, int len); 644 void generic_pci_save(QEMUFile* f, void *opaque); 645 int generic_pci_load(QEMUFile* f, void *opaque, int version_id); 646 647 extern struct PIIX3State *piix3_state; 648 649 PCIBus *i440fx_init(void); 650 void piix3_init(PCIBus *bus); 651 void pci_bios_init(void); 806 void pci_device_save(PCIDevice *s, QEMUFile *f); 807 int pci_device_load(PCIDevice *s, QEMUFile *f); 808 809 typedef void (*pci_set_irq_fn)(void *pic, int irq_num, int level); 810 typedef int (*pci_map_irq_fn)(PCIDevice *pci_dev, int irq_num); 811 PCIBus *pci_register_bus(pci_set_irq_fn set_irq, pci_map_irq_fn map_irq, 812 void *pic, int devfn_min, int nirq); 813 814 void pci_nic_init(PCIBus *bus, NICInfo *nd, int devfn); 815 void pci_data_write(void *opaque, uint32_t addr, uint32_t val, int len); 816 uint32_t pci_data_read(void *opaque, uint32_t addr, int len); 817 int pci_bus_num(PCIBus *s); 818 void pci_for_each_device(int bus_num, void (*fn)(PCIDevice *d)); 819 652 820 void pci_info(void); 653 654 /* temporary: will be moved in platform specific file */ 821 PCIBus *pci_bridge_init(PCIBus *bus, int devfn, uint32_t id, 822 pci_map_irq_fn map_irq, const char *name); 823 824 /* prep_pci.c */ 655 825 PCIBus *pci_prep_init(void); 656 struct openpic_t; 657 void pci_pmac_set_openpic(PCIBus *bus, struct openpic_t *openpic); 658 PCIBus *pci_pmac_init(void); 826 827 /* grackle_pci.c */ 828 PCIBus *pci_grackle_init(uint32_t base, void *pic); 829 830 /* unin_pci.c */ 831 PCIBus *pci_pmac_init(void *pic); 832 833 /* apb_pci.c */ 834 PCIBus *pci_apb_init(target_ulong special_base, target_ulong mem_base, 835 void *pic); 836 837 PCIBus *pci_vpb_init(void *pic, int irq, int realview); 838 839 /* piix_pci.c */ 840 PCIBus *i440fx_init(PCIDevice **pi440fx_state); 841 void i440fx_set_smm(PCIDevice *d, int val); 842 int piix3_init(PCIBus *bus, int devfn); 843 void i440fx_init_memory_mappings(PCIDevice *d); 844 845 int piix4_init(PCIBus *bus, int devfn); 659 846 660 847 /* openpic.c */ 661 848 typedef struct openpic_t openpic_t; 662 void openpic_set_irq (openpic_t *opp, int n_IRQ, int level); 663 openpic_t *openpic_init (PCIBus *bus, int *pmem_index, int nb_cpus); 664 #endif /* !VBOX */ 849 void openpic_set_irq(void *opaque, int n_IRQ, int level); 850 openpic_t *openpic_init (PCIBus *bus, int *pmem_index, int nb_cpus, 851 CPUState **envp); 852 853 /* heathrow_pic.c */ 854 typedef struct HeathrowPICS HeathrowPICS; 855 void heathrow_pic_set_irq(void *opaque, int num, int level); 856 HeathrowPICS *heathrow_pic_init(int *pmem_index); 857 858 /* gt64xxx.c */ 859 PCIBus *pci_gt64120_init(void *pic); 860 861 #ifdef HAS_AUDIO 862 
struct soundhw { 863 const char *name; 864 const char *descr; 865 int enabled; 866 int isa; 867 union { 868 int (*init_isa) (AudioState *s); 869 int (*init_pci) (PCIBus *bus, AudioState *s); 870 } init; 871 }; 872 873 extern struct soundhw soundhw[]; 874 #endif 665 875 666 876 /* vga.c */ 667 877 668 #define VGA_RAM_SIZE ( 4096* 1024)878 #define VGA_RAM_SIZE (8192 * 1024) 669 879 670 880 struct DisplayState { … … 672 882 int linesize; 673 883 int depth; 884 int bgr; /* BGR color order instead of RGB. Only valid for depth == 32 */ 674 885 int width; 675 886 int height; 887 void *opaque; 888 676 889 void (*dpy_update)(struct DisplayState *s, int x, int y, int w, int h); 677 890 void (*dpy_resize)(struct DisplayState *s, int w, int h); 678 891 void (*dpy_refresh)(struct DisplayState *s); 892 void (*dpy_copy)(struct DisplayState *s, int src_x, int src_y, int dst_x, int dst_y, int w, int h); 679 893 }; 680 894 … … 689 903 } 690 904 691 int vga_initialize(PCIBus *bus,DisplayState *ds, uint8_t *vga_ram_base,692 693 void vga_update_display(void); 694 void vga_invalidate_display(void); 695 void vga_screen_dump(const char *filename);905 int isa_vga_init(DisplayState *ds, uint8_t *vga_ram_base, 906 unsigned long vga_ram_offset, int vga_ram_size); 907 int pci_vga_init(PCIBus *bus, DisplayState *ds, uint8_t *vga_ram_base, 908 unsigned long vga_ram_offset, int vga_ram_size, 909 unsigned long vga_bios_offset, int vga_bios_size); 696 910 697 911 /* cirrus_vga.c */ … … 704 918 void sdl_display_init(DisplayState *ds, int full_screen); 705 919 920 /* cocoa.m */ 921 void cocoa_display_init(DisplayState *ds, int full_screen); 922 923 /* vnc.c */ 924 void vnc_display_init(DisplayState *ds, const char *display); 925 926 /* x_keymap.c */ 927 extern uint8_t _translate_keycode(const int key); 928 706 929 /* ide.c */ 707 930 #define MAX_DISKS 4 708 931 709 extern BlockDriverState *bs_table[MAX_DISKS ];932 extern BlockDriverState *bs_table[MAX_DISKS + 1]; 710 933 711 934 void isa_ide_init(int iobase, int iobase2, int irq, 712 935 BlockDriverState *hd0, BlockDriverState *hd1); 713 void pci_ide_init(PCIBus *bus, BlockDriverState **hd_table); 714 void pci_piix3_ide_init(PCIBus *bus, BlockDriverState **hd_table); 936 void pci_cmd646_ide_init(PCIBus *bus, BlockDriverState **hd_table, 937 int secondary_ide_enabled); 938 void pci_piix3_ide_init(PCIBus *bus, BlockDriverState **hd_table, int devfn); 715 939 int pmac_ide_init (BlockDriverState **hd_table, 716 openpic_t *openpic, int irq); 940 SetIRQFunc *set_irq, void *irq_opaque, int irq); 941 942 /* cdrom.c */ 943 int cdrom_read_toc(int nb_sectors, uint8_t *buf, int msf, int start_track); 944 int cdrom_read_toc_raw(int nb_sectors, uint8_t *buf, int msf, int session_num); 945 946 /* es1370.c */ 947 int es1370_init (PCIBus *bus, AudioState *s); 717 948 718 949 /* sb16.c */ 719 void SB16_init (void);950 int SB16_init (AudioState *s); 720 951 721 952 /* adlib.c */ 722 void Adlib_init (void);953 int Adlib_init (AudioState *s); 723 954 724 955 /* gus.c */ 725 void GUS_init (void);956 int GUS_init (AudioState *s); 726 957 727 958 /* dma.c */ … … 751 982 /* ne2000.c */ 752 983 753 void isa_ne2000_init(int base, int irq, NetDriverState *nd); 754 void pci_ne2000_init(PCIBus *bus, NetDriverState *nd); 984 void isa_ne2000_init(int base, int irq, NICInfo *nd); 985 void pci_ne2000_init(PCIBus *bus, NICInfo *nd, int devfn); 986 987 /* rtl8139.c */ 988 989 void pci_rtl8139_init(PCIBus *bus, NICInfo *nd, int devfn); 990 991 /* pcnet.c */ 992 993 void pci_pcnet_init(PCIBus *bus, NICInfo *nd, 
int devfn); 994 void pcnet_h_reset(void *opaque); 995 void *lance_init(NICInfo *nd, uint32_t leaddr, void *dma_opaque); 996 755 997 756 998 /* pckbd.c */ … … 769 1011 770 1012 typedef struct SerialState SerialState; 771 SerialState *serial_init(int base, int irq, CharDriverState *chr); 1013 SerialState *serial_init(SetIRQFunc *set_irq, void *opaque, 1014 int base, int irq, CharDriverState *chr); 1015 SerialState *serial_mm_init (SetIRQFunc *set_irq, void *opaque, 1016 target_ulong base, int it_shift, 1017 int irq, CharDriverState *chr); 772 1018 773 1019 /* parallel.c */ … … 778 1024 /* i8259.c */ 779 1025 1026 typedef struct PicState2 PicState2; 1027 extern PicState2 *isa_pic; 780 1028 void pic_set_irq(int irq, int level); 781 void pic_init(void); 782 uint32_t pic_intack_read(CPUState *env); 1029 void pic_set_irq_new(void *opaque, int irq, int level); 1030 PicState2 *pic_init(IRQRequestFunc *irq_request, void *irq_request_opaque); 1031 void pic_set_alt_irq_func(PicState2 *s, SetIRQFunc *alt_irq_func, 1032 void *alt_irq_opaque); 1033 int pic_read_irq(PicState2 *s); 1034 void pic_update_irq(PicState2 *s); 1035 uint32_t pic_intack_read(PicState2 *s); 783 1036 void pic_info(void); 784 1037 void irq_info(void); 785 1038 786 1039 /* APIC */ 1040 typedef struct IOAPICState IOAPICState; 1041 787 1042 int apic_init(CPUState *env); 788 1043 int apic_get_interrupt(CPUState *env); 1044 IOAPICState *ioapic_init(void); 1045 void ioapic_set_irq(void *opaque, int vector, int level); 789 1046 790 1047 /* i8254.c */ … … 797 1054 void pit_set_gate(PITState *pit, int channel, int val); 798 1055 int pit_get_gate(PITState *pit, int channel); 1056 int pit_get_initial_count(PITState *pit, int channel); 1057 int pit_get_mode(PITState *pit, int channel); 799 1058 int pit_get_out(PITState *pit, int channel, int64_t current_time); 800 1059 1060 /* pcspk.c */ 1061 void pcspk_init(PITState *); 1062 int pcspk_audio_init(AudioState *); 1063 1064 #include "hw/smbus.h" 1065 1066 /* acpi.c */ 1067 extern int acpi_enabled; 1068 void piix4_pm_init(PCIBus *bus, int devfn); 1069 void piix4_smbus_register_device(SMBusDevice *dev, uint8_t addr); 1070 void acpi_bios_init(void); 1071 1072 /* smbus_eeprom.c */ 1073 SMBusDevice *smbus_eeprom_device_init(uint8_t addr, uint8_t *buf); 1074 801 1075 /* pc.c */ 802 void pc_init(int ram_size, int vga_ram_size, int boot_device, 803 DisplayState *ds, const char **fd_filename, int snapshot, 804 const char *kernel_filename, const char *kernel_cmdline, 805 const char *initrd_filename); 1076 extern QEMUMachine pc_machine; 1077 extern QEMUMachine isapc_machine; 1078 extern int fd_bootchk; 1079 1080 void ioport_set_a20(int enable); 1081 int ioport_get_a20(void); 806 1082 807 1083 /* ppc.c */ 808 void ppc_init (int ram_size, int vga_ram_size, int boot_device, 809 DisplayState *ds, const char **fd_filename, int snapshot, 810 const char *kernel_filename, const char *kernel_cmdline, 811 const char *initrd_filename); 812 void ppc_prep_init (int ram_size, int vga_ram_size, int boot_device, 813 DisplayState *ds, const char **fd_filename, int snapshot, 814 const char *kernel_filename, const char *kernel_cmdline, 815 const char *initrd_filename); 816 void ppc_chrp_init(int ram_size, int vga_ram_size, int boot_device, 817 DisplayState *ds, const char **fd_filename, int snapshot, 818 const char *kernel_filename, const char *kernel_cmdline, 819 const char *initrd_filename); 1084 extern QEMUMachine prep_machine; 1085 extern QEMUMachine core99_machine; 1086 extern QEMUMachine heathrow_machine; 1087 1088 /* 
mips_r4k.c */ 1089 extern QEMUMachine mips_machine; 1090 1091 /* mips_malta.c */ 1092 extern QEMUMachine mips_malta_machine; 1093 1094 /* mips_int */ 1095 extern void cpu_mips_irq_request(void *opaque, int irq, int level); 1096 1097 /* mips_timer.c */ 1098 extern void cpu_mips_clock_init(CPUState *); 1099 extern void cpu_mips_irqctrl_init (void); 1100 1101 /* shix.c */ 1102 extern QEMUMachine shix_machine; 1103 820 1104 #ifdef TARGET_PPC 821 1105 ppc_tb_t *cpu_ppc_tb_init (CPUState *env, uint32_t freq); … … 825 1109 extern CPUWriteMemoryFunc *PPC_io_write[]; 826 1110 extern CPUReadMemoryFunc *PPC_io_read[]; 827 extern int prep_enabled; 828 829 #ifndef VBOX 1111 void PPC_debug_write (void *opaque, uint32_t addr, uint32_t val); 1112 830 1113 /* sun4m.c */ 831 void sun4m_init(int ram_size, int vga_ram_size, int boot_device, 832 DisplayState *ds, const char **fd_filename, int snapshot, 833 const char *kernel_filename, const char *kernel_cmdline, 834 const char *initrd_filename); 835 uint32_t iommu_translate(uint32_t addr); 1114 extern QEMUMachine sun4m_machine; 1115 void pic_set_irq_cpu(int irq, int level, unsigned int cpu); 836 1116 837 1117 /* iommu.c */ 838 1118 void *iommu_init(uint32_t addr); 839 uint32_t iommu_translate_local(void *opaque, uint32_t addr); 840 841 /* lance.c */ 842 void lance_init(NetDriverState *nd, int irq, uint32_t leaddr, uint32_t ledaddr); 1119 void sparc_iommu_memory_rw(void *opaque, target_phys_addr_t addr, 1120 uint8_t *buf, int len, int is_write); 1121 static inline void sparc_iommu_memory_read(void *opaque, 1122 target_phys_addr_t addr, 1123 uint8_t *buf, int len) 1124 { 1125 sparc_iommu_memory_rw(opaque, addr, buf, len, 0); 1126 } 1127 1128 static inline void sparc_iommu_memory_write(void *opaque, 1129 target_phys_addr_t addr, 1130 uint8_t *buf, int len) 1131 { 1132 sparc_iommu_memory_rw(opaque, addr, buf, len, 1); 1133 } 843 1134 844 1135 /* tcx.c */ 845 void *tcx_init(DisplayState *ds, uint32_t addr, uint8_t *vram_base, 846 unsigned long vram_offset, int vram_size); 847 void tcx_update_display(void *opaque); 848 void tcx_invalidate_display(void *opaque); 849 void tcx_screen_dump(void *opaque, const char *filename); 1136 void tcx_init(DisplayState *ds, uint32_t addr, uint8_t *vram_base, 1137 unsigned long vram_offset, int vram_size, int width, int height); 850 1138 851 1139 /* slavio_intctl.c */ 852 1140 void *slavio_intctl_init(); 1141 void slavio_intctl_set_cpu(void *opaque, unsigned int cpu, CPUState *env); 853 1142 void slavio_pic_info(void *opaque); 854 1143 void slavio_irq_info(void *opaque); 855 1144 void slavio_pic_set_irq(void *opaque, int irq, int level); 856 857 /* magic-load.c */ 858 int load_elf(const char *filename, uint8_t *addr); 1145 void slavio_pic_set_irq_cpu(void *opaque, int irq, int level, unsigned int cpu); 1146 1147 /* loader.c */ 1148 int get_image_size(const char *filename); 1149 int load_image(const char *filename, uint8_t *addr); 1150 int load_elf(const char *filename, int64_t virt_to_phys_addend, uint64_t *pentry); 859 1151 int load_aout(const char *filename, uint8_t *addr); 860 1152 861 1153 /* slavio_timer.c */ 862 void slavio_timer_init(uint32_t addr 1, int irq1, uint32_t addr2, int irq2);1154 void slavio_timer_init(uint32_t addr, int irq, int mode, unsigned int cpu); 863 1155 864 1156 /* slavio_serial.c */ 865 1157 SerialState *slavio_serial_init(int base, int irq, CharDriverState *chr1, CharDriverState *chr2); 866 1158 void slavio_serial_ms_kbd_init(int base, int irq); 1159 1160 /* slavio_misc.c */ 1161 void 
*slavio_misc_init(uint32_t base, int irq); 1162 void slavio_set_power_fail(void *opaque, int power_failing); 1163 1164 /* esp.c */ 1165 void esp_scsi_attach(void *opaque, BlockDriverState *bd, int id); 1166 void *esp_init(BlockDriverState **bd, uint32_t espaddr, void *dma_opaque); 1167 void esp_reset(void *opaque); 1168 1169 /* sparc32_dma.c */ 1170 void *sparc32_dma_init(uint32_t daddr, int espirq, int leirq, void *iommu, 1171 void *intctl); 1172 void ledma_set_irq(void *opaque, int isr); 1173 void ledma_memory_read(void *opaque, target_phys_addr_t addr, 1174 uint8_t *buf, int len, int do_bswap); 1175 void ledma_memory_write(void *opaque, target_phys_addr_t addr, 1176 uint8_t *buf, int len, int do_bswap); 1177 void espdma_raise_irq(void *opaque); 1178 void espdma_clear_irq(void *opaque); 1179 void espdma_memory_read(void *opaque, uint8_t *buf, int len); 1180 void espdma_memory_write(void *opaque, uint8_t *buf, int len); 1181 void sparc32_dma_set_reset_data(void *opaque, void *esp_opaque, 1182 void *lance_opaque); 1183 1184 /* cs4231.c */ 1185 void cs_init(target_phys_addr_t base, int irq, void *intctl); 1186 1187 /* sun4u.c */ 1188 extern QEMUMachine sun4u_machine; 867 1189 868 1190 /* NVRAM helpers */ … … 888 1210 uint32_t NVRAM_image, 889 1211 int width, int height, int depth); 890 #endif891 1212 892 1213 /* adb.c */ … … 932 1253 933 1254 extern ADBBusState adb_bus; 934 int cuda_init(openpic_t *openpic, int irq); 1255 int cuda_init(SetIRQFunc *set_irq, void *irq_opaque, int irq); 1256 1257 #include "hw/usb.h" 1258 1259 /* usb ports of the VM */ 1260 1261 void qemu_register_usb_port(USBPort *port, void *opaque, int index, 1262 usb_attachfn attach); 1263 1264 #define VM_USB_HUB_SIZE 8 1265 1266 void do_usb_add(const char *devname); 1267 void do_usb_del(const char *devname); 1268 void usb_info(void); 1269 1270 /* scsi-disk.c */ 1271 enum scsi_reason { 1272 SCSI_REASON_DONE, /* Command complete. */ 1273 SCSI_REASON_DATA /* Transfer complete, more data required. */ 1274 }; 1275 1276 typedef struct SCSIDevice SCSIDevice; 1277 typedef void (*scsi_completionfn)(void *opaque, int reason, uint32_t tag, 1278 uint32_t arg); 1279 1280 SCSIDevice *scsi_disk_init(BlockDriverState *bdrv, 1281 int tcq, 1282 scsi_completionfn completion, 1283 void *opaque); 1284 void scsi_disk_destroy(SCSIDevice *s); 1285 1286 int32_t scsi_send_command(SCSIDevice *s, uint32_t tag, uint8_t *buf, int lun); 1287 /* SCSI data transfers are asynchrnonous. However, unlike the block IO 1288 layer the completion routine may be called directly by 1289 scsi_{read,write}_data. 
*/ 1290 void scsi_read_data(SCSIDevice *s, uint32_t tag); 1291 int scsi_write_data(SCSIDevice *s, uint32_t tag); 1292 void scsi_cancel_io(SCSIDevice *s, uint32_t tag); 1293 uint8_t *scsi_get_buf(SCSIDevice *s, uint32_t tag); 1294 1295 /* lsi53c895a.c */ 1296 void lsi_scsi_attach(void *opaque, BlockDriverState *bd, int id); 1297 void *lsi_scsi_init(PCIBus *bus, int devfn); 1298 1299 /* integratorcp.c */ 1300 extern QEMUMachine integratorcp926_machine; 1301 extern QEMUMachine integratorcp1026_machine; 1302 1303 /* versatilepb.c */ 1304 extern QEMUMachine versatilepb_machine; 1305 extern QEMUMachine versatileab_machine; 1306 1307 /* realview.c */ 1308 extern QEMUMachine realview_machine; 1309 1310 /* ps2.c */ 1311 void *ps2_kbd_init(void (*update_irq)(void *, int), void *update_arg); 1312 void *ps2_mouse_init(void (*update_irq)(void *, int), void *update_arg); 1313 void ps2_write_mouse(void *, int val); 1314 void ps2_write_keyboard(void *, int val); 1315 uint32_t ps2_read_data(void *); 1316 void ps2_queue(void *, int b); 1317 void ps2_keyboard_set_translation(void *opaque, int mode); 1318 1319 /* smc91c111.c */ 1320 void smc91c111_init(NICInfo *, uint32_t, void *, int); 1321 1322 /* pl110.c */ 1323 void *pl110_init(DisplayState *ds, uint32_t base, void *pic, int irq, int); 1324 1325 /* pl011.c */ 1326 void pl011_init(uint32_t base, void *pic, int irq, CharDriverState *chr); 1327 1328 /* pl050.c */ 1329 void pl050_init(uint32_t base, void *pic, int irq, int is_mouse); 1330 1331 /* pl080.c */ 1332 void *pl080_init(uint32_t base, void *pic, int irq, int nchannels); 1333 1334 /* pl190.c */ 1335 void *pl190_init(uint32_t base, void *parent, int irq, int fiq); 1336 1337 /* arm-timer.c */ 1338 void sp804_init(uint32_t base, void *pic, int irq); 1339 void icp_pit_init(uint32_t base, void *pic, int irq); 1340 1341 /* arm_sysctl.c */ 1342 void arm_sysctl_init(uint32_t base, uint32_t sys_id); 1343 1344 /* arm_gic.c */ 1345 void *arm_gic_init(uint32_t base, void *parent, int parent_irq); 1346 1347 /* arm_boot.c */ 1348 1349 void arm_load_kernel(CPUState *env, int ram_size, const char *kernel_filename, 1350 const char *kernel_cmdline, const char *initrd_filename, 1351 int board_id); 1352 1353 /* sh7750.c */ 1354 struct SH7750State; 1355 1356 struct SH7750State *sh7750_init(CPUState * cpu); 1357 1358 typedef struct { 1359 /* The callback will be triggered if any of the designated lines change */ 1360 uint16_t portamask_trigger; 1361 uint16_t portbmask_trigger; 1362 /* Return 0 if no action was taken */ 1363 int (*port_change_cb) (uint16_t porta, uint16_t portb, 1364 uint16_t * periph_pdtra, 1365 uint16_t * periph_portdira, 1366 uint16_t * periph_pdtrb, 1367 uint16_t * periph_portdirb); 1368 } sh7750_io_device; 1369 1370 int sh7750_register_io_device(struct SH7750State *s, 1371 sh7750_io_device * device); 1372 /* tc58128.c */ 1373 int tc58128_init(struct SH7750State *s, char *zone1, char *zone2); 1374 1375 /* NOR flash devices */ 1376 typedef struct pflash_t pflash_t; 1377 1378 pflash_t *pflash_register (target_ulong base, ram_addr_t off, 1379 BlockDriverState *bs, 1380 target_ulong sector_len, int nb_blocs, int width, 1381 uint16_t id0, uint16_t id1, 1382 uint16_t id2, uint16_t id3); 1383 1384 #include "gdbstub.h" 935 1385 936 1386 #endif /* defined(QEMU_TOOL) */ 937 1387 938 #ifndef VBOX939 1388 /* monitor.c */ 940 1389 void monitor_init(CharDriverState *hd, int show_banner); … … 942 1391 void term_vprintf(const char *fmt, va_list ap); 943 1392 void term_printf(const char *fmt, ...) 
__attribute__ ((__format__ (__printf__, 1, 2))); 1393 void term_print_filename(const char *filename); 944 1394 void term_flush(void); 945 1395 void term_print_help(void); … … 958 1408 ReadLineFunc *readline_func, void *opaque); 959 1409 1410 void kqemu_record_dump(void); 1411 960 1412 #endif /* !VBOX */ 961 1413 962 /* gdbstub.c */963 964 #define DEFAULT_GDBSTUB_PORT 1234965 966 int gdbserver_start(int port);967 968 1414 #endif /* VL_H */
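Among the vl.h changes above, character devices move from per-callback registration to a single qemu_chr_add_handlers() call carrying the can-read/read/event trio and one opaque pointer. A minimal sketch of a device attaching with the new signature, assuming vl.h is included for the declarations; the serial_demo_* names are illustrative, not part of the changeset:

    #include <string.h>

    typedef struct {
        CharDriverState *chr;
        uint8_t fifo[16];
        int fifo_len;
    } SerialDemo;

    /* backend asks how many bytes we can accept right now */
    static int serial_demo_can_read(void *opaque)
    {
        SerialDemo *s = opaque;
        return (int)sizeof(s->fifo) - s->fifo_len;
    }

    /* backend delivers at most what can_read advertised */
    static void serial_demo_read(void *opaque, const uint8_t *buf, int size)
    {
        SerialDemo *s = opaque;
        memcpy(s->fifo + s->fifo_len, buf, size);
        s->fifo_len += size;
    }

    static void serial_demo_event(void *opaque, int event)
    {
        if (event == CHR_EVENT_BREAK) {
            /* e.g. latch a break condition for the guest to observe */
        }
    }

    static void serial_demo_init(SerialDemo *s, CharDriverState *chr)
    {
        s->chr = chr;
        qemu_chr_add_handlers(chr, serial_demo_can_read, serial_demo_read,
                              serial_demo_event, s);
    }

The single opaque pointer is what makes the bundling worthwhile: all three callbacks share one device state, so nothing has to be registered or torn down separately.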
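vl.h also introduces bottom halves (QEMUBH): work queued from IO or timer context and run later in the main loop, which drains them with qemu_bh_poll(). A hedged consumer sketch under the same assumptions, with made-up demo_* names:

    typedef struct {
        QEMUBH *bh;
        int pending;
    } DemoDev;

    /* runs later, from qemu_bh_poll() in the main loop */
    static void demo_complete_bh(void *opaque)
    {
        DemoDev *d = opaque;
        d->pending = 0;
    }

    static void demo_dev_init(DemoDev *d)
    {
        d->pending = 0;
        d->bh = qemu_bh_new(demo_complete_bh, d);
    }

    static void demo_start_request(DemoDev *d)
    {
        d->pending = 1;
        qemu_bh_schedule(d->bh);    /* completion is deferred, not run here */
    }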
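Finally, PCI interrupt routing is now split between generic and board code: the board hands pci_register_bus() a map_irq function (device interrupt pin to board IRQ line) and a set_irq function (drive that line on the interrupt controller). A sketch of a hypothetical board using the common slot-swizzle rule; the routing, the PIC inputs chosen, and the demo_* names are assumptions, not taken from the changeset:

    /* route INTA..INTD (irq_num 0..3) of each slot onto 4 board lines */
    static int demo_map_irq(PCIDevice *pci_dev, int irq_num)
    {
        int slot = (pci_dev->devfn >> 3) & 0x1f;    /* devfn = slot:func */
        return (irq_num + slot) & 3;
    }

    /* drive one of the 4 board lines; here wired to PIC inputs 8..11 */
    static void demo_set_irq(void *pic, int irq_num, int level)
    {
        pic_set_irq_new(pic, 8 + irq_num, level);
    }

    /* board init:
     *   PCIBus *bus = pci_register_bus(demo_set_irq, demo_map_irq,
     *                                  isa_pic, 8, 4);
     */

The split keeps the generic PCI core free of board knowledge: devices only raise their pins, and each board decides how those pins reach its controller.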