- Timestamp: Oct 16, 2008 9:16:27 PM
- Location: trunk/src/recompiler_new
- Files: 7 edited
trunk/src/recompiler_new/Makefile.kmk
r13337 r13357 3 3 # The Recompiler Sub-Makefile. 4 4 # 5 # There are a few of complicating factors here, esp. on AMD64 systems:6 #7 # * op.c doesn't compile work correctly with gcc 4. For this we've8 # checked in op.S, which is the reason why we don't compile op.c9 # directly but always compile via the assembly file.s10 # * On 64-bit Windows we lack a compiler and have to resort to a11 # linux cross compiler building an ELF relocatable module which12 # we then load using a wrapper module. Thus the REM_MOD mess.13 # * On platforms using the 64-bit GCC ABI, we're not allowed to14 # generate non-PIC shared objects, and op.c requires the code15 # to be non-PIC. We apply the same trick as we developed for16 # 64-bit windows.17 #18 19 5 # 20 6 # Copyright (C) 2006-2007 Sun Microsystems, Inc. … … 43 29 44 30 DEFS += VBOX_WITH_NEW_RECOMPILER 31 $(REM_MOD)_TEMPLATE = VBOXR3NP 45 32 46 33 47 OTHER_CLEAN += \ 48 $(PATH_$(REM_MOD))/op.h \ 49 $(PATH_$(REM_MOD))/opc.h \ 50 $(PATH_$(REM_MOD))/gen-op.h \ 51 $(PATH_$(REM_MOD))/opc.h 34 OTHER_CLEAN += 52 35 53 36 # … … 55 38 # 56 39 VBOX_PATH_RECOMPILER_SRC := $(PATH_SUB_CURRENT) 57 TEMPLATE_DUMMY = dummy template (move to kBuild)58 40 59 #60 # L4 must use the no-crt path because it's lacking math stuff it seems...61 # Darwin must use the non-crt path because it can't compile op.c nativly.62 # All the AMD64 target must use the no-crt path because ELF doesn't like op.c63 # when stuffed into a shared library and windows doesn't have 64-bit gcc (yet).64 #65 ifeq ($(filter-out l4 darwin freebsd,$(KBUILD_TARGET)),)66 REM_USE_NOCRT := 167 endif68 ifeq ($(REM_MOD),VBoxREM2)69 REM_USE_NOCRT := 170 endif71 72 #73 41 # The VBoxREM.[dll|so|..] or VBoxREM2.rel. 74 42 # … … 76 44 $(REM_MOD)_DEFS += REM_PHYS_ADDR_IN_TLB 77 45 #$(REM_MOD)_DEFS += DEBUG_ALL_LOGGING DEBUG_DISAS DEBUG_PCALL DEBUG_EXEC DEBUG_FLUSH DEBUG_IOPORT DEBUG_SIGNAL DEBUG_TLB_CHECK DEBUG_TB_INVALIDATE DEBUG_TLB # Enables huge amounts of debug logging. 46 $(REM_MOD)_DEFS.linux = _GNU_SOURCE 78 47 79 48 $(REM_MOD)_INCS = \ … … 99 68 target-i386/op_helper.c \ 100 69 target-i386/helper.c \ 101 target-i386/helper2.c \102 70 target-i386/translate.c 103 71 104 72 ifeq ($(KBUILD_TARGET_ARCH),amd64) 105 73 $(REM_MOD)_INCS += tcg/x86_64 106 $(REM_MOD)_SOURCES += tcg/x86_64/tcg-target.c107 74 else 108 75 $(REM_MOD)_INCS += tcg/i386 109 $(REM_MOD)_SOURCES += tcg/i386/tcg-target.c110 76 endif 111 77 … … 118 84 endif 119 85 86 $(REM_MOD)_CFLAGS.linux += -Wno-unused 87 $(REM_MOD)_CFLAGS.debug = -O0 88 $(REM_MOD)_CFLAGS.release += -fomit-frame-pointer -fno-gcse 89 $(REM_MOD)_CFLAGS.profile = $($(REM_MOD)_CFLAGS.release) 90 $(REM_MOD)_CFLAGS.kprofile = $($(REM_MOD)_CFLAGS.release) 120 91 121 ifdef REM_USE_NOCRT 122 $(REM_MOD)_TEMPLATE = VBOXNOCRTGAS 123 $(REM_MOD)_DEFS += LOG_USE_C99 $(ARCH_BITS_DEFS) 124 $(REM_MOD)_CFLAGS.amd64 = -O2 125 $(REM_MOD)_CFLAGS.debug = -O0 126 ifdef ($(KBUILD_TARGET_ARCH),x86) 127 $(REM_MOD)_CFLAGS.release+= -fomit-frame-pointer -fno-gcse 128 endif 92 $(REM_MOD)_DEFS += IN_RING3 LOG_USE_C99 $(ARCH_BITS_DEFS) 93 #$(REM_MOD)_DEFS += DEBUG_DISAS DEBUG_PCALL DEBUG_EXEC DEBUG_FLUSH DEBUG_IOPORT DEBUG_SIGNAL DEBUG_TLB_CHECK DEBUG_TB_INVALIDATE DEBUG_TLB # Enables huge amounts of debug logging. 129 94 130 # This doesn't fit in IPRT because it requires GAS and is LGPL. 
131 $(REM_MOD)_SOURCES += \ 132 Sun/e_powl-$(KBUILD_TARGET_ARCH).S 95 $(REM_MOD)_LDFLAGS.darwin = -read_only_relocs suppress -install_name $(VBOX_DYLD_EXECUTABLE_PATH)/$(REM_MOD).dylib -multiple_defined warning 96 $(REM_MOD)_LDFLAGS.l4 = -T$(L4_LIBDIR)/../main_rel.ld -nostdlib -Wl,--no-undefined 97 $(REM_MOD)_LDFLAGS.linux = $(VBOX_LD_as_needed) 98 $(REM_MOD)_LDFLAGS.os2 = -Zomf 99 $(REM_MOD)_LDFLAGS.debug = -g 100 $(REM_MOD)_LDFLAGS.solaris = -mimpure-text 133 101 134 ifeq ($(REM_MOD),VBoxREM) 135 $(REM_MOD)_LIBS = \ 136 $(PATH_LIB)/RuntimeR3NoCRTGCC$(VBOX_SUFF_LIB) \ 137 $(LIB_VMM) \ 138 $(LIB_RUNTIME) 139 ifeq ($(KBUILD_TARGET),l4) 140 $(REM_MOD)_LIBS += \ 141 $(L4_LIBDIR)/libuc.0.s.so 142 endif 143 $(REM_MOD)_LIBS.darwin = \ 144 $(TARGET_VBoxREMImp) 145 $(REM_MOD)_LDFLAGS.darwin = -read_only_relocs suppress -multiply_defined warning # -install_name $(VBOX_DYLD_EXECUTABLE_PATH)/$(REM_MOD).dylib 146 $(REM_MOD)_POST_CMDS.darwin = install_name_tool -id $(VBOX_DYLD_EXECUTABLE_PATH)/$(REM_MOD).dylib $(out) 147 $(REM_MOD)_CFLAGS.darwin = -fno-common -mdynamic-no-pic 148 else 149 $(REM_MOD)_LIBS = \ 150 $(PATH_LIB)/RuntimeR3NoCRTGCC$(VBOX_SUFF_LIB) 151 $(REM_MOD)_SYSSUFF = .rel 152 $(REM_MOD)_LDFLAGS.darwin = -nostdlib -static 153 $(REM_MOD)_CFLAGS.darwin = -fno-common -static -mno-dynamic-no-pic 154 endif 102 $(REM_MOD)_LIBS = \ 103 $(LIB_VMM) \ 104 $(LIB_RUNTIME) 155 105 156 else # !REM_USE_NOCRT157 158 $(REM_MOD)_TOOL = GXX3159 $(REM_MOD)_TOOL.solaris = GXX3PLAIN160 $(REM_MOD)_TOOL.win.x86 = MINGW32161 $(REM_MOD)_TOOL.win.amd64 = XGCCAMD64LINUX162 $(REM_MOD)_TEMPLATE = DUMMY163 $(REM_MOD)_SDKS.win.x86 = W32API ## @todo do we really need this now?164 $(REM_MOD)_ASFLAGS = -x assembler-with-cpp ## @todo didn't I make this default already?165 $(REM_MOD)_CFLAGS = -Wall -g166 $(REM_MOD)_CFLAGS.debug = -O0167 $(REM_MOD)_CFLAGS.release += -fomit-frame-pointer -fno-gcse168 $(REM_MOD)_CFLAGS.profile = $($(REM_MOD)_CFLAGS.release)169 $(REM_MOD)_CFLAGS.kprofile = $($(REM_MOD)_CFLAGS.release)170 $(REM_MOD)_CFLAGS.l4 = -nostdinc171 ifeq ($(KBUILD_TARGET),l4)172 $(REM_MOD)_INCS += $(VBOX_L4_GCC3_INCS) $(L4_INCDIR)173 endif174 175 $(REM_MOD)_DEFS += IN_RING3 LOG_USE_C99 $(ARCH_BITS_DEFS)176 #$(REM_MOD)_DEFS += DEBUG_DISAS DEBUG_PCALL DEBUG_EXEC DEBUG_FLUSH DEBUG_IOPORT DEBUG_SIGNAL DEBUG_TLB_CHECK DEBUG_TB_INVALIDATE DEBUG_TLB # Enables huge amounts of debug logging.177 # these defines are probably all irrelevant now:178 $(REM_MOD)_DEFS += _GNU_SOURCE _FILE_OFFSET_BITS=64 _LARGEFILE_SOURCE _REENTRANT179 180 $(REM_MOD)_LDFLAGS.darwin = -read_only_relocs suppress -install_name $(VBOX_DYLD_EXECUTABLE_PATH)/$(REM_MOD).dylib -multiple_defined warning181 $(REM_MOD)_LDFLAGS.l4 = -T$(L4_LIBDIR)/../main_rel.ld -nostdlib -Wl,--no-undefined182 $(REM_MOD)_LDFLAGS.linux = $(VBOX_LD_as_needed)183 $(REM_MOD)_LDFLAGS.os2 = -Zomf184 $(REM_MOD)_LDFLAGS.debug = -g185 $(REM_MOD)_LDFLAGS.solaris = -mimpure-text186 ifdef VBOX_SOLARIS_10187 $(REM_MOD)_DEFS.solaris += HOST_SOLARIS=10188 else # solaris 11189 $(REM_MOD)_DEFS.solaris += HOST_SOLARIS=11190 endif191 ifeq ($(KBUILD_TARGET_ARCH),amd64)192 $(REM_MOD)_LIBS = $(FILE_TOOL_GCC3_LIBGCC)193 else # x86194 $(REM_MOD)_LIBS = \195 $(LIB_VMM) \196 $(LIB_RUNTIME)197 $(REM_MOD)_LIBS.win.x86 = \198 mingw32 \199 user32 gdi32 winmm ws2_32 iphlpapi dxguid200 $(REM_MOD)_LIBS.linux = \201 $(LIB_UUID) \202 m \203 util \204 rt \205 $(LIB_PTHREAD)206 $(REM_MOD)_LIBS.l4 = \207 gcc \208 $(L4_LIBDIR)/libvboxserver.s.so \209 $(L4_LIBDIR)/libdl.s.so \210 $(L4_LIBDIR)/libuc.0.s.so211 endif # x86212 
213 endif # !REM_USE_NOCRT214 106 215 107 # Extra flags for these source modules. 216 target-i386/ helper.c_CFLAGS.x86 = -O2 -fomit-frame-pointer -fno-strict-aliasing -fno-gcse108 target-i386/op-helper.c_CFLAGS.x86 = -O2 -fomit-frame-pointer -fno-strict-aliasing -fno-gcse 217 109 cpu-exec.c_CFLAGS.x86 = -O2 -fomit-frame-pointer -fno-strict-aliasing -fno-gcse 218 110 cpu-exec.c_CFLAGS.solaris.amd64 = -O2 -fomit-frame-pointer -fno-strict-aliasing 219 111 220 221 # transitional rule222 $(PATH_$(REM_MOD))/op.h:223 $(APPEND) $@ ''224 112 225 113 # … … 227 115 # 228 116 ## @todo This is a bit messy because of MINGW32. 229 #BLDPROGS += testmath230 testmath_TOOL = GXX3231 testmath_TOOL.win.x86 = MINGW32232 testmath_SDKS.win.x86 = W32API233 ifeq ($(KBUILD_HOST).$(KBUILD_HOST_ARCH),win.amd64)234 # 64-bit windows: Pretend to be 32-bit.235 testmath_BLD_TRG = win32236 testmath_BLD_TRG_ARCH = x86237 testmath_BLD_TRG_CPU = i386238 endif239 testmath_ASTOOL = $(VBOX_ASTOOL)240 ifeq ($(filter-out win32 win64,$(KBUILD_HOST)),)241 testmath_ASFLAGS = -f win32 -DNASM_FORMAT_PE $(VBOX_ASFLAGS) -w+orphan-labels242 else243 testmath_ASFLAGS = -f elf -DNASM_FORMAT_ELF $(VBOX_ASFLAGS) -w+orphan-labels244 endif245 117 testmath_ASFLAGS.amd64 = -m amd64 246 118 testmath_CFLAGS = -Wall -g … … 250 122 testmath_SOURCES = Sun/testmath.c 251 123 #testmath_SOURCES += $(PATH_LIB)/RuntimeR3NoCRTGCC$(VBOX_SUFF_LIB) 252 253 254 ifeq ($(REM_MOD),VBoxREM2)255 #256 # The VBoxREM2 wrapper.257 #258 VBoxREM_TEMPLATE = VBOXR3259 VBoxREM_DEFS = IN_REM_R3260 VBoxREM_SOURCES = \261 VBoxREMWrapper.cpp \262 VBoxREMWrapperA.asm263 VBoxREM_LDFLAGS.darwin = -install_name $(VBOX_DYLD_EXECUTABLE_PATH)/VBoxREM.dylib264 VBoxREM_LIBS = \265 $(LIB_VMM) \266 $(LIB_RUNTIME)267 endif268 124 269 125 -
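Among the Makefile changes above, the per-source overrides (target-i386/op-helper.c_CFLAGS.x86, cpu-exec.c_CFLAGS.x86) keep building the hottest translation/execution modules with -O2 -fomit-frame-pointer -fno-strict-aliasing -fno-gcse. A minimal sketch of why -fno-strict-aliasing is typically required for this kind of code, assuming the helpers reinterpret guest memory through differently typed pointers (the function names below are made up, not part of the sources):

    /* Illustrative only: reading a byte buffer through a uint32_t pointer is
     * formally an aliasing violation, so code written in the cast style needs
     * -fno-strict-aliasing; the memcpy() form is the portable equivalent. */
    #include <stdint.h>
    #include <string.h>

    static inline uint32_t ldl_cast_example(const void *p)
    {
        return *(const uint32_t *)p;      /* relies on -fno-strict-aliasing */
    }

    static inline uint32_t ldl_memcpy_example(const void *p)
    {
        uint32_t v;
        memcpy(&v, p, sizeof(v));         /* well defined under any flags */
        return v;
    }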
trunk/src/recompiler_new/cpu-defs.h
r13230 r13357

      target_phys_addr_t addend;
  #endif
+ #ifndef VBOX
      /* padding to get a power of two size */
      uint8_t dummy[(1 << CPU_TLB_ENTRY_BITS) -
                    …
                    ((-sizeof(target_ulong) * 3) & (sizeof(target_phys_addr_t) - 1)) +
                    sizeof(target_phys_addr_t))];
+ #endif
  } CPUTLBEntry;
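The hunk above wraps the dummy[] member of CPUTLBEntry in #ifndef VBOX, i.e. the VBox build drops the padding. Upstream, that padding exists only to keep the entry a power-of-two size (1 << CPU_TLB_ENTRY_BITS), which lets a TLB index be turned into a byte offset with a single shift. A sketch of the idea, assuming a simplified 64-bit layout and made-up names:

    #include <stdint.h>
    #include <assert.h>

    #define CPU_TLB_ENTRY_BITS 5                  /* 32-byte entries; value assumed */

    typedef struct ExampleTLBEntry {
        uint64_t addr_read;
        uint64_t addr_write;
        uint64_t addr_code;
        uint64_t addend;                           /* 4 * 8 == 32 bytes, no padding needed */
    } ExampleTLBEntry;

    /* With a power-of-two size, &tlb[idx] is tlb_base + (idx << CPU_TLB_ENTRY_BITS). */
    static_assert(sizeof(ExampleTLBEntry) == (1u << CPU_TLB_ENTRY_BITS),
                  "TLB entry size must stay a power of two");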
trunk/src/recompiler_new/dyngen-exec.h
r13337 r13357

      typedef unsigned short uint16_t;
      typedef unsigned int uint32_t;
- // Linux/Sparc64 defines uint64_t
+ /* Linux/Sparc64 defines uint64_t */
  #if !(defined (__sparc_v9__) && defined(__linux__))
  /* XXX may be done for all 64 bits targets ? */
trunk/src/recompiler_new/osdep.h
r13337 r13357

  #define likely(x) __builtin_expect(!!(x), 1)
  #define unlikely(x) __builtin_expect(!!(x), 0)
- #else // VBOX
+ #else /* VBOX */
  #define likely(cond) RT_LIKELY(cond)
  #define unlikely(cond) RT_UNLIKELY(cond)
  #endif
- #endif // !likely
+ #endif /* !likely */

  #ifndef offsetof
  …
  #define qemu_gettimeofday(tp) gettimeofday(tp, NULL);
  #endif /* !_WIN32 */
- #endif // !VBOX
+ #endif /* !VBOX */

  #endif
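In the VBox build the likely()/unlikely() macros above forward to IPRT's RT_LIKELY()/RT_UNLIKELY(); outside VBox they expand to GCC's __builtin_expect(), which only biases the compiler's idea of which branch is hot. A short usage sketch (example_fetch() is a made-up helper, not part of the sources):

    #include <stddef.h>

    #ifndef likely
    # define likely(x)   __builtin_expect(!!(x), 1)   /* GCC builtin */
    # define unlikely(x) __builtin_expect(!!(x), 0)
    #endif

    static int example_fetch(const int *table, size_t n, size_t i)
    {
        if (unlikely(table == NULL || i >= n))
            return -1;            /* rare error path, kept off the hot path */
        return table[i];          /* common case stays on the fall-through path */
    }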
trunk/src/recompiler_new/target-i386/helper.c
r13337 r13357 1 1 /* 2 * i386 helpers 2 * i386 helpers (without register variable usage) 3 3 * 4 4 * Copyright (c) 2003 Fabrice Bellard … … 18 18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 19 19 */ 20 21 /*22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where25 * a choice of LGPL license versions is made available with the language indicating26 * that LGPLv2 or any later version may be used, or where a choice of which version27 * of the LGPL is applied is otherwise unspecified.28 */29 #ifdef VBOX30 # include <VBox/err.h>31 #endif32 33 #ifndef VBOX34 20 #include <stdarg.h> 35 21 #include <stdlib.h> … … 37 23 #include <string.h> 38 24 #include <inttypes.h> 25 #ifndef VBOX 39 26 #include <signal.h> 40 27 #include <assert.h> … … 46 33 #include "qemu-common.h" 47 34 48 //#define DEBUG_PCALL 49 50 #if 0 51 #define raise_exception_err(a, b)\ 52 do {\ 53 if (logfile)\ 54 fprintf(logfile, "raise_exception line=%d\n", __LINE__);\ 55 (raise_exception_err)(a, b);\ 56 } while (0) 57 #endif 58 59 const uint8_t parity_table[256] = { 60 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, 61 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, 62 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, 63 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, 64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, 65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, 66 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, 67 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, 68 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, 69 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, 70 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, 71 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, 72 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, 73 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, 74 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, 75 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, 76 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, 77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, 78 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, 79 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, 80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, 81 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, 82 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, 83 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, 84 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, 85 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, 86 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, 87 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, 88 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, 89 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, 90 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, 91 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, 35 //#define DEBUG_MMU 36 37 static int cpu_x86_register (CPUX86State *env, const char *cpu_model); 38 39 static void add_flagname_to_bitmaps(char *flagname, uint32_t *features, 40 uint32_t *ext_features, 41 uint32_t *ext2_features, 42 uint32_t *ext3_features) 43 { 44 int i; 45 /* feature flags taken from "Intel Processor Identification and the CPUID 46 * Instruction" and AMD's "CPUID Specification". In cases of disagreement 47 * about feature names, the Linux name is used. 
*/ 48 static const char *feature_name[] = { 49 "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce", 50 "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov", 51 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx", 52 "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe", 53 }; 54 static const char *ext_feature_name[] = { 55 "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est", 56 "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL, 57 NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt", 58 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 59 }; 60 static const char *ext2_feature_name[] = { 61 "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce", 62 "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mttr", "pge", "mca", "cmov", 63 "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx", 64 "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow", 65 }; 66 static const char *ext3_feature_name[] = { 67 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse", 68 "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL, 69 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 70 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 71 }; 72 73 for ( i = 0 ; i < 32 ; i++ ) 74 if (feature_name[i] && !strcmp (flagname, feature_name[i])) { 75 *features |= 1 << i; 76 return; 77 } 78 for ( i = 0 ; i < 32 ; i++ ) 79 if (ext_feature_name[i] && !strcmp (flagname, ext_feature_name[i])) { 80 *ext_features |= 1 << i; 81 return; 82 } 83 for ( i = 0 ; i < 32 ; i++ ) 84 if (ext2_feature_name[i] && !strcmp (flagname, ext2_feature_name[i])) { 85 *ext2_features |= 1 << i; 86 return; 87 } 88 for ( i = 0 ; i < 32 ; i++ ) 89 if (ext3_feature_name[i] && !strcmp (flagname, ext3_feature_name[i])) { 90 *ext3_features |= 1 << i; 91 return; 92 } 93 fprintf(stderr, "CPU feature %s not found\n", flagname); 94 } 95 #ifndef VBOX 96 CPUX86State *cpu_x86_init(const char *cpu_model) 97 { 98 CPUX86State *env; 99 static int inited; 100 101 env = qemu_mallocz(sizeof(CPUX86State)); 102 if (!env) 103 return NULL; 104 #else 105 CPUX86State *cpu_x86_init(CPUX86State *env, const char *cpu_model) 106 { 107 static int inited; 108 #endif 109 cpu_exec_init(env); 110 env->cpu_model_str = cpu_model; 111 112 /* init various static tables */ 113 if (!inited) { 114 inited = 1; 115 optimize_flags_init(); 116 } 117 if (cpu_x86_register(env, cpu_model) < 0) { 118 cpu_x86_close(env); 119 return NULL; 120 } 121 cpu_reset(env); 122 #ifdef USE_KQEMU 123 kqemu_init(env); 124 #endif 125 return env; 126 } 127 128 typedef struct x86_def_t { 129 const char *name; 130 uint32_t level; 131 uint32_t vendor1, vendor2, vendor3; 132 int family; 133 int model; 134 int stepping; 135 uint32_t features, ext_features, ext2_features, ext3_features; 136 uint32_t xlevel; 137 char model_id[48]; 138 } x86_def_t; 139 140 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE) 141 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \ 142 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX) 143 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \ 144 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \ 145 CPUID_PSE36 | CPUID_FXSR) 146 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE) 147 #define 
PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \ 148 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \ 149 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \ 150 CPUID_PAE | CPUID_SEP | CPUID_APIC) 151 static x86_def_t x86_defs[] = { 152 #ifdef TARGET_X86_64 153 { 154 .name = "qemu64", 155 .level = 2, 156 .vendor1 = CPUID_VENDOR_AMD_1, 157 .vendor2 = CPUID_VENDOR_AMD_2, 158 .vendor3 = CPUID_VENDOR_AMD_3, 159 .family = 6, 160 .model = 2, 161 .stepping = 3, 162 .features = PPRO_FEATURES | 163 /* these features are needed for Win64 and aren't fully implemented */ 164 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 165 /* this feature is needed for Solaris and isn't fully implemented */ 166 CPUID_PSE36, 167 .ext_features = CPUID_EXT_SSE3, 168 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | 169 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | 170 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT, 171 .ext3_features = CPUID_EXT3_SVM, 172 .xlevel = 0x8000000A, 173 .model_id = "QEMU Virtual CPU version " QEMU_VERSION, 174 }, 175 { 176 .name = "core2duo", 177 .level = 10, 178 .family = 6, 179 .model = 15, 180 .stepping = 11, 181 /* The original CPU also implements these features: 182 CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT, 183 CPUID_TM, CPUID_PBE */ 184 .features = PPRO_FEATURES | 185 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 186 CPUID_PSE36, 187 /* The original CPU also implements these ext features: 188 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST, 189 CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */ 190 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3, 191 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 192 /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */ 193 .xlevel = 0x80000008, 194 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz", 195 }, 196 #endif 197 { 198 .name = "qemu32", 199 .level = 2, 200 .family = 6, 201 .model = 3, 202 .stepping = 3, 203 .features = PPRO_FEATURES, 204 .ext_features = CPUID_EXT_SSE3, 205 .xlevel = 0, 206 .model_id = "QEMU Virtual CPU version " QEMU_VERSION, 207 }, 208 { 209 .name = "coreduo", 210 .level = 10, 211 .family = 6, 212 .model = 14, 213 .stepping = 8, 214 /* The original CPU also implements these features: 215 CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT, 216 CPUID_TM, CPUID_PBE */ 217 .features = PPRO_FEATURES | CPUID_VME | 218 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA, 219 /* The original CPU also implements these ext features: 220 CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR, 221 CPUID_EXT_PDCM */ 222 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR, 223 .ext2_features = CPUID_EXT2_NX, 224 .xlevel = 0x80000008, 225 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz", 226 }, 227 { 228 .name = "486", 229 .level = 0, 230 .family = 4, 231 .model = 0, 232 .stepping = 0, 233 .features = I486_FEATURES, 234 .xlevel = 0, 235 }, 236 { 237 .name = "pentium", 238 .level = 1, 239 .family = 5, 240 .model = 4, 241 .stepping = 3, 242 .features = PENTIUM_FEATURES, 243 .xlevel = 0, 244 }, 245 { 246 .name = "pentium2", 247 .level = 2, 248 .family = 6, 249 .model = 5, 250 .stepping = 2, 251 .features = PENTIUM2_FEATURES, 252 .xlevel = 0, 253 }, 254 { 255 .name = "pentium3", 256 .level = 2, 257 .family = 6, 258 .model = 7, 259 .stepping = 3, 260 .features = PENTIUM3_FEATURES, 261 .xlevel = 0, 262 }, 263 { 264 .name = "athlon", 265 .level = 2, 266 .vendor1 = 0x68747541, /* "Auth" */ 267 .vendor2 = 0x69746e65, /* "enti" */ 268 .vendor3 = 
0x444d4163, /* "cAMD" */ 269 .family = 6, 270 .model = 2, 271 .stepping = 3, 272 .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA, 273 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT, 274 .xlevel = 0x80000008, 275 /* XXX: put another string ? */ 276 .model_id = "QEMU Virtual CPU version " QEMU_VERSION, 277 }, 278 { 279 .name = "n270", 280 /* original is on level 10 */ 281 .level = 5, 282 .family = 6, 283 .model = 28, 284 .stepping = 2, 285 .features = PPRO_FEATURES | 286 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME, 287 /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS | 288 * CPUID_HT | CPUID_TM | CPUID_PBE */ 289 /* Some CPUs got no CPUID_SEP */ 290 .ext_features = CPUID_EXT_MONITOR | 291 CPUID_EXT_SSE3 /* PNI */, 292 /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST | 293 * CPUID_EXT_TM2 | CPUID_EXT_XTPR */ 294 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX, 295 /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */ 296 .xlevel = 0x8000000A, 297 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz", 298 }, 92 299 }; 93 300 94 /* modulo 17 table */ 95 const uint8_t rclw_table[32] = { 96 0, 1, 2, 3, 4, 5, 6, 7, 97 8, 9,10,11,12,13,14,15, 98 16, 0, 1, 2, 3, 4, 5, 6, 99 7, 8, 9,10,11,12,13,14, 301 static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model) 302 { 303 unsigned int i; 304 x86_def_t *def; 305 306 char *s = strdup(cpu_model); 307 char *featurestr, *name = strtok(s, ","); 308 uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0; 309 uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0; 310 int family = -1, model = -1, stepping = -1; 311 312 def = NULL; 313 for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++) { 314 if (strcmp(name, x86_defs[i].name) == 0) { 315 def = &x86_defs[i]; 316 break; 317 } 318 } 319 if (!def) 320 goto error; 321 memcpy(x86_cpu_def, def, sizeof(*def)); 322 323 featurestr = strtok(NULL, ","); 324 325 while (featurestr) { 326 char *val; 327 if (featurestr[0] == '+') { 328 add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features); 329 } else if (featurestr[0] == '-') { 330 add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features); 331 } else if ((val = strchr(featurestr, '='))) { 332 *val = 0; val++; 333 if (!strcmp(featurestr, "family")) { 334 char *err; 335 family = strtol(val, &err, 10); 336 if (!*val || *err || family < 0) { 337 fprintf(stderr, "bad numerical value %s\n", val); 338 goto error; 339 } 340 x86_cpu_def->family = family; 341 } else if (!strcmp(featurestr, "model")) { 342 char *err; 343 model = strtol(val, &err, 10); 344 if (!*val || *err || model < 0 || model > 0xf) { 345 fprintf(stderr, "bad numerical value %s\n", val); 346 goto error; 347 } 348 x86_cpu_def->model = model; 349 } else if (!strcmp(featurestr, "stepping")) { 350 char *err; 351 stepping = strtol(val, &err, 10); 352 if (!*val || *err || stepping < 0 || stepping > 0xf) { 353 fprintf(stderr, "bad numerical value %s\n", val); 354 goto error; 355 } 356 x86_cpu_def->stepping = stepping; 357 } else if (!strcmp(featurestr, "vendor")) { 358 if (strlen(val) != 12) { 359 fprintf(stderr, "vendor string must be 12 chars long\n"); 360 goto error; 361 } 362 x86_cpu_def->vendor1 = 0; 363 x86_cpu_def->vendor2 = 0; 364 x86_cpu_def->vendor3 = 0; 365 for(i = 0; i < 4; i++) 
{ 366 x86_cpu_def->vendor1 |= ((uint8_t)val[i ]) << (8 * i); 367 x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i); 368 x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i); 369 } 370 } else if (!strcmp(featurestr, "model_id")) { 371 pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id), 372 val); 373 } else { 374 fprintf(stderr, "unrecognized feature %s\n", featurestr); 375 goto error; 376 } 377 } else { 378 fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr); 379 goto error; 380 } 381 featurestr = strtok(NULL, ","); 382 } 383 x86_cpu_def->features |= plus_features; 384 x86_cpu_def->ext_features |= plus_ext_features; 385 x86_cpu_def->ext2_features |= plus_ext2_features; 386 x86_cpu_def->ext3_features |= plus_ext3_features; 387 x86_cpu_def->features &= ~minus_features; 388 x86_cpu_def->ext_features &= ~minus_ext_features; 389 x86_cpu_def->ext2_features &= ~minus_ext2_features; 390 x86_cpu_def->ext3_features &= ~minus_ext3_features; 391 free(s); 392 return 0; 393 394 error: 395 free(s); 396 return -1; 397 } 398 399 void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...)) 400 { 401 unsigned int i; 402 403 for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++) 404 (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name); 405 } 406 407 static int cpu_x86_register (CPUX86State *env, const char *cpu_model) 408 { 409 #ifndef VBOX 410 x86_def_t def1, *def = &def1; 411 412 if (cpu_x86_find_by_name(def, cpu_model) < 0) 413 return -1; 414 if (def->vendor1) { 415 env->cpuid_vendor1 = def->vendor1; 416 env->cpuid_vendor2 = def->vendor2; 417 env->cpuid_vendor3 = def->vendor3; 418 } else { 419 env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1; 420 env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2; 421 env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3; 422 } 423 env->cpuid_level = def->level; 424 env->cpuid_version = (def->family << 8) | (def->model << 4) | def->stepping; 425 env->cpuid_features = def->features; 426 env->pat = 0x0007040600070406ULL; 427 env->cpuid_ext_features = def->ext_features; 428 env->cpuid_ext2_features = def->ext2_features; 429 env->cpuid_xlevel = def->xlevel; 430 env->cpuid_ext3_features = def->ext3_features; 431 { 432 const char *model_id = def->model_id; 433 int c, len, i; 434 if (!model_id) 435 model_id = ""; 436 len = strlen(model_id); 437 for(i = 0; i < 48; i++) { 438 if (i >= len) 439 c = '\0'; 440 else 441 c = (uint8_t)model_id[i]; 442 env->cpuid_model[i >> 2] |= c << (8 * (i & 3)); 443 } 444 } 445 #endif // !VBOX 446 return 0; 447 } 448 449 /* NOTE: must be called outside the CPU execute loop */ 450 void cpu_reset(CPUX86State *env) 451 { 452 int i; 453 454 memset(env, 0, offsetof(CPUX86State, breakpoints)); 455 456 tlb_flush(env, 1); 457 458 env->old_exception = -1; 459 460 /* init to reset state */ 461 462 #ifdef CONFIG_SOFTMMU 463 env->hflags |= HF_SOFTMMU_MASK; 464 #endif 465 env->hflags2 |= HF2_GIF_MASK; 466 467 cpu_x86_update_cr0(env, 0x60000010); 468 env->a20_mask = ~0x0; 469 env->smbase = 0x30000; 470 471 env->idt.limit = 0xffff; 472 env->gdt.limit = 0xffff; 473 env->ldt.limit = 0xffff; 474 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT); 475 env->tr.limit = 0xffff; 476 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT); 477 478 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 479 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK); 480 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 481 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK); 482 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 
483 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK); 484 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 485 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK); 486 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 487 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK); 488 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 489 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK); 490 491 env->eip = 0xfff0; 492 #ifndef VBOX 493 env->regs[R_EDX] = env->cpuid_version; 494 #else 495 /** @todo: is it right? */ 496 env->regs[R_EDX] = 0x600; /* indicate P6 processor */ 497 #endif 498 499 env->eflags = 0x2; 500 501 /* FPU init */ 502 for(i = 0;i < 8; i++) 503 env->fptags[i] = 1; 504 env->fpuc = 0x37f; 505 506 env->mxcsr = 0x1f80; 507 } 508 509 #ifndef VBOX 510 void cpu_x86_close(CPUX86State *env) 511 { 512 qemu_free(env); 513 } 514 #endif 515 516 /***********************************************************/ 517 /* x86 debug */ 518 519 static const char *cc_op_str[] = { 520 "DYNAMIC", 521 "EFLAGS", 522 523 "MULB", 524 "MULW", 525 "MULL", 526 "MULQ", 527 528 "ADDB", 529 "ADDW", 530 "ADDL", 531 "ADDQ", 532 533 "ADCB", 534 "ADCW", 535 "ADCL", 536 "ADCQ", 537 538 "SUBB", 539 "SUBW", 540 "SUBL", 541 "SUBQ", 542 543 "SBBB", 544 "SBBW", 545 "SBBL", 546 "SBBQ", 547 548 "LOGICB", 549 "LOGICW", 550 "LOGICL", 551 "LOGICQ", 552 553 "INCB", 554 "INCW", 555 "INCL", 556 "INCQ", 557 558 "DECB", 559 "DECW", 560 "DECL", 561 "DECQ", 562 563 "SHLB", 564 "SHLW", 565 "SHLL", 566 "SHLQ", 567 568 "SARB", 569 "SARW", 570 "SARL", 571 "SARQ", 100 572 }; 101 573 102 /* modulo 9 table */ 103 const uint8_t rclb_table[32] = { 104 0, 1, 2, 3, 4, 5, 6, 7, 105 8, 0, 1, 2, 3, 4, 5, 6, 106 7, 8, 0, 1, 2, 3, 4, 5, 107 6, 7, 8, 0, 1, 2, 3, 4, 108 }; 109 110 const CPU86_LDouble f15rk[7] = 111 { 112 0.00000000000000000000L, 113 1.00000000000000000000L, 114 3.14159265358979323851L, /*pi*/ 115 0.30102999566398119523L, /*lg2*/ 116 0.69314718055994530943L, /*ln2*/ 117 1.44269504088896340739L, /*l2e*/ 118 3.32192809488736234781L, /*l2t*/ 119 }; 120 121 /* thread support */ 122 123 spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED; 124 125 void cpu_lock(void) 126 { 127 spin_lock(&global_cpu_lock); 128 } 129 130 void cpu_unlock(void) 131 { 132 spin_unlock(&global_cpu_lock); 133 } 134 135 void cpu_loop_exit(void) 136 { 137 /* NOTE: the register at this point must be saved by hand because 138 longjmp restore them */ 139 regs_to_env(); 140 longjmp(env->jmp_env, 1); 141 } 142 143 /* return non zero if error */ 144 static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr, 145 int selector) 146 { 147 SegmentCache *dt; 148 int index; 149 target_ulong ptr; 150 151 if (selector & 0x4) 152 dt = &env->ldt; 153 else 154 dt = &env->gdt; 155 index = selector & ~7; 156 if ((index + 7) > dt->limit) 157 return -1; 158 ptr = dt->base + index; 159 *e1_ptr = ldl_kernel(ptr); 160 *e2_ptr = ldl_kernel(ptr + 4); 161 return 0; 162 } 163 164 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2) 165 { 166 unsigned int limit; 167 limit = (e1 & 0xffff) | (e2 & 0x000f0000); 168 if (e2 & DESC_G_MASK) 169 limit = (limit << 12) | 0xfff; 170 return limit; 171 } 172 173 static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2) 174 { 175 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000)); 176 } 177 178 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2) 179 { 180 sc->base = get_seg_base(e1, e2); 181 sc->limit = get_seg_limit(e1, e2); 182 sc->flags = e2; 183 } 184 185 /* init the segment cache in vm86 mode. 
*/ 186 static inline void load_seg_vm(int seg, int selector) 187 { 188 selector &= 0xffff; 189 cpu_x86_load_seg_cache(env, seg, selector, 190 (selector << 4), 0xffff, 0); 191 } 192 193 static inline void get_ss_esp_from_tss(uint32_t *ss_ptr, 194 uint32_t *esp_ptr, int dpl) 195 { 196 int type, index, shift; 197 198 #if 0 574 void cpu_dump_state(CPUState *env, FILE *f, 575 int (*cpu_fprintf)(FILE *f, const char *fmt, ...), 576 int flags) 577 { 578 int eflags, i, nb; 579 char cc_op_name[32]; 580 static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" }; 581 582 eflags = env->eflags; 583 #ifdef TARGET_X86_64 584 if (env->hflags & HF_CS64_MASK) { 585 cpu_fprintf(f, 586 "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n" 587 "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n" 588 "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n" 589 "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n" 590 "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n", 591 env->regs[R_EAX], 592 env->regs[R_EBX], 593 env->regs[R_ECX], 594 env->regs[R_EDX], 595 env->regs[R_ESI], 596 env->regs[R_EDI], 597 env->regs[R_EBP], 598 env->regs[R_ESP], 599 env->regs[8], 600 env->regs[9], 601 env->regs[10], 602 env->regs[11], 603 env->regs[12], 604 env->regs[13], 605 env->regs[14], 606 env->regs[15], 607 env->eip, eflags, 608 eflags & DF_MASK ? 'D' : '-', 609 eflags & CC_O ? 'O' : '-', 610 eflags & CC_S ? 'S' : '-', 611 eflags & CC_Z ? 'Z' : '-', 612 eflags & CC_A ? 'A' : '-', 613 eflags & CC_P ? 'P' : '-', 614 eflags & CC_C ? 'C' : '-', 615 env->hflags & HF_CPL_MASK, 616 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1, 617 (int)(env->a20_mask >> 20) & 1, 618 (env->hflags >> HF_SMM_SHIFT) & 1, 619 env->halted); 620 } else 621 #endif 199 622 { 200 int i; 201 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit); 202 for(i=0;i<env->tr.limit;i++) { 203 printf("%02x ", env->tr.base[i]); 204 if ((i & 7) == 7) printf("\n"); 205 } 206 printf("\n"); 207 } 208 #endif 209 210 if (!(env->tr.flags & DESC_P_MASK)) 211 cpu_abort(env, "invalid tss"); 212 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf; 213 if ((type & 7) != 1) 214 cpu_abort(env, "invalid tss type %d", type); 215 shift = type >> 3; 216 index = (dpl * 4 + 2) << shift; 217 if (index + (4 << shift) - 1 > env->tr.limit) 218 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc); 219 if (shift == 0) { 220 *esp_ptr = lduw_kernel(env->tr.base + index); 221 *ss_ptr = lduw_kernel(env->tr.base + index + 2); 222 } else { 223 *esp_ptr = ldl_kernel(env->tr.base + index); 224 *ss_ptr = lduw_kernel(env->tr.base + index + 4); 225 } 226 } 227 228 /* XXX: merge with load_seg() */ 229 static void tss_load_seg(int seg_reg, int selector) 230 { 231 uint32_t e1, e2; 232 int rpl, dpl, cpl; 233 234 if ((selector & 0xfffc) != 0) { 235 if (load_segment(&e1, &e2, selector) != 0) 236 raise_exception_err(EXCP0A_TSS, selector & 0xfffc); 237 if (!(e2 & DESC_S_MASK)) 238 raise_exception_err(EXCP0A_TSS, selector & 0xfffc); 239 rpl = selector & 3; 240 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 241 cpl = env->hflags & HF_CPL_MASK; 242 if (seg_reg == R_CS) { 243 if (!(e2 & DESC_CS_MASK)) 244 raise_exception_err(EXCP0A_TSS, selector & 0xfffc); 245 /* XXX: is it correct ? 
*/ 246 if (dpl != rpl) 247 raise_exception_err(EXCP0A_TSS, selector & 0xfffc); 248 if ((e2 & DESC_C_MASK) && dpl > rpl) 249 raise_exception_err(EXCP0A_TSS, selector & 0xfffc); 250 } else if (seg_reg == R_SS) { 251 /* SS must be writable data */ 252 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) 253 raise_exception_err(EXCP0A_TSS, selector & 0xfffc); 254 if (dpl != cpl || dpl != rpl) 255 raise_exception_err(EXCP0A_TSS, selector & 0xfffc); 256 } else { 257 /* not readable code */ 258 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) 259 raise_exception_err(EXCP0A_TSS, selector & 0xfffc); 260 /* if data or non conforming code, checks the rights */ 261 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) { 262 if (dpl < cpl || dpl < rpl) 263 raise_exception_err(EXCP0A_TSS, selector & 0xfffc); 264 } 265 } 266 if (!(e2 & DESC_P_MASK)) 267 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc); 268 cpu_x86_load_seg_cache(env, seg_reg, selector, 269 get_seg_base(e1, e2), 270 get_seg_limit(e1, e2), 271 e2); 272 } else { 273 if (seg_reg == R_SS || seg_reg == R_CS) 274 raise_exception_err(EXCP0A_TSS, selector & 0xfffc); 275 } 276 } 277 278 #define SWITCH_TSS_JMP 0 279 #define SWITCH_TSS_IRET 1 280 #define SWITCH_TSS_CALL 2 281 282 /* XXX: restore CPU state in registers (PowerPC case) */ 283 static void switch_tss(int tss_selector, 284 uint32_t e1, uint32_t e2, int source, 285 uint32_t next_eip) 286 { 287 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i; 288 target_ulong tss_base; 289 uint32_t new_regs[8], new_segs[6]; 290 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap; 291 uint32_t old_eflags, eflags_mask; 292 SegmentCache *dt; 293 int index; 294 target_ulong ptr; 295 296 type = (e2 >> DESC_TYPE_SHIFT) & 0xf; 297 #ifdef DEBUG_PCALL 298 if (loglevel & CPU_LOG_PCALL) 299 fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source); 300 #endif 301 302 #if defined(VBOX) && defined(DEBUG) 303 printf("switch_tss %x %x %x %d %08x\n", tss_selector, e1, e2, source, next_eip); 304 #endif 305 306 /* if task gate, we read the TSS segment and we load it */ 307 if (type == 5) { 308 if (!(e2 & DESC_P_MASK)) 309 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc); 310 tss_selector = e1 >> 16; 311 if (tss_selector & 4) 312 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc); 313 if (load_segment(&e1, &e2, tss_selector) != 0) 314 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc); 315 if (e2 & DESC_S_MASK) 316 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc); 317 type = (e2 >> DESC_TYPE_SHIFT) & 0xf; 318 if ((type & 7) != 1) 319 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc); 320 } 321 322 if (!(e2 & DESC_P_MASK)) 323 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc); 324 325 if (type & 8) 326 tss_limit_max = 103; 327 else 328 tss_limit_max = 43; 329 tss_limit = get_seg_limit(e1, e2); 330 tss_base = get_seg_base(e1, e2); 331 if ((tss_selector & 4) != 0 || 332 tss_limit < tss_limit_max) 333 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc); 334 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf; 335 if (old_type & 8) 336 old_tss_limit_max = 103; 337 else 338 old_tss_limit_max = 43; 339 340 /* read all the registers from the new TSS */ 341 if (type & 8) { 342 /* 32 bit */ 343 new_cr3 = ldl_kernel(tss_base + 0x1c); 344 new_eip = ldl_kernel(tss_base + 0x20); 345 new_eflags = ldl_kernel(tss_base + 0x24); 346 for(i = 0; i < 8; i++) 347 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4)); 348 for(i = 0; i < 6; i++) 349 new_segs[i] = 
lduw_kernel(tss_base + (0x48 + i * 4)); 350 new_ldt = lduw_kernel(tss_base + 0x60); 351 new_trap = ldl_kernel(tss_base + 0x64); 352 } else { 353 /* 16 bit */ 354 new_cr3 = 0; 355 new_eip = lduw_kernel(tss_base + 0x0e); 356 new_eflags = lduw_kernel(tss_base + 0x10); 357 for(i = 0; i < 8; i++) 358 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000; 359 for(i = 0; i < 4; i++) 360 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4)); 361 new_ldt = lduw_kernel(tss_base + 0x2a); 362 new_segs[R_FS] = 0; 363 new_segs[R_GS] = 0; 364 new_trap = 0; 365 } 366 367 /* NOTE: we must avoid memory exceptions during the task switch, 368 so we make dummy accesses before */ 369 /* XXX: it can still fail in some cases, so a bigger hack is 370 necessary to valid the TLB after having done the accesses */ 371 372 v1 = ldub_kernel(env->tr.base); 373 v2 = ldub_kernel(env->tr.base + old_tss_limit_max); 374 stb_kernel(env->tr.base, v1); 375 stb_kernel(env->tr.base + old_tss_limit_max, v2); 376 377 /* clear busy bit (it is restartable) */ 378 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) { 379 target_ulong ptr; 380 uint32_t e2; 381 ptr = env->gdt.base + (env->tr.selector & ~7); 382 e2 = ldl_kernel(ptr + 4); 383 e2 &= ~DESC_TSS_BUSY_MASK; 384 stl_kernel(ptr + 4, e2); 385 } 386 old_eflags = compute_eflags(); 387 if (source == SWITCH_TSS_IRET) 388 old_eflags &= ~NT_MASK; 389 390 /* save the current state in the old TSS */ 391 if (type & 8) { 392 /* 32 bit */ 393 stl_kernel(env->tr.base + 0x20, next_eip); 394 stl_kernel(env->tr.base + 0x24, old_eflags); 395 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX); 396 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX); 397 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX); 398 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX); 399 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP); 400 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP); 401 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI); 402 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI); 403 for(i = 0; i < 6; i++) 404 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector); 405 #if defined(VBOX) && defined(DEBUG) 406 printf("TSS 32 bits switch\n"); 407 printf("Saving CS=%08X\n", env->segs[R_CS].selector); 408 #endif 409 } else { 410 /* 16 bit */ 411 stw_kernel(env->tr.base + 0x0e, next_eip); 412 stw_kernel(env->tr.base + 0x10, old_eflags); 413 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX); 414 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX); 415 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX); 416 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX); 417 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP); 418 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP); 419 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI); 420 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI); 421 for(i = 0; i < 4; i++) 422 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector); 423 } 424 425 /* now if an exception occurs, it will occurs in the next task 426 context */ 427 428 if (source == SWITCH_TSS_CALL) { 429 stw_kernel(tss_base, env->tr.selector); 430 new_eflags |= NT_MASK; 431 } 432 433 /* set busy bit */ 434 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) { 435 target_ulong ptr; 436 uint32_t e2; 437 ptr = env->gdt.base + (tss_selector & ~7); 438 e2 = ldl_kernel(ptr + 4); 439 e2 |= DESC_TSS_BUSY_MASK; 440 stl_kernel(ptr + 4, e2); 441 } 442 443 /* set the new CPU state */ 444 /* from this point, any exception which occurs can give problems */ 445 env->cr[0] |= CR0_TS_MASK; 446 env->hflags |= HF_TS_MASK; 447 env->tr.selector = 
tss_selector; 448 env->tr.base = tss_base; 449 env->tr.limit = tss_limit; 450 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK; 451 452 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) { 453 cpu_x86_update_cr3(env, new_cr3); 454 } 455 456 /* load all registers without an exception, then reload them with 457 possible exception */ 458 env->eip = new_eip; 459 eflags_mask = TF_MASK | AC_MASK | ID_MASK | 460 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK; 461 if (!(type & 8)) 462 eflags_mask &= 0xffff; 463 load_eflags(new_eflags, eflags_mask); 464 /* XXX: what to do in 16 bit case ? */ 465 EAX = new_regs[0]; 466 ECX = new_regs[1]; 467 EDX = new_regs[2]; 468 EBX = new_regs[3]; 469 ESP = new_regs[4]; 470 EBP = new_regs[5]; 471 ESI = new_regs[6]; 472 EDI = new_regs[7]; 473 if (new_eflags & VM_MASK) { 474 for(i = 0; i < 6; i++) 475 load_seg_vm(i, new_segs[i]); 476 /* in vm86, CPL is always 3 */ 477 cpu_x86_set_cpl(env, 3); 478 } else { 479 /* CPL is set the RPL of CS */ 480 cpu_x86_set_cpl(env, new_segs[R_CS] & 3); 481 /* first just selectors as the rest may trigger exceptions */ 482 for(i = 0; i < 6; i++) 483 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0); 484 } 485 486 env->ldt.selector = new_ldt & ~4; 487 env->ldt.base = 0; 488 env->ldt.limit = 0; 489 env->ldt.flags = 0; 490 491 /* load the LDT */ 492 if (new_ldt & 4) 493 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc); 494 495 if ((new_ldt & 0xfffc) != 0) { 496 dt = &env->gdt; 497 index = new_ldt & ~7; 498 if ((index + 7) > dt->limit) 499 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc); 500 ptr = dt->base + index; 501 e1 = ldl_kernel(ptr); 502 e2 = ldl_kernel(ptr + 4); 503 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) 504 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc); 505 if (!(e2 & DESC_P_MASK)) 506 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc); 507 load_seg_cache_raw_dt(&env->ldt, e1, e2); 508 } 509 510 /* load the segments */ 511 if (!(new_eflags & VM_MASK)) { 512 tss_load_seg(R_CS, new_segs[R_CS]); 513 tss_load_seg(R_SS, new_segs[R_SS]); 514 tss_load_seg(R_ES, new_segs[R_ES]); 515 tss_load_seg(R_DS, new_segs[R_DS]); 516 tss_load_seg(R_FS, new_segs[R_FS]); 517 tss_load_seg(R_GS, new_segs[R_GS]); 518 } 519 520 /* check that EIP is in the CS segment limits */ 521 if (new_eip > env->segs[R_CS].limit) { 522 /* XXX: different exception if CALL ? 
*/ 523 raise_exception_err(EXCP0D_GPF, 0); 524 } 525 } 526 527 /* check if Port I/O is allowed in TSS */ 528 static inline void check_io(int addr, int size) 529 { 530 int io_offset, val, mask; 531 532 /* TSS must be a valid 32 bit one */ 533 if (!(env->tr.flags & DESC_P_MASK) || 534 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 || 535 env->tr.limit < 103) 536 goto fail; 537 io_offset = lduw_kernel(env->tr.base + 0x66); 538 io_offset += (addr >> 3); 539 /* Note: the check needs two bytes */ 540 if ((io_offset + 1) > env->tr.limit) 541 goto fail; 542 val = lduw_kernel(env->tr.base + io_offset); 543 val >>= (addr & 7); 544 mask = (1 << size) - 1; 545 /* all bits must be zero to allow the I/O */ 546 if ((val & mask) != 0) { 547 fail: 548 raise_exception_err(EXCP0D_GPF, 0); 549 } 550 } 551 552 void check_iob_T0(void) 553 { 554 check_io(T0, 1); 555 } 556 557 void check_iow_T0(void) 558 { 559 check_io(T0, 2); 560 } 561 562 void check_iol_T0(void) 563 { 564 check_io(T0, 4); 565 } 566 567 void check_iob_DX(void) 568 { 569 check_io(EDX & 0xffff, 1); 570 } 571 572 void check_iow_DX(void) 573 { 574 check_io(EDX & 0xffff, 2); 575 } 576 577 void check_iol_DX(void) 578 { 579 check_io(EDX & 0xffff, 4); 580 } 581 582 static inline unsigned int get_sp_mask(unsigned int e2) 583 { 584 if (e2 & DESC_B_MASK) 585 return 0xffffffff; 586 else 587 return 0xffff; 588 } 589 590 #ifdef TARGET_X86_64 591 #define SET_ESP(val, sp_mask)\ 592 do {\ 593 if ((sp_mask) == 0xffff)\ 594 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\ 595 else if ((sp_mask) == 0xffffffffLL)\ 596 ESP = (uint32_t)(val);\ 597 else\ 598 ESP = (val);\ 599 } while (0) 600 #else 601 #define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask)) 602 #endif 603 604 /* XXX: add a is_user flag to have proper security support */ 605 #define PUSHW(ssp, sp, sp_mask, val)\ 606 {\ 607 sp -= 2;\ 608 stw_kernel((ssp) + (sp & (sp_mask)), (val));\ 609 } 610 611 #define PUSHL(ssp, sp, sp_mask, val)\ 612 {\ 613 sp -= 4;\ 614 stl_kernel((ssp) + (sp & (sp_mask)), (val));\ 615 } 616 617 #define POPW(ssp, sp, sp_mask, val)\ 618 {\ 619 val = lduw_kernel((ssp) + (sp & (sp_mask)));\ 620 sp += 2;\ 621 } 622 623 #define POPL(ssp, sp, sp_mask, val)\ 624 {\ 625 val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\ 626 sp += 4;\ 627 } 628 629 /* protected mode interrupt */ 630 static void do_interrupt_protected(int intno, int is_int, int error_code, 631 unsigned int next_eip, int is_hw) 632 { 633 SegmentCache *dt; 634 target_ulong ptr, ssp; 635 int type, dpl, selector, ss_dpl, cpl; 636 int has_error_code, new_stack, shift; 637 uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2; 638 uint32_t old_eip, sp_mask; 639 640 #ifdef VBOX 641 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS) 642 cpu_loop_exit(); 643 #endif 644 645 has_error_code = 0; 646 if (!is_int && !is_hw) { 647 switch(intno) { 648 case 8: 649 case 10: 650 case 11: 651 case 12: 652 case 13: 653 case 14: 654 case 17: 655 has_error_code = 1; 656 break; 657 } 658 } 659 if (is_int) 660 old_eip = next_eip; 661 else 662 old_eip = env->eip; 663 664 dt = &env->idt; 665 if (intno * 8 + 7 > dt->limit) 666 raise_exception_err(EXCP0D_GPF, intno * 8 + 2); 667 ptr = dt->base + intno * 8; 668 e1 = ldl_kernel(ptr); 669 e2 = ldl_kernel(ptr + 4); 670 /* check gate type */ 671 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; 672 switch(type) { 673 case 5: /* task gate */ 674 /* must do that check here to return the correct error code */ 675 if (!(e2 & DESC_P_MASK)) 676 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2); 
677 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip); 678 if (has_error_code) { 679 int type; 680 uint32_t mask; 681 /* push the error code */ 682 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf; 683 shift = type >> 3; 684 if (env->segs[R_SS].flags & DESC_B_MASK) 685 mask = 0xffffffff; 686 else 687 mask = 0xffff; 688 esp = (ESP - (2 << shift)) & mask; 689 ssp = env->segs[R_SS].base + esp; 690 if (shift) 691 stl_kernel(ssp, error_code); 692 else 693 stw_kernel(ssp, error_code); 694 SET_ESP(esp, mask); 695 } 696 return; 697 case 6: /* 286 interrupt gate */ 698 case 7: /* 286 trap gate */ 699 case 14: /* 386 interrupt gate */ 700 case 15: /* 386 trap gate */ 701 break; 702 default: 703 raise_exception_err(EXCP0D_GPF, intno * 8 + 2); 704 break; 705 } 706 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 707 cpl = env->hflags & HF_CPL_MASK; 708 /* check privledge if software int */ 709 if (is_int && dpl < cpl) 710 raise_exception_err(EXCP0D_GPF, intno * 8 + 2); 711 /* check valid bit */ 712 if (!(e2 & DESC_P_MASK)) 713 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2); 714 selector = e1 >> 16; 715 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff); 716 if ((selector & 0xfffc) == 0) 717 raise_exception_err(EXCP0D_GPF, 0); 718 719 if (load_segment(&e1, &e2, selector) != 0) 720 raise_exception_err(EXCP0D_GPF, selector & 0xfffc); 721 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) 722 raise_exception_err(EXCP0D_GPF, selector & 0xfffc); 723 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 724 if (dpl > cpl) 725 raise_exception_err(EXCP0D_GPF, selector & 0xfffc); 726 if (!(e2 & DESC_P_MASK)) 727 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc); 728 if (!(e2 & DESC_C_MASK) && dpl < cpl) { 729 /* to inner priviledge */ 730 get_ss_esp_from_tss(&ss, &esp, dpl); 731 if ((ss & 0xfffc) == 0) 732 raise_exception_err(EXCP0A_TSS, ss & 0xfffc); 733 if ((ss & 3) != dpl) 734 raise_exception_err(EXCP0A_TSS, ss & 0xfffc); 735 if (load_segment(&ss_e1, &ss_e2, ss) != 0) 736 raise_exception_err(EXCP0A_TSS, ss & 0xfffc); 737 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; 738 if (ss_dpl != dpl) 739 raise_exception_err(EXCP0A_TSS, ss & 0xfffc); 740 if (!(ss_e2 & DESC_S_MASK) || 741 (ss_e2 & DESC_CS_MASK) || 742 !(ss_e2 & DESC_W_MASK)) 743 raise_exception_err(EXCP0A_TSS, ss & 0xfffc); 744 if (!(ss_e2 & DESC_P_MASK)) 745 #ifdef VBOX /* See page 3-477 of 253666.pdf */ 746 raise_exception_err(EXCP0C_STACK, ss & 0xfffc); 747 #else 748 raise_exception_err(EXCP0A_TSS, ss & 0xfffc); 749 #endif 750 new_stack = 1; 751 sp_mask = get_sp_mask(ss_e2); 752 ssp = get_seg_base(ss_e1, ss_e2); 753 #if defined(VBOX) && defined(DEBUG) 754 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl); 755 #endif 756 } else if ((e2 & DESC_C_MASK) || dpl == cpl) { 757 /* to same priviledge */ 758 if (env->eflags & VM_MASK) 759 raise_exception_err(EXCP0D_GPF, selector & 0xfffc); 760 new_stack = 0; 761 sp_mask = get_sp_mask(env->segs[R_SS].flags); 762 ssp = env->segs[R_SS].base; 763 esp = ESP; 764 dpl = cpl; 765 } else { 766 raise_exception_err(EXCP0D_GPF, selector & 0xfffc); 767 new_stack = 0; /* avoid warning */ 768 sp_mask = 0; /* avoid warning */ 769 ssp = 0; /* avoid warning */ 770 esp = 0; /* avoid warning */ 771 } 772 773 shift = type >> 3; 774 775 #if 0 776 /* XXX: check that enough room is available */ 777 push_size = 6 + (new_stack << 2) + (has_error_code << 1); 778 if (env->eflags & VM_MASK) 779 push_size += 8; 780 push_size <<= shift; 781 #endif 782 if (shift == 1) { 783 if (new_stack) { 784 if (env->eflags & VM_MASK) { 785 PUSHL(ssp, esp, sp_mask, 
env->segs[R_GS].selector); 786 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector); 787 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector); 788 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector); 789 } 790 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector); 791 PUSHL(ssp, esp, sp_mask, ESP); 792 } 793 PUSHL(ssp, esp, sp_mask, compute_eflags()); 794 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector); 795 PUSHL(ssp, esp, sp_mask, old_eip); 796 if (has_error_code) { 797 PUSHL(ssp, esp, sp_mask, error_code); 798 } 799 } else { 800 if (new_stack) { 801 if (env->eflags & VM_MASK) { 802 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector); 803 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector); 804 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector); 805 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector); 806 } 807 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector); 808 PUSHW(ssp, esp, sp_mask, ESP); 809 } 810 PUSHW(ssp, esp, sp_mask, compute_eflags()); 811 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector); 812 PUSHW(ssp, esp, sp_mask, old_eip); 813 if (has_error_code) { 814 PUSHW(ssp, esp, sp_mask, error_code); 815 } 816 } 817 818 if (new_stack) { 819 if (env->eflags & VM_MASK) { 820 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0); 821 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0); 822 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0); 823 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0); 824 } 825 ss = (ss & ~3) | dpl; 826 cpu_x86_load_seg_cache(env, R_SS, ss, 827 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2); 828 } 829 SET_ESP(esp, sp_mask); 830 831 selector = (selector & ~3) | dpl; 832 cpu_x86_load_seg_cache(env, R_CS, selector, 833 get_seg_base(e1, e2), 834 get_seg_limit(e1, e2), 835 e2); 836 cpu_x86_set_cpl(env, dpl); 837 env->eip = offset; 838 839 /* interrupt gate clear IF mask */ 840 if ((type & 1) == 0) { 841 env->eflags &= ~IF_MASK; 842 } 843 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK); 844 } 845 846 #ifdef VBOX 847 848 /* check if VME interrupt redirection is enabled in TSS */ 849 static inline bool is_vme_irq_redirected(int intno) 850 { 851 int io_offset, intredir_offset; 852 unsigned char val, mask; 853 854 /* TSS must be a valid 32 bit one */ 855 if (!(env->tr.flags & DESC_P_MASK) || 856 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 || 857 env->tr.limit < 103) 858 goto fail; 859 io_offset = lduw_kernel(env->tr.base + 0x66); 860 /* the virtual interrupt redirection bitmap is located below the io bitmap */ 861 intredir_offset = io_offset - 0x20; 862 863 intredir_offset += (intno >> 3); 864 if ((intredir_offset) > env->tr.limit) 865 goto fail; 866 867 val = ldub_kernel(env->tr.base + intredir_offset); 868 mask = 1 << (unsigned char)(intno & 7); 869 870 /* bit set means no redirection. 
*/ 871 if ((val & mask) != 0) { 872 return false; 873 } 874 return true; 875 876 fail: 877 raise_exception_err(EXCP0D_GPF, 0); 878 return true; 879 } 880 881 /* V86 mode software interrupt with CR4.VME=1 */ 882 static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip) 883 { 884 target_ulong ptr, ssp; 885 int selector; 886 uint32_t offset, esp; 887 uint32_t old_cs, old_eflags; 888 uint32_t iopl; 889 890 iopl = ((env->eflags >> IOPL_SHIFT) & 3); 891 892 if (!is_vme_irq_redirected(intno)) 893 { 894 if (iopl == 3) 895 /* normal protected mode handler call */ 896 return do_interrupt_protected(intno, 1, error_code, next_eip, 0); 897 else 898 raise_exception_err(EXCP0D_GPF, 0); 899 } 900 901 /* virtual mode idt is at linear address 0 */ 902 ptr = 0 + intno * 4; 903 offset = lduw_kernel(ptr); 904 selector = lduw_kernel(ptr + 2); 905 esp = ESP; 906 ssp = env->segs[R_SS].base; 907 old_cs = env->segs[R_CS].selector; 908 909 old_eflags = compute_eflags(); 910 if (iopl < 3) 911 { 912 /* copy VIF into IF and set IOPL to 3 */ 913 if (env->eflags & VIF_MASK) 914 old_eflags |= IF_MASK; 915 else 916 old_eflags &= ~IF_MASK; 917 918 old_eflags |= (3 << IOPL_SHIFT); 919 } 920 921 /* XXX: use SS segment size ? */ 922 PUSHW(ssp, esp, 0xffff, old_eflags); 923 PUSHW(ssp, esp, 0xffff, old_cs); 924 PUSHW(ssp, esp, 0xffff, next_eip); 925 926 /* update processor state */ 927 ESP = (ESP & ~0xffff) | (esp & 0xffff); 928 env->eip = offset; 929 env->segs[R_CS].selector = selector; 930 env->segs[R_CS].base = (selector << 4); 931 env->eflags &= ~(TF_MASK | RF_MASK); 932 933 if (iopl < 3) 934 env->eflags &= ~VIF_MASK; 935 else 936 env->eflags &= ~IF_MASK; 937 } 938 #endif /* VBOX */ 939 940 #ifdef TARGET_X86_64 941 942 #define PUSHQ(sp, val)\ 943 {\ 944 sp -= 8;\ 945 stq_kernel(sp, (val));\ 946 } 947 948 #define POPQ(sp, val)\ 949 {\ 950 val = ldq_kernel(sp);\ 951 sp += 8;\ 952 } 953 954 static inline target_ulong get_rsp_from_tss(int level) 955 { 956 int index; 957 958 #if 0 959 printf("TR: base=" TARGET_FMT_lx " limit=%x\n", 960 env->tr.base, env->tr.limit); 961 #endif 962 963 if (!(env->tr.flags & DESC_P_MASK)) 964 cpu_abort(env, "invalid tss"); 965 index = 8 * level + 4; 966 if ((index + 7) > env->tr.limit) 967 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc); 968 return ldq_kernel(env->tr.base + index); 969 } 970 971 /* 64 bit interrupt */ 972 static void do_interrupt64(int intno, int is_int, int error_code, 973 target_ulong next_eip, int is_hw) 974 { 975 SegmentCache *dt; 976 target_ulong ptr; 977 int type, dpl, selector, cpl, ist; 978 int has_error_code, new_stack; 979 uint32_t e1, e2, e3, ss; 980 target_ulong old_eip, esp, offset; 981 982 #ifdef VBOX 983 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS) 984 cpu_loop_exit(); 985 #endif 986 987 has_error_code = 0; 988 if (!is_int && !is_hw) { 989 switch(intno) { 990 case 8: 991 case 10: 992 case 11: 993 case 12: 994 case 13: 995 case 14: 996 case 17: 997 has_error_code = 1; 998 break; 999 } 1000 } 1001 if (is_int) 1002 old_eip = next_eip; 1003 else 1004 old_eip = env->eip; 1005 1006 dt = &env->idt; 1007 if (intno * 16 + 15 > dt->limit) 1008 raise_exception_err(EXCP0D_GPF, intno * 16 + 2); 1009 ptr = dt->base + intno * 16; 1010 e1 = ldl_kernel(ptr); 1011 e2 = ldl_kernel(ptr + 4); 1012 e3 = ldl_kernel(ptr + 8); 1013 /* check gate type */ 1014 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; 1015 switch(type) { 1016 case 14: /* 386 interrupt gate */ 1017 case 15: /* 386 trap gate */ 1018 break; 1019 default: 1020 
raise_exception_err(EXCP0D_GPF, intno * 16 + 2); 1021 break; 1022 } 1023 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1024 cpl = env->hflags & HF_CPL_MASK; 1025 /* check privledge if software int */ 1026 if (is_int && dpl < cpl) 1027 raise_exception_err(EXCP0D_GPF, intno * 16 + 2); 1028 /* check valid bit */ 1029 if (!(e2 & DESC_P_MASK)) 1030 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2); 1031 selector = e1 >> 16; 1032 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff); 1033 ist = e2 & 7; 1034 if ((selector & 0xfffc) == 0) 1035 raise_exception_err(EXCP0D_GPF, 0); 1036 1037 if (load_segment(&e1, &e2, selector) != 0) 1038 raise_exception_err(EXCP0D_GPF, selector & 0xfffc); 1039 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) 1040 raise_exception_err(EXCP0D_GPF, selector & 0xfffc); 1041 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1042 if (dpl > cpl) 1043 raise_exception_err(EXCP0D_GPF, selector & 0xfffc); 1044 if (!(e2 & DESC_P_MASK)) 1045 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc); 1046 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) 1047 raise_exception_err(EXCP0D_GPF, selector & 0xfffc); 1048 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) { 1049 /* to inner priviledge */ 1050 if (ist != 0) 1051 esp = get_rsp_from_tss(ist + 3); 1052 else 1053 esp = get_rsp_from_tss(dpl); 1054 esp &= ~0xfLL; /* align stack */ 1055 ss = 0; 1056 new_stack = 1; 1057 } else if ((e2 & DESC_C_MASK) || dpl == cpl) { 1058 /* to same priviledge */ 1059 if (env->eflags & VM_MASK) 1060 raise_exception_err(EXCP0D_GPF, selector & 0xfffc); 1061 new_stack = 0; 1062 if (ist != 0) 1063 esp = get_rsp_from_tss(ist + 3); 1064 else 1065 esp = ESP; 1066 esp &= ~0xfLL; /* align stack */ 1067 dpl = cpl; 1068 } else { 1069 raise_exception_err(EXCP0D_GPF, selector & 0xfffc); 1070 new_stack = 0; /* avoid warning */ 1071 esp = 0; /* avoid warning */ 1072 } 1073 1074 PUSHQ(esp, env->segs[R_SS].selector); 1075 PUSHQ(esp, ESP); 1076 PUSHQ(esp, compute_eflags()); 1077 PUSHQ(esp, env->segs[R_CS].selector); 1078 PUSHQ(esp, old_eip); 1079 if (has_error_code) { 1080 PUSHQ(esp, error_code); 1081 } 1082 1083 if (new_stack) { 1084 ss = 0 | dpl; 1085 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0); 1086 } 1087 ESP = esp; 1088 1089 selector = (selector & ~3) | dpl; 1090 cpu_x86_load_seg_cache(env, R_CS, selector, 1091 get_seg_base(e1, e2), 1092 get_seg_limit(e1, e2), 1093 e2); 1094 cpu_x86_set_cpl(env, dpl); 1095 env->eip = offset; 1096 1097 /* interrupt gate clear IF mask */ 1098 if ((type & 1) == 0) { 1099 env->eflags &= ~IF_MASK; 1100 } 1101 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK); 1102 } 1103 #endif 1104 1105 void helper_syscall(int next_eip_addend) 1106 { 1107 int selector; 1108 1109 if (!(env->efer & MSR_EFER_SCE)) { 1110 raise_exception_err(EXCP06_ILLOP, 0); 1111 } 1112 selector = (env->star >> 32) & 0xffff; 623 cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n" 624 "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n" 625 "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n", 626 (uint32_t)env->regs[R_EAX], 627 (uint32_t)env->regs[R_EBX], 628 (uint32_t)env->regs[R_ECX], 629 (uint32_t)env->regs[R_EDX], 630 (uint32_t)env->regs[R_ESI], 631 (uint32_t)env->regs[R_EDI], 632 (uint32_t)env->regs[R_EBP], 633 (uint32_t)env->regs[R_ESP], 634 (uint32_t)env->eip, eflags, 635 eflags & DF_MASK ? 'D' : '-', 636 eflags & CC_O ? 'O' : '-', 637 eflags & CC_S ? 'S' : '-', 638 eflags & CC_Z ? 'Z' : '-', 639 eflags & CC_A ? 'A' : '-', 640 eflags & CC_P ? 'P' : '-', 641 eflags & CC_C ? 
'C' : '-', 642 env->hflags & HF_CPL_MASK, 643 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1, 644 (int)(env->a20_mask >> 20) & 1, 645 (env->hflags >> HF_SMM_SHIFT) & 1, 646 env->halted); 647 } 648 1113 649 #ifdef TARGET_X86_64 1114 650 if (env->hflags & HF_LMA_MASK) { 1115 int code64; 1116 1117 ECX = env->eip + next_eip_addend; 1118 env->regs[11] = compute_eflags(); 1119 1120 code64 = env->hflags & HF_CS64_MASK; 1121 1122 cpu_x86_set_cpl(env, 0); 1123 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc, 1124 0, 0xffffffff, 1125 DESC_G_MASK | DESC_P_MASK | 1126 DESC_S_MASK | 1127 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK); 1128 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc, 1129 0, 0xffffffff, 1130 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 1131 DESC_S_MASK | 1132 DESC_W_MASK | DESC_A_MASK); 1133 env->eflags &= ~env->fmask; 1134 load_eflags(env->eflags, 0); 1135 if (code64) 1136 env->eip = env->lstar; 651 for(i = 0; i < 6; i++) { 652 SegmentCache *sc = &env->segs[i]; 653 cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n", 654 seg_name[i], 655 sc->selector, 656 sc->base, 657 sc->limit, 658 sc->flags); 659 } 660 cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n", 661 env->ldt.selector, 662 env->ldt.base, 663 env->ldt.limit, 664 env->ldt.flags); 665 cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n", 666 env->tr.selector, 667 env->tr.base, 668 env->tr.limit, 669 env->tr.flags); 670 cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n", 671 env->gdt.base, env->gdt.limit); 672 cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n", 673 env->idt.base, env->idt.limit); 674 cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n", 675 (uint32_t)env->cr[0], 676 env->cr[2], 677 env->cr[3], 678 (uint32_t)env->cr[4]); 679 } else 680 #endif 681 { 682 for(i = 0; i < 6; i++) { 683 SegmentCache *sc = &env->segs[i]; 684 cpu_fprintf(f, "%s =%04x %08x %08x %08x\n", 685 seg_name[i], 686 sc->selector, 687 (uint32_t)sc->base, 688 sc->limit, 689 sc->flags); 690 } 691 cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n", 692 env->ldt.selector, 693 (uint32_t)env->ldt.base, 694 env->ldt.limit, 695 env->ldt.flags); 696 cpu_fprintf(f, "TR =%04x %08x %08x %08x\n", 697 env->tr.selector, 698 (uint32_t)env->tr.base, 699 env->tr.limit, 700 env->tr.flags); 701 cpu_fprintf(f, "GDT= %08x %08x\n", 702 (uint32_t)env->gdt.base, env->gdt.limit); 703 cpu_fprintf(f, "IDT= %08x %08x\n", 704 (uint32_t)env->idt.base, env->idt.limit); 705 cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n", 706 (uint32_t)env->cr[0], 707 (uint32_t)env->cr[2], 708 (uint32_t)env->cr[3], 709 (uint32_t)env->cr[4]); 710 } 711 if (flags & X86_DUMP_CCOP) { 712 if ((unsigned)env->cc_op < CC_OP_NB) 713 snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]); 1137 714 else 1138 env->eip = env->cstar; 1139 } else 1140 #endif 1141 { 1142 ECX = (uint32_t)(env->eip + next_eip_addend); 1143 1144 cpu_x86_set_cpl(env, 0); 1145 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc, 1146 0, 0xffffffff, 1147 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 1148 DESC_S_MASK | 1149 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); 1150 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc, 1151 0, 0xffffffff, 1152 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 1153 DESC_S_MASK | 1154 DESC_W_MASK | DESC_A_MASK); 1155 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK); 1156 env->eip = (uint32_t)env->star; 1157 } 1158 } 1159 1160 void helper_sysret(int dflag) 1161 { 1162 int cpl, selector; 1163 1164 if (!(env->efer & MSR_EFER_SCE)) { 1165 
raise_exception_err(EXCP06_ILLOP, 0); 1166 } 1167 cpl = env->hflags & HF_CPL_MASK; 1168 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) { 1169 raise_exception_err(EXCP0D_GPF, 0); 1170 } 1171 selector = (env->star >> 48) & 0xffff; 715 snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op); 1172 716 #ifdef TARGET_X86_64 1173 if (env->hflags & HF_LMA_MASK) { 1174 if (dflag == 2) { 1175 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3, 1176 0, 0xffffffff, 1177 DESC_G_MASK | DESC_P_MASK | 1178 DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 1179 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | 1180 DESC_L_MASK); 1181 env->eip = ECX; 1182 } else { 1183 cpu_x86_load_seg_cache(env, R_CS, selector | 3, 1184 0, 0xffffffff, 1185 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 1186 DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 1187 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); 1188 env->eip = (uint32_t)ECX; 1189 } 1190 cpu_x86_load_seg_cache(env, R_SS, selector + 8, 1191 0, 0xffffffff, 1192 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 1193 DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 1194 DESC_W_MASK | DESC_A_MASK); 1195 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK | 1196 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK); 1197 cpu_x86_set_cpl(env, 3); 1198 } else 1199 #endif 1200 { 1201 cpu_x86_load_seg_cache(env, R_CS, selector | 3, 1202 0, 0xffffffff, 1203 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 1204 DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 1205 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); 1206 env->eip = (uint32_t)ECX; 1207 cpu_x86_load_seg_cache(env, R_SS, selector + 8, 1208 0, 0xffffffff, 1209 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 1210 DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 1211 DESC_W_MASK | DESC_A_MASK); 1212 env->eflags |= IF_MASK; 1213 cpu_x86_set_cpl(env, 3); 1214 } 1215 #ifdef USE_KQEMU 1216 if (kqemu_is_ok(env)) { 1217 if (env->hflags & HF_LMA_MASK) 1218 CC_OP = CC_OP_EFLAGS; 1219 env->exception_index = -1; 1220 cpu_loop_exit(); 1221 } 1222 #endif 1223 } 717 if (env->hflags & HF_CS64_MASK) { 718 cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n", 719 env->cc_src, env->cc_dst, 720 cc_op_name); 721 } else 722 #endif 723 { 724 cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n", 725 (uint32_t)env->cc_src, (uint32_t)env->cc_dst, 726 cc_op_name); 727 } 728 } 729 if (flags & X86_DUMP_FPU) { 730 int fptag; 731 fptag = 0; 732 for(i = 0; i < 8; i++) { 733 fptag |= ((!env->fptags[i]) << i); 734 } 735 cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n", 736 env->fpuc, 737 (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11, 738 env->fpstt, 739 fptag, 740 env->mxcsr); 741 for(i=0;i<8;i++) { 742 #if defined(USE_X86LDOUBLE) 743 union { 744 long double d; 745 struct { 746 uint64_t lower; 747 uint16_t upper; 748 } l; 749 } tmp; 750 tmp.d = env->fpregs[i].d; 751 cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x", 752 i, tmp.l.lower, tmp.l.upper); 753 #else 754 cpu_fprintf(f, "FPR%d=%016" PRIx64, 755 i, env->fpregs[i].mmx.q); 756 #endif 757 if ((i & 1) == 1) 758 cpu_fprintf(f, "\n"); 759 else 760 cpu_fprintf(f, " "); 761 } 762 if (env->hflags & HF_CS64_MASK) 763 nb = 16; 764 else 765 nb = 8; 766 for(i=0;i<nb;i++) { 767 cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x", 768 i, 769 env->xmm_regs[i].XMM_L(3), 770 env->xmm_regs[i].XMM_L(2), 771 env->xmm_regs[i].XMM_L(1), 772 env->xmm_regs[i].XMM_L(0)); 773 if ((i & 1) == 1) 774 cpu_fprintf(f, "\n"); 775 else 776 cpu_fprintf(f, " "); 777 } 778 } 779 } 780 781 /***********************************************************/ 782 /* x86 mmu */ 783 /* XXX: add PGE support */ 
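The MMU code that follows implements the full page-table walk with protection, accessed/dirty and A20 handling. As a rough orientation only, here is a minimal sketch of the legacy 2-level (non-PAE) walk that cpu_x86_handle_mmu_fault() and cpu_get_phys_page_debug() perform further down; the helper name, the uint32_t-only types and the -1 fault marker are illustrative, and all permission checks are omitted.

static uint32_t walk_2level_sketch(CPUX86State *env, uint32_t addr)
{
    uint32_t pde_addr, pde, pte_addr, pte;

    /* page directory entry: CR3 selects the directory, addr[31:22] the slot */
    pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
    pde = ldl_phys(pde_addr);
    if (!(pde & PG_PRESENT_MASK))
        return -1;                                      /* not mapped: fault path */
    if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK))
        return ((pde & ~0x3fffff) | (addr & 0x3fffff)) & env->a20_mask; /* 4MB page */

    /* page table entry: addr[21:12] selects the slot */
    pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
    pte = ldl_phys(pte_addr);
    if (!(pte & PG_PRESENT_MASK))
        return -1;
    return ((pte & ~0xfff) | (addr & 0xfff)) & env->a20_mask;           /* 4KB page */
}

The real code below additionally combines the pde/pte user, rw and NX protection bits, sets the accessed and dirty bits via stl_phys_notdirty(), and maps only one 4KB page into the TLB even for large pages.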
784 785 void cpu_x86_set_a20(CPUX86State *env, int a20_state) 786 { 787 a20_state = (a20_state != 0); 788 if (a20_state != ((env->a20_mask >> 20) & 1)) { 789 #if defined(DEBUG_MMU) 790 printf("A20 update: a20=%d\n", a20_state); 791 #endif 792 /* if the cpu is currently executing code, we must unlink it and 793 all the potentially executing TB */ 794 cpu_interrupt(env, CPU_INTERRUPT_EXITTB); 795 796 /* when a20 is changed, all the MMU mappings are invalid, so 797 we must flush everything */ 798 tlb_flush(env, 1); 799 env->a20_mask = (~0x100000) | (a20_state << 20); 800 } 801 } 802 803 void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0) 804 { 805 int pe_state; 806 807 #if defined(DEBUG_MMU) 808 printf("CR0 update: CR0=0x%08x\n", new_cr0); 809 #endif 810 if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) != 811 (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) { 812 tlb_flush(env, 1); 813 } 814 815 #ifdef TARGET_X86_64 816 if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) && 817 (env->efer & MSR_EFER_LME)) { 818 /* enter in long mode */ 819 /* XXX: generate an exception */ 820 if (!(env->cr[4] & CR4_PAE_MASK)) 821 return; 822 env->efer |= MSR_EFER_LMA; 823 env->hflags |= HF_LMA_MASK; 824 } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) && 825 (env->efer & MSR_EFER_LMA)) { 826 /* exit long mode */ 827 env->efer &= ~MSR_EFER_LMA; 828 env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK); 829 env->eip &= 0xffffffff; 830 } 831 #endif 832 env->cr[0] = new_cr0 | CR0_ET_MASK; 833 834 /* update PE flag in hidden flags */ 835 pe_state = (env->cr[0] & CR0_PE_MASK); 836 env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT); 837 /* ensure that ADDSEG is always set in real mode */ 838 env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT); 839 /* update FPU flags */ 840 env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) | 841 ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)); 1224 842 1225 843 #ifdef VBOX 1226 /** 1227 * Checks and processes external VMM events. 1228 * Called by op_check_external_event() when any of the flags is set and can be serviced. 
1229 */ 1230 void helper_external_event(void) 1231 { 1232 #if defined(RT_OS_DARWIN) && defined(VBOX_STRICT) 1233 uintptr_t uESP; 1234 __asm__ __volatile__("movl %%esp, %0" : "=r" (uESP)); 1235 AssertMsg(!(uESP & 15), ("esp=%#p\n", uESP)); 1236 #endif 1237 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) 1238 { 1239 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_HARD); 1240 cpu_interrupt(env, CPU_INTERRUPT_HARD); 1241 } 1242 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT) 1243 { 1244 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_EXIT); 1245 cpu_interrupt(env, CPU_INTERRUPT_EXIT); 1246 } 1247 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA) 1248 { 1249 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_DMA); 1250 remR3DmaRun(env); 1251 } 1252 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER) 1253 { 1254 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_TIMER); 1255 remR3TimersRun(env); 1256 } 1257 } 1258 /* helper for recording call instruction addresses for later scanning */ 1259 void helper_record_call() 1260 { 1261 if ( !(env->state & CPU_RAW_RING0) 1262 && (env->cr[0] & CR0_PG_MASK) 1263 && !(env->eflags & X86_EFL_IF)) 1264 remR3RecordCall(env); 1265 } 1266 #endif /* VBOX */ 1267 1268 /* real mode interrupt */ 1269 static void do_interrupt_real(int intno, int is_int, int error_code, 1270 unsigned int next_eip) 1271 { 1272 SegmentCache *dt; 1273 target_ulong ptr, ssp; 1274 int selector; 1275 uint32_t offset, esp; 1276 uint32_t old_cs, old_eip; 1277 1278 /* real mode (simpler !) */ 1279 dt = &env->idt; 1280 if (intno * 4 + 3 > dt->limit) 1281 raise_exception_err(EXCP0D_GPF, intno * 8 + 2); 1282 ptr = dt->base + intno * 4; 1283 offset = lduw_kernel(ptr); 1284 selector = lduw_kernel(ptr + 2); 1285 esp = ESP; 1286 ssp = env->segs[R_SS].base; 1287 if (is_int) 1288 old_eip = next_eip; 844 remR3ChangeCpuMode(env); 845 #endif 846 } 847 848 /* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in 849 the PDPT */ 850 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3) 851 { 852 env->cr[3] = new_cr3; 853 if (env->cr[0] & CR0_PG_MASK) { 854 #if defined(DEBUG_MMU) 855 printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3); 856 #endif 857 tlb_flush(env, 0); 858 } 859 } 860 861 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4) 862 { 863 #if defined(DEBUG_MMU) 864 printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]); 865 #endif 866 if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) != 867 (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) { 868 tlb_flush(env, 1); 869 } 870 /* SSE handling */ 871 if (!(env->cpuid_features & CPUID_SSE)) 872 new_cr4 &= ~CR4_OSFXSR_MASK; 873 if (new_cr4 & CR4_OSFXSR_MASK) 874 env->hflags |= HF_OSFXSR_MASK; 1289 875 else 1290 old_eip = env->eip; 1291 old_cs = env->segs[R_CS].selector; 1292 /* XXX: use SS segment size ? 
*/ 1293 PUSHW(ssp, esp, 0xffff, compute_eflags()); 1294 PUSHW(ssp, esp, 0xffff, old_cs); 1295 PUSHW(ssp, esp, 0xffff, old_eip); 1296 1297 /* update processor state */ 1298 ESP = (ESP & ~0xffff) | (esp & 0xffff); 1299 env->eip = offset; 1300 env->segs[R_CS].selector = selector; 1301 env->segs[R_CS].base = (selector << 4); 1302 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK); 1303 } 1304 1305 /* fake user mode interrupt */ 1306 void do_interrupt_user(int intno, int is_int, int error_code, 1307 target_ulong next_eip) 1308 { 1309 SegmentCache *dt; 1310 target_ulong ptr; 1311 int dpl, cpl; 1312 uint32_t e2; 1313 1314 dt = &env->idt; 1315 ptr = dt->base + (intno * 8); 1316 e2 = ldl_kernel(ptr + 4); 1317 1318 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1319 cpl = env->hflags & HF_CPL_MASK; 1320 /* check privledge if software int */ 1321 if (is_int && dpl < cpl) 1322 raise_exception_err(EXCP0D_GPF, intno * 8 + 2); 1323 1324 /* Since we emulate only user space, we cannot do more than 1325 exiting the emulation with the suitable exception and error 1326 code */ 1327 if (is_int) 1328 EIP = next_eip; 1329 } 1330 1331 /* 1332 * Begin execution of an interruption. is_int is TRUE if coming from 1333 * the int instruction. next_eip is the EIP value AFTER the interrupt 1334 * instruction. It is only relevant if is_int is TRUE. 1335 */ 1336 void do_interrupt(int intno, int is_int, int error_code, 1337 target_ulong next_eip, int is_hw) 1338 { 1339 if (loglevel & CPU_LOG_INT) { 1340 if ((env->cr[0] & CR0_PE_MASK)) { 1341 static int count; 1342 fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx, 1343 count, intno, error_code, is_int, 1344 env->hflags & HF_CPL_MASK, 1345 env->segs[R_CS].selector, EIP, 1346 (int)env->segs[R_CS].base + EIP, 1347 env->segs[R_SS].selector, ESP); 1348 if (intno == 0x0e) { 1349 fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]); 1350 } else { 1351 fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX); 1352 } 1353 fprintf(logfile, "\n"); 1354 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP); 1355 #if 0 1356 { 1357 int i; 1358 uint8_t *ptr; 1359 fprintf(logfile, " code="); 1360 ptr = env->segs[R_CS].base + env->eip; 1361 for(i = 0; i < 16; i++) { 1362 fprintf(logfile, " %02x", ldub(ptr + i)); 1363 } 1364 fprintf(logfile, "\n"); 1365 } 1366 #endif 1367 count++; 1368 } 1369 } 1370 if (env->cr[0] & CR0_PE_MASK) { 876 env->hflags &= ~HF_OSFXSR_MASK; 877 878 env->cr[4] = new_cr4; 879 #ifdef VBOX 880 remR3ChangeCpuMode(env); 881 #endif 882 } 883 884 /* XXX: also flush 4MB pages */ 885 void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr) 886 { 887 tlb_flush_page(env, addr); 888 } 889 890 #if defined(CONFIG_USER_ONLY) 891 892 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr, 893 int is_write, int mmu_idx, int is_softmmu) 894 { 895 /* user mode only emulation */ 896 is_write &= 1; 897 env->cr[2] = addr; 898 env->error_code = (is_write << PG_ERROR_W_BIT); 899 env->error_code |= PG_ERROR_U_MASK; 900 env->exception_index = EXCP0E_PAGE; 901 return 1; 902 } 903 904 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr) 905 { 906 return addr; 907 } 908 909 #else 910 911 /* XXX: This value should match the one returned by CPUID 912 * and in exec.c */ 913 #if defined(USE_KQEMU) 914 #define PHYS_ADDR_MASK 0xfffff000LL 915 #else 916 # if defined(TARGET_X86_64) 917 # define PHYS_ADDR_MASK 0xfffffff000LL 918 # else 919 # define PHYS_ADDR_MASK 0xffffff000LL 920 # endif 921 #endif 922 923 /* return 
value: 924 -1 = cannot handle fault 925 0 = nothing more to do 926 1 = generate PF fault 927 2 = soft MMU activation required for this block 928 */ 929 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr, 930 int is_write1, int mmu_idx, int is_softmmu) 931 { 932 uint64_t ptep, pte; 933 target_ulong pde_addr, pte_addr; 934 int error_code, is_dirty, prot, page_size, ret, is_write, is_user; 935 target_phys_addr_t paddr; 936 uint32_t page_offset; 937 target_ulong vaddr, virt_addr; 938 939 is_user = mmu_idx == MMU_USER_IDX; 940 #if defined(DEBUG_MMU) 941 printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n", 942 addr, is_write1, is_user, env->eip); 943 #endif 944 is_write = is_write1 & 1; 945 946 if (!(env->cr[0] & CR0_PG_MASK)) { 947 pte = addr; 948 virt_addr = addr & TARGET_PAGE_MASK; 949 prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 950 page_size = 4096; 951 goto do_mapping; 952 } 953 954 if (env->cr[4] & CR4_PAE_MASK) { 955 uint64_t pde, pdpe; 956 target_ulong pdpe_addr; 957 1371 958 #ifdef TARGET_X86_64 1372 959 if (env->hflags & HF_LMA_MASK) { 1373 do_interrupt64(intno, is_int, error_code, next_eip, is_hw); 960 uint64_t pml4e_addr, pml4e; 961 int32_t sext; 962 963 /* test virtual address sign extension */ 964 sext = (int64_t)addr >> 47; 965 if (sext != 0 && sext != -1) { 966 env->error_code = 0; 967 env->exception_index = EXCP0D_GPF; 968 return 1; 969 } 970 971 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) & 972 env->a20_mask; 973 pml4e = ldq_phys(pml4e_addr); 974 if (!(pml4e & PG_PRESENT_MASK)) { 975 error_code = 0; 976 goto do_fault; 977 } 978 if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) { 979 error_code = PG_ERROR_RSVD_MASK; 980 goto do_fault; 981 } 982 if (!(pml4e & PG_ACCESSED_MASK)) { 983 pml4e |= PG_ACCESSED_MASK; 984 stl_phys_notdirty(pml4e_addr, pml4e); 985 } 986 ptep = pml4e ^ PG_NX_MASK; 987 pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) & 988 env->a20_mask; 989 pdpe = ldq_phys(pdpe_addr); 990 if (!(pdpe & PG_PRESENT_MASK)) { 991 error_code = 0; 992 goto do_fault; 993 } 994 if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) { 995 error_code = PG_ERROR_RSVD_MASK; 996 goto do_fault; 997 } 998 ptep &= pdpe ^ PG_NX_MASK; 999 if (!(pdpe & PG_ACCESSED_MASK)) { 1000 pdpe |= PG_ACCESSED_MASK; 1001 stl_phys_notdirty(pdpe_addr, pdpe); 1002 } 1374 1003 } else 1375 1004 #endif 1376 1005 { 1377 #ifdef VBOX 1378 /* int xx *, v86 code and VME enabled? */ 1379 if ( (env->eflags & VM_MASK) 1380 && (env->cr[4] & CR4_VME_MASK) 1381 && is_int 1382 && !is_hw 1383 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */ 1384 ) 1385 do_soft_interrupt_vme(intno, error_code, next_eip); 1386 else 1387 #endif /* VBOX */ 1388 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw); 1006 /* XXX: load them when cr3 is loaded ? 
*/ 1007 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) & 1008 env->a20_mask; 1009 pdpe = ldq_phys(pdpe_addr); 1010 if (!(pdpe & PG_PRESENT_MASK)) { 1011 error_code = 0; 1012 goto do_fault; 1013 } 1014 ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK; 1015 } 1016 1017 pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) & 1018 env->a20_mask; 1019 pde = ldq_phys(pde_addr); 1020 if (!(pde & PG_PRESENT_MASK)) { 1021 error_code = 0; 1022 goto do_fault; 1023 } 1024 if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) { 1025 error_code = PG_ERROR_RSVD_MASK; 1026 goto do_fault; 1027 } 1028 ptep &= pde ^ PG_NX_MASK; 1029 if (pde & PG_PSE_MASK) { 1030 /* 2 MB page */ 1031 page_size = 2048 * 1024; 1032 ptep ^= PG_NX_MASK; 1033 if ((ptep & PG_NX_MASK) && is_write1 == 2) 1034 goto do_fault_protect; 1035 if (is_user) { 1036 if (!(ptep & PG_USER_MASK)) 1037 goto do_fault_protect; 1038 if (is_write && !(ptep & PG_RW_MASK)) 1039 goto do_fault_protect; 1040 } else { 1041 if ((env->cr[0] & CR0_WP_MASK) && 1042 is_write && !(ptep & PG_RW_MASK)) 1043 goto do_fault_protect; 1044 } 1045 is_dirty = is_write && !(pde & PG_DIRTY_MASK); 1046 if (!(pde & PG_ACCESSED_MASK) || is_dirty) { 1047 pde |= PG_ACCESSED_MASK; 1048 if (is_dirty) 1049 pde |= PG_DIRTY_MASK; 1050 stl_phys_notdirty(pde_addr, pde); 1051 } 1052 /* align to page_size */ 1053 pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff); 1054 virt_addr = addr & ~(page_size - 1); 1055 } else { 1056 /* 4 KB page */ 1057 if (!(pde & PG_ACCESSED_MASK)) { 1058 pde |= PG_ACCESSED_MASK; 1059 stl_phys_notdirty(pde_addr, pde); 1060 } 1061 pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) & 1062 env->a20_mask; 1063 pte = ldq_phys(pte_addr); 1064 if (!(pte & PG_PRESENT_MASK)) { 1065 error_code = 0; 1066 goto do_fault; 1067 } 1068 if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) { 1069 error_code = PG_ERROR_RSVD_MASK; 1070 goto do_fault; 1071 } 1072 /* combine pde and pte nx, user and rw protections */ 1073 ptep &= pte ^ PG_NX_MASK; 1074 ptep ^= PG_NX_MASK; 1075 if ((ptep & PG_NX_MASK) && is_write1 == 2) 1076 goto do_fault_protect; 1077 if (is_user) { 1078 if (!(ptep & PG_USER_MASK)) 1079 goto do_fault_protect; 1080 if (is_write && !(ptep & PG_RW_MASK)) 1081 goto do_fault_protect; 1082 } else { 1083 if ((env->cr[0] & CR0_WP_MASK) && 1084 is_write && !(ptep & PG_RW_MASK)) 1085 goto do_fault_protect; 1086 } 1087 is_dirty = is_write && !(pte & PG_DIRTY_MASK); 1088 if (!(pte & PG_ACCESSED_MASK) || is_dirty) { 1089 pte |= PG_ACCESSED_MASK; 1090 if (is_dirty) 1091 pte |= PG_DIRTY_MASK; 1092 stl_phys_notdirty(pte_addr, pte); 1093 } 1094 page_size = 4096; 1095 virt_addr = addr & ~0xfff; 1096 pte = pte & (PHYS_ADDR_MASK | 0xfff); 1389 1097 } 1390 1098 } else { 1391 do_interrupt_real(intno, is_int, error_code, next_eip); 1392 } 1393 } 1394 1395 /* 1396 * Signal an interruption. It is executed in the main CPU loop. 1397 * is_int is TRUE if coming from the int instruction. next_eip is the 1398 * EIP value AFTER the interrupt instruction. It is only relevant if 1399 * is_int is TRUE. 
1400 */ 1401 void raise_interrupt(int intno, int is_int, int error_code, 1402 int next_eip_addend) 1403 { 1404 #if defined(VBOX) && defined(DEBUG) 1405 NOT_DMIK(Log2(("raise_interrupt: %x %x %x %VGv\n", intno, is_int, error_code, env->eip + next_eip_addend))); 1406 #endif 1407 env->exception_index = intno; 1099 uint32_t pde; 1100 1101 /* page directory entry */ 1102 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & 1103 env->a20_mask; 1104 pde = ldl_phys(pde_addr); 1105 if (!(pde & PG_PRESENT_MASK)) { 1106 error_code = 0; 1107 goto do_fault; 1108 } 1109 /* if PSE bit is set, then we use a 4MB page */ 1110 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) { 1111 page_size = 4096 * 1024; 1112 if (is_user) { 1113 if (!(pde & PG_USER_MASK)) 1114 goto do_fault_protect; 1115 if (is_write && !(pde & PG_RW_MASK)) 1116 goto do_fault_protect; 1117 } else { 1118 if ((env->cr[0] & CR0_WP_MASK) && 1119 is_write && !(pde & PG_RW_MASK)) 1120 goto do_fault_protect; 1121 } 1122 is_dirty = is_write && !(pde & PG_DIRTY_MASK); 1123 if (!(pde & PG_ACCESSED_MASK) || is_dirty) { 1124 pde |= PG_ACCESSED_MASK; 1125 if (is_dirty) 1126 pde |= PG_DIRTY_MASK; 1127 stl_phys_notdirty(pde_addr, pde); 1128 } 1129 1130 pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */ 1131 ptep = pte; 1132 virt_addr = addr & ~(page_size - 1); 1133 } else { 1134 if (!(pde & PG_ACCESSED_MASK)) { 1135 pde |= PG_ACCESSED_MASK; 1136 stl_phys_notdirty(pde_addr, pde); 1137 } 1138 1139 /* page directory entry */ 1140 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & 1141 env->a20_mask; 1142 pte = ldl_phys(pte_addr); 1143 if (!(pte & PG_PRESENT_MASK)) { 1144 error_code = 0; 1145 goto do_fault; 1146 } 1147 /* combine pde and pte user and rw protections */ 1148 ptep = pte & pde; 1149 if (is_user) { 1150 if (!(ptep & PG_USER_MASK)) 1151 goto do_fault_protect; 1152 if (is_write && !(ptep & PG_RW_MASK)) 1153 goto do_fault_protect; 1154 } else { 1155 if ((env->cr[0] & CR0_WP_MASK) && 1156 is_write && !(ptep & PG_RW_MASK)) 1157 goto do_fault_protect; 1158 } 1159 is_dirty = is_write && !(pte & PG_DIRTY_MASK); 1160 if (!(pte & PG_ACCESSED_MASK) || is_dirty) { 1161 pte |= PG_ACCESSED_MASK; 1162 if (is_dirty) 1163 pte |= PG_DIRTY_MASK; 1164 stl_phys_notdirty(pte_addr, pte); 1165 } 1166 page_size = 4096; 1167 virt_addr = addr & ~0xfff; 1168 } 1169 } 1170 /* the page can be put in the TLB */ 1171 prot = PAGE_READ; 1172 if (!(ptep & PG_NX_MASK)) 1173 prot |= PAGE_EXEC; 1174 if (pte & PG_DIRTY_MASK) { 1175 /* only set write access if already dirty... 
otherwise wait 1176 for dirty access */ 1177 if (is_user) { 1178 if (ptep & PG_RW_MASK) 1179 prot |= PAGE_WRITE; 1180 } else { 1181 if (!(env->cr[0] & CR0_WP_MASK) || 1182 (ptep & PG_RW_MASK)) 1183 prot |= PAGE_WRITE; 1184 } 1185 } 1186 do_mapping: 1187 pte = pte & env->a20_mask; 1188 1189 /* Even if 4MB pages, we map only one 4KB page in the cache to 1190 avoid filling it too fast */ 1191 page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1); 1192 paddr = (pte & TARGET_PAGE_MASK) + page_offset; 1193 vaddr = virt_addr + page_offset; 1194 1195 ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu); 1196 return ret; 1197 do_fault_protect: 1198 error_code = PG_ERROR_P_MASK; 1199 do_fault: 1200 error_code |= (is_write << PG_ERROR_W_BIT); 1201 if (is_user) 1202 error_code |= PG_ERROR_U_MASK; 1203 if (is_write1 == 2 && 1204 (env->efer & MSR_EFER_NXE) && 1205 (env->cr[4] & CR4_PAE_MASK)) 1206 error_code |= PG_ERROR_I_D_MASK; 1207 if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) { 1208 /* cr2 is not modified in case of exceptions */ 1209 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 1210 addr); 1211 } else { 1212 env->cr[2] = addr; 1213 } 1408 1214 env->error_code = error_code; 1409 env->exception_is_int = is_int; 1410 env->exception_next_eip = env->eip + next_eip_addend; 1411 cpu_loop_exit(); 1412 } 1413 1414 /* same as raise_exception_err, but do not restore global registers */ 1415 static void raise_exception_err_norestore(int exception_index, int error_code) 1416 { 1417 env->exception_index = exception_index; 1418 env->error_code = error_code; 1419 env->exception_is_int = 0; 1420 env->exception_next_eip = 0; 1421 longjmp(env->jmp_env, 1); 1422 } 1423 1424 /* shortcuts to generate exceptions */ 1425 1426 void (raise_exception_err)(int exception_index, int error_code) 1427 { 1428 raise_interrupt(exception_index, 0, error_code, 0); 1429 } 1430 1431 void raise_exception(int exception_index) 1432 { 1433 raise_interrupt(exception_index, 0, 0, 0); 1434 } 1435 1436 /* SMM support */ 1437 1438 #if defined(CONFIG_USER_ONLY) 1439 1440 void do_smm_enter(void) 1441 { 1442 } 1443 1444 void helper_rsm(void) 1445 { 1446 } 1447 1448 #else 1449 1450 #ifdef TARGET_X86_64 1451 #define SMM_REVISION_ID 0x00020064 1452 #else 1453 #define SMM_REVISION_ID 0x00020000 1454 #endif 1455 1456 void do_smm_enter(void) 1457 { 1458 #ifdef VBOX 1459 cpu_abort(env, "do_ssm_enter"); 1460 #else /* !VBOX */ 1461 target_ulong sm_state; 1462 SegmentCache *dt; 1463 int i, offset; 1464 1465 if (loglevel & CPU_LOG_INT) { 1466 fprintf(logfile, "SMM: enter\n"); 1467 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP); 1468 } 1469 1470 env->hflags |= HF_SMM_MASK; 1471 cpu_smm_update(env); 1472 1473 sm_state = env->smbase + 0x8000; 1474 1475 #ifdef TARGET_X86_64 1476 for(i = 0; i < 6; i++) { 1477 dt = &env->segs[i]; 1478 offset = 0x7e00 + i * 16; 1479 stw_phys(sm_state + offset, dt->selector); 1480 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff); 1481 stl_phys(sm_state + offset + 4, dt->limit); 1482 stq_phys(sm_state + offset + 8, dt->base); 1483 } 1484 1485 stq_phys(sm_state + 0x7e68, env->gdt.base); 1486 stl_phys(sm_state + 0x7e64, env->gdt.limit); 1487 1488 stw_phys(sm_state + 0x7e70, env->ldt.selector); 1489 stq_phys(sm_state + 0x7e78, env->ldt.base); 1490 stl_phys(sm_state + 0x7e74, env->ldt.limit); 1491 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff); 1492 1493 stq_phys(sm_state + 0x7e88, env->idt.base); 1494 stl_phys(sm_state + 0x7e84, env->idt.limit); 1495 1496 
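/* Layout note: TR is saved next; the rest of the 64-bit save area (offsets
   relative to smbase + 0x8000) holds EFER at 0x7ed0, the general purpose
   registers stored downward from 0x7ff8, RIP at 0x7f78, RFLAGS at 0x7f70,
   DR6/DR7 at 0x7f68/0x7f60, CR4/CR3/CR0 at 0x7f48/0x7f50/0x7f58, the
   revision ID at 0x7efc and SMBASE at 0x7f00. */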
stw_phys(sm_state + 0x7e90, env->tr.selector); 1497 stq_phys(sm_state + 0x7e98, env->tr.base); 1498 stl_phys(sm_state + 0x7e94, env->tr.limit); 1499 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff); 1500 1501 stq_phys(sm_state + 0x7ed0, env->efer); 1502 1503 stq_phys(sm_state + 0x7ff8, EAX); 1504 stq_phys(sm_state + 0x7ff0, ECX); 1505 stq_phys(sm_state + 0x7fe8, EDX); 1506 stq_phys(sm_state + 0x7fe0, EBX); 1507 stq_phys(sm_state + 0x7fd8, ESP); 1508 stq_phys(sm_state + 0x7fd0, EBP); 1509 stq_phys(sm_state + 0x7fc8, ESI); 1510 stq_phys(sm_state + 0x7fc0, EDI); 1511 for(i = 8; i < 16; i++) 1512 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]); 1513 stq_phys(sm_state + 0x7f78, env->eip); 1514 stl_phys(sm_state + 0x7f70, compute_eflags()); 1515 stl_phys(sm_state + 0x7f68, env->dr[6]); 1516 stl_phys(sm_state + 0x7f60, env->dr[7]); 1517 1518 stl_phys(sm_state + 0x7f48, env->cr[4]); 1519 stl_phys(sm_state + 0x7f50, env->cr[3]); 1520 stl_phys(sm_state + 0x7f58, env->cr[0]); 1521 1522 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID); 1523 stl_phys(sm_state + 0x7f00, env->smbase); 1524 #else 1525 stl_phys(sm_state + 0x7ffc, env->cr[0]); 1526 stl_phys(sm_state + 0x7ff8, env->cr[3]); 1527 stl_phys(sm_state + 0x7ff4, compute_eflags()); 1528 stl_phys(sm_state + 0x7ff0, env->eip); 1529 stl_phys(sm_state + 0x7fec, EDI); 1530 stl_phys(sm_state + 0x7fe8, ESI); 1531 stl_phys(sm_state + 0x7fe4, EBP); 1532 stl_phys(sm_state + 0x7fe0, ESP); 1533 stl_phys(sm_state + 0x7fdc, EBX); 1534 stl_phys(sm_state + 0x7fd8, EDX); 1535 stl_phys(sm_state + 0x7fd4, ECX); 1536 stl_phys(sm_state + 0x7fd0, EAX); 1537 stl_phys(sm_state + 0x7fcc, env->dr[6]); 1538 stl_phys(sm_state + 0x7fc8, env->dr[7]); 1539 1540 stl_phys(sm_state + 0x7fc4, env->tr.selector); 1541 stl_phys(sm_state + 0x7f64, env->tr.base); 1542 stl_phys(sm_state + 0x7f60, env->tr.limit); 1543 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff); 1544 1545 stl_phys(sm_state + 0x7fc0, env->ldt.selector); 1546 stl_phys(sm_state + 0x7f80, env->ldt.base); 1547 stl_phys(sm_state + 0x7f7c, env->ldt.limit); 1548 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff); 1549 1550 stl_phys(sm_state + 0x7f74, env->gdt.base); 1551 stl_phys(sm_state + 0x7f70, env->gdt.limit); 1552 1553 stl_phys(sm_state + 0x7f58, env->idt.base); 1554 stl_phys(sm_state + 0x7f54, env->idt.limit); 1555 1556 for(i = 0; i < 6; i++) { 1557 dt = &env->segs[i]; 1558 if (i < 3) 1559 offset = 0x7f84 + i * 12; 1560 else 1561 offset = 0x7f2c + (i - 3) * 12; 1562 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector); 1563 stl_phys(sm_state + offset + 8, dt->base); 1564 stl_phys(sm_state + offset + 4, dt->limit); 1565 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff); 1566 } 1567 stl_phys(sm_state + 0x7f14, env->cr[4]); 1568 1569 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID); 1570 stl_phys(sm_state + 0x7ef8, env->smbase); 1571 #endif 1572 /* init SMM cpu state */ 1573 1574 #ifdef TARGET_X86_64 1575 env->efer = 0; 1576 env->hflags &= ~HF_LMA_MASK; 1577 #endif 1578 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK)); 1579 env->eip = 0x00008000; 1580 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase, 1581 0xffffffff, 0); 1582 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0); 1583 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0); 1584 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0); 1585 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0); 1586 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0); 1587 1588 cpu_x86_update_cr0(env, 
1589 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK)); 1590 cpu_x86_update_cr4(env, 0); 1591 env->dr[7] = 0x00000400; 1592 CC_OP = CC_OP_EFLAGS; 1593 #endif /* VBOX */ 1594 } 1595 1596 void helper_rsm(void) 1597 { 1598 #ifdef VBOX 1599 cpu_abort(env, "helper_rsm"); 1600 #else /* !VBOX */ 1601 target_ulong sm_state; 1602 int i, offset; 1603 uint32_t val; 1604 1605 sm_state = env->smbase + 0x8000; 1606 #ifdef TARGET_X86_64 1607 env->efer = ldq_phys(sm_state + 0x7ed0); 1608 if (env->efer & MSR_EFER_LMA) 1609 env->hflags |= HF_LMA_MASK; 1610 else 1611 env->hflags &= ~HF_LMA_MASK; 1612 1613 for(i = 0; i < 6; i++) { 1614 offset = 0x7e00 + i * 16; 1615 cpu_x86_load_seg_cache(env, i, 1616 lduw_phys(sm_state + offset), 1617 ldq_phys(sm_state + offset + 8), 1618 ldl_phys(sm_state + offset + 4), 1619 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8); 1620 } 1621 1622 env->gdt.base = ldq_phys(sm_state + 0x7e68); 1623 env->gdt.limit = ldl_phys(sm_state + 0x7e64); 1624 1625 env->ldt.selector = lduw_phys(sm_state + 0x7e70); 1626 env->ldt.base = ldq_phys(sm_state + 0x7e78); 1627 env->ldt.limit = ldl_phys(sm_state + 0x7e74); 1628 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8; 1629 1630 env->idt.base = ldq_phys(sm_state + 0x7e88); 1631 env->idt.limit = ldl_phys(sm_state + 0x7e84); 1632 1633 env->tr.selector = lduw_phys(sm_state + 0x7e90); 1634 env->tr.base = ldq_phys(sm_state + 0x7e98); 1635 env->tr.limit = ldl_phys(sm_state + 0x7e94); 1636 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8; 1637 1638 EAX = ldq_phys(sm_state + 0x7ff8); 1639 ECX = ldq_phys(sm_state + 0x7ff0); 1640 EDX = ldq_phys(sm_state + 0x7fe8); 1641 EBX = ldq_phys(sm_state + 0x7fe0); 1642 ESP = ldq_phys(sm_state + 0x7fd8); 1643 EBP = ldq_phys(sm_state + 0x7fd0); 1644 ESI = ldq_phys(sm_state + 0x7fc8); 1645 EDI = ldq_phys(sm_state + 0x7fc0); 1646 for(i = 8; i < 16; i++) 1647 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8); 1648 env->eip = ldq_phys(sm_state + 0x7f78); 1649 load_eflags(ldl_phys(sm_state + 0x7f70), 1650 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK)); 1651 env->dr[6] = ldl_phys(sm_state + 0x7f68); 1652 env->dr[7] = ldl_phys(sm_state + 0x7f60); 1653 1654 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48)); 1655 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50)); 1656 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58)); 1657 1658 val = ldl_phys(sm_state + 0x7efc); /* revision ID */ 1659 if (val & 0x20000) { 1660 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff; 1661 } 1662 #else 1663 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc)); 1664 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8)); 1665 load_eflags(ldl_phys(sm_state + 0x7ff4), 1666 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK)); 1667 env->eip = ldl_phys(sm_state + 0x7ff0); 1668 EDI = ldl_phys(sm_state + 0x7fec); 1669 ESI = ldl_phys(sm_state + 0x7fe8); 1670 EBP = ldl_phys(sm_state + 0x7fe4); 1671 ESP = ldl_phys(sm_state + 0x7fe0); 1672 EBX = ldl_phys(sm_state + 0x7fdc); 1673 EDX = ldl_phys(sm_state + 0x7fd8); 1674 ECX = ldl_phys(sm_state + 0x7fd4); 1675 EAX = ldl_phys(sm_state + 0x7fd0); 1676 env->dr[6] = ldl_phys(sm_state + 0x7fcc); 1677 env->dr[7] = ldl_phys(sm_state + 0x7fc8); 1678 1679 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff; 1680 env->tr.base = ldl_phys(sm_state + 0x7f64); 1681 env->tr.limit = ldl_phys(sm_state + 0x7f60); 1682 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8; 1683 1684 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff; 1685 env->ldt.base = 
ldl_phys(sm_state + 0x7f80); 1686 env->ldt.limit = ldl_phys(sm_state + 0x7f7c); 1687 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8; 1688 1689 env->gdt.base = ldl_phys(sm_state + 0x7f74); 1690 env->gdt.limit = ldl_phys(sm_state + 0x7f70); 1691 1692 env->idt.base = ldl_phys(sm_state + 0x7f58); 1693 env->idt.limit = ldl_phys(sm_state + 0x7f54); 1694 1695 for(i = 0; i < 6; i++) { 1696 if (i < 3) 1697 offset = 0x7f84 + i * 12; 1698 else 1699 offset = 0x7f2c + (i - 3) * 12; 1700 cpu_x86_load_seg_cache(env, i, 1701 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff, 1702 ldl_phys(sm_state + offset + 8), 1703 ldl_phys(sm_state + offset + 4), 1704 (ldl_phys(sm_state + offset) & 0xf0ff) << 8); 1705 } 1706 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14)); 1707 1708 val = ldl_phys(sm_state + 0x7efc); /* revision ID */ 1709 if (val & 0x20000) { 1710 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff; 1711 } 1712 #endif 1713 CC_OP = CC_OP_EFLAGS; 1714 env->hflags &= ~HF_SMM_MASK; 1715 cpu_smm_update(env); 1716 1717 if (loglevel & CPU_LOG_INT) { 1718 fprintf(logfile, "SMM: after RSM\n"); 1719 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP); 1720 } 1721 #endif /* !VBOX */ 1722 } 1723 1724 #endif /* !CONFIG_USER_ONLY */ 1725 1726 1727 #ifdef BUGGY_GCC_DIV64 1728 /* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we 1729 call it from another function */ 1730 uint32_t div32(uint64_t *q_ptr, uint64_t num, uint32_t den) 1731 { 1732 *q_ptr = num / den; 1733 return num % den; 1734 } 1735 1736 int32_t idiv32(int64_t *q_ptr, int64_t num, int32_t den) 1737 { 1738 *q_ptr = num / den; 1739 return num % den; 1740 } 1741 #endif 1742 1743 void helper_divl_EAX_T0(void) 1744 { 1745 unsigned int den, r; 1746 uint64_t num, q; 1747 1748 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32); 1749 den = T0; 1750 if (den == 0) { 1751 raise_exception(EXCP00_DIVZ); 1752 } 1753 #ifdef BUGGY_GCC_DIV64 1754 r = div32(&q, num, den); 1755 #else 1756 q = (num / den); 1757 r = (num % den); 1758 #endif 1759 if (q > 0xffffffff) 1760 raise_exception(EXCP00_DIVZ); 1761 EAX = (uint32_t)q; 1762 EDX = (uint32_t)r; 1763 } 1764 1765 void helper_idivl_EAX_T0(void) 1766 { 1767 int den, r; 1768 int64_t num, q; 1769 1770 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32); 1771 den = T0; 1772 if (den == 0) { 1773 raise_exception(EXCP00_DIVZ); 1774 } 1775 #ifdef BUGGY_GCC_DIV64 1776 r = idiv32(&q, num, den); 1777 #else 1778 q = (num / den); 1779 r = (num % den); 1780 #endif 1781 if (q != (int32_t)q) 1782 raise_exception(EXCP00_DIVZ); 1783 EAX = (uint32_t)q; 1784 EDX = (uint32_t)r; 1785 } 1786 1787 void helper_cmpxchg8b(void) 1788 { 1789 uint64_t d; 1790 int eflags; 1791 1792 eflags = cc_table[CC_OP].compute_all(); 1793 d = ldq(A0); 1794 if (d == (((uint64_t)EDX << 32) | EAX)) { 1795 stq(A0, ((uint64_t)ECX << 32) | EBX); 1796 eflags |= CC_Z; 1797 } else { 1798 /* always do the store */ 1799 stq(A0, d); 1800 EDX = (uint32_t)(d >> 32); 1801 EAX = (uint32_t)d; 1802 eflags &= ~CC_Z; 1803 } 1804 CC_SRC = eflags; 1805 } 1806 1807 void helper_single_step() 1808 { 1809 env->dr[6] |= 0x4000; 1810 raise_exception(EXCP01_SSTP); 1811 } 1812 1813 void helper_cpuid(void) 1814 { 1815 #ifndef VBOX 1816 uint32_t index; 1817 index = (uint32_t)EAX; 1818 1819 /* test if maximum index reached */ 1820 if (index & 0x80000000) { 1821 if (index > env->cpuid_xlevel) 1822 index = env->cpuid_level; 1823 } else { 1824 if (index > env->cpuid_level) 1825 index = env->cpuid_level; 1826 } 1827 1828 switch(index) { 1829 case 0: 1830 EAX = 
env->cpuid_level; 1831 EBX = env->cpuid_vendor1; 1832 EDX = env->cpuid_vendor2; 1833 ECX = env->cpuid_vendor3; 1834 break; 1835 case 1: 1836 EAX = env->cpuid_version; 1837 EBX = 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */ 1838 ECX = env->cpuid_ext_features; 1839 EDX = env->cpuid_features; 1840 break; 1841 case 2: 1842 /* cache info: needed for Pentium Pro compatibility */ 1843 EAX = 0x410601; 1844 EBX = 0; 1845 ECX = 0; 1846 EDX = 0; 1847 break; 1848 case 0x80000000: 1849 EAX = env->cpuid_xlevel; 1850 EBX = env->cpuid_vendor1; 1851 EDX = env->cpuid_vendor2; 1852 ECX = env->cpuid_vendor3; 1853 break; 1854 case 0x80000001: 1855 EAX = env->cpuid_features; 1856 EBX = 0; 1857 ECX = 0; 1858 EDX = env->cpuid_ext2_features; 1859 break; 1860 case 0x80000002: 1861 case 0x80000003: 1862 case 0x80000004: 1863 EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0]; 1864 EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1]; 1865 ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2]; 1866 EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3]; 1867 break; 1868 case 0x80000005: 1869 /* cache info (L1 cache) */ 1870 EAX = 0x01ff01ff; 1871 EBX = 0x01ff01ff; 1872 ECX = 0x40020140; 1873 EDX = 0x40020140; 1874 break; 1875 case 0x80000006: 1876 /* cache info (L2 cache) */ 1877 EAX = 0; 1878 EBX = 0x42004200; 1879 ECX = 0x02008140; 1880 EDX = 0; 1881 break; 1882 case 0x80000008: 1883 /* virtual & phys address size in low 2 bytes. */ 1884 EAX = 0x00003028; 1885 EBX = 0; 1886 ECX = 0; 1887 EDX = 0; 1888 break; 1889 default: 1890 /* reserved values: zero */ 1891 EAX = 0; 1892 EBX = 0; 1893 ECX = 0; 1894 EDX = 0; 1895 break; 1896 } 1897 #else /* VBOX */ 1898 remR3CpuId(env, EAX, &EAX, &EBX, &ECX, &EDX); 1899 #endif /* VBOX */ 1900 } 1901 1902 void helper_enter_level(int level, int data32) 1903 { 1904 target_ulong ssp; 1905 uint32_t esp_mask, esp, ebp; 1906 1907 esp_mask = get_sp_mask(env->segs[R_SS].flags); 1908 ssp = env->segs[R_SS].base; 1909 ebp = EBP; 1910 esp = ESP; 1911 if (data32) { 1912 /* 32 bit */ 1913 esp -= 4; 1914 while (--level) { 1915 esp -= 4; 1916 ebp -= 4; 1917 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask))); 1918 } 1919 esp -= 4; 1920 stl(ssp + (esp & esp_mask), T1); 1921 } else { 1922 /* 16 bit */ 1923 esp -= 2; 1924 while (--level) { 1925 esp -= 2; 1926 ebp -= 2; 1927 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask))); 1928 } 1929 esp -= 2; 1930 stw(ssp + (esp & esp_mask), T1); 1931 } 1932 } 1933 1934 #ifdef TARGET_X86_64 1935 void helper_enter64_level(int level, int data64) 1936 { 1937 target_ulong esp, ebp; 1938 ebp = EBP; 1939 esp = ESP; 1940 1941 if (data64) { 1942 /* 64 bit */ 1943 esp -= 8; 1944 while (--level) { 1945 esp -= 8; 1946 ebp -= 8; 1947 stq(esp, ldq(ebp)); 1948 } 1949 esp -= 8; 1950 stq(esp, T1); 1951 } else { 1952 /* 16 bit */ 1953 esp -= 2; 1954 while (--level) { 1955 esp -= 2; 1956 ebp -= 2; 1957 stw(esp, lduw(ebp)); 1958 } 1959 esp -= 2; 1960 stw(esp, T1); 1961 } 1962 } 1963 #endif 1964 1965 void helper_lldt_T0(void) 1966 { 1967 int selector; 1968 SegmentCache *dt; 1969 uint32_t e1, e2; 1970 int index, entry_limit; 1971 target_ulong ptr; 1972 #ifdef VBOX 1973 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%VGv, .limit=%VGv} new=%RTsel\n", 1974 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(T0 & 0xffff))); 1975 #endif 1976 1977 selector = T0 & 0xffff; 1978 if ((selector & 0xfffc) == 0) { 1979 /* XXX: NULL selector case: invalid LDT */ 1980 env->ldt.base = 0; 1981 env->ldt.limit = 0; 1982 } else { 1983 if (selector & 
0x4) 1984 raise_exception_err(EXCP0D_GPF, selector & 0xfffc); 1985 dt = &env->gdt; 1986 index = selector & ~7; 1987 #ifdef TARGET_X86_64 1988 if (env->hflags & HF_LMA_MASK) 1989 entry_limit = 15; 1990 else 1991 #endif 1992 entry_limit = 7; 1993 if ((index + entry_limit) > dt->limit) 1994 raise_exception_err(EXCP0D_GPF, selector & 0xfffc); 1995 ptr = dt->base + index; 1996 e1 = ldl_kernel(ptr); 1997 e2 = ldl_kernel(ptr + 4); 1998 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) 1999 raise_exception_err(EXCP0D_GPF, selector & 0xfffc); 2000 if (!(e2 & DESC_P_MASK)) 2001 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc); 1215 env->exception_index = EXCP0E_PAGE; 1216 return 1; 1217 } 1218 1219 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr) 1220 { 1221 target_ulong pde_addr, pte_addr; 1222 uint64_t pte; 1223 target_phys_addr_t paddr; 1224 uint32_t page_offset; 1225 int page_size; 1226 1227 if (env->cr[4] & CR4_PAE_MASK) { 1228 target_ulong pdpe_addr; 1229 uint64_t pde, pdpe; 1230 2002 1231 #ifdef TARGET_X86_64 2003 1232 if (env->hflags & HF_LMA_MASK) { 2004 uint32_t e3; 2005 e3 = ldl_kernel(ptr + 8); 2006 load_seg_cache_raw_dt(&env->ldt, e1, e2); 2007 env->ldt.base |= (target_ulong)e3 << 32; 1233 uint64_t pml4e_addr, pml4e; 1234 int32_t sext; 1235 1236 /* test virtual address sign extension */ 1237 sext = (int64_t)addr >> 47; 1238 if (sext != 0 && sext != -1) 1239 return -1; 1240 1241 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) & 1242 env->a20_mask; 1243 pml4e = ldq_phys(pml4e_addr); 1244 if (!(pml4e & PG_PRESENT_MASK)) 1245 return -1; 1246 1247 pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) & 1248 env->a20_mask; 1249 pdpe = ldq_phys(pdpe_addr); 1250 if (!(pdpe & PG_PRESENT_MASK)) 1251 return -1; 2008 1252 } else 2009 1253 #endif 2010 1254 { 2011 load_seg_cache_raw_dt(&env->ldt, e1, e2); 2012 } 2013 } 2014 env->ldt.selector = selector; 2015 #ifdef VBOX 2016 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%VGv, .limit=%VGv}\n", 2017 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit)); 2018 #endif 2019 } 2020 2021 void helper_ltr_T0(void) 2022 { 2023 int selector; 2024 SegmentCache *dt; 2025 uint32_t e1, e2; 2026 int index, type, entry_limit; 2027 target_ulong ptr; 2028 2029 #ifdef VBOX 2030 Log(("helper_ltr_T0: old tr=%RTsel {.base=%VGv, .limit=%VGv, .flags=%RX32} new=%RTsel\n", 2031 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit, 2032 env->tr.flags, (RTSEL)(T0 & 0xffff))); 2033 #endif 2034 2035 selector = T0 & 0xffff; 2036 if ((selector & 0xfffc) == 0) { 2037 /* NULL selector case: invalid TR */ 2038 env->tr.base = 0; 2039 env->tr.limit = 0; 2040 env->tr.flags = 0; 1255 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) & 1256 env->a20_mask; 1257 pdpe = ldq_phys(pdpe_addr); 1258 if (!(pdpe & PG_PRESENT_MASK)) 1259 return -1; 1260 } 1261 1262 pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) & 1263 env->a20_mask; 1264 pde = ldq_phys(pde_addr); 1265 if (!(pde & PG_PRESENT_MASK)) { 1266 return -1; 1267 } 1268 if (pde & PG_PSE_MASK) { 1269 /* 2 MB page */ 1270 page_size = 2048 * 1024; 1271 pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */ 1272 } else { 1273 /* 4 KB page */ 1274 pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) & 1275 env->a20_mask; 1276 page_size = 4096; 1277 pte = ldq_phys(pte_addr); 1278 } 1279 if (!(pte & PG_PRESENT_MASK)) 1280 return -1; 2041 1281 } else { 2042 if (selector & 0x4) 2043 
raise_exception_err(EXCP0D_GPF, selector & 0xfffc); 2044 dt = &env->gdt; 2045 index = selector & ~7; 2046 #ifdef TARGET_X86_64 2047 if (env->hflags & HF_LMA_MASK) 2048 entry_limit = 15; 2049 else 2050 #endif 2051 entry_limit = 7; 2052 if ((index + entry_limit) > dt->limit) 2053 raise_exception_err(EXCP0D_GPF, selector & 0xfffc); 2054 ptr = dt->base + index; 2055 e1 = ldl_kernel(ptr); 2056 e2 = ldl_kernel(ptr + 4); 2057 type = (e2 >> DESC_TYPE_SHIFT) & 0xf; 2058 if ((e2 & DESC_S_MASK) || 2059 (type != 1 && type != 9)) 2060 raise_exception_err(EXCP0D_GPF, selector & 0xfffc); 2061 if (!(e2 & DESC_P_MASK)) 2062 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc); 2063 #ifdef TARGET_X86_64 2064 if (env->hflags & HF_LMA_MASK) { 2065 uint32_t e3; 2066 e3 = ldl_kernel(ptr + 8); 2067 load_seg_cache_raw_dt(&env->tr, e1, e2); 2068 env->tr.base |= (target_ulong)e3 << 32; 2069 } else 2070 #endif 2071 { 2072 load_seg_cache_raw_dt(&env->tr, e1, e2); 2073 } 2074 e2 |= DESC_TSS_BUSY_MASK; 2075 stl_kernel(ptr + 4, e2); 2076 } 2077 env->tr.selector = selector; 2078 #ifdef VBOX 2079 Log(("helper_ltr_T0: new tr=%RTsel {.base=%VGv, .limit=%VGv, .flags=%RX32} new=%RTsel\n", 2080 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit, 2081 env->tr.flags, (RTSEL)(T0 & 0xffff))); 2082 #endif 2083 } 2084 2085 /* only works if protected mode and not VM86. seg_reg must be != R_CS */ 2086 void load_seg(int seg_reg, int selector) 2087 { 2088 uint32_t e1, e2; 2089 int cpl, dpl, rpl; 2090 SegmentCache *dt; 2091 int index; 2092 target_ulong ptr; 2093 2094 selector &= 0xffff; 2095 cpl = env->hflags & HF_CPL_MASK; 2096 2097 #ifdef VBOX 2098 /* Trying to load a selector with CPL=1? */ 2099 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0)) 2100 { 2101 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc)); 2102 selector = selector & 0xfffc; 2103 } 2104 #endif 2105 2106 if ((selector & 0xfffc) == 0) { 2107 /* null selector case */ 2108 if (seg_reg == R_SS 2109 #ifdef TARGET_X86_64 2110 && (!(env->hflags & HF_CS64_MASK) || cpl == 3) 2111 #endif 2112 ) 2113 raise_exception_err(EXCP0D_GPF, 0); 2114 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0); 2115 } else { 2116 2117 if (selector & 0x4) 2118 dt = &env->ldt; 2119 else 2120 dt = &env->gdt; 2121 index = selector & ~7; 2122 if ((index + 7) > dt->limit) 2123 raise_exception_err(EXCP0D_GPF, selector & 0xfffc); 2124 ptr = dt->base + index; 2125 e1 = ldl_kernel(ptr); 2126 e2 = ldl_kernel(ptr + 4); 2127 2128 if (!(e2 & DESC_S_MASK)) 2129 raise_exception_err(EXCP0D_GPF, selector & 0xfffc); 2130 rpl = selector & 3; 2131 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 2132 if (seg_reg == R_SS) { 2133 /* must be writable segment */ 2134 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) 2135 raise_exception_err(EXCP0D_GPF, selector & 0xfffc); 2136 if (rpl != cpl || dpl != cpl) 2137 raise_exception_err(EXCP0D_GPF, selector & 0xfffc); 1282 uint32_t pde; 1283 1284 if (!(env->cr[0] & CR0_PG_MASK)) { 1285 pte = addr; 1286 page_size = 4096; 2138 1287 } else { 2139 /* must be readable segment */ 2140 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) 2141 raise_exception_err(EXCP0D_GPF, selector & 0xfffc); 2142 2143 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) { 2144 /* if not conforming code, test rights */ 2145 if (dpl < cpl || dpl < rpl) 2146 raise_exception_err(EXCP0D_GPF, selector & 0xfffc); 2147 } 2148 } 2149 2150 if (!(e2 & DESC_P_MASK)) { 2151 if (seg_reg == R_SS) 2152 raise_exception_err(EXCP0C_STACK, selector & 0xfffc); 2153 else 2154 
raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc); 2155 } 2156 2157 /* set the access bit if not already set */ 2158 if (!(e2 & DESC_A_MASK)) { 2159 e2 |= DESC_A_MASK; 2160 stl_kernel(ptr + 4, e2); 2161 } 2162 2163 cpu_x86_load_seg_cache(env, seg_reg, selector, 2164 get_seg_base(e1, e2), 2165 get_seg_limit(e1, e2), 2166 e2); 2167 #if 0 2168 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n", 2169 selector, (unsigned long)sc->base, sc->limit, sc->flags); 2170 #endif 2171 } 2172 } 2173 2174 /* protected mode jump */ 2175 void helper_ljmp_protected_T0_T1(int next_eip_addend) 2176 { 2177 int new_cs, gate_cs, type; 2178 uint32_t e1, e2, cpl, dpl, rpl, limit; 2179 target_ulong new_eip, next_eip; 2180 2181 new_cs = T0; 2182 new_eip = T1; 2183 if ((new_cs & 0xfffc) == 0) 2184 raise_exception_err(EXCP0D_GPF, 0); 2185 if (load_segment(&e1, &e2, new_cs) != 0) 2186 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); 2187 cpl = env->hflags & HF_CPL_MASK; 2188 if (e2 & DESC_S_MASK) { 2189 if (!(e2 & DESC_CS_MASK)) 2190 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); 2191 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 2192 if (e2 & DESC_C_MASK) { 2193 /* conforming code segment */ 2194 if (dpl > cpl) 2195 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); 2196 } else { 2197 /* non conforming code segment */ 2198 rpl = new_cs & 3; 2199 if (rpl > cpl) 2200 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); 2201 if (dpl != cpl) 2202 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); 2203 } 2204 if (!(e2 & DESC_P_MASK)) 2205 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc); 2206 limit = get_seg_limit(e1, e2); 2207 if (new_eip > limit && 2208 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) 2209 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); 2210 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, 2211 get_seg_base(e1, e2), limit, e2); 2212 EIP = new_eip; 2213 } else { 2214 /* jump to call or task gate */ 2215 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 2216 rpl = new_cs & 3; 2217 cpl = env->hflags & HF_CPL_MASK; 2218 type = (e2 >> DESC_TYPE_SHIFT) & 0xf; 2219 switch(type) { 2220 case 1: /* 286 TSS */ 2221 case 9: /* 386 TSS */ 2222 case 5: /* task gate */ 2223 if (dpl < cpl || dpl < rpl) 2224 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); 2225 next_eip = env->eip + next_eip_addend; 2226 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip); 2227 CC_OP = CC_OP_EFLAGS; 2228 break; 2229 case 4: /* 286 call gate */ 2230 case 12: /* 386 call gate */ 2231 if ((dpl < cpl) || (dpl < rpl)) 2232 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); 2233 if (!(e2 & DESC_P_MASK)) 2234 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc); 2235 gate_cs = e1 >> 16; 2236 new_eip = (e1 & 0xffff); 2237 if (type == 12) 2238 new_eip |= (e2 & 0xffff0000); 2239 if (load_segment(&e1, &e2, gate_cs) != 0) 2240 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc); 2241 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 2242 /* must be code segment */ 2243 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) != 2244 (DESC_S_MASK | DESC_CS_MASK))) 2245 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc); 2246 if (((e2 & DESC_C_MASK) && (dpl > cpl)) || 2247 (!(e2 & DESC_C_MASK) && (dpl != cpl))) 2248 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc); 2249 if (!(e2 & DESC_P_MASK)) 2250 #ifdef VBOX /* See page 3-514 of 253666.pdf */ 2251 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc); 2252 #else 2253 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc); 2254 #endif 2255 limit = get_seg_limit(e1, e2); 2256 if (new_eip > limit) 2257 
raise_exception_err(EXCP0D_GPF, 0); 2258 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl, 2259 get_seg_base(e1, e2), limit, e2); 2260 EIP = new_eip; 2261 break; 2262 default: 2263 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); 2264 break; 2265 } 2266 } 2267 } 2268 2269 /* real mode call */ 2270 void helper_lcall_real_T0_T1(int shift, int next_eip) 2271 { 2272 int new_cs, new_eip; 2273 uint32_t esp, esp_mask; 2274 target_ulong ssp; 2275 2276 new_cs = T0; 2277 new_eip = T1; 2278 esp = ESP; 2279 esp_mask = get_sp_mask(env->segs[R_SS].flags); 2280 ssp = env->segs[R_SS].base; 2281 if (shift) { 2282 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector); 2283 PUSHL(ssp, esp, esp_mask, next_eip); 2284 } else { 2285 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector); 2286 PUSHW(ssp, esp, esp_mask, next_eip); 2287 } 2288 2289 SET_ESP(esp, esp_mask); 2290 env->eip = new_eip; 2291 env->segs[R_CS].selector = new_cs; 2292 env->segs[R_CS].base = (new_cs << 4); 2293 } 2294 2295 /* protected mode call */ 2296 void helper_lcall_protected_T0_T1(int shift, int next_eip_addend) 2297 { 2298 int new_cs, new_stack, i; 2299 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count; 2300 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask; 2301 uint32_t val, limit, old_sp_mask; 2302 target_ulong ssp, old_ssp, next_eip, new_eip; 2303 2304 new_cs = T0; 2305 new_eip = T1; 2306 next_eip = env->eip + next_eip_addend; 2307 #ifdef DEBUG_PCALL 2308 if (loglevel & CPU_LOG_PCALL) { 2309 fprintf(logfile, "lcall %04x:%08x s=%d\n", 2310 new_cs, (uint32_t)new_eip, shift); 2311 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP); 2312 } 2313 #endif 2314 if ((new_cs & 0xfffc) == 0) 2315 raise_exception_err(EXCP0D_GPF, 0); 2316 if (load_segment(&e1, &e2, new_cs) != 0) 2317 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); 2318 cpl = env->hflags & HF_CPL_MASK; 2319 #ifdef DEBUG_PCALL 2320 if (loglevel & CPU_LOG_PCALL) { 2321 fprintf(logfile, "desc=%08x:%08x\n", e1, e2); 2322 } 2323 #endif 2324 if (e2 & DESC_S_MASK) { 2325 if (!(e2 & DESC_CS_MASK)) 2326 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); 2327 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 2328 if (e2 & DESC_C_MASK) { 2329 /* conforming code segment */ 2330 if (dpl > cpl) 2331 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); 2332 } else { 2333 /* non conforming code segment */ 2334 rpl = new_cs & 3; 2335 if (rpl > cpl) 2336 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); 2337 if (dpl != cpl) 2338 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); 2339 } 2340 if (!(e2 & DESC_P_MASK)) 2341 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc); 2342 2343 #ifdef TARGET_X86_64 2344 /* XXX: check 16/32 bit cases in long mode */ 2345 if (shift == 2) { 2346 target_ulong rsp; 2347 /* 64 bit case */ 2348 rsp = ESP; 2349 PUSHQ(rsp, env->segs[R_CS].selector); 2350 PUSHQ(rsp, next_eip); 2351 /* from this point, not restartable */ 2352 ESP = rsp; 2353 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, 2354 get_seg_base(e1, e2), 2355 get_seg_limit(e1, e2), e2); 2356 EIP = new_eip; 2357 } else 2358 #endif 2359 { 2360 sp = ESP; 2361 sp_mask = get_sp_mask(env->segs[R_SS].flags); 2362 ssp = env->segs[R_SS].base; 2363 if (shift) { 2364 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector); 2365 PUSHL(ssp, sp, sp_mask, next_eip); 1288 /* page directory entry */ 1289 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask; 1290 pde = ldl_phys(pde_addr); 1291 if (!(pde & PG_PRESENT_MASK)) 1292 return -1; 1293 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) 
{ 1294 pte = pde & ~0x003ff000; /* align to 4MB */ 1295 page_size = 4096 * 1024; 2366 1296 } else { 2367 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector); 2368 PUSHW(ssp, sp, sp_mask, next_eip); 2369 } 2370 2371 limit = get_seg_limit(e1, e2); 2372 if (new_eip > limit) 2373 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); 2374 /* from this point, not restartable */ 2375 SET_ESP(sp, sp_mask); 2376 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, 2377 get_seg_base(e1, e2), limit, e2); 2378 EIP = new_eip; 2379 } 2380 } else { 2381 /* check gate type */ 2382 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; 2383 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 2384 rpl = new_cs & 3; 2385 switch(type) { 2386 case 1: /* available 286 TSS */ 2387 case 9: /* available 386 TSS */ 2388 case 5: /* task gate */ 2389 if (dpl < cpl || dpl < rpl) 2390 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); 2391 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip); 2392 CC_OP = CC_OP_EFLAGS; 2393 return; 2394 case 4: /* 286 call gate */ 2395 case 12: /* 386 call gate */ 2396 break; 2397 default: 2398 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); 2399 break; 2400 } 2401 shift = type >> 3; 2402 2403 if (dpl < cpl || dpl < rpl) 2404 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); 2405 /* check valid bit */ 2406 if (!(e2 & DESC_P_MASK)) 2407 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc); 2408 selector = e1 >> 16; 2409 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff); 2410 param_count = e2 & 0x1f; 2411 if ((selector & 0xfffc) == 0) 2412 raise_exception_err(EXCP0D_GPF, 0); 2413 2414 if (load_segment(&e1, &e2, selector) != 0) 2415 raise_exception_err(EXCP0D_GPF, selector & 0xfffc); 2416 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) 2417 raise_exception_err(EXCP0D_GPF, selector & 0xfffc); 2418 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 2419 if (dpl > cpl) 2420 raise_exception_err(EXCP0D_GPF, selector & 0xfffc); 2421 if (!(e2 & DESC_P_MASK)) 2422 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc); 2423 2424 if (!(e2 & DESC_C_MASK) && dpl < cpl) { 2425 /* to inner priviledge */ 2426 get_ss_esp_from_tss(&ss, &sp, dpl); 2427 #ifdef DEBUG_PCALL 2428 if (loglevel & CPU_LOG_PCALL) 2429 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n", 2430 ss, sp, param_count, ESP); 2431 #endif 2432 if ((ss & 0xfffc) == 0) 2433 raise_exception_err(EXCP0A_TSS, ss & 0xfffc); 2434 if ((ss & 3) != dpl) 2435 raise_exception_err(EXCP0A_TSS, ss & 0xfffc); 2436 if (load_segment(&ss_e1, &ss_e2, ss) != 0) 2437 raise_exception_err(EXCP0A_TSS, ss & 0xfffc); 2438 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; 2439 if (ss_dpl != dpl) 2440 raise_exception_err(EXCP0A_TSS, ss & 0xfffc); 2441 if (!(ss_e2 & DESC_S_MASK) || 2442 (ss_e2 & DESC_CS_MASK) || 2443 !(ss_e2 & DESC_W_MASK)) 2444 raise_exception_err(EXCP0A_TSS, ss & 0xfffc); 2445 if (!(ss_e2 & DESC_P_MASK)) 2446 #ifdef VBOX /* See page 3-99 of 253666.pdf */ 2447 raise_exception_err(EXCP0C_STACK, ss & 0xfffc); 2448 #else 2449 raise_exception_err(EXCP0A_TSS, ss & 0xfffc); 2450 #endif 2451 2452 // push_size = ((param_count * 2) + 8) << shift; 2453 2454 old_sp_mask = get_sp_mask(env->segs[R_SS].flags); 2455 old_ssp = env->segs[R_SS].base; 2456 2457 sp_mask = get_sp_mask(ss_e2); 2458 ssp = get_seg_base(ss_e1, ss_e2); 2459 if (shift) { 2460 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector); 2461 PUSHL(ssp, sp, sp_mask, ESP); 2462 for(i = param_count - 1; i >= 0; i--) { 2463 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask)); 2464 PUSHL(ssp, sp, sp_mask, val); 2465 } 2466 } else { 2467 
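/* 16-bit call gate switching to an inner-privilege stack: push the caller's SS and SP on the new stack, then copy param_count 16-bit parameters across from the old stack (the PUSHL/ldl_kernel branch above is the 32-bit equivalent). */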
PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector); 2468 PUSHW(ssp, sp, sp_mask, ESP); 2469 for(i = param_count - 1; i >= 0; i--) { 2470 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask)); 2471 PUSHW(ssp, sp, sp_mask, val); 2472 } 2473 } 2474 new_stack = 1; 2475 } else { 2476 /* to same priviledge */ 2477 sp = ESP; 2478 sp_mask = get_sp_mask(env->segs[R_SS].flags); 2479 ssp = env->segs[R_SS].base; 2480 // push_size = (4 << shift); 2481 new_stack = 0; 2482 } 2483 2484 if (shift) { 2485 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector); 2486 PUSHL(ssp, sp, sp_mask, next_eip); 2487 } else { 2488 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector); 2489 PUSHW(ssp, sp, sp_mask, next_eip); 2490 } 2491 2492 /* from this point, not restartable */ 2493 2494 if (new_stack) { 2495 ss = (ss & ~3) | dpl; 2496 cpu_x86_load_seg_cache(env, R_SS, ss, 2497 ssp, 2498 get_seg_limit(ss_e1, ss_e2), 2499 ss_e2); 2500 } 2501 2502 selector = (selector & ~3) | dpl; 2503 cpu_x86_load_seg_cache(env, R_CS, selector, 2504 get_seg_base(e1, e2), 2505 get_seg_limit(e1, e2), 2506 e2); 2507 cpu_x86_set_cpl(env, dpl); 2508 SET_ESP(sp, sp_mask); 2509 EIP = offset; 2510 } 2511 #ifdef USE_KQEMU 2512 if (kqemu_is_ok(env)) { 2513 env->exception_index = -1; 2514 cpu_loop_exit(); 2515 } 2516 #endif 2517 } 2518 2519 /* real and vm86 mode iret */ 2520 void helper_iret_real(int shift) 2521 { 2522 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask; 2523 target_ulong ssp; 2524 int eflags_mask; 2525 #ifdef VBOX 2526 bool fVME = false; 2527 2528 remR3TrapClear(env->pVM); 2529 #endif /* VBOX */ 2530 2531 sp_mask = 0xffff; /* XXXX: use SS segment size ? */ 2532 sp = ESP; 2533 ssp = env->segs[R_SS].base; 2534 if (shift == 1) { 2535 /* 32 bits */ 2536 POPL(ssp, sp, sp_mask, new_eip); 2537 POPL(ssp, sp, sp_mask, new_cs); 2538 new_cs &= 0xffff; 2539 POPL(ssp, sp, sp_mask, new_eflags); 2540 } else { 2541 /* 16 bits */ 2542 POPW(ssp, sp, sp_mask, new_eip); 2543 POPW(ssp, sp, sp_mask, new_cs); 2544 POPW(ssp, sp, sp_mask, new_eflags); 2545 } 2546 #ifdef VBOX 2547 if ( (env->eflags & VM_MASK) 2548 && ((env->eflags >> IOPL_SHIFT) & 3) != 3 2549 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */ 2550 { 2551 fVME = true; 2552 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */ 2553 /* if TF will be set -> #GP */ 2554 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK)) 2555 || (new_eflags & TF_MASK)) 2556 raise_exception(EXCP0D_GPF); 2557 } 2558 #endif /* VBOX */ 2559 2560 ESP = (ESP & ~sp_mask) | (sp & sp_mask); 2561 load_seg_vm(R_CS, new_cs); 2562 env->eip = new_eip; 2563 #ifdef VBOX 2564 if (fVME) 2565 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK; 2566 else 2567 #endif 2568 if (env->eflags & VM_MASK) 2569 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK; 2570 else 2571 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK; 2572 if (shift == 0) 2573 eflags_mask &= 0xffff; 2574 load_eflags(new_eflags, eflags_mask); 2575 2576 #ifdef VBOX 2577 if (fVME) 2578 { 2579 if (new_eflags & IF_MASK) 2580 env->eflags |= VIF_MASK; 2581 else 2582 env->eflags &= ~VIF_MASK; 2583 } 2584 #endif /* VBOX */ 2585 } 2586 2587 static inline void validate_seg(int seg_reg, int cpl) 2588 { 2589 int dpl; 2590 uint32_t e2; 2591 2592 /* XXX: on x86_64, we do not want to nullify FS and GS because 2593 they may still contain a valid base. 
I would be interested to 2594 know how a real x86_64 CPU behaves */ 2595 if ((seg_reg == R_FS || seg_reg == R_GS) && 2596 (env->segs[seg_reg].selector & 0xfffc) == 0) 2597 return; 2598 2599 e2 = env->segs[seg_reg].flags; 2600 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 2601 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) { 2602 /* data or non conforming code segment */ 2603 if (dpl < cpl) { 2604 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0); 2605 } 2606 } 2607 } 2608 2609 /* protected mode iret */ 2610 static inline void helper_ret_protected(int shift, int is_iret, int addend) 2611 { 2612 uint32_t new_cs, new_eflags, new_ss; 2613 uint32_t new_es, new_ds, new_fs, new_gs; 2614 uint32_t e1, e2, ss_e1, ss_e2; 2615 int cpl, dpl, rpl, eflags_mask, iopl; 2616 target_ulong ssp, sp, new_eip, new_esp, sp_mask; 2617 2618 #ifdef TARGET_X86_64 2619 if (shift == 2) 2620 sp_mask = -1; 2621 else 2622 #endif 2623 sp_mask = get_sp_mask(env->segs[R_SS].flags); 2624 sp = ESP; 2625 ssp = env->segs[R_SS].base; 2626 new_eflags = 0; /* avoid warning */ 2627 #ifdef TARGET_X86_64 2628 if (shift == 2) { 2629 POPQ(sp, new_eip); 2630 POPQ(sp, new_cs); 2631 new_cs &= 0xffff; 2632 if (is_iret) { 2633 POPQ(sp, new_eflags); 2634 } 2635 } else 2636 #endif 2637 if (shift == 1) { 2638 /* 32 bits */ 2639 POPL(ssp, sp, sp_mask, new_eip); 2640 POPL(ssp, sp, sp_mask, new_cs); 2641 new_cs &= 0xffff; 2642 if (is_iret) { 2643 POPL(ssp, sp, sp_mask, new_eflags); 2644 #if defined(VBOX) && defined(DEBUG) 2645 printf("iret: new CS %04X\n", new_cs); 2646 printf("iret: new EIP %08X\n", new_eip); 2647 printf("iret: new EFLAGS %08X\n", new_eflags); 2648 printf("iret: EAX=%08x\n", EAX); 2649 #endif 2650 2651 if (new_eflags & VM_MASK) 2652 goto return_to_vm86; 2653 } 2654 #ifdef VBOX 2655 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0)) 2656 { 2657 #ifdef DEBUG 2658 printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc); 2659 #endif 2660 new_cs = new_cs & 0xfffc; 2661 } 2662 #endif 2663 } else { 2664 /* 16 bits */ 2665 POPW(ssp, sp, sp_mask, new_eip); 2666 POPW(ssp, sp, sp_mask, new_cs); 2667 if (is_iret) 2668 POPW(ssp, sp, sp_mask, new_eflags); 2669 } 2670 #ifdef DEBUG_PCALL 2671 if (loglevel & CPU_LOG_PCALL) { 2672 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n", 2673 new_cs, new_eip, shift, addend); 2674 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP); 2675 } 2676 #endif 2677 if ((new_cs & 0xfffc) == 0) 2678 { 2679 #if defined(VBOX) && defined(DEBUG) 2680 printf("new_cs & 0xfffc) == 0\n"); 2681 #endif 2682 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); 2683 } 2684 if (load_segment(&e1, &e2, new_cs) != 0) 2685 { 2686 #if defined(VBOX) && defined(DEBUG) 2687 printf("load_segment failed\n"); 2688 #endif 2689 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); 2690 } 2691 if (!(e2 & DESC_S_MASK) || 2692 !(e2 & DESC_CS_MASK)) 2693 { 2694 #if defined(VBOX) && defined(DEBUG) 2695 printf("e2 mask %08x\n", e2); 2696 #endif 2697 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); 2698 } 2699 cpl = env->hflags & HF_CPL_MASK; 2700 rpl = new_cs & 3; 2701 if (rpl < cpl) 2702 { 2703 #if defined(VBOX) && defined(DEBUG) 2704 printf("rpl < cpl (%d vs %d)\n", rpl, cpl); 2705 #endif 2706 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); 2707 } 2708 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 2709 if (e2 & DESC_C_MASK) { 2710 if (dpl > rpl) 2711 { 2712 #if defined(VBOX) && defined(DEBUG) 2713 printf("dpl > rpl (%d vs %d)\n", dpl, rpl); 2714 #endif 2715 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); 2716 } 2717 } else { 2718 if 
(dpl != rpl) 2719 { 2720 #if defined(VBOX) && defined(DEBUG) 2721 printf("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2); 2722 #endif 2723 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); 2724 } 2725 } 2726 if (!(e2 & DESC_P_MASK)) 2727 { 2728 #if defined(VBOX) && defined(DEBUG) 2729 printf("DESC_P_MASK e2=%08x\n", e2); 2730 #endif 2731 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc); 2732 } 2733 sp += addend; 2734 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) || 2735 ((env->hflags & HF_CS64_MASK) && !is_iret))) { 2736 /* return to same priledge level */ 2737 cpu_x86_load_seg_cache(env, R_CS, new_cs, 2738 get_seg_base(e1, e2), 2739 get_seg_limit(e1, e2), 2740 e2); 2741 } else { 2742 /* return to different priviledge level */ 2743 #ifdef TARGET_X86_64 2744 if (shift == 2) { 2745 POPQ(sp, new_esp); 2746 POPQ(sp, new_ss); 2747 new_ss &= 0xffff; 2748 } else 2749 #endif 2750 if (shift == 1) { 2751 /* 32 bits */ 2752 POPL(ssp, sp, sp_mask, new_esp); 2753 POPL(ssp, sp, sp_mask, new_ss); 2754 new_ss &= 0xffff; 2755 } else { 2756 /* 16 bits */ 2757 POPW(ssp, sp, sp_mask, new_esp); 2758 POPW(ssp, sp, sp_mask, new_ss); 2759 } 2760 #ifdef DEBUG_PCALL 2761 if (loglevel & CPU_LOG_PCALL) { 2762 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n", 2763 new_ss, new_esp); 2764 } 2765 #endif 2766 if ((new_ss & 0xfffc) == 0) { 2767 #ifdef TARGET_X86_64 2768 /* NULL ss is allowed in long mode if cpl != 3*/ 2769 /* XXX: test CS64 ? */ 2770 if ((env->hflags & HF_LMA_MASK) && rpl != 3) { 2771 cpu_x86_load_seg_cache(env, R_SS, new_ss, 2772 0, 0xffffffff, 2773 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2774 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) | 2775 DESC_W_MASK | DESC_A_MASK); 2776 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */ 2777 } else 2778 #endif 2779 { 2780 raise_exception_err(EXCP0D_GPF, 0); 2781 } 2782 } else { 2783 if ((new_ss & 3) != rpl) 2784 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc); 2785 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0) 2786 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc); 2787 if (!(ss_e2 & DESC_S_MASK) || 2788 (ss_e2 & DESC_CS_MASK) || 2789 !(ss_e2 & DESC_W_MASK)) 2790 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc); 2791 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; 2792 if (dpl != rpl) 2793 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc); 2794 if (!(ss_e2 & DESC_P_MASK)) 2795 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc); 2796 cpu_x86_load_seg_cache(env, R_SS, new_ss, 2797 get_seg_base(ss_e1, ss_e2), 2798 get_seg_limit(ss_e1, ss_e2), 2799 ss_e2); 2800 } 2801 2802 cpu_x86_load_seg_cache(env, R_CS, new_cs, 2803 get_seg_base(e1, e2), 2804 get_seg_limit(e1, e2), 2805 e2); 2806 cpu_x86_set_cpl(env, rpl); 2807 sp = new_esp; 2808 #ifdef TARGET_X86_64 2809 if (env->hflags & HF_CS64_MASK) 2810 sp_mask = -1; 2811 else 2812 #endif 2813 sp_mask = get_sp_mask(ss_e2); 2814 2815 /* validate data segments */ 2816 validate_seg(R_ES, rpl); 2817 validate_seg(R_DS, rpl); 2818 validate_seg(R_FS, rpl); 2819 validate_seg(R_GS, rpl); 2820 2821 sp += addend; 2822 } 2823 SET_ESP(sp, sp_mask); 2824 env->eip = new_eip; 2825 if (is_iret) { 2826 /* NOTE: 'cpl' is the _old_ CPL */ 2827 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK; 2828 if (cpl == 0) 2829 #ifdef VBOX 2830 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK; 2831 #else 2832 eflags_mask |= IOPL_MASK; 2833 #endif 2834 iopl = (env->eflags >> IOPL_SHIFT) & 3; 2835 if (cpl <= iopl) 2836 eflags_mask |= IF_MASK; 2837 if (shift == 0) 2838 eflags_mask &= 0xffff; 2839 load_eflags(new_eflags, eflags_mask); 2840 } 
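/* Protected-mode far return / IRET finished; the return_to_vm86 code below is reached only through the goto above, when IRET pops an EFLAGS image with VM set. */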
2841 return; 2842 2843 return_to_vm86: 2844 2845 #if 0 // defined(VBOX) && defined(DEBUG) 2846 printf("V86: new CS %04X\n", new_cs); 2847 printf("V86: Descriptor %08X:%08X\n", e2, e1); 2848 printf("V86: new EIP %08X\n", new_eip); 2849 printf("V86: new EFLAGS %08X\n", new_eflags); 2850 #endif 2851 2852 POPL(ssp, sp, sp_mask, new_esp); 2853 POPL(ssp, sp, sp_mask, new_ss); 2854 POPL(ssp, sp, sp_mask, new_es); 2855 POPL(ssp, sp, sp_mask, new_ds); 2856 POPL(ssp, sp, sp_mask, new_fs); 2857 POPL(ssp, sp, sp_mask, new_gs); 2858 2859 /* modify processor state */ 2860 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK | 2861 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK); 2862 load_seg_vm(R_CS, new_cs & 0xffff); 2863 cpu_x86_set_cpl(env, 3); 2864 load_seg_vm(R_SS, new_ss & 0xffff); 2865 load_seg_vm(R_ES, new_es & 0xffff); 2866 load_seg_vm(R_DS, new_ds & 0xffff); 2867 load_seg_vm(R_FS, new_fs & 0xffff); 2868 load_seg_vm(R_GS, new_gs & 0xffff); 2869 2870 env->eip = new_eip & 0xffff; 2871 ESP = new_esp; 2872 } 2873 2874 void helper_iret_protected(int shift, int next_eip) 2875 { 2876 int tss_selector, type; 2877 uint32_t e1, e2; 2878 2879 #ifdef VBOX 2880 remR3TrapClear(env->pVM); 2881 #endif 2882 2883 /* specific case for TSS */ 2884 if (env->eflags & NT_MASK) { 2885 #ifdef TARGET_X86_64 2886 if (env->hflags & HF_LMA_MASK) 2887 raise_exception_err(EXCP0D_GPF, 0); 2888 #endif 2889 tss_selector = lduw_kernel(env->tr.base + 0); 2890 if (tss_selector & 4) 2891 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc); 2892 if (load_segment(&e1, &e2, tss_selector) != 0) 2893 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc); 2894 type = (e2 >> DESC_TYPE_SHIFT) & 0x17; 2895 /* NOTE: we check both segment and busy TSS */ 2896 if (type != 3) 2897 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc); 2898 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip); 2899 } else { 2900 helper_ret_protected(shift, 1, 0); 2901 } 2902 #ifdef USE_KQEMU 2903 if (kqemu_is_ok(env)) { 2904 CC_OP = CC_OP_EFLAGS; 2905 env->exception_index = -1; 2906 cpu_loop_exit(); 2907 } 2908 #endif 2909 } 2910 2911 void helper_lret_protected(int shift, int addend) 2912 { 2913 helper_ret_protected(shift, 0, addend); 2914 #ifdef USE_KQEMU 2915 if (kqemu_is_ok(env)) { 2916 env->exception_index = -1; 2917 cpu_loop_exit(); 2918 } 2919 #endif 2920 } 2921 2922 void helper_sysenter(void) 2923 { 2924 if (env->sysenter_cs == 0) { 2925 raise_exception_err(EXCP0D_GPF, 0); 2926 } 2927 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK); 2928 cpu_x86_set_cpl(env, 0); 2929 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc, 2930 0, 0xffffffff, 2931 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2932 DESC_S_MASK | 2933 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); 2934 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc, 2935 0, 0xffffffff, 2936 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2937 DESC_S_MASK | 2938 DESC_W_MASK | DESC_A_MASK); 2939 ESP = env->sysenter_esp; 2940 EIP = env->sysenter_eip; 2941 } 2942 2943 void helper_sysexit(void) 2944 { 2945 int cpl; 2946 2947 cpl = env->hflags & HF_CPL_MASK; 2948 if (env->sysenter_cs == 0 || cpl != 0) { 2949 raise_exception_err(EXCP0D_GPF, 0); 2950 } 2951 cpu_x86_set_cpl(env, 3); 2952 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3, 2953 0, 0xffffffff, 2954 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2955 DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 2956 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); 2957 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 
0xfffc) | 3, 2958 0, 0xffffffff, 2959 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2960 DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 2961 DESC_W_MASK | DESC_A_MASK); 2962 ESP = ECX; 2963 EIP = EDX; 2964 #ifdef USE_KQEMU 2965 if (kqemu_is_ok(env)) { 2966 env->exception_index = -1; 2967 cpu_loop_exit(); 2968 } 2969 #endif 2970 } 2971 2972 void helper_movl_crN_T0(int reg) 2973 { 2974 #if !defined(CONFIG_USER_ONLY) 2975 switch(reg) { 2976 case 0: 2977 cpu_x86_update_cr0(env, T0); 2978 break; 2979 case 3: 2980 cpu_x86_update_cr3(env, T0); 2981 break; 2982 case 4: 2983 cpu_x86_update_cr4(env, T0); 2984 break; 2985 case 8: 2986 cpu_set_apic_tpr(env, T0); 2987 break; 2988 default: 2989 env->cr[reg] = T0; 2990 break; 2991 } 2992 #endif 2993 } 2994 2995 /* XXX: do more */ 2996 void helper_movl_drN_T0(int reg) 2997 { 2998 env->dr[reg] = T0; 2999 } 3000 3001 void helper_invlpg(target_ulong addr) 3002 { 3003 cpu_x86_flush_tlb(env, addr); 3004 } 3005 3006 void helper_rdtsc(void) 3007 { 3008 uint64_t val; 3009 3010 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) { 3011 raise_exception(EXCP0D_GPF); 3012 } 3013 val = cpu_get_tsc(env); 3014 EAX = (uint32_t)(val); 3015 EDX = (uint32_t)(val >> 32); 3016 } 3017 3018 #if defined(CONFIG_USER_ONLY) 3019 void helper_wrmsr(void) 3020 { 3021 } 3022 3023 void helper_rdmsr(void) 3024 { 3025 } 3026 #else 3027 void helper_wrmsr(void) 3028 { 3029 uint64_t val; 3030 3031 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32); 3032 3033 switch((uint32_t)ECX) { 3034 case MSR_IA32_SYSENTER_CS: 3035 env->sysenter_cs = val & 0xffff; 3036 break; 3037 case MSR_IA32_SYSENTER_ESP: 3038 env->sysenter_esp = val; 3039 break; 3040 case MSR_IA32_SYSENTER_EIP: 3041 env->sysenter_eip = val; 3042 break; 3043 case MSR_IA32_APICBASE: 3044 cpu_set_apic_base(env, val); 3045 break; 3046 case MSR_EFER: 3047 { 3048 uint64_t update_mask; 3049 update_mask = 0; 3050 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL) 3051 update_mask |= MSR_EFER_SCE; 3052 if (env->cpuid_ext2_features & CPUID_EXT2_LM) 3053 update_mask |= MSR_EFER_LME; 3054 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR) 3055 update_mask |= MSR_EFER_FFXSR; 3056 if (env->cpuid_ext2_features & CPUID_EXT2_NX) 3057 update_mask |= MSR_EFER_NXE; 3058 env->efer = (env->efer & ~update_mask) | 3059 (val & update_mask); 3060 } 3061 break; 3062 case MSR_STAR: 3063 env->star = val; 3064 break; 3065 case MSR_PAT: 3066 env->pat = val; 3067 break; 3068 #ifdef TARGET_X86_64 3069 case MSR_LSTAR: 3070 env->lstar = val; 3071 break; 3072 case MSR_CSTAR: 3073 env->cstar = val; 3074 break; 3075 case MSR_FMASK: 3076 env->fmask = val; 3077 break; 3078 case MSR_FSBASE: 3079 env->segs[R_FS].base = val; 3080 break; 3081 case MSR_GSBASE: 3082 env->segs[R_GS].base = val; 3083 break; 3084 case MSR_KERNELGSBASE: 3085 env->kernelgsbase = val; 3086 break; 3087 #endif 3088 default: 3089 #ifndef VBOX 3090 /* XXX: exception ? */ 3091 break; 3092 #else /* VBOX */ 3093 { 3094 uint32_t ecx = (uint32_t)ECX; 3095 /* In X2APIC specification this range is reserved for APIC control. */ 3096 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END) 3097 cpu_apic_wrmsr(env, ecx, val); 3098 /** @todo else exception? 
*/ 3099 break; 3100 } 3101 #endif /* VBOX */ 3102 } 3103 } 3104 3105 void helper_rdmsr(void) 3106 { 3107 uint64_t val; 3108 switch((uint32_t)ECX) { 3109 case MSR_IA32_SYSENTER_CS: 3110 val = env->sysenter_cs; 3111 break; 3112 case MSR_IA32_SYSENTER_ESP: 3113 val = env->sysenter_esp; 3114 break; 3115 case MSR_IA32_SYSENTER_EIP: 3116 val = env->sysenter_eip; 3117 break; 3118 case MSR_IA32_APICBASE: 3119 val = cpu_get_apic_base(env); 3120 break; 3121 case MSR_EFER: 3122 val = env->efer; 3123 break; 3124 case MSR_STAR: 3125 val = env->star; 3126 break; 3127 case MSR_PAT: 3128 val = env->pat; 3129 break; 3130 #ifdef TARGET_X86_64 3131 case MSR_LSTAR: 3132 val = env->lstar; 3133 break; 3134 case MSR_CSTAR: 3135 val = env->cstar; 3136 break; 3137 case MSR_FMASK: 3138 val = env->fmask; 3139 break; 3140 case MSR_FSBASE: 3141 val = env->segs[R_FS].base; 3142 break; 3143 case MSR_GSBASE: 3144 val = env->segs[R_GS].base; 3145 break; 3146 case MSR_KERNELGSBASE: 3147 val = env->kernelgsbase; 3148 break; 3149 #endif 3150 default: 3151 #ifndef VBOX 3152 /* XXX: exception ? */ 3153 val = 0; 3154 break; 3155 #else /* VBOX */ 3156 { 3157 uint32_t ecx = (uint32_t)ECX; 3158 /* In X2APIC specification this range is reserved for APIC control. */ 3159 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END) 3160 val = cpu_apic_rdmsr(env, ecx); 3161 else 3162 val = 0; /** @todo else exception? */ 3163 break; 3164 } 3165 #endif /* VBOX */ 3166 } 3167 EAX = (uint32_t)(val); 3168 EDX = (uint32_t)(val >> 32); 3169 } 3170 #endif 3171 3172 void helper_lsl(void) 3173 { 3174 unsigned int selector, limit; 3175 uint32_t e1, e2, eflags; 3176 int rpl, dpl, cpl, type; 3177 3178 eflags = cc_table[CC_OP].compute_all(); 3179 selector = T0 & 0xffff; 3180 if (load_segment(&e1, &e2, selector) != 0) 3181 goto fail; 3182 rpl = selector & 3; 3183 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 3184 cpl = env->hflags & HF_CPL_MASK; 3185 if (e2 & DESC_S_MASK) { 3186 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) { 3187 /* conforming */ 3188 } else { 3189 if (dpl < cpl || dpl < rpl) 3190 goto fail; 3191 } 3192 } else { 3193 type = (e2 >> DESC_TYPE_SHIFT) & 0xf; 3194 switch(type) { 3195 case 1: 3196 case 2: 3197 case 3: 3198 case 9: 3199 case 11: 3200 break; 3201 default: 3202 goto fail; 3203 } 3204 if (dpl < cpl || dpl < rpl) { 3205 fail: 3206 CC_SRC = eflags & ~CC_Z; 3207 return; 3208 } 3209 } 3210 limit = get_seg_limit(e1, e2); 3211 T1 = limit; 3212 CC_SRC = eflags | CC_Z; 3213 } 3214 3215 void helper_lar(void) 3216 { 3217 unsigned int selector; 3218 uint32_t e1, e2, eflags; 3219 int rpl, dpl, cpl, type; 3220 3221 eflags = cc_table[CC_OP].compute_all(); 3222 selector = T0 & 0xffff; 3223 if ((selector & 0xfffc) == 0) 3224 goto fail; 3225 if (load_segment(&e1, &e2, selector) != 0) 3226 goto fail; 3227 rpl = selector & 3; 3228 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 3229 cpl = env->hflags & HF_CPL_MASK; 3230 if (e2 & DESC_S_MASK) { 3231 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) { 3232 /* conforming */ 3233 } else { 3234 if (dpl < cpl || dpl < rpl) 3235 goto fail; 3236 } 3237 } else { 3238 type = (e2 >> DESC_TYPE_SHIFT) & 0xf; 3239 switch(type) { 3240 case 1: 3241 case 2: 3242 case 3: 3243 case 4: 3244 case 5: 3245 case 9: 3246 case 11: 3247 case 12: 3248 break; 3249 default: 3250 goto fail; 3251 } 3252 if (dpl < cpl || dpl < rpl) { 3253 fail: 3254 CC_SRC = eflags & ~CC_Z; 3255 return; 3256 } 3257 } 3258 T1 = e2 & 0x00f0ff00; 3259 CC_SRC = eflags | CC_Z; 3260 } 3261 3262 void helper_verr(void) 3263 { 3264 unsigned int selector; 3265 uint32_t e1, e2, 
eflags; 3266 int rpl, dpl, cpl; 3267 3268 eflags = cc_table[CC_OP].compute_all(); 3269 selector = T0 & 0xffff; 3270 if ((selector & 0xfffc) == 0) 3271 goto fail; 3272 if (load_segment(&e1, &e2, selector) != 0) 3273 goto fail; 3274 if (!(e2 & DESC_S_MASK)) 3275 goto fail; 3276 rpl = selector & 3; 3277 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 3278 cpl = env->hflags & HF_CPL_MASK; 3279 if (e2 & DESC_CS_MASK) { 3280 if (!(e2 & DESC_R_MASK)) 3281 goto fail; 3282 if (!(e2 & DESC_C_MASK)) { 3283 if (dpl < cpl || dpl < rpl) 3284 goto fail; 3285 } 3286 } else { 3287 if (dpl < cpl || dpl < rpl) { 3288 fail: 3289 CC_SRC = eflags & ~CC_Z; 3290 return; 3291 } 3292 } 3293 CC_SRC = eflags | CC_Z; 3294 } 3295 3296 void helper_verw(void) 3297 { 3298 unsigned int selector; 3299 uint32_t e1, e2, eflags; 3300 int rpl, dpl, cpl; 3301 3302 eflags = cc_table[CC_OP].compute_all(); 3303 selector = T0 & 0xffff; 3304 if ((selector & 0xfffc) == 0) 3305 goto fail; 3306 if (load_segment(&e1, &e2, selector) != 0) 3307 goto fail; 3308 if (!(e2 & DESC_S_MASK)) 3309 goto fail; 3310 rpl = selector & 3; 3311 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 3312 cpl = env->hflags & HF_CPL_MASK; 3313 if (e2 & DESC_CS_MASK) { 3314 goto fail; 3315 } else { 3316 if (dpl < cpl || dpl < rpl) 3317 goto fail; 3318 if (!(e2 & DESC_W_MASK)) { 3319 fail: 3320 CC_SRC = eflags & ~CC_Z; 3321 return; 3322 } 3323 } 3324 CC_SRC = eflags | CC_Z; 3325 } 3326 3327 /* FPU helpers */ 3328 3329 void helper_fldt_ST0_A0(void) 3330 { 3331 int new_fpstt; 3332 new_fpstt = (env->fpstt - 1) & 7; 3333 env->fpregs[new_fpstt].d = helper_fldt(A0); 3334 env->fpstt = new_fpstt; 3335 env->fptags[new_fpstt] = 0; /* validate stack entry */ 3336 } 3337 3338 void helper_fstt_ST0_A0(void) 3339 { 3340 helper_fstt(ST0, A0); 3341 } 3342 3343 void fpu_set_exception(int mask) 3344 { 3345 env->fpus |= mask; 3346 if (env->fpus & (~env->fpuc & FPUC_EM)) 3347 env->fpus |= FPUS_SE | FPUS_B; 3348 } 3349 3350 CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b) 3351 { 3352 if (b == 0.0) 3353 fpu_set_exception(FPUS_ZE); 3354 return a / b; 3355 } 3356 3357 void fpu_raise_exception(void) 3358 { 3359 if (env->cr[0] & CR0_NE_MASK) { 3360 raise_exception(EXCP10_COPR); 3361 } 3362 #if !defined(CONFIG_USER_ONLY) 3363 else { 3364 cpu_set_ferr(env); 3365 } 3366 #endif 3367 } 3368 3369 /* BCD ops */ 3370 3371 void helper_fbld_ST0_A0(void) 3372 { 3373 CPU86_LDouble tmp; 3374 uint64_t val; 3375 unsigned int v; 3376 int i; 3377 3378 val = 0; 3379 for(i = 8; i >= 0; i--) { 3380 v = ldub(A0 + i); 3381 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf); 3382 } 3383 tmp = val; 3384 if (ldub(A0 + 9) & 0x80) 3385 tmp = -tmp; 3386 fpush(); 3387 ST0 = tmp; 3388 } 3389 3390 void helper_fbst_ST0_A0(void) 3391 { 3392 int v; 3393 target_ulong mem_ref, mem_end; 3394 int64_t val; 3395 3396 val = floatx_to_int64(ST0, &env->fp_status); 3397 mem_ref = A0; 3398 mem_end = mem_ref + 9; 3399 if (val < 0) { 3400 stb(mem_end, 0x80); 3401 val = -val; 3402 } else { 3403 stb(mem_end, 0x00); 3404 } 3405 while (mem_ref < mem_end) { 3406 if (val == 0) 3407 break; 3408 v = val % 100; 3409 val = val / 100; 3410 v = ((v / 10) << 4) | (v % 10); 3411 stb(mem_ref++, v); 3412 } 3413 while (mem_ref < mem_end) { 3414 stb(mem_ref++, 0); 3415 } 3416 } 3417 3418 void helper_f2xm1(void) 3419 { 3420 ST0 = pow(2.0,ST0) - 1.0; 3421 } 3422 3423 void helper_fyl2x(void) 3424 { 3425 CPU86_LDouble fptemp; 3426 3427 fptemp = ST0; 3428 if (fptemp>0.0){ 3429 fptemp = log(fptemp)/log(2.0); /* log2(ST) */ 3430 ST1 *= fptemp; 3431 fpop(); 3432 } else { 3433 
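/* ST0 <= 0: log2 is undefined here, so this emulation clears the FPU condition-code bits and sets C2 instead of producing a result. */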
env->fpus &= (~0x4700); 3434 env->fpus |= 0x400; 3435 } 3436 } 3437 3438 void helper_fptan(void) 3439 { 3440 CPU86_LDouble fptemp; 3441 3442 fptemp = ST0; 3443 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) { 3444 env->fpus |= 0x400; 3445 } else { 3446 ST0 = tan(fptemp); 3447 fpush(); 3448 ST0 = 1.0; 3449 env->fpus &= (~0x400); /* C2 <-- 0 */ 3450 /* the above code is for |arg| < 2**52 only */ 3451 } 3452 } 3453 3454 void helper_fpatan(void) 3455 { 3456 CPU86_LDouble fptemp, fpsrcop; 3457 3458 fpsrcop = ST1; 3459 fptemp = ST0; 3460 ST1 = atan2(fpsrcop,fptemp); 3461 fpop(); 3462 } 3463 3464 void helper_fxtract(void) 3465 { 3466 CPU86_LDoubleU temp; 3467 unsigned int expdif; 3468 3469 temp.d = ST0; 3470 expdif = EXPD(temp) - EXPBIAS; 3471 /*DP exponent bias*/ 3472 ST0 = expdif; 3473 fpush(); 3474 BIASEXPONENT(temp); 3475 ST0 = temp.d; 3476 } 3477 3478 void helper_fprem1(void) 3479 { 3480 CPU86_LDouble dblq, fpsrcop, fptemp; 3481 CPU86_LDoubleU fpsrcop1, fptemp1; 3482 int expdif; 3483 int q; 3484 3485 fpsrcop = ST0; 3486 fptemp = ST1; 3487 fpsrcop1.d = fpsrcop; 3488 fptemp1.d = fptemp; 3489 expdif = EXPD(fpsrcop1) - EXPD(fptemp1); 3490 if (expdif < 53) { 3491 dblq = fpsrcop / fptemp; 3492 dblq = (dblq < 0.0)? ceil(dblq): floor(dblq); 3493 ST0 = fpsrcop - fptemp*dblq; 3494 q = (int)dblq; /* cutting off top bits is assumed here */ 3495 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */ 3496 /* (C0,C1,C3) <-- (q2,q1,q0) */ 3497 env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */ 3498 env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */ 3499 env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */ 3500 } else { 3501 env->fpus |= 0x400; /* C2 <-- 1 */ 3502 fptemp = pow(2.0, expdif-50); 3503 fpsrcop = (ST0 / ST1) / fptemp; 3504 /* fpsrcop = integer obtained by rounding to the nearest */ 3505 fpsrcop = (fpsrcop-floor(fpsrcop) < ceil(fpsrcop)-fpsrcop)? 3506 floor(fpsrcop): ceil(fpsrcop); 3507 ST0 -= (ST1 * fpsrcop * fptemp); 3508 } 3509 } 3510 3511 void helper_fprem(void) 3512 { 3513 CPU86_LDouble dblq, fpsrcop, fptemp; 3514 CPU86_LDoubleU fpsrcop1, fptemp1; 3515 int expdif; 3516 int q; 3517 3518 fpsrcop = ST0; 3519 fptemp = ST1; 3520 fpsrcop1.d = fpsrcop; 3521 fptemp1.d = fptemp; 3522 expdif = EXPD(fpsrcop1) - EXPD(fptemp1); 3523 if ( expdif < 53 ) { 3524 dblq = fpsrcop / fptemp; 3525 dblq = (dblq < 0.0)? ceil(dblq): floor(dblq); 3526 ST0 = fpsrcop - fptemp*dblq; 3527 q = (int)dblq; /* cutting off top bits is assumed here */ 3528 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */ 3529 /* (C0,C1,C3) <-- (q2,q1,q0) */ 3530 env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */ 3531 env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */ 3532 env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */ 3533 } else { 3534 env->fpus |= 0x400; /* C2 <-- 1 */ 3535 fptemp = pow(2.0, expdif-50); 3536 fpsrcop = (ST0 / ST1) / fptemp; 3537 /* fpsrcop = integer obtained by chopping */ 3538 fpsrcop = (fpsrcop < 0.0)? 
3539 -(floor(fabs(fpsrcop))): floor(fpsrcop); 3540 ST0 -= (ST1 * fpsrcop * fptemp); 3541 } 3542 } 3543 3544 void helper_fyl2xp1(void) 3545 { 3546 CPU86_LDouble fptemp; 3547 3548 fptemp = ST0; 3549 if ((fptemp+1.0)>0.0) { 3550 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */ 3551 ST1 *= fptemp; 3552 fpop(); 3553 } else { 3554 env->fpus &= (~0x4700); 3555 env->fpus |= 0x400; 3556 } 3557 } 3558 3559 void helper_fsqrt(void) 3560 { 3561 CPU86_LDouble fptemp; 3562 3563 fptemp = ST0; 3564 if (fptemp<0.0) { 3565 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */ 3566 env->fpus |= 0x400; 3567 } 3568 ST0 = sqrt(fptemp); 3569 } 3570 3571 void helper_fsincos(void) 3572 { 3573 CPU86_LDouble fptemp; 3574 3575 fptemp = ST0; 3576 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) { 3577 env->fpus |= 0x400; 3578 } else { 3579 ST0 = sin(fptemp); 3580 fpush(); 3581 ST0 = cos(fptemp); 3582 env->fpus &= (~0x400); /* C2 <-- 0 */ 3583 /* the above code is for |arg| < 2**63 only */ 3584 } 3585 } 3586 3587 void helper_frndint(void) 3588 { 3589 ST0 = floatx_round_to_int(ST0, &env->fp_status); 3590 } 3591 3592 void helper_fscale(void) 3593 { 3594 ST0 = ldexp (ST0, (int)(ST1)); 3595 } 3596 3597 void helper_fsin(void) 3598 { 3599 CPU86_LDouble fptemp; 3600 3601 fptemp = ST0; 3602 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) { 3603 env->fpus |= 0x400; 3604 } else { 3605 ST0 = sin(fptemp); 3606 env->fpus &= (~0x400); /* C2 <-- 0 */ 3607 /* the above code is for |arg| < 2**53 only */ 3608 } 3609 } 3610 3611 void helper_fcos(void) 3612 { 3613 CPU86_LDouble fptemp; 3614 3615 fptemp = ST0; 3616 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) { 3617 env->fpus |= 0x400; 3618 } else { 3619 ST0 = cos(fptemp); 3620 env->fpus &= (~0x400); /* C2 <-- 0 */ 3621 /* the above code is for |arg5 < 2**63 only */ 3622 } 3623 } 3624 3625 void helper_fxam_ST0(void) 3626 { 3627 CPU86_LDoubleU temp; 3628 int expdif; 3629 3630 temp.d = ST0; 3631 3632 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */ 3633 if (SIGND(temp)) 3634 env->fpus |= 0x200; /* C1 <-- 1 */ 3635 3636 /* XXX: test fptags too */ 3637 expdif = EXPD(temp); 3638 if (expdif == MAXEXPD) { 3639 #ifdef USE_X86LDOUBLE 3640 if (MANTD(temp) == 0x8000000000000000ULL) 3641 #else 3642 if (MANTD(temp) == 0) 3643 #endif 3644 env->fpus |= 0x500 /*Infinity*/; 3645 else 3646 env->fpus |= 0x100 /*NaN*/; 3647 } else if (expdif == 0) { 3648 if (MANTD(temp) == 0) 3649 env->fpus |= 0x4000 /*Zero*/; 3650 else 3651 env->fpus |= 0x4400 /*Denormal*/; 3652 } else { 3653 env->fpus |= 0x400; 3654 } 3655 } 3656 3657 void helper_fstenv(target_ulong ptr, int data32) 3658 { 3659 int fpus, fptag, exp, i; 3660 uint64_t mant; 3661 CPU86_LDoubleU tmp; 3662 3663 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11; 3664 fptag = 0; 3665 for (i=7; i>=0; i--) { 3666 fptag <<= 2; 3667 if (env->fptags[i]) { 3668 fptag |= 3; 3669 } else { 3670 tmp.d = env->fpregs[i].d; 3671 exp = EXPD(tmp); 3672 mant = MANTD(tmp); 3673 if (exp == 0 && mant == 0) { 3674 /* zero */ 3675 fptag |= 1; 3676 } else if (exp == 0 || exp == MAXEXPD 3677 #ifdef USE_X86LDOUBLE 3678 || (mant & (1LL << 63)) == 0 3679 #endif 3680 ) { 3681 /* NaNs, infinity, denormal */ 3682 fptag |= 2; 3683 } 3684 } 3685 } 3686 if (data32) { 3687 /* 32 bit */ 3688 stl(ptr, env->fpuc); 3689 stl(ptr + 4, fpus); 3690 stl(ptr + 8, fptag); 3691 stl(ptr + 12, 0); /* fpip */ 3692 stl(ptr + 16, 0); /* fpcs */ 3693 stl(ptr + 20, 0); /* fpoo */ 3694 stl(ptr + 24, 0); /* fpos */ 3695 } else { 3696 /* 16 bit */ 3697 stw(ptr, env->fpuc); 3698 stw(ptr + 2, fpus); 3699 stw(ptr + 4, 
fptag); 3700 stw(ptr + 6, 0); 3701 stw(ptr + 8, 0); 3702 stw(ptr + 10, 0); 3703 stw(ptr + 12, 0); 3704 } 3705 } 3706 3707 void helper_fldenv(target_ulong ptr, int data32) 3708 { 3709 int i, fpus, fptag; 3710 3711 if (data32) { 3712 env->fpuc = lduw(ptr); 3713 fpus = lduw(ptr + 4); 3714 fptag = lduw(ptr + 8); 3715 } 3716 else { 3717 env->fpuc = lduw(ptr); 3718 fpus = lduw(ptr + 2); 3719 fptag = lduw(ptr + 4); 3720 } 3721 env->fpstt = (fpus >> 11) & 7; 3722 env->fpus = fpus & ~0x3800; 3723 for(i = 0;i < 8; i++) { 3724 env->fptags[i] = ((fptag & 3) == 3); 3725 fptag >>= 2; 3726 } 3727 } 3728 3729 void helper_fsave(target_ulong ptr, int data32) 3730 { 3731 CPU86_LDouble tmp; 3732 int i; 3733 3734 helper_fstenv(ptr, data32); 3735 3736 ptr += (14 << data32); 3737 for(i = 0;i < 8; i++) { 3738 tmp = ST(i); 3739 helper_fstt(tmp, ptr); 3740 ptr += 10; 3741 } 3742 3743 /* fninit */ 3744 env->fpus = 0; 3745 env->fpstt = 0; 3746 env->fpuc = 0x37f; 3747 env->fptags[0] = 1; 3748 env->fptags[1] = 1; 3749 env->fptags[2] = 1; 3750 env->fptags[3] = 1; 3751 env->fptags[4] = 1; 3752 env->fptags[5] = 1; 3753 env->fptags[6] = 1; 3754 env->fptags[7] = 1; 3755 } 3756 3757 void helper_frstor(target_ulong ptr, int data32) 3758 { 3759 CPU86_LDouble tmp; 3760 int i; 3761 3762 helper_fldenv(ptr, data32); 3763 ptr += (14 << data32); 3764 3765 for(i = 0;i < 8; i++) { 3766 tmp = helper_fldt(ptr); 3767 ST(i) = tmp; 3768 ptr += 10; 3769 } 3770 } 3771 3772 void helper_fxsave(target_ulong ptr, int data64) 3773 { 3774 int fpus, fptag, i, nb_xmm_regs; 3775 CPU86_LDouble tmp; 3776 target_ulong addr; 3777 3778 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11; 3779 fptag = 0; 3780 for(i = 0; i < 8; i++) { 3781 fptag |= (env->fptags[i] << i); 3782 } 3783 stw(ptr, env->fpuc); 3784 stw(ptr + 2, fpus); 3785 stw(ptr + 4, fptag ^ 0xff); 3786 3787 addr = ptr + 0x20; 3788 for(i = 0;i < 8; i++) { 3789 tmp = ST(i); 3790 helper_fstt(tmp, addr); 3791 addr += 16; 3792 } 3793 3794 if (env->cr[4] & CR4_OSFXSR_MASK) { 3795 /* XXX: finish it */ 3796 stl(ptr + 0x18, env->mxcsr); /* mxcsr */ 3797 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */ 3798 nb_xmm_regs = 8 << data64; 3799 addr = ptr + 0xa0; 3800 for(i = 0; i < nb_xmm_regs; i++) { 3801 stq(addr, env->xmm_regs[i].XMM_Q(0)); 3802 stq(addr + 8, env->xmm_regs[i].XMM_Q(1)); 3803 addr += 16; 3804 } 3805 } 3806 } 3807 3808 void helper_fxrstor(target_ulong ptr, int data64) 3809 { 3810 int i, fpus, fptag, nb_xmm_regs; 3811 CPU86_LDouble tmp; 3812 target_ulong addr; 3813 3814 env->fpuc = lduw(ptr); 3815 fpus = lduw(ptr + 2); 3816 fptag = lduw(ptr + 4); 3817 env->fpstt = (fpus >> 11) & 7; 3818 env->fpus = fpus & ~0x3800; 3819 fptag ^= 0xff; 3820 for(i = 0;i < 8; i++) { 3821 env->fptags[i] = ((fptag >> i) & 1); 3822 } 3823 3824 addr = ptr + 0x20; 3825 for(i = 0;i < 8; i++) { 3826 tmp = helper_fldt(addr); 3827 ST(i) = tmp; 3828 addr += 16; 3829 } 3830 3831 if (env->cr[4] & CR4_OSFXSR_MASK) { 3832 /* XXX: finish it */ 3833 env->mxcsr = ldl(ptr + 0x18); 3834 //ldl(ptr + 0x1c); 3835 nb_xmm_regs = 8 << data64; 3836 addr = ptr + 0xa0; 3837 for(i = 0; i < nb_xmm_regs; i++) { 3838 #if !defined(VBOX) || __GNUC__ < 4 3839 env->xmm_regs[i].XMM_Q(0) = ldq(addr); 3840 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8); 3841 #else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. 
*/ 3842 # if 1 3843 env->xmm_regs[i].XMM_L(0) = ldl(addr); 3844 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4); 3845 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8); 3846 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12); 3847 # else 3848 /* this works fine on Mac OS X, gcc 4.0.1 */ 3849 uint64_t u64 = ldq(addr); 3850 env->xmm_regs[i].XMM_Q(0); 3851 u64 = ldq(addr + 4); 3852 env->xmm_regs[i].XMM_Q(1) = u64; 3853 # endif 3854 #endif 3855 addr += 16; 3856 } 3857 } 3858 } 3859 3860 #ifndef USE_X86LDOUBLE 3861 3862 void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f) 3863 { 3864 CPU86_LDoubleU temp; 3865 int e; 3866 3867 temp.d = f; 3868 /* mantissa */ 3869 *pmant = (MANTD(temp) << 11) | (1LL << 63); 3870 /* exponent + sign */ 3871 e = EXPD(temp) - EXPBIAS + 16383; 3872 e |= SIGND(temp) >> 16; 3873 *pexp = e; 3874 } 3875 3876 CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper) 3877 { 3878 CPU86_LDoubleU temp; 3879 int e; 3880 uint64_t ll; 3881 3882 /* XXX: handle overflow ? */ 3883 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */ 3884 e |= (upper >> 4) & 0x800; /* sign */ 3885 ll = (mant >> 11) & ((1LL << 52) - 1); 3886 #ifdef __arm__ 3887 temp.l.upper = (e << 20) | (ll >> 32); 3888 temp.l.lower = ll; 3889 #else 3890 temp.ll = ll | ((uint64_t)e << 52); 3891 #endif 3892 return temp.d; 3893 } 3894 3895 #else 3896 3897 void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f) 3898 { 3899 CPU86_LDoubleU temp; 3900 3901 temp.d = f; 3902 *pmant = temp.l.lower; 3903 *pexp = temp.l.upper; 3904 } 3905 3906 CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper) 3907 { 3908 CPU86_LDoubleU temp; 3909 3910 temp.l.upper = upper; 3911 temp.l.lower = mant; 3912 return temp.d; 3913 } 3914 #endif 3915 3916 #ifdef TARGET_X86_64 3917 3918 //#define DEBUG_MULDIV 3919 3920 static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b) 3921 { 3922 *plow += a; 3923 /* carry test */ 3924 if (*plow < a) 3925 (*phigh)++; 3926 *phigh += b; 3927 } 3928 3929 static void neg128(uint64_t *plow, uint64_t *phigh) 3930 { 3931 *plow = ~ *plow; 3932 *phigh = ~ *phigh; 3933 add128(plow, phigh, 1, 0); 3934 } 3935 3936 static void mul64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b) 3937 { 3938 uint32_t a0, a1, b0, b1; 3939 uint64_t v; 3940 3941 a0 = a; 3942 a1 = a >> 32; 3943 3944 b0 = b; 3945 b1 = b >> 32; 3946 3947 v = (uint64_t)a0 * (uint64_t)b0; 3948 *plow = v; 3949 *phigh = 0; 3950 3951 v = (uint64_t)a0 * (uint64_t)b1; 3952 add128(plow, phigh, v << 32, v >> 32); 3953 3954 v = (uint64_t)a1 * (uint64_t)b0; 3955 add128(plow, phigh, v << 32, v >> 32); 3956 3957 v = (uint64_t)a1 * (uint64_t)b1; 3958 *phigh += v; 3959 #ifdef DEBUG_MULDIV 3960 printf("mul: 0x%016" PRIx64 " * 0x%016" PRIx64 " = 0x%016" PRIx64 "%016" PRIx64 "\n", 3961 a, b, *phigh, *plow); 3962 #endif 3963 } 3964 3965 static void imul64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b) 3966 { 3967 int sa, sb; 3968 sa = (a < 0); 3969 if (sa) 3970 a = -a; 3971 sb = (b < 0); 3972 if (sb) 3973 b = -b; 3974 mul64(plow, phigh, a, b); 3975 if (sa ^ sb) { 3976 neg128(plow, phigh); 3977 } 3978 } 3979 3980 /* return TRUE if overflow */ 3981 static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b) 3982 { 3983 uint64_t q, r, a1, a0; 3984 int i, qb, ab; 3985 3986 a0 = *plow; 3987 a1 = *phigh; 3988 if (a1 == 0) { 3989 q = a0 / b; 3990 r = a0 % b; 3991 *plow = q; 3992 *phigh = r; 3993 } else { 3994 if (a1 >= b) 3995 return 1; 3996 /* XXX: use a better algorithm */ 3997 for(i = 0; i < 64; i++) { 3998 ab = a1 >> 63; 3999 a1 = (a1 
<< 1) | (a0 >> 63); 4000 if (ab || a1 >= b) { 4001 a1 -= b; 4002 qb = 1; 4003 } else { 4004 qb = 0; 4005 } 4006 a0 = (a0 << 1) | qb; 4007 } 4008 #if defined(DEBUG_MULDIV) 4009 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n", 4010 *phigh, *plow, b, a0, a1); 4011 #endif 4012 *plow = a0; 4013 *phigh = a1; 4014 } 4015 return 0; 4016 } 4017 4018 /* return TRUE if overflow */ 4019 static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b) 4020 { 4021 int sa, sb; 4022 sa = ((int64_t)*phigh < 0); 4023 if (sa) 4024 neg128(plow, phigh); 4025 sb = (b < 0); 4026 if (sb) 4027 b = -b; 4028 if (div64(plow, phigh, b) != 0) 4029 return 1; 4030 if (sa ^ sb) { 4031 if (*plow > (1ULL << 63)) 4032 return 1; 4033 *plow = - *plow; 4034 } else { 4035 if (*plow >= (1ULL << 63)) 4036 return 1; 4037 } 4038 if (sa) 4039 *phigh = - *phigh; 4040 return 0; 4041 } 4042 4043 void helper_mulq_EAX_T0(void) 4044 { 4045 uint64_t r0, r1; 4046 4047 mul64(&r0, &r1, EAX, T0); 4048 EAX = r0; 4049 EDX = r1; 4050 CC_DST = r0; 4051 CC_SRC = r1; 4052 } 4053 4054 void helper_imulq_EAX_T0(void) 4055 { 4056 uint64_t r0, r1; 4057 4058 imul64(&r0, &r1, EAX, T0); 4059 EAX = r0; 4060 EDX = r1; 4061 CC_DST = r0; 4062 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63)); 4063 } 4064 4065 void helper_imulq_T0_T1(void) 4066 { 4067 uint64_t r0, r1; 4068 4069 imul64(&r0, &r1, T0, T1); 4070 T0 = r0; 4071 CC_DST = r0; 4072 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63)); 4073 } 4074 4075 void helper_divq_EAX_T0(void) 4076 { 4077 uint64_t r0, r1; 4078 if (T0 == 0) { 4079 raise_exception(EXCP00_DIVZ); 4080 } 4081 r0 = EAX; 4082 r1 = EDX; 4083 if (div64(&r0, &r1, T0)) 4084 raise_exception(EXCP00_DIVZ); 4085 EAX = r0; 4086 EDX = r1; 4087 } 4088 4089 void helper_idivq_EAX_T0(void) 4090 { 4091 uint64_t r0, r1; 4092 if (T0 == 0) { 4093 raise_exception(EXCP00_DIVZ); 4094 } 4095 r0 = EAX; 4096 r1 = EDX; 4097 if (idiv64(&r0, &r1, T0)) 4098 raise_exception(EXCP00_DIVZ); 4099 EAX = r0; 4100 EDX = r1; 4101 } 4102 4103 void helper_bswapq_T0(void) 4104 { 4105 T0 = bswap64(T0); 4106 } 4107 #endif 4108 4109 void helper_hlt(void) 4110 { 4111 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */ 4112 env->hflags |= HF_HALTED_MASK; 4113 env->exception_index = EXCP_HLT; 4114 cpu_loop_exit(); 4115 } 4116 4117 void helper_monitor(void) 4118 { 4119 if ((uint32_t)ECX != 0) 4120 raise_exception(EXCP0D_GPF); 4121 /* XXX: store address ? 
*/ 4122 } 4123 4124 void helper_mwait(void) 4125 { 4126 if ((uint32_t)ECX != 0) 4127 raise_exception(EXCP0D_GPF); 4128 #ifdef VBOX 4129 helper_hlt(); 4130 #else 4131 /* XXX: not complete but not completely erroneous */ 4132 if (env->cpu_index != 0 || env->next_cpu != NULL) { 4133 /* more than one CPU: do not sleep because another CPU may 4134 wake this one */ 4135 } else { 4136 helper_hlt(); 4137 } 4138 #endif 4139 } 4140 4141 float approx_rsqrt(float a) 4142 { 4143 return 1.0 / sqrt(a); 4144 } 4145 4146 float approx_rcp(float a) 4147 { 4148 return 1.0 / a; 4149 } 4150 4151 void update_fp_status(void) 4152 { 4153 int rnd_type; 4154 4155 /* set rounding mode */ 4156 switch(env->fpuc & RC_MASK) { 4157 default: 4158 case RC_NEAR: 4159 rnd_type = float_round_nearest_even; 4160 break; 4161 case RC_DOWN: 4162 rnd_type = float_round_down; 4163 break; 4164 case RC_UP: 4165 rnd_type = float_round_up; 4166 break; 4167 case RC_CHOP: 4168 rnd_type = float_round_to_zero; 4169 break; 4170 } 4171 set_float_rounding_mode(rnd_type, &env->fp_status); 4172 #ifdef FLOATX80 4173 switch((env->fpuc >> 8) & 3) { 4174 case 0: 4175 rnd_type = 32; 4176 break; 4177 case 2: 4178 rnd_type = 64; 4179 break; 4180 case 3: 4181 default: 4182 rnd_type = 80; 4183 break; 4184 } 4185 set_floatx80_rounding_precision(rnd_type, &env->fp_status); 4186 #endif 4187 } 4188 4189 #if !defined(CONFIG_USER_ONLY) 4190 4191 #define MMUSUFFIX _mmu 4192 #define GETPC() (__builtin_return_address(0)) 4193 4194 #define SHIFT 0 4195 #include "softmmu_template.h" 4196 4197 #define SHIFT 1 4198 #include "softmmu_template.h" 4199 4200 #define SHIFT 2 4201 #include "softmmu_template.h" 4202 4203 #define SHIFT 3 4204 #include "softmmu_template.h" 4205 4206 #endif 4207 4208 /* try to fill the TLB and return an exception if error. If retaddr is 4209 NULL, it means that the function was called in C code (i.e. not 4210 from generated code or from helper.c) */ 4211 /* XXX: fix it to restore all registers */ 4212 void tlb_fill(target_ulong addr, int is_write, int is_user, void *retaddr) 4213 { 4214 TranslationBlock *tb; 4215 int ret; 4216 unsigned long pc; 4217 CPUX86State *saved_env; 4218 4219 /* XXX: hack to restore env in all cases, even if not called from 4220 generated code */ 4221 saved_env = env; 4222 env = cpu_single_env; 4223 4224 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, is_user, 1); 4225 if (ret) { 4226 if (retaddr) { 4227 /* now we have a real cpu fault */ 4228 pc = (unsigned long)retaddr; 4229 tb = tb_find_pc(pc); 4230 if (tb) { 4231 /* the PC is inside the translated code. It means that we have 4232 a virtual CPU fault */ 4233 cpu_restore_state(tb, env, pc, NULL); 4234 } 4235 } 4236 if (retaddr) 4237 raise_exception_err(env->exception_index, env->error_code); 4238 else 4239 raise_exception_err_norestore(env->exception_index, env->error_code); 4240 } 4241 env = saved_env; 4242 } 4243 4244 #ifdef VBOX 4245 4246 /** 4247 * Correctly computes the eflags. 4248 * @returns eflags. 4249 * @param env1 CPU environment. 4250 */ 4251 uint32_t raw_compute_eflags(CPUX86State *env1) 4252 { 4253 CPUX86State *savedenv = env; 4254 env = env1; 4255 uint32_t efl = compute_eflags(); 4256 env = savedenv; 4257 return efl; 4258 } 4259 4260 /** 4261 * Reads byte from virtual address in guest memory area. 4262 * XXX: is it working for any addresses? swapped out pages? 4263 * @returns readed data byte. 4264 * @param env1 CPU environment. 4265 * @param pvAddr GC Virtual address. 
4266 */ 4267 uint8_t read_byte(CPUX86State *env1, target_ulong addr) 4268 { 4269 CPUX86State *savedenv = env; 4270 env = env1; 4271 uint8_t u8 = ldub_kernel(addr); 4272 env = savedenv; 4273 return u8; 4274 } 4275 4276 /** 4277 * Reads byte from virtual address in guest memory area. 4278 * XXX: is it working for any addresses? swapped out pages? 4279 * @returns readed data byte. 4280 * @param env1 CPU environment. 4281 * @param pvAddr GC Virtual address. 4282 */ 4283 uint16_t read_word(CPUX86State *env1, target_ulong addr) 4284 { 4285 CPUX86State *savedenv = env; 4286 env = env1; 4287 uint16_t u16 = lduw_kernel(addr); 4288 env = savedenv; 4289 return u16; 4290 } 4291 4292 /** 4293 * Reads byte from virtual address in guest memory area. 4294 * XXX: is it working for any addresses? swapped out pages? 4295 * @returns readed data byte. 4296 * @param env1 CPU environment. 4297 * @param pvAddr GC Virtual address. 4298 */ 4299 uint32_t read_dword(CPUX86State *env1, target_ulong addr) 4300 { 4301 CPUX86State *savedenv = env; 4302 env = env1; 4303 uint32_t u32 = ldl_kernel(addr); 4304 env = savedenv; 4305 return u32; 4306 } 4307 4308 /** 4309 * Writes byte to virtual address in guest memory area. 4310 * XXX: is it working for any addresses? swapped out pages? 4311 * @returns readed data byte. 4312 * @param env1 CPU environment. 4313 * @param pvAddr GC Virtual address. 4314 * @param val byte value 4315 */ 4316 void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val) 4317 { 4318 CPUX86State *savedenv = env; 4319 env = env1; 4320 stb(addr, val); 4321 env = savedenv; 4322 } 4323 4324 void write_word(CPUX86State *env1, target_ulong addr, uint16_t val) 4325 { 4326 CPUX86State *savedenv = env; 4327 env = env1; 4328 stw(addr, val); 4329 env = savedenv; 4330 } 4331 4332 void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val) 4333 { 4334 CPUX86State *savedenv = env; 4335 env = env1; 4336 stl(addr, val); 4337 env = savedenv; 4338 } 4339 4340 /** 4341 * Correctly loads selector into segment register with updating internal 4342 * qemu data/caches. 4343 * @param env1 CPU environment. 4344 * @param seg_reg Segment register. 4345 * @param selector Selector to load. 4346 */ 4347 void sync_seg(CPUX86State *env1, int seg_reg, int selector) 4348 { 4349 CPUX86State *savedenv = env; 4350 env = env1; 4351 4352 if ( env->eflags & X86_EFL_VM 4353 || !(env->cr[0] & X86_CR0_PE)) 4354 { 4355 load_seg_vm(seg_reg, selector); 4356 4357 env = savedenv; 4358 4359 /* Successful sync. */ 4360 env1->segs[seg_reg].newselector = 0; 4361 } 4362 else 4363 { 4364 if (setjmp(env1->jmp_env) == 0) 4365 { 4366 if (seg_reg == R_CS) 4367 { 4368 uint32_t e1, e2; 4369 load_segment(&e1, &e2, selector); 4370 cpu_x86_load_seg_cache(env, R_CS, selector, 4371 get_seg_base(e1, e2), 4372 get_seg_limit(e1, e2), 4373 e2); 4374 } 4375 else 4376 load_seg(seg_reg, selector); 4377 env = savedenv; 4378 4379 /* Successful sync. */ 4380 env1->segs[seg_reg].newselector = 0; 4381 } 4382 else 4383 { 4384 env = savedenv; 4385 4386 /* Postpone sync until the guest uses the selector. */ 4387 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */ 4388 env1->segs[seg_reg].newselector = selector; 4389 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector)); 4390 } 4391 } 4392 4393 } 4394 4395 4396 /** 4397 * Correctly loads a new ldtr selector. 4398 * 4399 * @param env1 CPU environment. 4400 * @param selector Selector to load. 
4401 */ 4402 void sync_ldtr(CPUX86State *env1, int selector) 4403 { 4404 CPUX86State *saved_env = env; 4405 target_ulong saved_T0 = T0; 4406 if (setjmp(env1->jmp_env) == 0) 4407 { 4408 env = env1; 4409 T0 = selector; 4410 helper_lldt_T0(); 4411 T0 = saved_T0; 4412 env = saved_env; 4413 } 4414 else 4415 { 4416 T0 = saved_T0; 4417 env = saved_env; 4418 #ifdef VBOX_STRICT 4419 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector); 4420 #endif 4421 } 4422 } 4423 4424 /** 4425 * Correctly loads a new tr selector. 4426 * 4427 * @param env1 CPU environment. 4428 * @param selector Selector to load. 4429 */ 4430 int sync_tr(CPUX86State *env1, int selector) 4431 { 4432 /* ARG! this was going to call helper_ltr_T0 but that won't work because of busy flag. */ 4433 SegmentCache *dt; 4434 uint32_t e1, e2; 4435 int index, type, entry_limit; 4436 target_ulong ptr; 4437 CPUX86State *saved_env = env; 4438 env = env1; 4439 4440 selector &= 0xffff; 4441 if ((selector & 0xfffc) == 0) { 4442 /* NULL selector case: invalid TR */ 4443 env->tr.base = 0; 4444 env->tr.limit = 0; 4445 env->tr.flags = 0; 4446 } else { 4447 if (selector & 0x4) 4448 goto l_failure; 4449 dt = &env->gdt; 4450 index = selector & ~7; 4451 #ifdef TARGET_X86_64 4452 if (env->hflags & HF_LMA_MASK) 4453 entry_limit = 15; 4454 else 4455 #endif 4456 entry_limit = 7; 4457 if ((index + entry_limit) > dt->limit) 4458 goto l_failure; 4459 ptr = dt->base + index; 4460 e1 = ldl_kernel(ptr); 4461 e2 = ldl_kernel(ptr + 4); 4462 type = (e2 >> DESC_TYPE_SHIFT) & 0xf; 4463 if ((e2 & DESC_S_MASK) /*|| 4464 (type != 1 && type != 9)*/) 4465 goto l_failure; 4466 if (!(e2 & DESC_P_MASK)) 4467 goto l_failure; 4468 #ifdef TARGET_X86_64 4469 if (env->hflags & HF_LMA_MASK) { 4470 uint32_t e3; 4471 e3 = ldl_kernel(ptr + 8); 4472 load_seg_cache_raw_dt(&env->tr, e1, e2); 4473 env->tr.base |= (target_ulong)e3 << 32; 4474 } else 4475 #endif 4476 { 4477 load_seg_cache_raw_dt(&env->tr, e1, e2); 4478 } 4479 e2 |= DESC_TSS_BUSY_MASK; 4480 stl_kernel(ptr + 4, e2); 4481 } 4482 env->tr.selector = selector; 4483 4484 env = saved_env; 4485 return 0; 4486 l_failure: 4487 AssertMsgFailed(("selector=%d\n", selector)); 4488 return -1; 4489 } 4490 4491 int emulate_single_instr(CPUX86State *env1) 4492 { 4493 #if 1 /* single stepping is broken when using a static tb... feel free to figure out why. :-) */ 4494 /* This has to be static because it needs to be addressible 4495 using 32-bit immediate addresses on 64-bit machines. This 4496 is dictated by the gcc code model used when building this 4497 module / op.o. Using a static here pushes the problem 4498 onto the module loader. */ 4499 static TranslationBlock tb_temp; 4500 #endif 4501 TranslationBlock *tb; 4502 TranslationBlock *current; 4503 int csize; 4504 void (*gen_func)(void); 4505 uint8_t *tc_ptr; 4506 target_ulong old_eip; 4507 4508 /* ensures env is loaded in ebp! */ 4509 CPUX86State *savedenv = env; 4510 env = env1; 4511 4512 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR); 4513 4514 #if 1 /* see above */ 4515 tc_ptr = env->pvCodeBuffer; 4516 #else 4517 tc_ptr = code_gen_ptr; 4518 #endif 4519 4520 /* 4521 * Setup temporary translation block. 
4522 */ 4523 /* tb_alloc: */ 4524 #if 1 /* see above */ 4525 tb = &tb_temp; 4526 tb->pc = env->segs[R_CS].base + env->eip; 4527 tb->cflags = 0; 4528 #else 4529 tb = tb_alloc(env->segs[R_CS].base + env->eip); 4530 if (!tb) 4531 { 4532 tb_flush(env); 4533 tb = tb_alloc(env->segs[R_CS].base + env->eip); 4534 } 4535 #endif 4536 4537 /* tb_find_slow: */ 4538 tb->tc_ptr = tc_ptr; 4539 tb->cs_base = env->segs[R_CS].base; 4540 tb->flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)); 4541 4542 /* Initialize the rest with sensible values. */ 4543 tb->size = 0; 4544 tb->phys_hash_next = NULL; 4545 tb->page_next[0] = NULL; 4546 tb->page_next[1] = NULL; 4547 tb->page_addr[0] = 0; 4548 tb->page_addr[1] = 0; 4549 tb->tb_next_offset[0] = 0xffff; 4550 tb->tb_next_offset[1] = 0xffff; 4551 tb->tb_next[0] = 0xffff; 4552 tb->tb_next[1] = 0xffff; 4553 tb->jmp_next[0] = NULL; 4554 tb->jmp_next[1] = NULL; 4555 tb->jmp_first = NULL; 4556 4557 current = env->current_tb; 4558 env->current_tb = NULL; 4559 4560 /* 4561 * Translate only one instruction. 4562 */ 4563 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR); 4564 if (cpu_gen_code(env, tb, env->cbCodeBuffer, &csize) < 0) 4565 { 4566 AssertFailed(); 4567 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR); 4568 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR); 4569 env = savedenv; 4570 return -1; 4571 } 4572 #ifdef DEBUG 4573 if(csize > env->cbCodeBuffer) 4574 { 4575 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR); 4576 AssertFailed(); 4577 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR); 4578 env = savedenv; 4579 return -1; 4580 } 4581 if (tb->tc_ptr != tc_ptr) 4582 { 4583 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR); 4584 AssertFailed(); 4585 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR); 4586 env = savedenv; 4587 return -1; 4588 } 4589 #endif 4590 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR); 4591 4592 /* tb_link_phys: */ 4593 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2); 4594 Assert(tb->jmp_next[0] == NULL); Assert(tb->jmp_next[1] == NULL); 4595 if (tb->tb_next_offset[0] != 0xffff) 4596 tb_set_jmp_target(tb, 0, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[0])); 4597 if (tb->tb_next_offset[1] != 0xffff) 4598 tb_set_jmp_target(tb, 1, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[1])); 4599 4600 /* 4601 * Execute it using emulation 4602 */ 4603 old_eip = env->eip; 4604 gen_func = (void *)tb->tc_ptr; 4605 env->current_tb = tb; 4606 4607 // eip remains the same for repeated instructions; no idea why qemu doesn't do a jump inside the generated code 4608 // perhaps not a very safe hack 4609 while(old_eip == env->eip) 4610 { 4611 gen_func(); 4612 /* 4613 * Exit once we detect an external interrupt and interrupts are enabled 4614 */ 4615 if( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT|CPU_INTERRUPT_EXTERNAL_TIMER)) || 4616 ( (env->eflags & IF_MASK) && 4617 !(env->hflags & HF_INHIBIT_IRQ_MASK) && 4618 (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) ) ) 4619 { 4620 break; 4621 } 4622 } 4623 env->current_tb = current; 4624 4625 Assert(tb->phys_hash_next == NULL); 4626 Assert(tb->page_next[0] == NULL); 4627 Assert(tb->page_next[1] == NULL); 4628 Assert(tb->page_addr[0] == 0); 4629 Assert(tb->page_addr[1] == 0); 4630 /* 4631 Assert(tb->tb_next_offset[0] == 0xffff); 4632 Assert(tb->tb_next_offset[1] == 0xffff); 4633 Assert(tb->tb_next[0] == 0xffff); 4634 Assert(tb->tb_next[1] == 0xffff); 4635 Assert(tb->jmp_next[0] == NULL); 4636 Assert(tb->jmp_next[1] == NULL); 4637 
Assert(tb->jmp_first == NULL); */ 4638 4639 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR); 4640 4641 /* 4642 * Execute the next instruction when we encounter instruction fusing. 4643 */ 4644 if (env->hflags & HF_INHIBIT_IRQ_MASK) 4645 { 4646 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %VGv\n", env->eip)); 4647 env->hflags &= ~HF_INHIBIT_IRQ_MASK; 4648 emulate_single_instr(env); 4649 } 4650 4651 env = savedenv; 4652 return 0; 4653 } 4654 4655 int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr, 4656 uint32_t *esp_ptr, int dpl) 4657 { 4658 int type, index, shift; 4659 4660 CPUX86State *savedenv = env; 4661 env = env1; 4662 4663 if (!(env->tr.flags & DESC_P_MASK)) 4664 cpu_abort(env, "invalid tss"); 4665 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf; 4666 if ((type & 7) != 1) 4667 cpu_abort(env, "invalid tss type %d", type); 4668 shift = type >> 3; 4669 index = (dpl * 4 + 2) << shift; 4670 if (index + (4 << shift) - 1 > env->tr.limit) 4671 { 4672 env = savedenv; 4673 return 0; 4674 } 4675 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc); 4676 4677 if (shift == 0) { 4678 *esp_ptr = lduw_kernel(env->tr.base + index); 4679 *ss_ptr = lduw_kernel(env->tr.base + index + 2); 4680 } else { 4681 *esp_ptr = ldl_kernel(env->tr.base + index); 4682 *ss_ptr = lduw_kernel(env->tr.base + index + 4); 4683 } 4684 4685 env = savedenv; 4686 return 1; 4687 } 4688 4689 //***************************************************************************** 4690 // Needs to be at the bottom of the file (overriding macros) 4691 4692 static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr) 4693 { 4694 return *(CPU86_LDouble *)ptr; 4695 } 4696 4697 static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr) 4698 { 4699 *(CPU86_LDouble *)ptr = f; 4700 } 4701 4702 #undef stw 4703 #undef stl 4704 #undef stq 4705 #define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b) 4706 #define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b) 4707 #define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b) 4708 #define data64 0 4709 4710 //***************************************************************************** 4711 void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr) 4712 { 4713 int fpus, fptag, i, nb_xmm_regs; 4714 CPU86_LDouble tmp; 4715 uint8_t *addr; 4716 4717 if (env->cpuid_features & CPUID_FXSR) 4718 { 4719 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11; 4720 fptag = 0; 4721 for(i = 0; i < 8; i++) { 4722 fptag |= (env->fptags[i] << i); 4723 } 4724 stw(ptr, env->fpuc); 4725 stw(ptr + 2, fpus); 4726 stw(ptr + 4, fptag ^ 0xff); 4727 4728 addr = ptr + 0x20; 4729 for(i = 0;i < 8; i++) { 4730 tmp = ST(i); 4731 helper_fstt_raw(tmp, addr); 4732 addr += 16; 4733 } 4734 4735 if (env->cr[4] & CR4_OSFXSR_MASK) { 4736 /* XXX: finish it */ 4737 stl(ptr + 0x18, env->mxcsr); /* mxcsr */ 4738 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */ 4739 nb_xmm_regs = 8 << data64; 4740 addr = ptr + 0xa0; 4741 for(i = 0; i < nb_xmm_regs; i++) { 4742 #if __GNUC__ < 4 4743 stq(addr, env->xmm_regs[i].XMM_Q(0)); 4744 stq(addr + 8, env->xmm_regs[i].XMM_Q(1)); 4745 #else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. 
*/ 4746 stl(addr, env->xmm_regs[i].XMM_L(0)); 4747 stl(addr + 4, env->xmm_regs[i].XMM_L(1)); 4748 stl(addr + 8, env->xmm_regs[i].XMM_L(2)); 4749 stl(addr + 12, env->xmm_regs[i].XMM_L(3)); 4750 #endif 4751 addr += 16; 4752 } 4753 } 4754 } 4755 else 4756 { 4757 PX86FPUSTATE fp = (PX86FPUSTATE)ptr; 4758 int fptag; 4759 4760 fp->FCW = env->fpuc; 4761 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11; 4762 fptag = 0; 4763 for (i=7; i>=0; i--) { 4764 fptag <<= 2; 4765 if (env->fptags[i]) { 4766 fptag |= 3; 4767 } else { 4768 /* the FPU automatically computes it */ 4769 } 4770 } 4771 fp->FTW = fptag; 4772 4773 for(i = 0;i < 8; i++) { 4774 tmp = ST(i); 4775 helper_fstt_raw(tmp, &fp->regs[i].reg[0]); 4776 } 4777 } 4778 } 4779 4780 //***************************************************************************** 4781 #undef lduw 4782 #undef ldl 4783 #undef ldq 4784 #define lduw(a) *(uint16_t *)(a) 4785 #define ldl(a) *(uint32_t *)(a) 4786 #define ldq(a) *(uint64_t *)(a) 4787 //***************************************************************************** 4788 void save_raw_fp_state(CPUX86State *env, uint8_t *ptr) 4789 { 4790 int i, fpus, fptag, nb_xmm_regs; 4791 CPU86_LDouble tmp; 4792 uint8_t *addr; 4793 4794 if (env->cpuid_features & CPUID_FXSR) 4795 { 4796 env->fpuc = lduw(ptr); 4797 fpus = lduw(ptr + 2); 4798 fptag = lduw(ptr + 4); 4799 env->fpstt = (fpus >> 11) & 7; 4800 env->fpus = fpus & ~0x3800; 4801 fptag ^= 0xff; 4802 for(i = 0;i < 8; i++) { 4803 env->fptags[i] = ((fptag >> i) & 1); 4804 } 4805 4806 addr = ptr + 0x20; 4807 for(i = 0;i < 8; i++) { 4808 tmp = helper_fldt_raw(addr); 4809 ST(i) = tmp; 4810 addr += 16; 4811 } 4812 4813 if (env->cr[4] & CR4_OSFXSR_MASK) { 4814 /* XXX: finish it, endianness */ 4815 env->mxcsr = ldl(ptr + 0x18); 4816 //ldl(ptr + 0x1c); 4817 nb_xmm_regs = 8 << data64; 4818 addr = ptr + 0xa0; 4819 for(i = 0; i < nb_xmm_regs; i++) { 4820 #if HC_ARCH_BITS == 32 4821 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */ 4822 env->xmm_regs[i].XMM_L(0) = ldl(addr); 4823 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4); 4824 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8); 4825 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12); 4826 #else 4827 env->xmm_regs[i].XMM_Q(0) = ldq(addr); 4828 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8); 4829 #endif 4830 addr += 16; 4831 } 4832 } 4833 } 4834 else 4835 { 4836 PX86FPUSTATE fp = (PX86FPUSTATE)ptr; 4837 int fptag, j; 4838 4839 env->fpuc = fp->FCW; 4840 env->fpstt = (fp->FSW >> 11) & 7; 4841 env->fpus = fp->FSW & ~0x3800; 4842 fptag = fp->FTW; 4843 for(i = 0;i < 8; i++) { 4844 env->fptags[i] = ((fptag & 3) == 3); 4845 fptag >>= 2; 4846 } 4847 j = env->fpstt; 4848 for(i = 0;i < 8; i++) { 4849 tmp = helper_fldt_raw(&fp->regs[i].reg[0]); 4850 ST(i) = tmp; 4851 } 4852 } 4853 } 4854 //***************************************************************************** 4855 //***************************************************************************** 4856 4857 #endif /* VBOX */ 4858 1297 /* page directory entry */ 1298 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask; 1299 pte = ldl_phys(pte_addr); 1300 if (!(pte & PG_PRESENT_MASK)) 1301 return -1; 1302 page_size = 4096; 1303 } 1304 } 1305 pte = pte & env->a20_mask; 1306 } 1307 1308 page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1); 1309 paddr = (pte & TARGET_PAGE_MASK) + page_offset; 1310 return paddr; 1311 } 1312 #endif /* !CONFIG_USER_ONLY */ -
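The fixed offsets hard-coded by save_raw_fp_state() and restore_raw_fp_state() above when CPUID_FXSR is set (FCW at +0x00, FSW at +0x02, the inverted abridged tag at +0x04, MXCSR at +0x18, MXCSR_MASK at +0x1c, ST(0)..ST(7) in 16-byte slots from +0x20, XMM0..XMM7 from +0xa0) follow the FXSAVE image layout. A minimal stand-alone sketch of that layout, using a hypothetical FxSaveSketch struct rather than any VirtualBox/QEMU type (and with the fip/fdp fields simplified to placeholders), would be:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef struct FxSaveSketch {
        uint16_t fcw;             /* +0x00: x87 control word (stw(ptr, env->fpuc))      */
        uint16_t fsw;             /* +0x02: x87 status word, fpstt merged into TOP      */
        uint16_t ftw;             /* +0x04: abridged tag word, stored inverted          */
        uint16_t fop;             /* +0x06: simplified placeholder                      */
        uint32_t fip, fcs;        /* +0x08, +0x0c: simplified placeholders              */
        uint32_t fdp, fds;        /* +0x10, +0x14: simplified placeholders              */
        uint32_t mxcsr;           /* +0x18: written from env->mxcsr                     */
        uint32_t mxcsr_mask;      /* +0x1c: written as 0x0000ffff in the helper         */
        uint8_t  st_regs[8][16];  /* +0x20: ST(0)..ST(7), one 16-byte slot each         */
        uint8_t  xmm_regs[8][16]; /* +0xa0: XMM0..XMM7 for the 32-bit guest case        */
        /* the real FXSAVE area continues up to 512 bytes                               */
    } FxSaveSketch;

    int main(void)
    {
        /* The offsets the helpers above rely on. */
        assert(offsetof(FxSaveSketch, mxcsr)    == 0x18);
        assert(offsetof(FxSaveSketch, st_regs)  == 0x20);
        assert(offsetof(FxSaveSketch, xmm_regs) == 0xa0);
        return 0;
    }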
trunk/src/recompiler_new/target-i386/translate.c
r12421 r13357 40 40 #include "exec-all.h" 41 41 #include "disas.h" 42 43 /* XXX: move that elsewhere */ 44 static uint16_t *gen_opc_ptr; 45 static uint32_t *gen_opparam_ptr; 42 #include "helper.h" 43 #include "tcg-op.h" 46 44 47 45 #define PREFIX_REPZ 0x01 … … 69 67 #endif 70 68 69 //#define MACRO_TEST 1 70 71 /* global register indexes */ 72 static TCGv cpu_env, cpu_A0, cpu_cc_op, cpu_cc_src, cpu_cc_dst, cpu_cc_tmp; 73 /* local temps */ 74 static TCGv cpu_T[2], cpu_T3; 75 /* local register indexes (only used inside old micro ops) */ 76 static TCGv cpu_tmp0, cpu_tmp1_i64, cpu_tmp2_i32, cpu_tmp3_i32, cpu_tmp4, cpu_ptr0, cpu_ptr1; 77 static TCGv cpu_tmp5, cpu_tmp6; 78 79 #include "gen-icount.h" 80 71 81 #ifdef TARGET_X86_64 72 82 static int x86_64_hregs; 73 83 #endif 74 84 75 #ifdef USE_DIRECT_JUMP76 #define TBPARAM(x)77 #else78 #define TBPARAM(x) (long)(x)79 #endif80 81 85 #ifdef VBOX 86 82 87 /* Special/override code readers to hide patched code. */ 83 88 … … 140 145 int jmp_opt; /* use direct block chaining for direct jumps */ 141 146 int mem_index; /* select memory access functions */ 142 int flags; /* all execution flags */147 uint64_t flags; /* all execution flags */ 143 148 struct TranslationBlock *tb; 144 149 int popl_esp_hack; /* for correct popl with esp base handling */ … … 153 158 static void gen_jmp(DisasContext *s, target_ulong eip); 154 159 static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num); 160 161 #ifdef VBOX 162 static void gen_check_external_event(); 163 #endif 155 164 156 165 /* i386 arith/logic operations */ … … 179 188 180 189 enum { 181 #define DEF(s, n, copy_size) INDEX_op_ ## s, 182 #include "opc.h" 183 #undef DEF 184 NB_OPS, 190 JCC_O, 191 JCC_B, 192 JCC_Z, 193 JCC_BE, 194 JCC_S, 195 JCC_P, 196 JCC_L, 197 JCC_LE, 185 198 }; 186 187 #include "gen-op.h"188 199 189 200 /* operand size */ … … 211 222 }; 212 223 224 static inline void gen_op_movl_T0_0(void) 225 { 226 tcg_gen_movi_tl(cpu_T[0], 0); 227 } 228 229 static inline void gen_op_movl_T0_im(int32_t val) 230 { 231 tcg_gen_movi_tl(cpu_T[0], val); 232 } 233 234 static inline void gen_op_movl_T0_imu(uint32_t val) 235 { 236 tcg_gen_movi_tl(cpu_T[0], val); 237 } 238 239 static inline void gen_op_movl_T1_im(int32_t val) 240 { 241 tcg_gen_movi_tl(cpu_T[1], val); 242 } 243 244 static inline void gen_op_movl_T1_imu(uint32_t val) 245 { 246 tcg_gen_movi_tl(cpu_T[1], val); 247 } 248 249 static inline void gen_op_movl_A0_im(uint32_t val) 250 { 251 tcg_gen_movi_tl(cpu_A0, val); 252 } 253 213 254 #ifdef TARGET_X86_64 255 static inline void gen_op_movq_A0_im(int64_t val) 256 { 257 tcg_gen_movi_tl(cpu_A0, val); 258 } 259 #endif 260 261 static inline void gen_movtl_T0_im(target_ulong val) 262 { 263 tcg_gen_movi_tl(cpu_T[0], val); 264 } 265 266 static inline void gen_movtl_T1_im(target_ulong val) 267 { 268 tcg_gen_movi_tl(cpu_T[1], val); 269 } 270 271 static inline void gen_op_andl_T0_ffff(void) 272 { 273 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff); 274 } 275 276 static inline void gen_op_andl_T0_im(uint32_t val) 277 { 278 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], val); 279 } 280 281 static inline void gen_op_movl_T0_T1(void) 282 { 283 tcg_gen_mov_tl(cpu_T[0], cpu_T[1]); 284 } 285 286 static inline void gen_op_andl_A0_ffff(void) 287 { 288 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffff); 289 } 290 291 #ifdef TARGET_X86_64 214 292 215 293 #define NB_OP_SIZES 4 216 294 217 #define DEF_REGS(prefix, suffix) \218 prefix ## EAX ## suffix,\219 prefix ## ECX ## suffix,\220 prefix ## EDX ## suffix,\221 prefix ## EBX ## suffix,\222 prefix 
## ESP ## suffix,\223 prefix ## EBP ## suffix,\224 prefix ## ESI ## suffix,\225 prefix ## EDI ## suffix,\226 prefix ## R8 ## suffix,\227 prefix ## R9 ## suffix,\228 prefix ## R10 ## suffix,\229 prefix ## R11 ## suffix,\230 prefix ## R12 ## suffix,\231 prefix ## R13 ## suffix,\232 prefix ## R14 ## suffix,\233 prefix ## R15 ## suffix,234 235 #define DEF_BREGS(prefixb, prefixh, suffix) \236 \237 static void prefixb ## ESP ## suffix ## _wrapper(void) \238 { \239 if (x86_64_hregs) \240 prefixb ## ESP ## suffix (); \241 else \242 prefixh ## EAX ## suffix (); \243 } \244 \245 static void prefixb ## EBP ## suffix ## _wrapper(void) \246 { \247 if (x86_64_hregs) \248 prefixb ## EBP ## suffix (); \249 else \250 prefixh ## ECX ## suffix (); \251 } \252 \253 static void prefixb ## ESI ## suffix ## _wrapper(void) \254 { \255 if (x86_64_hregs) \256 prefixb ## ESI ## suffix (); \257 else \258 prefixh ## EDX ## suffix (); \259 } \260 \261 static void prefixb ## EDI ## suffix ## _wrapper(void) \262 { \263 if (x86_64_hregs) \264 prefixb ## EDI ## suffix (); \265 else \266 prefixh ## EBX ## suffix (); \267 }268 269 DEF_BREGS(gen_op_movb_, gen_op_movh_, _T0)270 DEF_BREGS(gen_op_movb_, gen_op_movh_, _T1)271 DEF_BREGS(gen_op_movl_T0_, gen_op_movh_T0_, )272 DEF_BREGS(gen_op_movl_T1_, gen_op_movh_T1_, )273 274 295 #else /* !TARGET_X86_64 */ 275 296 276 297 #define NB_OP_SIZES 3 277 298 278 #define DEF_REGS(prefix, suffix) \279 prefix ## EAX ## suffix,\280 prefix ## ECX ## suffix,\281 prefix ## EDX ## suffix,\282 prefix ## EBX ## suffix,\283 prefix ## ESP ## suffix,\284 prefix ## EBP ## suffix,\285 prefix ## ESI ## suffix,\286 prefix ## EDI ## suffix,287 288 299 #endif /* !TARGET_X86_64 */ 289 300 290 static GenOpFunc *gen_op_mov_reg_T0[NB_OP_SIZES][CPU_NB_REGS] = { 291 [OT_BYTE] = { 292 gen_op_movb_EAX_T0, 293 gen_op_movb_ECX_T0, 294 gen_op_movb_EDX_T0, 295 gen_op_movb_EBX_T0, 301 #if defined(WORDS_BIGENDIAN) 302 #define REG_B_OFFSET (sizeof(target_ulong) - 1) 303 #define REG_H_OFFSET (sizeof(target_ulong) - 2) 304 #define REG_W_OFFSET (sizeof(target_ulong) - 2) 305 #define REG_L_OFFSET (sizeof(target_ulong) - 4) 306 #define REG_LH_OFFSET (sizeof(target_ulong) - 8) 307 #else 308 #define REG_B_OFFSET 0 309 #define REG_H_OFFSET 1 310 #define REG_W_OFFSET 0 311 #define REG_L_OFFSET 0 312 #define REG_LH_OFFSET 4 313 #endif 314 315 static inline void gen_op_mov_reg_v(int ot, int reg, TCGv t0) 316 { 317 switch(ot) { 318 case OT_BYTE: 319 if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) { 320 tcg_gen_st8_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_B_OFFSET); 321 } else { 322 tcg_gen_st8_tl(t0, cpu_env, offsetof(CPUState, regs[reg - 4]) + REG_H_OFFSET); 323 } 324 break; 325 case OT_WORD: 326 tcg_gen_st16_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET); 327 break; 296 328 #ifdef TARGET_X86_64 297 gen_op_movb_ESP_T0_wrapper, 298 gen_op_movb_EBP_T0_wrapper, 299 gen_op_movb_ESI_T0_wrapper, 300 gen_op_movb_EDI_T0_wrapper, 301 gen_op_movb_R8_T0, 302 gen_op_movb_R9_T0, 303 gen_op_movb_R10_T0, 304 gen_op_movb_R11_T0, 305 gen_op_movb_R12_T0, 306 gen_op_movb_R13_T0, 307 gen_op_movb_R14_T0, 308 gen_op_movb_R15_T0, 329 case OT_LONG: 330 tcg_gen_st32_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET); 331 /* high part of register set to zero */ 332 tcg_gen_movi_tl(cpu_tmp0, 0); 333 tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_LH_OFFSET); 334 break; 335 default: 336 case OT_QUAD: 337 tcg_gen_st_tl(t0, cpu_env, offsetof(CPUState, regs[reg])); 338 break; 309 339 #else 310 
gen_op_movh_EAX_T0, 311 gen_op_movh_ECX_T0, 312 gen_op_movh_EDX_T0, 313 gen_op_movh_EBX_T0, 314 #endif 315 }, 316 [OT_WORD] = { 317 DEF_REGS(gen_op_movw_, _T0) 318 }, 319 [OT_LONG] = { 320 DEF_REGS(gen_op_movl_, _T0) 321 }, 340 default: 341 case OT_LONG: 342 tcg_gen_st32_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET); 343 break; 344 #endif 345 } 346 } 347 348 static inline void gen_op_mov_reg_T0(int ot, int reg) 349 { 350 gen_op_mov_reg_v(ot, reg, cpu_T[0]); 351 } 352 353 static inline void gen_op_mov_reg_T1(int ot, int reg) 354 { 355 gen_op_mov_reg_v(ot, reg, cpu_T[1]); 356 } 357 358 static inline void gen_op_mov_reg_A0(int size, int reg) 359 { 360 switch(size) { 361 case 0: 362 tcg_gen_st16_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET); 363 break; 322 364 #ifdef TARGET_X86_64 323 [OT_QUAD] = { 324 DEF_REGS(gen_op_movq_, _T0) 325 }, 326 #endif 327 }; 328 329 static GenOpFunc *gen_op_mov_reg_T1[NB_OP_SIZES][CPU_NB_REGS] = { 330 [OT_BYTE] = { 331 gen_op_movb_EAX_T1, 332 gen_op_movb_ECX_T1, 333 gen_op_movb_EDX_T1, 334 gen_op_movb_EBX_T1, 365 case 1: 366 tcg_gen_st32_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET); 367 /* high part of register set to zero */ 368 tcg_gen_movi_tl(cpu_tmp0, 0); 369 tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_LH_OFFSET); 370 break; 371 default: 372 case 2: 373 tcg_gen_st_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg])); 374 break; 375 #else 376 default: 377 case 1: 378 tcg_gen_st32_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET); 379 break; 380 #endif 381 } 382 } 383 384 static inline void gen_op_mov_v_reg(int ot, TCGv t0, int reg) 385 { 386 switch(ot) { 387 case OT_BYTE: 388 if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) { 389 goto std_case; 390 } else { 391 tcg_gen_ld8u_tl(t0, cpu_env, offsetof(CPUState, regs[reg - 4]) + REG_H_OFFSET); 392 } 393 break; 394 default: 395 std_case: 396 tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, regs[reg])); 397 break; 398 } 399 } 400 401 static inline void gen_op_mov_TN_reg(int ot, int t_index, int reg) 402 { 403 gen_op_mov_v_reg(ot, cpu_T[t_index], reg); 404 } 405 406 static inline void gen_op_movl_A0_reg(int reg) 407 { 408 tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET); 409 } 410 411 static inline void gen_op_addl_A0_im(int32_t val) 412 { 413 tcg_gen_addi_tl(cpu_A0, cpu_A0, val); 335 414 #ifdef TARGET_X86_64 336 gen_op_movb_ESP_T1_wrapper, 337 gen_op_movb_EBP_T1_wrapper, 338 gen_op_movb_ESI_T1_wrapper, 339 gen_op_movb_EDI_T1_wrapper, 340 gen_op_movb_R8_T1, 341 gen_op_movb_R9_T1, 342 gen_op_movb_R10_T1, 343 gen_op_movb_R11_T1, 344 gen_op_movb_R12_T1, 345 gen_op_movb_R13_T1, 346 gen_op_movb_R14_T1, 347 gen_op_movb_R15_T1, 348 #else 349 gen_op_movh_EAX_T1, 350 gen_op_movh_ECX_T1, 351 gen_op_movh_EDX_T1, 352 gen_op_movh_EBX_T1, 353 #endif 354 }, 355 [OT_WORD] = { 356 DEF_REGS(gen_op_movw_, _T1) 357 }, 358 [OT_LONG] = { 359 DEF_REGS(gen_op_movl_, _T1) 360 }, 415 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff); 416 #endif 417 } 418 361 419 #ifdef TARGET_X86_64 362 [OT_QUAD] = { 363 DEF_REGS(gen_op_movq_, _T1) 364 }, 365 #endif 366 }; 367 368 static GenOpFunc *gen_op_mov_reg_A0[NB_OP_SIZES - 1][CPU_NB_REGS] = { 369 [0] = { 370 DEF_REGS(gen_op_movw_, _A0) 371 }, 372 [1] = { 373 DEF_REGS(gen_op_movl_, _A0) 374 }, 420 static inline void gen_op_addq_A0_im(int64_t val) 421 { 422 tcg_gen_addi_tl(cpu_A0, cpu_A0, val); 423 } 424 #endif 425 426 static void gen_add_A0_im(DisasContext *s, int val) 427 { 375 
428 #ifdef TARGET_X86_64 376 [2] = { 377 DEF_REGS(gen_op_movq_, _A0) 378 }, 379 #endif 380 }; 381 382 static GenOpFunc *gen_op_mov_TN_reg[NB_OP_SIZES][2][CPU_NB_REGS] = 383 { 384 [OT_BYTE] = { 385 { 386 gen_op_movl_T0_EAX, 387 gen_op_movl_T0_ECX, 388 gen_op_movl_T0_EDX, 389 gen_op_movl_T0_EBX, 429 if (CODE64(s)) 430 gen_op_addq_A0_im(val); 431 else 432 #endif 433 gen_op_addl_A0_im(val); 434 } 435 436 static inline void gen_op_addl_T0_T1(void) 437 { 438 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]); 439 } 440 441 static inline void gen_op_jmp_T0(void) 442 { 443 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUState, eip)); 444 } 445 446 static inline void gen_op_add_reg_im(int size, int reg, int32_t val) 447 { 448 switch(size) { 449 case 0: 450 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg])); 451 tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val); 452 tcg_gen_st16_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET); 453 break; 454 case 1: 455 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg])); 456 tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val); 390 457 #ifdef TARGET_X86_64 391 gen_op_movl_T0_ESP_wrapper, 392 gen_op_movl_T0_EBP_wrapper, 393 gen_op_movl_T0_ESI_wrapper, 394 gen_op_movl_T0_EDI_wrapper, 395 gen_op_movl_T0_R8, 396 gen_op_movl_T0_R9, 397 gen_op_movl_T0_R10, 398 gen_op_movl_T0_R11, 399 gen_op_movl_T0_R12, 400 gen_op_movl_T0_R13, 401 gen_op_movl_T0_R14, 402 gen_op_movl_T0_R15, 403 #else 404 gen_op_movh_T0_EAX, 405 gen_op_movh_T0_ECX, 406 gen_op_movh_T0_EDX, 407 gen_op_movh_T0_EBX, 408 #endif 409 }, 410 { 411 gen_op_movl_T1_EAX, 412 gen_op_movl_T1_ECX, 413 gen_op_movl_T1_EDX, 414 gen_op_movl_T1_EBX, 458 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xffffffff); 459 #endif 460 tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg])); 461 break; 415 462 #ifdef TARGET_X86_64 416 gen_op_movl_T1_ESP_wrapper, 417 gen_op_movl_T1_EBP_wrapper, 418 gen_op_movl_T1_ESI_wrapper, 419 gen_op_movl_T1_EDI_wrapper, 420 gen_op_movl_T1_R8, 421 gen_op_movl_T1_R9, 422 gen_op_movl_T1_R10, 423 gen_op_movl_T1_R11, 424 gen_op_movl_T1_R12, 425 gen_op_movl_T1_R13, 426 gen_op_movl_T1_R14, 427 gen_op_movl_T1_R15, 428 #else 429 gen_op_movh_T1_EAX, 430 gen_op_movh_T1_ECX, 431 gen_op_movh_T1_EDX, 432 gen_op_movh_T1_EBX, 433 #endif 434 }, 435 }, 436 [OT_WORD] = { 437 { 438 DEF_REGS(gen_op_movl_T0_, ) 439 }, 440 { 441 DEF_REGS(gen_op_movl_T1_, ) 442 }, 443 }, 444 [OT_LONG] = { 445 { 446 DEF_REGS(gen_op_movl_T0_, ) 447 }, 448 { 449 DEF_REGS(gen_op_movl_T1_, ) 450 }, 451 }, 463 case 2: 464 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg])); 465 tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val); 466 tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg])); 467 break; 468 #endif 469 } 470 } 471 472 static inline void gen_op_add_reg_T0(int size, int reg) 473 { 474 switch(size) { 475 case 0: 476 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg])); 477 tcg_gen_add_tl(cpu_tmp0, cpu_tmp0, cpu_T[0]); 478 tcg_gen_st16_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET); 479 break; 480 case 1: 481 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg])); 482 tcg_gen_add_tl(cpu_tmp0, cpu_tmp0, cpu_T[0]); 452 483 #ifdef TARGET_X86_64 453 [OT_QUAD] = { 454 { 455 DEF_REGS(gen_op_movl_T0_, ) 456 }, 457 { 458 DEF_REGS(gen_op_movl_T1_, ) 459 }, 460 }, 461 #endif 462 }; 463 464 static GenOpFunc *gen_op_movl_A0_reg[CPU_NB_REGS] = { 465 DEF_REGS(gen_op_movl_A0_, ) 466 }; 467 468 static GenOpFunc *gen_op_addl_A0_reg_sN[4][CPU_NB_REGS] = { 469 [0] = { 470 
DEF_REGS(gen_op_addl_A0_, ) 471 }, 472 [1] = { 473 DEF_REGS(gen_op_addl_A0_, _s1) 474 }, 475 [2] = { 476 DEF_REGS(gen_op_addl_A0_, _s2) 477 }, 478 [3] = { 479 DEF_REGS(gen_op_addl_A0_, _s3) 480 }, 481 }; 482 484 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xffffffff); 485 #endif 486 tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg])); 487 break; 483 488 #ifdef TARGET_X86_64 484 static GenOpFunc *gen_op_movq_A0_reg[CPU_NB_REGS] = { 485 DEF_REGS(gen_op_movq_A0_, ) 486 }; 487 488 static GenOpFunc *gen_op_addq_A0_reg_sN[4][CPU_NB_REGS] = { 489 [0] = { 490 DEF_REGS(gen_op_addq_A0_, ) 491 }, 492 [1] = { 493 DEF_REGS(gen_op_addq_A0_, _s1) 494 }, 495 [2] = { 496 DEF_REGS(gen_op_addq_A0_, _s2) 497 }, 498 [3] = { 499 DEF_REGS(gen_op_addq_A0_, _s3) 500 }, 501 }; 502 #endif 503 504 static GenOpFunc *gen_op_cmov_reg_T1_T0[NB_OP_SIZES - 1][CPU_NB_REGS] = { 505 [0] = { 506 DEF_REGS(gen_op_cmovw_, _T1_T0) 507 }, 508 [1] = { 509 DEF_REGS(gen_op_cmovl_, _T1_T0) 510 }, 489 case 2: 490 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg])); 491 tcg_gen_add_tl(cpu_tmp0, cpu_tmp0, cpu_T[0]); 492 tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg])); 493 break; 494 #endif 495 } 496 } 497 498 static inline void gen_op_set_cc_op(int32_t val) 499 { 500 tcg_gen_movi_i32(cpu_cc_op, val); 501 } 502 503 static inline void gen_op_addl_A0_reg_sN(int shift, int reg) 504 { 505 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg])); 506 if (shift != 0) 507 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift); 508 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0); 511 509 #ifdef TARGET_X86_64 512 [2] = { 513 DEF_REGS(gen_op_cmovq_, _T1_T0) 514 }, 515 #endif 516 }; 517 518 static GenOpFunc *gen_op_arith_T0_T1_cc[8] = { 519 NULL, 520 gen_op_orl_T0_T1, 521 NULL, 522 NULL, 523 gen_op_andl_T0_T1, 524 NULL, 525 gen_op_xorl_T0_T1, 526 NULL, 527 }; 528 529 #define DEF_ARITHC(SUFFIX)\ 530 {\ 531 gen_op_adcb ## SUFFIX ## _T0_T1_cc,\ 532 gen_op_sbbb ## SUFFIX ## _T0_T1_cc,\ 533 },\ 534 {\ 535 gen_op_adcw ## SUFFIX ## _T0_T1_cc,\ 536 gen_op_sbbw ## SUFFIX ## _T0_T1_cc,\ 537 },\ 538 {\ 539 gen_op_adcl ## SUFFIX ## _T0_T1_cc,\ 540 gen_op_sbbl ## SUFFIX ## _T0_T1_cc,\ 541 },\ 542 {\ 543 X86_64_ONLY(gen_op_adcq ## SUFFIX ## _T0_T1_cc),\ 544 X86_64_ONLY(gen_op_sbbq ## SUFFIX ## _T0_T1_cc),\ 545 }, 546 547 static GenOpFunc *gen_op_arithc_T0_T1_cc[4][2] = { 548 DEF_ARITHC( ) 549 }; 550 551 static GenOpFunc *gen_op_arithc_mem_T0_T1_cc[3 * 4][2] = { 552 DEF_ARITHC(_raw) 553 #ifndef CONFIG_USER_ONLY 554 DEF_ARITHC(_kernel) 555 DEF_ARITHC(_user) 556 #endif 557 }; 558 559 static const int cc_op_arithb[8] = { 560 CC_OP_ADDB, 561 CC_OP_LOGICB, 562 CC_OP_ADDB, 563 CC_OP_SUBB, 564 CC_OP_LOGICB, 565 CC_OP_SUBB, 566 CC_OP_LOGICB, 567 CC_OP_SUBB, 568 }; 569 570 #define DEF_CMPXCHG(SUFFIX)\ 571 gen_op_cmpxchgb ## SUFFIX ## _T0_T1_EAX_cc,\ 572 gen_op_cmpxchgw ## SUFFIX ## _T0_T1_EAX_cc,\ 573 gen_op_cmpxchgl ## SUFFIX ## _T0_T1_EAX_cc,\ 574 X86_64_ONLY(gen_op_cmpxchgq ## SUFFIX ## _T0_T1_EAX_cc), 575 576 static GenOpFunc *gen_op_cmpxchg_T0_T1_EAX_cc[4] = { 577 DEF_CMPXCHG( ) 578 }; 579 580 static GenOpFunc *gen_op_cmpxchg_mem_T0_T1_EAX_cc[3 * 4] = { 581 DEF_CMPXCHG(_raw) 582 #ifndef CONFIG_USER_ONLY 583 DEF_CMPXCHG(_kernel) 584 DEF_CMPXCHG(_user) 585 #endif 586 }; 587 588 #define DEF_SHIFT(SUFFIX)\ 589 {\ 590 gen_op_rolb ## SUFFIX ## _T0_T1_cc,\ 591 gen_op_rorb ## SUFFIX ## _T0_T1_cc,\ 592 gen_op_rclb ## SUFFIX ## _T0_T1_cc,\ 593 gen_op_rcrb ## SUFFIX ## _T0_T1_cc,\ 594 gen_op_shlb ## SUFFIX ## _T0_T1_cc,\ 595 gen_op_shrb ## SUFFIX ## 
_T0_T1_cc,\ 596 gen_op_shlb ## SUFFIX ## _T0_T1_cc,\ 597 gen_op_sarb ## SUFFIX ## _T0_T1_cc,\ 598 },\ 599 {\ 600 gen_op_rolw ## SUFFIX ## _T0_T1_cc,\ 601 gen_op_rorw ## SUFFIX ## _T0_T1_cc,\ 602 gen_op_rclw ## SUFFIX ## _T0_T1_cc,\ 603 gen_op_rcrw ## SUFFIX ## _T0_T1_cc,\ 604 gen_op_shlw ## SUFFIX ## _T0_T1_cc,\ 605 gen_op_shrw ## SUFFIX ## _T0_T1_cc,\ 606 gen_op_shlw ## SUFFIX ## _T0_T1_cc,\ 607 gen_op_sarw ## SUFFIX ## _T0_T1_cc,\ 608 },\ 609 {\ 610 gen_op_roll ## SUFFIX ## _T0_T1_cc,\ 611 gen_op_rorl ## SUFFIX ## _T0_T1_cc,\ 612 gen_op_rcll ## SUFFIX ## _T0_T1_cc,\ 613 gen_op_rcrl ## SUFFIX ## _T0_T1_cc,\ 614 gen_op_shll ## SUFFIX ## _T0_T1_cc,\ 615 gen_op_shrl ## SUFFIX ## _T0_T1_cc,\ 616 gen_op_shll ## SUFFIX ## _T0_T1_cc,\ 617 gen_op_sarl ## SUFFIX ## _T0_T1_cc,\ 618 },\ 619 {\ 620 X86_64_ONLY(gen_op_rolq ## SUFFIX ## _T0_T1_cc),\ 621 X86_64_ONLY(gen_op_rorq ## SUFFIX ## _T0_T1_cc),\ 622 X86_64_ONLY(gen_op_rclq ## SUFFIX ## _T0_T1_cc),\ 623 X86_64_ONLY(gen_op_rcrq ## SUFFIX ## _T0_T1_cc),\ 624 X86_64_ONLY(gen_op_shlq ## SUFFIX ## _T0_T1_cc),\ 625 X86_64_ONLY(gen_op_shrq ## SUFFIX ## _T0_T1_cc),\ 626 X86_64_ONLY(gen_op_shlq ## SUFFIX ## _T0_T1_cc),\ 627 X86_64_ONLY(gen_op_sarq ## SUFFIX ## _T0_T1_cc),\ 628 }, 629 630 static GenOpFunc *gen_op_shift_T0_T1_cc[4][8] = { 631 DEF_SHIFT( ) 632 }; 633 634 static GenOpFunc *gen_op_shift_mem_T0_T1_cc[3 * 4][8] = { 635 DEF_SHIFT(_raw) 636 #ifndef CONFIG_USER_ONLY 637 DEF_SHIFT(_kernel) 638 DEF_SHIFT(_user) 639 #endif 640 }; 641 642 #define DEF_SHIFTD(SUFFIX, op)\ 643 {\ 644 NULL,\ 645 NULL,\ 646 },\ 647 {\ 648 gen_op_shldw ## SUFFIX ## _T0_T1_ ## op ## _cc,\ 649 gen_op_shrdw ## SUFFIX ## _T0_T1_ ## op ## _cc,\ 650 },\ 651 {\ 652 gen_op_shldl ## SUFFIX ## _T0_T1_ ## op ## _cc,\ 653 gen_op_shrdl ## SUFFIX ## _T0_T1_ ## op ## _cc,\ 654 },\ 655 {\ 656 X86_64_DEF(gen_op_shldq ## SUFFIX ## _T0_T1_ ## op ## _cc,\ 657 gen_op_shrdq ## SUFFIX ## _T0_T1_ ## op ## _cc,)\ 658 }, 659 660 static GenOpFunc1 *gen_op_shiftd_T0_T1_im_cc[4][2] = { 661 DEF_SHIFTD(, im) 662 }; 663 664 static GenOpFunc *gen_op_shiftd_T0_T1_ECX_cc[4][2] = { 665 DEF_SHIFTD(, ECX) 666 }; 667 668 static GenOpFunc1 *gen_op_shiftd_mem_T0_T1_im_cc[3 * 4][2] = { 669 DEF_SHIFTD(_raw, im) 670 #ifndef CONFIG_USER_ONLY 671 DEF_SHIFTD(_kernel, im) 672 DEF_SHIFTD(_user, im) 673 #endif 674 }; 675 676 static GenOpFunc *gen_op_shiftd_mem_T0_T1_ECX_cc[3 * 4][2] = { 677 DEF_SHIFTD(_raw, ECX) 678 #ifndef CONFIG_USER_ONLY 679 DEF_SHIFTD(_kernel, ECX) 680 DEF_SHIFTD(_user, ECX) 681 #endif 682 }; 683 684 static GenOpFunc *gen_op_btx_T0_T1_cc[3][4] = { 685 [0] = { 686 gen_op_btw_T0_T1_cc, 687 gen_op_btsw_T0_T1_cc, 688 gen_op_btrw_T0_T1_cc, 689 gen_op_btcw_T0_T1_cc, 690 }, 691 [1] = { 692 gen_op_btl_T0_T1_cc, 693 gen_op_btsl_T0_T1_cc, 694 gen_op_btrl_T0_T1_cc, 695 gen_op_btcl_T0_T1_cc, 696 }, 510 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff); 511 #endif 512 } 513 514 static inline void gen_op_movl_A0_seg(int reg) 515 { 516 tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUState, segs[reg].base) + REG_L_OFFSET); 517 } 518 519 static inline void gen_op_addl_A0_seg(int reg) 520 { 521 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, segs[reg].base)); 522 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0); 697 523 #ifdef TARGET_X86_64 698 [2] = { 699 gen_op_btq_T0_T1_cc, 700 gen_op_btsq_T0_T1_cc, 701 gen_op_btrq_T0_T1_cc, 702 gen_op_btcq_T0_T1_cc, 703 }, 704 #endif 705 }; 706 707 static GenOpFunc *gen_op_add_bit_A0_T1[3] = { 708 gen_op_add_bitw_A0_T1, 709 gen_op_add_bitl_A0_T1, 710 X86_64_ONLY(gen_op_add_bitq_A0_T1), 711 }; 
712 713 static GenOpFunc *gen_op_bsx_T0_cc[3][2] = { 714 [0] = { 715 gen_op_bsfw_T0_cc, 716 gen_op_bsrw_T0_cc, 717 }, 718 [1] = { 719 gen_op_bsfl_T0_cc, 720 gen_op_bsrl_T0_cc, 721 }, 524 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff); 525 #endif 526 } 527 722 528 #ifdef TARGET_X86_64 723 [2] = { 724 gen_op_bsfq_T0_cc, 725 gen_op_bsrq_T0_cc, 726 }, 727 #endif 728 }; 729 730 static GenOpFunc *gen_op_lds_T0_A0[3 * 4] = { 731 gen_op_ldsb_raw_T0_A0, 732 gen_op_ldsw_raw_T0_A0, 733 X86_64_ONLY(gen_op_ldsl_raw_T0_A0), 734 NULL, 735 #ifndef CONFIG_USER_ONLY 736 gen_op_ldsb_kernel_T0_A0, 737 gen_op_ldsw_kernel_T0_A0, 738 X86_64_ONLY(gen_op_ldsl_kernel_T0_A0), 739 NULL, 740 741 gen_op_ldsb_user_T0_A0, 742 gen_op_ldsw_user_T0_A0, 743 X86_64_ONLY(gen_op_ldsl_user_T0_A0), 744 NULL, 745 #endif 746 }; 747 748 static GenOpFunc *gen_op_ldu_T0_A0[3 * 4] = { 749 gen_op_ldub_raw_T0_A0, 750 gen_op_lduw_raw_T0_A0, 751 NULL, 752 NULL, 753 754 #ifndef CONFIG_USER_ONLY 755 gen_op_ldub_kernel_T0_A0, 756 gen_op_lduw_kernel_T0_A0, 757 NULL, 758 NULL, 759 760 gen_op_ldub_user_T0_A0, 761 gen_op_lduw_user_T0_A0, 762 NULL, 763 NULL, 764 #endif 765 }; 766 767 /* sign does not matter, except for lidt/lgdt call (TODO: fix it) */ 768 static GenOpFunc *gen_op_ld_T0_A0[3 * 4] = { 769 gen_op_ldub_raw_T0_A0, 770 gen_op_lduw_raw_T0_A0, 771 gen_op_ldl_raw_T0_A0, 772 X86_64_ONLY(gen_op_ldq_raw_T0_A0), 773 774 #ifndef CONFIG_USER_ONLY 775 gen_op_ldub_kernel_T0_A0, 776 gen_op_lduw_kernel_T0_A0, 777 gen_op_ldl_kernel_T0_A0, 778 X86_64_ONLY(gen_op_ldq_kernel_T0_A0), 779 780 gen_op_ldub_user_T0_A0, 781 gen_op_lduw_user_T0_A0, 782 gen_op_ldl_user_T0_A0, 783 X86_64_ONLY(gen_op_ldq_user_T0_A0), 784 #endif 785 }; 786 787 static GenOpFunc *gen_op_ld_T1_A0[3 * 4] = { 788 gen_op_ldub_raw_T1_A0, 789 gen_op_lduw_raw_T1_A0, 790 gen_op_ldl_raw_T1_A0, 791 X86_64_ONLY(gen_op_ldq_raw_T1_A0), 792 793 #ifndef CONFIG_USER_ONLY 794 gen_op_ldub_kernel_T1_A0, 795 gen_op_lduw_kernel_T1_A0, 796 gen_op_ldl_kernel_T1_A0, 797 X86_64_ONLY(gen_op_ldq_kernel_T1_A0), 798 799 gen_op_ldub_user_T1_A0, 800 gen_op_lduw_user_T1_A0, 801 gen_op_ldl_user_T1_A0, 802 X86_64_ONLY(gen_op_ldq_user_T1_A0), 803 #endif 804 }; 805 806 static GenOpFunc *gen_op_st_T0_A0[3 * 4] = { 807 gen_op_stb_raw_T0_A0, 808 gen_op_stw_raw_T0_A0, 809 gen_op_stl_raw_T0_A0, 810 X86_64_ONLY(gen_op_stq_raw_T0_A0), 811 812 #ifndef CONFIG_USER_ONLY 813 gen_op_stb_kernel_T0_A0, 814 gen_op_stw_kernel_T0_A0, 815 gen_op_stl_kernel_T0_A0, 816 X86_64_ONLY(gen_op_stq_kernel_T0_A0), 817 818 gen_op_stb_user_T0_A0, 819 gen_op_stw_user_T0_A0, 820 gen_op_stl_user_T0_A0, 821 X86_64_ONLY(gen_op_stq_user_T0_A0), 822 #endif 823 }; 824 825 static GenOpFunc *gen_op_st_T1_A0[3 * 4] = { 826 NULL, 827 gen_op_stw_raw_T1_A0, 828 gen_op_stl_raw_T1_A0, 829 X86_64_ONLY(gen_op_stq_raw_T1_A0), 830 831 #ifndef CONFIG_USER_ONLY 832 NULL, 833 gen_op_stw_kernel_T1_A0, 834 gen_op_stl_kernel_T1_A0, 835 X86_64_ONLY(gen_op_stq_kernel_T1_A0), 836 837 NULL, 838 gen_op_stw_user_T1_A0, 839 gen_op_stl_user_T1_A0, 840 X86_64_ONLY(gen_op_stq_user_T1_A0), 841 #endif 842 }; 843 844 #ifdef VBOX 845 static void gen_check_external_event() 846 { 847 gen_op_check_external_event(); 848 } 849 850 static inline void gen_update_eip(target_ulong pc) 851 { 852 #ifdef TARGET_X86_64 853 if (pc == (uint32_t)pc) { 854 gen_op_movl_eip_im(pc); 855 } else if (pc == (int32_t)pc) { 856 gen_op_movq_eip_im(pc); 857 } else { 858 gen_op_movq_eip_im64(pc >> 32, pc); 529 static inline void gen_op_movq_A0_seg(int reg) 530 { 531 tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUState, 
segs[reg].base)); 532 } 533 534 static inline void gen_op_addq_A0_seg(int reg) 535 { 536 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, segs[reg].base)); 537 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0); 538 } 539 540 static inline void gen_op_movq_A0_reg(int reg) 541 { 542 tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg])); 543 } 544 545 static inline void gen_op_addq_A0_reg_sN(int shift, int reg) 546 { 547 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg])); 548 if (shift != 0) 549 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift); 550 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0); 551 } 552 #endif 553 554 static inline void gen_op_lds_T0_A0(int idx) 555 { 556 int mem_index = (idx >> 2) - 1; 557 switch(idx & 3) { 558 case 0: 559 tcg_gen_qemu_ld8s(cpu_T[0], cpu_A0, mem_index); 560 break; 561 case 1: 562 tcg_gen_qemu_ld16s(cpu_T[0], cpu_A0, mem_index); 563 break; 564 default: 565 case 2: 566 tcg_gen_qemu_ld32s(cpu_T[0], cpu_A0, mem_index); 567 break; 859 568 } 860 #else 861 gen_op_movl_eip_im(pc); 862 #endif 863 } 864 865 #endif /* VBOX */ 569 } 570 571 static inline void gen_op_ld_v(int idx, TCGv t0, TCGv a0) 572 { 573 int mem_index = (idx >> 2) - 1; 574 switch(idx & 3) { 575 case 0: 576 tcg_gen_qemu_ld8u(t0, a0, mem_index); 577 break; 578 case 1: 579 tcg_gen_qemu_ld16u(t0, a0, mem_index); 580 break; 581 case 2: 582 tcg_gen_qemu_ld32u(t0, a0, mem_index); 583 break; 584 default: 585 case 3: 586 tcg_gen_qemu_ld64(t0, a0, mem_index); 587 break; 588 } 589 } 590 591 /* XXX: always use ldu or lds */ 592 static inline void gen_op_ld_T0_A0(int idx) 593 { 594 gen_op_ld_v(idx, cpu_T[0], cpu_A0); 595 } 596 597 static inline void gen_op_ldu_T0_A0(int idx) 598 { 599 gen_op_ld_v(idx, cpu_T[0], cpu_A0); 600 } 601 602 static inline void gen_op_ld_T1_A0(int idx) 603 { 604 gen_op_ld_v(idx, cpu_T[1], cpu_A0); 605 } 606 607 static inline void gen_op_st_v(int idx, TCGv t0, TCGv a0) 608 { 609 int mem_index = (idx >> 2) - 1; 610 switch(idx & 3) { 611 case 0: 612 tcg_gen_qemu_st8(t0, a0, mem_index); 613 break; 614 case 1: 615 tcg_gen_qemu_st16(t0, a0, mem_index); 616 break; 617 case 2: 618 tcg_gen_qemu_st32(t0, a0, mem_index); 619 break; 620 default: 621 case 3: 622 tcg_gen_qemu_st64(t0, a0, mem_index); 623 break; 624 } 625 } 626 627 static inline void gen_op_st_T0_A0(int idx) 628 { 629 gen_op_st_v(idx, cpu_T[0], cpu_A0); 630 } 631 632 static inline void gen_op_st_T1_A0(int idx) 633 { 634 gen_op_st_v(idx, cpu_T[1], cpu_A0); 635 } 866 636 867 637 static inline void gen_jmp_im(target_ulong pc) … … 870 640 gen_check_external_event(); 871 641 #endif /* VBOX */ 872 #ifdef TARGET_X86_64 873 if (pc == (uint32_t)pc) { 874 gen_op_movl_eip_im(pc); 875 } else if (pc == (int32_t)pc) { 876 gen_op_movq_eip_im(pc); 877 } else { 878 gen_op_movq_eip_im64(pc >> 32, pc); 879 } 880 #else 881 gen_op_movl_eip_im(pc); 882 #endif 883 } 642 tcg_gen_movi_tl(cpu_tmp0, pc); 643 tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, eip)); 644 } 645 646 #ifdef VBOX 647 static void gen_check_external_event() 648 { 649 /** @todo: this code is either wrong, or low performing, 650 rewrite flags check in TCG IR */ 651 tcg_gen_helper_0_0(helper_check_external_event); 652 } 653 654 static inline void gen_update_eip(target_ulong pc) 655 { 656 gen_jmp_im(pc); 657 658 } 659 #endif 884 660 885 661 static inline void gen_string_movl_A0_ESI(DisasContext *s) … … 891 667 if (s->aflag == 2) { 892 668 if (override >= 0) { 893 gen_op_movq_A0_seg(o ffsetof(CPUX86State,segs[override].base));894 gen_op_addq_A0_reg_sN [0][R_ESI]();669 
gen_op_movq_A0_seg(override); 670 gen_op_addq_A0_reg_sN(0, R_ESI); 895 671 } else { 896 gen_op_movq_A0_reg [R_ESI]();672 gen_op_movq_A0_reg(R_ESI); 897 673 } 898 674 } else … … 903 679 override = R_DS; 904 680 if (override >= 0) { 905 gen_op_movl_A0_seg(o ffsetof(CPUX86State,segs[override].base));906 gen_op_addl_A0_reg_sN [0][R_ESI]();681 gen_op_movl_A0_seg(override); 682 gen_op_addl_A0_reg_sN(0, R_ESI); 907 683 } else { 908 gen_op_movl_A0_reg [R_ESI]();684 gen_op_movl_A0_reg(R_ESI); 909 685 } 910 686 } else { … … 912 688 if (override < 0) 913 689 override = R_DS; 914 gen_op_movl_A0_reg [R_ESI]();690 gen_op_movl_A0_reg(R_ESI); 915 691 gen_op_andl_A0_ffff(); 916 gen_op_addl_A0_seg(o ffsetof(CPUX86State,segs[override].base));692 gen_op_addl_A0_seg(override); 917 693 } 918 694 } … … 922 698 #ifdef TARGET_X86_64 923 699 if (s->aflag == 2) { 924 gen_op_movq_A0_reg [R_EDI]();700 gen_op_movq_A0_reg(R_EDI); 925 701 } else 926 702 #endif 927 703 if (s->aflag) { 928 704 if (s->addseg) { 929 gen_op_movl_A0_seg( offsetof(CPUX86State,segs[R_ES].base));930 gen_op_addl_A0_reg_sN [0][R_EDI]();705 gen_op_movl_A0_seg(R_ES); 706 gen_op_addl_A0_reg_sN(0, R_EDI); 931 707 } else { 932 gen_op_movl_A0_reg [R_EDI]();708 gen_op_movl_A0_reg(R_EDI); 933 709 } 934 710 } else { 935 gen_op_movl_A0_reg [R_EDI]();711 gen_op_movl_A0_reg(R_EDI); 936 712 gen_op_andl_A0_ffff(); 937 gen_op_addl_A0_seg( offsetof(CPUX86State,segs[R_ES].base));713 gen_op_addl_A0_seg(R_ES); 938 714 } 939 715 } 940 716 941 static GenOpFunc *gen_op_movl_T0_Dshift[4] = { 942 gen_op_movl_T0_Dshiftb, 943 gen_op_movl_T0_Dshiftw, 944 gen_op_movl_T0_Dshiftl, 945 X86_64_ONLY(gen_op_movl_T0_Dshiftq), 717 static inline void gen_op_movl_T0_Dshift(int ot) 718 { 719 tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUState, df)); 720 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot); 946 721 }; 947 722 948 static GenOpFunc1 *gen_op_jnz_ecx[3] = { 949 gen_op_jnz_ecxw, 950 gen_op_jnz_ecxl, 951 X86_64_ONLY(gen_op_jnz_ecxq), 723 static void gen_extu(int ot, TCGv reg) 724 { 725 switch(ot) { 726 case OT_BYTE: 727 tcg_gen_ext8u_tl(reg, reg); 728 break; 729 case OT_WORD: 730 tcg_gen_ext16u_tl(reg, reg); 731 break; 732 case OT_LONG: 733 tcg_gen_ext32u_tl(reg, reg); 734 break; 735 default: 736 break; 737 } 738 } 739 740 static void gen_exts(int ot, TCGv reg) 741 { 742 switch(ot) { 743 case OT_BYTE: 744 tcg_gen_ext8s_tl(reg, reg); 745 break; 746 case OT_WORD: 747 tcg_gen_ext16s_tl(reg, reg); 748 break; 749 case OT_LONG: 750 tcg_gen_ext32s_tl(reg, reg); 751 break; 752 default: 753 break; 754 } 755 } 756 757 static inline void gen_op_jnz_ecx(int size, int label1) 758 { 759 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[R_ECX])); 760 gen_extu(size + 1, cpu_tmp0); 761 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1); 762 } 763 764 static inline void gen_op_jz_ecx(int size, int label1) 765 { 766 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[R_ECX])); 767 gen_extu(size + 1, cpu_tmp0); 768 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1); 769 } 770 771 static void *helper_in_func[3] = { 772 helper_inb, 773 helper_inw, 774 helper_inl, 952 775 }; 953 776 954 static GenOpFunc1 *gen_op_jz_ecx[3] = {955 gen_op_jz_ecxw,956 gen_op_jz_ecxl,957 X86_64_ONLY(gen_op_jz_ecxq),777 static void *helper_out_func[3] = { 778 helper_outb, 779 helper_outw, 780 helper_outl, 958 781 }; 959 782 960 static GenOpFunc *gen_op_dec_ECX[3] = {961 gen_op_decw_ECX,962 gen_op_decl_ECX,963 X86_64_ONLY(gen_op_decq_ECX),783 static void *gen_check_io_func[3] = { 784 helper_check_iob, 785 
helper_check_iow, 786 helper_check_iol, 964 787 }; 965 788 966 static GenOpFunc1 *gen_op_string_jnz_sub[2][4] = { 967 { 968 gen_op_jnz_subb, 969 gen_op_jnz_subw, 970 gen_op_jnz_subl, 971 X86_64_ONLY(gen_op_jnz_subq), 972 }, 973 { 974 gen_op_jz_subb, 975 gen_op_jz_subw, 976 gen_op_jz_subl, 977 X86_64_ONLY(gen_op_jz_subq), 978 }, 979 }; 980 981 static GenOpFunc *gen_op_in_DX_T0[3] = { 982 gen_op_inb_DX_T0, 983 gen_op_inw_DX_T0, 984 gen_op_inl_DX_T0, 985 }; 986 987 static GenOpFunc *gen_op_out_DX_T0[3] = { 988 gen_op_outb_DX_T0, 989 gen_op_outw_DX_T0, 990 gen_op_outl_DX_T0, 991 }; 992 993 static GenOpFunc *gen_op_in[3] = { 994 gen_op_inb_T0_T1, 995 gen_op_inw_T0_T1, 996 gen_op_inl_T0_T1, 997 }; 998 999 static GenOpFunc *gen_op_out[3] = { 1000 gen_op_outb_T0_T1, 1001 gen_op_outw_T0_T1, 1002 gen_op_outl_T0_T1, 1003 }; 1004 1005 static GenOpFunc *gen_check_io_T0[3] = { 1006 gen_op_check_iob_T0, 1007 gen_op_check_iow_T0, 1008 gen_op_check_iol_T0, 1009 }; 1010 1011 static GenOpFunc *gen_check_io_DX[3] = { 1012 gen_op_check_iob_DX, 1013 gen_op_check_iow_DX, 1014 gen_op_check_iol_DX, 1015 }; 1016 1017 static void gen_check_io(DisasContext *s, int ot, int use_dx, target_ulong cur_eip) 1018 { 789 static void gen_check_io(DisasContext *s, int ot, target_ulong cur_eip, 790 uint32_t svm_flags) 791 { 792 int state_saved; 793 target_ulong next_eip; 794 795 state_saved = 0; 1019 796 if (s->pe && (s->cpl > s->iopl || s->vm86)) { 1020 797 if (s->cc_op != CC_OP_DYNAMIC) 1021 798 gen_op_set_cc_op(s->cc_op); 1022 799 gen_jmp_im(cur_eip); 1023 if (use_dx)1024 gen_check_io_DX[ot]();1025 else1026 gen_check_io_T0[ot]();800 state_saved = 1; 801 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); 802 tcg_gen_helper_0_1(gen_check_io_func[ot], 803 cpu_tmp2_i32); 1027 804 } 805 if(s->flags & HF_SVMI_MASK) { 806 if (!state_saved) { 807 if (s->cc_op != CC_OP_DYNAMIC) 808 gen_op_set_cc_op(s->cc_op); 809 gen_jmp_im(cur_eip); 810 state_saved = 1; 811 } 812 svm_flags |= (1 << (4 + ot)); 813 next_eip = s->pc - s->cs_base; 814 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); 815 tcg_gen_helper_0_3(helper_svm_check_io, 816 cpu_tmp2_i32, 817 tcg_const_i32(svm_flags), 818 tcg_const_i32(next_eip - cur_eip)); 819 } 1028 820 } 1029 821 … … 1031 823 { 1032 824 gen_string_movl_A0_ESI(s); 1033 gen_op_ld_T0_A0 [ot + s->mem_index]();825 gen_op_ld_T0_A0(ot + s->mem_index); 1034 826 gen_string_movl_A0_EDI(s); 1035 gen_op_st_T0_A0[ot + s->mem_index](); 1036 gen_op_movl_T0_Dshift[ot](); 1037 #ifdef TARGET_X86_64 1038 if (s->aflag == 2) { 1039 gen_op_addq_ESI_T0(); 1040 gen_op_addq_EDI_T0(); 1041 } else 1042 #endif 1043 if (s->aflag) { 1044 gen_op_addl_ESI_T0(); 1045 gen_op_addl_EDI_T0(); 1046 } else { 1047 gen_op_addw_ESI_T0(); 1048 gen_op_addw_EDI_T0(); 1049 } 827 gen_op_st_T0_A0(ot + s->mem_index); 828 gen_op_movl_T0_Dshift(ot); 829 gen_op_add_reg_T0(s->aflag, R_ESI); 830 gen_op_add_reg_T0(s->aflag, R_EDI); 1050 831 } 1051 832 … … 1058 839 } 1059 840 841 static void gen_op_update1_cc(void) 842 { 843 tcg_gen_discard_tl(cpu_cc_src); 844 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); 845 } 846 847 static void gen_op_update2_cc(void) 848 { 849 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]); 850 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); 851 } 852 853 static inline void gen_op_cmpl_T0_T1_cc(void) 854 { 855 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]); 856 tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]); 857 } 858 859 static inline void gen_op_testl_T0_T1_cc(void) 860 { 861 tcg_gen_discard_tl(cpu_cc_src); 862 tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]); 863 } 864 865 static void 
gen_op_update_neg_cc(void) 866 { 867 tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]); 868 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); 869 } 870 871 /* compute eflags.C to reg */ 872 static void gen_compute_eflags_c(TCGv reg) 873 { 874 #if TCG_TARGET_REG_BITS == 32 875 tcg_gen_shli_i32(cpu_tmp2_i32, cpu_cc_op, 3); 876 tcg_gen_addi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 877 (long)cc_table + offsetof(CCTable, compute_c)); 878 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0); 879 tcg_gen_call(&tcg_ctx, cpu_tmp2_i32, TCG_CALL_PURE, 880 1, &cpu_tmp2_i32, 0, NULL); 881 #else 882 tcg_gen_extu_i32_tl(cpu_tmp1_i64, cpu_cc_op); 883 tcg_gen_shli_i64(cpu_tmp1_i64, cpu_tmp1_i64, 4); 884 tcg_gen_addi_i64(cpu_tmp1_i64, cpu_tmp1_i64, 885 (long)cc_table + offsetof(CCTable, compute_c)); 886 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_tmp1_i64, 0); 887 tcg_gen_call(&tcg_ctx, cpu_tmp1_i64, TCG_CALL_PURE, 888 1, &cpu_tmp2_i32, 0, NULL); 889 #endif 890 tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32); 891 } 892 893 /* compute all eflags to cc_src */ 894 static void gen_compute_eflags(TCGv reg) 895 { 896 #if TCG_TARGET_REG_BITS == 32 897 tcg_gen_shli_i32(cpu_tmp2_i32, cpu_cc_op, 3); 898 tcg_gen_addi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 899 (long)cc_table + offsetof(CCTable, compute_all)); 900 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0); 901 tcg_gen_call(&tcg_ctx, cpu_tmp2_i32, TCG_CALL_PURE, 902 1, &cpu_tmp2_i32, 0, NULL); 903 #else 904 tcg_gen_extu_i32_tl(cpu_tmp1_i64, cpu_cc_op); 905 tcg_gen_shli_i64(cpu_tmp1_i64, cpu_tmp1_i64, 4); 906 tcg_gen_addi_i64(cpu_tmp1_i64, cpu_tmp1_i64, 907 (long)cc_table + offsetof(CCTable, compute_all)); 908 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_tmp1_i64, 0); 909 tcg_gen_call(&tcg_ctx, cpu_tmp1_i64, TCG_CALL_PURE, 910 1, &cpu_tmp2_i32, 0, NULL); 911 #endif 912 tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32); 913 } 914 915 static inline void gen_setcc_slow_T0(DisasContext *s, int jcc_op) 916 { 917 if (s->cc_op != CC_OP_DYNAMIC) 918 gen_op_set_cc_op(s->cc_op); 919 switch(jcc_op) { 920 case JCC_O: 921 gen_compute_eflags(cpu_T[0]); 922 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 11); 923 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1); 924 break; 925 case JCC_B: 926 gen_compute_eflags_c(cpu_T[0]); 927 break; 928 case JCC_Z: 929 gen_compute_eflags(cpu_T[0]); 930 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 6); 931 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1); 932 break; 933 case JCC_BE: 934 gen_compute_eflags(cpu_tmp0); 935 tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 6); 936 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0); 937 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1); 938 break; 939 case JCC_S: 940 gen_compute_eflags(cpu_T[0]); 941 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 7); 942 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1); 943 break; 944 case JCC_P: 945 gen_compute_eflags(cpu_T[0]); 946 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 2); 947 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1); 948 break; 949 case JCC_L: 950 gen_compute_eflags(cpu_tmp0); 951 tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 11); /* CC_O */ 952 tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 7); /* CC_S */ 953 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0); 954 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1); 955 break; 956 default: 957 case JCC_LE: 958 gen_compute_eflags(cpu_tmp0); 959 tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 11); /* CC_O */ 960 tcg_gen_shri_tl(cpu_tmp4, cpu_tmp0, 7); /* CC_S */ 961 tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 6); /* CC_Z */ 962 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp4); 963 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0); 964 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1); 965 break; 966 } 967 } 968 969 /* return true if setcc_slow is not needed (WARNING: must be kept in 
970 sync with gen_jcc1) */ 971 static int is_fast_jcc_case(DisasContext *s, int b) 972 { 973 int jcc_op; 974 jcc_op = (b >> 1) & 7; 975 switch(s->cc_op) { 976 /* we optimize the cmp/jcc case */ 977 case CC_OP_SUBB: 978 case CC_OP_SUBW: 979 case CC_OP_SUBL: 980 case CC_OP_SUBQ: 981 if (jcc_op == JCC_O || jcc_op == JCC_P) 982 goto slow_jcc; 983 break; 984 985 /* some jumps are easy to compute */ 986 case CC_OP_ADDB: 987 case CC_OP_ADDW: 988 case CC_OP_ADDL: 989 case CC_OP_ADDQ: 990 991 case CC_OP_LOGICB: 992 case CC_OP_LOGICW: 993 case CC_OP_LOGICL: 994 case CC_OP_LOGICQ: 995 996 case CC_OP_INCB: 997 case CC_OP_INCW: 998 case CC_OP_INCL: 999 case CC_OP_INCQ: 1000 1001 case CC_OP_DECB: 1002 case CC_OP_DECW: 1003 case CC_OP_DECL: 1004 case CC_OP_DECQ: 1005 1006 case CC_OP_SHLB: 1007 case CC_OP_SHLW: 1008 case CC_OP_SHLL: 1009 case CC_OP_SHLQ: 1010 if (jcc_op != JCC_Z && jcc_op != JCC_S) 1011 goto slow_jcc; 1012 break; 1013 default: 1014 slow_jcc: 1015 return 0; 1016 } 1017 return 1; 1018 } 1019 1020 /* generate a conditional jump to label 'l1' according to jump opcode 1021 value 'b'. In the fast case, T0 is guaranted not to be used. */ 1022 static inline void gen_jcc1(DisasContext *s, int cc_op, int b, int l1) 1023 { 1024 int inv, jcc_op, size, cond; 1025 TCGv t0; 1026 1027 inv = b & 1; 1028 jcc_op = (b >> 1) & 7; 1029 1030 switch(cc_op) { 1031 /* we optimize the cmp/jcc case */ 1032 case CC_OP_SUBB: 1033 case CC_OP_SUBW: 1034 case CC_OP_SUBL: 1035 case CC_OP_SUBQ: 1036 1037 size = cc_op - CC_OP_SUBB; 1038 switch(jcc_op) { 1039 case JCC_Z: 1040 fast_jcc_z: 1041 switch(size) { 1042 case 0: 1043 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xff); 1044 t0 = cpu_tmp0; 1045 break; 1046 case 1: 1047 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xffff); 1048 t0 = cpu_tmp0; 1049 break; 1050 #ifdef TARGET_X86_64 1051 case 2: 1052 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xffffffff); 1053 t0 = cpu_tmp0; 1054 break; 1055 #endif 1056 default: 1057 t0 = cpu_cc_dst; 1058 break; 1059 } 1060 tcg_gen_brcondi_tl(inv ? TCG_COND_NE : TCG_COND_EQ, t0, 0, l1); 1061 break; 1062 case JCC_S: 1063 fast_jcc_s: 1064 switch(size) { 1065 case 0: 1066 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80); 1067 tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0, 1068 0, l1); 1069 break; 1070 case 1: 1071 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x8000); 1072 tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0, 1073 0, l1); 1074 break; 1075 #ifdef TARGET_X86_64 1076 case 2: 1077 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80000000); 1078 tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0, 1079 0, l1); 1080 break; 1081 #endif 1082 default: 1083 tcg_gen_brcondi_tl(inv ? TCG_COND_GE : TCG_COND_LT, cpu_cc_dst, 1084 0, l1); 1085 break; 1086 } 1087 break; 1088 1089 case JCC_B: 1090 cond = inv ? TCG_COND_GEU : TCG_COND_LTU; 1091 goto fast_jcc_b; 1092 case JCC_BE: 1093 cond = inv ? 
TCG_COND_GTU : TCG_COND_LEU; 1094 fast_jcc_b: 1095 tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src); 1096 switch(size) { 1097 case 0: 1098 t0 = cpu_tmp0; 1099 tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xff); 1100 tcg_gen_andi_tl(t0, cpu_cc_src, 0xff); 1101 break; 1102 case 1: 1103 t0 = cpu_tmp0; 1104 tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xffff); 1105 tcg_gen_andi_tl(t0, cpu_cc_src, 0xffff); 1106 break; 1107 #ifdef TARGET_X86_64 1108 case 2: 1109 t0 = cpu_tmp0; 1110 tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xffffffff); 1111 tcg_gen_andi_tl(t0, cpu_cc_src, 0xffffffff); 1112 break; 1113 #endif 1114 default: 1115 t0 = cpu_cc_src; 1116 break; 1117 } 1118 tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1); 1119 break; 1120 1121 case JCC_L: 1122 cond = inv ? TCG_COND_GE : TCG_COND_LT; 1123 goto fast_jcc_l; 1124 case JCC_LE: 1125 cond = inv ? TCG_COND_GT : TCG_COND_LE; 1126 fast_jcc_l: 1127 tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src); 1128 switch(size) { 1129 case 0: 1130 t0 = cpu_tmp0; 1131 tcg_gen_ext8s_tl(cpu_tmp4, cpu_tmp4); 1132 tcg_gen_ext8s_tl(t0, cpu_cc_src); 1133 break; 1134 case 1: 1135 t0 = cpu_tmp0; 1136 tcg_gen_ext16s_tl(cpu_tmp4, cpu_tmp4); 1137 tcg_gen_ext16s_tl(t0, cpu_cc_src); 1138 break; 1139 #ifdef TARGET_X86_64 1140 case 2: 1141 t0 = cpu_tmp0; 1142 tcg_gen_ext32s_tl(cpu_tmp4, cpu_tmp4); 1143 tcg_gen_ext32s_tl(t0, cpu_cc_src); 1144 break; 1145 #endif 1146 default: 1147 t0 = cpu_cc_src; 1148 break; 1149 } 1150 tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1); 1151 break; 1152 1153 default: 1154 goto slow_jcc; 1155 } 1156 break; 1157 1158 /* some jumps are easy to compute */ 1159 case CC_OP_ADDB: 1160 case CC_OP_ADDW: 1161 case CC_OP_ADDL: 1162 case CC_OP_ADDQ: 1163 1164 case CC_OP_ADCB: 1165 case CC_OP_ADCW: 1166 case CC_OP_ADCL: 1167 case CC_OP_ADCQ: 1168 1169 case CC_OP_SBBB: 1170 case CC_OP_SBBW: 1171 case CC_OP_SBBL: 1172 case CC_OP_SBBQ: 1173 1174 case CC_OP_LOGICB: 1175 case CC_OP_LOGICW: 1176 case CC_OP_LOGICL: 1177 case CC_OP_LOGICQ: 1178 1179 case CC_OP_INCB: 1180 case CC_OP_INCW: 1181 case CC_OP_INCL: 1182 case CC_OP_INCQ: 1183 1184 case CC_OP_DECB: 1185 case CC_OP_DECW: 1186 case CC_OP_DECL: 1187 case CC_OP_DECQ: 1188 1189 case CC_OP_SHLB: 1190 case CC_OP_SHLW: 1191 case CC_OP_SHLL: 1192 case CC_OP_SHLQ: 1193 1194 case CC_OP_SARB: 1195 case CC_OP_SARW: 1196 case CC_OP_SARL: 1197 case CC_OP_SARQ: 1198 switch(jcc_op) { 1199 case JCC_Z: 1200 size = (cc_op - CC_OP_ADDB) & 3; 1201 goto fast_jcc_z; 1202 case JCC_S: 1203 size = (cc_op - CC_OP_ADDB) & 3; 1204 goto fast_jcc_s; 1205 default: 1206 goto slow_jcc; 1207 } 1208 break; 1209 default: 1210 slow_jcc: 1211 gen_setcc_slow_T0(s, jcc_op); 1212 tcg_gen_brcondi_tl(inv ? 
TCG_COND_EQ : TCG_COND_NE, 1213 cpu_T[0], 0, l1); 1214 break; 1215 } 1216 } 1217 1060 1218 /* XXX: does not work with gdbstub "ice" single step - not a 1061 1219 serious problem */ … … 1066 1224 l1 = gen_new_label(); 1067 1225 l2 = gen_new_label(); 1068 gen_op_jnz_ecx [s->aflag](l1);1226 gen_op_jnz_ecx(s->aflag, l1); 1069 1227 gen_set_label(l2); 1070 1228 gen_jmp_tb(s, next_eip, 1); … … 1075 1233 static inline void gen_stos(DisasContext *s, int ot) 1076 1234 { 1077 gen_op_mov_TN_reg [OT_LONG][0][R_EAX]();1235 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX); 1078 1236 gen_string_movl_A0_EDI(s); 1079 gen_op_st_T0_A0[ot + s->mem_index](); 1080 gen_op_movl_T0_Dshift[ot](); 1081 #ifdef TARGET_X86_64 1082 if (s->aflag == 2) { 1083 gen_op_addq_EDI_T0(); 1084 } else 1085 #endif 1086 if (s->aflag) { 1087 gen_op_addl_EDI_T0(); 1088 } else { 1089 gen_op_addw_EDI_T0(); 1090 } 1237 gen_op_st_T0_A0(ot + s->mem_index); 1238 gen_op_movl_T0_Dshift(ot); 1239 gen_op_add_reg_T0(s->aflag, R_EDI); 1091 1240 } 1092 1241 … … 1094 1243 { 1095 1244 gen_string_movl_A0_ESI(s); 1096 gen_op_ld_T0_A0[ot + s->mem_index](); 1097 gen_op_mov_reg_T0[ot][R_EAX](); 1098 gen_op_movl_T0_Dshift[ot](); 1099 #ifdef TARGET_X86_64 1100 if (s->aflag == 2) { 1101 gen_op_addq_ESI_T0(); 1102 } else 1103 #endif 1104 if (s->aflag) { 1105 gen_op_addl_ESI_T0(); 1106 } else { 1107 gen_op_addw_ESI_T0(); 1108 } 1245 gen_op_ld_T0_A0(ot + s->mem_index); 1246 gen_op_mov_reg_T0(ot, R_EAX); 1247 gen_op_movl_T0_Dshift(ot); 1248 gen_op_add_reg_T0(s->aflag, R_ESI); 1109 1249 } 1110 1250 1111 1251 static inline void gen_scas(DisasContext *s, int ot) 1112 1252 { 1113 gen_op_mov_TN_reg [OT_LONG][0][R_EAX]();1253 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX); 1114 1254 gen_string_movl_A0_EDI(s); 1115 gen_op_ld_T1_A0 [ot + s->mem_index]();1255 gen_op_ld_T1_A0(ot + s->mem_index); 1116 1256 gen_op_cmpl_T0_T1_cc(); 1117 gen_op_movl_T0_Dshift[ot](); 1118 #ifdef TARGET_X86_64 1119 if (s->aflag == 2) { 1120 gen_op_addq_EDI_T0(); 1121 } else 1122 #endif 1123 if (s->aflag) { 1124 gen_op_addl_EDI_T0(); 1125 } else { 1126 gen_op_addw_EDI_T0(); 1127 } 1257 gen_op_movl_T0_Dshift(ot); 1258 gen_op_add_reg_T0(s->aflag, R_EDI); 1128 1259 } 1129 1260 … … 1131 1262 { 1132 1263 gen_string_movl_A0_ESI(s); 1133 gen_op_ld_T0_A0 [ot + s->mem_index]();1264 gen_op_ld_T0_A0(ot + s->mem_index); 1134 1265 gen_string_movl_A0_EDI(s); 1135 gen_op_ld_T1_A0 [ot + s->mem_index]();1266 gen_op_ld_T1_A0(ot + s->mem_index); 1136 1267 gen_op_cmpl_T0_T1_cc(); 1137 gen_op_movl_T0_Dshift[ot](); 1138 #ifdef TARGET_X86_64 1139 if (s->aflag == 2) { 1140 gen_op_addq_ESI_T0(); 1141 gen_op_addq_EDI_T0(); 1142 } else 1143 #endif 1144 if (s->aflag) { 1145 gen_op_addl_ESI_T0(); 1146 gen_op_addl_EDI_T0(); 1147 } else { 1148 gen_op_addw_ESI_T0(); 1149 gen_op_addw_EDI_T0(); 1150 } 1268 gen_op_movl_T0_Dshift(ot); 1269 gen_op_add_reg_T0(s->aflag, R_ESI); 1270 gen_op_add_reg_T0(s->aflag, R_EDI); 1151 1271 } 1152 1272 1153 1273 static inline void gen_ins(DisasContext *s, int ot) 1154 1274 { 1275 if (use_icount) 1276 gen_io_start(); 1155 1277 gen_string_movl_A0_EDI(s); 1278 /* Note: we must do this dummy write first to be restartable in 1279 case of page fault. 
*/ 1156 1280 gen_op_movl_T0_0(); 1157 gen_op_st_T0_A0[ot + s->mem_index](); 1158 gen_op_in_DX_T0[ot](); 1159 gen_op_st_T0_A0[ot + s->mem_index](); 1160 gen_op_movl_T0_Dshift[ot](); 1161 #ifdef TARGET_X86_64 1162 if (s->aflag == 2) { 1163 gen_op_addq_EDI_T0(); 1164 } else 1165 #endif 1166 if (s->aflag) { 1167 gen_op_addl_EDI_T0(); 1168 } else { 1169 gen_op_addw_EDI_T0(); 1170 } 1281 gen_op_st_T0_A0(ot + s->mem_index); 1282 gen_op_mov_TN_reg(OT_WORD, 1, R_EDX); 1283 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]); 1284 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff); 1285 tcg_gen_helper_1_1(helper_in_func[ot], cpu_T[0], cpu_tmp2_i32); 1286 gen_op_st_T0_A0(ot + s->mem_index); 1287 gen_op_movl_T0_Dshift(ot); 1288 gen_op_add_reg_T0(s->aflag, R_EDI); 1289 if (use_icount) 1290 gen_io_end(); 1171 1291 } 1172 1292 1173 1293 static inline void gen_outs(DisasContext *s, int ot) 1174 1294 { 1295 if (use_icount) 1296 gen_io_start(); 1175 1297 gen_string_movl_A0_ESI(s); 1176 gen_op_ld_T0_A0[ot + s->mem_index](); 1177 gen_op_out_DX_T0[ot](); 1178 gen_op_movl_T0_Dshift[ot](); 1179 #ifdef TARGET_X86_64 1180 if (s->aflag == 2) { 1181 gen_op_addq_ESI_T0(); 1182 } else 1183 #endif 1184 if (s->aflag) { 1185 gen_op_addl_ESI_T0(); 1186 } else { 1187 gen_op_addw_ESI_T0(); 1188 } 1298 gen_op_ld_T0_A0(ot + s->mem_index); 1299 1300 gen_op_mov_TN_reg(OT_WORD, 1, R_EDX); 1301 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]); 1302 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff); 1303 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]); 1304 tcg_gen_helper_0_2(helper_out_func[ot], cpu_tmp2_i32, cpu_tmp3_i32); 1305 1306 gen_op_movl_T0_Dshift(ot); 1307 gen_op_add_reg_T0(s->aflag, R_ESI); 1308 if (use_icount) 1309 gen_io_end(); 1189 1310 } 1190 1311 … … 1199 1320 l2 = gen_jz_ecx_string(s, next_eip); \ 1200 1321 gen_ ## op(s, ot); \ 1201 gen_op_ dec_ECX[s->aflag]();\1322 gen_op_add_reg_im(s->aflag, R_ECX, -1); \ 1202 1323 /* a loop would cause two single step exceptions if ECX = 1 \ 1203 1324 before rep string_insn */ \ 1204 1325 if (!s->jmp_opt) \ 1205 gen_op_jz_ecx [s->aflag](l2); \1326 gen_op_jz_ecx(s->aflag, l2); \ 1206 1327 gen_jmp(s, cur_eip); \ 1207 1328 } … … 1217 1338 l2 = gen_jz_ecx_string(s, next_eip); \ 1218 1339 gen_ ## op(s, ot); \ 1219 gen_op_ dec_ECX[s->aflag]();\1340 gen_op_add_reg_im(s->aflag, R_ECX, -1); \ 1220 1341 gen_op_set_cc_op(CC_OP_SUBB + ot); \ 1221 gen_ op_string_jnz_sub[nz][ot](l2);\1342 gen_jcc1(s, CC_OP_SUBB + ot, (JCC_Z << 1) | (nz ^ 1), l2); \ 1222 1343 if (!s->jmp_opt) \ 1223 gen_op_jz_ecx [s->aflag](l2); \1344 gen_op_jz_ecx(s->aflag, l2); \ 1224 1345 gen_jmp(s, cur_eip); \ 1225 1346 } … … 1233 1354 GEN_REPZ2(cmps) 1234 1355 1235 enum{1236 JCC_O,1237 JCC_B,1238 JCC_Z,1239 JCC_BE,1240 JCC_S,1241 JCC_P,1242 JCC_L,1243 JCC_LE,1356 static void *helper_fp_arith_ST0_FT0[8] = { 1357 helper_fadd_ST0_FT0, 1358 helper_fmul_ST0_FT0, 1359 helper_fcom_ST0_FT0, 1360 helper_fcom_ST0_FT0, 1361 helper_fsub_ST0_FT0, 1362 helper_fsubr_ST0_FT0, 1363 helper_fdiv_ST0_FT0, 1364 helper_fdivr_ST0_FT0, 1244 1365 }; 1245 1366 1246 static GenOpFunc1 *gen_jcc_sub[4][8] = {1247 [OT_BYTE] = {1248 NULL,1249 gen_op_jb_subb,1250 gen_op_jz_subb,1251 gen_op_jbe_subb,1252 gen_op_js_subb,1253 NULL,1254 gen_op_jl_subb,1255 gen_op_jle_subb,1256 },1257 [OT_WORD] = {1258 NULL,1259 gen_op_jb_subw,1260 gen_op_jz_subw,1261 gen_op_jbe_subw,1262 gen_op_js_subw,1263 NULL,1264 gen_op_jl_subw,1265 gen_op_jle_subw,1266 },1267 [OT_LONG] = {1268 NULL,1269 gen_op_jb_subl,1270 gen_op_jz_subl,1271 gen_op_jbe_subl,1272 gen_op_js_subl,1273 NULL,1274 
gen_op_jl_subl,1275 gen_op_jle_subl,1276 },1277 #ifdef TARGET_X86_641278 [OT_QUAD] = {1279 NULL,1280 BUGGY_64(gen_op_jb_subq),1281 gen_op_jz_subq,1282 BUGGY_64(gen_op_jbe_subq),1283 gen_op_js_subq,1284 NULL,1285 BUGGY_64(gen_op_jl_subq),1286 BUGGY_64(gen_op_jle_subq),1287 },1288 #endif1289 };1290 static GenOpFunc1 *gen_op_loop[3][4] = {1291 [0] = {1292 gen_op_loopnzw,1293 gen_op_loopzw,1294 gen_op_jnz_ecxw,1295 },1296 [1] = {1297 gen_op_loopnzl,1298 gen_op_loopzl,1299 gen_op_jnz_ecxl,1300 },1301 #ifdef TARGET_X86_641302 [2] = {1303 gen_op_loopnzq,1304 gen_op_loopzq,1305 gen_op_jnz_ecxq,1306 },1307 #endif1308 };1309 1310 static GenOpFunc *gen_setcc_slow[8] = {1311 gen_op_seto_T0_cc,1312 gen_op_setb_T0_cc,1313 gen_op_setz_T0_cc,1314 gen_op_setbe_T0_cc,1315 gen_op_sets_T0_cc,1316 gen_op_setp_T0_cc,1317 gen_op_setl_T0_cc,1318 gen_op_setle_T0_cc,1319 };1320 1321 static GenOpFunc *gen_setcc_sub[4][8] = {1322 [OT_BYTE] = {1323 NULL,1324 gen_op_setb_T0_subb,1325 gen_op_setz_T0_subb,1326 gen_op_setbe_T0_subb,1327 gen_op_sets_T0_subb,1328 NULL,1329 gen_op_setl_T0_subb,1330 gen_op_setle_T0_subb,1331 },1332 [OT_WORD] = {1333 NULL,1334 gen_op_setb_T0_subw,1335 gen_op_setz_T0_subw,1336 gen_op_setbe_T0_subw,1337 gen_op_sets_T0_subw,1338 NULL,1339 gen_op_setl_T0_subw,1340 gen_op_setle_T0_subw,1341 },1342 [OT_LONG] = {1343 NULL,1344 gen_op_setb_T0_subl,1345 gen_op_setz_T0_subl,1346 gen_op_setbe_T0_subl,1347 gen_op_sets_T0_subl,1348 NULL,1349 gen_op_setl_T0_subl,1350 gen_op_setle_T0_subl,1351 },1352 #ifdef TARGET_X86_641353 [OT_QUAD] = {1354 NULL,1355 gen_op_setb_T0_subq,1356 gen_op_setz_T0_subq,1357 gen_op_setbe_T0_subq,1358 gen_op_sets_T0_subq,1359 NULL,1360 gen_op_setl_T0_subq,1361 gen_op_setle_T0_subq,1362 },1363 #endif1364 };1365 1366 static GenOpFunc *gen_op_fp_arith_ST0_FT0[8] = {1367 gen_op_fadd_ST0_FT0,1368 gen_op_fmul_ST0_FT0,1369 gen_op_fcom_ST0_FT0,1370 gen_op_fcom_ST0_FT0,1371 gen_op_fsub_ST0_FT0,1372 gen_op_fsubr_ST0_FT0,1373 gen_op_fdiv_ST0_FT0,1374 gen_op_fdivr_ST0_FT0,1375 };1376 1377 1367 /* NOTE the exception in "r" op ordering */ 1378 static GenOpFunc1 *gen_op_fp_arith_STN_ST0[8] = {1379 gen_op_fadd_STN_ST0,1380 gen_op_fmul_STN_ST0,1368 static void *helper_fp_arith_STN_ST0[8] = { 1369 helper_fadd_STN_ST0, 1370 helper_fmul_STN_ST0, 1381 1371 NULL, 1382 1372 NULL, 1383 gen_op_fsubr_STN_ST0,1384 gen_op_fsub_STN_ST0,1385 gen_op_fdivr_STN_ST0,1386 gen_op_fdiv_STN_ST0,1373 helper_fsubr_STN_ST0, 1374 helper_fsub_STN_ST0, 1375 helper_fdivr_STN_ST0, 1376 helper_fdiv_STN_ST0, 1387 1377 }; 1388 1378 … … 1390 1380 static void gen_op(DisasContext *s1, int op, int ot, int d) 1391 1381 { 1392 GenOpFunc *gen_update_cc;1393 1394 1382 if (d != OR_TMP0) { 1395 gen_op_mov_TN_reg [ot][0][d]();1383 gen_op_mov_TN_reg(ot, 0, d); 1396 1384 } else { 1397 gen_op_ld_T0_A0 [ot + s1->mem_index]();1385 gen_op_ld_T0_A0(ot + s1->mem_index); 1398 1386 } 1399 1387 switch(op) { 1400 1388 case OP_ADCL: 1389 if (s1->cc_op != CC_OP_DYNAMIC) 1390 gen_op_set_cc_op(s1->cc_op); 1391 gen_compute_eflags_c(cpu_tmp4); 1392 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]); 1393 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4); 1394 if (d != OR_TMP0) 1395 gen_op_mov_reg_T0(ot, d); 1396 else 1397 gen_op_st_T0_A0(ot + s1->mem_index); 1398 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]); 1399 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); 1400 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4); 1401 tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2); 1402 tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_ADDB + ot); 1403 s1->cc_op = CC_OP_DYNAMIC; 1404 break; 1401 1405 case 
OP_SBBL: 1402 1406 if (s1->cc_op != CC_OP_DYNAMIC) 1403 1407 gen_op_set_cc_op(s1->cc_op); 1404 if (d != OR_TMP0) { 1405 gen_op_arithc_T0_T1_cc[ot][op - OP_ADCL](); 1406 gen_op_mov_reg_T0[ot][d](); 1407 } else { 1408 gen_op_arithc_mem_T0_T1_cc[ot + s1->mem_index][op - OP_ADCL](); 1409 } 1408 gen_compute_eflags_c(cpu_tmp4); 1409 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]); 1410 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_tmp4); 1411 if (d != OR_TMP0) 1412 gen_op_mov_reg_T0(ot, d); 1413 else 1414 gen_op_st_T0_A0(ot + s1->mem_index); 1415 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]); 1416 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); 1417 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4); 1418 tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2); 1419 tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_SUBB + ot); 1410 1420 s1->cc_op = CC_OP_DYNAMIC; 1411 goto the_end;1421 break; 1412 1422 case OP_ADDL: 1413 1423 gen_op_addl_T0_T1(); 1424 if (d != OR_TMP0) 1425 gen_op_mov_reg_T0(ot, d); 1426 else 1427 gen_op_st_T0_A0(ot + s1->mem_index); 1428 gen_op_update2_cc(); 1414 1429 s1->cc_op = CC_OP_ADDB + ot; 1415 gen_update_cc = gen_op_update2_cc;1416 1430 break; 1417 1431 case OP_SUBL: 1418 gen_op_subl_T0_T1(); 1432 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]); 1433 if (d != OR_TMP0) 1434 gen_op_mov_reg_T0(ot, d); 1435 else 1436 gen_op_st_T0_A0(ot + s1->mem_index); 1437 gen_op_update2_cc(); 1419 1438 s1->cc_op = CC_OP_SUBB + ot; 1420 gen_update_cc = gen_op_update2_cc;1421 1439 break; 1422 1440 default: 1423 1441 case OP_ANDL: 1442 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]); 1443 if (d != OR_TMP0) 1444 gen_op_mov_reg_T0(ot, d); 1445 else 1446 gen_op_st_T0_A0(ot + s1->mem_index); 1447 gen_op_update1_cc(); 1448 s1->cc_op = CC_OP_LOGICB + ot; 1449 break; 1424 1450 case OP_ORL: 1451 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]); 1452 if (d != OR_TMP0) 1453 gen_op_mov_reg_T0(ot, d); 1454 else 1455 gen_op_st_T0_A0(ot + s1->mem_index); 1456 gen_op_update1_cc(); 1457 s1->cc_op = CC_OP_LOGICB + ot; 1458 break; 1425 1459 case OP_XORL: 1426 gen_op_arith_T0_T1_cc[op](); 1460 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]); 1461 if (d != OR_TMP0) 1462 gen_op_mov_reg_T0(ot, d); 1463 else 1464 gen_op_st_T0_A0(ot + s1->mem_index); 1465 gen_op_update1_cc(); 1427 1466 s1->cc_op = CC_OP_LOGICB + ot; 1428 gen_update_cc = gen_op_update1_cc;1429 1467 break; 1430 1468 case OP_CMPL: 1431 1469 gen_op_cmpl_T0_T1_cc(); 1432 1470 s1->cc_op = CC_OP_SUBB + ot; 1433 gen_update_cc = NULL;1434 1471 break; 1435 1472 } 1436 if (op != OP_CMPL) {1437 if (d != OR_TMP0)1438 gen_op_mov_reg_T0[ot][d]();1439 else1440 gen_op_st_T0_A0[ot + s1->mem_index]();1441 }1442 /* the flags update must happen after the memory write (precise1443 exception support) */1444 if (gen_update_cc)1445 gen_update_cc();1446 the_end: ;1447 1473 } 1448 1474 … … 1451 1477 { 1452 1478 if (d != OR_TMP0) 1453 gen_op_mov_TN_reg [ot][0][d]();1479 gen_op_mov_TN_reg(ot, 0, d); 1454 1480 else 1455 gen_op_ld_T0_A0 [ot + s1->mem_index]();1481 gen_op_ld_T0_A0(ot + s1->mem_index); 1456 1482 if (s1->cc_op != CC_OP_DYNAMIC) 1457 1483 gen_op_set_cc_op(s1->cc_op); 1458 1484 if (c > 0) { 1459 gen_op_incl_T0();1485 tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1); 1460 1486 s1->cc_op = CC_OP_INCB + ot; 1461 1487 } else { 1462 gen_op_decl_T0();1488 tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1); 1463 1489 s1->cc_op = CC_OP_DECB + ot; 1464 1490 } 1465 1491 if (d != OR_TMP0) 1466 gen_op_mov_reg_T0 [ot][d]();1492 gen_op_mov_reg_T0(ot, d); 1467 1493 else 1468 gen_op_st_T0_A0[ot + s1->mem_index](); 1469 gen_op_update_inc_cc(); 1494 
gen_op_st_T0_A0(ot + s1->mem_index); 1495 gen_compute_eflags_c(cpu_cc_src); 1496 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); 1497 } 1498 1499 static void gen_shift_rm_T1(DisasContext *s, int ot, int op1, 1500 int is_right, int is_arith) 1501 { 1502 target_ulong mask; 1503 int shift_label; 1504 TCGv t0, t1; 1505 1506 if (ot == OT_QUAD) 1507 mask = 0x3f; 1508 else 1509 mask = 0x1f; 1510 1511 /* load */ 1512 if (op1 == OR_TMP0) 1513 gen_op_ld_T0_A0(ot + s->mem_index); 1514 else 1515 gen_op_mov_TN_reg(ot, 0, op1); 1516 1517 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask); 1518 1519 tcg_gen_addi_tl(cpu_tmp5, cpu_T[1], -1); 1520 1521 if (is_right) { 1522 if (is_arith) { 1523 gen_exts(ot, cpu_T[0]); 1524 tcg_gen_sar_tl(cpu_T3, cpu_T[0], cpu_tmp5); 1525 tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]); 1526 } else { 1527 gen_extu(ot, cpu_T[0]); 1528 tcg_gen_shr_tl(cpu_T3, cpu_T[0], cpu_tmp5); 1529 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]); 1530 } 1531 } else { 1532 tcg_gen_shl_tl(cpu_T3, cpu_T[0], cpu_tmp5); 1533 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]); 1534 } 1535 1536 /* store */ 1537 if (op1 == OR_TMP0) 1538 gen_op_st_T0_A0(ot + s->mem_index); 1539 else 1540 gen_op_mov_reg_T0(ot, op1); 1541 1542 /* update eflags if non zero shift */ 1543 if (s->cc_op != CC_OP_DYNAMIC) 1544 gen_op_set_cc_op(s->cc_op); 1545 1546 /* XXX: inefficient */ 1547 t0 = tcg_temp_local_new(TCG_TYPE_TL); 1548 t1 = tcg_temp_local_new(TCG_TYPE_TL); 1549 1550 tcg_gen_mov_tl(t0, cpu_T[0]); 1551 tcg_gen_mov_tl(t1, cpu_T3); 1552 1553 shift_label = gen_new_label(); 1554 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_T[1], 0, shift_label); 1555 1556 tcg_gen_mov_tl(cpu_cc_src, t1); 1557 tcg_gen_mov_tl(cpu_cc_dst, t0); 1558 if (is_right) 1559 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot); 1560 else 1561 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot); 1562 1563 gen_set_label(shift_label); 1564 s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */ 1565 1566 tcg_temp_free(t0); 1567 tcg_temp_free(t1); 1568 } 1569 1570 static void gen_shift_rm_im(DisasContext *s, int ot, int op1, int op2, 1571 int is_right, int is_arith) 1572 { 1573 int mask; 1574 1575 if (ot == OT_QUAD) 1576 mask = 0x3f; 1577 else 1578 mask = 0x1f; 1579 1580 /* load */ 1581 if (op1 == OR_TMP0) 1582 gen_op_ld_T0_A0(ot + s->mem_index); 1583 else 1584 gen_op_mov_TN_reg(ot, 0, op1); 1585 1586 op2 &= mask; 1587 if (op2 != 0) { 1588 if (is_right) { 1589 if (is_arith) { 1590 gen_exts(ot, cpu_T[0]); 1591 tcg_gen_sari_tl(cpu_tmp4, cpu_T[0], op2 - 1); 1592 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], op2); 1593 } else { 1594 gen_extu(ot, cpu_T[0]); 1595 tcg_gen_shri_tl(cpu_tmp4, cpu_T[0], op2 - 1); 1596 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], op2); 1597 } 1598 } else { 1599 tcg_gen_shli_tl(cpu_tmp4, cpu_T[0], op2 - 1); 1600 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], op2); 1601 } 1602 } 1603 1604 /* store */ 1605 if (op1 == OR_TMP0) 1606 gen_op_st_T0_A0(ot + s->mem_index); 1607 else 1608 gen_op_mov_reg_T0(ot, op1); 1609 1610 /* update eflags if non zero shift */ 1611 if (op2 != 0) { 1612 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4); 1613 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); 1614 if (is_right) 1615 s->cc_op = CC_OP_SARB + ot; 1616 else 1617 s->cc_op = CC_OP_SHLB + ot; 1618 } 1619 } 1620 1621 static inline void tcg_gen_lshift(TCGv ret, TCGv arg1, target_long arg2) 1622 { 1623 if (arg2 >= 0) 1624 tcg_gen_shli_tl(ret, arg1, arg2); 1625 else 1626 tcg_gen_shri_tl(ret, arg1, -arg2); 1627 } 1628 1629 /* XXX: add faster immediate case */ 1630 static void gen_rot_rm_T1(DisasContext *s, int ot, int op1, 1631 int is_right) 1632 { 1633 
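    /* Descriptive note (not in the changeset): generates rol/ror without carry.
       The masked count selects the shift amounts, the rotate itself is built
       from a left and a right shift OR'd together, and CF/OF are only
       recomputed when the count is non-zero. */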
target_ulong mask; 1634 int label1, label2, data_bits; 1635 TCGv t0, t1, t2, a0; 1636 1637 /* XXX: inefficient, but we must use local temps */ 1638 t0 = tcg_temp_local_new(TCG_TYPE_TL); 1639 t1 = tcg_temp_local_new(TCG_TYPE_TL); 1640 t2 = tcg_temp_local_new(TCG_TYPE_TL); 1641 a0 = tcg_temp_local_new(TCG_TYPE_TL); 1642 1643 if (ot == OT_QUAD) 1644 mask = 0x3f; 1645 else 1646 mask = 0x1f; 1647 1648 /* load */ 1649 if (op1 == OR_TMP0) { 1650 tcg_gen_mov_tl(a0, cpu_A0); 1651 gen_op_ld_v(ot + s->mem_index, t0, a0); 1652 } else { 1653 gen_op_mov_v_reg(ot, t0, op1); 1654 } 1655 1656 tcg_gen_mov_tl(t1, cpu_T[1]); 1657 1658 tcg_gen_andi_tl(t1, t1, mask); 1659 1660 /* Must test zero case to avoid using undefined behaviour in TCG 1661 shifts. */ 1662 label1 = gen_new_label(); 1663 tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label1); 1664 1665 if (ot <= OT_WORD) 1666 tcg_gen_andi_tl(cpu_tmp0, t1, (1 << (3 + ot)) - 1); 1667 else 1668 tcg_gen_mov_tl(cpu_tmp0, t1); 1669 1670 gen_extu(ot, t0); 1671 tcg_gen_mov_tl(t2, t0); 1672 1673 data_bits = 8 << ot; 1674 /* XXX: rely on behaviour of shifts when operand 2 overflows (XXX: 1675 fix TCG definition) */ 1676 if (is_right) { 1677 tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp0); 1678 tcg_gen_sub_tl(cpu_tmp0, tcg_const_tl(data_bits), cpu_tmp0); 1679 tcg_gen_shl_tl(t0, t0, cpu_tmp0); 1680 } else { 1681 tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp0); 1682 tcg_gen_sub_tl(cpu_tmp0, tcg_const_tl(data_bits), cpu_tmp0); 1683 tcg_gen_shr_tl(t0, t0, cpu_tmp0); 1684 } 1685 tcg_gen_or_tl(t0, t0, cpu_tmp4); 1686 1687 gen_set_label(label1); 1688 /* store */ 1689 if (op1 == OR_TMP0) { 1690 gen_op_st_v(ot + s->mem_index, t0, a0); 1691 } else { 1692 gen_op_mov_reg_v(ot, op1, t0); 1693 } 1694 1695 /* update eflags */ 1696 if (s->cc_op != CC_OP_DYNAMIC) 1697 gen_op_set_cc_op(s->cc_op); 1698 1699 label2 = gen_new_label(); 1700 tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label2); 1701 1702 gen_compute_eflags(cpu_cc_src); 1703 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C)); 1704 tcg_gen_xor_tl(cpu_tmp0, t2, t0); 1705 tcg_gen_lshift(cpu_tmp0, cpu_tmp0, 11 - (data_bits - 1)); 1706 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_O); 1707 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0); 1708 if (is_right) { 1709 tcg_gen_shri_tl(t0, t0, data_bits - 1); 1710 } 1711 tcg_gen_andi_tl(t0, t0, CC_C); 1712 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0); 1713 1714 tcg_gen_discard_tl(cpu_cc_dst); 1715 tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS); 1716 1717 gen_set_label(label2); 1718 s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */ 1719 1720 tcg_temp_free(t0); 1721 tcg_temp_free(t1); 1722 tcg_temp_free(t2); 1723 tcg_temp_free(a0); 1724 } 1725 1726 static void *helper_rotc[8] = { 1727 helper_rclb, 1728 helper_rclw, 1729 helper_rcll, 1730 X86_64_ONLY(helper_rclq), 1731 helper_rcrb, 1732 helper_rcrw, 1733 helper_rcrl, 1734 X86_64_ONLY(helper_rcrq), 1735 }; 1736 1737 /* XXX: add faster immediate = 1 case */ 1738 static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1, 1739 int is_right) 1740 { 1741 int label1; 1742 1743 if (s->cc_op != CC_OP_DYNAMIC) 1744 gen_op_set_cc_op(s->cc_op); 1745 1746 /* load */ 1747 if (op1 == OR_TMP0) 1748 gen_op_ld_T0_A0(ot + s->mem_index); 1749 else 1750 gen_op_mov_TN_reg(ot, 0, op1); 1751 1752 tcg_gen_helper_1_2(helper_rotc[ot + (is_right * 4)], 1753 cpu_T[0], cpu_T[0], cpu_T[1]); 1754 /* store */ 1755 if (op1 == OR_TMP0) 1756 gen_op_st_T0_A0(ot + s->mem_index); 1757 else 1758 gen_op_mov_reg_T0(ot, op1); 1759 1760 /* update eflags */ 1761 label1 = gen_new_label(); 1762 
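    /* Descriptive note (not in the changeset): cpu_cc_tmp == -1 is treated as
       the "flags unchanged" marker here; cc_src/cc_op are only committed when
       the rotate helper left a real value behind. */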
tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cc_tmp, -1, label1); 1763 1764 tcg_gen_mov_tl(cpu_cc_src, cpu_cc_tmp); 1765 tcg_gen_discard_tl(cpu_cc_dst); 1766 tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS); 1767 1768 gen_set_label(label1); 1769 s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */ 1770 } 1771 1772 /* XXX: add faster immediate case */ 1773 static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot, int op1, 1774 int is_right) 1775 { 1776 int label1, label2, data_bits; 1777 target_ulong mask; 1778 TCGv t0, t1, t2, a0; 1779 1780 t0 = tcg_temp_local_new(TCG_TYPE_TL); 1781 t1 = tcg_temp_local_new(TCG_TYPE_TL); 1782 t2 = tcg_temp_local_new(TCG_TYPE_TL); 1783 a0 = tcg_temp_local_new(TCG_TYPE_TL); 1784 1785 if (ot == OT_QUAD) 1786 mask = 0x3f; 1787 else 1788 mask = 0x1f; 1789 1790 /* load */ 1791 if (op1 == OR_TMP0) { 1792 tcg_gen_mov_tl(a0, cpu_A0); 1793 gen_op_ld_v(ot + s->mem_index, t0, a0); 1794 } else { 1795 gen_op_mov_v_reg(ot, t0, op1); 1796 } 1797 1798 tcg_gen_andi_tl(cpu_T3, cpu_T3, mask); 1799 1800 tcg_gen_mov_tl(t1, cpu_T[1]); 1801 tcg_gen_mov_tl(t2, cpu_T3); 1802 1803 /* Must test zero case to avoid using undefined behaviour in TCG 1804 shifts. */ 1805 label1 = gen_new_label(); 1806 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1); 1807 1808 tcg_gen_addi_tl(cpu_tmp5, t2, -1); 1809 if (ot == OT_WORD) { 1810 /* Note: we implement the Intel behaviour for shift count > 16 */ 1811 if (is_right) { 1812 tcg_gen_andi_tl(t0, t0, 0xffff); 1813 tcg_gen_shli_tl(cpu_tmp0, t1, 16); 1814 tcg_gen_or_tl(t0, t0, cpu_tmp0); 1815 tcg_gen_ext32u_tl(t0, t0); 1816 1817 tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5); 1818 1819 /* only needed if count > 16, but a test would complicate */ 1820 tcg_gen_sub_tl(cpu_tmp5, tcg_const_tl(32), t2); 1821 tcg_gen_shl_tl(cpu_tmp0, t0, cpu_tmp5); 1822 1823 tcg_gen_shr_tl(t0, t0, t2); 1824 1825 tcg_gen_or_tl(t0, t0, cpu_tmp0); 1826 } else { 1827 /* XXX: not optimal */ 1828 tcg_gen_andi_tl(t0, t0, 0xffff); 1829 tcg_gen_shli_tl(t1, t1, 16); 1830 tcg_gen_or_tl(t1, t1, t0); 1831 tcg_gen_ext32u_tl(t1, t1); 1832 1833 tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5); 1834 tcg_gen_sub_tl(cpu_tmp0, tcg_const_tl(32), cpu_tmp5); 1835 tcg_gen_shr_tl(cpu_tmp6, t1, cpu_tmp0); 1836 tcg_gen_or_tl(cpu_tmp4, cpu_tmp4, cpu_tmp6); 1837 1838 tcg_gen_shl_tl(t0, t0, t2); 1839 tcg_gen_sub_tl(cpu_tmp5, tcg_const_tl(32), t2); 1840 tcg_gen_shr_tl(t1, t1, cpu_tmp5); 1841 tcg_gen_or_tl(t0, t0, t1); 1842 } 1843 } else { 1844 data_bits = 8 << ot; 1845 if (is_right) { 1846 if (ot == OT_LONG) 1847 tcg_gen_ext32u_tl(t0, t0); 1848 1849 tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5); 1850 1851 tcg_gen_shr_tl(t0, t0, t2); 1852 tcg_gen_sub_tl(cpu_tmp5, tcg_const_tl(data_bits), t2); 1853 tcg_gen_shl_tl(t1, t1, cpu_tmp5); 1854 tcg_gen_or_tl(t0, t0, t1); 1855 1856 } else { 1857 if (ot == OT_LONG) 1858 tcg_gen_ext32u_tl(t1, t1); 1859 1860 tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5); 1861 1862 tcg_gen_shl_tl(t0, t0, t2); 1863 tcg_gen_sub_tl(cpu_tmp5, tcg_const_tl(data_bits), t2); 1864 tcg_gen_shr_tl(t1, t1, cpu_tmp5); 1865 tcg_gen_or_tl(t0, t0, t1); 1866 } 1867 } 1868 tcg_gen_mov_tl(t1, cpu_tmp4); 1869 1870 gen_set_label(label1); 1871 /* store */ 1872 if (op1 == OR_TMP0) { 1873 gen_op_st_v(ot + s->mem_index, t0, a0); 1874 } else { 1875 gen_op_mov_reg_v(ot, op1, t0); 1876 } 1877 1878 /* update eflags */ 1879 if (s->cc_op != CC_OP_DYNAMIC) 1880 gen_op_set_cc_op(s->cc_op); 1881 1882 label2 = gen_new_label(); 1883 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label2); 1884 1885 tcg_gen_mov_tl(cpu_cc_src, t1); 1886 tcg_gen_mov_tl(cpu_cc_dst, t0); 1887 if 
(is_right) { 1888 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot); 1889 } else { 1890 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot); 1891 } 1892 gen_set_label(label2); 1893 s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */ 1894 1895 tcg_temp_free(t0); 1896 tcg_temp_free(t1); 1897 tcg_temp_free(t2); 1898 tcg_temp_free(a0); 1470 1899 } 1471 1900 1472 1901 static void gen_shift(DisasContext *s1, int op, int ot, int d, int s) 1473 1902 { 1474 if (d != OR_TMP0)1475 gen_op_mov_TN_reg[ot][0][d]();1476 else1477 gen_op_ld_T0_A0[ot + s1->mem_index]();1478 1903 if (s != OR_TMP1) 1479 gen_op_mov_TN_reg[ot][1][s](); 1480 /* for zero counts, flags are not updated, so must do it dynamically */ 1481 if (s1->cc_op != CC_OP_DYNAMIC) 1482 gen_op_set_cc_op(s1->cc_op); 1483 1484 if (d != OR_TMP0) 1485 gen_op_shift_T0_T1_cc[ot][op](); 1486 else 1487 gen_op_shift_mem_T0_T1_cc[ot + s1->mem_index][op](); 1488 if (d != OR_TMP0) 1489 gen_op_mov_reg_T0[ot][d](); 1490 s1->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */ 1904 gen_op_mov_TN_reg(ot, 1, s); 1905 switch(op) { 1906 case OP_ROL: 1907 gen_rot_rm_T1(s1, ot, d, 0); 1908 break; 1909 case OP_ROR: 1910 gen_rot_rm_T1(s1, ot, d, 1); 1911 break; 1912 case OP_SHL: 1913 case OP_SHL1: 1914 gen_shift_rm_T1(s1, ot, d, 0, 0); 1915 break; 1916 case OP_SHR: 1917 gen_shift_rm_T1(s1, ot, d, 1, 0); 1918 break; 1919 case OP_SAR: 1920 gen_shift_rm_T1(s1, ot, d, 1, 1); 1921 break; 1922 case OP_RCL: 1923 gen_rotc_rm_T1(s1, ot, d, 0); 1924 break; 1925 case OP_RCR: 1926 gen_rotc_rm_T1(s1, ot, d, 1); 1927 break; 1928 } 1491 1929 } 1492 1930 1493 1931 static void gen_shifti(DisasContext *s1, int op, int ot, int d, int c) 1494 1932 { 1495 /* currently not optimized */ 1496 gen_op_movl_T1_im(c); 1497 gen_shift(s1, op, ot, d, OR_TMP1); 1933 switch(op) { 1934 case OP_SHL: 1935 case OP_SHL1: 1936 gen_shift_rm_im(s1, ot, d, c, 0, 0); 1937 break; 1938 case OP_SHR: 1939 gen_shift_rm_im(s1, ot, d, c, 1, 0); 1940 break; 1941 case OP_SAR: 1942 gen_shift_rm_im(s1, ot, d, c, 1, 1); 1943 break; 1944 default: 1945 /* currently not optimized */ 1946 gen_op_movl_T1_im(c); 1947 gen_shift(s1, op, ot, d, OR_TMP1); 1948 break; 1949 } 1498 1950 } 1499 1951 … … 1560 2012 #ifdef TARGET_X86_64 1561 2013 if (s->aflag == 2) { 1562 gen_op_movq_A0_reg [base]();2014 gen_op_movq_A0_reg(base); 1563 2015 if (disp != 0) { 1564 2016 gen_op_addq_A0_im(disp); … … 1567 2019 #endif 1568 2020 { 1569 gen_op_movl_A0_reg [base]();2021 gen_op_movl_A0_reg(base); 1570 2022 if (disp != 0) 1571 2023 gen_op_addl_A0_im(disp); … … 1585 2037 #ifdef TARGET_X86_64 1586 2038 if (s->aflag == 2) { 1587 gen_op_addq_A0_reg_sN [scale][index]();2039 gen_op_addq_A0_reg_sN(scale, index); 1588 2040 } else 1589 2041 #endif 1590 2042 { 1591 gen_op_addl_A0_reg_sN [scale][index]();2043 gen_op_addl_A0_reg_sN(scale, index); 1592 2044 } 1593 2045 } … … 1601 2053 #ifdef TARGET_X86_64 1602 2054 if (s->aflag == 2) { 1603 gen_op_addq_A0_seg(o ffsetof(CPUX86State,segs[override].base));2055 gen_op_addq_A0_seg(override); 1604 2056 } else 1605 2057 #endif 1606 2058 { 1607 gen_op_addl_A0_seg(o ffsetof(CPUX86State,segs[override].base));2059 gen_op_addl_A0_seg(override); 1608 2060 } 1609 2061 } … … 1632 2084 switch(rm) { 1633 2085 case 0: 1634 gen_op_movl_A0_reg [R_EBX]();1635 gen_op_addl_A0_reg_sN [0][R_ESI]();2086 gen_op_movl_A0_reg(R_EBX); 2087 gen_op_addl_A0_reg_sN(0, R_ESI); 1636 2088 break; 1637 2089 case 1: 1638 gen_op_movl_A0_reg [R_EBX]();1639 gen_op_addl_A0_reg_sN [0][R_EDI]();2090 gen_op_movl_A0_reg(R_EBX); 2091 gen_op_addl_A0_reg_sN(0, 
R_EDI); 1640 2092 break; 1641 2093 case 2: 1642 gen_op_movl_A0_reg [R_EBP]();1643 gen_op_addl_A0_reg_sN [0][R_ESI]();2094 gen_op_movl_A0_reg(R_EBP); 2095 gen_op_addl_A0_reg_sN(0, R_ESI); 1644 2096 break; 1645 2097 case 3: 1646 gen_op_movl_A0_reg [R_EBP]();1647 gen_op_addl_A0_reg_sN [0][R_EDI]();2098 gen_op_movl_A0_reg(R_EBP); 2099 gen_op_addl_A0_reg_sN(0, R_EDI); 1648 2100 break; 1649 2101 case 4: 1650 gen_op_movl_A0_reg [R_ESI]();2102 gen_op_movl_A0_reg(R_ESI); 1651 2103 break; 1652 2104 case 5: 1653 gen_op_movl_A0_reg [R_EDI]();2105 gen_op_movl_A0_reg(R_EDI); 1654 2106 break; 1655 2107 case 6: 1656 gen_op_movl_A0_reg [R_EBP]();2108 gen_op_movl_A0_reg(R_EBP); 1657 2109 break; 1658 2110 default: 1659 2111 case 7: 1660 gen_op_movl_A0_reg [R_EBX]();2112 gen_op_movl_A0_reg(R_EBX); 1661 2113 break; 1662 2114 } … … 1672 2124 override = R_DS; 1673 2125 } 1674 gen_op_addl_A0_seg(o ffsetof(CPUX86State,segs[override].base));2126 gen_op_addl_A0_seg(override); 1675 2127 } 1676 2128 } … … 1747 2199 #ifdef TARGET_X86_64 1748 2200 if (CODE64(s)) { 1749 gen_op_addq_A0_seg(o ffsetof(CPUX86State,segs[override].base));2201 gen_op_addq_A0_seg(override); 1750 2202 } else 1751 2203 #endif 1752 2204 { 1753 gen_op_addl_A0_seg(o ffsetof(CPUX86State,segs[override].base));2205 gen_op_addl_A0_seg(override); 1754 2206 } 1755 2207 } 1756 2208 } 1757 2209 1758 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg !=2210 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg == 1759 2211 OR_TMP0 */ 1760 2212 static void gen_ldst_modrm(DisasContext *s, int modrm, int ot, int reg, int is_store) … … 1767 2219 if (is_store) { 1768 2220 if (reg != OR_TMP0) 1769 gen_op_mov_TN_reg [ot][0][reg]();1770 gen_op_mov_reg_T0 [ot][rm]();2221 gen_op_mov_TN_reg(ot, 0, reg); 2222 gen_op_mov_reg_T0(ot, rm); 1771 2223 } else { 1772 gen_op_mov_TN_reg [ot][0][rm]();2224 gen_op_mov_TN_reg(ot, 0, rm); 1773 2225 if (reg != OR_TMP0) 1774 gen_op_mov_reg_T0 [ot][reg]();2226 gen_op_mov_reg_T0(ot, reg); 1775 2227 } 1776 2228 } else { … … 1778 2230 if (is_store) { 1779 2231 if (reg != OR_TMP0) 1780 gen_op_mov_TN_reg [ot][0][reg]();1781 gen_op_st_T0_A0 [ot + s->mem_index]();2232 gen_op_mov_TN_reg(ot, 0, reg); 2233 gen_op_st_T0_A0(ot + s->mem_index); 1782 2234 } else { 1783 gen_op_ld_T0_A0 [ot + s->mem_index]();2235 gen_op_ld_T0_A0(ot + s->mem_index); 1784 2236 if (reg != OR_TMP0) 1785 gen_op_mov_reg_T0 [ot][reg]();2237 gen_op_mov_reg_T0(ot, reg); 1786 2238 } 1787 2239 } … … 1829 2281 (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) { 1830 2282 /* jump to same page: we can use a direct jump */ 1831 if (tb_num == 0) 1832 gen_op_goto_tb0(TBPARAM(tb)); 1833 else 1834 gen_op_goto_tb1(TBPARAM(tb)); 2283 tcg_gen_goto_tb(tb_num); 1835 2284 gen_jmp_im(eip); 1836 gen_op_movl_T0_im((long)tb + tb_num); 1837 gen_op_exit_tb(); 2285 tcg_gen_exit_tb((long)tb + tb_num); 1838 2286 } else { 1839 2287 /* jump to another page: currently not optimized */ … … 1846 2294 target_ulong val, target_ulong next_eip) 1847 2295 { 1848 TranslationBlock *tb; 1849 int inv, jcc_op; 1850 GenOpFunc1 *func; 1851 target_ulong tmp; 1852 int l1, l2; 1853 1854 inv = b & 1; 1855 jcc_op = (b >> 1) & 7; 1856 2296 int l1, l2, cc_op; 2297 2298 cc_op = s->cc_op; 2299 if (s->cc_op != CC_OP_DYNAMIC) { 2300 gen_op_set_cc_op(s->cc_op); 2301 s->cc_op = CC_OP_DYNAMIC; 2302 } 1857 2303 if (s->jmp_opt) { 1858 2304 #ifdef VBOX 1859 2305 gen_check_external_event(s); 1860 2306 #endif /* VBOX */ 1861 switch(s->cc_op) {1862 /* we optimize the cmp/jcc case */1863 case 
CC_OP_SUBB:1864 case CC_OP_SUBW:1865 case CC_OP_SUBL:1866 case CC_OP_SUBQ:1867 func = gen_jcc_sub[s->cc_op - CC_OP_SUBB][jcc_op];1868 break;1869 1870 /* some jumps are easy to compute */1871 case CC_OP_ADDB:1872 case CC_OP_ADDW:1873 case CC_OP_ADDL:1874 case CC_OP_ADDQ:1875 1876 case CC_OP_ADCB:1877 case CC_OP_ADCW:1878 case CC_OP_ADCL:1879 case CC_OP_ADCQ:1880 1881 case CC_OP_SBBB:1882 case CC_OP_SBBW:1883 case CC_OP_SBBL:1884 case CC_OP_SBBQ:1885 1886 case CC_OP_LOGICB:1887 case CC_OP_LOGICW:1888 case CC_OP_LOGICL:1889 case CC_OP_LOGICQ:1890 1891 case CC_OP_INCB:1892 case CC_OP_INCW:1893 case CC_OP_INCL:1894 case CC_OP_INCQ:1895 1896 case CC_OP_DECB:1897 case CC_OP_DECW:1898 case CC_OP_DECL:1899 case CC_OP_DECQ:1900 1901 case CC_OP_SHLB:1902 case CC_OP_SHLW:1903 case CC_OP_SHLL:1904 case CC_OP_SHLQ:1905 1906 case CC_OP_SARB:1907 case CC_OP_SARW:1908 case CC_OP_SARL:1909 case CC_OP_SARQ:1910 switch(jcc_op) {1911 case JCC_Z:1912 func = gen_jcc_sub[(s->cc_op - CC_OP_ADDB) % 4][jcc_op];1913 break;1914 case JCC_S:1915 func = gen_jcc_sub[(s->cc_op - CC_OP_ADDB) % 4][jcc_op];1916 break;1917 default:1918 func = NULL;1919 break;1920 }1921 break;1922 default:1923 func = NULL;1924 break;1925 }1926 1927 if (s->cc_op != CC_OP_DYNAMIC) {1928 gen_op_set_cc_op(s->cc_op);1929 s->cc_op = CC_OP_DYNAMIC;1930 }1931 1932 if (!func) {1933 gen_setcc_slow[jcc_op]();1934 func = gen_op_jnz_T0_label;1935 }1936 1937 if (inv) {1938 tmp = val;1939 val = next_eip;1940 next_eip = tmp;1941 }1942 tb = s->tb;1943 1944 2307 l1 = gen_new_label(); 1945 func(l1);1946 2308 gen_jcc1(s, cc_op, b, l1); 2309 1947 2310 gen_goto_tb(s, 0, next_eip); 1948 2311 1949 2312 gen_set_label(l1); 1950 2313 gen_goto_tb(s, 1, val); 1951 1952 2314 s->is_jmp = 3; 1953 2315 } else { 1954 2316 1955 if (s->cc_op != CC_OP_DYNAMIC) {1956 gen_op_set_cc_op(s->cc_op);1957 s->cc_op = CC_OP_DYNAMIC;1958 }1959 gen_setcc_slow[jcc_op]();1960 if (inv) {1961 tmp = val;1962 val = next_eip;1963 next_eip = tmp;1964 }1965 2317 l1 = gen_new_label(); 1966 2318 l2 = gen_new_label(); 1967 gen_op_jnz_T0_label(l1); 2319 gen_jcc1(s, cc_op, b, l1); 2320 1968 2321 gen_jmp_im(next_eip); 1969 gen_op_jmp_label(l2); 2322 tcg_gen_br(l2); 2323 1970 2324 gen_set_label(l1); 1971 2325 gen_jmp_im(val); … … 1977 2331 static void gen_setcc(DisasContext *s, int b) 1978 2332 { 1979 int inv, jcc_op; 1980 GenOpFunc *func; 1981 1982 inv = b & 1; 1983 jcc_op = (b >> 1) & 7; 1984 switch(s->cc_op) { 1985 /* we optimize the cmp/jcc case */ 1986 case CC_OP_SUBB: 1987 case CC_OP_SUBW: 1988 case CC_OP_SUBL: 1989 case CC_OP_SUBQ: 1990 func = gen_setcc_sub[s->cc_op - CC_OP_SUBB][jcc_op]; 1991 if (!func) 1992 goto slow_jcc; 1993 break; 1994 1995 /* some jumps are easy to compute */ 1996 case CC_OP_ADDB: 1997 case CC_OP_ADDW: 1998 case CC_OP_ADDL: 1999 case CC_OP_ADDQ: 2000 2001 case CC_OP_LOGICB: 2002 case CC_OP_LOGICW: 2003 case CC_OP_LOGICL: 2004 case CC_OP_LOGICQ: 2005 2006 case CC_OP_INCB: 2007 case CC_OP_INCW: 2008 case CC_OP_INCL: 2009 case CC_OP_INCQ: 2010 2011 case CC_OP_DECB: 2012 case CC_OP_DECW: 2013 case CC_OP_DECL: 2014 case CC_OP_DECQ: 2015 2016 case CC_OP_SHLB: 2017 case CC_OP_SHLW: 2018 case CC_OP_SHLL: 2019 case CC_OP_SHLQ: 2020 switch(jcc_op) { 2021 case JCC_Z: 2022 func = gen_setcc_sub[(s->cc_op - CC_OP_ADDB) % 4][jcc_op]; 2023 break; 2024 case JCC_S: 2025 func = gen_setcc_sub[(s->cc_op - CC_OP_ADDB) % 4][jcc_op]; 2026 break; 2027 default: 2028 goto slow_jcc; 2029 } 2030 break; 2031 default: 2032 slow_jcc: 2033 if (s->cc_op != CC_OP_DYNAMIC) 2034 gen_op_set_cc_op(s->cc_op); 2035 
func = gen_setcc_slow[jcc_op]; 2036 break; 2333 int inv, jcc_op, l1; 2334 TCGv t0; 2335 2336 if (is_fast_jcc_case(s, b)) { 2337 /* nominal case: we use a jump */ 2338 /* XXX: make it faster by adding new instructions in TCG */ 2339 t0 = tcg_temp_local_new(TCG_TYPE_TL); 2340 tcg_gen_movi_tl(t0, 0); 2341 l1 = gen_new_label(); 2342 gen_jcc1(s, s->cc_op, b ^ 1, l1); 2343 tcg_gen_movi_tl(t0, 1); 2344 gen_set_label(l1); 2345 tcg_gen_mov_tl(cpu_T[0], t0); 2346 tcg_temp_free(t0); 2347 } else { 2348 /* slow case: it is more efficient not to generate a jump, 2349 although it is questionnable whether this optimization is 2350 worth to */ 2351 inv = b & 1; 2352 jcc_op = (b >> 1) & 7; 2353 gen_setcc_slow_T0(s, jcc_op); 2354 if (inv) { 2355 tcg_gen_xori_tl(cpu_T[0], cpu_T[0], 1); 2356 } 2037 2357 } 2038 func(); 2039 if (inv) { 2040 gen_op_xor_T0_1(); 2041 } 2358 } 2359 2360 static inline void gen_op_movl_T0_seg(int seg_reg) 2361 { 2362 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, 2363 offsetof(CPUX86State,segs[seg_reg].selector)); 2364 } 2365 2366 static inline void gen_op_movl_seg_T0_vm(int seg_reg) 2367 { 2368 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff); 2369 tcg_gen_st32_tl(cpu_T[0], cpu_env, 2370 offsetof(CPUX86State,segs[seg_reg].selector)); 2371 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4); 2372 tcg_gen_st_tl(cpu_T[0], cpu_env, 2373 offsetof(CPUX86State,segs[seg_reg].base)); 2042 2374 } 2043 2375 … … 2051 2383 gen_op_set_cc_op(s->cc_op); 2052 2384 gen_jmp_im(cur_eip); 2053 gen_op_movl_seg_T0(seg_reg); 2385 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); 2386 tcg_gen_helper_0_2(helper_load_seg, tcg_const_i32(seg_reg), cpu_tmp2_i32); 2054 2387 /* abort translation because the addseg value may change or 2055 2388 because ss32 may change. For R_SS, translation must always … … 2059 2392 s->is_jmp = 3; 2060 2393 } else { 2061 gen_op_movl_seg_T0_vm( offsetof(CPUX86State,segs[seg_reg]));2394 gen_op_movl_seg_T0_vm(seg_reg); 2062 2395 if (seg_reg == R_SS) 2063 2396 s->is_jmp = 3; … … 2065 2398 } 2066 2399 2400 static inline int svm_is_rep(int prefixes) 2401 { 2402 return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 
8 : 0); 2403 } 2404 2405 static inline void 2406 gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start, 2407 uint32_t type, uint64_t param) 2408 { 2409 /* no SVM activated; fast case */ 2410 if (likely(!(s->flags & HF_SVMI_MASK))) 2411 return; 2412 if (s->cc_op != CC_OP_DYNAMIC) 2413 gen_op_set_cc_op(s->cc_op); 2414 gen_jmp_im(pc_start - s->cs_base); 2415 tcg_gen_helper_0_2(helper_svm_check_intercept_param, 2416 tcg_const_i32(type), tcg_const_i64(param)); 2417 } 2418 2419 static inline void 2420 gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type) 2421 { 2422 gen_svm_check_intercept_param(s, pc_start, type, 0); 2423 } 2424 2067 2425 static inline void gen_stack_update(DisasContext *s, int addend) 2068 2426 { 2069 2427 #ifdef TARGET_X86_64 2070 2428 if (CODE64(s)) { 2071 if (addend == 8) 2072 gen_op_addq_ESP_8(); 2073 else 2074 gen_op_addq_ESP_im(addend); 2429 gen_op_add_reg_im(2, R_ESP, addend); 2075 2430 } else 2076 2431 #endif 2077 2432 if (s->ss32) { 2078 if (addend == 2) 2079 gen_op_addl_ESP_2(); 2080 else if (addend == 4) 2081 gen_op_addl_ESP_4(); 2082 else 2083 gen_op_addl_ESP_im(addend); 2433 gen_op_add_reg_im(1, R_ESP, addend); 2084 2434 } else { 2085 if (addend == 2) 2086 gen_op_addw_ESP_2(); 2087 else if (addend == 4) 2088 gen_op_addw_ESP_4(); 2089 else 2090 gen_op_addw_ESP_im(addend); 2435 gen_op_add_reg_im(0, R_ESP, addend); 2091 2436 } 2092 2437 } … … 2097 2442 #ifdef TARGET_X86_64 2098 2443 if (CODE64(s)) { 2099 gen_op_movq_A0_reg [R_ESP]();2444 gen_op_movq_A0_reg(R_ESP); 2100 2445 if (s->dflag) { 2101 gen_op_ subq_A0_8();2102 gen_op_st_T0_A0 [OT_QUAD + s->mem_index]();2446 gen_op_addq_A0_im(-8); 2447 gen_op_st_T0_A0(OT_QUAD + s->mem_index); 2103 2448 } else { 2104 gen_op_ subq_A0_2();2105 gen_op_st_T0_A0 [OT_WORD + s->mem_index]();2106 } 2107 gen_op_mov q_ESP_A0();2449 gen_op_addq_A0_im(-2); 2450 gen_op_st_T0_A0(OT_WORD + s->mem_index); 2451 } 2452 gen_op_mov_reg_A0(2, R_ESP); 2108 2453 } else 2109 2454 #endif 2110 2455 { 2111 gen_op_movl_A0_reg [R_ESP]();2456 gen_op_movl_A0_reg(R_ESP); 2112 2457 if (!s->dflag) 2113 gen_op_ subl_A0_2();2458 gen_op_addl_A0_im(-2); 2114 2459 else 2115 gen_op_ subl_A0_4();2460 gen_op_addl_A0_im(-4); 2116 2461 if (s->ss32) { 2117 2462 if (s->addseg) { 2118 gen_op_movl_T1_A0();2119 gen_op_addl_A0_ SS();2463 tcg_gen_mov_tl(cpu_T[1], cpu_A0); 2464 gen_op_addl_A0_seg(R_SS); 2120 2465 } 2121 2466 } else { 2122 2467 gen_op_andl_A0_ffff(); 2123 gen_op_movl_T1_A0();2124 gen_op_addl_A0_ SS();2125 } 2126 gen_op_st_T0_A0 [s->dflag + 1 + s->mem_index]();2468 tcg_gen_mov_tl(cpu_T[1], cpu_A0); 2469 gen_op_addl_A0_seg(R_SS); 2470 } 2471 gen_op_st_T0_A0(s->dflag + 1 + s->mem_index); 2127 2472 if (s->ss32 && !s->addseg) 2128 gen_op_mov l_ESP_A0();2473 gen_op_mov_reg_A0(1, R_ESP); 2129 2474 else 2130 gen_op_mov_reg_T1 [s->ss32 + 1][R_ESP]();2475 gen_op_mov_reg_T1(s->ss32 + 1, R_ESP); 2131 2476 } 2132 2477 } … … 2138 2483 #ifdef TARGET_X86_64 2139 2484 if (CODE64(s)) { 2140 gen_op_movq_A0_reg [R_ESP]();2485 gen_op_movq_A0_reg(R_ESP); 2141 2486 if (s->dflag) { 2142 gen_op_ subq_A0_8();2143 gen_op_st_T1_A0 [OT_QUAD + s->mem_index]();2487 gen_op_addq_A0_im(-8); 2488 gen_op_st_T1_A0(OT_QUAD + s->mem_index); 2144 2489 } else { 2145 gen_op_ subq_A0_2();2146 gen_op_st_T0_A0 [OT_WORD + s->mem_index]();2147 } 2148 gen_op_mov q_ESP_A0();2490 gen_op_addq_A0_im(-2); 2491 gen_op_st_T0_A0(OT_WORD + s->mem_index); 2492 } 2493 gen_op_mov_reg_A0(2, R_ESP); 2149 2494 } else 2150 2495 #endif 2151 2496 { 2152 gen_op_movl_A0_reg [R_ESP]();2497 
gen_op_movl_A0_reg(R_ESP); 2153 2498 if (!s->dflag) 2154 gen_op_ subl_A0_2();2499 gen_op_addl_A0_im(-2); 2155 2500 else 2156 gen_op_ subl_A0_4();2501 gen_op_addl_A0_im(-4); 2157 2502 if (s->ss32) { 2158 2503 if (s->addseg) { 2159 gen_op_addl_A0_ SS();2504 gen_op_addl_A0_seg(R_SS); 2160 2505 } 2161 2506 } else { 2162 2507 gen_op_andl_A0_ffff(); 2163 gen_op_addl_A0_ SS();2164 } 2165 gen_op_st_T1_A0 [s->dflag + 1 + s->mem_index]();2508 gen_op_addl_A0_seg(R_SS); 2509 } 2510 gen_op_st_T1_A0(s->dflag + 1 + s->mem_index); 2166 2511 2167 2512 if (s->ss32 && !s->addseg) 2168 gen_op_mov l_ESP_A0();2513 gen_op_mov_reg_A0(1, R_ESP); 2169 2514 else 2170 2515 gen_stack_update(s, (-2) << s->dflag); … … 2177 2522 #ifdef TARGET_X86_64 2178 2523 if (CODE64(s)) { 2179 gen_op_movq_A0_reg [R_ESP]();2180 gen_op_ld_T0_A0 [(s->dflag ? OT_QUAD : OT_WORD) + s->mem_index]();2524 gen_op_movq_A0_reg(R_ESP); 2525 gen_op_ld_T0_A0((s->dflag ? OT_QUAD : OT_WORD) + s->mem_index); 2181 2526 } else 2182 2527 #endif 2183 2528 { 2184 gen_op_movl_A0_reg [R_ESP]();2529 gen_op_movl_A0_reg(R_ESP); 2185 2530 if (s->ss32) { 2186 2531 if (s->addseg) 2187 gen_op_addl_A0_ SS();2532 gen_op_addl_A0_seg(R_SS); 2188 2533 } else { 2189 2534 gen_op_andl_A0_ffff(); 2190 gen_op_addl_A0_ SS();2191 } 2192 gen_op_ld_T0_A0 [s->dflag + 1 + s->mem_index]();2535 gen_op_addl_A0_seg(R_SS); 2536 } 2537 gen_op_ld_T0_A0(s->dflag + 1 + s->mem_index); 2193 2538 } 2194 2539 } … … 2208 2553 static void gen_stack_A0(DisasContext *s) 2209 2554 { 2210 gen_op_movl_A0_ ESP();2555 gen_op_movl_A0_reg(R_ESP); 2211 2556 if (!s->ss32) 2212 2557 gen_op_andl_A0_ffff(); 2213 gen_op_movl_T1_A0();2558 tcg_gen_mov_tl(cpu_T[1], cpu_A0); 2214 2559 if (s->addseg) 2215 gen_op_addl_A0_seg( offsetof(CPUX86State,segs[R_SS].base));2560 gen_op_addl_A0_seg(R_SS); 2216 2561 } 2217 2562 … … 2220 2565 { 2221 2566 int i; 2222 gen_op_movl_A0_ ESP();2567 gen_op_movl_A0_reg(R_ESP); 2223 2568 gen_op_addl_A0_im(-16 << s->dflag); 2224 2569 if (!s->ss32) 2225 2570 gen_op_andl_A0_ffff(); 2226 gen_op_movl_T1_A0();2571 tcg_gen_mov_tl(cpu_T[1], cpu_A0); 2227 2572 if (s->addseg) 2228 gen_op_addl_A0_seg( offsetof(CPUX86State,segs[R_SS].base));2573 gen_op_addl_A0_seg(R_SS); 2229 2574 for(i = 0;i < 8; i++) { 2230 gen_op_mov_TN_reg [OT_LONG][0][7 - i]();2231 gen_op_st_T0_A0 [OT_WORD + s->dflag + s->mem_index]();2575 gen_op_mov_TN_reg(OT_LONG, 0, 7 - i); 2576 gen_op_st_T0_A0(OT_WORD + s->dflag + s->mem_index); 2232 2577 gen_op_addl_A0_im(2 << s->dflag); 2233 2578 } 2234 gen_op_mov_reg_T1 [OT_WORD + s->ss32][R_ESP]();2579 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP); 2235 2580 } 2236 2581 … … 2239 2584 { 2240 2585 int i; 2241 gen_op_movl_A0_ ESP();2586 gen_op_movl_A0_reg(R_ESP); 2242 2587 if (!s->ss32) 2243 2588 gen_op_andl_A0_ffff(); 2244 gen_op_movl_T1_A0();2245 gen_op_addl_T1_im(16 << s->dflag);2589 tcg_gen_mov_tl(cpu_T[1], cpu_A0); 2590 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], 16 << s->dflag); 2246 2591 if (s->addseg) 2247 gen_op_addl_A0_seg( offsetof(CPUX86State,segs[R_SS].base));2592 gen_op_addl_A0_seg(R_SS); 2248 2593 for(i = 0;i < 8; i++) { 2249 2594 /* ESP is not reloaded */ 2250 2595 if (i != 3) { 2251 gen_op_ld_T0_A0 [OT_WORD + s->dflag + s->mem_index]();2252 gen_op_mov_reg_T0 [OT_WORD + s->dflag][7 - i]();2596 gen_op_ld_T0_A0(OT_WORD + s->dflag + s->mem_index); 2597 gen_op_mov_reg_T0(OT_WORD + s->dflag, 7 - i); 2253 2598 } 2254 2599 gen_op_addl_A0_im(2 << s->dflag); 2255 2600 } 2256 gen_op_mov_reg_T1 [OT_WORD + s->ss32][R_ESP]();2601 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP); 2257 2602 } 2258 2603 
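The push/pop and pusha/popa generators above all follow one addressing rule: compute the new stack offset (wrapping it to 16 bits when ss32 is clear), add the SS base (always for a 16-bit stack, only when addseg is set for a 32-bit one), store or load through that linear address, and only then write the updated pointer back to ESP. A freestanding sketch of that rule is shown below; it is an illustration only, not part of the changeset, the names push_linear_addr/new_esp are made up, and it ignores the addseg flat-segment shortcut.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical model of the PUSH address computation used by the
 * gen_push_T0/gen_push_T1 code above: the operand size gives the decrement,
 * ss32 decides whether the stack pointer wraps at 64K, and the SS segment
 * base is added last to form the linear address actually written to. */
static uint64_t push_linear_addr(uint64_t esp, uint64_t ss_base,
                                 int ss32, int opsize, uint64_t *new_esp)
{
    uint64_t sp = esp - (uint64_t)opsize;
    if (!ss32)
        sp &= 0xffff;                       /* 16-bit stack segment wraps */
    /* a 16-bit stack only updates SP, the high bits of ESP are preserved */
    *new_esp = ss32 ? sp : ((esp & ~(uint64_t)0xffff) | sp);
    return ss_base + sp;
}

int main(void)
{
    uint64_t new_esp;
    /* push a word with SP == 0 on a 16-bit stack: SP wraps to 0xFFFE */
    uint64_t addr = push_linear_addr(0x0, 0x20000, 0 /* ss32 clear */,
                                     2, &new_esp);
    printf("store at 0x%llx, new SP 0x%llx\n",
           (unsigned long long)addr, (unsigned long long)new_esp);
    return 0;
}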
… … 2267 2612 opsize = 1 << ot; 2268 2613 2269 gen_op_movl_A0_ ESP();2614 gen_op_movl_A0_reg(R_ESP); 2270 2615 gen_op_addq_A0_im(-opsize); 2271 gen_op_movl_T1_A0();2616 tcg_gen_mov_tl(cpu_T[1], cpu_A0); 2272 2617 2273 2618 /* push bp */ 2274 gen_op_mov_TN_reg [OT_LONG][0][R_EBP]();2275 gen_op_st_T0_A0 [ot + s->mem_index]();2619 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP); 2620 gen_op_st_T0_A0(ot + s->mem_index); 2276 2621 if (level) { 2277 gen_op_enter64_level(level, (ot == OT_QUAD)); 2278 } 2279 gen_op_mov_reg_T1[ot][R_EBP](); 2280 gen_op_addl_T1_im( -esp_addend + (-opsize * level) ); 2281 gen_op_mov_reg_T1[OT_QUAD][R_ESP](); 2622 /* XXX: must save state */ 2623 tcg_gen_helper_0_3(helper_enter64_level, 2624 tcg_const_i32(level), 2625 tcg_const_i32((ot == OT_QUAD)), 2626 cpu_T[1]); 2627 } 2628 gen_op_mov_reg_T1(ot, R_EBP); 2629 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level)); 2630 gen_op_mov_reg_T1(OT_QUAD, R_ESP); 2282 2631 } else 2283 2632 #endif … … 2286 2635 opsize = 2 << s->dflag; 2287 2636 2288 gen_op_movl_A0_ ESP();2637 gen_op_movl_A0_reg(R_ESP); 2289 2638 gen_op_addl_A0_im(-opsize); 2290 2639 if (!s->ss32) 2291 2640 gen_op_andl_A0_ffff(); 2292 gen_op_movl_T1_A0();2641 tcg_gen_mov_tl(cpu_T[1], cpu_A0); 2293 2642 if (s->addseg) 2294 gen_op_addl_A0_seg( offsetof(CPUX86State,segs[R_SS].base));2643 gen_op_addl_A0_seg(R_SS); 2295 2644 /* push bp */ 2296 gen_op_mov_TN_reg [OT_LONG][0][R_EBP]();2297 gen_op_st_T0_A0 [ot + s->mem_index]();2645 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP); 2646 gen_op_st_T0_A0(ot + s->mem_index); 2298 2647 if (level) { 2299 gen_op_enter_level(level, s->dflag); 2300 } 2301 gen_op_mov_reg_T1[ot][R_EBP](); 2302 gen_op_addl_T1_im( -esp_addend + (-opsize * level) ); 2303 gen_op_mov_reg_T1[OT_WORD + s->ss32][R_ESP](); 2648 /* XXX: must save state */ 2649 tcg_gen_helper_0_3(helper_enter_level, 2650 tcg_const_i32(level), 2651 tcg_const_i32(s->dflag), 2652 cpu_T[1]); 2653 } 2654 gen_op_mov_reg_T1(ot, R_EBP); 2655 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level)); 2656 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP); 2304 2657 } 2305 2658 } … … 2310 2663 gen_op_set_cc_op(s->cc_op); 2311 2664 gen_jmp_im(cur_eip); 2312 gen_op_raise_exception(trapno);2665 tcg_gen_helper_0_1(helper_raise_exception, tcg_const_i32(trapno)); 2313 2666 s->is_jmp = 3; 2314 2667 } 2315 2668 2316 2669 /* an interrupt is different from an exception because of the 2317 privile dge checks */2670 privilege checks */ 2318 2671 static void gen_interrupt(DisasContext *s, int intno, 2319 2672 target_ulong cur_eip, target_ulong next_eip) … … 2322 2675 gen_op_set_cc_op(s->cc_op); 2323 2676 gen_jmp_im(cur_eip); 2324 gen_op_raise_interrupt(intno, (int)(next_eip - cur_eip)); 2677 tcg_gen_helper_0_2(helper_raise_interrupt, 2678 tcg_const_i32(intno), 2679 tcg_const_i32(next_eip - cur_eip)); 2325 2680 s->is_jmp = 3; 2326 2681 } … … 2331 2686 gen_op_set_cc_op(s->cc_op); 2332 2687 gen_jmp_im(cur_eip); 2333 gen_op_debug();2688 tcg_gen_helper_0_0(helper_debug); 2334 2689 s->is_jmp = 3; 2335 2690 } … … 2342 2697 gen_op_set_cc_op(s->cc_op); 2343 2698 if (s->tb->flags & HF_INHIBIT_IRQ_MASK) { 2344 gen_op_reset_inhibit_irq();2699 tcg_gen_helper_0_0(helper_reset_inhibit_irq); 2345 2700 } 2346 2701 if (s->singlestep_enabled) { 2347 gen_op_debug();2702 tcg_gen_helper_0_0(helper_debug); 2348 2703 } else if (s->tf) { 2349 gen_op_single_step();2704 tcg_gen_helper_0_0(helper_single_step); 2350 2705 } else { 2351 gen_op_movl_T0_0(); 2352 gen_op_exit_tb(); 2706 tcg_gen_exit_tb(0); 2353 2707 } 2354 2708 
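    /* Descriptive note (not in the changeset): no further code may be
       generated for this translation block after this point. */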
s->is_jmp = 3; … … 2380 2734 } 2381 2735 2382 static void gen_movtl_T0_im(target_ulong val) 2383 { 2384 #ifdef TARGET_X86_64 2385 if ((int32_t)val == val) { 2386 gen_op_movl_T0_im(val); 2387 } else { 2388 gen_op_movq_T0_im64(val >> 32, val); 2389 } 2390 #else 2391 gen_op_movl_T0_im(val); 2392 #endif 2393 } 2394 2395 static void gen_movtl_T1_im(target_ulong val) 2396 { 2397 #ifdef TARGET_X86_64 2398 if ((int32_t)val == val) { 2399 gen_op_movl_T1_im(val); 2400 } else { 2401 gen_op_movq_T1_im64(val >> 32, val); 2402 } 2403 #else 2404 gen_op_movl_T1_im(val); 2405 #endif 2406 } 2407 2408 static void gen_add_A0_im(DisasContext *s, int val) 2409 { 2410 #ifdef TARGET_X86_64 2411 if (CODE64(s)) 2412 gen_op_addq_A0_im(val); 2413 else 2414 #endif 2415 gen_op_addl_A0_im(val); 2416 } 2417 2418 static GenOpFunc1 *gen_ldq_env_A0[3] = { 2419 gen_op_ldq_raw_env_A0, 2420 #ifndef CONFIG_USER_ONLY 2421 gen_op_ldq_kernel_env_A0, 2422 gen_op_ldq_user_env_A0, 2423 #endif 2424 }; 2425 2426 static GenOpFunc1 *gen_stq_env_A0[3] = { 2427 gen_op_stq_raw_env_A0, 2428 #ifndef CONFIG_USER_ONLY 2429 gen_op_stq_kernel_env_A0, 2430 gen_op_stq_user_env_A0, 2431 #endif 2432 }; 2433 2434 static GenOpFunc1 *gen_ldo_env_A0[3] = { 2435 gen_op_ldo_raw_env_A0, 2436 #ifndef CONFIG_USER_ONLY 2437 gen_op_ldo_kernel_env_A0, 2438 gen_op_ldo_user_env_A0, 2439 #endif 2440 }; 2441 2442 static GenOpFunc1 *gen_sto_env_A0[3] = { 2443 gen_op_sto_raw_env_A0, 2444 #ifndef CONFIG_USER_ONLY 2445 gen_op_sto_kernel_env_A0, 2446 gen_op_sto_user_env_A0, 2447 #endif 2448 }; 2449 2450 #define SSE_SPECIAL ((GenOpFunc2 *)1) 2451 2452 #define MMX_OP2(x) { gen_op_ ## x ## _mmx, gen_op_ ## x ## _xmm } 2453 #define SSE_FOP(x) { gen_op_ ## x ## ps, gen_op_ ## x ## pd, \ 2454 gen_op_ ## x ## ss, gen_op_ ## x ## sd, } 2455 2456 static GenOpFunc2 *sse_op_table1[256][4] = { 2736 static inline void gen_ldq_env_A0(int idx, int offset) 2737 { 2738 int mem_index = (idx >> 2) - 1; 2739 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index); 2740 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset); 2741 } 2742 2743 static inline void gen_stq_env_A0(int idx, int offset) 2744 { 2745 int mem_index = (idx >> 2) - 1; 2746 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset); 2747 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index); 2748 } 2749 2750 static inline void gen_ldo_env_A0(int idx, int offset) 2751 { 2752 int mem_index = (idx >> 2) - 1; 2753 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index); 2754 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0))); 2755 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8); 2756 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_tmp0, mem_index); 2757 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1))); 2758 } 2759 2760 static inline void gen_sto_env_A0(int idx, int offset) 2761 { 2762 int mem_index = (idx >> 2) - 1; 2763 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0))); 2764 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index); 2765 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8); 2766 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1))); 2767 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_tmp0, mem_index); 2768 } 2769 2770 static inline void gen_op_movo(int d_offset, int s_offset) 2771 { 2772 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset); 2773 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset); 2774 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + 8); 2775 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + 8); 2776 } 2777 2778 static inline void gen_op_movq(int d_offset, int s_offset) 2779 { 2780 
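    /* Descriptive note (not in the changeset): copies one 64-bit lane between
       two CPUX86State offsets via the shared i64 temporary. */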
tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset); 2781 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset); 2782 } 2783 2784 static inline void gen_op_movl(int d_offset, int s_offset) 2785 { 2786 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset); 2787 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset); 2788 } 2789 2790 static inline void gen_op_movq_env_0(int d_offset) 2791 { 2792 tcg_gen_movi_i64(cpu_tmp1_i64, 0); 2793 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset); 2794 } 2795 2796 #define SSE_SPECIAL ((void *)1) 2797 #define SSE_DUMMY ((void *)2) 2798 2799 #define MMX_OP2(x) { helper_ ## x ## _mmx, helper_ ## x ## _xmm } 2800 #define SSE_FOP(x) { helper_ ## x ## ps, helper_ ## x ## pd, \ 2801 helper_ ## x ## ss, helper_ ## x ## sd, } 2802 2803 static void *sse_op_table1[256][4] = { 2804 /* 3DNow! extensions */ 2805 [0x0e] = { SSE_DUMMY }, /* femms */ 2806 [0x0f] = { SSE_DUMMY }, /* pf... */ 2457 2807 /* pure SSE operations */ 2458 2808 [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */ … … 2460 2810 [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */ 2461 2811 [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */ 2462 [0x14] = { gen_op_punpckldq_xmm, gen_op_punpcklqdq_xmm },2463 [0x15] = { gen_op_punpckhdq_xmm, gen_op_punpckhqdq_xmm },2812 [0x14] = { helper_punpckldq_xmm, helper_punpcklqdq_xmm }, 2813 [0x15] = { helper_punpckhdq_xmm, helper_punpckhqdq_xmm }, 2464 2814 [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */ 2465 2815 [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */ … … 2471 2821 [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */ 2472 2822 [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */ 2473 [0x2e] = { gen_op_ucomiss, gen_op_ucomisd },2474 [0x2f] = { gen_op_comiss, gen_op_comisd },2823 [0x2e] = { helper_ucomiss, helper_ucomisd }, 2824 [0x2f] = { helper_comiss, helper_comisd }, 2475 2825 [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */ 2476 2826 [0x51] = SSE_FOP(sqrt), 2477 [0x52] = { gen_op_rsqrtps, NULL, gen_op_rsqrtss, NULL },2478 [0x53] = { gen_op_rcpps, NULL, gen_op_rcpss, NULL },2479 [0x54] = { gen_op_pand_xmm, gen_op_pand_xmm }, /* andps, andpd */2480 [0x55] = { gen_op_pandn_xmm, gen_op_pandn_xmm }, /* andnps, andnpd */2481 [0x56] = { gen_op_por_xmm, gen_op_por_xmm }, /* orps, orpd */2482 [0x57] = { gen_op_pxor_xmm, gen_op_pxor_xmm }, /* xorps, xorpd */2827 [0x52] = { helper_rsqrtps, NULL, helper_rsqrtss, NULL }, 2828 [0x53] = { helper_rcpps, NULL, helper_rcpss, NULL }, 2829 [0x54] = { helper_pand_xmm, helper_pand_xmm }, /* andps, andpd */ 2830 [0x55] = { helper_pandn_xmm, helper_pandn_xmm }, /* andnps, andnpd */ 2831 [0x56] = { helper_por_xmm, helper_por_xmm }, /* orps, orpd */ 2832 [0x57] = { helper_pxor_xmm, helper_pxor_xmm }, /* xorps, xorpd */ 2483 2833 [0x58] = SSE_FOP(add), 2484 2834 [0x59] = SSE_FOP(mul), 2485 [0x5a] = { gen_op_cvtps2pd, gen_op_cvtpd2ps,2486 gen_op_cvtss2sd, gen_op_cvtsd2ss },2487 [0x5b] = { gen_op_cvtdq2ps, gen_op_cvtps2dq, gen_op_cvttps2dq },2835 [0x5a] = { helper_cvtps2pd, helper_cvtpd2ps, 2836 helper_cvtss2sd, helper_cvtsd2ss }, 2837 [0x5b] = { helper_cvtdq2ps, helper_cvtps2dq, helper_cvttps2dq }, 2488 2838 [0x5c] = SSE_FOP(sub), 2489 2839 [0x5d] = SSE_FOP(min), … … 2492 2842 2493 2843 [0xc2] = SSE_FOP(cmpeq), 2494 [0xc6] = { (GenOpFunc2 *)gen_op_shufps, (GenOpFunc2 
*)gen_op_shufpd }, 2844 [0xc6] = { helper_shufps, helper_shufpd }, 2845 2846 [0x38] = { SSE_SPECIAL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* SSSE3/SSE4 */ 2847 [0x3a] = { SSE_SPECIAL, SSE_SPECIAL }, /* SSSE3/SSE4 */ 2495 2848 2496 2849 /* MMX ops and their SSE extensions */ … … 2507 2860 [0x6a] = MMX_OP2(punpckhdq), 2508 2861 [0x6b] = MMX_OP2(packssdw), 2509 [0x6c] = { NULL, gen_op_punpcklqdq_xmm },2510 [0x6d] = { NULL, gen_op_punpckhqdq_xmm },2862 [0x6c] = { NULL, helper_punpcklqdq_xmm }, 2863 [0x6d] = { NULL, helper_punpckhqdq_xmm }, 2511 2864 [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */ 2512 2865 [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, , movqdu */ 2513 [0x70] = { (GenOpFunc2 *)gen_op_pshufw_mmx,2514 (GenOpFunc2 *)gen_op_pshufd_xmm,2515 (GenOpFunc2 *)gen_op_pshufhw_xmm,2516 (GenOpFunc2 *)gen_op_pshuflw_xmm },2866 [0x70] = { helper_pshufw_mmx, 2867 helper_pshufd_xmm, 2868 helper_pshufhw_xmm, 2869 helper_pshuflw_xmm }, 2517 2870 [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */ 2518 2871 [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */ … … 2521 2874 [0x75] = MMX_OP2(pcmpeqw), 2522 2875 [0x76] = MMX_OP2(pcmpeql), 2523 [0x77] = { SSE_ SPECIAL}, /* emms */2524 [0x7c] = { NULL, gen_op_haddpd, NULL, gen_op_haddps },2525 [0x7d] = { NULL, gen_op_hsubpd, NULL, gen_op_hsubps },2876 [0x77] = { SSE_DUMMY }, /* emms */ 2877 [0x7c] = { NULL, helper_haddpd, NULL, helper_haddps }, 2878 [0x7d] = { NULL, helper_hsubpd, NULL, helper_hsubps }, 2526 2879 [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, , movq */ 2527 2880 [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */ 2528 2881 [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */ 2529 2882 [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */ 2530 [0xd0] = { NULL, gen_op_addsubpd, NULL, gen_op_addsubps },2883 [0xd0] = { NULL, helper_addsubpd, NULL, helper_addsubps }, 2531 2884 [0xd1] = MMX_OP2(psrlw), 2532 2885 [0xd2] = MMX_OP2(psrld), … … 2550 2903 [0xe4] = MMX_OP2(pmulhuw), 2551 2904 [0xe5] = MMX_OP2(pmulhw), 2552 [0xe6] = { NULL, gen_op_cvttpd2dq, gen_op_cvtdq2pd, gen_op_cvtpd2dq },2905 [0xe6] = { NULL, helper_cvttpd2dq, helper_cvtdq2pd, helper_cvtpd2dq }, 2553 2906 [0xe7] = { SSE_SPECIAL , SSE_SPECIAL }, /* movntq, movntq */ 2554 2907 [0xe8] = MMX_OP2(psubsb), … … 2577 2930 }; 2578 2931 2579 static GenOpFunc2*sse_op_table2[3 * 8][2] = {2932 static void *sse_op_table2[3 * 8][2] = { 2580 2933 [0 + 2] = MMX_OP2(psrlw), 2581 2934 [0 + 4] = MMX_OP2(psraw), … … 2585 2938 [8 + 6] = MMX_OP2(pslld), 2586 2939 [16 + 2] = MMX_OP2(psrlq), 2587 [16 + 3] = { NULL, gen_op_psrldq_xmm },2940 [16 + 3] = { NULL, helper_psrldq_xmm }, 2588 2941 [16 + 6] = MMX_OP2(psllq), 2589 [16 + 7] = { NULL, gen_op_pslldq_xmm },2942 [16 + 7] = { NULL, helper_pslldq_xmm }, 2590 2943 }; 2591 2944 2592 static GenOpFunc1*sse_op_table3[4 * 3] = {2593 gen_op_cvtsi2ss,2594 gen_op_cvtsi2sd,2595 X86_64_ONLY( gen_op_cvtsq2ss),2596 X86_64_ONLY( gen_op_cvtsq2sd),2597 2598 gen_op_cvttss2si,2599 gen_op_cvttsd2si,2600 X86_64_ONLY( gen_op_cvttss2sq),2601 X86_64_ONLY( gen_op_cvttsd2sq),2602 2603 gen_op_cvtss2si,2604 gen_op_cvtsd2si,2605 X86_64_ONLY( gen_op_cvtss2sq),2606 X86_64_ONLY( gen_op_cvtsd2sq),2945 static void *sse_op_table3[4 * 3] = { 2946 helper_cvtsi2ss, 2947 helper_cvtsi2sd, 2948 X86_64_ONLY(helper_cvtsq2ss), 2949 X86_64_ONLY(helper_cvtsq2sd), 2950 2951 helper_cvttss2si, 2952 helper_cvttsd2si, 2953 X86_64_ONLY(helper_cvttss2sq), 2954 X86_64_ONLY(helper_cvttsd2sq), 2955 2956 helper_cvtss2si, 2957 
helper_cvtsd2si, 2958 X86_64_ONLY(helper_cvtss2sq), 2959 X86_64_ONLY(helper_cvtsd2sq), 2607 2960 }; 2608 2961 2609 static GenOpFunc2*sse_op_table4[8][4] = {2962 static void *sse_op_table4[8][4] = { 2610 2963 SSE_FOP(cmpeq), 2611 2964 SSE_FOP(cmplt), … … 2618 2971 }; 2619 2972 2973 static void *sse_op_table5[256] = { 2974 [0x0c] = helper_pi2fw, 2975 [0x0d] = helper_pi2fd, 2976 [0x1c] = helper_pf2iw, 2977 [0x1d] = helper_pf2id, 2978 [0x8a] = helper_pfnacc, 2979 [0x8e] = helper_pfpnacc, 2980 [0x90] = helper_pfcmpge, 2981 [0x94] = helper_pfmin, 2982 [0x96] = helper_pfrcp, 2983 [0x97] = helper_pfrsqrt, 2984 [0x9a] = helper_pfsub, 2985 [0x9e] = helper_pfadd, 2986 [0xa0] = helper_pfcmpgt, 2987 [0xa4] = helper_pfmax, 2988 [0xa6] = helper_movq, /* pfrcpit1; no need to actually increase precision */ 2989 [0xa7] = helper_movq, /* pfrsqit1 */ 2990 [0xaa] = helper_pfsubr, 2991 [0xae] = helper_pfacc, 2992 [0xb0] = helper_pfcmpeq, 2993 [0xb4] = helper_pfmul, 2994 [0xb6] = helper_movq, /* pfrcpit2 */ 2995 [0xb7] = helper_pmulhrw_mmx, 2996 [0xbb] = helper_pswapd, 2997 [0xbf] = helper_pavgb_mmx /* pavgusb */ 2998 }; 2999 3000 struct sse_op_helper_s { 3001 void *op[2]; uint32_t ext_mask; 3002 }; 3003 #define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 } 3004 #define SSE41_OP(x) { { NULL, helper_ ## x ## _xmm }, CPUID_EXT_SSE41 } 3005 #define SSE42_OP(x) { { NULL, helper_ ## x ## _xmm }, CPUID_EXT_SSE42 } 3006 #define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 } 3007 static struct sse_op_helper_s sse_op_table6[256] = { 3008 [0x00] = SSSE3_OP(pshufb), 3009 [0x01] = SSSE3_OP(phaddw), 3010 [0x02] = SSSE3_OP(phaddd), 3011 [0x03] = SSSE3_OP(phaddsw), 3012 [0x04] = SSSE3_OP(pmaddubsw), 3013 [0x05] = SSSE3_OP(phsubw), 3014 [0x06] = SSSE3_OP(phsubd), 3015 [0x07] = SSSE3_OP(phsubsw), 3016 [0x08] = SSSE3_OP(psignb), 3017 [0x09] = SSSE3_OP(psignw), 3018 [0x0a] = SSSE3_OP(psignd), 3019 [0x0b] = SSSE3_OP(pmulhrsw), 3020 [0x10] = SSE41_OP(pblendvb), 3021 [0x14] = SSE41_OP(blendvps), 3022 [0x15] = SSE41_OP(blendvpd), 3023 [0x17] = SSE41_OP(ptest), 3024 [0x1c] = SSSE3_OP(pabsb), 3025 [0x1d] = SSSE3_OP(pabsw), 3026 [0x1e] = SSSE3_OP(pabsd), 3027 [0x20] = SSE41_OP(pmovsxbw), 3028 [0x21] = SSE41_OP(pmovsxbd), 3029 [0x22] = SSE41_OP(pmovsxbq), 3030 [0x23] = SSE41_OP(pmovsxwd), 3031 [0x24] = SSE41_OP(pmovsxwq), 3032 [0x25] = SSE41_OP(pmovsxdq), 3033 [0x28] = SSE41_OP(pmuldq), 3034 [0x29] = SSE41_OP(pcmpeqq), 3035 [0x2a] = SSE41_SPECIAL, /* movntqda */ 3036 [0x2b] = SSE41_OP(packusdw), 3037 [0x30] = SSE41_OP(pmovzxbw), 3038 [0x31] = SSE41_OP(pmovzxbd), 3039 [0x32] = SSE41_OP(pmovzxbq), 3040 [0x33] = SSE41_OP(pmovzxwd), 3041 [0x34] = SSE41_OP(pmovzxwq), 3042 [0x35] = SSE41_OP(pmovzxdq), 3043 [0x37] = SSE42_OP(pcmpgtq), 3044 [0x38] = SSE41_OP(pminsb), 3045 [0x39] = SSE41_OP(pminsd), 3046 [0x3a] = SSE41_OP(pminuw), 3047 [0x3b] = SSE41_OP(pminud), 3048 [0x3c] = SSE41_OP(pmaxsb), 3049 [0x3d] = SSE41_OP(pmaxsd), 3050 [0x3e] = SSE41_OP(pmaxuw), 3051 [0x3f] = SSE41_OP(pmaxud), 3052 [0x40] = SSE41_OP(pmulld), 3053 [0x41] = SSE41_OP(phminposuw), 3054 }; 3055 3056 static struct sse_op_helper_s sse_op_table7[256] = { 3057 [0x08] = SSE41_OP(roundps), 3058 [0x09] = SSE41_OP(roundpd), 3059 [0x0a] = SSE41_OP(roundss), 3060 [0x0b] = SSE41_OP(roundsd), 3061 [0x0c] = SSE41_OP(blendps), 3062 [0x0d] = SSE41_OP(blendpd), 3063 [0x0e] = SSE41_OP(pblendw), 3064 [0x0f] = SSSE3_OP(palignr), 3065 [0x14] = SSE41_SPECIAL, /* pextrb */ 3066 [0x15] = SSE41_SPECIAL, /* pextrw */ 3067 [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */ 3068 [0x17] = 
SSE41_SPECIAL, /* extractps */ 3069 [0x20] = SSE41_SPECIAL, /* pinsrb */ 3070 [0x21] = SSE41_SPECIAL, /* insertps */ 3071 [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */ 3072 [0x40] = SSE41_OP(dpps), 3073 [0x41] = SSE41_OP(dppd), 3074 [0x42] = SSE41_OP(mpsadbw), 3075 [0x60] = SSE42_OP(pcmpestrm), 3076 [0x61] = SSE42_OP(pcmpestri), 3077 [0x62] = SSE42_OP(pcmpistrm), 3078 [0x63] = SSE42_OP(pcmpistri), 3079 }; 3080 2620 3081 static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r) 2621 3082 { 2622 3083 int b1, op1_offset, op2_offset, is_xmm, val, ot; 2623 3084 int modrm, mod, rm, reg, reg_addr, offset_addr; 2624 GenOpFunc2 *sse_op2; 2625 GenOpFunc3 *sse_op3; 3085 void *sse_op2; 2626 3086 2627 3087 b &= 0xff; … … 2637 3097 if (!sse_op2) 2638 3098 goto illegal_op; 2639 if ( b <= 0x5f|| b == 0xc6 || b == 0xc2) {3099 if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) { 2640 3100 is_xmm = 1; 2641 3101 } else { … … 2658 3118 } 2659 3119 if (is_xmm && !(s->flags & HF_OSFXSR_MASK)) 2660 goto illegal_op; 3120 if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA)) 3121 goto illegal_op; 3122 if (b == 0x0e) { 3123 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) 3124 goto illegal_op; 3125 /* femms */ 3126 tcg_gen_helper_0_0(helper_emms); 3127 return; 3128 } 2661 3129 if (b == 0x77) { 2662 3130 /* emms */ 2663 gen_op_emms();3131 tcg_gen_helper_0_0(helper_emms); 2664 3132 return; 2665 3133 } … … 2667 3135 the static cpu state) */ 2668 3136 if (!is_xmm) { 2669 gen_op_enter_mmx();3137 tcg_gen_helper_0_0(helper_enter_mmx); 2670 3138 } 2671 3139 … … 2682 3150 goto illegal_op; 2683 3151 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 2684 gen_stq_env_A0 [s->mem_index >> 2](offsetof(CPUX86State,fpregs[reg].mmx));3152 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx)); 2685 3153 break; 2686 3154 case 0x1e7: /* movntdq */ … … 2691 3159 goto illegal_op; 2692 3160 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 2693 gen_sto_env_A0 [s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg]));3161 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg])); 2694 3162 break; 2695 3163 case 0x6e: /* movd mm, ea */ … … 2697 3165 if (s->dflag == 2) { 2698 3166 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0); 2699 gen_op_movq_mm_T0_mmx(offsetof(CPUX86State,fpregs[reg].mmx));3167 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx)); 2700 3168 } else 2701 3169 #endif 2702 3170 { 2703 3171 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0); 2704 gen_op_movl_mm_T0_mmx(offsetof(CPUX86State,fpregs[reg].mmx)); 3172 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, 3173 offsetof(CPUX86State,fpregs[reg].mmx)); 3174 tcg_gen_helper_0_2(helper_movl_mm_T0_mmx, cpu_ptr0, cpu_T[0]); 2705 3175 } 2706 3176 break; … … 2709 3179 if (s->dflag == 2) { 2710 3180 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0); 2711 gen_op_movq_mm_T0_xmm(offsetof(CPUX86State,xmm_regs[reg])); 3181 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, 3182 offsetof(CPUX86State,xmm_regs[reg])); 3183 tcg_gen_helper_0_2(helper_movq_mm_T0_xmm, cpu_ptr0, cpu_T[0]); 2712 3184 } else 2713 3185 #endif 2714 3186 { 2715 3187 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0); 2716 gen_op_movl_mm_T0_xmm(offsetof(CPUX86State,xmm_regs[reg])); 3188 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, 3189 offsetof(CPUX86State,xmm_regs[reg])); 3190 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); 3191 tcg_gen_helper_0_2(helper_movl_mm_T0_xmm, cpu_ptr0, cpu_tmp2_i32); 2717 3192 } 2718 3193 break; … … 2720 3195 if (mod != 3) { 2721 3196 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 
2722 gen_ldq_env_A0 [s->mem_index >> 2](offsetof(CPUX86State,fpregs[reg].mmx));3197 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx)); 2723 3198 } else { 2724 3199 rm = (modrm & 7); 2725 gen_op_movq(offsetof(CPUX86State,fpregs[reg].mmx), 2726 offsetof(CPUX86State,fpregs[rm].mmx)); 3200 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, 3201 offsetof(CPUX86State,fpregs[rm].mmx)); 3202 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, 3203 offsetof(CPUX86State,fpregs[reg].mmx)); 2727 3204 } 2728 3205 break; … … 2735 3212 if (mod != 3) { 2736 3213 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 2737 gen_ldo_env_A0 [s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg]));3214 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg])); 2738 3215 } else { 2739 3216 rm = (modrm & 7) | REX_B(s); … … 2745 3222 if (mod != 3) { 2746 3223 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 2747 gen_op_ld_T0_A0 [OT_LONG + s->mem_index]();2748 gen_op_movl_env_T0(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));3224 gen_op_ld_T0_A0(OT_LONG + s->mem_index); 3225 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0))); 2749 3226 gen_op_movl_T0_0(); 2750 gen_op_movl_env_T0(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));2751 gen_op_movl_env_T0(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));2752 gen_op_movl_env_T0(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));3227 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1))); 3228 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2))); 3229 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3))); 2753 3230 } else { 2754 3231 rm = (modrm & 7) | REX_B(s); … … 2760 3237 if (mod != 3) { 2761 3238 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 2762 gen_ldq_env_A0 [s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));3239 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0))); 2763 3240 gen_op_movl_T0_0(); 2764 gen_op_movl_env_T0(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));2765 gen_op_movl_env_T0(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));3241 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2))); 3242 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3))); 2766 3243 } else { 2767 3244 rm = (modrm & 7) | REX_B(s); … … 2774 3251 if (mod != 3) { 2775 3252 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 2776 gen_ldq_env_A0 [s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));3253 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0))); 2777 3254 } else { 2778 3255 /* movhlps */ … … 2785 3262 if (mod != 3) { 2786 3263 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 2787 gen_ldo_env_A0 [s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg]));3264 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg])); 2788 3265 } else { 2789 3266 rm = (modrm & 7) | REX_B(s); … … 2801 3278 if (mod != 3) { 2802 3279 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 2803 gen_ldq_env_A0 [s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));3280 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0))); 2804 3281 } else { 2805 3282 rm = (modrm & 7) | REX_B(s); … … 2814 3291 if (mod != 3) { 2815 3292 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 2816 gen_ldq_env_A0 [s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));3293 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1))); 2817 3294 } else { 2818 3295 /* movlhps */ … … 
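The pattern in the hunk above recurs throughout this changeset: where the old translator picked a pre-generated op out of a table indexed by s->mem_index >> 2, the rewrite either passes the memory index as a plain argument (gen_ldq_env_A0(s->mem_index, offset)) or, for MMX register-to-register moves, emits an explicit tcg_gen_ld_i64/tcg_gen_st_i64 pair addressed by offsetof() into the CPU state. A minimal stand-alone sketch of what that load/store pair computes at run time, using hypothetical stand-in types rather than the real CPUX86State:

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>
    #include <stdio.h>

    typedef struct { uint64_t mmx; } MMXRegModel;             /* stand-in for MMXReg */
    typedef struct { MMXRegModel fpregs[8]; } CPUStateModel;  /* stand-in for CPUX86State */

    /* The generated 64-bit load/store pair amounts to copying 8 bytes from one
       offset inside the CPU state block to another. */
    static void movq_mm_mm(void *env, size_t dst_off, size_t src_off)
    {
        uint64_t tmp;
        memcpy(&tmp, (char *)env + src_off, sizeof(tmp));
        memcpy((char *)env + dst_off, &tmp, sizeof(tmp));
    }

    int main(void)
    {
        CPUStateModel cpu;
        memset(&cpu, 0, sizeof(cpu));
        cpu.fpregs[3].mmx = 0x1122334455667788ULL;
        movq_mm_mm(&cpu, offsetof(CPUStateModel, fpregs[5].mmx),
                         offsetof(CPUStateModel, fpregs[3].mmx));
        printf("mm5 = %016llx\n", (unsigned long long)cpu.fpregs[5].mmx);
        return 0;
    }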
2825 3302 if (mod != 3) { 2826 3303 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 2827 gen_ldo_env_A0 [s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg]));3304 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg])); 2828 3305 } else { 2829 3306 rm = (modrm & 7) | REX_B(s); … … 2841 3318 #ifdef TARGET_X86_64 2842 3319 if (s->dflag == 2) { 2843 gen_op_movq_T0_mm_mmx(offsetof(CPUX86State,fpregs[reg].mmx)); 3320 tcg_gen_ld_i64(cpu_T[0], cpu_env, 3321 offsetof(CPUX86State,fpregs[reg].mmx)); 2844 3322 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1); 2845 3323 } else 2846 3324 #endif 2847 3325 { 2848 gen_op_movl_T0_mm_mmx(offsetof(CPUX86State,fpregs[reg].mmx)); 3326 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, 3327 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0))); 2849 3328 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1); 2850 3329 } … … 2853 3332 #ifdef TARGET_X86_64 2854 3333 if (s->dflag == 2) { 2855 gen_op_movq_T0_mm_xmm(offsetof(CPUX86State,xmm_regs[reg])); 3334 tcg_gen_ld_i64(cpu_T[0], cpu_env, 3335 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0))); 2856 3336 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1); 2857 3337 } else 2858 3338 #endif 2859 3339 { 2860 gen_op_movl_T0_mm_xmm(offsetof(CPUX86State,xmm_regs[reg])); 3340 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, 3341 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0))); 2861 3342 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1); 2862 3343 } … … 2865 3346 if (mod != 3) { 2866 3347 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 2867 gen_ldq_env_A0 [s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));3348 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0))); 2868 3349 } else { 2869 3350 rm = (modrm & 7) | REX_B(s); … … 2876 3357 if (mod != 3) { 2877 3358 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 2878 gen_stq_env_A0 [s->mem_index >> 2](offsetof(CPUX86State,fpregs[reg].mmx));3359 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx)); 2879 3360 } else { 2880 3361 rm = (modrm & 7); … … 2891 3372 if (mod != 3) { 2892 3373 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 2893 gen_sto_env_A0 [s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg]));3374 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg])); 2894 3375 } else { 2895 3376 rm = (modrm & 7) | REX_B(s); … … 2901 3382 if (mod != 3) { 2902 3383 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 2903 gen_op_movl_T0_env(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));2904 gen_op_st_T0_A0 [OT_LONG + s->mem_index]();3384 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0))); 3385 gen_op_st_T0_A0(OT_LONG + s->mem_index); 2905 3386 } else { 2906 3387 rm = (modrm & 7) | REX_B(s); … … 2912 3393 if (mod != 3) { 2913 3394 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 2914 gen_stq_env_A0 [s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));3395 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0))); 2915 3396 } else { 2916 3397 rm = (modrm & 7) | REX_B(s); … … 2923 3404 if (mod != 3) { 2924 3405 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 2925 gen_stq_env_A0 [s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));3406 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0))); 2926 3407 } else { 2927 3408 goto illegal_op; … … 2932 3413 if (mod != 3) { 2933 3414 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 2934 gen_stq_env_A0 [s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));3415 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1))); 2935 
3416 } else { 2936 3417 goto illegal_op; … … 2946 3427 if (is_xmm) { 2947 3428 gen_op_movl_T0_im(val); 2948 gen_op_movl_env_T0(offsetof(CPUX86State,xmm_t0.XMM_L(0)));3429 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0))); 2949 3430 gen_op_movl_T0_0(); 2950 gen_op_movl_env_T0(offsetof(CPUX86State,xmm_t0.XMM_L(1)));3431 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1))); 2951 3432 op1_offset = offsetof(CPUX86State,xmm_t0); 2952 3433 } else { 2953 3434 gen_op_movl_T0_im(val); 2954 gen_op_movl_env_T0(offsetof(CPUX86State,mmx_t0.MMX_L(0)));3435 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0))); 2955 3436 gen_op_movl_T0_0(); 2956 gen_op_movl_env_T0(offsetof(CPUX86State,mmx_t0.MMX_L(1)));3437 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1))); 2957 3438 op1_offset = offsetof(CPUX86State,mmx_t0); 2958 3439 } … … 2967 3448 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx); 2968 3449 } 2969 sse_op2(op2_offset, op1_offset); 3450 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset); 3451 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset); 3452 tcg_gen_helper_0_2(sse_op2, cpu_ptr0, cpu_ptr1); 2970 3453 break; 2971 3454 case 0x050: /* movmskps */ 2972 3455 rm = (modrm & 7) | REX_B(s); 2973 gen_op_movmskps(offsetof(CPUX86State,xmm_regs[rm])); 2974 gen_op_mov_reg_T0[OT_LONG][reg](); 3456 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, 3457 offsetof(CPUX86State,xmm_regs[rm])); 3458 tcg_gen_helper_1_1(helper_movmskps, cpu_tmp2_i32, cpu_ptr0); 3459 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); 3460 gen_op_mov_reg_T0(OT_LONG, reg); 2975 3461 break; 2976 3462 case 0x150: /* movmskpd */ 2977 3463 rm = (modrm & 7) | REX_B(s); 2978 gen_op_movmskpd(offsetof(CPUX86State,xmm_regs[rm])); 2979 gen_op_mov_reg_T0[OT_LONG][reg](); 3464 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, 3465 offsetof(CPUX86State,xmm_regs[rm])); 3466 tcg_gen_helper_1_1(helper_movmskpd, cpu_tmp2_i32, cpu_ptr0); 3467 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); 3468 gen_op_mov_reg_T0(OT_LONG, reg); 2980 3469 break; 2981 3470 case 0x02a: /* cvtpi2ps */ 2982 3471 case 0x12a: /* cvtpi2pd */ 2983 gen_op_enter_mmx();3472 tcg_gen_helper_0_0(helper_enter_mmx); 2984 3473 if (mod != 3) { 2985 3474 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 2986 3475 op2_offset = offsetof(CPUX86State,mmx_t0); 2987 gen_ldq_env_A0 [s->mem_index >> 2](op2_offset);3476 gen_ldq_env_A0(s->mem_index, op2_offset); 2988 3477 } else { 2989 3478 rm = (modrm & 7); … … 2991 3480 } 2992 3481 op1_offset = offsetof(CPUX86State,xmm_regs[reg]); 3482 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); 3483 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); 2993 3484 switch(b >> 8) { 2994 3485 case 0x0: 2995 gen_op_cvtpi2ps(op1_offset, op2_offset);3486 tcg_gen_helper_0_2(helper_cvtpi2ps, cpu_ptr0, cpu_ptr1); 2996 3487 break; 2997 3488 default: 2998 3489 case 0x1: 2999 gen_op_cvtpi2pd(op1_offset, op2_offset);3490 tcg_gen_helper_0_2(helper_cvtpi2pd, cpu_ptr0, cpu_ptr1); 3000 3491 break; 3001 3492 } … … 3006 3497 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0); 3007 3498 op1_offset = offsetof(CPUX86State,xmm_regs[reg]); 3008 sse_op_table3[(s->dflag == 2) * 2 + ((b >> 8) - 2)](op1_offset); 3499 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); 3500 sse_op2 = sse_op_table3[(s->dflag == 2) * 2 + ((b >> 8) - 2)]; 3501 if (ot == OT_LONG) { 3502 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); 3503 tcg_gen_helper_0_2(sse_op2, cpu_ptr0, cpu_tmp2_i32); 3504 } else { 3505 tcg_gen_helper_0_2(sse_op2, cpu_ptr0, cpu_T[0]); 3506 } 3009 3507 break; 3010 3508 
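In the movmskps/movmskpd cases above, the rewrite no longer relies on a gen_op_* that writes T0 as a side effect: it builds a pointer to the XMM register with tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offset), calls a helper that returns a 32-bit result, zero-extends it into cpu_T[0], and finally writes the guest register with gen_op_mov_reg_T0(OT_LONG, reg). For reference, a minimal stand-alone model of what such a helper computes (hypothetical function name, not the actual helper_movmskps):

    #include <stdint.h>
    #include <stdio.h>

    /* MOVMSKPS semantics: gather the sign bit of each 32-bit lane into bits 0..3. */
    static uint32_t movmskps_model(const uint32_t xmm[4])
    {
        uint32_t mask = 0;
        int i;
        for (i = 0; i < 4; i++)
            mask |= (xmm[i] >> 31) << i;
        return mask;
    }

    int main(void)
    {
        /* lanes 0 and 2 hold negative singles, so the expected mask is binary 0101 = 5 */
        uint32_t xmm[4] = { 0x80000000u, 0x3f800000u, 0xbf800000u, 0x00000000u };
        printf("movmskps -> %u\n", movmskps_model(xmm));
        return 0;
    }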
case 0x02c: /* cvttps2pi */ … … 3012 3510 case 0x02d: /* cvtps2pi */ 3013 3511 case 0x12d: /* cvtpd2pi */ 3014 gen_op_enter_mmx();3512 tcg_gen_helper_0_0(helper_enter_mmx); 3015 3513 if (mod != 3) { 3016 3514 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 3017 3515 op2_offset = offsetof(CPUX86State,xmm_t0); 3018 gen_ldo_env_A0 [s->mem_index >> 2](op2_offset);3516 gen_ldo_env_A0(s->mem_index, op2_offset); 3019 3517 } else { 3020 3518 rm = (modrm & 7) | REX_B(s); … … 3022 3520 } 3023 3521 op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx); 3522 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); 3523 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); 3024 3524 switch(b) { 3025 3525 case 0x02c: 3026 gen_op_cvttps2pi(op1_offset, op2_offset);3526 tcg_gen_helper_0_2(helper_cvttps2pi, cpu_ptr0, cpu_ptr1); 3027 3527 break; 3028 3528 case 0x12c: 3029 gen_op_cvttpd2pi(op1_offset, op2_offset);3529 tcg_gen_helper_0_2(helper_cvttpd2pi, cpu_ptr0, cpu_ptr1); 3030 3530 break; 3031 3531 case 0x02d: 3032 gen_op_cvtps2pi(op1_offset, op2_offset);3532 tcg_gen_helper_0_2(helper_cvtps2pi, cpu_ptr0, cpu_ptr1); 3033 3533 break; 3034 3534 case 0x12d: 3035 gen_op_cvtpd2pi(op1_offset, op2_offset);3535 tcg_gen_helper_0_2(helper_cvtpd2pi, cpu_ptr0, cpu_ptr1); 3036 3536 break; 3037 3537 } … … 3045 3545 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 3046 3546 if ((b >> 8) & 1) { 3047 gen_ldq_env_A0 [s->mem_index >> 2](offsetof(CPUX86State,xmm_t0.XMM_Q(0)));3547 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_Q(0))); 3048 3548 } else { 3049 gen_op_ld_T0_A0 [OT_LONG + s->mem_index]();3050 gen_op_movl_env_T0(offsetof(CPUX86State,xmm_t0.XMM_L(0)));3549 gen_op_ld_T0_A0(OT_LONG + s->mem_index); 3550 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0))); 3051 3551 } 3052 3552 op2_offset = offsetof(CPUX86State,xmm_t0); … … 3055 3555 op2_offset = offsetof(CPUX86State,xmm_regs[rm]); 3056 3556 } 3057 sse_op_table3[(s->dflag == 2) * 2 + ((b >> 8) - 2) + 4 + 3058 (b & 1) * 4](op2_offset); 3059 gen_op_mov_reg_T0[ot][reg](); 3557 sse_op2 = sse_op_table3[(s->dflag == 2) * 2 + ((b >> 8) - 2) + 4 + 3558 (b & 1) * 4]; 3559 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset); 3560 if (ot == OT_LONG) { 3561 tcg_gen_helper_1_1(sse_op2, cpu_tmp2_i32, cpu_ptr0); 3562 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); 3563 } else { 3564 tcg_gen_helper_1_1(sse_op2, cpu_T[0], cpu_ptr0); 3565 } 3566 gen_op_mov_reg_T0(ot, reg); 3060 3567 break; 3061 3568 case 0xc4: /* pinsrw */ … … 3066 3573 if (b1) { 3067 3574 val &= 7; 3068 gen_op_pinsrw_xmm(offsetof(CPUX86State,xmm_regs[reg]), val); 3575 tcg_gen_st16_tl(cpu_T[0], cpu_env, 3576 offsetof(CPUX86State,xmm_regs[reg].XMM_W(val))); 3069 3577 } else { 3070 3578 val &= 3; 3071 gen_op_pinsrw_mmx(offsetof(CPUX86State,fpregs[reg].mmx), val); 3579 tcg_gen_st16_tl(cpu_T[0], cpu_env, 3580 offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val))); 3072 3581 } 3073 3582 break; … … 3076 3585 if (mod != 3) 3077 3586 goto illegal_op; 3587 ot = (s->dflag == 2) ? 
OT_QUAD : OT_LONG; 3078 3588 val = ldub_code(s->pc++); 3079 3589 if (b1) { 3080 3590 val &= 7; 3081 3591 rm = (modrm & 7) | REX_B(s); 3082 gen_op_pextrw_xmm(offsetof(CPUX86State,xmm_regs[rm]), val); 3592 tcg_gen_ld16u_tl(cpu_T[0], cpu_env, 3593 offsetof(CPUX86State,xmm_regs[rm].XMM_W(val))); 3083 3594 } else { 3084 3595 val &= 3; 3085 3596 rm = (modrm & 7); 3086 gen_op_pextrw_mmx(offsetof(CPUX86State,fpregs[rm].mmx), val); 3597 tcg_gen_ld16u_tl(cpu_T[0], cpu_env, 3598 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val))); 3087 3599 } 3088 3600 reg = ((modrm >> 3) & 7) | rex_r; 3089 gen_op_mov_reg_T0 [OT_LONG][reg]();3601 gen_op_mov_reg_T0(ot, reg); 3090 3602 break; 3091 3603 case 0x1d6: /* movq ea, xmm */ 3092 3604 if (mod != 3) { 3093 3605 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 3094 gen_stq_env_A0 [s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));3606 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0))); 3095 3607 } else { 3096 3608 rm = (modrm & 7) | REX_B(s); … … 3101 3613 break; 3102 3614 case 0x2d6: /* movq2dq */ 3103 gen_op_enter_mmx();3615 tcg_gen_helper_0_0(helper_enter_mmx); 3104 3616 rm = (modrm & 7); 3105 3617 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)), … … 3108 3620 break; 3109 3621 case 0x3d6: /* movdq2q */ 3110 gen_op_enter_mmx();3622 tcg_gen_helper_0_0(helper_enter_mmx); 3111 3623 rm = (modrm & 7) | REX_B(s); 3112 3624 gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx), … … 3119 3631 if (b1) { 3120 3632 rm = (modrm & 7) | REX_B(s); 3121 gen_op_pmovmskb_xmm(offsetof(CPUX86State,xmm_regs[rm])); 3633 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm])); 3634 tcg_gen_helper_1_1(helper_pmovmskb_xmm, cpu_tmp2_i32, cpu_ptr0); 3122 3635 } else { 3123 3636 rm = (modrm & 7); 3124 gen_op_pmovmskb_mmx(offsetof(CPUX86State,fpregs[rm].mmx)); 3125 } 3637 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx)); 3638 tcg_gen_helper_1_1(helper_pmovmskb_mmx, cpu_tmp2_i32, cpu_ptr0); 3639 } 3640 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); 3126 3641 reg = ((modrm >> 3) & 7) | rex_r; 3127 gen_op_mov_reg_T0[OT_LONG][reg](); 3642 gen_op_mov_reg_T0(OT_LONG, reg); 3643 break; 3644 case 0x138: 3645 if (s->prefix & PREFIX_REPNZ) 3646 goto crc32; 3647 case 0x038: 3648 b = modrm; 3649 modrm = ldub_code(s->pc++); 3650 rm = modrm & 7; 3651 reg = ((modrm >> 3) & 7) | rex_r; 3652 mod = (modrm >> 6) & 3; 3653 3654 sse_op2 = sse_op_table6[b].op[b1]; 3655 if (!sse_op2) 3656 goto illegal_op; 3657 if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask)) 3658 goto illegal_op; 3659 3660 if (b1) { 3661 op1_offset = offsetof(CPUX86State,xmm_regs[reg]); 3662 if (mod == 3) { 3663 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]); 3664 } else { 3665 op2_offset = offsetof(CPUX86State,xmm_t0); 3666 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 3667 switch (b) { 3668 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */ 3669 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */ 3670 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */ 3671 gen_ldq_env_A0(s->mem_index, op2_offset + 3672 offsetof(XMMReg, XMM_Q(0))); 3673 break; 3674 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */ 3675 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */ 3676 tcg_gen_qemu_ld32u(cpu_tmp2_i32, cpu_A0, 3677 (s->mem_index >> 2) - 1); 3678 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset + 3679 offsetof(XMMReg, XMM_L(0))); 3680 break; 3681 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */ 3682 tcg_gen_qemu_ld16u(cpu_tmp0, cpu_A0, 3683 (s->mem_index >> 2) - 1); 3684 
tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset + 3685 offsetof(XMMReg, XMM_W(0))); 3686 break; 3687 case 0x2a: /* movntqda */ 3688 gen_ldo_env_A0(s->mem_index, op1_offset); 3689 return; 3690 default: 3691 gen_ldo_env_A0(s->mem_index, op2_offset); 3692 } 3693 } 3694 } else { 3695 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx); 3696 if (mod == 3) { 3697 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx); 3698 } else { 3699 op2_offset = offsetof(CPUX86State,mmx_t0); 3700 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 3701 gen_ldq_env_A0(s->mem_index, op2_offset); 3702 } 3703 } 3704 if (sse_op2 == SSE_SPECIAL) 3705 goto illegal_op; 3706 3707 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); 3708 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); 3709 tcg_gen_helper_0_2(sse_op2, cpu_ptr0, cpu_ptr1); 3710 3711 if (b == 0x17) 3712 s->cc_op = CC_OP_EFLAGS; 3713 break; 3714 case 0x338: /* crc32 */ 3715 crc32: 3716 b = modrm; 3717 modrm = ldub_code(s->pc++); 3718 reg = ((modrm >> 3) & 7) | rex_r; 3719 3720 if (b != 0xf0 && b != 0xf1) 3721 goto illegal_op; 3722 if (!(s->cpuid_ext_features & CPUID_EXT_SSE42)) 3723 goto illegal_op; 3724 3725 if (b == 0xf0) 3726 ot = OT_BYTE; 3727 else if (b == 0xf1 && s->dflag != 2) 3728 if (s->prefix & PREFIX_DATA) 3729 ot = OT_WORD; 3730 else 3731 ot = OT_LONG; 3732 else 3733 ot = OT_QUAD; 3734 3735 gen_op_mov_TN_reg(OT_LONG, 0, reg); 3736 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); 3737 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0); 3738 tcg_gen_helper_1_3(helper_crc32, cpu_T[0], cpu_tmp2_i32, 3739 cpu_T[0], tcg_const_i32(8 << ot)); 3740 3741 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG; 3742 gen_op_mov_reg_T0(ot, reg); 3743 break; 3744 case 0x03a: 3745 case 0x13a: 3746 b = modrm; 3747 modrm = ldub_code(s->pc++); 3748 rm = modrm & 7; 3749 reg = ((modrm >> 3) & 7) | rex_r; 3750 mod = (modrm >> 6) & 3; 3751 3752 sse_op2 = sse_op_table7[b].op[b1]; 3753 if (!sse_op2) 3754 goto illegal_op; 3755 if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask)) 3756 goto illegal_op; 3757 3758 if (sse_op2 == SSE_SPECIAL) { 3759 ot = (s->dflag == 2) ? 
OT_QUAD : OT_LONG; 3760 rm = (modrm & 7) | REX_B(s); 3761 if (mod != 3) 3762 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 3763 reg = ((modrm >> 3) & 7) | rex_r; 3764 val = ldub_code(s->pc++); 3765 switch (b) { 3766 case 0x14: /* pextrb */ 3767 tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, 3768 xmm_regs[reg].XMM_B(val & 15))); 3769 if (mod == 3) 3770 gen_op_mov_reg_T0(ot, rm); 3771 else 3772 tcg_gen_qemu_st8(cpu_T[0], cpu_A0, 3773 (s->mem_index >> 2) - 1); 3774 break; 3775 case 0x15: /* pextrw */ 3776 tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, 3777 xmm_regs[reg].XMM_W(val & 7))); 3778 if (mod == 3) 3779 gen_op_mov_reg_T0(ot, rm); 3780 else 3781 tcg_gen_qemu_st16(cpu_T[0], cpu_A0, 3782 (s->mem_index >> 2) - 1); 3783 break; 3784 case 0x16: 3785 if (ot == OT_LONG) { /* pextrd */ 3786 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, 3787 offsetof(CPUX86State, 3788 xmm_regs[reg].XMM_L(val & 3))); 3789 if (mod == 3) 3790 gen_op_mov_reg_v(ot, rm, cpu_tmp2_i32); 3791 else 3792 tcg_gen_qemu_st32(cpu_tmp2_i32, cpu_A0, 3793 (s->mem_index >> 2) - 1); 3794 } else { /* pextrq */ 3795 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, 3796 offsetof(CPUX86State, 3797 xmm_regs[reg].XMM_Q(val & 1))); 3798 if (mod == 3) 3799 gen_op_mov_reg_v(ot, rm, cpu_tmp1_i64); 3800 else 3801 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, 3802 (s->mem_index >> 2) - 1); 3803 } 3804 break; 3805 case 0x17: /* extractps */ 3806 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, 3807 xmm_regs[reg].XMM_L(val & 3))); 3808 if (mod == 3) 3809 gen_op_mov_reg_T0(ot, rm); 3810 else 3811 tcg_gen_qemu_st32(cpu_T[0], cpu_A0, 3812 (s->mem_index >> 2) - 1); 3813 break; 3814 case 0x20: /* pinsrb */ 3815 if (mod == 3) 3816 gen_op_mov_TN_reg(OT_LONG, 0, rm); 3817 else 3818 tcg_gen_qemu_ld8u(cpu_T[0], cpu_A0, 3819 (s->mem_index >> 2) - 1); 3820 tcg_gen_st8_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, 3821 xmm_regs[reg].XMM_B(val & 15))); 3822 break; 3823 case 0x21: /* insertps */ 3824 if (mod == 3) 3825 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, 3826 offsetof(CPUX86State,xmm_regs[rm] 3827 .XMM_L((val >> 6) & 3))); 3828 else 3829 tcg_gen_qemu_ld32u(cpu_tmp2_i32, cpu_A0, 3830 (s->mem_index >> 2) - 1); 3831 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, 3832 offsetof(CPUX86State,xmm_regs[reg] 3833 .XMM_L((val >> 4) & 3))); 3834 if ((val >> 0) & 1) 3835 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/), 3836 cpu_env, offsetof(CPUX86State, 3837 xmm_regs[reg].XMM_L(0))); 3838 if ((val >> 1) & 1) 3839 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/), 3840 cpu_env, offsetof(CPUX86State, 3841 xmm_regs[reg].XMM_L(1))); 3842 if ((val >> 2) & 1) 3843 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/), 3844 cpu_env, offsetof(CPUX86State, 3845 xmm_regs[reg].XMM_L(2))); 3846 if ((val >> 3) & 1) 3847 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/), 3848 cpu_env, offsetof(CPUX86State, 3849 xmm_regs[reg].XMM_L(3))); 3850 break; 3851 case 0x22: 3852 if (ot == OT_LONG) { /* pinsrd */ 3853 if (mod == 3) 3854 gen_op_mov_v_reg(ot, cpu_tmp2_i32, rm); 3855 else 3856 tcg_gen_qemu_ld32u(cpu_tmp2_i32, cpu_A0, 3857 (s->mem_index >> 2) - 1); 3858 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, 3859 offsetof(CPUX86State, 3860 xmm_regs[reg].XMM_L(val & 3))); 3861 } else { /* pinsrq */ 3862 if (mod == 3) 3863 gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm); 3864 else 3865 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, 3866 (s->mem_index >> 2) - 1); 3867 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, 3868 offsetof(CPUX86State, 3869 xmm_regs[reg].XMM_Q(val & 1))); 3870 } 3871 break; 3872 } 3873 return; 3874 } 3875 3876 if 
(b1) { 3877 op1_offset = offsetof(CPUX86State,xmm_regs[reg]); 3878 if (mod == 3) { 3879 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]); 3880 } else { 3881 op2_offset = offsetof(CPUX86State,xmm_t0); 3882 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 3883 gen_ldo_env_A0(s->mem_index, op2_offset); 3884 } 3885 } else { 3886 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx); 3887 if (mod == 3) { 3888 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx); 3889 } else { 3890 op2_offset = offsetof(CPUX86State,mmx_t0); 3891 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 3892 gen_ldq_env_A0(s->mem_index, op2_offset); 3893 } 3894 } 3895 val = ldub_code(s->pc++); 3896 3897 if ((b & 0xfc) == 0x60) { /* pcmpXstrX */ 3898 s->cc_op = CC_OP_EFLAGS; 3899 3900 if (s->dflag == 2) 3901 /* The helper must use entire 64-bit gp registers */ 3902 val |= 1 << 8; 3903 } 3904 3905 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); 3906 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); 3907 tcg_gen_helper_0_3(sse_op2, cpu_ptr0, cpu_ptr1, tcg_const_i32(val)); 3128 3908 break; 3129 3909 default: … … 3133 3913 /* generic MMX or SSE operation */ 3134 3914 switch(b) { 3135 case 0xf7:3136 /* maskmov : we must prepare A0 */3137 if (mod != 3)3138 goto illegal_op;3139 #ifdef TARGET_X86_643140 if (s->aflag == 2) {3141 gen_op_movq_A0_reg[R_EDI]();3142 } else3143 #endif3144 {3145 gen_op_movl_A0_reg[R_EDI]();3146 if (s->aflag == 0)3147 gen_op_andl_A0_ffff();3148 }3149 gen_add_A0_ds_seg(s);3150 break;3151 3915 case 0x70: /* pshufx insn */ 3152 3916 case 0xc6: /* pshufx insn */ … … 3167 3931 if (b1 == 2) { 3168 3932 /* 32 bit access */ 3169 gen_op_ld_T0_A0 [OT_LONG + s->mem_index]();3170 gen_op_movl_env_T0(offsetof(CPUX86State,xmm_t0.XMM_L(0)));3933 gen_op_ld_T0_A0(OT_LONG + s->mem_index); 3934 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0))); 3171 3935 } else { 3172 3936 /* 64 bit access */ 3173 gen_ldq_env_A0 [s->mem_index >> 2](offsetof(CPUX86State,xmm_t0.XMM_D(0)));3937 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_D(0))); 3174 3938 } 3175 3939 } else { 3176 gen_ldo_env_A0 [s->mem_index >> 2](op2_offset);3940 gen_ldo_env_A0(s->mem_index, op2_offset); 3177 3941 } 3178 3942 } else { … … 3185 3949 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 3186 3950 op2_offset = offsetof(CPUX86State,mmx_t0); 3187 gen_ldq_env_A0 [s->mem_index >> 2](op2_offset);3951 gen_ldq_env_A0(s->mem_index, op2_offset); 3188 3952 } else { 3189 3953 rm = (modrm & 7); … … 3192 3956 } 3193 3957 switch(b) { 3958 case 0x0f: /* 3DNow! 
data insns */ 3959 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) 3960 goto illegal_op; 3961 val = ldub_code(s->pc++); 3962 sse_op2 = sse_op_table5[val]; 3963 if (!sse_op2) 3964 goto illegal_op; 3965 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); 3966 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); 3967 tcg_gen_helper_0_2(sse_op2, cpu_ptr0, cpu_ptr1); 3968 break; 3194 3969 case 0x70: /* pshufx insn */ 3195 3970 case 0xc6: /* pshufx insn */ 3196 3971 val = ldub_code(s->pc++); 3197 sse_op3 = (GenOpFunc3 *)sse_op2; 3198 sse_op3(op1_offset, op2_offset, val); 3972 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); 3973 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); 3974 tcg_gen_helper_0_3(sse_op2, cpu_ptr0, cpu_ptr1, tcg_const_i32(val)); 3199 3975 break; 3200 3976 case 0xc2: … … 3204 3980 goto illegal_op; 3205 3981 sse_op2 = sse_op_table4[val][b1]; 3206 sse_op2(op1_offset, op2_offset); 3982 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); 3983 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); 3984 tcg_gen_helper_0_2(sse_op2, cpu_ptr0, cpu_ptr1); 3985 break; 3986 case 0xf7: 3987 /* maskmov : we must prepare A0 */ 3988 if (mod != 3) 3989 goto illegal_op; 3990 #ifdef TARGET_X86_64 3991 if (s->aflag == 2) { 3992 gen_op_movq_A0_reg(R_EDI); 3993 } else 3994 #endif 3995 { 3996 gen_op_movl_A0_reg(R_EDI); 3997 if (s->aflag == 0) 3998 gen_op_andl_A0_ffff(); 3999 } 4000 gen_add_A0_ds_seg(s); 4001 4002 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); 4003 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); 4004 tcg_gen_helper_0_3(sse_op2, cpu_ptr0, cpu_ptr1, cpu_A0); 3207 4005 break; 3208 4006 default: 3209 sse_op2(op1_offset, op2_offset); 4007 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); 4008 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); 4009 tcg_gen_helper_0_2(sse_op2, cpu_ptr0, cpu_ptr1); 3210 4010 break; 3211 4011 } … … 3369 4169 int rex_w, rex_r; 3370 4170 4171 if (unlikely(loglevel & CPU_LOG_TB_OP)) 4172 tcg_gen_debug_insn_start(pc_start); 3371 4173 s->pc = pc_start; 3372 4174 prefixes = 0; … … 3382 4184 #endif 3383 4185 s->rip_offset = 0; /* for relative ip address */ 3384 3385 4186 #ifdef VBOX 3386 4187 /* Always update EIP. Otherwise one must be very careful with generated code that can raise exceptions. 
*/ 3387 4188 gen_update_eip(pc_start - s->cs_base); 3388 4189 #endif 3389 3390 4190 next_byte: 3391 4191 b = ldub_code(s->pc); … … 3497 4297 #ifndef VBOX 3498 4298 if (prefixes & PREFIX_LOCK) 3499 gen_op_lock();4299 tcg_gen_helper_0_0(helper_lock); 3500 4300 #else /* VBOX */ 3501 4301 if (prefixes & PREFIX_LOCK) { … … 3504 4304 return s->pc; 3505 4305 } 3506 gen_op_lock();4306 tcg_gen_helper_0_0(helper_lock); 3507 4307 } 3508 4308 #endif /* VBOX */ … … 3551 4351 gen_op_movl_T0_0(); 3552 4352 s->cc_op = CC_OP_LOGICB + ot; 3553 gen_op_mov_reg_T0 [ot][reg]();4353 gen_op_mov_reg_T0(ot, reg); 3554 4354 gen_op_update1_cc(); 3555 4355 break; … … 3557 4357 opreg = rm; 3558 4358 } 3559 gen_op_mov_TN_reg [ot][1][reg]();4359 gen_op_mov_TN_reg(ot, 1, reg); 3560 4360 gen_op(s, op, ot, opreg); 3561 4361 break; … … 3567 4367 if (mod != 3) { 3568 4368 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 3569 gen_op_ld_T1_A0 [ot + s->mem_index]();4369 gen_op_ld_T1_A0(ot + s->mem_index); 3570 4370 } else if (op == OP_XORL && rm == reg) { 3571 4371 goto xor_zero; 3572 4372 } else { 3573 gen_op_mov_TN_reg [ot][1][rm]();4373 gen_op_mov_TN_reg(ot, 1, rm); 3574 4374 } 3575 4375 gen_op(s, op, ot, reg); … … 3584 4384 break; 3585 4385 4386 case 0x82: 4387 if (CODE64(s)) 4388 goto illegal_op; 3586 4389 case 0x80: /* GRP1 */ 3587 4390 case 0x81: 3588 case 0x82:3589 4391 case 0x83: 3590 4392 { … … 3653 4455 s->rip_offset = insn_const_size(ot); 3654 4456 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 3655 gen_op_ld_T0_A0 [ot + s->mem_index]();4457 gen_op_ld_T0_A0(ot + s->mem_index); 3656 4458 } else { 3657 gen_op_mov_TN_reg [ot][0][rm]();4459 gen_op_mov_TN_reg(ot, 0, rm); 3658 4460 } 3659 4461 … … 3666 4468 break; 3667 4469 case 2: /* not */ 3668 gen_op_notl_T0();4470 tcg_gen_not_tl(cpu_T[0], cpu_T[0]); 3669 4471 if (mod != 3) { 3670 gen_op_st_T0_A0 [ot + s->mem_index]();4472 gen_op_st_T0_A0(ot + s->mem_index); 3671 4473 } else { 3672 gen_op_mov_reg_T0 [ot][rm]();4474 gen_op_mov_reg_T0(ot, rm); 3673 4475 } 3674 4476 break; 3675 4477 case 3: /* neg */ 3676 gen_op_negl_T0();4478 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]); 3677 4479 if (mod != 3) { 3678 gen_op_st_T0_A0 [ot + s->mem_index]();4480 gen_op_st_T0_A0(ot + s->mem_index); 3679 4481 } else { 3680 gen_op_mov_reg_T0 [ot][rm]();4482 gen_op_mov_reg_T0(ot, rm); 3681 4483 } 3682 4484 gen_op_update_neg_cc(); … … 3686 4488 switch(ot) { 3687 4489 case OT_BYTE: 3688 gen_op_mulb_AL_T0(); 4490 gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX); 4491 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]); 4492 tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]); 4493 /* XXX: use 32 bit mul which could be faster */ 4494 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]); 4495 gen_op_mov_reg_T0(OT_WORD, R_EAX); 4496 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); 4497 tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00); 3689 4498 s->cc_op = CC_OP_MULB; 3690 4499 break; 3691 4500 case OT_WORD: 3692 gen_op_mulw_AX_T0(); 4501 gen_op_mov_TN_reg(OT_WORD, 1, R_EAX); 4502 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]); 4503 tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]); 4504 /* XXX: use 32 bit mul which could be faster */ 4505 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]); 4506 gen_op_mov_reg_T0(OT_WORD, R_EAX); 4507 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); 4508 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16); 4509 gen_op_mov_reg_T0(OT_WORD, R_EDX); 4510 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]); 3693 4511 s->cc_op = CC_OP_MULW; 3694 4512 break; 3695 4513 default: 3696 4514 case OT_LONG: 3697 gen_op_mull_EAX_T0(); 4515 #ifdef TARGET_X86_64 4516 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX); 4517 
tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]); 4518 tcg_gen_ext32u_tl(cpu_T[1], cpu_T[1]); 4519 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]); 4520 gen_op_mov_reg_T0(OT_LONG, R_EAX); 4521 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); 4522 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32); 4523 gen_op_mov_reg_T0(OT_LONG, R_EDX); 4524 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]); 4525 #else 4526 { 4527 TCGv t0, t1; 4528 t0 = tcg_temp_new(TCG_TYPE_I64); 4529 t1 = tcg_temp_new(TCG_TYPE_I64); 4530 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX); 4531 tcg_gen_extu_i32_i64(t0, cpu_T[0]); 4532 tcg_gen_extu_i32_i64(t1, cpu_T[1]); 4533 tcg_gen_mul_i64(t0, t0, t1); 4534 tcg_gen_trunc_i64_i32(cpu_T[0], t0); 4535 gen_op_mov_reg_T0(OT_LONG, R_EAX); 4536 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); 4537 tcg_gen_shri_i64(t0, t0, 32); 4538 tcg_gen_trunc_i64_i32(cpu_T[0], t0); 4539 gen_op_mov_reg_T0(OT_LONG, R_EDX); 4540 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]); 4541 } 4542 #endif 3698 4543 s->cc_op = CC_OP_MULL; 3699 4544 break; 3700 4545 #ifdef TARGET_X86_64 3701 4546 case OT_QUAD: 3702 gen_op_mulq_EAX_T0();4547 tcg_gen_helper_0_1(helper_mulq_EAX_T0, cpu_T[0]); 3703 4548 s->cc_op = CC_OP_MULQ; 3704 4549 break; … … 3709 4554 switch(ot) { 3710 4555 case OT_BYTE: 3711 gen_op_imulb_AL_T0(); 4556 gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX); 4557 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]); 4558 tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]); 4559 /* XXX: use 32 bit mul which could be faster */ 4560 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]); 4561 gen_op_mov_reg_T0(OT_WORD, R_EAX); 4562 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); 4563 tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]); 4564 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0); 3712 4565 s->cc_op = CC_OP_MULB; 3713 4566 break; 3714 4567 case OT_WORD: 3715 gen_op_imulw_AX_T0(); 4568 gen_op_mov_TN_reg(OT_WORD, 1, R_EAX); 4569 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]); 4570 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]); 4571 /* XXX: use 32 bit mul which could be faster */ 4572 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]); 4573 gen_op_mov_reg_T0(OT_WORD, R_EAX); 4574 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); 4575 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]); 4576 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0); 4577 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16); 4578 gen_op_mov_reg_T0(OT_WORD, R_EDX); 3716 4579 s->cc_op = CC_OP_MULW; 3717 4580 break; 3718 4581 default: 3719 4582 case OT_LONG: 3720 gen_op_imull_EAX_T0(); 4583 #ifdef TARGET_X86_64 4584 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX); 4585 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]); 4586 tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]); 4587 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]); 4588 gen_op_mov_reg_T0(OT_LONG, R_EAX); 4589 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); 4590 tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]); 4591 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0); 4592 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32); 4593 gen_op_mov_reg_T0(OT_LONG, R_EDX); 4594 #else 4595 { 4596 TCGv t0, t1; 4597 t0 = tcg_temp_new(TCG_TYPE_I64); 4598 t1 = tcg_temp_new(TCG_TYPE_I64); 4599 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX); 4600 tcg_gen_ext_i32_i64(t0, cpu_T[0]); 4601 tcg_gen_ext_i32_i64(t1, cpu_T[1]); 4602 tcg_gen_mul_i64(t0, t0, t1); 4603 tcg_gen_trunc_i64_i32(cpu_T[0], t0); 4604 gen_op_mov_reg_T0(OT_LONG, R_EAX); 4605 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); 4606 tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31); 4607 tcg_gen_shri_i64(t0, t0, 32); 4608 tcg_gen_trunc_i64_i32(cpu_T[0], t0); 4609 gen_op_mov_reg_T0(OT_LONG, R_EDX); 4610 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0); 4611 } 4612 #endif 3721 4613 s->cc_op = CC_OP_MULL; 3722 4614 break; 3723 4615 #ifdef TARGET_X86_64 
3724 4616 case OT_QUAD: 3725 gen_op_imulq_EAX_T0();4617 tcg_gen_helper_0_1(helper_imulq_EAX_T0, cpu_T[0]); 3726 4618 s->cc_op = CC_OP_MULQ; 3727 4619 break; … … 3733 4625 case OT_BYTE: 3734 4626 gen_jmp_im(pc_start - s->cs_base); 3735 gen_op_divb_AL_T0();4627 tcg_gen_helper_0_1(helper_divb_AL, cpu_T[0]); 3736 4628 break; 3737 4629 case OT_WORD: 3738 4630 gen_jmp_im(pc_start - s->cs_base); 3739 gen_op_divw_AX_T0();4631 tcg_gen_helper_0_1(helper_divw_AX, cpu_T[0]); 3740 4632 break; 3741 4633 default: 3742 4634 case OT_LONG: 3743 4635 gen_jmp_im(pc_start - s->cs_base); 3744 gen_op_divl_EAX_T0();4636 tcg_gen_helper_0_1(helper_divl_EAX, cpu_T[0]); 3745 4637 break; 3746 4638 #ifdef TARGET_X86_64 3747 4639 case OT_QUAD: 3748 4640 gen_jmp_im(pc_start - s->cs_base); 3749 gen_op_divq_EAX_T0();4641 tcg_gen_helper_0_1(helper_divq_EAX, cpu_T[0]); 3750 4642 break; 3751 4643 #endif … … 3756 4648 case OT_BYTE: 3757 4649 gen_jmp_im(pc_start - s->cs_base); 3758 gen_op_idivb_AL_T0();4650 tcg_gen_helper_0_1(helper_idivb_AL, cpu_T[0]); 3759 4651 break; 3760 4652 case OT_WORD: 3761 4653 gen_jmp_im(pc_start - s->cs_base); 3762 gen_op_idivw_AX_T0();4654 tcg_gen_helper_0_1(helper_idivw_AX, cpu_T[0]); 3763 4655 break; 3764 4656 default: 3765 4657 case OT_LONG: 3766 4658 gen_jmp_im(pc_start - s->cs_base); 3767 gen_op_idivl_EAX_T0();4659 tcg_gen_helper_0_1(helper_idivl_EAX, cpu_T[0]); 3768 4660 break; 3769 4661 #ifdef TARGET_X86_64 3770 4662 case OT_QUAD: 3771 4663 gen_jmp_im(pc_start - s->cs_base); 3772 gen_op_idivq_EAX_T0();4664 tcg_gen_helper_0_1(helper_idivq_EAX, cpu_T[0]); 3773 4665 break; 3774 4666 #endif … … 3810 4702 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 3811 4703 if (op >= 2 && op != 3 && op != 5) 3812 gen_op_ld_T0_A0 [ot + s->mem_index]();4704 gen_op_ld_T0_A0(ot + s->mem_index); 3813 4705 } else { 3814 gen_op_mov_TN_reg [ot][0][rm]();4706 gen_op_mov_TN_reg(ot, 0, rm); 3815 4707 } 3816 4708 … … 3845 4737 break; 3846 4738 case 3: /* lcall Ev */ 3847 gen_op_ld_T1_A0 [ot + s->mem_index]();4739 gen_op_ld_T1_A0(ot + s->mem_index); 3848 4740 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1)); 3849 gen_op_ldu_T0_A0 [OT_WORD + s->mem_index]();4741 gen_op_ldu_T0_A0(OT_WORD + s->mem_index); 3850 4742 do_lcall: 3851 4743 if (s->pe && !s->vm86) { … … 3853 4745 gen_op_set_cc_op(s->cc_op); 3854 4746 gen_jmp_im(pc_start - s->cs_base); 3855 gen_op_lcall_protected_T0_T1(dflag, s->pc - pc_start); 4747 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); 4748 tcg_gen_helper_0_4(helper_lcall_protected, 4749 cpu_tmp2_i32, cpu_T[1], 4750 tcg_const_i32(dflag), 4751 tcg_const_i32(s->pc - pc_start)); 3856 4752 } else { 3857 gen_op_lcall_real_T0_T1(dflag, s->pc - s->cs_base); 4753 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); 4754 tcg_gen_helper_0_4(helper_lcall_real, 4755 cpu_tmp2_i32, cpu_T[1], 4756 tcg_const_i32(dflag), 4757 tcg_const_i32(s->pc - s->cs_base)); 3858 4758 } 3859 4759 gen_eob(s); … … 3866 4766 break; 3867 4767 case 5: /* ljmp Ev */ 3868 gen_op_ld_T1_A0 [ot + s->mem_index]();4768 gen_op_ld_T1_A0(ot + s->mem_index); 3869 4769 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1)); 3870 gen_op_ldu_T0_A0 [OT_WORD + s->mem_index]();4770 gen_op_ldu_T0_A0(OT_WORD + s->mem_index); 3871 4771 do_ljmp: 3872 4772 if (s->pe && !s->vm86) { … … 3874 4774 gen_op_set_cc_op(s->cc_op); 3875 4775 gen_jmp_im(pc_start - s->cs_base); 3876 gen_op_ljmp_protected_T0_T1(s->pc - pc_start); 4776 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); 4777 tcg_gen_helper_0_3(helper_ljmp_protected, 4778 cpu_tmp2_i32, 4779 cpu_T[1], 4780 tcg_const_i32(s->pc - pc_start)); 3877 
4781 } else { 3878 gen_op_movl_seg_T0_vm( offsetof(CPUX86State,segs[R_CS]));4782 gen_op_movl_seg_T0_vm(R_CS); 3879 4783 gen_op_movl_T0_T1(); 3880 4784 gen_op_jmp_T0(); … … 3903 4807 3904 4808 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0); 3905 gen_op_mov_TN_reg [ot][1][reg]();4809 gen_op_mov_TN_reg(ot, 1, reg); 3906 4810 gen_op_testl_T0_T1_cc(); 3907 4811 s->cc_op = CC_OP_LOGICB + ot; … … 3916 4820 val = insn_get(s, ot); 3917 4821 3918 gen_op_mov_TN_reg [ot][0][OR_EAX]();4822 gen_op_mov_TN_reg(ot, 0, OR_EAX); 3919 4823 gen_op_movl_T1_im(val); 3920 4824 gen_op_testl_T0_T1_cc(); … … 3925 4829 #ifdef TARGET_X86_64 3926 4830 if (dflag == 2) { 3927 gen_op_movslq_RAX_EAX(); 4831 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX); 4832 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]); 4833 gen_op_mov_reg_T0(OT_QUAD, R_EAX); 3928 4834 } else 3929 4835 #endif 3930 if (dflag == 1) 3931 gen_op_movswl_EAX_AX(); 3932 else 3933 gen_op_movsbw_AX_AL(); 4836 if (dflag == 1) { 4837 gen_op_mov_TN_reg(OT_WORD, 0, R_EAX); 4838 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]); 4839 gen_op_mov_reg_T0(OT_LONG, R_EAX); 4840 } else { 4841 gen_op_mov_TN_reg(OT_BYTE, 0, R_EAX); 4842 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]); 4843 gen_op_mov_reg_T0(OT_WORD, R_EAX); 4844 } 3934 4845 break; 3935 4846 case 0x99: /* CDQ/CWD */ 3936 4847 #ifdef TARGET_X86_64 3937 4848 if (dflag == 2) { 3938 gen_op_movsqo_RDX_RAX(); 4849 gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX); 4850 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63); 4851 gen_op_mov_reg_T0(OT_QUAD, R_EDX); 3939 4852 } else 3940 4853 #endif 3941 if (dflag == 1) 3942 gen_op_movslq_EDX_EAX(); 3943 else 3944 gen_op_movswl_DX_AX(); 4854 if (dflag == 1) { 4855 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX); 4856 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]); 4857 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31); 4858 gen_op_mov_reg_T0(OT_LONG, R_EDX); 4859 } else { 4860 gen_op_mov_TN_reg(OT_WORD, 0, R_EAX); 4861 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]); 4862 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15); 4863 gen_op_mov_reg_T0(OT_WORD, R_EDX); 4864 } 3945 4865 break; 3946 4866 case 0x1af: /* imul Gv, Ev */ … … 3962 4882 gen_op_movl_T1_im(val); 3963 4883 } else { 3964 gen_op_mov_TN_reg [ot][1][reg]();4884 gen_op_mov_TN_reg(ot, 1, reg); 3965 4885 } 3966 4886 3967 4887 #ifdef TARGET_X86_64 3968 4888 if (ot == OT_QUAD) { 3969 gen_op_imulq_T0_T1();4889 tcg_gen_helper_1_2(helper_imulq_T0_T1, cpu_T[0], cpu_T[0], cpu_T[1]); 3970 4890 } else 3971 4891 #endif 3972 4892 if (ot == OT_LONG) { 3973 gen_op_imull_T0_T1(); 4893 #ifdef TARGET_X86_64 4894 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]); 4895 tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]); 4896 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]); 4897 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); 4898 tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]); 4899 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0); 4900 #else 4901 { 4902 TCGv t0, t1; 4903 t0 = tcg_temp_new(TCG_TYPE_I64); 4904 t1 = tcg_temp_new(TCG_TYPE_I64); 4905 tcg_gen_ext_i32_i64(t0, cpu_T[0]); 4906 tcg_gen_ext_i32_i64(t1, cpu_T[1]); 4907 tcg_gen_mul_i64(t0, t0, t1); 4908 tcg_gen_trunc_i64_i32(cpu_T[0], t0); 4909 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); 4910 tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31); 4911 tcg_gen_shri_i64(t0, t0, 32); 4912 tcg_gen_trunc_i64_i32(cpu_T[1], t0); 4913 tcg_gen_sub_tl(cpu_cc_src, cpu_T[1], cpu_tmp0); 4914 } 4915 #endif 3974 4916 } else { 3975 gen_op_imulw_T0_T1(); 3976 } 3977 gen_op_mov_reg_T0[ot][reg](); 4917 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]); 4918 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]); 4919 /* XXX: use 32 bit mul which could be faster */ 4920 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], 
cpu_T[1]); 4921 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); 4922 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]); 4923 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0); 4924 } 4925 gen_op_mov_reg_T0(ot, reg); 3978 4926 s->cc_op = CC_OP_MULB + ot; 3979 4927 break; … … 3989 4937 if (mod == 3) { 3990 4938 rm = (modrm & 7) | REX_B(s); 3991 gen_op_mov_TN_reg [ot][0][reg]();3992 gen_op_mov_TN_reg [ot][1][rm]();4939 gen_op_mov_TN_reg(ot, 0, reg); 4940 gen_op_mov_TN_reg(ot, 1, rm); 3993 4941 gen_op_addl_T0_T1(); 3994 gen_op_mov_reg_T1 [ot][reg]();3995 gen_op_mov_reg_T0 [ot][rm]();4942 gen_op_mov_reg_T1(ot, reg); 4943 gen_op_mov_reg_T0(ot, rm); 3996 4944 } else { 3997 4945 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 3998 gen_op_mov_TN_reg [ot][0][reg]();3999 gen_op_ld_T1_A0 [ot + s->mem_index]();4946 gen_op_mov_TN_reg(ot, 0, reg); 4947 gen_op_ld_T1_A0(ot + s->mem_index); 4000 4948 gen_op_addl_T0_T1(); 4001 gen_op_st_T0_A0 [ot + s->mem_index]();4002 gen_op_mov_reg_T1 [ot][reg]();4949 gen_op_st_T0_A0(ot + s->mem_index); 4950 gen_op_mov_reg_T1(ot, reg); 4003 4951 } 4004 4952 gen_op_update2_cc(); … … 4007 4955 case 0x1b0: 4008 4956 case 0x1b1: /* cmpxchg Ev, Gv */ 4009 if ((b & 1) == 0) 4010 ot = OT_BYTE; 4011 else 4012 ot = dflag + OT_WORD; 4013 modrm = ldub_code(s->pc++); 4014 reg = ((modrm >> 3) & 7) | rex_r; 4015 mod = (modrm >> 6) & 3; 4016 gen_op_mov_TN_reg[ot][1][reg](); 4017 if (mod == 3) { 4018 rm = (modrm & 7) | REX_B(s); 4019 gen_op_mov_TN_reg[ot][0][rm](); 4020 gen_op_cmpxchg_T0_T1_EAX_cc[ot](); 4021 gen_op_mov_reg_T0[ot][rm](); 4022 } else { 4023 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 4024 gen_op_ld_T0_A0[ot + s->mem_index](); 4025 gen_op_cmpxchg_mem_T0_T1_EAX_cc[ot + s->mem_index](); 4026 } 4027 s->cc_op = CC_OP_SUBB + ot; 4957 { 4958 int label1, label2; 4959 TCGv t0, t1, t2, a0; 4960 4961 if ((b & 1) == 0) 4962 ot = OT_BYTE; 4963 else 4964 ot = dflag + OT_WORD; 4965 modrm = ldub_code(s->pc++); 4966 reg = ((modrm >> 3) & 7) | rex_r; 4967 mod = (modrm >> 6) & 3; 4968 t0 = tcg_temp_local_new(TCG_TYPE_TL); 4969 t1 = tcg_temp_local_new(TCG_TYPE_TL); 4970 t2 = tcg_temp_local_new(TCG_TYPE_TL); 4971 a0 = tcg_temp_local_new(TCG_TYPE_TL); 4972 gen_op_mov_v_reg(ot, t1, reg); 4973 if (mod == 3) { 4974 rm = (modrm & 7) | REX_B(s); 4975 gen_op_mov_v_reg(ot, t0, rm); 4976 } else { 4977 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 4978 tcg_gen_mov_tl(a0, cpu_A0); 4979 gen_op_ld_v(ot + s->mem_index, t0, a0); 4980 rm = 0; /* avoid warning */ 4981 } 4982 label1 = gen_new_label(); 4983 tcg_gen_ld_tl(t2, cpu_env, offsetof(CPUState, regs[R_EAX])); 4984 tcg_gen_sub_tl(t2, t2, t0); 4985 gen_extu(ot, t2); 4986 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1); 4987 if (mod == 3) { 4988 label2 = gen_new_label(); 4989 gen_op_mov_reg_v(ot, R_EAX, t0); 4990 tcg_gen_br(label2); 4991 gen_set_label(label1); 4992 gen_op_mov_reg_v(ot, rm, t1); 4993 gen_set_label(label2); 4994 } else { 4995 tcg_gen_mov_tl(t1, t0); 4996 gen_op_mov_reg_v(ot, R_EAX, t0); 4997 gen_set_label(label1); 4998 /* always store */ 4999 gen_op_st_v(ot + s->mem_index, t1, a0); 5000 } 5001 tcg_gen_mov_tl(cpu_cc_src, t0); 5002 tcg_gen_mov_tl(cpu_cc_dst, t2); 5003 s->cc_op = CC_OP_SUBB + ot; 5004 tcg_temp_free(t0); 5005 tcg_temp_free(t1); 5006 tcg_temp_free(t2); 5007 tcg_temp_free(a0); 5008 } 4028 5009 break; 4029 5010 case 0x1c7: /* cmpxchg8b */ … … 4032 5013 if ((mod == 3) || ((modrm & 0x38) != 0x8)) 4033 5014 goto illegal_op; 4034 if (s->cc_op != CC_OP_DYNAMIC) 4035 gen_op_set_cc_op(s->cc_op); 4036 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 4037 
gen_op_cmpxchg8b(); 5015 #ifdef TARGET_X86_64 5016 if (dflag == 2) { 5017 if (!(s->cpuid_ext_features & CPUID_EXT_CX16)) 5018 goto illegal_op; 5019 gen_jmp_im(pc_start - s->cs_base); 5020 if (s->cc_op != CC_OP_DYNAMIC) 5021 gen_op_set_cc_op(s->cc_op); 5022 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 5023 tcg_gen_helper_0_1(helper_cmpxchg16b, cpu_A0); 5024 } else 5025 #endif 5026 { 5027 if (!(s->cpuid_features & CPUID_CX8)) 5028 goto illegal_op; 5029 gen_jmp_im(pc_start - s->cs_base); 5030 if (s->cc_op != CC_OP_DYNAMIC) 5031 gen_op_set_cc_op(s->cc_op); 5032 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 5033 tcg_gen_helper_0_1(helper_cmpxchg8b, cpu_A0); 5034 } 4038 5035 s->cc_op = CC_OP_EFLAGS; 4039 5036 break; … … 4042 5039 /* push/pop */ 4043 5040 case 0x50 ... 0x57: /* push */ 4044 gen_op_mov_TN_reg [OT_LONG][0][(b & 7) | REX_B(s)]();5041 gen_op_mov_TN_reg(OT_LONG, 0, (b & 7) | REX_B(s)); 4045 5042 gen_push_T0(s); 4046 5043 break; … … 4054 5051 /* NOTE: order is important for pop %sp */ 4055 5052 gen_pop_update(s); 4056 gen_op_mov_reg_T0 [ot][(b & 7) | REX_B(s)]();5053 gen_op_mov_reg_T0(ot, (b & 7) | REX_B(s)); 4057 5054 break; 4058 5055 case 0x60: /* pusha */ … … 4093 5090 gen_pop_update(s); 4094 5091 rm = (modrm & 7) | REX_B(s); 4095 gen_op_mov_reg_T0 [ot][rm]();5092 gen_op_mov_reg_T0(ot, rm); 4096 5093 } else { 4097 5094 /* NOTE: order is important too for MMU exceptions */ … … 4114 5111 /* XXX: exception not precise (ESP is updated before potential exception) */ 4115 5112 if (CODE64(s)) { 4116 gen_op_mov_TN_reg [OT_QUAD][0][R_EBP]();4117 gen_op_mov_reg_T0 [OT_QUAD][R_ESP]();5113 gen_op_mov_TN_reg(OT_QUAD, 0, R_EBP); 5114 gen_op_mov_reg_T0(OT_QUAD, R_ESP); 4118 5115 } else if (s->ss32) { 4119 gen_op_mov_TN_reg [OT_LONG][0][R_EBP]();4120 gen_op_mov_reg_T0 [OT_LONG][R_ESP]();5116 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP); 5117 gen_op_mov_reg_T0(OT_LONG, R_ESP); 4121 5118 } else { 4122 gen_op_mov_TN_reg [OT_WORD][0][R_EBP]();4123 gen_op_mov_reg_T0 [OT_WORD][R_ESP]();5119 gen_op_mov_TN_reg(OT_WORD, 0, R_EBP); 5120 gen_op_mov_reg_T0(OT_WORD, R_ESP); 4124 5121 } 4125 5122 gen_pop_T0(s); … … 4129 5126 ot = dflag + OT_WORD; 4130 5127 } 4131 gen_op_mov_reg_T0 [ot][R_EBP]();5128 gen_op_mov_reg_T0(ot, R_EBP); 4132 5129 gen_pop_update(s); 4133 5130 break; … … 4160 5157 _first_ does it */ 4161 5158 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK)) 4162 gen_op_set_inhibit_irq();5159 tcg_gen_helper_0_0(helper_set_inhibit_irq); 4163 5160 s->tf = 0; 4164 5161 } … … 4208 5205 gen_op_movl_T0_im(val); 4209 5206 if (mod != 3) 4210 gen_op_st_T0_A0 [ot + s->mem_index]();5207 gen_op_st_T0_A0(ot + s->mem_index); 4211 5208 else 4212 gen_op_mov_reg_T0 [ot][(modrm & 7) | REX_B(s)]();5209 gen_op_mov_reg_T0(ot, (modrm & 7) | REX_B(s)); 4213 5210 break; 4214 5211 case 0x8a: … … 4226 5223 4227 5224 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0); 4228 gen_op_mov_reg_T0 [ot][reg]();5225 gen_op_mov_reg_T0(ot, reg); 4229 5226 break; 4230 5227 case 0x8e: /* mov seg, Gv */ … … 4240 5237 _first_ does it */ 4241 5238 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK)) 4242 gen_op_set_inhibit_irq();5239 tcg_gen_helper_0_0(helper_set_inhibit_irq); 4243 5240 s->tf = 0; 4244 5241 } … … 4278 5275 4279 5276 if (mod == 3) { 4280 gen_op_mov_TN_reg [ot][0][rm]();5277 gen_op_mov_TN_reg(ot, 0, rm); 4281 5278 switch(ot | (b & 8)) { 4282 5279 case OT_BYTE: 4283 gen_op_movzbl_T0_T0();5280 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]); 4284 5281 break; 4285 5282 case OT_BYTE | 8: 4286 gen_op_movsbl_T0_T0();5283 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]); 4287 5284 break; 4288 
5285 case OT_WORD: 4289 gen_op_movzwl_T0_T0();5286 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]); 4290 5287 break; 4291 5288 default: 4292 5289 case OT_WORD | 8: 4293 gen_op_movswl_T0_T0();5290 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]); 4294 5291 break; 4295 5292 } 4296 gen_op_mov_reg_T0 [d_ot][reg]();5293 gen_op_mov_reg_T0(d_ot, reg); 4297 5294 } else { 4298 5295 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 4299 5296 if (b & 8) { 4300 gen_op_lds_T0_A0 [ot + s->mem_index]();5297 gen_op_lds_T0_A0(ot + s->mem_index); 4301 5298 } else { 4302 gen_op_ldu_T0_A0 [ot + s->mem_index]();5299 gen_op_ldu_T0_A0(ot + s->mem_index); 4303 5300 } 4304 gen_op_mov_reg_T0 [d_ot][reg]();5301 gen_op_mov_reg_T0(d_ot, reg); 4305 5302 } 4306 5303 } … … 4320 5317 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 4321 5318 s->addseg = val; 4322 gen_op_mov_reg_A0 [ot - OT_WORD][reg]();5319 gen_op_mov_reg_A0(ot - OT_WORD, reg); 4323 5320 break; 4324 5321 … … 4338 5335 offset_addr = ldq_code(s->pc); 4339 5336 s->pc += 8; 4340 if (offset_addr == (int32_t)offset_addr) 4341 gen_op_movq_A0_im(offset_addr); 4342 else 4343 gen_op_movq_A0_im64(offset_addr >> 32, offset_addr); 5337 gen_op_movq_A0_im(offset_addr); 4344 5338 } else 4345 5339 #endif … … 4354 5348 gen_add_A0_ds_seg(s); 4355 5349 if ((b & 2) == 0) { 4356 gen_op_ld_T0_A0 [ot + s->mem_index]();4357 gen_op_mov_reg_T0 [ot][R_EAX]();5350 gen_op_ld_T0_A0(ot + s->mem_index); 5351 gen_op_mov_reg_T0(ot, R_EAX); 4358 5352 } else { 4359 gen_op_mov_TN_reg [ot][0][R_EAX]();4360 gen_op_st_T0_A0 [ot + s->mem_index]();5353 gen_op_mov_TN_reg(ot, 0, R_EAX); 5354 gen_op_st_T0_A0(ot + s->mem_index); 4361 5355 } 4362 5356 } … … 4365 5359 #ifdef TARGET_X86_64 4366 5360 if (s->aflag == 2) { 4367 gen_op_movq_A0_reg[R_EBX](); 4368 gen_op_addq_A0_AL(); 5361 gen_op_movq_A0_reg(R_EBX); 5362 gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX); 5363 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff); 5364 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]); 4369 5365 } else 4370 5366 #endif 4371 5367 { 4372 gen_op_movl_A0_reg[R_EBX](); 4373 gen_op_addl_A0_AL(); 5368 gen_op_movl_A0_reg(R_EBX); 5369 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX); 5370 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff); 5371 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]); 4374 5372 if (s->aflag == 0) 4375 5373 gen_op_andl_A0_ffff(); 5374 else 5375 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff); 4376 5376 } 4377 5377 gen_add_A0_ds_seg(s); 4378 gen_op_ldu_T0_A0 [OT_BYTE + s->mem_index]();4379 gen_op_mov_reg_T0 [OT_BYTE][R_EAX]();5378 gen_op_ldu_T0_A0(OT_BYTE + s->mem_index); 5379 gen_op_mov_reg_T0(OT_BYTE, R_EAX); 4380 5380 break; 4381 5381 case 0xb0 ... 0xb7: /* mov R, Ib */ 4382 5382 val = insn_get(s, OT_BYTE); 4383 5383 gen_op_movl_T0_im(val); 4384 gen_op_mov_reg_T0 [OT_BYTE][(b & 7) | REX_B(s)]();5384 gen_op_mov_reg_T0(OT_BYTE, (b & 7) | REX_B(s)); 4385 5385 break; 4386 5386 case 0xb8 ... 
0xbf: /* mov R, Iv */ … … 4393 5393 reg = (b & 7) | REX_B(s); 4394 5394 gen_movtl_T0_im(tmp); 4395 gen_op_mov_reg_T0 [OT_QUAD][reg]();5395 gen_op_mov_reg_T0(OT_QUAD, reg); 4396 5396 } else 4397 5397 #endif … … 4401 5401 reg = (b & 7) | REX_B(s); 4402 5402 gen_op_movl_T0_im(val); 4403 gen_op_mov_reg_T0 [ot][reg]();5403 gen_op_mov_reg_T0(ot, reg); 4404 5404 } 4405 5405 break; … … 4422 5422 rm = (modrm & 7) | REX_B(s); 4423 5423 do_xchg_reg: 4424 gen_op_mov_TN_reg [ot][0][reg]();4425 gen_op_mov_TN_reg [ot][1][rm]();4426 gen_op_mov_reg_T0 [ot][rm]();4427 gen_op_mov_reg_T1 [ot][reg]();5424 gen_op_mov_TN_reg(ot, 0, reg); 5425 gen_op_mov_TN_reg(ot, 1, rm); 5426 gen_op_mov_reg_T0(ot, rm); 5427 gen_op_mov_reg_T1(ot, reg); 4428 5428 } else { 4429 5429 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 4430 gen_op_mov_TN_reg [ot][0][reg]();5430 gen_op_mov_TN_reg(ot, 0, reg); 4431 5431 /* for xchg, lock is implicit */ 4432 5432 if (!(prefixes & PREFIX_LOCK)) 4433 gen_op_lock();4434 gen_op_ld_T1_A0 [ot + s->mem_index]();4435 gen_op_st_T0_A0 [ot + s->mem_index]();5433 tcg_gen_helper_0_0(helper_lock); 5434 gen_op_ld_T1_A0(ot + s->mem_index); 5435 gen_op_st_T0_A0(ot + s->mem_index); 4436 5436 if (!(prefixes & PREFIX_LOCK)) 4437 gen_op_unlock();4438 gen_op_mov_reg_T1 [ot][reg]();5437 tcg_gen_helper_0_0(helper_unlock); 5438 gen_op_mov_reg_T1(ot, reg); 4439 5439 } 4440 5440 break; … … 4465 5465 goto illegal_op; 4466 5466 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 4467 gen_op_ld_T1_A0 [ot + s->mem_index]();5467 gen_op_ld_T1_A0(ot + s->mem_index); 4468 5468 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1)); 4469 5469 /* load the segment first to handle exceptions properly */ 4470 gen_op_ldu_T0_A0 [OT_WORD + s->mem_index]();5470 gen_op_ldu_T0_A0(OT_WORD + s->mem_index); 4471 5471 gen_movl_seg_T0(s, op, pc_start - s->cs_base); 4472 5472 /* then put the data */ 4473 gen_op_mov_reg_T1 [ot][reg]();5473 gen_op_mov_reg_T1(ot, reg); 4474 5474 if (s->is_jmp) { 4475 5475 gen_jmp_im(s->pc - s->cs_base); … … 4548 5548 rm = (modrm & 7) | REX_B(s); 4549 5549 reg = ((modrm >> 3) & 7) | rex_r; 4550 4551 5550 if (mod != 3) { 4552 5551 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 4553 gen_op_ld_T0_A0[ot + s->mem_index]();5552 opreg = OR_TMP0; 4554 5553 } else { 4555 gen_op_mov_TN_reg[ot][0][rm]();4556 } 4557 gen_op_mov_TN_reg [ot][1][reg]();5554 opreg = rm; 5555 } 5556 gen_op_mov_TN_reg(ot, 1, reg); 4558 5557 4559 5558 if (shift) { 4560 5559 val = ldub_code(s->pc++); 4561 if (ot == OT_QUAD) 4562 val &= 0x3f; 4563 else 4564 val &= 0x1f; 4565 if (val) { 4566 if (mod == 3) 4567 gen_op_shiftd_T0_T1_im_cc[ot][op](val); 4568 else 4569 gen_op_shiftd_mem_T0_T1_im_cc[ot + s->mem_index][op](val); 4570 if (op == 0 && ot != OT_WORD) 4571 s->cc_op = CC_OP_SHLB + ot; 4572 else 4573 s->cc_op = CC_OP_SARB + ot; 4574 } 5560 tcg_gen_movi_tl(cpu_T3, val); 4575 5561 } else { 4576 if (s->cc_op != CC_OP_DYNAMIC) 4577 gen_op_set_cc_op(s->cc_op); 4578 if (mod == 3) 4579 gen_op_shiftd_T0_T1_ECX_cc[ot][op](); 4580 else 4581 gen_op_shiftd_mem_T0_T1_ECX_cc[ot + s->mem_index][op](); 4582 s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */ 4583 } 4584 if (mod == 3) { 4585 gen_op_mov_reg_T0[ot][rm](); 4586 } 5562 tcg_gen_ld_tl(cpu_T3, cpu_env, offsetof(CPUState, regs[R_ECX])); 5563 } 5564 gen_shiftd_rm_T1_T3(s, ot, opreg, op); 4587 5565 break; 4588 5566 … … 4614 5592 switch(op >> 4) { 4615 5593 case 0: 4616 gen_op_flds_FT0_A0(); 5594 gen_op_ld_T0_A0(OT_LONG + s->mem_index); 5595 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); 5596 
tcg_gen_helper_0_1(helper_flds_FT0, cpu_tmp2_i32); 4617 5597 break; 4618 5598 case 1: 4619 gen_op_fildl_FT0_A0(); 5599 gen_op_ld_T0_A0(OT_LONG + s->mem_index); 5600 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); 5601 tcg_gen_helper_0_1(helper_fildl_FT0, cpu_tmp2_i32); 4620 5602 break; 4621 5603 case 2: 4622 gen_op_fldl_FT0_A0(); 5604 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, 5605 (s->mem_index >> 2) - 1); 5606 tcg_gen_helper_0_1(helper_fldl_FT0, cpu_tmp1_i64); 4623 5607 break; 4624 5608 case 3: 4625 5609 default: 4626 gen_op_fild_FT0_A0(); 5610 gen_op_lds_T0_A0(OT_WORD + s->mem_index); 5611 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); 5612 tcg_gen_helper_0_1(helper_fildl_FT0, cpu_tmp2_i32); 4627 5613 break; 4628 5614 } 4629 5615 4630 gen_op_fp_arith_ST0_FT0[op1]();5616 tcg_gen_helper_0_0(helper_fp_arith_ST0_FT0[op1]); 4631 5617 if (op1 == 3) { 4632 5618 /* fcomp needs pop */ 4633 gen_op_fpop();5619 tcg_gen_helper_0_0(helper_fpop); 4634 5620 } 4635 5621 } … … 4645 5631 switch(op >> 4) { 4646 5632 case 0: 4647 gen_op_flds_ST0_A0(); 5633 gen_op_ld_T0_A0(OT_LONG + s->mem_index); 5634 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); 5635 tcg_gen_helper_0_1(helper_flds_ST0, cpu_tmp2_i32); 4648 5636 break; 4649 5637 case 1: 4650 gen_op_fildl_ST0_A0(); 5638 gen_op_ld_T0_A0(OT_LONG + s->mem_index); 5639 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); 5640 tcg_gen_helper_0_1(helper_fildl_ST0, cpu_tmp2_i32); 4651 5641 break; 4652 5642 case 2: 4653 gen_op_fldl_ST0_A0(); 5643 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, 5644 (s->mem_index >> 2) - 1); 5645 tcg_gen_helper_0_1(helper_fldl_ST0, cpu_tmp1_i64); 4654 5646 break; 4655 5647 case 3: 4656 5648 default: 4657 gen_op_fild_ST0_A0(); 5649 gen_op_lds_T0_A0(OT_WORD + s->mem_index); 5650 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); 5651 tcg_gen_helper_0_1(helper_fildl_ST0, cpu_tmp2_i32); 4658 5652 break; 4659 5653 } 4660 5654 break; 4661 5655 case 1: 5656 /* XXX: the corresponding CPUID bit must be tested ! 
*/ 4662 5657 switch(op >> 4) { 4663 5658 case 1: 4664 gen_op_fisttl_ST0_A0(); 5659 tcg_gen_helper_1_0(helper_fisttl_ST0, cpu_tmp2_i32); 5660 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); 5661 gen_op_st_T0_A0(OT_LONG + s->mem_index); 4665 5662 break; 4666 5663 case 2: 4667 gen_op_fisttll_ST0_A0(); 5664 tcg_gen_helper_1_0(helper_fisttll_ST0, cpu_tmp1_i64); 5665 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, 5666 (s->mem_index >> 2) - 1); 4668 5667 break; 4669 5668 case 3: 4670 5669 default: 4671 gen_op_fistt_ST0_A0(); 5670 tcg_gen_helper_1_0(helper_fistt_ST0, cpu_tmp2_i32); 5671 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); 5672 gen_op_st_T0_A0(OT_WORD + s->mem_index); 5673 break; 4672 5674 } 4673 gen_op_fpop();5675 tcg_gen_helper_0_0(helper_fpop); 4674 5676 break; 4675 5677 default: 4676 5678 switch(op >> 4) { 4677 5679 case 0: 4678 gen_op_fsts_ST0_A0(); 5680 tcg_gen_helper_1_0(helper_fsts_ST0, cpu_tmp2_i32); 5681 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); 5682 gen_op_st_T0_A0(OT_LONG + s->mem_index); 4679 5683 break; 4680 5684 case 1: 4681 gen_op_fistl_ST0_A0(); 5685 tcg_gen_helper_1_0(helper_fistl_ST0, cpu_tmp2_i32); 5686 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); 5687 gen_op_st_T0_A0(OT_LONG + s->mem_index); 4682 5688 break; 4683 5689 case 2: 4684 gen_op_fstl_ST0_A0(); 5690 tcg_gen_helper_1_0(helper_fstl_ST0, cpu_tmp1_i64); 5691 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, 5692 (s->mem_index >> 2) - 1); 4685 5693 break; 4686 5694 case 3: 4687 5695 default: 4688 gen_op_fist_ST0_A0(); 5696 tcg_gen_helper_1_0(helper_fist_ST0, cpu_tmp2_i32); 5697 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); 5698 gen_op_st_T0_A0(OT_WORD + s->mem_index); 4689 5699 break; 4690 5700 } 4691 5701 if ((op & 7) == 3) 4692 gen_op_fpop();5702 tcg_gen_helper_0_0(helper_fpop); 4693 5703 break; 4694 5704 } 4695 5705 break; 4696 5706 case 0x0c: /* fldenv mem */ 4697 gen_op_fldenv_A0(s->dflag); 5707 if (s->cc_op != CC_OP_DYNAMIC) 5708 gen_op_set_cc_op(s->cc_op); 5709 gen_jmp_im(pc_start - s->cs_base); 5710 tcg_gen_helper_0_2(helper_fldenv, 5711 cpu_A0, tcg_const_i32(s->dflag)); 4698 5712 break; 4699 5713 case 0x0d: /* fldcw mem */ 4700 gen_op_fldcw_A0(); 5714 gen_op_ld_T0_A0(OT_WORD + s->mem_index); 5715 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); 5716 tcg_gen_helper_0_1(helper_fldcw, cpu_tmp2_i32); 4701 5717 break; 4702 5718 case 0x0e: /* fnstenv mem */ 4703 gen_op_fnstenv_A0(s->dflag); 5719 if (s->cc_op != CC_OP_DYNAMIC) 5720 gen_op_set_cc_op(s->cc_op); 5721 gen_jmp_im(pc_start - s->cs_base); 5722 tcg_gen_helper_0_2(helper_fstenv, 5723 cpu_A0, tcg_const_i32(s->dflag)); 4704 5724 break; 4705 5725 case 0x0f: /* fnstcw mem */ 4706 gen_op_fnstcw_A0(); 5726 tcg_gen_helper_1_0(helper_fnstcw, cpu_tmp2_i32); 5727 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); 5728 gen_op_st_T0_A0(OT_WORD + s->mem_index); 4707 5729 break; 4708 5730 case 0x1d: /* fldt mem */ 4709 gen_op_fldt_ST0_A0(); 5731 if (s->cc_op != CC_OP_DYNAMIC) 5732 gen_op_set_cc_op(s->cc_op); 5733 gen_jmp_im(pc_start - s->cs_base); 5734 tcg_gen_helper_0_1(helper_fldt_ST0, cpu_A0); 4710 5735 break; 4711 5736 case 0x1f: /* fstpt mem */ 4712 gen_op_fstt_ST0_A0(); 4713 gen_op_fpop(); 5737 if (s->cc_op != CC_OP_DYNAMIC) 5738 gen_op_set_cc_op(s->cc_op); 5739 gen_jmp_im(pc_start - s->cs_base); 5740 tcg_gen_helper_0_1(helper_fstt_ST0, cpu_A0); 5741 tcg_gen_helper_0_0(helper_fpop); 4714 5742 break; 4715 5743 case 0x2c: /* frstor mem */ 4716 gen_op_frstor_A0(s->dflag); 5744 if (s->cc_op != CC_OP_DYNAMIC) 5745 gen_op_set_cc_op(s->cc_op); 5746 gen_jmp_im(pc_start - s->cs_base); 5747 
tcg_gen_helper_0_2(helper_frstor, 5748 cpu_A0, tcg_const_i32(s->dflag)); 4717 5749 break; 4718 5750 case 0x2e: /* fnsave mem */ 4719 gen_op_fnsave_A0(s->dflag); 5751 if (s->cc_op != CC_OP_DYNAMIC) 5752 gen_op_set_cc_op(s->cc_op); 5753 gen_jmp_im(pc_start - s->cs_base); 5754 tcg_gen_helper_0_2(helper_fsave, 5755 cpu_A0, tcg_const_i32(s->dflag)); 4720 5756 break; 4721 5757 case 0x2f: /* fnstsw mem */ 4722 gen_op_fnstsw_A0(); 5758 tcg_gen_helper_1_0(helper_fnstsw, cpu_tmp2_i32); 5759 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); 5760 gen_op_st_T0_A0(OT_WORD + s->mem_index); 4723 5761 break; 4724 5762 case 0x3c: /* fbld */ 4725 gen_op_fbld_ST0_A0(); 5763 if (s->cc_op != CC_OP_DYNAMIC) 5764 gen_op_set_cc_op(s->cc_op); 5765 gen_jmp_im(pc_start - s->cs_base); 5766 tcg_gen_helper_0_1(helper_fbld_ST0, cpu_A0); 4726 5767 break; 4727 5768 case 0x3e: /* fbstp */ 4728 gen_op_fbst_ST0_A0(); 4729 gen_op_fpop(); 5769 if (s->cc_op != CC_OP_DYNAMIC) 5770 gen_op_set_cc_op(s->cc_op); 5771 gen_jmp_im(pc_start - s->cs_base); 5772 tcg_gen_helper_0_1(helper_fbst_ST0, cpu_A0); 5773 tcg_gen_helper_0_0(helper_fpop); 4730 5774 break; 4731 5775 case 0x3d: /* fildll */ 4732 gen_op_fildll_ST0_A0(); 5776 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, 5777 (s->mem_index >> 2) - 1); 5778 tcg_gen_helper_0_1(helper_fildll_ST0, cpu_tmp1_i64); 4733 5779 break; 4734 5780 case 0x3f: /* fistpll */ 4735 gen_op_fistll_ST0_A0(); 4736 gen_op_fpop(); 5781 tcg_gen_helper_1_0(helper_fistll_ST0, cpu_tmp1_i64); 5782 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, 5783 (s->mem_index >> 2) - 1); 5784 tcg_gen_helper_0_0(helper_fpop); 4737 5785 break; 4738 5786 default: … … 4745 5793 switch(op) { 4746 5794 case 0x08: /* fld sti */ 4747 gen_op_fpush();4748 gen_op_fmov_ST0_STN((opreg + 1) & 7);5795 tcg_gen_helper_0_0(helper_fpush); 5796 tcg_gen_helper_0_1(helper_fmov_ST0_STN, tcg_const_i32((opreg + 1) & 7)); 4749 5797 break; 4750 5798 case 0x09: /* fxchg sti */ 4751 5799 case 0x29: /* fxchg4 sti, undocumented op */ 4752 5800 case 0x39: /* fxchg7 sti, undocumented op */ 4753 gen_op_fxchg_ST0_STN(opreg);5801 tcg_gen_helper_0_1(helper_fxchg_ST0_STN, tcg_const_i32(opreg)); 4754 5802 break; 4755 5803 case 0x0a: /* grp d9/2 */ … … 4760 5808 gen_op_set_cc_op(s->cc_op); 4761 5809 gen_jmp_im(pc_start - s->cs_base); 4762 gen_op_fwait();5810 tcg_gen_helper_0_0(helper_fwait); 4763 5811 break; 4764 5812 default: … … 4769 5817 switch(rm) { 4770 5818 case 0: /* fchs */ 4771 gen_op_fchs_ST0();5819 tcg_gen_helper_0_0(helper_fchs_ST0); 4772 5820 break; 4773 5821 case 1: /* fabs */ 4774 gen_op_fabs_ST0();5822 tcg_gen_helper_0_0(helper_fabs_ST0); 4775 5823 break; 4776 5824 case 4: /* ftst */ 4777 gen_op_fldz_FT0();4778 gen_op_fcom_ST0_FT0();5825 tcg_gen_helper_0_0(helper_fldz_FT0); 5826 tcg_gen_helper_0_0(helper_fcom_ST0_FT0); 4779 5827 break; 4780 5828 case 5: /* fxam */ 4781 gen_op_fxam_ST0();5829 tcg_gen_helper_0_0(helper_fxam_ST0); 4782 5830 break; 4783 5831 default: … … 4789 5837 switch(rm) { 4790 5838 case 0: 4791 gen_op_fpush();4792 gen_op_fld1_ST0();5839 tcg_gen_helper_0_0(helper_fpush); 5840 tcg_gen_helper_0_0(helper_fld1_ST0); 4793 5841 break; 4794 5842 case 1: 4795 gen_op_fpush();4796 gen_op_fldl2t_ST0();5843 tcg_gen_helper_0_0(helper_fpush); 5844 tcg_gen_helper_0_0(helper_fldl2t_ST0); 4797 5845 break; 4798 5846 case 2: 4799 gen_op_fpush();4800 gen_op_fldl2e_ST0();5847 tcg_gen_helper_0_0(helper_fpush); 5848 tcg_gen_helper_0_0(helper_fldl2e_ST0); 4801 5849 break; 4802 5850 case 3: 4803 gen_op_fpush();4804 gen_op_fldpi_ST0();5851 tcg_gen_helper_0_0(helper_fpush); 5852 
tcg_gen_helper_0_0(helper_fldpi_ST0); 4805 5853 break; 4806 5854 case 4: 4807 gen_op_fpush();4808 gen_op_fldlg2_ST0();5855 tcg_gen_helper_0_0(helper_fpush); 5856 tcg_gen_helper_0_0(helper_fldlg2_ST0); 4809 5857 break; 4810 5858 case 5: 4811 gen_op_fpush();4812 gen_op_fldln2_ST0();5859 tcg_gen_helper_0_0(helper_fpush); 5860 tcg_gen_helper_0_0(helper_fldln2_ST0); 4813 5861 break; 4814 5862 case 6: 4815 gen_op_fpush();4816 gen_op_fldz_ST0();5863 tcg_gen_helper_0_0(helper_fpush); 5864 tcg_gen_helper_0_0(helper_fldz_ST0); 4817 5865 break; 4818 5866 default: … … 4824 5872 switch(rm) { 4825 5873 case 0: /* f2xm1 */ 4826 gen_op_f2xm1();5874 tcg_gen_helper_0_0(helper_f2xm1); 4827 5875 break; 4828 5876 case 1: /* fyl2x */ 4829 gen_op_fyl2x();5877 tcg_gen_helper_0_0(helper_fyl2x); 4830 5878 break; 4831 5879 case 2: /* fptan */ 4832 gen_op_fptan();5880 tcg_gen_helper_0_0(helper_fptan); 4833 5881 break; 4834 5882 case 3: /* fpatan */ 4835 gen_op_fpatan();5883 tcg_gen_helper_0_0(helper_fpatan); 4836 5884 break; 4837 5885 case 4: /* fxtract */ 4838 gen_op_fxtract();5886 tcg_gen_helper_0_0(helper_fxtract); 4839 5887 break; 4840 5888 case 5: /* fprem1 */ 4841 gen_op_fprem1();5889 tcg_gen_helper_0_0(helper_fprem1); 4842 5890 break; 4843 5891 case 6: /* fdecstp */ 4844 gen_op_fdecstp();5892 tcg_gen_helper_0_0(helper_fdecstp); 4845 5893 break; 4846 5894 default: 4847 5895 case 7: /* fincstp */ 4848 gen_op_fincstp();5896 tcg_gen_helper_0_0(helper_fincstp); 4849 5897 break; 4850 5898 } … … 4853 5901 switch(rm) { 4854 5902 case 0: /* fprem */ 4855 gen_op_fprem();5903 tcg_gen_helper_0_0(helper_fprem); 4856 5904 break; 4857 5905 case 1: /* fyl2xp1 */ 4858 gen_op_fyl2xp1();5906 tcg_gen_helper_0_0(helper_fyl2xp1); 4859 5907 break; 4860 5908 case 2: /* fsqrt */ 4861 gen_op_fsqrt();5909 tcg_gen_helper_0_0(helper_fsqrt); 4862 5910 break; 4863 5911 case 3: /* fsincos */ 4864 gen_op_fsincos();5912 tcg_gen_helper_0_0(helper_fsincos); 4865 5913 break; 4866 5914 case 5: /* fscale */ 4867 gen_op_fscale();5915 tcg_gen_helper_0_0(helper_fscale); 4868 5916 break; 4869 5917 case 4: /* frndint */ 4870 gen_op_frndint();5918 tcg_gen_helper_0_0(helper_frndint); 4871 5919 break; 4872 5920 case 6: /* fsin */ 4873 gen_op_fsin();5921 tcg_gen_helper_0_0(helper_fsin); 4874 5922 break; 4875 5923 default: 4876 5924 case 7: /* fcos */ 4877 gen_op_fcos();5925 tcg_gen_helper_0_0(helper_fcos); 4878 5926 break; 4879 5927 } … … 4887 5935 op1 = op & 7; 4888 5936 if (op >= 0x20) { 4889 gen_op_fp_arith_STN_ST0[op1](opreg);5937 tcg_gen_helper_0_1(helper_fp_arith_STN_ST0[op1], tcg_const_i32(opreg)); 4890 5938 if (op >= 0x30) 4891 gen_op_fpop();5939 tcg_gen_helper_0_0(helper_fpop); 4892 5940 } else { 4893 gen_op_fmov_FT0_STN(opreg);4894 gen_op_fp_arith_ST0_FT0[op1]();5941 tcg_gen_helper_0_1(helper_fmov_FT0_STN, tcg_const_i32(opreg)); 5942 tcg_gen_helper_0_0(helper_fp_arith_ST0_FT0[op1]); 4895 5943 } 4896 5944 } … … 4898 5946 case 0x02: /* fcom */ 4899 5947 case 0x22: /* fcom2, undocumented op */ 4900 gen_op_fmov_FT0_STN(opreg);4901 gen_op_fcom_ST0_FT0();5948 tcg_gen_helper_0_1(helper_fmov_FT0_STN, tcg_const_i32(opreg)); 5949 tcg_gen_helper_0_0(helper_fcom_ST0_FT0); 4902 5950 break; 4903 5951 case 0x03: /* fcomp */ 4904 5952 case 0x23: /* fcomp3, undocumented op */ 4905 5953 case 0x32: /* fcomp5, undocumented op */ 4906 gen_op_fmov_FT0_STN(opreg);4907 gen_op_fcom_ST0_FT0();4908 gen_op_fpop();5954 tcg_gen_helper_0_1(helper_fmov_FT0_STN, tcg_const_i32(opreg)); 5955 tcg_gen_helper_0_0(helper_fcom_ST0_FT0); 5956 tcg_gen_helper_0_0(helper_fpop); 4909 5957 
break; 4910 5958 case 0x15: /* da/5 */ 4911 5959 switch(rm) { 4912 5960 case 1: /* fucompp */ 4913 gen_op_fmov_FT0_STN(1);4914 gen_op_fucom_ST0_FT0();4915 gen_op_fpop();4916 gen_op_fpop();5961 tcg_gen_helper_0_1(helper_fmov_FT0_STN, tcg_const_i32(1)); 5962 tcg_gen_helper_0_0(helper_fucom_ST0_FT0); 5963 tcg_gen_helper_0_0(helper_fpop); 5964 tcg_gen_helper_0_0(helper_fpop); 4917 5965 break; 4918 5966 default: … … 4927 5975 break; 4928 5976 case 2: /* fclex */ 4929 gen_op_fclex();5977 tcg_gen_helper_0_0(helper_fclex); 4930 5978 break; 4931 5979 case 3: /* fninit */ 4932 gen_op_fninit();5980 tcg_gen_helper_0_0(helper_fninit); 4933 5981 break; 4934 5982 case 4: /* fsetpm (287 only, just do nop here) */ … … 4941 5989 if (s->cc_op != CC_OP_DYNAMIC) 4942 5990 gen_op_set_cc_op(s->cc_op); 4943 gen_op_fmov_FT0_STN(opreg);4944 gen_op_fucomi_ST0_FT0();5991 tcg_gen_helper_0_1(helper_fmov_FT0_STN, tcg_const_i32(opreg)); 5992 tcg_gen_helper_0_0(helper_fucomi_ST0_FT0); 4945 5993 s->cc_op = CC_OP_EFLAGS; 4946 5994 break; … … 4948 5996 if (s->cc_op != CC_OP_DYNAMIC) 4949 5997 gen_op_set_cc_op(s->cc_op); 4950 gen_op_fmov_FT0_STN(opreg);4951 gen_op_fcomi_ST0_FT0();5998 tcg_gen_helper_0_1(helper_fmov_FT0_STN, tcg_const_i32(opreg)); 5999 tcg_gen_helper_0_0(helper_fcomi_ST0_FT0); 4952 6000 s->cc_op = CC_OP_EFLAGS; 4953 6001 break; 4954 6002 case 0x28: /* ffree sti */ 4955 gen_op_ffree_STN(opreg);6003 tcg_gen_helper_0_1(helper_ffree_STN, tcg_const_i32(opreg)); 4956 6004 break; 4957 6005 case 0x2a: /* fst sti */ 4958 gen_op_fmov_STN_ST0(opreg);6006 tcg_gen_helper_0_1(helper_fmov_STN_ST0, tcg_const_i32(opreg)); 4959 6007 break; 4960 6008 case 0x2b: /* fstp sti */ … … 4962 6010 case 0x3a: /* fstp8 sti, undocumented op */ 4963 6011 case 0x3b: /* fstp9 sti, undocumented op */ 4964 gen_op_fmov_STN_ST0(opreg);4965 gen_op_fpop();6012 tcg_gen_helper_0_1(helper_fmov_STN_ST0, tcg_const_i32(opreg)); 6013 tcg_gen_helper_0_0(helper_fpop); 4966 6014 break; 4967 6015 case 0x2c: /* fucom st(i) */ 4968 gen_op_fmov_FT0_STN(opreg);4969 gen_op_fucom_ST0_FT0();6016 tcg_gen_helper_0_1(helper_fmov_FT0_STN, tcg_const_i32(opreg)); 6017 tcg_gen_helper_0_0(helper_fucom_ST0_FT0); 4970 6018 break; 4971 6019 case 0x2d: /* fucomp st(i) */ 4972 gen_op_fmov_FT0_STN(opreg);4973 gen_op_fucom_ST0_FT0();4974 gen_op_fpop();6020 tcg_gen_helper_0_1(helper_fmov_FT0_STN, tcg_const_i32(opreg)); 6021 tcg_gen_helper_0_0(helper_fucom_ST0_FT0); 6022 tcg_gen_helper_0_0(helper_fpop); 4975 6023 break; 4976 6024 case 0x33: /* de/3 */ 4977 6025 switch(rm) { 4978 6026 case 1: /* fcompp */ 4979 gen_op_fmov_FT0_STN(1);4980 gen_op_fcom_ST0_FT0();4981 gen_op_fpop();4982 gen_op_fpop();6027 tcg_gen_helper_0_1(helper_fmov_FT0_STN, tcg_const_i32(1)); 6028 tcg_gen_helper_0_0(helper_fcom_ST0_FT0); 6029 tcg_gen_helper_0_0(helper_fpop); 6030 tcg_gen_helper_0_0(helper_fpop); 4983 6031 break; 4984 6032 default: … … 4987 6035 break; 4988 6036 case 0x38: /* ffreep sti, undocumented op */ 4989 gen_op_ffree_STN(opreg);4990 gen_op_fpop();6037 tcg_gen_helper_0_1(helper_ffree_STN, tcg_const_i32(opreg)); 6038 tcg_gen_helper_0_0(helper_fpop); 4991 6039 break; 4992 6040 case 0x3c: /* df/4 */ 4993 6041 switch(rm) { 4994 6042 case 0: 4995 gen_op_fnstsw_EAX(); 6043 tcg_gen_helper_1_0(helper_fnstsw, cpu_tmp2_i32); 6044 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); 6045 gen_op_mov_reg_T0(OT_WORD, R_EAX); 4996 6046 break; 4997 6047 default: … … 5002 6052 if (s->cc_op != CC_OP_DYNAMIC) 5003 6053 gen_op_set_cc_op(s->cc_op); 5004 gen_op_fmov_FT0_STN(opreg);5005 gen_op_fucomi_ST0_FT0();5006 
gen_op_fpop();6054 tcg_gen_helper_0_1(helper_fmov_FT0_STN, tcg_const_i32(opreg)); 6055 tcg_gen_helper_0_0(helper_fucomi_ST0_FT0); 6056 tcg_gen_helper_0_0(helper_fpop); 5007 6057 s->cc_op = CC_OP_EFLAGS; 5008 6058 break; … … 5010 6060 if (s->cc_op != CC_OP_DYNAMIC) 5011 6061 gen_op_set_cc_op(s->cc_op); 5012 gen_op_fmov_FT0_STN(opreg);5013 gen_op_fcomi_ST0_FT0();5014 gen_op_fpop();6062 tcg_gen_helper_0_1(helper_fmov_FT0_STN, tcg_const_i32(opreg)); 6063 tcg_gen_helper_0_0(helper_fcomi_ST0_FT0); 6064 tcg_gen_helper_0_0(helper_fpop); 5015 6065 s->cc_op = CC_OP_EFLAGS; 5016 6066 break; … … 5018 6068 case 0x18 ... 0x1b: 5019 6069 { 5020 int op1 ;5021 const staticuint8_t fcmov_cc[8] = {6070 int op1, l1; 6071 static const uint8_t fcmov_cc[8] = { 5022 6072 (JCC_B << 1), 5023 6073 (JCC_Z << 1), … … 5025 6075 (JCC_P << 1), 5026 6076 }; 5027 op1 = fcmov_cc[op & 3] | ((op >> 3) & 1); 5028 gen_setcc(s, op1); 5029 gen_op_fcmov_ST0_STN_T0(opreg); 6077 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1); 6078 l1 = gen_new_label(); 6079 gen_jcc1(s, s->cc_op, op1, l1); 6080 tcg_gen_helper_0_1(helper_fmov_ST0_STN, tcg_const_i32(opreg)); 6081 gen_set_label(l1); 5030 6082 } 5031 6083 break; … … 5034 6086 } 5035 6087 } 5036 #ifdef USE_CODE_COPY5037 s->tb->cflags |= CF_TB_FP_USED;5038 #endif5039 6088 break; 5040 6089 /************************/ … … 5117 6166 else 5118 6167 ot = dflag ? OT_LONG : OT_WORD; 5119 gen_check_io(s, ot, 1, pc_start - s->cs_base); 6168 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX); 6169 gen_op_andl_T0_ffff(); 6170 gen_check_io(s, ot, pc_start - s->cs_base, 6171 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4); 5120 6172 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { 5121 6173 gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); 5122 6174 } else { 5123 6175 gen_ins(s, ot); 6176 if (use_icount) { 6177 gen_jmp(s, s->pc - s->cs_base); 6178 } 5124 6179 } 5125 6180 break; … … 5130 6185 else 5131 6186 ot = dflag ? OT_LONG : OT_WORD; 5132 gen_check_io(s, ot, 1, pc_start - s->cs_base); 6187 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX); 6188 gen_op_andl_T0_ffff(); 6189 gen_check_io(s, ot, pc_start - s->cs_base, 6190 svm_is_rep(prefixes) | 4); 5133 6191 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { 5134 6192 gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); 5135 6193 } else { 5136 6194 gen_outs(s, ot); 6195 if (use_icount) { 6196 gen_jmp(s, s->pc - s->cs_base); 6197 } 5137 6198 } 5138 6199 break; … … 5140 6201 /************************/ 5141 6202 /* port I/O */ 6203 5142 6204 case 0xe4: 5143 6205 case 0xe5: … … 5148 6210 val = ldub_code(s->pc++); 5149 6211 gen_op_movl_T0_im(val); 5150 gen_check_io(s, ot, 0, pc_start - s->cs_base); 5151 gen_op_in[ot](); 5152 gen_op_mov_reg_T1[ot][R_EAX](); 6212 gen_check_io(s, ot, pc_start - s->cs_base, 6213 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes)); 6214 if (use_icount) 6215 gen_io_start(); 6216 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); 6217 tcg_gen_helper_1_1(helper_in_func[ot], cpu_T[1], cpu_tmp2_i32); 6218 gen_op_mov_reg_T1(ot, R_EAX); 6219 if (use_icount) { 6220 gen_io_end(); 6221 gen_jmp(s, s->pc - s->cs_base); 6222 } 5153 6223 break; 5154 6224 case 0xe6: … … 5160 6230 val = ldub_code(s->pc++); 5161 6231 gen_op_movl_T0_im(val); 5162 gen_check_io(s, ot, 0, pc_start - s->cs_base); 6232 gen_check_io(s, ot, pc_start - s->cs_base, 6233 svm_is_rep(prefixes)); 5163 6234 #ifdef VBOX /* bird: linux is writing to this port for delaying I/O. 
*/ 5164 6235 if (val == 0x80) 5165 6236 break; 5166 6237 #endif /* VBOX */ 5167 gen_op_mov_TN_reg[ot][1][R_EAX](); 5168 gen_op_out[ot](); 6238 gen_op_mov_TN_reg(ot, 1, R_EAX); 6239 6240 if (use_icount) 6241 gen_io_start(); 6242 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); 6243 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff); 6244 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]); 6245 tcg_gen_helper_0_2(helper_out_func[ot], cpu_tmp2_i32, cpu_tmp3_i32); 6246 if (use_icount) { 6247 gen_io_end(); 6248 gen_jmp(s, s->pc - s->cs_base); 6249 } 5169 6250 break; 5170 6251 case 0xec: … … 5174 6255 else 5175 6256 ot = dflag ? OT_LONG : OT_WORD; 5176 gen_op_mov_TN_reg [OT_WORD][0][R_EDX]();6257 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX); 5177 6258 gen_op_andl_T0_ffff(); 5178 gen_check_io(s, ot, 0, pc_start - s->cs_base); 5179 gen_op_in[ot](); 5180 gen_op_mov_reg_T1[ot][R_EAX](); 6259 gen_check_io(s, ot, pc_start - s->cs_base, 6260 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes)); 6261 if (use_icount) 6262 gen_io_start(); 6263 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); 6264 tcg_gen_helper_1_1(helper_in_func[ot], cpu_T[1], cpu_tmp2_i32); 6265 gen_op_mov_reg_T1(ot, R_EAX); 6266 if (use_icount) { 6267 gen_io_end(); 6268 gen_jmp(s, s->pc - s->cs_base); 6269 } 5181 6270 break; 5182 6271 case 0xee: … … 5186 6275 else 5187 6276 ot = dflag ? OT_LONG : OT_WORD; 5188 gen_op_mov_TN_reg [OT_WORD][0][R_EDX]();6277 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX); 5189 6278 gen_op_andl_T0_ffff(); 5190 gen_check_io(s, ot, 0, pc_start - s->cs_base); 5191 gen_op_mov_TN_reg[ot][1][R_EAX](); 5192 gen_op_out[ot](); 6279 gen_check_io(s, ot, pc_start - s->cs_base, 6280 svm_is_rep(prefixes)); 6281 gen_op_mov_TN_reg(ot, 1, R_EAX); 6282 6283 if (use_icount) 6284 gen_io_start(); 6285 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); 6286 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff); 6287 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]); 6288 tcg_gen_helper_0_2(helper_out_func[ot], cpu_tmp2_i32, cpu_tmp3_i32); 6289 if (use_icount) { 6290 gen_io_end(); 6291 gen_jmp(s, s->pc - s->cs_base); 6292 } 5193 6293 break; 5194 6294 … … 5223 6323 gen_op_set_cc_op(s->cc_op); 5224 6324 gen_jmp_im(pc_start - s->cs_base); 5225 gen_op_lret_protected(s->dflag, val); 6325 tcg_gen_helper_0_2(helper_lret_protected, 6326 tcg_const_i32(s->dflag), 6327 tcg_const_i32(val)); 5226 6328 } else { 5227 6329 gen_stack_A0(s); 5228 6330 /* pop offset */ 5229 gen_op_ld_T0_A0 [1 + s->dflag + s->mem_index]();6331 gen_op_ld_T0_A0(1 + s->dflag + s->mem_index); 5230 6332 if (s->dflag == 0) 5231 6333 gen_op_andl_T0_ffff(); … … 5235 6337 /* pop selector */ 5236 6338 gen_op_addl_A0_im(2 << s->dflag); 5237 gen_op_ld_T0_A0 [1 + s->dflag + s->mem_index]();5238 gen_op_movl_seg_T0_vm( offsetof(CPUX86State,segs[R_CS]));6339 gen_op_ld_T0_A0(1 + s->dflag + s->mem_index); 6340 gen_op_movl_seg_T0_vm(R_CS); 5239 6341 /* add stack offset */ 5240 6342 gen_stack_update(s, val + (4 << s->dflag)); … … 5246 6348 goto do_lret; 5247 6349 case 0xcf: /* iret */ 6350 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET); 5248 6351 if (!s->pe) { 5249 6352 /* real mode */ 5250 gen_op_iret_real(s->dflag);6353 tcg_gen_helper_0_1(helper_iret_real, tcg_const_i32(s->dflag)); 5251 6354 s->cc_op = CC_OP_EFLAGS; 5252 6355 } else if (s->vm86) { … … 5258 6361 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); 5259 6362 } else { 5260 gen_op_iret_real(s->dflag);6363 tcg_gen_helper_0_1(helper_iret_real, tcg_const_i32(s->dflag)); 5261 6364 s->cc_op = CC_OP_EFLAGS; 5262 6365 } … … 5265 6368 gen_op_set_cc_op(s->cc_op); 5266 6369 
gen_jmp_im(pc_start - s->cs_base); 5267 gen_op_iret_protected(s->dflag, s->pc - s->cs_base); 6370 tcg_gen_helper_0_2(helper_iret_protected, 6371 tcg_const_i32(s->dflag), 6372 tcg_const_i32(s->pc - s->cs_base)); 5268 6373 s->cc_op = CC_OP_EFLAGS; 5269 6374 } … … 5353 6458 break; 5354 6459 case 0x140 ... 0x14f: /* cmov Gv, Ev */ 5355 ot = dflag + OT_WORD; 5356 modrm = ldub_code(s->pc++); 5357 reg = ((modrm >> 3) & 7) | rex_r; 5358 mod = (modrm >> 6) & 3; 5359 gen_setcc(s, b); 5360 if (mod != 3) { 5361 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 5362 gen_op_ld_T1_A0[ot + s->mem_index](); 5363 } else { 5364 rm = (modrm & 7) | REX_B(s); 5365 gen_op_mov_TN_reg[ot][1][rm](); 5366 } 5367 gen_op_cmov_reg_T1_T0[ot - OT_WORD][reg](); 6460 { 6461 int l1; 6462 TCGv t0; 6463 6464 ot = dflag + OT_WORD; 6465 modrm = ldub_code(s->pc++); 6466 reg = ((modrm >> 3) & 7) | rex_r; 6467 mod = (modrm >> 6) & 3; 6468 t0 = tcg_temp_local_new(TCG_TYPE_TL); 6469 if (mod != 3) { 6470 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 6471 gen_op_ld_v(ot + s->mem_index, t0, cpu_A0); 6472 } else { 6473 rm = (modrm & 7) | REX_B(s); 6474 gen_op_mov_v_reg(ot, t0, rm); 6475 } 6476 #ifdef TARGET_X86_64 6477 if (ot == OT_LONG) { 6478 /* XXX: specific Intel behaviour ? */ 6479 l1 = gen_new_label(); 6480 gen_jcc1(s, s->cc_op, b ^ 1, l1); 6481 tcg_gen_st32_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET); 6482 gen_set_label(l1); 6483 tcg_gen_movi_tl(cpu_tmp0, 0); 6484 tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_LH_OFFSET); 6485 } else 6486 #endif 6487 { 6488 l1 = gen_new_label(); 6489 gen_jcc1(s, s->cc_op, b ^ 1, l1); 6490 gen_op_mov_reg_v(ot, reg, t0); 6491 gen_set_label(l1); 6492 } 6493 tcg_temp_free(t0); 6494 } 5368 6495 break; 5369 6496 … … 5371 6498 /* flags */ 5372 6499 case 0x9c: /* pushf */ 6500 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF); 5373 6501 #ifdef VBOX 5374 6502 if (s->vm86 && s->iopl != 3 && (!s->vme || s->dflag)) { … … 5382 6510 #ifdef VBOX 5383 6511 if (s->vm86 && s->vme && s->iopl != 3) 5384 gen_op_movl_T0_eflags_vme();6512 tcg_gen_helper_1_0(helper_read_eflags_vme, cpu_T[0]); 5385 6513 else 5386 6514 #endif 5387 gen_op_movl_T0_eflags();6515 tcg_gen_helper_1_0(helper_read_eflags, cpu_T[0]); 5388 6516 gen_push_T0(s); 5389 6517 } 5390 6518 break; 5391 6519 case 0x9d: /* popf */ 6520 gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF); 5392 6521 #ifdef VBOX 5393 6522 if (s->vm86 && s->iopl != 3 && (!s->vme || s->dflag)) { … … 5400 6529 if (s->cpl == 0) { 5401 6530 if (s->dflag) { 5402 gen_op_movl_eflags_T0_cpl0(); 6531 tcg_gen_helper_0_2(helper_write_eflags, cpu_T[0], 6532 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK))); 5403 6533 } else { 5404 gen_op_movw_eflags_T0_cpl0(); 6534 tcg_gen_helper_0_2(helper_write_eflags, cpu_T[0], 6535 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK) & 0xffff)); 5405 6536 } 5406 6537 } else { 5407 6538 if (s->cpl <= s->iopl) { 5408 6539 if (s->dflag) { 5409 gen_op_movl_eflags_T0_io(); 6540 tcg_gen_helper_0_2(helper_write_eflags, cpu_T[0], 6541 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK))); 5410 6542 } else { 5411 gen_op_movw_eflags_T0_io(); 6543 tcg_gen_helper_0_2(helper_write_eflags, cpu_T[0], 6544 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK) & 0xffff)); 5412 6545 } 5413 6546 } else { 5414 6547 if (s->dflag) { 5415 gen_op_movl_eflags_T0(); 6548 tcg_gen_helper_0_2(helper_write_eflags, cpu_T[0], 6549 tcg_const_i32((TF_MASK | AC_MASK | 
ID_MASK | NT_MASK))); 5416 6550 } else { 5417 6551 #ifdef VBOX 5418 6552 if (s->vm86 && s->vme) 5419 gen_op_movw_eflags_T0_vme();6553 tcg_gen_helper_0_1(helper_write_eflags_vme, cpu_T[0]); 5420 6554 else 5421 6555 #endif 5422 gen_op_movw_eflags_T0(); 6556 tcg_gen_helper_0_2(helper_write_eflags, cpu_T[0], 6557 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff)); 5423 6558 } 5424 6559 } … … 5434 6569 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM)) 5435 6570 goto illegal_op; 5436 gen_op_mov_TN_reg [OT_BYTE][0][R_AH]();6571 gen_op_mov_TN_reg(OT_BYTE, 0, R_AH); 5437 6572 if (s->cc_op != CC_OP_DYNAMIC) 5438 6573 gen_op_set_cc_op(s->cc_op); 5439 gen_op_movb_eflags_T0(); 6574 gen_compute_eflags(cpu_cc_src); 6575 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O); 6576 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C); 6577 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]); 5440 6578 s->cc_op = CC_OP_EFLAGS; 5441 6579 break; … … 5445 6583 if (s->cc_op != CC_OP_DYNAMIC) 5446 6584 gen_op_set_cc_op(s->cc_op); 5447 gen_op_movl_T0_eflags(); 5448 gen_op_mov_reg_T0[OT_BYTE][R_AH](); 6585 gen_compute_eflags(cpu_T[0]); 6586 /* Note: gen_compute_eflags() only gives the condition codes */ 6587 tcg_gen_ori_tl(cpu_T[0], cpu_T[0], 0x02); 6588 gen_op_mov_reg_T0(OT_BYTE, R_AH); 5449 6589 break; 5450 6590 case 0xf5: /* cmc */ 5451 6591 if (s->cc_op != CC_OP_DYNAMIC) 5452 6592 gen_op_set_cc_op(s->cc_op); 5453 gen_op_cmc(); 6593 gen_compute_eflags(cpu_cc_src); 6594 tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C); 5454 6595 s->cc_op = CC_OP_EFLAGS; 5455 6596 break; … … 5457 6598 if (s->cc_op != CC_OP_DYNAMIC) 5458 6599 gen_op_set_cc_op(s->cc_op); 5459 gen_op_clc(); 6600 gen_compute_eflags(cpu_cc_src); 6601 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C); 5460 6602 s->cc_op = CC_OP_EFLAGS; 5461 6603 break; … … 5463 6605 if (s->cc_op != CC_OP_DYNAMIC) 5464 6606 gen_op_set_cc_op(s->cc_op); 5465 gen_op_stc(); 6607 gen_compute_eflags(cpu_cc_src); 6608 tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C); 5466 6609 s->cc_op = CC_OP_EFLAGS; 5467 6610 break; 5468 6611 case 0xfc: /* cld */ 5469 gen_op_cld(); 6612 tcg_gen_movi_i32(cpu_tmp2_i32, 1); 6613 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUState, df)); 5470 6614 break; 5471 6615 case 0xfd: /* std */ 5472 gen_op_std(); 6616 tcg_gen_movi_i32(cpu_tmp2_i32, -1); 6617 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUState, df)); 5473 6618 break; 5474 6619 … … 5484 6629 s->rip_offset = 1; 5485 6630 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 5486 gen_op_ld_T0_A0 [ot + s->mem_index]();6631 gen_op_ld_T0_A0(ot + s->mem_index); 5487 6632 } else { 5488 gen_op_mov_TN_reg [ot][0][rm]();6633 gen_op_mov_TN_reg(ot, 0, rm); 5489 6634 } 5490 6635 /* load shift */ … … 5494 6639 goto illegal_op; 5495 6640 op -= 4; 5496 gen_op_btx_T0_T1_cc[ot - OT_WORD][op](); 5497 s->cc_op = CC_OP_SARB + ot; 5498 if (op != 0) { 5499 if (mod != 3) 5500 gen_op_st_T0_A0[ot + s->mem_index](); 5501 else 5502 gen_op_mov_reg_T0[ot][rm](); 5503 gen_op_update_bt_cc(); 5504 } 5505 break; 6641 goto bt_op; 5506 6642 case 0x1a3: /* bt Gv, Ev */ 5507 6643 op = 0; … … 5521 6657 mod = (modrm >> 6) & 3; 5522 6658 rm = (modrm & 7) | REX_B(s); 5523 gen_op_mov_TN_reg [OT_LONG][1][reg]();6659 gen_op_mov_TN_reg(OT_LONG, 1, reg); 5524 6660 if (mod != 3) { 5525 6661 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 5526 6662 /* specific case: we need to add a displacement */ 5527 gen_op_add_bit_A0_T1[ot - OT_WORD](); 5528 gen_op_ld_T0_A0[ot + s->mem_index](); 6663 gen_exts(ot, cpu_T[1]); 6664 
tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot); 6665 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot); 6666 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0); 6667 gen_op_ld_T0_A0(ot + s->mem_index); 5529 6668 } else { 5530 gen_op_mov_TN_reg[ot][0][rm](); 5531 } 5532 gen_op_btx_T0_T1_cc[ot - OT_WORD][op](); 6669 gen_op_mov_TN_reg(ot, 0, rm); 6670 } 6671 bt_op: 6672 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1); 6673 switch(op) { 6674 case 0: 6675 tcg_gen_shr_tl(cpu_cc_src, cpu_T[0], cpu_T[1]); 6676 tcg_gen_movi_tl(cpu_cc_dst, 0); 6677 break; 6678 case 1: 6679 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]); 6680 tcg_gen_movi_tl(cpu_tmp0, 1); 6681 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]); 6682 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0); 6683 break; 6684 case 2: 6685 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]); 6686 tcg_gen_movi_tl(cpu_tmp0, 1); 6687 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]); 6688 tcg_gen_not_tl(cpu_tmp0, cpu_tmp0); 6689 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_tmp0); 6690 break; 6691 default: 6692 case 3: 6693 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]); 6694 tcg_gen_movi_tl(cpu_tmp0, 1); 6695 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]); 6696 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0); 6697 break; 6698 } 5533 6699 s->cc_op = CC_OP_SARB + ot; 5534 6700 if (op != 0) { 5535 6701 if (mod != 3) 5536 gen_op_st_T0_A0 [ot + s->mem_index]();6702 gen_op_st_T0_A0(ot + s->mem_index); 5537 6703 else 5538 gen_op_mov_reg_T0[ot][rm](); 5539 gen_op_update_bt_cc(); 6704 gen_op_mov_reg_T0(ot, rm); 6705 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4); 6706 tcg_gen_movi_tl(cpu_cc_dst, 0); 5540 6707 } 5541 6708 break; 5542 6709 case 0x1bc: /* bsf */ 5543 6710 case 0x1bd: /* bsr */ 5544 ot = dflag + OT_WORD; 5545 modrm = ldub_code(s->pc++); 5546 reg = ((modrm >> 3) & 7) | rex_r; 5547 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0); 5548 /* NOTE: in order to handle the 0 case, we must load the 5549 result. 
It could be optimized with a generated jump */ 5550 gen_op_mov_TN_reg[ot][1][reg](); 5551 gen_op_bsx_T0_cc[ot - OT_WORD][b & 1](); 5552 gen_op_mov_reg_T1[ot][reg](); 5553 s->cc_op = CC_OP_LOGICB + ot; 6711 { 6712 int label1; 6713 TCGv t0; 6714 6715 ot = dflag + OT_WORD; 6716 modrm = ldub_code(s->pc++); 6717 reg = ((modrm >> 3) & 7) | rex_r; 6718 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0); 6719 gen_extu(ot, cpu_T[0]); 6720 label1 = gen_new_label(); 6721 tcg_gen_movi_tl(cpu_cc_dst, 0); 6722 t0 = tcg_temp_local_new(TCG_TYPE_TL); 6723 tcg_gen_mov_tl(t0, cpu_T[0]); 6724 tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, label1); 6725 if (b & 1) { 6726 tcg_gen_helper_1_1(helper_bsr, cpu_T[0], t0); 6727 } else { 6728 tcg_gen_helper_1_1(helper_bsf, cpu_T[0], t0); 6729 } 6730 gen_op_mov_reg_T0(ot, reg); 6731 tcg_gen_movi_tl(cpu_cc_dst, 1); 6732 gen_set_label(label1); 6733 tcg_gen_discard_tl(cpu_cc_src); 6734 s->cc_op = CC_OP_LOGICB + ot; 6735 tcg_temp_free(t0); 6736 } 5554 6737 break; 5555 6738 /************************/ … … 5560 6743 if (s->cc_op != CC_OP_DYNAMIC) 5561 6744 gen_op_set_cc_op(s->cc_op); 5562 gen_op_daa();6745 tcg_gen_helper_0_0(helper_daa); 5563 6746 s->cc_op = CC_OP_EFLAGS; 5564 6747 break; … … 5568 6751 if (s->cc_op != CC_OP_DYNAMIC) 5569 6752 gen_op_set_cc_op(s->cc_op); 5570 gen_op_das();6753 tcg_gen_helper_0_0(helper_das); 5571 6754 s->cc_op = CC_OP_EFLAGS; 5572 6755 break; … … 5576 6759 if (s->cc_op != CC_OP_DYNAMIC) 5577 6760 gen_op_set_cc_op(s->cc_op); 5578 gen_op_aaa();6761 tcg_gen_helper_0_0(helper_aaa); 5579 6762 s->cc_op = CC_OP_EFLAGS; 5580 6763 break; … … 5584 6767 if (s->cc_op != CC_OP_DYNAMIC) 5585 6768 gen_op_set_cc_op(s->cc_op); 5586 gen_op_aas();6769 tcg_gen_helper_0_0(helper_aas); 5587 6770 s->cc_op = CC_OP_EFLAGS; 5588 6771 break; … … 5594 6777 gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base); 5595 6778 } else { 5596 gen_op_aam(val);6779 tcg_gen_helper_0_1(helper_aam, tcg_const_i32(val)); 5597 6780 s->cc_op = CC_OP_LOGICB; 5598 6781 } … … 5602 6785 goto illegal_op; 5603 6786 val = ldub_code(s->pc++); 5604 gen_op_aad(val);6787 tcg_gen_helper_0_1(helper_aad, tcg_const_i32(val)); 5605 6788 s->cc_op = CC_OP_LOGICB; 5606 6789 break; … … 5612 6795 if (prefixes & PREFIX_LOCK) 5613 6796 goto illegal_op; 6797 if (prefixes & PREFIX_REPZ) { 6798 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PAUSE); 6799 } 5614 6800 break; 5615 6801 case 0x9b: /* fwait */ … … 5621 6807 gen_op_set_cc_op(s->cc_op); 5622 6808 gen_jmp_im(pc_start - s->cs_base); 5623 gen_op_fwait();6809 tcg_gen_helper_0_0(helper_fwait); 5624 6810 } 5625 6811 break; … … 5650 6836 gen_op_set_cc_op(s->cc_op); 5651 6837 gen_jmp_im(pc_start - s->cs_base); 5652 gen_op_into(s->pc - pc_start);6838 tcg_gen_helper_0_1(helper_into, tcg_const_i32(s->pc - pc_start)); 5653 6839 break; 5654 6840 case 0xf1: /* icebp (undocumented, exits to external debugger) */ 6841 gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP); 5655 6842 #if 1 5656 6843 gen_debug(s, pc_start - s->cs_base); … … 5664 6851 if (!s->vm86) { 5665 6852 if (s->cpl <= s->iopl) { 5666 gen_op_cli();6853 tcg_gen_helper_0_0(helper_cli); 5667 6854 } else { 5668 6855 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); … … 5670 6857 } else { 5671 6858 if (s->iopl == 3) { 5672 gen_op_cli();6859 tcg_gen_helper_0_0(helper_cli); 5673 6860 #ifdef VBOX 5674 6861 } else if (s->iopl != 3 && s->vme) { 5675 gen_op_cli_vme();6862 tcg_gen_helper_0_0(helper_cli_vme); 5676 6863 #endif 5677 6864 } else { … … 5684 6871 if (s->cpl <= s->iopl) { 5685 6872 gen_sti: 5686 gen_op_sti();6873 
tcg_gen_helper_0_0(helper_sti); 5687 6874 /* interruptions are enabled only the first insn after sti */ 5688 6875 /* If several instructions disable interrupts, only the 5689 6876 _first_ does it */ 5690 6877 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK)) 5691 gen_op_set_inhibit_irq();6878 tcg_gen_helper_0_0(helper_set_inhibit_irq); 5692 6879 /* give a chance to handle pending irqs */ 5693 6880 gen_jmp_im(s->pc - s->cs_base); … … 5701 6888 #ifdef VBOX 5702 6889 } else if (s->iopl != 3 && s->vme) { 5703 gen_op_sti_vme();6890 tcg_gen_helper_0_0(helper_sti_vme); 5704 6891 /* give a chance to handle pending irqs */ 5705 6892 gen_jmp_im(s->pc - s->cs_base); … … 5720 6907 if (mod == 3) 5721 6908 goto illegal_op; 5722 gen_op_mov_TN_reg [ot][0][reg]();6909 gen_op_mov_TN_reg(ot, 0, reg); 5723 6910 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 5724 6911 gen_jmp_im(pc_start - s->cs_base); 6912 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); 5725 6913 if (ot == OT_WORD) 5726 gen_op_boundw();6914 tcg_gen_helper_0_2(helper_boundw, cpu_A0, cpu_tmp2_i32); 5727 6915 else 5728 gen_op_boundl();6916 tcg_gen_helper_0_2(helper_boundl, cpu_A0, cpu_tmp2_i32); 5729 6917 break; 5730 6918 case 0x1c8 ... 0x1cf: /* bswap reg */ … … 5732 6920 #ifdef TARGET_X86_64 5733 6921 if (dflag == 2) { 5734 gen_op_mov_TN_reg [OT_QUAD][0][reg]();5735 gen_op_bswapq_T0();5736 gen_op_mov_reg_T0 [OT_QUAD][reg]();6922 gen_op_mov_TN_reg(OT_QUAD, 0, reg); 6923 tcg_gen_bswap_i64(cpu_T[0], cpu_T[0]); 6924 gen_op_mov_reg_T0(OT_QUAD, reg); 5737 6925 } else 5738 #endif5739 6926 { 5740 gen_op_mov_TN_reg[OT_LONG][0][reg](); 5741 gen_op_bswapl_T0(); 5742 gen_op_mov_reg_T0[OT_LONG][reg](); 5743 } 6927 TCGv tmp0; 6928 gen_op_mov_TN_reg(OT_LONG, 0, reg); 6929 6930 tmp0 = tcg_temp_new(TCG_TYPE_I32); 6931 tcg_gen_trunc_i64_i32(tmp0, cpu_T[0]); 6932 tcg_gen_bswap_i32(tmp0, tmp0); 6933 tcg_gen_extu_i32_i64(cpu_T[0], tmp0); 6934 gen_op_mov_reg_T0(OT_LONG, reg); 6935 } 6936 #else 6937 { 6938 gen_op_mov_TN_reg(OT_LONG, 0, reg); 6939 tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); 6940 gen_op_mov_reg_T0(OT_LONG, reg); 6941 } 6942 #endif 5744 6943 break; 5745 6944 case 0xd6: /* salc */ … … 5748 6947 if (s->cc_op != CC_OP_DYNAMIC) 5749 6948 gen_op_set_cc_op(s->cc_op); 5750 gen_op_salc(); 6949 gen_compute_eflags_c(cpu_T[0]); 6950 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]); 6951 gen_op_mov_reg_T0(OT_BYTE, R_EAX); 5751 6952 break; 5752 6953 case 0xe0: /* loopnz */ 5753 6954 case 0xe1: /* loopz */ 5754 if (s->cc_op != CC_OP_DYNAMIC)5755 gen_op_set_cc_op(s->cc_op);5756 /* FALL THRU */5757 6955 case 0xe2: /* loop */ 5758 6956 case 0xe3: /* jecxz */ 5759 6957 { 5760 int l1, l2 ;6958 int l1, l2, l3; 5761 6959 5762 6960 tval = (int8_t)insn_get(s, OT_BYTE); … … 5768 6966 l1 = gen_new_label(); 5769 6967 l2 = gen_new_label(); 6968 l3 = gen_new_label(); 5770 6969 b &= 3; 5771 if (b == 3) { 5772 gen_op_jz_ecx[s->aflag](l1); 5773 } else { 5774 gen_op_dec_ECX[s->aflag](); 5775 if (b <= 1) 5776 gen_op_mov_T0_cc(); 5777 gen_op_loop[s->aflag][b](l1); 5778 } 5779 6970 switch(b) { 6971 case 0: /* loopnz */ 6972 case 1: /* loopz */ 6973 if (s->cc_op != CC_OP_DYNAMIC) 6974 gen_op_set_cc_op(s->cc_op); 6975 gen_op_add_reg_im(s->aflag, R_ECX, -1); 6976 gen_op_jz_ecx(s->aflag, l3); 6977 gen_compute_eflags(cpu_tmp0); 6978 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_Z); 6979 if (b == 0) { 6980 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, l1); 6981 } else { 6982 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, l1); 6983 } 6984 break; 6985 case 2: /* loop */ 6986 gen_op_add_reg_im(s->aflag, R_ECX, -1); 6987 
gen_op_jnz_ecx(s->aflag, l1); 6988 break; 6989 default: 6990 case 3: /* jcxz */ 6991 gen_op_jz_ecx(s->aflag, l1); 6992 break; 6993 } 6994 6995 gen_set_label(l3); 5780 6996 gen_jmp_im(next_eip); 5781 gen_op_jmp_label(l2); 6997 tcg_gen_br(l2); 6998 5782 6999 gen_set_label(l1); 5783 7000 gen_jmp_im(tval); … … 5791 7008 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); 5792 7009 } else { 5793 if (b & 2) 5794 gen_op_rdmsr(); 5795 else 5796 gen_op_wrmsr(); 7010 if (s->cc_op != CC_OP_DYNAMIC) 7011 gen_op_set_cc_op(s->cc_op); 7012 gen_jmp_im(pc_start - s->cs_base); 7013 if (b & 2) { 7014 tcg_gen_helper_0_0(helper_rdmsr); 7015 } else { 7016 tcg_gen_helper_0_0(helper_wrmsr); 7017 } 5797 7018 } 5798 7019 break; 5799 7020 case 0x131: /* rdtsc */ 7021 if (s->cc_op != CC_OP_DYNAMIC) 7022 gen_op_set_cc_op(s->cc_op); 5800 7023 gen_jmp_im(pc_start - s->cs_base); 5801 gen_op_rdtsc(); 7024 if (use_icount) 7025 gen_io_start(); 7026 tcg_gen_helper_0_0(helper_rdtsc); 7027 if (use_icount) { 7028 gen_io_end(); 7029 gen_jmp(s, s->pc - s->cs_base); 7030 } 7031 break; 7032 case 0x133: /* rdpmc */ 7033 if (s->cc_op != CC_OP_DYNAMIC) 7034 gen_op_set_cc_op(s->cc_op); 7035 gen_jmp_im(pc_start - s->cs_base); 7036 tcg_gen_helper_0_0(helper_rdpmc); 5802 7037 break; 5803 7038 case 0x134: /* sysenter */ 7039 #ifndef VBOX 7040 /* For Intel SYSENTER is valid on 64-bit */ 7041 if (CODE64(s) && cpu_single_env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) 7042 #else 7043 /** @todo: make things right */ 5804 7044 if (CODE64(s)) 7045 #endif 5805 7046 goto illegal_op; 5806 7047 if (!s->pe) { … … 5812 7053 } 5813 7054 gen_jmp_im(pc_start - s->cs_base); 5814 gen_op_sysenter();7055 tcg_gen_helper_0_0(helper_sysenter); 5815 7056 gen_eob(s); 5816 7057 } 5817 7058 break; 5818 7059 case 0x135: /* sysexit */ 7060 #ifndef VBOX 7061 /* For Intel SYSEXIT is valid on 64-bit */ 7062 if (CODE64(s) && cpu_single_env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) 7063 #else 7064 /** @todo: make things right */ 5819 7065 if (CODE64(s)) 7066 #endif 5820 7067 goto illegal_op; 5821 7068 if (!s->pe) { … … 5827 7074 } 5828 7075 gen_jmp_im(pc_start - s->cs_base); 5829 gen_op_sysexit();7076 tcg_gen_helper_0_1(helper_sysexit, tcg_const_i32(dflag)); 5830 7077 gen_eob(s); 5831 7078 } … … 5839 7086 } 5840 7087 gen_jmp_im(pc_start - s->cs_base); 5841 gen_op_syscall(s->pc - pc_start);7088 tcg_gen_helper_0_1(helper_syscall, tcg_const_i32(s->pc - pc_start)); 5842 7089 gen_eob(s); 5843 7090 break; … … 5851 7098 } 5852 7099 gen_jmp_im(pc_start - s->cs_base); 5853 gen_op_sysret(s->dflag);7100 tcg_gen_helper_0_1(helper_sysret, tcg_const_i32(s->dflag)); 5854 7101 /* condition codes are modified only in long mode */ 5855 7102 if (s->lma) … … 5860 7107 #endif 5861 7108 case 0x1a2: /* cpuid */ 5862 gen_op_cpuid(); 7109 if (s->cc_op != CC_OP_DYNAMIC) 7110 gen_op_set_cc_op(s->cc_op); 7111 gen_jmp_im(pc_start - s->cs_base); 7112 tcg_gen_helper_0_0(helper_cpuid); 5863 7113 break; 5864 7114 case 0xf4: /* hlt */ … … 5868 7118 if (s->cc_op != CC_OP_DYNAMIC) 5869 7119 gen_op_set_cc_op(s->cc_op); 5870 gen_jmp_im( s->pc- s->cs_base);5871 gen_op_hlt();7120 gen_jmp_im(pc_start - s->cs_base); 7121 tcg_gen_helper_0_1(helper_hlt, tcg_const_i32(s->pc - pc_start)); 5872 7122 s->is_jmp = 3; 5873 7123 } … … 5881 7131 if (!s->pe || s->vm86) 5882 7132 goto illegal_op; 5883 gen_op_movl_T0_env(offsetof(CPUX86State,ldt.selector)); 7133 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ); 7134 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector)); 5884 7135 ot = OT_WORD; 5885 7136 if 
(mod == 3) … … 5893 7144 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); 5894 7145 } else { 7146 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE); 5895 7147 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0); 5896 7148 gen_jmp_im(pc_start - s->cs_base); 5897 gen_op_lldt_T0(); 7149 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); 7150 tcg_gen_helper_0_1(helper_lldt, cpu_tmp2_i32); 5898 7151 } 5899 7152 break; … … 5901 7154 if (!s->pe || s->vm86) 5902 7155 goto illegal_op; 5903 gen_op_movl_T0_env(offsetof(CPUX86State,tr.selector)); 7156 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ); 7157 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector)); 5904 7158 ot = OT_WORD; 5905 7159 if (mod == 3) … … 5913 7167 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); 5914 7168 } else { 7169 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE); 5915 7170 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0); 5916 7171 gen_jmp_im(pc_start - s->cs_base); 5917 gen_op_ltr_T0(); 7172 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); 7173 tcg_gen_helper_0_1(helper_ltr, cpu_tmp2_i32); 5918 7174 } 5919 7175 break; … … 5926 7182 gen_op_set_cc_op(s->cc_op); 5927 7183 if (op == 4) 5928 gen_op_verr();7184 tcg_gen_helper_0_1(helper_verr, cpu_T[0]); 5929 7185 else 5930 gen_op_verw();7186 tcg_gen_helper_0_1(helper_verw, cpu_T[0]); 5931 7187 s->cc_op = CC_OP_EFLAGS; 5932 7188 break; … … 5944 7200 if (mod == 3) 5945 7201 goto illegal_op; 7202 gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ); 5946 7203 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 5947 gen_op_movl_T0_env(offsetof(CPUX86State, gdt.limit));5948 gen_op_st_T0_A0 [OT_WORD + s->mem_index]();7204 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit)); 7205 gen_op_st_T0_A0(OT_WORD + s->mem_index); 5949 7206 gen_add_A0_im(s, 2); 5950 gen_op_movtl_T0_env(offsetof(CPUX86State, gdt.base));7207 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base)); 5951 7208 if (!s->dflag) 5952 7209 gen_op_andl_T0_im(0xffffff); 5953 gen_op_st_T0_A0 [CODE64(s) + OT_LONG + s->mem_index]();7210 gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index); 5954 7211 break; 5955 7212 case 1: … … 5960 7217 s->cpl != 0) 5961 7218 goto illegal_op; 7219 if (s->cc_op != CC_OP_DYNAMIC) 7220 gen_op_set_cc_op(s->cc_op); 5962 7221 gen_jmp_im(pc_start - s->cs_base); 5963 7222 #ifdef TARGET_X86_64 5964 7223 if (s->aflag == 2) { 5965 gen_op_movq_A0_reg[R_EBX](); 5966 gen_op_addq_A0_AL(); 7224 gen_op_movq_A0_reg(R_EAX); 5967 7225 } else 5968 7226 #endif 5969 7227 { 5970 gen_op_movl_A0_reg[R_EBX](); 5971 gen_op_addl_A0_AL(); 7228 gen_op_movl_A0_reg(R_EAX); 5972 7229 if (s->aflag == 0) 5973 7230 gen_op_andl_A0_ffff(); 5974 7231 } 5975 7232 gen_add_A0_ds_seg(s); 5976 gen_op_monitor();7233 tcg_gen_helper_0_1(helper_monitor, cpu_A0); 5977 7234 break; 5978 7235 case 1: /* mwait */ … … 5984 7241 s->cc_op = CC_OP_DYNAMIC; 5985 7242 } 5986 gen_jmp_im( s->pc- s->cs_base);5987 gen_op_mwait();7243 gen_jmp_im(pc_start - s->cs_base); 7244 tcg_gen_helper_0_1(helper_mwait, tcg_const_i32(s->pc - pc_start)); 5988 7245 gen_eob(s); 5989 7246 break; … … 5992 7249 } 5993 7250 } else { /* sidt */ 7251 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ); 5994 7252 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 5995 gen_op_movl_T0_env(offsetof(CPUX86State, idt.limit));5996 gen_op_st_T0_A0 [OT_WORD + s->mem_index]();7253 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit)); 7254 gen_op_st_T0_A0(OT_WORD + s->mem_index); 5997 7255 gen_add_A0_im(s, 2); 5998 
gen_op_movtl_T0_env(offsetof(CPUX86State, idt.base));7256 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base)); 5999 7257 if (!s->dflag) 6000 7258 gen_op_andl_T0_im(0xffffff); 6001 gen_op_st_T0_A0 [CODE64(s) + OT_LONG + s->mem_index]();7259 gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index); 6002 7260 } 6003 7261 break; 6004 7262 case 2: /* lgdt */ 6005 7263 case 3: /* lidt */ 6006 if (mod == 3) 6007 goto illegal_op; 6008 if (s->cpl != 0) { 7264 if (mod == 3) { 7265 if (s->cc_op != CC_OP_DYNAMIC) 7266 gen_op_set_cc_op(s->cc_op); 7267 gen_jmp_im(pc_start - s->cs_base); 7268 switch(rm) { 7269 case 0: /* VMRUN */ 7270 if (!(s->flags & HF_SVME_MASK) || !s->pe) 7271 goto illegal_op; 7272 if (s->cpl != 0) { 7273 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); 7274 break; 7275 } else { 7276 tcg_gen_helper_0_2(helper_vmrun, 7277 tcg_const_i32(s->aflag), 7278 tcg_const_i32(s->pc - pc_start)); 7279 tcg_gen_exit_tb(0); 7280 s->is_jmp = 3; 7281 } 7282 break; 7283 case 1: /* VMMCALL */ 7284 if (!(s->flags & HF_SVME_MASK)) 7285 goto illegal_op; 7286 tcg_gen_helper_0_0(helper_vmmcall); 7287 break; 7288 case 2: /* VMLOAD */ 7289 if (!(s->flags & HF_SVME_MASK) || !s->pe) 7290 goto illegal_op; 7291 if (s->cpl != 0) { 7292 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); 7293 break; 7294 } else { 7295 tcg_gen_helper_0_1(helper_vmload, 7296 tcg_const_i32(s->aflag)); 7297 } 7298 break; 7299 case 3: /* VMSAVE */ 7300 if (!(s->flags & HF_SVME_MASK) || !s->pe) 7301 goto illegal_op; 7302 if (s->cpl != 0) { 7303 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); 7304 break; 7305 } else { 7306 tcg_gen_helper_0_1(helper_vmsave, 7307 tcg_const_i32(s->aflag)); 7308 } 7309 break; 7310 case 4: /* STGI */ 7311 if ((!(s->flags & HF_SVME_MASK) && 7312 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) || 7313 !s->pe) 7314 goto illegal_op; 7315 if (s->cpl != 0) { 7316 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); 7317 break; 7318 } else { 7319 tcg_gen_helper_0_0(helper_stgi); 7320 } 7321 break; 7322 case 5: /* CLGI */ 7323 if (!(s->flags & HF_SVME_MASK) || !s->pe) 7324 goto illegal_op; 7325 if (s->cpl != 0) { 7326 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); 7327 break; 7328 } else { 7329 tcg_gen_helper_0_0(helper_clgi); 7330 } 7331 break; 7332 case 6: /* SKINIT */ 7333 if ((!(s->flags & HF_SVME_MASK) && 7334 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) || 7335 !s->pe) 7336 goto illegal_op; 7337 tcg_gen_helper_0_0(helper_skinit); 7338 break; 7339 case 7: /* INVLPGA */ 7340 if (!(s->flags & HF_SVME_MASK) || !s->pe) 7341 goto illegal_op; 7342 if (s->cpl != 0) { 7343 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); 7344 break; 7345 } else { 7346 tcg_gen_helper_0_1(helper_invlpga, 7347 tcg_const_i32(s->aflag)); 7348 } 7349 break; 7350 default: 7351 goto illegal_op; 7352 } 7353 } else if (s->cpl != 0) { 6009 7354 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); 6010 7355 } else { 7356 gen_svm_check_intercept(s, pc_start, 7357 op==2 ? 
SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE); 6011 7358 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 6012 gen_op_ld_T1_A0 [OT_WORD + s->mem_index]();7359 gen_op_ld_T1_A0(OT_WORD + s->mem_index); 6013 7360 gen_add_A0_im(s, 2); 6014 gen_op_ld_T0_A0 [CODE64(s) + OT_LONG + s->mem_index]();7361 gen_op_ld_T0_A0(CODE64(s) + OT_LONG + s->mem_index); 6015 7362 if (!s->dflag) 6016 7363 gen_op_andl_T0_im(0xffffff); 6017 7364 if (op == 2) { 6018 gen_op_movtl_env_T0(offsetof(CPUX86State,gdt.base));6019 gen_op_movl_env_T1(offsetof(CPUX86State,gdt.limit));7365 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base)); 7366 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit)); 6020 7367 } else { 6021 gen_op_movtl_env_T0(offsetof(CPUX86State,idt.base));6022 gen_op_movl_env_T1(offsetof(CPUX86State,idt.limit));7368 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base)); 7369 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit)); 6023 7370 } 6024 7371 } 6025 7372 break; 6026 7373 case 4: /* smsw */ 6027 gen_op_movl_T0_env(offsetof(CPUX86State,cr[0])); 7374 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0); 7375 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0])); 6028 7376 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 1); 6029 7377 break; … … 6032 7380 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); 6033 7381 } else { 7382 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0); 6034 7383 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0); 6035 gen_op_lmsw_T0();7384 tcg_gen_helper_0_1(helper_lmsw, cpu_T[0]); 6036 7385 gen_jmp_im(s->pc - s->cs_base); 6037 7386 gen_eob(s); … … 6046 7395 if (CODE64(s) && rm == 0) { 6047 7396 /* swapgs */ 6048 gen_op_movtl_T0_env(offsetof(CPUX86State,segs[R_GS].base));6049 gen_op_movtl_T1_env(offsetof(CPUX86State,kernelgsbase));6050 gen_op_movtl_env_T1(offsetof(CPUX86State,segs[R_GS].base));6051 gen_op_movtl_env_T0(offsetof(CPUX86State,kernelgsbase));7397 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,segs[R_GS].base)); 7398 tcg_gen_ld_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,kernelgsbase)); 7399 tcg_gen_st_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,segs[R_GS].base)); 7400 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,kernelgsbase)); 6052 7401 } else 6053 7402 #endif … … 6056 7405 } 6057 7406 } else { 7407 if (s->cc_op != CC_OP_DYNAMIC) 7408 gen_op_set_cc_op(s->cc_op); 7409 gen_jmp_im(pc_start - s->cs_base); 6058 7410 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 6059 gen_op_invlpg_A0();7411 tcg_gen_helper_0_1(helper_invlpg, cpu_A0); 6060 7412 gen_jmp_im(s->pc - s->cs_base); 6061 7413 gen_eob(s); … … 6072 7424 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); 6073 7425 } else { 7426 gen_svm_check_intercept(s, pc_start, (b & 2) ? 
SVM_EXIT_INVD : SVM_EXIT_WBINVD); 6074 7427 /* nothing to do */ 6075 7428 } … … 6088 7441 6089 7442 if (mod == 3) { 6090 gen_op_mov_TN_reg [OT_LONG][0][rm]();7443 gen_op_mov_TN_reg(OT_LONG, 0, rm); 6091 7444 /* sign extend */ 6092 7445 if (d_ot == OT_QUAD) 6093 gen_op_movslq_T0_T0();6094 gen_op_mov_reg_T0 [d_ot][reg]();7446 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]); 7447 gen_op_mov_reg_T0(d_ot, reg); 6095 7448 } else { 6096 7449 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 6097 7450 if (d_ot == OT_QUAD) { 6098 gen_op_lds_T0_A0 [OT_LONG + s->mem_index]();7451 gen_op_lds_T0_A0(OT_LONG + s->mem_index); 6099 7452 } else { 6100 gen_op_ld_T0_A0 [OT_LONG + s->mem_index]();7453 gen_op_ld_T0_A0(OT_LONG + s->mem_index); 6101 7454 } 6102 gen_op_mov_reg_T0 [d_ot][reg]();7455 gen_op_mov_reg_T0(d_ot, reg); 6103 7456 } 6104 7457 } else 6105 7458 #endif 6106 7459 { 7460 int label1; 7461 TCGv t0, t1, t2; 7462 6107 7463 if (!s->pe || s->vm86) 6108 7464 goto illegal_op; 6109 ot = dflag ? OT_LONG : OT_WORD; 7465 t0 = tcg_temp_local_new(TCG_TYPE_TL); 7466 t1 = tcg_temp_local_new(TCG_TYPE_TL); 7467 t2 = tcg_temp_local_new(TCG_TYPE_TL); 7468 ot = OT_WORD; 6110 7469 modrm = ldub_code(s->pc++); 6111 7470 reg = (modrm >> 3) & 7; … … 6113 7472 rm = modrm & 7; 6114 7473 #ifdef VBOX /* Fix for obvious bug - T1 needs to be loaded */ 6115 gen_op_mov_TN_reg[ot][1][reg](); 7474 /** @todo: how to do that right? */ 7475 //gen_op_mov_TN_reg[ot][1][reg](); 6116 7476 #endif 6117 7477 if (mod != 3) { 6118 7478 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 6119 gen_op_ld_ T0_A0[ot + s->mem_index]();7479 gen_op_ld_v(ot + s->mem_index, t0, cpu_A0); 6120 7480 } else { 6121 gen_op_mov_TN_reg[ot][0][rm](); 7481 gen_op_mov_v_reg(ot, t0, rm); 7482 } 7483 gen_op_mov_v_reg(ot, t1, reg); 7484 tcg_gen_andi_tl(cpu_tmp0, t0, 3); 7485 tcg_gen_andi_tl(t1, t1, 3); 7486 tcg_gen_movi_tl(t2, 0); 7487 label1 = gen_new_label(); 7488 tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1); 7489 tcg_gen_andi_tl(t0, t0, ~3); 7490 tcg_gen_or_tl(t0, t0, t1); 7491 tcg_gen_movi_tl(t2, CC_Z); 7492 gen_set_label(label1); 7493 if (mod != 3) { 7494 gen_op_st_v(ot + s->mem_index, t0, cpu_A0); 7495 } else { 7496 gen_op_mov_reg_v(ot, rm, t0); 6122 7497 } 6123 7498 if (s->cc_op != CC_OP_DYNAMIC) 6124 7499 gen_op_set_cc_op(s->cc_op); 6125 gen_op_arpl(); 7500 gen_compute_eflags(cpu_cc_src); 7501 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z); 7502 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2); 6126 7503 s->cc_op = CC_OP_EFLAGS; 6127 if (mod != 3) { 6128 gen_op_st_T0_A0[ot + s->mem_index](); 6129 } else { 6130 gen_op_mov_reg_T0[ot][rm](); 6131 } 6132 gen_op_arpl_update(); 7504 tcg_temp_free(t0); 7505 tcg_temp_free(t1); 7506 tcg_temp_free(t2); 6133 7507 } 6134 7508 break; 6135 7509 case 0x102: /* lar */ 6136 7510 case 0x103: /* lsl */ 6137 if (!s->pe || s->vm86) 6138 goto illegal_op; 6139 ot = dflag ? OT_LONG : OT_WORD; 6140 modrm = ldub_code(s->pc++); 6141 reg = ((modrm >> 3) & 7) | rex_r; 6142 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0); 6143 gen_op_mov_TN_reg[ot][1][reg](); 6144 if (s->cc_op != CC_OP_DYNAMIC) 6145 gen_op_set_cc_op(s->cc_op); 6146 if (b == 0x102) 6147 gen_op_lar(); 6148 else 6149 gen_op_lsl(); 6150 s->cc_op = CC_OP_EFLAGS; 6151 gen_op_mov_reg_T1[ot][reg](); 7511 { 7512 int label1; 7513 TCGv t0; 7514 if (!s->pe || s->vm86) 7515 goto illegal_op; 7516 ot = dflag ? 
OT_LONG : OT_WORD; 7517 modrm = ldub_code(s->pc++); 7518 reg = ((modrm >> 3) & 7) | rex_r; 7519 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0); 7520 t0 = tcg_temp_local_new(TCG_TYPE_TL); 7521 if (s->cc_op != CC_OP_DYNAMIC) 7522 gen_op_set_cc_op(s->cc_op); 7523 if (b == 0x102) 7524 tcg_gen_helper_1_1(helper_lar, t0, cpu_T[0]); 7525 else 7526 tcg_gen_helper_1_1(helper_lsl, t0, cpu_T[0]); 7527 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z); 7528 label1 = gen_new_label(); 7529 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1); 7530 gen_op_mov_reg_v(ot, reg, t0); 7531 gen_set_label(label1); 7532 s->cc_op = CC_OP_EFLAGS; 7533 tcg_temp_free(t0); 7534 } 6152 7535 break; 6153 7536 case 0x118: … … 6194 7577 case 4: 6195 7578 case 8: 7579 if (s->cc_op != CC_OP_DYNAMIC) 7580 gen_op_set_cc_op(s->cc_op); 7581 gen_jmp_im(pc_start - s->cs_base); 6196 7582 if (b & 2) { 6197 gen_op_mov_TN_reg[ot][0][rm](); 6198 gen_op_movl_crN_T0(reg); 7583 gen_op_mov_TN_reg(ot, 0, rm); 7584 tcg_gen_helper_0_2(helper_write_crN, 7585 tcg_const_i32(reg), cpu_T[0]); 6199 7586 gen_jmp_im(s->pc - s->cs_base); 6200 7587 gen_eob(s); 6201 7588 } else { 6202 #if !defined(CONFIG_USER_ONLY) 6203 if (reg == 8) 6204 gen_op_movtl_T0_cr8(); 6205 else 6206 #endif 6207 gen_op_movtl_T0_env(offsetof(CPUX86State,cr[reg])); 6208 gen_op_mov_reg_T0[ot][rm](); 7589 tcg_gen_helper_1_1(helper_read_crN, 7590 cpu_T[0], tcg_const_i32(reg)); 7591 gen_op_mov_reg_T0(ot, rm); 6209 7592 } 6210 7593 break; … … 6232 7615 goto illegal_op; 6233 7616 if (b & 2) { 6234 gen_op_mov_TN_reg[ot][0][rm](); 6235 gen_op_movl_drN_T0(reg); 7617 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg); 7618 gen_op_mov_TN_reg(ot, 0, rm); 7619 tcg_gen_helper_0_2(helper_movl_drN_T0, 7620 tcg_const_i32(reg), cpu_T[0]); 6236 7621 gen_jmp_im(s->pc - s->cs_base); 6237 7622 gen_eob(s); 6238 7623 } else { 6239 gen_op_movtl_T0_env(offsetof(CPUX86State,dr[reg])); 6240 gen_op_mov_reg_T0[ot][rm](); 7624 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg); 7625 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,dr[reg])); 7626 gen_op_mov_reg_T0(ot, rm); 6241 7627 } 6242 7628 } … … 6246 7632 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); 6247 7633 } else { 6248 gen_op_clts(); 7634 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0); 7635 tcg_gen_helper_0_0(helper_clts); 6249 7636 /* abort block because static cpu state changed */ 6250 7637 gen_jmp_im(s->pc - s->cs_base); … … 6252 7639 } 6253 7640 break; 6254 /* MMX/ SSE/SSE2/PNIsupport */7641 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */ 6255 7642 case 0x1c3: /* MOVNTI reg, mem */ 6256 7643 if (!(s->cpuid_features & CPUID_SSE2)) … … 6279 7666 } 6280 7667 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 6281 gen_op_fxsave_A0((s->dflag == 2)); 7668 if (s->cc_op != CC_OP_DYNAMIC) 7669 gen_op_set_cc_op(s->cc_op); 7670 gen_jmp_im(pc_start - s->cs_base); 7671 tcg_gen_helper_0_2(helper_fxsave, 7672 cpu_A0, tcg_const_i32((s->dflag == 2))); 6282 7673 break; 6283 7674 case 1: /* fxrstor */ … … 6290 7681 } 6291 7682 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 6292 gen_op_fxrstor_A0((s->dflag == 2)); 7683 if (s->cc_op != CC_OP_DYNAMIC) 7684 gen_op_set_cc_op(s->cc_op); 7685 gen_jmp_im(pc_start - s->cs_base); 7686 tcg_gen_helper_0_2(helper_fxrstor, 7687 cpu_A0, tcg_const_i32((s->dflag == 2))); 6293 7688 break; 6294 7689 case 2: /* ldmxcsr */ … … 6303 7698 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 6304 7699 if (op == 2) { 6305 gen_op_ld_T0_A0 [OT_LONG + s->mem_index]();6306 gen_op_movl_env_T0(offsetof(CPUX86State, 
mxcsr));7700 gen_op_ld_T0_A0(OT_LONG + s->mem_index); 7701 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr)); 6307 7702 } else { 6308 gen_op_movl_T0_env(offsetof(CPUX86State, mxcsr));6309 gen_op_st_T0_A0 [OT_LONG + s->mem_index]();7703 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr)); 7704 gen_op_st_T0_A0(OT_LONG + s->mem_index); 6310 7705 } 6311 7706 break; … … 6318 7713 if ((modrm & 0xc7) == 0xc0) { 6319 7714 /* sfence */ 7715 /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */ 6320 7716 if (!(s->cpuid_features & CPUID_SSE)) 6321 7717 goto illegal_op; … … 6331 7727 } 6332 7728 break; 6333 case 0x10d: /* prefetch*/7729 case 0x10d: /* 3DNow! prefetch(w) */ 6334 7730 modrm = ldub_code(s->pc++); 7731 mod = (modrm >> 6) & 3; 7732 if (mod == 3) 7733 goto illegal_op; 6335 7734 gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 6336 7735 /* ignore for now */ 6337 7736 break; 6338 7737 case 0x1aa: /* rsm */ 7738 gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM); 6339 7739 if (!(s->flags & HF_SMM_MASK)) 6340 7740 goto illegal_op; … … 6344 7744 } 6345 7745 gen_jmp_im(s->pc - s->cs_base); 6346 gen_op_rsm();7746 tcg_gen_helper_0_0(helper_rsm); 6347 7747 gen_eob(s); 6348 7748 break; 7749 case 0x1b8: /* SSE4.2 popcnt */ 7750 if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) != 7751 PREFIX_REPZ) 7752 goto illegal_op; 7753 if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT)) 7754 goto illegal_op; 7755 7756 modrm = ldub_code(s->pc++); 7757 reg = ((modrm >> 3) & 7); 7758 7759 if (s->prefix & PREFIX_DATA) 7760 ot = OT_WORD; 7761 else if (s->dflag != 2) 7762 ot = OT_LONG; 7763 else 7764 ot = OT_QUAD; 7765 7766 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0); 7767 tcg_gen_helper_1_2(helper_popcnt, 7768 cpu_T[0], cpu_T[0], tcg_const_i32(ot)); 7769 gen_op_mov_reg_T0(ot, reg); 7770 7771 s->cc_op = CC_OP_EFLAGS; 7772 break; 7773 case 0x10e ... 0x10f: 7774 /* 3DNow! instructions, ignore prefixes */ 7775 s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA); 6349 7776 case 0x110 ... 0x117: 6350 7777 case 0x128 ... 0x12f: 7778 case 0x138 ... 0x13a: 6351 7779 case 0x150 ... 0x177: 6352 7780 case 0x17c ... 
0x17f: … … 6361 7789 /* lock generation */ 6362 7790 if (s->prefix & PREFIX_LOCK) 6363 gen_op_unlock();7791 tcg_gen_helper_0_0(helper_unlock); 6364 7792 return s->pc; 6365 7793 illegal_op: 6366 7794 if (s->prefix & PREFIX_LOCK) 6367 gen_op_unlock();7795 tcg_gen_helper_0_0(helper_unlock); 6368 7796 /* XXX: ensure that no lock was generated */ 6369 7797 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base); … … 6371 7799 } 6372 7800 6373 #define CC_OSZAPC (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C)6374 #define CC_OSZAP (CC_O | CC_S | CC_Z | CC_A | CC_P)6375 6376 /* flags read by an operation */6377 static uint16_t opc_read_flags[NB_OPS] = {6378 [INDEX_op_aas] = CC_A,6379 [INDEX_op_aaa] = CC_A,6380 [INDEX_op_das] = CC_A | CC_C,6381 [INDEX_op_daa] = CC_A | CC_C,6382 6383 /* subtle: due to the incl/decl implementation, C is used */6384 [INDEX_op_update_inc_cc] = CC_C,6385 6386 [INDEX_op_into] = CC_O,6387 6388 [INDEX_op_jb_subb] = CC_C,6389 [INDEX_op_jb_subw] = CC_C,6390 [INDEX_op_jb_subl] = CC_C,6391 6392 [INDEX_op_jz_subb] = CC_Z,6393 [INDEX_op_jz_subw] = CC_Z,6394 [INDEX_op_jz_subl] = CC_Z,6395 6396 [INDEX_op_jbe_subb] = CC_Z | CC_C,6397 [INDEX_op_jbe_subw] = CC_Z | CC_C,6398 [INDEX_op_jbe_subl] = CC_Z | CC_C,6399 6400 [INDEX_op_js_subb] = CC_S,6401 [INDEX_op_js_subw] = CC_S,6402 [INDEX_op_js_subl] = CC_S,6403 6404 [INDEX_op_jl_subb] = CC_O | CC_S,6405 [INDEX_op_jl_subw] = CC_O | CC_S,6406 [INDEX_op_jl_subl] = CC_O | CC_S,6407 6408 [INDEX_op_jle_subb] = CC_O | CC_S | CC_Z,6409 [INDEX_op_jle_subw] = CC_O | CC_S | CC_Z,6410 [INDEX_op_jle_subl] = CC_O | CC_S | CC_Z,6411 6412 [INDEX_op_loopnzw] = CC_Z,6413 [INDEX_op_loopnzl] = CC_Z,6414 [INDEX_op_loopzw] = CC_Z,6415 [INDEX_op_loopzl] = CC_Z,6416 6417 [INDEX_op_seto_T0_cc] = CC_O,6418 [INDEX_op_setb_T0_cc] = CC_C,6419 [INDEX_op_setz_T0_cc] = CC_Z,6420 [INDEX_op_setbe_T0_cc] = CC_Z | CC_C,6421 [INDEX_op_sets_T0_cc] = CC_S,6422 [INDEX_op_setp_T0_cc] = CC_P,6423 [INDEX_op_setl_T0_cc] = CC_O | CC_S,6424 [INDEX_op_setle_T0_cc] = CC_O | CC_S | CC_Z,6425 6426 [INDEX_op_setb_T0_subb] = CC_C,6427 [INDEX_op_setb_T0_subw] = CC_C,6428 [INDEX_op_setb_T0_subl] = CC_C,6429 6430 [INDEX_op_setz_T0_subb] = CC_Z,6431 [INDEX_op_setz_T0_subw] = CC_Z,6432 [INDEX_op_setz_T0_subl] = CC_Z,6433 6434 [INDEX_op_setbe_T0_subb] = CC_Z | CC_C,6435 [INDEX_op_setbe_T0_subw] = CC_Z | CC_C,6436 [INDEX_op_setbe_T0_subl] = CC_Z | CC_C,6437 6438 [INDEX_op_sets_T0_subb] = CC_S,6439 [INDEX_op_sets_T0_subw] = CC_S,6440 [INDEX_op_sets_T0_subl] = CC_S,6441 6442 [INDEX_op_setl_T0_subb] = CC_O | CC_S,6443 [INDEX_op_setl_T0_subw] = CC_O | CC_S,6444 [INDEX_op_setl_T0_subl] = CC_O | CC_S,6445 6446 [INDEX_op_setle_T0_subb] = CC_O | CC_S | CC_Z,6447 [INDEX_op_setle_T0_subw] = CC_O | CC_S | CC_Z,6448 [INDEX_op_setle_T0_subl] = CC_O | CC_S | CC_Z,6449 6450 [INDEX_op_movl_T0_eflags] = CC_OSZAPC,6451 [INDEX_op_cmc] = CC_C,6452 [INDEX_op_salc] = CC_C,6453 6454 /* needed for correct flag optimisation before string ops */6455 [INDEX_op_jnz_ecxw] = CC_OSZAPC,6456 [INDEX_op_jnz_ecxl] = CC_OSZAPC,6457 [INDEX_op_jz_ecxw] = CC_OSZAPC,6458 [INDEX_op_jz_ecxl] = CC_OSZAPC,6459 6460 #ifdef TARGET_X86_646461 [INDEX_op_jb_subq] = CC_C,6462 [INDEX_op_jz_subq] = CC_Z,6463 [INDEX_op_jbe_subq] = CC_Z | CC_C,6464 [INDEX_op_js_subq] = CC_S,6465 [INDEX_op_jl_subq] = CC_O | CC_S,6466 [INDEX_op_jle_subq] = CC_O | CC_S | CC_Z,6467 6468 [INDEX_op_loopnzq] = CC_Z,6469 [INDEX_op_loopzq] = CC_Z,6470 6471 [INDEX_op_setb_T0_subq] = CC_C,6472 [INDEX_op_setz_T0_subq] = CC_Z,6473 [INDEX_op_setbe_T0_subq] = CC_Z | CC_C,6474 
[INDEX_op_sets_T0_subq] = CC_S,6475 [INDEX_op_setl_T0_subq] = CC_O | CC_S,6476 [INDEX_op_setle_T0_subq] = CC_O | CC_S | CC_Z,6477 6478 [INDEX_op_jnz_ecxq] = CC_OSZAPC,6479 [INDEX_op_jz_ecxq] = CC_OSZAPC,6480 #endif6481 6482 #define DEF_READF(SUFFIX)\6483 [INDEX_op_adcb ## SUFFIX ## _T0_T1_cc] = CC_C,\6484 [INDEX_op_adcw ## SUFFIX ## _T0_T1_cc] = CC_C,\6485 [INDEX_op_adcl ## SUFFIX ## _T0_T1_cc] = CC_C,\6486 X86_64_DEF([INDEX_op_adcq ## SUFFIX ## _T0_T1_cc] = CC_C,)\6487 [INDEX_op_sbbb ## SUFFIX ## _T0_T1_cc] = CC_C,\6488 [INDEX_op_sbbw ## SUFFIX ## _T0_T1_cc] = CC_C,\6489 [INDEX_op_sbbl ## SUFFIX ## _T0_T1_cc] = CC_C,\6490 X86_64_DEF([INDEX_op_sbbq ## SUFFIX ## _T0_T1_cc] = CC_C,)\6491 \6492 [INDEX_op_rclb ## SUFFIX ## _T0_T1_cc] = CC_C,\6493 [INDEX_op_rclw ## SUFFIX ## _T0_T1_cc] = CC_C,\6494 [INDEX_op_rcll ## SUFFIX ## _T0_T1_cc] = CC_C,\6495 X86_64_DEF([INDEX_op_rclq ## SUFFIX ## _T0_T1_cc] = CC_C,)\6496 [INDEX_op_rcrb ## SUFFIX ## _T0_T1_cc] = CC_C,\6497 [INDEX_op_rcrw ## SUFFIX ## _T0_T1_cc] = CC_C,\6498 [INDEX_op_rcrl ## SUFFIX ## _T0_T1_cc] = CC_C,\6499 X86_64_DEF([INDEX_op_rcrq ## SUFFIX ## _T0_T1_cc] = CC_C,)6500 6501 DEF_READF( )6502 DEF_READF(_raw)6503 #ifndef CONFIG_USER_ONLY6504 DEF_READF(_kernel)6505 DEF_READF(_user)6506 #endif6507 };6508 6509 /* flags written by an operation */6510 static uint16_t opc_write_flags[NB_OPS] = {6511 [INDEX_op_update2_cc] = CC_OSZAPC,6512 [INDEX_op_update1_cc] = CC_OSZAPC,6513 [INDEX_op_cmpl_T0_T1_cc] = CC_OSZAPC,6514 [INDEX_op_update_neg_cc] = CC_OSZAPC,6515 /* subtle: due to the incl/decl implementation, C is used */6516 [INDEX_op_update_inc_cc] = CC_OSZAPC,6517 [INDEX_op_testl_T0_T1_cc] = CC_OSZAPC,6518 6519 [INDEX_op_mulb_AL_T0] = CC_OSZAPC,6520 [INDEX_op_mulw_AX_T0] = CC_OSZAPC,6521 [INDEX_op_mull_EAX_T0] = CC_OSZAPC,6522 X86_64_DEF([INDEX_op_mulq_EAX_T0] = CC_OSZAPC,)6523 [INDEX_op_imulb_AL_T0] = CC_OSZAPC,6524 [INDEX_op_imulw_AX_T0] = CC_OSZAPC,6525 [INDEX_op_imull_EAX_T0] = CC_OSZAPC,6526 X86_64_DEF([INDEX_op_imulq_EAX_T0] = CC_OSZAPC,)6527 [INDEX_op_imulw_T0_T1] = CC_OSZAPC,6528 [INDEX_op_imull_T0_T1] = CC_OSZAPC,6529 X86_64_DEF([INDEX_op_imulq_T0_T1] = CC_OSZAPC,)6530 6531 /* sse */6532 [INDEX_op_ucomiss] = CC_OSZAPC,6533 [INDEX_op_ucomisd] = CC_OSZAPC,6534 [INDEX_op_comiss] = CC_OSZAPC,6535 [INDEX_op_comisd] = CC_OSZAPC,6536 6537 /* bcd */6538 [INDEX_op_aam] = CC_OSZAPC,6539 [INDEX_op_aad] = CC_OSZAPC,6540 [INDEX_op_aas] = CC_OSZAPC,6541 [INDEX_op_aaa] = CC_OSZAPC,6542 [INDEX_op_das] = CC_OSZAPC,6543 [INDEX_op_daa] = CC_OSZAPC,6544 6545 [INDEX_op_movb_eflags_T0] = CC_S | CC_Z | CC_A | CC_P | CC_C,6546 [INDEX_op_movw_eflags_T0] = CC_OSZAPC,6547 [INDEX_op_movl_eflags_T0] = CC_OSZAPC,6548 [INDEX_op_movw_eflags_T0_io] = CC_OSZAPC,6549 [INDEX_op_movl_eflags_T0_io] = CC_OSZAPC,6550 [INDEX_op_movw_eflags_T0_cpl0] = CC_OSZAPC,6551 [INDEX_op_movl_eflags_T0_cpl0] = CC_OSZAPC,6552 [INDEX_op_clc] = CC_C,6553 [INDEX_op_stc] = CC_C,6554 [INDEX_op_cmc] = CC_C,6555 6556 [INDEX_op_btw_T0_T1_cc] = CC_OSZAPC,6557 [INDEX_op_btl_T0_T1_cc] = CC_OSZAPC,6558 X86_64_DEF([INDEX_op_btq_T0_T1_cc] = CC_OSZAPC,)6559 [INDEX_op_btsw_T0_T1_cc] = CC_OSZAPC,6560 [INDEX_op_btsl_T0_T1_cc] = CC_OSZAPC,6561 X86_64_DEF([INDEX_op_btsq_T0_T1_cc] = CC_OSZAPC,)6562 [INDEX_op_btrw_T0_T1_cc] = CC_OSZAPC,6563 [INDEX_op_btrl_T0_T1_cc] = CC_OSZAPC,6564 X86_64_DEF([INDEX_op_btrq_T0_T1_cc] = CC_OSZAPC,)6565 [INDEX_op_btcw_T0_T1_cc] = CC_OSZAPC,6566 [INDEX_op_btcl_T0_T1_cc] = CC_OSZAPC,6567 X86_64_DEF([INDEX_op_btcq_T0_T1_cc] = CC_OSZAPC,)6568 6569 [INDEX_op_bsfw_T0_cc] = CC_OSZAPC,6570 
[INDEX_op_bsfl_T0_cc] = CC_OSZAPC,6571 X86_64_DEF([INDEX_op_bsfq_T0_cc] = CC_OSZAPC,)6572 [INDEX_op_bsrw_T0_cc] = CC_OSZAPC,6573 [INDEX_op_bsrl_T0_cc] = CC_OSZAPC,6574 X86_64_DEF([INDEX_op_bsrq_T0_cc] = CC_OSZAPC,)6575 6576 [INDEX_op_cmpxchgb_T0_T1_EAX_cc] = CC_OSZAPC,6577 [INDEX_op_cmpxchgw_T0_T1_EAX_cc] = CC_OSZAPC,6578 [INDEX_op_cmpxchgl_T0_T1_EAX_cc] = CC_OSZAPC,6579 X86_64_DEF([INDEX_op_cmpxchgq_T0_T1_EAX_cc] = CC_OSZAPC,)6580 6581 [INDEX_op_cmpxchg8b] = CC_Z,6582 [INDEX_op_lar] = CC_Z,6583 [INDEX_op_lsl] = CC_Z,6584 [INDEX_op_verr] = CC_Z,6585 [INDEX_op_verw] = CC_Z,6586 [INDEX_op_fcomi_ST0_FT0] = CC_Z | CC_P | CC_C,6587 [INDEX_op_fucomi_ST0_FT0] = CC_Z | CC_P | CC_C,6588 6589 #define DEF_WRITEF(SUFFIX)\6590 [INDEX_op_adcb ## SUFFIX ## _T0_T1_cc] = CC_OSZAPC,\6591 [INDEX_op_adcw ## SUFFIX ## _T0_T1_cc] = CC_OSZAPC,\6592 [INDEX_op_adcl ## SUFFIX ## _T0_T1_cc] = CC_OSZAPC,\6593 X86_64_DEF([INDEX_op_adcq ## SUFFIX ## _T0_T1_cc] = CC_OSZAPC,)\6594 [INDEX_op_sbbb ## SUFFIX ## _T0_T1_cc] = CC_OSZAPC,\6595 [INDEX_op_sbbw ## SUFFIX ## _T0_T1_cc] = CC_OSZAPC,\6596 [INDEX_op_sbbl ## SUFFIX ## _T0_T1_cc] = CC_OSZAPC,\6597 X86_64_DEF([INDEX_op_sbbq ## SUFFIX ## _T0_T1_cc] = CC_OSZAPC,)\6598 \6599 [INDEX_op_rolb ## SUFFIX ## _T0_T1_cc] = CC_O | CC_C,\6600 [INDEX_op_rolw ## SUFFIX ## _T0_T1_cc] = CC_O | CC_C,\6601 [INDEX_op_roll ## SUFFIX ## _T0_T1_cc] = CC_O | CC_C,\6602 X86_64_DEF([INDEX_op_rolq ## SUFFIX ## _T0_T1_cc] = CC_O | CC_C,)\6603 [INDEX_op_rorb ## SUFFIX ## _T0_T1_cc] = CC_O | CC_C,\6604 [INDEX_op_rorw ## SUFFIX ## _T0_T1_cc] = CC_O | CC_C,\6605 [INDEX_op_rorl ## SUFFIX ## _T0_T1_cc] = CC_O | CC_C,\6606 X86_64_DEF([INDEX_op_rorq ## SUFFIX ## _T0_T1_cc] = CC_O | CC_C,)\6607 \6608 [INDEX_op_rclb ## SUFFIX ## _T0_T1_cc] = CC_O | CC_C,\6609 [INDEX_op_rclw ## SUFFIX ## _T0_T1_cc] = CC_O | CC_C,\6610 [INDEX_op_rcll ## SUFFIX ## _T0_T1_cc] = CC_O | CC_C,\6611 X86_64_DEF([INDEX_op_rclq ## SUFFIX ## _T0_T1_cc] = CC_O | CC_C,)\6612 [INDEX_op_rcrb ## SUFFIX ## _T0_T1_cc] = CC_O | CC_C,\6613 [INDEX_op_rcrw ## SUFFIX ## _T0_T1_cc] = CC_O | CC_C,\6614 [INDEX_op_rcrl ## SUFFIX ## _T0_T1_cc] = CC_O | CC_C,\6615 X86_64_DEF([INDEX_op_rcrq ## SUFFIX ## _T0_T1_cc] = CC_O | CC_C,)\6616 \6617 [INDEX_op_shlb ## SUFFIX ## _T0_T1_cc] = CC_OSZAPC,\6618 [INDEX_op_shlw ## SUFFIX ## _T0_T1_cc] = CC_OSZAPC,\6619 [INDEX_op_shll ## SUFFIX ## _T0_T1_cc] = CC_OSZAPC,\6620 X86_64_DEF([INDEX_op_shlq ## SUFFIX ## _T0_T1_cc] = CC_OSZAPC,)\6621 \6622 [INDEX_op_shrb ## SUFFIX ## _T0_T1_cc] = CC_OSZAPC,\6623 [INDEX_op_shrw ## SUFFIX ## _T0_T1_cc] = CC_OSZAPC,\6624 [INDEX_op_shrl ## SUFFIX ## _T0_T1_cc] = CC_OSZAPC,\6625 X86_64_DEF([INDEX_op_shrq ## SUFFIX ## _T0_T1_cc] = CC_OSZAPC,)\6626 \6627 [INDEX_op_sarb ## SUFFIX ## _T0_T1_cc] = CC_OSZAPC,\6628 [INDEX_op_sarw ## SUFFIX ## _T0_T1_cc] = CC_OSZAPC,\6629 [INDEX_op_sarl ## SUFFIX ## _T0_T1_cc] = CC_OSZAPC,\6630 X86_64_DEF([INDEX_op_sarq ## SUFFIX ## _T0_T1_cc] = CC_OSZAPC,)\6631 \6632 [INDEX_op_shldw ## SUFFIX ## _T0_T1_ECX_cc] = CC_OSZAPC,\6633 [INDEX_op_shldl ## SUFFIX ## _T0_T1_ECX_cc] = CC_OSZAPC,\6634 X86_64_DEF([INDEX_op_shldq ## SUFFIX ## _T0_T1_ECX_cc] = CC_OSZAPC,)\6635 [INDEX_op_shldw ## SUFFIX ## _T0_T1_im_cc] = CC_OSZAPC,\6636 [INDEX_op_shldl ## SUFFIX ## _T0_T1_im_cc] = CC_OSZAPC,\6637 X86_64_DEF([INDEX_op_shldq ## SUFFIX ## _T0_T1_im_cc] = CC_OSZAPC,)\6638 \6639 [INDEX_op_shrdw ## SUFFIX ## _T0_T1_ECX_cc] = CC_OSZAPC,\6640 [INDEX_op_shrdl ## SUFFIX ## _T0_T1_ECX_cc] = CC_OSZAPC,\6641 X86_64_DEF([INDEX_op_shrdq ## SUFFIX ## _T0_T1_ECX_cc] = CC_OSZAPC,)\6642 
[INDEX_op_shrdw ## SUFFIX ## _T0_T1_im_cc] = CC_OSZAPC,\6643 [INDEX_op_shrdl ## SUFFIX ## _T0_T1_im_cc] = CC_OSZAPC,\6644 X86_64_DEF([INDEX_op_shrdq ## SUFFIX ## _T0_T1_im_cc] = CC_OSZAPC,)\6645 \6646 [INDEX_op_cmpxchgb ## SUFFIX ## _T0_T1_EAX_cc] = CC_OSZAPC,\6647 [INDEX_op_cmpxchgw ## SUFFIX ## _T0_T1_EAX_cc] = CC_OSZAPC,\6648 [INDEX_op_cmpxchgl ## SUFFIX ## _T0_T1_EAX_cc] = CC_OSZAPC,\6649 X86_64_DEF([INDEX_op_cmpxchgq ## SUFFIX ## _T0_T1_EAX_cc] = CC_OSZAPC,)6650 6651 6652 DEF_WRITEF( )6653 DEF_WRITEF(_raw)6654 #ifndef CONFIG_USER_ONLY6655 DEF_WRITEF(_kernel)6656 DEF_WRITEF(_user)6657 #endif6658 };6659 6660 /* simpler form of an operation if no flags need to be generated */6661 static uint16_t opc_simpler[NB_OPS] = {6662 [INDEX_op_update2_cc] = INDEX_op_nop,6663 [INDEX_op_update1_cc] = INDEX_op_nop,6664 [INDEX_op_update_neg_cc] = INDEX_op_nop,6665 #if 06666 /* broken: CC_OP logic must be rewritten */6667 [INDEX_op_update_inc_cc] = INDEX_op_nop,6668 #endif6669 6670 [INDEX_op_shlb_T0_T1_cc] = INDEX_op_shlb_T0_T1,6671 [INDEX_op_shlw_T0_T1_cc] = INDEX_op_shlw_T0_T1,6672 [INDEX_op_shll_T0_T1_cc] = INDEX_op_shll_T0_T1,6673 X86_64_DEF([INDEX_op_shlq_T0_T1_cc] = INDEX_op_shlq_T0_T1,)6674 6675 [INDEX_op_shrb_T0_T1_cc] = INDEX_op_shrb_T0_T1,6676 [INDEX_op_shrw_T0_T1_cc] = INDEX_op_shrw_T0_T1,6677 [INDEX_op_shrl_T0_T1_cc] = INDEX_op_shrl_T0_T1,6678 X86_64_DEF([INDEX_op_shrq_T0_T1_cc] = INDEX_op_shrq_T0_T1,)6679 6680 [INDEX_op_sarb_T0_T1_cc] = INDEX_op_sarb_T0_T1,6681 [INDEX_op_sarw_T0_T1_cc] = INDEX_op_sarw_T0_T1,6682 [INDEX_op_sarl_T0_T1_cc] = INDEX_op_sarl_T0_T1,6683 X86_64_DEF([INDEX_op_sarq_T0_T1_cc] = INDEX_op_sarq_T0_T1,)6684 6685 #define DEF_SIMPLER(SUFFIX)\6686 [INDEX_op_rolb ## SUFFIX ## _T0_T1_cc] = INDEX_op_rolb ## SUFFIX ## _T0_T1,\6687 [INDEX_op_rolw ## SUFFIX ## _T0_T1_cc] = INDEX_op_rolw ## SUFFIX ## _T0_T1,\6688 [INDEX_op_roll ## SUFFIX ## _T0_T1_cc] = INDEX_op_roll ## SUFFIX ## _T0_T1,\6689 X86_64_DEF([INDEX_op_rolq ## SUFFIX ## _T0_T1_cc] = INDEX_op_rolq ## SUFFIX ## _T0_T1,)\6690 \6691 [INDEX_op_rorb ## SUFFIX ## _T0_T1_cc] = INDEX_op_rorb ## SUFFIX ## _T0_T1,\6692 [INDEX_op_rorw ## SUFFIX ## _T0_T1_cc] = INDEX_op_rorw ## SUFFIX ## _T0_T1,\6693 [INDEX_op_rorl ## SUFFIX ## _T0_T1_cc] = INDEX_op_rorl ## SUFFIX ## _T0_T1,\6694 X86_64_DEF([INDEX_op_rorq ## SUFFIX ## _T0_T1_cc] = INDEX_op_rorq ## SUFFIX ## _T0_T1,)6695 6696 DEF_SIMPLER( )6697 DEF_SIMPLER(_raw)6698 #ifndef CONFIG_USER_ONLY6699 DEF_SIMPLER(_kernel)6700 DEF_SIMPLER(_user)6701 #endif6702 };6703 6704 7801 void optimize_flags_init(void) 6705 7802 { 6706 int i; 6707 /* put default values in arrays */ 6708 for(i = 0; i < NB_OPS; i++) { 6709 if (opc_simpler[i] == 0) 6710 opc_simpler[i] = i; 6711 } 6712 } 6713 6714 /* CPU flags computation optimization: we move backward thru the 6715 generated code to see which flags are needed. The operation is 6716 modified if suitable */ 6717 static void optimize_flags(uint16_t *opc_buf, int opc_buf_len) 6718 { 6719 uint16_t *opc_ptr; 6720 int live_flags, write_flags, op; 6721 6722 opc_ptr = opc_buf + opc_buf_len; 6723 /* live_flags contains the flags needed by the next instructions 6724 in the code. At the end of the bloc, we consider that all the 6725 flags are live. 
*/ 6726 live_flags = CC_OSZAPC; 6727 while (opc_ptr > opc_buf) { 6728 op = *--opc_ptr; 6729 /* if none of the flags written by the instruction is used, 6730 then we can try to find a simpler instruction */ 6731 write_flags = opc_write_flags[op]; 6732 if ((live_flags & write_flags) == 0) { 6733 *opc_ptr = opc_simpler[op]; 6734 } 6735 /* compute the live flags before the instruction */ 6736 live_flags &= ~write_flags; 6737 live_flags |= opc_read_flags[op]; 6738 } 7803 #ifndef VBOX 7804 #if TCG_TARGET_REG_BITS == 32 7805 assert(sizeof(CCTable) == (1 << 3)); 7806 #else 7807 assert(sizeof(CCTable) == (1 << 4)); 7808 #endif 7809 #endif 7810 cpu_env = tcg_global_reg_new(TCG_TYPE_PTR, TCG_AREG0, "env"); 7811 cpu_cc_op = tcg_global_mem_new(TCG_TYPE_I32, 7812 TCG_AREG0, offsetof(CPUState, cc_op), "cc_op"); 7813 cpu_cc_src = tcg_global_mem_new(TCG_TYPE_TL, 7814 TCG_AREG0, offsetof(CPUState, cc_src), "cc_src"); 7815 cpu_cc_dst = tcg_global_mem_new(TCG_TYPE_TL, 7816 TCG_AREG0, offsetof(CPUState, cc_dst), "cc_dst"); 7817 cpu_cc_tmp = tcg_global_mem_new(TCG_TYPE_TL, 7818 TCG_AREG0, offsetof(CPUState, cc_tmp), "cc_tmp"); 7819 7820 /* register helpers */ 7821 7822 #define DEF_HELPER(ret, name, params) tcg_register_helper(name, #name); 7823 #include "helper.h" 6739 7824 } 6740 7825 … … 6742 7827 basic block 'tb'. If search_pc is TRUE, also generate PC 6743 7828 information for each intermediate instruction. */ 6744 static inline intgen_intermediate_code_internal(CPUState *env,6745 TranslationBlock *tb,6746 int search_pc)7829 static inline void gen_intermediate_code_internal(CPUState *env, 7830 TranslationBlock *tb, 7831 int search_pc) 6747 7832 { 6748 7833 DisasContext dc1, *dc = &dc1; 6749 7834 target_ulong pc_ptr; 6750 7835 uint16_t *gen_opc_end; 6751 int flags, j, lj, cflags; 7836 int j, lj, cflags; 7837 uint64_t flags; 6752 7838 target_ulong pc_start; 6753 7839 target_ulong cs_base; 7840 int num_insns; 7841 int max_insns; 6754 7842 6755 7843 /* generate intermediate code */ … … 6765 7853 dc->f_st = 0; 6766 7854 dc->vm86 = (flags >> VM_SHIFT) & 1; 6767 #ifdef VBOX_WITH_CALL_RECORD6768 dc->vme = !!(env->cr[4] & CR4_VME_MASK);6769 if ( !(env->state & CPU_RAW_RING0)6770 && (env->cr[0] & CR0_PG_MASK)6771 && !(env->eflags & X86_EFL_IF)6772 && dc->code32)6773 dc->record_call = 1;6774 else6775 dc->record_call = 0;6776 #endif6777 7855 dc->cpl = (flags >> HF_CPL_SHIFT) & 3; 6778 7856 dc->iopl = (flags >> IOPL_SHIFT) & 3; … … 6812 7890 #endif 6813 7891 6814 gen_opc_ptr = gen_opc_buf; 7892 cpu_T[0] = tcg_temp_new(TCG_TYPE_TL); 7893 cpu_T[1] = tcg_temp_new(TCG_TYPE_TL); 7894 cpu_A0 = tcg_temp_new(TCG_TYPE_TL); 7895 cpu_T3 = tcg_temp_new(TCG_TYPE_TL); 7896 7897 cpu_tmp0 = tcg_temp_new(TCG_TYPE_TL); 7898 cpu_tmp1_i64 = tcg_temp_new(TCG_TYPE_I64); 7899 cpu_tmp2_i32 = tcg_temp_new(TCG_TYPE_I32); 7900 cpu_tmp3_i32 = tcg_temp_new(TCG_TYPE_I32); 7901 cpu_tmp4 = tcg_temp_new(TCG_TYPE_TL); 7902 cpu_tmp5 = tcg_temp_new(TCG_TYPE_TL); 7903 cpu_tmp6 = tcg_temp_new(TCG_TYPE_TL); 7904 cpu_ptr0 = tcg_temp_new(TCG_TYPE_PTR); 7905 cpu_ptr1 = tcg_temp_new(TCG_TYPE_PTR); 7906 6815 7907 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE; 6816 gen_opparam_ptr = gen_opparam_buf;6817 nb_gen_labels = 0;6818 7908 6819 7909 dc->is_jmp = DISAS_NEXT; 6820 7910 pc_ptr = pc_start; 6821 7911 lj = -1; 6822 7912 num_insns = 0; 7913 max_insns = tb->cflags & CF_COUNT_MASK; 7914 if (max_insns == 0) 7915 max_insns = CF_COUNT_MASK; 7916 7917 gen_icount_start(); 6823 7918 for(;;) { 6824 7919 if (env->nb_breakpoints > 0) { … … 6840 7935 gen_opc_cc_op[lj] = 
dc->cc_op; 6841 7936 gen_opc_instr_start[lj] = 1; 6842 } 7937 gen_opc_icount[lj] = num_insns; 7938 } 7939 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) 7940 gen_io_start(); 7941 6843 7942 pc_ptr = disas_insn(dc, pc_ptr); 7943 num_insns++; 6844 7944 /* stop translation if indicated */ 6845 7945 if (dc->is_jmp) 6846 7946 break; 6847 6848 #ifdef VBOX6849 #ifdef DEBUG6850 /*6851 if(cpu_check_code_raw(env, pc_ptr, env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK))) == ERROR_SUCCESS)6852 {6853 //should never happen as the jump to the patch code terminates the translation block6854 dprintf(("QEmu is about to execute instructions in our patch block at %08X!!\n", pc_ptr));6855 }6856 */6857 #endif6858 if (env->state & CPU_EMULATE_SINGLE_INSTR)6859 {6860 env->state &= ~CPU_EMULATE_SINGLE_INSTR;6861 gen_jmp_im(pc_ptr - dc->cs_base);6862 gen_eob(dc);6863 break;6864 }6865 #endif /* VBOX */6866 6867 7947 /* if single step mode, we generate only one instruction and 6868 7948 generate an exception */ … … 6871 7951 change to be happen */ 6872 7952 if (dc->tf || dc->singlestep_enabled || 6873 (flags & HF_INHIBIT_IRQ_MASK) || 6874 (cflags & CF_SINGLE_INSN)) { 7953 (flags & HF_INHIBIT_IRQ_MASK)) { 6875 7954 gen_jmp_im(pc_ptr - dc->cs_base); 6876 7955 gen_eob(dc); … … 6879 7958 /* if too long translation, stop generation too */ 6880 7959 if (gen_opc_ptr >= gen_opc_end || 6881 (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32)) { 7960 (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) || 7961 num_insns >= max_insns) { 6882 7962 gen_jmp_im(pc_ptr - dc->cs_base); 6883 7963 gen_eob(dc); … … 6885 7965 } 6886 7966 } 7967 if (tb->cflags & CF_LAST_IO) 7968 gen_io_end(); 7969 gen_icount_end(tb, num_insns); 6887 7970 *gen_opc_ptr = INDEX_op_end; 6888 7971 /* we don't forget to fill the last values */ … … 6910 7993 target_disas(logfile, pc_start, pc_ptr - pc_start, disas_flags); 6911 7994 fprintf(logfile, "\n"); 6912 if (loglevel & CPU_LOG_TB_OP) {6913 fprintf(logfile, "OP:\n");6914 dump_ops(gen_opc_buf, gen_opparam_buf);6915 fprintf(logfile, "\n");6916 }6917 7995 } 6918 7996 #endif 6919 7997 6920 /* optimize flag computations */ 6921 optimize_flags(gen_opc_buf, gen_opc_ptr - gen_opc_buf); 6922 7998 if (!search_pc) { 7999 tb->size = pc_ptr - pc_start; 8000 tb->icount = num_insns; 8001 } 8002 } 8003 8004 void gen_intermediate_code(CPUState *env, TranslationBlock *tb) 8005 { 8006 gen_intermediate_code_internal(env, tb, 0); 8007 } 8008 8009 void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb) 8010 { 8011 gen_intermediate_code_internal(env, tb, 1); 8012 } 8013 8014 void gen_pc_load(CPUState *env, TranslationBlock *tb, 8015 unsigned long searched_pc, int pc_pos, void *puc) 8016 { 8017 int cc_op; 6923 8018 #ifdef DEBUG_DISAS 6924 if (loglevel & CPU_LOG_TB_OP_OPT) { 6925 fprintf(logfile, "AFTER FLAGS OPT:\n"); 6926 dump_ops(gen_opc_buf, gen_opparam_buf); 6927 fprintf(logfile, "\n"); 8019 if (loglevel & CPU_LOG_TB_OP) { 8020 int i; 8021 fprintf(logfile, "RESTORE:\n"); 8022 for(i = 0;i <= pc_pos; i++) { 8023 if (gen_opc_instr_start[i]) { 8024 fprintf(logfile, "0x%04x: " TARGET_FMT_lx "\n", i, gen_opc_pc[i]); 8025 } 8026 } 8027 fprintf(logfile, "spc=0x%08lx pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n", 8028 searched_pc, pc_pos, gen_opc_pc[pc_pos] - tb->cs_base, 8029 (uint32_t)tb->cs_base); 6928 8030 } 6929 8031 #endif 6930 if (!search_pc) 6931 tb->size = pc_ptr - pc_start; 6932 return 0; 6933 } 6934 6935 int gen_intermediate_code(CPUState *env, TranslationBlock *tb) 6936 { 6937 return 
gen_intermediate_code_internal(env, tb, 0); 6938 } 6939 6940 int gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb) 6941 { 6942 return gen_intermediate_code_internal(env, tb, 1); 6943 } 6944 8032 env->eip = gen_opc_pc[pc_pos] - tb->cs_base; 8033 cc_op = gen_opc_cc_op[pc_pos]; 8034 if (cc_op != CC_OP_DYNAMIC) 8035 env->cc_op = cc_op; 8036 } -
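The thread running through the translate.c hunks above is the dyngen-to-TCG conversion: table-indexed micro-ops such as gen_op_movl_crN_T0() and gen_op_fxsave_A0() are replaced by calls into ordinary C helpers, emitted with tcg_gen_helper_0_2()/tcg_gen_helper_1_1(), and the helpers are made known to TCG by the DEF_HELPER/tcg_register_helper() expansion of helper.h in the new optimize_flags_init(). The mov-to-CR hunk shows the shape of the change; the fragment below only lines the old and new forms up side by side and is not additional code from the changeset:

    /* before (dyngen): operand fetch and CR write are dedicated micro-ops,
       selected by indexing arrays of generated functions */
    gen_op_mov_TN_reg[ot][0][rm]();
    gen_op_movl_crN_T0(reg);

    /* after (TCG): the operand lands in a TCG temporary and the privileged
       work moves into a C helper that the generated code calls at run time */
    gen_op_mov_TN_reg(ot, 0, rm);
    tcg_gen_helper_0_2(helper_write_crN,
                       tcg_const_i32(reg), cpu_T[0]);

Because such helpers can fault or inspect guest flags, the converted sites also flush the lazy flag state and EIP first, which is why the CR/DR, fxsave/fxrstor and LAR/LSL hunks gain gen_op_set_cc_op() and gen_jmp_im() calls before the helper invocation.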
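The large deletions further down (opc_read_flags, opc_write_flags, opc_simpler and the backward optimize_flags() pass) are the other half of the same conversion. Condition codes are now tracked lazily through the cc_op, cc_src, cc_dst and cc_tmp globals that the new optimize_flags_init() registers with tcg_global_mem_new(), and EFLAGS is only materialised when something actually consumes it. Roughly, the idiom looks like this; it is a sketch rather than code from the changeset, and arg2, result and CC_OP_SUBL are placeholders for whatever the concrete instruction records:

    /* after translating e.g. a 32-bit subtract, remember how EFLAGS could be
       recomputed rather than computing it eagerly */
    tcg_gen_mov_tl(cpu_cc_src, arg2);     /* second operand (placeholder name) */
    tcg_gen_mov_tl(cpu_cc_dst, result);   /* result of the op (placeholder name) */
    s->cc_op = CC_OP_SUBL;                /* which recipe reproduces the flags */

    /* only when real EFLAGS is needed (a helper call, end of block, ...) is
       the pending state flushed, as seen throughout the hunks above */
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);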
trunk/src/recompiler_new/translate-all.c
r13337  r13357
62      62      unsigned long code_gen_max_block_size(void)
63      63      {
        64      #ifdef VBOX
        65          static long max;
        66      #else
64      67          static unsigned long max;
        68      #endif
65      69
66      70          if (max == 0) {
67      71              max = TCG_MAX_OP_SIZE;
68              #define DEF(s, n, copy_size) max = copy_size > max? copy_size : max;
        72      #define DEF(s, n, copy_size) max = (copy_size > max) ? copy_size : max;
69      73      #include "tcg-opc.h"
70      74      #undef DEF
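The translate-all.c hunk touches the X-macro scan that computes the largest per-opcode copy size: tcg-opc.h is included with DEF() temporarily redefined so that every opcode contributes one comparison, leaving max equal to the biggest copy_size; the VBox variant only switches the accumulator to a signed long and parenthesises the comparison. A self-contained miniature of the pattern, with made-up opcode names and sizes standing in for tcg-opc.h:

    #include <stdio.h>

    /* mimics code_gen_max_block_size(); the three DEF() lines play the role
       of tcg-opc.h and are invented for this example */
    static long max_block_size_demo(void)
    {
        static long max;                    /* signed, as in the VBox change */

        if (max == 0) {
    #define DEF(s, n, copy_size) max = (copy_size > max) ? copy_size : max;
            DEF(op_mov,  2, 16)
            DEF(op_add,  3, 24)
            DEF(op_call, 1, 48)
    #undef DEF
        }
        return max;                         /* 48 for the list above */
    }

    int main(void)
    {
        printf("max copy size: %ld\n", max_block_size_demo());
        return 0;
    }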