Timestamp: Oct 13, 2008 7:03:16 PM (16 years ago)
svn:sync-xref-src-repo-rev: 37805
Location: trunk
Files: 15 edited
trunk/Config.kmk (r13155 → r13230)

 ## @todo eliminate these guys.
 ifdef VBOX_WITH_INTERNAL_NETWORKING
- DEFS += VBOX_WITH_INTERNAL_NETWORKING
-endif
+ DEFS += VBOX_WITH_INTERNAL_NETWORKING
+endif
+ifdef VBOX_WITH_NEW_RECOMPILER
+ DEFS += VBOX_WITH_NEW_RECOMPILER
+endif
 
 #
trunk/include/VBox/vm.h (r13189 → r13230)

     struct REM  s;
 #endif
+
+#ifdef VBOX_WITH_NEW_RECOMPILER
+/* Must be multiple of 32 and coherent with REM_ENV_SIZE from REMInternal.h */
 #if GC_ARCH_BITS == 32
-    char        padding[HC_ARCH_BITS == 32 ? 0x6f00 : 0xbf00];  /* multiple of 32 */
-#else
-    char        padding[HC_ARCH_BITS == 32 ? 0x9f00 : 0xdf00];  /* multiple of 32 */
-#endif
+#define VM_REM_SIZE (HC_ARCH_BITS == 32 ? 0xff00 : 0xff00)
+#else
+#define VM_REM_SIZE (HC_ARCH_BITS == 32 ? 0xff00 : 0xff00)
+#endif
+#else
+#if GC_ARCH_BITS == 32
+#define VM_REM_SIZE (HC_ARCH_BITS == 32 ? 0x6f00 : 0xbf00)
+#else
+#define VM_REM_SIZE (HC_ARCH_BITS == 32 ? 0x9f00 : 0xdf00)
+#endif
+#endif // VBOX_WITH_NEW_RECOMILER
+    char        padding[VM_REM_SIZE];   /* multiple of 32 */
 } rem;
trunk/src/Makefile.kmk (r13143 → r13230)

 
 ifdef VBOX_WITH_NEW_RECOMPILER
- SUBDIRS += recompiler_new
+ SUBDIRS += recompiler_new
 else
  SUBDIRS += recompiler
trunk/src/VBox/VMM/REMInternal.h (r12989 → r13230)

 #endif
 
+#ifdef VBOX_WITH_NEW_RECOMPILER
+#if GC_ARCH_BITS == 32
+#define REM_ENV_SIZE (HC_ARCH_BITS == 32 ? 0xff00 : 0xff00)
+#else
+#define REM_ENV_SIZE (HC_ARCH_BITS == 32 ? 0xff00 : 0xff00)
+#endif
+#else
 #if GC_ARCH_BITS == 32
 #define REM_ENV_SIZE (HC_ARCH_BITS == 32 ? 0x6550 : 0xb4a0)
…
 #define REM_ENV_SIZE (HC_ARCH_BITS == 32 ? 0x9440 : 0xd4a0)
 #endif
+#endif // VBOX_WITH_NEW_RECOMILER
 
 /** Recompiler CPU state. */
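The two size tables above must stay coherent: the padding that vm.h reserves for the recompiler (VM_REM_SIZE) has to be a multiple of 32 and at least as large as the state sized by REM_ENV_SIZE. A minimal compile-time guard, sketched here as an illustration only (the changeset itself relies on the source comment; VBox would normally express this with IPRT's compile-time assertion macro):

    /* Hypothetical guard, not part of r13230. */
    #define REM_ASSERT_COMPILE_2(expr, line) \
        typedef char rem_assert_compile_##line[(expr) ? 1 : -1]
    #define REM_ASSERT_COMPILE_1(expr, line) REM_ASSERT_COMPILE_2(expr, line)
    #define REM_ASSERT_COMPILE(expr)         REM_ASSERT_COMPILE_1(expr, __LINE__)

    REM_ASSERT_COMPILE(VM_REM_SIZE % 32 == 0);        /* "multiple of 32" */
    REM_ASSERT_COMPILE(REM_ENV_SIZE <= VM_REM_SIZE);  /* recompiler env must fit */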
trunk/src/recompiler_new/Makefile.kmk (r13184 → r13230)

 DLLS += VBoxREM
 IMPORT_LIBS += VBoxREMImp
+
+DEFS += VBOX_WITH_NEW_RECOMPILER
+
 
 OTHER_CLEAN += \
…
 	$(APPEND) $@ ''
 
-translate-all.c_DEPS =
-translate-op.c_DEPS = $(translate-all.c_DEPS)
-target-i386/translate.c_DEPS = $(translate-all.c_DEPS)
-
 #
 # The math testcase as a standalone program for testing and debugging purposes.
trunk/src/recompiler_new/Sun/config.h (r13184 → r13230)

 #define TARGET_X86_64
 #endif
+
+#define unlikely(cond) RT_UNLIKELY(cond)
trunk/src/recompiler_new/Sun/structs.h (r11982 → r13230)

     REM_OFFSETOF(CPUState, ft0),
     REM_SIZEOFMEMB(CPUState, ft0),
-    REM_OFFSETOF(CPUState, fp_convert),
-    REM_SIZEOFMEMB(CPUState, fp_convert),
     REM_OFFSETOF(CPUState, sse_status),
     REM_OFFSETOF(CPUState, mxcsr),
…
     /* cpu-defs.h */
     REM_OFFSETOF(CPUState, current_tb),
-    REM_OFFSETOF(CPUState, mem_write_pc),
-    REM_OFFSETOF(CPUState, mem_write_vaddr),
+    REM_OFFSETOF(CPUState, mem_io_pc),
+    REM_OFFSETOF(CPUState, mem_io_vaddr),
     REM_OFFSETOF(CPUState, tlb_table),
     REM_SIZEOFMEMB(CPUState, tlb_table),
trunk/src/recompiler_new/VBoxRecompiler.c (r13144 → r13230)

      * Init the recompiler.
      */
-    if (!cpu_x86_init(&pVM->rem.s.Env))
+    if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
     {
         AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
…
         cpu_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
     else
-        ASMAtomicOrS32(&cpu_single_env->interrupt_request, CPU_INTERRUPT_EXTERNAL_HARD);
+        ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
+                       CPU_INTERRUPT_EXTERNAL_HARD);
…
         cpu_interrupt(cpu_single_env, CPU_INTERRUPT_EXIT);
     else
-        ASMAtomicOrS32(&cpu_single_env->interrupt_request, CPU_INTERRUPT_EXTERNAL_TIMER);
+        ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
+                       CPU_INTERRUPT_EXTERNAL_TIMER);
…
         cpu_interrupt(cpu_single_env, CPU_INTERRUPT_EXIT);
     else
-        ASMAtomicOrS32(&cpu_single_env->interrupt_request, CPU_INTERRUPT_EXTERNAL_DMA);
+        ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
+                       CPU_INTERRUPT_EXTERNAL_DMA);
…
         cpu_interrupt(cpu_single_env, CPU_INTERRUPT_EXIT);
     else
-        ASMAtomicOrS32(&cpu_single_env->interrupt_request, CPU_INTERRUPT_EXTERNAL_EXIT);
+        ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
+                       CPU_INTERRUPT_EXTERNAL_EXIT);
…
         cpu_interrupt(cpu_single_env, CPU_INTERRUPT_EXIT);
     else
-        ASMAtomicOrS32(&cpu_single_env->interrupt_request, CPU_INTERRUPT_EXTERNAL_EXIT);
+        ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
+                       CPU_INTERRUPT_EXTERNAL_EXIT);
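Each of the hunks above follows the same pattern: on the emulation thread a plain cpu_interrupt() suffices, while from any other thread the request bit is ORed in atomically. The new cast is needed because interrupt_request is now an unsigned 32-bit field in CPU_COMMON, whereas IPRT's ASMAtomicOrS32() operates on an int32_t volatile *. A self-contained sketch of the operation, using a GCC builtin as a stand-in for the IPRT routine:

    #include <stdint.h>

    /* Stand-in for ASMAtomicOrS32() from iprt/asm.h; shown only to make the
       required pointer type visible. */
    static inline void AtomicOrS32(int32_t volatile *pi32, int32_t i32)
    {
        __sync_fetch_and_or(pi32, i32);   /* atomic read-modify-write */
    }

    /* Usage mirrors the changeset:
       AtomicOrS32((int32_t volatile *)&env->interrupt_request,
                   CPU_INTERRUPT_EXTERNAL_HARD); */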
trunk/src/recompiler_new/cpu-all.h (r11982 → r13230)

 #endif
 
+typedef union {
+    float32 f;
+    uint32_t l;
+} CPU_FloatU;
+
 /* NOTE: arm FPA is horrible as double 32 bit words are stored in big
    endian ! */
…
     uint64_t ll;
 } CPU_DoubleU;
+
+#ifdef TARGET_SPARC
+typedef union {
+    float128 q;
+#if defined(WORDS_BIGENDIAN) \
+    || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
+    struct {
+        uint32_t upmost;
+        uint32_t upper;
+        uint32_t lower;
+        uint32_t lowest;
+    } l;
+    struct {
+        uint64_t upper;
+        uint64_t lower;
+    } ll;
+#else
+    struct {
+        uint32_t lowest;
+        uint32_t lower;
+        uint32_t upper;
+        uint32_t upmost;
+    } l;
+    struct {
+        uint64_t lower;
+        uint64_t upper;
+    } ll;
+#endif
+} CPU_QuadU;
+#endif
 
 /* CPU memory access without any memory or io remapping */
…
    code */
 #define PAGE_WRITE_ORG 0x0010
+#define PAGE_RESERVED  0x0020
 
 void page_dump(FILE *f);
 int page_get_flags(target_ulong address);
 void page_set_flags(target_ulong start, target_ulong end, int flags);
+int page_check_range(target_ulong start, target_ulong len, int flags);
 void page_unprotect_range(target_ulong data, target_ulong data_size);
…
 extern CPUState *first_cpu;
 extern CPUState *cpu_single_env;
-extern int code_copy_enabled;
+extern int64_t qemu_icount;
+extern int use_icount;
 
 #define CPU_INTERRUPT_EXIT   0x01 /* wants exit from main loop */
…
 #define CPU_INTERRUPT_HALT   0x20 /* CPU halt wanted */
 #define CPU_INTERRUPT_SMI    0x40 /* (x86 only) SMI interrupt pending */
+#define CPU_INTERRUPT_DEBUG  0x80 /* Debug event occured.  */
+#define CPU_INTERRUPT_VIRQ   0x100 /* virtual interrupt pending.  */
+#define CPU_INTERRUPT_NMI    0x200 /* NMI pending. */
 
 #ifdef VBOX
 /** Executes a single instruction. cpu_exec() will normally return EXCP_SINGLE_INSTR. */
-#define CPU_INTERRUPT_SINGLE_INSTR              0x0200
+#define CPU_INTERRUPT_SINGLE_INSTR              0x0400
 /** Executing a CPU_INTERRUPT_SINGLE_INSTR request, quit the cpu_loop. (for exceptions and suchlike) */
-#define CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT    0x0400
+#define CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT    0x0800
 /** VM execution was interrupted by VMR3Reset, VMR3Suspend or VMR3PowerOff. */
-#define CPU_INTERRUPT_RC                        0x0800
+#define CPU_INTERRUPT_RC                        0x1000
 /** Exit current TB to process an external interrupt request (also in op.c!!) */
-#define CPU_INTERRUPT_EXTERNAL_EXIT             0x1000
+#define CPU_INTERRUPT_EXTERNAL_EXIT             0x2000
 /** Exit current TB to process an external interrupt request (also in op.c!!) */
-#define CPU_INTERRUPT_EXTERNAL_HARD             0x2000
+#define CPU_INTERRUPT_EXTERNAL_HARD             0x4000
 /** Exit current TB to process an external interrupt request (also in op.c!!) */
-#define CPU_INTERRUPT_EXTERNAL_TIMER            0x4000
+#define CPU_INTERRUPT_EXTERNAL_TIMER            0x8000
 /** Exit current TB to process an external interrupt request (also in op.c!!) */
-#define CPU_INTERRUPT_EXTERNAL_DMA              0x8000
+#define CPU_INTERRUPT_EXTERNAL_DMA              0x10000
 #endif /* VBOX */
 void cpu_interrupt(CPUState *s, int mask);
 void cpu_reset_interrupt(CPUState *env, int mask);
 
+int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type);
+int cpu_watchpoint_remove(CPUState *env, target_ulong addr);
+void cpu_watchpoint_remove_all(CPUState *env);
 int cpu_breakpoint_insert(CPUState *env, target_ulong pc);
 int cpu_breakpoint_remove(CPUState *env, target_ulong pc);
+void cpu_breakpoint_remove_all(CPUState *env);
+
+#define SSTEP_ENABLE  0x1  /* Enable simulated HW single stepping */
+#define SSTEP_NOIRQ   0x2  /* Do not use IRQ while single stepping */
+#define SSTEP_NOTIMER 0x4  /* Do not Timers while single stepping */
+
 void cpu_single_step(CPUState *env, int enabled);
 void cpu_reset(CPUState *s);
…
 int cpu_inw(CPUState *env, int addr);
 int cpu_inl(CPUState *env, int addr);
+#endif
+
+/* address in the RAM (different from a physical address) */
+#ifdef USE_KQEMU
+typedef uint32_t ram_addr_t;
+#else
+typedef unsigned long ram_addr_t;
 #endif
…
 #define IO_MEM_ROM         (1 << IO_MEM_SHIFT) /* hardcoded offset */
 #define IO_MEM_UNASSIGNED  (2 << IO_MEM_SHIFT)
-#define IO_MEM_NOTDIRTY    (4 << IO_MEM_SHIFT) /* used internally, never use directly */
+#define IO_MEM_NOTDIRTY    (3 << IO_MEM_SHIFT)
 #if defined(VBOX) && !defined(VBOX_WITH_NEW_PHYS_CODE)
 #define IO_MEM_RAM_MISSING (5 << IO_MEM_SHIFT) /* used internally, never use directly */
…
    the physical address */
 #define IO_MEM_ROMD        (1)
+#define IO_MEM_SUBPAGE     (2)
+#define IO_MEM_SUBWIDTH    (4)
+
+/* Flags stored in the low bits of the TLB virtual address.  These are
+   defined so that fast path ram access is all zeros.  */
+/* Zero if TLB entry is valid.  */
+#define TLB_INVALID_MASK   (1 << 3)
+/* Set if TLB entry references a clean RAM page.  The iotlb entry will
+   contain the page physical address.  */
+#define TLB_NOTDIRTY    (1 << 4)
+/* Set if TLB entry is an IO callback.  */
+#define TLB_MMIO        (1 << 5)
 
 typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr, uint32_t value);
…
 void cpu_register_physical_memory(target_phys_addr_t start_addr,
-                                  unsigned long size,
-                                  unsigned long phys_offset);
+                                  ram_addr_t size,
+                                  ram_addr_t phys_offset);
 uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr);
+ram_addr_t qemu_ram_alloc(ram_addr_t);
+void qemu_ram_free(ram_addr_t addr);
 int cpu_register_io_memory(int io_index,
                            CPUReadMemoryFunc **mem_read,
…
 uint64_t ldq_phys(target_phys_addr_t addr);
 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val);
+void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val);
 void stb_phys(target_phys_addr_t addr, uint32_t val);
 void stw_phys(target_phys_addr_t addr, uint32_t val);
…
 #define VGA_DIRTY_FLAG       0x01
 #define CODE_DIRTY_FLAG      0x02
+#define KQEMU_DIRTY_FLAG     0x04
+#define MIGRATION_DIRTY_FLAG 0x08
 
 /* read dirty bit (return 0 or 1) */
…
                                      int dirty_flags);
 void cpu_tlb_update_dirty(CPUState *env);
+
+int cpu_physical_memory_set_dirty_tracking(int enable);
+
+int cpu_physical_memory_get_dirty_tracking(void);
 
 void dump_exec_info(FILE *f,
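The new TLB_* flags sit in address bits below TARGET_PAGE_BITS, so a normal RAM mapping stores the page-aligned virtual address exactly and the fast path stays a single masked compare. A sketch of the test those definitions enable (illustrative; the real check is expanded from QEMU's softmmu templates):

    /* Fast-path check for a write, assuming the declarations above. */
    static inline int tlb_write_hits_ram(const CPUTLBEntry *te, target_ulong addr)
    {
        target_ulong tlb_addr = te->addr_write;
        /* Tag compare; TLB_INVALID_MASK in the mask rejects invalid entries. */
        if ((addr & TARGET_PAGE_MASK) !=
            (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
            return 0;                               /* miss: refill needed */
        /* Any remaining low bit (TLB_NOTDIRTY, TLB_MMIO) forces the slow path. */
        return (tlb_addr & ~TARGET_PAGE_MASK) == 0;
    }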
trunk/src/recompiler_new/cpu-defs.h (r11982 → r13230)

 typedef uint32_t target_ulong;
 #define TARGET_FMT_lx "%08x"
+#define TARGET_FMT_ld "%d"
+#define TARGET_FMT_lu "%u"
 #elif TARGET_LONG_SIZE == 8
 typedef int64_t target_long;
 typedef uint64_t target_ulong;
 #define TARGET_FMT_lx "%016" PRIx64
+#define TARGET_FMT_ld "%" PRId64
+#define TARGET_FMT_lu "%" PRIu64
 #else
 #error TARGET_LONG_SIZE undefined
…
 #if TARGET_PHYS_ADDR_BITS == 32
 typedef uint32_t target_phys_addr_t;
+#define TARGET_FMT_plx "%08x"
 #elif TARGET_PHYS_ADDR_BITS == 64
 typedef uint64_t target_phys_addr_t;
+#define TARGET_FMT_plx "%016" PRIx64
 #else
 #error TARGET_PHYS_ADDR_BITS undefined
 #endif
-
-/* address in the RAM (different from a physical address) */
-typedef unsigned long ram_addr_t;
 
 #define HOST_LONG_SIZE (HOST_LONG_BITS / 8)
…
 #endif /* VBOX */
 #define MAX_BREAKPOINTS 32
+#define MAX_WATCHPOINTS 32
 
 #define TB_JMP_CACHE_BITS 12
…
 #define CPU_TLB_SIZE (1 << CPU_TLB_BITS)
 
+#if TARGET_PHYS_ADDR_BITS == 32 && TARGET_LONG_BITS == 32
+#define CPU_TLB_ENTRY_BITS 4
+#else
+#define CPU_TLB_ENTRY_BITS 5
+#endif
+
 typedef struct CPUTLBEntry {
-    /* bit 31 to TARGET_PAGE_BITS : virtual address
-       bit TARGET_PAGE_BITS-1..IO_MEM_SHIFT : if non zero, memory io
-       zone number
+    /* bit TARGET_LONG_BITS to TARGET_PAGE_BITS : virtual address
+       bit TARGET_PAGE_BITS-1..4  : Nonzero for accesses that should not
+                                    go directly to ram.
        bit 3                      : indicates that the entry is invalid
        bit 2..0                   : zero
     */
     target_ulong addr_read;
     target_ulong addr_write;
     target_ulong addr_code;
-    /* addend to virtual address to get physical address */
-    target_phys_addr_t addend;
+    /* Addend to virtual address to get physical address.  IO accesses
+       use the correcponding iotlb value.  */
+#if TARGET_PHYS_ADDR_BITS == 64
+    /* on i386 Linux make sure it is aligned */
+    target_phys_addr_t addend __attribute__((aligned(8)));
+#else
+    target_phys_addr_t addend;
+#endif
+    /* padding to get a power of two size */
+    uint8_t dummy[(1 << CPU_TLB_ENTRY_BITS) -
+                  (sizeof(target_ulong) * 3 +
+                   ((-sizeof(target_ulong) * 3) & (sizeof(target_phys_addr_t) - 1)) +
+                   sizeof(target_phys_addr_t))];
 } CPUTLBEntry;
+
+#ifdef WORDS_BIGENDIAN
+typedef struct icount_decr_u16 {
+    uint16_t high;
+    uint16_t low;
+} icount_decr_u16;
+#else
+typedef struct icount_decr_u16 {
+    uint16_t low;
+    uint16_t high;
+} icount_decr_u16;
+#endif
+
+
+#define CPU_TEMP_BUF_NLONGS 128
+#ifdef VBOX
+struct TCGContext;
 
 #define CPU_COMMON \
     struct TranslationBlock *current_tb; /* currently executing TB */ \
     /* soft mmu support */ \
-    /* in order to avoid passing too many arguments to the memory \
-       write helpers, we store some rarely used information in the CPU \
+    /* in order to avoid passing too many arguments to the MMIO \
+       helpers, we store some rarely used information in the CPU \
        context) */ \
-    unsigned long mem_write_pc; /* host pc at which the memory was \
-                                   written */ \
-    target_ulong mem_write_vaddr; /* target virtual addr at which the \
-                                     memory was written */ \
-    /* 0 = kernel, 1 = user */ \
-    CPUTLBEntry tlb_table[2][CPU_TLB_SIZE]; \
+    unsigned long mem_io_pc; /* host pc at which the memory was \
+                                accessed */ \
+    target_ulong mem_io_vaddr; /* target virtual addr at which the \
+                                  memory was accessed */ \
+    uint32_t halted; /* Nonzero if the CPU is in suspend state */ \
+    uint32_t interrupt_request; \
+    /* The meaning of the MMU modes is defined in the target code. */ \
+    CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE]; \
+    target_phys_addr_t iotlb[NB_MMU_MODES][CPU_TLB_SIZE]; \
     struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE]; \
+    /* buffer for temporaries in the code generator */ \
+    long temp_buf[CPU_TEMP_BUF_NLONGS]; \
+    \
+    int64_t icount_extra; /* Instructions until next timer event. */ \
+    /* Number of cycles left, with interrupt flag in high bit. \
+       This allows a single read-compare-cbranch-write sequence to test \
+       for both decrementer underflow and exceptions. */ \
+    union { \
+        uint32_t u32; \
+        icount_decr_u16 u16; \
+    } icount_decr; \
+    uint32_t can_do_io; /* nonzero if memory mapped IO is safe. */ \
     \
     /* from this point: preserved by CPU reset */ \
     /* ice debug support */ \
     target_ulong breakpoints[MAX_BREAKPOINTS]; \
     int nb_breakpoints; \
     int singlestep_enabled; \
     \
+    struct { \
+        target_ulong vaddr; \
+        int type; /* PAGE_READ/PAGE_WRITE */ \
+    } watchpoint[MAX_WATCHPOINTS]; \
+    int nb_watchpoints; \
+    int watchpoint_hit; \
+    \
+    /* Core interrupt code */ \
+    jmp_buf jmp_env; \
+    int exception_index; \
+    \
+    int user_mode_only; \
+    \
     void *next_cpu; /* next CPU sharing TB cache */ \
     int cpu_index; /* CPU index (informative) */ \
+    int running; /* Nonzero if cpu is currently running(usermode).  */ \
     /* user data */ \
-    void *opaque;
-
-#endif
+    void *opaque; \
+    \
+    const char *cpu_model_str; \
+    /* Codegenerator context */ \
+    struct TCGContext *tcg_context;
+#else
+
+#define CPU_COMMON \
+    struct TranslationBlock *current_tb; /* currently executing TB */ \
+    /* soft mmu support */ \
+    /* in order to avoid passing too many arguments to the MMIO \
+       helpers, we store some rarely used information in the CPU \
+       context) */ \
+    unsigned long mem_io_pc; /* host pc at which the memory was \
+                                accessed */ \
+    target_ulong mem_io_vaddr; /* target virtual addr at which the \
+                                  memory was accessed */ \
+    uint32_t halted; /* Nonzero if the CPU is in suspend state */ \
+    uint32_t interrupt_request; \
+    /* The meaning of the MMU modes is defined in the target code. */ \
+    CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE]; \
+    target_phys_addr_t iotlb[NB_MMU_MODES][CPU_TLB_SIZE]; \
+    struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE]; \
+    /* buffer for temporaries in the code generator */ \
+    long temp_buf[CPU_TEMP_BUF_NLONGS]; \
+    \
+    int64_t icount_extra; /* Instructions until next timer event. */ \
+    /* Number of cycles left, with interrupt flag in high bit. \
+       This allows a single read-compare-cbranch-write sequence to test \
+       for both decrementer underflow and exceptions. */ \
+    union { \
+        uint32_t u32; \
+        icount_decr_u16 u16; \
+    } icount_decr; \
+    uint32_t can_do_io; /* nonzero if memory mapped IO is safe. */ \
+    \
+    /* from this point: preserved by CPU reset */ \
+    /* ice debug support */ \
+    target_ulong breakpoints[MAX_BREAKPOINTS]; \
+    int nb_breakpoints; \
+    int singlestep_enabled; \
+    \
+    struct { \
+        target_ulong vaddr; \
+        int type; /* PAGE_READ/PAGE_WRITE */ \
+    } watchpoint[MAX_WATCHPOINTS]; \
+    int nb_watchpoints; \
+    int watchpoint_hit; \
+    \
+    /* Core interrupt code */ \
+    jmp_buf jmp_env; \
+    int exception_index; \
+    \
+    int user_mode_only; \
+    \
+    void *next_cpu; /* next CPU sharing TB cache */ \
+    int cpu_index; /* CPU index (informative) */ \
+    int running; /* Nonzero if cpu is currently running(usermode). */ \
+    /* user data */ \
+    void *opaque; \
+    \
+    const char *cpu_model_str;
+#endif
+
+#endif
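The dummy[] member pads each CPUTLBEntry to exactly 1 << CPU_TLB_ENTRY_BITS bytes: three target_ulong tags, the (possibly alignment-padded) addend, and the remainder as filler, so TLB indexing compiles to shift-and-mask arithmetic instead of a multiply. A hypothetical compile-time check of that arithmetic, plus the index computation it enables:

    /* Not in the changeset; the negative array size fails the build if the
       padding formula ever drifts from a power of two. */
    typedef char assert_tlb_entry_size_pow2[
        sizeof(CPUTLBEntry) == (1 << CPU_TLB_ENTRY_BITS) ? 1 : -1];

    /* Entry index for a virtual address:
       (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1) */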
trunk/src/recompiler_new/exec-all.h (r11982 → r13230)

 #define DISAS_TB_JUMP 3 /* only pc was modified statically */
 
-struct TranslationBlock;
+typedef struct TranslationBlock TranslationBlock;
 
 /* XXX: make safe guess about sizes */
-#define MAX_OP_PER_INSTR 32
+#define MAX_OP_PER_INSTR 64
+/* A Call op needs up to 6 + 2N parameters (N = number of arguments).  */
+#define MAX_OPC_PARAM 10
 #define OPC_BUF_SIZE 512
 #define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)
 
-#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * 3)
-
-extern uint16_t gen_opc_buf[OPC_BUF_SIZE];
-extern uint32_t gen_opparam_buf[OPPARAM_BUF_SIZE];
-extern long gen_labels[OPC_BUF_SIZE];
-extern int nb_gen_labels;
+/* Maximum size a TCG op can expand to.  This is complicated because a
+   single op may require several host instructions and regirster reloads.
+   For now take a wild guess at 128 bytes, which should allow at least
+   a couple of fixup instructions per argument.  */
+#define TCG_MAX_OP_SIZE 128
+
+#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)
+
 extern target_ulong gen_opc_pc[OPC_BUF_SIZE];
 extern target_ulong gen_opc_npc[OPC_BUF_SIZE];
 extern uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
 extern uint8_t gen_opc_instr_start[OPC_BUF_SIZE];
+extern uint16_t gen_opc_icount[OPC_BUF_SIZE];
 extern target_ulong gen_opc_jump_pc[2];
 extern uint32_t gen_opc_hflags[OPC_BUF_SIZE];
…
 typedef void (GenOpFunc3)(long, long, long);
 
-#if defined(TARGET_I386)
-
-void optimize_flags_init(void);
-
-#endif
-
+#ifndef VBOX
 extern FILE *logfile;
 extern int loglevel;
-
-int gen_intermediate_code(CPUState *env, struct TranslationBlock *tb);
-int gen_intermediate_code_pc(CPUState *env, struct TranslationBlock *tb);
-void dump_ops(const uint16_t *opc_buf, const uint32_t *opparam_buf);
+#endif
+
+void gen_intermediate_code(CPUState *env, struct TranslationBlock *tb);
+void gen_intermediate_code_pc(CPUState *env, struct TranslationBlock *tb);
+void gen_pc_load(CPUState *env, struct TranslationBlock *tb,
+                 unsigned long searched_pc, int pc_pos, void *puc);
+
+unsigned long code_gen_max_block_size(void);
+void cpu_gen_init(void);
 int cpu_gen_code(CPUState *env, struct TranslationBlock *tb,
-                 int max_code_size, int *gen_code_size_ptr);
+                 int *gen_code_size_ptr);
 int cpu_restore_state(struct TranslationBlock *tb,
                       CPUState *env, unsigned long searched_pc,
                       void *puc);
-int cpu_gen_code_copy(CPUState *env, struct TranslationBlock *tb,
-                      int max_code_size, int *gen_code_size_ptr);
 int cpu_restore_state_copy(struct TranslationBlock *tb,
                            CPUState *env, unsigned long searched_pc,
                            void *puc);
 void cpu_resume_from_signal(CPUState *env1, void *puc);
+void cpu_io_recompile(CPUState *env, void *retaddr);
+TranslationBlock *tb_gen_code(CPUState *env,
+                              target_ulong pc, target_ulong cs_base, int flags,
+                              int cflags);
 void cpu_exec_init(CPUState *env);
 int page_unprotect(target_ulong address, unsigned long pc, void *puc);
-void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
+void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                    int is_cpu_write_access);
 void tb_invalidate_page_range(target_ulong start, target_ulong end);
…
 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                       target_phys_addr_t paddr, int prot,
-                      int is_user, int is_softmmu);
-static inline int tlb_set_page(CPUState *env, target_ulong vaddr,
+                      int mmu_idx, int is_softmmu);
+static inline int tlb_set_page(CPUState *env1, target_ulong vaddr,
                                target_phys_addr_t paddr, int prot,
-                               int is_user, int is_softmmu)
+                               int mmu_idx, int is_softmmu)
 {
     if (prot & PAGE_READ)
         prot |= PAGE_EXEC;
-    return tlb_set_page_exec(env, vaddr, paddr, prot, is_user, is_softmmu);
-}
-
-#define CODE_GEN_MAX_SIZE        65536
+    return tlb_set_page_exec(env1, vaddr, paddr, prot, mmu_idx, is_softmmu);
+}
+
 #define CODE_GEN_ALIGN           16 /* must be >= of the size of a icache line */
…
 #define CODE_GEN_PHYS_HASH_SIZE (1 << CODE_GEN_PHYS_HASH_BITS)
 
-/* maximum total translated code allocated */
-
-/* NOTE: the translated code area cannot be too big because on some
-   archs the range of "fast" function calls is limited. Here is a
-   summary of the ranges:
-
-   i386  : signed 32 bits
-   arm   : signed 26 bits
-   ppc   : signed 24 bits
-   sparc : signed 32 bits
-   alpha : signed 23 bits
-*/
-
-#if defined(__alpha__)
-#define CODE_GEN_BUFFER_SIZE     (2 * 1024 * 1024)
-#elif defined(__ia64)
-#define CODE_GEN_BUFFER_SIZE     (4 * 1024 * 1024) /* range of addl */
-#elif defined(__powerpc__)
-#define CODE_GEN_BUFFER_SIZE     (6 * 1024 * 1024)
-#else
-#define CODE_GEN_BUFFER_SIZE     (16 * 1024 * 1024)
-#endif
-
-//#define CODE_GEN_BUFFER_SIZE     (128 * 1024)
+#define MIN_CODE_GEN_BUFFER_SIZE     (1024 * 1024)
 
 /* estimated block size for TB allocation */
…
 #endif
 
-#define CODE_GEN_MAX_BLOCKS    (CODE_GEN_BUFFER_SIZE / CODE_GEN_AVG_BLOCK_SIZE)
-
-#if defined(__powerpc__)
+#if defined(__powerpc__) || defined(__x86_64__) || defined(__arm__)
 #define USE_DIRECT_JUMP
 #endif
…
 #define USE_DIRECT_JUMP
 #endif
+
 #ifdef VBOX /* bird: not safe in next step because of threading & cpu_interrupt. */
 #undef USE_DIRECT_JUMP
 #endif /* VBOX */
 
-typedef struct TranslationBlock {
+struct TranslationBlock {
     target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
     target_ulong cs_base; /* CS base for this block */
…
        size <= TARGET_PAGE_SIZE) */
     uint16_t cflags;    /* compile flags */
-#define CF_CODE_COPY   0x0001 /* block was generated in code copy mode */
-#define CF_TB_FP_USED  0x0002 /* fp ops are used in the TB */
-#define CF_FP_USED     0x0004 /* fp ops are used in the TB or in a chained TB */
-#define CF_SINGLE_INSN 0x0008 /* compile only a single instruction */
+#define CF_COUNT_MASK  0x7fff
+#define CF_LAST_IO     0x8000 /* Last insn may be an IO access.  */
+
 #ifdef VBOX
 #define CF_RAW_MODE    0x0010 /* block was generated in raw mode */
…
     struct TranslationBlock *jmp_next[2];
     struct TranslationBlock *jmp_first;
-} TranslationBlock;
+    uint32_t icount;
+};
…
 TranslationBlock *tb_alloc(target_ulong pc);
+void tb_free(TranslationBlock *tb);
 void tb_flush(CPUState *env);
 void tb_link_phys(TranslationBlock *tb,
                   target_ulong phys_pc, target_ulong phys_page2);
+void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr);
 
 extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
 
-extern uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
 extern uint8_t *code_gen_ptr;
+extern int code_gen_max_blocks;
 
 #if defined(USE_DIRECT_JUMP)
…
 #define ASM_OP_LABEL_NAME(n, opname) \
     ASM_NAME(__op_label) #n "." ASM_NAME(opname)
-
-#if defined(__powerpc__)
-
-/* we patch the jump instruction directly */
-#define GOTO_TB(opname, tbparam, n)\
-do {\
-    asm volatile (ASM_DATA_SECTION\
-                  ASM_OP_LABEL_NAME(n, opname) ":\n"\
-                  ".long 1f\n"\
-                  ASM_PREVIOUS_SECTION \
-                  "b " ASM_NAME(__op_jmp) #n "\n"\
-                  "1:\n");\
-} while (0)
-
-#elif defined(__i386__) && defined(USE_DIRECT_JUMP)
-
-/* we patch the jump instruction directly */
-#define GOTO_TB(opname, tbparam, n)\
-do {\
-    asm volatile (".section .data\n"\
-                  ASM_OP_LABEL_NAME(n, opname) ":\n"\
-                  ".long 1f\n"\
-                  ASM_PREVIOUS_SECTION \
-                  "jmp " ASM_NAME(__op_jmp) #n "\n"\
-                  "1:\n");\
-} while (0)
-
-#else
-
-/* jump to next block operations (more portable code, does not need
-   cache flushing, but slower because of indirect jump) */
-# ifdef VBOX /* bird: GCC4 (and Ming 3.4.x?) will remove the two unused static
-              variables. I've added a dummy __asm__ statement which reference
-              the two variables to prevent this. */
-#  if __GNUC__ >= 4
-#   define GOTO_TB(opname, tbparam, n)\
-    do {\
-        static void __attribute__((unused)) *dummy ## n = &&dummy_label ## n;\
-        static void __attribute__((unused)) *__op_label ## n \
-            __asm__(ASM_OP_LABEL_NAME(n, opname)) = &&label ## n;\
-        __asm__ ("" : : "m" (__op_label ## n), "m" (dummy ## n));\
-        goto *(void *)(uintptr_t)(((TranslationBlock *)tbparam)->tb_next[n]);\
-    label ## n: ;\
-    dummy_label ## n: ;\
-    } while (0)
-#  else
-#   define GOTO_TB(opname, tbparam, n)\
-    do {\
-        static void __attribute__((unused)) *dummy ## n = &&dummy_label ## n;\
-        static void __attribute__((unused)) *__op_label ## n \
-            __asm__(ASM_OP_LABEL_NAME(n, opname)) = &&label ## n;\
-        goto *(void *)(uintptr_t)(((TranslationBlock *)tbparam)->tb_next[n]);\
-    label ## n: ;\
-    dummy_label ## n: ;\
-    } while (0)
-#  endif
-# else /* !VBOX */
-#define GOTO_TB(opname, tbparam, n)\
-do {\
-    static void __attribute__((unused)) *dummy ## n = &&dummy_label ## n;\
-    static void __attribute__((unused)) *__op_label ## n \
-        __asm__(ASM_OP_LABEL_NAME(n, opname)) = &&label ## n;\
-    goto *(void *)(((TranslationBlock *)tbparam)->tb_next[n]);\
-label ## n: ;\
-dummy_label ## n: ;\
-} while (0)
-# endif /* !VBOX */
-
-#endif
 
 extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
…
                             void *retaddr);
 
-#define ACCESS_TYPE 3
+#define ACCESS_TYPE (NB_MMU_MODES + 1)
 #define MEMSUFFIX _code
 #define env cpu_single_env
…
 # endif
 }
+
+
+/* Deterministic execution requires that IO only be performed on the last
+   instruction of a TB so that interrupts take effect immediately.  */
+static inline int can_do_io(CPUState *env)
+{
+    if (!use_icount)
+        return 1;
+
+    /* If not executing code then assume we are ok.  */
+    if (!env->current_tb)
+        return 1;
+
+    return env->can_do_io != 0;
+}
 #endif
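With dyngen's CF_CODE_COPY world gone, a TB's cflags now carry an instruction budget in CF_COUNT_MASK, and CF_LAST_IO flags that the final instruction may touch I/O; cpu_io_recompile() uses this to retranslate a block so the offending access becomes its last instruction. A hedged sketch of that retranslation call, built from the declarations above (the surrounding bookkeeping lives in exec.c):

    /* Sketch: regenerate tb with an n-instruction budget, I/O allowed on
       the last instruction. */
    static TranslationBlock *retranslate_for_io(CPUState *env,
                                                TranslationBlock *tb, int n)
    {
        return tb_gen_code(env, tb->pc, tb->cs_base, tb->flags,
                           (n & CF_COUNT_MASK) | CF_LAST_IO);
    }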
trunk/src/recompiler_new/osdep.h (r2422 → r13230)

 #endif
 
+#define unlikely(cond) RT_UNLIKELY(cond)
+
 #else /* !VBOX */
trunk/src/recompiler_new/target-i386/cpu.h (r13034 → r13230)

 #endif /* VBOX */
 
-#if defined(__i386__) && !defined(CONFIG_SOFTMMU)
-#define USE_CODE_COPY
-#endif
-
 #define R_EAX 0
 #define R_ECX 1
…
 /* hidden flags - used internally by qemu to represent additionnal cpu
-   states. Only the CPL, INHIBIT_IRQ and HALTED are not redundant. We avoid
+   states. Only the CPL, INHIBIT_IRQ, SMM and SVMI are not redundant. We avoid
    using the IOPL_MASK, TF_MASK and VM_MASK bit position to ease oring
    with eflags. */
…
 #define HF_HALTED_SHIFT     18 /* CPU halted */
 #define HF_SMM_SHIFT        19 /* CPU in SMM mode */
+#define HF_SVME_SHIFT       20 /* SVME enabled (copy of EFER.SVME) */
+#define HF_SVMI_SHIFT       21 /* SVM intercepts are active */
 
 #define HF_CPL_MASK          (3 << HF_CPL_SHIFT)
…
 #define HF_HALTED_MASK       (1 << HF_HALTED_SHIFT)
 #define HF_SMM_MASK          (1 << HF_SMM_SHIFT)
+#define HF_SVME_MASK         (1 << HF_SVME_SHIFT)
+#define HF_SVMI_MASK         (1 << HF_SVMI_SHIFT)
+
+/* hflags2 */
+
+#define HF2_GIF_SHIFT        0 /* if set CPU takes interrupts */
+#define HF2_HIF_SHIFT        1 /* value of IF_MASK when entering SVM */
+#define HF2_NMI_SHIFT        2 /* CPU serving NMI */
+#define HF2_VINTR_SHIFT      3 /* value of V_INTR_MASKING bit */
+
+#define HF2_GIF_MASK         (1 << HF2_GIF_SHIFT)
+#define HF2_HIF_MASK         (1 << HF2_HIF_SHIFT)
+#define HF2_NMI_MASK         (1 << HF2_NMI_SHIFT)
+#define HF2_VINTR_MASK       (1 << HF2_VINTR_SHIFT)
 
 #define CR0_PE_MASK  (1 << 0)
…
 #endif
 
+#define MSR_IA32_SYSENTER_CS  0x174
+#define MSR_IA32_SYSENTER_ESP 0x175
+#define MSR_IA32_SYSENTER_EIP 0x176
+
 #define MSR_MCG_CAP     0x179
 #define MSR_MCG_STATUS  0x17a
…
 #define MSR_EFER_LMA   (1 << 10)
 #define MSR_EFER_NXE   (1 << 11)
+#define MSR_EFER_SVME  (1 << 12)
 #define MSR_EFER_FFXSR (1 << 14)
+
+#ifdef VBOX
 #define MSR_APIC_RANGE_START 0x800
 #define MSR_APIC_RANGE_END   0x900
+#endif
 
 #define MSR_STAR        0xc0000081
…
 #define MSR_GSBASE      0xc0000101
 #define MSR_KERNELGSBASE 0xc0000102
+
+#define MSR_VM_HSAVE_PA 0xc0010117
 
 /* cpuid_features bits */
…
 #define CPUID_PSE36  (1 << 17)
 #define CPUID_CLFLUSH (1 << 19)
-/* ... */
+#define CPUID_DTS (1 << 21)
+#define CPUID_ACPI (1 << 22)
 #define CPUID_MMX  (1 << 23)
 #define CPUID_FXSR (1 << 24)
 #define CPUID_SSE  (1 << 25)
 #define CPUID_SSE2 (1 << 26)
-
-#ifdef VBOX
-#define CPUID_HTT (1 << 28)
-#endif
+#define CPUID_SS (1 << 27)
+#define CPUID_HT (1 << 28)
+#define CPUID_TM (1 << 29)
+#define CPUID_IA64 (1 << 30)
+#define CPUID_PBE (1 << 31)
 
 #define CPUID_EXT_SSE3     (1 << 0)
+#define CPUID_EXT_DTES64   (1 << 2)
 #define CPUID_EXT_MONITOR  (1 << 3)
 #define CPUID_EXT_DSCPL    (1 << 4)
…
 #define CPUID_EXT_CX16     (1 << 13)
 #define CPUID_EXT_XTPR     (1 << 14)
-#define CPUID_EXT_DCA      (1 << 17)
-#define CPUID_EXT_POPCNT   (1 << 22)
+#define CPUID_EXT_PDCM     (1 << 15)
+#define CPUID_EXT_DCA      (1 << 18)
+#define CPUID_EXT_SSE41    (1 << 19)
+#define CPUID_EXT_SSE42    (1 << 20)
+#define CPUID_EXT_X2APIC   (1 << 21)
+#define CPUID_EXT_MOVBE    (1 << 22)
+#define CPUID_EXT_POPCNT   (1 << 23)
+#define CPUID_EXT_XSAVE    (1 << 26)
+#define CPUID_EXT_OSXSAVE  (1 << 27)
 
 #define CPUID_EXT2_SYSCALL (1 << 11)
…
 #define CPUID_EXT3_OSVW    (1 << 9)
 #define CPUID_EXT3_IBS     (1 << 10)
+#define CPUID_EXT3_SKINIT  (1 << 12)
+
+#define CPUID_VENDOR_INTEL_1 0x756e6547 /* "Genu" */
+#define CPUID_VENDOR_INTEL_2 0x49656e69 /* "ineI" */
+#define CPUID_VENDOR_INTEL_3 0x6c65746e /* "ntel" */
+
+#define CPUID_VENDOR_AMD_1   0x68747541 /* "Auth" */
+#define CPUID_VENDOR_AMD_2   0x69746e65 /* "enti" */
+#define CPUID_VENDOR_AMD_3   0x444d4163 /* "cAMD" */
+
+#define CPUID_MWAIT_IBE (1 << 1) /* Interrupts can exit capability */
+#define CPUID_MWAIT_EMX (1 << 0) /* enumeration supported */
 
 #define EXCP00_DIVZ 0
…
 #define EXCP12_MCHK 18
 
+#define EXCP_SYSCALL    0x100 /* only happens in user only emulation
+                                 for syscall instruction */
+
 enum {
     CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
…
 #endif
 
+#define NB_MMU_MODES 2
+
 typedef struct CPUX86State {
-#if TARGET_LONG_BITS > HOST_LONG_BITS
-    /* temporaries if we cannot store them in host registers */
-    target_ulong t0, t1, t2;
-#endif
-
-    /* standard registers */
+    /* standard registers */
     target_ulong regs[CPU_NB_REGS];
     target_ulong eip;
…
     uint32_t cc_op;
     int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */
-    uint32_t hflags; /* hidden flags, see HF_xxx constants */
+    uint32_t hflags; /* TB flags, see HF_xxx constants.  These flags
+                        are known at translation time. */
+    uint32_t hflags2; /* various other flags, see HF2_xxx constants. */
 
     /* segments */
…
     uint32_t alignment4; /* long double is 12 byte, pad it to 16. */
 #endif
-    union {
-        float f;
-        double d;
-        int i32;
-        int64_t i64;
-    } fp_convert;
-
+
+    float_status mmx_status; /* for 3DNow! float ops */
     float_status sse_status;
     uint32_t mxcsr;
…
     XMMReg xmm_t0;
     MMXReg mmx_t0;
+    target_ulong cc_tmp; /* temporary for rcr/rcl */
 
     /* sysenter registers */
…
     uint64_t efer;
     uint64_t star;
+
+    uint64_t vm_hsave;
+    uint64_t vm_vmcb;
+    uint64_t tsc_offset;
+    uint64_t intercept;
+    uint16_t intercept_cr_read;
+    uint16_t intercept_cr_write;
+    uint16_t intercept_dr_read;
+    uint16_t intercept_dr_write;
+    uint32_t intercept_exceptions;
+    uint8_t v_tpr;
+
 #ifdef TARGET_X86_64
     target_ulong lstar;
…
     uint64_t pat;
 
-    /* temporary data for USE_CODE_COPY mode */
-#ifdef USE_CODE_COPY
-    uint32_t tmp0;
-    uint32_t saved_esp;
-    int native_fp_regs; /* if true, the FPU state is in the native CPU regs */
-#endif
-
     /* exception/interrupt handling */
-    jmp_buf jmp_env;
-#if defined(VBOX) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
-    /* This will be removed when switching to the no-crt code everywhere. */
-    uint32_t alignment1[23];
-#endif
-    int exception_index;
     int error_code;
     int exception_is_int;
…
     target_ulong dr[8]; /* debug registers */
     uint32_t smbase;
-    int interrupt_request;
-    int user_mode_only; /* user mode only simulation */
+    int old_exception;  /* exception in flight */
+
 
     CPU_COMMON
…
     uint32_t limit;
     uint32_t flags;
-#ifdef VBOX
     /** The new selector is saved here when we are unable to sync it before invoking the recompiled code. */
     uint32_t newselector;
-#endif
 } SegmentCache_Ver16;
…
 #ifdef VBOX
-CPUX86State *cpu_x86_init(CPUX86State *env);
+CPUX86State *cpu_x86_init(CPUX86State *env, const char *cpu_model);
 #else /* !VBOX */
-CPUX86State *cpu_x86_init(void);
+CPUX86State *cpu_x86_init(const char *cpu_model);
 #endif /* !VBOX */
 int cpu_x86_exec(CPUX86State *s);
 void cpu_x86_close(CPUX86State *s);
+void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt,
+                                               ...));
 int cpu_get_pic_interrupt(CPUX86State *s);
 /* MSDOS compatibility mode FPU exception support */
…
 uint8_t cpu_get_apic_tpr(CPUX86State *env);
 #endif
+#ifdef VBOX
 uint64_t cpu_apic_rdmsr(CPUX86State *env, uint32_t reg);
 void cpu_apic_wrmsr(CPUX86State *env, uint32_t reg, uint64_t value);
+#endif
 void cpu_smm_update(CPUX86State *env);
…
 #define TARGET_PAGE_BITS 12
+
+#define CPUState CPUX86State
+#define cpu_init cpu_x86_init
+#define cpu_exec cpu_x86_exec
+#define cpu_gen_code cpu_x86_gen_code
+#define cpu_signal_handler cpu_x86_signal_handler
+#define cpu_list x86_cpu_list
+
+#define CPU_SAVE_VERSION 7
+
+/* MMU modes definitions */
+#define MMU_MODE0_SUFFIX _kernel
+#define MMU_MODE1_SUFFIX _user
+#define MMU_USER_IDX 1
+static inline int cpu_mmu_index (CPUState *env)
+{
+    return (env->hflags & HF_CPL_MASK) == 3 ? 1 : 0;
+}
+
+void optimize_flags_init(void);
+
+typedef struct CCTable {
+    int (*compute_all)(void); /* return all the flags */
+    int (*compute_c)(void);  /* return the C flag */
+} CCTable;
+
+extern CCTable cc_table[];
+
+#if defined(CONFIG_USER_ONLY)
+static inline void cpu_clone_regs(CPUState *env, target_ulong newsp)
+{
+    if (newsp)
+        env->regs[R_ESP] = newsp;
+    env->regs[R_EAX] = 0;
+}
+#endif
+
+#define CPU_PC_FROM_TB(env, tb) env->eip = tb->pc - tb->cs_base
+
 #include "cpu-all.h"
 
+#include "svm.h"
+
 #endif /* CPU_I386_H */
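NB_MMU_MODES == 2 splits the softmmu TLB per privilege level, and cpu_mmu_index() above maps CPL 3 to the user table. How a lookup would combine it with the per-mode tlb_table from cpu-defs.h (a sketch; the generated softmmu code does this inline):

    /* Pick the TLB entry covering vaddr in the current MMU mode. */
    static inline CPUTLBEntry *tlb_entry_for(CPUState *env, target_ulong vaddr)
    {
        int mmu_idx = cpu_mmu_index(env);  /* 0 = kernel, 1 = user */
        int idx = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        return &env->tlb_table[mmu_idx][idx];
    }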
trunk/src/recompiler_new/target-i386/exec.h (r11982 → r13230)

 register struct CPUX86State *env asm(AREG0);
 
-#if TARGET_LONG_BITS > HOST_LONG_BITS
-
-/* no registers can be used */
-#define T0 (env->t0)
-#define T1 (env->t1)
-#define T2 (env->t2)
-
-#else
-
-/* XXX: use unsigned long instead of target_ulong - better code will
-   be generated for 64 bit CPUs */
-register target_ulong T0 asm(AREG1);
-register target_ulong T1 asm(AREG2);
-register target_ulong T2 asm(AREG3);
-
-/* if more registers are available, we define some registers too */
-#ifdef AREG4
-register target_ulong EAX asm(AREG4);
-#define reg_EAX
-#endif
-
-#ifdef AREG5
-register target_ulong ESP asm(AREG5);
-#define reg_ESP
-#endif
-
-#ifdef AREG6
-register target_ulong EBP asm(AREG6);
-#define reg_EBP
-#endif
-
-#ifdef AREG7
-register target_ulong ECX asm(AREG7);
-#define reg_ECX
-#endif
-
-#ifdef AREG8
-register target_ulong EDX asm(AREG8);
-#define reg_EDX
-#endif
-
-#ifdef AREG9
-register target_ulong EBX asm(AREG9);
-#define reg_EBX
-#endif
-
-#ifdef AREG10
-register target_ulong ESI asm(AREG10);
-#define reg_ESI
-#endif
-
-#ifdef AREG11
-register target_ulong EDI asm(AREG11);
-#define reg_EDI
-#endif
-
-#endif /* ! (TARGET_LONG_BITS > HOST_LONG_BITS) */
-
-#define A0 T2
-
-extern FILE *logfile;
-extern int loglevel;
+#ifndef VBOX
+#include "qemu-log.h"
+#endif
 
 #ifndef reg_EAX
…
 #define ST1 ST(1)
 
-#ifdef USE_FP_CONVERT
-#define FP_CONVERT (env->fp_convert)
-#endif
-
 #include "cpu.h"
 #include "exec-all.h"
 
-typedef struct CCTable {
-    int (*compute_all)(void); /* return all the flags */
-    int (*compute_c)(void);  /* return the C flag */
-} CCTable;
-
-extern CCTable cc_table[];
-
-void load_seg(int seg_reg, int selector);
-void helper_ljmp_protected_T0_T1(int next_eip);
-void helper_lcall_real_T0_T1(int shift, int next_eip);
-void helper_lcall_protected_T0_T1(int shift, int next_eip);
-void helper_iret_real(int shift);
-void helper_iret_protected(int shift, int next_eip);
-void helper_lret_protected(int shift, int addend);
-void helper_lldt_T0(void);
-void helper_ltr_T0(void);
-void helper_movl_crN_T0(int reg);
-void helper_movl_drN_T0(int reg);
-void helper_invlpg(target_ulong addr);
-void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
-void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr);
-int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
-                             int is_write, int is_user, int is_softmmu);
-void tlb_fill(target_ulong addr, int is_write, int is_user,
-              void *retaddr);
+int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
+                             int is_write, int mmu_idx, int is_softmmu);
 void __hidden cpu_lock(void);
 void __hidden cpu_unlock(void);
-void do_interrupt(int intno, int is_int, int error_code, 
+void do_interrupt(int intno, int is_int, int error_code,
                   target_ulong next_eip, int is_hw);
-void do_interrupt_user(int intno, int is_int, int error_code, 
+void do_interrupt_user(int intno, int is_int, int error_code,
                        target_ulong next_eip);
-void raise_interrupt(int intno, int is_int, int error_code, 
+void raise_interrupt(int intno, int is_int, int error_code,
                      int next_eip_addend);
 void raise_exception_err(int exception_index, int error_code);
…
 void OPPROTO op_sti_vme(void);
 #endif
-void helper_divl_EAX_T0(void);
-void helper_idivl_EAX_T0(void);
-void helper_mulq_EAX_T0(void);
-void helper_imulq_EAX_T0(void);
-void helper_imulq_T0_T1(void);
-void helper_divq_EAX_T0(void);
-void helper_idivq_EAX_T0(void);
-void helper_bswapq_T0(void);
-void helper_cmpxchg8b(void);
-void helper_single_step(void);
-void helper_cpuid(void);
-void helper_enter_level(int level, int data32);
-void helper_enter64_level(int level, int data64);
-void helper_sysenter(void);
-void helper_sysexit(void);
-void helper_syscall(int next_eip_addend);
-void helper_sysret(int dflag);
-void helper_rdtsc(void);
-void helper_rdmsr(void);
-void helper_wrmsr(void);
-void helper_lsl(void);
-void helper_lar(void);
-void helper_verr(void);
-void helper_verw(void);
-void helper_rsm(void);
-
-#ifdef VBOX
-void helper_external_event(void);
-void helper_record_call(void);
-
-/* in helper.c */
-void sync_seg(CPUX86State *env1, int seg_reg, int selector);
-void sync_ldtr(CPUX86State *env1, int selector);
-int sync_tr(CPUX86State *env1, int selector);
-
-#endif
+
+/* n must be a constant to be efficient */
+static inline target_long lshift(target_long x, int n)
+{
+    if (n >= 0)
+        return x << n;
+    else
+        return x >> (-n);
+}
+
+#include "helper.h"
+
+static inline void svm_check_intercept(uint32_t type)
+{
+    helper_svm_check_intercept_param(type, 0);
+}
 
 void check_iob_T0(void);
…
 #define floatx_to_int32_round_to_zero floatx80_to_int32_round_to_zero
 #define floatx_to_int64_round_to_zero floatx80_to_int64_round_to_zero
+#define int32_to_floatx int32_to_floatx80
+#define int64_to_floatx int64_to_floatx80
+#define float32_to_floatx float32_to_floatx80
+#define float64_to_floatx float64_to_floatx80
+#define floatx_to_float32 floatx80_to_float32
+#define floatx_to_float64 floatx80_to_float64
 #define floatx_abs floatx80_abs
 #define floatx_chs floatx80_chs
…
 #define floatx_to_int32_round_to_zero float64_to_int32_round_to_zero
 #define floatx_to_int64_round_to_zero float64_to_int64_round_to_zero
+#define int32_to_floatx int32_to_float64
+#define int64_to_floatx int64_to_float64
+#define float32_to_floatx float32_to_float64
+#define float64_to_floatx(x, e) (x)
+#define floatx_to_float32 float64_to_float32
+#define floatx_to_float64(x, e) (x)
 #define floatx_abs float64_abs
 #define floatx_chs float64_chs
…
 #endif
 
+#ifdef VBOX
 extern CPU86_LDouble sin(CPU86_LDouble x);
 extern CPU86_LDouble cos(CPU86_LDouble x);
…
 extern CPU86_LDouble floor(CPU86_LDouble x);
 extern CPU86_LDouble ceil(CPU86_LDouble x);
+#endif
 
 #define RC_MASK 0xc00
…
 extern const CPU86_LDouble f15rk[7];
 
-void helper_fldt_ST0_A0(void);
-void helper_fstt_ST0_A0(void);
 void fpu_raise_exception(void);
-CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b);
-void helper_fbld_ST0_A0(void);
-void helper_fbst_ST0_A0(void);
-void helper_f2xm1(void);
-void helper_fyl2x(void);
-void helper_fptan(void);
-void helper_fpatan(void);
-void helper_fxtract(void);
-void helper_fprem1(void);
-void helper_fprem(void);
-void helper_fyl2xp1(void);
-void helper_fsqrt(void);
-void helper_fsincos(void);
-void helper_frndint(void);
-void helper_fscale(void);
-void helper_fsin(void);
-void helper_fcos(void);
-void helper_fxam_ST0(void);
-void helper_fstenv(target_ulong ptr, int data32);
-void helper_fldenv(target_ulong ptr, int data32);
-void helper_fsave(target_ulong ptr, int data32);
-void helper_frstor(target_ulong ptr, int data32);
-void helper_fxsave(target_ulong ptr, int data64);
-void helper_fxrstor(target_ulong ptr, int data64);
 void restore_native_fp_state(CPUState *env);
 void save_native_fp_state(CPUState *env);
-float approx_rsqrt(float a);
-float approx_rcp(float a);
-void update_fp_status(void);
-void helper_hlt(void);
-void helper_monitor(void);
-void helper_mwait(void);
 
 extern const uint8_t parity_table[256];
…
 #endif
 }
+
+static inline int cpu_halted(CPUState *env) {
+    /* handle exit of HALTED state */
+    if (!env->halted)
+        return 0;
+    /* disable halt condition */
+    if (((env->interrupt_request & CPU_INTERRUPT_HARD) &&
+         (env->eflags & IF_MASK)) ||
+        (env->interrupt_request & CPU_INTERRUPT_NMI)) {
+        env->halted = 0;
+        return 0;
+    }
+    return EXCP_HALTED;
+}
+
+/* load efer and update the corresponding hflags. XXX: do consistency
+   checks with cpuid bits ? */
+static inline void cpu_load_efer(CPUState *env, uint64_t val)
+{
+    env->efer = val;
+    env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK);
+    if (env->efer & MSR_EFER_LMA)
+        env->hflags |= HF_LMA_MASK;
+    if (env->efer & MSR_EFER_SVME)
+        env->hflags |= HF_SVME_MASK;
+}
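cpu_load_efer() centralizes EFER writes so the HF_LMA/HF_SVME hflags that translated code tests can never go stale. A sketch of a WRMSR path funneling through it; the writable-bit mask here is hypothetical, and the changeset's real wrmsr helper is not shown in this diff:

    /* Hypothetical EFER update path. */
    static void wrmsr_efer_sketch(CPUState *env, uint64_t val)
    {
        const uint64_t writable = MSR_EFER_SCE | MSR_EFER_LME | MSR_EFER_NXE
                                | MSR_EFER_SVME | MSR_EFER_FFXSR; /* assumed mask */
        cpu_load_efer(env, (env->efer & ~writable) | (val & writable));
    }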
trunk/src/recompiler_new/translate-all.c (r11982 → r13230)

 #include "exec-all.h"
 #include "disas.h"
-
-extern int dyngen_code(uint8_t *gen_code_buf,
-                       uint16_t *label_offsets, uint16_t *jmp_offsets,
-                       const uint16_t *opc_buf, const uint32_t *opparam_buf, const long *gen_labels);
-
-enum {
-#define DEF(s, n, copy_size) INDEX_op_ ## s,
-#include "opc.h"
-#undef DEF
-    NB_OPS,
-};
+#include "tcg.h"
+
+#ifndef VBOX
+/* code generation context */
+TCGContext tcg_ctx;
+#else
+TCGContext g_tcg_ctx;
+static TCGContext* getCompilerCtx(CPUState *env)
+{
+    /** @todo nike: should be field in CPU env */
+    //return env->tcg_context;
+    return &g_tcg_ctx;
+}
+#endif
 
 uint16_t gen_opc_buf[OPC_BUF_SIZE];
…
 #endif
 
-int code_copy_enabled = 1;
-
-#ifdef DEBUG_DISAS
-static const char *op_str[] = {
-#define DEF(s, n, copy_size) #s,
-#include "opc.h"
-#undef DEF
-};
-
-static uint8_t op_nb_args[] = {
-#define DEF(s, n, copy_size) n,
-#include "opc.h"
-#undef DEF
-};
-#endif /* bird: opc_copy_size is used ouside DEBUG_DISAS and VBOX isn't necessarily defining DEBUG_DISAS presently. */
-
-static const unsigned short opc_copy_size[] = {
-#define DEF(s, n, copy_size) copy_size,
-#include "opc.h"
-#undef DEF
-};
-
-#ifdef DEBUG_DISAS /* bird: see previous bird comment. */
-void dump_ops(const uint16_t *opc_buf, const uint32_t *opparam_buf)
-{
-    const uint16_t *opc_ptr;
-    const uint32_t *opparam_ptr;
-    int c, n, i;
-
-    opc_ptr = opc_buf;
-    opparam_ptr = opparam_buf;
-    for(;;) {
-        c = *opc_ptr++;
-        n = op_nb_args[c];
-        fprintf(logfile, "0x%04x: %s",
-                (int)(opc_ptr - opc_buf - 1), op_str[c]);
-        for(i = 0; i < n; i++) {
-            fprintf(logfile, " 0x%x", opparam_ptr[i]);
-        }
-        fprintf(logfile, "\n");
-        if (c == INDEX_op_end)
-            break;
-        opparam_ptr += n;
-    }
-}
-
-#endif
-
-/* compute label info */
-static void dyngen_labels(long *gen_labels, int nb_gen_labels,
-                          uint8_t *gen_code_buf, const uint16_t *opc_buf)
-{
-    uint8_t *gen_code_ptr;
-    int c, i;
-    unsigned long gen_code_addr[OPC_BUF_SIZE];
-
-    if (nb_gen_labels == 0)
-        return;
-    /* compute the address of each op code */
-
-    gen_code_ptr = gen_code_buf;
-    i = 0;
-    for(;;) {
-        c = opc_buf[i];
-        gen_code_addr[i] = (unsigned long)gen_code_ptr;
-        if (c == INDEX_op_end)
-            break;
-        gen_code_ptr += opc_copy_size[c];
-        i++;
-    }
-
-    /* compute the address of each label */
-    for(i = 0; i < nb_gen_labels; i++) {
-        gen_labels[i] = gen_code_addr[gen_labels[i]];
-    }
-}
+/* XXX: suppress that */
+unsigned long code_gen_max_block_size(void)
+{
+    static unsigned long max;
+
+    if (max == 0) {
+        max = TCG_MAX_OP_SIZE;
+#define DEF(s, n, copy_size) max = copy_size > max? copy_size : max;
+#include "tcg-opc.h"
+#undef DEF
+        max *= OPC_MAX_SIZE;
+    }
+
+    return max;
+}
+
+#ifndef VBOX
+void cpu_gen_init()
+{
+    tcg_context_init(&tcg_ctx);
+    tcg_set_frame(&tcg_ctx, TCG_AREG0, offsetof(CPUState, temp_buf),
+                  CPU_TEMP_BUF_NLONGS * sizeof(long));
+}
+#else
+void cpu_gen_init(CPUState *env)
+{
+    TCGContext* tcg_ctx = getCompilerCtx(env);
+    tcg_context_init(tcg_ctx);
+    tcg_set_frame(tcg_ctx, TCG_AREG0, offsetof(CPUState, temp_buf),
+                  CPU_TEMP_BUF_NLONGS * sizeof(long));
+}
+#endif
 
 /* return non zero if the very first instruction is invalid so that
…
                  int max_code_size, int *gen_code_size_ptr)
 {
+#ifdef VBOX
+    TCGContext *s = getCompilerCtx(env);
+#else
+    TCGContext *s = &tcg_ctx;
+#endif
     uint8_t *gen_code_buf;
     int gen_code_size;
-
-#ifdef USE_CODE_COPY
-    if (code_copy_enabled &&
-        cpu_gen_code_copy(env, tb, max_code_size, &gen_code_size) == 0) {
-        /* nothing more to do */
-    } else
-#endif
+#ifdef CONFIG_PROFILER
+    int64_t ti;
+#endif
+
+#ifdef CONFIG_PROFILER
+    s->tb_count1++; /* includes aborted translations because of
+                       exceptions */
+    ti = profile_getclock();
+#endif
+
+#ifdef VBOX
+    RAWEx_ProfileStart(env, STATS_QEMU_COMPILATION);
+    tcg_func_start(s);
+
+    if (gen_intermediate_code(env, tb) < 0)
     {
-#ifdef VBOX
-        RAWEx_ProfileStart(env, STATS_QEMU_COMPILATION);
-        if (gen_intermediate_code(env, tb) < 0)
-        {
-            RAWEx_ProfileStop(env, STATS_QEMU_COMPILATION);
-            return -1;
-        }
+        RAWEx_ProfileStop(env, STATS_QEMU_COMPILATION);
+        return -1;
+    }
 #else /* !VBOX */
-        if (gen_intermediate_code(env, tb) < 0)
-            return -1;
+    tcg_func_start(s);
+
+    if (gen_intermediate_code(env, tb) < 0)
+        return -1;
 #endif /* !VBOX */
 
-        /* generate machine code */
-        tb->tb_next_offset[0] = 0xffff;
-        tb->tb_next_offset[1] = 0xffff;
-        gen_code_buf = tb->tc_ptr;
+    /* generate machine code */
+    gen_code_buf = tb->tc_ptr;
+    tb->tb_next_offset[0] = 0xffff;
+    tb->tb_next_offset[1] = 0xffff;
+    s->tb_next_offset = tb->tb_next_offset;
 #ifdef USE_DIRECT_JUMP
-        /* the following two entries are optional (only used for string ops) */
-        tb->tb_jmp_offset[2] = 0xffff;
-        tb->tb_jmp_offset[3] = 0xffff;
-#endif
-        dyngen_labels(gen_labels, nb_gen_labels, gen_code_buf, gen_opc_buf);
-
-        gen_code_size = dyngen_code(gen_code_buf, tb->tb_next_offset,
-#ifdef USE_DIRECT_JUMP
-                                    tb->tb_jmp_offset,
-#else
-                                    NULL,
-#endif
-                                    gen_opc_buf, gen_opparam_buf, gen_labels);
-#ifdef VBOX
-        RAWEx_ProfileStop(env, STATS_QEMU_COMPILATION);
-#endif
-    }
+    s->tb_jmp_offset = tb->tb_jmp_offset;
+    s->tb_next = NULL;
+    /* the following two entries are optional (only used for string ops) */
+    tb->tb_jmp_offset[2] = 0xffff;
+    tb->tb_jmp_offset[3] = 0xffff;
+#else
+    s->tb_jmp_offset = NULL;
+    s->tb_next = tb->tb_next;
+#endif
+
+#ifdef CONFIG_PROFILER
+    s->tb_count++;
+    s->interm_time += profile_getclock() - ti;
+    s->code_time -= profile_getclock();
+#endif
+
+    gen_code_size = dyngen_code(s, gen_code_buf);
     *gen_code_size_ptr = gen_code_size;
+#ifdef CONFIG_PROFILER
+    s->code_time += profile_getclock();
+    s->code_in_len += tb->size;
+    s->code_out_len += gen_code_size;
+#endif
+
+#ifdef VBOX
+    RAWEx_ProfileStop(env, STATS_QEMU_COMPILATION);
+#endif
+
 #ifdef DEBUG_DISAS
     if (loglevel & CPU_LOG_TB_OUT_ASM) {
…
                        void *puc)
 {
-    int j, c;
+#ifndef VBOX
+    TCGContext *s = &tcg_ctx;
+#else
+    TCGContext *s = getCompilerCtx(env);
+#endif
+    int j;
     unsigned long tc_ptr;
-    uint16_t *opc_ptr;
-
-#ifdef USE_CODE_COPY
-    if (tb->cflags & CF_CODE_COPY) {
-        return cpu_restore_state_copy(tb, env, searched_pc, puc);
-    }
-#endif
+#ifdef CONFIG_PROFILER
+    int64_t ti;
+#endif
+
+#ifdef CONFIG_PROFILER
+    ti = profile_getclock();
+#endif
+    tcg_func_start(s);
+
+#ifdef VBOX
+    /** @todo: what's right here? */
     if (gen_intermediate_code_pc(env, tb) < 0)
         return -1;
-
+#else
+    gen_intermediate_code_pc(env, tb);
+#endif
+
+    if (use_icount) {
+        /* Reset the cycle counter to the start of the block.  */
+        env->icount_decr.u16.low += tb->icount;
+        /* Clear the IO flag.  */
+        env->can_do_io = 0;
+    }
+
     /* find opc index corresponding to search_pc */
     tc_ptr = (unsigned long)tb->tc_ptr;
     if (searched_pc < tc_ptr)
         return -1;
-    j = 0;
-    opc_ptr = gen_opc_buf;
-    for(;;) {
-        c = *opc_ptr;
-        if (c == INDEX_op_end)
-            return -1;
-        tc_ptr += opc_copy_size[c];
-        if (searched_pc < tc_ptr)
-            break;
-        opc_ptr++;
-    }
-    j = opc_ptr - gen_opc_buf;
+
+    s->tb_next_offset = tb->tb_next_offset;
+#ifdef USE_DIRECT_JUMP
+    s->tb_jmp_offset = tb->tb_jmp_offset;
+    s->tb_next = NULL;
+#else
+    s->tb_jmp_offset = NULL;
+    s->tb_next = tb->tb_next;
+#endif
+    j = dyngen_code_search_pc(s, (uint8_t *)tc_ptr, searched_pc - tc_ptr);
+    if (j < 0)
+        return -1;
     /* now find start of instruction before */
     while (gen_opc_instr_start[j] == 0)
         j--;
-#if defined(TARGET_I386)
-    {
-        int cc_op;
-#ifdef DEBUG_DISAS
-        if (loglevel & CPU_LOG_TB_OP) {
-            int i;
-            fprintf(logfile, "RESTORE:\n");
-            for(i=0;i<=j; i++) {
-                if (gen_opc_instr_start[i]) {
-                    fprintf(logfile, "0x%04x: " TARGET_FMT_lx "\n", i, gen_opc_pc[i]);
-                }
-            }
-            fprintf(logfile, "spc=0x%08lx j=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
-                    searched_pc, j, gen_opc_pc[j] - tb->cs_base,
-                    (uint32_t)tb->cs_base);
-        }
-#endif
-        env->eip = gen_opc_pc[j] - tb->cs_base;
-        cc_op = gen_opc_cc_op[j];
-        if (cc_op != CC_OP_DYNAMIC)
-            env->cc_op = cc_op;
-    }
-#elif defined(TARGET_ARM)
-    env->regs[15] = gen_opc_pc[j];
-#elif defined(TARGET_SPARC)
-    {
-        target_ulong npc;
-        env->pc = gen_opc_pc[j];
-        npc = gen_opc_npc[j];
-        if (npc == 1) {
-            /* dynamic NPC: already stored */
-        } else if (npc == 2) {
-            target_ulong t2 = (target_ulong)puc;
-            /* jump PC: use T2 and the jump targets of the translation */
-            if (t2)
-                env->npc = gen_opc_jump_pc[0];
-            else
-                env->npc = gen_opc_jump_pc[1];
-        } else {
-            env->npc = npc;
-        }
-    }
-#elif defined(TARGET_PPC)
-    {
-        int type;
-        /* for PPC, we need to look at the micro operation to get the
-           access type */
-        env->nip = gen_opc_pc[j];
-        switch(c) {
-#if defined(CONFIG_USER_ONLY)
-#define CASE3(op)\
-        case INDEX_op_ ## op ## _raw
-#else
-#define CASE3(op)\
-        case INDEX_op_ ## op ## _user:\
-        case INDEX_op_ ## op ## _kernel
-#endif
-
-        CASE3(stfd):
-        CASE3(stfs):
-        CASE3(lfd):
-        CASE3(lfs):
-            type = ACCESS_FLOAT;
-            break;
-        CASE3(lwarx):
-            type = ACCESS_RES;
-            break;
-        CASE3(stwcx):
-            type = ACCESS_RES;
-            break;
-        CASE3(eciwx):
-        CASE3(ecowx):
-            type = ACCESS_EXT;
-            break;
-        default:
-            type = ACCESS_INT;
-            break;
-        }
-        env->access_type = type;
-    }
-#elif defined(TARGET_M68K)
-    env->pc = gen_opc_pc[j];
-#elif defined(TARGET_MIPS)
-    env->PC = gen_opc_pc[j];
-    env->hflags &= ~MIPS_HFLAG_BMASK;
-    env->hflags |= gen_opc_hflags[j];
-#endif
+    env->icount_decr.u16.low -= gen_opc_icount[j];
+
+    gen_pc_load(env, tb, searched_pc, j, puc);
+
+#ifdef CONFIG_PROFILER
+    s->restore_time += profile_getclock() - ti;
+    s->restore_count++;
+#endif
     return 0;