Changeset 36175 in vbox for trunk/src/recompiler
- Timestamp: Mar 4, 2011 4:21:09 PM
- svn:sync-xref-src-repo-rev: 70371
- Location: trunk/src/recompiler
- Files: 3 added, 49 edited
Legend:
- "+" marks an added line
- "-" marks a removed line
- unprefixed lines are unmodified context
- "…" marks elided unchanged lines
trunk/src/recompiler/Sun/config.h
r28800 → r36175:

 #define TARGET_I386 1
 #define CONFIG_SOFTMMU 1
+#define TARGET_PHYS_ADDR_BITS 64

 #ifdef VBOX_WITH_64_BITS_GUESTS
trunk/src/recompiler/VBoxRecompiler.c
r36170 → r36175:

      * Register ram types.
      */
-    pVM->rem.s.iMMIOMemType    = cpu_register_io_memory(-1, g_apfnMMIORead, g_apfnMMIOWrite, pVM);
+    pVM->rem.s.iMMIOMemType    = cpu_register_io_memory(g_apfnMMIORead, g_apfnMMIOWrite, pVM);
     AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
-    pVM->rem.s.iHandlerMemType = cpu_register_io_memory(-1, g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
+    pVM->rem.s.iHandlerMemType = cpu_register_io_memory(g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
     AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
     Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));
…
 /**
- * Initializes phys_ram_size, phys_ram_dirty and phys_ram_dirty_size.
+ * Initializes phys_ram_dirty and phys_ram_dirty_size.
  *
  * @returns VBox status code.
…
                     ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
                     VERR_OUT_OF_RANGE);
-    phys_ram_size       = cb;
     phys_ram_dirty_size = cb >> PAGE_SHIFT;
     AssertMsg(((RTGCPHYS)phys_ram_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));
…
      */
     interrupt_request = pVM->rem.s.Env.interrupt_request;
-    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
+    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
     pVM->rem.s.Env.interrupt_request = 0;
     cpu_single_step(&pVM->rem.s.Env, 1);
…
 {
     int interrupt_request = pVM->rem.s.Env.interrupt_request;
-    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
+    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
 #ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
     cpu_single_step(&pVM->rem.s.Env, 0);
…
      * (See @remark for why we don't check for other FFs.)
      */
-    pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
+    pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
     if (    pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
         ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
…
 #define LOG_GROUP LOG_GROUP_REM_IOPORT

-void cpu_outb(CPUState *env, int addr, int val)
+void cpu_outb(CPUState *env, pio_addr_t addr, uint8_t val)
 {
     int rc;
…
 }

-void cpu_outw(CPUState *env, int addr, int val)
+void cpu_outw(CPUState *env, pio_addr_t addr, uint16_t val)
 {
     //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
…
 }

-void cpu_outl(CPUState *env, int addr, int val)
+void cpu_outl(CPUState *env, pio_addr_t addr, uint32_t val)
 {
     int rc;
…
 }

-int cpu_inb(CPUState *env, int addr)
+uint8_t cpu_inb(CPUState *env, pio_addr_t addr)
 {
     uint32_t u32 = 0;
…
         if (/*addr != 0x61 && */addr != 0x71)
             Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
-        return (int)u32;
+        return (uint8_t)u32;
     }
     if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
     {
         Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
         remR3RaiseRC(env->pVM, rc);
-        return (int)u32;
+        return (uint8_t)u32;
     }
     remAbort(rc, __FUNCTION__);
-    return 0xff;
-}
-
-int cpu_inw(CPUState *env, int addr)
+    return UINT8_C(0xff);
+}
+
+uint16_t cpu_inw(CPUState *env, pio_addr_t addr)
 {
     uint32_t u32 = 0;
…
     {
         Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
-        return (int)u32;
+        return (uint16_t)u32;
     }
     if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
     {
         Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
         remR3RaiseRC(env->pVM, rc);
-        return (int)u32;
+        return (uint16_t)u32;
     }
     remAbort(rc, __FUNCTION__);
-    return 0xffff;
-}
-
-int cpu_inl(CPUState *env, int addr)
+    return UINT16_C(0xffff);
+}
+
+uint32_t cpu_inl(CPUState *env, pio_addr_t addr)
 {
     uint32_t u32 = 0;
…
 //        loglevel = ~0;
         Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
-        return (int)u32;
+        return u32;
     }
     if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
     {
         Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
         remR3RaiseRC(env->pVM, rc);
-        return (int)u32;
+        return u32;
     }
     remAbort(rc, __FUNCTION__);
-    return 0xffffffff;
+    return UINT32_C(0xffffffff);
 }
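Note: this file tracks two upstream QEMU API changes at once: cpu_register_io_memory() no longer takes a caller-supplied io_index as its first argument, and the port I/O helpers switch from plain int to pio_addr_t plus fixed-width value types. The sketch below is ours, not code from the changeset; it reproduces pio_addr_t as the 32-bit typedef QEMU's ioport.h uses and shows how the narrowing that callers previously did by hand is now part of the cpu_inb() contract:

    #include <stdint.h>
    #include <stdio.h>

    /* ioport.h in this QEMU version defines pio_addr_t as a 32-bit type. */
    typedef uint32_t pio_addr_t;

    /* Old shape: int cpu_inb(CPUState *env, int addr) returned a widened int.
       New shape returns the port's natural width, so truncation is explicit
       and happens in exactly one place. */
    static uint8_t demo_inb(pio_addr_t addr)
    {
        uint32_t u32 = 0xabcd;   /* pretend the device handler returned this */
        (void)addr;
        return (uint8_t)u32;     /* mirrors "return (uint8_t)u32;" above */
    }

    int main(void)
    {
        printf("inb(0x71) -> %#x\n", demo_inb(0x71)); /* prints 0xcd */
        return 0;
    }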
trunk/src/recompiler/bswap.h
r36170 → r36175:

     uint8_t *p1 = (uint8_t *)p;

-    p1[0] = (uint8_t)v;
+    p1[0] = v & 0xff;
     p1[1] = v >> 8;
 }
…
     uint8_t *p1 = (uint8_t *)p;

-    p1[0] = (uint8_t)v;
+    p1[0] = v & 0xff;
     p1[1] = v >> 8;
     p1[2] = v >> 16;
…
     p1[0] = v >> 8;
-    p1[1] = (uint8_t)v;
+    p1[1] = v & 0xff;
…
     p1[1] = v >> 16;
     p1[2] = v >> 8;
-    p1[3] = (uint8_t)v;
+    p1[3] = v & 0xff;
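Note: these helpers assemble multi-byte values one byte at a time, so the result has the same layout on any host; the change only selects the low byte with a mask instead of a cast, which is equivalent but avoids narrowing-conversion warnings on some compilers. A self-contained demo of the pattern (the _demo suffix is ours):

    #include <stdint.h>
    #include <stdio.h>

    /* Little-endian 16-bit store in the style of the helpers above. */
    static void stw_le_p_demo(void *p, uint16_t v)
    {
        uint8_t *p1 = (uint8_t *)p;
        p1[0] = v & 0xff;   /* low byte, selected by mask as in the new code */
        p1[1] = v >> 8;     /* high byte */
    }

    int main(void)
    {
        uint8_t buf[2];
        stw_le_p_demo(buf, 0x1234);
        printf("%02x %02x\n", buf[0], buf[1]); /* "34 12" on any host */
        return 0;
    }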
trunk/src/recompiler/cpu-all.h
r36170 → r36175:

 * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
…
 #endif /* VBOX */
 #include "qemu-common.h"
-
-#if defined(__arm__) || defined(__sparc__) || defined(__mips__) || defined(__hppa__)
-#define WORDS_ALIGNED
-#endif
+#include "cpu-common.h"

 /* some important defines:
…
 */

-#include "bswap.h"
 #include "softfloat.h"
…
 void page_dump(FILE *f);
+int walk_memory_regions(void *,
+                        int (*fn)(void *, unsigned long, unsigned long, unsigned long));
 int page_get_flags(target_ulong address);
 void page_set_flags(target_ulong start, target_ulong end, int flags);
…
 void cpu_exec_init_all(unsigned long tb_size);
 CPUState *cpu_copy(CPUState *env);
+CPUState *qemu_get_cpu(int cpu);

 void cpu_dump_state(CPUState *env, FILE *f,
…
 extern int use_icount;

-#define CPU_INTERRUPT_EXIT   0x01 /* wants exit from main loop */
 #define CPU_INTERRUPT_HARD   0x02 /* hardware interrupt pending */
 #define CPU_INTERRUPT_EXITTB 0x04 /* exit the current TB (use for x86 a20 case) */
…
 #define CPU_INTERRUPT_VIRQ   0x100 /* virtual interrupt pending. */
 #define CPU_INTERRUPT_NMI    0x200 /* NMI pending. */
+#define CPU_INTERRUPT_INIT   0x400 /* INIT pending. */
+#define CPU_INTERRUPT_SIPI   0x800 /* SIPI pending. */
+#define CPU_INTERRUPT_MCE    0x1000 /* (x86 only) MCE pending. */

 #ifdef VBOX
 /** Executes a single instruction. cpu_exec() will normally return EXCP_SINGLE_INSTR. */
-# define CPU_INTERRUPT_SINGLE_INSTR             0x0400
+# define CPU_INTERRUPT_SINGLE_INSTR             0x02000000
 /** Executing a CPU_INTERRUPT_SINGLE_INSTR request, quit the cpu_loop. (for exceptions and suchlike) */
-# define CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT   0x0800
+# define CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT   0x04000000
 /** VM execution was interrupted by VMR3Reset, VMR3Suspend or VMR3PowerOff. */
-# define CPU_INTERRUPT_RC                       0x1000
-/** Exit current TB to process an external interrupt request (also in op.c!!) */
-# define CPU_INTERRUPT_EXTERNAL_EXIT            0x2000
-/** Exit current TB to process an external interrupt request (also in op.c!!) */
-# define CPU_INTERRUPT_EXTERNAL_HARD            0x4000
-/** Exit current TB to process an external interrupt request (also in op.c!!) */
-# define CPU_INTERRUPT_EXTERNAL_TIMER           0x8000
-/** Exit current TB to process an external interrupt request (also in op.c!!) */
-# define CPU_INTERRUPT_EXTERNAL_DMA             0x10000
+# define CPU_INTERRUPT_RC                       0x08000000
+/** Exit current TB to process an external request. */
+# define CPU_INTERRUPT_EXTERNAL_EXIT            0x10000000
+/** Exit current TB to process an external interrupt request. */
+# define CPU_INTERRUPT_EXTERNAL_HARD            0x20000000
+/** Exit current TB to process an external timer request. */
+# define CPU_INTERRUPT_EXTERNAL_TIMER           0x40000000
+/** Exit current TB to process an external DMA request. */
+# define CPU_INTERRUPT_EXTERNAL_DMA             0x80000000
 #endif /* VBOX */
 void cpu_interrupt(CPUState *s, int mask);
 void cpu_reset_interrupt(CPUState *env, int mask);
+
+void cpu_exit(CPUState *s);
+
+int qemu_cpu_has_work(CPUState *env);

 /* Breakpoint/watchpoint flags */
…
 /* IO ports API */
-
-/* NOTE: as these functions may be even used when there is an isa
-   brige on non x86 targets, we always defined them */
-#ifndef NO_CPU_IO_DEFS
-void cpu_outb(CPUState *env, int addr, int val);
-void cpu_outw(CPUState *env, int addr, int val);
-void cpu_outl(CPUState *env, int addr, int val);
-int cpu_inb(CPUState *env, int addr);
-int cpu_inw(CPUState *env, int addr);
-int cpu_inl(CPUState *env, int addr);
-#endif
-
-/* address in the RAM (different from a physical address) */
-#ifdef USE_KQEMU
-typedef uint32_t ram_addr_t;
-#else
-typedef unsigned long ram_addr_t;
-#endif
+#include "ioport.h"

 /* memory API */

 #ifndef VBOX
-extern ram_addr_t phys_ram_size;
 extern int phys_ram_fd;
-extern uint8_t *phys_ram_base;
 extern uint8_t *phys_ram_dirty;
 extern ram_addr_t ram_size;
+extern ram_addr_t last_ram_offset;
 #else /* VBOX */
-extern RTGCPHYS phys_ram_size;
 /** This is required for bounds checking the phys_ram_dirty accesses. */
 extern RTGCPHYS phys_ram_dirty_size;
…
    so only a limited number of ids are avaiable.  */

-#define IO_MEM_SHIFT       3
 #define IO_MEM_NB_ENTRIES  (1 << (TARGET_PAGE_BITS  - IO_MEM_SHIFT))
-
-#define IO_MEM_RAM         (0 << IO_MEM_SHIFT) /* hardcoded offset */
-#define IO_MEM_ROM         (1 << IO_MEM_SHIFT) /* hardcoded offset */
-#define IO_MEM_UNASSIGNED  (2 << IO_MEM_SHIFT)
-#define IO_MEM_NOTDIRTY    (3 << IO_MEM_SHIFT)
-
-/* Acts like a ROM when read and like a device when written.  */
-#define IO_MEM_ROMD        (1)
-#define IO_MEM_SUBPAGE     (2)
-#define IO_MEM_SUBWIDTH    (4)

 /* Flags stored in the low bits of the TLB virtual address.  These are
…
 #define TLB_MMIO        (1 << 5)

-typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr, uint32_t value);
-typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr);
-
-void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
-                                         ram_addr_t size,
-                                         ram_addr_t phys_offset,
-                                         ram_addr_t region_offset);
-static inline void cpu_register_physical_memory(target_phys_addr_t start_addr,
-                                                ram_addr_t size,
-                                                ram_addr_t phys_offset)
-{
-    cpu_register_physical_memory_offset(start_addr, size, phys_offset, 0);
-}
-
-ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr);
-ram_addr_t qemu_ram_alloc(ram_addr_t);
-void qemu_ram_free(ram_addr_t addr);
-int cpu_register_io_memory(int io_index,
-                           CPUReadMemoryFunc **mem_read,
-                           CPUWriteMemoryFunc **mem_write,
-                           void *opaque);
-void cpu_unregister_io_memory(int table_address);
-CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index);
-CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index);
-
-void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
-                            int len, int is_write);
-static inline void cpu_physical_memory_read(target_phys_addr_t addr,
-                                            uint8_t *buf, int len)
-{
-    cpu_physical_memory_rw(addr, buf, len, 0);
-}
-static inline void cpu_physical_memory_write(target_phys_addr_t addr,
-                                             const uint8_t *buf, int len)
-{
-    cpu_physical_memory_rw(addr, (uint8_t *)buf, len, 1);
-}
-void *cpu_physical_memory_map(target_phys_addr_t addr,
-                              target_phys_addr_t *plen,
-                              int is_write);
-void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
-                               int is_write, target_phys_addr_t access_len);
-void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque));
-void cpu_unregister_map_client(void *cookie);
-
-uint32_t ldub_phys(target_phys_addr_t addr);
-uint32_t lduw_phys(target_phys_addr_t addr);
-uint32_t ldl_phys(target_phys_addr_t addr);
-uint64_t ldq_phys(target_phys_addr_t addr);
-void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val);
-void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val);
-void stb_phys(target_phys_addr_t addr, uint32_t val);
-void stw_phys(target_phys_addr_t addr, uint32_t val);
-void stl_phys(target_phys_addr_t addr, uint32_t val);
-void stq_phys(target_phys_addr_t addr, uint64_t val);
-
-void cpu_physical_memory_write_rom(target_phys_addr_t addr,
-                                   const uint8_t *buf, int len);
 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                         uint8_t *buf, int len, int is_write);
…
 /* read dirty bit (return 0 or 1) */
-#ifndef VBOX
 static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
 {
-    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
-}
-#else /* VBOX */
-DECLINLINE(int) cpu_physical_memory_is_dirty(ram_addr_t addr)
-{
+#ifdef VBOX
     if (RT_UNLIKELY((addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
     {
…
         return 0;
     }
+#endif /* VBOX */
     return phys_ram_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
 }
-#endif /* VBOX */
-
-#ifndef VBOX

 static inline int cpu_physical_memory_get_dirty(ram_addr_t addr,
                                                 int dirty_flags)
 {
-    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
-}
-#else /* VBOX */
-DECLINLINE(int) cpu_physical_memory_get_dirty(ram_addr_t addr,
-                                              int dirty_flags)
-{
+#ifdef VBOX
     if (RT_UNLIKELY((addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
     {
…
         return 0xff & dirty_flags; /** @todo I don't think this is the right thing to return, fix! */
     }
+#endif /* VBOX */
     return phys_ram_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
 }
-#endif /* VBOX */
-
-#ifndef VBOX

 static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
 {
-    phys_ram_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
-}
-#else /* VBOX */
-DECLINLINE(void) cpu_physical_memory_set_dirty(ram_addr_t addr)
-{
+#ifdef VBOX
     if (RT_UNLIKELY((addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
     {
…
         return;
     }
+#endif /* VBOX */
     phys_ram_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
 }
-#endif /* VBOX */

 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
…
 int cpu_physical_memory_get_dirty_tracking(void);

-void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr);
+int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
+                                   target_phys_addr_t end_addr);

 void dump_exec_info(FILE *f,
…
 #endif

+void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
+                        uint64_t mcg_status, uint64_t addr, uint64_t misc);
+
 #ifdef VBOX
 void tb_invalidate_virt(CPUState *env, uint32_t eip);
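Note: the renumbering of the VBox-private bits is forced by upstream: QEMU now claims 0x400, 0x800 and 0x1000 for CPU_INTERRUPT_INIT, CPU_INTERRUPT_SIPI and CPU_INTERRUPT_MCE, exactly where the VBox request bits used to sit, so those move to the top byte. A quick compile-time overlap check (our own sanity sketch; values copied from the hunk above):

    /* VBox-private bits after the renumbering. */
    #define CPU_INTERRUPT_SINGLE_INSTR            0x02000000
    #define CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT  0x04000000
    #define CPU_INTERRUPT_RC                      0x08000000
    #define CPU_INTERRUPT_EXTERNAL_EXIT           0x10000000
    #define CPU_INTERRUPT_EXTERNAL_HARD           0x20000000
    #define CPU_INTERRUPT_EXTERNAL_TIMER          0x40000000
    #define CPU_INTERRUPT_EXTERNAL_DMA            0x80000000u

    #define VBOX_REQUEST_MASK \
        (CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT | \
         CPU_INTERRUPT_RC | CPU_INTERRUPT_EXTERNAL_EXIT | \
         CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_TIMER | \
         CPU_INTERRUPT_EXTERNAL_DMA)

    /* All upstream flags (0x02 .. 0x1000) fit below bit 13; a negative array
       size here would break the build if the two ranges ever collided. */
    typedef char vbox_flags_do_not_overlap[(VBOX_REQUEST_MASK & 0x1fff) == 0 ? 1 : -1];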
trunk/src/recompiler/cpu-defs.h
r36171 → r36175:

 * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
…
 #include <setjmp.h>
 #include <inttypes.h>
+#include <signal.h>
 #include "osdep.h"
 #include "sys-queue.h"
+#include "targphys.h"

 #ifndef TARGET_LONG_BITS
 #error TARGET_LONG_BITS must be defined before including this header
 #endif
-
-#ifndef TARGET_PHYS_ADDR_BITS
-#if TARGET_LONG_BITS >= HOST_LONG_BITS
-#define TARGET_PHYS_ADDR_BITS TARGET_LONG_BITS
-#else
-#define TARGET_PHYS_ADDR_BITS HOST_LONG_BITS
-#endif
-#endif
…
 #else
 #error TARGET_LONG_SIZE undefined
 #endif
-
-/* target_phys_addr_t is the type of a physical address (its size can
-   be different from 'target_ulong'). We have sizeof(target_phys_addr)
-   = max(sizeof(unsigned long),
-   sizeof(size_of_target_physical_address)) because we must pass a
-   host pointer to memory operations in some cases */
-
-#if TARGET_PHYS_ADDR_BITS == 32
-typedef uint32_t target_phys_addr_t;
-#define TARGET_FMT_plx "%08x"
-#elif TARGET_PHYS_ADDR_BITS == 64
-typedef uint64_t target_phys_addr_t;
-#define TARGET_FMT_plx "%016" PRIx64
-#else
-#error TARGET_PHYS_ADDR_BITS undefined
-#endif
…
        memory was accessed */                                          \
     uint32_t halted; /* Nonzero if the CPU is in suspend state */      \
+    uint32_t stop;   /* Stop request */                                \
+    uint32_t stopped; /* Artificially stopped */                       \
     uint32_t interrupt_request;                                        \
     volatile /*sig_atomic_t - vbox*/ int32_t exit_request;             \
…
     int exception_index;                                               \
                                                                        \
-    void *next_cpu; /* next CPU sharing TB cache */                    \
+    CPUState *next_cpu; /* next CPU sharing TB cache */                \
     int cpu_index; /* CPU index (informative) */                       \
+    uint32_t host_tid; /* host thread ID */                            \
+    int numa_node; /* NUMA node this cpu is belonging to  */           \
     int running; /* Nonzero if cpu is currently running(usermode). */  \
     /* user data */                                                    \
     void *opaque;                                                      \
                                                                        \
+    uint32_t created;                                                  \
+    struct QemuThread *thread;                                         \
+    struct QemuCond *halt_cond;                                        \
     const char *cpu_model_str;                                         \
     struct KVMState *kvm_state;                                        \
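Note: the removed target_phys_addr_t block does not disappear, it moves into the new targphys.h header included above (with TARGET_PHYS_ADDR_BITS now set explicitly, e.g. to 64 in Sun/config.h earlier in this changeset). A minimal reproduction of what that header provides, reconstructed from the removed lines; consult upstream targphys.h for the authoritative copy:

    /* Assumes <stdint.h>/<inttypes.h> and TARGET_PHYS_ADDR_BITS are set. */
    #if TARGET_PHYS_ADDR_BITS == 32
    typedef uint32_t target_phys_addr_t;
    #define TARGET_FMT_plx "%08x"
    #elif TARGET_PHYS_ADDR_BITS == 64
    typedef uint64_t target_phys_addr_t;
    #define TARGET_FMT_plx "%016" PRIx64
    #endif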
trunk/src/recompiler/cpu-exec.c
r36171 → r36175:

 * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
…
 #include "config.h"
-#define CPU_NO_GLOBAL_REGS
 #include "exec.h"
 #include "disas.h"
…
 //#define DEBUG_EXEC
 //#define DEBUG_SIGNAL
+
+int qemu_cpu_has_work(CPUState *env)
+{
+    return cpu_has_work(env);
+}

 void cpu_loop_exit(void)
…
     RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
-    if (interrupt_request & CPU_INTERRUPT_EXIT)
-    {
-        env->exception_index = EXCP_INTERRUPT;
-        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXIT);
-        ret = env->exception_index;
-        cpu_loop_exit();
-    }
     if (interrupt_request & CPU_INTERRUPT_RC)
     {
…
        gcc-4.4/amd64 anymore, see #3883. */
     env->current_tb = NULL;
-    if (   !(env->interrupt_request & (  CPU_INTERRUPT_EXIT | CPU_INTERRUPT_DEBUG | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_RC
+    if (   !(env->interrupt_request & (  CPU_INTERRUPT_DEBUG | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_RC
                                        | CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
         && (   (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
…
 #elif defined(TARGET_ARM)
 #elif defined(TARGET_PPC)
+#elif defined(TARGET_MICROBLAZE)
 #elif defined(TARGET_MIPS)
 #elif defined(TARGET_SH4)
…
     for(;;) {
         if (setjmp(env->jmp_env) == 0) {
+#if defined(__sparc__) && !defined(HOST_SOLARIS)
+#undef env
+            env = cpu_single_env;
+#define env cpu_single_env
+#endif
             env->current_tb = NULL;
             /* if an exception is pending, we execute it here */
…
 #elif defined(TARGET_PPC)
                     do_interrupt(env);
+#elif defined(TARGET_MICROBLAZE)
+                    do_interrupt(env);
 #elif defined(TARGET_MIPS)
                     do_interrupt(env);
…
                 env->exception_index = -1;
             }
-#ifdef USE_KQEMU
+#ifdef CONFIG_KQEMU
             if (kqemu_is_ok(env) && env->interrupt_request == 0 && env->exit_request == 0) {
                 int ret;
…
             }
 #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
-    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
+    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
+    defined(TARGET_MICROBLAZE)
                 if (interrupt_request & CPU_INTERRUPT_HALT) {
                     env->interrupt_request &= ~CPU_INTERRUPT_HALT;
…
 #endif
 #if defined(TARGET_I386)
-                if (env->hflags2 & HF2_GIF_MASK) {
+                if (interrupt_request & CPU_INTERRUPT_INIT) {
+                    svm_check_intercept(SVM_EXIT_INIT);
+                    do_cpu_init(env);
+                    env->exception_index = EXCP_HALTED;
+                    cpu_loop_exit();
+                } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
+                    do_cpu_sipi(env);
+                } else if (env->hflags2 & HF2_GIF_MASK) {
                     if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                         !(env->hflags & HF_SMM_MASK)) {
…
                         do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                         next_tb = 0;
+                    } else if (interrupt_request & CPU_INTERRUPT_MCE) {
+                        env->interrupt_request &= ~CPU_INTERRUPT_MCE;
+                        do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
+                        next_tb = 0;
                     } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                (((env->hflags2 & HF2_VINTR_MASK) &&
…
                         intno = cpu_get_pic_interrupt(env);
                         qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
+#if defined(__sparc__) && !defined(HOST_SOLARIS)
+#undef env
+                        env = cpu_single_env;
+#define env cpu_single_env
+#endif
                         do_interrupt(intno, 0, 0, 0, 1);
                         /* ensure that no TB jump will be modified as
…
                     next_tb = 0;
                 }
+#elif defined(TARGET_MICROBLAZE)
+                if ((interrupt_request & CPU_INTERRUPT_HARD)
+                    && (env->sregs[SR_MSR] & MSR_IE)
+                    && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
+                    && !(env->iflags & (D_FLAG | IMM_FLAG))) {
+                    env->exception_index = EXCP_IRQ;
+                    do_interrupt(env);
+                    next_tb = 0;
+                }
 #elif defined(TARGET_MIPS)
                 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
…
 #elif defined(TARGET_SPARC)
                 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
-                    (env->psret != 0)) {
+                    cpu_interrupts_enabled(env)) {
                     int pil = env->interrupt_index & 15;
                     int type = env->interrupt_index & 0xf0;
…
                         do_interrupt(env);
                         env->interrupt_index = 0;
-#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
+#if !defined(CONFIG_USER_ONLY)
                         cpu_check_irqs(env);
 #endif
…
                          | env->cc_dest | (env->cc_x << 4);
                     log_cpu_state(env, 0);
+#elif defined(TARGET_MICROBLAZE)
+                    log_cpu_state(env, 0);
 #elif defined(TARGET_MIPS)
                     log_cpu_state(env, 0);
…
                 {
                     if (next_tb != 0 &&
-#ifdef USE_KQEMU
+#ifdef CONFIG_KQEMU
                         (env->kqemu_enabled != 2) &&
 #endif
…
             /* reset soft MMU for next block (it can currently
                only be set by a memory fault) */
-#if defined(USE_KQEMU)
+#if defined(CONFIG_KQEMU)
 #define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
             if (kqemu_is_ok(env) &&
…
     env->sr = (env->sr & 0xffe0)
               | env->cc_dest | (env->cc_x << 4);
+#elif defined(TARGET_MICROBLAZE)
 #elif defined(TARGET_MIPS)
 #elif defined(TARGET_SH4)
…
     if (ret == 1) {
 #if 0
-        printf("PF exception: EIP=0x%RGv CR2=0x%RGv error=0x%x\n",
+        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
                env->eip, env->cr[2], env->error_code);
 #endif
…
 }

+#elif defined (TARGET_MICROBLAZE)
+static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
+                                    int is_write, sigset_t *old_set,
+                                    void *puc)
+{
+    TranslationBlock *tb;
+    int ret;
+
+    if (cpu_single_env)
+        env = cpu_single_env; /* XXX: find a correct solution for multithread */
+#if defined(DEBUG_SIGNAL)
+    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
+           pc, address, is_write, *(unsigned long *)old_set);
+#endif
+    /* XXX: locking issue */
+    if (is_write && page_unprotect(h2g(address), pc, puc)) {
+        return 1;
+    }
+
+    /* see if it is an MMU fault */
+    ret = cpu_mb_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
+    if (ret < 0)
+        return 0; /* not an MMU fault */
+    if (ret == 0)
+        return 1; /* the MMU fault was handled without causing real CPU fault */
+
+    /* now we have a real cpu fault */
+    tb = tb_find_pc(pc);
+    if (tb) {
+        /* the PC is inside the translated code. It means that we have
+           a virtual CPU fault */
+        cpu_restore_state(tb, env, pc, puc);
+    }
+    if (ret == 1) {
+#if 0
+        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
+               env->PC, env->error_code, tb);
+#endif
+        /* we restore the process signal mask as the sigreturn should
+           do it (XXX: use sigsetjmp) */
+        sigprocmask(SIG_SETMASK, old_set, NULL);
+        cpu_loop_exit();
+    } else {
+        /* activate soft MMU for this block */
+        cpu_resume_from_signal(env, puc);
+    }
+    /* never comes here */
+    return 1;
+}
+
 #elif defined (TARGET_SH4)
 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
…
 # define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
 # define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
+# define MASK_sig(context)    ((context)->uc_sigmask)
+#elif defined(__OpenBSD__)
+# define EIP_sig(context)     ((context)->sc_eip)
+# define TRAP_sig(context)    ((context)->sc_trapno)
+# define ERROR_sig(context)   ((context)->sc_err)
+# define MASK_sig(context)    ((context)->sc_mask)
 #else
 # define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
 # define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
 # define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
+# define MASK_sig(context)    ((context)->uc_sigmask)
 #endif
…
 {
     siginfo_t *info = pinfo;
+#if defined(__OpenBSD__)
+    struct sigcontext *uc = puc;
+#else
     struct ucontext *uc = puc;
+#endif
     unsigned long pc;
     int trapno;
…
                              trapno == 0xe ?
                              (ERROR_sig(uc) >> 1) & 1 : 0,
-                             &uc->uc_sigmask, puc);
+                             &MASK_sig(uc), puc);
 }
…
 #ifdef __NetBSD__
-#define REG_ERR _REG_ERR
-#define REG_TRAPNO _REG_TRAPNO
-
-#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.__gregs[(reg)]
-#define QEMU_UC_MACHINE_PC(uc) _UC_MACHINE_PC(uc)
+#define PC_sig(context)       _UC_MACHINE_PC(context)
+#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
+#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
+#define MASK_sig(context)     ((context)->uc_sigmask)
+#elif defined(__OpenBSD__)
+#define PC_sig(context)       ((context)->sc_rip)
+#define TRAP_sig(context)     ((context)->sc_trapno)
+#define ERROR_sig(context)    ((context)->sc_err)
+#define MASK_sig(context)     ((context)->sc_mask)
 #else
-#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.gregs[(reg)]
-#define QEMU_UC_MACHINE_PC(uc) QEMU_UC_MCONTEXT_GREGS(uc, REG_RIP)
+#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
+#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
+#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
+#define MASK_sig(context)     ((context)->uc_sigmask)
 #endif
…
 #ifdef __NetBSD__
     ucontext_t *uc = puc;
+#elif defined(__OpenBSD__)
+    struct sigcontext *uc = puc;
 #else
     struct ucontext *uc = puc;
 #endif

-    pc = QEMU_UC_MACHINE_PC(uc);
+    pc = PC_sig(uc);
     return handle_cpu_signal(pc, (unsigned long)info->si_addr,
-                             QEMU_UC_MCONTEXT_GREGS(uc, REG_TRAPNO) == 0xe ?
-                             (QEMU_UC_MCONTEXT_GREGS(uc, REG_ERR) >> 1) & 1 : 0,
-                             &uc->uc_sigmask, puc);
+                             TRAP_sig(uc) == 0xe ?
+                             (ERROR_sig(uc) >> 1) & 1 : 0,
+                             &MASK_sig(uc), puc);
 }
…
         switch((insn >> 19) & 0x3f) {
         case 0x05: // stb
+        case 0x15: // stba
         case 0x06: // sth
+        case 0x16: // stha
         case 0x04: // st
+        case 0x14: // sta
         case 0x07: // std
+        case 0x17: // stda
+        case 0x0e: // stx
+        case 0x1e: // stxa
         case 0x24: // stf
+        case 0x34: // stfa
         case 0x27: // stdf
+        case 0x37: // stdfa
+        case 0x26: // stqf
+        case 0x36: // stqfa
         case 0x25: // stfsr
+        case 0x3c: // casa
+        case 0x3e: // casxa
             is_write = 1;
             break;
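Note: the longer SPARC opcode list feeds the fault handler's is_write decision, which decodes the op3 field (bits 19..24) of the faulting instruction word. A standalone sketch of that decode; the encoded sample instruction is ours:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the extended switch above: returns 1 for store-family op3. */
    static int sparc_insn_is_store(uint32_t insn)
    {
        switch ((insn >> 19) & 0x3f) {
        case 0x05: case 0x15: /* stb, stba */
        case 0x06: case 0x16: /* sth, stha */
        case 0x04: case 0x14: /* st,  sta  */
        case 0x07: case 0x17: /* std, stda */
        case 0x0e: case 0x1e: /* stx, stxa */
        case 0x24: case 0x34: /* stf, stfa */
        case 0x27: case 0x37: /* stdf, stdfa */
        case 0x26: case 0x36: /* stqf, stqfa */
        case 0x25:            /* stfsr */
        case 0x3c: case 0x3e: /* casa, casxa */
            return 1;
        }
        return 0;
    }

    int main(void)
    {
        uint32_t st_g2_g1 = 0xc4206000; /* st %g2, [%g1] : op3 = 0x04 */
        printf("is_write=%d\n", sparc_insn_is_store(st_g2_g1)); /* 1 */
        return 0;
    }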
trunk/src/recompiler/cutils.c
r36170 → r36175:

 }

+/* XXX: use host strnlen if available ? */
+int qemu_strnlen(const char *s, int max_len)
+{
+    int i;
+
+    for(i = 0; i < max_len; i++) {
+        if (s[i] == '\0') {
+            break;
+        }
+    }
+    return i;
+}
+
 #ifndef VBOX
 time_t mktimegm(struct tm *tm)
…
 }

+void qemu_iovec_init_external(QEMUIOVector *qiov, struct iovec *iov, int niov)
+{
+    int i;
+
+    qiov->iov = iov;
+    qiov->niov = niov;
+    qiov->nalloc = -1;
+    qiov->size = 0;
+    for (i = 0; i < niov; i++)
+        qiov->size += iov[i].iov_len;
+}
+
 void qemu_iovec_add(QEMUIOVector *qiov, void *base, size_t len)
 {
+    assert(qiov->nalloc != -1);
+
     if (qiov->niov == qiov->nalloc) {
         qiov->nalloc = 2 * qiov->nalloc + 1;
…
 void qemu_iovec_destroy(QEMUIOVector *qiov)
 {
+    assert(qiov->nalloc != -1);
+
     qemu_free(qiov->iov);
 }
…
 void qemu_iovec_reset(QEMUIOVector *qiov)
 {
+    assert(qiov->nalloc != -1);
+
     qiov->niov = 0;
     qiov->size = 0;
…
     }
 }
+
 #endif /* !VBOX */
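Note: qemu_strnlen() is added verbatim above; the snippet below just exercises the same loop standalone. The nalloc == -1 sentinel stored by qemu_iovec_init_external() marks an iovec array the caller owns, and the new assert() calls keep such vectors out of the grow/reset/free paths meant for internally allocated ones.

    #include <stdio.h>

    /* Same body as the qemu_strnlen() added above. */
    static int qemu_strnlen_demo(const char *s, int max_len)
    {
        int i;
        for (i = 0; i < max_len; i++) {
            if (s[i] == '\0')
                break;
        }
        return i;
    }

    int main(void)
    {
        char fixed[8] = { 'v', 'b', 'o', 'x', 0, 0, 0, 0 };
        printf("%d %d\n",
               qemu_strnlen_demo(fixed, 8),         /* 4: stops at the NUL */
               qemu_strnlen_demo("recompiler", 5)); /* 5: stops at max_len */
        return 0;
    }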
trunk/src/recompiler/disas.h
r36170 → r36175:

 #ifndef _QEMU_DISAS_H
 #define _QEMU_DISAS_H
+
+#include "qemu-common.h"

 /* Disassemble this for me please... (debugging). */
 void disas(FILE *out, void *code, unsigned long size);
 void target_disas(FILE *out, target_ulong code, target_ulong size, int flags);
-void monitor_disas(CPUState *env,
+
+#ifndef VBOX
+/* The usual mess... FIXME: Remove this condition once dyngen-exec.h is gone */
+#ifndef __DYNGEN_EXEC_H__
+void monitor_disas(Monitor *mon, CPUState *env,
                    target_ulong pc, int nb_insn, int is_physical, int flags);
+#endif
+#endif

 /* Look up symbol for debugging purpose.  Returns "" if unknown. */
trunk/src/recompiler/dyngen-exec.h
r36170 → r36175:

 * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
…
    host headers do not allow that. */
 #include <stddef.h>
+#include <stdint.h>

 #ifndef VBOX
…
 #ifdef __OpenBSD__
 #include <sys/types.h>
-#else
-typedef unsigned char uint8_t;
-typedef unsigned short uint16_t;
-typedef unsigned int uint32_t;
-// Linux/Sparc64 defines uint64_t
-#if !(defined (__sparc_v9__) && defined(__linux__)) && !(defined(__APPLE__) && defined(__x86_64__))
-/* XXX may be done for all 64 bits targets ? */
-#if defined (__x86_64__) || defined(__ia64) || defined(__s390x__) || defined(__alpha__) || defined(_ARCH_PPC64)
-typedef unsigned long uint64_t;
-#else
-typedef unsigned long long uint64_t;
-#endif
-#endif
-
-/* if Solaris/__sun__, don't typedef int8_t, as it will be typedef'd
-   prior to this and will cause an error in compliation, conflicting
-   with /usr/include/sys/int_types.h, line 75 */
-#ifndef __sun__
-typedef signed char int8_t;
-#endif
-typedef signed short int16_t;
-typedef signed int int32_t;
-// Linux/Sparc64 defines int64_t
-#if !(defined (__sparc_v9__) && defined(__linux__)) && !(defined(__APPLE__) && defined(__x86_64__))
-#if defined (__x86_64__) || defined(__ia64) || defined(__s390x__) || defined(__alpha__) || defined(_ARCH_PPC64)
-typedef signed long int64_t;
-#else
-typedef signed long long int64_t;
-#endif
-#endif
 #endif
…
 typedef void * host_reg_t;

-#define INT8_MIN   (-128)
-#define INT16_MIN  (-32767-1)
-#define INT32_MIN  (-2147483647-1)
-#define INT64_MIN  (-(int64_t)(9223372036854775807)-1)
-#define INT8_MAX   (127)
-#define INT16_MAX  (32767)
-#define INT32_MAX  (2147483647)
-#define INT64_MAX  ((int64_t)(9223372036854775807))
-#define UINT8_MAX  (255)
-#define UINT16_MAX (65535)
-#define UINT32_MAX (4294967295U)
-#define UINT64_MAX ((uint64_t)(18446744073709551615))
-
-#ifdef _BSD
+#ifdef HOST_BSD
 typedef struct __sFILE FILE;
 #else
…
 #define AREG1 "ebx"
 #define AREG2 "esi"
-#define AREG3 "edi"
 #else /* VBOX - why are we different? */
 # define AREG0 "esi"
…
 #define AREG1 "r15"
 #define AREG2 "r12"
-#define AREG3 "r13"
-//#define AREG4 "rbp"
-//#define AREG5 "rbx"
 #elif defined(_ARCH_PPC)
 #define AREG0 "r27"
 #define AREG1 "r24"
 #define AREG2 "r25"
-#define AREG3 "r26"
-/* XXX: suppress this hack */
-#if defined(CONFIG_USER_ONLY)
-#define AREG4 "r16"
-#define AREG5 "r17"
-#define AREG6 "r18"
-#define AREG7 "r19"
-#define AREG8 "r20"
-#define AREG9 "r21"
-#define AREG10 "r22"
-#define AREG11 "r23"
-#endif
 #elif defined(__arm__)
 #define AREG0 "r7"
 #define AREG1 "r4"
 #define AREG2 "r5"
-#define AREG3 "r6"
 #elif defined(__hppa__)
 #define AREG0 "r17"
 #define AREG1 "r14"
 #define AREG2 "r15"
-#define AREG3 "r16"
 #elif defined(__mips__)
 #define AREG0 "fp"
 #define AREG1 "s0"
 #define AREG2 "s1"
-#define AREG3 "s2"
-#define AREG4 "s3"
-#define AREG5 "s4"
-#define AREG6 "s5"
-#define AREG7 "s6"
-#define AREG8 "s7"
 #elif defined(__sparc__)
 #ifdef HOST_SOLARIS
…
 #define AREG1 "g3"
 #define AREG2 "g4"
-#define AREG3 "g5"
-#define AREG4 "g6"
 #else
 #ifdef __sparc_v9__
…
 #define AREG1 "g1"
 #define AREG2 "g2"
-#define AREG3 "g3"
-#define AREG4 "l0"
-#define AREG5 "l1"
-#define AREG6 "l2"
-#define AREG7 "l3"
-#define AREG8 "l4"
-#define AREG9 "l5"
-#define AREG10 "l6"
-#define AREG11 "l7"
 #endif
 #endif
…
 #define AREG1 "r7"
 #define AREG2 "r8"
-#define AREG3 "r9"
 #elif defined(__alpha__)
 /* Note $15 is the frame pointer, so anything in op-i386.c that would
…
 #define AREG1 "$9"
 #define AREG2 "$10"
-#define AREG3 "$11"
-#define AREG4 "$12"
-#define AREG5 "$13"
-#define AREG6 "$14"
 #elif defined(__mc68000)
 #define AREG0 "%a5"
 #define AREG1 "%a4"
 #define AREG2 "%d7"
-#define AREG3 "%d6"
-#define AREG4 "%d5"
 #elif defined(__ia64__)
 #define AREG0 "r7"
 #define AREG1 "r4"
 #define AREG2 "r5"
-#define AREG3 "r6"
 #else
 #error unsupported CPU
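Note: two independent cleanups land here: the hand-rolled fixed-width typedefs and INT*/UINT* limit macros give way to the <stdint.h> include added above, and every host architecture now reserves only AREG0..AREG2 instead of the longer AREG3+ lists. A trivial check that the standard header supplies the old names (our example, nothing changeset-specific):

    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        /* Formerly hand-defined in dyngen-exec.h, now from <stdint.h>. */
        printf("UINT64_MAX = %" PRIu64 "\n", UINT64_MAX);
        printf("INT32_MIN  = %" PRId32 "\n", INT32_MIN);
        return 0;
    }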
trunk/src/recompiler/elf.h
r36140 → r36175:

 */
 #define EM_S390_OLD     0xA390
+
+#define EM_XILINX_MICROBLAZE  0xBAAB

 /* This is the info that is needed to parse the dynamic section of the file */
…
 #define EI_DATA     5
 #define EI_VERSION  6
-#define EI_PAD      7
+#define EI_OSABI    7
+#define EI_PAD      8
+
+#define ELFOSABI_NONE           0       /* UNIX System V ABI */
+#define ELFOSABI_SYSV           0       /* Alias.  */
+#define ELFOSABI_HPUX           1       /* HP-UX */
+#define ELFOSABI_NETBSD         2       /* NetBSD.  */
+#define ELFOSABI_LINUX          3       /* Linux.  */
+#define ELFOSABI_SOLARIS        6       /* Sun Solaris.  */
+#define ELFOSABI_AIX            7       /* IBM AIX.  */
+#define ELFOSABI_IRIX           8       /* SGI Irix.  */
+#define ELFOSABI_FREEBSD        9       /* FreeBSD.  */
+#define ELFOSABI_TRU64          10      /* Compaq TRU64 UNIX.  */
+#define ELFOSABI_MODESTO        11      /* Novell Modesto.  */
+#define ELFOSABI_OPENBSD        12      /* OpenBSD.  */
+#define ELFOSABI_ARM            97      /* ARM */
+#define ELFOSABI_STANDALONE     255     /* Standalone (embedded) application */

 #define ELFMAG0    0x7f /* EI_MAG */
…
 #define NT_PRPSINFO    3
 #define NT_TASKSTRUCT  4
+#define NT_AUXV        6
 #define NT_PRXFPREG    0x46e62b7f /* copied from gdb5.1/include/elf/common.h */
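Note: e_ident byte 7 becomes EI_OSABI (EI_PAD moves to index 8), with the standard ELFOSABI_* values added alongside. A minimal sketch of classifying an ELF header by that byte; the values are copied from the hunk and the helper is ours:

    #include <stdint.h>
    #include <stdio.h>

    #define EI_OSABI         7
    #define ELFOSABI_LINUX   3
    #define ELFOSABI_FREEBSD 9

    /* e_ident[] is the first 16 bytes of every ELF header. */
    static const char *elf_osabi_name(const uint8_t e_ident[16])
    {
        switch (e_ident[EI_OSABI]) {
        case ELFOSABI_LINUX:   return "Linux";
        case ELFOSABI_FREEBSD: return "FreeBSD";
        default:               return "other/none";
        }
    }

    int main(void)
    {
        uint8_t ident[16] = { 0x7f, 'E', 'L', 'F' };
        ident[EI_OSABI] = ELFOSABI_LINUX;
        printf("%s\n", elf_osabi_name(ident)); /* "Linux" */
        return 0;
    }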
trunk/src/recompiler/exec-all.h
r36170 → r36175:

 * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
…
 /* XXX: make safe guess about sizes */
-#define MAX_OP_PER_INSTR 64
+#define MAX_OP_PER_INSTR 96
 /* A Call op needs up to 6 + 2N parameters (N = number of arguments). */
 #define MAX_OPC_PARAM 10
…
     /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
-    *(uint32_t *)jmp_addr |= ((addr - (jmp_addr + 8)) >> 2) & 0xffffff;
+    *(uint32_t *)jmp_addr =
+        (*(uint32_t *)jmp_addr & ~0xffffff)
+        | (((addr - (jmp_addr + 8)) >> 2) & 0xffffff);

 #if QEMU_GNUC_PREREQ(4, 1)
…
 {
     int mmu_idx, page_index, pd;
+    void *p;

     page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
…
     return addr + env1->phys_addends[mmu_idx][page_index];
 # else
-    return addr + env1->tlb_table[mmu_idx][page_index].addend - (unsigned long)phys_ram_base;
+    p = (void *)(unsigned long)addr
+        + env1->tlb_table[mmu_idx][page_index].addend;
+    return qemu_ram_addr_from_host(p);
 # endif
 }
…
 #endif

-#ifdef USE_KQEMU
+#ifdef CONFIG_KQEMU
 #define KQEMU_MODIFY_PAGE_MASK (0xff & ~(VGA_DIRTY_FLAG | CODE_DIRTY_FLAG))
…
 extern uint32_t kqemu_comm_base;

+extern ram_addr_t kqemu_phys_ram_size;
+extern uint8_t *kqemu_phys_ram_base;
+
 static inline int kqemu_is_ok(CPUState *env)
 {
…
 CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler);
-#endif
+
+/* vl.c */
+#ifndef VBOX
+extern int singlestep;
+#endif
+
+#endif
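Note: the ARM tb_set_jmp_target change fixes re-patching of chained translation blocks: or-ing with "|=" only accumulates bits into the B/BL instruction's 24-bit offset field, so a block patched a second time kept stale offset bits; masking the field first makes the patch idempotent. A standalone illustration of the difference (the instruction word and addresses are made up for the demo):

    #include <stdint.h>
    #include <stdio.h>

    static void patch_or(uint32_t *insn, uint32_t jmp_addr, uint32_t addr)
    {
        /* old behaviour: accumulate into the low 24 bits */
        *insn |= ((addr - (jmp_addr + 8)) >> 2) & 0xffffff;
    }

    static void patch_masked(uint32_t *insn, uint32_t jmp_addr, uint32_t addr)
    {
        /* new behaviour: clear the offset field, then insert */
        *insn = (*insn & ~0xffffffu)
              | (((addr - (jmp_addr + 8)) >> 2) & 0xffffff);
    }

    int main(void)
    {
        uint32_t a = 0xea000000, b = 0xea000000; /* ARM B, zeroed offset */
        /* re-target the same branch twice, as TB chaining does */
        patch_or(&a, 0x1000, 0x2000);     patch_or(&a, 0x1000, 0x1100);
        patch_masked(&b, 0x1000, 0x2000); patch_masked(&b, 0x1000, 0x1100);
        printf("or: %08x  masked: %08x\n", (unsigned)a, (unsigned)b);
        /* only "masked" ends up encoding the final 0x1100 target */
        return 0;
    }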
trunk/src/recompiler/exec.c
r36171 → r36175:

 * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
…
 #ifndef VBOX
 #ifdef _WIN32
-#define WIN32_LEAN_AND_MEAN
 #include <windows.h>
 #else
…
 #define SMC_BITMAP_USE_THRESHOLD 10

-#define MMAP_AREA_START        0x00000000
-#define MMAP_AREA_END          0xa8000000
-
 #if defined(TARGET_SPARC64)
 #define TARGET_PHYS_ADDR_SPACE_BITS 41
…
 #elif defined(TARGET_PPC64)
 #define TARGET_PHYS_ADDR_SPACE_BITS 42
-#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
+#elif defined(TARGET_X86_64) && !defined(CONFIG_KQEMU)
 #define TARGET_PHYS_ADDR_SPACE_BITS 42
-#elif defined(TARGET_I386) && !defined(USE_KQEMU)
+#elif defined(TARGET_I386) && !defined(CONFIG_KQEMU)
 #define TARGET_PHYS_ADDR_SPACE_BITS 36
 #else
…
     __attribute__((__section__(".gen_code"))) \
     __attribute__((aligned (32)))
+#elif defined(_WIN32)
+/* Maximum alignment for Win32 is 16. */
+#define code_gen_section                                \
+    __attribute__((aligned (16)))
 #else
 #define code_gen_section                                \
…
 #ifndef VBOX
 #if !defined(CONFIG_USER_ONLY)
-ram_addr_t phys_ram_size;
 int phys_ram_fd;
-uint8_t *phys_ram_base;
 uint8_t *phys_ram_dirty;
 static int in_migration;
-static ram_addr_t phys_ram_alloc_offset = 0;
+
+typedef struct RAMBlock {
+    uint8_t *host;
+    ram_addr_t offset;
+    ram_addr_t length;
+    struct RAMBlock *next;
+} RAMBlock;
+
+static RAMBlock *ram_blocks;
+/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
+   then we can no longer assume contiguous ram offsets, and external uses
+   of this variable will break.  */
+ram_addr_t last_ram_offset;
 #endif
 #else /* VBOX */
-RTGCPHYS phys_ram_size;
 /* we have memory ranges (the high PC-BIOS mapping) which
    causes some pages to fall outside the dirty map here. */
…
 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
-char io_mem_used[IO_MEM_NB_ENTRIES];
+static char io_mem_used[IO_MEM_NB_ENTRIES];
 static int io_mem_watch;
 #endif
…
         size_t len = sizeof(PageDesc) * L2_SIZE;
         /* Don't use qemu_malloc because it may recurse.  */
-        p = mmap(0, len, PROT_READ | PROT_WRITE,
+        p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
         *lp = p;
…
     p = *lp;
-    if (!p)
-        return 0;
+    if (!p) {
+        return NULL;
+    }
     return p + (index & (L2_SIZE - 1));
 }
…
 #if defined(CONFIG_USER_ONLY)
-/* Currently it is not recommanded to allocate big chunks of data in
+/* Currently it is not recommended to allocate big chunks of data in
    user mode. It will change when a dedicated libc will be used */
 #define USE_STATIC_CODE_GEN_BUFFER
 #endif

-/* VBox allocates codegen buffer dynamically */
-#ifndef VBOX
+#if defined(VBOX) && defined(USE_STATIC_CODE_GEN_BUFFER)
+# error "VBox allocates codegen buffer dynamically"
+#endif
+
 #ifdef USE_STATIC_CODE_GEN_BUFFER
 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
-#endif
 #endif
…
     map_exec(code_gen_buffer, code_gen_buffer_size);
 #else
 # ifdef VBOX
     /* We cannot use phys_ram_size here, as it's 0 now,
      * it only gets initialized once RAM registration callback
      */
     code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
-# else
+# else  /* !VBOX */
     code_gen_buffer_size = tb_size;
     if (code_gen_buffer_size == 0) {
…
     if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
         code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
-# endif /* VBOX */
+# endif /* !VBOX */
     /* The code gen buffer location may have constraints depending on
        the host cpu and OS */
 # ifdef VBOX
     code_gen_buffer = RTMemExecAlloc(code_gen_buffer_size);
…
         return;
     }
 # else /* !VBOX */
 #if defined(__linux__)
     {
…
-#elif defined(__FreeBSD__)
+#elif defined(__FreeBSD__) || defined(__DragonFly__)
     {
         int flags;
…
     map_exec(code_gen_buffer, code_gen_buffer_size);
 #endif
 # endif /* !VBOX */
 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
 #ifndef VBOX
…
     CPUState *env = opaque;

+    cpu_synchronize_state(env, 0);
+
     qemu_put_be32s(f, &env->halted);
     qemu_put_be32s(f, &env->interrupt_request);
…
     qemu_get_be32s(f, &env->halted);
     qemu_get_be32s(f, &env->interrupt_request);
-    env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
+    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
+       version_id is increased. */
+    env->interrupt_request &= ~0x01;
     tlb_flush(env, 1);
+    cpu_synchronize_state(env, 1);

     return 0;
 }
 #endif
+
+CPUState *qemu_get_cpu(int cpu)
+{
+    CPUState *env = first_cpu;
+
+    while (env) {
+        if (env->cpu_index == cpu)
+            break;
+        env = env->next_cpu;
+    }
+
+    return env;
+}
+
 #endif /* !VBOX */
…
     int cpu_index;

+#if defined(CONFIG_USER_ONLY)
+    cpu_list_lock();
+#endif
     env->next_cpu = NULL;
     penv = &first_cpu;
     cpu_index = 0;
     while (*penv != NULL) {
-        penv = (CPUState **)&(*penv)->next_cpu;
+        penv = &(*penv)->next_cpu;
         cpu_index++;
     }
     env->cpu_index = cpu_index;
+    env->numa_node = 0;
     TAILQ_INIT(&env->breakpoints);
     TAILQ_INIT(&env->watchpoints);
     *penv = env;
 #ifndef VBOX
+#if defined(CONFIG_USER_ONLY)
+    cpu_list_unlock();
+#endif
 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
     register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
…
         if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
               address >= tb->pc + tb->size)) {
-            printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
+            printf("ERROR invalidate: address=" TARGET_FMT_lx
+                   " PC=%08lx size=%04x\n",
                    address, (long)tb->pc, tb->size);
         }
…
         }
     }
 }
-
-static void tb_jmp_check(TranslationBlock *tb)
-{
-    TranslationBlock *tb1;
-    unsigned int n1;
-
-    /* suppress any remaining jumps to this TB */
-    tb1 = tb->jmp_first;
-    for(;;) {
-        n1 = (long)tb1 & 3;
-        tb1 = (TranslationBlock *)((long)tb1 & ~3);
-        if (n1 == 2)
-            break;
-        tb1 = tb1->jmp_next[n1];
-    }
-    /* check end of list */
-    if (tb1 != tb) {
-        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
-    }
-}
…
     tb_phys_invalidate_count++;
 }

 #ifdef VBOX
…
     if (env->singlestep_enabled != enabled) {
         env->singlestep_enabled = enabled;
-        /* must flush all the translated code to avoid inconsistancies */
-        /* XXX: only flush what is necessary */
-        tb_flush(env);
+        if (kvm_enabled())
+            kvm_update_guest_debug(env, 0);
+        else {
+            /* must flush all the translated code to avoid inconsistencies */
+            /* XXX: only flush what is necessary */
+            tb_flush(env);
+        }
     }
 #endif
…
 #ifndef VBOX
+
 /* enable or disable low levels log */
 void cpu_set_log(int log_flags)
…
     cpu_set_log(loglevel);
 }
+
 #endif /* !VBOX */

-/* mask must never be zero, except for A20 change call */
-void cpu_interrupt(CPUState *env, int mask)
-{
-#if !defined(USE_NPTL)
-    TranslationBlock *tb;
-    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
-#endif
-    int old_mask;
-
-    if (mask & CPU_INTERRUPT_EXIT) {
-        env->exit_request = 1;
-        mask &= ~CPU_INTERRUPT_EXIT;
-    }
-
-    old_mask = env->interrupt_request;
-#ifdef VBOX
-    VM_ASSERT_EMT(env->pVM);
-    ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, mask);
-#else /* !VBOX */
-    /* FIXME: This is probably not threadsafe. A different thread could
-       be in the middle of a read-modify-write operation. */
-    env->interrupt_request |= mask;
-#endif /* !VBOX */
+static void cpu_unlink_tb(CPUState *env)
+{
 #if defined(USE_NPTL)
     /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
        problem and hope the cpu will stop of its own accord.  For userspace
        emulation this often isn't actually as bad as it sounds.  Often
        signals are used primarily to interrupt blocking syscalls.  */
 #else
+    TranslationBlock *tb;
+    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
+
+    tb = env->current_tb;
+    /* if the cpu is currently executing code, we must unlink it and
+       all the potentially executing TB */
+    if (tb && !testandset(&interrupt_lock)) {
+        env->current_tb = NULL;
+        tb_reset_jump_recursive(tb);
+        resetlock(&interrupt_lock);
+    }
+#endif
+}
+
+/* mask must never be zero, except for A20 change call */
+void cpu_interrupt(CPUState *env, int mask)
+{
+    int old_mask;
+
+    old_mask = env->interrupt_request;
+#ifndef VBOX
+    env->interrupt_request |= mask;
+#else  /* VBOX */
+    VM_ASSERT_EMT(env->pVM);
+    ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, mask);
+#endif /* VBOX */
+
+#ifndef VBOX
+#ifndef CONFIG_USER_ONLY
+    /*
+     * If called from iothread context, wake the target cpu in
+     * case its halted.
+     */
+    if (!qemu_cpu_self(env)) {
+        qemu_cpu_kick(env);
+        return;
+    }
+#endif
+#endif /* !VBOX */
+
     if (use_icount) {
         env->icount_decr.u16.high = 0xffff;
…
 #endif
     } else {
-        tb = env->current_tb;
-        /* if the cpu is currently executing code, we must unlink it and
-           all the potentially executing TB */
-        if (tb && !testandset(&interrupt_lock)) {
-            env->current_tb = NULL;
-            tb_reset_jump_recursive(tb);
-            resetlock(&interrupt_lock);
-        }
-    }
-#endif
+        cpu_unlink_tb(env);
+    }
 }
…
     env->interrupt_request &= ~mask;
 #endif /* !VBOX */
+}
+
+void cpu_exit(CPUState *env)
+{
+    env->exit_request = 1;
+    cpu_unlink_tb(env);
 }
…
-#ifdef VBOX
 static CPUTLBEntry s_cputlb_empty_entry = {
     .addr_read  = -1,
     .addr_write = -1,
     .addr_code  = -1,
     .addend     = -1,
 };
-#endif /* VBOX */

 /* NOTE: if flush_global is true, also flush global entries (not
…
     for(i = 0; i < CPU_TLB_SIZE; i++) {
-#ifdef VBOX
         int mmu_idx;
         for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
             env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
         }
-#else  /* !VBOX */
-        env->tlb_table[0][i].addr_read = -1;
-        env->tlb_table[0][i].addr_write = -1;
-        env->tlb_table[0][i].addr_code = -1;
-        env->tlb_table[1][i].addr_read = -1;
-        env->tlb_table[1][i].addr_write = -1;
-        env->tlb_table[1][i].addr_code = -1;
-#if (NB_MMU_MODES >= 3)
-        env->tlb_table[2][i].addr_read = -1;
-        env->tlb_table[2][i].addr_write = -1;
-        env->tlb_table[2][i].addr_code = -1;
-#if (NB_MMU_MODES == 4)
-        env->tlb_table[3][i].addr_read = -1;
-        env->tlb_table[3][i].addr_write = -1;
-        env->tlb_table[3][i].addr_code = -1;
-#endif
-#endif
-#endif /* !VBOX */
     }

     memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

+#ifdef CONFIG_KQEMU
+    if (env->kqemu_enabled) {
+        kqemu_flush(env, flush_global);
+    }
+#endif
 #ifdef VBOX
     /* inform raw mode about TLB flush */
     remR3FlushTLB(env, flush_global);
 #endif
-#ifdef USE_KQEMU
-    if (env->kqemu_enabled) {
-        kqemu_flush(env, flush_global);
-    }
-#endif
     tlb_flush_count++;
 }
…
         addr == (tlb_entry->addr_code &
                  (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-        tlb_entry->addr_read = -1;
-        tlb_entry->addr_write = -1;
-        tlb_entry->addr_code = -1;
+        *tlb_entry = s_cputlb_empty_entry;
     }
 }
…
 {
     int i;
+    int mmu_idx;

 #if defined(DEBUG_TLB)
…
     addr &= TARGET_PAGE_MASK;
     i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-    tlb_flush_entry(&env->tlb_table[0][i], addr);
-    tlb_flush_entry(&env->tlb_table[1][i], addr);
-#if (NB_MMU_MODES >= 3)
-    tlb_flush_entry(&env->tlb_table[2][i], addr);
-#if (NB_MMU_MODES == 4)
-    tlb_flush_entry(&env->tlb_table[3][i], addr);
-#endif
-#endif
+    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
+        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

     tlb_flush_jmp_cache(env, addr);

-#ifdef USE_KQEMU
+#ifdef CONFIG_KQEMU
     if (env->kqemu_enabled) {
         kqemu_flush_page(env, addr);
…
 }

+/* Note: start and end must be within the same ram block.  */
 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                      int dirty_flags)
…
         return;
     len = length >> TARGET_PAGE_BITS;
-#ifdef USE_KQEMU
+#ifdef CONFIG_KQEMU
     /* XXX: should not depend on cpu context */
     env = first_cpu;
…
     start1 = start;
 #elif !defined(VBOX)
-    start1 = start + (unsigned long)phys_ram_base;
+    start1 = (unsigned long)qemu_get_ram_ptr(start);
+    /* Chek that we don't span multiple blocks - this breaks the
+       address comparisons below.  */
+    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
+            != (end - 1) - start) {
+        abort();
+    }
 #else
     start1 = (unsigned long)remR3TlbGCPhys2Ptr(first_cpu, start, 1 /*fWritable*/); /** @todo page replacing (sharing or read only) may cause trouble, fix interface/whatever. */
 #endif
+
     for(env = first_cpu; env != NULL; env = env->next_cpu) {
-        for(i = 0; i < CPU_TLB_SIZE; i++)
-            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
-        for(i = 0; i < CPU_TLB_SIZE; i++)
-            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
-#if (NB_MMU_MODES >= 3)
-        for(i = 0; i < CPU_TLB_SIZE; i++)
-            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
-#if (NB_MMU_MODES == 4)
-        for(i = 0; i < CPU_TLB_SIZE; i++)
-            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
-#endif
-#endif
+        int mmu_idx;
+        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
+            for(i = 0; i < CPU_TLB_SIZE; i++)
+                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
+                                      start1, length);
+        }
     }
 }
…
 {
     in_migration = enable;
+    if (kvm_enabled()) {
+        return kvm_set_migration_log(enable);
+    }
     return 0;
 }
…
 #endif /* !VBOX */

-void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
-{
+int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
+                                   target_phys_addr_t end_addr)
+{
+    int ret = 0;
+
     if (kvm_enabled())
-        kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
+        ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
+    return ret;
 }
…
 {
     ram_addr_t ram_addr;
+    void *p;

     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
-        /* RAM case */
 #if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
         ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
 #elif !defined(VBOX)
-        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
-            tlb_entry->addend - (unsigned long)phys_ram_base;
+        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
+            + tlb_entry->addend);
+        ram_addr = qemu_ram_addr_from_host(p);
 #else
         Assert(phys_addend != -1);
…
 {
     int i;
+    int mmu_idx;
+    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
+        for(i = 0; i < CPU_TLB_SIZE; i++)
 #if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
-    for(i = 0; i < CPU_TLB_SIZE; i++)
-        tlb_update_dirty(&env->tlb_table[0][i], env->phys_addends[0][i]);
-    for(i = 0; i < CPU_TLB_SIZE; i++)
-        tlb_update_dirty(&env->tlb_table[1][i], env->phys_addends[1][i]);
-# if (NB_MMU_MODES >= 3)
-    for(i = 0; i < CPU_TLB_SIZE; i++)
-        tlb_update_dirty(&env->tlb_table[2][i], env->phys_addends[2][i]);
-# if (NB_MMU_MODES == 4)
-    for(i = 0; i < CPU_TLB_SIZE; i++)
-        tlb_update_dirty(&env->tlb_table[3][i], env->phys_addends[3][i]);
-# endif
-# endif
-#else /* VBOX */
-    for(i = 0; i < CPU_TLB_SIZE; i++)
-        tlb_update_dirty(&env->tlb_table[0][i]);
-    for(i = 0; i < CPU_TLB_SIZE; i++)
-        tlb_update_dirty(&env->tlb_table[1][i]);
-#if (NB_MMU_MODES >= 3)
-    for(i = 0; i < CPU_TLB_SIZE; i++)
-        tlb_update_dirty(&env->tlb_table[2][i]);
-#if (NB_MMU_MODES == 4)
-    for(i = 0; i < CPU_TLB_SIZE; i++)
-        tlb_update_dirty(&env->tlb_table[3][i]);
-#endif
-#endif
-#endif /* VBOX */
+            tlb_update_dirty(&env->tlb_table[mmu_idx][i], env->phys_addends[mmu_idx][i]);
+#else
+            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
+#endif
+    }
 }
…
 {
     int i;
+    int mmu_idx;

     vaddr &= TARGET_PAGE_MASK;
     i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
-    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
-#if (NB_MMU_MODES >= 3)
-    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
-#if (NB_MMU_MODES == 4)
-    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
-#endif
-#endif
+    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
+        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
 }
…
         addend = pd & TARGET_PAGE_MASK;
 #elif !defined(VBOX)
-        addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
+        addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
 #else
         /** @todo this is racing the phys_page_find call above since it may register
          *        a new chunk of memory...  */
-        addend = (unsigned long)remR3TlbGCPhys2Ptr(env,
-                                                   pd & TARGET_PAGE_MASK,
-                                                   !!(prot & PAGE_WRITE));
+        addend = (unsigned long)remR3TlbGCPhys2Ptr(env, pd & TARGET_PAGE_MASK, !!(prot & PAGE_WRITE));
 #endif
…
             iotlb |= IO_MEM_ROM;
     } else {
-        /* IO handlers are currently passed a phsical address.
+        /* IO handlers are currently passed a physical address.
            It would be nice to pass an offset from the base address
            of that region.  This would avoid having to special case RAM,
…
     code_address = address;
-
 #if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
+
     if (addend & 0x3)
     {
…
         addend &= ~(target_ulong)0x3;
     }
-#endif
-
+
+#endif
     /* Make accesses to pages with watchpoints go via the
        watchpoint trap routines.  */
…
 #ifndef VBOX
-/* dump memory mappings */
-void page_dump(FILE *f)
+
+/*
+ * Walks guest process memory "regions" one by one
+ * and calls callback function 'fn' for each region.
+ */
+int walk_memory_regions(void *priv,
+    int (*fn)(void *, unsigned long, unsigned long, unsigned long))
 {
     unsigned long start, end;
+    PageDesc *p = NULL;
     int i, j, prot, prot1;
-    PageDesc *p;
-
-    fprintf(f, "%-8s %-8s %-8s %s\n",
-            "start", "end", "size", "prot");
-    start = -1;
-    end = -1;
+    int rc = 0;
+
+    start = end = -1;
     prot = 0;
-    for(i = 0; i <= L1_SIZE; i++) {
-        if (i < L1_SIZE)
-            p = l1_map[i];
-        else
-            p = NULL;
-        for(j = 0;j < L2_SIZE; j++) {
-            if (!p)
-                prot1 = 0;
-            else
-                prot1 = p[j].flags;
+
+    for (i = 0; i <= L1_SIZE; i++) {
+        p = (i < L1_SIZE) ? l1_map[i] : NULL;
+        for (j = 0; j < L2_SIZE; j++) {
+            prot1 = (p == NULL) ? 0 : p[j].flags;
+            /*
+             * "region" is one continuous chunk of memory
+             * that has same protection flags set.
+             */
             if (prot1 != prot) {
                 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                 if (start != -1) {
-                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
-                            start, end, end - start,
-                            prot & PAGE_READ ? 'r' : '-',
-                            prot & PAGE_WRITE ? 'w' : '-',
-                            prot & PAGE_EXEC ? 'x' : '-');
+                    rc = (*fn)(priv, start, end, prot);
+                    /* callback can stop iteration by returning != 0 */
+                    if (rc != 0)
+                        return (rc);
                 }
                 if (prot1 != 0)
…
                 prot = prot1;
             }
-            if (!p)
+            if (p == NULL)
                 break;
         }
     }
-}
+    return (rc);
+}
+
+static int dump_region(void *priv, unsigned long start,
+    unsigned long end, unsigned long prot)
+{
+    FILE *f = (FILE *)priv;
+
+    (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
+        start, end, end - start,
+        ((prot & PAGE_READ) ? 'r' : '-'),
+        ((prot & PAGE_WRITE) ? 'w' : '-'),
+        ((prot & PAGE_EXEC) ? 'x' : '-'));
+
+    return (0);
+}
+
+/* dump memory mappings */
+void page_dump(FILE *f)
+{
+    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
+        "start", "end", "size", "prot");
+    walk_memory_regions(f, dump_region);
+}
+
 #endif /* !VBOX */
…
 /* modify the flags of a page and invalidate the code if
-   necessary. The flag PAGE_WRITE_ORG is positionned automatically
+   necessary. The flag PAGE_WRITE_ORG is positioned automatically
    depending on PAGE_WRITE */
 void page_set_flags(target_ulong start, target_ulong end, int flags)
…
 /* called from signal handler: invalidate the code and unprotect the
-   page. Return TRUE if the fault was succesfully handled. */
+   page. Return TRUE if the fault was successfully handled. */
 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
 {
…
    io memory page.  The address used when calling the IO function is
    the offset from the start of the region, plus region_offset.  Both
-   start_region and regon_offset are rounded down to a page boundary
+   start_addr and region_offset are rounded down to a page boundary
    before calculating this offset.  This should not be a problem unless
    the low bits of start_addr and region_offset differ.
*/ … … 2671 2692 void *subpage; 2672 2693 2673 #ifdef USE_KQEMU2694 #ifdef CONFIG_KQEMU 2674 2695 /* XXX: should not depend on cpu context */ 2675 2696 env = first_cpu; … … 2773 2794 } 2774 2795 2796 #ifdef CONFIG_KQEMU 2775 2797 /* XXX: better than nothing */ 2798 static ram_addr_t kqemu_ram_alloc(ram_addr_t size) 2799 { 2800 ram_addr_t addr; 2801 if ((last_ram_offset + size) > kqemu_phys_ram_size) { 2802 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n", 2803 (uint64_t)size, (uint64_t)kqemu_phys_ram_size); 2804 abort(); 2805 } 2806 addr = last_ram_offset; 2807 last_ram_offset = TARGET_PAGE_ALIGN(last_ram_offset + size); 2808 return addr; 2809 } 2810 #endif 2811 2776 2812 ram_addr_t qemu_ram_alloc(ram_addr_t size) 2777 2813 { 2778 ram_addr_t addr; 2779 if ((phys_ram_alloc_offset + size) > phys_ram_size) { 2780 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n", 2781 (uint64_t)size, (uint64_t)phys_ram_size); 2814 RAMBlock *new_block; 2815 2816 #ifdef CONFIG_KQEMU 2817 if (kqemu_phys_ram_base) { 2818 return kqemu_ram_alloc(size); 2819 } 2820 #endif 2821 2822 size = TARGET_PAGE_ALIGN(size); 2823 new_block = qemu_malloc(sizeof(*new_block)); 2824 2825 new_block->host = qemu_vmalloc(size); 2826 new_block->offset = last_ram_offset; 2827 new_block->length = size; 2828 2829 new_block->next = ram_blocks; 2830 ram_blocks = new_block; 2831 2832 phys_ram_dirty = qemu_realloc(phys_ram_dirty, 2833 (last_ram_offset + size) >> TARGET_PAGE_BITS); 2834 memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS), 2835 0xff, size >> TARGET_PAGE_BITS); 2836 2837 last_ram_offset += size; 2838 2839 if (kvm_enabled()) 2840 kvm_setup_guest_memory(new_block->host, size); 2841 2842 return new_block->offset; 2843 } 2844 2845 void qemu_ram_free(ram_addr_t addr) 2846 { 2847 /* TODO: implement this. */ 2848 } 2849 2850 /* Return a host pointer to ram allocated with qemu_ram_alloc. 2851 With the exception of the softmmu code in this file, this should 2852 only be used for local memory (e.g. video ram) that the device owns, 2853 and knows it isn't going to access beyond the end of the block. 2854 2855 It should not be used for general purpose DMA. 2856 Use cpu_physical_memory_map/cpu_physical_memory_rw instead. 2857 */ 2858 void *qemu_get_ram_ptr(ram_addr_t addr) 2859 { 2860 RAMBlock *prev; 2861 RAMBlock **prevp; 2862 RAMBlock *block; 2863 2864 #ifdef CONFIG_KQEMU 2865 if (kqemu_phys_ram_base) { 2866 return kqemu_phys_ram_base + addr; 2867 } 2868 #endif 2869 2870 prev = NULL; 2871 prevp = &ram_blocks; 2872 block = ram_blocks; 2873 while (block && (block->offset > addr 2874 || block->offset + block->length <= addr)) { 2875 if (prev) 2876 prevp = &prev->next; 2877 prev = block; 2878 block = block->next; 2879 } 2880 if (!block) { 2881 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr); 2782 2882 abort(); 2783 2883 } 2784 addr = phys_ram_alloc_offset; 2785 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size); 2786 return addr; 2787 } 2788 2789 void qemu_ram_free(ram_addr_t addr) 2790 { 2791 } 2884 /* Move this entry to to start of the list. */ 2885 if (prev) { 2886 prev->next = block->next; 2887 block->next = *prevp; 2888 *prevp = block; 2889 } 2890 return block->host + (addr - block->offset); 2891 } 2892 2893 /* Some of the softmmu routines need to translate from a host pointer 2894 (typically a TLB entry) back to a ram offset. 
*/ 2895 ram_addr_t qemu_ram_addr_from_host(void *ptr) 2896 { 2897 RAMBlock *prev; 2898 RAMBlock **prevp; 2899 RAMBlock *block; 2900 uint8_t *host = ptr; 2901 2902 #ifdef CONFIG_KQEMU 2903 if (kqemu_phys_ram_base) { 2904 return host - kqemu_phys_ram_base; 2905 } 2906 #endif 2907 2908 prev = NULL; 2909 prevp = &ram_blocks; 2910 block = ram_blocks; 2911 while (block && (block->host > host 2912 || block->host + block->length <= host)) { 2913 if (prev) 2914 prevp = &prev->next; 2915 prev = block; 2916 block = block->next; 2917 } 2918 if (!block) { 2919 fprintf(stderr, "Bad ram pointer %p\n", ptr); 2920 abort(); 2921 } 2922 return block->offset + (host - block->host); 2923 } 2924 2792 2925 #endif /* !VBOX */ 2793 2926 … … 2891 3024 remR3PhysWriteU8(ram_addr, val); 2892 3025 #else 2893 stb_p( phys_ram_base + ram_addr, val);2894 #endif 2895 #ifdef USE_KQEMU3026 stb_p(qemu_get_ram_ptr(ram_addr), val); 3027 #endif 3028 #ifdef CONFIG_KQEMU 2896 3029 if (cpu_single_env->kqemu_enabled && 2897 3030 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK) … … 2933 3066 remR3PhysWriteU16(ram_addr, val); 2934 3067 #else 2935 stw_p( phys_ram_base + ram_addr, val);2936 #endif 2937 #ifdef USE_KQEMU3068 stw_p(qemu_get_ram_ptr(ram_addr), val); 3069 #endif 3070 #ifdef CONFIG_KQEMU 2938 3071 if (cpu_single_env->kqemu_enabled && 2939 3072 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK) … … 2975 3108 remR3PhysWriteU32(ram_addr, val); 2976 3109 #else 2977 stl_p( phys_ram_base + ram_addr, val);2978 #endif 2979 #ifdef USE_KQEMU3110 stl_p(qemu_get_ram_ptr(ram_addr), val); 3111 #endif 3112 #ifdef CONFIG_KQEMU 2980 3113 if (cpu_single_env->kqemu_enabled && 2981 3114 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK) … … 3213 3346 eidx = SUBPAGE_IDX(end); 3214 3347 #if defined(DEBUG_SUBPAGE) 3215 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem % d\n", __func__,3348 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__, 3216 3349 mmio, start, end, idx, eidx, memory); 3217 3350 #endif … … 3244 3377 3245 3378 mmio->base = base; 3246 subpage_memory = cpu_register_io_memory( 0,subpage_read, subpage_write, mmio);3379 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio); 3247 3380 #if defined(DEBUG_SUBPAGE) 3248 3381 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__, … … 3269 3402 } 3270 3403 3271 static void io_mem_init(void)3272 {3273 int i;3274 3275 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);3276 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);3277 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);3278 for (i=0; i<5; i++)3279 io_mem_used[i] = 1;3280 3281 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,3282 watch_mem_write, NULL);3283 3284 #ifndef VBOX /* VBOX: we do this later when the RAM is allocated. */3285 /* alloc dirty bits array */3286 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);3287 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);3288 #endif /* !VBOX */3289 }3290 3291 3404 /* mem_read and mem_write are arrays of functions containing the 3292 3405 function to access byte (index 0), word (index 1) and dword (index 3293 2). Functions can be omitted with a NULL function pointer. The 3294 registered functions may be modified dynamically later. 3406 2). Functions can be omitted with a NULL function pointer. 
3295 3407 If io_index is non zero, the corresponding io zone is 3296 3408 modified. If it is zero, a new io zone is allocated. The return 3297 3409 value can be used with cpu_register_physical_memory(). (-1) is 3298 3410 returned if error. */ 3299 int cpu_register_io_memory(int io_index,3300 CPUReadMemoryFunc **mem_read,3301 CPUWriteMemoryFunc **mem_write,3302 void *opaque)3411 static int cpu_register_io_memory_fixed(int io_index, 3412 CPUReadMemoryFunc **mem_read, 3413 CPUWriteMemoryFunc **mem_write, 3414 void *opaque) 3303 3415 { 3304 3416 int i, subwidth = 0; … … 3309 3421 return io_index; 3310 3422 } else { 3423 io_index >>= IO_MEM_SHIFT; 3311 3424 if (io_index >= IO_MEM_NB_ENTRIES) 3312 3425 return -1; … … 3323 3436 } 3324 3437 3438 int cpu_register_io_memory(CPUReadMemoryFunc **mem_read, 3439 CPUWriteMemoryFunc **mem_write, 3440 void *opaque) 3441 { 3442 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque); 3443 } 3444 3325 3445 void cpu_unregister_io_memory(int io_table_address) 3326 3446 { … … 3336 3456 } 3337 3457 3338 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index) 3339 { 3340 return io_mem_write[io_index >> IO_MEM_SHIFT]; 3341 } 3342 3343 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index) 3344 { 3345 return io_mem_read[io_index >> IO_MEM_SHIFT]; 3458 static void io_mem_init(void) 3459 { 3460 int i; 3461 3462 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL); 3463 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL); 3464 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL); 3465 for (i=0; i<5; i++) 3466 io_mem_used[i] = 1; 3467 3468 io_mem_watch = cpu_register_io_memory(watch_mem_read, 3469 watch_mem_write, NULL); 3470 #ifdef CONFIG_KQEMU 3471 if (kqemu_phys_ram_base) { 3472 /* alloc dirty bits array */ 3473 phys_ram_dirty = qemu_vmalloc(kqemu_phys_ram_size >> TARGET_PAGE_BITS); 3474 memset(phys_ram_dirty, 0xff, kqemu_phys_ram_size >> TARGET_PAGE_BITS); 3475 } 3476 #endif 3346 3477 } 3347 3478 … … 3456 3587 remR3PhysWrite(addr1, buf, l); NOREF(ptr); 3457 3588 #else 3458 ptr = phys_ram_base + addr1;3589 ptr = qemu_get_ram_ptr(addr1); 3459 3590 memcpy(ptr, buf, l); 3460 3591 #endif … … 3511 3642 remR3PhysRead((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), buf, l); NOREF(ptr); 3512 3643 #else 3513 ptr = phys_ram_base +(pd & TARGET_PAGE_MASK) +3644 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) + 3514 3645 (addr & ~TARGET_PAGE_MASK); 3515 3646 memcpy(buf, ptr, l); … … 3555 3686 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK); 3556 3687 /* ROM/RAM case */ 3557 ptr = phys_ram_base + addr1;3688 ptr = qemu_get_ram_ptr(addr1); 3558 3689 memcpy(ptr, buf, l); 3559 3690 } … … 3596 3727 3597 3728 LIST_REMOVE(client, link); 3729 qemu_free(client); 3598 3730 } 3599 3731 … … 3605 3737 client = LIST_FIRST(&map_client_list); 3606 3738 client->callback(client->opaque); 3607 LIST_REMOVE(client, link);3739 cpu_unregister_map_client(client); 3608 3740 } 3609 3741 } … … 3655 3787 } else { 3656 3788 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK); 3657 ptr = phys_ram_base + addr1;3789 ptr = qemu_get_ram_ptr(addr1); 3658 3790 } 3659 3791 if (!done) { … … 3680 3812 if (buffer != bounce.buffer) { 3681 3813 if (is_write) { 3682 unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;3814 ram_addr_t addr1 = qemu_ram_addr_from_host(buffer); 3683 3815 while (access_len) { 3684 3816 unsigned l; … … 3735 3867 /* RAM case */ 3736 3868 
#ifndef VBOX 3737 ptr = phys_ram_base +(pd & TARGET_PAGE_MASK) +3869 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) + 3738 3870 (addr & ~TARGET_PAGE_MASK); 3739 3871 val = ldl_p(ptr); … … 3777 3909 /* RAM case */ 3778 3910 #ifndef VBOX 3779 ptr = phys_ram_base +(pd & TARGET_PAGE_MASK) +3911 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) + 3780 3912 (addr & ~TARGET_PAGE_MASK); 3781 3913 val = ldq_p(ptr); … … 3828 3960 #ifndef VBOX 3829 3961 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK); 3830 ptr = phys_ram_base + addr1;3962 ptr = qemu_get_ram_ptr(addr1); 3831 3963 stl_p(ptr, val); 3832 3964 #else … … 3875 4007 } else { 3876 4008 #ifndef VBOX 3877 ptr = phys_ram_base +(pd & TARGET_PAGE_MASK) +4009 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) + 3878 4010 (addr & ~TARGET_PAGE_MASK); 3879 4011 stq_p(ptr, val); … … 3909 4041 /* RAM case */ 3910 4042 #ifndef VBOX 3911 ptr = phys_ram_base + addr1;4043 ptr = qemu_get_ram_ptr(addr1); 3912 4044 stl_p(ptr, val); 3913 4045 #else … … 3950 4082 #endif 3951 4083 3952 /* virtual memory access for debug */ 4084 #ifndef VBOX 4085 /* virtual memory access for debug (includes writing to ROM) */ 3953 4086 int cpu_memory_rw_debug(CPUState *env, target_ulong addr, 3954 4087 uint8_t *buf, int len, int is_write) … … 3967 4100 if (l > len) 3968 4101 l = len; 3969 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK), 3970 buf, l, is_write); 4102 phys_addr += (addr & ~TARGET_PAGE_MASK); 4103 #if !defined(CONFIG_USER_ONLY) 4104 if (is_write) 4105 cpu_physical_memory_write_rom(phys_addr, buf, l); 4106 else 4107 #endif 4108 cpu_physical_memory_rw(phys_addr, buf, l, is_write); 3971 4109 len -= l; 3972 4110 buf += l; … … 3975 4113 return 0; 3976 4114 } 4115 #endif /* !VBOX */ 3977 4116 3978 4117 /* in deterministic execution mode, instructions doing device I/Os -
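The exec.c hunks above retire the flat phys_ram_base/phys_ram_size arena in favour of a linked list of RAMBlock descriptors: qemu_ram_alloc() prepends a block, qemu_get_ram_ptr() maps a ram offset to a host pointer, and qemu_ram_addr_from_host() inverts that, with the matched block moved to the head of the list as a one-entry cache. Below is a minimal, self-contained sketch of that lookup; the harness, the plain malloc()/calloc(), and the simplified move-to-front are ours for illustration, not the tree's code.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef uintptr_t ram_addr_t;

typedef struct RAMBlock {
    uint8_t *host;             /* host virtual base of this block */
    ram_addr_t offset;         /* guest "ram offset" where it starts */
    ram_addr_t length;
    struct RAMBlock *next;
} RAMBlock;

static RAMBlock *ram_blocks;
static ram_addr_t last_ram_offset;

/* Mirrors qemu_ram_alloc() minus the dirty-bitmap bookkeeping. */
static ram_addr_t ram_alloc(ram_addr_t size)
{
    RAMBlock *nb = malloc(sizeof(*nb));
    nb->host = calloc(size, 1);
    nb->offset = last_ram_offset;
    nb->length = size;
    nb->next = ram_blocks;
    ram_blocks = nb;
    last_ram_offset += size;
    return nb->offset;
}

/* Ram offset -> host pointer; the hit block moves to the list head so
   runs of accesses to the same block stay effectively O(1). */
static void *get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *prev = NULL, *block = ram_blocks;

    while (block && (block->offset > addr
                     || block->offset + block->length <= addr)) {
        prev = block;
        block = block->next;
    }
    if (!block)
        abort();               /* bad ram offset, as in the real code */
    if (prev) {                /* move-to-front */
        prev->next = block->next;
        block->next = ram_blocks;
        ram_blocks = block;
    }
    return block->host + (addr - block->offset);
}

int main(void)
{
    ram_addr_t a = ram_alloc(0x4000);
    ram_addr_t b = ram_alloc(0x2000);

    memcpy(get_ram_ptr(b + 0x10), "hi", 3);
    printf("read back: %s (a=%#lx b=%#lx)\n",
           (char *)get_ram_ptr(b + 0x10),
           (unsigned long)a, (unsigned long)b);
    return 0;
}

This is also why cpu_physical_memory_reset_dirty() now aborts when start and end resolve into different blocks: the host-pointer arithmetic it does is only valid inside one block.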
trunk/src/recompiler/fpu/softfloat-macros.h
r36170 r36175 591 591 index = ( a>>27 ) & 15; 592 592 if ( aExp & 1 ) { 593 z = 0x4000 + ( a>>17 ) - sqrtOddAdjustments[ index ];593 z = 0x4000 + ( a>>17 ) - sqrtOddAdjustments[ (int)index ]; 594 594 z = ( ( a / z )<<14 ) + ( z<<15 ); 595 595 a >>= 1; 596 596 } 597 597 else { 598 z = 0x8000 + ( a>>17 ) - sqrtEvenAdjustments[ index ];598 z = 0x8000 + ( a>>17 ) - sqrtEvenAdjustments[ (int)index ]; 599 599 z = a / z + z; 600 600 z = ( 0x20000 <= z ) ? 0xFFFF8000 : ( z<<15 ); -
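The only change in the softfloat-macros.h hunk is an (int) cast on the table subscript. SoftFloat typically declares index with a narrow type such as int8 (a plain char on most hosts), and a char-typed array subscript draws GCC's -Wchar-subscripts warning, so the cast presumably keeps the build warning-clean; the value, always 0..15 here, is unchanged. A hypothetical stand-alone illustration:

#include <stdio.h>

static const int table[16] = { 1, 2, 3, 4 };

static int lookup(char index)       /* 'char' may be signed or unsigned */
{
    /* table[index] would draw -Wchar-subscripts; the cast keeps the
       value and silences the warning. */
    return table[(int)index];
}

int main(void)
{
    printf("%d\n", lookup(3));      /* prints 4 */
    return 0;
}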
trunk/src/recompiler/fpu/softfloat-native.c
r36170 r36175 3 3 #include "softfloat.h" 4 4 #include <math.h> 5 #if defined(HOST_SOLARIS) 6 #include <fenv.h> 7 #endif 5 8 6 9 void set_float_rounding_mode(int val STATUS_PARAM) 7 10 { 8 11 STATUS(float_rounding_mode) = val; 9 #if defined(_BSD) && !defined(__APPLE__) || (defined(HOST_SOLARIS) && (HOST_SOLARIS < 10 || HOST_SOLARIS == 11)) /* VBOX adds sol 11 */ 12 #if defined(HOST_BSD) && !defined(__APPLE__) || \ 13 (defined(HOST_SOLARIS) && (HOST_SOLARIS < 10 || HOST_SOLARIS == 11)) /* VBOX adds sol 11 */ 10 14 fpsetround(val); 11 15 #elif defined(__arm__) … … 23 27 #endif 24 28 25 #if defined( _BSD) || (defined(HOST_SOLARIS) && HOST_SOLARIS < 10)29 #if defined(HOST_BSD) || (defined(HOST_SOLARIS) && HOST_SOLARIS < 10) 26 30 #define lrint(d) ((int32_t)rint(d)) 27 31 #define llrint(d) ((int64_t)rint(d)) … … 32 36 #define rintf(f) ((float)rint(f)) 33 37 /* Some defines which only apply to *BSD */ 34 # if defined(VBOX) && defined( _BSD)38 # if defined(VBOX) && defined(HOST_BSD) 35 39 # define lrintl(f) ((int32_t)rint(f)) 36 40 # define llrintl(f) ((int64_t)rint(f)) -
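softfloat-native.c now pulls in <fenv.h> on Solaris and keys its rounding-mode paths off HOST_BSD instead of the old _BSD macro. On hosts with C99 fenv support, switching the FPU rounding direction boils down to fesetround(); a stand-alone example of that call, using the standard macros rather than QEMU's float_round_* constants (link with -lm on glibc):

#include <fenv.h>
#include <stdio.h>

int main(void)
{
    if (fesetround(FE_DOWNWARD) != 0)     /* round toward -infinity */
        return 1;
    printf("mode now %d (FE_DOWNWARD == %d)\n", fegetround(), FE_DOWNWARD);
    fesetround(FE_TONEAREST);             /* restore the default */
    return 0;
}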
trunk/src/recompiler/fpu/softfloat-native.h
r36170 r36175 21 21 */ 22 22 #if defined(HOST_SOLARIS) && (( HOST_SOLARIS <= 9 ) || ((HOST_SOLARIS >= 10) \ 23 && (__GNUC__ < =4))) \23 && (__GNUC__ < 4))) \ 24 24 || (defined(__OpenBSD__) && (OpenBSD < 200811)) 25 25 /* … … 112 112 | Software IEC/IEEE floating-point rounding mode. 113 113 *----------------------------------------------------------------------------*/ 114 #if (defined( _BSD) && !defined(__APPLE__)) || defined(HOST_SOLARIS)114 #if (defined(HOST_BSD) && !defined(__APPLE__)) || defined(HOST_SOLARIS) 115 115 #if defined(__OpenBSD__) 116 116 #define FE_RM FP_RM -
trunk/src/recompiler/fpu/softfloat-specialize.h
r36170 r36175 167 167 } 168 168 else if ( aIsNaN ) { 169 if ( bIsSignalingNaN | ! bIsNaN )169 if ( bIsSignalingNaN || ! bIsNaN ) 170 170 res = av; 171 171 else { … … 302 302 } 303 303 else if ( aIsNaN ) { 304 if ( bIsSignalingNaN | ! bIsNaN )304 if ( bIsSignalingNaN || ! bIsNaN ) 305 305 res = av; 306 306 else { … … 442 442 } 443 443 else if ( aIsNaN ) { 444 if ( bIsSignalingNaN | ! bIsNaN ) return a;444 if ( bIsSignalingNaN || ! bIsNaN ) return a; 445 445 returnLargerSignificand: 446 446 if ( a.low < b.low ) return b; … … 568 568 } 569 569 else if ( aIsNaN ) { 570 if ( bIsSignalingNaN | ! bIsNaN ) return a;570 if ( bIsSignalingNaN || ! bIsNaN ) return a; 571 571 returnLargerSignificand: 572 572 if ( lt128( a.high<<1, a.low, b.high<<1, b.low ) ) return b; -
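The softfloat-specialize.h hunks swap bitwise | for logical || in the NaN-propagation tests. For these 0/1 flags the two read identically inside an if(), so behaviour is unchanged, but || states the boolean intent and short-circuits the right-hand side. A small demonstration of the evaluation difference (our own, not from the tree):

#include <stdio.h>

static int rhs(void)
{
    puts("rhs evaluated");
    return 1;
}

int main(void)
{
    int a = 1;

    if (a | rhs())      /* bitwise: rhs() always runs */
        puts("bitwise OR taken");
    if (a || rhs())     /* logical: rhs() skipped when a is nonzero */
        puts("logical OR taken");
    return 0;
}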
trunk/src/recompiler/fpu/softfloat.h
r36170 r36175 95 95 #else 96 96 /* native float support */ 97 #if (defined(__i386__) || defined(__x86_64__)) && (!defined( _BSD) || defined(VBOX)) /** @todo VBOX: not correct on windows */97 #if (defined(__i386__) || defined(__x86_64__)) && (!defined(HOST_BSD) || defined(VBOX)) /** @todo VBOX: not correct on windows */ 98 98 #define FLOATX80 99 99 #endif -
trunk/src/recompiler/hostregs_helper.h
r36170 r36175 15 15 * 16 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, write to the Free Software 18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 19 18 */ 20 19 … … 67 66 #endif 68 67 69 #ifdef AREG370 DO_REG(3)71 #endif72 73 #ifdef AREG474 DO_REG(4)75 #endif76 77 #ifdef AREG578 DO_REG(5)79 #endif80 81 #ifdef AREG682 DO_REG(6)83 #endif84 85 #ifdef AREG786 DO_REG(7)87 #endif88 89 #ifdef AREG890 DO_REG(8)91 #endif92 93 #ifdef AREG994 DO_REG(9)95 #endif96 97 #ifdef AREG1098 DO_REG(10)99 #endif100 101 #ifdef AREG11102 DO_REG(11)103 #endif104 105 68 #undef SAVE_HOST_REGS 106 69 #undef DECLARE_HOST_REGS -
trunk/src/recompiler/kvm.h
r36170 r36175 16 16 17 17 #include "config.h" 18 #include "sys-queue.h" 18 19 19 20 #ifdef CONFIG_KVM … … 32 33 33 34 int kvm_init_vcpu(CPUState *env); 34 int kvm_sync_vcpus(void);35 35 36 36 int kvm_cpu_exec(CPUState *env); … … 40 40 ram_addr_t phys_offset); 41 41 42 void kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr); 42 int kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, 43 target_phys_addr_t end_addr); 43 44 44 int kvm_log_start(target_phys_addr_t phys_addr, target_phys_addr_t len); 45 int kvm_log_stop(target_phys_addr_t phys_addr, target_phys_addr_t len); 45 int kvm_log_start(target_phys_addr_t phys_addr, ram_addr_t size); 46 int kvm_log_stop(target_phys_addr_t phys_addr, ram_addr_t size); 47 int kvm_set_migration_log(int enable); 46 48 47 49 int kvm_has_sync_mmu(void); 48 50 51 void kvm_setup_guest_memory(void *start, size_t size); 52 49 53 int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size); 50 54 int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size); 55 56 int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr, 57 target_ulong len, int type); 58 int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr, 59 target_ulong len, int type); 60 void kvm_remove_all_breakpoints(CPUState *current_env); 61 int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap); 51 62 52 63 /* internal API */ … … 77 88 int kvm_arch_init_vcpu(CPUState *env); 78 89 90 struct kvm_guest_debug; 91 struct kvm_debug_exit_arch; 92 93 struct kvm_sw_breakpoint { 94 target_ulong pc; 95 target_ulong saved_insn; 96 int use_count; 97 TAILQ_ENTRY(kvm_sw_breakpoint) entry; 98 }; 99 100 TAILQ_HEAD(kvm_sw_breakpoint_head, kvm_sw_breakpoint); 101 102 int kvm_arch_debug(struct kvm_debug_exit_arch *arch_info); 103 104 struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *env, 105 target_ulong pc); 106 107 int kvm_sw_breakpoints_active(CPUState *env); 108 109 int kvm_arch_insert_sw_breakpoint(CPUState *current_env, 110 struct kvm_sw_breakpoint *bp); 111 int kvm_arch_remove_sw_breakpoint(CPUState *current_env, 112 struct kvm_sw_breakpoint *bp); 113 int kvm_arch_insert_hw_breakpoint(target_ulong addr, 114 target_ulong len, int type); 115 int kvm_arch_remove_hw_breakpoint(target_ulong addr, 116 target_ulong len, int type); 117 void kvm_arch_remove_all_hw_breakpoints(void); 118 119 void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg); 120 121 int kvm_check_extension(KVMState *s, unsigned int extension); 122 123 uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function, 124 int reg); 125 126 /* generic hooks - to be moved/refactored once there are more users */ 127 128 static inline void cpu_synchronize_state(CPUState *env, int modified) 129 { 130 if (kvm_enabled()) { 131 if (modified) 132 kvm_arch_put_registers(env); 133 else 134 kvm_arch_get_registers(env); 135 } 136 } 137 79 138 #endif -
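kvm.h grows the guest-debug/breakpoint API and a generic cpu_synchronize_state() hook that pulls registers out of the kernel before a read (modified=0) and pushes QEMU's copy back after a write (modified=1). The stub below models only that call pattern; CPUStateStub and the register stand-ins are invented, and the real kvm_arch_get_registers()/kvm_arch_put_registers() ioctl into /dev/kvm:

#include <stdio.h>

typedef struct CPUStateStub { long eip; } CPUStateStub;

static void get_regs(CPUStateStub *env) { env->eip = 0x1000; }
static void put_regs(CPUStateStub *env) { (void)env; }

static void cpu_synchronize_state_stub(CPUStateStub *env, int modified)
{
    if (modified)
        put_regs(env);   /* push QEMU's copy back into the kernel */
    else
        get_regs(env);   /* refresh QEMU's copy from the kernel */
}

int main(void)
{
    CPUStateStub env = { 0 };

    cpu_synchronize_state_stub(&env, 0);   /* about to read registers */
    printf("eip=%#lx\n", env.eip);
    env.eip += 2;                          /* mutate ... */
    cpu_synchronize_state_stub(&env, 1);   /* ... then write back */
    return 0;
}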
trunk/src/recompiler/qemu-common.h
r36170 r36175 31 31 #define qemu_toascii(c) RT_C_TO_ASCII((unsigned char)(c)) 32 32 33 #define qemu_init_vcpu(env) do { } while (0) /* we don't need this :-) */ 34 35 33 36 #else /* !VBOX */ 34 37 #ifdef _WIN32 … … 59 62 #include <fcntl.h> 60 63 #include <sys/stat.h> 64 #include <assert.h> 61 65 #include "config-host.h" 62 66 … … 143 147 int strstart(const char *str, const char *val, const char **ptr); 144 148 int stristart(const char *str, const char *val, const char **ptr); 149 int qemu_strnlen(const char *s, int max_len); 145 150 time_t mktimegm(struct tm *tm); 146 151 int qemu_fls(int i); … … 198 203 typedef struct DisplayChangeListener DisplayChangeListener; 199 204 typedef struct DisplaySurface DisplaySurface; 205 typedef struct DisplayAllocator DisplayAllocator; 200 206 typedef struct PixelFormat PixelFormat; 201 207 typedef struct TextConsole TextConsole; … … 212 218 typedef struct SerialState SerialState; 213 219 typedef struct IRQState *qemu_irq; 214 struct pcmcia_card_s; 220 typedef struct PCMCIACardState PCMCIACardState; 221 typedef struct MouseTransformInfo MouseTransformInfo; 222 typedef struct uWireSlave uWireSlave; 223 typedef struct I2SCodec I2SCodec; 224 typedef struct DeviceState DeviceState; 225 typedef struct SSIBus SSIBus; 215 226 216 227 /* CPU save/load. */ … … 220 231 /* Force QEMU to stop what it's doing and service IO */ 221 232 void qemu_service_io(void); 233 234 /* Force QEMU to process pending events */ 235 void qemu_notify_event(void); 236 237 /* Unblock cpu */ 238 void qemu_cpu_kick(void *env); 239 int qemu_cpu_self(void *env); 240 241 #ifdef CONFIG_USER_ONLY 242 #define qemu_init_vcpu(env) do { } while (0) 243 #else 244 void qemu_init_vcpu(void *env); 245 #endif 222 246 223 247 typedef struct QEMUIOVector { … … 229 253 230 254 void qemu_iovec_init(QEMUIOVector *qiov, int alloc_hint); 255 void qemu_iovec_init_external(QEMUIOVector *qiov, struct iovec *iov, int niov); 231 256 void qemu_iovec_add(QEMUIOVector *qiov, void *base, size_t len); 232 257 void qemu_iovec_destroy(QEMUIOVector *qiov); … … 235 260 void qemu_iovec_from_buffer(QEMUIOVector *qiov, const void *buf, size_t count); 236 261 262 struct Monitor; 263 typedef struct Monitor Monitor; 264 265 #include "module.h" 266 237 267 #endif /* dyngen-exec.h hack */ 238 268 -
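Among the qemu-common.h additions, qemu_iovec_init_external() lets a QEMUIOVector borrow a caller-owned struct iovec array instead of building its own. The underlying idea is POSIX scatter/gather I/O; a plain writev() example of that idea (ours, not the QEMU API):

#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
    struct iovec iov[2] = {
        { .iov_base = "hello ", .iov_len = 6 },
        { .iov_base = "world\n", .iov_len = 6 },
    };
    ssize_t n = writev(STDOUT_FILENO, iov, 2);  /* one syscall, two buffers */
    return n == 12 ? 0 : 1;
}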
trunk/src/recompiler/qemu-lock.h
r36170 r36175 13 13 * 14 14 * You should have received a copy of the GNU Lesser General Public 15 * License along with this library; if not, write to the Free Software 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA 15 * License along with this library; if not, see <http://www.gnu.org/licenses/> 17 16 */ 18 17 -
trunk/src/recompiler/softmmu_exec.h
r36170 r36175 61 61 #undef ACCESS_TYPE 62 62 #undef MEMSUFFIX 63 #endif /* (NB_MMU_MODES >= 3) */ 63 64 64 65 #if (NB_MMU_MODES >= 4) … … 79 80 #undef ACCESS_TYPE 80 81 #undef MEMSUFFIX 82 #endif /* (NB_MMU_MODES >= 4) */ 81 83 82 #if (NB_MMU_MODES > 4) 83 #error "NB_MMU_MODES > 4 is not supported for now" 84 #endif /* (NB_MMU_MODES > 4) */ 85 #endif /* (NB_MMU_MODES == 4) */ 86 #endif /* (NB_MMU_MODES >= 3) */ 84 #if (NB_MMU_MODES >= 5) 85 86 #define ACCESS_TYPE 4 87 #define MEMSUFFIX MMU_MODE4_SUFFIX 88 #define DATA_SIZE 1 89 #include "softmmu_header.h" 90 91 #define DATA_SIZE 2 92 #include "softmmu_header.h" 93 94 #define DATA_SIZE 4 95 #include "softmmu_header.h" 96 97 #define DATA_SIZE 8 98 #include "softmmu_header.h" 99 #undef ACCESS_TYPE 100 #undef MEMSUFFIX 101 #endif /* (NB_MMU_MODES >= 5) */ 102 103 #if (NB_MMU_MODES > 5) 104 #error "NB_MMU_MODES > 5 is not supported for now" 105 #endif /* (NB_MMU_MODES > 5) */ 87 106 88 107 /* these access are slower, they must be as rare as possible */ -
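The softmmu_exec.h hunk extends the per-MMU-mode ladder from four modes to five. Each rung re-includes softmmu_header.h with a different ACCESS_TYPE/MEMSUFFIX pair, so the preprocessor stamps out one family of ld*/st* helpers per mode. The same token-pasting trick in miniature; the suffixes here are invented, the real ones come from each target's MMU_MODEn_SUFFIX defines:

#include <stdio.h>

#define glue_(a, b) a##b
#define glue(a, b)  glue_(a, b)

/* First "inclusion": kernel-mode accessor. */
#define MEMSUFFIX _kernel
static int glue(ldub, MEMSUFFIX)(int addr) { return addr & 0xff; }
#undef MEMSUFFIX

/* Second "inclusion": user-mode accessor. */
#define MEMSUFFIX _user
static int glue(ldub, MEMSUFFIX)(int addr) { return (addr >> 8) & 0xff; }
#undef MEMSUFFIX

int main(void)
{
    /* The pasted names are ldub_kernel and ldub_user. */
    printf("%d %d\n", ldub_kernel(0x1234), ldub_user(0x1234));  /* 52 18 */
    return 0;
}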
trunk/src/recompiler/softmmu_header.h
r36170 r36175 15 15 * 16 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, write to the Free Software 18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 19 18 */ 20 19 … … 81 80 #endif 82 81 83 #if (DATA_SIZE <= 4) && (TARGET_LONG_BITS == 32) && defined(__i386__) && \84 (ACCESS_TYPE < NB_MMU_MODES) && defined(ASM_SOFTMMU) && !defined(VBOX)85 86 static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(target_ulong ptr)87 {88 int res;89 90 asm volatile ("movl %1, %%edx\n"91 "movl %1, %%eax\n"92 "shrl %3, %%edx\n"93 "andl %4, %%eax\n"94 "andl %2, %%edx\n"95 "leal %5(%%edx, %%ebp), %%edx\n"96 "cmpl (%%edx), %%eax\n"97 "movl %1, %%eax\n"98 "je 1f\n"99 "movl %6, %%edx\n"100 "call %7\n"101 "movl %%eax, %0\n"102 "jmp 2f\n"103 "1:\n"104 "addl 12(%%edx), %%eax\n"105 #if DATA_SIZE == 1106 "movzbl (%%eax), %0\n"107 #elif DATA_SIZE == 2108 "movzwl (%%eax), %0\n"109 #elif DATA_SIZE == 4110 "movl (%%eax), %0\n"111 #else112 #error unsupported size113 #endif114 "2:\n"115 : "=r" (res)116 : "r" (ptr),117 "i" ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS),118 "i" (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS),119 "i" (TARGET_PAGE_MASK | (DATA_SIZE - 1)),120 "m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MMU_INDEX][0].addr_read)),121 "i" (CPU_MMU_INDEX),122 "m" (*(uint8_t *)&glue(glue(__ld, SUFFIX), MMUSUFFIX))123 : "%eax", "%ecx", "%edx", "memory", "cc");124 return res;125 }126 127 #if DATA_SIZE <= 2128 static inline int glue(glue(lds, SUFFIX), MEMSUFFIX)(target_ulong ptr)129 {130 int res;131 132 asm volatile ("movl %1, %%edx\n"133 "movl %1, %%eax\n"134 "shrl %3, %%edx\n"135 "andl %4, %%eax\n"136 "andl %2, %%edx\n"137 "leal %5(%%edx, %%ebp), %%edx\n"138 "cmpl (%%edx), %%eax\n"139 "movl %1, %%eax\n"140 "je 1f\n"141 "movl %6, %%edx\n"142 "call %7\n"143 #if DATA_SIZE == 1144 "movsbl %%al, %0\n"145 #elif DATA_SIZE == 2146 "movswl %%ax, %0\n"147 #else148 #error unsupported size149 #endif150 "jmp 2f\n"151 "1:\n"152 "addl 12(%%edx), %%eax\n"153 #if DATA_SIZE == 1154 "movsbl (%%eax), %0\n"155 #elif DATA_SIZE == 2156 "movswl (%%eax), %0\n"157 #else158 #error unsupported size159 #endif160 "2:\n"161 : "=r" (res)162 : "r" (ptr),163 "i" ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS),164 "i" (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS),165 "i" (TARGET_PAGE_MASK | (DATA_SIZE - 1)),166 "m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MMU_INDEX][0].addr_read)),167 "i" (CPU_MMU_INDEX),168 "m" (*(uint8_t *)&glue(glue(__ld, SUFFIX), MMUSUFFIX))169 : "%eax", "%ecx", "%edx", "memory", "cc");170 return res;171 }172 #endif173 174 static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(target_ulong ptr, RES_TYPE v)175 {176 asm volatile ("movl %0, %%edx\n"177 "movl %0, %%eax\n"178 "shrl %3, %%edx\n"179 "andl %4, %%eax\n"180 "andl %2, %%edx\n"181 "leal %5(%%edx, %%ebp), %%edx\n"182 "cmpl (%%edx), %%eax\n"183 "movl %0, %%eax\n"184 "je 1f\n"185 #if DATA_SIZE == 1186 "movzbl %b1, %%edx\n"187 #elif DATA_SIZE == 2188 "movzwl %w1, %%edx\n"189 #elif DATA_SIZE == 4190 "movl %1, %%edx\n"191 #else192 #error unsupported size193 #endif194 "movl %6, %%ecx\n"195 "call %7\n"196 "jmp 2f\n"197 "1:\n"198 "addl 8(%%edx), %%eax\n"199 #if DATA_SIZE == 1200 "movb %b1, (%%eax)\n"201 #elif DATA_SIZE == 2202 "movw %w1, (%%eax)\n"203 #elif DATA_SIZE == 4204 "movl %1, (%%eax)\n"205 #else206 #error unsupported size207 #endif208 "2:\n"209 :210 : "r" (ptr),211 #if DATA_SIZE == 1212 "q" (v),213 #else214 "r" 
(v),215 #endif216 "i" ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS),217 "i" (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS),218 "i" (TARGET_PAGE_MASK | (DATA_SIZE - 1)),219 "m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MMU_INDEX][0].addr_write)),220 "i" (CPU_MMU_INDEX),221 "m" (*(uint8_t *)&glue(glue(__st, SUFFIX), MMUSUFFIX))222 : "%eax", "%ecx", "%edx", "memory", "cc");223 }224 225 #else226 227 82 /* generic load/store macros */ 228 83 … … 295 150 #endif /* ACCESS_TYPE != (NB_MMU_MODES + 1) */ 296 151 297 #endif /* !asm */298 299 152 #if ACCESS_TYPE != (NB_MMU_MODES + 1) 300 153 -
trunk/src/recompiler/softmmu_template.h
r36170 r36175 15 15 * 16 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, write to the Free Software 18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 19 18 */ 20 19 … … 34 33 #define USUFFIX q 35 34 #define DATA_TYPE uint64_t 36 #define DATA_TYPE_PROMOTED uint64_t 35 #ifdef VBOX 36 # define DATA_TYPE_PROMOTED uint64_t 37 #endif 37 38 #elif DATA_SIZE == 4 38 39 #define SUFFIX l … … 40 41 #define DATA_TYPE uint32_t 41 42 #ifdef VBOX 42 # define DATA_TYPE_PROMOTED RTCCUINTREG43 # define DATA_TYPE_PROMOTED RTCCUINTREG 43 44 #endif 44 45 #elif DATA_SIZE == 2 … … 47 48 #define DATA_TYPE uint16_t 48 49 #ifdef VBOX 49 # define DATA_TYPE_PROMOTED RTCCUINTREG50 # define DATA_TYPE_PROMOTED RTCCUINTREG 50 51 #endif 51 52 #elif DATA_SIZE == 1 … … 54 55 #define DATA_TYPE uint8_t 55 56 #ifdef VBOX 56 # define DATA_TYPE_PROMOTED RTCCUINTREG57 # define DATA_TYPE_PROMOTED RTCCUINTREG 57 58 #endif 58 59 #else … … 97 98 #endif 98 99 #endif /* SHIFT > 2 */ 99 #ifdef USE_KQEMU100 #ifdef CONFIG_KQEMU 100 101 env->last_io_time = cpu_get_time_fast(); 101 102 #endif … … 251 252 #endif 252 253 #endif /* SHIFT > 2 */ 253 #ifdef USE_KQEMU254 #ifdef CONFIG_KQEMU 254 255 env->last_io_time = cpu_get_time_fast(); 255 256 #endif … … 356 357 357 358 #ifdef VBOX 358 # undef DATA_TYPE_PROMOTED359 # undef DATA_TYPE_PROMOTED 359 360 #endif 360 361 #undef READ_ACCESS_TYPE -
trunk/src/recompiler/target-i386/cpu.h
r36170 r36175 15 15 * 16 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, write to the Free Software 18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 19 18 */ 20 19 … … 52 51 #define ELF_MACHINE EM_386 53 52 #endif 53 54 #define CPUState struct CPUX86State 54 55 55 56 #include "cpu-defs.h" … … 100 101 #define DESC_P_MASK (1 << 15) 101 102 #define DESC_DPL_SHIFT 13 102 #define DESC_DPL_MASK ( 1<< DESC_DPL_SHIFT)103 #define DESC_DPL_MASK (3 << DESC_DPL_SHIFT) 103 104 #define DESC_S_MASK (1 << 12) 104 105 #define DESC_TYPE_SHIFT 8 106 #define DESC_TYPE_MASK (15 << DESC_TYPE_SHIFT) 105 107 #define DESC_A_MASK (1 << 8) 106 108 … … 162 164 #define HF_LMA_SHIFT 14 /* only used on x86_64: long mode active */ 163 165 #define HF_CS64_SHIFT 15 /* only used on x86_64: 64 bit code segment */ 164 #define HF_ OSFXSR_SHIFT 16 /* CR4.OSFXSR*/166 #define HF_RF_SHIFT 16 /* must be same as eflags */ 165 167 #define HF_VM_SHIFT 17 /* must be same as eflags */ 166 168 #define HF_SMM_SHIFT 19 /* CPU in SMM mode */ 167 169 #define HF_SVME_SHIFT 20 /* SVME enabled (copy of EFER.SVME) */ 168 170 #define HF_SVMI_SHIFT 21 /* SVM intercepts are active */ 171 #define HF_OSFXSR_SHIFT 22 /* CR4.OSFXSR */ 169 172 170 173 #define HF_CPL_MASK (3 << HF_CPL_SHIFT) … … 182 185 #define HF_LMA_MASK (1 << HF_LMA_SHIFT) 183 186 #define HF_CS64_MASK (1 << HF_CS64_SHIFT) 184 #define HF_ OSFXSR_MASK (1 << HF_OSFXSR_SHIFT)187 #define HF_RF_MASK (1 << HF_RF_SHIFT) 185 188 #define HF_VM_MASK (1 << HF_VM_SHIFT) 186 189 #define HF_SMM_MASK (1 << HF_SMM_SHIFT) 187 190 #define HF_SVME_MASK (1 << HF_SVME_SHIFT) 188 191 #define HF_SVMI_MASK (1 << HF_SVMI_SHIFT) 192 #define HF_OSFXSR_MASK (1 << HF_OSFXSR_SHIFT) 189 193 190 194 /* hflags2 */ … … 219 223 #define CR4_PSE_MASK (1 << 4) 220 224 #define CR4_PAE_MASK (1 << 5) 225 #define CR4_MCE_MASK (1 << 6) 221 226 #define CR4_PGE_MASK (1 << 7) 222 227 #define CR4_PCE_MASK (1 << 8) … … 264 269 #define PG_ERROR_RSVD_MASK 0x08 265 270 #define PG_ERROR_I_D_MASK 0x10 271 272 #define MCG_CTL_P (1UL<<8) /* MCG_CAP register available */ 273 274 #define MCE_CAP_DEF MCG_CTL_P 275 #define MCE_BANKS_DEF 10 276 277 #define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */ 278 279 #define MCI_STATUS_VAL (1ULL<<63) /* valid error */ 280 #define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */ 281 #define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */ 266 282 267 283 #define MSR_IA32_TSC 0x10 … … 304 320 305 321 #define MSR_MTRRdefType 0x2ff 322 323 #define MSR_MC0_CTL 0x400 324 #define MSR_MC0_STATUS 0x401 325 #define MSR_MC0_ADDR 0x402 326 #define MSR_MC0_MISC 0x403 306 327 307 328 #define MSR_EFER 0xc0000080 … … 382 403 #define CPUID_EXT_XSAVE (1 << 26) 383 404 #define CPUID_EXT_OSXSAVE (1 << 27) 405 #define CPUID_EXT_HYPERVISOR (1 << 31) 384 406 385 407 #define CPUID_EXT2_SYSCALL (1 << 11) … … 705 727 uint32_t cpuid_ext3_features; 706 728 uint32_t cpuid_apic_id; 707 708 729 #ifndef VBOX 730 int cpuid_vendor_override; 731 709 732 /* MTRRs */ 710 733 uint64_t mtrr_fixed[11]; … … 715 738 } mtrr_var[8]; 716 739 717 #ifdef USE_KQEMU740 #ifdef CONFIG_KQEMU 718 741 int kqemu_enabled; 719 742 int last_io_time; … … 722 745 /* For KVM */ 723 746 uint64_t interrupt_bitmap[256 / 64]; 747 uint32_t mp_state; 724 748 725 749 /* in order to simplify APIC support, we leave this pointer to the 726 750 user */ 727 751 struct APICState 
*apic_state; 752 753 uint64 mcg_cap; 754 uint64 mcg_status; 755 uint64 mcg_ctl; 756 uint64 *mce_banks; 728 757 #else /* VBOX */ 758 729 759 uint32_t alignment2[3]; 730 760 /** Profiling tb_flush. */ … … 998 1028 #define X86_DUMP_CCOP 0x0002 /* dump qemu flag cache */ 999 1029 1000 #ifdef USE_KQEMU1030 #ifdef CONFIG_KQEMU 1001 1031 static inline int cpu_get_time_fast(void) 1002 1032 { … … 1029 1059 #define TARGET_PAGE_BITS 12 1030 1060 1031 #define CPUState CPUX86State1032 1061 #define cpu_init cpu_x86_init 1033 1062 #define cpu_exec cpu_x86_exec … … 1036 1065 #define cpu_list x86_cpu_list 1037 1066 1038 #define CPU_SAVE_VERSION 81067 #define CPU_SAVE_VERSION 10 1039 1068 1040 1069 /* MMU modes definitions */ … … 1079 1108 *cs_base = env->segs[R_CS].base; 1080 1109 *pc = *cs_base + env->eip; 1081 *flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)); 1110 *flags = env->hflags | 1111 (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK)); 1082 1112 } 1083 1113 1114 #ifndef VBOX 1115 void apic_init_reset(CPUState *env); 1116 void apic_sipi(CPUState *env); 1117 void do_cpu_init(CPUState *env); 1118 void do_cpu_sipi(CPUState *env); 1119 #endif /* !VBOX */ 1084 1120 #endif /* CPU_I386_H */ -
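Buried in the cpu.h hunk is a genuine bug fix: DESC_DPL_MASK was (1 << DESC_DPL_SHIFT), but the DPL field spans bits 13-14 of the descriptor flags, so extracting the privilege level of a DPL=3 descriptor yielded 1. With (3 << DESC_DPL_SHIFT), the new segment-cache dump in helper.c can print DPL correctly. A self-contained check:

#include <stdio.h>

#define DESC_DPL_SHIFT 13
#define OLD_DPL_MASK   (1 << DESC_DPL_SHIFT)   /* the pre-r36175 value */
#define NEW_DPL_MASK   (3 << DESC_DPL_SHIFT)

int main(void)
{
    unsigned flags = 3u << DESC_DPL_SHIFT;     /* a DPL=3 (user) descriptor */

    printf("old mask: DPL=%u\n", (flags & OLD_DPL_MASK) >> DESC_DPL_SHIFT); /* 1 */
    printf("new mask: DPL=%u\n", (flags & NEW_DPL_MASK) >> DESC_DPL_SHIFT); /* 3 */
    return 0;
}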
trunk/src/recompiler/target-i386/exec.h
r36170 r36175 15 15 * 16 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, write to the Free Software 18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 19 18 */ 20 19 … … 385 384 } 386 385 386 static inline int cpu_has_work(CPUState *env) 387 { 388 int work; 389 390 work = (env->interrupt_request & CPU_INTERRUPT_HARD) && 391 (env->eflags & IF_MASK); 392 work |= env->interrupt_request & CPU_INTERRUPT_NMI; 393 work |= env->interrupt_request & CPU_INTERRUPT_INIT; 394 work |= env->interrupt_request & CPU_INTERRUPT_SIPI; 395 396 return work; 397 } 398 387 399 static inline int cpu_halted(CPUState *env) { 388 400 /* handle exit of HALTED state */ … … 390 402 return 0; 391 403 /* disable halt condition */ 392 if (((env->interrupt_request & CPU_INTERRUPT_HARD) && 393 (env->eflags & IF_MASK)) || 394 (env->interrupt_request & CPU_INTERRUPT_NMI)) { 404 if (cpu_has_work(env)) { 395 405 env->halted = 0; 396 406 return 0; -
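exec.h factors the HLT wake-up condition into cpu_has_work() and widens it: besides a maskable interrupt with IF set and an NMI, pending INIT and SIPI now also take the CPU out of the halted state. A stand-in model of the predicate; the flag values are invented, QEMU's CPU_INTERRUPT_* constants differ:

#include <stdio.h>

enum { IRQ_HARD = 1, IRQ_NMI = 2, IRQ_INIT = 4, IRQ_SIPI = 8 };
#define IF_MASK 0x200

static int has_work(int request, int eflags)
{
    int work = (request & IRQ_HARD) && (eflags & IF_MASK);
    work |= request & (IRQ_NMI | IRQ_INIT | IRQ_SIPI);
    return work != 0;
}

int main(void)
{
    printf("%d\n", has_work(IRQ_HARD, 0));        /* 0: IF clear masks INTR */
    printf("%d\n", has_work(IRQ_HARD, IF_MASK));  /* 1 */
    printf("%d\n", has_work(IRQ_SIPI, 0));        /* 1: SIPI wakes regardless */
    return 0;
}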
trunk/src/recompiler/target-i386/helper.c
r36171 r36175 15 15 * 16 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, write to the Free Software 18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 19 18 */ 20 19 … … 35 34 #include <inttypes.h> 36 35 #include <signal.h> 37 #include <assert.h>38 36 #endif /* !VBOX */ 39 37 … … 46 44 47 45 #ifndef VBOX 48 static void add_flagname_to_bitmaps(char *flagname, uint32_t *features, 46 /* feature flags taken from "Intel Processor Identification and the CPUID 47 * Instruction" and AMD's "CPUID Specification". In cases of disagreement 48 * about feature names, the Linux name is used. */ 49 static const char *feature_name[] = { 50 "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce", 51 "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov", 52 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx", 53 "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe", 54 }; 55 static const char *ext_feature_name[] = { 56 "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est", 57 "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL, 58 NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt", 59 NULL, NULL, NULL, NULL, NULL, NULL, NULL, "hypervisor", 60 }; 61 static const char *ext2_feature_name[] = { 62 "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce", 63 "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov", 64 "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx", 65 "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow", 66 }; 67 static const char *ext3_feature_name[] = { 68 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse", 69 "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL, 70 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 71 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 72 }; 73 74 static void add_flagname_to_bitmaps(const char *flagname, uint32_t *features, 49 75 uint32_t *ext_features, 50 76 uint32_t *ext2_features, … … 52 78 { 53 79 int i; 54 /* feature flags taken from "Intel Processor Identification and the CPUID 55 * Instruction" and AMD's "CPUID Specification". In cases of disagreement 56 * about feature names, the Linux name is used. 
*/ 57 static const char *feature_name[] = { 58 "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce", 59 "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov", 60 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx", 61 "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe", 62 }; 63 static const char *ext_feature_name[] = { 64 "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est", 65 "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL, 66 NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt", 67 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 68 }; 69 static const char *ext2_feature_name[] = { 70 "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce", 71 "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov", 72 "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx", 73 "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow", 74 }; 75 static const char *ext3_feature_name[] = { 76 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse", 77 "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL, 78 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 79 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 80 }; 80 int found = 0; 81 81 82 82 for ( i = 0 ; i < 32 ; i++ ) 83 83 if (feature_name[i] && !strcmp (flagname, feature_name[i])) { 84 84 *features |= 1 << i; 85 return;85 found = 1; 86 86 } 87 87 for ( i = 0 ; i < 32 ; i++ ) 88 88 if (ext_feature_name[i] && !strcmp (flagname, ext_feature_name[i])) { 89 89 *ext_features |= 1 << i; 90 return;90 found = 1; 91 91 } 92 92 for ( i = 0 ; i < 32 ; i++ ) 93 93 if (ext2_feature_name[i] && !strcmp (flagname, ext2_feature_name[i])) { 94 94 *ext2_features |= 1 << i; 95 return;95 found = 1; 96 96 } 97 97 for ( i = 0 ; i < 32 ; i++ ) 98 98 if (ext3_feature_name[i] && !strcmp (flagname, ext3_feature_name[i])) { 99 99 *ext3_features |= 1 << i; 100 return; 101 } 102 fprintf(stderr, "CPU feature %s not found\n", flagname); 100 found = 1; 101 } 102 if (!found) { 103 fprintf(stderr, "CPU feature %s not found\n", flagname); 104 } 103 105 } 104 106 #endif /* !VBOX */ … … 114 116 uint32_t xlevel; 115 117 char model_id[48]; 118 int vendor_override; 116 119 } x86_def_t; 117 120 … … 146 149 .ext_features = CPUID_EXT_SSE3, 147 150 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | 148 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | 149 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT, 151 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 150 152 .ext3_features = CPUID_EXT3_SVM, 151 153 .xlevel = 0x8000000A, … … 271 273 .name = "athlon", 272 274 .level = 2, 273 .vendor1 = 0x68747541, /* "Auth" */274 .vendor2 = 0x69746e65, /* "enti" */275 .vendor3 = 0x444d4163, /* "cAMD" */275 .vendor1 = CPUID_VENDOR_AMD_1, 276 .vendor2 = CPUID_VENDOR_AMD_2, 277 .vendor3 = CPUID_VENDOR_AMD_3, 276 278 .family = 6, 277 279 .model = 2, … … 306 308 }; 307 309 310 static void host_cpuid(uint32_t function, uint32_t count, uint32_t *eax, 311 uint32_t *ebx, uint32_t *ecx, uint32_t *edx); 312 313 static int cpu_x86_fill_model_id(char *str) 314 { 315 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0; 316 int i; 317 318 for (i = 0; i < 3; i++) { 319 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx); 320 memcpy(str + i * 16 + 0, &eax, 4); 321 memcpy(str + i * 
16 + 4, &ebx, 4); 322 memcpy(str + i * 16 + 8, &ecx, 4); 323 memcpy(str + i * 16 + 12, &edx, 4); 324 } 325 return 0; 326 } 327 328 static int cpu_x86_fill_host(x86_def_t *x86_cpu_def) 329 { 330 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0; 331 332 x86_cpu_def->name = "host"; 333 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx); 334 x86_cpu_def->level = eax; 335 x86_cpu_def->vendor1 = ebx; 336 x86_cpu_def->vendor2 = edx; 337 x86_cpu_def->vendor3 = ecx; 338 339 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx); 340 x86_cpu_def->family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF); 341 x86_cpu_def->model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12); 342 x86_cpu_def->stepping = eax & 0x0F; 343 x86_cpu_def->ext_features = ecx; 344 x86_cpu_def->features = edx; 345 346 host_cpuid(0x80000000, 0, &eax, &ebx, &ecx, &edx); 347 x86_cpu_def->xlevel = eax; 348 349 host_cpuid(0x80000001, 0, &eax, &ebx, &ecx, &edx); 350 x86_cpu_def->ext2_features = edx; 351 x86_cpu_def->ext3_features = ecx; 352 cpu_x86_fill_model_id(x86_cpu_def->model_id); 353 x86_cpu_def->vendor_override = 0; 354 355 return 0; 356 } 357 308 358 static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model) 309 359 { … … 324 374 } 325 375 } 326 if (!def) 376 if (kvm_enabled() && strcmp(name, "host") == 0) { 377 cpu_x86_fill_host(x86_cpu_def); 378 } else if (!def) { 327 379 goto error; 328 memcpy(x86_cpu_def, def, sizeof(*def)); 380 } else { 381 memcpy(x86_cpu_def, def, sizeof(*def)); 382 } 383 384 add_flagname_to_bitmaps("hypervisor", &plus_features, 385 &plus_ext_features, &plus_ext2_features, &plus_ext3_features); 329 386 330 387 featurestr = strtok(NULL, ","); … … 375 432 x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i); 376 433 } 434 x86_cpu_def->vendor_override = 1; 377 435 } else if (!strcmp(featurestr, "model_id")) { 378 436 pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id), … … 429 487 env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3; 430 488 } 489 env->cpuid_vendor_override = def->vendor_override; 431 490 env->cpuid_level = def->level; 432 491 if (def->family > 0x0f) … … 495 554 496 555 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 497 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK); 556 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | 557 DESC_R_MASK | DESC_A_MASK); 498 558 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 499 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK); 559 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 560 DESC_A_MASK); 500 561 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 501 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK); 562 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 563 DESC_A_MASK); 502 564 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 503 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK); 565 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 566 DESC_A_MASK); 504 567 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 505 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK); 568 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 569 DESC_A_MASK); 506 570 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 507 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK); 571 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 572 DESC_A_MASK); 508 573 509 574 env->eip = 0xfff0; … … 595 660 "SARQ", 596 661 }; 662 663 static void 664 cpu_x86_dump_seg_cache(CPUState *env, FILE *f, 665 int (*cpu_fprintf)(FILE *f, const char *fmt, ...), 666 const char *name, struct SegmentCache *sc) 667 { 668 #ifdef TARGET_X86_64 669 if (env->hflags & HF_CS64_MASK) { 670 cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name, 671 sc->selector, sc->base, sc->limit, sc->flags); 
672 } else 673 #endif 674 { 675 cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector, 676 (uint32_t)sc->base, sc->limit, sc->flags); 677 } 678 679 if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK)) 680 goto done; 681 682 cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT); 683 if (sc->flags & DESC_S_MASK) { 684 if (sc->flags & DESC_CS_MASK) { 685 cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" : 686 ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16")); 687 cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-', 688 (sc->flags & DESC_R_MASK) ? 'R' : '-'); 689 } else { 690 cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS " : "DS16"); 691 cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-', 692 (sc->flags & DESC_W_MASK) ? 'W' : '-'); 693 } 694 cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-'); 695 } else { 696 static const char *sys_type_name[2][16] = { 697 { /* 32 bit mode */ 698 "Reserved", "TSS16-avl", "LDT", "TSS16-busy", 699 "CallGate16", "TaskGate", "IntGate16", "TrapGate16", 700 "Reserved", "TSS32-avl", "Reserved", "TSS32-busy", 701 "CallGate32", "Reserved", "IntGate32", "TrapGate32" 702 }, 703 { /* 64 bit mode */ 704 "<hiword>", "Reserved", "LDT", "Reserved", "Reserved", 705 "Reserved", "Reserved", "Reserved", "Reserved", 706 "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64", 707 "Reserved", "IntGate64", "TrapGate64" 708 } 709 }; 710 cpu_fprintf(f, sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0] 711 [(sc->flags & DESC_TYPE_MASK) 712 >> DESC_TYPE_SHIFT]); 713 } 714 done: 715 cpu_fprintf(f, "\n"); 716 } 597 717 598 718 void cpu_dump_state(CPUState *env, FILE *f, … … 674 794 } 675 795 796 for(i = 0; i < 6; i++) { 797 cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i], 798 &env->segs[i]); 799 } 800 cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt); 801 cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr); 802 676 803 #ifdef TARGET_X86_64 677 804 if (env->hflags & HF_LMA_MASK) { 678 for(i = 0; i < 6; i++) {679 SegmentCache *sc = &env->segs[i];680 cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",681 seg_name[i],682 sc->selector,683 sc->base,684 sc->limit,685 sc->flags);686 }687 cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",688 env->ldt.selector,689 env->ldt.base,690 env->ldt.limit,691 env->ldt.flags);692 cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",693 env->tr.selector,694 env->tr.base,695 env->tr.limit,696 env->tr.flags);697 805 cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n", 698 806 env->gdt.base, env->gdt.limit); … … 711 819 #endif 712 820 { 713 for(i = 0; i < 6; i++) {714 SegmentCache *sc = &env->segs[i];715 cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",716 seg_name[i],717 sc->selector,718 (uint32_t)sc->base,719 sc->limit,720 sc->flags);721 }722 cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",723 env->ldt.selector,724 (uint32_t)env->ldt.base,725 env->ldt.limit,726 env->ldt.flags);727 cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",728 env->tr.selector,729 (uint32_t)env->tr.base,730 env->tr.limit,731 env->tr.flags);732 821 cpu_fprintf(f, "GDT= %08x %08x\n", 733 822 (uint32_t)env->gdt.base, env->gdt.limit); … … 939 1028 /* XXX: This value should match the one returned by CPUID 940 1029 * and in exec.c */ 941 #if defined( USE_KQEMU)1030 #if defined(CONFIG_KQEMU) 942 1031 #define PHYS_ADDR_MASK 0xfffff000LL 943 1032 #else … … 1437 1526 prev_debug_excp_handler(env); 1438 1527 } 1528 1529 1530 #ifndef VBOX 1531 /* This should come from sysemu.h - if we could include it here... 
*/ 1532 void qemu_system_reset_request(void); 1533 1534 void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status, 1535 uint64_t mcg_status, uint64_t addr, uint64_t misc) 1536 { 1537 uint64_t mcg_cap = cenv->mcg_cap; 1538 unsigned bank_num = mcg_cap & 0xff; 1539 uint64_t *banks = cenv->mce_banks; 1540 1541 if (bank >= bank_num || !(status & MCI_STATUS_VAL)) 1542 return; 1543 1544 /* 1545 * if MSR_MCG_CTL is not all 1s, the uncorrected error 1546 * reporting is disabled 1547 */ 1548 if ((status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) && 1549 cenv->mcg_ctl != ~(uint64_t)0) 1550 return; 1551 banks += 4 * bank; 1552 /* 1553 * if MSR_MCi_CTL is not all 1s, the uncorrected error 1554 * reporting is disabled for the bank 1555 */ 1556 if ((status & MCI_STATUS_UC) && banks[0] != ~(uint64_t)0) 1557 return; 1558 if (status & MCI_STATUS_UC) { 1559 if ((cenv->mcg_status & MCG_STATUS_MCIP) || 1560 !(cenv->cr[4] & CR4_MCE_MASK)) { 1561 fprintf(stderr, "injects mce exception while previous " 1562 "one is in progress!\n"); 1563 qemu_log_mask(CPU_LOG_RESET, "Triple fault\n"); 1564 qemu_system_reset_request(); 1565 return; 1566 } 1567 if (banks[1] & MCI_STATUS_VAL) 1568 status |= MCI_STATUS_OVER; 1569 banks[2] = addr; 1570 banks[3] = misc; 1571 cenv->mcg_status = mcg_status; 1572 banks[1] = status; 1573 cpu_interrupt(cenv, CPU_INTERRUPT_MCE); 1574 } else if (!(banks[1] & MCI_STATUS_VAL) 1575 || !(banks[1] & MCI_STATUS_UC)) { 1576 if (banks[1] & MCI_STATUS_VAL) 1577 status |= MCI_STATUS_OVER; 1578 banks[2] = addr; 1579 banks[3] = misc; 1580 banks[1] = status; 1581 } else 1582 banks[1] |= MCI_STATUS_OVER; 1583 } 1584 #endif /* !VBOX */ 1439 1585 #endif /* !CONFIG_USER_ONLY */ 1440 1586 1441 1587 #ifndef VBOX 1588 1589 static void mce_init(CPUX86State *cenv) 1590 { 1591 unsigned int bank, bank_num; 1592 1593 if (((cenv->cpuid_version >> 8)&0xf) >= 6 1594 && (cenv->cpuid_features&(CPUID_MCE|CPUID_MCA)) == (CPUID_MCE|CPUID_MCA)) { 1595 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF; 1596 cenv->mcg_ctl = ~(uint64_t)0; 1597 bank_num = cenv->mcg_cap & 0xff; 1598 cenv->mce_banks = qemu_mallocz(bank_num * sizeof(uint64_t) * 4); 1599 for (bank = 0; bank < bank_num; bank++) 1600 cenv->mce_banks[bank*4] = ~(uint64_t)0; 1601 } 1602 } 1603 1442 1604 static void host_cpuid(uint32_t function, uint32_t count, 1443 1605 uint32_t *eax, uint32_t *ebx, … … 1499 1661 * actuall cpu, and say goodbye to migration between different vendors 1500 1662 * is you use compatibility mode. 
*/ 1501 if (kvm_enabled() )1663 if (kvm_enabled() && !env->cpuid_vendor_override) 1502 1664 host_cpuid(0, 0, NULL, ebx, ecx, edx); 1503 1665 break; … … 1507 1669 *ecx = env->cpuid_ext_features; 1508 1670 *edx = env->cpuid_features; 1509 1510 /* "Hypervisor present" bit required for Microsoft SVVP */1511 if (kvm_enabled())1512 *ecx |= (1 << 31);1513 1671 break; 1514 1672 case 2: … … 1583 1741 break; 1584 1742 case 0x80000001: 1585 *eax = env->cpuid_ features;1743 *eax = env->cpuid_version; 1586 1744 *ebx = 0; 1587 1745 *ecx = env->cpuid_ext3_features; … … 1589 1747 1590 1748 if (kvm_enabled()) { 1591 uint32_t h_eax, h_edx; 1592 1593 host_cpuid(index, 0, &h_eax, NULL, NULL, &h_edx); 1594 1595 /* disable CPU features that the host does not support */ 1596 1597 /* long mode */ 1598 if ((h_edx & 0x20000000) == 0 /* || !lm_capable_kernel */) 1599 *edx &= ~0x20000000; 1600 /* syscall */ 1601 if ((h_edx & 0x00000800) == 0) 1602 *edx &= ~0x00000800; 1603 /* nx */ 1604 if ((h_edx & 0x00100000) == 0) 1605 *edx &= ~0x00100000; 1606 1607 /* disable CPU features that KVM cannot support */ 1608 1609 /* svm */ 1610 *ecx &= ~4UL; 1611 /* 3dnow */ 1612 *edx &= ~0xc0000000; 1749 /* Nested SVM not yet supported in KVM */ 1750 *ecx &= ~CPUID_EXT3_SVM; 1751 } else { 1752 /* AMD 3DNow! is not supported in QEMU */ 1753 *edx &= ~(CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT); 1613 1754 } 1614 1755 break; … … 1640 1781 if (env->cpuid_ext2_features & CPUID_EXT2_LM) { 1641 1782 /* 64 bit processor */ 1642 #if defined( USE_KQEMU)1783 #if defined(CONFIG_KQEMU) 1643 1784 *eax = 0x00003020; /* 48 bits virtual, 32 bits physical */ 1644 1785 #else … … 1647 1788 #endif 1648 1789 } else { 1649 #if defined( USE_KQEMU)1790 #if defined(CONFIG_KQEMU) 1650 1791 *eax = 0x00000020; /* 32 bits physical */ 1651 1792 #else … … 1675 1816 } 1676 1817 } 1818 1819 int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector, 1820 target_ulong *base, unsigned int *limit, 1821 unsigned int *flags) 1822 { 1823 SegmentCache *dt; 1824 target_ulong ptr; 1825 uint32_t e1, e2; 1826 int index; 1827 1828 if (selector & 0x4) 1829 dt = &env->ldt; 1830 else 1831 dt = &env->gdt; 1832 index = selector & ~7; 1833 ptr = dt->base + index; 1834 if ((index + 7) > dt->limit 1835 || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0 1836 || cpu_memory_rw_debug(env, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0) 1837 return 0; 1838 1839 *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000)); 1840 *limit = (e1 & 0xffff) | (e2 & 0x000f0000); 1841 if (e2 & DESC_G_MASK) 1842 *limit = (*limit << 12) | 0xfff; 1843 *flags = e2; 1844 1845 return 1; 1846 } 1847 1677 1848 #endif /* !VBOX */ 1678 1849 … … 1707 1878 return NULL; 1708 1879 } 1880 #ifndef VBOX 1881 mce_init(env); 1882 #endif 1709 1883 cpu_reset(env); 1710 #ifdef USE_KQEMU1884 #ifdef CONFIG_KQEMU 1711 1885 kqemu_init(env); 1712 1886 #endif 1713 if (kvm_enabled()) 1714 kvm_init_vcpu(env); 1887 1888 qemu_init_vcpu(env); 1889 1715 1890 return env; 1716 1891 } 1892 1893 #ifndef VBOX 1894 #if !defined(CONFIG_USER_ONLY) 1895 void do_cpu_init(CPUState *env) 1896 { 1897 int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI; 1898 cpu_reset(env); 1899 env->interrupt_request = sipi; 1900 apic_init_reset(env); 1901 } 1902 1903 void do_cpu_sipi(CPUState *env) 1904 { 1905 apic_sipi(env); 1906 } 1907 #else 1908 void do_cpu_init(CPUState *env) 1909 { 1910 } 1911 void do_cpu_sipi(CPUState *env) 1912 { 1913 } 1914 #endif 1915 #endif /* !VBOX */ -
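The new cpu_x86_fill_host() decodes CPUID leaf 1 EAX the way the Intel and AMD manuals specify: the extended family is added to the base family, and the extended model is prepended to the base model nibble. A worked decode for one sample value (the EAX constant is just an example):

#include <stdio.h>

int main(void)
{
    unsigned eax = 0x000106A5;   /* sample leaf-1 EAX */
    unsigned family   = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    unsigned model    = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    unsigned stepping = eax & 0x0F;

    /* prints: family 6 model 26 stepping 5 */
    printf("family %u model %u stepping %u\n", family, model, stepping);
    return 0;
}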
trunk/src/recompiler/target-i386/helper.h
r36170 r36175 63 63 DEF_HELPER_1(mwait, void, int) 64 64 DEF_HELPER_0(debug, void) 65 DEF_HELPER_0(reset_rf, void) 65 66 DEF_HELPER_2(raise_interrupt, void, int, int) 66 67 DEF_HELPER_1(raise_exception, void, int) -
trunk/src/recompiler/target-i386/helper_template.h
r36170 r36175 15 15 * 16 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, write to the Free Software 18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 19 18 */ 20 19 -
trunk/src/recompiler/target-i386/op_helper.c
r36171 r36175 15 15 * 16 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, write to the Free Software 18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 19 18 */ 20 19 … … 751 750 } 752 751 752 static int exeption_has_error_code(int intno) 753 { 754 switch(intno) { 755 case 8: 756 case 10: 757 case 11: 758 case 12: 759 case 13: 760 case 14: 761 case 17: 762 return 1; 763 } 764 return 0; 765 } 766 753 767 #ifdef TARGET_X86_64 754 768 #define SET_ESP(val, sp_mask)\ … … 811 825 812 826 has_error_code = 0; 813 if (!is_int && !is_hw) { 814 switch(intno) { 815 case 8: 816 case 10: 817 case 11: 818 case 12: 819 case 13: 820 case 14: 821 case 17: 822 has_error_code = 1; 823 break; 824 } 825 } 827 if (!is_int && !is_hw) 828 has_error_code = exeption_has_error_code(intno); 826 829 if (is_int) 827 830 old_eip = next_eip; … … 1016 1019 #else 1017 1020 /* 1018 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD1019 * gets confused by seemingly changed EFLAGS. See #3491 and1020 * public bug #2341.1021 */1021 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD 1022 * gets confused by seemingly changed EFLAGS. See #3491 and 1023 * public bug #2341. 1024 */ 1022 1025 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK); 1023 1026 #endif … … 1172 1175 1173 1176 has_error_code = 0; 1174 if (!is_int && !is_hw) { 1175 switch(intno) { 1176 case 8: 1177 case 10: 1178 case 11: 1179 case 12: 1180 case 13: 1181 case 14: 1182 case 17: 1183 has_error_code = 1; 1184 break; 1185 } 1186 } 1177 if (!is_int && !is_hw) 1178 has_error_code = exeption_has_error_code(intno); 1187 1179 if (is_int) 1188 1180 old_eip = next_eip; … … 1414 1406 cpu_x86_set_cpl(env, 3); 1415 1407 } 1416 #ifdef USE_KQEMU1408 #ifdef CONFIG_KQEMU 1417 1409 if (kqemu_is_ok(env)) { 1418 1410 if (env->hflags & HF_LMA_MASK) … … 1452 1444 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, 1453 1445 ~CPU_INTERRUPT_EXTERNAL_EXIT); 1454 cpu_ interrupt(env, CPU_INTERRUPT_EXIT);1446 cpu_exit(env); 1455 1447 } 1456 1448 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA) … … 1548 1540 EIP = next_eip; 1549 1541 } 1542 1543 #if !defined(CONFIG_USER_ONLY) 1544 static void handle_even_inj(int intno, int is_int, int error_code, 1545 int is_hw, int rm) 1546 { 1547 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)); 1548 if (!(event_inj & SVM_EVTINJ_VALID)) { 1549 int type; 1550 if (is_int) 1551 type = SVM_EVTINJ_TYPE_SOFT; 1552 else 1553 type = SVM_EVTINJ_TYPE_EXEPT; 1554 event_inj = intno | type | SVM_EVTINJ_VALID; 1555 if (!rm && exeption_has_error_code(intno)) { 1556 event_inj |= SVM_EVTINJ_VALID_ERR; 1557 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code); 1558 } 1559 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj); 1560 } 1561 } 1562 #endif 1550 1563 1551 1564 /* … … 1600 1613 #endif 1601 1614 if (env->cr[0] & CR0_PE_MASK) { 1615 #if !defined(CONFIG_USER_ONLY) 1616 if (env->hflags & HF_SVMI_MASK) 1617 handle_even_inj(intno, is_int, error_code, is_hw, 0); 1618 #endif 1602 1619 #ifdef TARGET_X86_64 1603 1620 if (env->hflags & HF_LMA_MASK) { … … 1620 1637 } 1621 1638 } else { 1639 #if !defined(CONFIG_USER_ONLY) 1640 if (env->hflags & HF_SVMI_MASK) 1641 handle_even_inj(intno, is_int, error_code, is_hw, 1); 1642 #endif 1622 
1643 do_interrupt_real(intno, is_int, error_code, next_eip); 1623 1644 } 1645 1646 #if !defined(CONFIG_USER_ONLY) 1647 if (env->hflags & HF_SVMI_MASK) { 1648 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)); 1649 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID); 1650 } 1651 #endif 1624 1652 } 1625 1653 … … 2917 2945 EIP = offset; 2918 2946 } 2919 #ifdef USE_KQEMU2947 #ifdef CONFIG_KQEMU 2920 2948 if (kqemu_is_ok(env)) { 2921 2949 env->exception_index = -1; … … 3298 3326 } 3299 3327 env->hflags2 &= ~HF2_NMI_MASK; 3300 #ifdef USE_KQEMU3328 #ifdef CONFIG_KQEMU 3301 3329 if (kqemu_is_ok(env)) { 3302 3330 CC_OP = CC_OP_EFLAGS; … … 3310 3338 { 3311 3339 helper_ret_protected(shift, 0, addend); 3312 #ifdef USE_KQEMU3340 #ifdef CONFIG_KQEMU 3313 3341 if (kqemu_is_ok(env)) { 3314 3342 env->exception_index = -1; … … 3388 3416 ESP = ECX; 3389 3417 EIP = EDX; 3390 #ifdef USE_KQEMU3418 #ifdef CONFIG_KQEMU 3391 3419 if (kqemu_is_ok(env)) { 3392 3420 env->exception_index = -1; … … 3669 3697 env->mtrr_deftype = val; 3670 3698 break; 3699 case MSR_MCG_STATUS: 3700 env->mcg_status = val; 3701 break; 3702 case MSR_MCG_CTL: 3703 if ((env->mcg_cap & MCG_CTL_P) 3704 && (val == 0 || val == ~(uint64_t)0)) 3705 env->mcg_ctl = val; 3706 break; 3671 3707 # endif /* !VBOX */ 3672 3708 default: 3673 3709 # ifndef VBOX 3710 if ((uint32_t)ECX >= MSR_MC0_CTL 3711 && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) { 3712 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL; 3713 if ((offset & 0x3) != 0 3714 || (val == 0 || val == ~(uint64_t)0)) 3715 env->mce_banks[offset] = val; 3716 break; 3717 } 3674 3718 /* XXX: exception ? */ 3675 3719 # endif … … 3745 3789 break; 3746 3790 #endif 3747 #ifdef USE_KQEMU3791 #ifdef CONFIG_KQEMU 3748 3792 case MSR_QPI_COMMBASE: 3749 3793 if (env->kqemu_enabled) { … … 3802 3846 val = 0; 3803 3847 break; 3848 case MSR_MCG_CAP: 3849 val = env->mcg_cap; 3850 break; 3851 case MSR_MCG_CTL: 3852 if (env->mcg_cap & MCG_CTL_P) 3853 val = env->mcg_ctl; 3854 else 3855 val = 0; 3856 break; 3857 case MSR_MCG_STATUS: 3858 val = env->mcg_status; 3859 break; 3804 3860 # endif /* !VBOX */ 3805 3861 default: 3806 3862 # ifndef VBOX 3863 if ((uint32_t)ECX >= MSR_MC0_CTL 3864 && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) { 3865 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL; 3866 val = env->mce_banks[offset]; 3867 break; 3868 } 3807 3869 /* XXX: exception ? 
*/ 3808 3870 val = 0; … … 5313 5375 } 5314 5376 5377 void helper_reset_rf(void) 5378 { 5379 env->eflags &= ~RF_MASK; 5380 } 5381 5315 5382 void helper_raise_interrupt(int intno, int next_eip_addend) 5316 5383 { … … 6221 6288 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR; 6222 6289 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)); 6223 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);6224 6290 6225 6291 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err); … … 6563 6629 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1); 6564 6630 6631 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info), 6632 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj))); 6633 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err), 6634 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err))); 6635 6565 6636 env->hflags2 &= ~HF2_GIF_MASK; 6566 6637 /* FIXME: Resets the current ASID register to zero (host ASID). */ -
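The exeption_has_error_code() helper factored out above encodes a fixed architectural fact. For reference, these are the x86 exception vectors that push an error code (a standalone sketch; vector numbers per the Intel SDM):

/* Vectors that push an error code, as hard-coded in exeption_has_error_code()
 * above (#DF pushes a constant 0). */
const int vectors_with_error_code[] = {
    8,   /* #DF double fault */
    10,  /* #TS invalid TSS */
    11,  /* #NP segment not present */
    12,  /* #SS stack-segment fault */
    13,  /* #GP general protection */
    14,  /* #PF page fault */
    17,  /* #AC alignment check */
};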
trunk/src/recompiler/target-i386/ops_sse.h
r36170 r36175 16 16 * 17 17 * You should have received a copy of the GNU Lesser General Public 18 * License along with this library; if not, write to the Free Software 19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA 18 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 20 19 */ 21 20 … … 31 30 #if SHIFT == 0 32 31 #define Reg MMXReg 33 #define XMM_ONLY( x...)32 #define XMM_ONLY(...) 34 33 #define B(n) MMX_B(n) 35 34 #define W(n) MMX_W(n) … … 39 38 #else 40 39 #define Reg XMMReg 41 #define XMM_ONLY( x...) x40 #define XMM_ONLY(...) __VA_ARGS__ 42 41 #define B(n) XMM_B(n) 43 42 #define W(n) XMM_W(n) -
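The XMM_ONLY() change above replaces GCC's named-variadic 'x...' extension with ISO C99 __VA_ARGS__, which non-GNU compilers also accept. A minimal standalone example of the C99 form (hypothetical macro use, not from the changeset):

#include <stdio.h>

#define XMM_ONLY(...) __VA_ARGS__   /* ISO C99; replaces the GNU 'x...' form */

int main(void)
{
    /* The variadic form matters because the expansion may contain commas. */
    int v[] = { XMM_ONLY(1, 2, 3) };
    printf("%d %d %d\n", v[0], v[1], v[2]);
    return 0;
}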
trunk/src/recompiler/target-i386/ops_sse_header.h
r36170 r36175 15 15 * 16 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, write to the Free Software 18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 19 18 */ 20 19 -
trunk/src/recompiler/target-i386/translate.c
r36171 r36175 15 15 * 16 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, write to the Free Software 18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 19 18 */ 20 19 … … 35 34 #include <inttypes.h> 36 35 #include <signal.h> 37 #include <assert.h>38 36 #endif /* !VBOX */ 39 37 … … 55 53 #ifdef TARGET_X86_64 56 54 #define X86_64_ONLY(x) x 57 #define X86_64_DEF( x...) x55 #define X86_64_DEF(...) __VA_ARGS__ 58 56 #define CODE64(s) ((s)->code64) 59 57 #define REX_X(s) ((s)->rex_x) … … 65 63 #else 66 64 #define X86_64_ONLY(x) NULL 67 #define X86_64_DEF( x...)65 #define X86_64_DEF(...) 68 66 #define CODE64(s) 0 69 67 #define REX_X(s) 0 … … 1727 1725 } 1728 1726 1729 /* XXX: add faster immediate case */1730 1727 static void gen_rot_rm_T1(DisasContext *s, int ot, int op1, 1731 1728 int is_right) … … 1824 1821 } 1825 1822 1823 static void gen_rot_rm_im(DisasContext *s, int ot, int op1, int op2, 1824 int is_right) 1825 { 1826 int mask; 1827 int data_bits; 1828 TCGv t0, t1, a0; 1829 1830 /* XXX: inefficient, but we must use local temps */ 1831 t0 = tcg_temp_local_new(); 1832 t1 = tcg_temp_local_new(); 1833 a0 = tcg_temp_local_new(); 1834 1835 if (ot == OT_QUAD) 1836 mask = 0x3f; 1837 else 1838 mask = 0x1f; 1839 1840 /* load */ 1841 if (op1 == OR_TMP0) { 1842 tcg_gen_mov_tl(a0, cpu_A0); 1843 gen_op_ld_v(ot + s->mem_index, t0, a0); 1844 } else { 1845 gen_op_mov_v_reg(ot, t0, op1); 1846 } 1847 1848 gen_extu(ot, t0); 1849 tcg_gen_mov_tl(t1, t0); 1850 1851 op2 &= mask; 1852 data_bits = 8 << ot; 1853 if (op2 != 0) { 1854 int shift = op2 & ((1 << (3 + ot)) - 1); 1855 if (is_right) { 1856 tcg_gen_shri_tl(cpu_tmp4, t0, shift); 1857 tcg_gen_shli_tl(t0, t0, data_bits - shift); 1858 } 1859 else { 1860 tcg_gen_shli_tl(cpu_tmp4, t0, shift); 1861 tcg_gen_shri_tl(t0, t0, data_bits - shift); 1862 } 1863 tcg_gen_or_tl(t0, t0, cpu_tmp4); 1864 } 1865 1866 /* store */ 1867 if (op1 == OR_TMP0) { 1868 gen_op_st_v(ot + s->mem_index, t0, a0); 1869 } else { 1870 gen_op_mov_reg_v(ot, op1, t0); 1871 } 1872 1873 if (op2 != 0) { 1874 /* update eflags */ 1875 if (s->cc_op != CC_OP_DYNAMIC) 1876 gen_op_set_cc_op(s->cc_op); 1877 1878 gen_compute_eflags(cpu_cc_src); 1879 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C)); 1880 tcg_gen_xor_tl(cpu_tmp0, t1, t0); 1881 tcg_gen_lshift(cpu_tmp0, cpu_tmp0, 11 - (data_bits - 1)); 1882 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_O); 1883 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0); 1884 if (is_right) { 1885 tcg_gen_shri_tl(t0, t0, data_bits - 1); 1886 } 1887 tcg_gen_andi_tl(t0, t0, CC_C); 1888 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0); 1889 1890 tcg_gen_discard_tl(cpu_cc_dst); 1891 tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS); 1892 s->cc_op = CC_OP_EFLAGS; 1893 } 1894 1895 tcg_temp_free(t0); 1896 tcg_temp_free(t1); 1897 tcg_temp_free(a0); 1898 } 1899 1826 1900 /* XXX: add faster immediate = 1 case */ 1827 1901 static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1, … … 2038 2112 { 2039 2113 switch(op) { 2114 case OP_ROL: 2115 gen_rot_rm_im(s1, ot, d, c, 0); 2116 break; 2117 case OP_ROR: 2118 gen_rot_rm_im(s1, ot, d, c, 1); 2119 break; 2040 2120 case OP_SHL: 2041 2121 case OP_SHL1: … … 2816 2896 if (s->tb->flags & HF_INHIBIT_IRQ_MASK) { 2817 2897 gen_helper_reset_inhibit_irq(); 2898 } 2899 if (s->tb->flags & HF_RF_MASK) { 2900 gen_helper_reset_rf(); 2818 2901 } 2819 2902 if ( s->singlestep_enabled … … 7058 7141 
if (dflag == 2) { 7059 7142 gen_op_mov_TN_reg(OT_QUAD, 0, reg); 7060 tcg_gen_bswap _i64(cpu_T[0], cpu_T[0]);7143 tcg_gen_bswap64_i64(cpu_T[0], cpu_T[0]); 7061 7144 gen_op_mov_reg_T0(OT_QUAD, reg); 7062 7145 } else 7063 { 7064 TCGv_i32 tmp0; 7065 gen_op_mov_TN_reg(OT_LONG, 0, reg); 7066 7067 tmp0 = tcg_temp_new_i32(); 7068 tcg_gen_trunc_i64_i32(tmp0, cpu_T[0]); 7069 tcg_gen_bswap_i32(tmp0, tmp0); 7070 tcg_gen_extu_i32_i64(cpu_T[0], tmp0); 7071 gen_op_mov_reg_T0(OT_LONG, reg); 7072 } 7073 #else 7146 #endif 7074 7147 { 7075 7148 gen_op_mov_TN_reg(OT_LONG, 0, reg); 7076 tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); 7149 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]); 7150 tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]); 7077 7151 gen_op_mov_reg_T0(OT_LONG, reg); 7078 7152 } 7079 #endif7080 7153 break; 7081 7154 case 0xd6: /* salc */ … … 7997 8070 int num_insns; 7998 8071 int max_insns; 8072 #ifdef VBOX 8073 int const singlestep = env->state & CPU_EMULATE_SINGLE_STEP; 8074 #endif 7999 8075 8000 8076 /* generate intermediate code */ … … 8089 8165 if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) { 8090 8166 TAILQ_FOREACH(bp, &env->breakpoints, entry) { 8091 if (bp->pc == pc_ptr) { 8167 if (bp->pc == pc_ptr && 8168 !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) { 8092 8169 gen_debug(dc, pc_ptr - dc->cs_base); 8093 8170 break; … … 8149 8226 (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) || 8150 8227 num_insns >= max_insns) { 8228 gen_jmp_im(pc_ptr - dc->cs_base); 8229 gen_eob(dc); 8230 break; 8231 } 8232 if (singlestep) { 8151 8233 gen_jmp_im(pc_ptr - dc->cs_base); 8152 8234 gen_eob(dc); -
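The new gen_rot_rm_im() above open-codes a rotate as two shifts and an OR, masking the count first. A plain-C model of the same computation (an illustrative sketch, not TCG code):

#include <stdint.h>

/* Same computation as the shri/shli/or sequence in gen_rot_rm_im():
 * mask the count, then combine the two partial shifts. */
static uint32_t rotl32(uint32_t x, unsigned count)
{
    unsigned shift = count & 31;   /* 'op2 &= mask' in the hunk */
    if (shift == 0)                /* the hunk emits nothing for a 0 count */
        return x;
    return (x << shift) | (x >> (32 - shift));
}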
trunk/src/recompiler/tcg/README
r36170 r36175 264 264 8, 16 or 32 bit sign/zero extension (both operands must have the same type) 265 265 266 * bswap16_i32 t0, t1 267 268 16 bit byte swap on a 32 bit value. The two high order bytes must be set 269 to zero. 270 271 * bswap_i32 t0, t1 272 273 32 bit byte swap 274 275 * bswap_i64 t0, t1 266 * bswap16_i32/i64 t0, t1 267 268 16 bit byte swap on a 32/64 bit value. The two/six high order bytes must be 269 set to zero. 270 271 * bswap32_i32/i64 t0, t1 272 273 32 bit byte swap on a 32/64 bit value. With a 64 bit value, the four high 274 order bytes must be set to zero. 275 276 * bswap64_i64 t0, t1 276 277 277 278 64 bit byte swap -
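A plain-C model of the bswap16 contract documented above, assuming the caller has already zeroed the high-order bytes (sketch only):

#include <stdint.h>

/* Model of bswap16_i32: caller guarantees (x & 0xffff0000) == 0,
 * so only the low two bytes are exchanged. */
static uint32_t model_bswap16_i32(uint32_t x)
{
    return ((x & 0x00ffu) << 8) | ((x & 0xff00u) >> 8);
}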
trunk/src/recompiler/tcg/TODO
r14542 r36175 1 - Add new instructions such as: andnot, ror, rol, setcond, clz, ctz, 2 popcnt. 1 - Add new instructions such as: setcond, clz, ctz, popcnt. 3 2 4 - See if it is worth exporting mul2, mulu2, div2, divu2. 3 - See if it is worth exporting mul2, mulu2, div2, divu2. 5 4 6 5 - Support of globals saved in fixed registers between TBs. -
trunk/src/recompiler/tcg/i386/tcg-target.c
r36170 r36175 175 175 #define ARITH_CMP 7 176 176 177 #define SHIFT_ROL 0 178 #define SHIFT_ROR 1 177 179 #define SHIFT_SHL 4 178 180 #define SHIFT_SHR 5 … … 1205 1207 c = SHIFT_SAR; 1206 1208 goto gen_shift32; 1209 case INDEX_op_rotl_i32: 1210 c = SHIFT_ROL; 1211 goto gen_shift32; 1212 case INDEX_op_rotr_i32: 1213 c = SHIFT_ROR; 1214 goto gen_shift32; 1207 1215 1208 1216 case INDEX_op_add2_i32: … … 1231 1239 case INDEX_op_brcond2_i32: 1232 1240 tcg_out_brcond2(s, args, const_args); 1241 break; 1242 1243 case INDEX_op_bswap16_i32: 1244 tcg_out8(s, 0x66); 1245 tcg_out_modrm(s, 0xc1, SHIFT_ROL, args[0]); 1246 tcg_out8(s, 8); 1247 break; 1248 case INDEX_op_bswap32_i32: 1249 tcg_out_opc(s, (0xc8 + args[0]) | P_EXT); 1250 break; 1251 1252 case INDEX_op_neg_i32: 1253 tcg_out_modrm(s, 0xf7, 3, args[0]); 1254 break; 1255 1256 case INDEX_op_not_i32: 1257 tcg_out_modrm(s, 0xf7, 2, args[0]); 1258 break; 1259 1260 case INDEX_op_ext8s_i32: 1261 tcg_out_modrm(s, 0xbe | P_EXT, args[0], args[1]); 1262 break; 1263 case INDEX_op_ext16s_i32: 1264 tcg_out_modrm(s, 0xbf | P_EXT, args[0], args[1]); 1233 1265 break; 1234 1266 … … 1300 1332 { INDEX_op_shr_i32, { "r", "0", "ci" } }, 1301 1333 { INDEX_op_sar_i32, { "r", "0", "ci" } }, 1334 { INDEX_op_sar_i32, { "r", "0", "ci" } }, 1335 { INDEX_op_rotl_i32, { "r", "0", "ci" } }, 1336 { INDEX_op_rotr_i32, { "r", "0", "ci" } }, 1302 1337 1303 1338 { INDEX_op_brcond_i32, { "r", "ri" } }, … … 1306 1341 { INDEX_op_sub2_i32, { "r", "r", "0", "1", "ri", "ri" } }, 1307 1342 { INDEX_op_brcond2_i32, { "r", "r", "ri", "ri" } }, 1343 1344 { INDEX_op_bswap16_i32, { "r", "0" } }, 1345 { INDEX_op_bswap32_i32, { "r", "0" } }, 1346 1347 { INDEX_op_neg_i32, { "r", "0" } }, 1348 1349 { INDEX_op_not_i32, { "r", "0" } }, 1350 1351 { INDEX_op_ext8s_i32, { "r", "q" } }, 1352 { INDEX_op_ext16s_i32, { "r", "r" } }, 1308 1353 1309 1354 #if TARGET_LONG_BITS == 32 -
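For reference, the byte sequences the new backend cases above emit, hand-assembled here for EAX/AX (illustrative only): ROL is group-2 opcode C1 with reg field 0, ROR uses reg field 1, and BSWAP is 0F C8+r.

static const unsigned char rol_eax_imm8[] = { 0xc1, 0xc0, 0x08 };       /* rol eax, 8 (C1 /0 ib) */
static const unsigned char ror_eax_imm8[] = { 0xc1, 0xc8, 0x08 };       /* ror eax, 8 (C1 /1 ib) */
static const unsigned char bswap16_ax[]   = { 0x66, 0xc1, 0xc0, 0x08 }; /* bswap16: rol ax, 8 */
static const unsigned char bswap_eax[]    = { 0x0f, 0xc8 };             /* bswap eax (0F C8+r) */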
trunk/src/recompiler/tcg/i386/tcg-target.h
r36170 r36175 45 45 #define TCG_TARGET_CALL_STACK_OFFSET 0 46 46 47 /* optional instructions */ 48 #define TCG_TARGET_HAS_bswap16_i32 49 #define TCG_TARGET_HAS_bswap32_i32 50 #define TCG_TARGET_HAS_neg_i32 51 #define TCG_TARGET_HAS_not_i32 52 #define TCG_TARGET_HAS_ext8s_i32 53 #define TCG_TARGET_HAS_ext16s_i32 54 #define TCG_TARGET_HAS_rot_i32 55 47 56 /* Note: must be synced with dyngen-exec.h */ 48 57 #ifndef VBOX … … 50 59 #define TCG_AREG1 TCG_REG_EBX 51 60 #define TCG_AREG2 TCG_REG_ESI 52 #define TCG_AREG3 TCG_REG_EDI53 61 #else 54 62 # define TCG_AREG0 TCG_REG_ESI -
trunk/src/recompiler/tcg/tcg-op.h
r36170 r36175 319 319 static inline void tcg_gen_mov_i32(TCGv_i32 ret, TCGv_i32 arg) 320 320 { 321 if ( GET_TCGV_I32(ret) != GET_TCGV_I32(arg))321 if (!TCGV_EQUAL_I32(ret, arg)) 322 322 tcg_gen_op2_i32(INDEX_op_mov_i32, ret, arg); 323 323 } … … 437 437 static inline void tcg_gen_and_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) 438 438 { 439 tcg_gen_op3_i32(INDEX_op_and_i32, ret, arg1, arg2); 439 if (TCGV_EQUAL_I32(arg1, arg2)) { 440 tcg_gen_mov_i32(ret, arg1); 441 } else { 442 tcg_gen_op3_i32(INDEX_op_and_i32, ret, arg1, arg2); 443 } 440 444 } 441 445 … … 456 460 static inline void tcg_gen_or_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) 457 461 { 458 tcg_gen_op3_i32(INDEX_op_or_i32, ret, arg1, arg2); 462 if (TCGV_EQUAL_I32(arg1, arg2)) { 463 tcg_gen_mov_i32(ret, arg1); 464 } else { 465 tcg_gen_op3_i32(INDEX_op_or_i32, ret, arg1, arg2); 466 } 459 467 } 460 468 … … 475 483 static inline void tcg_gen_xor_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) 476 484 { 477 tcg_gen_op3_i32(INDEX_op_xor_i32, ret, arg1, arg2); 485 if (TCGV_EQUAL_I32(arg1, arg2)) { 486 tcg_gen_movi_i32(ret, 0); 487 } else { 488 tcg_gen_op3_i32(INDEX_op_xor_i32, ret, arg1, arg2); 489 } 478 490 } 479 491 … … 626 638 static inline void tcg_gen_mov_i64(TCGv_i64 ret, TCGv_i64 arg) 627 639 { 628 if ( GET_TCGV_I64(ret) != GET_TCGV_I64(arg)) {640 if (!TCGV_EQUAL_I64(ret, arg)) { 629 641 tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg)); 630 642 tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg)); … … 859 871 static inline void tcg_gen_mov_i64(TCGv_i64 ret, TCGv_i64 arg) 860 872 { 861 if ( GET_TCGV_I64(ret) != GET_TCGV_I64(arg))873 if (!TCGV_EQUAL_I64(ret, arg)) 862 874 tcg_gen_op2_i64(INDEX_op_mov_i64, ret, arg); 863 875 } … … 944 956 static inline void tcg_gen_and_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) 945 957 { 946 tcg_gen_op3_i64(INDEX_op_and_i64, ret, arg1, arg2); 958 if (TCGV_EQUAL_I64(arg1, arg2)) { 959 tcg_gen_mov_i64(ret, arg1); 960 } else { 961 tcg_gen_op3_i64(INDEX_op_and_i64, ret, arg1, arg2); 962 } 947 963 } 948 964 … … 956 972 static inline void tcg_gen_or_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) 957 973 { 958 tcg_gen_op3_i64(INDEX_op_or_i64, ret, arg1, arg2); 974 if (TCGV_EQUAL_I64(arg1, arg2)) { 975 tcg_gen_mov_i64(ret, arg1); 976 } else { 977 tcg_gen_op3_i64(INDEX_op_or_i64, ret, arg1, arg2); 978 } 959 979 } 960 980 … … 968 988 static inline void tcg_gen_xor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) 969 989 { 970 tcg_gen_op3_i64(INDEX_op_xor_i64, ret, arg1, arg2); 990 if (TCGV_EQUAL_I64(arg1, arg2)) { 991 tcg_gen_movi_i64(ret, 0); 992 } else { 993 tcg_gen_op3_i64(INDEX_op_xor_i64, ret, arg1, arg2); 994 } 971 995 } 972 996 … … 1184 1208 tcg_gen_op2_i32(INDEX_op_bswap16_i32, ret, arg); 1185 1209 #else 1186 TCGv_i32 t0, t1; 1187 t0 = tcg_temp_new_i32(); 1188 t1 = tcg_temp_new_i32(); 1189 1190 tcg_gen_shri_i32(t0, arg, 8); 1191 tcg_gen_andi_i32(t1, arg, 0x000000ff); 1192 tcg_gen_shli_i32(t1, t1, 8); 1193 tcg_gen_or_i32(ret, t0, t1); 1210 TCGv_i32 t0 = tcg_temp_new_i32(); 1211 1212 tcg_gen_ext8u_i32(t0, arg); 1213 tcg_gen_shli_i32(t0, t0, 8); 1214 tcg_gen_shri_i32(ret, arg, 8); 1215 tcg_gen_or_i32(ret, ret, t0); 1194 1216 tcg_temp_free_i32(t0); 1195 tcg_temp_free_i32(t1); 1196 #endif 1197 } 1198 1199 static inline void tcg_gen_bswap_i32(TCGv_i32 ret, TCGv_i32 arg) 1200 { 1201 #ifdef TCG_TARGET_HAS_bswap_i32 1202 tcg_gen_op2_i32(INDEX_op_bswap_i32, ret, arg); 1217 #endif 1218 } 1219 1220 static inline void tcg_gen_bswap32_i32(TCGv_i32 ret, TCGv_i32 arg) 1221 { 1222 #ifdef TCG_TARGET_HAS_bswap32_i32 
1223 tcg_gen_op2_i32(INDEX_op_bswap32_i32, ret, arg); 1203 1224 #else 1204 1225 TCGv_i32 t0, t1; … … 1277 1298 } 1278 1299 1279 static inline void tcg_gen_bswap_i64(TCGv_i64 ret, TCGv_i64 arg) 1300 /* Note: we assume the six high bytes are set to zero */ 1301 static inline void tcg_gen_bswap16_i64(TCGv_i64 ret, TCGv_i64 arg) 1302 { 1303 tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg)); 1304 tcg_gen_bswap16_i32(TCGV_LOW(ret), TCGV_LOW(arg)); 1305 } 1306 1307 /* Note: we assume the four high bytes are set to zero */ 1308 static inline void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg) 1309 { 1310 tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg)); 1311 tcg_gen_bswap32_i32(TCGV_LOW(ret), TCGV_LOW(arg)); 1312 } 1313 1314 static inline void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg) 1280 1315 { 1281 1316 TCGv_i32 t0, t1; … … 1283 1318 t1 = tcg_temp_new_i32(); 1284 1319 1285 tcg_gen_bswap _i32(t0, TCGV_LOW(arg));1286 tcg_gen_bswap _i32(t1, TCGV_HIGH(arg));1320 tcg_gen_bswap32_i32(t0, TCGV_LOW(arg)); 1321 tcg_gen_bswap32_i32(t1, TCGV_HIGH(arg)); 1287 1322 tcg_gen_mov_i32(TCGV_LOW(ret), t1); 1288 1323 tcg_gen_mov_i32(TCGV_HIGH(ret), t0); … … 1358 1393 } 1359 1394 1360 static inline void tcg_gen_bswap_i64(TCGv_i64 ret, TCGv_i64 arg) 1361 { 1362 #ifdef TCG_TARGET_HAS_bswap_i64 1363 tcg_gen_op2_i64(INDEX_op_bswap_i64, ret, arg); 1364 #else 1365 TCGv_i32 t0, t1; 1366 t0 = tcg_temp_new_i32(); 1367 t1 = tcg_temp_new_i32(); 1395 /* Note: we assume the six high bytes are set to zero */ 1396 static inline void tcg_gen_bswap16_i64(TCGv_i64 ret, TCGv_i64 arg) 1397 { 1398 #ifdef TCG_TARGET_HAS_bswap16_i64 1399 tcg_gen_op2_i64(INDEX_op_bswap16_i64, ret, arg); 1400 #else 1401 TCGv_i64 t0 = tcg_temp_new_i64(); 1402 1403 tcg_gen_ext8u_i64(t0, arg); 1404 tcg_gen_shli_i64(t0, t0, 8); 1405 tcg_gen_shri_i64(ret, arg, 8); 1406 tcg_gen_or_i64(ret, ret, t0); 1407 tcg_temp_free_i64(t0); 1408 #endif 1409 } 1410 1411 /* Note: we assume the four high bytes are set to zero */ 1412 static inline void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg) 1413 { 1414 #ifdef TCG_TARGET_HAS_bswap32_i64 1415 tcg_gen_op2_i64(INDEX_op_bswap32_i64, ret, arg); 1416 #else 1417 TCGv_i64 t0, t1; 1418 t0 = tcg_temp_new_i64(); 1419 t1 = tcg_temp_new_i64(); 1420 1421 tcg_gen_shli_i64(t0, arg, 24); 1422 tcg_gen_ext32u_i64(t0, t0); 1423 1424 tcg_gen_andi_i64(t1, arg, 0x0000ff00); 1425 tcg_gen_shli_i64(t1, t1, 8); 1426 tcg_gen_or_i64(t0, t0, t1); 1427 1428 tcg_gen_shri_i64(t1, arg, 8); 1429 tcg_gen_andi_i64(t1, t1, 0x0000ff00); 1430 tcg_gen_or_i64(t0, t0, t1); 1431 1432 tcg_gen_shri_i64(t1, arg, 24); 1433 tcg_gen_or_i64(ret, t0, t1); 1434 tcg_temp_free_i64(t0); 1435 tcg_temp_free_i64(t1); 1436 #endif 1437 } 1438 1439 static inline void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg) 1440 { 1441 #ifdef TCG_TARGET_HAS_bswap64_i64 1442 tcg_gen_op2_i64(INDEX_op_bswap64_i64, ret, arg); 1443 #else 1444 TCGv_i64 t0 = tcg_temp_new_i64(); 1445 TCGv_i64 t1 = tcg_temp_new_i64(); 1368 1446 1369 1447 tcg_gen_shli_i64(t0, arg, 56); … … 1395 1473 tcg_gen_shri_i64(t1, arg, 56); 1396 1474 tcg_gen_or_i64(ret, t0, t1); 1397 tcg_temp_free_i 32(t0);1398 tcg_temp_free_i 32(t1);1475 tcg_temp_free_i64(t0); 1476 tcg_temp_free_i64(t1); 1399 1477 #endif 1400 1478 } … … 1426 1504 static inline void tcg_gen_not_i32(TCGv_i32 ret, TCGv_i32 arg) 1427 1505 { 1506 #ifdef TCG_TARGET_HAS_not_i32 1507 tcg_gen_op2_i32(INDEX_op_not_i32, ret, arg); 1508 #else 1428 1509 tcg_gen_xori_i32(ret, arg, -1); 1510 #endif 1429 1511 } 1430 1512 1431 1513 static inline void 
tcg_gen_not_i64(TCGv_i64 ret, TCGv_i64 arg) 1432 1514 { 1515 #ifdef TCG_TARGET_HAS_not_i64 1516 tcg_gen_op2_i64(INDEX_op_not_i64, ret, arg); 1517 #else 1433 1518 tcg_gen_xori_i64(ret, arg, -1); 1519 #endif 1434 1520 } 1435 1521 … … 1502 1588 static inline void tcg_gen_eqv_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) 1503 1589 { 1504 TCGv_i32 t0; 1505 t0 = tcg_temp_new_i32(); 1506 tcg_gen_xor_i32(t0, arg1, arg2); 1507 tcg_gen_not_i32(ret, t0); 1508 tcg_temp_free_i32(t0); 1590 tcg_gen_xor_i32(ret, arg1, arg2); 1591 tcg_gen_not_i32(ret, ret); 1509 1592 } 1510 1593 1511 1594 static inline void tcg_gen_eqv_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) 1512 1595 { 1513 TCGv_i64 t0; 1514 t0 = tcg_temp_new_i64(); 1515 tcg_gen_xor_i64(t0, arg1, arg2); 1516 tcg_gen_not_i64(ret, t0); 1517 tcg_temp_free_i64(t0); 1596 tcg_gen_xor_i64(ret, arg1, arg2); 1597 tcg_gen_not_i64(ret, ret); 1518 1598 } 1519 1599 1520 1600 static inline void tcg_gen_nand_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) 1521 1601 { 1522 TCGv_i32 t0; 1523 t0 = tcg_temp_new_i32(); 1524 tcg_gen_and_i32(t0, arg1, arg2); 1525 tcg_gen_not_i32(ret, t0); 1526 tcg_temp_free_i32(t0); 1602 tcg_gen_and_i32(ret, arg1, arg2); 1603 tcg_gen_not_i32(ret, ret); 1527 1604 } 1528 1605 1529 1606 static inline void tcg_gen_nand_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) 1530 1607 { 1531 TCGv_i64 t0; 1532 t0 = tcg_temp_new_i64(); 1533 tcg_gen_and_i64(t0, arg1, arg2); 1534 tcg_gen_not_i64(ret, t0); 1535 tcg_temp_free_i64(t0); 1608 tcg_gen_and_i64(ret, arg1, arg2); 1609 tcg_gen_not_i64(ret, ret); 1536 1610 } 1537 1611 1538 1612 static inline void tcg_gen_nor_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) 1539 1613 { 1540 TCGv_i32 t0; 1541 t0 = tcg_temp_new_i32(); 1542 tcg_gen_or_i32(t0, arg1, arg2); 1543 tcg_gen_not_i32(ret, t0); 1544 tcg_temp_free_i32(t0); 1614 tcg_gen_or_i32(ret, arg1, arg2); 1615 tcg_gen_not_i32(ret, ret); 1545 1616 } 1546 1617 1547 1618 static inline void tcg_gen_nor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) 1548 1619 { 1549 TCGv_i64 t0; 1550 t0 = tcg_temp_new_i64(); 1551 tcg_gen_or_i64(t0, arg1, arg2); 1552 tcg_gen_not_i64(ret, t0); 1553 tcg_temp_free_i64(t0); 1620 tcg_gen_or_i64(ret, arg1, arg2); 1621 tcg_gen_not_i64(ret, ret); 1554 1622 } 1555 1623 … … 1574 1642 static inline void tcg_gen_rotl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) 1575 1643 { 1644 #ifdef TCG_TARGET_HAS_rot_i32 1645 tcg_gen_op3_i32(INDEX_op_rotl_i32, ret, arg1, arg2); 1646 #else 1576 1647 TCGv_i32 t0, t1; 1577 1648 … … 1584 1655 tcg_temp_free_i32(t0); 1585 1656 tcg_temp_free_i32(t1); 1657 #endif 1586 1658 } 1587 1659 1588 1660 static inline void tcg_gen_rotl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) 1589 1661 { 1662 #ifdef TCG_TARGET_HAS_rot_i64 1663 tcg_gen_op3_i64(INDEX_op_rotl_i64, ret, arg1, arg2); 1664 #else 1590 1665 TCGv_i64 t0, t1; 1591 1666 … … 1598 1673 tcg_temp_free_i64(t0); 1599 1674 tcg_temp_free_i64(t1); 1675 #endif 1600 1676 } 1601 1677 … … 1606 1682 tcg_gen_mov_i32(ret, arg1); 1607 1683 } else { 1684 #ifdef TCG_TARGET_HAS_rot_i32 1685 TCGv_i32 t0 = tcg_const_i32(arg2); 1686 tcg_gen_rotl_i32(ret, arg1, t0); 1687 tcg_temp_free_i32(t0); 1688 #else 1608 1689 TCGv_i32 t0, t1; 1609 1690 t0 = tcg_temp_new_i32(); … … 1614 1695 tcg_temp_free_i32(t0); 1615 1696 tcg_temp_free_i32(t1); 1697 #endif 1616 1698 } 1617 1699 } … … 1623 1705 tcg_gen_mov_i64(ret, arg1); 1624 1706 } else { 1707 #ifdef TCG_TARGET_HAS_rot_i64 1708 TCGv_i64 t0 = tcg_const_i64(arg2); 1709 tcg_gen_rotl_i64(ret, arg1, t0); 1710 tcg_temp_free_i64(t0); 1711 
#else 1625 1712 TCGv_i64 t0, t1; 1626 1713 t0 = tcg_temp_new_i64(); … … 1631 1718 tcg_temp_free_i64(t0); 1632 1719 tcg_temp_free_i64(t1); 1720 #endif 1633 1721 } 1634 1722 } … … 1636 1724 static inline void tcg_gen_rotr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) 1637 1725 { 1726 #ifdef TCG_TARGET_HAS_rot_i32 1727 tcg_gen_op3_i32(INDEX_op_rotr_i32, ret, arg1, arg2); 1728 #else 1638 1729 TCGv_i32 t0, t1; 1639 1730 … … 1646 1737 tcg_temp_free_i32(t0); 1647 1738 tcg_temp_free_i32(t1); 1739 #endif 1648 1740 } 1649 1741 1650 1742 static inline void tcg_gen_rotr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) 1651 1743 { 1744 #ifdef TCG_TARGET_HAS_rot_i64 1745 tcg_gen_op3_i64(INDEX_op_rotr_i64, ret, arg1, arg2); 1746 #else 1652 1747 TCGv_i64 t0, t1; 1653 1748 1654 1749 t0 = tcg_temp_new_i64(); 1655 1750 t1 = tcg_temp_new_i64(); 1656 tcg_gen_sh l_i64(t0, arg1, arg2);1751 tcg_gen_shr_i64(t0, arg1, arg2); 1657 1752 tcg_gen_subfi_i64(t1, 64, arg2); 1658 1753 tcg_gen_shl_i64(t1, arg1, t1); … … 1660 1755 tcg_temp_free_i64(t0); 1661 1756 tcg_temp_free_i64(t1); 1757 #endif 1662 1758 } 1663 1759 … … 1699 1795 #define tcg_gen_qemu_ldst_op_i64 tcg_gen_qemu_ldst_op_i64_i32 1700 1796 #define TCGV_UNUSED(x) TCGV_UNUSED_I32(x) 1701 #define TCGV_EQUAL(a, b) (GET_TCGV_I32(a) == GET_TCGV_I32(b))1797 #define TCGV_EQUAL(a, b) TCGV_EQUAL_I32(a, b) 1702 1798 #else 1703 1799 #define TCGv TCGv_i64 … … 1710 1806 #define tcg_gen_qemu_ldst_op_i64 tcg_gen_qemu_ldst_op_i64_i64 1711 1807 #define TCGV_UNUSED(x) TCGV_UNUSED_I64(x) 1712 #define TCGV_EQUAL(a, b) (GET_TCGV_I64(a) == GET_TCGV_I64(b))1808 #define TCGV_EQUAL(a, b) TCGV_EQUAL_I64(a, b) 1713 1809 #endif 1714 1810 … … 1956 2052 #define tcg_gen_mul_tl tcg_gen_mul_i64 1957 2053 #define tcg_gen_muli_tl tcg_gen_muli_i64 2054 #define tcg_gen_div_tl tcg_gen_div_i64 2055 #define tcg_gen_rem_tl tcg_gen_rem_i64 2056 #define tcg_gen_divu_tl tcg_gen_divu_i64 2057 #define tcg_gen_remu_tl tcg_gen_remu_i64 1958 2058 #define tcg_gen_discard_tl tcg_gen_discard_i64 1959 2059 #define tcg_gen_trunc_tl_i32 tcg_gen_trunc_i64_i32 … … 1969 2069 #define tcg_gen_ext32u_tl tcg_gen_ext32u_i64 1970 2070 #define tcg_gen_ext32s_tl tcg_gen_ext32s_i64 2071 #define tcg_gen_bswap16_tl tcg_gen_bswap16_i64 2072 #define tcg_gen_bswap32_tl tcg_gen_bswap32_i64 2073 #define tcg_gen_bswap64_tl tcg_gen_bswap64_i64 1971 2074 #define tcg_gen_concat_tl_i64 tcg_gen_concat32_i64 1972 2075 #define tcg_gen_andc_tl tcg_gen_andc_i64 … … 2019 2122 #define tcg_gen_mul_tl tcg_gen_mul_i32 2020 2123 #define tcg_gen_muli_tl tcg_gen_muli_i32 2124 #define tcg_gen_div_tl tcg_gen_div_i32 2125 #define tcg_gen_rem_tl tcg_gen_rem_i32 2126 #define tcg_gen_divu_tl tcg_gen_divu_i32 2127 #define tcg_gen_remu_tl tcg_gen_remu_i32 2021 2128 #define tcg_gen_discard_tl tcg_gen_discard_i32 2022 2129 #define tcg_gen_trunc_tl_i32 tcg_gen_mov_i32 … … 2032 2139 #define tcg_gen_ext32u_tl tcg_gen_mov_i32 2033 2140 #define tcg_gen_ext32s_tl tcg_gen_mov_i32 2141 #define tcg_gen_bswap16_tl tcg_gen_bswap16_i32 2142 #define tcg_gen_bswap32_tl tcg_gen_bswap32_i32 2034 2143 #define tcg_gen_concat_tl_i64 tcg_gen_concat_i32_i64 2035 2144 #define tcg_gen_andc_tl tcg_gen_andc_i32 -
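The rotr_i64 hunk above fixes a real bug in the shift-based fallback: the first shift must be a right shift, not a left shift. A plain-C model of the corrected computation (sketch; assumes a non-zero, in-range count):

#include <stdint.h>

static uint64_t rotr64(uint64_t x, unsigned count)  /* 0 < count < 64 */
{
    uint64_t lo = x >> count;        /* was erroneously a left shift */
    uint64_t hi = x << (64 - count);
    return lo | hi;
}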
trunk/src/recompiler/tcg/tcg-opc.h
r36170 r36175 68 68 DEF2(or_i32, 1, 2, 0, 0) 69 69 DEF2(xor_i32, 1, 2, 0, 0) 70 /* shifts */70 /* shifts/rotates */ 71 71 DEF2(shl_i32, 1, 2, 0, 0) 72 72 DEF2(shr_i32, 1, 2, 0, 0) 73 73 DEF2(sar_i32, 1, 2, 0, 0) 74 #ifdef TCG_TARGET_HAS_rot_i32 75 DEF2(rotl_i32, 1, 2, 0, 0) 76 DEF2(rotr_i32, 1, 2, 0, 0) 77 #endif 74 78 75 79 DEF2(brcond_i32, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_SIDE_EFFECTS) … … 86 90 DEF2(ext16s_i32, 1, 1, 0, 0) 87 91 #endif 88 #ifdef TCG_TARGET_HAS_bswap_i32 89 DEF2(bswap_i32, 1, 1, 0, 0) 92 #ifdef TCG_TARGET_HAS_bswap16_i32 93 DEF2(bswap16_i32, 1, 1, 0, 0) 94 #endif 95 #ifdef TCG_TARGET_HAS_bswap32_i32 96 DEF2(bswap32_i32, 1, 1, 0, 0) 97 #endif 98 #ifdef TCG_TARGET_HAS_not_i32 99 DEF2(not_i32, 1, 1, 0, 0) 100 #endif 101 #ifdef TCG_TARGET_HAS_neg_i32 102 DEF2(neg_i32, 1, 1, 0, 0) 90 103 #endif 91 104 … … 121 134 DEF2(or_i64, 1, 2, 0, 0) 122 135 DEF2(xor_i64, 1, 2, 0, 0) 123 /* shifts */136 /* shifts/rotates */ 124 137 DEF2(shl_i64, 1, 2, 0, 0) 125 138 DEF2(shr_i64, 1, 2, 0, 0) 126 139 DEF2(sar_i64, 1, 2, 0, 0) 140 #ifdef TCG_TARGET_HAS_rot_i64 141 DEF2(rotl_i64, 1, 2, 0, 0) 142 DEF2(rotr_i64, 1, 2, 0, 0) 143 #endif 127 144 128 145 DEF2(brcond_i64, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_SIDE_EFFECTS) … … 136 153 DEF2(ext32s_i64, 1, 1, 0, 0) 137 154 #endif 138 #ifdef TCG_TARGET_HAS_bswap_i64 139 DEF2(bswap_i64, 1, 1, 0, 0) 140 #endif 141 #endif 142 #ifdef TCG_TARGET_HAS_neg_i32 143 DEF2(neg_i32, 1, 1, 0, 0) 155 #ifdef TCG_TARGET_HAS_bswap16_i64 156 DEF2(bswap16_i64, 1, 1, 0, 0) 157 #endif 158 #ifdef TCG_TARGET_HAS_bswap32_i64 159 DEF2(bswap32_i64, 1, 1, 0, 0) 160 #endif 161 #ifdef TCG_TARGET_HAS_bswap64_i64 162 DEF2(bswap64_i64, 1, 1, 0, 0) 163 #endif 164 #ifdef TCG_TARGET_HAS_not_i64 165 DEF2(not_i64, 1, 1, 0, 0) 144 166 #endif 145 167 #ifdef TCG_TARGET_HAS_neg_i64 146 168 DEF2(neg_i64, 1, 1, 0, 0) 169 #endif 147 170 #endif 148 171 -
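tcg-opc.h is an X-macro table: each DEF2(name, oargs, iargs, cargs, flags) row is expanded by whoever includes the file. A reduced, hypothetical consumer showing the pattern (the two rows are copied from the hunk above; the consumer itself is an assumption, not TCG code):

#include <stdio.h>

#define DEF2(name, oargs, iargs, cargs, flags) { #name, oargs, iargs },

struct opdef { const char *name; int oargs, iargs; };

static const struct opdef ops[] = {
    DEF2(rotl_i32, 1, 2, 0, 0)
    DEF2(rotr_i32, 1, 2, 0, 0)
};
#undef DEF2

int main(void)
{
    unsigned i;
    for (i = 0; i < sizeof(ops) / sizeof(ops[0]); i++)
        printf("%s: %d out, %d in\n", ops[i].name, ops[i].oargs, ops[i].iargs);
    return 0;
}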
trunk/src/recompiler/tcg/tcg.c
r36170 r36175 23 23 */ 24 24 25 /* define it to use liveness analysis (better code) */ 26 #define USE_LIVENESS_ANALYSIS 27 28 #include "config.h" 29 30 #ifndef DEBUG_TCG 25 31 /* define it to suppress various consistency checks (faster) */ 26 32 #define NDEBUG 27 28 /* define it to use liveness analysis (better code) */ 29 #define USE_LIVENESS_ANALYSIS 33 #endif 30 34 31 35 #ifndef VBOX 32 #include <assert.h>33 36 #include <stdarg.h> 34 37 #include <stdlib.h> … … 36 39 #include <string.h> 37 40 #include <inttypes.h> 41 #ifdef _WIN32 42 #include <malloc.h> 43 #endif 44 #ifdef _AIX 45 #include <alloca.h> 46 #endif 38 47 #else /* VBOX */ 39 48 # include <stdio.h> 40 49 # include "osdep.h" 41 50 #endif /* VBOX */ 42 #ifdef _WIN32 43 #include <malloc.h> 44 #endif 45 #ifdef _AIX 46 #include <alloca.h> 47 #endif 48 49 #include "config.h" 51 50 52 #include "qemu-common.h" 51 53 #include "cache-utils.h" … … 1157 1159 } 1158 1160 1159 /* globals are live (they may be used by the call) */ 1160 memset(dead_temps, 0, s->nb_globals); 1161 if (!(call_flags & TCG_CALL_CONST)) { 1162 /* globals are live (they may be used by the call) */ 1163 memset(dead_temps, 0, s->nb_globals); 1164 } 1161 1165 1162 1166 /* input args are live */ … … 1860 1864 /* store globals and free associated registers (we assume the call 1861 1865 can modify any global. */ 1862 save_globals(s, allocated_regs); 1866 if (!(flags & TCG_CALL_CONST)) { 1867 save_globals(s, allocated_regs); 1868 } 1863 1869 1864 1870 tcg_out_op(s, opc, &func_arg, &const_func_arg); -
trunk/src/recompiler/tcg/tcg.h
r36170 r36175 22 22 * THE SOFTWARE. 23 23 */ 24 #include "qemu-common.h" 24 25 #include "tcg-target.h" 25 26 … … 121 122 */ 122 123 123 //#define DEBUG_TCGV 1 124 #ifdef DEBUG_TCG 125 #define DEBUG_TCGV 1 126 #endif 124 127 125 128 #ifdef DEBUG_TCGV … … 154 157 #define GET_TCGV_I32(t) (t) 155 158 #define GET_TCGV_I64(t) (t) 159 156 160 #if TCG_TARGET_REG_BITS == 32 157 161 #define TCGV_LOW(t) (t) … … 160 164 161 165 #endif /* DEBUG_TCGV */ 166 167 #define TCGV_EQUAL_I32(a, b) (GET_TCGV_I32(a) == GET_TCGV_I32(b)) 168 #define TCGV_EQUAL_I64(a, b) (GET_TCGV_I64(a) == GET_TCGV_I64(b)) 162 169 163 170 /* Dummy definition to avoid compiler warnings. */ … … 171 178 #define TCG_CALL_TYPE_REGPARM_2 0x0002 /* i386 style regparm call (2 regs) */ 172 179 #define TCG_CALL_TYPE_REGPARM 0x0003 /* i386 style regparm call (3 regs) */ 173 /* A pure function only reads its arguments and globals variables and174 cannot raise exceptions. Hence a call to a pure function can be180 /* A pure function only reads its arguments and TCG global variables 181 and cannot raise exceptions. Hence a call to a pure function can be 175 182 safely suppressed if the return value is not used. */ 176 183 #define TCG_CALL_PURE 0x0010 184 /* A const function only reads its arguments and does not use TCG 185 global variables. Hence a call to such a function does not 186 save TCG global variables back to their canonical location. */ 187 #define TCG_CALL_CONST 0x0020 177 188 178 189 /* used to align parameters */ … … 482 493 #define tcg_qemu_tb_exec(tb_ptr) ((long REGPARM (*)(void *))code_gen_prologue)(tb_ptr) 483 494 # endif /* !VBOX || !GCC_WITH_BUGGY_REG_PARAM */ 484 485 #endif 495 #endif -
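A trivial standalone illustration of combining the call-flag bits defined above (flag values copied from the hunk; the codegen effect is in the tcg.c hunk, which skips save_globals() for const calls):

#include <stdio.h>

#define TCG_CALL_PURE  0x0010   /* result unused => call may be deleted */
#define TCG_CALL_CONST 0x0020   /* globals need not be written back first */

int main(void)
{
    int flags = TCG_CALL_PURE | TCG_CALL_CONST;
    printf("helper call flags: %#x\n", flags);
    return 0;
}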
trunk/src/recompiler/tcg/x86_64/tcg-target.c
r36170 r36175 45 45 46 46 static const int tcg_target_reg_alloc_order[] = { 47 TCG_REG_RDI,48 TCG_REG_RSI,49 TCG_REG_RDX,50 TCG_REG_RCX,51 TCG_REG_R8,52 TCG_REG_R9,53 TCG_REG_RAX,54 TCG_REG_R10,55 TCG_REG_R11,56 57 47 TCG_REG_RBP, 58 48 TCG_REG_RBX, … … 61 51 TCG_REG_R14, 62 52 TCG_REG_R15, 53 TCG_REG_R10, 54 TCG_REG_R11, 55 TCG_REG_R9, 56 TCG_REG_R8, 57 TCG_REG_RCX, 58 TCG_REG_RDX, 59 TCG_REG_RSI, 60 TCG_REG_RDI, 61 TCG_REG_RAX, 63 62 }; 64 63 … … 195 194 #define ARITH_CMP 7 196 195 196 #define SHIFT_ROL 0 197 #define SHIFT_ROR 1 197 198 #define SHIFT_SHL 4 198 199 #define SHIFT_SHR 5 … … 244 245 if (opc & P_EXT) 245 246 tcg_out8(s, 0x0f); 246 tcg_out8(s, opc );247 tcg_out8(s, opc & 0xff); 247 248 } 248 249 … … 1202 1203 c = SHIFT_SAR; 1203 1204 goto gen_shift32; 1205 case INDEX_op_rotl_i32: 1206 c = SHIFT_ROL; 1207 goto gen_shift32; 1208 case INDEX_op_rotr_i32: 1209 c = SHIFT_ROR; 1210 goto gen_shift32; 1204 1211 1205 1212 case INDEX_op_shl_i64: … … 1223 1230 c = SHIFT_SAR; 1224 1231 goto gen_shift64; 1232 case INDEX_op_rotl_i64: 1233 c = SHIFT_ROL; 1234 goto gen_shift64; 1235 case INDEX_op_rotr_i64: 1236 c = SHIFT_ROR; 1237 goto gen_shift64; 1225 1238 1226 1239 case INDEX_op_brcond_i32: … … 1233 1246 break; 1234 1247 1235 case INDEX_op_bswap_i32: 1248 case INDEX_op_bswap16_i32: 1249 case INDEX_op_bswap16_i64: 1250 tcg_out8(s, 0x66); 1251 tcg_out_modrm(s, 0xc1, SHIFT_ROL, args[0]); 1252 tcg_out8(s, 8); 1253 break; 1254 case INDEX_op_bswap32_i32: 1255 case INDEX_op_bswap32_i64: 1236 1256 tcg_out_opc(s, (0xc8 + (args[0] & 7)) | P_EXT, 0, args[0], 0); 1237 1257 break; 1238 case INDEX_op_bswap _i64:1258 case INDEX_op_bswap64_i64: 1239 1259 tcg_out_opc(s, (0xc8 + (args[0] & 7)) | P_EXT | P_REXW, 0, args[0], 0); 1240 1260 break; … … 1245 1265 case INDEX_op_neg_i64: 1246 1266 tcg_out_modrm(s, 0xf7 | P_REXW, 3, args[0]); 1267 break; 1268 1269 case INDEX_op_not_i32: 1270 tcg_out_modrm(s, 0xf7, 2, args[0]); 1271 break; 1272 case INDEX_op_not_i64: 1273 tcg_out_modrm(s, 0xf7 | P_REXW, 2, args[0]); 1247 1274 break; 1248 1275 … … 1383 1410 { INDEX_op_shr_i32, { "r", "0", "ci" } }, 1384 1411 { INDEX_op_sar_i32, { "r", "0", "ci" } }, 1412 { INDEX_op_rotl_i32, { "r", "0", "ci" } }, 1413 { INDEX_op_rotr_i32, { "r", "0", "ci" } }, 1385 1414 1386 1415 { INDEX_op_brcond_i32, { "r", "ri" } }, … … 1412 1441 { INDEX_op_shr_i64, { "r", "0", "ci" } }, 1413 1442 { INDEX_op_sar_i64, { "r", "0", "ci" } }, 1443 { INDEX_op_rotl_i64, { "r", "0", "ci" } }, 1444 { INDEX_op_rotr_i64, { "r", "0", "ci" } }, 1414 1445 1415 1446 { INDEX_op_brcond_i64, { "r", "re" } }, 1416 1447 1417 { INDEX_op_bswap_i32, { "r", "0" } }, 1418 { INDEX_op_bswap_i64, { "r", "0" } }, 1448 { INDEX_op_bswap16_i32, { "r", "0" } }, 1449 { INDEX_op_bswap16_i64, { "r", "0" } }, 1450 { INDEX_op_bswap32_i32, { "r", "0" } }, 1451 { INDEX_op_bswap32_i64, { "r", "0" } }, 1452 { INDEX_op_bswap64_i64, { "r", "0" } }, 1419 1453 1420 1454 { INDEX_op_neg_i32, { "r", "0" } }, 1421 1455 { INDEX_op_neg_i64, { "r", "0" } }, 1456 1457 { INDEX_op_not_i32, { "r", "0" } }, 1458 { INDEX_op_not_i64, { "r", "0" } }, 1422 1459 1423 1460 { INDEX_op_ext8s_i32, { "r", "r"} }, -
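The reordered tcg_target_reg_alloc_order above now tries callee-saved registers first, so TCG values cached in them survive calls out to helpers. For reference, the x86-64 SysV ABI split (factual ABI data, listed as a sketch):

static const char *callee_saved[] = { "rbp", "rbx", "r12", "r13", "r14", "r15" };
static const char *caller_saved[] = { "rax", "rcx", "rdx", "rsi", "rdi",
                                      "r8", "r9", "r10", "r11" };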
trunk/src/recompiler/tcg/x86_64/tcg-target.h
r36140 r36175 22 22 * THE SOFTWARE. 23 23 */ 24 25 24 #define TCG_TARGET_X86_64 1 26 25 … … 58 57 59 58 /* optional instructions */ 60 #define TCG_TARGET_HAS_bswap_i32 61 #define TCG_TARGET_HAS_bswap_i64 59 #define TCG_TARGET_HAS_bswap16_i32 60 #define TCG_TARGET_HAS_bswap16_i64 61 #define TCG_TARGET_HAS_bswap32_i32 62 #define TCG_TARGET_HAS_bswap32_i64 63 #define TCG_TARGET_HAS_bswap64_i64 62 64 #define TCG_TARGET_HAS_neg_i32 63 65 #define TCG_TARGET_HAS_neg_i64 66 #define TCG_TARGET_HAS_not_i32 67 #define TCG_TARGET_HAS_not_i64 64 68 #define TCG_TARGET_HAS_ext8s_i32 65 69 #define TCG_TARGET_HAS_ext16s_i32 … … 67 71 #define TCG_TARGET_HAS_ext16s_i64 68 72 #define TCG_TARGET_HAS_ext32s_i64 73 #define TCG_TARGET_HAS_rot_i32 74 #define TCG_TARGET_HAS_rot_i64 69 75 70 76 /* Note: must be synced with dyngen-exec.h */ … … 72 78 #define TCG_AREG1 TCG_REG_R15 73 79 #define TCG_AREG2 TCG_REG_R12 74 #define TCG_AREG3 TCG_REG_R1375 80 76 81 static inline void flush_icache_range(unsigned long start, unsigned long stop) -
trunk/src/recompiler/tests/Makefile
r36140 r36175 79 79 80 80 # NOTE: -fomit-frame-pointer is currently needed : this is a bug in libqemu 81 qruncom: qruncom.c ../i 386-user/libqemu.a81 qruncom: qruncom.c ../ioport-user.c ../i386-user/libqemu.a 82 82 $(CC) $(CFLAGS) -fomit-frame-pointer $(LDFLAGS) -I../target-i386 -I.. -I../i386-user -I../fpu \ 83 -o $@ $ <-L../i386-user -lqemu -lm83 -o $@ $(filter %.c, $^) -L../i386-user -lqemu -lm 84 84 85 85 # arm test -
trunk/src/recompiler/tests/linux-test.c
r36170 r36175 15 15 * 16 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, write to the Free Software 18 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, 19 * MA 02110-1301, USA. 17 * along with this program; if not, see <http://www.gnu.org/licenses/>. 20 18 */ 21 19 … … 76 74 } 77 75 78 #define error(fmt, args...) error1(__FILE__, __LINE__, fmt, ##args)76 #define error(fmt, ...) error1(__FILE__, __LINE__, fmt, ## __VA_ARGS__) 79 77 80 78 #define chk_error(ret) __chk_error(__FILE__, __LINE__, (ret)) -
trunk/src/recompiler/tests/qruncom.c
r36170 r36175 16 16 17 17 //#define SIGTEST 18 19 void cpu_outb(CPUState *env, int addr, int val)20 {21 fprintf(stderr, "outb: port=0x%04x, data=%02x\n", addr, val);22 }23 24 void cpu_outw(CPUState *env, int addr, int val)25 {26 fprintf(stderr, "outw: port=0x%04x, data=%04x\n", addr, val);27 }28 29 void cpu_outl(CPUState *env, int addr, int val)30 {31 fprintf(stderr, "outl: port=0x%04x, data=%08x\n", addr, val);32 }33 34 int cpu_inb(CPUState *env, int addr)35 {36 fprintf(stderr, "inb: port=0x%04x\n", addr);37 return 0;38 }39 40 int cpu_inw(CPUState *env, int addr)41 {42 fprintf(stderr, "inw: port=0x%04x\n", addr);43 return 0;44 }45 46 int cpu_inl(CPUState *env, int addr)47 {48 fprintf(stderr, "inl: port=0x%04x\n", addr);49 return 0;50 }51 18 52 19 int cpu_get_pic_interrupt(CPUState *env) -
trunk/src/recompiler/tests/test-i386.c
r36170 r36175 15 15 * 16 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, write to the Free Software 18 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, 19 * MA 02110-1301, USA. 17 * along with this program; if not, see <http://www.gnu.org/licenses/>. 20 18 */ 21 19 -
trunk/src/recompiler/tests/test-mmap.c
r36170 r36175 20 20 * 21 21 * You should have received a copy of the GNU General Public License 22 * along with this program; if not, write to the Free Software 23 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, 24 * MA 02110-1301, USA. 22 * along with this program; if not, see <http://www.gnu.org/licenses/>. 25 23 */ 26 24 -
trunk/src/recompiler/translate-all.c
r36170 r36175 15 15 * 16 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, write to the Free Software 18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 19 18 */ 20 19