Changeset 36170 in vbox for trunk/src/recompiler/cpu-exec.c
Timestamp: Mar 4, 2011 12:49:02 PM
File: trunk/src/recompiler/cpu-exec.c (1 edited)
Legend: unchanged lines are prefixed with a space, added lines with "+", removed lines with "-".
--- trunk/src/recompiler/cpu-exec.c    (r36140)
+++ trunk/src/recompiler/cpu-exec.c    (r36170)
@@ -16,4 +16,4 @@
  * You should have received a copy of the GNU Lesser General Public
  * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
  */
@@ -33,4 +33,5 @@
 #include "disas.h"
 #include "tcg.h"
+#include "kvm.h"
 
 #if !defined(CONFIG_SOFTMMU)
@@ -45,4 +46,6 @@
 #undef EIP
 #include <signal.h>
+#ifdef __linux__
 #include <sys/ucontext.h>
+#endif
 #endif
@@ -67,8 +70,4 @@
 }
 
-#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
-#define reg_T2
-#endif
-
 /* exit the current TB from a signal handler. The host registers are
    restored in a state compatible with the CPU emulator
@@ -77,4 +76,8 @@
 {
 #if !defined(CONFIG_SOFTMMU)
+#ifdef __linux__
     struct ucontext *uc = puc;
+#elif defined(__OpenBSD__)
+    struct sigcontext *uc = puc;
+#endif
 #endif
@@ -87,6 +90,11 @@
     if (puc) {
         /* XXX: use siglongjmp ? */
+#ifdef __linux__
         sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
-    }
-#endif
+#elif defined(__OpenBSD__)
+        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
+#endif
+    }
+#endif
+    env->exception_index = -1;
     longjmp(env->jmp_env, 1);
@@ -118,5 +126,5 @@
         /* Restore PC. This may happen if async event occurs before
            the TB starts executing. */
-        CPU_PC_FROM_TB(env, tb);
+        cpu_pc_from_tb(env, tb);
     }
     tb_phys_invalidate(tb, -1);
@@ -177,69 +185,10 @@
     TranslationBlock *tb;
     target_ulong cs_base, pc;
-    uint64_t flags;
+    int flags;
 
     /* we record a subset of the CPU state. It will
        always be the same before a given translated block
       is executed. */
-#if defined(TARGET_I386)
-    flags = env->hflags;
-    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
-    cs_base = env->segs[R_CS].base;
-    pc = cs_base + env->eip;
-#elif defined(TARGET_ARM)
-    flags = env->thumb | (env->vfp.vec_len << 1)
-            | (env->vfp.vec_stride << 4);
-    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
-        flags |= (1 << 6);
-    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
-        flags |= (1 << 7);
-    flags |= (env->condexec_bits << 8);
-    cs_base = 0;
-    pc = env->regs[15];
-#elif defined(TARGET_SPARC)
-#ifdef TARGET_SPARC64
-    // AM . Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
-    flags = ((env->pstate & PS_AM) << 2)
-        | (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
-        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
-#else
-    // FPU enable . Supervisor
-    flags = (env->psref << 4) | env->psrs;
-#endif
-    cs_base = env->npc;
-    pc = env->pc;
-#elif defined(TARGET_PPC)
-    flags = env->hflags;
-    cs_base = 0;
-    pc = env->nip;
-#elif defined(TARGET_MIPS)
-    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
-    cs_base = 0;
-    pc = env->active_tc.PC;
-#elif defined(TARGET_M68K)
-    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
-            | (env->sr & SR_S)            /* Bit  13 */
-            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
-    cs_base = 0;
-    pc = env->pc;
-#elif defined(TARGET_SH4)
-    flags = (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL
-                    | DELAY_SLOT_TRUE | DELAY_SLOT_CLEARME))   /* Bits  0- 3 */
-            | (env->fpscr & (FPSCR_FR | FPSCR_SZ | FPSCR_PR))  /* Bits 19-21 */
-            | (env->sr & (SR_MD | SR_RB));                     /* Bits 29-30 */
-    cs_base = 0;
-    pc = env->pc;
-#elif defined(TARGET_ALPHA)
-    flags = env->ps;
-    cs_base = 0;
-    pc = env->pc;
-#elif defined(TARGET_CRIS)
-    flags = env->pregs[PR_CCS] & (S_FLAG | P_FLAG | U_FLAG | X_FLAG);
-    flags |= env->dslot;
-    cs_base = 0;
-    pc = env->pc;
-#else
-#error unsupported CPU
-#endif
+    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
     tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
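
The per-target #if ladder removed above collapses into a single call to cpu_get_tb_cpu_state(). As a rough illustration only: the i386 variant of that helper would contain essentially the lines deleted here (the upstream definition lives in each target's cpu.h); this is a sketch reconstructed from the removed block, not the verbatim upstream code.

    /* Sketch of an i386-style cpu_get_tb_cpu_state(), reconstructed from
       the removed #if ladder above. */
    static inline void cpu_get_tb_cpu_state(CPUState *env, target_ulong *pc,
                                            target_ulong *cs_base, int *flags)
    {
        *cs_base = env->segs[R_CS].base;            /* CS segment base */
        *pc = *cs_base + env->eip;                  /* linear PC for the TB key */
        *flags = env->hflags |
                 (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    }

Hiding this behind one helper means tb_find_fast() no longer has to be edited every time a target adds a bit to its TB flags.
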
@@ -248,3 +197,25 @@
     }
     return tb;
+}
+
+static CPUDebugExcpHandler *debug_excp_handler;
+
+CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
+{
+    CPUDebugExcpHandler *old_handler = debug_excp_handler;
+
+    debug_excp_handler = handler;
+    return old_handler;
+}
+
+static void cpu_handle_debug_exception(CPUState *env)
+{
+    CPUWatchpoint *wp;
+
+    if (!env->watchpoint_hit)
+        TAILQ_FOREACH(wp, &env->watchpoints, entry)
+            wp->flags &= ~BP_WATCHPOINT_HIT;
+
+    if (debug_excp_handler)
+        debug_excp_handler(env);
 }
@@ -314,8 +285,9 @@
     /* if an exception is pending, we execute it here */
     if (env->exception_index >= 0) {
-        Assert(!env->user_mode_only);
         if (env->exception_index >= EXCP_INTERRUPT) {
             /* exit request from the cpu execution loop */
             ret = env->exception_index;
+            if (ret == EXCP_DEBUG)
+                cpu_handle_debug_exception(env);
             break;
         } else {
@@ -340,9 +312,16 @@
     {
         interrupt_request = env->interrupt_request;
-#ifndef VBOX
-        if (__builtin_expect(interrupt_request, 0))
-#else
-        if (RT_UNLIKELY(interrupt_request != 0))
-#endif
-        {
+        if (unlikely(interrupt_request)) {
+            if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
+                /* Mask out external interrupts for this step. */
+                interrupt_request &= ~(CPU_INTERRUPT_HARD |
+                                       CPU_INTERRUPT_FIQ |
+                                       CPU_INTERRUPT_SMI |
+                                       CPU_INTERRUPT_NMI);
+            }
+            if (interrupt_request & CPU_INTERRUPT_DEBUG) {
+                env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
+                env->exception_index = EXCP_DEBUG;
+                cpu_loop_exit();
+            }
             /** @todo: reconcile with what QEMU really does */
@@ -467,3 +446,11 @@
 
             env->current_tb = tb;
+
+            /* cpu_interrupt might be called while translating the
+               TB, but before it is linked into a potentially
+               infinite loop and becomes env->current_tb. Avoid
+               starting execution if there is a pending interrupt. */
+            if (unlikely (env->interrupt_request & CPU_INTERRUPT_EXIT))
+                env->current_tb = NULL;
+
             while (env->current_tb) {
@@ -483,5 +470,5 @@
                     tb = (TranslationBlock *)(long)(next_tb & ~3);
                     /* Restore PC.  */
-                    CPU_PC_FROM_TB(env, tb);
+                    cpu_pc_from_tb(env, tb);
                     insns_left = env->icount_decr.u32;
                     if (env->icount_extra && insns_left >= 0) {
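
The cpu_set_debug_excp_handler() hook added above lets a client intercept EXCP_DEBUG after the generic watchpoint cleanup runs. A minimal, hypothetical registration could look like this (my_debug_hook is an illustrative name, not part of the changeset):

    /* Hypothetical client of the hook added above: runs on every
       EXCP_DEBUG, e.g. to check whether a watchpoint fired. */
    static void my_debug_hook(CPUState *env)
    {
        if (env->watchpoint_hit) {
            /* a watchpoint, not a breakpoint, raised this exception */
        }
    }

    /* returns the previously installed handler so clients can chain */
    CPUDebugExcpHandler *old = cpu_set_debug_excp_handler(my_debug_hook);

Returning the old handler is what makes chaining possible: a new client can save it and call it from inside its own hook.
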
@@ -540,5 +527,5 @@
 #if defined(TARGET_I386)
     /* restore flags in standard format */
-    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
+    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
 #else
 #error unsupported target CPU
@@ -601,6 +588,9 @@
                 /* exit request from the cpu execution loop */
                 ret = env->exception_index;
+                if (ret == EXCP_DEBUG)
+                    cpu_handle_debug_exception(env);
                 break;
-            } else if (env->user_mode_only) {
+            } else {
+#if defined(CONFIG_USER_ONLY)
                 /* if user mode only, we simulate a fake exception
                    which will be handled outside the cpu execution
@@ -616,5 +606,5 @@
                 ret = env->exception_index;
                 break;
-            } else {
+#else
 #if defined(TARGET_I386)
                 /* simulate a real cpu exception. On i386, it can
@@ -644,4 +634,5 @@
                 do_interrupt(0);
 #endif
+#endif
             }
             env->exception_index = -1;
@@ -650,5 +641,5 @@
         if (kqemu_is_ok(env) && env->interrupt_request == 0) {
             int ret;
-            env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
+            env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
             ret = kqemu_cpu_exec(env);
             /* put eflags in CPU temporary format */
@@ -673,9 +664,20 @@
 #endif
 
+        if (kvm_enabled()) {
+            kvm_cpu_exec(env);
+            longjmp(env->jmp_env, 1);
+        }
+
         next_tb = 0; /* force lookup of first TB */
         for(;;) {
             interrupt_request = env->interrupt_request;
-            if (unlikely(interrupt_request) &&
-                likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
+            if (unlikely(interrupt_request)) {
+                if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
+                    /* Mask out external interrupts for this step. */
+                    interrupt_request &= ~(CPU_INTERRUPT_HARD |
+                                           CPU_INTERRUPT_FIQ |
+                                           CPU_INTERRUPT_SMI |
+                                           CPU_INTERRUPT_NMI);
+                }
                 if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                     env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
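
The SSTEP_NOIRQ test added in both execution loops implements "step one instruction without delivering external interrupts": instead of skipping interrupt handling entirely (the old && condition), only the external sources are masked, so debug and exit requests still get through. As a sketch of how the flag is set (the values follow the gdbstub convention; cpu_single_step() is the existing entry point a debugger front end would call, and is not part of this diff):

    /* gdbstub-style single-step flags consumed by the masking code above */
    #define SSTEP_ENABLE  0x1   /* single-stepping is active */
    #define SSTEP_NOIRQ   0x2   /* suppress HARD/FIQ/SMI/NMI while stepping */
    #define SSTEP_NOTIMER 0x4   /* suppress timer interrupts as well */

    /* debugger requests an interrupt-free single step */
    cpu_single_step(env, SSTEP_ENABLE | SSTEP_NOIRQ | SSTEP_NOTIMER);
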
@@ -716,7 +718,5 @@
                     env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                     intno = cpu_get_pic_interrupt(env);
-                    if (loglevel & CPU_LOG_TB_IN_ASM) {
-                        fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
-                    }
+                    qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                     do_interrupt(intno, 0, 0, 0, 1);
                     /* ensure that no TB jump will be modified as
@@ -730,9 +730,8 @@
                     /* FIXME: this should respect TPR */
                     svm_check_intercept(SVM_EXIT_VINTR);
+                    intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
+                    qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
+                    do_interrupt(intno, 0, 0, 0, 1);
                     env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
-                    intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
-                    if (loglevel & CPU_LOG_TB_IN_ASM)
-                        fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
-                    do_interrupt(intno, 0, 0, 0, 1);
                     next_tb = 0;
 #endif
@@ -861,17 +860,17 @@
             }
 #ifdef DEBUG_EXEC
-            if ((loglevel & CPU_LOG_TB_CPU)) {
+            if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                 /* restore flags in standard format */
                 regs_to_env();
 #if defined(TARGET_I386)
-                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
-                cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
+                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
+                log_cpu_state(env, X86_DUMP_CCOP);
                 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
 #elif defined(TARGET_ARM)
-                cpu_dump_state(env, logfile, fprintf, 0);
+                log_cpu_state(env, 0);
 #elif defined(TARGET_SPARC)
-                cpu_dump_state(env, logfile, fprintf, 0);
+                log_cpu_state(env, 0);
 #elif defined(TARGET_PPC)
-                cpu_dump_state(env, logfile, fprintf, 0);
+                log_cpu_state(env, 0);
 #elif defined(TARGET_M68K)
                 cpu_m68k_flush_flags(env, env->cc_op);
@@ -879,13 +878,13 @@
                 env->sr = (env->sr & 0xffe0)
                           | env->cc_dest | (env->cc_x << 4);
-                cpu_dump_state(env, logfile, fprintf, 0);
+                log_cpu_state(env, 0);
 #elif defined(TARGET_MIPS)
-                cpu_dump_state(env, logfile, fprintf, 0);
+                log_cpu_state(env, 0);
 #elif defined(TARGET_SH4)
-                cpu_dump_state(env, logfile, fprintf, 0);
+                log_cpu_state(env, 0);
 #elif defined(TARGET_ALPHA)
-                cpu_dump_state(env, logfile, fprintf, 0);
+                log_cpu_state(env, 0);
 #elif defined(TARGET_CRIS)
-                cpu_dump_state(env, logfile, fprintf, 0);
+                log_cpu_state(env, 0);
 #else
 #error unsupported target CPU
@@ -905,9 +904,7 @@
             }
 #ifdef DEBUG_EXEC
-            if ((loglevel & CPU_LOG_EXEC)) {
-                fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
-                        (long)tb->tc_ptr, tb->pc,
-                        lookup_symbol(tb->pc));
-            }
+            qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
+                          (long)tb->tc_ptr, tb->pc,
+                          lookup_symbol(tb->pc));
 #endif
             /* see if we can patch the calling TB. When the TB
@@ -925,3 +922,11 @@
             spin_unlock(&tb_lock);
             env->current_tb = tb;
+
+            /* cpu_interrupt might be called while translating the
+               TB, but before it is linked into a potentially
+               infinite loop and becomes env->current_tb. Avoid
+               starting execution if there is a pending interrupt. */
+            if (unlikely (env->interrupt_request & CPU_INTERRUPT_EXIT))
+                env->current_tb = NULL;
+
             while (env->current_tb) {
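
Several hunks above replace open-coded "if (loglevel & MASK) fprintf(logfile, ...)" sequences with qemu_log_mask(), qemu_loglevel_mask() and log_cpu_state(). Functionally the helper is just the old pattern folded into one call; a minimal sketch, assuming the loglevel/logfile globals this file already uses (the real helper lives in QEMU's logging code):

    #include <stdarg.h>
    #include <stdio.h>

    extern int loglevel;     /* bit mask of enabled CPU_LOG_* categories */
    extern FILE *logfile;    /* destination opened via the -d option */

    /* Minimal sketch of qemu_log_mask(); not the verbatim upstream code. */
    void qemu_log_mask(int mask, const char *fmt, ...)
    {
        va_list ap;
        va_start(ap, fmt);
        if ((loglevel & mask) && logfile)   /* same test the old code open-coded */
            vfprintf(logfile, fmt, ap);
        va_end(ap);
    }
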
@@ -940,5 +945,5 @@
                 tb = (TranslationBlock *)(long)(next_tb & ~3);
                 /* Restore PC.  */
-                CPU_PC_FROM_TB(env, tb);
+                cpu_pc_from_tb(env, tb);
                 insns_left = env->icount_decr.u32;
                 if (env->icount_extra && insns_left >= 0) {
@@ -981,5 +986,5 @@
 #if defined(TARGET_I386)
     /* restore flags in standard format */
-    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
+    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
 #elif defined(TARGET_ARM)
     /* XXX: Save/restore host fpu exception state?. */
@@ -1241,5 +1246,5 @@
            do it (XXX: use sigsetjmp) */
         sigprocmask(SIG_SETMASK, old_set, NULL);
-        do_raise_exception_err(env->exception_index, env->error_code);
+        cpu_loop_exit();
     } else {
         /* activate soft MMU for this block */
@@ -1330,5 +1335,5 @@
            do it (XXX: use sigsetjmp) */
         sigprocmask(SIG_SETMASK, old_set, NULL);
-        do_raise_exception_err(env->exception_index, env->error_code);
+        cpu_loop_exit();
     } else {
         /* activate soft MMU for this block */
@@ -1511,15 +1516,30 @@
 #elif defined(__x86_64__)
 
+#ifdef __NetBSD__
+#define REG_ERR _REG_ERR
+#define REG_TRAPNO _REG_TRAPNO
+
+#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.__gregs[(reg)]
+#define QEMU_UC_MACHINE_PC(uc) _UC_MACHINE_PC(uc)
+#else
+#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.gregs[(reg)]
+#define QEMU_UC_MACHINE_PC(uc) QEMU_UC_MCONTEXT_GREGS(uc, REG_RIP)
+#endif
+
 int cpu_signal_handler(int host_signum, void *pinfo,
                        void *puc)
 {
     siginfo_t *info = pinfo;
+    unsigned long pc;
+#ifdef __NetBSD__
+    ucontext_t *uc = puc;
+#else
     struct ucontext *uc = puc;
-    unsigned long pc;
-
-    pc = uc->uc_mcontext.gregs[REG_RIP];
+#endif
+
+    pc = QEMU_UC_MACHINE_PC(uc);
     return handle_cpu_signal(pc, (unsigned long)info->si_addr,
-                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
-                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
+                             QEMU_UC_MCONTEXT_GREGS(uc, REG_TRAPNO) == 0xe ?
+                             (QEMU_UC_MCONTEXT_GREGS(uc, REG_ERR) >> 1) & 1 : 0,
                              &uc->uc_sigmask, puc);
 }
@@ -1527,3 +1547,3 @@
-#elif defined(__powerpc__)
+#elif defined(_ARCH_PPC)
 
 /***********************************************************************
@@ -1642,7 +1662,13 @@
     unsigned long pc = regs[1];
 #else
+#ifdef __linux__
     struct sigcontext *sc = puc;
     unsigned long pc = sc->sigc_regs.tpc;
     void *sigmask = (void *)sc->sigc_mask;
+#elif defined(__OpenBSD__)
+    struct sigcontext *uc = puc;
+    unsigned long pc = uc->sc_pc;
+    void *sigmask = (void *)(long)uc->sc_mask;
+#endif
 #endif
 
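
The x86-64 hunk wraps the host-specific ucontext access behind QEMU_UC_MACHINE_PC()/QEMU_UC_MCONTEXT_GREGS(), so NetBSD's _REG_*/__gregs layout and Linux's REG_*/gregs layout can share one cpu_signal_handler(). Expanded for a Linux x86-64 host, the handler body reduces to the following (field names come from <sys/ucontext.h>; this is a standalone sketch, not code from the changeset):

    #define _GNU_SOURCE          /* for REG_RIP/REG_TRAPNO/REG_ERR on glibc */
    #include <signal.h>
    #include <ucontext.h>

    /* What cpu_signal_handler() computes on Linux/x86-64 once the new
       macros are expanded: the faulting PC and a write/read flag. */
    static void fault_info(siginfo_t *info, void *puc)
    {
        ucontext_t *uc = puc;
        unsigned long pc = uc->uc_mcontext.gregs[REG_RIP];   /* faulting PC */
        /* trap 0xe is a page fault; bit 1 of the error code means "write" */
        int is_write = uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                       (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0;
        (void)info; (void)pc; (void)is_write;
    }
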