VirtualBox

source: vbox/trunk/src/recompiler/cpu-exec.c@3023

Last change on this file: revision 3023, checked in by vboxsync, 18 years ago

only check for excessive faults when in protected mode

  • Property svn:eol-style set to native
File size: 69.3 KB
1/*
2 * i386 emulator main execution loop
3 *
4 * Copyright (c) 2003-2005 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20#include "config.h"
21#include "exec.h"
22#include "disas.h"
23
24#if !defined(CONFIG_SOFTMMU)
25#undef EAX
26#undef ECX
27#undef EDX
28#undef EBX
29#undef ESP
30#undef EBP
31#undef ESI
32#undef EDI
33#undef EIP
34#include <signal.h>
35#include <sys/ucontext.h>
36#endif
37
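/* Set by tb_find_slow() when allocating a new TB forced a flush of the whole
   translation cache; the execution loops then clear T0 so that no stale TB
   pointer is used for direct block chaining. */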
38int tb_invalidated_flag;
39
40//#define DEBUG_EXEC
41//#define DEBUG_SIGNAL
42
43#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_M68K)
44/* XXX: unify with i386 target */
45void cpu_loop_exit(void)
46{
47 longjmp(env->jmp_env, 1);
48}
49#endif
50#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
51#define reg_T2
52#endif
53
54/* exit the current TB from a signal handler. The host registers are
55 restored in a state compatible with the CPU emulator
56 */
57void cpu_resume_from_signal(CPUState *env1, void *puc)
58{
59#if !defined(CONFIG_SOFTMMU)
60 struct ucontext *uc = puc;
61#endif
62
63 env = env1;
64
65 /* XXX: restore cpu registers saved in host registers */
66
67#if !defined(CONFIG_SOFTMMU)
68 if (puc) {
69 /* XXX: use siglongjmp ? */
70 sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
71 }
72#endif
73 longjmp(env->jmp_env, 1);
74}
75
76
77static TranslationBlock *tb_find_slow(target_ulong pc,
78 target_ulong cs_base,
79 unsigned int flags)
80{
81 TranslationBlock *tb, **ptb1;
82 int code_gen_size;
83 unsigned int h;
84 target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
85 uint8_t *tc_ptr;
86
87 spin_lock(&tb_lock);
88
89 tb_invalidated_flag = 0;
90
91 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
92
93 /* find translated block using physical mappings */
94 phys_pc = get_phys_addr_code(env, pc);
95 phys_page1 = phys_pc & TARGET_PAGE_MASK;
96 phys_page2 = -1;
97 h = tb_phys_hash_func(phys_pc);
98 ptb1 = &tb_phys_hash[h];
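    /* walk the collision chain: a TB matches only if its pc, cs_base and
       flags agree and, for a block spanning two pages, the second physical
       page matches as well */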
99 for(;;) {
100 tb = *ptb1;
101 if (!tb)
102 goto not_found;
103 if (tb->pc == pc &&
104 tb->page_addr[0] == phys_page1 &&
105 tb->cs_base == cs_base &&
106 tb->flags == flags) {
107 /* check next page if needed */
108 if (tb->page_addr[1] != -1) {
109 virt_page2 = (pc & TARGET_PAGE_MASK) +
110 TARGET_PAGE_SIZE;
111 phys_page2 = get_phys_addr_code(env, virt_page2);
112 if (tb->page_addr[1] == phys_page2)
113 goto found;
114 } else {
115 goto found;
116 }
117 }
118 ptb1 = &tb->phys_hash_next;
119 }
120 not_found:
121 /* if no translated code available, then translate it now */
122 tb = tb_alloc(pc);
123 if (!tb) {
124 /* flush must be done */
125 tb_flush(env);
126 /* cannot fail at this point */
127 tb = tb_alloc(pc);
128 /* don't forget to invalidate previous TB info */
129 tb_invalidated_flag = 1;
130 }
131 tc_ptr = code_gen_ptr;
132 tb->tc_ptr = tc_ptr;
133 tb->cs_base = cs_base;
134 tb->flags = flags;
135 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
136 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
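    /* advance the generation pointer past the new code, rounded up to the
       next CODE_GEN_ALIGN boundary: (ptr + size + align - 1) & ~(align - 1) */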
137
138 /* check next page if needed */
139 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
140 phys_page2 = -1;
141 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
142 phys_page2 = get_phys_addr_code(env, virt_page2);
143 }
144 tb_link_phys(tb, phys_pc, phys_page2);
145
146 found:
147 /* we add the TB in the virtual pc hash table */
148 env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
149 spin_unlock(&tb_lock);
150 return tb;
151}
152
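/* Fast-path lookup: a small direct-mapped cache indexed by a hash of the
   virtual PC sits in front of the physical hash table that tb_find_slow()
   searches above. */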
153static inline TranslationBlock *tb_find_fast(void)
154{
155 TranslationBlock *tb;
156 target_ulong cs_base, pc;
157 unsigned int flags;
158
159 /* we record a subset of the CPU state. It will
160 always be the same before a given translated block
161 is executed. */
162#if defined(TARGET_I386)
163 flags = env->hflags;
164 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
165 cs_base = env->segs[R_CS].base;
166 pc = cs_base + env->eip;
167#elif defined(TARGET_ARM)
168 flags = env->thumb | (env->vfp.vec_len << 1)
169 | (env->vfp.vec_stride << 4);
170 if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
171 flags |= (1 << 6);
172 if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
173 flags |= (1 << 7);
174 cs_base = 0;
175 pc = env->regs[15];
176#elif defined(TARGET_SPARC)
177#ifdef TARGET_SPARC64
178 // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
179 flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
180 | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
181#else
182 // FPU enable . MMU enabled . MMU no-fault . Supervisor
183 flags = (env->psref << 3) | ((env->mmuregs[0] & (MMU_E | MMU_NF)) << 1)
184 | env->psrs;
185#endif
186 cs_base = env->npc;
187 pc = env->pc;
188#elif defined(TARGET_PPC)
189 flags = (msr_pr << MSR_PR) | (msr_fp << MSR_FP) |
190 (msr_se << MSR_SE) | (msr_le << MSR_LE);
191 cs_base = 0;
192 pc = env->nip;
193#elif defined(TARGET_MIPS)
194 flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
195 cs_base = 0;
196 pc = env->PC;
197#elif defined(TARGET_M68K)
198 flags = env->fpcr & M68K_FPCR_PREC;
199 cs_base = 0;
200 pc = env->pc;
201#elif defined(TARGET_SH4)
202 flags = env->sr & (SR_MD | SR_RB);
203 cs_base = 0; /* XXXXX */
204 pc = env->pc;
205#else
206#error unsupported CPU
207#endif
208 tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
209 if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
210 tb->flags != flags, 0)) {
211 tb = tb_find_slow(pc, cs_base, flags);
212 /* Note: we do it here to avoid a gcc bug on Mac OS X when
213 doing it in tb_find_slow */
214 if (tb_invalidated_flag) {
215 /* as some TB could have been invalidated because
216 of memory exceptions while generating the code, we
217 must recompute the hash index here */
218 T0 = 0;
219 }
220 }
221 return tb;
222}
223
224
225/* main execution loop */
226
227#ifdef VBOX
228
229int cpu_exec(CPUState *env1)
230{
231#define DECLARE_HOST_REGS 1
232#include "hostregs_helper.h"
233 int ret, interrupt_request;
234 void (*gen_func)(void);
235 TranslationBlock *tb;
236 uint8_t *tc_ptr;
237
238#if defined(TARGET_I386)
239 /* handle exit of HALTED state */
240 if (env1->hflags & HF_HALTED_MASK) {
241 /* disable halt condition */
242 if ((env1->interrupt_request & CPU_INTERRUPT_HARD) &&
243 (env1->eflags & IF_MASK)) {
244 env1->hflags &= ~HF_HALTED_MASK;
245 } else {
246 return EXCP_HALTED;
247 }
248 }
249#elif defined(TARGET_PPC)
250 if (env1->halted) {
251 if (env1->msr[MSR_EE] &&
252 (env1->interrupt_request &
253 (CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER))) {
254 env1->halted = 0;
255 } else {
256 return EXCP_HALTED;
257 }
258 }
259#elif defined(TARGET_SPARC)
260 if (env1->halted) {
261 if ((env1->interrupt_request & CPU_INTERRUPT_HARD) &&
262 (env1->psret != 0)) {
263 env1->halted = 0;
264 } else {
265 return EXCP_HALTED;
266 }
267 }
268#elif defined(TARGET_ARM)
269 if (env1->halted) {
270 /* An interrupt wakes the CPU even if the I and F CPSR bits are
271 set. */
272 if (env1->interrupt_request
273 & (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD)) {
274 env1->halted = 0;
275 } else {
276 return EXCP_HALTED;
277 }
278 }
279#elif defined(TARGET_MIPS)
280 if (env1->halted) {
281 if (env1->interrupt_request &
282 (CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER)) {
283 env1->halted = 0;
284 } else {
285 return EXCP_HALTED;
286 }
287 }
288#endif
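/* In each case above the CPU leaves the halted state only when an interrupt
   it can actually take is pending; otherwise cpu_exec() returns EXCP_HALTED
   immediately and the caller is expected to wait for an event. */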
289
290 cpu_single_env = env1;
291
292 /* first we save global registers */
293#define SAVE_HOST_REGS 1
294#include "hostregs_helper.h"
295 env = env1;
296#if defined(__sparc__) && !defined(HOST_SOLARIS)
297 /* we also save i7 because longjmp may not restore it */
298 asm volatile ("mov %%i7, %0" : "=r" (saved_i7));
299#endif
300
301#if defined(TARGET_I386)
302
303 env_to_regs();
304 /* put eflags in CPU temporary format */
305 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
306 DF = 1 - (2 * ((env->eflags >> 10) & 1));
307 CC_OP = CC_OP_EFLAGS;
308 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
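    /* eflags is now kept split across temporaries: CC_SRC/CC_OP describe the
       lazily evaluated condition codes and DF holds the direction flag as
       +1/-1 (bit 10 of eflags mapped through 1 - 2*bit); everything is
       recombined via cc_table[CC_OP].compute_all() before cpu_exec() returns. */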
309#elif defined(TARGET_ARM)
310#elif defined(TARGET_SPARC)
311#if defined(reg_REGWPTR)
312 saved_regwptr = REGWPTR;
313#endif
314#elif defined(TARGET_PPC)
315#elif defined(TARGET_MIPS)
316#elif defined(TARGET_SH4)
317 /* XXXXX */
318#else
319#error unsupported target CPU
320#endif
321#ifndef VBOX /* VBOX: We need to raise traps and suchlike from the outside. */
322 env->exception_index = -1;
323#endif
324
325 /* prepare setjmp context for exception handling */
326 for(;;) {
327 if (setjmp(env->jmp_env) == 0)
328 {
329 env->current_tb = NULL;
330 VMMR3Unlock(env->pVM);
331 VMMR3Lock(env->pVM);
332
333 /*
334 * Check for fatal errors first
335 */
336 if (env->interrupt_request & CPU_INTERRUPT_RC) {
337 env->exception_index = EXCP_RC;
338 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_RC);
339 ret = env->exception_index;
340 cpu_loop_exit();
341 }
342
343 /* if an exception is pending, we execute it here */
344 if (env->exception_index >= 0) {
345 Assert(!env->user_mode_only);
346 if (env->exception_index >= EXCP_INTERRUPT) {
347 /* exit request from the cpu execution loop */
348 ret = env->exception_index;
349 break;
350 } else {
351 /* simulate a real cpu exception. On i386, it can
352 trigger new exceptions, but we do not handle
353 double or triple faults yet. */
354 RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
355 Log(("do_interrupt %d %d %08x\n", env->exception_index, env->exception_is_int, env->exception_next_eip));
356 do_interrupt(env->exception_index,
357 env->exception_is_int,
358 env->error_code,
359 env->exception_next_eip, 0);
360 RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
361 }
362 env->exception_index = -1;
363 }
364
365 T0 = 0; /* force lookup of first TB */
366 for(;;)
367 {
368 interrupt_request = env->interrupt_request;
369 if (__builtin_expect(interrupt_request, 0))
370 {
371 /* Single instruction exec request: we execute it and return (one way or the other).
372 The caller will always reschedule after doing this operation! */
373 if (interrupt_request & CPU_INTERRUPT_SINGLE_INSTR)
374 {
375 /* not in flight are we? (if we are, we trapped) */
376 if (!(env->interrupt_request & CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
377 {
378 ASMAtomicOrS32(&env->interrupt_request, CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
379 env->exception_index = EXCP_SINGLE_INSTR;
380 if (emulate_single_instr(env) == -1)
381 AssertMsgFailed(("REM: emulate_single_instr failed for EIP=%08x!!\n", env->eip));
382
383 /* When we receive an external interrupt during execution of this single
384 instruction, then we should stay here. We will leave when we're ready
385 for raw-mode or when interrupted by pending EMT requests. */
386 interrupt_request = env->interrupt_request; /* reload this! */
387 if ( !(interrupt_request & CPU_INTERRUPT_HARD)
388 || !(env->eflags & IF_MASK)
389 || (env->hflags & HF_INHIBIT_IRQ_MASK)
390 || (env->state & CPU_RAW_HWACC)
391 )
392 {
393 env->exception_index = ret = EXCP_SINGLE_INSTR;
394 cpu_loop_exit();
395 }
396 }
397 /* Clear CPU_INTERRUPT_SINGLE_INSTR and leave CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT set. */
398 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_SINGLE_INSTR);
399 }
400
401 RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
402 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
403 !(env->hflags & HF_SMM_MASK)) {
404 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
405 do_smm_enter();
406 T0 = 0;
407 }
408 else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
409 (env->eflags & IF_MASK) &&
410 !(env->hflags & HF_INHIBIT_IRQ_MASK))
411 {
412 /* if hardware interrupt pending, we execute it */
413 int intno;
414 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_HARD);
415 intno = cpu_get_pic_interrupt(env);
416 if (intno >= 0)
417 {
418 Log(("do_interrupt %d\n", intno));
419 do_interrupt(intno, 0, 0, 0, 1);
420 }
421 /* ensure that no TB jump will be modified as
422 the program flow was changed */
423 T0 = 0;
424 }
425 if (env->interrupt_request & CPU_INTERRUPT_EXITTB)
426 {
427 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXITTB);
428 /* ensure that no TB jump will be modified as
429 the program flow was changed */
430 T0 = 0;
431 }
432 RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
433 if (interrupt_request & CPU_INTERRUPT_EXIT)
434 {
435 env->exception_index = EXCP_INTERRUPT;
436 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXIT);
437 ret = env->exception_index;
438 cpu_loop_exit();
439 }
440 if (interrupt_request & CPU_INTERRUPT_RC)
441 {
442 env->exception_index = EXCP_RC;
443 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_RC);
444 ret = env->exception_index;
445 cpu_loop_exit();
446 }
447 }
448
449 /*
450 * Check if the CPU state allows us to execute the code in raw-mode.
451 */
452 RAWEx_ProfileStart(env, STATS_RAW_CHECK);
453 if (remR3CanExecuteRaw(env,
454 env->eip + env->segs[R_CS].base,
455 env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)),
456 &env->exception_index))
457 {
458 RAWEx_ProfileStop(env, STATS_RAW_CHECK);
459 ret = env->exception_index;
460 cpu_loop_exit();
461 }
462 RAWEx_ProfileStop(env, STATS_RAW_CHECK);
463
464 RAWEx_ProfileStart(env, STATS_TLB_LOOKUP);
465 tb = tb_find_fast();
466
467 /* see if we can patch the calling TB. When the TB
468 spans two pages, we cannot safely do a direct
469 jump. */
470 if (T0 != 0
471 && !(tb->cflags & CF_RAW_MODE)
472 && tb->page_addr[1] == -1)
473 {
474 spin_lock(&tb_lock);
475 tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
476 spin_unlock(&tb_lock);
477 }
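            /* T0 is the chaining hint left by the epilogue of the previously
               executed TB: the low two bits select which of that TB's jump
               slots to patch and the remaining bits are the TB pointer, hence
               the (T0 & ~3) / (T0 & 3) split above. */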
478 tc_ptr = tb->tc_ptr;
479 env->current_tb = tb;
480 /* execute the generated code */
481 gen_func = (void *)tc_ptr;
482 RAWEx_ProfileStop(env, STATS_TLB_LOOKUP);
483
484#if defined(DEBUG) && defined(VBOX) && !defined(DEBUG_dmik)
485#if !defined(DEBUG_bird)
486 if (((env->hflags >> HF_CPL_SHIFT) & 3) == 0 && (env->hflags & HF_PE_MASK) && (env->cr[0] & CR0_PG_MASK))
487 {
488 if(!(env->state & CPU_EMULATE_SINGLE_STEP))
489 {
490 Log(("EMR0: %08X ESP=%08X IF=%d TF=%d CPL=%d\n", env->eip, ESP, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3));
491 }
492 }
493 else
494 if (((env->hflags >> HF_CPL_SHIFT) & 3) == 3 && (env->hflags & HF_PE_MASK) && (env->cr[0] & CR0_PG_MASK))
495 {
496 if(!(env->state & CPU_EMULATE_SINGLE_STEP))
497 {
498 if(env->eflags & VM_MASK)
499 {
500 Log(("EMV86: %04X:%04X IF=%d TF=%d CPL=%d CR0=%08X\n", env->segs[R_CS].selector, env->eip, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3, env->cr[0]));
501 }
502 else
503 {
504 Log(("EMR3: %08X ESP=%08X IF=%d TF=%d CPL=%d IOPL=%d CR0=%08X\n", env->eip, ESP, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3, ((env->eflags >> IOPL_SHIFT) & 3), env->cr[0]));
505 }
506 }
507 }
508 else
509 {
510 Log(("EMRM: %04X:%08X SS:ESP=%04X:%08X IF=%d TF=%d CPL=%d PE=%d PG=%d\n", env->segs[R_CS].selector, env->eip, env->segs[R_SS].selector, ESP, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3, env->cr[0] & X86_CR0_PE, env->cr[0] & X86_CR0_PG));
511 }
512#endif /* !DEBUG_bird */
513//{
514//static int blaat = 0;
515
516//if (env->eip == 0xc03f3c27 && ++blaat == 2)
517// env->state |= CPU_EMULATE_SINGLE_STEP;
518//}
519 if(env->state & CPU_EMULATE_SINGLE_STEP)
520 {
521#ifdef DEBUG_bird
522 static int s_cTimes = 0;
523 if (s_cTimes++ > 10000)
524 {
525 RTLogPrintf("Enough stepping!\n");
526 #if 0
527 env->exception_index = EXCP_DEBUG;
528 ret = env->exception_index;
529 cpu_loop_exit();
530 #else
531 env->state &= ~CPU_EMULATE_SINGLE_STEP;
532 #endif
533 }
534#endif
535 TMCpuTickPause(env->pVM);
536 remR3DisasInstr(env, -1, NULL);
537 TMCpuTickResume(env->pVM);
538 if(emulate_single_instr(env) == -1)
539 {
540 Log(("emulate_single_instr failed for EIP=%08X!!\n", env->eip));
541 }
542 }
543 else
544 {
545 RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE);
546 gen_func();
547 RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);
548 }
549#else /* !DEBUG || !VBOX || DEBUG_dmik */
550
551 RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE);
552 gen_func();
553 RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);
554
555#endif /* !DEBUG || !VBOX || DEBUG_dmik */
556 env->current_tb = NULL;
557 /* reset soft MMU for next block (it can currently
558 only be set by a memory fault) */
559#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
560 if (env->hflags & HF_SOFTMMU_MASK) {
561 env->hflags &= ~HF_SOFTMMU_MASK;
562 /* do not allow linking to another block */
563 T0 = 0;
564 }
565#endif
566 }
567 } else {
568 env_to_regs();
569 }
570#ifdef VBOX_HIGH_RES_TIMERS_HACK
571 /* NULL the current_tb here so cpu_interrupt() doesn't do
572 anything unnecessary (like crashing during emulate single instruction). */
573 env->current_tb = NULL;
574 TMTimerPoll(env1->pVM);
575#endif
576 } /* for(;;) */
577
578#if defined(TARGET_I386)
579 /* restore flags in standard format */
580 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
581#else
582#error unsupported target CPU
583#endif
584#include "hostregs_helper.h"
585 return ret;
586}
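/* A sketch (not from this file) of how a run loop typically drives cpu_exec():

       for (;;) {
           int rc = cpu_exec(env);
           switch (rc) {
           case EXCP_HALTED:    wait_for_event();     break;
           case EXCP_INTERRUPT: process_requests();   break;
           default:             handle_exception(rc); break;
           }
       }

   wait_for_event(), process_requests() and handle_exception() are placeholder
   names for caller-side logic, not functions defined by this code base. */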
587
588
589#else /* !VBOX */
590
591
592int cpu_exec(CPUState *env1)
593{
594#define DECLARE_HOST_REGS 1
595#include "hostregs_helper.h"
596#if defined(__sparc__) && !defined(HOST_SOLARIS)
597 int saved_i7;
598 target_ulong tmp_T0;
599#endif
600 int ret, interrupt_request;
601 void (*gen_func)(void);
602 TranslationBlock *tb;
603 uint8_t *tc_ptr;
604
605#if defined(TARGET_I386)
606 /* handle exit of HALTED state */
607 if (env1->hflags & HF_HALTED_MASK) {
608 /* disable halt condition */
609 if ((env1->interrupt_request & CPU_INTERRUPT_HARD) &&
610 (env1->eflags & IF_MASK)) {
611 env1->hflags &= ~HF_HALTED_MASK;
612 } else {
613 return EXCP_HALTED;
614 }
615 }
616#elif defined(TARGET_PPC)
617 if (env1->halted) {
618 if (env1->msr[MSR_EE] &&
619 (env1->interrupt_request &
620 (CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER))) {
621 env1->halted = 0;
622 } else {
623 return EXCP_HALTED;
624 }
625 }
626#elif defined(TARGET_SPARC)
627 if (env1->halted) {
628 if ((env1->interrupt_request & CPU_INTERRUPT_HARD) &&
629 (env1->psret != 0)) {
630 env1->halted = 0;
631 } else {
632 return EXCP_HALTED;
633 }
634 }
635#elif defined(TARGET_ARM)
636 if (env1->halted) {
637 /* An interrupt wakes the CPU even if the I and F CPSR bits are
638 set. */
639 if (env1->interrupt_request
640 & (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD)) {
641 env1->halted = 0;
642 } else {
643 return EXCP_HALTED;
644 }
645 }
646#elif defined(TARGET_MIPS)
647 if (env1->halted) {
648 if (env1->interrupt_request &
649 (CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER)) {
650 env1->halted = 0;
651 } else {
652 return EXCP_HALTED;
653 }
654 }
655#endif
656
657 cpu_single_env = env1;
658
659 /* first we save global registers */
660#define SAVE_HOST_REGS 1
661#include "hostregs_helper.h"
662 env = env1;
663#if defined(__sparc__) && !defined(HOST_SOLARIS)
664 /* we also save i7 because longjmp may not restore it */
665 asm volatile ("mov %%i7, %0" : "=r" (saved_i7));
666#endif
667
668#if defined(TARGET_I386)
669 env_to_regs();
670 /* put eflags in CPU temporary format */
671 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
672 DF = 1 - (2 * ((env->eflags >> 10) & 1));
673 CC_OP = CC_OP_EFLAGS;
674 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
675#elif defined(TARGET_ARM)
676#elif defined(TARGET_SPARC)
677#if defined(reg_REGWPTR)
678 saved_regwptr = REGWPTR;
679#endif
680#elif defined(TARGET_PPC)
681#elif defined(TARGET_M68K)
682 env->cc_op = CC_OP_FLAGS;
683 env->cc_dest = env->sr & 0xf;
684 env->cc_x = (env->sr >> 4) & 1;
685#elif defined(TARGET_MIPS)
686#elif defined(TARGET_SH4)
687 /* XXXXX */
688#else
689#error unsupported target CPU
690#endif
691#ifndef VBOX /* VBOX: We need to raise traps and suchlike from the outside. */
692 env->exception_index = -1;
693#endif
694
695 /* prepare setjmp context for exception handling */
696 for(;;) {
697 if (setjmp(env->jmp_env) == 0) {
698 env->current_tb = NULL;
699#ifdef VBOX
700 VMMR3Unlock(env->pVM);
701 VMMR3Lock(env->pVM);
702
703 /* Check for high priority requests first (like fatal
704 errors). */
705 if (env->interrupt_request & CPU_INTERRUPT_RC) {
706 env->exception_index = EXCP_RC;
707 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_RC);
708 ret = env->exception_index;
709 cpu_loop_exit();
710 }
711#endif /* VBOX */
712
713
714 /* if an exception is pending, we execute it here */
715 if (env->exception_index >= 0) {
716 if (env->exception_index >= EXCP_INTERRUPT) {
717 /* exit request from the cpu execution loop */
718 ret = env->exception_index;
719 break;
720 } else if (env->user_mode_only) {
721 /* if user mode only, we simulate a fake exception
722 which will be handled outside the cpu execution
723 loop */
724#if defined(TARGET_I386)
725 do_interrupt_user(env->exception_index,
726 env->exception_is_int,
727 env->error_code,
728 env->exception_next_eip);
729#endif
730 ret = env->exception_index;
731 break;
732 } else {
733#if defined(TARGET_I386)
734 /* simulate a real cpu exception. On i386, it can
735 trigger new exceptions, but we do not handle
736 double or triple faults yet. */
737 do_interrupt(env->exception_index,
738 env->exception_is_int,
739 env->error_code,
740 env->exception_next_eip, 0);
741#elif defined(TARGET_PPC)
742 do_interrupt(env);
743#elif defined(TARGET_MIPS)
744 do_interrupt(env);
745#elif defined(TARGET_SPARC)
746 do_interrupt(env->exception_index);
747#elif defined(TARGET_ARM)
748 do_interrupt(env);
749#elif defined(TARGET_SH4)
750 do_interrupt(env);
751#endif
752 }
753 env->exception_index = -1;
754 }
755#ifdef USE_KQEMU
756 if (kqemu_is_ok(env) && env->interrupt_request == 0) {
757 int ret;
758 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
759 ret = kqemu_cpu_exec(env);
760 /* put eflags in CPU temporary format */
761 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
762 DF = 1 - (2 * ((env->eflags >> 10) & 1));
763 CC_OP = CC_OP_EFLAGS;
764 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
765 if (ret == 1) {
766 /* exception */
767 longjmp(env->jmp_env, 1);
768 } else if (ret == 2) {
769 /* softmmu execution needed */
770 } else {
771 if (env->interrupt_request != 0) {
772 /* hardware interrupt will be executed just after */
773 } else {
774 /* otherwise, we restart */
775 longjmp(env->jmp_env, 1);
776 }
777 }
778 }
779#endif
780
781 T0 = 0; /* force lookup of first TB */
782 for(;;) {
783#if defined(__sparc__) && !defined(HOST_SOLARIS)
784 /* g1 can be modified by some libc? functions */
785 tmp_T0 = T0;
786#endif
787 interrupt_request = env->interrupt_request;
788 if (__builtin_expect(interrupt_request, 0)) {
789#ifdef VBOX
790 /* Single instruction exec request: we execute it and return (one way or the other).
791 The caller will always reschedule after doing this operation! */
792 if (interrupt_request & CPU_INTERRUPT_SINGLE_INSTR)
793 {
794 /* not in flight are we? */
795 if (!(env->interrupt_request & CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
796 {
797 ASMAtomicOrS32(&env->interrupt_request, CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
798 env->exception_index = EXCP_SINGLE_INSTR;
799 if (emulate_single_instr(env) == -1)
800 AssertMsgFailed(("REM: emulate_single_instr failed for EIP=%08x!!\n", env->eip));
801
802 /* When we receive an external interrupt during execution of this single
803 instruction, then we should stay here. We will leave when we're ready
804 for raw-mode or when interrupted by pending EMT requests. */
805 interrupt_request = env->interrupt_request; /* reload this! */
806 if ( !(interrupt_request & CPU_INTERRUPT_HARD)
807 || !(env->eflags & IF_MASK)
808 || (env->hflags & HF_INHIBIT_IRQ_MASK)
809 )
810 {
811 env->exception_index = ret = EXCP_SINGLE_INSTR;
812 cpu_loop_exit();
813 }
814 }
815 env->exception_index = EXCP_SINGLE_INSTR;
816 cpu_loop_exit();
817 }
818
819 RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
820#endif /* VBOX */
821#if defined(TARGET_I386)
822 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
823 !(env->hflags & HF_SMM_MASK)) {
824 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
825 do_smm_enter();
826#if defined(__sparc__) && !defined(HOST_SOLARIS)
827 tmp_T0 = 0;
828#else
829 T0 = 0;
830#endif
831 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
832 (env->eflags & IF_MASK) &&
833 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
834 int intno;
835#if defined(VBOX)
836 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_HARD);
837#else
838 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
839#endif
840 intno = cpu_get_pic_interrupt(env);
841 if (loglevel & CPU_LOG_TB_IN_ASM) {
842 fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
843 }
844#if defined(VBOX)
845 if (intno >= 0)
846#endif
847 do_interrupt(intno, 0, 0, 0, 1);
848 /* ensure that no TB jump will be modified as
849 the program flow was changed */
850#if defined(__sparc__) && !defined(HOST_SOLARIS)
851 tmp_T0 = 0;
852#else
853 T0 = 0;
854#endif
855 }
856#elif defined(TARGET_PPC)
857#if 0
858 if ((interrupt_request & CPU_INTERRUPT_RESET)) {
859 cpu_ppc_reset(env);
860 }
861#endif
862 if (msr_ee != 0) {
863 if ((interrupt_request & CPU_INTERRUPT_HARD)) {
864 /* Raise it */
865 env->exception_index = EXCP_EXTERNAL;
866 env->error_code = 0;
867 do_interrupt(env);
868 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
869#if defined(__sparc__) && !defined(HOST_SOLARIS)
870 tmp_T0 = 0;
871#else
872 T0 = 0;
873#endif
874 } else if ((interrupt_request & CPU_INTERRUPT_TIMER)) {
875 /* Raise it */
876 env->exception_index = EXCP_DECR;
877 env->error_code = 0;
878 do_interrupt(env);
879 env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
880#if defined(__sparc__) && !defined(HOST_SOLARIS)
881 tmp_T0 = 0;
882#else
883 T0 = 0;
884#endif
885 }
886 }
887#elif defined(TARGET_MIPS)
888 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
889 (env->CP0_Status & (1 << CP0St_IE)) &&
890 (env->CP0_Status & env->CP0_Cause & 0x0000FF00) &&
891 !(env->hflags & MIPS_HFLAG_EXL) &&
892 !(env->hflags & MIPS_HFLAG_ERL) &&
893 !(env->hflags & MIPS_HFLAG_DM)) {
894 /* Raise it */
895 env->exception_index = EXCP_EXT_INTERRUPT;
896 env->error_code = 0;
897 do_interrupt(env);
898#if defined(__sparc__) && !defined(HOST_SOLARIS)
899 tmp_T0 = 0;
900#else
901 T0 = 0;
902#endif
903 }
904#elif defined(TARGET_SPARC)
905 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
906 (env->psret != 0)) {
907 int pil = env->interrupt_index & 15;
908 int type = env->interrupt_index & 0xf0;
909
910 if (((type == TT_EXTINT) &&
911 (pil == 15 || pil > env->psrpil)) ||
912 type != TT_EXTINT) {
913 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
914 do_interrupt(env->interrupt_index);
915 env->interrupt_index = 0;
916#if defined(__sparc__) && !defined(HOST_SOLARIS)
917 tmp_T0 = 0;
918#else
919 T0 = 0;
920#endif
921 }
922 } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
923 //do_interrupt(0, 0, 0, 0, 0);
924 env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
925 } else if (interrupt_request & CPU_INTERRUPT_HALT) {
926 env->interrupt_request &= ~CPU_INTERRUPT_HALT;
927 env->halted = 1;
928 env->exception_index = EXCP_HLT;
929 cpu_loop_exit();
930 }
931#elif defined(TARGET_ARM)
932 if (interrupt_request & CPU_INTERRUPT_FIQ
933 && !(env->uncached_cpsr & CPSR_F)) {
934 env->exception_index = EXCP_FIQ;
935 do_interrupt(env);
936 }
937 if (interrupt_request & CPU_INTERRUPT_HARD
938 && !(env->uncached_cpsr & CPSR_I)) {
939 env->exception_index = EXCP_IRQ;
940 do_interrupt(env);
941 }
942#elif defined(TARGET_SH4)
943 /* XXXXX */
944#endif
945 /* Don't use the cached interrupt_request value,
946 do_interrupt may have updated the EXITTB flag. */
947 if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
948#if defined(VBOX)
949 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXITTB);
950#else
951 env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
952#endif
953 /* ensure that no TB jump will be modified as
954 the program flow was changed */
955#if defined(__sparc__) && !defined(HOST_SOLARIS)
956 tmp_T0 = 0;
957#else
958 T0 = 0;
959#endif
960 }
961#ifdef VBOX
962 RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
963#endif
964 if (interrupt_request & CPU_INTERRUPT_EXIT) {
965#if defined(VBOX)
966 env->exception_index = EXCP_INTERRUPT;
967 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXIT);
968#else
969 env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
970 env->exception_index = EXCP_INTERRUPT;
971#endif
972 cpu_loop_exit();
973 }
974#if defined(VBOX)
975 if (interrupt_request & CPU_INTERRUPT_RC) {
976 env->exception_index = EXCP_RC;
977 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_RC);
978 cpu_loop_exit();
979 }
980#endif
981 }
982#ifdef DEBUG_EXEC
983 if ((loglevel & CPU_LOG_TB_CPU)) {
984#if defined(TARGET_I386)
985 /* restore flags in standard format */
986#ifdef reg_EAX
987 env->regs[R_EAX] = EAX;
988#endif
989#ifdef reg_EBX
990 env->regs[R_EBX] = EBX;
991#endif
992#ifdef reg_ECX
993 env->regs[R_ECX] = ECX;
994#endif
995#ifdef reg_EDX
996 env->regs[R_EDX] = EDX;
997#endif
998#ifdef reg_ESI
999 env->regs[R_ESI] = ESI;
1000#endif
1001#ifdef reg_EDI
1002 env->regs[R_EDI] = EDI;
1003#endif
1004#ifdef reg_EBP
1005 env->regs[R_EBP] = EBP;
1006#endif
1007#ifdef reg_ESP
1008 env->regs[R_ESP] = ESP;
1009#endif
1010 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
1011 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1012 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
1013#elif defined(TARGET_ARM)
1014 cpu_dump_state(env, logfile, fprintf, 0);
1015#elif defined(TARGET_SPARC)
1016 REGWPTR = env->regbase + (env->cwp * 16);
1017 env->regwptr = REGWPTR;
1018 cpu_dump_state(env, logfile, fprintf, 0);
1019#elif defined(TARGET_PPC)
1020 cpu_dump_state(env, logfile, fprintf, 0);
1021#elif defined(TARGET_M68K)
1022 cpu_m68k_flush_flags(env, env->cc_op);
1023 env->cc_op = CC_OP_FLAGS;
1024 env->sr = (env->sr & 0xffe0)
1025 | env->cc_dest | (env->cc_x << 4);
1026 cpu_dump_state(env, logfile, fprintf, 0);
1027#elif defined(TARGET_MIPS)
1028 cpu_dump_state(env, logfile, fprintf, 0);
1029#elif defined(TARGET_SH4)
1030 cpu_dump_state(env, logfile, fprintf, 0);
1031#else
1032#error unsupported target CPU
1033#endif
1034 }
1035#endif
1036#ifdef VBOX
1037 /*
1038 * Check if the CPU state allows us to execute the code in raw-mode.
1039 */
1040 RAWEx_ProfileStart(env, STATS_RAW_CHECK);
1041 if (remR3CanExecuteRaw(env,
1042 env->eip + env->segs[R_CS].base,
1043 env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)),
1044 &env->exception_index))
1045 {
1046 RAWEx_ProfileStop(env, STATS_RAW_CHECK);
1047 ret = env->exception_index;
1048 cpu_loop_exit();
1049 }
1050 RAWEx_ProfileStop(env, STATS_RAW_CHECK);
1051#endif /* VBOX */
1052 tb = tb_find_fast();
1053#ifdef DEBUG_EXEC
1054 if ((loglevel & CPU_LOG_EXEC)) {
1055 fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
1056 (long)tb->tc_ptr, tb->pc,
1057 lookup_symbol(tb->pc));
1058 }
1059#endif
1060#if defined(__sparc__) && !defined(HOST_SOLARIS)
1061 T0 = tmp_T0;
1062#endif
1063 /* see if we can patch the calling TB. When the TB
1064 spans two pages, we cannot safely do a direct
1065 jump. */
1066 {
1067 if (T0 != 0 &&
1068#if USE_KQEMU
1069 (env->kqemu_enabled != 2) &&
1070#endif
1071#ifdef VBOX
1072 !(tb->cflags & CF_RAW_MODE) &&
1073#endif
1074 tb->page_addr[1] == -1
1075#if defined(TARGET_I386) && defined(USE_CODE_COPY)
1076 && (tb->cflags & CF_CODE_COPY) ==
1077 (((TranslationBlock *)(T0 & ~3))->cflags & CF_CODE_COPY)
1078#endif
1079 ) {
1080 spin_lock(&tb_lock);
1081 tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
1082#if defined(USE_CODE_COPY)
1083 /* propagates the FP use info */
1084 ((TranslationBlock *)(T0 & ~3))->cflags |=
1085 (tb->cflags & CF_FP_USED);
1086#endif
1087 spin_unlock(&tb_lock);
1088 }
1089 }
1090 tc_ptr = tb->tc_ptr;
1091 env->current_tb = tb;
1092 /* execute the generated code */
1093 gen_func = (void *)tc_ptr;
1094#if defined(__sparc__)
1095 __asm__ __volatile__("call %0\n\t"
1096 "mov %%o7,%%i0"
1097 : /* no outputs */
1098 : "r" (gen_func)
1099 : "i0", "i1", "i2", "i3", "i4", "i5",
1100 "l0", "l1", "l2", "l3", "l4", "l5",
1101 "l6", "l7");
1102#elif defined(__arm__)
1103 asm volatile ("mov pc, %0\n\t"
1104 ".global exec_loop\n\t"
1105 "exec_loop:\n\t"
1106 : /* no outputs */
1107 : "r" (gen_func)
1108 : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
1109#elif defined(TARGET_I386) && defined(USE_CODE_COPY)
1110{
1111 if (!(tb->cflags & CF_CODE_COPY)) {
1112 if ((tb->cflags & CF_FP_USED) && env->native_fp_regs) {
1113 save_native_fp_state(env);
1114 }
1115 gen_func();
1116 } else {
1117 if ((tb->cflags & CF_FP_USED) && !env->native_fp_regs) {
1118 restore_native_fp_state(env);
1119 }
1120 /* we work with native eflags */
1121 CC_SRC = cc_table[CC_OP].compute_all();
1122 CC_OP = CC_OP_EFLAGS;
1123 asm(".globl exec_loop\n"
1124 "\n"
1125 "debug1:\n"
1126 " pushl %%ebp\n"
1127 " fs movl %10, %9\n"
1128 " fs movl %11, %%eax\n"
1129 " andl $0x400, %%eax\n"
1130 " fs orl %8, %%eax\n"
1131 " pushl %%eax\n"
1132 " popf\n"
1133 " fs movl %%esp, %12\n"
1134 " fs movl %0, %%eax\n"
1135 " fs movl %1, %%ecx\n"
1136 " fs movl %2, %%edx\n"
1137 " fs movl %3, %%ebx\n"
1138 " fs movl %4, %%esp\n"
1139 " fs movl %5, %%ebp\n"
1140 " fs movl %6, %%esi\n"
1141 " fs movl %7, %%edi\n"
1142 " fs jmp *%9\n"
1143 "exec_loop:\n"
1144 " fs movl %%esp, %4\n"
1145 " fs movl %12, %%esp\n"
1146 " fs movl %%eax, %0\n"
1147 " fs movl %%ecx, %1\n"
1148 " fs movl %%edx, %2\n"
1149 " fs movl %%ebx, %3\n"
1150 " fs movl %%ebp, %5\n"
1151 " fs movl %%esi, %6\n"
1152 " fs movl %%edi, %7\n"
1153 " pushf\n"
1154 " popl %%eax\n"
1155 " movl %%eax, %%ecx\n"
1156 " andl $0x400, %%ecx\n"
1157 " shrl $9, %%ecx\n"
1158 " andl $0x8d5, %%eax\n"
1159 " fs movl %%eax, %8\n"
1160 " movl $1, %%eax\n"
1161 " subl %%ecx, %%eax\n"
1162 " fs movl %%eax, %11\n"
1163 " fs movl %9, %%ebx\n" /* get T0 value */
1164 " popl %%ebp\n"
1165 :
1166 : "m" (*(uint8_t *)offsetof(CPUState, regs[0])),
1167 "m" (*(uint8_t *)offsetof(CPUState, regs[1])),
1168 "m" (*(uint8_t *)offsetof(CPUState, regs[2])),
1169 "m" (*(uint8_t *)offsetof(CPUState, regs[3])),
1170 "m" (*(uint8_t *)offsetof(CPUState, regs[4])),
1171 "m" (*(uint8_t *)offsetof(CPUState, regs[5])),
1172 "m" (*(uint8_t *)offsetof(CPUState, regs[6])),
1173 "m" (*(uint8_t *)offsetof(CPUState, regs[7])),
1174 "m" (*(uint8_t *)offsetof(CPUState, cc_src)),
1175 "m" (*(uint8_t *)offsetof(CPUState, tmp0)),
1176 "a" (gen_func),
1177 "m" (*(uint8_t *)offsetof(CPUState, df)),
1178 "m" (*(uint8_t *)offsetof(CPUState, saved_esp))
1179 : "%ecx", "%edx"
1180 );
1181 }
1182}
1183#elif defined(__ia64)
1184 struct fptr {
1185 void *ip;
1186 void *gp;
1187 } fp;
1188
1189 fp.ip = tc_ptr;
1190 fp.gp = code_gen_buffer + 2 * (1 << 20);
1191 (*(void (*)(void)) &fp)();
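 /* On ia64 a function pointer is really a descriptor holding the code
    address (ip) and the global pointer (gp), so the raw tc_ptr must be
    wrapped in such a descriptor before it can be called. */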
1192#else
1193#if defined(DEBUG) && defined(VBOX) && !defined(DEBUG_dmik)
1194#if !defined(DEBUG_bird)
1195 if (((env->hflags >> HF_CPL_SHIFT) & 3) == 0 && (env->hflags & HF_PE_MASK) && (env->cr[0] & CR0_PG_MASK))
1196 {
1197 if(!(env->state & CPU_EMULATE_SINGLE_STEP))
1198 {
1199 Log(("EMR0: %08X IF=%d TF=%d CPL=%d\n", env->eip, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3));
1200 }
1201 }
1202 else
1203 if (((env->hflags >> HF_CPL_SHIFT) & 3) == 3 && (env->hflags & HF_PE_MASK) && (env->cr[0] & CR0_PG_MASK))
1204 {
1205 if(!(env->state & CPU_EMULATE_SINGLE_STEP))
1206 {
1207 if(env->eflags & VM_MASK)
1208 {
1209 Log(("EMV86: %08X IF=%d TF=%d CPL=%d flags=%08X CR0=%08X\n", env->eip, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3, flags, env->cr[0]));
1210 }
1211 else
1212 {
1213 Log(("EMR3: %08X IF=%d TF=%d CPL=%d IOPL=%d flags=%08X CR0=%08X\n", env->eip, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3, ((env->eflags >> IOPL_SHIFT) & 3), flags, env->cr[0]));
1214 }
1215 }
1216 }
1217#endif /* !DEBUG_bird */
1218 if(env->state & CPU_EMULATE_SINGLE_STEP)
1219 {
1220#ifdef DEBUG_bird
1221 static int s_cTimes = 0;
1222 if (s_cTimes++ > 1000000) /* 1 million */
1223 {
1224 RTLogPrintf("Enough stepping!\n");
1225 #if 0
1226 env->exception_index = EXCP_DEBUG;
1227 cpu_loop_exit();
1228 #else
1229 env->state &= ~CPU_EMULATE_SINGLE_STEP;
1230 #endif
1231 }
1232#endif
1233 TMCpuTickPause(env->pVM);
1234 remR3DisasInstr(env, -1, NULL);
1235 TMCpuTickResume(env->pVM);
1236 if(emulate_single_instr(env) == -1)
1237 {
1238 printf("emulate_single_instr failed for EIP=%08X!!\n", env->eip);
1239 }
1240 }
1241 else
1242 {
1243 RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE);
1244 gen_func();
1245 RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);
1246 }
1247#else /* !DEBUG || !VBOX || DEBUG_dmik */
1248
1249#ifdef VBOX
1250 RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE);
1251 gen_func();
1252 RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);
1253#else /* !VBOX */
1254 gen_func();
1255#endif /* !VBOX */
1256
1257#endif /* !DEBUG || !VBOX || DEBUG_dmik */
1258#endif
1259 env->current_tb = NULL;
1260 /* reset soft MMU for next block (it can currently
1261 only be set by a memory fault) */
1262#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
1263 if (env->hflags & HF_SOFTMMU_MASK) {
1264 env->hflags &= ~HF_SOFTMMU_MASK;
1265 /* do not allow linking to another block */
1266 T0 = 0;
1267 }
1268#endif
1269#if defined(USE_KQEMU)
1270#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
1271 if (kqemu_is_ok(env) &&
1272 (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
1273 cpu_loop_exit();
1274 }
1275#endif
1276 }
1277 } else {
1278 env_to_regs();
1279 }
1280 } /* for(;;) */
1281
1282
1283#if defined(TARGET_I386)
1284#if defined(USE_CODE_COPY)
1285 if (env->native_fp_regs) {
1286 save_native_fp_state(env);
1287 }
1288#endif
1289 /* restore flags in standard format */
1290 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
1291#elif defined(TARGET_ARM)
1292 /* XXX: Save/restore host fpu exception state? */
1293#elif defined(TARGET_SPARC)
1294#if defined(reg_REGWPTR)
1295 REGWPTR = saved_regwptr;
1296#endif
1297#elif defined(TARGET_PPC)
1298#elif defined(TARGET_M68K)
1299 cpu_m68k_flush_flags(env, env->cc_op);
1300 env->cc_op = CC_OP_FLAGS;
1301 env->sr = (env->sr & 0xffe0)
1302 | env->cc_dest | (env->cc_x << 4);
1303#elif defined(TARGET_MIPS)
1304#elif defined(TARGET_SH4)
1305 /* XXXXX */
1306#else
1307#error unsupported target CPU
1308#endif
1309#if defined(__sparc__) && !defined(HOST_SOLARIS)
1310 asm volatile ("mov %0, %%i7" : : "r" (saved_i7));
1311#endif
1312#include "hostregs_helper.h"
1313
1314 /* fail safe : never use cpu_single_env outside cpu_exec() */
1315 cpu_single_env = NULL;
1316 return ret;
1317}
1318
1319#endif /* !VBOX */
1320
1321/* must only be called from the generated code as an exception can be
1322 generated */
1323void tb_invalidate_page_range(target_ulong start, target_ulong end)
1324{
1325 /* XXX: cannot enable it yet because it can raise an MMU exception
1326 where NIP != read address on PowerPC */
1327#if 0
1328 target_ulong phys_addr;
1329 phys_addr = get_phys_addr_code(env, start);
1330 tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
1331#endif
1332}
1333
1334#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
1335
1336void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
1337{
1338 CPUX86State *saved_env;
1339
1340 saved_env = env;
1341 env = s;
1342 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
1343 selector &= 0xffff;
1344 cpu_x86_load_seg_cache(env, seg_reg, selector,
1345 (selector << 4), 0xffff, 0);
1346 } else {
1347 load_seg(seg_reg, selector);
1348 }
1349 env = saved_env;
1350}
1351
1352void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
1353{
1354 CPUX86State *saved_env;
1355
1356 saved_env = env;
1357 env = s;
1358
1359 helper_fsave((target_ulong)ptr, data32);
1360
1361 env = saved_env;
1362}
1363
1364void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
1365{
1366 CPUX86State *saved_env;
1367
1368 saved_env = env;
1369 env = s;
1370
1371 helper_frstor((target_ulong)ptr, data32);
1372
1373 env = saved_env;
1374}
1375
1376#endif /* TARGET_I386 */
1377
1378#if !defined(CONFIG_SOFTMMU)
1379
1380#if defined(TARGET_I386)
1381
1382/* 'pc' is the host PC at which the exception was raised. 'address' is
1383 the effective address of the memory exception. 'is_write' is 1 if a
1384 write caused the exception and 0 otherwise. 'old_set' is the
1385 signal set which should be restored */
1386static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1387 int is_write, sigset_t *old_set,
1388 void *puc)
1389{
1390 TranslationBlock *tb;
1391 int ret;
1392
1393 if (cpu_single_env)
1394 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1395#if defined(DEBUG_SIGNAL)
1396 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1397 pc, address, is_write, *(unsigned long *)old_set);
1398#endif
1399 /* XXX: locking issue */
1400 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1401 return 1;
1402 }
1403
1404 /* see if it is an MMU fault */
1405 ret = cpu_x86_handle_mmu_fault(env, address, is_write,
1406 ((env->hflags & HF_CPL_MASK) == 3), 0);
1407 if (ret < 0)
1408 return 0; /* not an MMU fault */
1409 if (ret == 0)
1410 return 1; /* the MMU fault was handled without causing real CPU fault */
1411 /* now we have a real cpu fault */
1412 tb = tb_find_pc(pc);
1413 if (tb) {
1414 /* the PC is inside the translated code. It means that we have
1415 a virtual CPU fault */
1416 cpu_restore_state(tb, env, pc, puc);
1417 }
1418 if (ret == 1) {
1419#if 0
1420 printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
1421 env->eip, env->cr[2], env->error_code);
1422#endif
1423 /* we restore the process signal mask as the sigreturn should
1424 do it (XXX: use sigsetjmp) */
1425 sigprocmask(SIG_SETMASK, old_set, NULL);
1426 raise_exception_err(env->exception_index, env->error_code);
1427 } else {
1428 /* activate soft MMU for this block */
1429 env->hflags |= HF_SOFTMMU_MASK;
1430 cpu_resume_from_signal(env, puc);
1431 }
1432 /* never comes here */
1433 return 1;
1434}
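/* Return-value protocol shared by the handle_cpu_signal() variants in this
   file: 0 means the fault did not come from a guest memory access and the
   caller should fall back to host handling; 1 means it was dealt with here,
   either because the page was unprotected, the soft MMU serviced the fault,
   or control never returns because a guest exception is raised. */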
1435
1436#elif defined(TARGET_ARM)
1437static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1438 int is_write, sigset_t *old_set,
1439 void *puc)
1440{
1441 TranslationBlock *tb;
1442 int ret;
1443
1444 if (cpu_single_env)
1445 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1446#if defined(DEBUG_SIGNAL)
1447 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1448 pc, address, is_write, *(unsigned long *)old_set);
1449#endif
1450 /* XXX: locking issue */
1451 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1452 return 1;
1453 }
1454 /* see if it is an MMU fault */
1455 ret = cpu_arm_handle_mmu_fault(env, address, is_write, 1, 0);
1456 if (ret < 0)
1457 return 0; /* not an MMU fault */
1458 if (ret == 0)
1459 return 1; /* the MMU fault was handled without causing real CPU fault */
1460 /* now we have a real cpu fault */
1461 tb = tb_find_pc(pc);
1462 if (tb) {
1463 /* the PC is inside the translated code. It means that we have
1464 a virtual CPU fault */
1465 cpu_restore_state(tb, env, pc, puc);
1466 }
1467 /* we restore the process signal mask as the sigreturn should
1468 do it (XXX: use sigsetjmp) */
1469 sigprocmask(SIG_SETMASK, old_set, NULL);
1470 cpu_loop_exit();
1471}
1472#elif defined(TARGET_SPARC)
1473static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1474 int is_write, sigset_t *old_set,
1475 void *puc)
1476{
1477 TranslationBlock *tb;
1478 int ret;
1479
1480 if (cpu_single_env)
1481 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1482#if defined(DEBUG_SIGNAL)
1483 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1484 pc, address, is_write, *(unsigned long *)old_set);
1485#endif
1486 /* XXX: locking issue */
1487 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1488 return 1;
1489 }
1490 /* see if it is an MMU fault */
1491 ret = cpu_sparc_handle_mmu_fault(env, address, is_write, 1, 0);
1492 if (ret < 0)
1493 return 0; /* not an MMU fault */
1494 if (ret == 0)
1495 return 1; /* the MMU fault was handled without causing real CPU fault */
1496 /* now we have a real cpu fault */
1497 tb = tb_find_pc(pc);
1498 if (tb) {
1499 /* the PC is inside the translated code. It means that we have
1500 a virtual CPU fault */
1501 cpu_restore_state(tb, env, pc, puc);
1502 }
1503 /* we restore the process signal mask as the sigreturn should
1504 do it (XXX: use sigsetjmp) */
1505 sigprocmask(SIG_SETMASK, old_set, NULL);
1506 cpu_loop_exit();
1507}
1508#elif defined (TARGET_PPC)
1509static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1510 int is_write, sigset_t *old_set,
1511 void *puc)
1512{
1513 TranslationBlock *tb;
1514 int ret;
1515
1516 if (cpu_single_env)
1517 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1518#if defined(DEBUG_SIGNAL)
1519 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1520 pc, address, is_write, *(unsigned long *)old_set);
1521#endif
1522 /* XXX: locking issue */
1523 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1524 return 1;
1525 }
1526
1527 /* see if it is an MMU fault */
1528 ret = cpu_ppc_handle_mmu_fault(env, address, is_write, msr_pr, 0);
1529 if (ret < 0)
1530 return 0; /* not an MMU fault */
1531 if (ret == 0)
1532 return 1; /* the MMU fault was handled without causing real CPU fault */
1533
1534 /* now we have a real cpu fault */
1535 tb = tb_find_pc(pc);
1536 if (tb) {
1537 /* the PC is inside the translated code. It means that we have
1538 a virtual CPU fault */
1539 cpu_restore_state(tb, env, pc, puc);
1540 }
1541 if (ret == 1) {
1542#if 0
1543 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1544 env->nip, env->error_code, tb);
1545#endif
1546 /* we restore the process signal mask as the sigreturn should
1547 do it (XXX: use sigsetjmp) */
1548 sigprocmask(SIG_SETMASK, old_set, NULL);
1549 do_raise_exception_err(env->exception_index, env->error_code);
1550 } else {
1551 /* activate soft MMU for this block */
1552 cpu_resume_from_signal(env, puc);
1553 }
1554 /* never comes here */
1555 return 1;
1556}
1557
1558#elif defined(TARGET_M68K)
1559static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1560 int is_write, sigset_t *old_set,
1561 void *puc)
1562{
1563 TranslationBlock *tb;
1564 int ret;
1565
1566 if (cpu_single_env)
1567 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1568#if defined(DEBUG_SIGNAL)
1569 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1570 pc, address, is_write, *(unsigned long *)old_set);
1571#endif
1572 /* XXX: locking issue */
1573 if (is_write && page_unprotect(address, pc, puc)) {
1574 return 1;
1575 }
1576 /* see if it is an MMU fault */
1577 ret = cpu_m68k_handle_mmu_fault(env, address, is_write, 1, 0);
1578 if (ret < 0)
1579 return 0; /* not an MMU fault */
1580 if (ret == 0)
1581 return 1; /* the MMU fault was handled without causing real CPU fault */
1582 /* now we have a real cpu fault */
1583 tb = tb_find_pc(pc);
1584 if (tb) {
1585 /* the PC is inside the translated code. It means that we have
1586 a virtual CPU fault */
1587 cpu_restore_state(tb, env, pc, puc);
1588 }
1589 /* we restore the process signal mask as the sigreturn should
1590 do it (XXX: use sigsetjmp) */
1591 sigprocmask(SIG_SETMASK, old_set, NULL);
1592 cpu_loop_exit();
1593 /* never comes here */
1594 return 1;
1595}
1596
1597#elif defined (TARGET_MIPS)
1598static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1599 int is_write, sigset_t *old_set,
1600 void *puc)
1601{
1602 TranslationBlock *tb;
1603 int ret;
1604
1605 if (cpu_single_env)
1606 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1607#if defined(DEBUG_SIGNAL)
1608 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1609 pc, address, is_write, *(unsigned long *)old_set);
1610#endif
1611 /* XXX: locking issue */
1612 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1613 return 1;
1614 }
1615
1616 /* see if it is an MMU fault */
1617 ret = cpu_mips_handle_mmu_fault(env, address, is_write, 1, 0);
1618 if (ret < 0)
1619 return 0; /* not an MMU fault */
1620 if (ret == 0)
1621 return 1; /* the MMU fault was handled without causing real CPU fault */
1622
1623 /* now we have a real cpu fault */
1624 tb = tb_find_pc(pc);
1625 if (tb) {
1626 /* the PC is inside the translated code. It means that we have
1627 a virtual CPU fault */
1628 cpu_restore_state(tb, env, pc, puc);
1629 }
1630 if (ret == 1) {
1631#if 0
1632 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1633 env->nip, env->error_code, tb);
1634#endif
1635 /* we restore the process signal mask as the sigreturn should
1636 do it (XXX: use sigsetjmp) */
1637 sigprocmask(SIG_SETMASK, old_set, NULL);
1638 do_raise_exception_err(env->exception_index, env->error_code);
1639 } else {
1640 /* activate soft MMU for this block */
1641 cpu_resume_from_signal(env, puc);
1642 }
1643 /* never comes here */
1644 return 1;
1645}
1646
1647#elif defined (TARGET_SH4)
1648static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1649 int is_write, sigset_t *old_set,
1650 void *puc)
1651{
1652 TranslationBlock *tb;
1653 int ret;
1654
1655 if (cpu_single_env)
1656 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1657#if defined(DEBUG_SIGNAL)
1658 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1659 pc, address, is_write, *(unsigned long *)old_set);
1660#endif
1661 /* XXX: locking issue */
1662 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1663 return 1;
1664 }
1665
1666 /* see if it is an MMU fault */
1667 ret = cpu_sh4_handle_mmu_fault(env, address, is_write, 1, 0);
1668 if (ret < 0)
1669 return 0; /* not an MMU fault */
1670 if (ret == 0)
1671 return 1; /* the MMU fault was handled without causing real CPU fault */
1672
1673 /* now we have a real cpu fault */
1674 tb = tb_find_pc(pc);
1675 if (tb) {
1676 /* the PC is inside the translated code. It means that we have
1677 a virtual CPU fault */
1678 cpu_restore_state(tb, env, pc, puc);
1679 }
1680#if 0
1681 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1682 env->nip, env->error_code, tb);
1683#endif
1684 /* we restore the process signal mask as the sigreturn should
1685 do it (XXX: use sigsetjmp) */
1686 sigprocmask(SIG_SETMASK, old_set, NULL);
1687 cpu_loop_exit();
1688 /* never comes here */
1689 return 1;
1690}
1691#else
1692#error unsupported target CPU
1693#endif
1694
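/* Host-specific signal entry points follow. They are installed elsewhere, not
   in this file; a minimal sketch, assuming SA_SIGINFO-style registration on a
   POSIX host, where host_segv_handler is a hypothetical wrapper that forwards
   its arguments to cpu_signal_handler():

       struct sigaction act;
       sigfillset(&act.sa_mask);
       act.sa_flags = SA_SIGINFO;
       act.sa_sigaction = host_segv_handler;
       sigaction(SIGSEGV, &act, NULL);

   Each variant extracts the faulting PC and, where the host provides one, a
   write flag from the signal context and passes them to handle_cpu_signal(). */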
1695#if defined(__i386__)
1696
1697#if defined(USE_CODE_COPY)
1698static void cpu_send_trap(unsigned long pc, int trap,
1699 struct ucontext *uc)
1700{
1701 TranslationBlock *tb;
1702
1703 if (cpu_single_env)
1704 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1705 /* now we have a real cpu fault */
1706 tb = tb_find_pc(pc);
1707 if (tb) {
1708 /* the PC is inside the translated code. It means that we have
1709 a virtual CPU fault */
1710 cpu_restore_state(tb, env, pc, uc);
1711 }
1712 sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
1713 raise_exception_err(trap, env->error_code);
1714}
1715#endif
1716
1717int cpu_signal_handler(int host_signum, void *pinfo,
1718 void *puc)
1719{
1720 siginfo_t *info = pinfo;
1721 struct ucontext *uc = puc;
1722 unsigned long pc;
1723 int trapno;
1724
1725#ifndef REG_EIP
1726/* for glibc 2.1 */
1727#define REG_EIP EIP
1728#define REG_ERR ERR
1729#define REG_TRAPNO TRAPNO
1730#endif
1731 pc = uc->uc_mcontext.gregs[REG_EIP];
1732 trapno = uc->uc_mcontext.gregs[REG_TRAPNO];
1733#if defined(TARGET_I386) && defined(USE_CODE_COPY)
1734 if (trapno == 0x00 || trapno == 0x05) {
1735 /* send division by zero or bound exception */
1736 cpu_send_trap(pc, trapno, uc);
1737 return 1;
1738 } else
1739#endif
1740 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1741 trapno == 0xe ?
1742 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
1743 &uc->uc_sigmask, puc);
1744}
1745
1746#elif defined(__x86_64__)
1747
1748int cpu_signal_handler(int host_signum, void *pinfo,
1749 void *puc)
1750{
1751 siginfo_t *info = pinfo;
1752 struct ucontext *uc = puc;
1753 unsigned long pc;
1754
1755 pc = uc->uc_mcontext.gregs[REG_RIP];
1756 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1757 uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
1758 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
1759 &uc->uc_sigmask, puc);
1760}
1761
1762#elif defined(__powerpc__)
1763
1764/***********************************************************************
1765 * signal context platform-specific definitions
1766 * From Wine
1767 */
1768#ifdef linux
1769/* All Registers access - only for local access */
1770# define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
1771/* Gpr Registers access */
1772# define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
1773# define IAR_sig(context) REG_sig(nip, context) /* Program counter */
1774# define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
1775# define CTR_sig(context) REG_sig(ctr, context) /* Count register */
1776# define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1777# define LR_sig(context) REG_sig(link, context) /* Link register */
1778# define CR_sig(context) REG_sig(ccr, context) /* Condition register */
1779/* Float Registers access */
1780# define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1781# define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1782/* Exception Registers access */
1783# define DAR_sig(context) REG_sig(dar, context)
1784# define DSISR_sig(context) REG_sig(dsisr, context)
1785# define TRAP_sig(context) REG_sig(trap, context)
1786#endif /* linux */
1787
1788#ifdef __APPLE__
1789# include <sys/ucontext.h>
1790typedef struct ucontext SIGCONTEXT;
1791/* All Registers access - only for local access */
1792# define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
1793# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
1794# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
1795# define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
1796/* Gpr Registers access */
1797# define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
1798# define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
1799# define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
1800# define CTR_sig(context) REG_sig(ctr, context)
1801 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1802 # define LR_sig(context) REG_sig(lr, context) /* Link register */
1803# define CR_sig(context) REG_sig(cr, context) /* Condition register */
1804/* Float Registers access */
1805# define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1806# define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1807/* Exception Registers access */
1808# define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1809# define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1810# define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1811#endif /* __APPLE__ */
1812
1813int cpu_signal_handler(int host_signum, void *pinfo,
1814 void *puc)
1815{
1816 siginfo_t *info = pinfo;
1817 struct ucontext *uc = puc;
1818 unsigned long pc;
1819 int is_write;
1820
1821 pc = IAR_sig(uc);
1822 is_write = 0;
1823#if 0
1824 /* ppc 4xx case */
1825 if (DSISR_sig(uc) & 0x00800000)
1826 is_write = 1;
1827#else
1828 if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
1829 is_write = 1;
1830#endif
1831 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1832 is_write, &uc->uc_sigmask, puc);
1833}
1834
1835#elif defined(__alpha__)
1836
1837int cpu_signal_handler(int host_signum, void *pinfo,
1838 void *puc)
1839{
1840 siginfo_t *info = pinfo;
1841 struct ucontext *uc = puc;
1842 uint32_t *pc = uc->uc_mcontext.sc_pc;
1843 uint32_t insn = *pc;
1844 int is_write = 0;
1845
1846 /* XXX: need kernel patch to get write flag faster */
1847 switch (insn >> 26) {
1848 case 0x0d: // stw
1849 case 0x0e: // stb
1850 case 0x0f: // stq_u
1851 case 0x24: // stf
1852 case 0x25: // stg
1853 case 0x26: // sts
1854 case 0x27: // stt
1855 case 0x2c: // stl
1856 case 0x2d: // stq
1857 case 0x2e: // stl_c
1858 case 0x2f: // stq_c
1859 is_write = 1;
1860 }
1861
1862 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1863 is_write, &uc->uc_sigmask, puc);
1864}
1865#elif defined(__sparc__)
1866
1867int cpu_signal_handler(int host_signum, void *pinfo,
1868 void *puc)
1869{
1870 siginfo_t *info = pinfo;
1871 uint32_t *regs = (uint32_t *)(info + 1);
1872 void *sigmask = (regs + 20);
1873 unsigned long pc;
1874 int is_write;
1875 uint32_t insn;
1876
1877 /* XXX: is there a standard glibc define ? */
1878 pc = regs[1];
1879 /* XXX: need kernel patch to get write flag faster */
1880 is_write = 0;
1881 insn = *(uint32_t *)pc;
1882 if ((insn >> 30) == 3) {
1883 switch((insn >> 19) & 0x3f) {
1884 case 0x05: // stb
1885 case 0x06: // sth
1886 case 0x04: // st
1887 case 0x07: // std
1888 case 0x24: // stf
1889 case 0x27: // stdf
1890 case 0x25: // stfsr
1891 is_write = 1;
1892 break;
1893 }
1894 }
1895 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1896 is_write, sigmask, NULL);
1897}
1898
1899#elif defined(__arm__)
1900
1901int cpu_signal_handler(int host_signum, void *pinfo,
1902 void *puc)
1903{
1904 siginfo_t *info = pinfo;
1905 struct ucontext *uc = puc;
1906 unsigned long pc;
1907 int is_write;
1908
1909 pc = uc->uc_mcontext.gregs[R15];
1910 /* XXX: compute is_write */
1911 is_write = 0;
1912 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1913 is_write,
1914 &uc->uc_sigmask, puc);
1915}
1916
1917#elif defined(__mc68000)
1918
1919int cpu_signal_handler(int host_signum, void *pinfo,
1920 void *puc)
1921{
1922 siginfo_t *info = pinfo;
1923 struct ucontext *uc = puc;
1924 unsigned long pc;
1925 int is_write;
1926
1927 pc = uc->uc_mcontext.gregs[16];
1928 /* XXX: compute is_write */
1929 is_write = 0;
1930 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1931 is_write,
1932 &uc->uc_sigmask, puc);
1933}
1934
1935#elif defined(__ia64)
1936
1937#ifndef __ISR_VALID
1938 /* This ought to be in <bits/siginfo.h>... */
1939# define __ISR_VALID 1
1940#endif
1941
1942int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
1943{
1944 siginfo_t *info = pinfo;
1945 struct ucontext *uc = puc;
1946 unsigned long ip;
1947 int is_write = 0;
1948
1949 ip = uc->uc_mcontext.sc_ip;
1950 switch (host_signum) {
1951 case SIGILL:
1952 case SIGFPE:
1953 case SIGSEGV:
1954 case SIGBUS:
1955 case SIGTRAP:
1956 if (info->si_code && (info->si_segvflags & __ISR_VALID))
1957 /* ISR.W (write-access) is bit 33: */
1958 is_write = (info->si_isr >> 33) & 1;
1959 break;
1960
1961 default:
1962 break;
1963 }
1964 return handle_cpu_signal(ip, (unsigned long)info->si_addr,
1965 is_write,
1966 &uc->uc_sigmask, puc);
1967}
1968
1969#elif defined(__s390__)
1970
1971int cpu_signal_handler(int host_signum, void *pinfo,
1972 void *puc)
1973{
1974 siginfo_t *info = pinfo;
1975 struct ucontext *uc = puc;
1976 unsigned long pc;
1977 int is_write;
1978
1979 pc = uc->uc_mcontext.psw.addr;
1980 /* XXX: compute is_write */
1981 is_write = 0;
1982 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1983 is_write,
1984 &uc->uc_sigmask, puc);
1985}
1986
1987#else
1988
1989#error host CPU specific signal handler needed
1990
1991#endif
1992
1993#endif /* !defined(CONFIG_SOFTMMU) */