VirtualBox

source: vbox/trunk/src/recompiler/cpu-exec.c @ 1630

Last change on this file since 1630 was 1147, checked in by vboxsync, 18 years ago

Corrected logging statement for v86 execution.

  • Property svn:eol-style set to native
File size: 58.1 KB
/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

#if defined(TARGET_ARM) || defined(TARGET_SPARC)
/* XXX: unify with i386 target */
void cpu_loop_exit(void)
{
    longjmp(env->jmp_env, 1);
}
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}

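/* Note on the exit protocol: both helpers above return control to cpu_exec()
   through the setjmp() it performs on env->jmp_env, roughly:

       if (setjmp(env->jmp_env) == 0) {
           ...run translated code, which may call cpu_loop_exit()...
       } else {
           ...longjmp() lands here; env->exception_index says why...
       }
*/
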
/* main execution loop */

#ifdef VBOX

int cpu_exec(CPUState *env1)
{
    int saved_T0, saved_T1, saved_T2;
    CPUState *saved_env;
#ifdef reg_EAX
    int saved_EAX;
#endif
#ifdef reg_ECX
    int saved_ECX;
#endif
#ifdef reg_EDX
    int saved_EDX;
#endif
#ifdef reg_EBX
    int saved_EBX;
#endif
#ifdef reg_ESP
    int saved_ESP;
#endif
#ifdef reg_EBP
    int saved_EBP;
#endif
#ifdef reg_ESI
    int saved_ESI;
#endif
#ifdef reg_EDI
    int saved_EDI;
#endif
    int code_gen_size, ret, interrupt_request;
    void (*gen_func)(void);
    TranslationBlock *tb, **ptb;
    target_ulong cs_base, pc;
    uint8_t *tc_ptr;
    unsigned int flags;

    /* first we save global registers */
    saved_env = env;
    env = env1;
    saved_T0 = T0;
    saved_T1 = T1;
    saved_T2 = T2;

#ifdef reg_EAX
    saved_EAX = EAX;
#endif
#ifdef reg_ECX
    saved_ECX = ECX;
#endif
#ifdef reg_EDX
    saved_EDX = EDX;
#endif
#ifdef reg_EBX
    saved_EBX = EBX;
#endif
#ifdef reg_ESP
    saved_ESP = ESP;
#endif
#ifdef reg_EBP
    saved_EBP = EBP;
#endif
#ifdef reg_ESI
    saved_ESI = ESI;
#endif
#ifdef reg_EDI
    saved_EDI = EDI;
#endif

    env_to_regs();
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
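    /* The arithmetic flags now live in (CC_SRC, CC_OP) and are only folded
       back into EFLAGS format on demand via cc_table[CC_OP].compute_all().
       DF is likewise kept pre-decoded as the string-op stride: for example,
       with EFLAGS.DF = 1 the line above yields DF = 1 - 2*1 = -1, so MOVS
       and friends step downwards. */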

#ifndef VBOX /* VBOX: We need to raise traps and suchlike from the outside. */
    env->exception_index = -1;
#endif

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0)
        {
            env->current_tb = NULL;
            VMMR3Unlock(env->pVM);
            VMMR3Lock(env->pVM);

            /*
             * Check for fatal errors first
             */
            if (env->interrupt_request & CPU_INTERRUPT_RC)
            {
                env->exception_index = EXCP_RC;
                ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_RC);
                ret = env->exception_index;
                cpu_loop_exit();
            }

            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0)
            {
                Assert(!env->user_mode_only);
                if (env->exception_index >= EXCP_INTERRUPT)
                {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                }
                else
                {
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
                    Log(("do_interrupt %d %d %08x\n", env->exception_index, env->exception_is_int, env->exception_next_eip));
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
                }
                env->exception_index = -1;
            }

            T0 = 0; /* force lookup of first TB */
            for(;;)
            {
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0))
                {
                    /* Single instruction exec request, we execute it and return (one way or the other).
                       The caller will always reschedule after doing this operation! */
                    if (interrupt_request & CPU_INTERRUPT_SINGLE_INSTR)
                    {
                        /* not in flight are we? (if we are, we trapped) */
                        if (!(env->interrupt_request & CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
                        {
                            ASMAtomicOrS32(&env->interrupt_request, CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
                            env->exception_index = EXCP_SINGLE_INSTR;
                            if (emulate_single_instr(env) == -1)
                                AssertMsgFailed(("REM: emulate_single_instr failed for EIP=%08x!!\n", env->eip));

                            /* When we receive an external interrupt during execution of this single
                               instruction, then we should stay here. We will leave when we're ready
                               for raw-mode or when interrupted by pending EMT requests. */
                            interrupt_request = env->interrupt_request; /* reload this! */
                            if (   !(interrupt_request & CPU_INTERRUPT_HARD)
                                || !(env->eflags & IF_MASK)
                                ||  (env->hflags & HF_INHIBIT_IRQ_MASK)
                                ||  (env->state & CPU_RAW_HWACC)
                               )
                            {
                                env->exception_index = ret = EXCP_SINGLE_INSTR;
                                cpu_loop_exit();
                            }
                        }
                        /* Clear CPU_INTERRUPT_SINGLE_INSTR and leave CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT set. */
                        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_SINGLE_INSTR);
                    }
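                    /* Note: only CPU_INTERRUPT_SINGLE_INSTR is cleared here;
                       CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT stays set so that a
                       trap taken inside the emulated instruction can be told
                       apart from a fresh request. It is presumably reset by the
                       caller once the EXCP_SINGLE_INSTR round trip completes. */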

                    RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
                    /* if hardware interrupt pending, we execute it */
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->eflags & IF_MASK) &&
                        !(env->hflags & HF_INHIBIT_IRQ_MASK))
                    {
                        int intno;
                        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_HARD);
                        intno = cpu_get_pic_interrupt(env);
                        if (intno >= 0)
                        {
                            Log(("do_interrupt %d\n", intno));
                            do_interrupt(intno, 0, 0, 0, 1);
                        }
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        T0 = 0;
                    }
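                    /* The vector comes from the emulated PIC and do_interrupt()
                       performs the architectural delivery. T0 is zeroed because
                       a nonzero T0 would later be used to chain this TB to its
                       predecessor, which must not happen once control flow has
                       been diverted by an interrupt. */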
                    if (interrupt_request & CPU_INTERRUPT_EXITTB)
                    {
                        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXITTB);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        T0 = 0;
                    }
                    RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
                    if (interrupt_request & CPU_INTERRUPT_EXIT)
                    {
                        env->exception_index = EXCP_INTERRUPT;
                        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXIT);
                        ret = env->exception_index;
                        cpu_loop_exit();
                    }
                    if (interrupt_request & CPU_INTERRUPT_RC)
                    {
                        env->exception_index = EXCP_RC;
                        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_RC);
                        ret = env->exception_index;
                        cpu_loop_exit();
                    }
                }
                /* we record a subset of the CPU state. It will
                   always be the same before a given translated block
                   is executed. */
                flags = env->hflags;
                flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                cs_base = env->segs[R_CS].base;
                pc = cs_base + env->eip;

                RAWEx_ProfileStart(env, STATS_RAW_CHECK);
                if (remR3CanExecuteRaw(env, pc, flags, &env->exception_index))
                {
                    RAWEx_ProfileStop(env, STATS_RAW_CHECK);
                    ret = env->exception_index;
                    cpu_loop_exit();
                }
                RAWEx_ProfileStop(env, STATS_RAW_CHECK);

                RAWEx_ProfileStart(env, STATS_TLB_LOOKUP);
                tb = tb_find(&ptb, pc, cs_base,
                             flags);
                if (!tb)
                {
                    TranslationBlock **ptb1;
                    unsigned int h;
                    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

                    spin_lock(&tb_lock);

                    tb_invalidated_flag = 0;

                    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

                    /* find translated block using physical mappings */
                    phys_pc = get_phys_addr_code(env, pc);
                    phys_page1 = phys_pc & TARGET_PAGE_MASK;
                    phys_page2 = -1;
                    h = tb_phys_hash_func(phys_pc);
                    ptb1 = &tb_phys_hash[h];
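                    /* TBs are hashed on the *physical* PC: this keeps a
                       translation valid for every virtual mapping of the same
                       code page and lets invalidation on guest writes work per
                       physical page, at the cost of the extra
                       get_phys_addr_code() lookups here. */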
                    for(;;) {
                        tb = *ptb1;
                        if (!tb)
                            goto not_found;
                        if (tb->pc == pc &&
                            tb->page_addr[0] == phys_page1 &&
                            tb->cs_base == cs_base &&
                            tb->flags == flags) {
                            /* check next page if needed */
                            if (tb->page_addr[1] != -1) {
                                virt_page2 = (pc & TARGET_PAGE_MASK) +
                                    TARGET_PAGE_SIZE;
                                phys_page2 = get_phys_addr_code(env, virt_page2);
                                if (tb->page_addr[1] == phys_page2)
                                    goto found;
                            } else {
                                goto found;
                            }
                        }
                        ptb1 = &tb->phys_hash_next;
                    }
                not_found:
                    /* if no translated code available, then translate it now */
                    tb = tb_alloc(pc);
                    if (!tb) {
                        /* flush must be done */
                        tb_flush(env);
                        /* cannot fail at this point */
                        tb = tb_alloc(pc);
                        /* don't forget to invalidate previous TB info */
                        ptb = &tb_hash[tb_hash_func(pc)];
                        T0 = 0;
                    }
                    tc_ptr = code_gen_ptr;
                    tb->tc_ptr = tc_ptr;
                    tb->cs_base = cs_base;
                    tb->flags = flags;
                    RAWEx_ProfileStop(env, STATS_TLB_LOOKUP);
                    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
                    RAWEx_ProfileStart(env, STATS_TLB_LOOKUP);
                    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
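                    /* (x + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1) is the
                       usual round-up-to-alignment idiom; e.g. with a 16-byte
                       alignment, 0x1013 + 15 = 0x1022, masked down to 0x1020. */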

                    /* check next page if needed */
                    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
                    phys_page2 = -1;
                    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
                        phys_page2 = get_phys_addr_code(env, virt_page2);
                    }
                    tb_link_phys(tb, phys_pc, phys_page2);

                found:
                    if (tb_invalidated_flag) {
                        /* as some TB could have been invalidated because
                           of memory exceptions while generating the code, we
                           must recompute the hash index here */
                        ptb = &tb_hash[tb_hash_func(pc)];
                        while (*ptb != NULL)
                            ptb = &(*ptb)->hash_next;
                        T0 = 0;
                    }
                    /* we add the TB in the virtual pc hash table */
                    *ptb = tb;
                    tb->hash_next = NULL;
                    tb_link(tb);
                    spin_unlock(&tb_lock);
                }
                /* see if we can patch the calling TB. */
                {
                    if (T0 != 0
                        && !(tb->cflags & CF_RAW_MODE)
                       ) {
                        spin_lock(&tb_lock);
                        tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
                        spin_unlock(&tb_lock);
                    }
                }
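                /* T0 doubles as the chaining cookie: translated code that
                   falls off the end of a TB leaves the address of that TB in
                   T0 with the taken-branch number (0 or 1) in the low two
                   bits, hence the (T0 & ~3) / (T0 & 3) split above. */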
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
                RAWEx_ProfileStop(env, STATS_TLB_LOOKUP);

#if defined(DEBUG) && defined(VBOX) && !defined(DEBUG_dmik)
#if !defined(DEBUG_bird)
                if (((env->hflags >> HF_CPL_SHIFT) & 3) == 0 && (env->hflags & HF_PE_MASK) && (env->cr[0] & CR0_PG_MASK))
                {
                    if(!(env->state & CPU_EMULATE_SINGLE_STEP))
                    {
                        Log(("EMR0: %08X ESP=%08X IF=%d TF=%d CPL=%d\n", env->eip, ESP, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3));
                    }
                }
                else
                if (((env->hflags >> HF_CPL_SHIFT) & 3) == 3 && (env->hflags & HF_PE_MASK) && (env->cr[0] & CR0_PG_MASK))
                {
                    if(!(env->state & CPU_EMULATE_SINGLE_STEP))
                    {
                        if(env->eflags & VM_MASK)
                        {
                            Log(("EMV86: %04X:%04X IF=%d TF=%d CPL=%d flags=%08X CR0=%08X\n", env->segs[R_CS].selector, env->eip, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3, flags, env->cr[0]));
                        }
                        else
                        {
                            Log(("EMR3: %08X ESP=%08X IF=%d TF=%d CPL=%d IOPL=%d flags=%08X CR0=%08X\n", env->eip, ESP, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3, ((env->eflags >> IOPL_SHIFT) & 3), flags, env->cr[0]));
                        }
                    }
                }
                else
                {
                    Log(("EMRM: %04X:%08X SS:ESP=%04X:%08X IF=%d TF=%d CPL=%d PE=%d PG=%d\n", env->segs[R_CS].selector, env->eip, env->segs[R_SS].selector, ESP, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3, env->cr[0] & X86_CR0_PE, env->cr[0] & X86_CR0_PG));
                }
#endif /* !DEBUG_bird */
                if(env->state & CPU_EMULATE_SINGLE_STEP)
                {
#ifdef DEBUG_bird
                    static int s_cTimes = 0;
                    if (s_cTimes++ > 1000000)
                    {
                        RTLogPrintf("Enough stepping!\n");
                        #if 0
                        env->exception_index = EXCP_DEBUG;
                        ret = env->exception_index;
                        cpu_loop_exit();
                        #else
                        env->state &= ~CPU_EMULATE_SINGLE_STEP;
                        #endif
                    }
#endif

                    TMCpuTickPause(env->pVM);
                    remR3DisasInstr(env, -1, NULL);
                    TMCpuTickResume(env->pVM);
                    if(emulate_single_instr(env) == -1)
                    {
                        Log(("emulate_single_instr failed for EIP=%08X!!\n", env->eip));
                    }
                }
                else
                {
                    RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE);
                    gen_func();
                    RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);
                }
#else /* !DEBUG || !VBOX || DEBUG_dmik */

                RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE);
                gen_func();
                RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);

#endif /* !DEBUG || !VBOX || DEBUG_dmik */
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    T0 = 0;
                }
#endif
            }
        } else {
            env_to_regs();
        }
#ifdef VBOX_HIGH_RES_TIMERS_HACK
        /* NULL the current_tb here so cpu_interrupt() doesn't do
           anything unnecessary (like crashing during emulate single instruction). */
        env->current_tb = NULL;
        TMTimerPoll(env1->pVM);
#endif
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
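    /* compute_all() folds the lazy (CC_SRC, CC_OP) pair back into the
       arithmetic flag bits; the DF term works because DF is stored as +1/-1,
       so DF = -1 (all bits set) & DF_MASK re-creates a set direction flag,
       while DF = +1 contributes nothing. */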

    /* restore global registers */
#ifdef reg_EAX
    EAX = saved_EAX;
#endif
#ifdef reg_ECX
    ECX = saved_ECX;
#endif
#ifdef reg_EDX
    EDX = saved_EDX;
#endif
#ifdef reg_EBX
    EBX = saved_EBX;
#endif
#ifdef reg_ESP
    ESP = saved_ESP;
#endif
#ifdef reg_EBP
    EBP = saved_EBP;
#endif
#ifdef reg_ESI
    ESI = saved_ESI;
#endif
#ifdef reg_EDI
    EDI = saved_EDI;
#endif
#else
#error unsupported target CPU
#endif
    T0 = saved_T0;
    T1 = saved_T1;
    T2 = saved_T2;
    env = saved_env;
    return ret;
}


#else /* !VBOX */


int cpu_exec(CPUState *env1)
{
    int saved_T0, saved_T1, saved_T2;
    CPUState *saved_env;
#ifdef reg_EAX
    int saved_EAX;
#endif
#ifdef reg_ECX
    int saved_ECX;
#endif
#ifdef reg_EDX
    int saved_EDX;
#endif
#ifdef reg_EBX
    int saved_EBX;
#endif
#ifdef reg_ESP
    int saved_ESP;
#endif
#ifdef reg_EBP
    int saved_EBP;
#endif
#ifdef reg_ESI
    int saved_ESI;
#endif
#ifdef reg_EDI
    int saved_EDI;
#endif
#ifdef __sparc__
    int saved_i7, tmp_T0;
#endif
    int code_gen_size, ret, interrupt_request;
    void (*gen_func)(void);
    TranslationBlock *tb, **ptb;
    target_ulong cs_base, pc;
    uint8_t *tc_ptr;
    unsigned int flags;

    /* first we save global registers */
    saved_env = env;
    env = env1;
    saved_T0 = T0;
    saved_T1 = T1;
    saved_T2 = T2;
#ifdef __sparc__
    /* we also save i7 because longjmp may not restore it */
    asm volatile ("mov %%i7, %0" : "=r" (saved_i7));
#endif

#if defined(TARGET_I386)
#ifdef reg_EAX
    saved_EAX = EAX;
#endif
#ifdef reg_ECX
    saved_ECX = ECX;
#endif
#ifdef reg_EDX
    saved_EDX = EDX;
#endif
#ifdef reg_EBX
    saved_EBX = EBX;
#endif
#ifdef reg_ESP
    saved_ESP = ESP;
#endif
#ifdef reg_EBP
    saved_EBP = EBP;
#endif
#ifdef reg_ESI
    saved_ESI = ESI;
#endif
#ifdef reg_EDI
    saved_EDI = EDI;
#endif

    env_to_regs();
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
    {
        unsigned int psr;
        psr = env->cpsr;
        env->CF = (psr >> 29) & 1;
        env->NZF = (psr & 0xc0000000) ^ 0x40000000;
        env->VF = (psr << 3) & 0x80000000;
        env->QF = (psr >> 27) & 1;
        env->cpsr = psr & ~CACHED_CPSR_BITS;
    }
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#else
#error unsupported target CPU
#endif
#ifndef VBOX /* VBOX: We need to raise traps and suchlike from the outside. */
    env->exception_index = -1;
#endif

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
#ifdef VBOX
            VMMR3Unlock(env->pVM);
            VMMR3Lock(env->pVM);

            /* Check for high priority requests first (like fatal
               errors). */
            if (env->interrupt_request & CPU_INTERRUPT_RC) {
                env->exception_index = EXCP_RC;
                ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_RC);
                cpu_loop_exit();
            }
#endif /* VBOX */


            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index,
                                 env->error_code);
#endif
                }
                env->exception_index = -1;
            }

            T0 = 0; /* force lookup of first TB */
            for(;;) {
#ifdef __sparc__
                /* g1 can be modified by some libc? functions */
                tmp_T0 = T0;
#endif
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)) {
#ifdef VBOX
                    /* Single instruction exec request, we execute it and return (one way or the other).
                       The caller will clean up the request if it ends one of the other ways... the caller
                       also locks everything, so no atomics and/or locking are required. */
                    if (interrupt_request & CPU_INTERRUPT_SINGLE_INSTR) {
                        /* not in flight are we? */
                        if (!(env->interrupt_request & CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT)) {
                            env->interrupt_request |= CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT;
                            env->exception_index = EXCP_SINGLE_INSTR;
                            if (emulate_single_instr(env) == -1)
                                AssertMsgFailed(("REM: emulate_single_instr failed for EIP=%08x!!\n", env->eip));
                        }
                        env->exception_index = EXCP_SINGLE_INSTR;
                        cpu_loop_exit();
                    }
#endif /* VBOX */
#if defined(TARGET_I386)
                    /* if hardware interrupt pending, we execute it */
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->eflags & IF_MASK) &&
                        !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
#if defined(VBOX)
                        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_HARD);
#else
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
#endif
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
#if defined(VBOX)
                        if (intno >= 0)
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
#ifdef __sparc__
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (msr_ee != 0) {
                        if ((interrupt_request & CPU_INTERRUPT_HARD)) {
                            /* Raise it */
                            env->exception_index = EXCP_EXTERNAL;
                            env->error_code = 0;
                            do_interrupt(env);
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        } else if ((interrupt_request & CPU_INTERRUPT_TIMER)) {
                            /* Raise it */
                            env->exception_index = EXCP_DECR;
                            env->error_code = 0;
                            do_interrupt(env);
                            env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env->interrupt_index, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_EXITTB) {
#if defined(VBOX)
                        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXITTB);
#else
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
#endif
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
#ifdef __sparc__
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
#if defined(VBOX)
                        env->exception_index = EXCP_INTERRUPT;
                        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXIT);
#else
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
#endif
                        cpu_loop_exit();
                    }
#if defined(VBOX)
                    if (interrupt_request & CPU_INTERRUPT_RC) {
                        env->exception_index = EXCP_RC;
                        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_RC);
                        cpu_loop_exit();
                    }
#endif
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
#if defined(TARGET_I386)
                    /* restore flags in standard format */
                    env->regs[R_EAX] = EAX;
                    env->regs[R_EBX] = EBX;
                    env->regs[R_ECX] = ECX;
                    env->regs[R_EDX] = EDX;
                    env->regs[R_ESI] = ESI;
                    env->regs[R_EDI] = EDI;
                    env->regs[R_EBP] = EBP;
                    env->regs[R_ESP] = ESP;
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    env->cpsr = compute_cpsr();
                    cpu_dump_state(env, logfile, fprintf, 0);
                    env->cpsr &= ~CACHED_CPSR_BITS;
#elif defined(TARGET_SPARC)
                    cpu_dump_state (env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                /* we record a subset of the CPU state. It will
                   always be the same before a given translated block
                   is executed. */
#if defined(TARGET_I386)
                flags = env->hflags;
                flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                cs_base = env->segs[R_CS].base;
                pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
                flags = env->thumb;
                cs_base = 0;
                pc = env->regs[15];
#elif defined(TARGET_SPARC)
                flags = 0;
                cs_base = env->npc;
                pc = env->pc;
#elif defined(TARGET_PPC)
                flags = 0;
                cs_base = 0;
                pc = env->nip;
#else
#error unsupported CPU
#endif

#ifdef VBOX
                if (remR3CanExecuteRaw(env, pc, flags, &env->exception_index))
                    cpu_loop_exit();
#endif /* VBOX */

                tb = tb_find(&ptb, pc, cs_base,
                             flags);
                if (!tb) {
                    TranslationBlock **ptb1;
                    unsigned int h;
                    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;


                    spin_lock(&tb_lock);

                    tb_invalidated_flag = 0;

                    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

                    /* find translated block using physical mappings */
                    phys_pc = get_phys_addr_code(env, pc);
                    phys_page1 = phys_pc & TARGET_PAGE_MASK;
                    phys_page2 = -1;
                    h = tb_phys_hash_func(phys_pc);
                    ptb1 = &tb_phys_hash[h];
                    for(;;) {
                        tb = *ptb1;
                        if (!tb)
                            goto not_found;
                        if (tb->pc == pc &&
                            tb->page_addr[0] == phys_page1 &&
                            tb->cs_base == cs_base &&
                            tb->flags == flags) {
                            /* check next page if needed */
                            if (tb->page_addr[1] != -1) {
                                virt_page2 = (pc & TARGET_PAGE_MASK) +
                                    TARGET_PAGE_SIZE;
                                phys_page2 = get_phys_addr_code(env, virt_page2);
                                if (tb->page_addr[1] == phys_page2)
                                    goto found;
                            } else {
                                goto found;
                            }
                        }
                        ptb1 = &tb->phys_hash_next;
                    }
                not_found:
                    /* if no translated code available, then translate it now */
                    tb = tb_alloc(pc);
                    if (!tb) {
                        /* flush must be done */
                        tb_flush(env);
                        /* cannot fail at this point */
                        tb = tb_alloc(pc);
                        /* don't forget to invalidate previous TB info */
                        ptb = &tb_hash[tb_hash_func(pc)];
                        T0 = 0;
                    }
                    tc_ptr = code_gen_ptr;
                    tb->tc_ptr = tc_ptr;
                    tb->cs_base = cs_base;
                    tb->flags = flags;
                    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
                    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

                    /* check next page if needed */
                    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
                    phys_page2 = -1;
                    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
                        phys_page2 = get_phys_addr_code(env, virt_page2);
                    }
                    tb_link_phys(tb, phys_pc, phys_page2);

                found:
                    if (tb_invalidated_flag) {
                        /* as some TB could have been invalidated because
                           of memory exceptions while generating the code, we
                           must recompute the hash index here */
                        ptb = &tb_hash[tb_hash_func(pc)];
                        while (*ptb != NULL)
                            ptb = &(*ptb)->hash_next;
                        T0 = 0;
                    }
                    /* we add the TB in the virtual pc hash table */
                    *ptb = tb;
                    tb->hash_next = NULL;
                    tb_link(tb);
                    spin_unlock(&tb_lock);
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
#ifdef __sparc__
                T0 = tmp_T0;
#endif
                /* see if we can patch the calling TB. */
                {
                    if (T0 != 0
#ifdef VBOX
                        && !(tb->cflags & CF_RAW_MODE)
#endif
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
                        && (tb->cflags & CF_CODE_COPY) ==
                        (((TranslationBlock *)(T0 & ~3))->cflags & CF_CODE_COPY)
#endif
                        ) {
                        spin_lock(&tb_lock);
                        tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
#if defined(USE_CODE_COPY)
                        /* propagates the FP use info */
                        ((TranslationBlock *)(T0 & ~3))->cflags |=
                            (tb->cflags & CF_FP_USED);
#endif
                        spin_unlock(&tb_lock);
                    }
                }
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
#if defined(__sparc__)
                __asm__ __volatile__("call %0\n\t"
                                     "mov %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no outputs */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
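                /* Host-specific detail (best-effort explanation, not from the
                   original comments): gen_func is a bare code pointer, and on
                   sparc/arm a plain C call would not declare the registers the
                   generated code clobbers, so the call is spelled out in asm
                   with an explicit clobber list. Hosts with a conventional ABI
                   simply call gen_func() below. */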
#elif defined(TARGET_I386) && defined(USE_CODE_COPY)
{
    if (!(tb->cflags & CF_CODE_COPY)) {
        if ((tb->cflags & CF_FP_USED) && env->native_fp_regs) {
            save_native_fp_state(env);
        }
        gen_func();
    } else {
        if ((tb->cflags & CF_FP_USED) && !env->native_fp_regs) {
            restore_native_fp_state(env);
        }
        /* we work with native eflags */
        CC_SRC = cc_table[CC_OP].compute_all();
        CC_OP = CC_OP_EFLAGS;
        asm(".globl exec_loop\n"
            "\n"
            "debug1:\n"
            "    pushl %%ebp\n"
            "    fs movl %10, %9\n"
            "    fs movl %11, %%eax\n"
            "    andl $0x400, %%eax\n"
            "    fs orl %8, %%eax\n"
            "    pushl %%eax\n"
            "    popf\n"
            "    fs movl %%esp, %12\n"
            "    fs movl %0, %%eax\n"
            "    fs movl %1, %%ecx\n"
            "    fs movl %2, %%edx\n"
            "    fs movl %3, %%ebx\n"
            "    fs movl %4, %%esp\n"
            "    fs movl %5, %%ebp\n"
            "    fs movl %6, %%esi\n"
            "    fs movl %7, %%edi\n"
            "    fs jmp *%9\n"
            "exec_loop:\n"
            "    fs movl %%esp, %4\n"
            "    fs movl %12, %%esp\n"
            "    fs movl %%eax, %0\n"
            "    fs movl %%ecx, %1\n"
            "    fs movl %%edx, %2\n"
            "    fs movl %%ebx, %3\n"
            "    fs movl %%ebp, %5\n"
            "    fs movl %%esi, %6\n"
            "    fs movl %%edi, %7\n"
            "    pushf\n"
            "    popl %%eax\n"
            "    movl %%eax, %%ecx\n"
            "    andl $0x400, %%ecx\n"
            "    shrl $9, %%ecx\n"
            "    andl $0x8d5, %%eax\n"
            "    fs movl %%eax, %8\n"
            "    movl $1, %%eax\n"
            "    subl %%ecx, %%eax\n"
            "    fs movl %%eax, %11\n"
            "    fs movl %9, %%ebx\n" /* get T0 value */
            "    popl %%ebp\n"
            :
            : "m" (*(uint8_t *)offsetof(CPUState, regs[0])),
              "m" (*(uint8_t *)offsetof(CPUState, regs[1])),
              "m" (*(uint8_t *)offsetof(CPUState, regs[2])),
              "m" (*(uint8_t *)offsetof(CPUState, regs[3])),
              "m" (*(uint8_t *)offsetof(CPUState, regs[4])),
              "m" (*(uint8_t *)offsetof(CPUState, regs[5])),
              "m" (*(uint8_t *)offsetof(CPUState, regs[6])),
              "m" (*(uint8_t *)offsetof(CPUState, regs[7])),
              "m" (*(uint8_t *)offsetof(CPUState, cc_src)),
              "m" (*(uint8_t *)offsetof(CPUState, tmp0)),
              "a" (gen_func),
              "m" (*(uint8_t *)offsetof(CPUState, df)),
              "m" (*(uint8_t *)offsetof(CPUState, saved_esp))
            : "%ecx", "%edx"
            );
    }
}
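/* Code-copy mode note (reconstructed, not from the original comments): the
   "fs"-prefixed moves appear to address CPUState through the %fs segment
   base, so the guest's eax..edi can occupy the real host registers while env
   stays reachable; exec_loop is the label the generated code returns to. */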
#else
#if defined(DEBUG) && defined(VBOX) && !defined(DEBUG_dmik)
#if !defined(DEBUG_bird)
                if (((env->hflags >> HF_CPL_SHIFT) & 3) == 0 && (env->hflags & HF_PE_MASK) && (env->cr[0] & CR0_PG_MASK))
                {
                    if(!(env->state & CPU_EMULATE_SINGLE_STEP))
                    {
                        Log(("EMR0: %08X IF=%d TF=%d CPL=%d\n", env->eip, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3));
                    }
                }
                else
                if (((env->hflags >> HF_CPL_SHIFT) & 3) == 3 && (env->hflags & HF_PE_MASK) && (env->cr[0] & CR0_PG_MASK))
                {
                    if(!(env->state & CPU_EMULATE_SINGLE_STEP))
                    {
                        if(env->eflags & VM_MASK)
                        {
                            Log(("EMV86: %08X IF=%d TF=%d CPL=%d flags=%08X CR0=%08X\n", env->eip, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3, flags, env->cr[0]));
                        }
                        else
                        {
                            Log(("EMR3: %08X IF=%d TF=%d CPL=%d IOPL=%d flags=%08X CR0=%08X\n", env->eip, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3, ((env->eflags >> IOPL_SHIFT) & 3), flags, env->cr[0]));
                        }
                    }
                }
#endif /* !DEBUG_bird */
                if(env->state & CPU_EMULATE_SINGLE_STEP)
                {
#ifdef DEBUG_bird
                    static int s_cTimes = 0;
                    if (s_cTimes++ > 1000000) /* 1 million */
                    {
                        RTLogPrintf("Enough stepping!\n");
                        #if 0
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                        #else
                        env->state &= ~CPU_EMULATE_SINGLE_STEP;
                        #endif
                    }
#endif
                    TMCpuTickPause(env->pVM);
                    remR3DisasInstr(env, -1, NULL);
                    TMCpuTickResume(env->pVM);
                    if(emulate_single_instr(env) == -1)
                    {
                        printf("emulate_single_instr failed for EIP=%08X!!\n", env->eip);
                    }
                }
                else
                {
                    RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE);
                    gen_func();
                    RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);
                }
#else /* !DEBUG || !VBOX || DEBUG_dmik */

#ifdef VBOX
                RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE);
                gen_func();
                RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);
#else /* !VBOX */
                gen_func();
#endif /* !VBOX */

#endif /* !DEBUG || !VBOX || DEBUG_dmik */
#endif
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    T0 = 0;
                }
#endif
            }
        } else {
            env_to_regs();
        }
    } /* for(;;) */


#if defined(TARGET_I386)
#if defined(USE_CODE_COPY)
    if (env->native_fp_regs) {
        save_native_fp_state(env);
    }
#endif
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);

    /* restore global registers */
#ifdef reg_EAX
    EAX = saved_EAX;
#endif
#ifdef reg_ECX
    ECX = saved_ECX;
#endif
#ifdef reg_EDX
    EDX = saved_EDX;
#endif
#ifdef reg_EBX
    EBX = saved_EBX;
#endif
#ifdef reg_ESP
    ESP = saved_ESP;
#endif
#ifdef reg_EBP
    EBP = saved_EBP;
#endif
#ifdef reg_ESI
    ESI = saved_ESI;
#endif
#ifdef reg_EDI
    EDI = saved_EDI;
#endif
#elif defined(TARGET_ARM)
    env->cpsr = compute_cpsr();
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#else
#error unsupported target CPU
#endif
#ifdef __sparc__
    asm volatile ("mov %0, %%i7" : : "r" (saved_i7));
#endif
    T0 = saved_T0;
    T1 = saved_T1;
    T2 = saved_T2;
    env = saved_env;
    return ret;
}

#endif /* !VBOX */

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}
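/* In real mode and v86 mode a selector is just a paragraph number: the base
   is selector << 4 and the limit is the fixed 64K seen above, e.g. selector
   0x1234 gives base 0x12340. Protected mode instead goes through load_seg()
   and the descriptor tables. */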

void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave((target_ulong)ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor((target_ulong)ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write,
                                   ((env->hflags & HF_CPL_MASK) == 3), 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(EXCP0E_PAGE, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
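/* Return contract, as used by the host signal handlers below: 0 means the
   fault was not caused by guest memory access (let the host report it), 1
   means it was fully handled here, either by unprotecting the page, by a
   silent soft-MMU fill, or by raising a guest page fault and longjmp-ing
   back into cpu_exec(). */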

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, msr_pr, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#else
#error unsupported target CPU
#endif

#if defined(__i386__)

#if defined(USE_CODE_COPY)
static void cpu_send_trap(unsigned long pc, int trap,
                          struct ucontext *uc)
{
    TranslationBlock *tb;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, uc);
    }
    sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    raise_exception_err(trap, env->error_code);
}
#endif

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = uc->uc_mcontext.gregs[REG_EIP];
    trapno = uc->uc_mcontext.gregs[REG_TRAPNO];
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
    if (trapno == 0x00 || trapno == 0x05) {
        /* send division by zero or bound exception */
        cpu_send_trap(pc, trapno, uc);
        return 1;
    } else
#endif
        return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                                 trapno == 0xe ?
                                 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                                 &uc->uc_sigmask, puc);
}
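/* trapno 0xe is the host page fault; bit 1 of the pushed error code is the
   architectural write/read bit, hence (REG_ERR >> 1) & 1 as is_write. Other
   traps pass is_write = 0. */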

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)               REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context)  /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(r##reg_num, context)
# define IAR_sig(context)               REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)
# define XER_sig(context)               REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)             ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)               EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
# define DSISR_sig(context)             EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)              EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */