VirtualBox

source: vbox/trunk/src/recompiler/exec.c@ 2192

Last change on this file since 2192 was 55, checked in by vboxsync, 18 years ago

RAM size should be an *unsigned* int

  • Property svn:eol-style set to native
File size: 72.7 KB
Line 
1/*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20#include "config.h"
21#ifdef _WIN32
22#include <windows.h>
23#else
24#include <sys/types.h>
25#include <sys/mman.h>
26#endif
27#include <stdlib.h>
28#include <stdio.h>
29#include <stdarg.h>
30#include <string.h>
31#include <errno.h>
32#include <unistd.h>
33#include <inttypes.h>
34
35#include "cpu.h"
36#include "exec-all.h"
37
38#ifdef VBOX
39#include <VBox/vm.h>
40#endif
41
42//#define DEBUG_TB_INVALIDATE
43//#define DEBUG_FLUSH
44//#define DEBUG_TLB
45
46/* make various TB consistency checks */
47//#define DEBUG_TB_CHECK
48//#define DEBUG_TLB_CHECK
49
50/* threshold to flush the translated code buffer */
51#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
52
53#define SMC_BITMAP_USE_THRESHOLD 10
54
55#define MMAP_AREA_START 0x00000000
56#define MMAP_AREA_END 0xa8000000
57
58TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
59TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
60TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
61int nb_tbs;
62/* any access to the tbs or the page table must use this lock */
63spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
64
65uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
66uint8_t *code_gen_ptr;
67
68#if !defined(VBOX)
69int phys_ram_fd;
70#endif /* !VBOX */
71uint32_t phys_ram_size;
72uint8_t *phys_ram_base;
73uint8_t *phys_ram_dirty;
74
/* Per-target-page bookkeeping for the translated-code cache. */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;   /* one bit per page byte, set where translated code lies (see build_page_bitmap) */
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;    /* PAGE_* protection flags (user-mode emulation only) */
#endif
} PageDesc;

/* Maps a guest physical page to its backing host memory / I/O handler. */
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    unsigned long phys_offset;
} PhysPageDesc;

/* Cached virtual->physical mapping for pages containing translated code. */
typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;   /* generation tag; invalidated wholesale by virt_page_flush() */
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    unsigned int prot;
#endif
} VirtPageDesc;
103
104#define L2_BITS 10
105#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
106
107#define L1_SIZE (1 << L1_BITS)
108#define L2_SIZE (1 << L2_BITS)
109
110static void io_mem_init(void);
111
112unsigned long qemu_real_host_page_size;
113unsigned long qemu_host_page_bits;
114unsigned long qemu_host_page_size;
115unsigned long qemu_host_page_mask;
116
117/* XXX: for system emulation, it could just be an array */
118static PageDesc *l1_map[L1_SIZE];
119static PhysPageDesc *l1_phys_map[L1_SIZE];
120
121#if !defined(CONFIG_USER_ONLY)
122static VirtPageDesc *l1_virt_map[L1_SIZE];
123static unsigned int virt_valid_tag;
124#endif
125
126/* io memory support */
127CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
128CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
129void *io_mem_opaque[IO_MEM_NB_ENTRIES];
130static int io_mem_nb;
131
132#ifndef VBOX
133/* log support */
134char *logfilename = "/tmp/qemu.log";
135FILE *logfile;
136int loglevel;
137#endif
138
139/* statistics */
140static int tlb_flush_count;
141static int tb_flush_count;
142static int tb_phys_invalidate_count;
143
/* Determine host page geometry and make the static code generation
   buffer executable.  Must run once before any code is translated. */
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        /* allow execution of generated code in the static buffer */
        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        /* round the buffer bounds out to host page boundaries,
           since mprotect() operates on whole pages */
        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    /* derive log2 and mask of the chosen host page size */
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
    /* tag 0 is reserved to mean "invalid" (see virt_page_flush) */
    virt_valid_tag = 1;
#endif
}
188
189static inline PageDesc *page_find_alloc(unsigned int index)
190{
191 PageDesc **lp, *p;
192
193 lp = &l1_map[index >> L2_BITS];
194 p = *lp;
195 if (!p) {
196 /* allocate if not found */
197 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
198 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
199 *lp = p;
200 }
201 return p + (index & (L2_SIZE - 1));
202}
203
204static inline PageDesc *page_find(unsigned int index)
205{
206 PageDesc *p;
207
208 p = l1_map[index >> L2_BITS];
209 if (!p)
210 return 0;
211 return p + (index & (L2_SIZE - 1));
212}
213
214static inline PhysPageDesc *phys_page_find_alloc(unsigned int index)
215{
216 PhysPageDesc **lp, *p;
217
218 lp = &l1_phys_map[index >> L2_BITS];
219 p = *lp;
220 if (!p) {
221 /* allocate if not found */
222 p = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
223 memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
224 *lp = p;
225 }
226 return p + (index & (L2_SIZE - 1));
227}
228
/* Look up the PhysPageDesc for physical page 'index'; returns 0 when
   the second-level table was never allocated. */
static inline PhysPageDesc *phys_page_find(unsigned int index)
{
    PhysPageDesc *p;

    p = l1_phys_map[index >> L2_BITS];
    if (!p)
        return 0;
#ifdef VBOX
    /* page is marked as not-yet-present dynamic RAM: have
       remR3GrowDynRange materialize the backing memory first
       (NOTE(review): behavior inferred from the callee's name — confirm) */
    p = p + (index & (L2_SIZE - 1));
    if ((p->phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING)
        remR3GrowDynRange(p->phys_offset & TARGET_PAGE_MASK);
    return p;
#else
    return p + (index & (L2_SIZE - 1));
#endif
}
245
246#if !defined(CONFIG_USER_ONLY)
247static void tlb_protect_code(CPUState *env, target_ulong addr);
248static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr);
249
250static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
251{
252 VirtPageDesc **lp, *p;
253
254 /* XXX: should not truncate for 64 bit addresses */
255#if TARGET_LONG_BITS > 32
256 index &= (L1_SIZE - 1);
257#endif
258 lp = &l1_virt_map[index >> L2_BITS];
259 p = *lp;
260 if (!p) {
261 /* allocate if not found */
262 p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE);
263 memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
264 *lp = p;
265 }
266 return p + (index & (L2_SIZE - 1));
267}
268
269static inline VirtPageDesc *virt_page_find(unsigned int index)
270{
271 VirtPageDesc *p;
272
273 p = l1_virt_map[index >> L2_BITS];
274 if (!p)
275 return 0;
276 return p + (index & (L2_SIZE - 1));
277}
278
/* Invalidate all cached virtual->physical code-page mappings by bumping
   the global generation tag (entries only match the current tag). */
static void virt_page_flush(void)
{
    int i, j;
    VirtPageDesc *p;

    virt_valid_tag++;

    if (virt_valid_tag == 0) {
        /* the tag wrapped around: restart at 1 and explicitly clear
           every stored tag so old entries cannot accidentally match */
        virt_valid_tag = 1;
        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                for(j = 0; j < L2_SIZE; j++)
                    p[j].valid_tag = 0;
            }
        }
    }
}
#else
/* user-only build keeps no virtual page cache: nothing to flush */
static void virt_page_flush(void)
{
}
#endif
302
303void cpu_exec_init(void)
304{
305 if (!code_gen_ptr) {
306 code_gen_ptr = code_gen_buffer;
307 page_init();
308 io_mem_init();
309 }
310}
311
312static inline void invalidate_page_bitmap(PageDesc *p)
313{
314 if (p->code_bitmap) {
315 qemu_free(p->code_bitmap);
316 p->code_bitmap = NULL;
317 }
318 p->code_write_count = 0;
319}
320
321/* set to NULL all the 'first_tb' fields in all PageDescs */
322static void page_flush_tb(void)
323{
324 int i, j;
325 PageDesc *p;
326
327 for(i = 0; i < L1_SIZE; i++) {
328 p = l1_map[i];
329 if (p) {
330 for(j = 0; j < L2_SIZE; j++) {
331 p->first_tb = NULL;
332 invalidate_page_bitmap(p);
333 p++;
334 }
335 }
336 }
337}
338
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env)
{
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    /* drop every TB: reset the TB count and both lookup hash tables */
    nb_tbs = 0;
    memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));
    virt_page_flush();

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    /* reclaim the whole code generation buffer */
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
361
#ifdef DEBUG_TB_CHECK

/* Debug aid: report any TB still hashed whose pc range overlaps the
   (supposedly invalidated) page containing 'address'. */
static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}
379
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            /* check both the first and last page spanned by the TB:
               neither may still be writable */
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
397
/* Debug aid: walk the circular jump list of 'tb' and verify it
   terminates back at 'tb' itself. */
void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        /* low 2 bits of each link encode the jump slot; value 2
           marks the end of the list */
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif
419
420/* invalidate one TB */
421static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
422 int next_offset)
423{
424 TranslationBlock *tb1;
425 for(;;) {
426 tb1 = *ptb;
427 if (tb1 == tb) {
428 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
429 break;
430 }
431 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
432 }
433}
434
/* Unlink 'tb' from a page's TB list.  Each link carries the page index
   (0 or 1) of the next entry in its low 2 bits, so pointers must be
   masked before being dereferenced. */
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;                               /* tag bits */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);       /* real pointer */
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
451
/* Remove jump slot 'n' of 'tb' from the circular jump list of the TB
   it currently points to.  List links are tagged pointers: low 2 bits
   hold the jump-slot number, tag value 2 marks the list head. */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
479
480/* reset the jump entry 'n' of a TB so that it is not chained to
481 another TB */
482static inline void tb_reset_jump(TranslationBlock *tb, int n)
483{
484 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
485}
486
/* Remove 'tb' from the lookup hash and break every jump link to and
   from it, so it can no longer be found or chained into. */
static inline void tb_invalidate(TranslationBlock *tb)
{
    unsigned int h, n1;
    TranslationBlock *tb1, *tb2, **ptb;

    /* tell the execution loop that the TB cache changed */
    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    ptb = &tb_hash[h];
    for(;;) {
        tb1 = *ptb;
        /* NOTE: the TB is not necessarily linked in the hash. It
           indicates that it is not currently used */
        if (tb1 == NULL)
            return;
        if (tb1 == tb) {
            *ptb = tb1->hash_next;
            break;
        }
        ptb = &tb1->hash_next;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;          /* tag: slot number, 2 = list end */
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        /* unpatch the generated jump in tb1 before dropping the link */
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}
528
#ifdef VBOX
/* Invalidate the TB at guest address 'eip'.  Currently implemented as
   a full TB cache flush; the targeted single-TB variant below is
   compiled out. */
void tb_invalidate_virt(CPUState *env, uint32_t eip)
{
#if 1
    tb_flush(env);
#else
    uint8_t *cs_base, *pc;
    unsigned int flags, h, phys_pc;
    TranslationBlock *tb, **ptb;

    /* rebuild the lookup key (flat pc + cs base + hflags/eflags bits)
       used when the TB was created */
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + eip;

    tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base,
                 flags);

    if(tb)
    {
#ifdef DEBUG
        printf("invalidating TB (%08X) at %08X\n", tb, eip);
#endif
        tb_invalidate(tb);
        //Note: this will leak TBs, but the whole cache will be flushed
        //      when it happens too often
        tb->pc = 0;
        tb->cs_base = 0;
        tb->flags = 0;
    }
#endif
}

# ifdef VBOX_STRICT
/**
 * Gets the page offset.
 * @returns the phys_offset of the page containing 'addr', or 0 when the
 *          page has no descriptor.
 */
unsigned long get_phys_page_offset(target_ulong addr)
{
    PhysPageDesc *p = phys_page_find(addr >> TARGET_PAGE_BITS);
    return p ? p->phys_offset : 0;
}
# endif /* VBOX_STRICT */
#endif /* VBOX */
573
/* Invalidate a TB keyed by physical address: remove it from the
   physical hash, from the per-page TB lists (skipping the page
   'page_addr' the caller is itself iterating), then break its jump
   links via tb_invalidate(). */
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    PageDesc *p;
    unsigned int h;
    target_ulong phys_pc;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    /* a TB may span a second page (page_addr[1] == -1 means it does not) */
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidate(tb);
    tb_phys_invalidate_count++;
}
601
/* Set the 'len' bits [start, start+len) in the bit array 'tab';
   bit i lives in byte i/8 at position i%8.  A zero 'len' is a no-op. */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int bit;
    int end = start + len;

    for (bit = start; bit < end; bit++)
        tab[bit >> 3] |= (uint8_t)(1 << (bit & 7));
}
628
/* Build the page's SMC bitmap: one bit per byte of the page, set
   wherever translated code lies, so later writes can be checked
   cheaply in tb_invalidate_phys_page_fast(). */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        /* low 2 bits of the link say which of the TB's (up to) two
           pages this list entry is for */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            /* second page: the TB occupies the start of this page */
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
659
#ifdef TARGET_HAS_PRECISE_SMC

/* Translate one block at pc/cs_base with the given flags/cflags and
   link it into the physical page tables.  Used to regenerate the
   current TB (e.g. as a single-instruction block) after self-modifying
   code is detected. */
static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    /* advance the shared code buffer pointer, keeping CODE_GEN_ALIGN */
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif
696
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* after SMC_BITMAP_USE_THRESHOLD write faults on this page, pay
       for a code bitmap so future writes can be filtered cheaply */
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;                           /* page index tag */
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            /* lazily locate the TB the CPU is currently executing */
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* clear env->current_tb around the invalidation, restore
               it afterwards and re-deliver any pending interrupt */
            saved_tb = env->current_tb;
            env->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
            env->current_tb = saved_tb;
            if (env->interrupt_request && env->current_tb)
                cpu_interrupt(env, env->interrupt_request);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
806
/* len must be <= 8 and start must be a multiple of len */
/* Fast path for small aligned writes: when the page has a code bitmap,
   only fall back to the full invalidation if the written bytes
   actually overlap translated code. */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                   cpu_single_env->mem_write_vaddr, len,
                   cpu_single_env->eip,
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        /* test the 'len' bitmap bits covering [start, start+len) */
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
835
#if !defined(CONFIG_SOFTMMU)
/* Invalidate every TB on the physical page containing 'addr'
   (no-softmmu variant).  'pc'/'puc' describe the faulting write so the
   currently executing TB can be regenerated if it modified itself. */
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;                           /* page index tag */
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
904
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, unsigned int page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    /* push the TB on the page's list; the page index 'n' (0 or 1) is
       encoded in the low bits of the stored link */
    tb->page_addr[n] = page_addr;
    p = page_find(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;
        prot = 0;
        /* merge the flags of all target pages sharing this host page */
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        target_ulong virt_addr;

        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
        tlb_protect_code(cpu_single_env, virt_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
955
956/* Allocate a new translation block. Flush the translation buffer if
957 too many translation blocks or too much generated code. */
958TranslationBlock *tb_alloc(target_ulong pc)
959{
960 TranslationBlock *tb;
961
962 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
963 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
964 return NULL;
965 tb = &tbs[nb_tbs++];
966 tb->pc = pc;
967 tb->cflags = 0;
968 return tb;
969}
970
971/* add a new TB and link it to the physical page tables. phys_page2 is
972 (-1) to indicate that only one page contains the TB. */
973void tb_link_phys(TranslationBlock *tb,
974 target_ulong phys_pc, target_ulong phys_page2)
975{
976 unsigned int h;
977 TranslationBlock **ptb;
978
979 /* add in the physical hash table */
980 h = tb_phys_hash_func(phys_pc);
981 ptb = &tb_phys_hash[h];
982 tb->phys_hash_next = *ptb;
983 *ptb = tb;
984
985 /* add in the page list */
986 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
987 if (phys_page2 != -1)
988 tb_alloc_page(tb, 1, phys_page2);
989 else
990 tb->page_addr[1] = -1;
991#ifdef DEBUG_TB_CHECK
992 tb_page_check();
993#endif
994}
995
/* link the tb with the other TBs */
void tb_link(TranslationBlock *tb)
{
#if !defined(CONFIG_USER_ONLY)
    {
        VirtPageDesc *vp;
        target_ulong addr;

        /* save the code memory mappings (needed to invalidate the code) */
        addr = tb->pc & TARGET_PAGE_MASK;
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
        if (vp->valid_tag == virt_valid_tag &&
            vp->phys_addr != tb->page_addr[0]) {
            printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                   addr, tb->page_addr[0], vp->phys_addr);
        }
#endif
        vp->phys_addr = tb->page_addr[0];
        /* (re)validate the entry for the current tag generation */
        if (vp->valid_tag != virt_valid_tag) {
            vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
            vp->prot = 0;
#endif
        }

        /* a TB may span two pages: record the second mapping too */
        if (tb->page_addr[1] != -1) {
            addr += TARGET_PAGE_SIZE;
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
            if (vp->valid_tag == virt_valid_tag &&
                vp->phys_addr != tb->page_addr[1]) {
                printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                       addr, tb->page_addr[1], vp->phys_addr);
            }
#endif
            vp->phys_addr = tb->page_addr[1];
            if (vp->valid_tag != virt_valid_tag) {
                vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
                vp->prot = 0;
#endif
            }
        }
    }
#endif

    /* initialize the TB's jump list as empty: a self-link tagged with 2
       marks the end of the circular list */
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
}
1058
1059/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1060 tb[1].tc_ptr. Return NULL if not found */
1061TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1062{
1063 int m_min, m_max, m;
1064 unsigned long v;
1065 TranslationBlock *tb;
1066
1067 if (nb_tbs <= 0)
1068 return NULL;
1069 if (tc_ptr < (unsigned long)code_gen_buffer ||
1070 tc_ptr >= (unsigned long)code_gen_ptr)
1071 return NULL;
1072 /* binary search (cf Knuth) */
1073 m_min = 0;
1074 m_max = nb_tbs - 1;
1075 while (m_min <= m_max) {
1076 m = (m_min + m_max) >> 1;
1077 tb = &tbs[m];
1078 v = (unsigned long)tb->tc_ptr;
1079 if (v == tc_ptr)
1080 return tb;
1081 else if (tc_ptr < v) {
1082 m_max = m - 1;
1083 } else {
1084 m_min = m + 1;
1085 }
1086 }
1087 return &tbs[m_max];
1088}
1089
static void tb_reset_jump_recursive(TranslationBlock *tb);

/* Detach jump slot 'n' of 'tb' from the TB it currently chains to,
   then recursively unchain that destination TB as well. */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;           /* tag: slot number, 2 = head */
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
1130
1131static void tb_reset_jump_recursive(TranslationBlock *tb)
1132{
1133 tb_reset_jump_recursive2(tb, 0);
1134 tb_reset_jump_recursive2(tb, 1);
1135}
1136
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
/* Drop any translated code covering the byte at guest pc, so the
   breakpoint takes effect on the next execution. */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong phys = cpu_get_phys_page_debug(env, pc);

    tb_invalidate_phys_page_range(phys, phys + 1, 0);
}
#endif
1146
1147/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1148 breakpoint is reached */
1149int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1150{
1151#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
1152 int i;
1153
1154 for(i = 0; i < env->nb_breakpoints; i++) {
1155 if (env->breakpoints[i] == pc)
1156 return 0;
1157 }
1158
1159 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1160 return -1;
1161 env->breakpoints[env->nb_breakpoints++] = pc;
1162
1163 breakpoint_invalidate(env, pc);
1164 return 0;
1165#else
1166 return -1;
1167#endif
1168}
1169
1170/* remove a breakpoint */
1171int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1172{
1173#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
1174 int i;
1175 for(i = 0; i < env->nb_breakpoints; i++) {
1176 if (env->breakpoints[i] == pc)
1177 goto found;
1178 }
1179 return -1;
1180 found:
1181 memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
1182 (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
1183 env->nb_breakpoints--;
1184
1185 breakpoint_invalidate(env, pc);
1186 return 0;
1187#else
1188 return -1;
1189#endif
1190}
1191
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies;
           existing TBs were generated without the per-insn debug exit */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}
1205
1206#ifndef VBOX
/* enable or disable low level logging; the log file is opened lazily on
   the first call that sets a non-zero level.  Exits the process if the
   file cannot be opened. */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        /* line-buffered so log output is visible promptly */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}
1228
/* Set the file name used by cpu_set_log() when the log is first opened.
   NOTE(review): the previously strdup'd name is not freed here — leaks a
   small allocation if called repeatedly; safe only because the initial
   value may not be heap-allocated. */
void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}
1233#endif
1234
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;  /* spin flag guarding the TB unlink below */

#if defined(VBOX)
    VM_ASSERT_EMT(env->pVM);
    /* atomic OR: may race with readers on other threads */
    ASMAtomicOrS32(&env->interrupt_request, mask);
#else /* VBOX */
    env->interrupt_request |= mask;
#endif /* VBOX */
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        /* break the TB chain so the execution loop notices the request */
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}
1256
/* Clear the given bits from env->interrupt_request. */
void cpu_reset_interrupt(CPUState *env, int mask)
{
#if defined(VBOX)
    /*
     * Note: the current implementation can be executed by another thread without problems; make sure this remains true
     * for future changes!
     */
    ASMAtomicAndS32(&env->interrupt_request, ~mask);
#else /* !VBOX */
    env->interrupt_request &= ~mask;
#endif /* !VBOX */
}
1269
1270#ifndef VBOX
/* Table mapping each log flag bit to its command-line name and help text;
   terminated by a zero-mask sentinel entry (cpu_str_to_log_mask relies on
   this terminator). */
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before bloc translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
1298
/* Return non-zero iff the first n characters of s1 are exactly the
   NUL-terminated string s2 (i.e. s2 has length n and matches). */
static int cmp1(const char *s1, int n, const char *s2)
{
    return strlen(s2) == (size_t)n && memcmp(s1, s2, n) == 0;
}
1305
1306/* takes a comma separated list of log masks. Return 0 if error. */
1307int cpu_str_to_log_mask(const char *str)
1308{
1309 CPULogItem *item;
1310 int mask;
1311 const char *p, *p1;
1312
1313 p = str;
1314 mask = 0;
1315 for(;;) {
1316 p1 = strchr(p, ',');
1317 if (!p1)
1318 p1 = p + strlen(p);
1319 if(cmp1(p,p1-p,"all")) {
1320 for(item = cpu_log_items; item->mask != 0; item++) {
1321 mask |= item->mask;
1322 }
1323 } else {
1324 for(item = cpu_log_items; item->mask != 0; item++) {
1325 if (cmp1(p, p1 - p, item->name))
1326 goto found;
1327 }
1328 return 0;
1329 }
1330 found:
1331 mask |= item->mask;
1332 if (*p1 != ',')
1333 break;
1334 p = p1 + 1;
1335 }
1336 return mask;
1337}
1338#endif /* !VBOX */
1339
1340#if !defined(VBOX) /* VBOX: we have our own routine. */
/* Fatal-error helper: print a printf-style message and a dump of the CPU
   state to stderr, then abort().  Never returns. */
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}
1357#endif /* !VBOX */
1358
1359#if !defined(CONFIG_USER_ONLY)
1360
/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    /* invalidate every entry in both TLB banks (index 0 and 1),
       read and write */
    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }

    /* drop the virtual-pc hash so stale TB lookups cannot succeed */
    virt_page_flush();
    memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#elif defined(VBOX)
    /* inform raw mode about TLB flush */
    remR3FlushTLB(env, flush_global);
#endif
    tlb_flush_count++;
}
1392
1393static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1394{
1395 if (addr == (tlb_entry->address &
1396 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
1397 tlb_entry->address = -1;
1398}
1399
/* Flush the TLB entries for a single guest virtual page and invalidate
   any TBs translated from that virtual address. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i, n;
    VirtPageDesc *vp;
    PageDesc *p;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: 0x%08x\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    /* remove from the virtual pc hash table all the TB at this
       virtual address */

    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (vp && vp->valid_tag == virt_valid_tag) {
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
        if (p) {
            /* we remove all the links to the TBs in this virtual page */
            /* page_next pointers carry a 2-bit slot tag in their low bits */
            tb = p->first_tb;
            while (tb != NULL) {
                n = (long)tb & 3;
                tb = (TranslationBlock *)((long)tb & ~3);
                /* a TB is affected if it starts or ends on this page */
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
                    tb_invalidate(tb);
                }
                tb = tb->page_next[n];
            }
        }
        vp->valid_tag = 0;
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#elif defined(VBOX)
    /* inform raw mode about TLB page flush */
    remR3FlushPage(env, addr);
#endif
}
1451
1452static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
1453{
1454 if (addr == (tlb_entry->address &
1455 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
1456 (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
1457 (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
1458 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
1459 }
1460}
1461
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(CPUState *env, target_ulong addr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    /* redirect the write entry of both TLB banks to the SMC handler */
    tlb_protect_code1(&env->tlb_write[0][i], addr);
    tlb_protect_code1(&env->tlb_write[1][i], addr);
#if !defined(CONFIG_SOFTMMU)
    /* NOTE: as we generated the code for this page, it is already at
       least readable */
    if (addr < MMAP_AREA_END)
        mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
#endif

#if defined(VBOX) && defined(REM_MONITOR_CODE_PAGES)
    remR3ProtectCode(env, addr);
#endif
}
1483
/* If this write TLB entry is an SMC (IO_MEM_CODE) entry resolving to host
   address 'phys_addr', downgrade it to IO_MEM_NOTDIRTY so plain writes go
   through the dirty-tracking handler instead of the SMC handler. */
static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
                                       unsigned long phys_addr)
{
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
        ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
    }
}
1492
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr)
{
    int i;

    phys_addr &= TARGET_PAGE_MASK;
#ifdef VBOX
    /* translate guest physical to host virtual for the addend comparison */
    phys_addr = (unsigned long)remR3GCPhys2HCVirt(env, phys_addr);
#else
    phys_addr += (long)phys_ram_base;
#endif
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
    tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
}
1509
1510static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1511 unsigned long start, unsigned long length)
1512{
1513 unsigned long addr;
1514 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1515 addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
1516 if ((addr - start) < length) {
1517 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1518 }
1519 }
1520}
1521
/* Clear the dirty flags for guest physical range [start, end) and adjust
   the TLB so the next write to any page in the range is trapped (and the
   page re-marked dirty). */
void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    /* one dirty byte per page */
    memset(phys_ram_dirty + (start >> TARGET_PAGE_BITS), 0, length >> TARGET_PAGE_BITS);

    env = cpu_single_env;
    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
#ifdef VBOX
    start1 = (unsigned long)remR3GCPhys2HCVirt(env, start);
#else
    start1 = start + (unsigned long)phys_ram_base;
#endif
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);

#if !defined(CONFIG_SOFTMMU) && !defined(VBOX)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        /* walk the whole virtual page map and write-protect any host
           mapping whose backing physical page is in the range */
        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}
1577
1578static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1579 unsigned long start)
1580{
1581 unsigned long addr;
1582 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1583 addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
1584 if (addr == start) {
1585 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
1586 }
1587 }
1588}
1589
/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
    CPUState *env = cpu_single_env;
    int i;

#ifdef VBOX
    /* 'addr' is a host virtual address here; translate back to a guest
       physical address to index the dirty byte array */
    if (remR3HCVirt2GCPhys(env, (void *)addr) > phys_ram_size)
    {
        Log(("phys_ram_dirty exceeded at address %VGp, ignoring\n",
             (RTGCPHYS)(addr - (uintptr_t)phys_ram_base)));
        return;
    }
    phys_ram_dirty[(unsigned long)remR3HCVirt2GCPhys(env, (void *)addr) >> TARGET_PAGE_BITS] = 1;
#else
    phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 1;
#endif


    /* re-enable direct RAM writes for this page in both TLB banks */
    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
}
1615
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    TranslationBlock *first_tb;
    unsigned int index;
    target_ulong address;
    unsigned long addend;
    int ret;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    first_tb = NULL;
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        PageDesc *p1;
        pd = p->phys_offset;
        if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
            /* NOTE: we also allocate the page at this stage */
            p1 = page_find_alloc(pd >> TARGET_PAGE_BITS);
            /* non-NULL first_tb means translated code lives in this page */
            first_tb = p1->first_tb;
        }
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
           vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case: handler index kept in the low address bits */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
#ifdef VBOX
            addend = (unsigned long)remR3GCPhys2HCVirt(env, pd & TARGET_PAGE_MASK);
#else
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
#endif
        }

        index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
        /* store addend relative to vaddr: host addr = guest vaddr + addend */
        addend -= vaddr;
        if (prot & PAGE_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend;
            } else
            /* XXX: the PowerPC code seems not ready to handle
               self modifying code with DCBI */
#if defined(TARGET_HAS_SMC) || 1
            if (first_tb) {
                /* if code is present, we use a specific memory
                   handler. It works only for physical memory access */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
                env->tlb_write[is_user][index].addend = addend;
            } else
#endif
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                !cpu_physical_memory_is_dirty(pd)) {
                /* clean RAM page: trap the first write to set the dirty bit */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
                env->tlb_write[is_user][index].addend = addend;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
#ifdef VBOX
        /* inform raw mode about TLB page change */
        remR3SetPage(env, &env->tlb_read[is_user][index], &env->tlb_write[is_user][index], prot, is_user);
#endif
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
1755
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was succesfully handled. */
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    /* restore the original (writable) protection saved in the page desc */
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#elif defined(VBOX)
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    return 1;
#else
    return 0;
#endif
}
1803
1804#else
1805
/* No-op in user-mode emulation: there is no softmmu TLB to flush. */
void tlb_flush(CPUState *env, int flush_global)
{
}
1809
/* No-op in user-mode emulation: there is no softmmu TLB to flush. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
}
1813
/* User-mode stub: no TLB entries are managed; always reports success. */
int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}
1820
1821#ifndef VBOX
1822/* dump memory mappings */
1823void page_dump(FILE *f)
1824{
1825 unsigned long start, end;
1826 int i, j, prot, prot1;
1827 PageDesc *p;
1828
1829 fprintf(f, "%-8s %-8s %-8s %s\n",
1830 "start", "end", "size", "prot");
1831 start = -1;
1832 end = -1;
1833 prot = 0;
1834 for(i = 0; i <= L1_SIZE; i++) {
1835 if (i < L1_SIZE)
1836 p = l1_map[i];
1837 else
1838 p = NULL;
1839 for(j = 0;j < L2_SIZE; j++) {
1840 if (!p)
1841 prot1 = 0;
1842 else
1843 prot1 = p[j].flags;
1844 if (prot1 != prot) {
1845 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1846 if (start != -1) {
1847 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1848 start, end, end - start,
1849 prot & PAGE_READ ? 'r' : '-',
1850 prot & PAGE_WRITE ? 'w' : '-',
1851 prot & PAGE_EXEC ? 'x' : '-');
1852 }
1853 if (prot1 != 0)
1854 start = end;
1855 else
1856 start = -1;
1857 prot = prot1;
1858 }
1859 if (!p)
1860 break;
1861 }
1862 }
1863}
1864#endif /* !VBOX */
1865
1866int page_get_flags(unsigned long address)
1867{
1868 PageDesc *p;
1869
1870 p = page_find(address >> TARGET_PAGE_BITS);
1871 if (!p)
1872 return 0;
1873 return p->flags;
1874}
1875
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positionned automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;   /* remember the page was originally writable */
#if defined(VBOX)
    AssertMsgFailed(("We shouldn't be here, and if we should, we must have an env to do the proper locking!\n"));
#endif
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
1905
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was succesfully handled. */
int page_unprotect(unsigned long address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    /* work on the host page containing the fault: a host page may span
       several target pages */
    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    /* union of the flags of every target page in the host page */
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}
1945
1946/* call this function when system calls directly modify a memory area */
1947void page_unprotect_range(uint8_t *data, unsigned long data_size)
1948{
1949 unsigned long start, end, addr;
1950
1951 start = (unsigned long)data;
1952 end = start + data_size;
1953 start &= TARGET_PAGE_MASK;
1954 end = TARGET_PAGE_ALIGN(end);
1955 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1956 page_unprotect(addr, 0, NULL);
1957 }
1958}
1959
/* No-op in user-mode emulation: no dirty tracking via the softmmu TLB. */
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
}
1963#endif /* defined(CONFIG_USER_ONLY) */
1964
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    unsigned long addr, end_addr;
    PhysPageDesc *p;

    /* round size up to a whole number of pages */
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS);
        p->phys_offset = phys_offset;
        /* only RAM/ROM-like pages advance the backing offset; IO pages
           share a single handler index */
#ifdef VBOX
        if (   (phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM
            || (phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING)
#else
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
#endif
            phys_offset += TARGET_PAGE_SIZE;
    }
}
1989
/* Reads from unassigned physical memory return 0. */
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}
1994
/* Writes to unassigned physical memory are silently discarded. */
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}
1998
/* Handler tables for unassigned memory: byte (0), word (1), dword (2)
   accesses all share the same no-op implementation. */
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};
2010
/* self modifying code support in soft mmu mode : writing to a page
   containing code comes to these functions */

/* Byte write into a code page: invalidate overlapping TBs, perform the
   store, then mark the page dirty. 'addr' is a host virtual address. */
static void code_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

#ifdef VBOX
    phys_addr = remR3HCVirt2GCPhys(cpu_single_env, (void *)addr);
#else
    phys_addr = addr - (unsigned long)phys_ram_base;
#endif
#if !defined(CONFIG_USER_ONLY)
    /* invalidate translated code overlapping the 1-byte store */
    tb_invalidate_phys_page_fast(phys_addr, 1);
#endif
    stb_p((uint8_t *)(long)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}
2029
/* 16-bit variant of code_mem_writeb: invalidate, store, mark dirty. */
static void code_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

#ifdef VBOX
    phys_addr = remR3HCVirt2GCPhys(cpu_single_env, (void *)addr);
#else
    phys_addr = addr - (unsigned long)phys_ram_base;
#endif
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 2);
#endif
    stw_p((uint8_t *)(long)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}
2045
/* 32-bit variant of code_mem_writeb: invalidate, store, mark dirty. */
static void code_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

#ifdef VBOX
    phys_addr = remR3HCVirt2GCPhys(cpu_single_env, (void *)addr);
#else
    phys_addr = addr - (unsigned long)phys_ram_base;
#endif
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 4);
#endif
    stl_p((uint8_t *)(long)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}
2061
/* Handler tables for code (SMC-protected) pages; reads never go through
   these entries because only the write TLB is redirected to IO_MEM_CODE. */
static CPUReadMemoryFunc *code_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *code_mem_write[3] = {
    code_mem_writeb,
    code_mem_writew,
    code_mem_writel,
};
2073
/* Byte write to a clean (NOTDIRTY) page: perform the store, then mark the
   page dirty and restore direct RAM access for it. */
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stb_p((uint8_t *)(long)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}
2079
/* 16-bit variant of notdirty_mem_writeb. */
static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stw_p((uint8_t *)(long)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}
2085
/* 32-bit variant of notdirty_mem_writeb. */
static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stl_p((uint8_t *)(long)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

/* Write handlers for clean RAM pages (dirty-bit tracking). */
static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
2097
/* Register the built-in IO memory handlers (ROM, unassigned, SMC code,
   not-dirty tracking and, for VBOX, missing RAM) and set the first free
   dynamic handler index. */
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write, NULL);
#ifdef VBOX
    cpu_register_io_memory(IO_MEM_RAM_MISSING >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    io_mem_nb = 6;
#else
    io_mem_nb = 5;
#endif

#if !defined(VBOX) /* VBOX: we do this later when the RAM is allocated. */
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
#endif /* !VBOX */
}
2116
2117/* mem_read and mem_write are arrays of functions containing the
2118 function to access byte (index 0), word (index 1) and dword (index
2119 2). All functions must be supplied. If io_index is non zero, the
2120 corresponding io zone is modified. If it is zero, a new io zone is
2121 allocated. The return value can be used with
2122 cpu_register_physical_memory(). (-1) is returned if error. */
2123int cpu_register_io_memory(int io_index,
2124 CPUReadMemoryFunc **mem_read,
2125 CPUWriteMemoryFunc **mem_write,
2126 void *opaque)
2127{
2128 int i;
2129
2130 if (io_index <= 0) {
2131 if (io_index >= IO_MEM_NB_ENTRIES)
2132 return -1;
2133 io_index = io_mem_nb++;
2134 } else {
2135 if (io_index >= IO_MEM_NB_ENTRIES)
2136 return -1;
2137 }
2138
2139 for(i = 0;i < 3; i++) {
2140 io_mem_read[io_index][i] = mem_read[i];
2141 io_mem_write[io_index][i] = mem_write[i];
2142 }
2143 io_mem_opaque[io_index] = opaque;
2144 return io_index << IO_MEM_SHIFT;
2145}
2146
/* Return the write handler triple for a value previously returned by
   cpu_register_io_memory(). */
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

/* Return the read handler triple for a value previously returned by
   cpu_register_io_memory(). */
CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}
2156
2157/* physical memory access (slow version, mainly for debug) */
2158#if defined(CONFIG_USER_ONLY)
2159void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2160 int len, int is_write)
2161{
2162 int l, flags;
2163 target_ulong page;
2164
2165 while (len > 0) {
2166 page = addr & TARGET_PAGE_MASK;
2167 l = (page + TARGET_PAGE_SIZE) - addr;
2168 if (l > len)
2169 l = len;
2170 flags = page_get_flags(page);
2171 if (!(flags & PAGE_VALID))
2172 return;
2173 if (is_write) {
2174 if (!(flags & PAGE_WRITE))
2175 return;
2176 memcpy((uint8_t *)addr, buf, len);
2177 } else {
2178 if (!(flags & PAGE_READ))
2179 return;
2180 memcpy(buf, (uint8_t *)addr, len);
2181 }
2182 len -= l;
2183 buf += l;
2184 addr += l;
2185 }
2186}
2187
/* never used */
/* User-mode stubs for the physical-memory load/store helpers; nothing
   calls them in CONFIG_USER_ONLY builds. */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    return 0;
}

void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
}
2201
2202#else
2203void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2204 int len, int is_write)
2205{
2206 int l, io_index;
2207 uint8_t *ptr;
2208 uint32_t val;
2209 target_phys_addr_t page;
2210 unsigned long pd;
2211 PhysPageDesc *p;
2212
2213 while (len > 0) {
2214 page = addr & TARGET_PAGE_MASK;
2215 l = (page + TARGET_PAGE_SIZE) - addr;
2216 if (l > len)
2217 l = len;
2218 p = phys_page_find(page >> TARGET_PAGE_BITS);
2219 if (!p) {
2220 pd = IO_MEM_UNASSIGNED;
2221 } else {
2222 pd = p->phys_offset;
2223 }
2224
2225 if (is_write) {
2226 if ((pd & ~TARGET_PAGE_MASK) != 0) {
2227 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2228 if (l >= 4 && ((addr & 3) == 0)) {
2229 /* 32 bit read access */
2230 val = ldl_p(buf);
2231 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2232 l = 4;
2233 } else if (l >= 2 && ((addr & 1) == 0)) {
2234 /* 16 bit read access */
2235 val = lduw_p(buf);
2236 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2237 l = 2;
2238 } else {
2239 /* 8 bit access */
2240 val = ldub_p(buf);
2241 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2242 l = 1;
2243 }
2244 } else {
2245 unsigned long addr1;
2246 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2247 /* RAM case */
2248#ifdef VBOX
2249 ptr = remR3GCPhys2HCVirt(cpu_single_env, addr1);;
2250 remR3PhysWriteBytes(ptr, buf, l);
2251#else
2252 ptr = phys_ram_base + addr1;
2253 memcpy(ptr, buf, l);
2254#endif
2255 /* invalidate code */
2256 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2257 /* set dirty bit */
2258 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 1;
2259 }
2260 } else {
2261 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2262 (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
2263 /* I/O case */
2264 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2265 if (l >= 4 && ((addr & 3) == 0)) {
2266 /* 32 bit read access */
2267 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2268 stl_p(buf, val);
2269 l = 4;
2270 } else if (l >= 2 && ((addr & 1) == 0)) {
2271 /* 16 bit read access */
2272 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2273 stw_p(buf, val);
2274 l = 2;
2275 } else {
2276 /* 8 bit access */
2277 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2278 stb_p(buf, val);
2279 l = 1;
2280 }
2281 } else {
2282 /* RAM case */
2283#ifdef VBOX
2284 ptr = remR3GCPhys2HCVirt(cpu_single_env, (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK));
2285 remR3PhysReadBytes(ptr, buf, l);
2286#else
2287 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2288 (addr & ~TARGET_PAGE_MASK);
2289 memcpy(buf, ptr, l);
2290#endif
2291 }
2292 }
2293 len -= l;
2294 buf += l;
2295 addr += l;
2296 }
2297}
2298
2299/* warning: addr must be aligned */
2300uint32_t ldl_phys(target_phys_addr_t addr)
2301{
2302 int io_index;
2303 uint8_t *ptr;
2304 uint32_t val;
2305 unsigned long pd;
2306 PhysPageDesc *p;
2307
2308 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2309 if (!p) {
2310 pd = IO_MEM_UNASSIGNED;
2311 } else {
2312 pd = p->phys_offset;
2313 }
2314
2315 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2316 (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
2317 /* I/O case */
2318 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2319 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2320 } else {
2321 /* RAM case */
2322#ifdef VBOX
2323 ptr = remR3GCPhys2HCVirt(cpu_single_env, (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK));
2324#else
2325 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2326 (addr & ~TARGET_PAGE_MASK);
2327#endif
2328 val = ldl_p(ptr);
2329 }
2330 return val;
2331}
2332
2333/* warning: addr must be aligned. The ram page is not masked as dirty
2334 and the code inside is not invalidated. It is useful if the dirty
2335 bits are used to track modified PTEs */
2336void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2337{
2338 int io_index;
2339 uint8_t *ptr;
2340 unsigned long pd;
2341 PhysPageDesc *p;
2342
2343 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2344 if (!p) {
2345 pd = IO_MEM_UNASSIGNED;
2346 } else {
2347 pd = p->phys_offset;
2348 }
2349
2350 if ((pd & ~TARGET_PAGE_MASK) != 0) {
2351 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2352 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2353 } else {
2354#ifdef VBOX
2355 ptr = remR3GCPhys2HCVirt(cpu_single_env, (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK));
2356#else
2357 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2358 (addr & ~TARGET_PAGE_MASK);
2359#endif
2360 stl_p(ptr, val);
2361 }
2362}
2363
2364/* warning: addr must be aligned */
2365/* XXX: optimize code invalidation test */
2366void stl_phys(target_phys_addr_t addr, uint32_t val)
2367{
2368 int io_index;
2369 uint8_t *ptr;
2370 unsigned long pd;
2371 PhysPageDesc *p;
2372
2373 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2374 if (!p) {
2375 pd = IO_MEM_UNASSIGNED;
2376 } else {
2377 pd = p->phys_offset;
2378 }
2379
2380 if ((pd & ~TARGET_PAGE_MASK) != 0) {
2381 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2382 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2383 } else {
2384 unsigned long addr1;
2385 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2386 /* RAM case */
2387#ifdef VBOX
2388 ptr = remR3GCPhys2HCVirt(cpu_single_env, addr1);
2389#else
2390 ptr = phys_ram_base + addr1;
2391#endif
2392 stl_p(ptr, val);
2393 /* invalidate code */
2394 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2395 /* set dirty bit */
2396 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 1;
2397 }
2398}
2399
2400#endif
2401
2402/* virtual memory access for debug */
2403int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2404 uint8_t *buf, int len, int is_write)
2405{
2406 int l;
2407 target_ulong page, phys_addr;
2408
2409 while (len > 0) {
2410 page = addr & TARGET_PAGE_MASK;
2411 phys_addr = cpu_get_phys_page_debug(env, page);
2412 /* if no physical page mapped, return an error */
2413 if (phys_addr == -1)
2414 return -1;
2415 l = (page + TARGET_PAGE_SIZE) - addr;
2416 if (l > len)
2417 l = len;
2418 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2419 buf, l, is_write);
2420 len -= l;
2421 buf += l;
2422 addr += l;
2423 }
2424 return 0;
2425}
2426
2427#ifndef VBOX
2428void dump_exec_info(FILE *f,
2429 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2430{
2431 int i, target_code_size, max_target_code_size;
2432 int direct_jmp_count, direct_jmp2_count, cross_page;
2433 TranslationBlock *tb;
2434
2435 target_code_size = 0;
2436 max_target_code_size = 0;
2437 cross_page = 0;
2438 direct_jmp_count = 0;
2439 direct_jmp2_count = 0;
2440 for(i = 0; i < nb_tbs; i++) {
2441 tb = &tbs[i];
2442 target_code_size += tb->size;
2443 if (tb->size > max_target_code_size)
2444 max_target_code_size = tb->size;
2445 if (tb->page_addr[1] != -1)
2446 cross_page++;
2447 if (tb->tb_next_offset[0] != 0xffff) {
2448 direct_jmp_count++;
2449 if (tb->tb_next_offset[1] != 0xffff) {
2450 direct_jmp2_count++;
2451 }
2452 }
2453 }
2454 /* XXX: avoid using doubles ? */
2455 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2456 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2457 nb_tbs ? target_code_size / nb_tbs : 0,
2458 max_target_code_size);
2459 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2460 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2461 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2462 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2463 cross_page,
2464 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2465 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2466 direct_jmp_count,
2467 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2468 direct_jmp2_count,
2469 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2470 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2471 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2472 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2473}
2474#endif /* !VBOX */
2475
2476#if !defined(CONFIG_USER_ONLY)
2477
2478#define MMUSUFFIX _cmmu
2479#define GETPC() NULL
2480#define env cpu_single_env
2481#define SOFTMMU_CODE_ACCESS
2482
2483#define SHIFT 0
2484#include "softmmu_template.h"
2485
2486#define SHIFT 1
2487#include "softmmu_template.h"
2488
2489#define SHIFT 2
2490#include "softmmu_template.h"
2491
2492#define SHIFT 3
2493#include "softmmu_template.h"
2494
2495#undef env
2496
2497#endif
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette