VirtualBox

source: vbox/trunk/src/recompiler/new/exec.c@ 1344

Last change on this file since 1344 was 1182, checked in by vboxsync, 18 years ago

Restore the PGM_DYNAMIC_RAM_ALLOC tests and #include <VBox/pgm.h> to make sure it's defined.

  • Property svn:eol-style set to native
File size: 78.8 KB
Line 
1/*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20#include "config.h"
21#ifndef VBOX
22#ifdef _WIN32
23#include <windows.h>
24#else
25#include <sys/types.h>
26#include <sys/mman.h>
27#endif
28#include <stdlib.h>
29#include <stdio.h>
30#include <stdarg.h>
31#include <string.h>
32#include <errno.h>
33#include <unistd.h>
34#include <inttypes.h>
35#else /* VBOX */
36# include <stdlib.h>
37# include <stdio.h>
38# include <inttypes.h>
39# include <iprt/alloc.h>
40# include <iprt/string.h>
41# include <iprt/param.h>
42# include <VBox/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */
43#endif /* VBOX */
44
45#include "cpu.h"
46#include "exec-all.h"
47#if defined(CONFIG_USER_ONLY)
48#include <qemu.h>
49#endif
50
/* Debug switches; enable by uncommenting. */
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation. */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

/* number of write accesses to a code page before we switch to a
   per-byte code bitmap for it (see build_page_bitmap) */
#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END 0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
81
/* Pool of translation blocks and the physical-PC hash table used to
   look them up. */
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;             /* number of tbs[] entries currently in use */
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

/* Buffer that receives generated host code; made executable in page_init(). */
uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE]
#if defined(__MINGW32__)
    __attribute__((aligned (16)));
#else
    __attribute__((aligned (32)));
#endif
uint8_t *code_gen_ptr;  /* current allocation position in code_gen_buffer */
95
96#ifndef VBOX
97int phys_ram_size;
98int phys_ram_fd;
99int phys_ram_size;
100#else /* VBOX */
101RTGCPHYS phys_ram_size;
102/* we have memory ranges (the high PC-BIOS mapping) which
103 causes some pages to fall outside the dirty map here. */
104uint32_t phys_ram_dirty_size;
105#endif /* VBOX */
106#if !defined(VBOX) || !defined(PGM_DYNAMIC_RAM_ALLOC)
107uint8_t *phys_ram_base;
108#endif
109uint8_t *phys_ram_dirty;
110
CPUState *first_cpu;    /* head of the linked list of registered CPUs */
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

/* Per-target-page bookkeeping for translated code. */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;

/* Geometry of the two-level page tables below. */
#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

static void io_mem_init(void);

/* Host page geometry; initialized by page_init(). */
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;

#ifndef VBOX
/* log support */
char *logfilename = "/tmp/qemu.log";
#endif /* !VBOX */
FILE *logfile;
int loglevel;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
#ifndef VBOX
static int tb_phys_invalidate_count;
#endif /* !VBOX */
169
170static void page_init(void)
171{
172 /* NOTE: we can always suppose that qemu_host_page_size >=
173 TARGET_PAGE_SIZE */
174#ifdef VBOX
175 RTMemProtect(code_gen_buffer, sizeof(code_gen_buffer),
176 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
177 qemu_real_host_page_size = PAGE_SIZE;
178#else /* !VBOX */
179#ifdef _WIN32
180 {
181 SYSTEM_INFO system_info;
182 DWORD old_protect;
183
184 GetSystemInfo(&system_info);
185 qemu_real_host_page_size = system_info.dwPageSize;
186
187 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
188 PAGE_EXECUTE_READWRITE, &old_protect);
189 }
190#else
191 qemu_real_host_page_size = getpagesize();
192 {
193 unsigned long start, end;
194
195 start = (unsigned long)code_gen_buffer;
196 start &= ~(qemu_real_host_page_size - 1);
197
198 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
199 end += qemu_real_host_page_size - 1;
200 end &= ~(qemu_real_host_page_size - 1);
201
202 mprotect((void *)start, end - start,
203 PROT_READ | PROT_WRITE | PROT_EXEC);
204 }
205#endif
206#endif /* !VBOX */
207
208 if (qemu_host_page_size == 0)
209 qemu_host_page_size = qemu_real_host_page_size;
210 if (qemu_host_page_size < TARGET_PAGE_SIZE)
211 qemu_host_page_size = TARGET_PAGE_SIZE;
212 qemu_host_page_bits = 0;
213 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
214 qemu_host_page_bits++;
215 qemu_host_page_mask = ~(qemu_host_page_size - 1);
216 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
217 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
218}
219
220static inline PageDesc *page_find_alloc(unsigned int index)
221{
222 PageDesc **lp, *p;
223
224 lp = &l1_map[index >> L2_BITS];
225 p = *lp;
226 if (!p) {
227 /* allocate if not found */
228 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
229 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
230 *lp = p;
231 }
232 return p + (index & (L2_SIZE - 1));
233}
234
235static inline PageDesc *page_find(unsigned int index)
236{
237 PageDesc *p;
238
239 p = l1_map[index >> L2_BITS];
240 if (!p)
241 return 0;
242 return p + (index & (L2_SIZE - 1));
243}
244
245static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
246{
247 void **lp, **p;
248 PhysPageDesc *pd;
249
250 p = (void **)l1_phys_map;
251#if TARGET_PHYS_ADDR_SPACE_BITS > 32
252
253#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
254#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
255#endif
256 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
257 p = *lp;
258 if (!p) {
259 /* allocate if not found */
260 if (!alloc)
261 return NULL;
262 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
263 memset(p, 0, sizeof(void *) * L1_SIZE);
264 *lp = p;
265 }
266#endif
267 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
268 pd = *lp;
269 if (!pd) {
270 int i;
271 /* allocate if not found */
272 if (!alloc)
273 return NULL;
274 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
275 *lp = pd;
276 for (i = 0; i < L2_SIZE; i++)
277 pd[i].phys_offset = IO_MEM_UNASSIGNED;
278 }
279#if defined(VBOX) && defined(PGM_DYNAMIC_RAM_ALLOC)
280 pd = ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
281 if (RT_UNLIKELY((pd->phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING))
282 remR3GrowDynRange(pd->phys_offset & TARGET_PAGE_MASK);
283 return pd;
284#else
285 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
286#endif
287}
288
/* Read-only variant of phys_page_find_alloc(): never allocates,
   returns NULL when no descriptor exists. */
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
293
#if !defined(CONFIG_USER_ONLY)
/* Forward declarations: write-protect / unprotect physical RAM pages
   that contain translated code (softmmu only). */
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif
299
300void cpu_exec_init(CPUState *env)
301{
302 CPUState **penv;
303 int cpu_index;
304
305 if (!code_gen_ptr) {
306 code_gen_ptr = code_gen_buffer;
307 page_init();
308 io_mem_init();
309 }
310 env->next_cpu = NULL;
311 penv = &first_cpu;
312 cpu_index = 0;
313 while (*penv != NULL) {
314 penv = (CPUState **)&(*penv)->next_cpu;
315 cpu_index++;
316 }
317 env->cpu_index = cpu_index;
318 *penv = env;
319}
320
321static inline void invalidate_page_bitmap(PageDesc *p)
322{
323 if (p->code_bitmap) {
324 qemu_free(p->code_bitmap);
325 p->code_bitmap = NULL;
326 }
327 p->code_write_count = 0;
328}
329
330/* set to NULL all the 'first_tb' fields in all PageDescs */
331static void page_flush_tb(void)
332{
333 int i, j;
334 PageDesc *p;
335
336 for(i = 0; i < L1_SIZE; i++) {
337 p = l1_map[i];
338 if (p) {
339 for(j = 0; j < L2_SIZE; j++) {
340 p->first_tb = NULL;
341 invalidate_page_bitmap(p);
342 p++;
343 }
344 }
345 }
346}
347
348/* flush all the translation blocks */
349/* XXX: tb_flush is currently not thread safe */
350void tb_flush(CPUState *env1)
351{
352 CPUState *env;
353#if defined(DEBUG_FLUSH)
354 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
355 code_gen_ptr - code_gen_buffer,
356 nb_tbs,
357 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
358#endif
359 nb_tbs = 0;
360
361 for(env = first_cpu; env != NULL; env = env->next_cpu) {
362 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
363 }
364
365 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
366 page_flush_tb();
367
368 code_gen_ptr = code_gen_buffer;
369 /* XXX: flush processor icache at this point if cache flush is
370 expensive */
371 tb_flush_count++;
372}
373
374#ifdef DEBUG_TB_CHECK
375
376static void tb_invalidate_check(unsigned long address)
377{
378 TranslationBlock *tb;
379 int i;
380 address &= TARGET_PAGE_MASK;
381 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
382 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
383 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
384 address >= tb->pc + tb->size)) {
385 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
386 address, (long)tb->pc, tb->size);
387 }
388 }
389 }
390}
391
392/* verify that all the pages have correct rights for code */
393static void tb_page_check(void)
394{
395 TranslationBlock *tb;
396 int i, flags1, flags2;
397
398 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
399 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
400 flags1 = page_get_flags(tb->pc);
401 flags2 = page_get_flags(tb->pc + tb->size - 1);
402 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
403 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
404 (long)tb->pc, tb->size, flags1, flags2);
405 }
406 }
407 }
408}
409
410void tb_jmp_check(TranslationBlock *tb)
411{
412 TranslationBlock *tb1;
413 unsigned int n1;
414
415 /* suppress any remaining jumps to this TB */
416 tb1 = tb->jmp_first;
417 for(;;) {
418 n1 = (long)tb1 & 3;
419 tb1 = (TranslationBlock *)((long)tb1 & ~3);
420 if (n1 == 2)
421 break;
422 tb1 = tb1->jmp_next[n1];
423 }
424 /* check end of list */
425 if (tb1 != tb) {
426 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
427 }
428}
429
430#endif
431
432/* invalidate one TB */
433static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
434 int next_offset)
435{
436 TranslationBlock *tb1;
437 for(;;) {
438 tb1 = *ptb;
439 if (tb1 == tb) {
440 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
441 break;
442 }
443 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
444 }
445}
446
447static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
448{
449 TranslationBlock *tb1;
450 unsigned int n1;
451
452 for(;;) {
453 tb1 = *ptb;
454 n1 = (long)tb1 & 3;
455 tb1 = (TranslationBlock *)((long)tb1 & ~3);
456 if (tb1 == tb) {
457 *ptb = tb1->page_next[n1];
458 break;
459 }
460 ptb = &tb1->page_next[n1];
461 }
462}
463
464static inline void tb_jmp_remove(TranslationBlock *tb, int n)
465{
466 TranslationBlock *tb1, **ptb;
467 unsigned int n1;
468
469 ptb = &tb->jmp_next[n];
470 tb1 = *ptb;
471 if (tb1) {
472 /* find tb(n) in circular list */
473 for(;;) {
474 tb1 = *ptb;
475 n1 = (long)tb1 & 3;
476 tb1 = (TranslationBlock *)((long)tb1 & ~3);
477 if (n1 == n && tb1 == tb)
478 break;
479 if (n1 == 2) {
480 ptb = &tb1->jmp_first;
481 } else {
482 ptb = &tb1->jmp_next[n1];
483 }
484 }
485 /* now we can suppress tb(n) from the list */
486 *ptb = tb->jmp_next[n];
487
488 tb->jmp_next[n] = NULL;
489 }
490}
491
492/* reset the jump entry 'n' of a TB so that it is not chained to
493 another TB */
494static inline void tb_reset_jump(TranslationBlock *tb, int n)
495{
496 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
497}
498
/* Remove 'tb' from every structure that references it: the physical-PC
   hash, the per-page TB lists, the per-CPU jump caches and the jump
   lists of other TBs chained to it.  'page_addr' names a page whose
   list the caller is iterating itself (skipped here), or -1. */
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

#ifndef VBOX
    tb_phys_invalidate_count++;
#endif /* !VBOX */
}
556
557#ifdef VBOX
558void tb_invalidate_virt(CPUState *env, uint32_t eip)
559{
560# if 1
561 tb_flush(env);
562# else
563 uint8_t *cs_base, *pc;
564 unsigned int flags, h, phys_pc;
565 TranslationBlock *tb, **ptb;
566
567 flags = env->hflags;
568 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
569 cs_base = env->segs[R_CS].base;
570 pc = cs_base + eip;
571
572 tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base,
573 flags);
574
575 if(tb)
576 {
577# ifdef DEBUG
578 printf("invalidating TB (%08X) at %08X\n", tb, eip);
579# endif
580 tb_invalidate(tb);
581 //Note: this will leak TBs, but the whole cache will be flushed
582 // when it happens too often
583 tb->pc = 0;
584 tb->cs_base = 0;
585 tb->flags = 0;
586 }
587# endif
588}
589
590# ifdef VBOX_STRICT
591/**
592 * Gets the page offset.
593 */
594unsigned long get_phys_page_offset(target_ulong addr)
595{
596 PhysPageDesc *p = phys_page_find(addr >> TARGET_PAGE_BITS);
597 return p ? p->phys_offset : 0;
598}
599# endif /* VBOX_STRICT */
600#endif /* VBOX */
601
/* Set bits [start, start+len) in the bit array 'tab'; bit i lives in
   byte i/8 at bit position i%8. */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end = start + len;
    int mask;

    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        /* the whole range lies within a single byte */
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        /* leading partial byte */
        *tab++ |= mask;
        start = (start + 8) & ~7;
        /* full bytes in the middle */
        while (start < (end & ~7)) {
            *tab++ = 0xff;
            start += 8;
        }
        /* trailing partial byte, if any */
        if (start < end)
            *tab |= ~(0xff << (end & 7));
    }
}
628
/* Build the SMC bitmap of 'p': one bit per byte of the target page,
   set for every byte covered by translated code.  Silently does
   nothing if the bitmap allocation fails. */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            /* second page of the TB: it starts at the page boundary */
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
659
660#ifdef TARGET_HAS_PRECISE_SMC
661
/* Translate the code at pc/cs_base/flags into a fresh TB and link it
   into the physical page tables.  Used by the precise-SMC machinery to
   regenerate the block being modified. */
static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    /* advance the allocation pointer, keeping CODE_GEN_ALIGN alignment */
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
695#endif
696
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* after enough write faults on a code page, switch to a per-byte
       bitmap so future writes outside translated code stay cheap */
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            /* lazily locate the TB the CPU is currently executing */
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
813
814/* len must be <= 8 and start must be a multiple of len */
815static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
816{
817 PageDesc *p;
818 int offset, b;
819#if 0
820 if (1) {
821 if (loglevel) {
822 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
823 cpu_single_env->mem_write_vaddr, len,
824 cpu_single_env->eip,
825 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
826 }
827 }
828#endif
829 p = page_find(start >> TARGET_PAGE_BITS);
830 if (!p)
831 return;
832 if (p->code_bitmap) {
833 offset = start & ~TARGET_PAGE_MASK;
834 b = p->code_bitmap[offset >> 3] >> (offset & 7);
835 if (b & ((1 << len) - 1))
836 goto do_invalidate;
837 } else {
838 do_invalidate:
839 tb_invalidate_phys_page_range(start, start + len, 1);
840 }
841}
842
843#if !defined(CONFIG_SOFTMMU)
/* Invalidate all TBs on the page containing 'addr' (user-mode path
   without softmmu).  'pc' and 'puc' describe the faulting write so the
   current TB can be restarted if it modified itself. */
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
910#endif
911
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    /* link the TB at the head of the page's list; the low bits of the
       link encode which of the TB's two pages this entry is for */
    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
966
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    /* out of TB slots or out of code buffer space: the caller must
       tb_flush() and retry */
    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
981
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    /* the TB is its own jump-list terminator (low bits = 2) */
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
1022
1023/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1024 tb[1].tc_ptr. Return NULL if not found */
1025TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1026{
1027 int m_min, m_max, m;
1028 unsigned long v;
1029 TranslationBlock *tb;
1030
1031 if (nb_tbs <= 0)
1032 return NULL;
1033 if (tc_ptr < (unsigned long)code_gen_buffer ||
1034 tc_ptr >= (unsigned long)code_gen_ptr)
1035 return NULL;
1036 /* binary search (cf Knuth) */
1037 m_min = 0;
1038 m_max = nb_tbs - 1;
1039 while (m_min <= m_max) {
1040 m = (m_min + m_max) >> 1;
1041 tb = &tbs[m];
1042 v = (unsigned long)tb->tc_ptr;
1043 if (v == tc_ptr)
1044 return tb;
1045 else if (tc_ptr < v) {
1046 m_max = m - 1;
1047 } else {
1048 m_min = m + 1;
1049 }
1050 }
1051 return &tbs[m_max];
1052}
1053
static void tb_reset_jump_recursive(TranslationBlock *tb);

/* Unchain jump slot 'n' of 'tb' from its target TB's circular
   jmp_first list, then recursively unchain the target itself. */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
1094
1095static void tb_reset_jump_recursive(TranslationBlock *tb)
1096{
1097 tb_reset_jump_recursive2(tb, 0);
1098 tb_reset_jump_recursive2(tb, 1);
1099}
1100
1101#if defined(TARGET_HAS_ICE)
1102static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1103{
1104 target_ulong addr, pd;
1105 ram_addr_t ram_addr;
1106 PhysPageDesc *p;
1107
1108 addr = cpu_get_phys_page_debug(env, pc);
1109 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1110 if (!p) {
1111 pd = IO_MEM_UNASSIGNED;
1112 } else {
1113 pd = p->phys_offset;
1114 }
1115 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1116 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1117}
1118#endif
1119
1120/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1121 breakpoint is reached */
1122int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1123{
1124#if defined(TARGET_HAS_ICE)
1125 int i;
1126
1127 for(i = 0; i < env->nb_breakpoints; i++) {
1128 if (env->breakpoints[i] == pc)
1129 return 0;
1130 }
1131
1132 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1133 return -1;
1134 env->breakpoints[env->nb_breakpoints++] = pc;
1135
1136 breakpoint_invalidate(env, pc);
1137 return 0;
1138#else
1139 return -1;
1140#endif
1141}
1142
1143/* remove a breakpoint */
1144int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1145{
1146#if defined(TARGET_HAS_ICE)
1147 int i;
1148 for(i = 0; i < env->nb_breakpoints; i++) {
1149 if (env->breakpoints[i] == pc)
1150 goto found;
1151 }
1152 return -1;
1153 found:
1154 env->nb_breakpoints--;
1155 if (i < env->nb_breakpoints)
1156 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1157
1158 breakpoint_invalidate(env, pc);
1159 return 0;
1160#else
1161 return -1;
1162#endif
1163}
1164
1165/* enable or disable single step mode. EXCP_DEBUG is returned by the
1166 CPU loop after each instruction */
1167void cpu_single_step(CPUState *env, int enabled)
1168{
1169#if defined(TARGET_HAS_ICE)
1170 if (env->singlestep_enabled != enabled) {
1171 env->singlestep_enabled = enabled;
1172 /* must flush all the translated code to avoid inconsistancies */
1173 /* XXX: only flush what is necessary */
1174 tb_flush(env);
1175 }
1176#endif
1177}
1178
1179#ifndef VBOX
1180/* enable or disable low levels log */
1181void cpu_set_log(int log_flags)
1182{
1183 loglevel = log_flags;
1184 if (loglevel && !logfile) {
1185 logfile = fopen(logfilename, "w");
1186 if (!logfile) {
1187 perror(logfilename);
1188 _exit(1);
1189 }
1190#if !defined(CONFIG_SOFTMMU)
1191 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1192 {
1193 static uint8_t logfile_buf[4096];
1194 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1195 }
1196#else
1197 setvbuf(logfile, NULL, _IOLBF, 0);
1198#endif
1199 }
1200}
1201
1202void cpu_set_log_filename(const char *filename)
1203{
1204 logfilename = strdup(filename);
1205}
1206#endif /* !VBOX */
1207
/* mask must never be zero, except for A20 change call */
/* Raise the bits of 'mask' in env->interrupt_request and, if a TB is
   currently executing, unlink it so the CPU loop notices the request
   promptly.  The unlink is guarded by a test-and-set spin flag so a
   concurrent/nested caller simply skips the unlink step. */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;  /* spin flag guarding the TB unlink below */

#ifdef VBOX
    VM_ASSERT_EMT(env->pVM);
    ASMAtomicOrS32(&env->interrupt_request, mask);
#else /* !VBOX */
    env->interrupt_request |= mask;
#endif /* !VBOX */
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}
1229
/* Clear the bits of 'mask' in env->interrupt_request. */
void cpu_reset_interrupt(CPUState *env, int mask)
{
#ifdef VBOX
    /*
     * Note: the current implementation can be executed by another thread without problems; make sure this remains true
     * for future changes!
     */
    ASMAtomicAndS32(&env->interrupt_request, ~mask);
#else /* !VBOX */
    env->interrupt_request &= ~mask;
#endif /* !VBOX */
}
1242
1243#ifndef VBOX
/* Table mapping log mask bits to their command-line names and help
   text; terminated by a zero-mask sentinel entry relied upon by
   cpu_str_to_log_mask() below. */
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before bloc translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },  /* sentinel */
};
1271
/* Return non-zero iff the first 'n' bytes starting at 's1' exactly
   spell the NUL-terminated string 's2'. */
static int cmp1(const char *s1, int n, const char *s2)
{
    return strlen(s2) == (size_t)n && memcmp(s1, s2, n) == 0;
}
1278
1279/* takes a comma separated list of log masks. Return 0 if error. */
1280int cpu_str_to_log_mask(const char *str)
1281{
1282 CPULogItem *item;
1283 int mask;
1284 const char *p, *p1;
1285
1286 p = str;
1287 mask = 0;
1288 for(;;) {
1289 p1 = strchr(p, ',');
1290 if (!p1)
1291 p1 = p + strlen(p);
1292 if(cmp1(p,p1-p,"all")) {
1293 for(item = cpu_log_items; item->mask != 0; item++) {
1294 mask |= item->mask;
1295 }
1296 } else {
1297 for(item = cpu_log_items; item->mask != 0; item++) {
1298 if (cmp1(p, p1 - p, item->name))
1299 goto found;
1300 }
1301 return 0;
1302 }
1303 found:
1304 mask |= item->mask;
1305 if (*p1 != ',')
1306 break;
1307 p = p1 + 1;
1308 }
1309 return mask;
1310}
1311#endif /* !VBOX */
1312
1313#ifndef VBOX /* VBOX: we have our own routine. */
1314void cpu_abort(CPUState *env, const char *fmt, ...)
1315{
1316 va_list ap;
1317
1318 va_start(ap, fmt);
1319 fprintf(stderr, "qemu: fatal: ");
1320 vfprintf(stderr, fmt, ap);
1321 fprintf(stderr, "\n");
1322#ifdef TARGET_I386
1323 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1324#else
1325 cpu_dump_state(env, stderr, fprintf, 0);
1326#endif
1327 va_end(ap);
1328 abort();
1329}
1330#endif /* !VBOX */
1331
1332#if !defined(CONFIG_USER_ONLY)
1333
1334/* NOTE: if flush_global is true, also flush global entries (not
1335 implemented yet) */
1336void tlb_flush(CPUState *env, int flush_global)
1337{
1338 int i;
1339
1340#if defined(DEBUG_TLB)
1341 printf("tlb_flush:\n");
1342#endif
1343 /* must reset current TB so that interrupts cannot modify the
1344 links while we are modifying them */
1345 env->current_tb = NULL;
1346
1347 for(i = 0; i < CPU_TLB_SIZE; i++) {
1348 env->tlb_table[0][i].addr_read = -1;
1349 env->tlb_table[0][i].addr_write = -1;
1350 env->tlb_table[0][i].addr_code = -1;
1351 env->tlb_table[1][i].addr_read = -1;
1352 env->tlb_table[1][i].addr_write = -1;
1353 env->tlb_table[1][i].addr_code = -1;
1354 }
1355
1356 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1357
1358#if !defined(CONFIG_SOFTMMU)
1359 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1360#endif
1361#ifdef VBOX
1362 /* inform raw mode about TLB flush */
1363 remR3FlushTLB(env, flush_global);
1364#endif
1365#ifdef USE_KQEMU
1366 if (env->kqemu_enabled) {
1367 kqemu_flush(env, flush_global);
1368 }
1369#endif
1370 tlb_flush_count++;
1371}
1372
1373static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1374{
1375 if (addr == (tlb_entry->addr_read &
1376 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1377 addr == (tlb_entry->addr_write &
1378 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1379 addr == (tlb_entry->addr_code &
1380 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1381 tlb_entry->addr_read = -1;
1382 tlb_entry->addr_write = -1;
1383 tlb_entry->addr_code = -1;
1384 }
1385}
1386
1387void tlb_flush_page(CPUState *env, target_ulong addr)
1388{
1389 int i;
1390 TranslationBlock *tb;
1391
1392#if defined(DEBUG_TLB)
1393 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1394#endif
1395 /* must reset current TB so that interrupts cannot modify the
1396 links while we are modifying them */
1397 env->current_tb = NULL;
1398
1399 addr &= TARGET_PAGE_MASK;
1400 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1401 tlb_flush_entry(&env->tlb_table[0][i], addr);
1402 tlb_flush_entry(&env->tlb_table[1][i], addr);
1403
1404 /* Discard jump cache entries for any tb which might potentially
1405 overlap the flushed page. */
1406 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1407 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1408
1409 i = tb_jmp_cache_hash_page(addr);
1410 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1411
1412#if !defined(CONFIG_SOFTMMU)
1413 if (addr < MMAP_AREA_END)
1414 munmap((void *)addr, TARGET_PAGE_SIZE);
1415#endif
1416#ifdef VBOX
1417 /* inform raw mode about TLB page flush */
1418 remR3FlushPage(env, addr);
1419#endif /* VBOX */
1420#ifdef USE_KQEMU
1421 if (env->kqemu_enabled) {
1422 kqemu_flush_page(env, addr);
1423 }
1424#endif
1425}
1426
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    /* Clearing CODE_DIRTY_FLAG downgrades the write TLB entries covering
       this page to IO_MEM_NOTDIRTY, so subsequent writes trap and can
       invalidate the translated code. */
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
#if defined(VBOX) && defined(REM_MONITOR_CODE_PAGES)
    /** @todo Retest this? This function has changed... */
    remR3ProtectCode(cpu_single_env, ram_addr);
#endif
}
1439
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
/* NOTE: 'env' and 'vaddr' are unused here; kept for interface symmetry
   with the other TLB helpers. */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
#ifdef VBOX
    /* guard against ram offsets beyond the dirty bitmap (dynamic RAM) */
    if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
#endif
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
1450
1451static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1452 unsigned long start, unsigned long length)
1453{
1454 unsigned long addr;
1455 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1456 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1457 if ((addr - start) < length) {
1458 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1459 }
1460 }
1461}
1462
1463void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1464 int dirty_flags)
1465{
1466 CPUState *env;
1467 unsigned long length, start1;
1468 int i, mask, len;
1469 uint8_t *p;
1470
1471 start &= TARGET_PAGE_MASK;
1472 end = TARGET_PAGE_ALIGN(end);
1473
1474 length = end - start;
1475 if (length == 0)
1476 return;
1477 len = length >> TARGET_PAGE_BITS;
1478#ifdef USE_KQEMU
1479 /* XXX: should not depend on cpu context */
1480 env = first_cpu;
1481 if (env->kqemu_enabled) {
1482 ram_addr_t addr;
1483 addr = start;
1484 for(i = 0; i < len; i++) {
1485 kqemu_set_notdirty(env, addr);
1486 addr += TARGET_PAGE_SIZE;
1487 }
1488 }
1489#endif
1490 mask = ~dirty_flags;
1491 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1492#ifdef VBOX
1493 if (RT_LIKELY((start >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
1494#endif
1495 for(i = 0; i < len; i++)
1496 p[i] &= mask;
1497
1498 /* we modify the TLB cache so that the dirty bit will be set again
1499 when accessing the range */
1500#if !defined(VBOX) || !defined(PGM_DYNAMIC_RAM_ALLOC)
1501 start1 = start + (unsigned long)phys_ram_base;
1502#else
1503 start1 = (unsigned long)remR3GCPhys2HCVirt(env, start);
1504#endif
1505 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1506 for(i = 0; i < CPU_TLB_SIZE; i++)
1507 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1508 for(i = 0; i < CPU_TLB_SIZE; i++)
1509 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1510 }
1511
1512#if !defined(CONFIG_SOFTMMU)
1513#ifdef VBOX /**@todo remove this check */
1514# error "We shouldn't get here..."
1515#endif
1516 /* XXX: this is expensive */
1517 {
1518 VirtPageDesc *p;
1519 int j;
1520 target_ulong addr;
1521
1522 for(i = 0; i < L1_SIZE; i++) {
1523 p = l1_virt_map[i];
1524 if (p) {
1525 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1526 for(j = 0; j < L2_SIZE; j++) {
1527 if (p->valid_tag == virt_valid_tag &&
1528 p->phys_addr >= start && p->phys_addr < end &&
1529 (p->prot & PROT_WRITE)) {
1530 if (addr < MMAP_AREA_END) {
1531 mprotect((void *)addr, TARGET_PAGE_SIZE,
1532 p->prot & ~PROT_WRITE);
1533 }
1534 }
1535 addr += TARGET_PAGE_SIZE;
1536 p++;
1537 }
1538 }
1539 }
1540 }
1541#endif
1542}
1543
/* If the RAM page backing this write TLB entry is no longer dirty,
   switch the entry to IO_MEM_NOTDIRTY so the next write is trapped. */
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        /* RAM case */
#if !defined(VBOX) || !defined(PGM_DYNAMIC_RAM_ALLOC)
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
#else
        ram_addr = remR3HCVirt2GCPhys(cpu_single_env, (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend); /** @todo check if this is right! */
#endif
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}
1561
1562/* update the TLB according to the current state of the dirty bits */
1563void cpu_tlb_update_dirty(CPUState *env)
1564{
1565 int i;
1566 for(i = 0; i < CPU_TLB_SIZE; i++)
1567 tlb_update_dirty(&env->tlb_table[0][i]);
1568 for(i = 0; i < CPU_TLB_SIZE; i++)
1569 tlb_update_dirty(&env->tlb_table[1][i]);
1570}
1571
1572static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1573 unsigned long start)
1574{
1575 unsigned long addr;
1576 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1577 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1578 if (addr == start) {
1579 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1580 }
1581 }
1582}
1583
1584/* update the TLB corresponding to virtual page vaddr and phys addr
1585 addr so that it is no longer dirty */
1586static inline void tlb_set_dirty(CPUState *env,
1587 unsigned long addr, target_ulong vaddr)
1588{
1589 int i;
1590
1591 addr &= TARGET_PAGE_MASK;
1592 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1593 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1594 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1595}
1596
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;

    /* look up the physical page descriptor; unmapped pages get the
       unassigned-memory I/O handler */
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
#if !defined(VBOX) || !defined(PGM_DYNAMIC_RAM_ALLOC)
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
#else
            addend = (unsigned long)remR3GCPhys2HCVirt(env, pd & TARGET_PAGE_MASK);
#endif
        }

        /* 'addend' is stored relative to vaddr so 'vaddr + addend' gives
           the host address directly */
        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[is_user][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                /* not-dirty RAM: trap writes for dirty tracking / SMC */
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
#ifdef VBOX
        /* inform raw mode about TLB page change */
        /** @todo double check and fix this interface. OLD: remR3SetPage(env, &env->tlb_read[is_user][index], &env->tlb_write[is_user][index], prot, is_user); */
        remR3SetPage(env, te, te, prot, is_user);
#endif
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        /* non-softmmu path: map the guest page directly into the host
           address space with mmap */
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
1722
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was succesfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    /* restore the original (writable) protection saved when the page
       was write-protected for code tracking */
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
#ifdef VBOX
    if (RT_LIKELY((vp->phys_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
#endif
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#elif defined(VBOX)
    /* VBox softmmu build: report in-range faults as handled */
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    return 1;
#else
    return 0;
#endif
}
1773
1774#else
1775
/* user-mode emulation: there is no software TLB, so this is a no-op */
void tlb_flush(CPUState *env, int flush_global)
{
}
1779
/* user-mode emulation: there is no software TLB, so this is a no-op */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
}
1783
/* user-mode emulation: no TLB entries to install; always succeeds */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    return 0;
}
1790
1791#ifndef VBOX
/* dump memory mappings */
/* Walks the two-level page table and prints maximal runs of pages with
   identical protection flags, in /proc/self/maps-like format. */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    /* start == -1 means "no run in progress" */
    start = -1;
    end = -1;
    prot = 0;
    /* iterate one slot past L1_SIZE so the final run is flushed */
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                /* protection changed: emit the previous run, if any */
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
1834#endif /* !VBOX */
1835
1836int page_get_flags(target_ulong address)
1837{
1838 PageDesc *p;
1839
1840 p = page_find(address >> TARGET_PAGE_BITS);
1841 if (!p)
1842 return 0;
1843 return p->flags;
1844}
1845
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positionned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
#ifdef VBOX
    AssertMsgFailed(("We shouldn't be here, and if we should, we must have an env to do the proper locking!\n"));
#endif
    /* tb_lock serializes against translation of code in this range */
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
1875
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was succesfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    /* protection is applied at host-page granularity, which may cover
       several target pages */
    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    /* accumulate the flags of every target page in the host page */
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}
1915
1916/* call this function when system calls directly modify a memory area */
1917/* ??? This should be redundant now we have lock_user. */
1918void page_unprotect_range(target_ulong data, target_ulong data_size)
1919{
1920 target_ulong start, end, addr;
1921
1922 start = data;
1923 end = start + data_size;
1924 start &= TARGET_PAGE_MASK;
1925 end = TARGET_PAGE_ALIGN(end);
1926 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1927 page_unprotect(addr, 0, NULL);
1928 }
1929}
1930
/* user-mode emulation: no TLB dirty state to update; no-op */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
1935#endif /* defined(CONFIG_USER_ONLY) */
1936
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
        p->phys_offset = phys_offset;
        /* NOTE: the if-condition below spans the #if/#else and controls
           the brace-less statement after the #endif: only RAM/ROM-backed
           pages (and, for VBox, RAM_MISSING pages) advance phys_offset;
           pure MMIO pages reuse the same handler id for every page. */
#if !defined(VBOX) || !defined(PGM_DYNAMIC_RAM_ALLOC)
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
            (phys_offset & IO_MEM_ROMD))
#else
        if (   (phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM
            || (phys_offset & IO_MEM_ROMD)
            || (phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING)
#endif

            phys_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
1972
1973/* XXX: temporary until new memory mapping API */
1974uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
1975{
1976 PhysPageDesc *p;
1977
1978 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1979 if (!p)
1980 return IO_MEM_UNASSIGNED;
1981 return p->phys_offset;
1982}
1983
/* Read handler for unassigned physical memory: reads return 0. */
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read 0x%08x\n", (int)addr);
#endif
    return 0;
}
1991
/* Write handler for unassigned physical memory: writes are ignored. */
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
#endif
}
1998
/* Byte handler reused for all three access widths (8/16/32 bit):
   unassigned reads of any size return 0. */
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};
2004
/* Byte handler reused for all three access widths (8/16/32 bit):
   unassigned writes of any size are discarded. */
static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};
2010
/* Byte write handler for the NOTDIRTY slot: performs the store to RAM,
   invalidates any translated code on the page, updates the dirty
   bitmap, and re-enables direct writes once the page is fully dirty.
   NOTE: 'addr' here is a host virtual address (vaddr + TLB addend). */
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
#if !defined(VBOX) || !defined(PGM_DYNAMIC_RAM_ALLOC)
    ram_addr = addr - (unsigned long)phys_ram_base;
#else
    ram_addr = remR3HCVirt2GCPhys(cpu_single_env, (void *)addr);
#endif
#ifdef VBOX
    /* out-of-bitmap pages are treated as fully dirty */
    if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
        dirty_flags = 0xff;
    else
#endif /* VBOX */
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* the page holds translated code: invalidate the TBs first */
        tb_invalidate_phys_page_fast(ram_addr, 1);
# ifdef VBOX
        if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
            dirty_flags = 0xff;
        else
# endif /* VBOX */
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
#ifdef VBOX
    if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
#endif /* !VBOX */
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
2053
/* 16-bit variant of the NOTDIRTY write handler (see notdirty_mem_writeb);
   differs only in the store width and the invalidation length. */
static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
#if !defined(VBOX) || !defined(PGM_DYNAMIC_RAM_ALLOC)
    ram_addr = addr - (unsigned long)phys_ram_base;
#else
    ram_addr = remR3HCVirt2GCPhys(cpu_single_env, (void *)addr);
#endif
#ifdef VBOX
    if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
        dirty_flags = 0xff;
    else
#endif /* VBOX */
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* the page holds translated code: invalidate the TBs first */
        tb_invalidate_phys_page_fast(ram_addr, 2);
# ifdef VBOX
        if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
            dirty_flags = 0xff;
        else
# endif /* VBOX */
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
#ifdef VBOX
    if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
#endif
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
2096
/* 32-bit variant of the NOTDIRTY write handler (see notdirty_mem_writeb);
   differs only in the store width and the invalidation length. */
static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
#if !defined(VBOX) || !defined(PGM_DYNAMIC_RAM_ALLOC)
    ram_addr = addr - (unsigned long)phys_ram_base;
#else
    ram_addr = remR3HCVirt2GCPhys(cpu_single_env, (void *)addr);
#endif
#ifdef VBOX
    if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
        dirty_flags = 0xff;
    else
#endif /* VBOX */
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* the page holds translated code: invalidate the TBs first */
        tb_invalidate_phys_page_fast(ram_addr, 4);
# ifdef VBOX
        if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
            dirty_flags = 0xff;
        else
# endif /* VBOX */
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
#ifdef VBOX
    if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
#endif
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
2139
/* Placeholder read table for write-only slots (ROM writes, NOTDIRTY);
   reads never dispatch through these entries. */
static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};
2145
/* Dispatch table for the NOTDIRTY slot: one handler per access width. */
static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
2151
/* Register the fixed low I/O memory slots (ROM, unassigned, notdirty,
   plus the VBox missing-RAM handler) and, outside VBox, allocate the
   dirty-page bitmap. */
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
#if defined(VBOX) && defined(PGM_DYNAMIC_RAM_ALLOC)
    cpu_register_io_memory(IO_MEM_RAM_MISSING >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    io_mem_nb = 6;
#else
    io_mem_nb = 5;
#endif

#ifndef VBOX /* VBOX: we do this later when the RAM is allocated. */
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
#endif /* !VBOX */
}
2170
2171/* mem_read and mem_write are arrays of functions containing the
2172 function to access byte (index 0), word (index 1) and dword (index
2173 2). All functions must be supplied. If io_index is non zero, the
2174 corresponding io zone is modified. If it is zero, a new io zone is
2175 allocated. The return value can be used with
2176 cpu_register_physical_memory(). (-1) is returned if error. */
2177int cpu_register_io_memory(int io_index,
2178 CPUReadMemoryFunc **mem_read,
2179 CPUWriteMemoryFunc **mem_write,
2180 void *opaque)
2181{
2182 int i;
2183
2184 if (io_index <= 0) {
2185 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2186 return -1;
2187 io_index = io_mem_nb++;
2188 } else {
2189 if (io_index >= IO_MEM_NB_ENTRIES)
2190 return -1;
2191 }
2192
2193 for(i = 0;i < 3; i++) {
2194 io_mem_read[io_index][i] = mem_read[i];
2195 io_mem_write[io_index][i] = mem_write[i];
2196 }
2197 io_mem_opaque[io_index] = opaque;
2198 return io_index << IO_MEM_SHIFT;
2199}
2200
2201CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2202{
2203 return io_mem_write[io_index >> IO_MEM_SHIFT];
2204}
2205
2206CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2207{
2208 return io_mem_read[io_index >> IO_MEM_SHIFT];
2209}
2210
2211/* physical memory access (slow version, mainly for debug) */
2212#if defined(CONFIG_USER_ONLY)
2213void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2214 int len, int is_write)
2215{
2216 int l, flags;
2217 target_ulong page;
2218 void * p;
2219
2220 while (len > 0) {
2221 page = addr & TARGET_PAGE_MASK;
2222 l = (page + TARGET_PAGE_SIZE) - addr;
2223 if (l > len)
2224 l = len;
2225 flags = page_get_flags(page);
2226 if (!(flags & PAGE_VALID))
2227 return;
2228 if (is_write) {
2229 if (!(flags & PAGE_WRITE))
2230 return;
2231 p = lock_user(addr, len, 0);
2232 memcpy(p, buf, len);
2233 unlock_user(p, addr, len);
2234 } else {
2235 if (!(flags & PAGE_READ))
2236 return;
2237 p = lock_user(addr, len, 1);
2238 memcpy(buf, p, len);
2239 unlock_user(p, addr, 0);
2240 }
2241 len -= l;
2242 buf += l;
2243 addr += l;
2244 }
2245}
2246
2247#else
/* Read or write guest physical memory (softmmu version).  Transfers
   are split at page boundaries; each page is classified via the
   physical page descriptor as RAM (direct copy, with dirty-bit and
   TB-invalidation bookkeeping on writes) or I/O (dispatched through
   the io_mem_read/io_mem_write handler tables using the widest
   naturally-aligned access size that fits). */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* bytes remaining in the current page */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            /* unmapped pages are routed to the unassigned handlers */
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                /* I/O case: pick the largest aligned access size */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
#ifdef VBOX
# ifdef PGM_DYNAMIC_RAM_ALLOC
                /* dynamic RAM: translate guest phys to host virtual */
                ptr = remR3GCPhys2HCVirt(cpu_single_env, addr1);
# else
                ptr = phys_ram_base + addr1;
# endif
                remR3PhysWrite(ptr, buf, l);
#else
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
#endif
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
#ifdef VBOX
                    /* guard against pages beyond the dirty bitmap */
                    if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
#endif
                        phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                            (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
#ifdef VBOX
# ifdef PGM_DYNAMIC_RAM_ALLOC
                ptr = remR3GCPhys2HCVirt(cpu_single_env, (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK));
# else
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
# endif
                remR3PhysRead(ptr, buf, l);
#else
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
#endif
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
2360
/* Used for ROM loading: like a plain write but also allowed to write
   into ROM/ROMD-backed pages.  Pages that are neither RAM nor ROM are
   silently skipped.  Note: unlike cpu_physical_memory_rw, no dirty-bit
   update or TB invalidation is done here. */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* bytes remaining in the current page */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            /* RAM case */
#if !defined(VBOX) || !defined(PGM_DYNAMIC_RAM_ALLOC)
            ptr = phys_ram_base + addr1;
#else
            /* dynamic RAM: translate guest phys to host virtual */
            ptr = remR3GCPhys2HCVirt(cpu_single_env, addr1);
#endif
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
2404
2405
/* Load a 32-bit value from guest physical memory.
   warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case: dispatch the 32-bit read handler */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
#if !defined(VBOX) || !defined(PGM_DYNAMIC_RAM_ALLOC)
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
#else
        /* dynamic RAM: translate guest phys to host virtual */
        ptr = remR3GCPhys2HCVirt(cpu_single_env, (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK));
#endif
        val = ldl_p(ptr);
    }
    return val;
}
2439
/* Load a 64-bit value from guest physical memory.  An I/O access is
   split into two 32-bit reads, ordered according to target endianness.
   warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case: two 32-bit accesses, high/low order by endianness */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
#if !defined(VBOX) || !defined(PGM_DYNAMIC_RAM_ALLOC)
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
#else
        /* dynamic RAM: translate guest phys to host virtual */
        ptr = remR3GCPhys2HCVirt(cpu_single_env, (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK));
#endif
        val = ldq_p(ptr);
    }
    return val;
}
2479
2480/* XXX: optimize */
2481uint32_t ldub_phys(target_phys_addr_t addr)
2482{
2483 uint8_t val;
2484 cpu_physical_memory_read(addr, &val, 1);
2485 return val;
2486}
2487
2488/* XXX: optimize */
2489uint32_t lduw_phys(target_phys_addr_t addr)
2490{
2491 uint16_t val;
2492 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2493 return tswap16(val);
2494}
2495
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* I/O case: dispatch the 32-bit write handler */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        /* RAM case: direct store, deliberately no dirty-bit update */
#if !defined(VBOX) || !defined(PGM_DYNAMIC_RAM_ALLOC)
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
#else
        /* dynamic RAM: translate guest phys to host virtual */
        ptr = remR3GCPhys2HCVirt(cpu_single_env, (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK));
#endif
        stl_p(ptr, val);
    }
}
2526
/* Store a 32-bit value to guest physical memory, with full dirty-bit
   bookkeeping: on a RAM store to a clean page, translated code in the
   page is invalidated and the page is marked dirty (except for the
   CODE_DIRTY_FLAG).  warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* I/O case: dispatch the 32-bit write handler */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
#if !defined(VBOX) || !defined(PGM_DYNAMIC_RAM_ALLOC)
        ptr = phys_ram_base + addr1;
#else
        /* dynamic RAM: translate guest phys to host virtual */
        ptr = remR3GCPhys2HCVirt(cpu_single_env, addr1);
#endif

        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
#ifdef VBOX
            /* guard against pages beyond the dirty bitmap */
            if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
#endif
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
2568
2569/* XXX: optimize */
2570void stb_phys(target_phys_addr_t addr, uint32_t val)
2571{
2572 uint8_t v = val;
2573 cpu_physical_memory_write(addr, &v, 1);
2574}
2575
2576/* XXX: optimize */
2577void stw_phys(target_phys_addr_t addr, uint32_t val)
2578{
2579 uint16_t v = tswap16(val);
2580 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2581}
2582
2583/* XXX: optimize */
2584void stq_phys(target_phys_addr_t addr, uint64_t val)
2585{
2586 val = tswap64(val);
2587 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2588}
2589
2590#endif
2591
2592/* virtual memory access for debug */
2593int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2594 uint8_t *buf, int len, int is_write)
2595{
2596 int l;
2597 target_ulong page, phys_addr;
2598
2599 while (len > 0) {
2600 page = addr & TARGET_PAGE_MASK;
2601 phys_addr = cpu_get_phys_page_debug(env, page);
2602 /* if no physical page mapped, return an error */
2603 if (phys_addr == -1)
2604 return -1;
2605 l = (page + TARGET_PAGE_SIZE) - addr;
2606 if (l > len)
2607 l = len;
2608 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2609 buf, l, is_write);
2610 len -= l;
2611 buf += l;
2612 addr += l;
2613 }
2614 return 0;
2615}
2616
2617#ifndef VBOX
2618void dump_exec_info(FILE *f,
2619 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2620{
2621 int i, target_code_size, max_target_code_size;
2622 int direct_jmp_count, direct_jmp2_count, cross_page;
2623 TranslationBlock *tb;
2624
2625 target_code_size = 0;
2626 max_target_code_size = 0;
2627 cross_page = 0;
2628 direct_jmp_count = 0;
2629 direct_jmp2_count = 0;
2630 for(i = 0; i < nb_tbs; i++) {
2631 tb = &tbs[i];
2632 target_code_size += tb->size;
2633 if (tb->size > max_target_code_size)
2634 max_target_code_size = tb->size;
2635 if (tb->page_addr[1] != -1)
2636 cross_page++;
2637 if (tb->tb_next_offset[0] != 0xffff) {
2638 direct_jmp_count++;
2639 if (tb->tb_next_offset[1] != 0xffff) {
2640 direct_jmp2_count++;
2641 }
2642 }
2643 }
2644 /* XXX: avoid using doubles ? */
2645 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2646 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2647 nb_tbs ? target_code_size / nb_tbs : 0,
2648 max_target_code_size);
2649 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2650 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2651 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2652 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2653 cross_page,
2654 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2655 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2656 direct_jmp_count,
2657 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2658 direct_jmp2_count,
2659 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2660 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2661 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2662 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2663}
2664#endif /* !VBOX */
2665
2666#if !defined(CONFIG_USER_ONLY)
2667
2668#define MMUSUFFIX _cmmu
2669#define GETPC() NULL
2670#define env cpu_single_env
2671#define SOFTMMU_CODE_ACCESS
2672
2673#define SHIFT 0
2674#include "softmmu_template.h"
2675
2676#define SHIFT 1
2677#include "softmmu_template.h"
2678
2679#define SHIFT 2
2680#include "softmmu_template.h"
2681
2682#define SHIFT 3
2683#include "softmmu_template.h"
2684
2685#undef env
2686
2687#endif
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette