VirtualBox

source: vbox/trunk/src/recompiler/exec.c@ 13306

Last change on this file since 13306 was 13185, checked in by vboxsync, 16 years ago

VBoxREM: export the tb statistics.

  • Property svn:eol-style set to native
File size: 79.3 KB
Line 
1/*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29#include "config.h"
30#ifndef VBOX
31#ifdef _WIN32
32#include <windows.h>
33#else
34#include <sys/types.h>
35#include <sys/mman.h>
36#endif
37#include <stdlib.h>
38#include <stdio.h>
39#include <stdarg.h>
40#include <string.h>
41#include <errno.h>
42#include <unistd.h>
43#include <inttypes.h>
44#else /* VBOX */
45# include <stdlib.h>
46# include <stdio.h>
47# include <inttypes.h>
48# include <iprt/alloc.h>
49# include <iprt/string.h>
50# include <iprt/param.h>
51# include <VBox/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */
52#endif /* VBOX */
53
54#include "cpu.h"
55#include "exec-all.h"
56#if defined(CONFIG_USER_ONLY)
57#include <qemu.h>
58#endif
59
/* Debug switches: enable individually to trace TB invalidation, cache
   flushes, TLB activity or accesses to unassigned memory. */
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation. */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

/* after this many writes to a page we stop single-write tracking and
   build a per-page code bitmap instead (see build_page_bitmap) */
#define SMC_BITMAP_USE_THRESHOLD 10

/* host mmap window used by the non-softmmu user emulation */
#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END 0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
90
/* Pool of translation blocks and the hash table indexing them by the
   physical address of the guest code they translate. */
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

/* buffer receiving the generated host code; made executable in
   page_init() */
uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE]
#if defined(__MINGW32__)
    __attribute__((aligned (16)));
#else
    __attribute__((aligned (32)));
#endif
uint8_t *code_gen_ptr;

#ifndef VBOX
int phys_ram_size;
int phys_ram_fd;
/* NOTE(review): duplicate tentative definition of phys_ram_size just
   above — legal C but almost certainly a merge/transcription error;
   upstream QEMU declares phys_ram_base here.  TODO confirm against
   upstream exec.c. */
int phys_ram_size;
#else /* VBOX */
RTGCPHYS phys_ram_size;
/* we have memory ranges (the high PC-BIOS mapping) which
   causes some pages to fall outside the dirty map here. */
uint32_t phys_ram_dirty_size;
#endif /* VBOX */
#if !defined(VBOX)
uint8_t *phys_ram_base;
#endif
/* one dirty-state byte per target page of guest RAM */
uint8_t *phys_ram_dirty;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;

/* two-level page table geometry: L2_BITS low bits index the second
   level, the remaining virtual-page bits index the first level */
#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

static void io_mem_init(void);

/* host page geometry, computed in page_init() */
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support: per-io-index handler tables for 1/2/4 byte
   accesses, plus the opaque pointer passed back to each handler */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;

#ifndef VBOX
/* log support */
char *logfilename = "/tmp/qemu.log";
#endif /* !VBOX */
FILE *logfile;
int loglevel;

/* statistics */
#ifndef VBOX
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
#else /* VBOX */
# ifdef VBOX_WITH_STATISTICS
/* non-static so VBoxREM can export them (see r13185 changelog) */
uint32_t tlb_flush_count;
uint32_t tb_flush_count;
uint32_t tb_phys_invalidate_count;
# endif
#endif /* VBOX */
184
/* One-time initialization: make the code generation buffer executable,
   determine the host page size and derived bit/mask values, and
   allocate the zeroed first level of the physical page table. */
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef VBOX
    /* VBox uses IPRT both to change the protection and to learn the
       host page size */
    RTMemProtect(code_gen_buffer, sizeof(code_gen_buffer),
                 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
    qemu_real_host_page_size = PAGE_SIZE;
#else /* !VBOX */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        /* round the buffer limits out to host page boundaries, as
           required by mprotect() */
        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif
#endif /* !VBOX */

    /* derive the effective host page parameters (never smaller than a
       target page) */
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}
234
235static inline PageDesc *page_find_alloc(unsigned int index)
236{
237 PageDesc **lp, *p;
238
239 lp = &l1_map[index >> L2_BITS];
240 p = *lp;
241 if (!p) {
242 /* allocate if not found */
243 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
244 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
245 *lp = p;
246 }
247 return p + (index & (L2_SIZE - 1));
248}
249
250static inline PageDesc *page_find(unsigned int index)
251{
252 PageDesc *p;
253
254 p = l1_map[index >> L2_BITS];
255 if (!p)
256 return 0;
257 return p + (index & (L2_SIZE - 1));
258}
259
/* Look up (and optionally create) the PhysPageDesc for physical page
   number 'index'.  With alloc == 0 the function returns NULL instead of
   allocating missing table levels.  On VBox without the new phys code,
   looking up a page backed by a not-yet-committed dynamic RAM range
   also forces that range to be allocated. */
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    /* extra top level for >32-bit physical address spaces */
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        /* fresh entries point at unassigned memory until registered */
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
#if defined(VBOX) && !defined(VBOX_WITH_NEW_PHYS_CODE)
    pd = ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
    /* grow the dynamic RAM range on first touch of a missing page */
    if (RT_UNLIKELY((pd->phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING))
        remR3GrowDynRange(pd->phys_offset & TARGET_PAGE_MASK);
    return pd;
#else
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
#endif
}
303
304static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
305{
306 return phys_page_find_alloc(index, 0);
307}
308
309#if !defined(CONFIG_USER_ONLY)
310static void tlb_protect_code(ram_addr_t ram_addr);
311static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
312 target_ulong vaddr);
313#endif
314
315void cpu_exec_init(CPUState *env)
316{
317 CPUState **penv;
318 int cpu_index;
319
320 if (!code_gen_ptr) {
321 code_gen_ptr = code_gen_buffer;
322 page_init();
323 io_mem_init();
324 }
325 env->next_cpu = NULL;
326 penv = &first_cpu;
327 cpu_index = 0;
328 while (*penv != NULL) {
329 penv = (CPUState **)&(*penv)->next_cpu;
330 cpu_index++;
331 }
332 env->cpu_index = cpu_index;
333 *penv = env;
334}
335
336static inline void invalidate_page_bitmap(PageDesc *p)
337{
338 if (p->code_bitmap) {
339 qemu_free(p->code_bitmap);
340 p->code_bitmap = NULL;
341 }
342 p->code_write_count = 0;
343}
344
345/* set to NULL all the 'first_tb' fields in all PageDescs */
346static void page_flush_tb(void)
347{
348 int i, j;
349 PageDesc *p;
350
351 for(i = 0; i < L1_SIZE; i++) {
352 p = l1_map[i];
353 if (p) {
354 for(j = 0; j < L2_SIZE; j++) {
355 p->first_tb = NULL;
356 invalidate_page_bitmap(p);
357 p++;
358 }
359 }
360 }
361}
362
363/* flush all the translation blocks */
364/* XXX: tb_flush is currently not thread safe */
/* Throw away every translated block: reset the TB pool and code buffer,
   and clear all per-CPU jump caches, the physical hash table and the
   per-page TB lists.  'env1' is unused; all CPUs are flushed. */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;

    /* clear the per-CPU virtual-PC -> TB caches */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    /* start generating code from the beginning of the buffer again */
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
#if !defined(VBOX) || defined(VBOX_WITH_STATISTICS)
    tb_flush_count++;
#endif
}
390
391#ifdef DEBUG_TB_CHECK
392
/* Debug check: report any TB still registered that overlaps the target
   page containing 'address' (which should just have been invalidated). */
static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            /* overlap test: TB range [pc, pc+size) vs page range */
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
408
409/* verify that all the pages have correct rights for code */
410static void tb_page_check(void)
411{
412 TranslationBlock *tb;
413 int i, flags1, flags2;
414
415 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
416 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
417 flags1 = page_get_flags(tb->pc);
418 flags2 = page_get_flags(tb->pc + tb->size - 1);
419 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
420 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
421 (long)tb->pc, tb->size, flags1, flags2);
422 }
423 }
424 }
425}
426
/* Debug check: walk the circular jmp_first list of 'tb' and verify it
   terminates back at 'tb'.  The low 2 bits of each link encode the jump
   slot (0/1) or 2 for the list head. */
void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}
446
447#endif
448
449/* invalidate one TB */
450static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
451 int next_offset)
452{
453 TranslationBlock *tb1;
454 for(;;) {
455 tb1 = *ptb;
456 if (tb1 == tb) {
457 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
458 break;
459 }
460 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
461 }
462}
463
/* Unlink 'tb' from a per-page TB list.  Each link has the page slot
   number (0 or 1) encoded in its low 2 bits, selecting which
   page_next[] field continues the chain.  'tb' must be present. */
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;                               /* slot tag */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);       /* real pointer */
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
480
/* Remove jump slot 'n' of 'tb' from the circular list of blocks that
   jump into the same target TB.  Links carry a 2-bit tag: the slot
   number, or 2 for the list head (the target TB itself). */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                /* tag 2: continue through the list head */
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
508
509/* reset the jump entry 'n' of a TB so that it is not chained to
510 another TB */
511static inline void tb_reset_jump(TranslationBlock *tb, int n)
512{
513 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
514}
515
/* Fully invalidate one TB: unlink it from the physical hash table, the
   per-page lists, every CPU's jump cache, and both directions of the
   chained-jump lists.  'page_addr' names the page currently being
   invalidated (its list is handled by the caller), or -1. */
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    /* tell cpu_exec() a TB disappeared under it */
    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;           /* slot tag; 2 terminates the list */
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        /* redirect tb1's jump back to its own epilogue */
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

#if !defined(VBOX) || defined(VBOX_WITH_STATISTICS)
    tb_phys_invalidate_count++;
#endif
}
573
574#ifdef VBOX
/* VBox helper: invalidate the translation for the code at virtual
   address CS:eip.  Currently implemented as a full TB flush; the finer
   single-TB path below is compiled out (#if 1). */
void tb_invalidate_virt(CPUState *env, uint32_t eip)
{
# if 1
    tb_flush(env);
# else
    /* dead code: targeted invalidation of just the TB at CS:eip */
    uint8_t *cs_base, *pc;
    unsigned int flags, h, phys_pc;
    TranslationBlock *tb, **ptb;

    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + eip;

    tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base,
                 flags);

    if(tb)
    {
#  ifdef DEBUG
        printf("invalidating TB (%08X) at %08X\n", tb, eip);
#  endif
        tb_invalidate(tb);
        //Note: this will leak TBs, but the whole cache will be flushed
        //      when it happens too often
        tb->pc = 0;
        tb->cs_base = 0;
        tb->flags = 0;
    }
# endif
}
606
607# ifdef VBOX_STRICT
608/**
609 * Gets the page offset.
610 */
611unsigned long get_phys_page_offset(target_ulong addr)
612{
613 PhysPageDesc *p = phys_page_find(addr >> TARGET_PAGE_BITS);
614 return p ? p->phys_offset : 0;
615}
616# endif /* VBOX_STRICT */
617#endif /* VBOX */
618
/* Set 'len' consecutive bits starting at bit index 'start' in the
   bitmap 'tab' (bit i lives in tab[i >> 3] at position i & 7). */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, head_mask;
    uint8_t *p;

    end = start + len;
    p = tab + (start >> 3);
    head_mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        /* the whole range falls inside a single byte */
        if (start < end)
            *p |= head_mask & ~(0xff << (end & 7));
    } else {
        /* partial first byte */
        *p++ |= head_mask;
        start = (start + 8) & ~7;
        /* full middle bytes */
        while (start < (end & ~7)) {
            *p++ = 0xff;
            start += 8;
        }
        /* partial last byte */
        if (start < end)
            *p |= ~(0xff << (end & 7));
    }
}
645
/* Build the SMC bitmap of a page: one bit per byte of the page, set
   where translated code exists.  Used to filter writes once a page is
   written to frequently (SMC_BITMAP_USE_THRESHOLD). */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;                           /* page slot tag */
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            /* second page of a spanning TB: code starts at page base */
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
676
677#ifdef TARGET_HAS_PRECISE_SMC
678
/* Translate the code at (cs_base, pc) into a fresh TB with the given
   flags/cflags and register it in the physical page tables.  If TB
   allocation fails, the whole cache is flushed first (so the retry
   cannot fail). */
static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    /* advance the generation pointer, keeping CODE_GEN_ALIGN alignment */
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
712#endif
713
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* once a page has been written often enough, switch to bitmap
       tracking of which bytes actually contain code */
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;                       /* page slot tag */
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
830
/* len must be <= 8 and start must be a multiple of len */
/* Fast path for small writes: when the page has a code bitmap, only
   fall through to the full invalidation if the written bytes actually
   overlap translated code. */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                   cpu_single_env->mem_write_vaddr, len,
                   cpu_single_env->eip,
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        /* extract the 'len' bits covering the written bytes */
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    /* NOTE: the goto above deliberately jumps into this else branch */
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
859
#if !defined(CONFIG_SOFTMMU)
/* Invalidate every TB on the physical page containing 'addr' (usermode
   variant).  'pc'/'puc' identify the faulting host instruction so the
   currently executing TB can be precisely restarted if it modified
   itself. */
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;                       /* page slot tag */
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
928
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    /* push tb onto the page's TB list, encoding the slot number 'n'
       in the low 2 bits of the link */
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* a host page may cover several target pages: accumulate their
           flags and clear PAGE_WRITE on each */
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
          }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
983
984/* Allocate a new translation block. Flush the translation buffer if
985 too many translation blocks or too much generated code. */
986TranslationBlock *tb_alloc(target_ulong pc)
987{
988 TranslationBlock *tb;
989
990 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
991 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
992 return NULL;
993 tb = &tbs[nb_tbs++];
994 tb->pc = pc;
995 tb->cflags = 0;
996 return tb;
997}
998
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    /* empty circular incoming-jump list: tag 2 marks the head */
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
1039
1040/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1041 tb[1].tc_ptr. Return NULL if not found */
1042TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1043{
1044 int m_min, m_max, m;
1045 unsigned long v;
1046 TranslationBlock *tb;
1047
1048 if (nb_tbs <= 0)
1049 return NULL;
1050 if (tc_ptr < (unsigned long)code_gen_buffer ||
1051 tc_ptr >= (unsigned long)code_gen_ptr)
1052 return NULL;
1053 /* binary search (cf Knuth) */
1054 m_min = 0;
1055 m_max = nb_tbs - 1;
1056 while (m_min <= m_max) {
1057 m = (m_min + m_max) >> 1;
1058 tb = &tbs[m];
1059 v = (unsigned long)tb->tc_ptr;
1060 if (v == tc_ptr)
1061 return tb;
1062 else if (tc_ptr < v) {
1063 m_max = m - 1;
1064 } else {
1065 m_min = m + 1;
1066 }
1067 }
1068 return &tbs[m_max];
1069}
1070
1071static void tb_reset_jump_recursive(TranslationBlock *tb);
1072
/* Break the chained jump in slot 'n' of 'tb': remove tb from the
   target's circular incoming-jump list, redirect the generated jump to
   tb's own epilogue, then recurse on the target block. */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;       /* 2-bit tag: slot, or 2 = head */
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
1111
1112static void tb_reset_jump_recursive(TranslationBlock *tb)
1113{
1114 tb_reset_jump_recursive2(tb, 0);
1115 tb_reset_jump_recursive2(tb, 1);
1116}
1117
#if defined(TARGET_HAS_ICE)
/* Invalidate the translated code containing guest PC 'pc' so the next
   execution retranslates it with the breakpoint check included. */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong addr, pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    /* translate the virtual PC to a ram address */
    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
1136
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
/* Returns 0 on success (or if the breakpoint already exists) and -1 if
   the breakpoint table is full or the target has no ICE support. */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    /* ignore duplicates */
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    /* drop any already-translated code for this pc */
    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}
1159
/* remove a breakpoint */
/* Returns 0 on success, -1 if the breakpoint was not found (or the
   target has no ICE support). */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    /* order is not preserved: the removed slot is filled with the
       last entry of the table */
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    /* retranslate the page so the breakpoint trap is removed */
    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}
1181
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistancies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}
1195
#ifndef VBOX
/* enable or disable low levels log */
/* Opens the log file lazily on the first call that enables logging;
   exits the process if the file cannot be opened. */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        /* line-buffered so log output is visible promptly */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

/* Set the log file name used by cpu_set_log().
   NOTE(review): any previously strdup'ed name is not freed here —
   harmless one-shot leak, but confirm if called repeatedly. */
void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}
#endif /* !VBOX */
1224
/* mask must never be zero, except for A20 change call */
/* Raise the interrupt request bits in 'mask' and, if a TB is currently
   executing, unlink its jump chains so the CPU loop notices the request
   soon.  The unlink is guarded by a test-and-set lock so that only one
   caller at a time walks the jump lists. */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

#ifdef VBOX
    VM_ASSERT_EMT(env->pVM);
    ASMAtomicOrS32(&env->interrupt_request, mask);
#else /* !VBOX */
    env->interrupt_request |= mask;
#endif /* !VBOX */
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}
1246
/* Clear the interrupt request bits in 'mask'. */
void cpu_reset_interrupt(CPUState *env, int mask)
{
#ifdef VBOX
    /*
     * Note: the current implementation can be executed by another thread without problems; make sure this remains true
     *       for future changes!
     */
    ASMAtomicAndS32(&env->interrupt_request, ~mask);
#else /* !VBOX */
    env->interrupt_request &= ~mask;
#endif /* !VBOX */
}
1259
#ifndef VBOX
/* Table mapping log mask bits to their command-line names and help
   text; terminated by an all-zero entry (see cpu_str_to_log_mask). */
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before bloc translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
1288
/* Return non-zero iff 's2' is exactly 'n' characters long and matches
   the first 'n' characters of 's1'. */
static int cmp1(const char *s1, int n, const char *s2)
{
    return strlen(s2) == (size_t)n && strncmp(s1, s2, n) == 0;
}
1295
1296/* takes a comma separated list of log masks. Return 0 if error. */
1297int cpu_str_to_log_mask(const char *str)
1298{
1299 CPULogItem *item;
1300 int mask;
1301 const char *p, *p1;
1302
1303 p = str;
1304 mask = 0;
1305 for(;;) {
1306 p1 = strchr(p, ',');
1307 if (!p1)
1308 p1 = p + strlen(p);
1309 if(cmp1(p,p1-p,"all")) {
1310 for(item = cpu_log_items; item->mask != 0; item++) {
1311 mask |= item->mask;
1312 }
1313 } else {
1314 for(item = cpu_log_items; item->mask != 0; item++) {
1315 if (cmp1(p, p1 - p, item->name))
1316 goto found;
1317 }
1318 return 0;
1319 }
1320 found:
1321 mask |= item->mask;
1322 if (*p1 != ',')
1323 break;
1324 p = p1 + 1;
1325 }
1326 return mask;
1327}
1328#endif /* !VBOX */
1329
1330#ifndef VBOX /* VBOX: we have our own routine. */
/* Print a fatal error message plus a CPU state dump to stderr and
   abort the process.  Never returns. */
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}
1347#endif /* !VBOX */
1348
1349#if !defined(CONFIG_USER_ONLY)
1350
/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
/* Invalidate every entry of both softmmu TLBs (user and kernel) and
   clear the TB jump cache; notifies VBox raw mode / kqemu when built. */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    /* -1 never matches a page-aligned lookup, so it marks invalid */
    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef VBOX
    /* inform raw mode about TLB flush */
    remR3FlushTLB(env, flush_global);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
#if !defined(VBOX) || defined(VBOX_WITH_STATISTICS)
    tlb_flush_count++;
#endif
}
1391
1392static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1393{
1394 if (addr == (tlb_entry->addr_read &
1395 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1396 addr == (tlb_entry->addr_write &
1397 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1398 addr == (tlb_entry->addr_code &
1399 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1400 tlb_entry->addr_read = -1;
1401 tlb_entry->addr_write = -1;
1402 tlb_entry->addr_code = -1;
1403 }
1404}
1405
/* Invalidate the TLB entries for virtual page 'addr' in both TLBs and
   drop the TB jump-cache entries of any TB that might overlap the page. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef VBOX
    /* inform raw mode about TLB page flush */
    remR3FlushPage(env, addr);
#endif /* VBOX */
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}
1445
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
/* Clearing CODE_DIRTY_FLAG forces the next write to go through the
   notdirty slow path, which invalidates the translated code. */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
#if defined(VBOX) && defined(REM_MONITOR_CODE_PAGES)
    /** @todo Retest this? This function has changed... */
    remR3ProtectCode(cpu_single_env, ram_addr);
#endif
}
1458
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
#ifdef VBOX
    /* guard against out-of-range ram addresses (VBox dirty map is bounded) */
    if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
#endif
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
1469
1470static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1471 unsigned long start, unsigned long length)
1472{
1473 unsigned long addr;
1474 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1475 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1476 if ((addr - start) < length) {
1477 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1478 }
1479 }
1480}
1481
/* Clear the 'dirty_flags' bits in the dirty bitmap for the physical
   range [start, end) (page aligned) and re-arm dirty tracking in every
   CPU's TLB so the next write sets them again. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    /* clear the requested flag bits, one byte per page */
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
#ifdef VBOX
    if (RT_LIKELY((start >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
#endif
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
    start1 = start;
#elif !defined(VBOX)
    start1 = start + (unsigned long)phys_ram_base;
#else
    start1 = (unsigned long)remR3GCPhys2HCVirt(first_cpu, start);
#endif
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
    }

#if !defined(CONFIG_SOFTMMU)
#ifdef VBOX /**@todo remove this check */
# error "We shouldn't get here..."
#endif
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}
1564
/* If 'tlb_entry' is a plain RAM write entry whose page is no longer
   dirty, reroute its writes through the NOTDIRTY handler. */
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        /* RAM case */
#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
#elif !defined(VBOX)
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
#else
        ram_addr = remR3HCVirt2GCPhys(first_cpu, (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend);
#endif
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}
1584
1585/* update the TLB according to the current state of the dirty bits */
1586void cpu_tlb_update_dirty(CPUState *env)
1587{
1588 int i;
1589 for(i = 0; i < CPU_TLB_SIZE; i++)
1590 tlb_update_dirty(&env->tlb_table[0][i]);
1591 for(i = 0; i < CPU_TLB_SIZE; i++)
1592 tlb_update_dirty(&env->tlb_table[1][i]);
1593}
1594
1595static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1596 unsigned long start)
1597{
1598 unsigned long addr;
1599 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1600 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1601 if (addr == start) {
1602 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1603 }
1604 }
1605}
1606
1607/* update the TLB corresponding to virtual page vaddr and phys addr
1608 addr so that it is no longer dirty */
1609static inline void tlb_set_dirty(CPUState *env,
1610 unsigned long addr, target_ulong vaddr)
1611{
1612 int i;
1613
1614 addr &= TARGET_PAGE_MASK;
1615 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1616 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1617 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1618}
1619
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;

    /* resolve the physical page descriptor; unassigned memory gets the
       IO_MEM_UNASSIGNED handler */
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case: the io index is kept in the low bits of
               the entry address */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
            addend = pd & TARGET_PAGE_MASK;
#elif !defined(VBOX)
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
#else
            addend = (unsigned long)remR3GCPhys2HCVirt(env, pd & TARGET_PAGE_MASK);
#endif
        }

        /* addend is stored relative to vaddr so that host address =
           guest address + addend at lookup time */
        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[is_user][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                /* clean RAM: writes go through the notdirty handler so
                   self-modifying code is caught */
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
#ifdef VBOX
        /* inform raw mode about TLB page change */
        remR3FlushPage(env, vaddr);
#endif
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
1746
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was succesfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    /* restore the saved (writable) protection of the page */
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#elif defined(VBOX)
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    return 1;
#else
    return 0;
#endif
}
1794
1795#else
1796
/* User-mode emulation: there is no softmmu TLB, so these are no-ops. */
void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

/* Always succeeds in user mode (nothing to map). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    return 0;
}
1811
1812#ifndef VBOX
/* dump memory mappings */
/* Walks the two-level page map and prints one line per run of pages
   with identical protection flags.  The extra iteration (i == L1_SIZE
   with p == NULL) flushes the final run. */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                /* protection changed: emit the run that just ended */
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
1855#endif /* !VBOX */
1856
1857int page_get_flags(target_ulong address)
1858{
1859 PageDesc *p;
1860
1861 p = page_find(address >> TARGET_PAGE_BITS);
1862 if (!p)
1863 return 0;
1864 return p->flags;
1865}
1866
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positionned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
#ifdef VBOX
    AssertMsgFailed(("We shouldn't be here, and if we should, we must have an env to do the proper locking!\n"));
#endif
    /* hold tb_lock while flags and translated code are reconciled */
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
1896
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was succesfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    /* work on a whole host page: it may contain several target pages */
    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    /* combined protection of every target page in the host page */
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}
1936
1937/* call this function when system calls directly modify a memory area */
1938/* ??? This should be redundant now we have lock_user. */
1939void page_unprotect_range(target_ulong data, target_ulong data_size)
1940{
1941 target_ulong start, end, addr;
1942
1943 start = data;
1944 end = start + data_size;
1945 start &= TARGET_PAGE_MASK;
1946 end = TARGET_PAGE_ALIGN(end);
1947 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1948 page_unprotect(addr, 0, NULL);
1949 }
1950}
1951
/* No softmmu TLB in user mode: nothing to mark dirty. */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
1956#endif /* defined(CONFIG_USER_ONLY) */
1957
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
        p->phys_offset = phys_offset;
        /* for RAM/ROM(D) pages each page gets its own offset, so only
           then is phys_offset advanced; pure MMIO keeps one io index */
#if !defined(VBOX) || defined(VBOX_WITH_NEW_PHYS_CODE)
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
            (phys_offset & IO_MEM_ROMD))
#else
        if (   (phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM
            || (phys_offset & IO_MEM_ROMD)
            || (phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING)
#endif

            phys_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
1993
1994/* XXX: temporary until new memory mapping API */
1995uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
1996{
1997 PhysPageDesc *p;
1998
1999 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2000 if (!p)
2001 return IO_MEM_UNASSIGNED;
2002 return p->phys_offset;
2003}
2004
/* Handler for accesses to unassigned physical memory: reads return 0,
   writes are dropped (optionally logged with DEBUG_UNASSIGNED). */
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read 0x%08x\n", (int)addr);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
#endif
}

/* byte/word/long dispatch tables: the same byte handler serves all
   three access sizes */
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};
2031
/* Byte write handler for clean (not-dirty) RAM pages: invalidates any
   translated code on the page, performs the store, marks the page
   dirty, and restores the fast write path once no code remains. */
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
    ram_addr = addr;
#elif !defined(VBOX)
    ram_addr = addr - (unsigned long)phys_ram_base;
#else
    ram_addr = remR3HCVirt2GCPhys(first_cpu, (void *)addr);
#endif
#ifdef VBOX
    if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
        dirty_flags = 0xff;
    else
#endif /* VBOX */
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* the page holds translated code: invalidate it first */
        tb_invalidate_phys_page_fast(ram_addr, 1);
# ifdef VBOX
        if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
            dirty_flags = 0xff;
        else
# endif /* VBOX */
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
#ifdef VBOX
    if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
#endif /* !VBOX */
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
2076
/* 16-bit variant of notdirty_mem_writeb (same structure). */
static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
    ram_addr = addr;
#elif !defined(VBOX)
    ram_addr = addr - (unsigned long)phys_ram_base;
#else
    ram_addr = remR3HCVirt2GCPhys(first_cpu, (void *)addr);
#endif
#ifdef VBOX
    if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
        dirty_flags = 0xff;
    else
#endif /* VBOX */
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* the page holds translated code: invalidate it first */
        tb_invalidate_phys_page_fast(ram_addr, 2);
# ifdef VBOX
        if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
            dirty_flags = 0xff;
        else
# endif /* VBOX */
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
#ifdef VBOX
    if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
#endif
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
2121
/* 32-bit variant of notdirty_mem_writeb (same structure). */
static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
    ram_addr = addr;
#elif !defined(VBOX)
    ram_addr = addr - (unsigned long)phys_ram_base;
#else
    ram_addr = remR3HCVirt2GCPhys(first_cpu, (void *)addr);
#endif
#ifdef VBOX
    if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
        dirty_flags = 0xff;
    else
#endif /* VBOX */
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* the page holds translated code: invalidate it first */
        tb_invalidate_phys_page_fast(ram_addr, 4);
# ifdef VBOX
        if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
            dirty_flags = 0xff;
        else
# endif /* VBOX */
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
#ifdef VBOX
    if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
#endif
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
2166
/* Placeholder read table for io regions that must never be read
   through this path (ROM reads and NOTDIRTY reads go direct). */
static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

/* byte/word/long write handlers for clean RAM pages */
static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
2178
/* Register the built-in io memory handlers (ROM, unassigned, notdirty,
   and for old VBox also RAM_MISSING) and set the first free io index. */
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
#if defined(VBOX) && !defined(VBOX_WITH_NEW_PHYS_CODE)
    cpu_register_io_memory(IO_MEM_RAM_MISSING >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    io_mem_nb = 6;
#else
    io_mem_nb = 5;
#endif

#ifndef VBOX /* VBOX: we do this later when the RAM is allocated. */
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
#endif /* !VBOX */
}
2197
2198/* mem_read and mem_write are arrays of functions containing the
2199 function to access byte (index 0), word (index 1) and dword (index
2200 2). All functions must be supplied. If io_index is non zero, the
2201 corresponding io zone is modified. If it is zero, a new io zone is
2202 allocated. The return value can be used with
2203 cpu_register_physical_memory(). (-1) is returned if error. */
2204int cpu_register_io_memory(int io_index,
2205 CPUReadMemoryFunc **mem_read,
2206 CPUWriteMemoryFunc **mem_write,
2207 void *opaque)
2208{
2209 int i;
2210
2211 if (io_index <= 0) {
2212 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2213 return -1;
2214 io_index = io_mem_nb++;
2215 } else {
2216 if (io_index >= IO_MEM_NB_ENTRIES)
2217 return -1;
2218 }
2219
2220 for(i = 0;i < 3; i++) {
2221 io_mem_read[io_index][i] = mem_read[i];
2222 io_mem_write[io_index][i] = mem_write[i];
2223 }
2224 io_mem_opaque[io_index] = opaque;
2225 return io_index << IO_MEM_SHIFT;
2226}
2227
/* Return the write handler table (byte/word/dword) for an encoded I/O
   slot value, i.e. one produced by cpu_register_io_memory() with the
   IO_MEM_SHIFT already applied. */
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}
2232
/* Return the read handler table (byte/word/dword) for an encoded I/O
   slot value, i.e. one produced by cpu_register_io_memory() with the
   IO_MEM_SHIFT already applied. */
CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}
2237
2238/* physical memory access (slow version, mainly for debug) */
2239#if defined(CONFIG_USER_ONLY)
2240void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2241 int len, int is_write)
2242{
2243 int l, flags;
2244 target_ulong page;
2245 void * p;
2246
2247 while (len > 0) {
2248 page = addr & TARGET_PAGE_MASK;
2249 l = (page + TARGET_PAGE_SIZE) - addr;
2250 if (l > len)
2251 l = len;
2252 flags = page_get_flags(page);
2253 if (!(flags & PAGE_VALID))
2254 return;
2255 if (is_write) {
2256 if (!(flags & PAGE_WRITE))
2257 return;
2258 p = lock_user(addr, len, 0);
2259 memcpy(p, buf, len);
2260 unlock_user(p, addr, len);
2261 } else {
2262 if (!(flags & PAGE_READ))
2263 return;
2264 p = lock_user(addr, len, 1);
2265 memcpy(buf, p, len);
2266 unlock_user(p, addr, 0);
2267 }
2268 len -= l;
2269 buf += l;
2270 addr += l;
2271 }
2272}
2273
2274#else
/* Physical memory access, softmmu variant (slow path, mainly for
   debug and bulk transfers).  Walks the range page by page: RAM pages
   are copied directly (via remR3PhysRead/Write in the VBox build),
   everything else goes through the registered per-size I/O handlers,
   picking the widest naturally aligned access (4/2/1 bytes) each
   iteration.  RAM writes also invalidate any translated code on the
   page and update the dirty bits. */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* bytes remaining on the current page */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            /* unmapped pages behave as unassigned I/O */
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                /* anything that is not plain RAM is written through its
                   I/O handlers */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
                    val = ldl_p(buf);
#else
                    val = *(const uint32_t *)buf;
#endif
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
                    val = lduw_p(buf);
#else
                    val = *(const uint16_t *)buf;
#endif
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
                    val = ldub_p(buf);
#else
                    val = *(const uint8_t *)buf;
#endif
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
#ifdef VBOX
                remR3PhysWrite(addr1, buf, l); NOREF(ptr);
#else
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
#endif
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
#ifdef VBOX
                    /* VBox: guard against an index past the dirty map */
                    if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
#endif
                        phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                            (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
                    stl_p(buf, val);
#else
                    *(uint32_t *)buf = val;
#endif
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
                    stw_p(buf, val);
#else
                    *(uint16_t *)buf = val;
#endif
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
                    stb_p(buf, val);
#else
                    *(uint8_t *)buf = val;
#endif
                    l = 1;
                }
            } else {
                /* RAM case (RAM, ROM, or ROM-device backed by RAM) */
#ifdef VBOX
                remR3PhysRead((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), buf, l); NOREF(ptr);
#else
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
#endif
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
2400
2401#ifndef VBOX
2402/* used for ROM loading : can write in RAM and ROM */
2403void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2404 const uint8_t *buf, int len)
2405{
2406 int l;
2407 uint8_t *ptr;
2408 target_phys_addr_t page;
2409 unsigned long pd;
2410 PhysPageDesc *p;
2411
2412 while (len > 0) {
2413 page = addr & TARGET_PAGE_MASK;
2414 l = (page + TARGET_PAGE_SIZE) - addr;
2415 if (l > len)
2416 l = len;
2417 p = phys_page_find(page >> TARGET_PAGE_BITS);
2418 if (!p) {
2419 pd = IO_MEM_UNASSIGNED;
2420 } else {
2421 pd = p->phys_offset;
2422 }
2423
2424 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2425 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2426 !(pd & IO_MEM_ROMD)) {
2427 /* do nothing */
2428 } else {
2429 unsigned long addr1;
2430 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2431 /* ROM/RAM case */
2432 ptr = phys_ram_base + addr1;
2433 memcpy(ptr, buf, l);
2434 }
2435 len -= l;
2436 buf += l;
2437 addr += l;
2438 }
2439}
2440#endif /* !VBOX */
2441
2442
/* warning: addr must be aligned */
/* Load a 32-bit value from guest-physical memory.  Pages above
   IO_MEM_ROM that are not ROM-devices go through the registered dword
   read handler; RAM/ROM pages are read directly (remR3PhysReadU32 in
   the VBox build). */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    /* unmapped pages behave as unassigned I/O */
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
#ifndef VBOX
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
#else
        val = remR3PhysReadU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
#endif
    }
    return val;
}
2476
/* warning: addr must be aligned */
/* Load a 64-bit value from guest-physical memory.  MMIO pages are
   read as two 32-bit accesses combined according to target byte
   order; RAM/ROM pages are read directly (remR3PhysReadU64 in the
   VBox build). */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    /* unmapped pages behave as unassigned I/O */
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case: split into two dword reads, high word first on
           big-endian targets */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
#ifndef VBOX
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
#else
        val = remR3PhysReadU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
#endif
    }
    return val;
}
2516
2517/* XXX: optimize */
2518uint32_t ldub_phys(target_phys_addr_t addr)
2519{
2520 uint8_t val;
2521 cpu_physical_memory_read(addr, &val, 1);
2522 return val;
2523}
2524
2525/* XXX: optimize */
2526uint32_t lduw_phys(target_phys_addr_t addr)
2527{
2528 uint16_t val;
2529 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2530 return tswap16(val);
2531}
2532
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    /* unmapped pages behave as unassigned I/O */
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* non-RAM: route through the dword write handler */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        /* plain RAM: direct store, deliberately without dirty
           tracking or TB invalidation (see header comment) */
#ifndef VBOX
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
#else
        remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
#endif
    }
}
2563
/* warning: addr must be aligned */
/* Store a 32-bit value to guest-physical memory.  Non-RAM pages go
   through the dword write handler; RAM stores also invalidate any
   translated code on the page and set the dirty bits. */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    /* unmapped pages behave as unassigned I/O */
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
#ifndef VBOX
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
#else
        /* note: the expression below is the same value as addr1 */
        remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
#endif
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
#ifdef VBOX
            /* VBox: guard against an index past the dirty map */
            if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
#endif
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
2604
2605/* XXX: optimize */
2606void stb_phys(target_phys_addr_t addr, uint32_t val)
2607{
2608 uint8_t v = val;
2609 cpu_physical_memory_write(addr, &v, 1);
2610}
2611
2612/* XXX: optimize */
2613void stw_phys(target_phys_addr_t addr, uint32_t val)
2614{
2615 uint16_t v = tswap16(val);
2616 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2617}
2618
2619/* XXX: optimize */
2620void stq_phys(target_phys_addr_t addr, uint64_t val)
2621{
2622 val = tswap64(val);
2623 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2624}
2625
2626#endif
2627
2628#ifndef VBOX
2629/* virtual memory access for debug */
2630int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2631 uint8_t *buf, int len, int is_write)
2632{
2633 int l;
2634 target_ulong page, phys_addr;
2635
2636 while (len > 0) {
2637 page = addr & TARGET_PAGE_MASK;
2638 phys_addr = cpu_get_phys_page_debug(env, page);
2639 /* if no physical page mapped, return an error */
2640 if (phys_addr == -1)
2641 return -1;
2642 l = (page + TARGET_PAGE_SIZE) - addr;
2643 if (l > len)
2644 l = len;
2645 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2646 buf, l, is_write);
2647 len -= l;
2648 buf += l;
2649 addr += l;
2650 }
2651 return 0;
2652}
2653
2654void dump_exec_info(FILE *f,
2655 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2656{
2657 int i, target_code_size, max_target_code_size;
2658 int direct_jmp_count, direct_jmp2_count, cross_page;
2659 TranslationBlock *tb;
2660
2661 target_code_size = 0;
2662 max_target_code_size = 0;
2663 cross_page = 0;
2664 direct_jmp_count = 0;
2665 direct_jmp2_count = 0;
2666 for(i = 0; i < nb_tbs; i++) {
2667 tb = &tbs[i];
2668 target_code_size += tb->size;
2669 if (tb->size > max_target_code_size)
2670 max_target_code_size = tb->size;
2671 if (tb->page_addr[1] != -1)
2672 cross_page++;
2673 if (tb->tb_next_offset[0] != 0xffff) {
2674 direct_jmp_count++;
2675 if (tb->tb_next_offset[1] != 0xffff) {
2676 direct_jmp2_count++;
2677 }
2678 }
2679 }
2680 /* XXX: avoid using doubles ? */
2681 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2682 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2683 nb_tbs ? target_code_size / nb_tbs : 0,
2684 max_target_code_size);
2685 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2686 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2687 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2688 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2689 cross_page,
2690 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2691 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2692 direct_jmp_count,
2693 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2694 direct_jmp2_count,
2695 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2696 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2697 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2698 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2699}
2700#endif /* !VBOX */
2701
2702#if !defined(CONFIG_USER_ONLY)
2703
2704#define MMUSUFFIX _cmmu
2705#define GETPC() NULL
2706#define env cpu_single_env
2707#define SOFTMMU_CODE_ACCESS
2708
2709#define SHIFT 0
2710#include "softmmu_template.h"
2711
2712#define SHIFT 1
2713#include "softmmu_template.h"
2714
2715#define SHIFT 2
2716#include "softmmu_template.h"
2717
2718#define SHIFT 3
2719#include "softmmu_template.h"
2720
2721#undef env
2722
2723#endif
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette