VirtualBox

source: vbox/trunk/src/recompiler/exec.c@ 36028

Last change on this file since 36028 was 35996, checked in by vboxsync, 14 years ago

optimize tlb_flush a little bit

  • Property svn:eol-style set to native
File size: 113.6 KB
1/*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29
30#include "config.h"
31#ifndef VBOX
32#ifdef _WIN32
33#include <windows.h>
34#else
35#include <sys/types.h>
36#include <sys/mman.h>
37#endif
38#include <stdlib.h>
39#include <stdio.h>
40#include <stdarg.h>
41#include <string.h>
42#include <errno.h>
43#include <unistd.h>
44#include <inttypes.h>
45#else /* VBOX */
46# include <stdlib.h>
47# include <stdio.h>
48# include <iprt/alloc.h>
49# include <iprt/string.h>
50# include <iprt/param.h>
51# include <VBox/vmm/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */
52#endif /* VBOX */
53
54#include "cpu.h"
55#include "exec-all.h"
56#if defined(CONFIG_USER_ONLY)
57#include <qemu.h>
58#endif
59
60//#define DEBUG_TB_INVALIDATE
61//#define DEBUG_FLUSH
62//#define DEBUG_TLB
63//#define DEBUG_UNASSIGNED
64
65/* make various TB consistency checks */
66//#define DEBUG_TB_CHECK
67//#define DEBUG_TLB_CHECK
68
69#if !defined(CONFIG_USER_ONLY)
70/* TB consistency checks only implemented for usermode emulation. */
71#undef DEBUG_TB_CHECK
72#endif
73
74#define SMC_BITMAP_USE_THRESHOLD 10
75
76#define MMAP_AREA_START 0x00000000
77#define MMAP_AREA_END 0xa8000000
78
79#if defined(TARGET_SPARC64)
80#define TARGET_PHYS_ADDR_SPACE_BITS 41
81#elif defined(TARGET_SPARC)
82#define TARGET_PHYS_ADDR_SPACE_BITS 36
83#elif defined(TARGET_ALPHA)
84#define TARGET_PHYS_ADDR_SPACE_BITS 42
85#define TARGET_VIRT_ADDR_SPACE_BITS 42
86#elif defined(TARGET_PPC64)
87#define TARGET_PHYS_ADDR_SPACE_BITS 42
88#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
89#define TARGET_PHYS_ADDR_SPACE_BITS 42
90#elif defined(TARGET_I386) && !defined(USE_KQEMU)
91#define TARGET_PHYS_ADDR_SPACE_BITS 36
92#else
93/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
94#define TARGET_PHYS_ADDR_SPACE_BITS 32
95#endif
96
97static TranslationBlock *tbs;
98int code_gen_max_blocks;
99TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
100static int nb_tbs;
101/* any access to the tbs or the page table must use this lock */
102spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
103
104#ifndef VBOX
105#if defined(__arm__) || defined(__sparc_v9__)
106/* The prologue must be reachable with a direct jump. ARM and Sparc64
107 have limited branch ranges (possibly also PPC) so place it in a
 108 section close to the code segment. */
109#define code_gen_section \
110 __attribute__((__section__(".gen_code"))) \
111 __attribute__((aligned (32)))
112#else
113#define code_gen_section \
114 __attribute__((aligned (32)))
115#endif
116uint8_t code_gen_prologue[1024] code_gen_section;
117
118#else /* VBOX */
119extern uint8_t* code_gen_prologue;
120#endif /* VBOX */
121
122static uint8_t *code_gen_buffer;
123static unsigned long code_gen_buffer_size;
124/* threshold to flush the translated code buffer */
125static unsigned long code_gen_buffer_max_size;
126uint8_t *code_gen_ptr;
127
128#ifndef VBOX
129#if !defined(CONFIG_USER_ONLY)
130ram_addr_t phys_ram_size;
131int phys_ram_fd;
132uint8_t *phys_ram_base;
133uint8_t *phys_ram_dirty;
134static int in_migration;
135static ram_addr_t phys_ram_alloc_offset = 0;
136#endif
137#else /* VBOX */
138RTGCPHYS phys_ram_size;
139/* we have memory ranges (the high PC-BIOS mapping) which
 140 cause some pages to fall outside the dirty map here. */
141RTGCPHYS phys_ram_dirty_size;
142#endif /* VBOX */
143#if !defined(VBOX)
144uint8_t *phys_ram_base;
145#endif
146uint8_t *phys_ram_dirty;
147
148CPUState *first_cpu;
149/* current CPU in the current thread. It is only valid inside
150 cpu_exec() */
151CPUState *cpu_single_env;
152/* 0 = Do not count executed instructions.
153 1 = Precise instruction counting.
154 2 = Adaptive rate instruction counting. */
155int use_icount = 0;
156/* Current instruction counter. While executing translated code this may
157 include some instructions that have not yet been executed. */
158int64_t qemu_icount;
159
160typedef struct PageDesc {
161 /* list of TBs intersecting this ram page */
162 TranslationBlock *first_tb;
163 /* in order to optimize self modifying code, we count the number
164 of lookups we do to a given page to use a bitmap */
165 unsigned int code_write_count;
166 uint8_t *code_bitmap;
167#if defined(CONFIG_USER_ONLY)
168 unsigned long flags;
169#endif
170} PageDesc;
171
172typedef struct PhysPageDesc {
173 /* offset in host memory of the page + io_index in the low 12 bits */
174 ram_addr_t phys_offset;
175} PhysPageDesc;
176
177#define L2_BITS 10
178#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
179/* XXX: this is a temporary hack for alpha target.
180 * In the future, this is to be replaced by a multi-level table
181 * to actually be able to handle the complete 64 bits address space.
182 */
183#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
184#else
185#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
186#endif
187#ifdef VBOX
188#define L0_BITS (TARGET_PHYS_ADDR_SPACE_BITS - 32)
189#endif
190
191#ifdef VBOX
192#define L0_SIZE (1 << L0_BITS)
193#endif
194#define L1_SIZE (1 << L1_BITS)
195#define L2_SIZE (1 << L2_BITS)
196
197static void io_mem_init(void);
198
199unsigned long qemu_real_host_page_size;
200unsigned long qemu_host_page_bits;
201unsigned long qemu_host_page_size;
202unsigned long qemu_host_page_mask;
203
204/* XXX: for system emulation, it could just be an array */
205#ifndef VBOX
206static PageDesc *l1_map[L1_SIZE];
207static PhysPageDesc **l1_phys_map;
208#else
209static unsigned l0_map_max_used = 0;
210static PageDesc **l0_map[L0_SIZE];
211static void **l0_phys_map[L0_SIZE];
212#endif
213
214#if !defined(CONFIG_USER_ONLY)
215static void io_mem_init(void);
216
217/* io memory support */
218CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
219CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
220void *io_mem_opaque[IO_MEM_NB_ENTRIES];
221static int io_mem_nb;
222static int io_mem_watch;
223#endif
224
225#ifndef VBOX
226/* log support */
227static const char *logfilename = "/tmp/qemu.log";
228#endif /* !VBOX */
229FILE *logfile;
230int loglevel;
231#ifndef VBOX
232static int log_append = 0;
233#endif
234
235/* statistics */
236#ifndef VBOX
237static int tlb_flush_count;
238static int tb_flush_count;
239static int tb_phys_invalidate_count;
240#else /* VBOX - Resettable U32 stats, see VBoxRecompiler.c. */
241uint32_t tlb_flush_count;
242uint32_t tb_flush_count;
243uint32_t tb_phys_invalidate_count;
244#endif /* VBOX */
245
246#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
247typedef struct subpage_t {
248 target_phys_addr_t base;
249 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
250 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
251 void *opaque[TARGET_PAGE_SIZE][2][4];
252} subpage_t;
253
254
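/* make the given host memory range readable, writable and executable */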
255#ifndef VBOX
256#ifdef _WIN32
257static void map_exec(void *addr, long size)
258{
259 DWORD old_protect;
260 VirtualProtect(addr, size,
261 PAGE_EXECUTE_READWRITE, &old_protect);
262
263}
264#else
265static void map_exec(void *addr, long size)
266{
267 unsigned long start, end, page_size;
268
269 page_size = getpagesize();
270 start = (unsigned long)addr;
271 start &= ~(page_size - 1);
272
273 end = (unsigned long)addr + size;
274 end += page_size - 1;
275 end &= ~(page_size - 1);
276
277 mprotect((void *)start, end - start,
278 PROT_READ | PROT_WRITE | PROT_EXEC);
279}
280#endif
281#else // VBOX
282static void map_exec(void *addr, long size)
283{
284 RTMemProtect(addr, size,
285 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
286}
287#endif
288
289static void page_init(void)
290{
291 /* NOTE: we can always suppose that qemu_host_page_size >=
292 TARGET_PAGE_SIZE */
293#ifdef VBOX
294 RTMemProtect(code_gen_buffer, sizeof(code_gen_buffer),
295 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
296 qemu_real_host_page_size = PAGE_SIZE;
297#else /* !VBOX */
298#ifdef _WIN32
299 {
300 SYSTEM_INFO system_info;
301 DWORD old_protect;
302
303 GetSystemInfo(&system_info);
304 qemu_real_host_page_size = system_info.dwPageSize;
305 }
306#else
307 qemu_real_host_page_size = getpagesize();
308#endif
309#endif /* !VBOX */
310
311 if (qemu_host_page_size == 0)
312 qemu_host_page_size = qemu_real_host_page_size;
313 if (qemu_host_page_size < TARGET_PAGE_SIZE)
314 qemu_host_page_size = TARGET_PAGE_SIZE;
315 qemu_host_page_bits = 0;
316#ifndef VBOX
317 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
318#else
319 while ((1 << qemu_host_page_bits) < (int)qemu_host_page_size)
320#endif
321 qemu_host_page_bits++;
322 qemu_host_page_mask = ~(qemu_host_page_size - 1);
323#ifndef VBOX
324 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
325 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
326#endif
327#ifdef VBOX
 328 /* We use other means to set the reserved bit on our pages */
329#else
330#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
331 {
332 long long startaddr, endaddr;
333 FILE *f;
334 int n;
335
336 mmap_lock();
337 last_brk = (unsigned long)sbrk(0);
338 f = fopen("/proc/self/maps", "r");
339 if (f) {
340 do {
341 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
342 if (n == 2) {
343 startaddr = MIN(startaddr,
344 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
345 endaddr = MIN(endaddr,
346 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
347 page_set_flags(startaddr & TARGET_PAGE_MASK,
348 TARGET_PAGE_ALIGN(endaddr),
349 PAGE_RESERVED);
350 }
351 } while (!feof(f));
352 fclose(f);
353 }
354 mmap_unlock();
355 }
356#endif
357#endif
358}
359
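/* return the slot in the level 1 page map for the given page index;
   the VBox build adds a level 0 table whose entries are allocated lazily */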
360#ifndef VBOX
361static inline PageDesc **page_l1_map(target_ulong index)
362#else
363DECLINLINE(PageDesc **) page_l1_map(target_ulong index)
364#endif
365{
366#ifndef VBOX
367#if TARGET_LONG_BITS > 32
368 /* Host memory outside guest VM. For 32-bit targets we have already
369 excluded high addresses. */
370 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
371 return NULL;
372#endif
373 return &l1_map[index >> L2_BITS];
374#else /* VBOX */
375 PageDesc **l1_map;
376 AssertMsgReturn(index < (target_ulong)L2_SIZE * L1_SIZE * L0_SIZE,
377 ("index=%RGp >= %RGp; L1_SIZE=%#x L2_SIZE=%#x L0_SIZE=%#x\n",
378 (RTGCPHYS)index, (RTGCPHYS)L2_SIZE * L1_SIZE, L1_SIZE, L2_SIZE, L0_SIZE),
379 NULL);
380 l1_map = l0_map[index >> (L1_BITS + L2_BITS)];
381 if (RT_UNLIKELY(!l1_map))
382 {
383 unsigned i0 = index >> (L1_BITS + L2_BITS);
384 l0_map[i0] = l1_map = qemu_mallocz(sizeof(PageDesc *) * L1_SIZE);
385 if (RT_UNLIKELY(!l1_map))
386 return NULL;
387 if (i0 >= l0_map_max_used)
388 l0_map_max_used = i0 + 1;
389 }
390 return &l1_map[(index >> L2_BITS) & (L1_SIZE - 1)];
391#endif /* VBOX */
392}
393
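/* look up the PageDesc for a page index, allocating the level 2 array
   on demand */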
394#ifndef VBOX
395static inline PageDesc *page_find_alloc(target_ulong index)
396#else
397DECLINLINE(PageDesc *) page_find_alloc(target_ulong index)
398#endif
399{
400 PageDesc **lp, *p;
401 lp = page_l1_map(index);
402 if (!lp)
403 return NULL;
404
405 p = *lp;
406 if (!p) {
407 /* allocate if not found */
408#if defined(CONFIG_USER_ONLY)
409 unsigned long addr;
410 size_t len = sizeof(PageDesc) * L2_SIZE;
411 /* Don't use qemu_malloc because it may recurse. */
412 p = mmap(0, len, PROT_READ | PROT_WRITE,
413 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
414 *lp = p;
415 addr = h2g(p);
416 if (addr == (target_ulong)addr) {
417 page_set_flags(addr & TARGET_PAGE_MASK,
418 TARGET_PAGE_ALIGN(addr + len),
419 PAGE_RESERVED);
420 }
421#else
422 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
423 *lp = p;
424#endif
425 }
426 return p + (index & (L2_SIZE - 1));
427}
428
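/* like page_find_alloc(), but never allocates; returns NULL if the page
   has no descriptor yet */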
429#ifndef VBOX
430static inline PageDesc *page_find(target_ulong index)
431#else
432DECLINLINE(PageDesc *) page_find(target_ulong index)
433#endif
434{
435 PageDesc **lp, *p;
436 lp = page_l1_map(index);
437 if (!lp)
438 return NULL;
439
440 p = *lp;
441 if (!p)
442 return 0;
443 return p + (index & (L2_SIZE - 1));
444}
445
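/* look up the PhysPageDesc for a physical page index; if 'alloc' is set,
   missing intermediate and leaf tables are allocated and new entries are
   initialized to IO_MEM_UNASSIGNED */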
446static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
447{
448 void **lp, **p;
449 PhysPageDesc *pd;
450
451#ifndef VBOX
452 p = (void **)l1_phys_map;
453#if TARGET_PHYS_ADDR_SPACE_BITS > 32
454
455#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
456#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
457#endif
458 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
459 p = *lp;
460 if (!p) {
461 /* allocate if not found */
462 if (!alloc)
463 return NULL;
464 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
465 memset(p, 0, sizeof(void *) * L1_SIZE);
466 *lp = p;
467 }
468#endif
469#else /* VBOX */
470 /* level 0 lookup and lazy allocation of level 1 map. */
471 if (RT_UNLIKELY(index >= (target_phys_addr_t)L2_SIZE * L1_SIZE * L0_SIZE))
472 return NULL;
473 p = l0_phys_map[index >> (L1_BITS + L2_BITS)];
474 if (RT_UNLIKELY(!p)) {
475 if (!alloc)
476 return NULL;
477 p = qemu_vmalloc(sizeof(void **) * L1_SIZE);
478 memset(p, 0, sizeof(void **) * L1_SIZE);
479 l0_phys_map[index >> (L1_BITS + L2_BITS)] = p;
480 }
481
482 /* level 1 lookup and lazy allocation of level 2 map. */
483#endif /* VBOX */
484 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
485 pd = *lp;
486 if (!pd) {
487 int i;
488 /* allocate if not found */
489 if (!alloc)
490 return NULL;
491 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
492 *lp = pd;
493 for (i = 0; i < L2_SIZE; i++)
494 pd[i].phys_offset = IO_MEM_UNASSIGNED;
495 }
496 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
497}
498
499#ifndef VBOX
500static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
501#else
502DECLINLINE(PhysPageDesc *) phys_page_find(target_phys_addr_t index)
503#endif
504{
505 return phys_page_find_alloc(index, 0);
506}
507
508#if !defined(CONFIG_USER_ONLY)
509static void tlb_protect_code(ram_addr_t ram_addr);
510static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
511 target_ulong vaddr);
512#define mmap_lock() do { } while(0)
513#define mmap_unlock() do { } while(0)
514#endif
515
516#ifdef VBOX
517/*
 518 * We don't need such a huge codegen buffer size, as we execute most of the code
 519 * in raw or hwacc mode.
520 */
521#define DEFAULT_CODE_GEN_BUFFER_SIZE (8 * 1024 * 1024)
522#else
523#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
524#endif
525
526#if defined(CONFIG_USER_ONLY)
527/* Currently it is not recommended to allocate big chunks of data in
 528 user mode. This will change once a dedicated libc is used. */
529#define USE_STATIC_CODE_GEN_BUFFER
530#endif
531
532/* VBox allocates codegen buffer dynamically */
533#ifndef VBOX
534#ifdef USE_STATIC_CODE_GEN_BUFFER
535static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
536#endif
537#endif
538
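/* allocate the translated code buffer (static, mmap'ed or RTMemExecAlloc'ed
   depending on the build), make it executable and size the TB array */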
539static void code_gen_alloc(unsigned long tb_size)
540{
541#ifdef USE_STATIC_CODE_GEN_BUFFER
542 code_gen_buffer = static_code_gen_buffer;
543 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
544 map_exec(code_gen_buffer, code_gen_buffer_size);
545#else
546#ifdef VBOX
 547 /* We cannot use phys_ram_size here, as it is still 0 at this point;
 548 * it only gets initialized once the RAM registration callback
 549 * (REMR3NotifyPhysRamRegister()) has been called.
550 */
551 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
552#else
553 code_gen_buffer_size = tb_size;
554 if (code_gen_buffer_size == 0) {
555#if defined(CONFIG_USER_ONLY)
556 /* in user mode, phys_ram_size is not meaningful */
557 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
558#else
559 /* XXX: needs adjustments */
560 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
561#endif
562
563 }
564 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
565 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
566#endif /* VBOX */
567
568 /* The code gen buffer location may have constraints depending on
569 the host cpu and OS */
570#ifdef VBOX
571 code_gen_buffer = RTMemExecAlloc(code_gen_buffer_size);
572
573 if (!code_gen_buffer) {
 574 LogRel(("REM: failed to allocate codegen buffer %lld\n",
575 code_gen_buffer_size));
576 return;
577 }
578#else //!VBOX
579#if defined(__linux__)
580 {
581 int flags;
582 void *start = NULL;
583
584 flags = MAP_PRIVATE | MAP_ANONYMOUS;
585#if defined(__x86_64__)
586 flags |= MAP_32BIT;
587 /* Cannot map more than that */
588 if (code_gen_buffer_size > (800 * 1024 * 1024))
589 code_gen_buffer_size = (800 * 1024 * 1024);
590#elif defined(__sparc_v9__)
591 // Map the buffer below 2G, so we can use direct calls and branches
592 flags |= MAP_FIXED;
593 start = (void *) 0x60000000UL;
594 if (code_gen_buffer_size > (512 * 1024 * 1024))
595 code_gen_buffer_size = (512 * 1024 * 1024);
596#endif
597 code_gen_buffer = mmap(start, code_gen_buffer_size,
598 PROT_WRITE | PROT_READ | PROT_EXEC,
599 flags, -1, 0);
600 if (code_gen_buffer == MAP_FAILED) {
601 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
602 exit(1);
603 }
604 }
605#elif defined(__FreeBSD__)
606 {
607 int flags;
608 void *addr = NULL;
609 flags = MAP_PRIVATE | MAP_ANONYMOUS;
610#if defined(__x86_64__)
611 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
612 * 0x40000000 is free */
613 flags |= MAP_FIXED;
614 addr = (void *)0x40000000;
615 /* Cannot map more than that */
616 if (code_gen_buffer_size > (800 * 1024 * 1024))
617 code_gen_buffer_size = (800 * 1024 * 1024);
618#endif
619 code_gen_buffer = mmap(addr, code_gen_buffer_size,
620 PROT_WRITE | PROT_READ | PROT_EXEC,
621 flags, -1, 0);
622 if (code_gen_buffer == MAP_FAILED) {
623 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
624 exit(1);
625 }
626 }
627#else
628 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
629 if (!code_gen_buffer) {
630 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
631 exit(1);
632 }
633 map_exec(code_gen_buffer, code_gen_buffer_size);
634#endif
635 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
636#endif /* !VBOX */
637#endif /* !USE_STATIC_CODE_GEN_BUFFER */
638#ifndef VBOX
639 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
640#else
641 map_exec(code_gen_prologue, _1K);
642#endif
643
644 code_gen_buffer_max_size = code_gen_buffer_size -
645 code_gen_max_block_size();
646 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
647 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
648}
649
650/* Must be called before using the QEMU cpus. 'tb_size' is the size
651 (in bytes) allocated to the translation buffer. Zero means default
652 size. */
653void cpu_exec_init_all(unsigned long tb_size)
654{
655 cpu_gen_init();
656 code_gen_alloc(tb_size);
657 code_gen_ptr = code_gen_buffer;
658 page_init();
659#if !defined(CONFIG_USER_ONLY)
660 io_mem_init();
661#endif
662}
663
664#ifndef VBOX
665#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
666
667#define CPU_COMMON_SAVE_VERSION 1
668
669static void cpu_common_save(QEMUFile *f, void *opaque)
670{
671 CPUState *env = opaque;
672
673 qemu_put_be32s(f, &env->halted);
674 qemu_put_be32s(f, &env->interrupt_request);
675}
676
677static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
678{
679 CPUState *env = opaque;
680
681 if (version_id != CPU_COMMON_SAVE_VERSION)
682 return -EINVAL;
683
684 qemu_get_be32s(f, &env->halted);
685 qemu_get_be32s(f, &env->interrupt_request);
686 tlb_flush(env, 1);
687
688 return 0;
689}
690#endif
691#endif //!VBOX
692
693void cpu_exec_init(CPUState *env)
694{
695 CPUState **penv;
696 int cpu_index;
697
698 env->next_cpu = NULL;
699 penv = &first_cpu;
700 cpu_index = 0;
701 while (*penv != NULL) {
702 penv = (CPUState **)&(*penv)->next_cpu;
703 cpu_index++;
704 }
705 env->cpu_index = cpu_index;
706 env->nb_watchpoints = 0;
707 *penv = env;
708#ifndef VBOX
709#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
710 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
711 cpu_common_save, cpu_common_load, env);
712 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
713 cpu_save, cpu_load, env);
714#endif
715#endif // !VBOX
716}
717
718#ifndef VBOX
719static inline void invalidate_page_bitmap(PageDesc *p)
720#else
721DECLINLINE(void) invalidate_page_bitmap(PageDesc *p)
722#endif
723{
724 if (p->code_bitmap) {
725 qemu_free(p->code_bitmap);
726 p->code_bitmap = NULL;
727 }
728 p->code_write_count = 0;
729}
730
731/* set to NULL all the 'first_tb' fields in all PageDescs */
732static void page_flush_tb(void)
733{
734 int i, j;
735 PageDesc *p;
736#ifdef VBOX
737 int k;
738#endif
739
740#ifdef VBOX
741 k = l0_map_max_used;
742 while (k-- > 0) {
743 PageDesc **l1_map = l0_map[k];
744 if (l1_map) {
745#endif
746 for(i = 0; i < L1_SIZE; i++) {
747 p = l1_map[i];
748 if (p) {
749 for(j = 0; j < L2_SIZE; j++) {
750 p->first_tb = NULL;
751 invalidate_page_bitmap(p);
752 p++;
753 }
754 }
755 }
756#ifdef VBOX
757 }
758 }
759#endif
760}
761
762/* flush all the translation blocks */
763/* XXX: tb_flush is currently not thread safe */
764void tb_flush(CPUState *env1)
765{
766 CPUState *env;
767#ifdef VBOX
768 STAM_PROFILE_START(&env1->StatTbFlush, a);
769#endif
770#if defined(DEBUG_FLUSH)
771 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
772 (unsigned long)(code_gen_ptr - code_gen_buffer),
773 nb_tbs, nb_tbs > 0 ?
774 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
775#endif
776 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
777 cpu_abort(env1, "Internal error: code buffer overflow\n");
778
779 nb_tbs = 0;
780
781 for(env = first_cpu; env != NULL; env = env->next_cpu) {
782 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
783 }
784
785 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
786 page_flush_tb();
787
788 code_gen_ptr = code_gen_buffer;
789 /* XXX: flush processor icache at this point if cache flush is
790 expensive */
791 tb_flush_count++;
792#ifdef VBOX
793 STAM_PROFILE_STOP(&env1->StatTbFlush, a);
794#endif
795}
796
797#ifdef DEBUG_TB_CHECK
798static void tb_invalidate_check(target_ulong address)
799{
800 TranslationBlock *tb;
801 int i;
802 address &= TARGET_PAGE_MASK;
803 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
804 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
805 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
806 address >= tb->pc + tb->size)) {
807 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
808 address, (long)tb->pc, tb->size);
809 }
810 }
811 }
812}
813
814/* verify that all the pages have correct rights for code */
815static void tb_page_check(void)
816{
817 TranslationBlock *tb;
818 int i, flags1, flags2;
819
820 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
821 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
822 flags1 = page_get_flags(tb->pc);
823 flags2 = page_get_flags(tb->pc + tb->size - 1);
824 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
825 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
826 (long)tb->pc, tb->size, flags1, flags2);
827 }
828 }
829 }
830}
831
832static void tb_jmp_check(TranslationBlock *tb)
833{
834 TranslationBlock *tb1;
835 unsigned int n1;
836
837 /* suppress any remaining jumps to this TB */
838 tb1 = tb->jmp_first;
839 for(;;) {
840 n1 = (long)tb1 & 3;
841 tb1 = (TranslationBlock *)((long)tb1 & ~3);
842 if (n1 == 2)
843 break;
844 tb1 = tb1->jmp_next[n1];
845 }
846 /* check end of list */
847 if (tb1 != tb) {
848 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
849 }
850}
851#endif // DEBUG_TB_CHECK
852
853/* invalidate one TB */
854#ifndef VBOX
855static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
856 int next_offset)
857#else
858DECLINLINE(void) tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
859 int next_offset)
860#endif
861{
862 TranslationBlock *tb1;
863 for(;;) {
864 tb1 = *ptb;
865 if (tb1 == tb) {
866 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
867 break;
868 }
869 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
870 }
871}
872
873#ifndef VBOX
874static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
875#else
876DECLINLINE(void) tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
877#endif
878{
879 TranslationBlock *tb1;
880 unsigned int n1;
881
882 for(;;) {
883 tb1 = *ptb;
884 n1 = (long)tb1 & 3;
885 tb1 = (TranslationBlock *)((long)tb1 & ~3);
886 if (tb1 == tb) {
887 *ptb = tb1->page_next[n1];
888 break;
889 }
890 ptb = &tb1->page_next[n1];
891 }
892}
893
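/* unlink jump slot 'n' of 'tb' from the circular list of incoming jumps
   maintained by the TB it points to */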
894#ifndef VBOX
895static inline void tb_jmp_remove(TranslationBlock *tb, int n)
896#else
897DECLINLINE(void) tb_jmp_remove(TranslationBlock *tb, int n)
898#endif
899{
900 TranslationBlock *tb1, **ptb;
901 unsigned int n1;
902
903 ptb = &tb->jmp_next[n];
904 tb1 = *ptb;
905 if (tb1) {
906 /* find tb(n) in circular list */
907 for(;;) {
908 tb1 = *ptb;
909 n1 = (long)tb1 & 3;
910 tb1 = (TranslationBlock *)((long)tb1 & ~3);
911 if (n1 == n && tb1 == tb)
912 break;
913 if (n1 == 2) {
914 ptb = &tb1->jmp_first;
915 } else {
916 ptb = &tb1->jmp_next[n1];
917 }
918 }
919 /* now we can suppress tb(n) from the list */
920 *ptb = tb->jmp_next[n];
921
922 tb->jmp_next[n] = NULL;
923 }
924}
925
926/* reset the jump entry 'n' of a TB so that it is not chained to
927 another TB */
928#ifndef VBOX
929static inline void tb_reset_jump(TranslationBlock *tb, int n)
930#else
931DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
932#endif
933{
934 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
935}
936
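/* remove a TB from the physical hash table, from the page lists and from
   the per-CPU jump caches, and unchain any jumps still pointing at it */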
937void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
938{
939 CPUState *env;
940 PageDesc *p;
941 unsigned int h, n1;
942 target_phys_addr_t phys_pc;
943 TranslationBlock *tb1, *tb2;
944
945 /* remove the TB from the hash list */
946 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
947 h = tb_phys_hash_func(phys_pc);
948 tb_remove(&tb_phys_hash[h], tb,
949 offsetof(TranslationBlock, phys_hash_next));
950
951 /* remove the TB from the page list */
952 if (tb->page_addr[0] != page_addr) {
953 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
954 tb_page_remove(&p->first_tb, tb);
955 invalidate_page_bitmap(p);
956 }
957 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
958 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
959 tb_page_remove(&p->first_tb, tb);
960 invalidate_page_bitmap(p);
961 }
962
963 tb_invalidated_flag = 1;
964
965 /* remove the TB from the hash list */
966 h = tb_jmp_cache_hash_func(tb->pc);
967 for(env = first_cpu; env != NULL; env = env->next_cpu) {
968 if (env->tb_jmp_cache[h] == tb)
969 env->tb_jmp_cache[h] = NULL;
970 }
971
972 /* suppress this TB from the two jump lists */
973 tb_jmp_remove(tb, 0);
974 tb_jmp_remove(tb, 1);
975
976 /* suppress any remaining jumps to this TB */
977 tb1 = tb->jmp_first;
978 for(;;) {
979 n1 = (long)tb1 & 3;
980 if (n1 == 2)
981 break;
982 tb1 = (TranslationBlock *)((long)tb1 & ~3);
983 tb2 = tb1->jmp_next[n1];
984 tb_reset_jump(tb1, n1);
985 tb1->jmp_next[n1] = NULL;
986 tb1 = tb2;
987 }
988 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
989
990 tb_phys_invalidate_count++;
991}
992
993
994#ifdef VBOX
995void tb_invalidate_virt(CPUState *env, uint32_t eip)
996{
997# if 1
998 tb_flush(env);
999# else
1000 uint8_t *cs_base, *pc;
1001 unsigned int flags, h, phys_pc;
1002 TranslationBlock *tb, **ptb;
1003
1004 flags = env->hflags;
1005 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1006 cs_base = env->segs[R_CS].base;
1007 pc = cs_base + eip;
1008
1009 tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base,
1010 flags);
1011
1012 if(tb)
1013 {
1014# ifdef DEBUG
1015 printf("invalidating TB (%08X) at %08X\n", tb, eip);
1016# endif
1017 tb_invalidate(tb);
1018 //Note: this will leak TBs, but the whole cache will be flushed
1019 // when it happens too often
1020 tb->pc = 0;
1021 tb->cs_base = 0;
1022 tb->flags = 0;
1023 }
1024# endif
1025}
1026
1027# ifdef VBOX_STRICT
1028/**
1029 * Gets the page offset.
1030 */
1031unsigned long get_phys_page_offset(target_ulong addr)
1032{
1033 PhysPageDesc *p = phys_page_find(addr >> TARGET_PAGE_BITS);
1034 return p ? p->phys_offset : 0;
1035}
1036# endif /* VBOX_STRICT */
1037#endif /* VBOX */
1038
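/* set 'len' consecutive bits starting at bit 'start' in the bitmap 'tab' */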
1039#ifndef VBOX
1040static inline void set_bits(uint8_t *tab, int start, int len)
1041#else
1042DECLINLINE(void) set_bits(uint8_t *tab, int start, int len)
1043#endif
1044{
1045 int end, mask, end1;
1046
1047 end = start + len;
1048 tab += start >> 3;
1049 mask = 0xff << (start & 7);
1050 if ((start & ~7) == (end & ~7)) {
1051 if (start < end) {
1052 mask &= ~(0xff << (end & 7));
1053 *tab |= mask;
1054 }
1055 } else {
1056 *tab++ |= mask;
1057 start = (start + 8) & ~7;
1058 end1 = end & ~7;
1059 while (start < end1) {
1060 *tab++ = 0xff;
1061 start += 8;
1062 }
1063 if (start < end) {
1064 mask = ~(0xff << (end & 7));
1065 *tab |= mask;
1066 }
1067 }
1068}
1069
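/* build a bitmap of the bytes in the page that are covered by translated
   code, so that self modifying code writes can be filtered cheaply */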
1070static void build_page_bitmap(PageDesc *p)
1071{
1072 int n, tb_start, tb_end;
1073 TranslationBlock *tb;
1074
1075 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
1076 if (!p->code_bitmap)
1077 return;
1078 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
1079
1080 tb = p->first_tb;
1081 while (tb != NULL) {
1082 n = (long)tb & 3;
1083 tb = (TranslationBlock *)((long)tb & ~3);
1084 /* NOTE: this is subtle as a TB may span two physical pages */
1085 if (n == 0) {
1086 /* NOTE: tb_end may be after the end of the page, but
1087 it is not a problem */
1088 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1089 tb_end = tb_start + tb->size;
1090 if (tb_end > TARGET_PAGE_SIZE)
1091 tb_end = TARGET_PAGE_SIZE;
1092 } else {
1093 tb_start = 0;
1094 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1095 }
1096 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
1097 tb = tb->page_next[n];
1098 }
1099}
1100
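/* translate a block starting at pc/cs_base/flags and link it into the
   physical page tables; the whole TB cache is flushed first if no TB can
   be allocated */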
1101TranslationBlock *tb_gen_code(CPUState *env,
1102 target_ulong pc, target_ulong cs_base,
1103 int flags, int cflags)
1104{
1105 TranslationBlock *tb;
1106 uint8_t *tc_ptr;
1107 target_ulong phys_pc, phys_page2, virt_page2;
1108 int code_gen_size;
1109
1110 phys_pc = get_phys_addr_code(env, pc);
1111 tb = tb_alloc(pc);
1112 if (!tb) {
1113 /* flush must be done */
1114 tb_flush(env);
1115 /* cannot fail at this point */
1116 tb = tb_alloc(pc);
1117 /* Don't forget to invalidate previous TB info. */
1118 tb_invalidated_flag = 1;
1119 }
1120 tc_ptr = code_gen_ptr;
1121 tb->tc_ptr = tc_ptr;
1122 tb->cs_base = cs_base;
1123 tb->flags = flags;
1124 tb->cflags = cflags;
1125 cpu_gen_code(env, tb, &code_gen_size);
1126 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
1127
1128 /* check next page if needed */
1129 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1130 phys_page2 = -1;
1131 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1132 phys_page2 = get_phys_addr_code(env, virt_page2);
1133 }
1134 tb_link_phys(tb, phys_pc, phys_page2);
1135 return tb;
1136}
1137
1138/* invalidate all TBs which intersect with the target physical page
1139 starting in range [start;end[. NOTE: start and end must refer to
1140 the same physical page. 'is_cpu_write_access' should be true if called
1141 from a real cpu write access: the virtual CPU will exit the current
1142 TB if code is modified inside this TB. */
1143void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
1144 int is_cpu_write_access)
1145{
1146 int n, current_tb_modified, current_tb_not_found, current_flags;
1147 CPUState *env = cpu_single_env;
1148 PageDesc *p;
1149 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
1150 target_ulong tb_start, tb_end;
1151 target_ulong current_pc, current_cs_base;
1152
1153 p = page_find(start >> TARGET_PAGE_BITS);
1154 if (!p)
1155 return;
1156 if (!p->code_bitmap &&
1157 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1158 is_cpu_write_access) {
1159 /* build code bitmap */
1160 build_page_bitmap(p);
1161 }
1162
1163 /* we remove all the TBs in the range [start, end[ */
1164 /* XXX: see if in some cases it could be faster to invalidate all the code */
1165 current_tb_not_found = is_cpu_write_access;
1166 current_tb_modified = 0;
1167 current_tb = NULL; /* avoid warning */
1168 current_pc = 0; /* avoid warning */
1169 current_cs_base = 0; /* avoid warning */
1170 current_flags = 0; /* avoid warning */
1171 tb = p->first_tb;
1172 while (tb != NULL) {
1173 n = (long)tb & 3;
1174 tb = (TranslationBlock *)((long)tb & ~3);
1175 tb_next = tb->page_next[n];
1176 /* NOTE: this is subtle as a TB may span two physical pages */
1177 if (n == 0) {
1178 /* NOTE: tb_end may be after the end of the page, but
1179 it is not a problem */
1180 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1181 tb_end = tb_start + tb->size;
1182 } else {
1183 tb_start = tb->page_addr[1];
1184 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1185 }
1186 if (!(tb_end <= start || tb_start >= end)) {
1187#ifdef TARGET_HAS_PRECISE_SMC
1188 if (current_tb_not_found) {
1189 current_tb_not_found = 0;
1190 current_tb = NULL;
1191 if (env->mem_io_pc) {
1192 /* now we have a real cpu fault */
1193 current_tb = tb_find_pc(env->mem_io_pc);
1194 }
1195 }
1196 if (current_tb == tb &&
1197 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1198 /* If we are modifying the current TB, we must stop
1199 its execution. We could be more precise by checking
1200 that the modification is after the current PC, but it
1201 would require a specialized function to partially
1202 restore the CPU state */
1203
1204 current_tb_modified = 1;
1205 cpu_restore_state(current_tb, env,
1206 env->mem_io_pc, NULL);
1207#if defined(TARGET_I386)
1208 current_flags = env->hflags;
1209 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1210 current_cs_base = (target_ulong)env->segs[R_CS].base;
1211 current_pc = current_cs_base + env->eip;
1212#else
1213#error unsupported CPU
1214#endif
1215 }
1216#endif /* TARGET_HAS_PRECISE_SMC */
1217 /* we need to do that to handle the case where a signal
1218 occurs while doing tb_phys_invalidate() */
1219 saved_tb = NULL;
1220 if (env) {
1221 saved_tb = env->current_tb;
1222 env->current_tb = NULL;
1223 }
1224 tb_phys_invalidate(tb, -1);
1225 if (env) {
1226 env->current_tb = saved_tb;
1227 if (env->interrupt_request && env->current_tb)
1228 cpu_interrupt(env, env->interrupt_request);
1229 }
1230 }
1231 tb = tb_next;
1232 }
1233#if !defined(CONFIG_USER_ONLY)
1234 /* if no code remaining, no need to continue to use slow writes */
1235 if (!p->first_tb) {
1236 invalidate_page_bitmap(p);
1237 if (is_cpu_write_access) {
1238 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1239 }
1240 }
1241#endif
1242#ifdef TARGET_HAS_PRECISE_SMC
1243 if (current_tb_modified) {
1244 /* we generate a block containing just the instruction
1245 modifying the memory. It will ensure that it cannot modify
1246 itself */
1247 env->current_tb = NULL;
1248 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1249 cpu_resume_from_signal(env, NULL);
1250 }
1251#endif
1252}
1253
1254
1255/* len must be <= 8 and start must be a multiple of len */
1256#ifndef VBOX
1257static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1258#else
1259DECLINLINE(void) tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1260#endif
1261{
1262 PageDesc *p;
1263 int offset, b;
1264#if 0
1265 if (1) {
1266 if (loglevel) {
1267 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1268 cpu_single_env->mem_io_vaddr, len,
1269 cpu_single_env->eip,
1270 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1271 }
1272 }
1273#endif
1274 p = page_find(start >> TARGET_PAGE_BITS);
1275 if (!p)
1276 return;
1277 if (p->code_bitmap) {
1278 offset = start & ~TARGET_PAGE_MASK;
1279 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1280 if (b & ((1 << len) - 1))
1281 goto do_invalidate;
1282 } else {
1283 do_invalidate:
1284 tb_invalidate_phys_page_range(start, start + len, 1);
1285 }
1286}
1287
1288
1289#if !defined(CONFIG_SOFTMMU)
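/* invalidate every TB intersecting the page containing 'addr'; pc/puc
   describe the faulting state for precise SMC handling */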
1290static void tb_invalidate_phys_page(target_phys_addr_t addr,
1291 unsigned long pc, void *puc)
1292{
1293 int n, current_flags, current_tb_modified;
1294 target_ulong current_pc, current_cs_base;
1295 PageDesc *p;
1296 TranslationBlock *tb, *current_tb;
1297#ifdef TARGET_HAS_PRECISE_SMC
1298 CPUState *env = cpu_single_env;
1299#endif
1300
1301 addr &= TARGET_PAGE_MASK;
1302 p = page_find(addr >> TARGET_PAGE_BITS);
1303 if (!p)
1304 return;
1305 tb = p->first_tb;
1306 current_tb_modified = 0;
1307 current_tb = NULL;
1308 current_pc = 0; /* avoid warning */
1309 current_cs_base = 0; /* avoid warning */
1310 current_flags = 0; /* avoid warning */
1311#ifdef TARGET_HAS_PRECISE_SMC
1312 if (tb && pc != 0) {
1313 current_tb = tb_find_pc(pc);
1314 }
1315#endif
1316 while (tb != NULL) {
1317 n = (long)tb & 3;
1318 tb = (TranslationBlock *)((long)tb & ~3);
1319#ifdef TARGET_HAS_PRECISE_SMC
1320 if (current_tb == tb &&
1321 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1322 /* If we are modifying the current TB, we must stop
1323 its execution. We could be more precise by checking
1324 that the modification is after the current PC, but it
1325 would require a specialized function to partially
1326 restore the CPU state */
1327
1328 current_tb_modified = 1;
1329 cpu_restore_state(current_tb, env, pc, puc);
1330#if defined(TARGET_I386)
1331 current_flags = env->hflags;
1332 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1333 current_cs_base = (target_ulong)env->segs[R_CS].base;
1334 current_pc = current_cs_base + env->eip;
1335#else
1336#error unsupported CPU
1337#endif
1338 }
1339#endif /* TARGET_HAS_PRECISE_SMC */
1340 tb_phys_invalidate(tb, addr);
1341 tb = tb->page_next[n];
1342 }
1343 p->first_tb = NULL;
1344#ifdef TARGET_HAS_PRECISE_SMC
1345 if (current_tb_modified) {
1346 /* we generate a block containing just the instruction
1347 modifying the memory. It will ensure that it cannot modify
1348 itself */
1349 env->current_tb = NULL;
1350 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1351 cpu_resume_from_signal(env, puc);
1352 }
1353#endif
1354}
1355#endif
1356
1357/* add the tb in the target page and protect it if necessary */
1358#ifndef VBOX
1359static inline void tb_alloc_page(TranslationBlock *tb,
1360 unsigned int n, target_ulong page_addr)
1361#else
1362DECLINLINE(void) tb_alloc_page(TranslationBlock *tb,
1363 unsigned int n, target_ulong page_addr)
1364#endif
1365{
1366 PageDesc *p;
1367 TranslationBlock *last_first_tb;
1368
1369 tb->page_addr[n] = page_addr;
1370 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1371 tb->page_next[n] = p->first_tb;
1372 last_first_tb = p->first_tb;
1373 p->first_tb = (TranslationBlock *)((long)tb | n);
1374 invalidate_page_bitmap(p);
1375
1376#if defined(TARGET_HAS_SMC) || 1
1377
1378#if defined(CONFIG_USER_ONLY)
1379 if (p->flags & PAGE_WRITE) {
1380 target_ulong addr;
1381 PageDesc *p2;
1382 int prot;
1383
1384 /* force the host page as non writable (writes will have a
1385 page fault + mprotect overhead) */
1386 page_addr &= qemu_host_page_mask;
1387 prot = 0;
1388 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1389 addr += TARGET_PAGE_SIZE) {
1390
1391 p2 = page_find (addr >> TARGET_PAGE_BITS);
1392 if (!p2)
1393 continue;
1394 prot |= p2->flags;
1395 p2->flags &= ~PAGE_WRITE;
1396 page_get_flags(addr);
1397 }
1398 mprotect(g2h(page_addr), qemu_host_page_size,
1399 (prot & PAGE_BITS) & ~PAGE_WRITE);
1400#ifdef DEBUG_TB_INVALIDATE
1401 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1402 page_addr);
1403#endif
1404 }
1405#else
1406 /* if some code is already present, then the pages are already
1407 protected. So we handle the case where only the first TB is
1408 allocated in a physical page */
1409 if (!last_first_tb) {
1410 tlb_protect_code(page_addr);
1411 }
1412#endif
1413
1414#endif /* TARGET_HAS_SMC */
1415}
1416
1417/* Allocate a new translation block. Flush the translation buffer if
1418 too many translation blocks or too much generated code. */
1419TranslationBlock *tb_alloc(target_ulong pc)
1420{
1421 TranslationBlock *tb;
1422
1423 if (nb_tbs >= code_gen_max_blocks ||
1424#ifndef VBOX
1425 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1426#else
1427 (code_gen_ptr - code_gen_buffer) >= (int)code_gen_buffer_max_size)
1428#endif
1429 return NULL;
1430 tb = &tbs[nb_tbs++];
1431 tb->pc = pc;
1432 tb->cflags = 0;
1433 return tb;
1434}
1435
1436void tb_free(TranslationBlock *tb)
1437{
 1438 /* In practice this is mostly used for single-use temporary TBs.
1439 Ignore the hard cases and just back up if this TB happens to
1440 be the last one generated. */
1441 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1442 code_gen_ptr = tb->tc_ptr;
1443 nb_tbs--;
1444 }
1445}
1446
1447/* add a new TB and link it to the physical page tables. phys_page2 is
1448 (-1) to indicate that only one page contains the TB. */
1449void tb_link_phys(TranslationBlock *tb,
1450 target_ulong phys_pc, target_ulong phys_page2)
1451{
1452 unsigned int h;
1453 TranslationBlock **ptb;
1454
1455 /* Grab the mmap lock to stop another thread invalidating this TB
1456 before we are done. */
1457 mmap_lock();
1458 /* add in the physical hash table */
1459 h = tb_phys_hash_func(phys_pc);
1460 ptb = &tb_phys_hash[h];
1461 tb->phys_hash_next = *ptb;
1462 *ptb = tb;
1463
1464 /* add in the page list */
1465 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1466 if (phys_page2 != -1)
1467 tb_alloc_page(tb, 1, phys_page2);
1468 else
1469 tb->page_addr[1] = -1;
1470
1471 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1472 tb->jmp_next[0] = NULL;
1473 tb->jmp_next[1] = NULL;
1474
1475 /* init original jump addresses */
1476 if (tb->tb_next_offset[0] != 0xffff)
1477 tb_reset_jump(tb, 0);
1478 if (tb->tb_next_offset[1] != 0xffff)
1479 tb_reset_jump(tb, 1);
1480
1481#ifdef DEBUG_TB_CHECK
1482 tb_page_check();
1483#endif
1484 mmap_unlock();
1485}
1486
1487/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1488 tb[1].tc_ptr. Return NULL if not found */
1489TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1490{
1491 int m_min, m_max, m;
1492 unsigned long v;
1493 TranslationBlock *tb;
1494
1495 if (nb_tbs <= 0)
1496 return NULL;
1497 if (tc_ptr < (unsigned long)code_gen_buffer ||
1498 tc_ptr >= (unsigned long)code_gen_ptr)
1499 return NULL;
1500 /* binary search (cf Knuth) */
1501 m_min = 0;
1502 m_max = nb_tbs - 1;
1503 while (m_min <= m_max) {
1504 m = (m_min + m_max) >> 1;
1505 tb = &tbs[m];
1506 v = (unsigned long)tb->tc_ptr;
1507 if (v == tc_ptr)
1508 return tb;
1509 else if (tc_ptr < v) {
1510 m_max = m - 1;
1511 } else {
1512 m_min = m + 1;
1513 }
1514 }
1515 return &tbs[m_max];
1516}
1517
1518static void tb_reset_jump_recursive(TranslationBlock *tb);
1519
1520#ifndef VBOX
1521static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1522#else
1523DECLINLINE(void) tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1524#endif
1525{
1526 TranslationBlock *tb1, *tb_next, **ptb;
1527 unsigned int n1;
1528
1529 tb1 = tb->jmp_next[n];
1530 if (tb1 != NULL) {
1531 /* find head of list */
1532 for(;;) {
1533 n1 = (long)tb1 & 3;
1534 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1535 if (n1 == 2)
1536 break;
1537 tb1 = tb1->jmp_next[n1];
1538 }
 1539 /* we are now sure that tb jumps to tb1 */
1540 tb_next = tb1;
1541
1542 /* remove tb from the jmp_first list */
1543 ptb = &tb_next->jmp_first;
1544 for(;;) {
1545 tb1 = *ptb;
1546 n1 = (long)tb1 & 3;
1547 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1548 if (n1 == n && tb1 == tb)
1549 break;
1550 ptb = &tb1->jmp_next[n1];
1551 }
1552 *ptb = tb->jmp_next[n];
1553 tb->jmp_next[n] = NULL;
1554
1555 /* suppress the jump to next tb in generated code */
1556 tb_reset_jump(tb, n);
1557
1558 /* suppress jumps in the tb on which we could have jumped */
1559 tb_reset_jump_recursive(tb_next);
1560 }
1561}
1562
1563static void tb_reset_jump_recursive(TranslationBlock *tb)
1564{
1565 tb_reset_jump_recursive2(tb, 0);
1566 tb_reset_jump_recursive2(tb, 1);
1567}
1568
1569#if defined(TARGET_HAS_ICE)
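/* invalidate the translated code containing the breakpoint address so it
   gets retranslated with the breakpoint taken into account */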
1570static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1571{
1572 target_ulong addr, pd;
1573 ram_addr_t ram_addr;
1574 PhysPageDesc *p;
1575
1576 addr = cpu_get_phys_page_debug(env, pc);
1577 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1578 if (!p) {
1579 pd = IO_MEM_UNASSIGNED;
1580 } else {
1581 pd = p->phys_offset;
1582 }
1583 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1584 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1585}
1586#endif
1587
1588/* Add a watchpoint. */
1589int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
1590{
1591 int i;
1592
1593 for (i = 0; i < env->nb_watchpoints; i++) {
1594 if (addr == env->watchpoint[i].vaddr)
1595 return 0;
1596 }
1597 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1598 return -1;
1599
1600 i = env->nb_watchpoints++;
1601 env->watchpoint[i].vaddr = addr;
1602 env->watchpoint[i].type = type;
1603 tlb_flush_page(env, addr);
1604 /* FIXME: This flush is needed because of the hack to make memory ops
1605 terminate the TB. It can be removed once the proper IO trap and
1606 re-execute bits are in. */
1607 tb_flush(env);
1608 return i;
1609}
1610
1611/* Remove a watchpoint. */
1612int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1613{
1614 int i;
1615
1616 for (i = 0; i < env->nb_watchpoints; i++) {
1617 if (addr == env->watchpoint[i].vaddr) {
1618 env->nb_watchpoints--;
1619 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1620 tlb_flush_page(env, addr);
1621 return 0;
1622 }
1623 }
1624 return -1;
1625}
1626
1627/* Remove all watchpoints. */
1628void cpu_watchpoint_remove_all(CPUState *env) {
1629 int i;
1630
1631 for (i = 0; i < env->nb_watchpoints; i++) {
1632 tlb_flush_page(env, env->watchpoint[i].vaddr);
1633 }
1634 env->nb_watchpoints = 0;
1635}
1636
1637/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1638 breakpoint is reached */
1639int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1640{
1641#if defined(TARGET_HAS_ICE)
1642 int i;
1643
1644 for(i = 0; i < env->nb_breakpoints; i++) {
1645 if (env->breakpoints[i] == pc)
1646 return 0;
1647 }
1648
1649 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1650 return -1;
1651 env->breakpoints[env->nb_breakpoints++] = pc;
1652
1653 breakpoint_invalidate(env, pc);
1654 return 0;
1655#else
1656 return -1;
1657#endif
1658}
1659
1660/* remove all breakpoints */
1661void cpu_breakpoint_remove_all(CPUState *env) {
1662#if defined(TARGET_HAS_ICE)
1663 int i;
1664 for(i = 0; i < env->nb_breakpoints; i++) {
1665 breakpoint_invalidate(env, env->breakpoints[i]);
1666 }
1667 env->nb_breakpoints = 0;
1668#endif
1669}
1670
1671/* remove a breakpoint */
1672int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1673{
1674#if defined(TARGET_HAS_ICE)
1675 int i;
1676 for(i = 0; i < env->nb_breakpoints; i++) {
1677 if (env->breakpoints[i] == pc)
1678 goto found;
1679 }
1680 return -1;
1681 found:
1682 env->nb_breakpoints--;
1683 if (i < env->nb_breakpoints)
1684 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1685
1686 breakpoint_invalidate(env, pc);
1687 return 0;
1688#else
1689 return -1;
1690#endif
1691}
1692
1693/* enable or disable single step mode. EXCP_DEBUG is returned by the
1694 CPU loop after each instruction */
1695void cpu_single_step(CPUState *env, int enabled)
1696{
1697#if defined(TARGET_HAS_ICE)
1698 if (env->singlestep_enabled != enabled) {
1699 env->singlestep_enabled = enabled;
1700 /* must flush all the translated code to avoid inconsistencies */
1701 /* XXX: only flush what is necessary */
1702 tb_flush(env);
1703 }
1704#endif
1705}
1706
1707#ifndef VBOX
1708/* enable or disable low levels log */
1709void cpu_set_log(int log_flags)
1710{
1711 loglevel = log_flags;
1712 if (loglevel && !logfile) {
1713 logfile = fopen(logfilename, "w");
1714 if (!logfile) {
1715 perror(logfilename);
1716 _exit(1);
1717 }
1718#if !defined(CONFIG_SOFTMMU)
1719 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1720 {
1721 static uint8_t logfile_buf[4096];
1722 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1723 }
1724#else
1725 setvbuf(logfile, NULL, _IOLBF, 0);
1726#endif
1727 }
1728}
1729
1730void cpu_set_log_filename(const char *filename)
1731{
1732 logfilename = strdup(filename);
1733}
1734#endif /* !VBOX */
1735
1736/* mask must never be zero, except for A20 change call */
1737void cpu_interrupt(CPUState *env, int mask)
1738{
1739#if !defined(USE_NPTL)
1740 TranslationBlock *tb;
1741 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1742#endif
1743 int old_mask;
1744
1745 old_mask = env->interrupt_request;
1746#ifdef VBOX
1747 VM_ASSERT_EMT(env->pVM);
1748 ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, mask);
1749#else /* !VBOX */
1750 /* FIXME: This is probably not threadsafe. A different thread could
1751 be in the middle of a read-modify-write operation. */
1752 env->interrupt_request |= mask;
1753#endif /* !VBOX */
1754#if defined(USE_NPTL)
1755 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1756 problem and hope the cpu will stop of its own accord. For userspace
1757 emulation this often isn't actually as bad as it sounds. Often
1758 signals are used primarily to interrupt blocking syscalls. */
1759#else
1760 if (use_icount) {
1761 env->icount_decr.u16.high = 0xffff;
1762#ifndef CONFIG_USER_ONLY
1763 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1764 an async event happened and we need to process it. */
1765 if (!can_do_io(env)
1766 && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1767 cpu_abort(env, "Raised interrupt while not in I/O function");
1768 }
1769#endif
1770 } else {
1771 tb = env->current_tb;
1772 /* if the cpu is currently executing code, we must unlink it and
1773 all the potentially executing TB */
1774 if (tb && !testandset(&interrupt_lock)) {
1775 env->current_tb = NULL;
1776 tb_reset_jump_recursive(tb);
1777 resetlock(&interrupt_lock);
1778 }
1779 }
1780#endif
1781}
1782
1783void cpu_reset_interrupt(CPUState *env, int mask)
1784{
1785#ifdef VBOX
1786 /*
1787 * Note: the current implementation can be executed by another thread without problems; make sure this remains true
1788 * for future changes!
1789 */
1790 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~mask);
1791#else /* !VBOX */
1792 env->interrupt_request &= ~mask;
1793#endif /* !VBOX */
1794}
1795
1796#ifndef VBOX
1797CPULogItem cpu_log_items[] = {
1798 { CPU_LOG_TB_OUT_ASM, "out_asm",
1799 "show generated host assembly code for each compiled TB" },
1800 { CPU_LOG_TB_IN_ASM, "in_asm",
1801 "show target assembly code for each compiled TB" },
1802 { CPU_LOG_TB_OP, "op",
1803 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1804#ifdef TARGET_I386
1805 { CPU_LOG_TB_OP_OPT, "op_opt",
1806 "show micro ops after optimization for each compiled TB" },
1807#endif
1808 { CPU_LOG_INT, "int",
1809 "show interrupts/exceptions in short format" },
1810 { CPU_LOG_EXEC, "exec",
1811 "show trace before each executed TB (lots of logs)" },
1812 { CPU_LOG_TB_CPU, "cpu",
1813 "show CPU state before bloc translation" },
1814#ifdef TARGET_I386
1815 { CPU_LOG_PCALL, "pcall",
1816 "show protected mode far calls/returns/exceptions" },
1817#endif
1818#ifdef DEBUG_IOPORT
1819 { CPU_LOG_IOPORT, "ioport",
1820 "show all i/o ports accesses" },
1821#endif
1822 { 0, NULL, NULL },
1823};
1824
1825static int cmp1(const char *s1, int n, const char *s2)
1826{
1827 if (strlen(s2) != n)
1828 return 0;
1829 return memcmp(s1, s2, n) == 0;
1830}
1831
1832/* takes a comma separated list of log masks. Return 0 if error. */
1833int cpu_str_to_log_mask(const char *str)
1834{
1835 CPULogItem *item;
1836 int mask;
1837 const char *p, *p1;
1838
1839 p = str;
1840 mask = 0;
1841 for(;;) {
1842 p1 = strchr(p, ',');
1843 if (!p1)
1844 p1 = p + strlen(p);
1845 if(cmp1(p,p1-p,"all")) {
1846 for(item = cpu_log_items; item->mask != 0; item++) {
1847 mask |= item->mask;
1848 }
1849 } else {
1850 for(item = cpu_log_items; item->mask != 0; item++) {
1851 if (cmp1(p, p1 - p, item->name))
1852 goto found;
1853 }
1854 return 0;
1855 }
1856 found:
1857 mask |= item->mask;
1858 if (*p1 != ',')
1859 break;
1860 p = p1 + 1;
1861 }
1862 return mask;
1863}
1864#endif /* !VBOX */
1865
1866#ifndef VBOX /* VBOX: we have our own routine. */
1867void cpu_abort(CPUState *env, const char *fmt, ...)
1868{
1869 va_list ap;
1870
1871 va_start(ap, fmt);
1872 fprintf(stderr, "qemu: fatal: ");
1873 vfprintf(stderr, fmt, ap);
1874 fprintf(stderr, "\n");
1875#ifdef TARGET_I386
1876 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1877#else
1878 cpu_dump_state(env, stderr, fprintf, 0);
1879#endif
1880 va_end(ap);
1881 abort();
1882}
1883#endif /* !VBOX */
1884
1885#ifndef VBOX
1886CPUState *cpu_copy(CPUState *env)
1887{
1888 CPUState *new_env = cpu_init(env->cpu_model_str);
1889 /* preserve chaining and index */
1890 CPUState *next_cpu = new_env->next_cpu;
1891 int cpu_index = new_env->cpu_index;
1892 memcpy(new_env, env, sizeof(CPUState));
1893 new_env->next_cpu = next_cpu;
1894 new_env->cpu_index = cpu_index;
1895 return new_env;
1896}
1897#endif
1898
1899#if !defined(CONFIG_USER_ONLY)
1900
1901#ifndef VBOX
1902static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1903#else
1904DECLINLINE(void) tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1905#endif
1906{
1907 unsigned int i;
1908
1909 /* Discard jump cache entries for any tb which might potentially
1910 overlap the flushed page. */
1911 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1912 memset (&env->tb_jmp_cache[i], 0,
1913 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1914
1915 i = tb_jmp_cache_hash_page(addr);
1916 memset (&env->tb_jmp_cache[i], 0,
1917 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1918
1919#ifdef VBOX
1920 /* inform raw mode about TLB page flush */
1921 remR3FlushPage(env, addr);
1922#endif /* VBOX */
1923}
1924
1925static CPUTLBEntry s_cputlb_empty_entry = {
1926 .addr_read = -1,
1927 .addr_write = -1,
1928 .addr_code = -1,
1929 .addend = -1,
1930};
1931
1932/* NOTE: if flush_global is true, also flush global entries (not
1933 implemented yet) */
1934void tlb_flush(CPUState *env, int flush_global)
1935{
1936 int i;
1937
1938#if defined(DEBUG_TLB)
1939 printf("tlb_flush:\n");
1940#endif
1941 /* must reset current TB so that interrupts cannot modify the
1942 links while we are modifying them */
1943 env->current_tb = NULL;
1944
1945 for(i = 0; i < CPU_TLB_SIZE; i++) {
1946 int mmu_idx;
1947 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1948 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1949 }
1950 }
1951
1952 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1953
1954#ifdef VBOX
1955 /* inform raw mode about TLB flush */
1956 remR3FlushTLB(env, flush_global);
1957#endif
1958#ifdef USE_KQEMU
1959 if (env->kqemu_enabled) {
1960 kqemu_flush(env, flush_global);
1961 }
1962#endif
1963 tlb_flush_count++;
1964}
1965
1966#ifndef VBOX
1967static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1968#else
1969DECLINLINE(void) tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1970#endif
1971{
1972 if (addr == (tlb_entry->addr_read &
1973 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1974 addr == (tlb_entry->addr_write &
1975 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1976 addr == (tlb_entry->addr_code &
1977 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1978 tlb_entry->addr_read = -1;
1979 tlb_entry->addr_write = -1;
1980 tlb_entry->addr_code = -1;
1981 }
1982}
1983
1984void tlb_flush_page(CPUState *env, target_ulong addr)
1985{
1986 int i;
1987
1988#if defined(DEBUG_TLB)
1989 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1990#endif
1991 /* must reset current TB so that interrupts cannot modify the
1992 links while we are modifying them */
1993 env->current_tb = NULL;
1994
1995 addr &= TARGET_PAGE_MASK;
1996 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1997 tlb_flush_entry(&env->tlb_table[0][i], addr);
1998 tlb_flush_entry(&env->tlb_table[1][i], addr);
1999#if (NB_MMU_MODES >= 3)
2000 tlb_flush_entry(&env->tlb_table[2][i], addr);
2001#if (NB_MMU_MODES == 4)
2002 tlb_flush_entry(&env->tlb_table[3][i], addr);
2003#endif
2004#endif
2005
2006 tlb_flush_jmp_cache(env, addr);
2007
2008#ifdef USE_KQEMU
2009 if (env->kqemu_enabled) {
2010 kqemu_flush_page(env, addr);
2011 }
2012#endif
2013}
2014
2015/* update the TLBs so that writes to code in the virtual page 'addr'
2016 can be detected */
2017static void tlb_protect_code(ram_addr_t ram_addr)
2018{
2019 cpu_physical_memory_reset_dirty(ram_addr,
2020 ram_addr + TARGET_PAGE_SIZE,
2021 CODE_DIRTY_FLAG);
2022#if defined(VBOX) && defined(REM_MONITOR_CODE_PAGES)
2023 /** @todo Retest this? This function has changed... */
2024 remR3ProtectCode(cpu_single_env, ram_addr);
2025#endif
2026}
2027
2028/* update the TLB so that writes in physical page 'phys_addr' are no longer
2029 tested for self modifying code */
2030static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
2031 target_ulong vaddr)
2032{
2033#ifdef VBOX
2034 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2035#endif
2036 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
2037}
2038
2039#ifndef VBOX
2040static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
2041 unsigned long start, unsigned long length)
2042#else
2043DECLINLINE(void) tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
2044 unsigned long start, unsigned long length)
2045#endif
2046{
2047 unsigned long addr;
2048
2049#ifdef VBOX
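    /* VBox: a start address with the low two bits set presumably stems from a
       page with an access handler (see the addend encoding in tlb_set_page_exec);
       such entries never map plain RAM, so there is nothing to reset here. */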
2050 if (start & 3)
2051 return;
2052#endif
2053 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2054 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2055 if ((addr - start) < length) {
2056 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
2057 }
2058 }
2059}
2060
2061void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
2062 int dirty_flags)
2063{
2064 CPUState *env;
2065 unsigned long length, start1;
2066 int i, mask, len;
2067 uint8_t *p;
2068
2069 start &= TARGET_PAGE_MASK;
2070 end = TARGET_PAGE_ALIGN(end);
2071
2072 length = end - start;
2073 if (length == 0)
2074 return;
2075 len = length >> TARGET_PAGE_BITS;
2076#ifdef USE_KQEMU
2077 /* XXX: should not depend on cpu context */
2078 env = first_cpu;
2079 if (env->kqemu_enabled) {
2080 ram_addr_t addr;
2081 addr = start;
2082 for(i = 0; i < len; i++) {
2083 kqemu_set_notdirty(env, addr);
2084 addr += TARGET_PAGE_SIZE;
2085 }
2086 }
2087#endif
2088 mask = ~dirty_flags;
2089 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
2090#ifdef VBOX
2091 if (RT_LIKELY((start >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2092#endif
2093 for(i = 0; i < len; i++)
2094 p[i] &= mask;
2095
2096 /* we modify the TLB cache so that the dirty bit will be set again
2097 when accessing the range */
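    /* start1 is the host-side address corresponding to 'start' (or the guest
       physical address when REM_PHYS_ADDR_IN_TLB is defined); tlb_reset_dirty_range
       compares it against addr_write + addend, which lives in the same space. */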
2098#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2099 start1 = start;
2100#elif !defined(VBOX)
2101 start1 = start + (unsigned long)phys_ram_base;
2102#else
2103 start1 = (unsigned long)remR3TlbGCPhys2Ptr(first_cpu, start, 1 /*fWritable*/); /** @todo page replacing (sharing or read only) may cause trouble, fix interface/whatever. */
2104#endif
2105 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2106 for(i = 0; i < CPU_TLB_SIZE; i++)
2107 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
2108 for(i = 0; i < CPU_TLB_SIZE; i++)
2109 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
2110#if (NB_MMU_MODES >= 3)
2111 for(i = 0; i < CPU_TLB_SIZE; i++)
2112 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
2113#if (NB_MMU_MODES == 4)
2114 for(i = 0; i < CPU_TLB_SIZE; i++)
2115 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
2116#endif
2117#endif
2118 }
2119}
2120
2121#ifndef VBOX
2122int cpu_physical_memory_set_dirty_tracking(int enable)
2123{
2124 in_migration = enable;
2125 return 0;
2126}
2127
2128int cpu_physical_memory_get_dirty_tracking(void)
2129{
2130 return in_migration;
2131}
2132#endif
2133
2134#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2135DECLINLINE(void) tlb_update_dirty(CPUTLBEntry *tlb_entry, target_phys_addr_t phys_addend)
2136#else
2137static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2138#endif
2139{
2140 ram_addr_t ram_addr;
2141
2142 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2143 /* RAM case */
2144#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2145 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2146#elif !defined(VBOX)
2147 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
2148 tlb_entry->addend - (unsigned long)phys_ram_base;
2149#else
2150 Assert(phys_addend != -1);
2151 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + phys_addend;
2152#endif
2153 if (!cpu_physical_memory_is_dirty(ram_addr)) {
2154 tlb_entry->addr_write |= TLB_NOTDIRTY;
2155 }
2156 }
2157}
2158
2159/* update the TLB according to the current state of the dirty bits */
2160void cpu_tlb_update_dirty(CPUState *env)
2161{
2162 int i;
2163#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2164 for(i = 0; i < CPU_TLB_SIZE; i++)
2165 tlb_update_dirty(&env->tlb_table[0][i], env->phys_addends[0][i]);
2166 for(i = 0; i < CPU_TLB_SIZE; i++)
2167 tlb_update_dirty(&env->tlb_table[1][i], env->phys_addends[1][i]);
2168#if (NB_MMU_MODES >= 3)
2169 for(i = 0; i < CPU_TLB_SIZE; i++)
2170 tlb_update_dirty(&env->tlb_table[2][i], env->phys_addends[2][i]);
2171#if (NB_MMU_MODES == 4)
2172 for(i = 0; i < CPU_TLB_SIZE; i++)
2173 tlb_update_dirty(&env->tlb_table[3][i], env->phys_addends[3][i]);
2174#endif
2175#endif
2176#else /* VBOX */
2177 for(i = 0; i < CPU_TLB_SIZE; i++)
2178 tlb_update_dirty(&env->tlb_table[0][i]);
2179 for(i = 0; i < CPU_TLB_SIZE; i++)
2180 tlb_update_dirty(&env->tlb_table[1][i]);
2181#if (NB_MMU_MODES >= 3)
2182 for(i = 0; i < CPU_TLB_SIZE; i++)
2183 tlb_update_dirty(&env->tlb_table[2][i]);
2184#if (NB_MMU_MODES == 4)
2185 for(i = 0; i < CPU_TLB_SIZE; i++)
2186 tlb_update_dirty(&env->tlb_table[3][i]);
2187#endif
2188#endif
2189#endif /* VBOX */
2190}
2191
2192#ifndef VBOX
2193static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2194#else
2195DECLINLINE(void) tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2196#endif
2197{
2198 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2199 tlb_entry->addr_write = vaddr;
2200}
2201
2202
2203/* update the TLB corresponding to virtual page vaddr and phys addr
2204 addr so that it is no longer dirty */
2205#ifndef VBOX
2206static inline void tlb_set_dirty(CPUState *env,
2207 unsigned long addr, target_ulong vaddr)
2208#else
2209DECLINLINE(void) tlb_set_dirty(CPUState *env,
2210 unsigned long addr, target_ulong vaddr)
2211#endif
2212{
2213 int i;
2214
2215 addr &= TARGET_PAGE_MASK;
2216 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2217 tlb_set_dirty1(&env->tlb_table[0][i], addr);
2218 tlb_set_dirty1(&env->tlb_table[1][i], addr);
2219#if (NB_MMU_MODES >= 3)
2220 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
2221#if (NB_MMU_MODES == 4)
2222 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
2223#endif
2224#endif
2225}
2226
2227/* add a new TLB entry. At most one entry for a given virtual address
2228 is permitted. Return 0 if OK or 2 if the page could not be mapped
2229 (can only happen in non SOFTMMU mode for I/O pages or pages
2230 conflicting with the host address space). */
2231int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2232 target_phys_addr_t paddr, int prot,
2233 int mmu_idx, int is_softmmu)
2234{
2235 PhysPageDesc *p;
2236 unsigned long pd;
2237 unsigned int index;
2238 target_ulong address;
2239 target_ulong code_address;
2240 target_phys_addr_t addend;
2241 int ret;
2242 CPUTLBEntry *te;
2243 int i;
2244 target_phys_addr_t iotlb;
2245#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2246 int read_mods = 0, write_mods = 0, code_mods = 0;
2247#endif
2248
2249 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2250 if (!p) {
2251 pd = IO_MEM_UNASSIGNED;
2252 } else {
2253 pd = p->phys_offset;
2254 }
2255#if defined(DEBUG_TLB)
2256 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2257 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2258#endif
2259
2260 ret = 0;
2261 address = vaddr;
2262 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2263 /* IO memory case (romd handled later) */
2264 address |= TLB_MMIO;
2265 }
2266#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2267 addend = pd & TARGET_PAGE_MASK;
2268#elif !defined(VBOX)
2269 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
2270#else
2271 /** @todo this is racing the phys_page_find call above since it may register
2272 * a new chunk of memory... */
2273 addend = (unsigned long)remR3TlbGCPhys2Ptr(env,
2274 pd & TARGET_PAGE_MASK,
2275 !!(prot & PAGE_WRITE));
2276#endif
2277
2278 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2279 /* Normal RAM. */
2280 iotlb = pd & TARGET_PAGE_MASK;
2281 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2282 iotlb |= IO_MEM_NOTDIRTY;
2283 else
2284 iotlb |= IO_MEM_ROM;
2285 } else {
2286                 /* IO handlers are currently passed a physical address.
2287 It would be nice to pass an offset from the base address
2288 of that region. This would avoid having to special case RAM,
2289 and avoid full address decoding in every device.
2290 We can't use the high bits of pd for this because
2291 IO_MEM_ROMD uses these as a ram address. */
2292 iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
2293 }
2294
2295 code_address = address;
2296
2297#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
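    /* VBox: the low two bits returned by remR3TlbGCPhys2Ptr appear to encode
       access-handler information (bit 1: write handler, bit 0: handler for all
       accesses).  They are turned into TLB_MMIO modifiers here and stripped from
       the addend before it is used. */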
2298 if (addend & 0x3)
2299 {
2300 if (addend & 0x2)
2301 {
2302 /* catch write */
2303 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
2304 write_mods |= TLB_MMIO;
2305 }
2306 else if (addend & 0x1)
2307 {
2308 /* catch all */
2309 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
2310 {
2311 read_mods |= TLB_MMIO;
2312 write_mods |= TLB_MMIO;
2313 code_mods |= TLB_MMIO;
2314 }
2315 }
2316 if ((iotlb & ~TARGET_PAGE_MASK) == 0)
2317 iotlb = env->pVM->rem.s.iHandlerMemType + paddr;
2318 addend &= ~(target_ulong)0x3;
2319 }
2320#endif
2321
2322 /* Make accesses to pages with watchpoints go via the
2323 watchpoint trap routines. */
2324 for (i = 0; i < env->nb_watchpoints; i++) {
2325 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
2326 iotlb = io_mem_watch + paddr;
2327 /* TODO: The memory case can be optimized by not trapping
2328 reads of pages with a write breakpoint. */
2329 address |= TLB_MMIO;
2330 }
2331 }
2332
2333 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2334 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2335 te = &env->tlb_table[mmu_idx][index];
2336 te->addend = addend - vaddr;
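    /* Storing addend - vaddr lets the fast path convert a guest virtual address
       straight into a host pointer: host = vaddr + te->addend. */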
2337 if (prot & PAGE_READ) {
2338 te->addr_read = address;
2339 } else {
2340 te->addr_read = -1;
2341 }
2342
2343 if (prot & PAGE_EXEC) {
2344 te->addr_code = code_address;
2345 } else {
2346 te->addr_code = -1;
2347 }
2348 if (prot & PAGE_WRITE) {
2349 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2350 (pd & IO_MEM_ROMD)) {
2351 /* Write access calls the I/O callback. */
2352 te->addr_write = address | TLB_MMIO;
2353 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2354 !cpu_physical_memory_is_dirty(pd)) {
2355 te->addr_write = address | TLB_NOTDIRTY;
2356 } else {
2357 te->addr_write = address;
2358 }
2359 } else {
2360 te->addr_write = -1;
2361 }
2362
2363#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2364 if (prot & PAGE_READ)
2365 te->addr_read |= read_mods;
2366 if (prot & PAGE_EXEC)
2367 te->addr_code |= code_mods;
2368 if (prot & PAGE_WRITE)
2369 te->addr_write |= write_mods;
2370
2371    env->phys_addends[mmu_idx][index] = (pd & TARGET_PAGE_MASK) - vaddr;
2372#endif
2373
2374#ifdef VBOX
2375 /* inform raw mode about TLB page change */
2376 remR3FlushPage(env, vaddr);
2377#endif
2378 return ret;
2379}
2380#if 0
2381/* called from signal handler: invalidate the code and unprotect the
2382 page. Return TRUE if the fault was successfully handled. */
2383int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
2384{
2385#if !defined(CONFIG_SOFTMMU)
2386 VirtPageDesc *vp;
2387
2388#if defined(DEBUG_TLB)
2389 printf("page_unprotect: addr=0x%08x\n", addr);
2390#endif
2391 addr &= TARGET_PAGE_MASK;
2392
2393 /* if it is not mapped, no need to worry here */
2394 if (addr >= MMAP_AREA_END)
2395 return 0;
2396 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
2397 if (!vp)
2398 return 0;
2399 /* NOTE: in this case, validate_tag is _not_ tested as it
2400 validates only the code TLB */
2401 if (vp->valid_tag != virt_valid_tag)
2402 return 0;
2403 if (!(vp->prot & PAGE_WRITE))
2404 return 0;
2405#if defined(DEBUG_TLB)
2406 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
2407 addr, vp->phys_addr, vp->prot);
2408#endif
2409 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
2410 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
2411 (unsigned long)addr, vp->prot);
2412 /* set the dirty bit */
2413 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
2414 /* flush the code inside */
2415 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
2416 return 1;
2417#elif defined(VBOX)
2418 addr &= TARGET_PAGE_MASK;
2419
2420 /* if it is not mapped, no need to worry here */
2421 if (addr >= MMAP_AREA_END)
2422 return 0;
2423 return 1;
2424#else
2425 return 0;
2426#endif
2427}
2428#endif /* 0 */
2429
2430#else
2431
2432void tlb_flush(CPUState *env, int flush_global)
2433{
2434}
2435
2436void tlb_flush_page(CPUState *env, target_ulong addr)
2437{
2438}
2439
2440int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2441 target_phys_addr_t paddr, int prot,
2442 int mmu_idx, int is_softmmu)
2443{
2444 return 0;
2445}
2446
2447#ifndef VBOX
2448/* dump memory mappings */
2449void page_dump(FILE *f)
2450{
2451 unsigned long start, end;
2452 int i, j, prot, prot1;
2453 PageDesc *p;
2454
2455 fprintf(f, "%-8s %-8s %-8s %s\n",
2456 "start", "end", "size", "prot");
2457 start = -1;
2458 end = -1;
2459 prot = 0;
2460 for(i = 0; i <= L1_SIZE; i++) {
2461 if (i < L1_SIZE)
2462 p = l1_map[i];
2463 else
2464 p = NULL;
2465 for(j = 0;j < L2_SIZE; j++) {
2466 if (!p)
2467 prot1 = 0;
2468 else
2469 prot1 = p[j].flags;
2470 if (prot1 != prot) {
2471 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2472 if (start != -1) {
2473 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2474 start, end, end - start,
2475 prot & PAGE_READ ? 'r' : '-',
2476 prot & PAGE_WRITE ? 'w' : '-',
2477 prot & PAGE_EXEC ? 'x' : '-');
2478 }
2479 if (prot1 != 0)
2480 start = end;
2481 else
2482 start = -1;
2483 prot = prot1;
2484 }
2485 if (!p)
2486 break;
2487 }
2488 }
2489}
2490#endif /* !VBOX */
2491
2492int page_get_flags(target_ulong address)
2493{
2494 PageDesc *p;
2495
2496 p = page_find(address >> TARGET_PAGE_BITS);
2497 if (!p)
2498 return 0;
2499 return p->flags;
2500}
2501
2502/* modify the flags of a page and invalidate the code if
2503   necessary. The flag PAGE_WRITE_ORG is set automatically
2504   depending on PAGE_WRITE */
2505void page_set_flags(target_ulong start, target_ulong end, int flags)
2506{
2507 PageDesc *p;
2508 target_ulong addr;
2509
2510 start = start & TARGET_PAGE_MASK;
2511 end = TARGET_PAGE_ALIGN(end);
2512 if (flags & PAGE_WRITE)
2513 flags |= PAGE_WRITE_ORG;
2514#ifdef VBOX
2515 AssertMsgFailed(("We shouldn't be here, and if we should, we must have an env to do the proper locking!\n"));
2516#endif
2517 spin_lock(&tb_lock);
2518 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2519 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2520 /* if the write protection is set, then we invalidate the code
2521 inside */
2522 if (!(p->flags & PAGE_WRITE) &&
2523 (flags & PAGE_WRITE) &&
2524 p->first_tb) {
2525 tb_invalidate_phys_page(addr, 0, NULL);
2526 }
2527 p->flags = flags;
2528 }
2529 spin_unlock(&tb_lock);
2530}
2531
2532int page_check_range(target_ulong start, target_ulong len, int flags)
2533{
2534 PageDesc *p;
2535 target_ulong end;
2536 target_ulong addr;
2537
2538    end = TARGET_PAGE_ALIGN(start+len); /* must do this before we lose bits in the next step */
2539 start = start & TARGET_PAGE_MASK;
2540
2541 if( end < start )
2542 /* we've wrapped around */
2543 return -1;
2544 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2545 p = page_find(addr >> TARGET_PAGE_BITS);
2546 if( !p )
2547 return -1;
2548 if( !(p->flags & PAGE_VALID) )
2549 return -1;
2550
2551 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2552 return -1;
2553 if (flags & PAGE_WRITE) {
2554 if (!(p->flags & PAGE_WRITE_ORG))
2555 return -1;
2556 /* unprotect the page if it was put read-only because it
2557 contains translated code */
2558 if (!(p->flags & PAGE_WRITE)) {
2559 if (!page_unprotect(addr, 0, NULL))
2560 return -1;
2561 }
2562 return 0;
2563 }
2564 }
2565 return 0;
2566}
2567
2568/* called from signal handler: invalidate the code and unprotect the
2569 page. Return TRUE if the fault was successfully handled. */
2570int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2571{
2572 unsigned int page_index, prot, pindex;
2573 PageDesc *p, *p1;
2574 target_ulong host_start, host_end, addr;
2575
2576 /* Technically this isn't safe inside a signal handler. However we
2577 know this only ever happens in a synchronous SEGV handler, so in
2578 practice it seems to be ok. */
2579 mmap_lock();
2580
2581 host_start = address & qemu_host_page_mask;
2582 page_index = host_start >> TARGET_PAGE_BITS;
2583 p1 = page_find(page_index);
2584 if (!p1) {
2585 mmap_unlock();
2586 return 0;
2587 }
2588 host_end = host_start + qemu_host_page_size;
2589 p = p1;
2590 prot = 0;
2591 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2592 prot |= p->flags;
2593 p++;
2594 }
2595 /* if the page was really writable, then we change its
2596 protection back to writable */
2597 if (prot & PAGE_WRITE_ORG) {
2598 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2599 if (!(p1[pindex].flags & PAGE_WRITE)) {
2600 mprotect((void *)g2h(host_start), qemu_host_page_size,
2601 (prot & PAGE_BITS) | PAGE_WRITE);
2602 p1[pindex].flags |= PAGE_WRITE;
2603 /* and since the content will be modified, we must invalidate
2604 the corresponding translated code. */
2605 tb_invalidate_phys_page(address, pc, puc);
2606#ifdef DEBUG_TB_CHECK
2607 tb_invalidate_check(address);
2608#endif
2609 mmap_unlock();
2610 return 1;
2611 }
2612 }
2613 mmap_unlock();
2614 return 0;
2615}
2616
2617static inline void tlb_set_dirty(CPUState *env,
2618 unsigned long addr, target_ulong vaddr)
2619{
2620}
2621#endif /* defined(CONFIG_USER_ONLY) */
2622
2623#if !defined(CONFIG_USER_ONLY)
2624static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2625 ram_addr_t memory);
2626static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2627 ram_addr_t orig_memory);
2628#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2629 need_subpage) \
2630 do { \
2631 if (addr > start_addr) \
2632 start_addr2 = 0; \
2633 else { \
2634 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2635 if (start_addr2 > 0) \
2636 need_subpage = 1; \
2637 } \
2638 \
2639 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2640 end_addr2 = TARGET_PAGE_SIZE - 1; \
2641 else { \
2642 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2643 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2644 need_subpage = 1; \
2645 } \
2646 } while (0)
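/* CHECK_SUBPAGE computes, for the page at 'addr', the first and last byte
   offsets (start_addr2/end_addr2) that the region being registered actually
   covers, and sets need_subpage when the region does not span the whole page. */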
2647
2648
2649/* register physical memory. 'size' must be a multiple of the target
2650 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2651 io memory page */
2652void cpu_register_physical_memory(target_phys_addr_t start_addr,
2653 unsigned long size,
2654 unsigned long phys_offset)
2655{
2656 target_phys_addr_t addr, end_addr;
2657 PhysPageDesc *p;
2658 CPUState *env;
2659 ram_addr_t orig_size = size;
2660 void *subpage;
2661
2662#ifdef USE_KQEMU
2663 /* XXX: should not depend on cpu context */
2664 env = first_cpu;
2665 if (env->kqemu_enabled) {
2666 kqemu_set_phys_mem(start_addr, size, phys_offset);
2667 }
2668#endif
2669 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2670 end_addr = start_addr + (target_phys_addr_t)size;
2671 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2672 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2673 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2674 ram_addr_t orig_memory = p->phys_offset;
2675 target_phys_addr_t start_addr2, end_addr2;
2676 int need_subpage = 0;
2677
2678 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2679 need_subpage);
2680 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2681 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2682 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2683 &p->phys_offset, orig_memory);
2684 } else {
2685 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2686 >> IO_MEM_SHIFT];
2687 }
2688 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2689 } else {
2690 p->phys_offset = phys_offset;
2691 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2692 (phys_offset & IO_MEM_ROMD))
2693 phys_offset += TARGET_PAGE_SIZE;
2694 }
2695 } else {
2696 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2697 p->phys_offset = phys_offset;
2698 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2699 (phys_offset & IO_MEM_ROMD))
2700 phys_offset += TARGET_PAGE_SIZE;
2701 else {
2702 target_phys_addr_t start_addr2, end_addr2;
2703 int need_subpage = 0;
2704
2705 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2706 end_addr2, need_subpage);
2707
2708 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2709 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2710 &p->phys_offset, IO_MEM_UNASSIGNED);
2711 subpage_register(subpage, start_addr2, end_addr2,
2712 phys_offset);
2713 }
2714 }
2715 }
2716 }
2717 /* since each CPU stores ram addresses in its TLB cache, we must
2718 reset the modified entries */
2719 /* XXX: slow ! */
2720 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2721 tlb_flush(env, 1);
2722 }
2723}
2724
2725/* XXX: temporary until new memory mapping API */
2726uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2727{
2728 PhysPageDesc *p;
2729
2730 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2731 if (!p)
2732 return IO_MEM_UNASSIGNED;
2733 return p->phys_offset;
2734}
2735
2736#ifndef VBOX
2737/* XXX: better than nothing */
2738ram_addr_t qemu_ram_alloc(ram_addr_t size)
2739{
2740 ram_addr_t addr;
2741 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2742 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2743 (uint64_t)size, (uint64_t)phys_ram_size);
2744 abort();
2745 }
2746 addr = phys_ram_alloc_offset;
2747 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2748 return addr;
2749}
2750
2751void qemu_ram_free(ram_addr_t addr)
2752{
2753}
2754#endif
2755
2756
2757static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2758{
2759#ifdef DEBUG_UNASSIGNED
2760 printf("Unassigned mem read 0x%08x\n", (int)addr);
2761#endif
2762#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2763 do_unassigned_access(addr, 0, 0, 0, 1);
2764#endif
2765 return 0;
2766}
2767
2768static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2769{
2770#ifdef DEBUG_UNASSIGNED
2771 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2772#endif
2773#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2774 do_unassigned_access(addr, 0, 0, 0, 2);
2775#endif
2776 return 0;
2777}
2778
2779static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2780{
2781#ifdef DEBUG_UNASSIGNED
2782 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2783#endif
2784#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2785 do_unassigned_access(addr, 0, 0, 0, 4);
2786#endif
2787 return 0;
2788}
2789
2790static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2791{
2792#ifdef DEBUG_UNASSIGNED
2793 printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
2794#endif
2795}
2796
2797static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2798{
2799#ifdef DEBUG_UNASSIGNED
2800 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2801#endif
2802#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2803 do_unassigned_access(addr, 1, 0, 0, 2);
2804#endif
2805}
2806
2807static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2808{
2809#ifdef DEBUG_UNASSIGNED
2810 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2811#endif
2812#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2813 do_unassigned_access(addr, 1, 0, 0, 4);
2814#endif
2815}
2816static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2817 unassigned_mem_readb,
2818 unassigned_mem_readw,
2819 unassigned_mem_readl,
2820};
2821
2822static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2823 unassigned_mem_writeb,
2824 unassigned_mem_writew,
2825 unassigned_mem_writel,
2826};
2827
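/* The notdirty handlers are installed (via TLB_NOTDIRTY) for RAM pages that are
   not marked fully dirty, typically because they may contain translated code.
   A write first invalidates any TBs on the page, performs the store, updates
   the dirty bits, and restores the fast RAM path once the page is fully dirty. */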
2828static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2829{
2830 unsigned long ram_addr;
2831 int dirty_flags;
2832#if defined(VBOX)
2833 ram_addr = addr;
2834#else
2835 ram_addr = addr - (unsigned long)phys_ram_base;
2836#endif
2837#ifdef VBOX
2838 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2839 dirty_flags = 0xff;
2840 else
2841#endif /* VBOX */
2842 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2843 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2844#if !defined(CONFIG_USER_ONLY)
2845 tb_invalidate_phys_page_fast(ram_addr, 1);
2846# ifdef VBOX
2847 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2848 dirty_flags = 0xff;
2849 else
2850# endif /* VBOX */
2851 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2852#endif
2853 }
2854#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2855 remR3PhysWriteU8(addr, val);
2856#else
2857 stb_p((uint8_t *)(long)addr, val);
2858#endif
2859#ifdef USE_KQEMU
2860 if (cpu_single_env->kqemu_enabled &&
2861 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2862 kqemu_modify_page(cpu_single_env, ram_addr);
2863#endif
2864 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2865#ifdef VBOX
2866 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2867#endif /* !VBOX */
2868 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2869 /* we remove the notdirty callback only if the code has been
2870 flushed */
2871 if (dirty_flags == 0xff)
2872 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
2873}
2874
2875static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2876{
2877 unsigned long ram_addr;
2878 int dirty_flags;
2879#if defined(VBOX)
2880 ram_addr = addr;
2881#else
2882 ram_addr = addr - (unsigned long)phys_ram_base;
2883#endif
2884#ifdef VBOX
2885 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2886 dirty_flags = 0xff;
2887 else
2888#endif /* VBOX */
2889 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2890 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2891#if !defined(CONFIG_USER_ONLY)
2892 tb_invalidate_phys_page_fast(ram_addr, 2);
2893# ifdef VBOX
2894 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2895 dirty_flags = 0xff;
2896 else
2897# endif /* VBOX */
2898 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2899#endif
2900 }
2901#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2902 remR3PhysWriteU16(addr, val);
2903#else
2904 stw_p((uint8_t *)(long)addr, val);
2905#endif
2906
2907#ifdef USE_KQEMU
2908 if (cpu_single_env->kqemu_enabled &&
2909 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2910 kqemu_modify_page(cpu_single_env, ram_addr);
2911#endif
2912 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2913#ifdef VBOX
2914 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2915#endif
2916 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2917 /* we remove the notdirty callback only if the code has been
2918 flushed */
2919 if (dirty_flags == 0xff)
2920 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
2921}
2922
2923static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2924{
2925 unsigned long ram_addr;
2926 int dirty_flags;
2927#if defined(VBOX)
2928 ram_addr = addr;
2929#else
2930 ram_addr = addr - (unsigned long)phys_ram_base;
2931#endif
2932#ifdef VBOX
2933 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2934 dirty_flags = 0xff;
2935 else
2936#endif /* VBOX */
2937 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2938 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2939#if !defined(CONFIG_USER_ONLY)
2940 tb_invalidate_phys_page_fast(ram_addr, 4);
2941# ifdef VBOX
2942 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2943 dirty_flags = 0xff;
2944 else
2945# endif /* VBOX */
2946 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2947#endif
2948 }
2949#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2950 remR3PhysWriteU32(addr, val);
2951#else
2952 stl_p((uint8_t *)(long)addr, val);
2953#endif
2954#ifdef USE_KQEMU
2955 if (cpu_single_env->kqemu_enabled &&
2956 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2957 kqemu_modify_page(cpu_single_env, ram_addr);
2958#endif
2959 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2960#ifdef VBOX
2961 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2962#endif
2963 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2964 /* we remove the notdirty callback only if the code has been
2965 flushed */
2966 if (dirty_flags == 0xff)
2967 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
2968}
2969
2970static CPUReadMemoryFunc *error_mem_read[3] = {
2971 NULL, /* never used */
2972 NULL, /* never used */
2973 NULL, /* never used */
2974};
2975
2976static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2977 notdirty_mem_writeb,
2978 notdirty_mem_writew,
2979 notdirty_mem_writel,
2980};
2981
2982
2983/* Generate a debug exception if a watchpoint has been hit. */
2984static void check_watchpoint(int offset, int flags)
2985{
2986 CPUState *env = cpu_single_env;
2987 target_ulong vaddr;
2988 int i;
2989
2990 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2991 for (i = 0; i < env->nb_watchpoints; i++) {
2992 if (vaddr == env->watchpoint[i].vaddr
2993 && (env->watchpoint[i].type & flags)) {
2994 env->watchpoint_hit = i + 1;
2995 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2996 break;
2997 }
2998 }
2999}
3000
3001/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3002 so these check for a hit then pass through to the normal out-of-line
3003 phys routines. */
3004static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
3005{
3006 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
3007 return ldub_phys(addr);
3008}
3009
3010static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
3011{
3012 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
3013 return lduw_phys(addr);
3014}
3015
3016static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
3017{
3018 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
3019 return ldl_phys(addr);
3020}
3021
3022static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
3023 uint32_t val)
3024{
3025 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
3026 stb_phys(addr, val);
3027}
3028
3029static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
3030 uint32_t val)
3031{
3032 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
3033 stw_phys(addr, val);
3034}
3035
3036static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
3037 uint32_t val)
3038{
3039 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
3040 stl_phys(addr, val);
3041}
3042
3043static CPUReadMemoryFunc *watch_mem_read[3] = {
3044 watch_mem_readb,
3045 watch_mem_readw,
3046 watch_mem_readl,
3047};
3048
3049static CPUWriteMemoryFunc *watch_mem_write[3] = {
3050 watch_mem_writeb,
3051 watch_mem_writew,
3052 watch_mem_writel,
3053};
3054
3055static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
3056 unsigned int len)
3057{
3058 uint32_t ret;
3059 unsigned int idx;
3060
3061 idx = SUBPAGE_IDX(addr - mmio->base);
3062#if defined(DEBUG_SUBPAGE)
3063 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3064 mmio, len, addr, idx);
3065#endif
3066 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
3067
3068 return ret;
3069}
3070
3071static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
3072 uint32_t value, unsigned int len)
3073{
3074 unsigned int idx;
3075
3076 idx = SUBPAGE_IDX(addr - mmio->base);
3077#if defined(DEBUG_SUBPAGE)
3078 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
3079 mmio, len, addr, idx, value);
3080#endif
3081 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
3082}
3083
3084static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
3085{
3086#if defined(DEBUG_SUBPAGE)
3087 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3088#endif
3089
3090 return subpage_readlen(opaque, addr, 0);
3091}
3092
3093static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3094 uint32_t value)
3095{
3096#if defined(DEBUG_SUBPAGE)
3097 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3098#endif
3099 subpage_writelen(opaque, addr, value, 0);
3100}
3101
3102static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3103{
3104#if defined(DEBUG_SUBPAGE)
3105 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3106#endif
3107
3108 return subpage_readlen(opaque, addr, 1);
3109}
3110
3111static void subpage_writew (void *opaque, target_phys_addr_t addr,
3112 uint32_t value)
3113{
3114#if defined(DEBUG_SUBPAGE)
3115 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3116#endif
3117 subpage_writelen(opaque, addr, value, 1);
3118}
3119
3120static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3121{
3122#if defined(DEBUG_SUBPAGE)
3123 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3124#endif
3125
3126 return subpage_readlen(opaque, addr, 2);
3127}
3128
3129static void subpage_writel (void *opaque,
3130 target_phys_addr_t addr, uint32_t value)
3131{
3132#if defined(DEBUG_SUBPAGE)
3133 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3134#endif
3135 subpage_writelen(opaque, addr, value, 2);
3136}
3137
3138static CPUReadMemoryFunc *subpage_read[] = {
3139 &subpage_readb,
3140 &subpage_readw,
3141 &subpage_readl,
3142};
3143
3144static CPUWriteMemoryFunc *subpage_write[] = {
3145 &subpage_writeb,
3146 &subpage_writew,
3147 &subpage_writel,
3148};
3149
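/* Route accesses to the byte range [start, end] within this sub-page container
   to the read/write handlers of the io memory region 'memory'. */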
3150static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3151 ram_addr_t memory)
3152{
3153 int idx, eidx;
3154 unsigned int i;
3155
3156 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3157 return -1;
3158 idx = SUBPAGE_IDX(start);
3159 eidx = SUBPAGE_IDX(end);
3160#if defined(DEBUG_SUBPAGE)
3161 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
3162 mmio, start, end, idx, eidx, memory);
3163#endif
3164 memory >>= IO_MEM_SHIFT;
3165 for (; idx <= eidx; idx++) {
3166 for (i = 0; i < 4; i++) {
3167 if (io_mem_read[memory][i]) {
3168 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
3169 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
3170 }
3171 if (io_mem_write[memory][i]) {
3172 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
3173 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
3174 }
3175 }
3176 }
3177
3178 return 0;
3179}
3180
3181static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3182 ram_addr_t orig_memory)
3183{
3184 subpage_t *mmio;
3185 int subpage_memory;
3186
3187 mmio = qemu_mallocz(sizeof(subpage_t));
3188 if (mmio != NULL) {
3189 mmio->base = base;
3190 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
3191#if defined(DEBUG_SUBPAGE)
3192 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3193 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3194#endif
3195 *phys = subpage_memory | IO_MEM_SUBPAGE;
3196 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
3197 }
3198
3199 return mmio;
3200}
3201
3202static void io_mem_init(void)
3203{
3204 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
3205 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3206 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
3207 io_mem_nb = 5;
3208
3209 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
3210 watch_mem_write, NULL);
3211
3212#ifndef VBOX /* VBOX: we do this later when the RAM is allocated. */
3213 /* alloc dirty bits array */
3214 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3215 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
3216#endif /* !VBOX */
3217}
3218
3219/* mem_read and mem_write are arrays of functions containing the
3220 function to access byte (index 0), word (index 1) and dword (index
3221 2). Functions can be omitted with a NULL function pointer. The
3222 registered functions may be modified dynamically later.
3223   If io_index is non-zero, the corresponding io zone is
3224   modified. If it is zero, a new io zone is allocated. The return
3225   value can be used with cpu_register_physical_memory(); -1 is
3226   returned on error. */
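/* Illustrative use only (my_mem_read, my_mem_write, opaque, base_addr and
 * region_size stand for a device model's handler tables, state and mapping;
 * they are not defined here):
 *
 *     int io = cpu_register_io_memory(0, my_mem_read, my_mem_write, opaque);
 *     cpu_register_physical_memory(base_addr, region_size, io);
 */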
3227int cpu_register_io_memory(int io_index,
3228 CPUReadMemoryFunc **mem_read,
3229 CPUWriteMemoryFunc **mem_write,
3230 void *opaque)
3231{
3232 int i, subwidth = 0;
3233
3234 if (io_index <= 0) {
3235 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
3236 return -1;
3237 io_index = io_mem_nb++;
3238 } else {
3239 if (io_index >= IO_MEM_NB_ENTRIES)
3240 return -1;
3241 }
3242
3243 for(i = 0;i < 3; i++) {
3244 if (!mem_read[i] || !mem_write[i])
3245 subwidth = IO_MEM_SUBWIDTH;
3246 io_mem_read[io_index][i] = mem_read[i];
3247 io_mem_write[io_index][i] = mem_write[i];
3248 }
3249 io_mem_opaque[io_index] = opaque;
3250 return (io_index << IO_MEM_SHIFT) | subwidth;
3251}
3252
3253CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
3254{
3255 return io_mem_write[io_index >> IO_MEM_SHIFT];
3256}
3257
3258CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
3259{
3260 return io_mem_read[io_index >> IO_MEM_SHIFT];
3261}
3262#endif /* !defined(CONFIG_USER_ONLY) */
3263
3264/* physical memory access (slow version, mainly for debug) */
3265#if defined(CONFIG_USER_ONLY)
3266void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3267 int len, int is_write)
3268{
3269 int l, flags;
3270 target_ulong page;
3271 void * p;
3272
3273 while (len > 0) {
3274 page = addr & TARGET_PAGE_MASK;
3275 l = (page + TARGET_PAGE_SIZE) - addr;
3276 if (l > len)
3277 l = len;
3278 flags = page_get_flags(page);
3279 if (!(flags & PAGE_VALID))
3280 return;
3281 if (is_write) {
3282 if (!(flags & PAGE_WRITE))
3283 return;
3284 /* XXX: this code should not depend on lock_user */
3285 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3286 /* FIXME - should this return an error rather than just fail? */
3287 return;
3288            memcpy(p, buf, l);      /* only 'l' bytes of this page are locked */
3289            unlock_user(p, addr, l);
3290 } else {
3291 if (!(flags & PAGE_READ))
3292 return;
3293 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3294 /* FIXME - should this return an error rather than just fail? */
3295 return;
3296            memcpy(buf, p, l);      /* only 'l' bytes of this page are locked */
3297 unlock_user(p, addr, 0);
3298 }
3299 len -= l;
3300 buf += l;
3301 addr += l;
3302 }
3303}
3304
3305#else
3306void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3307 int len, int is_write)
3308{
3309 int l, io_index;
3310 uint8_t *ptr;
3311 uint32_t val;
3312 target_phys_addr_t page;
3313 unsigned long pd;
3314 PhysPageDesc *p;
3315
3316 while (len > 0) {
3317 page = addr & TARGET_PAGE_MASK;
3318 l = (page + TARGET_PAGE_SIZE) - addr;
3319 if (l > len)
3320 l = len;
3321 p = phys_page_find(page >> TARGET_PAGE_BITS);
3322 if (!p) {
3323 pd = IO_MEM_UNASSIGNED;
3324 } else {
3325 pd = p->phys_offset;
3326 }
3327
3328 if (is_write) {
3329 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3330 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3331 /* XXX: could force cpu_single_env to NULL to avoid
3332 potential bugs */
3333 if (l >= 4 && ((addr & 3) == 0)) {
3334 /* 32 bit write access */
3335#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3336 val = ldl_p(buf);
3337#else
3338 val = *(const uint32_t *)buf;
3339#endif
3340 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3341 l = 4;
3342 } else if (l >= 2 && ((addr & 1) == 0)) {
3343 /* 16 bit write access */
3344#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3345 val = lduw_p(buf);
3346#else
3347 val = *(const uint16_t *)buf;
3348#endif
3349 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
3350 l = 2;
3351 } else {
3352 /* 8 bit write access */
3353#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3354 val = ldub_p(buf);
3355#else
3356 val = *(const uint8_t *)buf;
3357#endif
3358 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
3359 l = 1;
3360 }
3361 } else {
3362 unsigned long addr1;
3363 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3364 /* RAM case */
3365#ifdef VBOX
3366 remR3PhysWrite(addr1, buf, l); NOREF(ptr);
3367#else
3368 ptr = phys_ram_base + addr1;
3369 memcpy(ptr, buf, l);
3370#endif
3371 if (!cpu_physical_memory_is_dirty(addr1)) {
3372 /* invalidate code */
3373 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3374 /* set dirty bit */
3375#ifdef VBOX
3376 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
3377#endif
3378 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3379 (0xff & ~CODE_DIRTY_FLAG);
3380 }
3381 }
3382 } else {
3383 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3384 !(pd & IO_MEM_ROMD)) {
3385 /* I/O case */
3386 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3387 if (l >= 4 && ((addr & 3) == 0)) {
3388 /* 32 bit read access */
3389 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3390#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3391 stl_p(buf, val);
3392#else
3393 *(uint32_t *)buf = val;
3394#endif
3395 l = 4;
3396 } else if (l >= 2 && ((addr & 1) == 0)) {
3397 /* 16 bit read access */
3398 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
3399#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3400 stw_p(buf, val);
3401#else
3402 *(uint16_t *)buf = val;
3403#endif
3404 l = 2;
3405 } else {
3406 /* 8 bit read access */
3407 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
3408#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3409 stb_p(buf, val);
3410#else
3411 *(uint8_t *)buf = val;
3412#endif
3413 l = 1;
3414 }
3415 } else {
3416 /* RAM case */
3417#ifdef VBOX
3418 remR3PhysRead((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), buf, l); NOREF(ptr);
3419#else
3420 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3421 (addr & ~TARGET_PAGE_MASK);
3422 memcpy(buf, ptr, l);
3423#endif
3424 }
3425 }
3426 len -= l;
3427 buf += l;
3428 addr += l;
3429 }
3430}
3431
3432#ifndef VBOX
3433/* used for ROM loading : can write in RAM and ROM */
3434void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3435 const uint8_t *buf, int len)
3436{
3437 int l;
3438 uint8_t *ptr;
3439 target_phys_addr_t page;
3440 unsigned long pd;
3441 PhysPageDesc *p;
3442
3443 while (len > 0) {
3444 page = addr & TARGET_PAGE_MASK;
3445 l = (page + TARGET_PAGE_SIZE) - addr;
3446 if (l > len)
3447 l = len;
3448 p = phys_page_find(page >> TARGET_PAGE_BITS);
3449 if (!p) {
3450 pd = IO_MEM_UNASSIGNED;
3451 } else {
3452 pd = p->phys_offset;
3453 }
3454
3455 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3456 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3457 !(pd & IO_MEM_ROMD)) {
3458 /* do nothing */
3459 } else {
3460 unsigned long addr1;
3461 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3462 /* ROM/RAM case */
3463 ptr = phys_ram_base + addr1;
3464 memcpy(ptr, buf, l);
3465 }
3466 len -= l;
3467 buf += l;
3468 addr += l;
3469 }
3470}
3471#endif /* !VBOX */
3472
3473
3474/* warning: addr must be aligned */
3475uint32_t ldl_phys(target_phys_addr_t addr)
3476{
3477 int io_index;
3478 uint8_t *ptr;
3479 uint32_t val;
3480 unsigned long pd;
3481 PhysPageDesc *p;
3482
3483 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3484 if (!p) {
3485 pd = IO_MEM_UNASSIGNED;
3486 } else {
3487 pd = p->phys_offset;
3488 }
3489
3490 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3491 !(pd & IO_MEM_ROMD)) {
3492 /* I/O case */
3493 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3494 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3495 } else {
3496 /* RAM case */
3497#ifndef VBOX
3498 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3499 (addr & ~TARGET_PAGE_MASK);
3500 val = ldl_p(ptr);
3501#else
3502 val = remR3PhysReadU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
3503#endif
3504 }
3505 return val;
3506}
3507
3508/* warning: addr must be aligned */
3509uint64_t ldq_phys(target_phys_addr_t addr)
3510{
3511 int io_index;
3512 uint8_t *ptr;
3513 uint64_t val;
3514 unsigned long pd;
3515 PhysPageDesc *p;
3516
3517 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3518 if (!p) {
3519 pd = IO_MEM_UNASSIGNED;
3520 } else {
3521 pd = p->phys_offset;
3522 }
3523
3524 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3525 !(pd & IO_MEM_ROMD)) {
3526 /* I/O case */
3527 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3528#ifdef TARGET_WORDS_BIGENDIAN
3529 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3530 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3531#else
3532 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3533 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3534#endif
3535 } else {
3536 /* RAM case */
3537#ifndef VBOX
3538 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3539 (addr & ~TARGET_PAGE_MASK);
3540 val = ldq_p(ptr);
3541#else
3542 val = remR3PhysReadU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
3543#endif
3544 }
3545 return val;
3546}
3547
3548/* XXX: optimize */
3549uint32_t ldub_phys(target_phys_addr_t addr)
3550{
3551 uint8_t val;
3552 cpu_physical_memory_read(addr, &val, 1);
3553 return val;
3554}
3555
3556/* XXX: optimize */
3557uint32_t lduw_phys(target_phys_addr_t addr)
3558{
3559 uint16_t val;
3560 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3561 return tswap16(val);
3562}
3563
3564/* warning: addr must be aligned. The ram page is not marked as dirty
3565   and the code inside is not invalidated. It is useful if the dirty
3566   bits are used to track modified PTEs */
3567void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3568{
3569 int io_index;
3570 uint8_t *ptr;
3571 unsigned long pd;
3572 PhysPageDesc *p;
3573
3574 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3575 if (!p) {
3576 pd = IO_MEM_UNASSIGNED;
3577 } else {
3578 pd = p->phys_offset;
3579 }
3580
3581 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3582 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3583 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3584 } else {
3585#ifndef VBOX
3586 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3587 (addr & ~TARGET_PAGE_MASK);
3588 stl_p(ptr, val);
3589#else
3590 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3591#endif
3592#ifndef VBOX
3593        if (unlikely(in_migration)) {
            unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3594            if (!cpu_physical_memory_is_dirty(addr1)) {
3595 /* invalidate code */
3596 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3597 /* set dirty bit */
3598 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3599 (0xff & ~CODE_DIRTY_FLAG);
3600 }
3601 }
3602#endif
3603 }
3604}
3605
3606void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3607{
3608 int io_index;
3609 uint8_t *ptr;
3610 unsigned long pd;
3611 PhysPageDesc *p;
3612
3613 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3614 if (!p) {
3615 pd = IO_MEM_UNASSIGNED;
3616 } else {
3617 pd = p->phys_offset;
3618 }
3619
3620 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3621 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3622#ifdef TARGET_WORDS_BIGENDIAN
3623 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3624 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3625#else
3626 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3627 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3628#endif
3629 } else {
3630#ifndef VBOX
3631 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3632 (addr & ~TARGET_PAGE_MASK);
3633 stq_p(ptr, val);
3634#else
3635 remR3PhysWriteU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3636#endif
3637 }
3638}
3639
3640
3641/* warning: addr must be aligned */
3642void stl_phys(target_phys_addr_t addr, uint32_t val)
3643{
3644 int io_index;
3645 uint8_t *ptr;
3646 unsigned long pd;
3647 PhysPageDesc *p;
3648
3649 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3650 if (!p) {
3651 pd = IO_MEM_UNASSIGNED;
3652 } else {
3653 pd = p->phys_offset;
3654 }
3655
3656 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3657 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3658 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3659 } else {
3660 unsigned long addr1;
3661 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3662 /* RAM case */
3663#ifndef VBOX
3664 ptr = phys_ram_base + addr1;
3665 stl_p(ptr, val);
3666#else
3667 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3668#endif
3669 if (!cpu_physical_memory_is_dirty(addr1)) {
3670 /* invalidate code */
3671 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3672 /* set dirty bit */
3673#ifdef VBOX
3674 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
3675#endif
3676 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3677 (0xff & ~CODE_DIRTY_FLAG);
3678 }
3679 }
3680}
3681
3682/* XXX: optimize */
3683void stb_phys(target_phys_addr_t addr, uint32_t val)
3684{
3685 uint8_t v = val;
3686 cpu_physical_memory_write(addr, &v, 1);
3687}
3688
3689/* XXX: optimize */
3690void stw_phys(target_phys_addr_t addr, uint32_t val)
3691{
3692 uint16_t v = tswap16(val);
3693 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3694}
3695
3696/* XXX: optimize */
3697void stq_phys(target_phys_addr_t addr, uint64_t val)
3698{
3699 val = tswap64(val);
3700 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3701}
3702
3703#endif
3704
3705/* virtual memory access for debug */
3706int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3707 uint8_t *buf, int len, int is_write)
3708{
3709 int l;
3710 target_ulong page, phys_addr;
3711
3712 while (len > 0) {
3713 page = addr & TARGET_PAGE_MASK;
3714 phys_addr = cpu_get_phys_page_debug(env, page);
3715 /* if no physical page mapped, return an error */
3716 if (phys_addr == -1)
3717 return -1;
3718 l = (page + TARGET_PAGE_SIZE) - addr;
3719 if (l > len)
3720 l = len;
3721 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3722 buf, l, is_write);
3723 len -= l;
3724 buf += l;
3725 addr += l;
3726 }
3727 return 0;
3728}
3729
3730/* in deterministic execution mode, instructions doing device I/Os
3731 must be at the end of the TB */
3732void cpu_io_recompile(CPUState *env, void *retaddr)
3733{
3734 TranslationBlock *tb;
3735 uint32_t n, cflags;
3736 target_ulong pc, cs_base;
3737 uint64_t flags;
3738
3739 tb = tb_find_pc((unsigned long)retaddr);
3740 if (!tb) {
3741 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3742 retaddr);
3743 }
3744 n = env->icount_decr.u16.low + tb->icount;
3745 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3746 /* Calculate how many instructions had been executed before the fault
3747 occurred. */
3748 n = n - env->icount_decr.u16.low;
3749 /* Generate a new TB ending on the I/O insn. */
3750 n++;
3751 /* On MIPS and SH, delay slot instructions can only be restarted if
3752 they were already the first instruction in the TB. If this is not
3753 the first instruction in a TB then re-execute the preceding
3754 branch. */
3755#if defined(TARGET_MIPS)
3756 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3757 env->active_tc.PC -= 4;
3758 env->icount_decr.u16.low++;
3759 env->hflags &= ~MIPS_HFLAG_BMASK;
3760 }
3761#elif defined(TARGET_SH4)
3762 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3763 && n > 1) {
3764 env->pc -= 2;
3765 env->icount_decr.u16.low++;
3766 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3767 }
3768#endif
3769 /* This should never happen. */
3770 if (n > CF_COUNT_MASK)
3771 cpu_abort(env, "TB too big during recompile");
3772
3773 cflags = n | CF_LAST_IO;
3774 pc = tb->pc;
3775 cs_base = tb->cs_base;
3776 flags = tb->flags;
3777 tb_phys_invalidate(tb, -1);
3778 /* FIXME: In theory this could raise an exception. In practice
3779 we have already translated the block once so it's probably ok. */
3780 tb_gen_code(env, pc, cs_base, flags, cflags);
3781 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3782 the first in the TB) then we end up generating a whole new TB and
3783 repeating the fault, which is horribly inefficient.
3784 Better would be to execute just this insn uncached, or generate a
3785 second new TB. */
3786 cpu_resume_from_signal(env, NULL);
3787}
3788
3789#ifndef VBOX
3790void dump_exec_info(FILE *f,
3791 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3792{
3793 int i, target_code_size, max_target_code_size;
3794 int direct_jmp_count, direct_jmp2_count, cross_page;
3795 TranslationBlock *tb;
3796
3797 target_code_size = 0;
3798 max_target_code_size = 0;
3799 cross_page = 0;
3800 direct_jmp_count = 0;
3801 direct_jmp2_count = 0;
3802 for(i = 0; i < nb_tbs; i++) {
3803 tb = &tbs[i];
3804 target_code_size += tb->size;
3805 if (tb->size > max_target_code_size)
3806 max_target_code_size = tb->size;
3807 if (tb->page_addr[1] != -1)
3808 cross_page++;
3809 if (tb->tb_next_offset[0] != 0xffff) {
3810 direct_jmp_count++;
3811 if (tb->tb_next_offset[1] != 0xffff) {
3812 direct_jmp2_count++;
3813 }
3814 }
3815 }
3816 /* XXX: avoid using doubles ? */
3817 cpu_fprintf(f, "Translation buffer state:\n");
3818 cpu_fprintf(f, "gen code size %ld/%ld\n",
3819 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3820 cpu_fprintf(f, "TB count %d/%d\n",
3821 nb_tbs, code_gen_max_blocks);
3822 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3823 nb_tbs ? target_code_size / nb_tbs : 0,
3824 max_target_code_size);
3825 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3826 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3827 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3828 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3829 cross_page,
3830 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3831 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3832 direct_jmp_count,
3833 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3834 direct_jmp2_count,
3835 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3836 cpu_fprintf(f, "\nStatistics:\n");
3837 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3838 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3839 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3840 tcg_dump_info(f, cpu_fprintf);
3841}
3842#endif /* !VBOX */
3843
3844#if !defined(CONFIG_USER_ONLY)
3845
3846#define MMUSUFFIX _cmmu
3847#define GETPC() NULL
3848#define env cpu_single_env
3849#define SOFTMMU_CODE_ACCESS
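/* The includes below instantiate the slow-path code-fetch (_cmmu) helpers for
   1, 2, 4 and 8 byte accesses. */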
3850
3851#define SHIFT 0
3852#include "softmmu_template.h"
3853
3854#define SHIFT 1
3855#include "softmmu_template.h"
3856
3857#define SHIFT 2
3858#include "softmmu_template.h"
3859
3860#define SHIFT 3
3861#include "softmmu_template.h"
3862
3863#undef env
3864
3865#endif