VirtualBox

source: vbox/trunk/src/recompiler_new/exec.c@ 18650

Last change on this file since 18650 was 18611, checked in by vboxsync, 16 years ago

REM: allocate the right map.

  • Property svn:eol-style set to native
File size: 115.1 KB
1/*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29#include "config.h"
30#ifndef VBOX
31#ifdef _WIN32
32#include <windows.h>
33#else
34#include <sys/types.h>
35#include <sys/mman.h>
36#endif
37#include <stdlib.h>
38#include <stdio.h>
39#include <stdarg.h>
40#include <string.h>
41#include <errno.h>
42#include <unistd.h>
43#include <inttypes.h>
44#else /* VBOX */
45# include <stdlib.h>
46# include <stdio.h>
47# include <iprt/alloc.h>
48# include <iprt/string.h>
49# include <iprt/param.h>
50# include <VBox/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */
51#endif /* VBOX */
52
53#include "cpu.h"
54#include "exec-all.h"
55#if defined(CONFIG_USER_ONLY)
56#include <qemu.h>
57#endif
58
59//#define DEBUG_TB_INVALIDATE
60//#define DEBUG_FLUSH
61//#define DEBUG_TLB
62//#define DEBUG_UNASSIGNED
63
64/* make various TB consistency checks */
65//#define DEBUG_TB_CHECK
66//#define DEBUG_TLB_CHECK
67
68#if !defined(CONFIG_USER_ONLY)
69/* TB consistency checks only implemented for usermode emulation. */
70#undef DEBUG_TB_CHECK
71#endif
72
73#define SMC_BITMAP_USE_THRESHOLD 10
74
75#define MMAP_AREA_START 0x00000000
76#define MMAP_AREA_END 0xa8000000
77
78#if defined(TARGET_SPARC64)
79#define TARGET_PHYS_ADDR_SPACE_BITS 41
80#elif defined(TARGET_SPARC)
81#define TARGET_PHYS_ADDR_SPACE_BITS 36
82#elif defined(TARGET_ALPHA)
83#define TARGET_PHYS_ADDR_SPACE_BITS 42
84#define TARGET_VIRT_ADDR_SPACE_BITS 42
85#elif defined(TARGET_PPC64)
86#define TARGET_PHYS_ADDR_SPACE_BITS 42
87#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
88#define TARGET_PHYS_ADDR_SPACE_BITS 42
89#elif defined(TARGET_I386) && !defined(USE_KQEMU)
90#define TARGET_PHYS_ADDR_SPACE_BITS 36
91#else
92/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
93#define TARGET_PHYS_ADDR_SPACE_BITS 32
94#endif
95
96static TranslationBlock *tbs;
97int code_gen_max_blocks;
98TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
99static int nb_tbs;
100/* any access to the tbs or the page table must use this lock */
101spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
102
103#ifndef VBOX
104#if defined(__arm__) || defined(__sparc_v9__)
105/* The prologue must be reachable with a direct jump. ARM and Sparc64
106 have limited branch ranges (possibly also PPC) so place it in a
107 section close to the code segment. */
108#define code_gen_section \
109 __attribute__((__section__(".gen_code"))) \
110 __attribute__((aligned (32)))
111#else
112#define code_gen_section \
113 __attribute__((aligned (32)))
114#endif
115uint8_t code_gen_prologue[1024] code_gen_section;
116
117#else /* VBOX */
118extern uint8_t* code_gen_prologue;
119#endif /* VBOX */
120
121static uint8_t *code_gen_buffer;
122static unsigned long code_gen_buffer_size;
123/* threshold to flush the translated code buffer */
124static unsigned long code_gen_buffer_max_size;
125uint8_t *code_gen_ptr;
126
127#ifndef VBOX
128#if !defined(CONFIG_USER_ONLY)
129ram_addr_t phys_ram_size;
130int phys_ram_fd;
131uint8_t *phys_ram_base;
132uint8_t *phys_ram_dirty;
133static int in_migration;
134static ram_addr_t phys_ram_alloc_offset = 0;
135#endif
136#else /* VBOX */
137RTGCPHYS phys_ram_size;
138/* we have memory ranges (the high PC-BIOS mapping) which
139 cause some pages to fall outside the dirty map here. */
140RTGCPHYS phys_ram_dirty_size;
141#endif /* VBOX */
142#if !defined(VBOX)
143uint8_t *phys_ram_base;
144#endif
145uint8_t *phys_ram_dirty;
146
147CPUState *first_cpu;
148/* current CPU in the current thread. It is only valid inside
149 cpu_exec() */
150CPUState *cpu_single_env;
151/* 0 = Do not count executed instructions.
152 1 = Precise instruction counting.
153 2 = Adaptive rate instruction counting. */
154int use_icount = 0;
155/* Current instruction counter. While executing translated code this may
156 include some instructions that have not yet been executed. */
157int64_t qemu_icount;
158
159typedef struct PageDesc {
160 /* list of TBs intersecting this ram page */
161 TranslationBlock *first_tb;
162 /* in order to optimize self modifying code, we count the number
163 of lookups we do to a given page to use a bitmap */
164 unsigned int code_write_count;
165 uint8_t *code_bitmap;
166#if defined(CONFIG_USER_ONLY)
167 unsigned long flags;
168#endif
169} PageDesc;
170
171typedef struct PhysPageDesc {
172 /* offset in host memory of the page + io_index in the low 12 bits */
173 ram_addr_t phys_offset;
174} PhysPageDesc;
175
176#define L2_BITS 10
177#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
178/* XXX: this is a temporary hack for alpha target.
179 * In the future, this is to be replaced by a multi-level table
180 * to actually be able to handle the complete 64 bits address space.
181 */
182#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
183#else
184#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
185#endif
186#ifdef VBOX
187#define L0_BITS (TARGET_PHYS_ADDR_SPACE_BITS - 32)
188#endif
189
190#ifdef VBOX
191#define L0_SIZE (1 << L0_BITS)
192#endif
193#define L1_SIZE (1 << L1_BITS)
194#define L2_SIZE (1 << L2_BITS)
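/*
 * Worked example of the split (a sketch; the concrete numbers assume
 * TARGET_PAGE_BITS == 12 and, for the VBox level-0 table, a 36-bit
 * physical address space as on plain i386):
 *
 *   L2_BITS = 10                      -> L2_SIZE = 1024 leaf entries
 *   L1_BITS = 32 - 10 - 12 = 10       -> L1_SIZE = 1024 leaf pointers
 *   L0_BITS = 36 - 32 = 4 (VBox only) -> L0_SIZE = 16 level-1 tables
 *
 * A page index is then decomposed exactly as the lookup helpers below do:
 *
 *   i0 = index >> (L1_BITS + L2_BITS);          level 0 (VBox only)
 *   i1 = (index >> L2_BITS) & (L1_SIZE - 1);    level 1
 *   i2 = index & (L2_SIZE - 1);                 level 2 (PageDesc slot)
 */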
195
196static void io_mem_init(void);
197
198unsigned long qemu_real_host_page_size;
199unsigned long qemu_host_page_bits;
200unsigned long qemu_host_page_size;
201unsigned long qemu_host_page_mask;
202
203/* XXX: for system emulation, it could just be an array */
204#ifndef VBOX
205static PageDesc *l1_map[L1_SIZE];
206static PhysPageDesc **l1_phys_map;
207#else
208static unsigned l0_map_max_used = 0;
209static PageDesc **l0_map[L0_SIZE];
210static void **l0_phys_map[L0_SIZE];
211#endif
212
213#if !defined(CONFIG_USER_ONLY)
214static void io_mem_init(void);
215
216/* io memory support */
217CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
218CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
219void *io_mem_opaque[IO_MEM_NB_ENTRIES];
220static int io_mem_nb;
221static int io_mem_watch;
222#endif
223
224#ifndef VBOX
225/* log support */
226static const char *logfilename = "/tmp/qemu.log";
227#endif /* !VBOX */
228FILE *logfile;
229int loglevel;
230#ifndef VBOX
231static int log_append = 0;
232#endif
233
234/* statistics */
235#ifndef VBOX
236static int tlb_flush_count;
237static int tb_flush_count;
238static int tb_phys_invalidate_count;
239#else /* VBOX - Resettable U32 stats, see VBoxRecompiler.c. */
240uint32_t tlb_flush_count;
241uint32_t tb_flush_count;
242uint32_t tb_phys_invalidate_count;
243#endif /* VBOX */
244
245#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
246typedef struct subpage_t {
247 target_phys_addr_t base;
248 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
249 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
250 void *opaque[TARGET_PAGE_SIZE][2][4];
251} subpage_t;
252
253
254#ifndef VBOX
255#ifdef _WIN32
256static void map_exec(void *addr, long size)
257{
258 DWORD old_protect;
259 VirtualProtect(addr, size,
260 PAGE_EXECUTE_READWRITE, &old_protect);
261
262}
263#else
264static void map_exec(void *addr, long size)
265{
266 unsigned long start, end, page_size;
267
268 page_size = getpagesize();
269 start = (unsigned long)addr;
270 start &= ~(page_size - 1);
271
272 end = (unsigned long)addr + size;
273 end += page_size - 1;
274 end &= ~(page_size - 1);
275
276 mprotect((void *)start, end - start,
277 PROT_READ | PROT_WRITE | PROT_EXEC);
278}
279#endif
280#else // VBOX
281static void map_exec(void *addr, long size)
282{
283 RTMemProtect(addr, size,
284 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
285}
286#endif
287
288static void page_init(void)
289{
290 /* NOTE: we can always suppose that qemu_host_page_size >=
291 TARGET_PAGE_SIZE */
292#ifdef VBOX
293 RTMemProtect(code_gen_buffer, sizeof(code_gen_buffer),
294 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
295 qemu_real_host_page_size = PAGE_SIZE;
296#else /* !VBOX */
297#ifdef _WIN32
298 {
299 SYSTEM_INFO system_info;
300 DWORD old_protect;
301
302 GetSystemInfo(&system_info);
303 qemu_real_host_page_size = system_info.dwPageSize;
304 }
305#else
306 qemu_real_host_page_size = getpagesize();
307#endif
308#endif /* !VBOX */
309
310 if (qemu_host_page_size == 0)
311 qemu_host_page_size = qemu_real_host_page_size;
312 if (qemu_host_page_size < TARGET_PAGE_SIZE)
313 qemu_host_page_size = TARGET_PAGE_SIZE;
314 qemu_host_page_bits = 0;
315#ifndef VBOX
316 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
317#else
318 while ((1 << qemu_host_page_bits) < (int)qemu_host_page_size)
319#endif
320 qemu_host_page_bits++;
321 qemu_host_page_mask = ~(qemu_host_page_size - 1);
322#ifndef VBOX
323 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
324 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
325#endif
326#ifdef VBOX
327 /* We use other means to set the reserved bit on our pages. */
328#else
329#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
330 {
331 long long startaddr, endaddr;
332 FILE *f;
333 int n;
334
335 mmap_lock();
336 last_brk = (unsigned long)sbrk(0);
337 f = fopen("/proc/self/maps", "r");
338 if (f) {
339 do {
340 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
341 if (n == 2) {
342 startaddr = MIN(startaddr,
343 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
344 endaddr = MIN(endaddr,
345 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
346 page_set_flags(startaddr & TARGET_PAGE_MASK,
347 TARGET_PAGE_ALIGN(endaddr),
348 PAGE_RESERVED);
349 }
350 } while (!feof(f));
351 fclose(f);
352 }
353 mmap_unlock();
354 }
355#endif
356#endif
357}
358
359#ifndef VBOX
360static inline PageDesc **page_l1_map(target_ulong index)
361#else
362DECLINLINE(PageDesc **) page_l1_map(target_ulong index)
363#endif
364{
365#ifndef VBOX
366#if TARGET_LONG_BITS > 32
367 /* Host memory outside guest VM. For 32-bit targets we have already
368 excluded high addresses. */
369 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
370 return NULL;
371#endif
372 return &l1_map[index >> L2_BITS];
373#else /* VBOX */
374 PageDesc **l1_map;
375 AssertMsgReturn(index < (target_ulong)L2_SIZE * L1_SIZE * L0_SIZE,
376 ("index=%RGp >= %RGp; L1_SIZE=%#x L2_SIZE=%#x L0_SIZE=%#x\n",
377 (RTGCPHYS)index, (RTGCPHYS)L2_SIZE * L1_SIZE, L1_SIZE, L2_SIZE, L0_SIZE),
378 NULL);
379 l1_map = l0_map[index >> (L1_BITS + L2_BITS)];
380 if (RT_UNLIKELY(!l1_map))
381 {
382 unsigned i0 = index >> (L1_BITS + L2_BITS);
383 l0_map[i0] = l1_map = qemu_mallocz(sizeof(PageDesc *) * L1_SIZE);
384 if (RT_UNLIKELY(!l1_map))
385 return NULL;
386 if (i0 >= l0_map_max_used)
387 l0_map_max_used = i0 + 1;
388 }
389 return &l1_map[(index >> L2_BITS) & (L1_SIZE - 1)];
390#endif /* VBOX */
391}
392
393#ifndef VBOX
394static inline PageDesc *page_find_alloc(target_ulong index)
395#else
396DECLINLINE(PageDesc *) page_find_alloc(target_ulong index)
397#endif
398{
399 PageDesc **lp, *p;
400 lp = page_l1_map(index);
401 if (!lp)
402 return NULL;
403
404 p = *lp;
405 if (!p) {
406 /* allocate if not found */
407#if defined(CONFIG_USER_ONLY)
408 unsigned long addr;
409 size_t len = sizeof(PageDesc) * L2_SIZE;
410 /* Don't use qemu_malloc because it may recurse. */
411 p = mmap(0, len, PROT_READ | PROT_WRITE,
412 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
413 *lp = p;
414 addr = h2g(p);
415 if (addr == (target_ulong)addr) {
416 page_set_flags(addr & TARGET_PAGE_MASK,
417 TARGET_PAGE_ALIGN(addr + len),
418 PAGE_RESERVED);
419 }
420#else
421 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
422 *lp = p;
423#endif
424 }
425 return p + (index & (L2_SIZE - 1));
426}
427
428#ifndef VBOX
429static inline PageDesc *page_find(target_ulong index)
430#else
431DECLINLINE(PageDesc *) page_find(target_ulong index)
432#endif
433{
434 PageDesc **lp, *p;
435 lp = page_l1_map(index);
436 if (!lp)
437 return NULL;
438
439 p = *lp;
440 if (!p)
441 return 0;
442 return p + (index & (L2_SIZE - 1));
443}
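/* Usage sketch (an illustration, mirroring the callers further down in this
 * file): callers always shift a target address down to a page index first,
 * e.g.
 *
 *   PageDesc *p = page_find(addr >> TARGET_PAGE_BITS);
 *   if (p && p->first_tb) {
 *       ... at least one translated block intersects this guest page ...
 *   }
 *
 * page_find() never allocates; page_find_alloc() is used on the TB
 * registration path (tb_alloc_page) where the descriptor must exist. */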
444
445static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
446{
447 void **lp, **p;
448 PhysPageDesc *pd;
449
450#ifndef VBOX
451 p = (void **)l1_phys_map;
452#if TARGET_PHYS_ADDR_SPACE_BITS > 32
453
454#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
455#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
456#endif
457 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
458 p = *lp;
459 if (!p) {
460 /* allocate if not found */
461 if (!alloc)
462 return NULL;
463 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
464 memset(p, 0, sizeof(void *) * L1_SIZE);
465 *lp = p;
466 }
467#endif
468#else /* VBOX */
469 /* level 0 lookup and lazy allocation of level 1 map. */
470 if (RT_UNLIKELY(index >= (target_phys_addr_t)L2_SIZE * L1_SIZE * L0_SIZE))
471 return NULL;
472 p = l0_phys_map[index >> (L1_BITS + L2_BITS)];
473 if (RT_UNLIKELY(!p)) {
474 if (!alloc)
475 return NULL;
476 p = qemu_vmalloc(sizeof(void **) * L1_SIZE);
477 memset(p, 0, sizeof(void **) * L1_SIZE);
478 l0_phys_map[index >> (L1_BITS + L2_BITS)] = p;
479 }
480
481 /* level 1 lookup and lazy allocation of level 2 map. */
482#endif /* VBOX */
483 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
484 pd = *lp;
485 if (!pd) {
486 int i;
487 /* allocate if not found */
488 if (!alloc)
489 return NULL;
490 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
491 *lp = pd;
492 for (i = 0; i < L2_SIZE; i++)
493 pd[i].phys_offset = IO_MEM_UNASSIGNED;
494 }
495#if defined(VBOX) && !defined(VBOX_WITH_NEW_PHYS_CODE)
496 pd = ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
497 if (RT_UNLIKELY((pd->phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING))
498 remR3GrowDynRange(pd->phys_offset & TARGET_PAGE_MASK);
499 return pd;
500#else
501 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
502#endif
503}
504
505#ifndef VBOX
506static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
507#else
508DECLINLINE(PhysPageDesc *) phys_page_find(target_phys_addr_t index)
509#endif
510{
511 return phys_page_find_alloc(index, 0);
512}
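/* Usage sketch (illustrative; this is the pattern breakpoint_invalidate()
 * further down follows): the low bits of phys_offset carry the io_index,
 * the high bits the page-aligned RAM offset.
 *
 *   PhysPageDesc *p = phys_page_find(addr >> TARGET_PAGE_BITS);
 *   ram_addr_t pd = p ? p->phys_offset : IO_MEM_UNASSIGNED;
 *   ram_addr_t ram_addr = (pd & TARGET_PAGE_MASK) | (addr & ~TARGET_PAGE_MASK);
 */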
513
514#if !defined(CONFIG_USER_ONLY)
515static void tlb_protect_code(ram_addr_t ram_addr);
516static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
517 target_ulong vaddr);
518#define mmap_lock() do { } while(0)
519#define mmap_unlock() do { } while(0)
520#endif
521
522#ifdef VBOX
523/*
524 * We don't need such a huge codegen buffer size, as we execute most of
525 * the code in raw or hwacc mode.
526 */
527#define DEFAULT_CODE_GEN_BUFFER_SIZE (8 * 1024 * 1024)
528#else
529#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
530#endif
531
532#if defined(CONFIG_USER_ONLY)
533/* Currently it is not recommended to allocate big chunks of data in
534 user mode. It will change when a dedicated libc is used. */
535#define USE_STATIC_CODE_GEN_BUFFER
536#endif
537
538/* VBox allocates codegen buffer dynamically */
539#ifndef VBOX
540#ifdef USE_STATIC_CODE_GEN_BUFFER
541static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
542#endif
543#endif
544
545static void code_gen_alloc(unsigned long tb_size)
546{
547#ifdef USE_STATIC_CODE_GEN_BUFFER
548 code_gen_buffer = static_code_gen_buffer;
549 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
550 map_exec(code_gen_buffer, code_gen_buffer_size);
551#else
552#ifdef VBOX
553 /* We cannot use phys_ram_size here, as it's 0 now;
554 * it only gets initialized once the RAM registration callback
555 * (REMR3NotifyPhysRamRegister()) has been called.
556 */
557 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
558#else
559 code_gen_buffer_size = tb_size;
560 if (code_gen_buffer_size == 0) {
561#if defined(CONFIG_USER_ONLY)
562 /* in user mode, phys_ram_size is not meaningful */
563 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
564#else
565 /* XXX: needs adjustments */
566 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
567#endif
568
569 }
570 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
571 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
572#endif /* VBOX */
573
574 /* The code gen buffer location may have constraints depending on
575 the host cpu and OS */
576#ifdef VBOX
577 code_gen_buffer = RTMemExecAlloc(code_gen_buffer_size);
578
579 if (!code_gen_buffer) {
580 LogRel(("REM: failed to allocate codegen buffer %lld\n",
581 code_gen_buffer_size));
582 return;
583 }
584#else //!VBOX
585#if defined(__linux__)
586 {
587 int flags;
588 void *start = NULL;
589
590 flags = MAP_PRIVATE | MAP_ANONYMOUS;
591#if defined(__x86_64__)
592 flags |= MAP_32BIT;
593 /* Cannot map more than that */
594 if (code_gen_buffer_size > (800 * 1024 * 1024))
595 code_gen_buffer_size = (800 * 1024 * 1024);
596#elif defined(__sparc_v9__)
597 // Map the buffer below 2G, so we can use direct calls and branches
598 flags |= MAP_FIXED;
599 start = (void *) 0x60000000UL;
600 if (code_gen_buffer_size > (512 * 1024 * 1024))
601 code_gen_buffer_size = (512 * 1024 * 1024);
602#endif
603 code_gen_buffer = mmap(start, code_gen_buffer_size,
604 PROT_WRITE | PROT_READ | PROT_EXEC,
605 flags, -1, 0);
606 if (code_gen_buffer == MAP_FAILED) {
607 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
608 exit(1);
609 }
610 }
611#elif defined(__FreeBSD__)
612 {
613 int flags;
614 void *addr = NULL;
615 flags = MAP_PRIVATE | MAP_ANONYMOUS;
616#if defined(__x86_64__)
617 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
618 * 0x40000000 is free */
619 flags |= MAP_FIXED;
620 addr = (void *)0x40000000;
621 /* Cannot map more than that */
622 if (code_gen_buffer_size > (800 * 1024 * 1024))
623 code_gen_buffer_size = (800 * 1024 * 1024);
624#endif
625 code_gen_buffer = mmap(addr, code_gen_buffer_size,
626 PROT_WRITE | PROT_READ | PROT_EXEC,
627 flags, -1, 0);
628 if (code_gen_buffer == MAP_FAILED) {
629 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
630 exit(1);
631 }
632 }
633#else
634 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
635 if (!code_gen_buffer) {
636 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
637 exit(1);
638 }
639 map_exec(code_gen_buffer, code_gen_buffer_size);
640#endif
641 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
642#endif /* !VBOX */
643#endif /* !USE_STATIC_CODE_GEN_BUFFER */
644#ifndef VBOX
645 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
646#else
647 map_exec(code_gen_prologue, _1K);
648#endif
649
650 code_gen_buffer_max_size = code_gen_buffer_size -
651 code_gen_max_block_size();
652 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
653 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
654}
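/* Sizing sketch (illustrative numbers only): with the 8 MiB VBox buffer and
 * a CODE_GEN_AVG_BLOCK_SIZE of, say, 128 bytes (the actual value is target
 * dependent and defined in exec-all.h), code_gen_max_blocks works out to
 * 8 MiB / 128 = 65536 TB descriptors, so tbs[] costs
 * 65536 * sizeof(TranslationBlock).  code_gen_buffer_max_size keeps one
 * worst-case block (code_gen_max_block_size()) of slack at the end of the
 * buffer so that a single translation can never overrun it. */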
655
656/* Must be called before using the QEMU cpus. 'tb_size' is the size
657 (in bytes) allocated to the translation buffer. Zero means default
658 size. */
659void cpu_exec_init_all(unsigned long tb_size)
660{
661 cpu_gen_init();
662 code_gen_alloc(tb_size);
663 code_gen_ptr = code_gen_buffer;
664 page_init();
665#if !defined(CONFIG_USER_ONLY)
666 io_mem_init();
667#endif
668}
669
670#ifndef VBOX
671#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
672
673#define CPU_COMMON_SAVE_VERSION 1
674
675static void cpu_common_save(QEMUFile *f, void *opaque)
676{
677 CPUState *env = opaque;
678
679 qemu_put_be32s(f, &env->halted);
680 qemu_put_be32s(f, &env->interrupt_request);
681}
682
683static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
684{
685 CPUState *env = opaque;
686
687 if (version_id != CPU_COMMON_SAVE_VERSION)
688 return -EINVAL;
689
690 qemu_get_be32s(f, &env->halted);
691 qemu_get_be32s(f, &env->interrupt_request);
692 tlb_flush(env, 1);
693
694 return 0;
695}
696#endif
697#endif //!VBOX
698
699void cpu_exec_init(CPUState *env)
700{
701 CPUState **penv;
702 int cpu_index;
703
704 env->next_cpu = NULL;
705 penv = &first_cpu;
706 cpu_index = 0;
707 while (*penv != NULL) {
708 penv = (CPUState **)&(*penv)->next_cpu;
709 cpu_index++;
710 }
711 env->cpu_index = cpu_index;
712 env->nb_watchpoints = 0;
713 *penv = env;
714#ifndef VBOX
715#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
716 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
717 cpu_common_save, cpu_common_load, env);
718 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
719 cpu_save, cpu_load, env);
720#endif
721#endif // !VBOX
722}
723
724#ifndef VBOX
725static inline void invalidate_page_bitmap(PageDesc *p)
726#else
727DECLINLINE(void) invalidate_page_bitmap(PageDesc *p)
728#endif
729{
730 if (p->code_bitmap) {
731 qemu_free(p->code_bitmap);
732 p->code_bitmap = NULL;
733 }
734 p->code_write_count = 0;
735}
736
737/* set to NULL all the 'first_tb' fields in all PageDescs */
738static void page_flush_tb(void)
739{
740 int i, j;
741 PageDesc *p;
742#ifdef VBOX
743 int k;
744#endif
745
746#ifdef VBOX
747 k = l0_map_max_used;
748 while (k-- > 0) {
749 PageDesc **l1_map = l0_map[k];
750 if (l1_map) {
751#endif
752 for(i = 0; i < L1_SIZE; i++) {
753 p = l1_map[i];
754 if (p) {
755 for(j = 0; j < L2_SIZE; j++) {
756 p->first_tb = NULL;
757 invalidate_page_bitmap(p);
758 p++;
759 }
760 }
761 }
762#ifdef VBOX
763 }
764 }
765#endif
766}
767
768/* flush all the translation blocks */
769/* XXX: tb_flush is currently not thread safe */
770void tb_flush(CPUState *env1)
771{
772 CPUState *env;
773#ifdef VBOX
774 STAM_PROFILE_START(&env1->StatTbFlush, a);
775#endif
776#if defined(DEBUG_FLUSH)
777 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
778 (unsigned long)(code_gen_ptr - code_gen_buffer),
779 nb_tbs, nb_tbs > 0 ?
780 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
781#endif
782 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
783 cpu_abort(env1, "Internal error: code buffer overflow\n");
784
785 nb_tbs = 0;
786
787 for(env = first_cpu; env != NULL; env = env->next_cpu) {
788 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
789 }
790
791 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
792 page_flush_tb();
793
794 code_gen_ptr = code_gen_buffer;
795 /* XXX: flush processor icache at this point if cache flush is
796 expensive */
797 tb_flush_count++;
798#ifdef VBOX
799 STAM_PROFILE_STOP(&env1->StatTbFlush, a);
800#endif
801}
802
803#ifdef DEBUG_TB_CHECK
804static void tb_invalidate_check(target_ulong address)
805{
806 TranslationBlock *tb;
807 int i;
808 address &= TARGET_PAGE_MASK;
809 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
810 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
811 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
812 address >= tb->pc + tb->size)) {
813 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
814 address, (long)tb->pc, tb->size);
815 }
816 }
817 }
818}
819
820/* verify that all the pages have correct rights for code */
821static void tb_page_check(void)
822{
823 TranslationBlock *tb;
824 int i, flags1, flags2;
825
826 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
827 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
828 flags1 = page_get_flags(tb->pc);
829 flags2 = page_get_flags(tb->pc + tb->size - 1);
830 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
831 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
832 (long)tb->pc, tb->size, flags1, flags2);
833 }
834 }
835 }
836}
837
838static void tb_jmp_check(TranslationBlock *tb)
839{
840 TranslationBlock *tb1;
841 unsigned int n1;
842
843 /* suppress any remaining jumps to this TB */
844 tb1 = tb->jmp_first;
845 for(;;) {
846 n1 = (long)tb1 & 3;
847 tb1 = (TranslationBlock *)((long)tb1 & ~3);
848 if (n1 == 2)
849 break;
850 tb1 = tb1->jmp_next[n1];
851 }
852 /* check end of list */
853 if (tb1 != tb) {
854 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
855 }
856}
857#endif // DEBUG_TB_CHECK
858
859/* invalidate one TB */
860#ifndef VBOX
861static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
862 int next_offset)
863#else
864DECLINLINE(void) tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
865 int next_offset)
866#endif
867{
868 TranslationBlock *tb1;
869 for(;;) {
870 tb1 = *ptb;
871 if (tb1 == tb) {
872 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
873 break;
874 }
875 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
876 }
877}
878
879#ifndef VBOX
880static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
881#else
882DECLINLINE(void) tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
883#endif
884{
885 TranslationBlock *tb1;
886 unsigned int n1;
887
888 for(;;) {
889 tb1 = *ptb;
890 n1 = (long)tb1 & 3;
891 tb1 = (TranslationBlock *)((long)tb1 & ~3);
892 if (tb1 == tb) {
893 *ptb = tb1->page_next[n1];
894 break;
895 }
896 ptb = &tb1->page_next[n1];
897 }
898}
899
900#ifndef VBOX
901static inline void tb_jmp_remove(TranslationBlock *tb, int n)
902#else
903DECLINLINE(void) tb_jmp_remove(TranslationBlock *tb, int n)
904#endif
905{
906 TranslationBlock *tb1, **ptb;
907 unsigned int n1;
908
909 ptb = &tb->jmp_next[n];
910 tb1 = *ptb;
911 if (tb1) {
912 /* find tb(n) in circular list */
913 for(;;) {
914 tb1 = *ptb;
915 n1 = (long)tb1 & 3;
916 tb1 = (TranslationBlock *)((long)tb1 & ~3);
917 if (n1 == n && tb1 == tb)
918 break;
919 if (n1 == 2) {
920 ptb = &tb1->jmp_first;
921 } else {
922 ptb = &tb1->jmp_next[n1];
923 }
924 }
925 /* now we can suppress tb(n) from the list */
926 *ptb = tb->jmp_next[n];
927
928 tb->jmp_next[n] = NULL;
929 }
930}
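/* Note on the tagged pointers used by the list walks above (a reading aid,
 * not new behaviour): the low two bits of a link encode which slot of the
 * owning TB the link lives in -- 0 or 1 for jmp_next[0]/jmp_next[1]
 * (respectively page_next[0]/page_next[1]), while 2 marks the jmp_first
 * list head.  Masking recovers the real pointer:
 *
 *   n1  = (long)tb1 & 3;                          slot / end-of-list tag
 *   tb1 = (TranslationBlock *)((long)tb1 & ~3);   actual TranslationBlock
 */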
931
932/* reset the jump entry 'n' of a TB so that it is not chained to
933 another TB */
934#ifndef VBOX
935static inline void tb_reset_jump(TranslationBlock *tb, int n)
936#else
937DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
938#endif
939{
940 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
941}
942
943void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
944{
945 CPUState *env;
946 PageDesc *p;
947 unsigned int h, n1;
948 target_phys_addr_t phys_pc;
949 TranslationBlock *tb1, *tb2;
950
951 /* remove the TB from the hash list */
952 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
953 h = tb_phys_hash_func(phys_pc);
954 tb_remove(&tb_phys_hash[h], tb,
955 offsetof(TranslationBlock, phys_hash_next));
956
957 /* remove the TB from the page list */
958 if (tb->page_addr[0] != page_addr) {
959 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
960 tb_page_remove(&p->first_tb, tb);
961 invalidate_page_bitmap(p);
962 }
963 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
964 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
965 tb_page_remove(&p->first_tb, tb);
966 invalidate_page_bitmap(p);
967 }
968
969 tb_invalidated_flag = 1;
970
971 /* remove the TB from the hash list */
972 h = tb_jmp_cache_hash_func(tb->pc);
973 for(env = first_cpu; env != NULL; env = env->next_cpu) {
974 if (env->tb_jmp_cache[h] == tb)
975 env->tb_jmp_cache[h] = NULL;
976 }
977
978 /* suppress this TB from the two jump lists */
979 tb_jmp_remove(tb, 0);
980 tb_jmp_remove(tb, 1);
981
982 /* suppress any remaining jumps to this TB */
983 tb1 = tb->jmp_first;
984 for(;;) {
985 n1 = (long)tb1 & 3;
986 if (n1 == 2)
987 break;
988 tb1 = (TranslationBlock *)((long)tb1 & ~3);
989 tb2 = tb1->jmp_next[n1];
990 tb_reset_jump(tb1, n1);
991 tb1->jmp_next[n1] = NULL;
992 tb1 = tb2;
993 }
994 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
995
996 tb_phys_invalidate_count++;
997}
998
999
1000#ifdef VBOX
1001void tb_invalidate_virt(CPUState *env, uint32_t eip)
1002{
1003# if 1
1004 tb_flush(env);
1005# else
1006 uint8_t *cs_base, *pc;
1007 unsigned int flags, h, phys_pc;
1008 TranslationBlock *tb, **ptb;
1009
1010 flags = env->hflags;
1011 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1012 cs_base = env->segs[R_CS].base;
1013 pc = cs_base + eip;
1014
1015 tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base,
1016 flags);
1017
1018 if(tb)
1019 {
1020# ifdef DEBUG
1021 printf("invalidating TB (%08X) at %08X\n", tb, eip);
1022# endif
1023 tb_invalidate(tb);
1024 //Note: this will leak TBs, but the whole cache will be flushed
1025 // when it happens too often
1026 tb->pc = 0;
1027 tb->cs_base = 0;
1028 tb->flags = 0;
1029 }
1030# endif
1031}
1032
1033# ifdef VBOX_STRICT
1034/**
1035 * Gets the page offset.
1036 */
1037unsigned long get_phys_page_offset(target_ulong addr)
1038{
1039 PhysPageDesc *p = phys_page_find(addr >> TARGET_PAGE_BITS);
1040 return p ? p->phys_offset : 0;
1041}
1042# endif /* VBOX_STRICT */
1043#endif /* VBOX */
1044
1045#ifndef VBOX
1046static inline void set_bits(uint8_t *tab, int start, int len)
1047#else
1048DECLINLINE(void) set_bits(uint8_t *tab, int start, int len)
1049#endif
1050{
1051 int end, mask, end1;
1052
1053 end = start + len;
1054 tab += start >> 3;
1055 mask = 0xff << (start & 7);
1056 if ((start & ~7) == (end & ~7)) {
1057 if (start < end) {
1058 mask &= ~(0xff << (end & 7));
1059 *tab |= mask;
1060 }
1061 } else {
1062 *tab++ |= mask;
1063 start = (start + 8) & ~7;
1064 end1 = end & ~7;
1065 while (start < end1) {
1066 *tab++ = 0xff;
1067 start += 8;
1068 }
1069 if (start < end) {
1070 mask = ~(0xff << (end & 7));
1071 *tab |= mask;
1072 }
1073 }
1074}
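/* Worked example (sketch): set_bits(bitmap, 13, 7) marks bits 13..19.
 * start >> 3 selects byte 1 and mask = 0xff << (13 & 7) = 0xe0 covers
 * global bits 13..15 there; the tail mask ~(0xff << (20 & 7)) = 0x0f then
 * covers bits 16..19 in byte 2.  build_page_bitmap() below passes offsets
 * within one TARGET_PAGE_SIZE page, hence the TARGET_PAGE_SIZE / 8 bitmap. */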
1075
1076static void build_page_bitmap(PageDesc *p)
1077{
1078 int n, tb_start, tb_end;
1079 TranslationBlock *tb;
1080
1081 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
1082 if (!p->code_bitmap)
1083 return;
1084 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
1085
1086 tb = p->first_tb;
1087 while (tb != NULL) {
1088 n = (long)tb & 3;
1089 tb = (TranslationBlock *)((long)tb & ~3);
1090 /* NOTE: this is subtle as a TB may span two physical pages */
1091 if (n == 0) {
1092 /* NOTE: tb_end may be after the end of the page, but
1093 it is not a problem */
1094 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1095 tb_end = tb_start + tb->size;
1096 if (tb_end > TARGET_PAGE_SIZE)
1097 tb_end = TARGET_PAGE_SIZE;
1098 } else {
1099 tb_start = 0;
1100 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1101 }
1102 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
1103 tb = tb->page_next[n];
1104 }
1105}
1106
1107TranslationBlock *tb_gen_code(CPUState *env,
1108 target_ulong pc, target_ulong cs_base,
1109 int flags, int cflags)
1110{
1111 TranslationBlock *tb;
1112 uint8_t *tc_ptr;
1113 target_ulong phys_pc, phys_page2, virt_page2;
1114 int code_gen_size;
1115
1116 phys_pc = get_phys_addr_code(env, pc);
1117 tb = tb_alloc(pc);
1118 if (!tb) {
1119 /* flush must be done */
1120 tb_flush(env);
1121 /* cannot fail at this point */
1122 tb = tb_alloc(pc);
1123 /* Don't forget to invalidate previous TB info. */
1124 tb_invalidated_flag = 1;
1125 }
1126 tc_ptr = code_gen_ptr;
1127 tb->tc_ptr = tc_ptr;
1128 tb->cs_base = cs_base;
1129 tb->flags = flags;
1130 tb->cflags = cflags;
1131 cpu_gen_code(env, tb, &code_gen_size);
1132 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
1133
1134 /* check next page if needed */
1135 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1136 phys_page2 = -1;
1137 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1138 phys_page2 = get_phys_addr_code(env, virt_page2);
1139 }
1140 tb_link_phys(tb, phys_pc, phys_page2);
1141 return tb;
1142}
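/* Call-path sketch (hypothetical caller; the real one lives in cpu-exec.c
 * and is not part of this file): a translation-cache miss ends up roughly
 * doing
 *
 *   tb = tb_gen_code(env, pc, cs_base, flags, 0);
 *   env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
 *
 * cflags == 0 is the common case; the SMC handling below regenerates the
 * current TB with cflags = 1 so that it contains a single instruction. */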
1143
1144/* invalidate all TBs which intersect with the target physical page
1145 starting in range [start;end[. NOTE: start and end must refer to
1146 the same physical page. 'is_cpu_write_access' should be true if called
1147 from a real cpu write access: the virtual CPU will exit the current
1148 TB if code is modified inside this TB. */
1149void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
1150 int is_cpu_write_access)
1151{
1152 int n, current_tb_modified, current_tb_not_found, current_flags;
1153 CPUState *env = cpu_single_env;
1154 PageDesc *p;
1155 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
1156 target_ulong tb_start, tb_end;
1157 target_ulong current_pc, current_cs_base;
1158
1159 p = page_find(start >> TARGET_PAGE_BITS);
1160 if (!p)
1161 return;
1162 if (!p->code_bitmap &&
1163 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1164 is_cpu_write_access) {
1165 /* build code bitmap */
1166 build_page_bitmap(p);
1167 }
1168
1169 /* we remove all the TBs in the range [start, end[ */
1170 /* XXX: see if in some cases it could be faster to invalidate all the code */
1171 current_tb_not_found = is_cpu_write_access;
1172 current_tb_modified = 0;
1173 current_tb = NULL; /* avoid warning */
1174 current_pc = 0; /* avoid warning */
1175 current_cs_base = 0; /* avoid warning */
1176 current_flags = 0; /* avoid warning */
1177 tb = p->first_tb;
1178 while (tb != NULL) {
1179 n = (long)tb & 3;
1180 tb = (TranslationBlock *)((long)tb & ~3);
1181 tb_next = tb->page_next[n];
1182 /* NOTE: this is subtle as a TB may span two physical pages */
1183 if (n == 0) {
1184 /* NOTE: tb_end may be after the end of the page, but
1185 it is not a problem */
1186 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1187 tb_end = tb_start + tb->size;
1188 } else {
1189 tb_start = tb->page_addr[1];
1190 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1191 }
1192 if (!(tb_end <= start || tb_start >= end)) {
1193#ifdef TARGET_HAS_PRECISE_SMC
1194 if (current_tb_not_found) {
1195 current_tb_not_found = 0;
1196 current_tb = NULL;
1197 if (env->mem_io_pc) {
1198 /* now we have a real cpu fault */
1199 current_tb = tb_find_pc(env->mem_io_pc);
1200 }
1201 }
1202 if (current_tb == tb &&
1203 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1204 /* If we are modifying the current TB, we must stop
1205 its execution. We could be more precise by checking
1206 that the modification is after the current PC, but it
1207 would require a specialized function to partially
1208 restore the CPU state */
1209
1210 current_tb_modified = 1;
1211 cpu_restore_state(current_tb, env,
1212 env->mem_io_pc, NULL);
1213#if defined(TARGET_I386)
1214 current_flags = env->hflags;
1215 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1216 current_cs_base = (target_ulong)env->segs[R_CS].base;
1217 current_pc = current_cs_base + env->eip;
1218#else
1219#error unsupported CPU
1220#endif
1221 }
1222#endif /* TARGET_HAS_PRECISE_SMC */
1223 /* we need to do that to handle the case where a signal
1224 occurs while doing tb_phys_invalidate() */
1225 saved_tb = NULL;
1226 if (env) {
1227 saved_tb = env->current_tb;
1228 env->current_tb = NULL;
1229 }
1230 tb_phys_invalidate(tb, -1);
1231 if (env) {
1232 env->current_tb = saved_tb;
1233 if (env->interrupt_request && env->current_tb)
1234 cpu_interrupt(env, env->interrupt_request);
1235 }
1236 }
1237 tb = tb_next;
1238 }
1239#if !defined(CONFIG_USER_ONLY)
1240 /* if no code remaining, no need to continue to use slow writes */
1241 if (!p->first_tb) {
1242 invalidate_page_bitmap(p);
1243 if (is_cpu_write_access) {
1244 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1245 }
1246 }
1247#endif
1248#ifdef TARGET_HAS_PRECISE_SMC
1249 if (current_tb_modified) {
1250 /* we generate a block containing just the instruction
1251 modifying the memory. It will ensure that it cannot modify
1252 itself */
1253 env->current_tb = NULL;
1254 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1255 cpu_resume_from_signal(env, NULL);
1256 }
1257#endif
1258}
1259
1260
1261/* len must be <= 8 and start must be a multiple of len */
1262#ifndef VBOX
1263static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1264#else
1265DECLINLINE(void) tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1266#endif
1267{
1268 PageDesc *p;
1269 int offset, b;
1270#if 0
1271 if (1) {
1272 if (loglevel) {
1273 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1274 cpu_single_env->mem_io_vaddr, len,
1275 cpu_single_env->eip,
1276 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1277 }
1278 }
1279#endif
1280 p = page_find(start >> TARGET_PAGE_BITS);
1281 if (!p)
1282 return;
1283 if (p->code_bitmap) {
1284 offset = start & ~TARGET_PAGE_MASK;
1285 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1286 if (b & ((1 << len) - 1))
1287 goto do_invalidate;
1288 } else {
1289 do_invalidate:
1290 tb_invalidate_phys_page_range(start, start + len, 1);
1291 }
1292}
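/* Example of the bitmap fast path (sketch): for a 4-byte write at page
 * offset 0x124, offset >> 3 = 0x24 selects the bitmap byte and
 * (offset & 7) = 4 shifts it so that b's low bits line up with the write;
 * only if one of the (1 << 4) - 1 = 0xf bits is set does the slow
 * tb_invalidate_phys_page_range() path run. */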
1293
1294
1295#if !defined(CONFIG_SOFTMMU)
1296static void tb_invalidate_phys_page(target_phys_addr_t addr,
1297 unsigned long pc, void *puc)
1298{
1299 int n, current_flags, current_tb_modified;
1300 target_ulong current_pc, current_cs_base;
1301 PageDesc *p;
1302 TranslationBlock *tb, *current_tb;
1303#ifdef TARGET_HAS_PRECISE_SMC
1304 CPUState *env = cpu_single_env;
1305#endif
1306
1307 addr &= TARGET_PAGE_MASK;
1308 p = page_find(addr >> TARGET_PAGE_BITS);
1309 if (!p)
1310 return;
1311 tb = p->first_tb;
1312 current_tb_modified = 0;
1313 current_tb = NULL;
1314 current_pc = 0; /* avoid warning */
1315 current_cs_base = 0; /* avoid warning */
1316 current_flags = 0; /* avoid warning */
1317#ifdef TARGET_HAS_PRECISE_SMC
1318 if (tb && pc != 0) {
1319 current_tb = tb_find_pc(pc);
1320 }
1321#endif
1322 while (tb != NULL) {
1323 n = (long)tb & 3;
1324 tb = (TranslationBlock *)((long)tb & ~3);
1325#ifdef TARGET_HAS_PRECISE_SMC
1326 if (current_tb == tb &&
1327 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1328 /* If we are modifying the current TB, we must stop
1329 its execution. We could be more precise by checking
1330 that the modification is after the current PC, but it
1331 would require a specialized function to partially
1332 restore the CPU state */
1333
1334 current_tb_modified = 1;
1335 cpu_restore_state(current_tb, env, pc, puc);
1336#if defined(TARGET_I386)
1337 current_flags = env->hflags;
1338 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1339 current_cs_base = (target_ulong)env->segs[R_CS].base;
1340 current_pc = current_cs_base + env->eip;
1341#else
1342#error unsupported CPU
1343#endif
1344 }
1345#endif /* TARGET_HAS_PRECISE_SMC */
1346 tb_phys_invalidate(tb, addr);
1347 tb = tb->page_next[n];
1348 }
1349 p->first_tb = NULL;
1350#ifdef TARGET_HAS_PRECISE_SMC
1351 if (current_tb_modified) {
1352 /* we generate a block containing just the instruction
1353 modifying the memory. It will ensure that it cannot modify
1354 itself */
1355 env->current_tb = NULL;
1356 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1357 cpu_resume_from_signal(env, puc);
1358 }
1359#endif
1360}
1361#endif
1362
1363/* add the tb in the target page and protect it if necessary */
1364#ifndef VBOX
1365static inline void tb_alloc_page(TranslationBlock *tb,
1366 unsigned int n, target_ulong page_addr)
1367#else
1368DECLINLINE(void) tb_alloc_page(TranslationBlock *tb,
1369 unsigned int n, target_ulong page_addr)
1370#endif
1371{
1372 PageDesc *p;
1373 TranslationBlock *last_first_tb;
1374
1375 tb->page_addr[n] = page_addr;
1376 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1377 tb->page_next[n] = p->first_tb;
1378 last_first_tb = p->first_tb;
1379 p->first_tb = (TranslationBlock *)((long)tb | n);
1380 invalidate_page_bitmap(p);
1381
1382#if defined(TARGET_HAS_SMC) || 1
1383
1384#if defined(CONFIG_USER_ONLY)
1385 if (p->flags & PAGE_WRITE) {
1386 target_ulong addr;
1387 PageDesc *p2;
1388 int prot;
1389
1390 /* force the host page as non writable (writes will have a
1391 page fault + mprotect overhead) */
1392 page_addr &= qemu_host_page_mask;
1393 prot = 0;
1394 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1395 addr += TARGET_PAGE_SIZE) {
1396
1397 p2 = page_find (addr >> TARGET_PAGE_BITS);
1398 if (!p2)
1399 continue;
1400 prot |= p2->flags;
1401 p2->flags &= ~PAGE_WRITE;
1402 page_get_flags(addr);
1403 }
1404 mprotect(g2h(page_addr), qemu_host_page_size,
1405 (prot & PAGE_BITS) & ~PAGE_WRITE);
1406#ifdef DEBUG_TB_INVALIDATE
1407 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1408 page_addr);
1409#endif
1410 }
1411#else
1412 /* if some code is already present, then the pages are already
1413 protected. So we handle the case where only the first TB is
1414 allocated in a physical page */
1415 if (!last_first_tb) {
1416 tlb_protect_code(page_addr);
1417 }
1418#endif
1419
1420#endif /* TARGET_HAS_SMC */
1421}
1422
1423/* Allocate a new translation block. Flush the translation buffer if
1424 too many translation blocks or too much generated code. */
1425TranslationBlock *tb_alloc(target_ulong pc)
1426{
1427 TranslationBlock *tb;
1428
1429 if (nb_tbs >= code_gen_max_blocks ||
1430#ifndef VBOX
1431 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1432#else
1433 (code_gen_ptr - code_gen_buffer) >= (int)code_gen_buffer_max_size)
1434#endif
1435 return NULL;
1436 tb = &tbs[nb_tbs++];
1437 tb->pc = pc;
1438 tb->cflags = 0;
1439 return tb;
1440}
1441
1442void tb_free(TranslationBlock *tb)
1443{
1444 /* In practice this is mostly used for single-use temporary TBs.
1445 Ignore the hard cases and just back up if this TB happens to
1446 be the last one generated. */
1447 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1448 code_gen_ptr = tb->tc_ptr;
1449 nb_tbs--;
1450 }
1451}
1452
1453/* add a new TB and link it to the physical page tables. phys_page2 is
1454 (-1) to indicate that only one page contains the TB. */
1455void tb_link_phys(TranslationBlock *tb,
1456 target_ulong phys_pc, target_ulong phys_page2)
1457{
1458 unsigned int h;
1459 TranslationBlock **ptb;
1460
1461 /* Grab the mmap lock to stop another thread invalidating this TB
1462 before we are done. */
1463 mmap_lock();
1464 /* add in the physical hash table */
1465 h = tb_phys_hash_func(phys_pc);
1466 ptb = &tb_phys_hash[h];
1467 tb->phys_hash_next = *ptb;
1468 *ptb = tb;
1469
1470 /* add in the page list */
1471 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1472 if (phys_page2 != -1)
1473 tb_alloc_page(tb, 1, phys_page2);
1474 else
1475 tb->page_addr[1] = -1;
1476
1477 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1478 tb->jmp_next[0] = NULL;
1479 tb->jmp_next[1] = NULL;
1480
1481 /* init original jump addresses */
1482 if (tb->tb_next_offset[0] != 0xffff)
1483 tb_reset_jump(tb, 0);
1484 if (tb->tb_next_offset[1] != 0xffff)
1485 tb_reset_jump(tb, 1);
1486
1487#ifdef DEBUG_TB_CHECK
1488 tb_page_check();
1489#endif
1490 mmap_unlock();
1491}
1492
1493/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1494 tb[1].tc_ptr. Return NULL if not found */
1495TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1496{
1497 int m_min, m_max, m;
1498 unsigned long v;
1499 TranslationBlock *tb;
1500
1501 if (nb_tbs <= 0)
1502 return NULL;
1503 if (tc_ptr < (unsigned long)code_gen_buffer ||
1504 tc_ptr >= (unsigned long)code_gen_ptr)
1505 return NULL;
1506 /* binary search (cf Knuth) */
1507 m_min = 0;
1508 m_max = nb_tbs - 1;
1509 while (m_min <= m_max) {
1510 m = (m_min + m_max) >> 1;
1511 tb = &tbs[m];
1512 v = (unsigned long)tb->tc_ptr;
1513 if (v == tc_ptr)
1514 return tb;
1515 else if (tc_ptr < v) {
1516 m_max = m - 1;
1517 } else {
1518 m_min = m + 1;
1519 }
1520 }
1521 return &tbs[m_max];
1522}
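/* Example (sketch): if three TBs were generated at tc_ptr 0x1000, 0x1400
 * and 0x1900 (tbs[] is ordered because code_gen_ptr only grows between
 * flushes), a host pc of 0x1450 narrows the search to m_max == 1 and
 * &tbs[1] is returned -- the block with the greatest tc_ptr <= pc, i.e.
 * the one whose generated code contains the address.  This is what the
 * SMC handling above relies on to map env->mem_io_pc back to a
 * TranslationBlock before calling cpu_restore_state(). */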
1523
1524static void tb_reset_jump_recursive(TranslationBlock *tb);
1525
1526#ifndef VBOX
1527static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1528#else
1529DECLINLINE(void) tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1530#endif
1531{
1532 TranslationBlock *tb1, *tb_next, **ptb;
1533 unsigned int n1;
1534
1535 tb1 = tb->jmp_next[n];
1536 if (tb1 != NULL) {
1537 /* find head of list */
1538 for(;;) {
1539 n1 = (long)tb1 & 3;
1540 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1541 if (n1 == 2)
1542 break;
1543 tb1 = tb1->jmp_next[n1];
1544 }
1545 /* we are now sure that tb jumps to tb1 */
1546 tb_next = tb1;
1547
1548 /* remove tb from the jmp_first list */
1549 ptb = &tb_next->jmp_first;
1550 for(;;) {
1551 tb1 = *ptb;
1552 n1 = (long)tb1 & 3;
1553 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1554 if (n1 == n && tb1 == tb)
1555 break;
1556 ptb = &tb1->jmp_next[n1];
1557 }
1558 *ptb = tb->jmp_next[n];
1559 tb->jmp_next[n] = NULL;
1560
1561 /* suppress the jump to next tb in generated code */
1562 tb_reset_jump(tb, n);
1563
1564 /* suppress jumps in the tb on which we could have jumped */
1565 tb_reset_jump_recursive(tb_next);
1566 }
1567}
1568
1569static void tb_reset_jump_recursive(TranslationBlock *tb)
1570{
1571 tb_reset_jump_recursive2(tb, 0);
1572 tb_reset_jump_recursive2(tb, 1);
1573}
1574
1575#if defined(TARGET_HAS_ICE)
1576static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1577{
1578 target_ulong addr, pd;
1579 ram_addr_t ram_addr;
1580 PhysPageDesc *p;
1581
1582 addr = cpu_get_phys_page_debug(env, pc);
1583 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1584 if (!p) {
1585 pd = IO_MEM_UNASSIGNED;
1586 } else {
1587 pd = p->phys_offset;
1588 }
1589 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1590 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1591}
1592#endif
1593
1594/* Add a watchpoint. */
1595int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
1596{
1597 int i;
1598
1599 for (i = 0; i < env->nb_watchpoints; i++) {
1600 if (addr == env->watchpoint[i].vaddr)
1601 return 0;
1602 }
1603 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1604 return -1;
1605
1606 i = env->nb_watchpoints++;
1607 env->watchpoint[i].vaddr = addr;
1608 env->watchpoint[i].type = type;
1609 tlb_flush_page(env, addr);
1610 /* FIXME: This flush is needed because of the hack to make memory ops
1611 terminate the TB. It can be removed once the proper IO trap and
1612 re-execute bits are in. */
1613 tb_flush(env);
1614 return i;
1615}
1616
1617/* Remove a watchpoint. */
1618int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1619{
1620 int i;
1621
1622 for (i = 0; i < env->nb_watchpoints; i++) {
1623 if (addr == env->watchpoint[i].vaddr) {
1624 env->nb_watchpoints--;
1625 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1626 tlb_flush_page(env, addr);
1627 return 0;
1628 }
1629 }
1630 return -1;
1631}
1632
1633/* Remove all watchpoints. */
1634void cpu_watchpoint_remove_all(CPUState *env) {
1635 int i;
1636
1637 for (i = 0; i < env->nb_watchpoints; i++) {
1638 tlb_flush_page(env, env->watchpoint[i].vaddr);
1639 }
1640 env->nb_watchpoints = 0;
1641}
1642
1643/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1644 breakpoint is reached */
1645int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1646{
1647#if defined(TARGET_HAS_ICE)
1648 int i;
1649
1650 for(i = 0; i < env->nb_breakpoints; i++) {
1651 if (env->breakpoints[i] == pc)
1652 return 0;
1653 }
1654
1655 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1656 return -1;
1657 env->breakpoints[env->nb_breakpoints++] = pc;
1658
1659 breakpoint_invalidate(env, pc);
1660 return 0;
1661#else
1662 return -1;
1663#endif
1664}
1665
1666/* remove all breakpoints */
1667void cpu_breakpoint_remove_all(CPUState *env) {
1668#if defined(TARGET_HAS_ICE)
1669 int i;
1670 for(i = 0; i < env->nb_breakpoints; i++) {
1671 breakpoint_invalidate(env, env->breakpoints[i]);
1672 }
1673 env->nb_breakpoints = 0;
1674#endif
1675}
1676
1677/* remove a breakpoint */
1678int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1679{
1680#if defined(TARGET_HAS_ICE)
1681 int i;
1682 for(i = 0; i < env->nb_breakpoints; i++) {
1683 if (env->breakpoints[i] == pc)
1684 goto found;
1685 }
1686 return -1;
1687 found:
1688 env->nb_breakpoints--;
1689 if (i < env->nb_breakpoints)
1690 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1691
1692 breakpoint_invalidate(env, pc);
1693 return 0;
1694#else
1695 return -1;
1696#endif
1697}
1698
1699/* enable or disable single step mode. EXCP_DEBUG is returned by the
1700 CPU loop after each instruction */
1701void cpu_single_step(CPUState *env, int enabled)
1702{
1703#if defined(TARGET_HAS_ICE)
1704 if (env->singlestep_enabled != enabled) {
1705 env->singlestep_enabled = enabled;
1706 /* must flush all the translated code to avoid inconsistencies */
1707 /* XXX: only flush what is necessary */
1708 tb_flush(env);
1709 }
1710#endif
1711}
1712
1713#ifndef VBOX
1714/* enable or disable low levels log */
1715void cpu_set_log(int log_flags)
1716{
1717 loglevel = log_flags;
1718 if (loglevel && !logfile) {
1719 logfile = fopen(logfilename, "w");
1720 if (!logfile) {
1721 perror(logfilename);
1722 _exit(1);
1723 }
1724#if !defined(CONFIG_SOFTMMU)
1725 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1726 {
1727 static uint8_t logfile_buf[4096];
1728 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1729 }
1730#else
1731 setvbuf(logfile, NULL, _IOLBF, 0);
1732#endif
1733 }
1734}
1735
1736void cpu_set_log_filename(const char *filename)
1737{
1738 logfilename = strdup(filename);
1739}
1740#endif /* !VBOX */
1741
1742/* mask must never be zero, except for A20 change call */
1743void cpu_interrupt(CPUState *env, int mask)
1744{
1745#if !defined(USE_NPTL)
1746 TranslationBlock *tb;
1747 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1748#endif
1749 int old_mask;
1750
1751 old_mask = env->interrupt_request;
1752#ifdef VBOX
1753 VM_ASSERT_EMT(env->pVM);
1754 ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, mask);
1755#else /* !VBOX */
1756 /* FIXME: This is probably not threadsafe. A different thread could
1757 be in the middle of a read-modify-write operation. */
1758 env->interrupt_request |= mask;
1759#endif /* !VBOX */
1760#if defined(USE_NPTL)
1761 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1762 problem and hope the cpu will stop of its own accord. For userspace
1763 emulation this often isn't actually as bad as it sounds. Often
1764 signals are used primarily to interrupt blocking syscalls. */
1765#else
1766 if (use_icount) {
1767 env->icount_decr.u16.high = 0xffff;
1768#ifndef CONFIG_USER_ONLY
1769 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1770 an async event happened and we need to process it. */
1771 if (!can_do_io(env)
1772 && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1773 cpu_abort(env, "Raised interrupt while not in I/O function");
1774 }
1775#endif
1776 } else {
1777 tb = env->current_tb;
1778 /* if the cpu is currently executing code, we must unlink it and
1779 all the potentially executing TB */
1780 if (tb && !testandset(&interrupt_lock)) {
1781 env->current_tb = NULL;
1782 tb_reset_jump_recursive(tb);
1783 resetlock(&interrupt_lock);
1784 }
1785 }
1786#endif
1787}
1788
1789void cpu_reset_interrupt(CPUState *env, int mask)
1790{
1791#ifdef VBOX
1792 /*
1793 * Note: the current implementation can be executed by another thread without problems; make sure this remains true
1794 * for future changes!
1795 */
1796 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~mask);
1797#else /* !VBOX */
1798 env->interrupt_request &= ~mask;
1799#endif /* !VBOX */
1800}
1801
1802#ifndef VBOX
1803CPULogItem cpu_log_items[] = {
1804 { CPU_LOG_TB_OUT_ASM, "out_asm",
1805 "show generated host assembly code for each compiled TB" },
1806 { CPU_LOG_TB_IN_ASM, "in_asm",
1807 "show target assembly code for each compiled TB" },
1808 { CPU_LOG_TB_OP, "op",
1809 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1810#ifdef TARGET_I386
1811 { CPU_LOG_TB_OP_OPT, "op_opt",
1812 "show micro ops after optimization for each compiled TB" },
1813#endif
1814 { CPU_LOG_INT, "int",
1815 "show interrupts/exceptions in short format" },
1816 { CPU_LOG_EXEC, "exec",
1817 "show trace before each executed TB (lots of logs)" },
1818 { CPU_LOG_TB_CPU, "cpu",
1819 "show CPU state before bloc translation" },
1820#ifdef TARGET_I386
1821 { CPU_LOG_PCALL, "pcall",
1822 "show protected mode far calls/returns/exceptions" },
1823#endif
1824#ifdef DEBUG_IOPORT
1825 { CPU_LOG_IOPORT, "ioport",
1826 "show all i/o ports accesses" },
1827#endif
1828 { 0, NULL, NULL },
1829};
1830
1831static int cmp1(const char *s1, int n, const char *s2)
1832{
1833 if (strlen(s2) != n)
1834 return 0;
1835 return memcmp(s1, s2, n) == 0;
1836}
1837
1838/* takes a comma separated list of log masks. Returns 0 on error. */
1839int cpu_str_to_log_mask(const char *str)
1840{
1841 CPULogItem *item;
1842 int mask;
1843 const char *p, *p1;
1844
1845 p = str;
1846 mask = 0;
1847 for(;;) {
1848 p1 = strchr(p, ',');
1849 if (!p1)
1850 p1 = p + strlen(p);
1851 if(cmp1(p,p1-p,"all")) {
1852 for(item = cpu_log_items; item->mask != 0; item++) {
1853 mask |= item->mask;
1854 }
1855 } else {
1856 for(item = cpu_log_items; item->mask != 0; item++) {
1857 if (cmp1(p, p1 - p, item->name))
1858 goto found;
1859 }
1860 return 0;
1861 }
1862 found:
1863 mask |= item->mask;
1864 if (*p1 != ',')
1865 break;
1866 p = p1 + 1;
1867 }
1868 return mask;
1869}
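/* Example (sketch): cpu_str_to_log_mask("in_asm,int") matches the two
 * comma-separated names against cpu_log_items[] and returns
 * CPU_LOG_TB_IN_ASM | CPU_LOG_INT; "all" ors every mask together and an
 * unknown name makes the whole call return 0. */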
1870#endif /* !VBOX */
1871
1872#ifndef VBOX /* VBOX: we have our own routine. */
1873void cpu_abort(CPUState *env, const char *fmt, ...)
1874{
1875 va_list ap;
1876
1877 va_start(ap, fmt);
1878 fprintf(stderr, "qemu: fatal: ");
1879 vfprintf(stderr, fmt, ap);
1880 fprintf(stderr, "\n");
1881#ifdef TARGET_I386
1882 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1883#else
1884 cpu_dump_state(env, stderr, fprintf, 0);
1885#endif
1886 va_end(ap);
1887 abort();
1888}
1889#endif /* !VBOX */
1890
1891#ifndef VBOX
1892CPUState *cpu_copy(CPUState *env)
1893{
1894 CPUState *new_env = cpu_init(env->cpu_model_str);
1895 /* preserve chaining and index */
1896 CPUState *next_cpu = new_env->next_cpu;
1897 int cpu_index = new_env->cpu_index;
1898 memcpy(new_env, env, sizeof(CPUState));
1899 new_env->next_cpu = next_cpu;
1900 new_env->cpu_index = cpu_index;
1901 return new_env;
1902}
1903#endif
1904
1905#if !defined(CONFIG_USER_ONLY)
1906
1907#ifndef VBOX
1908static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1909#else
1910DECLINLINE(void) tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1911#endif
1912{
1913 unsigned int i;
1914
1915 /* Discard jump cache entries for any tb which might potentially
1916 overlap the flushed page. */
1917 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1918 memset (&env->tb_jmp_cache[i], 0,
1919 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1920
1921 i = tb_jmp_cache_hash_page(addr);
1922 memset (&env->tb_jmp_cache[i], 0,
1923 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1924
1925#ifdef VBOX
1926 /* inform raw mode about TLB page flush */
1927 remR3FlushPage(env, addr);
1928#endif /* VBOX */
1929}
1930
1931/* NOTE: if flush_global is true, also flush global entries (not
1932 implemented yet) */
1933void tlb_flush(CPUState *env, int flush_global)
1934{
1935 int i;
1936#if defined(DEBUG_TLB)
1937 printf("tlb_flush:\n");
1938#endif
1939 /* must reset current TB so that interrupts cannot modify the
1940 links while we are modifying them */
1941 env->current_tb = NULL;
1942
1943 for(i = 0; i < CPU_TLB_SIZE; i++) {
1944 env->tlb_table[0][i].addr_read = -1;
1945 env->tlb_table[0][i].addr_write = -1;
1946 env->tlb_table[0][i].addr_code = -1;
1947 env->tlb_table[1][i].addr_read = -1;
1948 env->tlb_table[1][i].addr_write = -1;
1949 env->tlb_table[1][i].addr_code = -1;
1950#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
1951 env->phys_addends[0][i] = -1;
1952 env->phys_addends[1][i] = -1;
1953#endif
1954#if (NB_MMU_MODES >= 3)
1955 env->tlb_table[2][i].addr_read = -1;
1956 env->tlb_table[2][i].addr_write = -1;
1957 env->tlb_table[2][i].addr_code = -1;
1958#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
1959 env->phys_addends[2][i] = -1;
1960#endif
1961#if (NB_MMU_MODES == 4)
1962 env->tlb_table[3][i].addr_read = -1;
1963 env->tlb_table[3][i].addr_write = -1;
1964 env->tlb_table[3][i].addr_code = -1;
1965#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
1966 env->phys_addends[3][i] = -1;
1967#endif
1968#endif
1969#endif
1970 }
1971
1972 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1973
1974#ifdef VBOX
1975 /* inform raw mode about TLB flush */
1976 remR3FlushTLB(env, flush_global);
1977#endif
1978#ifdef USE_KQEMU
1979 if (env->kqemu_enabled) {
1980 kqemu_flush(env, flush_global);
1981 }
1982#endif
1983 tlb_flush_count++;
1984}
1985
1986#ifndef VBOX
1987static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1988#else
1989DECLINLINE(void) tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1990#endif
1991{
1992 if (addr == (tlb_entry->addr_read &
1993 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1994 addr == (tlb_entry->addr_write &
1995 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1996 addr == (tlb_entry->addr_code &
1997 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1998 tlb_entry->addr_read = -1;
1999 tlb_entry->addr_write = -1;
2000 tlb_entry->addr_code = -1;
2001 }
2002}
2003
2004void tlb_flush_page(CPUState *env, target_ulong addr)
2005{
2006 int i;
2007
2008#if defined(DEBUG_TLB)
2009 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
2010#endif
2011 /* must reset current TB so that interrupts cannot modify the
2012 links while we are modifying them */
2013 env->current_tb = NULL;
2014
2015 addr &= TARGET_PAGE_MASK;
2016 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2017 tlb_flush_entry(&env->tlb_table[0][i], addr);
2018 tlb_flush_entry(&env->tlb_table[1][i], addr);
2019#if (NB_MMU_MODES >= 3)
2020 tlb_flush_entry(&env->tlb_table[2][i], addr);
2021#if (NB_MMU_MODES == 4)
2022 tlb_flush_entry(&env->tlb_table[3][i], addr);
2023#endif
2024#endif
2025
2026 tlb_flush_jmp_cache(env, addr);
2027
2028#ifdef USE_KQEMU
2029 if (env->kqemu_enabled) {
2030 kqemu_flush_page(env, addr);
2031 }
2032#endif
2033}
2034
2035 /* update the TLBs so that writes to code in the physical page 'ram_addr'
2036    can be detected */
2037static void tlb_protect_code(ram_addr_t ram_addr)
2038{
2039 cpu_physical_memory_reset_dirty(ram_addr,
2040 ram_addr + TARGET_PAGE_SIZE,
2041 CODE_DIRTY_FLAG);
2042#if defined(VBOX) && defined(REM_MONITOR_CODE_PAGES)
2043 /** @todo Retest this? This function has changed... */
2044 remR3ProtectCode(cpu_single_env, ram_addr);
2045#endif
2046}
2047
2048 /* update the TLB so that writes in the physical page 'ram_addr' are no longer
2049    tested for self-modifying code */
2050static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
2051 target_ulong vaddr)
2052{
2053#ifdef VBOX
2054 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2055#endif
2056 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
2057}
2058
2059#ifndef VBOX
2060static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
2061 unsigned long start, unsigned long length)
2062#else
2063DECLINLINE(void) tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
2064 unsigned long start, unsigned long length)
2065#endif
2066{
2067 unsigned long addr;
2068
2069#ifdef VBOX
2070 if (start & 3)
2071 return;
2072#endif
2073 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2074 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2075 if ((addr - start) < length) {
2076 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
2077 }
2078 }
2079}
2080
2081void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
2082 int dirty_flags)
2083{
2084 CPUState *env;
2085 unsigned long length, start1;
2086 int i, mask, len;
2087 uint8_t *p;
2088
2089 start &= TARGET_PAGE_MASK;
2090 end = TARGET_PAGE_ALIGN(end);
2091
2092 length = end - start;
2093 if (length == 0)
2094 return;
2095 len = length >> TARGET_PAGE_BITS;
2096#ifdef USE_KQEMU
2097 /* XXX: should not depend on cpu context */
2098 env = first_cpu;
2099 if (env->kqemu_enabled) {
2100 ram_addr_t addr;
2101 addr = start;
2102 for(i = 0; i < len; i++) {
2103 kqemu_set_notdirty(env, addr);
2104 addr += TARGET_PAGE_SIZE;
2105 }
2106 }
2107#endif
2108 mask = ~dirty_flags;
2109 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
2110#ifdef VBOX
2111 if (RT_LIKELY((start >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2112#endif
2113 for(i = 0; i < len; i++)
2114 p[i] &= mask;
2115
2116 /* we modify the TLB cache so that the dirty bit will be set again
2117 when accessing the range */
2118#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2119 start1 = start;
2120#elif !defined(VBOX)
2121 start1 = start + (unsigned long)phys_ram_base;
2122#else
2123 start1 = (unsigned long)remR3TlbGCPhys2Ptr(first_cpu, start, 1 /*fWritable*/); /** @todo this can be harmful with VBOX_WITH_NEW_PHYS_CODE, fix interface/whatever. */
2124#endif
2125 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2126 for(i = 0; i < CPU_TLB_SIZE; i++)
2127 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
2128 for(i = 0; i < CPU_TLB_SIZE; i++)
2129 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
2130#if (NB_MMU_MODES >= 3)
2131 for(i = 0; i < CPU_TLB_SIZE; i++)
2132 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
2133#if (NB_MMU_MODES == 4)
2134 for(i = 0; i < CPU_TLB_SIZE; i++)
2135 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
2136#endif
2137#endif
2138 }
2139}
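
/* Illustrative sketch (not built): how a display-style consumer of the dirty
 * bitmap might scan a RAM range and then clear its dirty state with the
 * function above. cpu_physical_memory_get_dirty() and VGA_DIRTY_FLAG are
 * assumed to be the usual cpu-all.h helpers; the framebuffer range is made up. */
#if 0
static void example_scan_and_reset_dirty(ram_addr_t fb_start, ram_addr_t fb_size)
{
    ram_addr_t addr;

    for (addr = fb_start; addr < fb_start + fb_size; addr += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_get_dirty(addr, VGA_DIRTY_FLAG)) {
            /* redraw whatever is backed by this guest page ... */
        }
    }
    /* clear only the display flag; CODE_DIRTY_FLAG stays with the
       self-modifying-code machinery */
    cpu_physical_memory_reset_dirty(fb_start, fb_start + fb_size, VGA_DIRTY_FLAG);
}
#endif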
2140
2141#ifndef VBOX
2142int cpu_physical_memory_set_dirty_tracking(int enable)
2143{
2144 in_migration = enable;
2145 return 0;
2146}
2147
2148int cpu_physical_memory_get_dirty_tracking(void)
2149{
2150 return in_migration;
2151}
2152#endif
2153
2154#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2155DECLINLINE(void) tlb_update_dirty(CPUTLBEntry *tlb_entry, target_phys_addr_t phys_addend)
2156#else
2157static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2158#endif
2159{
2160 ram_addr_t ram_addr;
2161
2162 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2163 /* RAM case */
2164#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2165 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2166#elif !defined(VBOX)
2167 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
2168 tlb_entry->addend - (unsigned long)phys_ram_base;
2169#else
2170 Assert(phys_addend != -1);
2171 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + phys_addend;
2172#endif
2173 if (!cpu_physical_memory_is_dirty(ram_addr)) {
2174 tlb_entry->addr_write |= TLB_NOTDIRTY;
2175 }
2176 }
2177}
2178
2179/* update the TLB according to the current state of the dirty bits */
2180void cpu_tlb_update_dirty(CPUState *env)
2181{
2182 int i;
2183#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2184 for(i = 0; i < CPU_TLB_SIZE; i++)
2185 tlb_update_dirty(&env->tlb_table[0][i], env->phys_addends[0][i]);
2186 for(i = 0; i < CPU_TLB_SIZE; i++)
2187 tlb_update_dirty(&env->tlb_table[1][i], env->phys_addends[1][i]);
2188#if (NB_MMU_MODES >= 3)
2189 for(i = 0; i < CPU_TLB_SIZE; i++)
2190 tlb_update_dirty(&env->tlb_table[2][i], env->phys_addends[2][i]);
2191#if (NB_MMU_MODES == 4)
2192 for(i = 0; i < CPU_TLB_SIZE; i++)
2193 tlb_update_dirty(&env->tlb_table[3][i], env->phys_addends[3][i]);
2194#endif
2195#endif
2196#else /* VBOX */
2197 for(i = 0; i < CPU_TLB_SIZE; i++)
2198 tlb_update_dirty(&env->tlb_table[0][i]);
2199 for(i = 0; i < CPU_TLB_SIZE; i++)
2200 tlb_update_dirty(&env->tlb_table[1][i]);
2201#if (NB_MMU_MODES >= 3)
2202 for(i = 0; i < CPU_TLB_SIZE; i++)
2203 tlb_update_dirty(&env->tlb_table[2][i]);
2204#if (NB_MMU_MODES == 4)
2205 for(i = 0; i < CPU_TLB_SIZE; i++)
2206 tlb_update_dirty(&env->tlb_table[3][i]);
2207#endif
2208#endif
2209#endif /* VBOX */
2210}
2211
2212#ifndef VBOX
2213static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2214#else
2215DECLINLINE(void) tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2216#endif
2217{
2218 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2219 tlb_entry->addr_write = vaddr;
2220}
2221
2222
2223/* update the TLB corresponding to virtual page vaddr and phys addr
2224 addr so that it is no longer dirty */
2225#ifndef VBOX
2226static inline void tlb_set_dirty(CPUState *env,
2227 unsigned long addr, target_ulong vaddr)
2228#else
2229DECLINLINE(void) tlb_set_dirty(CPUState *env,
2230 unsigned long addr, target_ulong vaddr)
2231#endif
2232{
2233 int i;
2234
2235 addr &= TARGET_PAGE_MASK;
2236 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2237 tlb_set_dirty1(&env->tlb_table[0][i], addr);
2238 tlb_set_dirty1(&env->tlb_table[1][i], addr);
2239#if (NB_MMU_MODES >= 3)
2240 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
2241#if (NB_MMU_MODES == 4)
2242 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
2243#endif
2244#endif
2245}
2246
2247/* add a new TLB entry. At most one entry for a given virtual address
2248 is permitted. Return 0 if OK or 2 if the page could not be mapped
2249 (can only happen in non SOFTMMU mode for I/O pages or pages
2250 conflicting with the host address space). */
2251int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2252 target_phys_addr_t paddr, int prot,
2253 int mmu_idx, int is_softmmu)
2254{
2255 PhysPageDesc *p;
2256 unsigned long pd;
2257 unsigned int index;
2258 target_ulong address;
2259 target_ulong code_address;
2260 target_phys_addr_t addend;
2261 int ret;
2262 CPUTLBEntry *te;
2263 int i;
2264 target_phys_addr_t iotlb;
2265#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2266 int read_mods = 0, write_mods = 0, code_mods = 0;
2267#endif
2268
2269 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2270 if (!p) {
2271 pd = IO_MEM_UNASSIGNED;
2272 } else {
2273 pd = p->phys_offset;
2274 }
2275#if defined(DEBUG_TLB)
2276 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2277 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2278#endif
2279
2280 ret = 0;
2281 address = vaddr;
2282 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2283 /* IO memory case (romd handled later) */
2284 address |= TLB_MMIO;
2285 }
2286#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2287 addend = pd & TARGET_PAGE_MASK;
2288#elif !defined(VBOX)
2289 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
2290#else
2291 /** @todo this is racing the phys_page_find call above since it may register
2292 * a new chunk of memory... */
2293 addend = (unsigned long)remR3TlbGCPhys2Ptr(env,
2294 pd & TARGET_PAGE_MASK,
2295 !!(prot & PAGE_WRITE));
2296#endif
2297
2298 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2299 /* Normal RAM. */
2300 iotlb = pd & TARGET_PAGE_MASK;
2301 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2302 iotlb |= IO_MEM_NOTDIRTY;
2303 else
2304 iotlb |= IO_MEM_ROM;
2305 } else {
2306         /* IO handlers are currently passed a physical address.
2307 It would be nice to pass an offset from the base address
2308 of that region. This would avoid having to special case RAM,
2309 and avoid full address decoding in every device.
2310 We can't use the high bits of pd for this because
2311 IO_MEM_ROMD uses these as a ram address. */
2312 iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
2313 }
2314
2315 code_address = address;
2316
2317#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2318 if (addend & 0x3)
2319 {
2320 if (addend & 0x2)
2321 {
2322 /* catch write */
2323 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
2324 write_mods |= TLB_MMIO;
2325 }
2326 else if (addend & 0x1)
2327 {
2328 /* catch all */
2329 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
2330 {
2331 read_mods |= TLB_MMIO;
2332 write_mods |= TLB_MMIO;
2333 code_mods |= TLB_MMIO;
2334 }
2335 }
2336 if ((iotlb & ~TARGET_PAGE_MASK) == 0)
2337 iotlb = env->pVM->rem.s.iHandlerMemType + paddr;
2338 addend &= ~(target_ulong)0x3;
2339 }
2340#endif
2341
2342 /* Make accesses to pages with watchpoints go via the
2343 watchpoint trap routines. */
2344 for (i = 0; i < env->nb_watchpoints; i++) {
2345 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
2346 iotlb = io_mem_watch + paddr;
2347 /* TODO: The memory case can be optimized by not trapping
2348 reads of pages with a write breakpoint. */
2349 address |= TLB_MMIO;
2350 }
2351 }
2352
2353 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2354 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2355 te = &env->tlb_table[mmu_idx][index];
2356 te->addend = addend - vaddr;
2357 if (prot & PAGE_READ) {
2358 te->addr_read = address;
2359 } else {
2360 te->addr_read = -1;
2361 }
2362
2363 if (prot & PAGE_EXEC) {
2364 te->addr_code = code_address;
2365 } else {
2366 te->addr_code = -1;
2367 }
2368 if (prot & PAGE_WRITE) {
2369 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2370 (pd & IO_MEM_ROMD)) {
2371 /* Write access calls the I/O callback. */
2372 te->addr_write = address | TLB_MMIO;
2373 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2374 !cpu_physical_memory_is_dirty(pd)) {
2375 te->addr_write = address | TLB_NOTDIRTY;
2376 } else {
2377 te->addr_write = address;
2378 }
2379 } else {
2380 te->addr_write = -1;
2381 }
2382
2383#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2384 if (prot & PAGE_READ)
2385 te->addr_read |= read_mods;
2386 if (prot & PAGE_EXEC)
2387 te->addr_code |= code_mods;
2388 if (prot & PAGE_WRITE)
2389 te->addr_write |= write_mods;
2390
2391 env->phys_addends[mmu_idx][index] = (pd & TARGET_PAGE_MASK)- vaddr;
2392#endif
2393
2394#ifdef VBOX
2395 /* inform raw mode about TLB page change */
2396 remR3FlushPage(env, vaddr);
2397#endif
2398 return ret;
2399}
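
/* Illustrative sketch (not built): the call a target MMU fault handler
 * typically makes once it has resolved 'vaddr' to a guest-physical 'paddr'
 * and computed the page protection. The surrounding handler and the
 * pte_writable flag are assumptions; only the tlb_set_page_exec() signature
 * comes from the code above. */
#if 0
    int prot = PAGE_READ | PAGE_EXEC;
    if (pte_writable)
        prot |= PAGE_WRITE;
    return tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                             paddr & TARGET_PAGE_MASK,
                             prot, mmu_idx, is_softmmu);
#endif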
2400#if 0
2401/* called from signal handler: invalidate the code and unprotect the
2402    page. Return TRUE if the fault was successfully handled. */
2403int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
2404{
2405#if !defined(CONFIG_SOFTMMU)
2406 VirtPageDesc *vp;
2407
2408#if defined(DEBUG_TLB)
2409 printf("page_unprotect: addr=0x%08x\n", addr);
2410#endif
2411 addr &= TARGET_PAGE_MASK;
2412
2413 /* if it is not mapped, no need to worry here */
2414 if (addr >= MMAP_AREA_END)
2415 return 0;
2416 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
2417 if (!vp)
2418 return 0;
2419 /* NOTE: in this case, validate_tag is _not_ tested as it
2420 validates only the code TLB */
2421 if (vp->valid_tag != virt_valid_tag)
2422 return 0;
2423 if (!(vp->prot & PAGE_WRITE))
2424 return 0;
2425#if defined(DEBUG_TLB)
2426 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
2427 addr, vp->phys_addr, vp->prot);
2428#endif
2429 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
2430 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
2431 (unsigned long)addr, vp->prot);
2432 /* set the dirty bit */
2433 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
2434 /* flush the code inside */
2435 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
2436 return 1;
2437#elif defined(VBOX)
2438 addr &= TARGET_PAGE_MASK;
2439
2440 /* if it is not mapped, no need to worry here */
2441 if (addr >= MMAP_AREA_END)
2442 return 0;
2443 return 1;
2444#else
2445 return 0;
2446#endif
2447}
2448#endif /* 0 */
2449
2450#else
2451
2452void tlb_flush(CPUState *env, int flush_global)
2453{
2454}
2455
2456void tlb_flush_page(CPUState *env, target_ulong addr)
2457{
2458}
2459
2460int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2461 target_phys_addr_t paddr, int prot,
2462 int mmu_idx, int is_softmmu)
2463{
2464 return 0;
2465}
2466
2467#ifndef VBOX
2468/* dump memory mappings */
2469void page_dump(FILE *f)
2470{
2471 unsigned long start, end;
2472 int i, j, prot, prot1;
2473 PageDesc *p;
2474
2475 fprintf(f, "%-8s %-8s %-8s %s\n",
2476 "start", "end", "size", "prot");
2477 start = -1;
2478 end = -1;
2479 prot = 0;
2480 for(i = 0; i <= L1_SIZE; i++) {
2481 if (i < L1_SIZE)
2482 p = l1_map[i];
2483 else
2484 p = NULL;
2485 for(j = 0;j < L2_SIZE; j++) {
2486 if (!p)
2487 prot1 = 0;
2488 else
2489 prot1 = p[j].flags;
2490 if (prot1 != prot) {
2491 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2492 if (start != -1) {
2493 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2494 start, end, end - start,
2495 prot & PAGE_READ ? 'r' : '-',
2496 prot & PAGE_WRITE ? 'w' : '-',
2497 prot & PAGE_EXEC ? 'x' : '-');
2498 }
2499 if (prot1 != 0)
2500 start = end;
2501 else
2502 start = -1;
2503 prot = prot1;
2504 }
2505 if (!p)
2506 break;
2507 }
2508 }
2509}
2510#endif /* !VBOX */
2511
2512int page_get_flags(target_ulong address)
2513{
2514 PageDesc *p;
2515
2516 p = page_find(address >> TARGET_PAGE_BITS);
2517 if (!p)
2518 return 0;
2519 return p->flags;
2520}
2521
2522/* modify the flags of a page and invalidate the code if
2523    necessary. The flag PAGE_WRITE_ORG is positioned automatically
2524 depending on PAGE_WRITE */
2525void page_set_flags(target_ulong start, target_ulong end, int flags)
2526{
2527 PageDesc *p;
2528 target_ulong addr;
2529
2530 start = start & TARGET_PAGE_MASK;
2531 end = TARGET_PAGE_ALIGN(end);
2532 if (flags & PAGE_WRITE)
2533 flags |= PAGE_WRITE_ORG;
2534#ifdef VBOX
2535 AssertMsgFailed(("We shouldn't be here, and if we should, we must have an env to do the proper locking!\n"));
2536#endif
2537 spin_lock(&tb_lock);
2538 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2539 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2540 /* if the write protection is set, then we invalidate the code
2541 inside */
2542 if (!(p->flags & PAGE_WRITE) &&
2543 (flags & PAGE_WRITE) &&
2544 p->first_tb) {
2545 tb_invalidate_phys_page(addr, 0, NULL);
2546 }
2547 p->flags = flags;
2548 }
2549 spin_unlock(&tb_lock);
2550}
2551
2552int page_check_range(target_ulong start, target_ulong len, int flags)
2553{
2554 PageDesc *p;
2555 target_ulong end;
2556 target_ulong addr;
2557
2558     end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2559 start = start & TARGET_PAGE_MASK;
2560
2561 if( end < start )
2562 /* we've wrapped around */
2563 return -1;
2564 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2565 p = page_find(addr >> TARGET_PAGE_BITS);
2566 if( !p )
2567 return -1;
2568 if( !(p->flags & PAGE_VALID) )
2569 return -1;
2570
2571 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2572 return -1;
2573 if (flags & PAGE_WRITE) {
2574 if (!(p->flags & PAGE_WRITE_ORG))
2575 return -1;
2576 /* unprotect the page if it was put read-only because it
2577 contains translated code */
2578 if (!(p->flags & PAGE_WRITE)) {
2579 if (!page_unprotect(addr, 0, NULL))
2580 return -1;
2581 }
2582 return 0;
2583 }
2584 }
2585 return 0;
2586}
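
/* Illustrative sketch (not built): how user-mode emulation code (for example
 * a target mmap/mprotect wrapper) might combine page_set_flags() and
 * page_check_range() above. 'guest_addr' and 'guest_len' are assumptions. */
#if 0
    page_set_flags(guest_addr, guest_addr + guest_len,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE);
    /* later, before emulating a store into that region: */
    if (page_check_range(guest_addr, guest_len, PAGE_WRITE) < 0) {
        /* deliver the target's access fault here */
    }
#endif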
2587
2588/* called from signal handler: invalidate the code and unprotect the
2589    page. Return TRUE if the fault was successfully handled. */
2590int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2591{
2592 unsigned int page_index, prot, pindex;
2593 PageDesc *p, *p1;
2594 target_ulong host_start, host_end, addr;
2595
2596 /* Technically this isn't safe inside a signal handler. However we
2597 know this only ever happens in a synchronous SEGV handler, so in
2598 practice it seems to be ok. */
2599 mmap_lock();
2600
2601 host_start = address & qemu_host_page_mask;
2602 page_index = host_start >> TARGET_PAGE_BITS;
2603 p1 = page_find(page_index);
2604 if (!p1) {
2605 mmap_unlock();
2606 return 0;
2607 }
2608 host_end = host_start + qemu_host_page_size;
2609 p = p1;
2610 prot = 0;
2611 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2612 prot |= p->flags;
2613 p++;
2614 }
2615 /* if the page was really writable, then we change its
2616 protection back to writable */
2617 if (prot & PAGE_WRITE_ORG) {
2618 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2619 if (!(p1[pindex].flags & PAGE_WRITE)) {
2620 mprotect((void *)g2h(host_start), qemu_host_page_size,
2621 (prot & PAGE_BITS) | PAGE_WRITE);
2622 p1[pindex].flags |= PAGE_WRITE;
2623 /* and since the content will be modified, we must invalidate
2624 the corresponding translated code. */
2625 tb_invalidate_phys_page(address, pc, puc);
2626#ifdef DEBUG_TB_CHECK
2627 tb_invalidate_check(address);
2628#endif
2629 mmap_unlock();
2630 return 1;
2631 }
2632 }
2633 mmap_unlock();
2634 return 0;
2635}
2636
2637static inline void tlb_set_dirty(CPUState *env,
2638 unsigned long addr, target_ulong vaddr)
2639{
2640}
2641#endif /* defined(CONFIG_USER_ONLY) */
2642
2643#if !defined(CONFIG_USER_ONLY)
2644static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2645 ram_addr_t memory);
2646static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2647 ram_addr_t orig_memory);
2648#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2649 need_subpage) \
2650 do { \
2651 if (addr > start_addr) \
2652 start_addr2 = 0; \
2653 else { \
2654 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2655 if (start_addr2 > 0) \
2656 need_subpage = 1; \
2657 } \
2658 \
2659 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2660 end_addr2 = TARGET_PAGE_SIZE - 1; \
2661 else { \
2662 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2663 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2664 need_subpage = 1; \
2665 } \
2666 } while (0)
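/* Worked example for the macro above (assuming 4 KiB target pages): for
   addr = 0x10000000, start_addr = 0x10000200 and orig_size = 0x400 it yields
   start_addr2 = 0x200 and end_addr2 = 0x5ff, and need_subpage is set because
   the registered region covers only part of that page. */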
2667
2668
2669/* register physical memory. 'size' must be a multiple of the target
2670 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2671 io memory page */
2672void cpu_register_physical_memory(target_phys_addr_t start_addr,
2673 unsigned long size,
2674 unsigned long phys_offset)
2675{
2676 target_phys_addr_t addr, end_addr;
2677 PhysPageDesc *p;
2678 CPUState *env;
2679 ram_addr_t orig_size = size;
2680 void *subpage;
2681
2682#ifdef USE_KQEMU
2683 /* XXX: should not depend on cpu context */
2684 env = first_cpu;
2685 if (env->kqemu_enabled) {
2686 kqemu_set_phys_mem(start_addr, size, phys_offset);
2687 }
2688#endif
2689 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2690 end_addr = start_addr + (target_phys_addr_t)size;
2691 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2692 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2693 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2694 ram_addr_t orig_memory = p->phys_offset;
2695 target_phys_addr_t start_addr2, end_addr2;
2696 int need_subpage = 0;
2697
2698 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2699 need_subpage);
2700 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2701 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2702 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2703 &p->phys_offset, orig_memory);
2704 } else {
2705 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2706 >> IO_MEM_SHIFT];
2707 }
2708 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2709 } else {
2710 p->phys_offset = phys_offset;
2711#if !defined(VBOX) || defined(VBOX_WITH_NEW_PHYS_CODE)
2712 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2713 (phys_offset & IO_MEM_ROMD))
2714#else
2715 if ( (phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM
2716 || (phys_offset & IO_MEM_ROMD)
2717 || (phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING)
2718#endif
2719 phys_offset += TARGET_PAGE_SIZE;
2720 }
2721 } else {
2722 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2723 p->phys_offset = phys_offset;
2724#if !defined(VBOX) || defined(VBOX_WITH_NEW_PHYS_CODE)
2725 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2726 (phys_offset & IO_MEM_ROMD))
2727#else
2728 if ( (phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM
2729 || (phys_offset & IO_MEM_ROMD)
2730 || (phys_offset & ~TARGET_PAGE_MASK) == IO_MEM_RAM_MISSING)
2731#endif
2732 phys_offset += TARGET_PAGE_SIZE;
2733 else {
2734 target_phys_addr_t start_addr2, end_addr2;
2735 int need_subpage = 0;
2736
2737 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2738 end_addr2, need_subpage);
2739
2740 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2741 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2742 &p->phys_offset, IO_MEM_UNASSIGNED);
2743 subpage_register(subpage, start_addr2, end_addr2,
2744 phys_offset);
2745 }
2746 }
2747 }
2748 }
2749 /* since each CPU stores ram addresses in its TLB cache, we must
2750 reset the modified entries */
2751 /* XXX: slow ! */
2752 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2753 tlb_flush(env, 1);
2754 }
2755}
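
/* Illustrative sketch (not built): how board setup code maps a freshly
 * allocated RAM block and a ROM image with the function above. The sizes and
 * guest-physical addresses are made up, and qemu_ram_alloc() is only
 * available in non-VBOX builds (see below). */
#if 0
static void example_map_board_memory(void)
{
    ram_addr_t ram_off = qemu_ram_alloc(0x800000);  /* 8 MiB of guest RAM */
    ram_addr_t rom_off = qemu_ram_alloc(0x20000);   /* 128 KiB of ROM */

    cpu_register_physical_memory(0x00000000, 0x800000, ram_off | IO_MEM_RAM);
    cpu_register_physical_memory(0xfffe0000, 0x20000, rom_off | IO_MEM_ROM);
}
#endif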
2756
2757/* XXX: temporary until new memory mapping API */
2758uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2759{
2760 PhysPageDesc *p;
2761
2762 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2763 if (!p)
2764 return IO_MEM_UNASSIGNED;
2765 return p->phys_offset;
2766}
2767
2768#ifndef VBOX
2769/* XXX: better than nothing */
2770ram_addr_t qemu_ram_alloc(ram_addr_t size)
2771{
2772 ram_addr_t addr;
2773 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2774 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2775 (uint64_t)size, (uint64_t)phys_ram_size);
2776 abort();
2777 }
2778 addr = phys_ram_alloc_offset;
2779 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2780 return addr;
2781}
2782
2783void qemu_ram_free(ram_addr_t addr)
2784{
2785}
2786#endif
2787
2788
2789static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2790{
2791#ifdef DEBUG_UNASSIGNED
2792 printf("Unassigned mem read 0x%08x\n", (int)addr);
2793#endif
2794#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2795 do_unassigned_access(addr, 0, 0, 0, 1);
2796#endif
2797 return 0;
2798}
2799
2800static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2801{
2802#ifdef DEBUG_UNASSIGNED
2803 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2804#endif
2805#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2806 do_unassigned_access(addr, 0, 0, 0, 2);
2807#endif
2808 return 0;
2809}
2810
2811static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2812{
2813#ifdef DEBUG_UNASSIGNED
2814 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2815#endif
2816#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2817 do_unassigned_access(addr, 0, 0, 0, 4);
2818#endif
2819 return 0;
2820}
2821
2822static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2823{
2824#ifdef DEBUG_UNASSIGNED
2825 printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
2826#endif
2827}
2828
2829static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2830{
2831#ifdef DEBUG_UNASSIGNED
2832 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2833#endif
2834#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2835 do_unassigned_access(addr, 1, 0, 0, 2);
2836#endif
2837}
2838
2839static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2840{
2841#ifdef DEBUG_UNASSIGNED
2842 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2843#endif
2844#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2845 do_unassigned_access(addr, 1, 0, 0, 4);
2846#endif
2847}
2848static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2849 unassigned_mem_readb,
2850 unassigned_mem_readw,
2851 unassigned_mem_readl,
2852};
2853
2854static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2855 unassigned_mem_writeb,
2856 unassigned_mem_writew,
2857 unassigned_mem_writel,
2858};
2859
2860static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2861{
2862 unsigned long ram_addr;
2863 int dirty_flags;
2864#if defined(VBOX)
2865 ram_addr = addr;
2866 #else
2867 ram_addr = addr - (unsigned long)phys_ram_base;
2868#endif
2869#ifdef VBOX
2870 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2871 dirty_flags = 0xff;
2872 else
2873#endif /* VBOX */
2874 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2875 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2876#if !defined(CONFIG_USER_ONLY)
2877 tb_invalidate_phys_page_fast(ram_addr, 1);
2878# ifdef VBOX
2879 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2880 dirty_flags = 0xff;
2881 else
2882# endif /* VBOX */
2883 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2884#endif
2885 }
2886#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2887 remR3PhysWriteU8(addr, val);
2888#else
2889 stb_p((uint8_t *)(long)addr, val);
2890#endif
2891#ifdef USE_KQEMU
2892 if (cpu_single_env->kqemu_enabled &&
2893 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2894 kqemu_modify_page(cpu_single_env, ram_addr);
2895#endif
2896 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2897#ifdef VBOX
2898 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2899 #endif /* VBOX */
2900 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2901 /* we remove the notdirty callback only if the code has been
2902 flushed */
2903 if (dirty_flags == 0xff)
2904 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
2905}
2906
2907static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2908{
2909 unsigned long ram_addr;
2910 int dirty_flags;
2911#if defined(VBOX)
2912 ram_addr = addr;
2913#else
2914 ram_addr = addr - (unsigned long)phys_ram_base;
2915#endif
2916#ifdef VBOX
2917 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2918 dirty_flags = 0xff;
2919 else
2920#endif /* VBOX */
2921 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2922 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2923#if !defined(CONFIG_USER_ONLY)
2924 tb_invalidate_phys_page_fast(ram_addr, 2);
2925# ifdef VBOX
2926 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2927 dirty_flags = 0xff;
2928 else
2929# endif /* VBOX */
2930 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2931#endif
2932 }
2933#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2934 remR3PhysWriteU16(addr, val);
2935#else
2936 stw_p((uint8_t *)(long)addr, val);
2937#endif
2938
2939#ifdef USE_KQEMU
2940 if (cpu_single_env->kqemu_enabled &&
2941 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2942 kqemu_modify_page(cpu_single_env, ram_addr);
2943#endif
2944 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2945#ifdef VBOX
2946 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2947#endif
2948 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2949 /* we remove the notdirty callback only if the code has been
2950 flushed */
2951 if (dirty_flags == 0xff)
2952 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
2953}
2954
2955static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2956{
2957 unsigned long ram_addr;
2958 int dirty_flags;
2959#if defined(VBOX)
2960 ram_addr = addr;
2961#else
2962 ram_addr = addr - (unsigned long)phys_ram_base;
2963#endif
2964#ifdef VBOX
2965 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2966 dirty_flags = 0xff;
2967 else
2968#endif /* VBOX */
2969 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2970 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2971#if !defined(CONFIG_USER_ONLY)
2972 tb_invalidate_phys_page_fast(ram_addr, 4);
2973# ifdef VBOX
2974 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2975 dirty_flags = 0xff;
2976 else
2977# endif /* VBOX */
2978 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2979#endif
2980 }
2981#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2982 remR3PhysWriteU32(addr, val);
2983#else
2984 stl_p((uint8_t *)(long)addr, val);
2985#endif
2986#ifdef USE_KQEMU
2987 if (cpu_single_env->kqemu_enabled &&
2988 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2989 kqemu_modify_page(cpu_single_env, ram_addr);
2990#endif
2991 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2992#ifdef VBOX
2993 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2994#endif
2995 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2996 /* we remove the notdirty callback only if the code has been
2997 flushed */
2998 if (dirty_flags == 0xff)
2999 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
3000}
3001
3002static CPUReadMemoryFunc *error_mem_read[3] = {
3003 NULL, /* never used */
3004 NULL, /* never used */
3005 NULL, /* never used */
3006};
3007
3008static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
3009 notdirty_mem_writeb,
3010 notdirty_mem_writew,
3011 notdirty_mem_writel,
3012};
3013
3014
3015/* Generate a debug exception if a watchpoint has been hit. */
3016static void check_watchpoint(int offset, int flags)
3017{
3018 CPUState *env = cpu_single_env;
3019 target_ulong vaddr;
3020 int i;
3021
3022 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
3023 for (i = 0; i < env->nb_watchpoints; i++) {
3024 if (vaddr == env->watchpoint[i].vaddr
3025 && (env->watchpoint[i].type & flags)) {
3026 env->watchpoint_hit = i + 1;
3027 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3028 break;
3029 }
3030 }
3031}
3032
3033/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3034 so these check for a hit then pass through to the normal out-of-line
3035 phys routines. */
3036static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
3037{
3038 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
3039 return ldub_phys(addr);
3040}
3041
3042static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
3043{
3044 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
3045 return lduw_phys(addr);
3046}
3047
3048static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
3049{
3050 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
3051 return ldl_phys(addr);
3052}
3053
3054static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
3055 uint32_t val)
3056{
3057 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
3058 stb_phys(addr, val);
3059}
3060
3061static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
3062 uint32_t val)
3063{
3064 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
3065 stw_phys(addr, val);
3066}
3067
3068static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
3069 uint32_t val)
3070{
3071 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
3072 stl_phys(addr, val);
3073}
3074
3075static CPUReadMemoryFunc *watch_mem_read[3] = {
3076 watch_mem_readb,
3077 watch_mem_readw,
3078 watch_mem_readl,
3079};
3080
3081static CPUWriteMemoryFunc *watch_mem_write[3] = {
3082 watch_mem_writeb,
3083 watch_mem_writew,
3084 watch_mem_writel,
3085};
3086
3087static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
3088 unsigned int len)
3089{
3090 uint32_t ret;
3091 unsigned int idx;
3092
3093 idx = SUBPAGE_IDX(addr - mmio->base);
3094#if defined(DEBUG_SUBPAGE)
3095 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3096 mmio, len, addr, idx);
3097#endif
3098 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
3099
3100 return ret;
3101}
3102
3103static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
3104 uint32_t value, unsigned int len)
3105{
3106 unsigned int idx;
3107
3108 idx = SUBPAGE_IDX(addr - mmio->base);
3109#if defined(DEBUG_SUBPAGE)
3110 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
3111 mmio, len, addr, idx, value);
3112#endif
3113 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
3114}
3115
3116static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
3117{
3118#if defined(DEBUG_SUBPAGE)
3119 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3120#endif
3121
3122 return subpage_readlen(opaque, addr, 0);
3123}
3124
3125static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3126 uint32_t value)
3127{
3128#if defined(DEBUG_SUBPAGE)
3129 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3130#endif
3131 subpage_writelen(opaque, addr, value, 0);
3132}
3133
3134static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3135{
3136#if defined(DEBUG_SUBPAGE)
3137 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3138#endif
3139
3140 return subpage_readlen(opaque, addr, 1);
3141}
3142
3143static void subpage_writew (void *opaque, target_phys_addr_t addr,
3144 uint32_t value)
3145{
3146#if defined(DEBUG_SUBPAGE)
3147 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3148#endif
3149 subpage_writelen(opaque, addr, value, 1);
3150}
3151
3152static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3153{
3154#if defined(DEBUG_SUBPAGE)
3155 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3156#endif
3157
3158 return subpage_readlen(opaque, addr, 2);
3159}
3160
3161static void subpage_writel (void *opaque,
3162 target_phys_addr_t addr, uint32_t value)
3163{
3164#if defined(DEBUG_SUBPAGE)
3165 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3166#endif
3167 subpage_writelen(opaque, addr, value, 2);
3168}
3169
3170static CPUReadMemoryFunc *subpage_read[] = {
3171 &subpage_readb,
3172 &subpage_readw,
3173 &subpage_readl,
3174};
3175
3176static CPUWriteMemoryFunc *subpage_write[] = {
3177 &subpage_writeb,
3178 &subpage_writew,
3179 &subpage_writel,
3180};
3181
3182static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3183 ram_addr_t memory)
3184{
3185 int idx, eidx;
3186 unsigned int i;
3187
3188 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3189 return -1;
3190 idx = SUBPAGE_IDX(start);
3191 eidx = SUBPAGE_IDX(end);
3192#if defined(DEBUG_SUBPAGE)
3193 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
3194 mmio, start, end, idx, eidx, memory);
3195#endif
3196 memory >>= IO_MEM_SHIFT;
3197 for (; idx <= eidx; idx++) {
3198 for (i = 0; i < 4; i++) {
3199 if (io_mem_read[memory][i]) {
3200 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
3201 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
3202 }
3203 if (io_mem_write[memory][i]) {
3204 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
3205 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
3206 }
3207 }
3208 }
3209
3210 return 0;
3211}
3212
3213static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3214 ram_addr_t orig_memory)
3215{
3216 subpage_t *mmio;
3217 int subpage_memory;
3218
3219 mmio = qemu_mallocz(sizeof(subpage_t));
3220 if (mmio != NULL) {
3221 mmio->base = base;
3222 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
3223#if defined(DEBUG_SUBPAGE)
3224 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3225 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3226#endif
3227 *phys = subpage_memory | IO_MEM_SUBPAGE;
3228 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
3229 }
3230
3231 return mmio;
3232}
3233
3234static void io_mem_init(void)
3235{
3236 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
3237 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3238 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
3239#if defined(VBOX) && !defined(VBOX_WITH_NEW_PHYS_CODE)
3240 cpu_register_io_memory(IO_MEM_RAM_MISSING >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3241 io_mem_nb = 6;
3242#else
3243 io_mem_nb = 5;
3244#endif
3245
3246 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
3247 watch_mem_write, NULL);
3248
3249#ifndef VBOX /* VBOX: we do this later when the RAM is allocated. */
3250 /* alloc dirty bits array */
3251 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3252 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
3253#endif /* !VBOX */
3254}
3255
3256/* mem_read and mem_write are arrays of functions containing the
3257 function to access byte (index 0), word (index 1) and dword (index
3258 2). Functions can be omitted with a NULL function pointer. The
3259 registered functions may be modified dynamically later.
3260    If io_index is non-zero, the corresponding io zone is
3261    modified. If it is zero, a new io zone is allocated. The return
3262    value can be used with cpu_register_physical_memory(). (-1) is
3263    returned on error. */
3264int cpu_register_io_memory(int io_index,
3265 CPUReadMemoryFunc **mem_read,
3266 CPUWriteMemoryFunc **mem_write,
3267 void *opaque)
3268{
3269 int i, subwidth = 0;
3270
3271 if (io_index <= 0) {
3272 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
3273 return -1;
3274 io_index = io_mem_nb++;
3275 } else {
3276 if (io_index >= IO_MEM_NB_ENTRIES)
3277 return -1;
3278 }
3279
3280 for(i = 0;i < 3; i++) {
3281 if (!mem_read[i] || !mem_write[i])
3282 subwidth = IO_MEM_SUBWIDTH;
3283 io_mem_read[io_index][i] = mem_read[i];
3284 io_mem_write[io_index][i] = mem_write[i];
3285 }
3286 io_mem_opaque[io_index] = opaque;
3287 return (io_index << IO_MEM_SHIFT) | subwidth;
3288}
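
/* Illustrative sketch (not built): wiring a hypothetical MMIO device into the
 * dispatch tables via the function above. The my_dev_* callbacks and
 * MY_DEV_BASE/MY_DEV_SIZE are assumptions; the returned token is passed
 * straight to cpu_register_physical_memory() as the phys_offset. */
#if 0
static CPUReadMemoryFunc *my_dev_read[3] = {
    my_dev_readb, my_dev_readw, my_dev_readl,
};
static CPUWriteMemoryFunc *my_dev_write[3] = {
    my_dev_writeb, my_dev_writew, my_dev_writel,
};

static void example_map_my_dev(void *dev_state)
{
    int iomemtype = cpu_register_io_memory(0, my_dev_read, my_dev_write,
                                           dev_state);
    cpu_register_physical_memory(MY_DEV_BASE, MY_DEV_SIZE, iomemtype);
}
#endif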
3289
3290CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
3291{
3292 return io_mem_write[io_index >> IO_MEM_SHIFT];
3293}
3294
3295CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
3296{
3297 return io_mem_read[io_index >> IO_MEM_SHIFT];
3298}
3299#endif /* !defined(CONFIG_USER_ONLY) */
3300
3301/* physical memory access (slow version, mainly for debug) */
3302#if defined(CONFIG_USER_ONLY)
3303void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3304 int len, int is_write)
3305{
3306 int l, flags;
3307 target_ulong page;
3308 void * p;
3309
3310 while (len > 0) {
3311 page = addr & TARGET_PAGE_MASK;
3312 l = (page + TARGET_PAGE_SIZE) - addr;
3313 if (l > len)
3314 l = len;
3315 flags = page_get_flags(page);
3316 if (!(flags & PAGE_VALID))
3317 return;
3318 if (is_write) {
3319 if (!(flags & PAGE_WRITE))
3320 return;
3321 /* XXX: this code should not depend on lock_user */
3322 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3323 /* FIXME - should this return an error rather than just fail? */
3324 return;
3325             memcpy(p, buf, l);
3326             unlock_user(p, addr, l);
3327 } else {
3328 if (!(flags & PAGE_READ))
3329 return;
3330 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3331 /* FIXME - should this return an error rather than just fail? */
3332 return;
3333             memcpy(buf, p, l);
3334 unlock_user(p, addr, 0);
3335 }
3336 len -= l;
3337 buf += l;
3338 addr += l;
3339 }
3340}
3341
3342#else
3343void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3344 int len, int is_write)
3345{
3346 int l, io_index;
3347 uint8_t *ptr;
3348 uint32_t val;
3349 target_phys_addr_t page;
3350 unsigned long pd;
3351 PhysPageDesc *p;
3352
3353 while (len > 0) {
3354 page = addr & TARGET_PAGE_MASK;
3355 l = (page + TARGET_PAGE_SIZE) - addr;
3356 if (l > len)
3357 l = len;
3358 p = phys_page_find(page >> TARGET_PAGE_BITS);
3359 if (!p) {
3360 pd = IO_MEM_UNASSIGNED;
3361 } else {
3362 pd = p->phys_offset;
3363 }
3364
3365 if (is_write) {
3366 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3367 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3368 /* XXX: could force cpu_single_env to NULL to avoid
3369 potential bugs */
3370 if (l >= 4 && ((addr & 3) == 0)) {
3371 /* 32 bit write access */
3372#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3373 val = ldl_p(buf);
3374#else
3375 val = *(const uint32_t *)buf;
3376#endif
3377 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3378 l = 4;
3379 } else if (l >= 2 && ((addr & 1) == 0)) {
3380 /* 16 bit write access */
3381#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3382 val = lduw_p(buf);
3383#else
3384 val = *(const uint16_t *)buf;
3385#endif
3386 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
3387 l = 2;
3388 } else {
3389 /* 8 bit write access */
3390#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3391 val = ldub_p(buf);
3392#else
3393 val = *(const uint8_t *)buf;
3394#endif
3395 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
3396 l = 1;
3397 }
3398 } else {
3399 unsigned long addr1;
3400 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3401 /* RAM case */
3402#ifdef VBOX
3403 remR3PhysWrite(addr1, buf, l); NOREF(ptr);
3404#else
3405 ptr = phys_ram_base + addr1;
3406 memcpy(ptr, buf, l);
3407#endif
3408 if (!cpu_physical_memory_is_dirty(addr1)) {
3409 /* invalidate code */
3410 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3411 /* set dirty bit */
3412#ifdef VBOX
3413 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
3414#endif
3415 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3416 (0xff & ~CODE_DIRTY_FLAG);
3417 }
3418 }
3419 } else {
3420 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3421 !(pd & IO_MEM_ROMD)) {
3422 /* I/O case */
3423 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3424 if (l >= 4 && ((addr & 3) == 0)) {
3425 /* 32 bit read access */
3426 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3427#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3428 stl_p(buf, val);
3429#else
3430 *(uint32_t *)buf = val;
3431#endif
3432 l = 4;
3433 } else if (l >= 2 && ((addr & 1) == 0)) {
3434 /* 16 bit read access */
3435 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
3436#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3437 stw_p(buf, val);
3438#else
3439 *(uint16_t *)buf = val;
3440#endif
3441 l = 2;
3442 } else {
3443 /* 8 bit read access */
3444 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
3445#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3446 stb_p(buf, val);
3447#else
3448 *(uint8_t *)buf = val;
3449#endif
3450 l = 1;
3451 }
3452 } else {
3453 /* RAM case */
3454#ifdef VBOX
3455 remR3PhysRead((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), buf, l); NOREF(ptr);
3456#else
3457 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3458 (addr & ~TARGET_PAGE_MASK);
3459 memcpy(buf, ptr, l);
3460#endif
3461 }
3462 }
3463 len -= l;
3464 buf += l;
3465 addr += l;
3466 }
3467}
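
/* Illustrative sketch (not built): a DMA-capable device model using the slow
 * path above (through the usual cpu_physical_memory_read/write wrappers) to
 * fetch a descriptor and store a payload. 'desc_gpa', 'data_gpa', 'payload'
 * and 'payload_len' are assumptions. */
#if 0
    uint8_t desc[16];
    cpu_physical_memory_read(desc_gpa, desc, sizeof(desc));
    cpu_physical_memory_write(data_gpa, payload, payload_len);
#endif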
3468
3469#ifndef VBOX
3470/* used for ROM loading : can write in RAM and ROM */
3471void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3472 const uint8_t *buf, int len)
3473{
3474 int l;
3475 uint8_t *ptr;
3476 target_phys_addr_t page;
3477 unsigned long pd;
3478 PhysPageDesc *p;
3479
3480 while (len > 0) {
3481 page = addr & TARGET_PAGE_MASK;
3482 l = (page + TARGET_PAGE_SIZE) - addr;
3483 if (l > len)
3484 l = len;
3485 p = phys_page_find(page >> TARGET_PAGE_BITS);
3486 if (!p) {
3487 pd = IO_MEM_UNASSIGNED;
3488 } else {
3489 pd = p->phys_offset;
3490 }
3491
3492 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3493 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3494 !(pd & IO_MEM_ROMD)) {
3495 /* do nothing */
3496 } else {
3497 unsigned long addr1;
3498 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3499 /* ROM/RAM case */
3500 ptr = phys_ram_base + addr1;
3501 memcpy(ptr, buf, l);
3502 }
3503 len -= l;
3504 buf += l;
3505 addr += l;
3506 }
3507}
3508#endif /* !VBOX */
3509
3510
3511/* warning: addr must be aligned */
3512uint32_t ldl_phys(target_phys_addr_t addr)
3513{
3514 int io_index;
3515 uint8_t *ptr;
3516 uint32_t val;
3517 unsigned long pd;
3518 PhysPageDesc *p;
3519
3520 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3521 if (!p) {
3522 pd = IO_MEM_UNASSIGNED;
3523 } else {
3524 pd = p->phys_offset;
3525 }
3526
3527 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3528 !(pd & IO_MEM_ROMD)) {
3529 /* I/O case */
3530 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3531 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3532 } else {
3533 /* RAM case */
3534#ifndef VBOX
3535 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3536 (addr & ~TARGET_PAGE_MASK);
3537 val = ldl_p(ptr);
3538#else
3539 val = remR3PhysReadU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
3540#endif
3541 }
3542 return val;
3543}
3544
3545/* warning: addr must be aligned */
3546uint64_t ldq_phys(target_phys_addr_t addr)
3547{
3548 int io_index;
3549 uint8_t *ptr;
3550 uint64_t val;
3551 unsigned long pd;
3552 PhysPageDesc *p;
3553
3554 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3555 if (!p) {
3556 pd = IO_MEM_UNASSIGNED;
3557 } else {
3558 pd = p->phys_offset;
3559 }
3560
3561 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3562 !(pd & IO_MEM_ROMD)) {
3563 /* I/O case */
3564 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3565#ifdef TARGET_WORDS_BIGENDIAN
3566 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3567 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3568#else
3569 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3570 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3571#endif
3572 } else {
3573 /* RAM case */
3574#ifndef VBOX
3575 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3576 (addr & ~TARGET_PAGE_MASK);
3577 val = ldq_p(ptr);
3578#else
3579 val = remR3PhysReadU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
3580#endif
3581 }
3582 return val;
3583}
3584
3585/* XXX: optimize */
3586uint32_t ldub_phys(target_phys_addr_t addr)
3587{
3588 uint8_t val;
3589 cpu_physical_memory_read(addr, &val, 1);
3590 return val;
3591}
3592
3593/* XXX: optimize */
3594uint32_t lduw_phys(target_phys_addr_t addr)
3595{
3596 uint16_t val;
3597 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3598 return tswap16(val);
3599}
3600
3601 /* warning: addr must be aligned. The ram page is not marked as dirty
3602 and the code inside is not invalidated. It is useful if the dirty
3603 bits are used to track modified PTEs */
3604void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3605{
3606 int io_index;
3607 uint8_t *ptr;
3608 unsigned long pd;
3609 PhysPageDesc *p;
3610
3611 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3612 if (!p) {
3613 pd = IO_MEM_UNASSIGNED;
3614 } else {
3615 pd = p->phys_offset;
3616 }
3617
3618 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3619 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3620 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3621     } else {
3622 #ifndef VBOX
3623         unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3624         ptr = phys_ram_base + addr1;
3625         stl_p(ptr, val);
3626#else
3627 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3628#endif
3629#ifndef VBOX
3630 if (unlikely(in_migration)) {
3631 if (!cpu_physical_memory_is_dirty(addr1)) {
3632 /* invalidate code */
3633 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3634 /* set dirty bit */
3635 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3636 (0xff & ~CODE_DIRTY_FLAG);
3637 }
3638 }
3639#endif
3640 }
3641}
3642
3643void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3644{
3645 int io_index;
3646 uint8_t *ptr;
3647 unsigned long pd;
3648 PhysPageDesc *p;
3649
3650 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3651 if (!p) {
3652 pd = IO_MEM_UNASSIGNED;
3653 } else {
3654 pd = p->phys_offset;
3655 }
3656
3657 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3658 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3659#ifdef TARGET_WORDS_BIGENDIAN
3660 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3661 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3662#else
3663 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3664 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3665#endif
3666 } else {
3667#ifndef VBOX
3668 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3669 (addr & ~TARGET_PAGE_MASK);
3670 stq_p(ptr, val);
3671#else
3672 remR3PhysWriteU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3673#endif
3674 }
3675}
3676
3677
3678/* warning: addr must be aligned */
3679void stl_phys(target_phys_addr_t addr, uint32_t val)
3680{
3681 int io_index;
3682 uint8_t *ptr;
3683 unsigned long pd;
3684 PhysPageDesc *p;
3685
3686 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3687 if (!p) {
3688 pd = IO_MEM_UNASSIGNED;
3689 } else {
3690 pd = p->phys_offset;
3691 }
3692
3693 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3694 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3695 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3696 } else {
3697 unsigned long addr1;
3698 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3699 /* RAM case */
3700#ifndef VBOX
3701 ptr = phys_ram_base + addr1;
3702 stl_p(ptr, val);
3703#else
3704 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3705#endif
3706 if (!cpu_physical_memory_is_dirty(addr1)) {
3707 /* invalidate code */
3708 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3709 /* set dirty bit */
3710#ifdef VBOX
3711 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
3712#endif
3713 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3714 (0xff & ~CODE_DIRTY_FLAG);
3715 }
3716 }
3717}
3718
3719/* XXX: optimize */
3720void stb_phys(target_phys_addr_t addr, uint32_t val)
3721{
3722 uint8_t v = val;
3723 cpu_physical_memory_write(addr, &v, 1);
3724}
3725
3726/* XXX: optimize */
3727void stw_phys(target_phys_addr_t addr, uint32_t val)
3728{
3729 uint16_t v = tswap16(val);
3730 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3731}
3732
3733/* XXX: optimize */
3734void stq_phys(target_phys_addr_t addr, uint64_t val)
3735{
3736 val = tswap64(val);
3737 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3738}
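
/* Illustrative sketch (not built): the accessors above are convenient for
 * touching individual guest-physical words, e.g. updating a page-table style
 * entry. 'pte_gpa' and the bit layout are assumptions; stl_phys_notdirty()
 * deliberately skips the dirty/SMC bookkeeping as documented above. */
#if 0
    uint32_t pte = ldl_phys(pte_gpa);        /* 32-bit guest-physical read */
    stl_phys_notdirty(pte_gpa, pte | 0x20);  /* write back without dirtying */
    stw_phys(pte_gpa + 8, 0x1234);           /* 16-bit store in target order */
#endif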
3739
3740#endif
3741
3742/* virtual memory access for debug */
3743int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3744 uint8_t *buf, int len, int is_write)
3745{
3746 int l;
3747 target_ulong page, phys_addr;
3748
3749 while (len > 0) {
3750 page = addr & TARGET_PAGE_MASK;
3751 phys_addr = cpu_get_phys_page_debug(env, page);
3752 /* if no physical page mapped, return an error */
3753 if (phys_addr == -1)
3754 return -1;
3755 l = (page + TARGET_PAGE_SIZE) - addr;
3756 if (l > len)
3757 l = len;
3758 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3759 buf, l, is_write);
3760 len -= l;
3761 buf += l;
3762 addr += l;
3763 }
3764 return 0;
3765}
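
/* Illustrative sketch (not built): how a debugger stub might read guest
 * virtual memory through the helper above; 'env' and 'gva' are supplied by
 * the (assumed) caller. */
#if 0
    uint8_t buf[16];
    if (cpu_memory_rw_debug(env, gva, buf, sizeof(buf), 0) < 0) {
        /* no physical page mapped at 'gva'; report an error to the client */
    }
#endif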
3766
3767/* in deterministic execution mode, instructions doing device I/Os
3768 must be at the end of the TB */
3769void cpu_io_recompile(CPUState *env, void *retaddr)
3770{
3771 TranslationBlock *tb;
3772 uint32_t n, cflags;
3773 target_ulong pc, cs_base;
3774 uint64_t flags;
3775
3776 tb = tb_find_pc((unsigned long)retaddr);
3777 if (!tb) {
3778 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3779 retaddr);
3780 }
3781 n = env->icount_decr.u16.low + tb->icount;
3782 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3783 /* Calculate how many instructions had been executed before the fault
3784 occurred. */
3785 n = n - env->icount_decr.u16.low;
3786 /* Generate a new TB ending on the I/O insn. */
3787 n++;
3788 /* On MIPS and SH, delay slot instructions can only be restarted if
3789 they were already the first instruction in the TB. If this is not
3790 the first instruction in a TB then re-execute the preceding
3791 branch. */
3792#if defined(TARGET_MIPS)
3793 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3794 env->active_tc.PC -= 4;
3795 env->icount_decr.u16.low++;
3796 env->hflags &= ~MIPS_HFLAG_BMASK;
3797 }
3798#elif defined(TARGET_SH4)
3799 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3800 && n > 1) {
3801 env->pc -= 2;
3802 env->icount_decr.u16.low++;
3803 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3804 }
3805#endif
3806 /* This should never happen. */
3807 if (n > CF_COUNT_MASK)
3808 cpu_abort(env, "TB too big during recompile");
3809
3810 cflags = n | CF_LAST_IO;
3811 pc = tb->pc;
3812 cs_base = tb->cs_base;
3813 flags = tb->flags;
3814 tb_phys_invalidate(tb, -1);
3815 /* FIXME: In theory this could raise an exception. In practice
3816 we have already translated the block once so it's probably ok. */
3817 tb_gen_code(env, pc, cs_base, flags, cflags);
3818 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3819 the first in the TB) then we end up generating a whole new TB and
3820 repeating the fault, which is horribly inefficient.
3821 Better would be to execute just this insn uncached, or generate a
3822 second new TB. */
3823 cpu_resume_from_signal(env, NULL);
3824}
3825
3826#ifndef VBOX
3827void dump_exec_info(FILE *f,
3828 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3829{
3830 int i, target_code_size, max_target_code_size;
3831 int direct_jmp_count, direct_jmp2_count, cross_page;
3832 TranslationBlock *tb;
3833
3834 target_code_size = 0;
3835 max_target_code_size = 0;
3836 cross_page = 0;
3837 direct_jmp_count = 0;
3838 direct_jmp2_count = 0;
3839 for(i = 0; i < nb_tbs; i++) {
3840 tb = &tbs[i];
3841 target_code_size += tb->size;
3842 if (tb->size > max_target_code_size)
3843 max_target_code_size = tb->size;
3844 if (tb->page_addr[1] != -1)
3845 cross_page++;
3846 if (tb->tb_next_offset[0] != 0xffff) {
3847 direct_jmp_count++;
3848 if (tb->tb_next_offset[1] != 0xffff) {
3849 direct_jmp2_count++;
3850 }
3851 }
3852 }
3853 /* XXX: avoid using doubles ? */
3854 cpu_fprintf(f, "Translation buffer state:\n");
3855 cpu_fprintf(f, "gen code size %ld/%ld\n",
3856 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3857 cpu_fprintf(f, "TB count %d/%d\n",
3858 nb_tbs, code_gen_max_blocks);
3859 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3860 nb_tbs ? target_code_size / nb_tbs : 0,
3861 max_target_code_size);
3862 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3863 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3864 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3865 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3866 cross_page,
3867 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3868 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3869 direct_jmp_count,
3870 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3871 direct_jmp2_count,
3872 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3873 cpu_fprintf(f, "\nStatistics:\n");
3874 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3875 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3876 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3877 tcg_dump_info(f, cpu_fprintf);
3878}
3879#endif /* !VBOX */
3880
3881#if !defined(CONFIG_USER_ONLY)
3882
3883#define MMUSUFFIX _cmmu
3884#define GETPC() NULL
3885#define env cpu_single_env
3886#define SOFTMMU_CODE_ACCESS
3887
3888#define SHIFT 0
3889#include "softmmu_template.h"
3890
3891#define SHIFT 1
3892#include "softmmu_template.h"
3893
3894#define SHIFT 2
3895#include "softmmu_template.h"
3896
3897#define SHIFT 3
3898#include "softmmu_template.h"
3899
3900#undef env
3901
3902#endif