VirtualBox

source: vbox/trunk/src/recompiler/exec.c@36175

Last change on this file since 36175 was 36175, checked in by vboxsync, 14 years ago

rem: Synced up to v0.11.1 (35bfc7324e2e6946c4113ada5db30553a1a7c40b) from git://git.savannah.nongnu.org/qemu.git.

  • Property svn:eol-style set to native
File size: 124.6 KB
1/*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20/*
21 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
22 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
23 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
24 * a choice of LGPL license versions is made available with the language indicating
25 * that LGPLv2 or any later version may be used, or where a choice of which version
26 * of the LGPL is applied is otherwise unspecified.
27 */
28
29#include "config.h"
30#ifndef VBOX
31#ifdef _WIN32
32#include <windows.h>
33#else
34#include <sys/types.h>
35#include <sys/mman.h>
36#endif
37#include <stdlib.h>
38#include <stdio.h>
39#include <stdarg.h>
40#include <string.h>
41#include <errno.h>
42#include <unistd.h>
43#include <inttypes.h>
44#else /* VBOX */
45# include <stdlib.h>
46# include <stdio.h>
47# include <iprt/alloc.h>
48# include <iprt/string.h>
49# include <iprt/param.h>
50# include <VBox/vmm/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */
51#endif /* VBOX */
52
53#include "cpu.h"
54#include "exec-all.h"
55#include "qemu-common.h"
56#include "tcg.h"
57#ifndef VBOX
58#include "hw/hw.h"
59#endif
60#include "osdep.h"
61#include "kvm.h"
62#if defined(CONFIG_USER_ONLY)
63#include <qemu.h>
64#endif
65
66//#define DEBUG_TB_INVALIDATE
67//#define DEBUG_FLUSH
68//#define DEBUG_TLB
69//#define DEBUG_UNASSIGNED
70
71/* make various TB consistency checks */
72//#define DEBUG_TB_CHECK
73//#define DEBUG_TLB_CHECK
74
75//#define DEBUG_IOPORT
76//#define DEBUG_SUBPAGE
77
78#if !defined(CONFIG_USER_ONLY)
79/* TB consistency checks only implemented for usermode emulation. */
80#undef DEBUG_TB_CHECK
81#endif
82
83#define SMC_BITMAP_USE_THRESHOLD 10
84
85#if defined(TARGET_SPARC64)
86#define TARGET_PHYS_ADDR_SPACE_BITS 41
87#elif defined(TARGET_SPARC)
88#define TARGET_PHYS_ADDR_SPACE_BITS 36
89#elif defined(TARGET_ALPHA)
90#define TARGET_PHYS_ADDR_SPACE_BITS 42
91#define TARGET_VIRT_ADDR_SPACE_BITS 42
92#elif defined(TARGET_PPC64)
93#define TARGET_PHYS_ADDR_SPACE_BITS 42
94#elif defined(TARGET_X86_64) && !defined(CONFIG_KQEMU)
95#define TARGET_PHYS_ADDR_SPACE_BITS 42
96#elif defined(TARGET_I386) && !defined(CONFIG_KQEMU)
97#define TARGET_PHYS_ADDR_SPACE_BITS 36
98#else
99/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
100#define TARGET_PHYS_ADDR_SPACE_BITS 32
101#endif
102
103static TranslationBlock *tbs;
104int code_gen_max_blocks;
105TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
106static int nb_tbs;
107/* any access to the tbs or the page table must use this lock */
108spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
109
110#ifndef VBOX
111#if defined(__arm__) || defined(__sparc_v9__)
112/* The prologue must be reachable with a direct jump. ARM and Sparc64
113 have limited branch ranges (possibly also PPC) so place it in a
114 section close to the code segment. */
115#define code_gen_section \
116 __attribute__((__section__(".gen_code"))) \
117 __attribute__((aligned (32)))
118#elif defined(_WIN32)
119/* Maximum alignment for Win32 is 16. */
120#define code_gen_section \
121 __attribute__((aligned (16)))
122#else
123#define code_gen_section \
124 __attribute__((aligned (32)))
125#endif
126
127uint8_t code_gen_prologue[1024] code_gen_section;
128#else /* VBOX */
129extern uint8_t* code_gen_prologue;
130#endif /* VBOX */
131static uint8_t *code_gen_buffer;
132static unsigned long code_gen_buffer_size;
133/* threshold to flush the translated code buffer */
134static unsigned long code_gen_buffer_max_size;
135uint8_t *code_gen_ptr;
136
137#ifndef VBOX
138#if !defined(CONFIG_USER_ONLY)
139int phys_ram_fd;
140uint8_t *phys_ram_dirty;
141static int in_migration;
142
143typedef struct RAMBlock {
144 uint8_t *host;
145 ram_addr_t offset;
146 ram_addr_t length;
147 struct RAMBlock *next;
148} RAMBlock;
149
150static RAMBlock *ram_blocks;
151/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
152 then we can no longer assume contiguous ram offsets, and external uses
153 of this variable will break. */
154ram_addr_t last_ram_offset;
155#endif
156#else /* VBOX */
157/* we have memory ranges (the high PC-BIOS mapping) which
158 cause some pages to fall outside the dirty map here. */
159RTGCPHYS phys_ram_dirty_size;
160uint8_t *phys_ram_dirty;
161#endif /* VBOX */
162
163CPUState *first_cpu;
164/* current CPU in the current thread. It is only valid inside
165 cpu_exec() */
166CPUState *cpu_single_env;
167/* 0 = Do not count executed instructions.
168 1 = Precise instruction counting.
169 2 = Adaptive rate instruction counting. */
170int use_icount = 0;
171/* Current instruction counter. While executing translated code this may
172 include some instructions that have not yet been executed. */
173int64_t qemu_icount;
174
175typedef struct PageDesc {
176 /* list of TBs intersecting this ram page */
177 TranslationBlock *first_tb;
178 /* in order to optimize self modifying code, we count the number
179 of lookups we do to a given page to use a bitmap */
180 unsigned int code_write_count;
181 uint8_t *code_bitmap;
182#if defined(CONFIG_USER_ONLY)
183 unsigned long flags;
184#endif
185} PageDesc;
186
187typedef struct PhysPageDesc {
188 /* offset in host memory of the page + io_index in the low bits */
189 ram_addr_t phys_offset;
190 ram_addr_t region_offset;
191} PhysPageDesc;
192
193#define L2_BITS 10
194#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
195/* XXX: this is a temporary hack for alpha target.
196 * In the future, this is to be replaced by a multi-level table
197 * to actually be able to handle the complete 64 bits address space.
198 */
199#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
200#else
201#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
202#endif
203#ifdef VBOX
204#define L0_BITS (TARGET_PHYS_ADDR_SPACE_BITS - 32)
205#endif
206
207#ifdef VBOX
208#define L0_SIZE (1 << L0_BITS)
209#endif
210#define L1_SIZE (1 << L1_BITS)
211#define L2_SIZE (1 << L2_BITS)
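/* Layout of the PageDesc lookup: a page index (address >> TARGET_PAGE_BITS) is
   split into L2_BITS low bits (slot within an array of L2_SIZE PageDesc), the
   next L1_BITS bits (slot within an l1_map of PageDesc pointers) and, for VBOX
   only, L0_BITS high bits selecting one of the L0_SIZE l1_map tables in l0_map.
   E.g. with 4 KB target pages the 32-bit case above splits a 20-bit page index
   10/10: 1024 l1_map slots, each lazily pointing at 1024 PageDesc entries. */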
212
213unsigned long qemu_real_host_page_size;
214unsigned long qemu_host_page_bits;
215unsigned long qemu_host_page_size;
216unsigned long qemu_host_page_mask;
217
218/* XXX: for system emulation, it could just be an array */
219#ifndef VBOX
220static PageDesc *l1_map[L1_SIZE];
221static PhysPageDesc **l1_phys_map;
222#else
223static unsigned l0_map_max_used = 0;
224static PageDesc **l0_map[L0_SIZE];
225static void **l0_phys_map[L0_SIZE];
226#endif
227
228#if !defined(CONFIG_USER_ONLY)
229static void io_mem_init(void);
230
231/* io memory support */
232CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
233CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
234void *io_mem_opaque[IO_MEM_NB_ENTRIES];
235static char io_mem_used[IO_MEM_NB_ENTRIES];
236static int io_mem_watch;
237#endif
238
239#ifndef VBOX
240/* log support */
241static const char *logfilename = "/tmp/qemu.log";
242#endif /* !VBOX */
243FILE *logfile;
244int loglevel;
245#ifndef VBOX
246static int log_append = 0;
247#endif
248
249/* statistics */
250#ifndef VBOX
251static int tlb_flush_count;
252static int tb_flush_count;
253static int tb_phys_invalidate_count;
254#else /* VBOX - Resettable U32 stats, see VBoxRecompiler.c. */
255uint32_t tlb_flush_count;
256uint32_t tb_flush_count;
257uint32_t tb_phys_invalidate_count;
258#endif /* VBOX */
259
260#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
261typedef struct subpage_t {
262 target_phys_addr_t base;
263 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
264 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
265 void *opaque[TARGET_PAGE_SIZE][2][4];
266 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
267} subpage_t;
268
269#ifndef VBOX
270#ifdef _WIN32
271static void map_exec(void *addr, long size)
272{
273 DWORD old_protect;
274 VirtualProtect(addr, size,
275 PAGE_EXECUTE_READWRITE, &old_protect);
276
277}
278#else
279static void map_exec(void *addr, long size)
280{
281 unsigned long start, end, page_size;
282
283 page_size = getpagesize();
284 start = (unsigned long)addr;
285 start &= ~(page_size - 1);
286
287 end = (unsigned long)addr + size;
288 end += page_size - 1;
289 end &= ~(page_size - 1);
290
291 mprotect((void *)start, end - start,
292 PROT_READ | PROT_WRITE | PROT_EXEC);
293}
294#endif
295#else /* VBOX */
296static void map_exec(void *addr, long size)
297{
298 RTMemProtect(addr, size,
299 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
300}
301#endif /* VBOX */
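/* Note on the POSIX map_exec() variant: mprotect() only operates on whole host
   pages, so the start address is rounded down and the end rounded up to page
   boundaries before the range is made readable, writable and executable. */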
302
303static void page_init(void)
304{
305 /* NOTE: we can always suppose that qemu_host_page_size >=
306 TARGET_PAGE_SIZE */
307#ifdef VBOX
308 RTMemProtect(code_gen_buffer, sizeof(code_gen_buffer),
309 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
310 qemu_real_host_page_size = PAGE_SIZE;
311#else /* !VBOX */
312#ifdef _WIN32
313 {
314 SYSTEM_INFO system_info;
315
316 GetSystemInfo(&system_info);
317 qemu_real_host_page_size = system_info.dwPageSize;
318 }
319#else
320 qemu_real_host_page_size = getpagesize();
321#endif
322#endif /* !VBOX */
323 if (qemu_host_page_size == 0)
324 qemu_host_page_size = qemu_real_host_page_size;
325 if (qemu_host_page_size < TARGET_PAGE_SIZE)
326 qemu_host_page_size = TARGET_PAGE_SIZE;
327 qemu_host_page_bits = 0;
328#ifndef VBOX
329 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
330#else
331 while ((1 << qemu_host_page_bits) < (int)qemu_host_page_size)
332#endif
333 qemu_host_page_bits++;
334 qemu_host_page_mask = ~(qemu_host_page_size - 1);
335#ifndef VBOX
336 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
337 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
338#endif
339
340#ifdef VBOX
341 /* We use other means to set the reserved bit on our pages. */
342#else /* !VBOX */
343#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
344 {
345 long long startaddr, endaddr;
346 FILE *f;
347 int n;
348
349 mmap_lock();
350 last_brk = (unsigned long)sbrk(0);
351 f = fopen("/proc/self/maps", "r");
352 if (f) {
353 do {
354 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
355 if (n == 2) {
356 startaddr = MIN(startaddr,
357 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
358 endaddr = MIN(endaddr,
359 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
360 page_set_flags(startaddr & TARGET_PAGE_MASK,
361 TARGET_PAGE_ALIGN(endaddr),
362 PAGE_RESERVED);
363 }
364 } while (!feof(f));
365 fclose(f);
366 }
367 mmap_unlock();
368 }
369#endif
370#endif /* !VBOX */
371}
372
373static inline PageDesc **page_l1_map(target_ulong index)
374{
375#ifndef VBOX
376#if TARGET_LONG_BITS > 32
377 /* Host memory outside guest VM. For 32-bit targets we have already
378 excluded high addresses. */
379 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
380 return NULL;
381#endif
382 return &l1_map[index >> L2_BITS];
383#else /* VBOX */
384 PageDesc **l1_map;
385 AssertMsgReturn(index < (target_ulong)L2_SIZE * L1_SIZE * L0_SIZE,
386 ("index=%RGp >= %RGp; L1_SIZE=%#x L2_SIZE=%#x L0_SIZE=%#x\n",
387 (RTGCPHYS)index, (RTGCPHYS)L2_SIZE * L1_SIZE, L1_SIZE, L2_SIZE, L0_SIZE),
388 NULL);
389 l1_map = l0_map[index >> (L1_BITS + L2_BITS)];
390 if (RT_UNLIKELY(!l1_map))
391 {
392 unsigned i0 = index >> (L1_BITS + L2_BITS);
393 l0_map[i0] = l1_map = qemu_mallocz(sizeof(PageDesc *) * L1_SIZE);
394 if (RT_UNLIKELY(!l1_map))
395 return NULL;
396 if (i0 >= l0_map_max_used)
397 l0_map_max_used = i0 + 1;
398 }
399 return &l1_map[(index >> L2_BITS) & (L1_SIZE - 1)];
400#endif /* VBOX */
401}
402
403static inline PageDesc *page_find_alloc(target_ulong index)
404{
405 PageDesc **lp, *p;
406 lp = page_l1_map(index);
407 if (!lp)
408 return NULL;
409
410 p = *lp;
411 if (!p) {
412 /* allocate if not found */
413#if defined(CONFIG_USER_ONLY)
414 size_t len = sizeof(PageDesc) * L2_SIZE;
415 /* Don't use qemu_malloc because it may recurse. */
416 p = mmap(NULL, len, PROT_READ | PROT_WRITE,
417 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
418 *lp = p;
419 if (h2g_valid(p)) {
420 unsigned long addr = h2g(p);
421 page_set_flags(addr & TARGET_PAGE_MASK,
422 TARGET_PAGE_ALIGN(addr + len),
423 PAGE_RESERVED);
424 }
425#else
426 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
427 *lp = p;
428#endif
429 }
430 return p + (index & (L2_SIZE - 1));
431}
432
433static inline PageDesc *page_find(target_ulong index)
434{
435 PageDesc **lp, *p;
436 lp = page_l1_map(index);
437 if (!lp)
438 return NULL;
439
440 p = *lp;
441 if (!p) {
442 return NULL;
443 }
444 return p + (index & (L2_SIZE - 1));
445}
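/* page_find() is the read-only counterpart of page_find_alloc(): it returns
   NULL instead of allocating missing intermediate tables, so callers such as
   tb_invalidate_phys_page_range() below can simply bail out for pages that
   never held translated code:
       p = page_find(start >> TARGET_PAGE_BITS);
       if (!p)
           return;
 */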
446
447static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
448{
449 void **lp, **p;
450 PhysPageDesc *pd;
451
452#ifndef VBOX
453 p = (void **)l1_phys_map;
454#if TARGET_PHYS_ADDR_SPACE_BITS > 32
455
456#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
457#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
458#endif
459 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
460 p = *lp;
461 if (!p) {
462 /* allocate if not found */
463 if (!alloc)
464 return NULL;
465 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
466 memset(p, 0, sizeof(void *) * L1_SIZE);
467 *lp = p;
468 }
469#endif
470#else /* VBOX */
471 /* level 0 lookup and lazy allocation of level 1 map. */
472 if (RT_UNLIKELY(index >= (target_phys_addr_t)L2_SIZE * L1_SIZE * L0_SIZE))
473 return NULL;
474 p = l0_phys_map[index >> (L1_BITS + L2_BITS)];
475 if (RT_UNLIKELY(!p)) {
476 if (!alloc)
477 return NULL;
478 p = qemu_vmalloc(sizeof(void **) * L1_SIZE);
479 memset(p, 0, sizeof(void **) * L1_SIZE);
480 l0_phys_map[index >> (L1_BITS + L2_BITS)] = p;
481 }
482
483 /* level 1 lookup and lazy allocation of level 2 map. */
484#endif /* VBOX */
485 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
486 pd = *lp;
487 if (!pd) {
488 int i;
489 /* allocate if not found */
490 if (!alloc)
491 return NULL;
492 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
493 *lp = pd;
494 for (i = 0; i < L2_SIZE; i++) {
495 pd[i].phys_offset = IO_MEM_UNASSIGNED;
496 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
497 }
498 }
499 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
500}
501
502static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
503{
504 return phys_page_find_alloc(index, 0);
505}
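/* PhysPageDesc.phys_offset packs the page-aligned RAM offset in its high bits
   and the io_index in the low TARGET_PAGE_BITS (IO_MEM_UNASSIGNED by default).
   Callers split it with TARGET_PAGE_MASK, e.g. as breakpoint_invalidate() does:
       pd = p ? p->phys_offset : IO_MEM_UNASSIGNED;
       ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
 */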
506
507#if !defined(CONFIG_USER_ONLY)
508static void tlb_protect_code(ram_addr_t ram_addr);
509static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
510 target_ulong vaddr);
511#define mmap_lock() do { } while(0)
512#define mmap_unlock() do { } while(0)
513#endif
514
515#ifdef VBOX /* We don't need such a huge codegen buffer size, as we execute
516 most of the code in raw or hwacc mode. */
517#define DEFAULT_CODE_GEN_BUFFER_SIZE (8 * 1024 * 1024)
518#else /* !VBOX */
519#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
520#endif /* !VBOX */
521
522#if defined(CONFIG_USER_ONLY)
523/* Currently it is not recommended to allocate big chunks of data in
524 user mode. This will change when a dedicated libc is used. */
525#define USE_STATIC_CODE_GEN_BUFFER
526#endif
527
528#if defined(VBOX) && defined(USE_STATIC_CODE_GEN_BUFFER)
529# error "VBox allocates codegen buffer dynamically"
530#endif
531
532#ifdef USE_STATIC_CODE_GEN_BUFFER
533static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
534#endif
535
536static void code_gen_alloc(unsigned long tb_size)
537{
538#ifdef USE_STATIC_CODE_GEN_BUFFER
539 code_gen_buffer = static_code_gen_buffer;
540 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
541 map_exec(code_gen_buffer, code_gen_buffer_size);
542#else
543# ifdef VBOX
544 /* We cannot use phys_ram_size here, as it's 0 now;
545 * it only gets initialized once the RAM registration callback
546 * (REMR3NotifyPhysRamRegister()) has been called.
547 */
548 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
549# else /* !VBOX */
550 code_gen_buffer_size = tb_size;
551 if (code_gen_buffer_size == 0) {
552#if defined(CONFIG_USER_ONLY)
553 /* in user mode, phys_ram_size is not meaningful */
554 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
555#else
556 /* XXX: needs adjustments */
557 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
558#endif
559 }
560 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
561 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
562# endif /* !VBOX */
563 /* The code gen buffer location may have constraints depending on
564 the host cpu and OS */
565# ifdef VBOX
566 code_gen_buffer = RTMemExecAlloc(code_gen_buffer_size);
567
568 if (!code_gen_buffer) {
569 LogRel(("REM: failed allocate codegen buffer %lld\n",
570 code_gen_buffer_size));
571 return;
572 }
573# else /* !VBOX */
574#if defined(__linux__)
575 {
576 int flags;
577 void *start = NULL;
578
579 flags = MAP_PRIVATE | MAP_ANONYMOUS;
580#if defined(__x86_64__)
581 flags |= MAP_32BIT;
582 /* Cannot map more than that */
583 if (code_gen_buffer_size > (800 * 1024 * 1024))
584 code_gen_buffer_size = (800 * 1024 * 1024);
585#elif defined(__sparc_v9__)
586 // Map the buffer below 2G, so we can use direct calls and branches
587 flags |= MAP_FIXED;
588 start = (void *) 0x60000000UL;
589 if (code_gen_buffer_size > (512 * 1024 * 1024))
590 code_gen_buffer_size = (512 * 1024 * 1024);
591#elif defined(__arm__)
592 /* Map the buffer below 32M, so we can use direct calls and branches */
593 flags |= MAP_FIXED;
594 start = (void *) 0x01000000UL;
595 if (code_gen_buffer_size > 16 * 1024 * 1024)
596 code_gen_buffer_size = 16 * 1024 * 1024;
597#endif
598 code_gen_buffer = mmap(start, code_gen_buffer_size,
599 PROT_WRITE | PROT_READ | PROT_EXEC,
600 flags, -1, 0);
601 if (code_gen_buffer == MAP_FAILED) {
602 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
603 exit(1);
604 }
605 }
606#elif defined(__FreeBSD__) || defined(__DragonFly__)
607 {
608 int flags;
609 void *addr = NULL;
610 flags = MAP_PRIVATE | MAP_ANONYMOUS;
611#if defined(__x86_64__)
612 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
613 * 0x40000000 is free */
614 flags |= MAP_FIXED;
615 addr = (void *)0x40000000;
616 /* Cannot map more than that */
617 if (code_gen_buffer_size > (800 * 1024 * 1024))
618 code_gen_buffer_size = (800 * 1024 * 1024);
619#endif
620 code_gen_buffer = mmap(addr, code_gen_buffer_size,
621 PROT_WRITE | PROT_READ | PROT_EXEC,
622 flags, -1, 0);
623 if (code_gen_buffer == MAP_FAILED) {
624 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
625 exit(1);
626 }
627 }
628#else
629 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
630 map_exec(code_gen_buffer, code_gen_buffer_size);
631#endif
632# endif /* !VBOX */
633#endif /* !USE_STATIC_CODE_GEN_BUFFER */
634#ifndef VBOX
635 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
636#else
637 map_exec(code_gen_prologue, _1K);
638#endif
639 code_gen_buffer_max_size = code_gen_buffer_size -
640 code_gen_max_block_size();
641 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
642 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
643}
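/* Sizing notes for code_gen_alloc(): translation is stopped and tb_flush() is
   forced once code_gen_ptr passes code_gen_buffer_max_size, i.e. the buffer
   size minus the worst-case size of a single block, so a block being generated
   can never overrun the buffer.  The TB descriptor array is sized for the
   average case: code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE
   (with the 8 MB VBOX buffer that bounds the number of simultaneously live TBs). */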
644
645/* Must be called before using the QEMU cpus. 'tb_size' is the size
646 (in bytes) allocated to the translation buffer. Zero means default
647 size. */
648void cpu_exec_init_all(unsigned long tb_size)
649{
650 cpu_gen_init();
651 code_gen_alloc(tb_size);
652 code_gen_ptr = code_gen_buffer;
653 page_init();
654#if !defined(CONFIG_USER_ONLY)
655 io_mem_init();
656#endif
657}
658
659#ifndef VBOX
660#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
661
662#define CPU_COMMON_SAVE_VERSION 1
663
664static void cpu_common_save(QEMUFile *f, void *opaque)
665{
666 CPUState *env = opaque;
667
668 cpu_synchronize_state(env, 0);
669
670 qemu_put_be32s(f, &env->halted);
671 qemu_put_be32s(f, &env->interrupt_request);
672}
673
674static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
675{
676 CPUState *env = opaque;
677
678 if (version_id != CPU_COMMON_SAVE_VERSION)
679 return -EINVAL;
680
681 qemu_get_be32s(f, &env->halted);
682 qemu_get_be32s(f, &env->interrupt_request);
683 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
684 version_id is increased. */
685 env->interrupt_request &= ~0x01;
686 tlb_flush(env, 1);
687 cpu_synchronize_state(env, 1);
688
689 return 0;
690}
691#endif
692
693CPUState *qemu_get_cpu(int cpu)
694{
695 CPUState *env = first_cpu;
696
697 while (env) {
698 if (env->cpu_index == cpu)
699 break;
700 env = env->next_cpu;
701 }
702
703 return env;
704}
705
706#endif /* !VBOX */
707
708void cpu_exec_init(CPUState *env)
709{
710 CPUState **penv;
711 int cpu_index;
712
713#if defined(CONFIG_USER_ONLY)
714 cpu_list_lock();
715#endif
716 env->next_cpu = NULL;
717 penv = &first_cpu;
718 cpu_index = 0;
719 while (*penv != NULL) {
720 penv = &(*penv)->next_cpu;
721 cpu_index++;
722 }
723 env->cpu_index = cpu_index;
724 env->numa_node = 0;
725 TAILQ_INIT(&env->breakpoints);
726 TAILQ_INIT(&env->watchpoints);
727 *penv = env;
728#ifndef VBOX
729#if defined(CONFIG_USER_ONLY)
730 cpu_list_unlock();
731#endif
732#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
733 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
734 cpu_common_save, cpu_common_load, env);
735 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
736 cpu_save, cpu_load, env);
737#endif
738#endif /* !VBOX */
739}
740
741static inline void invalidate_page_bitmap(PageDesc *p)
742{
743 if (p->code_bitmap) {
744 qemu_free(p->code_bitmap);
745 p->code_bitmap = NULL;
746 }
747 p->code_write_count = 0;
748}
749
750/* set all the 'first_tb' fields in all PageDescs to NULL */
751static void page_flush_tb(void)
752{
753 int i, j;
754 PageDesc *p;
755#ifdef VBOX
756 int k;
757#endif
758
759#ifdef VBOX
760 k = l0_map_max_used;
761 while (k-- > 0) {
762 PageDesc **l1_map = l0_map[k];
763 if (l1_map) {
764#endif
765 for(i = 0; i < L1_SIZE; i++) {
766 p = l1_map[i];
767 if (p) {
768 for(j = 0; j < L2_SIZE; j++) {
769 p->first_tb = NULL;
770 invalidate_page_bitmap(p);
771 p++;
772 }
773 }
774 }
775#ifdef VBOX
776 }
777 }
778#endif
779}
780
781/* flush all the translation blocks */
782/* XXX: tb_flush is currently not thread safe */
783void tb_flush(CPUState *env1)
784{
785 CPUState *env;
786#ifdef VBOX
787 STAM_PROFILE_START(&env1->StatTbFlush, a);
788#endif
789#if defined(DEBUG_FLUSH)
790 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
791 (unsigned long)(code_gen_ptr - code_gen_buffer),
792 nb_tbs, nb_tbs > 0 ?
793 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
794#endif
795 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
796 cpu_abort(env1, "Internal error: code buffer overflow\n");
797
798 nb_tbs = 0;
799
800 for(env = first_cpu; env != NULL; env = env->next_cpu) {
801 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
802 }
803
804 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
805 page_flush_tb();
806
807 code_gen_ptr = code_gen_buffer;
808 /* XXX: flush processor icache at this point if cache flush is
809 expensive */
810 tb_flush_count++;
811#ifdef VBOX
812 STAM_PROFILE_STOP(&env1->StatTbFlush, a);
813#endif
814}
815
816#ifdef DEBUG_TB_CHECK
817
818static void tb_invalidate_check(target_ulong address)
819{
820 TranslationBlock *tb;
821 int i;
822 address &= TARGET_PAGE_MASK;
823 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
824 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
825 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
826 address >= tb->pc + tb->size)) {
827 printf("ERROR invalidate: address=" TARGET_FMT_lx
828 " PC=%08lx size=%04x\n",
829 address, (long)tb->pc, tb->size);
830 }
831 }
832 }
833}
834
835/* verify that all the pages have correct rights for code */
836static void tb_page_check(void)
837{
838 TranslationBlock *tb;
839 int i, flags1, flags2;
840
841 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
842 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
843 flags1 = page_get_flags(tb->pc);
844 flags2 = page_get_flags(tb->pc + tb->size - 1);
845 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
846 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
847 (long)tb->pc, tb->size, flags1, flags2);
848 }
849 }
850 }
851}
852
853#endif
854
855/* invalidate one TB */
856static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
857 int next_offset)
858{
859 TranslationBlock *tb1;
860 for(;;) {
861 tb1 = *ptb;
862 if (tb1 == tb) {
863 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
864 break;
865 }
866 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
867 }
868}
869
870static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
871{
872 TranslationBlock *tb1;
873 unsigned int n1;
874
875 for(;;) {
876 tb1 = *ptb;
877 n1 = (long)tb1 & 3;
878 tb1 = (TranslationBlock *)((long)tb1 & ~3);
879 if (tb1 == tb) {
880 *ptb = tb1->page_next[n1];
881 break;
882 }
883 ptb = &tb1->page_next[n1];
884 }
885}
886
887static inline void tb_jmp_remove(TranslationBlock *tb, int n)
888{
889 TranslationBlock *tb1, **ptb;
890 unsigned int n1;
891
892 ptb = &tb->jmp_next[n];
893 tb1 = *ptb;
894 if (tb1) {
895 /* find tb(n) in circular list */
896 for(;;) {
897 tb1 = *ptb;
898 n1 = (long)tb1 & 3;
899 tb1 = (TranslationBlock *)((long)tb1 & ~3);
900 if (n1 == n && tb1 == tb)
901 break;
902 if (n1 == 2) {
903 ptb = &tb1->jmp_first;
904 } else {
905 ptb = &tb1->jmp_next[n1];
906 }
907 }
908 /* now we can remove tb(n) from the list */
909 *ptb = tb->jmp_next[n];
910
911 tb->jmp_next[n] = NULL;
912 }
913}
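/* The TB lists walked above use tagged pointers: the two low bits of each
   pointer stored in page_next[]/jmp_next[] encode which slot (0 or 1) of the
   pointed-to TB continues the chain, and the tag value 2 marks the head of the
   circular jmp_first list (tb_link_phys() sets tb->jmp_first = (TranslationBlock *)((long)tb | 2)).
   Hence every walker strips the tag with '& ~3' before dereferencing. */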
914
915/* reset the jump entry 'n' of a TB so that it is not chained to
916 another TB */
917static inline void tb_reset_jump(TranslationBlock *tb, int n)
918{
919 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
920}
921
922void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
923{
924 CPUState *env;
925 PageDesc *p;
926 unsigned int h, n1;
927 target_phys_addr_t phys_pc;
928 TranslationBlock *tb1, *tb2;
929
930 /* remove the TB from the hash list */
931 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
932 h = tb_phys_hash_func(phys_pc);
933 tb_remove(&tb_phys_hash[h], tb,
934 offsetof(TranslationBlock, phys_hash_next));
935
936 /* remove the TB from the page list */
937 if (tb->page_addr[0] != page_addr) {
938 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
939 tb_page_remove(&p->first_tb, tb);
940 invalidate_page_bitmap(p);
941 }
942 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
943 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
944 tb_page_remove(&p->first_tb, tb);
945 invalidate_page_bitmap(p);
946 }
947
948 tb_invalidated_flag = 1;
949
950 /* remove the TB from the hash list */
951 h = tb_jmp_cache_hash_func(tb->pc);
952 for(env = first_cpu; env != NULL; env = env->next_cpu) {
953 if (env->tb_jmp_cache[h] == tb)
954 env->tb_jmp_cache[h] = NULL;
955 }
956
957 /* remove this TB from the two jump lists */
958 tb_jmp_remove(tb, 0);
959 tb_jmp_remove(tb, 1);
960
961 /* suppress any remaining jumps to this TB */
962 tb1 = tb->jmp_first;
963 for(;;) {
964 n1 = (long)tb1 & 3;
965 if (n1 == 2)
966 break;
967 tb1 = (TranslationBlock *)((long)tb1 & ~3);
968 tb2 = tb1->jmp_next[n1];
969 tb_reset_jump(tb1, n1);
970 tb1->jmp_next[n1] = NULL;
971 tb1 = tb2;
972 }
973 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
974
975 tb_phys_invalidate_count++;
976}
977
978#ifdef VBOX
979
980void tb_invalidate_virt(CPUState *env, uint32_t eip)
981{
982# if 1
983 tb_flush(env);
984# else
985 uint8_t *cs_base, *pc;
986 unsigned int flags, h, phys_pc;
987 TranslationBlock *tb, **ptb;
988
989 flags = env->hflags;
990 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
991 cs_base = env->segs[R_CS].base;
992 pc = cs_base + eip;
993
994 tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base,
995 flags);
996
997 if(tb)
998 {
999# ifdef DEBUG
1000 printf("invalidating TB (%08X) at %08X\n", tb, eip);
1001# endif
1002 tb_invalidate(tb);
1003 //Note: this will leak TBs, but the whole cache will be flushed
1004 // when it happens too often
1005 tb->pc = 0;
1006 tb->cs_base = 0;
1007 tb->flags = 0;
1008 }
1009# endif
1010}
1011
1012# ifdef VBOX_STRICT
1013/**
1014 * Gets the page offset.
1015 */
1016unsigned long get_phys_page_offset(target_ulong addr)
1017{
1018 PhysPageDesc *p = phys_page_find(addr >> TARGET_PAGE_BITS);
1019 return p ? p->phys_offset : 0;
1020}
1021# endif /* VBOX_STRICT */
1022
1023#endif /* VBOX */
1024
1025static inline void set_bits(uint8_t *tab, int start, int len)
1026{
1027 int end, mask, end1;
1028
1029 end = start + len;
1030 tab += start >> 3;
1031 mask = 0xff << (start & 7);
1032 if ((start & ~7) == (end & ~7)) {
1033 if (start < end) {
1034 mask &= ~(0xff << (end & 7));
1035 *tab |= mask;
1036 }
1037 } else {
1038 *tab++ |= mask;
1039 start = (start + 8) & ~7;
1040 end1 = end & ~7;
1041 while (start < end1) {
1042 *tab++ = 0xff;
1043 start += 8;
1044 }
1045 if (start < end) {
1046 mask = ~(0xff << (end & 7));
1047 *tab |= mask;
1048 }
1049 }
1050}
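/* set_bits() marks the half-open bit range [start, start + len) in 'tab',
   handling the partial leading byte, the whole middle bytes and the partial
   trailing byte separately.  For example set_bits(tab, 3, 10) sets bits 3..7
   of tab[0] and bits 0..4 of tab[1], i.e. bits 3..12 overall. */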
1051
1052static void build_page_bitmap(PageDesc *p)
1053{
1054 int n, tb_start, tb_end;
1055 TranslationBlock *tb;
1056
1057 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
1058
1059 tb = p->first_tb;
1060 while (tb != NULL) {
1061 n = (long)tb & 3;
1062 tb = (TranslationBlock *)((long)tb & ~3);
1063 /* NOTE: this is subtle as a TB may span two physical pages */
1064 if (n == 0) {
1065 /* NOTE: tb_end may be after the end of the page, but
1066 it is not a problem */
1067 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1068 tb_end = tb_start + tb->size;
1069 if (tb_end > TARGET_PAGE_SIZE)
1070 tb_end = TARGET_PAGE_SIZE;
1071 } else {
1072 tb_start = 0;
1073 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1074 }
1075 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
1076 tb = tb->page_next[n];
1077 }
1078}
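/* The bitmap built above holds one bit per byte of the target page
   (TARGET_PAGE_SIZE / 8 bytes); a set bit means that byte is covered by at
   least one TB.  tb_invalidate_phys_page_fast() consults it so that guest
   writes landing between translated blocks can skip the expensive
   tb_invalidate_phys_page_range() path. */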
1079
1080TranslationBlock *tb_gen_code(CPUState *env,
1081 target_ulong pc, target_ulong cs_base,
1082 int flags, int cflags)
1083{
1084 TranslationBlock *tb;
1085 uint8_t *tc_ptr;
1086 target_ulong phys_pc, phys_page2, virt_page2;
1087 int code_gen_size;
1088
1089 phys_pc = get_phys_addr_code(env, pc);
1090 tb = tb_alloc(pc);
1091 if (!tb) {
1092 /* flush must be done */
1093 tb_flush(env);
1094 /* cannot fail at this point */
1095 tb = tb_alloc(pc);
1096 /* Don't forget to invalidate previous TB info. */
1097 tb_invalidated_flag = 1;
1098 }
1099 tc_ptr = code_gen_ptr;
1100 tb->tc_ptr = tc_ptr;
1101 tb->cs_base = cs_base;
1102 tb->flags = flags;
1103 tb->cflags = cflags;
1104 cpu_gen_code(env, tb, &code_gen_size);
1105 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
1106
1107 /* check next page if needed */
1108 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1109 phys_page2 = -1;
1110 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1111 phys_page2 = get_phys_addr_code(env, virt_page2);
1112 }
1113 tb_link_phys(tb, phys_pc, phys_page2);
1114 return tb;
1115}
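/* A TB whose guest code straddles a page boundary is registered on both
   physical pages (phys_pc and phys_page2), so a write to either page will find
   and invalidate it; single-page TBs pass phys_page2 == -1 and only occupy
   page_addr[0]. */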
1116
1117/* invalidate all TBs which intersect with the target physical page
1118 starting in range [start, end). NOTE: start and end must refer to
1119 the same physical page. 'is_cpu_write_access' should be true if called
1120 from a real cpu write access: the virtual CPU will exit the current
1121 TB if code is modified inside this TB. */
1122void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
1123 int is_cpu_write_access)
1124{
1125 TranslationBlock *tb, *tb_next, *saved_tb;
1126 CPUState *env = cpu_single_env;
1127 target_ulong tb_start, tb_end;
1128 PageDesc *p;
1129 int n;
1130#ifdef TARGET_HAS_PRECISE_SMC
1131 int current_tb_not_found = is_cpu_write_access;
1132 TranslationBlock *current_tb = NULL;
1133 int current_tb_modified = 0;
1134 target_ulong current_pc = 0;
1135 target_ulong current_cs_base = 0;
1136 int current_flags = 0;
1137#endif /* TARGET_HAS_PRECISE_SMC */
1138
1139 p = page_find(start >> TARGET_PAGE_BITS);
1140 if (!p)
1141 return;
1142 if (!p->code_bitmap &&
1143 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1144 is_cpu_write_access) {
1145 /* build code bitmap */
1146 build_page_bitmap(p);
1147 }
1148
1149 /* we remove all the TBs in the range [start, end) */
1150 /* XXX: see if in some cases it could be faster to invalidate all the code */
1151 tb = p->first_tb;
1152 while (tb != NULL) {
1153 n = (long)tb & 3;
1154 tb = (TranslationBlock *)((long)tb & ~3);
1155 tb_next = tb->page_next[n];
1156 /* NOTE: this is subtle as a TB may span two physical pages */
1157 if (n == 0) {
1158 /* NOTE: tb_end may be after the end of the page, but
1159 it is not a problem */
1160 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1161 tb_end = tb_start + tb->size;
1162 } else {
1163 tb_start = tb->page_addr[1];
1164 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1165 }
1166 if (!(tb_end <= start || tb_start >= end)) {
1167#ifdef TARGET_HAS_PRECISE_SMC
1168 if (current_tb_not_found) {
1169 current_tb_not_found = 0;
1170 current_tb = NULL;
1171 if (env->mem_io_pc) {
1172 /* now we have a real cpu fault */
1173 current_tb = tb_find_pc(env->mem_io_pc);
1174 }
1175 }
1176 if (current_tb == tb &&
1177 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1178 /* If we are modifying the current TB, we must stop
1179 its execution. We could be more precise by checking
1180 that the modification is after the current PC, but it
1181 would require a specialized function to partially
1182 restore the CPU state */
1183
1184 current_tb_modified = 1;
1185 cpu_restore_state(current_tb, env,
1186 env->mem_io_pc, NULL);
1187 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1188 &current_flags);
1189 }
1190#endif /* TARGET_HAS_PRECISE_SMC */
1191 /* we need to do that to handle the case where a signal
1192 occurs while doing tb_phys_invalidate() */
1193 saved_tb = NULL;
1194 if (env) {
1195 saved_tb = env->current_tb;
1196 env->current_tb = NULL;
1197 }
1198 tb_phys_invalidate(tb, -1);
1199 if (env) {
1200 env->current_tb = saved_tb;
1201 if (env->interrupt_request && env->current_tb)
1202 cpu_interrupt(env, env->interrupt_request);
1203 }
1204 }
1205 tb = tb_next;
1206 }
1207#if !defined(CONFIG_USER_ONLY)
1208 /* if no code remaining, no need to continue to use slow writes */
1209 if (!p->first_tb) {
1210 invalidate_page_bitmap(p);
1211 if (is_cpu_write_access) {
1212 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1213 }
1214 }
1215#endif
1216#ifdef TARGET_HAS_PRECISE_SMC
1217 if (current_tb_modified) {
1218 /* we generate a block containing just the instruction
1219 modifying the memory. It will ensure that it cannot modify
1220 itself */
1221 env->current_tb = NULL;
1222 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1223 cpu_resume_from_signal(env, NULL);
1224 }
1225#endif
1226}
1227
1228/* len must be <= 8 and start must be a multiple of len */
1229static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1230{
1231 PageDesc *p;
1232 int offset, b;
1233#if 0
1234 if (1) {
1235 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1236 cpu_single_env->mem_io_vaddr, len,
1237 cpu_single_env->eip,
1238 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1239 }
1240#endif
1241 p = page_find(start >> TARGET_PAGE_BITS);
1242 if (!p)
1243 return;
1244 if (p->code_bitmap) {
1245 offset = start & ~TARGET_PAGE_MASK;
1246 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1247 if (b & ((1 << len) - 1))
1248 goto do_invalidate;
1249 } else {
1250 do_invalidate:
1251 tb_invalidate_phys_page_range(start, start + len, 1);
1252 }
1253}
1254
1255#if !defined(CONFIG_SOFTMMU)
1256static void tb_invalidate_phys_page(target_phys_addr_t addr,
1257 unsigned long pc, void *puc)
1258{
1259 TranslationBlock *tb;
1260 PageDesc *p;
1261 int n;
1262#ifdef TARGET_HAS_PRECISE_SMC
1263 TranslationBlock *current_tb = NULL;
1264 CPUState *env = cpu_single_env;
1265 int current_tb_modified = 0;
1266 target_ulong current_pc = 0;
1267 target_ulong current_cs_base = 0;
1268 int current_flags = 0;
1269#endif
1270
1271 addr &= TARGET_PAGE_MASK;
1272 p = page_find(addr >> TARGET_PAGE_BITS);
1273 if (!p)
1274 return;
1275 tb = p->first_tb;
1276#ifdef TARGET_HAS_PRECISE_SMC
1277 if (tb && pc != 0) {
1278 current_tb = tb_find_pc(pc);
1279 }
1280#endif
1281 while (tb != NULL) {
1282 n = (long)tb & 3;
1283 tb = (TranslationBlock *)((long)tb & ~3);
1284#ifdef TARGET_HAS_PRECISE_SMC
1285 if (current_tb == tb &&
1286 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1287 /* If we are modifying the current TB, we must stop
1288 its execution. We could be more precise by checking
1289 that the modification is after the current PC, but it
1290 would require a specialized function to partially
1291 restore the CPU state */
1292
1293 current_tb_modified = 1;
1294 cpu_restore_state(current_tb, env, pc, puc);
1295 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1296 &current_flags);
1297 }
1298#endif /* TARGET_HAS_PRECISE_SMC */
1299 tb_phys_invalidate(tb, addr);
1300 tb = tb->page_next[n];
1301 }
1302 p->first_tb = NULL;
1303#ifdef TARGET_HAS_PRECISE_SMC
1304 if (current_tb_modified) {
1305 /* we generate a block containing just the instruction
1306 modifying the memory. It will ensure that it cannot modify
1307 itself */
1308 env->current_tb = NULL;
1309 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1310 cpu_resume_from_signal(env, puc);
1311 }
1312#endif
1313}
1314#endif
1315
1316/* add the tb in the target page and protect it if necessary */
1317static inline void tb_alloc_page(TranslationBlock *tb,
1318 unsigned int n, target_ulong page_addr)
1319{
1320 PageDesc *p;
1321 TranslationBlock *last_first_tb;
1322
1323 tb->page_addr[n] = page_addr;
1324 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1325 tb->page_next[n] = p->first_tb;
1326 last_first_tb = p->first_tb;
1327 p->first_tb = (TranslationBlock *)((long)tb | n);
1328 invalidate_page_bitmap(p);
1329
1330#if defined(TARGET_HAS_SMC) || 1
1331
1332#if defined(CONFIG_USER_ONLY)
1333 if (p->flags & PAGE_WRITE) {
1334 target_ulong addr;
1335 PageDesc *p2;
1336 int prot;
1337
1338 /* force the host page as non writable (writes will have a
1339 page fault + mprotect overhead) */
1340 page_addr &= qemu_host_page_mask;
1341 prot = 0;
1342 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1343 addr += TARGET_PAGE_SIZE) {
1344
1345 p2 = page_find (addr >> TARGET_PAGE_BITS);
1346 if (!p2)
1347 continue;
1348 prot |= p2->flags;
1349 p2->flags &= ~PAGE_WRITE;
1350 page_get_flags(addr);
1351 }
1352 mprotect(g2h(page_addr), qemu_host_page_size,
1353 (prot & PAGE_BITS) & ~PAGE_WRITE);
1354#ifdef DEBUG_TB_INVALIDATE
1355 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1356 page_addr);
1357#endif
1358 }
1359#else
1360 /* if some code is already present, then the pages are already
1361 protected. So we handle the case where only the first TB is
1362 allocated in a physical page */
1363 if (!last_first_tb) {
1364 tlb_protect_code(page_addr);
1365 }
1366#endif
1367
1368#endif /* TARGET_HAS_SMC */
1369}
1370
1371/* Allocate a new translation block. Flush the translation buffer if
1372 too many translation blocks or too much generated code. */
1373TranslationBlock *tb_alloc(target_ulong pc)
1374{
1375 TranslationBlock *tb;
1376
1377 if (nb_tbs >= code_gen_max_blocks ||
1378#ifndef VBOX
1379 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1380#else
1381 (code_gen_ptr - code_gen_buffer) >= (int)code_gen_buffer_max_size)
1382#endif
1383 return NULL;
1384 tb = &tbs[nb_tbs++];
1385 tb->pc = pc;
1386 tb->cflags = 0;
1387 return tb;
1388}
1389
1390void tb_free(TranslationBlock *tb)
1391{
1392 /* In practice this is mostly used for single-use temporary TBs.
1393 Ignore the hard cases and just back up if this TB happens to
1394 be the last one generated. */
1395 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1396 code_gen_ptr = tb->tc_ptr;
1397 nb_tbs--;
1398 }
1399}
1400
1401/* add a new TB and link it to the physical page tables. phys_page2 is
1402 (-1) to indicate that only one page contains the TB. */
1403void tb_link_phys(TranslationBlock *tb,
1404 target_ulong phys_pc, target_ulong phys_page2)
1405{
1406 unsigned int h;
1407 TranslationBlock **ptb;
1408
1409 /* Grab the mmap lock to stop another thread invalidating this TB
1410 before we are done. */
1411 mmap_lock();
1412 /* add in the physical hash table */
1413 h = tb_phys_hash_func(phys_pc);
1414 ptb = &tb_phys_hash[h];
1415 tb->phys_hash_next = *ptb;
1416 *ptb = tb;
1417
1418 /* add in the page list */
1419 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1420 if (phys_page2 != -1)
1421 tb_alloc_page(tb, 1, phys_page2);
1422 else
1423 tb->page_addr[1] = -1;
1424
1425 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1426 tb->jmp_next[0] = NULL;
1427 tb->jmp_next[1] = NULL;
1428
1429 /* init original jump addresses */
1430 if (tb->tb_next_offset[0] != 0xffff)
1431 tb_reset_jump(tb, 0);
1432 if (tb->tb_next_offset[1] != 0xffff)
1433 tb_reset_jump(tb, 1);
1434
1435#ifdef DEBUG_TB_CHECK
1436 tb_page_check();
1437#endif
1438 mmap_unlock();
1439}
1440
1441/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1442 tb[1].tc_ptr. Return NULL if not found */
1443TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1444{
1445 int m_min, m_max, m;
1446 unsigned long v;
1447 TranslationBlock *tb;
1448
1449 if (nb_tbs <= 0)
1450 return NULL;
1451 if (tc_ptr < (unsigned long)code_gen_buffer ||
1452 tc_ptr >= (unsigned long)code_gen_ptr)
1453 return NULL;
1454 /* binary search (cf Knuth) */
1455 m_min = 0;
1456 m_max = nb_tbs - 1;
1457 while (m_min <= m_max) {
1458 m = (m_min + m_max) >> 1;
1459 tb = &tbs[m];
1460 v = (unsigned long)tb->tc_ptr;
1461 if (v == tc_ptr)
1462 return tb;
1463 else if (tc_ptr < v) {
1464 m_max = m - 1;
1465 } else {
1466 m_min = m + 1;
1467 }
1468 }
1469 return &tbs[m_max];
1470}
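/* The binary search above relies on tbs[] being sorted by tc_ptr: TBs are
   handed out sequentially by tb_alloc() and their code is laid out linearly
   from code_gen_ptr, so tc_ptr grows with the index.  When tc_ptr points into
   the middle of a block the loop exits with &tbs[m_max], the block whose
   generated code contains tc_ptr. */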
1471
1472static void tb_reset_jump_recursive(TranslationBlock *tb);
1473
1474static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1475{
1476 TranslationBlock *tb1, *tb_next, **ptb;
1477 unsigned int n1;
1478
1479 tb1 = tb->jmp_next[n];
1480 if (tb1 != NULL) {
1481 /* find head of list */
1482 for(;;) {
1483 n1 = (long)tb1 & 3;
1484 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1485 if (n1 == 2)
1486 break;
1487 tb1 = tb1->jmp_next[n1];
1488 }
1489 /* we are now sure that tb jumps to tb1 */
1490 tb_next = tb1;
1491
1492 /* remove tb from the jmp_first list */
1493 ptb = &tb_next->jmp_first;
1494 for(;;) {
1495 tb1 = *ptb;
1496 n1 = (long)tb1 & 3;
1497 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1498 if (n1 == n && tb1 == tb)
1499 break;
1500 ptb = &tb1->jmp_next[n1];
1501 }
1502 *ptb = tb->jmp_next[n];
1503 tb->jmp_next[n] = NULL;
1504
1505 /* suppress the jump to next tb in generated code */
1506 tb_reset_jump(tb, n);
1507
1508 /* suppress jumps in the tb we could have jumped to */
1509 tb_reset_jump_recursive(tb_next);
1510 }
1511}
1512
1513static void tb_reset_jump_recursive(TranslationBlock *tb)
1514{
1515 tb_reset_jump_recursive2(tb, 0);
1516 tb_reset_jump_recursive2(tb, 1);
1517}
1518
1519#if defined(TARGET_HAS_ICE)
1520static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1521{
1522 target_phys_addr_t addr;
1523 target_ulong pd;
1524 ram_addr_t ram_addr;
1525 PhysPageDesc *p;
1526
1527 addr = cpu_get_phys_page_debug(env, pc);
1528 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1529 if (!p) {
1530 pd = IO_MEM_UNASSIGNED;
1531 } else {
1532 pd = p->phys_offset;
1533 }
1534 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1535 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1536}
1537#endif
1538
1539/* Add a watchpoint. */
1540int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1541 int flags, CPUWatchpoint **watchpoint)
1542{
1543 target_ulong len_mask = ~(len - 1);
1544 CPUWatchpoint *wp;
1545
1546 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1547 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1548 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1549 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1550#ifndef VBOX
1551 return -EINVAL;
1552#else
1553 return VERR_INVALID_PARAMETER;
1554#endif
1555 }
1556 wp = qemu_malloc(sizeof(*wp));
1557
1558 wp->vaddr = addr;
1559 wp->len_mask = len_mask;
1560 wp->flags = flags;
1561
1562 /* keep all GDB-injected watchpoints in front */
1563 if (flags & BP_GDB)
1564 TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1565 else
1566 TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1567
1568 tlb_flush_page(env, addr);
1569
1570 if (watchpoint)
1571 *watchpoint = wp;
1572 return 0;
1573}
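/* The len_mask check above relies on 'len' being a power of two: ~(len - 1)
   clears exactly the offset bits, so (addr & ~len_mask) != 0 flags a watchpoint
   that is not aligned to its own length.  E.g. len == 4 gives len_mask == ~3;
   addr == 0x1002 is rejected while addr == 0x1004 is accepted. */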
1574
1575/* Remove a specific watchpoint. */
1576int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1577 int flags)
1578{
1579 target_ulong len_mask = ~(len - 1);
1580 CPUWatchpoint *wp;
1581
1582 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1583 if (addr == wp->vaddr && len_mask == wp->len_mask
1584 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1585 cpu_watchpoint_remove_by_ref(env, wp);
1586 return 0;
1587 }
1588 }
1589#ifndef VBOX
1590 return -ENOENT;
1591#else
1592 return VERR_NOT_FOUND;
1593#endif
1594}
1595
1596/* Remove a specific watchpoint by reference. */
1597void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1598{
1599 TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1600
1601 tlb_flush_page(env, watchpoint->vaddr);
1602
1603 qemu_free(watchpoint);
1604}
1605
1606/* Remove all matching watchpoints. */
1607void cpu_watchpoint_remove_all(CPUState *env, int mask)
1608{
1609 CPUWatchpoint *wp, *next;
1610
1611 TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1612 if (wp->flags & mask)
1613 cpu_watchpoint_remove_by_ref(env, wp);
1614 }
1615}
1616
1617/* Add a breakpoint. */
1618int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1619 CPUBreakpoint **breakpoint)
1620{
1621#if defined(TARGET_HAS_ICE)
1622 CPUBreakpoint *bp;
1623
1624 bp = qemu_malloc(sizeof(*bp));
1625
1626 bp->pc = pc;
1627 bp->flags = flags;
1628
1629 /* keep all GDB-injected breakpoints in front */
1630 if (flags & BP_GDB)
1631 TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1632 else
1633 TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1634
1635 breakpoint_invalidate(env, pc);
1636
1637 if (breakpoint)
1638 *breakpoint = bp;
1639 return 0;
1640#else
1641 return -ENOSYS;
1642#endif
1643}
1644
1645/* Remove a specific breakpoint. */
1646int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1647{
1648#if defined(TARGET_HAS_ICE)
1649 CPUBreakpoint *bp;
1650
1651 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1652 if (bp->pc == pc && bp->flags == flags) {
1653 cpu_breakpoint_remove_by_ref(env, bp);
1654 return 0;
1655 }
1656 }
1657# ifndef VBOX
1658 return -ENOENT;
1659# else
1660 return VERR_NOT_FOUND;
1661# endif
1662#else
1663 return -ENOSYS;
1664#endif
1665}
1666
1667/* Remove a specific breakpoint by reference. */
1668void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1669{
1670#if defined(TARGET_HAS_ICE)
1671 TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1672
1673 breakpoint_invalidate(env, breakpoint->pc);
1674
1675 qemu_free(breakpoint);
1676#endif
1677}
1678
1679/* Remove all matching breakpoints. */
1680void cpu_breakpoint_remove_all(CPUState *env, int mask)
1681{
1682#if defined(TARGET_HAS_ICE)
1683 CPUBreakpoint *bp, *next;
1684
1685 TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1686 if (bp->flags & mask)
1687 cpu_breakpoint_remove_by_ref(env, bp);
1688 }
1689#endif
1690}
1691
1692/* enable or disable single step mode. EXCP_DEBUG is returned by the
1693 CPU loop after each instruction */
1694void cpu_single_step(CPUState *env, int enabled)
1695{
1696#if defined(TARGET_HAS_ICE)
1697 if (env->singlestep_enabled != enabled) {
1698 env->singlestep_enabled = enabled;
1699 if (kvm_enabled())
1700 kvm_update_guest_debug(env, 0);
1701 else {
1702 /* must flush all the translated code to avoid inconsistencies */
1703 /* XXX: only flush what is necessary */
1704 tb_flush(env);
1705 }
1706 }
1707#endif
1708}
1709
1710#ifndef VBOX
1711
1712/* enable or disable low-level logging */
1713void cpu_set_log(int log_flags)
1714{
1715 loglevel = log_flags;
1716 if (loglevel && !logfile) {
1717 logfile = fopen(logfilename, log_append ? "a" : "w");
1718 if (!logfile) {
1719 perror(logfilename);
1720 _exit(1);
1721 }
1722#if !defined(CONFIG_SOFTMMU)
1723 /* must avoid glibc's use of mmap() by setting a buffer "by hand" */
1724 {
1725 static char logfile_buf[4096];
1726 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1727 }
1728#else
1729 setvbuf(logfile, NULL, _IOLBF, 0);
1730#endif
1731 log_append = 1;
1732 }
1733 if (!loglevel && logfile) {
1734 fclose(logfile);
1735 logfile = NULL;
1736 }
1737}
1738
1739void cpu_set_log_filename(const char *filename)
1740{
1741 logfilename = strdup(filename);
1742 if (logfile) {
1743 fclose(logfile);
1744 logfile = NULL;
1745 }
1746 cpu_set_log(loglevel);
1747}
1748
1749#endif /* !VBOX */
1750
1751static void cpu_unlink_tb(CPUState *env)
1752{
1753#if defined(USE_NPTL)
1754 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1755 problem and hope the cpu will stop of its own accord. For userspace
1756 emulation this often isn't actually as bad as it sounds. Often
1757 signals are used primarily to interrupt blocking syscalls. */
1758#else
1759 TranslationBlock *tb;
1760 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1761
1762 tb = env->current_tb;
1763 /* if the cpu is currently executing code, we must unlink it and
1764 all the potentially executing TBs */
1765 if (tb && !testandset(&interrupt_lock)) {
1766 env->current_tb = NULL;
1767 tb_reset_jump_recursive(tb);
1768 resetlock(&interrupt_lock);
1769 }
1770#endif
1771}
1772
1773/* mask must never be zero, except for A20 change call */
1774void cpu_interrupt(CPUState *env, int mask)
1775{
1776 int old_mask;
1777
1778 old_mask = env->interrupt_request;
1779#ifndef VBOX
1780 env->interrupt_request |= mask;
1781#else /* VBOX */
1782 VM_ASSERT_EMT(env->pVM);
1783 ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, mask);
1784#endif /* VBOX */
1785
1786#ifndef VBOX
1787#ifndef CONFIG_USER_ONLY
1788 /*
1789 * If called from iothread context, wake the target cpu in
1790 * case it's halted.
1791 */
1792 if (!qemu_cpu_self(env)) {
1793 qemu_cpu_kick(env);
1794 return;
1795 }
1796#endif
1797#endif /* !VBOX */
1798
1799 if (use_icount) {
1800 env->icount_decr.u16.high = 0xffff;
1801#ifndef CONFIG_USER_ONLY
1802 if (!can_do_io(env)
1803 && (mask & ~old_mask) != 0) {
1804 cpu_abort(env, "Raised interrupt while not in I/O function");
1805 }
1806#endif
1807 } else {
1808 cpu_unlink_tb(env);
1809 }
1810}
1811
1812void cpu_reset_interrupt(CPUState *env, int mask)
1813{
1814#ifdef VBOX
1815 /*
1816 * Note: the current implementation can be executed by another thread without problems; make sure this remains true
1817 * for future changes!
1818 */
1819 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~mask);
1820#else /* !VBOX */
1821 env->interrupt_request &= ~mask;
1822#endif /* !VBOX */
1823}
1824
1825void cpu_exit(CPUState *env)
1826{
1827 env->exit_request = 1;
1828 cpu_unlink_tb(env);
1829}
1830
1831#ifndef VBOX
1832const CPULogItem cpu_log_items[] = {
1833 { CPU_LOG_TB_OUT_ASM, "out_asm",
1834 "show generated host assembly code for each compiled TB" },
1835 { CPU_LOG_TB_IN_ASM, "in_asm",
1836 "show target assembly code for each compiled TB" },
1837 { CPU_LOG_TB_OP, "op",
1838 "show micro ops for each compiled TB" },
1839 { CPU_LOG_TB_OP_OPT, "op_opt",
1840 "show micro ops "
1841#ifdef TARGET_I386
1842 "before eflags optimization and "
1843#endif
1844 "after liveness analysis" },
1845 { CPU_LOG_INT, "int",
1846 "show interrupts/exceptions in short format" },
1847 { CPU_LOG_EXEC, "exec",
1848 "show trace before each executed TB (lots of logs)" },
1849 { CPU_LOG_TB_CPU, "cpu",
1850 "show CPU state before block translation" },
1851#ifdef TARGET_I386
1852 { CPU_LOG_PCALL, "pcall",
1853 "show protected mode far calls/returns/exceptions" },
1854 { CPU_LOG_RESET, "cpu_reset",
1855 "show CPU state before CPU resets" },
1856#endif
1857#ifdef DEBUG_IOPORT
1858 { CPU_LOG_IOPORT, "ioport",
1859 "show all i/o ports accesses" },
1860#endif
1861 { 0, NULL, NULL },
1862};
1863
1864static int cmp1(const char *s1, int n, const char *s2)
1865{
1866 if (strlen(s2) != n)
1867 return 0;
1868 return memcmp(s1, s2, n) == 0;
1869}
1870
1871/* takes a comma-separated list of log masks. Returns 0 on error. */
1872int cpu_str_to_log_mask(const char *str)
1873{
1874 const CPULogItem *item;
1875 int mask;
1876 const char *p, *p1;
1877
1878 p = str;
1879 mask = 0;
1880 for(;;) {
1881 p1 = strchr(p, ',');
1882 if (!p1)
1883 p1 = p + strlen(p);
1884 if(cmp1(p,p1-p,"all")) {
1885 for(item = cpu_log_items; item->mask != 0; item++) {
1886 mask |= item->mask;
1887 }
1888 } else {
1889 for(item = cpu_log_items; item->mask != 0; item++) {
1890 if (cmp1(p, p1 - p, item->name))
1891 goto found;
1892 }
1893 return 0;
1894 }
1895 found:
1896 mask |= item->mask;
1897 if (*p1 != ',')
1898 break;
1899 p = p1 + 1;
1900 }
1901 return mask;
1902}
1903#endif /* !VBOX */
1904
1905#ifndef VBOX /* VBOX: we have our own routine. */
1906void cpu_abort(CPUState *env, const char *fmt, ...)
1907{
1908 va_list ap;
1909 va_list ap2;
1910
1911 va_start(ap, fmt);
1912 va_copy(ap2, ap);
1913 fprintf(stderr, "qemu: fatal: ");
1914 vfprintf(stderr, fmt, ap);
1915 fprintf(stderr, "\n");
1916#ifdef TARGET_I386
1917 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1918#else
1919 cpu_dump_state(env, stderr, fprintf, 0);
1920#endif
1921 if (qemu_log_enabled()) {
1922 qemu_log("qemu: fatal: ");
1923 qemu_log_vprintf(fmt, ap2);
1924 qemu_log("\n");
1925#ifdef TARGET_I386
1926 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1927#else
1928 log_cpu_state(env, 0);
1929#endif
1930 qemu_log_flush();
1931 qemu_log_close();
1932 }
1933 va_end(ap2);
1934 va_end(ap);
1935 abort();
1936}
1937#endif /* !VBOX */
1938
1939#ifndef VBOX
1940CPUState *cpu_copy(CPUState *env)
1941{
1942 CPUState *new_env = cpu_init(env->cpu_model_str);
1943 CPUState *next_cpu = new_env->next_cpu;
1944 int cpu_index = new_env->cpu_index;
1945#if defined(TARGET_HAS_ICE)
1946 CPUBreakpoint *bp;
1947 CPUWatchpoint *wp;
1948#endif
1949
1950 memcpy(new_env, env, sizeof(CPUState));
1951
1952 /* Preserve chaining and index. */
1953 new_env->next_cpu = next_cpu;
1954 new_env->cpu_index = cpu_index;
1955
1956 /* Clone all break/watchpoints.
1957 Note: Once we support ptrace with hw-debug register access, make sure
1958 BP_CPU break/watchpoints are handled correctly on clone. */
1959 TAILQ_INIT(&new_env->breakpoints);
1960 TAILQ_INIT(&new_env->watchpoints);
1961#if defined(TARGET_HAS_ICE)
1962 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1963 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1964 }
1965 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1966 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1967 wp->flags, NULL);
1968 }
1969#endif
1970
1971 return new_env;
1972}
1973#endif /* !VBOX */
1974
1975#if !defined(CONFIG_USER_ONLY)
1976
1977static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1978{
1979 unsigned int i;
1980
1981 /* Discard jump cache entries for any tb which might potentially
1982 overlap the flushed page. */
1983 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1984 memset (&env->tb_jmp_cache[i], 0,
1985 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1986
1987 i = tb_jmp_cache_hash_page(addr);
1988 memset (&env->tb_jmp_cache[i], 0,
1989 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1990
1991#ifdef VBOX
1992 /* inform raw mode about TLB page flush */
1993 remR3FlushPage(env, addr);
1994#endif /* VBOX */
1995}
1996
1997static CPUTLBEntry s_cputlb_empty_entry = {
1998 .addr_read = -1,
1999 .addr_write = -1,
2000 .addr_code = -1,
2001 .addend = -1,
2002};
2003
2004/* NOTE: if flush_global is true, also flush global entries (not
2005 implemented yet) */
2006void tlb_flush(CPUState *env, int flush_global)
2007{
2008 int i;
2009
2010#if defined(DEBUG_TLB)
2011 printf("tlb_flush:\n");
2012#endif
2013 /* must reset current TB so that interrupts cannot modify the
2014 links while we are modifying them */
2015 env->current_tb = NULL;
2016
2017 for(i = 0; i < CPU_TLB_SIZE; i++) {
2018 int mmu_idx;
2019 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2020 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
2021 }
2022 }
2023
2024 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
2025
2026#ifdef CONFIG_KQEMU
2027 if (env->kqemu_enabled) {
2028 kqemu_flush(env, flush_global);
2029 }
2030#endif
2031#ifdef VBOX
2032 /* inform raw mode about TLB flush */
2033 remR3FlushTLB(env, flush_global);
2034#endif
2035 tlb_flush_count++;
2036}
2037
2038static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
2039{
2040 if (addr == (tlb_entry->addr_read &
2041 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
2042 addr == (tlb_entry->addr_write &
2043 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
2044 addr == (tlb_entry->addr_code &
2045 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
2046 *tlb_entry = s_cputlb_empty_entry;
2047 }
2048}
2049
2050void tlb_flush_page(CPUState *env, target_ulong addr)
2051{
2052 int i;
2053 int mmu_idx;
2054
2055#if defined(DEBUG_TLB)
2056 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
2057#endif
2058 /* must reset current TB so that interrupts cannot modify the
2059 links while we are modifying them */
2060 env->current_tb = NULL;
2061
2062 addr &= TARGET_PAGE_MASK;
2063 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2064 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2065 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
2066
2067 tlb_flush_jmp_cache(env, addr);
2068
2069#ifdef CONFIG_KQEMU
2070 if (env->kqemu_enabled) {
2071 kqemu_flush_page(env, addr);
2072 }
2073#endif
2074}
2075
2076/* update the TLBs so that writes to code in the virtual page 'addr'
2077 can be detected */
2078static void tlb_protect_code(ram_addr_t ram_addr)
2079{
2080 cpu_physical_memory_reset_dirty(ram_addr,
2081 ram_addr + TARGET_PAGE_SIZE,
2082 CODE_DIRTY_FLAG);
2083#if defined(VBOX) && defined(REM_MONITOR_CODE_PAGES)
2084 /** @todo Retest this? This function has changed... */
2085 remR3ProtectCode(cpu_single_env, ram_addr);
2086#endif
2087}
2088
2089/* update the TLB so that writes in the physical page 'ram_addr' are no longer
2090   tested for self-modifying code */
2091static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
2092 target_ulong vaddr)
2093{
2094#ifdef VBOX
2095 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2096#endif
2097 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
2098}
2099
2100static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
2101 unsigned long start, unsigned long length)
2102{
2103 unsigned long addr;
2104
2105#ifdef VBOX
2106 if (start & 3)
2107 return;
2108#endif
2109 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2110 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2111 if ((addr - start) < length) {
2112 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
2113 }
2114 }
2115}
2116
2117/* Note: start and end must be within the same ram block. */
2118void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
2119 int dirty_flags)
2120{
2121 CPUState *env;
2122 unsigned long length, start1;
2123 int i, mask, len;
2124 uint8_t *p;
2125
2126 start &= TARGET_PAGE_MASK;
2127 end = TARGET_PAGE_ALIGN(end);
2128
2129 length = end - start;
2130 if (length == 0)
2131 return;
2132 len = length >> TARGET_PAGE_BITS;
2133#ifdef CONFIG_KQEMU
2134 /* XXX: should not depend on cpu context */
2135 env = first_cpu;
2136 if (env->kqemu_enabled) {
2137 ram_addr_t addr;
2138 addr = start;
2139 for(i = 0; i < len; i++) {
2140 kqemu_set_notdirty(env, addr);
2141 addr += TARGET_PAGE_SIZE;
2142 }
2143 }
2144#endif
2145 mask = ~dirty_flags;
2146 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
2147#ifdef VBOX
2148 if (RT_LIKELY((start >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2149#endif
2150 for(i = 0; i < len; i++)
2151 p[i] &= mask;
2152
2153 /* we modify the TLB cache so that the dirty bit will be set again
2154 when accessing the range */
2155#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2156 start1 = start;
2157#elif !defined(VBOX)
2158 start1 = (unsigned long)qemu_get_ram_ptr(start);
2159    /* Check that we don't span multiple blocks - this breaks the
2160 address comparisons below. */
2161 if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
2162 != (end - 1) - start) {
2163 abort();
2164 }
2165#else
2166 start1 = (unsigned long)remR3TlbGCPhys2Ptr(first_cpu, start, 1 /*fWritable*/); /** @todo page replacing (sharing or read only) may cause trouble, fix interface/whatever. */
2167#endif
2168
2169 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2170 int mmu_idx;
2171 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2172 for(i = 0; i < CPU_TLB_SIZE; i++)
2173 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2174 start1, length);
2175 }
2176 }
2177}
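
/* Editor's illustration (not part of the upstream file): a sketch of the
   usual consumer pattern for the dirty bitmap, e.g. a display device that
   scans its video RAM for modified pages and then clears the bits again.
   cpu_physical_memory_get_dirty() and VGA_DIRTY_FLAG are assumed to be the
   helper/flag declared in cpu-all.h; refresh_display(), vram_offset and
   vram_size are hypothetical device names. */
#if 0
static void refresh_display(ram_addr_t vram_offset, ram_addr_t vram_size)
{
    ram_addr_t addr;

    for (addr = 0; addr < vram_size; addr += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_get_dirty(vram_offset + addr, VGA_DIRTY_FLAG)) {
            /* redraw the scanlines backed by this page ... */
        }
    }
    /* mark the whole region (one ram block) clean again for the next refresh */
    cpu_physical_memory_reset_dirty(vram_offset, vram_offset + vram_size,
                                    VGA_DIRTY_FLAG);
}
#endif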
2178
2179#ifndef VBOX
2180int cpu_physical_memory_set_dirty_tracking(int enable)
2181{
2182 in_migration = enable;
2183 if (kvm_enabled()) {
2184 return kvm_set_migration_log(enable);
2185 }
2186 return 0;
2187}
2188
2189int cpu_physical_memory_get_dirty_tracking(void)
2190{
2191 return in_migration;
2192}
2193#endif /* !VBOX */
2194
2195int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2196 target_phys_addr_t end_addr)
2197{
2198 int ret = 0;
2199
2200 if (kvm_enabled())
2201 ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
2202 return ret;
2203}
2204
2205#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2206DECLINLINE(void) tlb_update_dirty(CPUTLBEntry *tlb_entry, target_phys_addr_t phys_addend)
2207#else
2208static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2209#endif
2210{
2211 ram_addr_t ram_addr;
2212 void *p;
2213
2214 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2215#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2216 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2217#elif !defined(VBOX)
2218 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2219 + tlb_entry->addend);
2220 ram_addr = qemu_ram_addr_from_host(p);
2221#else
2222 Assert(phys_addend != -1);
2223 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + phys_addend;
2224#endif
2225 if (!cpu_physical_memory_is_dirty(ram_addr)) {
2226 tlb_entry->addr_write |= TLB_NOTDIRTY;
2227 }
2228 }
2229}
2230
2231/* update the TLB according to the current state of the dirty bits */
2232void cpu_tlb_update_dirty(CPUState *env)
2233{
2234 int i;
2235 int mmu_idx;
2236 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2237 for(i = 0; i < CPU_TLB_SIZE; i++)
2238#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2239 tlb_update_dirty(&env->tlb_table[mmu_idx][i], env->phys_addends[mmu_idx][i]);
2240#else
2241 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2242#endif
2243 }
2244}
2245
2246static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2247{
2248 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2249 tlb_entry->addr_write = vaddr;
2250}
2251
2252/* update the TLB corresponding to virtual page vaddr
2253 so that it is no longer dirty */
2254static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2255{
2256 int i;
2257 int mmu_idx;
2258
2259 vaddr &= TARGET_PAGE_MASK;
2260 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2261 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2262 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
2263}
2264
2265/* add a new TLB entry. At most one entry for a given virtual address
2266 is permitted. Return 0 if OK or 2 if the page could not be mapped
2267   (can only happen in non-SOFTMMU mode for I/O pages or pages
2268 conflicting with the host address space). */
2269int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2270 target_phys_addr_t paddr, int prot,
2271 int mmu_idx, int is_softmmu)
2272{
2273 PhysPageDesc *p;
2274 unsigned long pd;
2275 unsigned int index;
2276 target_ulong address;
2277 target_ulong code_address;
2278 target_phys_addr_t addend;
2279 int ret;
2280 CPUTLBEntry *te;
2281 CPUWatchpoint *wp;
2282 target_phys_addr_t iotlb;
2283#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2284 int read_mods = 0, write_mods = 0, code_mods = 0;
2285#endif
2286
2287 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2288 if (!p) {
2289 pd = IO_MEM_UNASSIGNED;
2290 } else {
2291 pd = p->phys_offset;
2292 }
2293#if defined(DEBUG_TLB)
2294 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2295 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2296#endif
2297
2298 ret = 0;
2299 address = vaddr;
2300 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2301 /* IO memory case (romd handled later) */
2302 address |= TLB_MMIO;
2303 }
2304#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2305 addend = pd & TARGET_PAGE_MASK;
2306#elif !defined(VBOX)
2307 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2308#else
2309 /** @todo this is racing the phys_page_find call above since it may register
2310 * a new chunk of memory... */
2311 addend = (unsigned long)remR3TlbGCPhys2Ptr(env, pd & TARGET_PAGE_MASK, !!(prot & PAGE_WRITE));
2312#endif
2313
2314 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2315 /* Normal RAM. */
2316 iotlb = pd & TARGET_PAGE_MASK;
2317 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2318 iotlb |= IO_MEM_NOTDIRTY;
2319 else
2320 iotlb |= IO_MEM_ROM;
2321 } else {
2322 /* IO handlers are currently passed a physical address.
2323 It would be nice to pass an offset from the base address
2324 of that region. This would avoid having to special case RAM,
2325 and avoid full address decoding in every device.
2326 We can't use the high bits of pd for this because
2327 IO_MEM_ROMD uses these as a ram address. */
2328 iotlb = (pd & ~TARGET_PAGE_MASK);
2329#ifndef VBOX
2330 if (p) {
2331#else
2332 if ( p->phys_offset
2333 && (pd & ~TARGET_PAGE_MASK) != env->pVM->rem.s.iMMIOMemType
2334 && (pd & ~TARGET_PAGE_MASK) != env->pVM->rem.s.iHandlerMemType) {
2335#endif
2336 iotlb += p->region_offset;
2337 } else {
2338 iotlb += paddr;
2339 }
2340 }
2341
2342 code_address = address;
2343#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2344
2345 if (addend & 0x3)
2346 {
2347 if (addend & 0x2)
2348 {
2349 /* catch write */
2350 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
2351 write_mods |= TLB_MMIO;
2352 }
2353 else if (addend & 0x1)
2354 {
2355 /* catch all */
2356 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
2357 {
2358 read_mods |= TLB_MMIO;
2359 write_mods |= TLB_MMIO;
2360 code_mods |= TLB_MMIO;
2361 }
2362 }
2363 if ((iotlb & ~TARGET_PAGE_MASK) == 0)
2364 iotlb = env->pVM->rem.s.iHandlerMemType + paddr;
2365 addend &= ~(target_ulong)0x3;
2366 }
2367
2368#endif
2369 /* Make accesses to pages with watchpoints go via the
2370 watchpoint trap routines. */
2371 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2372 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2373 iotlb = io_mem_watch + paddr;
2374 /* TODO: The memory case can be optimized by not trapping
2375 reads of pages with a write breakpoint. */
2376 address |= TLB_MMIO;
2377 }
2378 }
2379
2380 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2381 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2382 te = &env->tlb_table[mmu_idx][index];
2383 te->addend = addend - vaddr;
2384 if (prot & PAGE_READ) {
2385 te->addr_read = address;
2386 } else {
2387 te->addr_read = -1;
2388 }
2389
2390 if (prot & PAGE_EXEC) {
2391 te->addr_code = code_address;
2392 } else {
2393 te->addr_code = -1;
2394 }
2395 if (prot & PAGE_WRITE) {
2396 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2397 (pd & IO_MEM_ROMD)) {
2398 /* Write access calls the I/O callback. */
2399 te->addr_write = address | TLB_MMIO;
2400 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2401 !cpu_physical_memory_is_dirty(pd)) {
2402 te->addr_write = address | TLB_NOTDIRTY;
2403 } else {
2404 te->addr_write = address;
2405 }
2406 } else {
2407 te->addr_write = -1;
2408 }
2409
2410#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2411 if (prot & PAGE_READ)
2412 te->addr_read |= read_mods;
2413 if (prot & PAGE_EXEC)
2414 te->addr_code |= code_mods;
2415 if (prot & PAGE_WRITE)
2416 te->addr_write |= write_mods;
2417
2418    env->phys_addends[mmu_idx][index] = (pd & TARGET_PAGE_MASK) - vaddr;
2419#endif
2420
2421#ifdef VBOX
2422 /* inform raw mode about TLB page change */
2423 remR3FlushPage(env, vaddr);
2424#endif
2425 return ret;
2426}
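
/* Editor's illustration (not part of the upstream file): tlb_set_page_exec()
   is normally called from a target's MMU fault handler once the guest page
   tables have been walked.  The sketch below assumes a hypothetical
   translate_via_page_tables() helper that yields the physical address and
   protection bits; real targets do this work in their cpu_*_handle_mmu_fault()
   routines. */
#if 0
static int handle_mmu_fault_sketch(CPUState *env, target_ulong addr,
                                   int is_write, int mmu_idx)
{
    target_phys_addr_t paddr;
    int prot;

    /* translate_via_page_tables() is a hypothetical stand-in */
    if (translate_via_page_tables(env, addr, is_write, &paddr, &prot) < 0)
        return 1;   /* deliver a page fault to the guest */

    /* one TLB entry per virtual page; both addresses page aligned */
    return tlb_set_page_exec(env, addr & TARGET_PAGE_MASK,
                             paddr & TARGET_PAGE_MASK, prot, mmu_idx, 1);
}
#endif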
2427
2428#else
2429
2430void tlb_flush(CPUState *env, int flush_global)
2431{
2432}
2433
2434void tlb_flush_page(CPUState *env, target_ulong addr)
2435{
2436}
2437
2438int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2439 target_phys_addr_t paddr, int prot,
2440 int mmu_idx, int is_softmmu)
2441{
2442 return 0;
2443}
2444
2445#ifndef VBOX
2446
2447/*
2448 * Walks guest process memory "regions" one by one
2449 * and calls callback function 'fn' for each region.
2450 */
2451int walk_memory_regions(void *priv,
2452 int (*fn)(void *, unsigned long, unsigned long, unsigned long))
2453{
2454 unsigned long start, end;
2455 PageDesc *p = NULL;
2456 int i, j, prot, prot1;
2457 int rc = 0;
2458
2459 start = end = -1;
2460 prot = 0;
2461
2462 for (i = 0; i <= L1_SIZE; i++) {
2463 p = (i < L1_SIZE) ? l1_map[i] : NULL;
2464 for (j = 0; j < L2_SIZE; j++) {
2465 prot1 = (p == NULL) ? 0 : p[j].flags;
2466 /*
2467             * A "region" is one contiguous chunk of memory
2468             * that has the same protection flags set.
2469 */
2470 if (prot1 != prot) {
2471 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2472 if (start != -1) {
2473 rc = (*fn)(priv, start, end, prot);
2474 /* callback can stop iteration by returning != 0 */
2475 if (rc != 0)
2476 return (rc);
2477 }
2478 if (prot1 != 0)
2479 start = end;
2480 else
2481 start = -1;
2482 prot = prot1;
2483 }
2484 if (p == NULL)
2485 break;
2486 }
2487 }
2488 return (rc);
2489}
2490
2491static int dump_region(void *priv, unsigned long start,
2492 unsigned long end, unsigned long prot)
2493{
2494 FILE *f = (FILE *)priv;
2495
2496 (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2497 start, end, end - start,
2498 ((prot & PAGE_READ) ? 'r' : '-'),
2499 ((prot & PAGE_WRITE) ? 'w' : '-'),
2500 ((prot & PAGE_EXEC) ? 'x' : '-'));
2501
2502 return (0);
2503}
2504
2505/* dump memory mappings */
2506void page_dump(FILE *f)
2507{
2508 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2509 "start", "end", "size", "prot");
2510 walk_memory_regions(f, dump_region);
2511}
2512
2513#endif /* !VBOX */
2514
2515int page_get_flags(target_ulong address)
2516{
2517 PageDesc *p;
2518
2519 p = page_find(address >> TARGET_PAGE_BITS);
2520 if (!p)
2521 return 0;
2522 return p->flags;
2523}
2524
2525/* modify the flags of a page and invalidate the code if
2526   necessary. The flag PAGE_WRITE_ORG is set automatically
2527 depending on PAGE_WRITE */
2528void page_set_flags(target_ulong start, target_ulong end, int flags)
2529{
2530 PageDesc *p;
2531 target_ulong addr;
2532
2533 /* mmap_lock should already be held. */
2534 start = start & TARGET_PAGE_MASK;
2535 end = TARGET_PAGE_ALIGN(end);
2536 if (flags & PAGE_WRITE)
2537 flags |= PAGE_WRITE_ORG;
2538#ifdef VBOX
2539 AssertMsgFailed(("We shouldn't be here, and if we should, we must have an env to do the proper locking!\n"));
2540#endif
2541 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2542 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2543 /* We may be called for host regions that are outside guest
2544 address space. */
2545 if (!p)
2546 return;
2547 /* if the write protection is set, then we invalidate the code
2548 inside */
2549 if (!(p->flags & PAGE_WRITE) &&
2550 (flags & PAGE_WRITE) &&
2551 p->first_tb) {
2552 tb_invalidate_phys_page(addr, 0, NULL);
2553 }
2554 p->flags = flags;
2555 }
2556}
2557
2558int page_check_range(target_ulong start, target_ulong len, int flags)
2559{
2560 PageDesc *p;
2561 target_ulong end;
2562 target_ulong addr;
2563
2564 if (start + len < start)
2565 /* we've wrapped around */
2566 return -1;
2567
2568    end = TARGET_PAGE_ALIGN(start+len); /* must do this before we lose bits in the next step */
2569 start = start & TARGET_PAGE_MASK;
2570
2571 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2572 p = page_find(addr >> TARGET_PAGE_BITS);
2573 if( !p )
2574 return -1;
2575 if( !(p->flags & PAGE_VALID) )
2576 return -1;
2577
2578 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2579 return -1;
2580 if (flags & PAGE_WRITE) {
2581 if (!(p->flags & PAGE_WRITE_ORG))
2582 return -1;
2583 /* unprotect the page if it was put read-only because it
2584 contains translated code */
2585 if (!(p->flags & PAGE_WRITE)) {
2586 if (!page_unprotect(addr, 0, NULL))
2587 return -1;
2588 }
2589 return 0;
2590 }
2591 }
2592 return 0;
2593}
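
/* Editor's illustration (not part of the upstream file): user-mode emulation
   typically validates a guest buffer with page_check_range() before touching
   it through a direct host pointer; the lock_user()/unlock_user() helpers used
   further below wrap the same idea.  copy_from_guest_sketch() is a
   hypothetical name, g2h() is the usual guest-to-host address macro. */
#if 0
static int copy_from_guest_sketch(void *dst, target_ulong guest_addr,
                                  target_ulong len)   /* hypothetical helper */
{
    if (page_check_range(guest_addr, len, PAGE_READ) < 0)
        return -1;                       /* would fault in the guest */
    memcpy(dst, g2h(guest_addr), len);   /* g2h(): guest -> host address */
    return 0;
}
#endif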
2594
2595/* called from signal handler: invalidate the code and unprotect the
2596 page. Return TRUE if the fault was successfully handled. */
2597int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2598{
2599 unsigned int page_index, prot, pindex;
2600 PageDesc *p, *p1;
2601 target_ulong host_start, host_end, addr;
2602
2603 /* Technically this isn't safe inside a signal handler. However we
2604 know this only ever happens in a synchronous SEGV handler, so in
2605 practice it seems to be ok. */
2606 mmap_lock();
2607
2608 host_start = address & qemu_host_page_mask;
2609 page_index = host_start >> TARGET_PAGE_BITS;
2610 p1 = page_find(page_index);
2611 if (!p1) {
2612 mmap_unlock();
2613 return 0;
2614 }
2615 host_end = host_start + qemu_host_page_size;
2616 p = p1;
2617 prot = 0;
2618 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2619 prot |= p->flags;
2620 p++;
2621 }
2622 /* if the page was really writable, then we change its
2623 protection back to writable */
2624 if (prot & PAGE_WRITE_ORG) {
2625 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2626 if (!(p1[pindex].flags & PAGE_WRITE)) {
2627 mprotect((void *)g2h(host_start), qemu_host_page_size,
2628 (prot & PAGE_BITS) | PAGE_WRITE);
2629 p1[pindex].flags |= PAGE_WRITE;
2630 /* and since the content will be modified, we must invalidate
2631 the corresponding translated code. */
2632 tb_invalidate_phys_page(address, pc, puc);
2633#ifdef DEBUG_TB_CHECK
2634 tb_invalidate_check(address);
2635#endif
2636 mmap_unlock();
2637 return 1;
2638 }
2639 }
2640 mmap_unlock();
2641 return 0;
2642}
2643
2644static inline void tlb_set_dirty(CPUState *env,
2645 unsigned long addr, target_ulong vaddr)
2646{
2647}
2648#endif /* defined(CONFIG_USER_ONLY) */
2649
2650#if !defined(CONFIG_USER_ONLY)
2651
2652static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2653 ram_addr_t memory, ram_addr_t region_offset);
2654static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2655 ram_addr_t orig_memory, ram_addr_t region_offset);
2656#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2657 need_subpage) \
2658 do { \
2659 if (addr > start_addr) \
2660 start_addr2 = 0; \
2661 else { \
2662 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2663 if (start_addr2 > 0) \
2664 need_subpage = 1; \
2665 } \
2666 \
2667 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2668 end_addr2 = TARGET_PAGE_SIZE - 1; \
2669 else { \
2670 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2671 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2672 need_subpage = 1; \
2673 } \
2674 } while (0)
2675
2676/* register physical memory. 'size' must be a multiple of the target
2677 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2678 io memory page. The address used when calling the IO function is
2679 the offset from the start of the region, plus region_offset. Both
2680 start_addr and region_offset are rounded down to a page boundary
2681 before calculating this offset. This should not be a problem unless
2682 the low bits of start_addr and region_offset differ. */
2683void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2684 ram_addr_t size,
2685 ram_addr_t phys_offset,
2686 ram_addr_t region_offset)
2687{
2688 target_phys_addr_t addr, end_addr;
2689 PhysPageDesc *p;
2690 CPUState *env;
2691 ram_addr_t orig_size = size;
2692 void *subpage;
2693
2694#ifdef CONFIG_KQEMU
2695 /* XXX: should not depend on cpu context */
2696 env = first_cpu;
2697 if (env->kqemu_enabled) {
2698 kqemu_set_phys_mem(start_addr, size, phys_offset);
2699 }
2700#endif
2701 if (kvm_enabled())
2702 kvm_set_phys_mem(start_addr, size, phys_offset);
2703
2704 if (phys_offset == IO_MEM_UNASSIGNED) {
2705 region_offset = start_addr;
2706 }
2707 region_offset &= TARGET_PAGE_MASK;
2708 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2709 end_addr = start_addr + (target_phys_addr_t)size;
2710 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2711 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2712 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2713 ram_addr_t orig_memory = p->phys_offset;
2714 target_phys_addr_t start_addr2, end_addr2;
2715 int need_subpage = 0;
2716
2717 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2718 need_subpage);
2719 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2720 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2721 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2722 &p->phys_offset, orig_memory,
2723 p->region_offset);
2724 } else {
2725 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2726 >> IO_MEM_SHIFT];
2727 }
2728 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2729 region_offset);
2730 p->region_offset = 0;
2731 } else {
2732 p->phys_offset = phys_offset;
2733 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2734 (phys_offset & IO_MEM_ROMD))
2735 phys_offset += TARGET_PAGE_SIZE;
2736 }
2737 } else {
2738 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2739 p->phys_offset = phys_offset;
2740 p->region_offset = region_offset;
2741 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2742 (phys_offset & IO_MEM_ROMD)) {
2743 phys_offset += TARGET_PAGE_SIZE;
2744 } else {
2745 target_phys_addr_t start_addr2, end_addr2;
2746 int need_subpage = 0;
2747
2748 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2749 end_addr2, need_subpage);
2750
2751 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2752 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2753 &p->phys_offset, IO_MEM_UNASSIGNED,
2754 addr & TARGET_PAGE_MASK);
2755 subpage_register(subpage, start_addr2, end_addr2,
2756 phys_offset, region_offset);
2757 p->region_offset = 0;
2758 }
2759 }
2760 }
2761 region_offset += TARGET_PAGE_SIZE;
2762 }
2763
2764 /* since each CPU stores ram addresses in its TLB cache, we must
2765 reset the modified entries */
2766 /* XXX: slow ! */
2767 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2768 tlb_flush(env, 1);
2769 }
2770}
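
/* Editor's illustration (not part of the upstream file): the usual pairing of
   qemu_ram_alloc() with cpu_register_physical_memory_offset() when a board
   maps a block of RAM into the guest physical address space.  The names
   map_board_ram_sketch(), ram_base and ram_size are hypothetical board
   parameters; a region_offset of 0 is what the plain wrapper is expected to
   pass. */
#if 0
static void map_board_ram_sketch(target_phys_addr_t ram_base, ram_addr_t ram_size)
{
    /* carve out host memory and obtain its ram_addr_t offset */
    ram_addr_t ram_offset = qemu_ram_alloc(ram_size);

    /* map it at ram_base; zero low bits in the offset mean "plain RAM" */
    cpu_register_physical_memory_offset(ram_base, ram_size, ram_offset, 0);
}
#endif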
2771
2772/* XXX: temporary until new memory mapping API */
2773ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2774{
2775 PhysPageDesc *p;
2776
2777 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2778 if (!p)
2779 return IO_MEM_UNASSIGNED;
2780 return p->phys_offset;
2781}
2782
2783#ifndef VBOX
2784void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2785{
2786 if (kvm_enabled())
2787 kvm_coalesce_mmio_region(addr, size);
2788}
2789
2790void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2791{
2792 if (kvm_enabled())
2793 kvm_uncoalesce_mmio_region(addr, size);
2794}
2795
2796#ifdef CONFIG_KQEMU
2797/* XXX: better than nothing */
2798static ram_addr_t kqemu_ram_alloc(ram_addr_t size)
2799{
2800 ram_addr_t addr;
2801 if ((last_ram_offset + size) > kqemu_phys_ram_size) {
2802 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2803 (uint64_t)size, (uint64_t)kqemu_phys_ram_size);
2804 abort();
2805 }
2806 addr = last_ram_offset;
2807 last_ram_offset = TARGET_PAGE_ALIGN(last_ram_offset + size);
2808 return addr;
2809}
2810#endif
2811
2812ram_addr_t qemu_ram_alloc(ram_addr_t size)
2813{
2814 RAMBlock *new_block;
2815
2816#ifdef CONFIG_KQEMU
2817 if (kqemu_phys_ram_base) {
2818 return kqemu_ram_alloc(size);
2819 }
2820#endif
2821
2822 size = TARGET_PAGE_ALIGN(size);
2823 new_block = qemu_malloc(sizeof(*new_block));
2824
2825 new_block->host = qemu_vmalloc(size);
2826 new_block->offset = last_ram_offset;
2827 new_block->length = size;
2828
2829 new_block->next = ram_blocks;
2830 ram_blocks = new_block;
2831
2832 phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2833 (last_ram_offset + size) >> TARGET_PAGE_BITS);
2834 memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2835 0xff, size >> TARGET_PAGE_BITS);
2836
2837 last_ram_offset += size;
2838
2839 if (kvm_enabled())
2840 kvm_setup_guest_memory(new_block->host, size);
2841
2842 return new_block->offset;
2843}
2844
2845void qemu_ram_free(ram_addr_t addr)
2846{
2847 /* TODO: implement this. */
2848}
2849
2850/* Return a host pointer to ram allocated with qemu_ram_alloc.
2851 With the exception of the softmmu code in this file, this should
2852 only be used for local memory (e.g. video ram) that the device owns,
2853 and knows it isn't going to access beyond the end of the block.
2854
2855 It should not be used for general purpose DMA.
2856 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2857 */
2858void *qemu_get_ram_ptr(ram_addr_t addr)
2859{
2860 RAMBlock *prev;
2861 RAMBlock **prevp;
2862 RAMBlock *block;
2863
2864#ifdef CONFIG_KQEMU
2865 if (kqemu_phys_ram_base) {
2866 return kqemu_phys_ram_base + addr;
2867 }
2868#endif
2869
2870 prev = NULL;
2871 prevp = &ram_blocks;
2872 block = ram_blocks;
2873 while (block && (block->offset > addr
2874 || block->offset + block->length <= addr)) {
2875 if (prev)
2876 prevp = &prev->next;
2877 prev = block;
2878 block = block->next;
2879 }
2880 if (!block) {
2881 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2882 abort();
2883 }
2884    /* Move this entry to the start of the list.  */
2885 if (prev) {
2886 prev->next = block->next;
2887 block->next = *prevp;
2888 *prevp = block;
2889 }
2890 return block->host + (addr - block->offset);
2891}
2892
2893/* Some of the softmmu routines need to translate from a host pointer
2894 (typically a TLB entry) back to a ram offset. */
2895ram_addr_t qemu_ram_addr_from_host(void *ptr)
2896{
2897 RAMBlock *prev;
2898 RAMBlock **prevp;
2899 RAMBlock *block;
2900 uint8_t *host = ptr;
2901
2902#ifdef CONFIG_KQEMU
2903 if (kqemu_phys_ram_base) {
2904 return host - kqemu_phys_ram_base;
2905 }
2906#endif
2907
2908 prev = NULL;
2909 prevp = &ram_blocks;
2910 block = ram_blocks;
2911 while (block && (block->host > host
2912 || block->host + block->length <= host)) {
2913 if (prev)
2914 prevp = &prev->next;
2915 prev = block;
2916 block = block->next;
2917 }
2918 if (!block) {
2919 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2920 abort();
2921 }
2922 return block->offset + (host - block->host);
2923}
2924
2925#endif /* !VBOX */
2926
2927static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2928{
2929#ifdef DEBUG_UNASSIGNED
2930 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2931#endif
2932#if defined(TARGET_SPARC)
2933 do_unassigned_access(addr, 0, 0, 0, 1);
2934#endif
2935 return 0;
2936}
2937
2938static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2939{
2940#ifdef DEBUG_UNASSIGNED
2941 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2942#endif
2943#if defined(TARGET_SPARC)
2944 do_unassigned_access(addr, 0, 0, 0, 2);
2945#endif
2946 return 0;
2947}
2948
2949static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2950{
2951#ifdef DEBUG_UNASSIGNED
2952 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2953#endif
2954#if defined(TARGET_SPARC)
2955 do_unassigned_access(addr, 0, 0, 0, 4);
2956#endif
2957 return 0;
2958}
2959
2960static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2961{
2962#ifdef DEBUG_UNASSIGNED
2963 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2964#endif
2965#if defined(TARGET_SPARC)
2966 do_unassigned_access(addr, 1, 0, 0, 1);
2967#endif
2968}
2969
2970static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2971{
2972#ifdef DEBUG_UNASSIGNED
2973 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2974#endif
2975#if defined(TARGET_SPARC)
2976 do_unassigned_access(addr, 1, 0, 0, 2);
2977#endif
2978}
2979
2980static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2981{
2982#ifdef DEBUG_UNASSIGNED
2983 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2984#endif
2985#if defined(TARGET_SPARC)
2986 do_unassigned_access(addr, 1, 0, 0, 4);
2987#endif
2988}
2989
2990static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2991 unassigned_mem_readb,
2992 unassigned_mem_readw,
2993 unassigned_mem_readl,
2994};
2995
2996static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2997 unassigned_mem_writeb,
2998 unassigned_mem_writew,
2999 unassigned_mem_writel,
3000};
3001
3002static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
3003 uint32_t val)
3004{
3005 int dirty_flags;
3006#ifdef VBOX
3007 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
3008 dirty_flags = 0xff;
3009 else
3010#endif /* VBOX */
3011 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
3012 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3013#if !defined(CONFIG_USER_ONLY)
3014 tb_invalidate_phys_page_fast(ram_addr, 1);
3015# ifdef VBOX
3016 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
3017 dirty_flags = 0xff;
3018 else
3019# endif /* VBOX */
3020 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
3021#endif
3022 }
3023#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
3024 remR3PhysWriteU8(ram_addr, val);
3025#else
3026 stb_p(qemu_get_ram_ptr(ram_addr), val);
3027#endif
3028#ifdef CONFIG_KQEMU
3029 if (cpu_single_env->kqemu_enabled &&
3030 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
3031 kqemu_modify_page(cpu_single_env, ram_addr);
3032#endif
3033 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3034#ifdef VBOX
3035 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
3036#endif /* VBOX */
3037 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
3038 /* we remove the notdirty callback only if the code has been
3039 flushed */
3040 if (dirty_flags == 0xff)
3041 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3042}
3043
3044static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
3045 uint32_t val)
3046{
3047 int dirty_flags;
3048#ifdef VBOX
3049 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
3050 dirty_flags = 0xff;
3051 else
3052#endif /* VBOX */
3053 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
3054 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3055#if !defined(CONFIG_USER_ONLY)
3056 tb_invalidate_phys_page_fast(ram_addr, 2);
3057# ifdef VBOX
3058 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
3059 dirty_flags = 0xff;
3060 else
3061# endif /* VBOX */
3062 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
3063#endif
3064 }
3065#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
3066 remR3PhysWriteU16(ram_addr, val);
3067#else
3068 stw_p(qemu_get_ram_ptr(ram_addr), val);
3069#endif
3070#ifdef CONFIG_KQEMU
3071 if (cpu_single_env->kqemu_enabled &&
3072 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
3073 kqemu_modify_page(cpu_single_env, ram_addr);
3074#endif
3075 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3076#ifdef VBOX
3077 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
3078#endif
3079 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
3080 /* we remove the notdirty callback only if the code has been
3081 flushed */
3082 if (dirty_flags == 0xff)
3083 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3084}
3085
3086static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
3087 uint32_t val)
3088{
3089 int dirty_flags;
3090#ifdef VBOX
3091 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
3092 dirty_flags = 0xff;
3093 else
3094#endif /* VBOX */
3095 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
3096 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3097#if !defined(CONFIG_USER_ONLY)
3098 tb_invalidate_phys_page_fast(ram_addr, 4);
3099# ifdef VBOX
3100 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
3101 dirty_flags = 0xff;
3102 else
3103# endif /* VBOX */
3104 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
3105#endif
3106 }
3107#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
3108 remR3PhysWriteU32(ram_addr, val);
3109#else
3110 stl_p(qemu_get_ram_ptr(ram_addr), val);
3111#endif
3112#ifdef CONFIG_KQEMU
3113 if (cpu_single_env->kqemu_enabled &&
3114 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
3115 kqemu_modify_page(cpu_single_env, ram_addr);
3116#endif
3117 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3118#ifdef VBOX
3119 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
3120#endif
3121 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
3122 /* we remove the notdirty callback only if the code has been
3123 flushed */
3124 if (dirty_flags == 0xff)
3125 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3126}
3127
3128static CPUReadMemoryFunc *error_mem_read[3] = {
3129 NULL, /* never used */
3130 NULL, /* never used */
3131 NULL, /* never used */
3132};
3133
3134static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
3135 notdirty_mem_writeb,
3136 notdirty_mem_writew,
3137 notdirty_mem_writel,
3138};
3139
3140/* Generate a debug exception if a watchpoint has been hit. */
3141static void check_watchpoint(int offset, int len_mask, int flags)
3142{
3143 CPUState *env = cpu_single_env;
3144 target_ulong pc, cs_base;
3145 TranslationBlock *tb;
3146 target_ulong vaddr;
3147 CPUWatchpoint *wp;
3148 int cpu_flags;
3149
3150 if (env->watchpoint_hit) {
3151 /* We re-entered the check after replacing the TB. Now raise
3152         * the debug interrupt so that it will trigger after the
3153 * current instruction. */
3154 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3155 return;
3156 }
3157 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
3158 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
3159 if ((vaddr == (wp->vaddr & len_mask) ||
3160 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
3161 wp->flags |= BP_WATCHPOINT_HIT;
3162 if (!env->watchpoint_hit) {
3163 env->watchpoint_hit = wp;
3164 tb = tb_find_pc(env->mem_io_pc);
3165 if (!tb) {
3166 cpu_abort(env, "check_watchpoint: could not find TB for "
3167 "pc=%p", (void *)env->mem_io_pc);
3168 }
3169 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
3170 tb_phys_invalidate(tb, -1);
3171 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3172 env->exception_index = EXCP_DEBUG;
3173 } else {
3174 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3175 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3176 }
3177 cpu_resume_from_signal(env, NULL);
3178 }
3179 } else {
3180 wp->flags &= ~BP_WATCHPOINT_HIT;
3181 }
3182 }
3183}
3184
3185/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3186 so these check for a hit then pass through to the normal out-of-line
3187 phys routines. */
3188static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
3189{
3190 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
3191 return ldub_phys(addr);
3192}
3193
3194static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
3195{
3196 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
3197 return lduw_phys(addr);
3198}
3199
3200static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
3201{
3202 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
3203 return ldl_phys(addr);
3204}
3205
3206static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
3207 uint32_t val)
3208{
3209 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
3210 stb_phys(addr, val);
3211}
3212
3213static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
3214 uint32_t val)
3215{
3216 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
3217 stw_phys(addr, val);
3218}
3219
3220static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
3221 uint32_t val)
3222{
3223 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
3224 stl_phys(addr, val);
3225}
3226
3227static CPUReadMemoryFunc *watch_mem_read[3] = {
3228 watch_mem_readb,
3229 watch_mem_readw,
3230 watch_mem_readl,
3231};
3232
3233static CPUWriteMemoryFunc *watch_mem_write[3] = {
3234 watch_mem_writeb,
3235 watch_mem_writew,
3236 watch_mem_writel,
3237};
3238
3239static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
3240 unsigned int len)
3241{
3242 uint32_t ret;
3243 unsigned int idx;
3244
3245 idx = SUBPAGE_IDX(addr);
3246#if defined(DEBUG_SUBPAGE)
3247 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3248 mmio, len, addr, idx);
3249#endif
3250 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
3251 addr + mmio->region_offset[idx][0][len]);
3252
3253 return ret;
3254}
3255
3256static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
3257 uint32_t value, unsigned int len)
3258{
3259 unsigned int idx;
3260
3261 idx = SUBPAGE_IDX(addr);
3262#if defined(DEBUG_SUBPAGE)
3263 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
3264 mmio, len, addr, idx, value);
3265#endif
3266 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
3267 addr + mmio->region_offset[idx][1][len],
3268 value);
3269}
3270
3271static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
3272{
3273#if defined(DEBUG_SUBPAGE)
3274 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3275#endif
3276
3277 return subpage_readlen(opaque, addr, 0);
3278}
3279
3280static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3281 uint32_t value)
3282{
3283#if defined(DEBUG_SUBPAGE)
3284 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3285#endif
3286 subpage_writelen(opaque, addr, value, 0);
3287}
3288
3289static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3290{
3291#if defined(DEBUG_SUBPAGE)
3292 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3293#endif
3294
3295 return subpage_readlen(opaque, addr, 1);
3296}
3297
3298static void subpage_writew (void *opaque, target_phys_addr_t addr,
3299 uint32_t value)
3300{
3301#if defined(DEBUG_SUBPAGE)
3302 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3303#endif
3304 subpage_writelen(opaque, addr, value, 1);
3305}
3306
3307static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3308{
3309#if defined(DEBUG_SUBPAGE)
3310 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3311#endif
3312
3313 return subpage_readlen(opaque, addr, 2);
3314}
3315
3316static void subpage_writel (void *opaque,
3317 target_phys_addr_t addr, uint32_t value)
3318{
3319#if defined(DEBUG_SUBPAGE)
3320 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3321#endif
3322 subpage_writelen(opaque, addr, value, 2);
3323}
3324
3325static CPUReadMemoryFunc *subpage_read[] = {
3326 &subpage_readb,
3327 &subpage_readw,
3328 &subpage_readl,
3329};
3330
3331static CPUWriteMemoryFunc *subpage_write[] = {
3332 &subpage_writeb,
3333 &subpage_writew,
3334 &subpage_writel,
3335};
3336
3337static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3338 ram_addr_t memory, ram_addr_t region_offset)
3339{
3340 int idx, eidx;
3341 unsigned int i;
3342
3343 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3344 return -1;
3345 idx = SUBPAGE_IDX(start);
3346 eidx = SUBPAGE_IDX(end);
3347#if defined(DEBUG_SUBPAGE)
3348 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
3349 mmio, start, end, idx, eidx, memory);
3350#endif
3351 memory >>= IO_MEM_SHIFT;
3352 for (; idx <= eidx; idx++) {
3353 for (i = 0; i < 4; i++) {
3354 if (io_mem_read[memory][i]) {
3355 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
3356 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
3357 mmio->region_offset[idx][0][i] = region_offset;
3358 }
3359 if (io_mem_write[memory][i]) {
3360 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
3361 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
3362 mmio->region_offset[idx][1][i] = region_offset;
3363 }
3364 }
3365 }
3366
3367 return 0;
3368}
3369
3370static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3371 ram_addr_t orig_memory, ram_addr_t region_offset)
3372{
3373 subpage_t *mmio;
3374 int subpage_memory;
3375
3376 mmio = qemu_mallocz(sizeof(subpage_t));
3377
3378 mmio->base = base;
3379 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
3380#if defined(DEBUG_SUBPAGE)
3381 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3382 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3383#endif
3384 *phys = subpage_memory | IO_MEM_SUBPAGE;
3385 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
3386 region_offset);
3387
3388 return mmio;
3389}
3390
3391static int get_free_io_mem_idx(void)
3392{
3393 int i;
3394
3395 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3396 if (!io_mem_used[i]) {
3397 io_mem_used[i] = 1;
3398 return i;
3399 }
3400
3401 return -1;
3402}
3403
3404/* mem_read and mem_write are arrays of functions containing the
3405 function to access byte (index 0), word (index 1) and dword (index
3406 2). Functions can be omitted with a NULL function pointer.
3407   If io_index is non-zero, the corresponding io zone is
3408   modified. If it is zero, a new io zone is allocated. The return
3409   value can be used with cpu_register_physical_memory(). (-1) is
3410   returned on error. */
3411static int cpu_register_io_memory_fixed(int io_index,
3412 CPUReadMemoryFunc **mem_read,
3413 CPUWriteMemoryFunc **mem_write,
3414 void *opaque)
3415{
3416 int i, subwidth = 0;
3417
3418 if (io_index <= 0) {
3419 io_index = get_free_io_mem_idx();
3420 if (io_index == -1)
3421 return io_index;
3422 } else {
3423 io_index >>= IO_MEM_SHIFT;
3424 if (io_index >= IO_MEM_NB_ENTRIES)
3425 return -1;
3426 }
3427
3428 for(i = 0;i < 3; i++) {
3429 if (!mem_read[i] || !mem_write[i])
3430 subwidth = IO_MEM_SUBWIDTH;
3431 io_mem_read[io_index][i] = mem_read[i];
3432 io_mem_write[io_index][i] = mem_write[i];
3433 }
3434 io_mem_opaque[io_index] = opaque;
3435 return (io_index << IO_MEM_SHIFT) | subwidth;
3436}
3437
3438int cpu_register_io_memory(CPUReadMemoryFunc **mem_read,
3439 CPUWriteMemoryFunc **mem_write,
3440 void *opaque)
3441{
3442 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
3443}
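
/* Editor's illustration (not part of the upstream file): how a device model
   typically combines cpu_register_io_memory() with
   cpu_register_physical_memory_offset().  MyDevState, mydev_readl(),
   mydev_writel() and mydev_map_sketch() are hypothetical; only the
   registration pattern follows the interfaces defined in this file. */
#if 0
typedef struct MyDevState { uint32_t reg; } MyDevState;   /* hypothetical device */

static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr)
{
    MyDevState *s = opaque;
    return s->reg;                        /* same value for every offset */
}

static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    MyDevState *s = opaque;
    s->reg = val;
}

/* byte/word handlers left NULL -> the core marks the region IO_MEM_SUBWIDTH */
static CPUReadMemoryFunc  *mydev_read[3]  = { NULL, NULL, mydev_readl };
static CPUWriteMemoryFunc *mydev_write[3] = { NULL, NULL, mydev_writel };

static void mydev_map_sketch(MyDevState *s, target_phys_addr_t base)
{
    int io = cpu_register_io_memory(mydev_read, mydev_write, s);
    /* the returned value already carries the IO_MEM_* low bits */
    cpu_register_physical_memory_offset(base, TARGET_PAGE_SIZE, io, 0);
}
#endif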
3444
3445void cpu_unregister_io_memory(int io_table_address)
3446{
3447 int i;
3448 int io_index = io_table_address >> IO_MEM_SHIFT;
3449
3450 for (i=0;i < 3; i++) {
3451 io_mem_read[io_index][i] = unassigned_mem_read[i];
3452 io_mem_write[io_index][i] = unassigned_mem_write[i];
3453 }
3454 io_mem_opaque[io_index] = NULL;
3455 io_mem_used[io_index] = 0;
3456}
3457
3458static void io_mem_init(void)
3459{
3460 int i;
3461
3462 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
3463 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
3464 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
3465 for (i=0; i<5; i++)
3466 io_mem_used[i] = 1;
3467
3468 io_mem_watch = cpu_register_io_memory(watch_mem_read,
3469 watch_mem_write, NULL);
3470#ifdef CONFIG_KQEMU
3471 if (kqemu_phys_ram_base) {
3472 /* alloc dirty bits array */
3473 phys_ram_dirty = qemu_vmalloc(kqemu_phys_ram_size >> TARGET_PAGE_BITS);
3474 memset(phys_ram_dirty, 0xff, kqemu_phys_ram_size >> TARGET_PAGE_BITS);
3475 }
3476#endif
3477}
3478
3479#endif /* !defined(CONFIG_USER_ONLY) */
3480
3481/* physical memory access (slow version, mainly for debug) */
3482#if defined(CONFIG_USER_ONLY)
3483void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3484 int len, int is_write)
3485{
3486 int l, flags;
3487 target_ulong page;
3488 void * p;
3489
3490 while (len > 0) {
3491 page = addr & TARGET_PAGE_MASK;
3492 l = (page + TARGET_PAGE_SIZE) - addr;
3493 if (l > len)
3494 l = len;
3495 flags = page_get_flags(page);
3496 if (!(flags & PAGE_VALID))
3497 return;
3498 if (is_write) {
3499 if (!(flags & PAGE_WRITE))
3500 return;
3501 /* XXX: this code should not depend on lock_user */
3502 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3503 /* FIXME - should this return an error rather than just fail? */
3504 return;
3505 memcpy(p, buf, l);
3506 unlock_user(p, addr, l);
3507 } else {
3508 if (!(flags & PAGE_READ))
3509 return;
3510 /* XXX: this code should not depend on lock_user */
3511 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3512 /* FIXME - should this return an error rather than just fail? */
3513 return;
3514 memcpy(buf, p, l);
3515 unlock_user(p, addr, 0);
3516 }
3517 len -= l;
3518 buf += l;
3519 addr += l;
3520 }
3521}
3522
3523#else
3524void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3525 int len, int is_write)
3526{
3527 int l, io_index;
3528 uint8_t *ptr;
3529 uint32_t val;
3530 target_phys_addr_t page;
3531 unsigned long pd;
3532 PhysPageDesc *p;
3533
3534 while (len > 0) {
3535 page = addr & TARGET_PAGE_MASK;
3536 l = (page + TARGET_PAGE_SIZE) - addr;
3537 if (l > len)
3538 l = len;
3539 p = phys_page_find(page >> TARGET_PAGE_BITS);
3540 if (!p) {
3541 pd = IO_MEM_UNASSIGNED;
3542 } else {
3543 pd = p->phys_offset;
3544 }
3545
3546 if (is_write) {
3547 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3548 target_phys_addr_t addr1 = addr;
3549 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3550 if (p)
3551 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3552 /* XXX: could force cpu_single_env to NULL to avoid
3553 potential bugs */
3554 if (l >= 4 && ((addr1 & 3) == 0)) {
3555 /* 32 bit write access */
3556#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3557 val = ldl_p(buf);
3558#else
3559 val = *(const uint32_t *)buf;
3560#endif
3561 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3562 l = 4;
3563 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3564 /* 16 bit write access */
3565#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3566 val = lduw_p(buf);
3567#else
3568 val = *(const uint16_t *)buf;
3569#endif
3570 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3571 l = 2;
3572 } else {
3573 /* 8 bit write access */
3574#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3575 val = ldub_p(buf);
3576#else
3577 val = *(const uint8_t *)buf;
3578#endif
3579 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3580 l = 1;
3581 }
3582 } else {
3583 unsigned long addr1;
3584 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3585 /* RAM case */
3586#ifdef VBOX
3587 remR3PhysWrite(addr1, buf, l); NOREF(ptr);
3588#else
3589 ptr = qemu_get_ram_ptr(addr1);
3590 memcpy(ptr, buf, l);
3591#endif
3592 if (!cpu_physical_memory_is_dirty(addr1)) {
3593 /* invalidate code */
3594 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3595 /* set dirty bit */
3596#ifdef VBOX
3597 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
3598#endif
3599 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3600 (0xff & ~CODE_DIRTY_FLAG);
3601 }
3602 }
3603 } else {
3604 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3605 !(pd & IO_MEM_ROMD)) {
3606 target_phys_addr_t addr1 = addr;
3607 /* I/O case */
3608 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3609 if (p)
3610 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3611 if (l >= 4 && ((addr1 & 3) == 0)) {
3612 /* 32 bit read access */
3613 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3614#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3615 stl_p(buf, val);
3616#else
3617 *(uint32_t *)buf = val;
3618#endif
3619 l = 4;
3620 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3621 /* 16 bit read access */
3622 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3623#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3624 stw_p(buf, val);
3625#else
3626 *(uint16_t *)buf = val;
3627#endif
3628 l = 2;
3629 } else {
3630 /* 8 bit read access */
3631 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3632#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3633 stb_p(buf, val);
3634#else
3635 *(uint8_t *)buf = val;
3636#endif
3637 l = 1;
3638 }
3639 } else {
3640 /* RAM case */
3641#ifdef VBOX
3642 remR3PhysRead((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), buf, l); NOREF(ptr);
3643#else
3644 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3645 (addr & ~TARGET_PAGE_MASK);
3646 memcpy(buf, ptr, l);
3647#endif
3648 }
3649 }
3650 len -= l;
3651 buf += l;
3652 addr += l;
3653 }
3654}
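
/* Editor's illustration (not part of the upstream file): a sketch of how
   callers usually reach this routine through the cpu_physical_memory_read()
   and cpu_physical_memory_write() wrappers (assumed to be the thin inline
   helpers from cpu-common.h that pass is_write = 0/1).  The *_sketch names
   are hypothetical. */
#if 0
static uint32_t read_guest_u32_sketch(target_phys_addr_t addr)
{
    uint8_t buf[4];

    cpu_physical_memory_read(addr, buf, sizeof(buf));
    return ldl_p(buf);                  /* buf holds target byte order */
}

static void write_guest_u32_sketch(target_phys_addr_t addr, uint32_t val)
{
    uint8_t buf[4];

    stl_p(buf, val);                    /* store in target byte order */
    cpu_physical_memory_write(addr, buf, sizeof(buf));
}
#endif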
3655
3656#ifndef VBOX
3657
3658/* used for ROM loading: can write in RAM and ROM */
3659void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3660 const uint8_t *buf, int len)
3661{
3662 int l;
3663 uint8_t *ptr;
3664 target_phys_addr_t page;
3665 unsigned long pd;
3666 PhysPageDesc *p;
3667
3668 while (len > 0) {
3669 page = addr & TARGET_PAGE_MASK;
3670 l = (page + TARGET_PAGE_SIZE) - addr;
3671 if (l > len)
3672 l = len;
3673 p = phys_page_find(page >> TARGET_PAGE_BITS);
3674 if (!p) {
3675 pd = IO_MEM_UNASSIGNED;
3676 } else {
3677 pd = p->phys_offset;
3678 }
3679
3680 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3681 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3682 !(pd & IO_MEM_ROMD)) {
3683 /* do nothing */
3684 } else {
3685 unsigned long addr1;
3686 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3687 /* ROM/RAM case */
3688 ptr = qemu_get_ram_ptr(addr1);
3689 memcpy(ptr, buf, l);
3690 }
3691 len -= l;
3692 buf += l;
3693 addr += l;
3694 }
3695}
3696
3697typedef struct {
3698 void *buffer;
3699 target_phys_addr_t addr;
3700 target_phys_addr_t len;
3701} BounceBuffer;
3702
3703static BounceBuffer bounce;
3704
3705typedef struct MapClient {
3706 void *opaque;
3707 void (*callback)(void *opaque);
3708 LIST_ENTRY(MapClient) link;
3709} MapClient;
3710
3711static LIST_HEAD(map_client_list, MapClient) map_client_list
3712 = LIST_HEAD_INITIALIZER(map_client_list);
3713
3714void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3715{
3716 MapClient *client = qemu_malloc(sizeof(*client));
3717
3718 client->opaque = opaque;
3719 client->callback = callback;
3720 LIST_INSERT_HEAD(&map_client_list, client, link);
3721 return client;
3722}
3723
3724void cpu_unregister_map_client(void *_client)
3725{
3726 MapClient *client = (MapClient *)_client;
3727
3728 LIST_REMOVE(client, link);
3729 qemu_free(client);
3730}
3731
3732static void cpu_notify_map_clients(void)
3733{
3734 MapClient *client;
3735
3736 while (!LIST_EMPTY(&map_client_list)) {
3737 client = LIST_FIRST(&map_client_list);
3738 client->callback(client->opaque);
3739 cpu_unregister_map_client(client);
3740 }
3741}
3742
3743/* Map a physical memory region into a host virtual address.
3744 * May map a subset of the requested range, given by and returned in *plen.
3745 * May return NULL if resources needed to perform the mapping are exhausted.
3746 * Use only for reads OR writes - not for read-modify-write operations.
3747 * Use cpu_register_map_client() to know when retrying the map operation is
3748 * likely to succeed.
3749 */
3750void *cpu_physical_memory_map(target_phys_addr_t addr,
3751 target_phys_addr_t *plen,
3752 int is_write)
3753{
3754 target_phys_addr_t len = *plen;
3755 target_phys_addr_t done = 0;
3756 int l;
3757 uint8_t *ret = NULL;
3758 uint8_t *ptr;
3759 target_phys_addr_t page;
3760 unsigned long pd;
3761 PhysPageDesc *p;
3762 unsigned long addr1;
3763
3764 while (len > 0) {
3765 page = addr & TARGET_PAGE_MASK;
3766 l = (page + TARGET_PAGE_SIZE) - addr;
3767 if (l > len)
3768 l = len;
3769 p = phys_page_find(page >> TARGET_PAGE_BITS);
3770 if (!p) {
3771 pd = IO_MEM_UNASSIGNED;
3772 } else {
3773 pd = p->phys_offset;
3774 }
3775
3776 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3777 if (done || bounce.buffer) {
3778 break;
3779 }
3780 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3781 bounce.addr = addr;
3782 bounce.len = l;
3783 if (!is_write) {
3784 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3785 }
3786 ptr = bounce.buffer;
3787 } else {
3788 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3789 ptr = qemu_get_ram_ptr(addr1);
3790 }
3791 if (!done) {
3792 ret = ptr;
3793 } else if (ret + done != ptr) {
3794 break;
3795 }
3796
3797 len -= l;
3798 addr += l;
3799 done += l;
3800 }
3801 *plen = done;
3802 return ret;
3803}
3804
3805/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3806 * Will also mark the memory as dirty if is_write == 1. access_len gives
3807 * the amount of memory that was actually read or written by the caller.
3808 */
3809void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3810 int is_write, target_phys_addr_t access_len)
3811{
3812 if (buffer != bounce.buffer) {
3813 if (is_write) {
3814 ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
3815 while (access_len) {
3816 unsigned l;
3817 l = TARGET_PAGE_SIZE;
3818 if (l > access_len)
3819 l = access_len;
3820 if (!cpu_physical_memory_is_dirty(addr1)) {
3821 /* invalidate code */
3822 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3823 /* set dirty bit */
3824 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3825 (0xff & ~CODE_DIRTY_FLAG);
3826 }
3827 addr1 += l;
3828 access_len -= l;
3829 }
3830 }
3831 return;
3832 }
3833 if (is_write) {
3834 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3835 }
3836 qemu_free(bounce.buffer);
3837 bounce.buffer = NULL;
3838 cpu_notify_map_clients();
3839}
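
/* Editor's illustration (not part of the upstream file): the intended
   map / access / unmap pattern for zero-copy DMA, falling back to
   cpu_physical_memory_rw() when the region cannot be mapped directly.
   dma_read_sketch() is hypothetical; a real device would use
   cpu_register_map_client() to retry instead of always taking the slow
   path. */
#if 0
static void dma_read_sketch(target_phys_addr_t addr, uint8_t *dst,
                            target_phys_addr_t len)
{
    while (len > 0) {
        target_phys_addr_t plen = len;
        void *ptr = cpu_physical_memory_map(addr, &plen, 0 /* read */);

        if (!ptr) {
            /* mapping resources exhausted (e.g. bounce buffer busy);
               fall back to the slow copying path for the remainder */
            cpu_physical_memory_rw(addr, dst, len, 0);
            return;
        }
        memcpy(dst, ptr, plen);         /* plen may be less than requested */
        cpu_physical_memory_unmap(ptr, plen, 0 /* read */, plen);
        addr += plen;
        dst  += plen;
        len  -= plen;
    }
}
#endif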
3840
3841#endif /* !VBOX */
3842
3843/* warning: addr must be aligned */
3844uint32_t ldl_phys(target_phys_addr_t addr)
3845{
3846 int io_index;
3847 uint8_t *ptr;
3848 uint32_t val;
3849 unsigned long pd;
3850 PhysPageDesc *p;
3851
3852 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3853 if (!p) {
3854 pd = IO_MEM_UNASSIGNED;
3855 } else {
3856 pd = p->phys_offset;
3857 }
3858
3859 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3860 !(pd & IO_MEM_ROMD)) {
3861 /* I/O case */
3862 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3863 if (p)
3864 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3865 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3866 } else {
3867 /* RAM case */
3868#ifndef VBOX
3869 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3870 (addr & ~TARGET_PAGE_MASK);
3871 val = ldl_p(ptr);
3872#else
3873 val = remR3PhysReadU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
3874#endif
3875 }
3876 return val;
3877}
3878
3879/* warning: addr must be aligned */
3880uint64_t ldq_phys(target_phys_addr_t addr)
3881{
3882 int io_index;
3883 uint8_t *ptr;
3884 uint64_t val;
3885 unsigned long pd;
3886 PhysPageDesc *p;
3887
3888 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3889 if (!p) {
3890 pd = IO_MEM_UNASSIGNED;
3891 } else {
3892 pd = p->phys_offset;
3893 }
3894
3895 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3896 !(pd & IO_MEM_ROMD)) {
3897 /* I/O case */
3898 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3899 if (p)
3900 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3901#ifdef TARGET_WORDS_BIGENDIAN
3902 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3903 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3904#else
3905 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3906 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3907#endif
3908 } else {
3909 /* RAM case */
3910#ifndef VBOX
3911 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3912 (addr & ~TARGET_PAGE_MASK);
3913 val = ldq_p(ptr);
3914#else
3915 val = remR3PhysReadU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
3916#endif
3917 }
3918 return val;
3919}
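
/* Editor's sketch (not in the original file): using the ld*_phys accessors
 * defined above.  As the warnings say, the address must be naturally aligned
 * for ldl_phys/ldq_phys; 'base' and example_read_descriptor are made-up
 * names. */
static void example_read_descriptor(target_phys_addr_t base)
{
    uint32_t lo   = ldl_phys(base);      /* assumes base is 4-byte aligned */
    uint64_t wide = ldq_phys(base + 8);  /* assumes base + 8 is 8-byte aligned */
    (void)lo; (void)wide;                /* silence unused-variable warnings */
}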
3920
3921/* XXX: optimize */
3922uint32_t ldub_phys(target_phys_addr_t addr)
3923{
3924 uint8_t val;
3925 cpu_physical_memory_read(addr, &val, 1);
3926 return val;
3927}
3928
3929/* XXX: optimize */
3930uint32_t lduw_phys(target_phys_addr_t addr)
3931{
3932 uint16_t val;
3933 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3934 return tswap16(val);
3935}
3936
3937/* warning: addr must be aligned. The RAM page is not marked as dirty
3938 and the code inside is not invalidated. This is useful when the dirty
3939 bits are used to track modified PTEs */
3940void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3941{
3942 int io_index;
3943 uint8_t *ptr;
3944 unsigned long pd;
3945 PhysPageDesc *p;
3946
3947 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3948 if (!p) {
3949 pd = IO_MEM_UNASSIGNED;
3950 } else {
3951 pd = p->phys_offset;
3952 }
3953
3954 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3955 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3956 if (p)
3957 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3958 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3959 } else {
3960#ifndef VBOX
3961 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3962 ptr = qemu_get_ram_ptr(addr1);
3963 stl_p(ptr, val);
3964#else
3965 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3966#endif
3967
3968#ifndef VBOX
3969 if (unlikely(in_migration)) {
3970 if (!cpu_physical_memory_is_dirty(addr1)) {
3971 /* invalidate code */
3972 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3973 /* set dirty bit */
3974 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3975 (0xff & ~CODE_DIRTY_FLAG);
3976 }
3977 }
3978#endif /* !VBOX */
3979 }
3980}
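
/* Editor's sketch (not in the original file): the use case hinted at in the
 * comment above -- target MMU code setting the accessed/dirty bits of a
 * guest page table entry.  Using stl_phys_notdirty() keeps the page that
 * holds the PTE from being marked dirty or having its translated code
 * invalidated.  EXAMPLE_PTE_A/EXAMPLE_PTE_D and example_touch_pte are
 * made-up names. */
#define EXAMPLE_PTE_A 0x20
#define EXAMPLE_PTE_D 0x40
static void example_touch_pte(target_phys_addr_t pte_addr, int is_write)
{
    uint32_t pte = ldl_phys(pte_addr);
    pte |= EXAMPLE_PTE_A;
    if (is_write)
        pte |= EXAMPLE_PTE_D;
    stl_phys_notdirty(pte_addr, pte);
}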
3981
3982void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3983{
3984 int io_index;
3985 uint8_t *ptr;
3986 unsigned long pd;
3987 PhysPageDesc *p;
3988
3989 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3990 if (!p) {
3991 pd = IO_MEM_UNASSIGNED;
3992 } else {
3993 pd = p->phys_offset;
3994 }
3995
3996 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3997 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3998 if (p)
3999 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4000#ifdef TARGET_WORDS_BIGENDIAN
4001 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
4002 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
4003#else
4004 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4005 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
4006#endif
4007 } else {
4008#ifndef VBOX
4009 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4010 (addr & ~TARGET_PAGE_MASK);
4011 stq_p(ptr, val);
4012#else
4013 remR3PhysWriteU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
4014#endif
4015 }
4016}
4017
4018/* warning: addr must be aligned */
4019void stl_phys(target_phys_addr_t addr, uint32_t val)
4020{
4021 int io_index;
4022 uint8_t *ptr;
4023 unsigned long pd;
4024 PhysPageDesc *p;
4025
4026 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4027 if (!p) {
4028 pd = IO_MEM_UNASSIGNED;
4029 } else {
4030 pd = p->phys_offset;
4031 }
4032
4033 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4034 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4035 if (p)
4036 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4037 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4038 } else {
4039 unsigned long addr1;
4040 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4041 /* RAM case */
4042#ifndef VBOX
4043 ptr = qemu_get_ram_ptr(addr1);
4044 stl_p(ptr, val);
4045#else
4046 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
4047#endif
4048 if (!cpu_physical_memory_is_dirty(addr1)) {
4049 /* invalidate code */
4050 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4051 /* set dirty bit */
4052#ifdef VBOX
4053 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
4054#endif
4055 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
4056 (0xff & ~CODE_DIRTY_FLAG);
4057 }
4058 }
4059}
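
/* Editor's sketch (not in the original file): by contrast with
 * stl_phys_notdirty() above, an ordinary guest-visible store -- say a device
 * posting a completion word -- goes through stl_phys() so the page is marked
 * dirty and any code translated from it is invalidated.
 * example_post_status and status_addr are made-up names. */
static void example_post_status(target_phys_addr_t status_addr)
{
    stl_phys(status_addr, 1 /* completed */);
}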
4060
4061/* XXX: optimize */
4062void stb_phys(target_phys_addr_t addr, uint32_t val)
4063{
4064 uint8_t v = val;
4065 cpu_physical_memory_write(addr, &v, 1);
4066}
4067
4068/* XXX: optimize */
4069void stw_phys(target_phys_addr_t addr, uint32_t val)
4070{
4071 uint16_t v = tswap16(val);
4072 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
4073}
4074
4075/* XXX: optimize */
4076void stq_phys(target_phys_addr_t addr, uint64_t val)
4077{
4078 val = tswap64(val);
4079 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
4080}
4081
4082#endif
4083
4084#ifndef VBOX
4085/* virtual memory access for debug (includes writing to ROM) */
4086int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
4087 uint8_t *buf, int len, int is_write)
4088{
4089 int l;
4090 target_phys_addr_t phys_addr;
4091 target_ulong page;
4092
4093 while (len > 0) {
4094 page = addr & TARGET_PAGE_MASK;
4095 phys_addr = cpu_get_phys_page_debug(env, page);
4096 /* if no physical page mapped, return an error */
4097 if (phys_addr == -1)
4098 return -1;
4099 l = (page + TARGET_PAGE_SIZE) - addr;
4100 if (l > len)
4101 l = len;
4102 phys_addr += (addr & ~TARGET_PAGE_MASK);
4103#if !defined(CONFIG_USER_ONLY)
4104 if (is_write)
4105 cpu_physical_memory_write_rom(phys_addr, buf, l);
4106 else
4107#endif
4108 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
4109 len -= l;
4110 buf += l;
4111 addr += l;
4112 }
4113 return 0;
4114}
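
/* Editor's sketch (not in the original file): how a debugger front end such
 * as a gdb stub might read guest memory through a *virtual* address; the
 * per-page translation and the write-to-ROM special case are handled inside
 * cpu_memory_rw_debug().  example_debug_peek is a made-up name. */
static int example_debug_peek(CPUState *env1, target_ulong vaddr,
                              uint8_t *out, int len)
{
    /* returns -1 if any page in the range is unmapped */
    return cpu_memory_rw_debug(env1, vaddr, out, len, 0 /* is_write */);
}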
4115#endif /* !VBOX */
4116
4117/* In deterministic execution mode, an instruction that performs device I/O
4118 must be the last instruction of its TB */
4119void cpu_io_recompile(CPUState *env, void *retaddr)
4120{
4121 TranslationBlock *tb;
4122 uint32_t n, cflags;
4123 target_ulong pc, cs_base;
4124 uint64_t flags;
4125
4126 tb = tb_find_pc((unsigned long)retaddr);
4127 if (!tb) {
4128 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4129 retaddr);
4130 }
4131 n = env->icount_decr.u16.low + tb->icount;
4132 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
4133 /* Calculate how many instructions had been executed before the fault
4134 occurred. */
4135 n = n - env->icount_decr.u16.low;
4136 /* Generate a new TB ending on the I/O insn. */
4137 n++;
4138 /* On MIPS and SH, a delay-slot instruction can only be restarted if
4139 it was already the first instruction in the TB. If it is not the
4140 first instruction in the TB, re-execute the preceding branch
4141 instead. */
4142#if defined(TARGET_MIPS)
4143 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4144 env->active_tc.PC -= 4;
4145 env->icount_decr.u16.low++;
4146 env->hflags &= ~MIPS_HFLAG_BMASK;
4147 }
4148#elif defined(TARGET_SH4)
4149 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4150 && n > 1) {
4151 env->pc -= 2;
4152 env->icount_decr.u16.low++;
4153 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4154 }
4155#endif
4156 /* This should never happen. */
4157 if (n > CF_COUNT_MASK)
4158 cpu_abort(env, "TB too big during recompile");
4159
4160 cflags = n | CF_LAST_IO;
4161 pc = tb->pc;
4162 cs_base = tb->cs_base;
4163 flags = tb->flags;
4164 tb_phys_invalidate(tb, -1);
4165 /* FIXME: In theory this could raise an exception. In practice
4166 we have already translated the block once so it's probably ok. */
4167 tb_gen_code(env, pc, cs_base, flags, cflags);
4168 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
4169 the first in the TB) then we end up generating a whole new TB and
4170 repeating the fault, which is horribly inefficient.
4171 Better would be to execute just this insn uncached, or generate a
4172 second new TB. */
4173 cpu_resume_from_signal(env, NULL);
4174}
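
/* Editor's note (not in the original file): the net effect of the function
 * above is that the faulting TB is regenerated with
 *
 *     cflags = n | CF_LAST_IO;
 *
 * the low CF_COUNT_MASK bits limit the new TB to the n instructions up to
 * and including the I/O access, and CF_LAST_IO marks the final instruction
 * as an I/O instruction so the instruction count is exact when the device
 * is actually touched. */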
4175
4176#ifndef VBOX
4177void dump_exec_info(FILE *f,
4178 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
4179{
4180 int i, target_code_size, max_target_code_size;
4181 int direct_jmp_count, direct_jmp2_count, cross_page;
4182 TranslationBlock *tb;
4183
4184 target_code_size = 0;
4185 max_target_code_size = 0;
4186 cross_page = 0;
4187 direct_jmp_count = 0;
4188 direct_jmp2_count = 0;
4189 for(i = 0; i < nb_tbs; i++) {
4190 tb = &tbs[i];
4191 target_code_size += tb->size;
4192 if (tb->size > max_target_code_size)
4193 max_target_code_size = tb->size;
4194 if (tb->page_addr[1] != -1)
4195 cross_page++;
4196 if (tb->tb_next_offset[0] != 0xffff) {
4197 direct_jmp_count++;
4198 if (tb->tb_next_offset[1] != 0xffff) {
4199 direct_jmp2_count++;
4200 }
4201 }
4202 }
4203 /* XXX: avoid using doubles ? */
4204 cpu_fprintf(f, "Translation buffer state:\n");
4205 cpu_fprintf(f, "gen code size %ld/%ld\n",
4206 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4207 cpu_fprintf(f, "TB count %d/%d\n",
4208 nb_tbs, code_gen_max_blocks);
4209 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
4210 nb_tbs ? target_code_size / nb_tbs : 0,
4211 max_target_code_size);
4212 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
4213 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4214 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
4215 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4216 cross_page,
4217 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4218 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
4219 direct_jmp_count,
4220 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4221 direct_jmp2_count,
4222 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
4223 cpu_fprintf(f, "\nStatistics:\n");
4224 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4225 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4226 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
4227 tcg_dump_info(f, cpu_fprintf);
4228}
4229#endif /* !VBOX */
4230
4231#if !defined(CONFIG_USER_ONLY)
4232
4233#define MMUSUFFIX _cmmu
4234#define GETPC() NULL
4235#define env cpu_single_env
4236#define SOFTMMU_CODE_ACCESS
4237
4238#define SHIFT 0
4239#include "softmmu_template.h"
4240
4241#define SHIFT 1
4242#include "softmmu_template.h"
4243
4244#define SHIFT 2
4245#include "softmmu_template.h"
4246
4247#define SHIFT 3
4248#include "softmmu_template.h"
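
/* Editor's note (not in the original file): with SOFTMMU_CODE_ACCESS and
 * MMUSUFFIX _cmmu defined, the four inclusions above should expand into the
 * slow-path code-fetch helpers for 1/2/4/8 byte accesses (in this QEMU
 * version: __ldb_cmmu, __ldw_cmmu, __ldl_cmmu and __ldq_cmmu), which back
 * the ld*_code() accessors used while translating guest code. */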
4249
4250#undef env
4251
4252#endif