VirtualBox

source: vbox/trunk/src/recompiler_new/exec-all.h@14276

Last change on this file since 14276 was 14241, checked in by vboxsync, 16 years ago

support for virtual addresses in QEMU TLB, not yet functional (be back on that)

  • Property svn:eol-style set to native
File size: 15.4 KB
/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */

/* Allow seeing translation results; the slowdown should be negligible, so we leave it enabled. */
#ifndef VBOX
#define DEBUG_DISAS
#endif

#ifdef VBOX
# include <VBox/tm.h>
# include <VBox/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */
# ifndef LOG_GROUP
#  define LOG_GROUP LOG_GROUP_REM
# endif
# include <VBox/log.h>
# include "REMInternal.h"
# include <VBox/vm.h>
#endif /* VBOX */

/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */

typedef struct TranslationBlock TranslationBlock;

/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 64
/* A Call op needs up to 6 + 2N parameters (N = number of arguments). */
#define MAX_OPC_PARAM 10
#define OPC_BUF_SIZE 512
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)

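/* Illustrative note (not in the original source): OPC_MAX_SIZE leaves
   MAX_OP_PER_INSTR ops of slack at the end of the op buffer, so the
   translator only has to check for buffer exhaustion once per guest
   instruction rather than before every individual op it emits. */
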
/* Maximum size a TCG op can expand to. This is complicated because a
   single op may require several host instructions and register reloads.
   For now take a wild guess at 128 bytes, which should allow at least
   a couple of fixup instructions per argument. */
#define TCG_MAX_OP_SIZE 128

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)

extern target_ulong gen_opc_pc[OPC_BUF_SIZE];
extern target_ulong gen_opc_npc[OPC_BUF_SIZE];
extern uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
extern uint8_t gen_opc_instr_start[OPC_BUF_SIZE];
extern uint16_t gen_opc_icount[OPC_BUF_SIZE];
extern target_ulong gen_opc_jump_pc[2];
extern uint32_t gen_opc_hflags[OPC_BUF_SIZE];

typedef void (GenOpFunc)(void);
typedef void (GenOpFunc1)(long);
typedef void (GenOpFunc2)(long, long);
typedef void (GenOpFunc3)(long, long, long);

#include "qemu-log.h"

void gen_intermediate_code(CPUState *env, struct TranslationBlock *tb);
void gen_intermediate_code_pc(CPUState *env, struct TranslationBlock *tb);
void gen_pc_load(CPUState *env, struct TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc);

unsigned long code_gen_max_block_size(void);
void cpu_gen_init(void);
int cpu_gen_code(CPUState *env, struct TranslationBlock *tb,
                 int *gen_code_size_ptr);
int cpu_restore_state(struct TranslationBlock *tb,
                      CPUState *env, unsigned long searched_pc,
                      void *puc);
int cpu_restore_state_copy(struct TranslationBlock *tb,
                           CPUState *env, unsigned long searched_pc,
                           void *puc);
void cpu_resume_from_signal(CPUState *env1, void *puc);
void cpu_io_recompile(CPUState *env, void *retaddr);
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base, int flags,
                              int cflags);
void cpu_exec_init(CPUState *env);
int page_unprotect(target_ulong address, unsigned long pc, void *puc);
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access);
void tb_invalidate_page_range(target_ulong start, target_ulong end);
void tlb_flush_page(CPUState *env, target_ulong addr);
void tlb_flush(CPUState *env, int flush_global);
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu);
#ifndef VBOX
static inline int tlb_set_page(CPUState *env1, target_ulong vaddr,
                               target_phys_addr_t paddr, int prot,
                               int mmu_idx, int is_softmmu)
#else
DECLINLINE(int) tlb_set_page(CPUState *env1, target_ulong vaddr,
                             target_phys_addr_t paddr, int prot,
                             int mmu_idx, int is_softmmu)
#endif
{
    if (prot & PAGE_READ)
        prot |= PAGE_EXEC;
    return tlb_set_page_exec(env1, vaddr, paddr, prot, mmu_idx, is_softmmu);
}
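
/* Illustrative note (not in the original source): tlb_set_page() simply
   mirrors PAGE_READ into PAGE_EXEC before delegating to tlb_set_page_exec(),
   so a call such as
       tlb_set_page(env, vaddr, paddr, PAGE_READ | PAGE_WRITE, mmu_idx, 1);
   installs a mapping that is also executable. Targets that need to
   distinguish read from execute permissions must call tlb_set_page_exec()
   directly. */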

#define CODE_GEN_ALIGN 16 /* must be >= the size of an icache line */

#define CODE_GEN_PHYS_HASH_BITS 15
#define CODE_GEN_PHYS_HASH_SIZE (1 << CODE_GEN_PHYS_HASH_BITS)

#define MIN_CODE_GEN_BUFFER_SIZE (1024 * 1024)

/* estimated block size for TB allocation */
/* XXX: use a per-code average code fragment size and modulate it
   according to the host CPU */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 128
#else
#define CODE_GEN_AVG_BLOCK_SIZE 64
#endif

#if defined(__powerpc__) || defined(__x86_64__) || defined(__arm__)
#define USE_DIRECT_JUMP
#endif
#if defined(__i386__) && !defined(_WIN32)
#define USE_DIRECT_JUMP
#endif

#ifdef VBOX /* bird: not safe in next step because of threading & cpu_interrupt. */
#undef USE_DIRECT_JUMP
#endif /* VBOX */

struct TranslationBlock {
    target_ulong pc;      /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint64_t flags;       /* flags defining in which context the code was generated */
    uint16_t size;        /* size of target code for this block (1 <=
                             size <= TARGET_PAGE_SIZE) */
    uint16_t cflags;      /* compile flags */
#define CF_COUNT_MASK 0x7fff
#define CF_LAST_IO    0x8000 /* Last insn may be an IO access. */

#ifdef VBOX
#define CF_RAW_MODE   0x0010 /* block was generated in raw mode */
#endif

    uint8_t *tc_ptr;      /* pointer to the translated code */
    /* next matching tb for physical address. */
    struct TranslationBlock *phys_hash_next;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    target_ulong page_addr[2];

    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[4]; /* offset of jump instruction */
#else
# if defined(VBOX) && defined(RT_OS_DARWIN) && defined(RT_ARCH_AMD64)
#  error "First 4GB aren't reachable. jmp dword [tb_next] won't work."
# endif
    unsigned long tb_next[2]; /* address of jump generated code */
#endif
    /* list of TBs jumping to this one. This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
       jmp_first */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
    uint32_t icount;
};
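
/* Illustrative sketch (not in the original source): the jmp_first/jmp_next
   chain stores the slot index in the two low bits of each pointer, so
   walking the list of TBs that jump into 'tb' looks roughly like this:

       TranslationBlock *ptb = tb->jmp_first;
       for (;;) {
           int n = (long)ptb & 3;   /* 2 marks the list head itself */
           if (n == 2)
               break;               /* wrapped around to jmp_first */
           TranslationBlock *src = (TranslationBlock *)((long)ptb & ~3);
           ptb = src->jmp_next[n];  /* follow the link stored in slot n */
       }

   This matches the encoding described in the comment above; the loop is
   only an illustration, not code from this header. */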

#ifndef VBOX
static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
#else
DECLINLINE(unsigned int) tb_jmp_cache_hash_page(target_ulong pc)
#endif
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (tmp >> TB_JMP_PAGE_BITS) & TB_JMP_PAGE_MASK;
}

#ifndef VBOX
static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
#else
DECLINLINE(unsigned int) tb_jmp_cache_hash_func(target_ulong pc)
#endif
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (((tmp >> TB_JMP_PAGE_BITS) & TB_JMP_PAGE_MASK) |
            (tmp & TB_JMP_ADDR_MASK));
}
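
/* Illustrative note (not in the original source): assuming the usual
   cpu-defs.h values of this era (TB_JMP_CACHE_BITS = 12, hence
   TB_JMP_PAGE_BITS = 6, TB_JMP_ADDR_MASK = 0x3f, TB_JMP_PAGE_MASK = 0xfc0),
   the cache index becomes
       tmp   = pc ^ (pc >> (TARGET_PAGE_BITS - 6));
       index = ((tmp >> 6) & 0xfc0) | (tmp & 0x3f);
   i.e. page-derived bits fill the high half and in-page bits the low half,
   so consecutive PCs within one page land in distinct slots while different
   pages still spread across the whole cache. */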

#ifndef VBOX
static inline unsigned int tb_phys_hash_func(unsigned long pc)
#else
DECLINLINE(unsigned int) tb_phys_hash_func(unsigned long pc)
#endif
{
    return pc & (CODE_GEN_PHYS_HASH_SIZE - 1);
}

TranslationBlock *tb_alloc(target_ulong pc);
void tb_free(TranslationBlock *tb);
void tb_flush(CPUState *env);
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2);
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr);

extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

extern uint8_t *code_gen_ptr;
extern int code_gen_max_blocks;

#if defined(USE_DIRECT_JUMP)

#if defined(__powerpc__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
    uint32_t val, *ptr;

    /* patch the branch destination */
    ptr = (uint32_t *)jmp_addr;
    val = *ptr;
    val = (val & ~0x03fffffc) | ((addr - jmp_addr) & 0x03fffffc);
    *ptr = val;
    /* flush icache */
    asm volatile ("dcbst 0,%0" : : "r"(ptr) : "memory");
    asm volatile ("sync" : : : "memory");
    asm volatile ("icbi 0,%0" : : "r"(ptr) : "memory");
    asm volatile ("sync" : : : "memory");
    asm volatile ("isync" : : : "memory");
}
#elif defined(__i386__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#endif

static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    unsigned long offset;

    offset = tb->tb_jmp_offset[n];
    tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
    offset = tb->tb_jmp_offset[n + 2];
    if (offset != 0xffff)
        tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
}
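
/* Worked example (illustrative, not in the original source): on i386 the
   direct-jump patch rewrites the 32-bit displacement of a jmp rel32. The
   displacement is relative to the end of the 4-byte operand, hence the
   "jmp_addr + 4" above: if the operand sits at host address 0x1000 and the
   new target is 0x1200, the stored value is 0x1200 - (0x1000 + 4) = 0x1FC. */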

#else

/* set the jump target */
#ifndef VBOX
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
#else
DECLINLINE(void) tb_set_jmp_target(TranslationBlock *tb,
                                   int n, unsigned long addr)
#endif
{
    tb->tb_next[n] = addr;
}

#endif

#ifndef VBOX
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
#else
DECLINLINE(void) tb_add_jump(TranslationBlock *tb, int n,
                             TranslationBlock *tb_next)
#endif
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (unsigned long)tb_next->tc_ptr);

        /* add in TB jmp circular list */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((long)(tb) | (n));
    }
}
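
/* Illustrative note (not in the original source): after
   tb_add_jump(tb, 0, tb_next), the generated code of 'tb' branches straight
   to tb_next->tc_ptr, and 'tb' is pushed onto tb_next's incoming-jump list
   with the slot index (here 0) encoded in the pointer's low bits.
   tb_phys_invalidate() later walks that list to unchain every block that
   jumps into a TB being invalidated. */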

TranslationBlock *tb_find_pc(unsigned long pc_ptr);

#ifndef offsetof
#define offsetof(type, field) ((size_t) &((type *)0)->field)
#endif

#if defined(_WIN32)
#define ASM_DATA_SECTION ".section \".data\"\n"
#define ASM_PREVIOUS_SECTION ".section .text\n"
#elif defined(__APPLE__)
#define ASM_DATA_SECTION ".data\n"
#define ASM_PREVIOUS_SECTION ".text\n"
#else
#define ASM_DATA_SECTION ".section \".data\"\n"
#define ASM_PREVIOUS_SECTION ".previous\n"
#endif

#define ASM_OP_LABEL_NAME(n, opname) \
    ASM_NAME(__op_label) #n "." ASM_NAME(opname)

extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
extern void *io_mem_opaque[IO_MEM_NB_ENTRIES];

#include "qemu-lock.h"

extern spinlock_t tb_lock;

extern int tb_invalidated_flag;

#if !defined(CONFIG_USER_ONLY)

void tlb_fill(target_ulong addr, int is_write, int mmu_idx,
              void *retaddr);

#include "softmmu_defs.h"

#define ACCESS_TYPE (NB_MMU_MODES + 1)
#define MEMSUFFIX _code
#define env cpu_single_env

#define DATA_SIZE 1
#include "softmmu_header.h"

#define DATA_SIZE 2
#include "softmmu_header.h"

#define DATA_SIZE 4
#include "softmmu_header.h"

#define DATA_SIZE 8
#include "softmmu_header.h"

#undef ACCESS_TYPE
#undef MEMSUFFIX
#undef env
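
/* Illustrative note (not in the original source): repeatedly including
   softmmu_header.h with DATA_SIZE set to 1/2/4/8 and MEMSUFFIX set to _code
   stamps out the code-fetch accessor family, roughly:
       ldub_code(addr)  lduw_code(addr)  ldl_code(addr)  ldq_code(addr)
   Each accessor consults the soft TLB and calls tlb_fill() on a miss, which
   is why get_phys_addr_code() below can use ldub_code() to force a TLB
   entry to be populated. */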

#endif

#if defined(CONFIG_USER_ONLY)
static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
{
    return addr;
}
#else
# ifdef VBOX
target_ulong remR3PhysGetPhysicalAddressCode(CPUState *env, target_ulong addr, CPUTLBEntry *pTLBEntry);
#  if !defined(REM_PHYS_ADDR_IN_TLB)
target_ulong remR3HCVirt2GCPhys(CPUState *env1, void *addr);
#  endif
# endif
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
#ifndef VBOX
static inline target_ulong get_phys_addr_code(CPUState *env1, target_ulong addr)
#else
DECLINLINE(target_ulong) get_phys_addr_code(CPUState *env1, target_ulong addr)
#endif
{
    int mmu_idx, page_index, pd;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        ldub_code(addr);
    }
    pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
    if (pd > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
# ifdef VBOX
        /* deal with non-MMIO access handlers. */
        return remR3PhysGetPhysicalAddressCode(env1, addr, &env1->tlb_table[mmu_idx][page_index]);
# elif defined(TARGET_SPARC) || defined(TARGET_MIPS)
        do_unassigned_access(addr, 0, 1, 0, 4);
# else
        cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
# endif
    }

# if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
    return addr + env1->tlb_table[mmu_idx][page_index].addend;
# elif defined(VBOX)
    return remR3HCVirt2GCPhys(env1, (void *)(addr + env1->tlb_table[mmu_idx][page_index].addend));
# else
    return addr + env1->tlb_table[mmu_idx][page_index].addend - (unsigned long)phys_ram_base;
# endif
}
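
/* Illustrative note (not in the original source): the ldub_code(addr) call
   above is executed purely for its side effect. On a code-TLB miss it traps
   into tlb_fill(), which installs the translation via tlb_set_page_exec();
   the loaded byte itself is discarded, and the now-valid addr_code/addend
   fields are what the function actually computes its result from. */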

/* Deterministic execution requires that IO only be performed on the last
   instruction of a TB so that interrupts take effect immediately. */
#ifndef VBOX
static inline int can_do_io(CPUState *env)
#else
DECLINLINE(int) can_do_io(CPUState *env)
#endif
{
    if (!use_icount)
        return 1;

    /* If not executing code then assume we are ok. */
    if (!env->current_tb)
        return 1;

    return env->can_do_io != 0;
}
#endif

#ifdef USE_KQEMU
#define KQEMU_MODIFY_PAGE_MASK (0xff & ~(VGA_DIRTY_FLAG | CODE_DIRTY_FLAG))

int kqemu_init(CPUState *env);
int kqemu_cpu_exec(CPUState *env);
void kqemu_flush_page(CPUState *env, target_ulong addr);
void kqemu_flush(CPUState *env, int global);
void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr);
void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr);
void kqemu_cpu_interrupt(CPUState *env);
void kqemu_record_dump(void);

static inline int kqemu_is_ok(CPUState *env)
{
    return (env->kqemu_enabled &&
            (env->cr[0] & CR0_PE_MASK) &&
            !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
            (env->eflags & IF_MASK) &&
            !(env->eflags & VM_MASK) &&
            (env->kqemu_enabled == 2 ||
             ((env->hflags & HF_CPL_MASK) == 3 &&
              (env->eflags & IOPL_MASK) != IOPL_MASK)));
}

#endif