VirtualBox

source: vbox/trunk/src/recompiler/target-i386/cpu.h@471

Last change on this file since 471 was 1, checked in by vboxsync (commit message: "import").

File size: 20.6 KB
/*
 * i386 virtual CPU header
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifndef CPU_I386_H
#define CPU_I386_H

#include "config.h"

#ifdef TARGET_X86_64
#define TARGET_LONG_BITS 64
#else
#define TARGET_LONG_BITS 32
#endif

/* target supports implicit self-modifying code */
#define TARGET_HAS_SMC
/* support for self-modifying code even if the modified instruction is
   close to the modifying instruction */
#define TARGET_HAS_PRECISE_SMC

#include "cpu-defs.h"

#if defined(VBOX)
#include <iprt/critsect.h>
#include <iprt/thread.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <VBox/vmm.h>
#endif /* VBOX */

#if defined(__i386__) && !defined(CONFIG_SOFTMMU)
#define USE_CODE_COPY
#endif
#define R_EAX 0
#define R_ECX 1
#define R_EDX 2
#define R_EBX 3
#define R_ESP 4
#define R_EBP 5
#define R_ESI 6
#define R_EDI 7

#define R_AL 0
#define R_CL 1
#define R_DL 2
#define R_BL 3
#define R_AH 4
#define R_CH 5
#define R_DH 6
#define R_BH 7

#define R_ES 0
#define R_CS 1
#define R_SS 2
#define R_DS 3
#define R_FS 4
#define R_GS 5

/* segment descriptor fields */
#define DESC_G_MASK     (1 << 23)
#define DESC_B_SHIFT    22
#define DESC_B_MASK     (1 << DESC_B_SHIFT)
#define DESC_L_SHIFT    21 /* x86_64 only: 64-bit code segment */
#define DESC_L_MASK     (1 << DESC_L_SHIFT)
#define DESC_AVL_MASK   (1 << 20)
#define DESC_P_MASK     (1 << 15)
#define DESC_DPL_SHIFT  13
#define DESC_S_MASK     (1 << 12)
#define DESC_TYPE_SHIFT 8
#define DESC_A_MASK     (1 << 8)

#define DESC_CS_MASK    (1 << 11) /* 1=code segment 0=data segment */
#define DESC_C_MASK     (1 << 10) /* code: conforming */
#define DESC_R_MASK     (1 << 9)  /* code: readable */

#define DESC_E_MASK     (1 << 10) /* data: expansion direction */
#define DESC_W_MASK     (1 << 9)  /* data: writable */

#define DESC_TSS_BUSY_MASK (1 << 9)

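/* A minimal illustrative sketch (not part of the original header) of how
   the descriptor shifts above are consumed: pulling the DPL and type
   fields out of the raw descriptor flags word.  The helper names are
   hypothetical. */
static inline int desc_get_dpl(uint32_t flags)
{
    return (flags >> DESC_DPL_SHIFT) & 3;    /* 2-bit privilege level */
}

static inline int desc_get_type(uint32_t flags)
{
    return (flags >> DESC_TYPE_SHIFT) & 0xf; /* 4-bit descriptor type */
}
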
/* eflags masks */
#define CC_C 0x0001
#define CC_P 0x0004
#define CC_A 0x0010
#define CC_Z 0x0040
#define CC_S 0x0080
#define CC_O 0x0800

#define TF_SHIFT   8
#define IOPL_SHIFT 12
#define VM_SHIFT   17

#define TF_MASK   0x00000100
#define IF_MASK   0x00000200
#define DF_MASK   0x00000400
#define IOPL_MASK 0x00003000
#define NT_MASK   0x00004000
#define RF_MASK   0x00010000
#define VM_MASK   0x00020000
#define AC_MASK   0x00040000
#define VIF_MASK  0x00080000
#define VIP_MASK  0x00100000
#define ID_MASK   0x00200000

/* hidden flags - used internally by qemu to represent additional cpu
   states. Only the CPL and INHIBIT_IRQ bits are not redundant. We avoid
   using the IOPL_MASK, TF_MASK and VM_MASK bit positions to ease ORing
   with eflags. */
/* current cpl */
#define HF_CPL_SHIFT         0
/* true if soft mmu is being used */
#define HF_SOFTMMU_SHIFT     2
/* true if hardware interrupts must be disabled for the next instruction */
#define HF_INHIBIT_IRQ_SHIFT 3
/* 16- or 32-bit segments */
#define HF_CS32_SHIFT        4
#define HF_SS32_SHIFT        5
/* zero base for DS, ES and SS: can be '0' only in a 32-bit CS segment */
#define HF_ADDSEG_SHIFT      6
/* copy of CR0.PE (protected mode) */
#define HF_PE_SHIFT          7
#define HF_TF_SHIFT          8  /* must be same as eflags */
#define HF_MP_SHIFT          9  /* the order must be MP, EM, TS */
#define HF_EM_SHIFT          10
#define HF_TS_SHIFT          11
#define HF_IOPL_SHIFT        12 /* must be same as eflags */
#define HF_LMA_SHIFT         14 /* only used on x86_64: long mode active */
#define HF_CS64_SHIFT        15 /* only used on x86_64: 64-bit code segment */
#define HF_OSFXSR_SHIFT      16 /* CR4.OSFXSR */
#define HF_VM_SHIFT          17 /* must be same as eflags */

#define HF_CPL_MASK         (3 << HF_CPL_SHIFT)
#define HF_SOFTMMU_MASK     (1 << HF_SOFTMMU_SHIFT)
#define HF_INHIBIT_IRQ_MASK (1 << HF_INHIBIT_IRQ_SHIFT)
#define HF_CS32_MASK        (1 << HF_CS32_SHIFT)
#define HF_SS32_MASK        (1 << HF_SS32_SHIFT)
#define HF_ADDSEG_MASK      (1 << HF_ADDSEG_SHIFT)
#define HF_PE_MASK          (1 << HF_PE_SHIFT)
#define HF_TF_MASK          (1 << HF_TF_SHIFT)
#define HF_MP_MASK          (1 << HF_MP_SHIFT)
#define HF_EM_MASK          (1 << HF_EM_SHIFT)
#define HF_TS_MASK          (1 << HF_TS_SHIFT)
#define HF_LMA_MASK         (1 << HF_LMA_SHIFT)
#define HF_CS64_MASK        (1 << HF_CS64_SHIFT)
#define HF_OSFXSR_MASK      (1 << HF_OSFXSR_SHIFT)

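/* Illustrative sketch, assuming the conventions above: eflags keeps the
   IOPL field while the current CPL lives in the low bits of hflags, so a
   hypothetical I/O-privilege test reads one field from each word.  (The
   real check is done in the helper/translator code, not in this header.) */
static inline int cpu_io_allowed(uint32_t eflags, uint32_t hflags)
{
    unsigned int iopl = (eflags & IOPL_MASK) >> IOPL_SHIFT;
    unsigned int cpl  = (hflags & HF_CPL_MASK) >> HF_CPL_SHIFT;
    return cpl <= iopl; /* IN/OUT and friends fault when CPL > IOPL */
}
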
#define CR0_PE_MASK (1 << 0)
#define CR0_MP_MASK (1 << 1)
#define CR0_EM_MASK (1 << 2)
#define CR0_TS_MASK (1 << 3)
#define CR0_ET_MASK (1 << 4)
#define CR0_NE_MASK (1 << 5)
#define CR0_WP_MASK (1 << 16)
#define CR0_AM_MASK (1 << 18)
#define CR0_PG_MASK (1 << 31)

#define CR4_VME_MASK        (1 << 0)
#define CR4_PVI_MASK        (1 << 1)
#define CR4_TSD_MASK        (1 << 2)
#define CR4_DE_MASK         (1 << 3)
#define CR4_PSE_MASK        (1 << 4)
#define CR4_PAE_MASK        (1 << 5)
#define CR4_PGE_MASK        (1 << 7)
#define CR4_PCE_MASK        (1 << 8)
#define CR4_OSFXSR_MASK     (1 << 9)
#define CR4_OSXMMEXCPT_MASK (1 << 10)

#define PG_PRESENT_BIT  0
#define PG_RW_BIT       1
#define PG_USER_BIT     2
#define PG_PWT_BIT      3
#define PG_PCD_BIT      4
#define PG_ACCESSED_BIT 5
#define PG_DIRTY_BIT    6
#define PG_PSE_BIT      7
#define PG_GLOBAL_BIT   8

#define PG_PRESENT_MASK  (1 << PG_PRESENT_BIT)
#define PG_RW_MASK       (1 << PG_RW_BIT)
#define PG_USER_MASK     (1 << PG_USER_BIT)
#define PG_PWT_MASK      (1 << PG_PWT_BIT)
#define PG_PCD_MASK      (1 << PG_PCD_BIT)
#define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT)
#define PG_DIRTY_MASK    (1 << PG_DIRTY_BIT)
#define PG_PSE_MASK      (1 << PG_PSE_BIT)
#define PG_GLOBAL_MASK   (1 << PG_GLOBAL_BIT)

#define PG_ERROR_W_BIT 1

#define PG_ERROR_P_MASK    0x01
#define PG_ERROR_W_MASK    (1 << PG_ERROR_W_BIT)
#define PG_ERROR_U_MASK    0x04
#define PG_ERROR_RSVD_MASK 0x08

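/* Illustrative sketch: how a #PF error code is composed from the
   PG_ERROR_* bits above.  Hypothetical helper; the recompiler builds the
   code inline in its MMU fault path. */
static inline int pg_make_error_code(int page_present, int is_write, int is_user)
{
    return (page_present ? PG_ERROR_P_MASK : 0) |
           ((is_write != 0) << PG_ERROR_W_BIT) |
           (is_user ? PG_ERROR_U_MASK : 0);
}
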
#define MSR_IA32_APICBASE        0x1b
#define MSR_IA32_APICBASE_BSP    (1 << 8)
#define MSR_IA32_APICBASE_ENABLE (1 << 11)
#define MSR_IA32_APICBASE_BASE   (0xfffff << 12)

#ifndef MSR_IA32_SYSENTER_CS /* VBox x86.h kludge */
#define MSR_IA32_SYSENTER_CS  0x174
#define MSR_IA32_SYSENTER_ESP 0x175
#define MSR_IA32_SYSENTER_EIP 0x176
#endif

#define MSR_EFER 0xc0000080

#define MSR_EFER_SCE   (1 << 0)
#define MSR_EFER_LME   (1 << 8)
#define MSR_EFER_LMA   (1 << 10)
#define MSR_EFER_NXE   (1 << 11)
#define MSR_EFER_FFXSR (1 << 14)

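/* Illustrative sketch of how these EFER bits interact: EFER.LMA becomes
   set once long mode is enabled (EFER.LME) and paging is turned on
   (CR0.PG).  Hypothetical predicate, not part of the original API. */
static inline int cpu_long_mode_active(uint64_t efer, uint32_t cr0)
{
    return (efer & MSR_EFER_LME) && (cr0 & CR0_PG_MASK);
}
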
#define MSR_STAR         0xc0000081
#define MSR_LSTAR        0xc0000082
#define MSR_CSTAR        0xc0000083
#define MSR_FMASK        0xc0000084
#define MSR_FSBASE       0xc0000100
#define MSR_GSBASE       0xc0000101
#define MSR_KERNELGSBASE 0xc0000102

/* cpuid_features bits */
#define CPUID_FP87    (1 << 0)
#define CPUID_VME     (1 << 1)
#define CPUID_DE      (1 << 2)
#define CPUID_PSE     (1 << 3)
#define CPUID_TSC     (1 << 4)
#define CPUID_MSR     (1 << 5)
#define CPUID_PAE     (1 << 6)
#define CPUID_MCE     (1 << 7)
#define CPUID_CX8     (1 << 8)
#define CPUID_APIC    (1 << 9)
#define CPUID_SEP     (1 << 11) /* sysenter/sysexit */
#define CPUID_MTRR    (1 << 12)
#define CPUID_PGE     (1 << 13)
#define CPUID_MCA     (1 << 14)
#define CPUID_CMOV    (1 << 15)
#define CPUID_PAT     (1 << 16)
#define CPUID_CLFLUSH (1 << 19)
/* ... */
#define CPUID_MMX     (1 << 23)
#define CPUID_FXSR    (1 << 24)
#define CPUID_SSE     (1 << 25)
#define CPUID_SSE2    (1 << 26)

#ifdef VBOX
#define CPUID_PSE36   (1 << 17)
#define CPUID_HTT     (1 << 28)
#endif

#define CPUID_EXT_SS3     (1 << 0)
#define CPUID_EXT_MONITOR (1 << 3)
#define CPUID_EXT_CX16    (1 << 13)

#define CPUID_EXT2_SYSCALL (1 << 11)
#define CPUID_EXT2_NX      (1 << 20)
#define CPUID_EXT2_LM      (1 << 29)

#define EXCP00_DIVZ  0
#define EXCP01_SSTP  1
#define EXCP02_NMI   2
#define EXCP03_INT3  3
#define EXCP04_INTO  4
#define EXCP05_BOUND 5
#define EXCP06_ILLOP 6
#define EXCP07_PREX  7
#define EXCP08_DBLE  8
#define EXCP09_XERR  9
#define EXCP0A_TSS   10
#define EXCP0B_NOSEG 11
#define EXCP0C_STACK 12
#define EXCP0D_GPF   13
#define EXCP0E_PAGE  14
#define EXCP10_COPR  16
#define EXCP11_ALGN  17
#define EXCP12_MCHK  18

enum {
    CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
    CC_OP_EFLAGS,  /* all cc are explicitly computed, CC_SRC = flags */

    CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */
    CC_OP_MULW,
    CC_OP_MULL,
    CC_OP_MULQ,

    CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADDW,
    CC_OP_ADDL,
    CC_OP_ADDQ,

    CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADCW,
    CC_OP_ADCL,
    CC_OP_ADCQ,

    CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SUBW,
    CC_OP_SUBL,
    CC_OP_SUBQ,

    CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SBBW,
    CC_OP_SBBL,
    CC_OP_SBBQ,

    CC_OP_LOGICB, /* modify all flags, CC_DST = res */
    CC_OP_LOGICW,
    CC_OP_LOGICL,
    CC_OP_LOGICQ,

    CC_OP_INCB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
    CC_OP_INCW,
    CC_OP_INCL,
    CC_OP_INCQ,

    CC_OP_DECB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
    CC_OP_DECW,
    CC_OP_DECL,
    CC_OP_DECQ,

    CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.msb = C */
    CC_OP_SHLW,
    CC_OP_SHLL,
    CC_OP_SHLQ,

    CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
    CC_OP_SARW,
    CC_OP_SARL,
    CC_OP_SARQ,

    CC_OP_NB,
};

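/* Illustrative sketch of the lazy flag evaluation the CC_OP_* codes
   drive: for the LOGIC family only CC_DST is recorded, and ZF/SF can be
   recomputed on demand.  Hypothetical helper for the 32-bit variant,
   mirroring what the real flag-evaluation helpers do. */
static inline int cc_logicl_compute_zf_sf(uint32_t cc_dst)
{
    int eflags = 0;
    if (cc_dst == 0)
        eflags |= CC_Z;       /* result was zero */
    if ((int32_t)cc_dst < 0)
        eflags |= CC_S;       /* sign bit of the result */
    return eflags;            /* C and O are known to be clear */
}
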
#if (defined(__i386__) || defined(__x86_64__)) && !defined(_BSD)
#define USE_X86LDOUBLE
#endif

#ifdef USE_X86LDOUBLE
typedef long double CPU86_LDouble;
#else
typedef double CPU86_LDouble;
#endif

typedef struct SegmentCache {
    uint32_t selector;
    target_ulong base;
    uint32_t limit;
    uint32_t flags;
#ifdef VBOX
    /** The new selector is saved here when we are unable to sync it
     *  before invoking the recompiled code. */
    uint32_t newselector;
#endif
} SegmentCache;

typedef union {
    uint8_t _b[16];
    uint16_t _w[8];
    uint32_t _l[4];
    uint64_t _q[2];
    float _s[4];
    double _d[2];
} XMMReg;

typedef union {
    uint8_t _b[8];
    uint16_t _w[4];
    uint32_t _l[2];
    uint64_t q;
} MMXReg;

#ifdef WORDS_BIGENDIAN
#define XMM_B(n) _b[15 - (n)]
#define XMM_W(n) _w[7 - (n)]
#define XMM_L(n) _l[3 - (n)]
#define XMM_S(n) _s[3 - (n)]
#define XMM_Q(n) _q[1 - (n)]
#define XMM_D(n) _d[1 - (n)]

#define MMX_B(n) _b[7 - (n)]
#define MMX_W(n) _w[3 - (n)]
#define MMX_L(n) _l[1 - (n)]
#else
#define XMM_B(n) _b[n]
#define XMM_W(n) _w[n]
#define XMM_L(n) _l[n]
#define XMM_S(n) _s[n]
#define XMM_Q(n) _q[n]
#define XMM_D(n) _d[n]

#define MMX_B(n) _b[n]
#define MMX_W(n) _w[n]
#define MMX_L(n) _l[n]
#endif
#define MMX_Q(n) q

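/* Illustrative usage sketch: the XMM_x()/MMX_x() wrappers above hide
   host endianness, so element 0 is always the architecturally lowest
   lane whether or not WORDS_BIGENDIAN is defined.  Hypothetical helper. */
static inline uint32_t xmm_low_dword(const XMMReg *r)
{
    return r->XMM_L(0); /* low 32-bit lane on any host */
}
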
#ifdef TARGET_X86_64
#define CPU_NB_REGS 16
#else
#define CPU_NB_REGS 8
#endif

typedef struct CPUX86State {
#if TARGET_LONG_BITS > HOST_LONG_BITS
    /* temporaries if we cannot store them in host registers */
    target_ulong t0, t1, t2;
#endif

    /* standard registers */
    target_ulong regs[CPU_NB_REGS];
    target_ulong eip;
    target_ulong eflags; /* eflags register. During CPU emulation, CC
                            flags and DF are set to zero because they are
                            stored elsewhere */

    /* emulator internal eflags handling */
    target_ulong cc_src;
    target_ulong cc_dst;
    uint32_t cc_op;
    int32_t df; /* D flag: 1 if D = 0, -1 if D = 1 */
    uint32_t hflags; /* hidden flags, see HF_xxx constants */

    /* segments */
    SegmentCache segs[6]; /* selector values */
    SegmentCache ldt;
    SegmentCache tr;
    SegmentCache gdt; /* only base and limit are used */
    SegmentCache idt; /* only base and limit are used */

    target_ulong cr[5]; /* NOTE: cr1 is unused */
    uint32_t a20_mask;

    /* FPU state */
    unsigned int fpstt; /* top of stack index */
    unsigned int fpus;
    unsigned int fpuc;
    uint8_t fptags[8]; /* 0 = valid, 1 = empty */
    union {
#ifdef USE_X86LDOUBLE
        CPU86_LDouble d __attribute__((aligned(16)));
#else
        CPU86_LDouble d;
#endif
        MMXReg mmx;
    } fpregs[8];

    /* emulator internal variables */
    CPU86_LDouble ft0;
    union {
        float f;
        double d;
        int i32;
        int64_t i64;
    } fp_convert;

    uint32_t mxcsr;
    XMMReg xmm_regs[CPU_NB_REGS];
    XMMReg xmm_t0;
    MMXReg mmx_t0;

    /* sysenter registers */
    uint32_t sysenter_cs;
    uint32_t sysenter_esp;
    uint32_t sysenter_eip;
    uint64_t efer;
    uint64_t star;
#ifdef TARGET_X86_64
    target_ulong lstar;
    target_ulong cstar;
    target_ulong fmask;
    target_ulong kernelgsbase;
#endif

    /* temporary data for USE_CODE_COPY mode */
#ifdef USE_CODE_COPY
    uint32_t tmp0;
    uint32_t saved_esp;
    int native_fp_regs; /* if true, the FPU state is in the native CPU regs */
#endif

    /* exception/interrupt handling */
    jmp_buf jmp_env;
    int exception_index;
    int error_code;
    int exception_is_int;
    target_ulong exception_next_eip;
#if defined(VBOX)
    struct TranslationBlock * volatile current_tb; /* currently executing TB */
#else
    struct TranslationBlock *current_tb; /* currently executing TB */
#endif
    target_ulong dr[8]; /* debug registers */
#if defined(VBOX)
    volatile int32_t interrupt_request;
#else
    int interrupt_request;
#endif
    int user_mode_only; /* user mode only simulation */

    /* soft mmu support */
    /* in order to avoid passing too many arguments to the memory
       write helpers, we store some rarely used information in the CPU
       context */
    unsigned long mem_write_pc; /* host pc at which the memory was
                                   written */
    target_ulong mem_write_vaddr; /* target virtual addr at which the
                                     memory was written */
    /* 0 = kernel, 1 = user */
    CPUTLBEntry tlb_read[2][CPU_TLB_SIZE];
    CPUTLBEntry tlb_write[2][CPU_TLB_SIZE];

    /* from this point: preserved by CPU reset */
    /* ice debug support */
    target_ulong breakpoints[MAX_BREAKPOINTS];
    int nb_breakpoints;
    int singlestep_enabled;

#ifdef VBOX
    /** cpu state flags (see defines below) */
    uint32_t state;
    /** The VM handle. */
    PVM pVM;
    /** code buffer for instruction emulation */
    void *pvCodeBuffer;
    /** code buffer size */
    uint32_t cbCodeBuffer;
#endif /* VBOX */

    /* processor features (e.g. for CPUID insn) */
#ifndef VBOX /* remR3CpuId deals with these */
    uint32_t cpuid_vendor1;
    uint32_t cpuid_vendor2;
    uint32_t cpuid_vendor3;
    uint32_t cpuid_version;
#endif /* !VBOX */
    uint32_t cpuid_ext_features;
    uint32_t cpuid_features;

#ifndef VBOX
    /* in order to simplify APIC support, we leave this pointer to the
       user */
    struct APICState *apic_state;
    /* user data */
    void *opaque;
#endif
} CPUX86State;

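/* Illustrative usage sketch: the general purpose registers are indexed
   with the R_* constants defined earlier, e.g. reading the accumulator.
   Hypothetical helper, not part of the original API. */
static inline target_ulong cpu_get_reg_eax(CPUX86State *env)
{
    return env->regs[R_EAX];
}
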
#ifdef VBOX
/** CPUX86State state flags
 * @{ */
#define CPU_RAW_RING0            0x0002 /* Set after first time RawR0 is executed, never cleared. */
#define CPU_EMULATE_SINGLE_INSTR 0x0040 /* Execute a single instruction in emulation mode. */
#define CPU_EMULATE_SINGLE_STEP  0x0080 /* Go into single step mode. */
#define CPU_RAW_HWACC            0x0100 /* Set after first time HWACC is executed, never cleared. */
/** @} */
#endif /* VBOX */

#ifdef VBOX
CPUX86State *cpu_x86_init(CPUX86State *env);
#else  /* !VBOX */
CPUX86State *cpu_x86_init(void);
#endif /* !VBOX */
int cpu_x86_exec(CPUX86State *s);
void cpu_x86_close(CPUX86State *s);
int cpu_get_pic_interrupt(CPUX86State *s);
/* MSDOS compatibility mode FPU exception support */
void cpu_set_ferr(CPUX86State *s);

/* this function must always be used to load data in the segment
   cache: it synchronizes the hflags with the segment cache values */
static inline void cpu_x86_load_seg_cache(CPUX86State *env,
                                          int seg_reg, unsigned int selector,
                                          uint32_t base, unsigned int limit,
                                          unsigned int flags)
{
    SegmentCache *sc;
    unsigned int new_hflags;

    sc = &env->segs[seg_reg];
    sc->selector = selector;
    sc->base = base;
    sc->limit = limit;
    sc->flags = flags;
#ifdef VBOX
    sc->newselector = 0;
#endif

    /* update the hidden flags */
    {
        if (seg_reg == R_CS) {
#ifdef TARGET_X86_64
            if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) {
                /* long mode */
                env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
                env->hflags &= ~(HF_ADDSEG_MASK);
            } else
#endif
            {
                /* legacy / compatibility case */
                new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
                    >> (DESC_B_SHIFT - HF_CS32_SHIFT);
                env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) |
                    new_hflags;
            }
        }
        new_hflags = (env->segs[R_SS].flags & DESC_B_MASK)
            >> (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (env->hflags & HF_CS64_MASK) {
            /* zero base assumed for DS, ES and SS in long mode */
        } else if (!(env->cr[0] & CR0_PE_MASK) ||
                   (env->eflags & VM_MASK) ||
                   !(env->hflags & HF_CS32_MASK)) {
            /* XXX: try to avoid this test. The problem comes from the
               fact that in real mode or vm86 mode we only modify the
               'base' and 'selector' fields of the segment cache to go
               faster. A solution may be to force addseg to one in
               translate-i386.c. */
            new_hflags |= HF_ADDSEG_MASK;
        } else {
            new_hflags |= ((env->segs[R_DS].base |
                            env->segs[R_ES].base |
                            env->segs[R_SS].base) != 0) <<
                HF_ADDSEG_SHIFT;
        }
        env->hflags = (env->hflags &
                       ~(HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags;
    }
}

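/* Illustrative usage sketch: loading a real-mode style data segment
   through the cache loader above, with base = selector << 4 and a 64K
   limit.  The flag combination shown is a plausible present/writable
   data segment, not a value taken from the original sources. */
static inline void cpu_example_load_ds(CPUX86State *env, unsigned int selector)
{
    cpu_x86_load_seg_cache(env, R_DS, selector,
                           (uint32_t)selector << 4, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK);
}
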
/* wrapper, just in case memory mappings must be changed */
static inline void cpu_x86_set_cpl(CPUX86State *s, int cpl)
{
#if HF_CPL_MASK == 3
    s->hflags = (s->hflags & ~HF_CPL_MASK) | cpl;
#else
#error HF_CPL_MASK is hardcoded
#endif
}

/* used for debug or cpu save/restore */
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f);
CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper);

/* the following helpers are only usable in user mode simulation as
   they can trigger unexpected exceptions */
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32);
void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32);

/* you can call this signal handler from your SIGBUS and SIGSEGV
   signal handlers to inform the virtual CPU of exceptions. A non-zero
   value is returned if the signal was handled by the virtual CPU. */
struct siginfo;
int cpu_x86_signal_handler(int host_signum, struct siginfo *info,
                           void *puc);
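
/* Illustrative usage sketch (hypothetical handler): a host SIGSEGV
   handler forwards the fault to the virtual CPU first and only treats it
   as a host-side problem when it was not a guest fault. */
static inline void example_host_segv_handler(int host_signum,
                                             struct siginfo *info, void *puc)
{
    if (cpu_x86_signal_handler(host_signum, info, puc))
        return; /* the fault belonged to guest code and was handled */
    /* otherwise: a genuine host-side fault; re-raise or abort here */
}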
void cpu_x86_set_a20(CPUX86State *env, int a20_state);

uint64_t cpu_get_tsc(CPUX86State *env);

void cpu_set_apic_base(CPUX86State *env, uint64_t val);
uint64_t cpu_get_apic_base(CPUX86State *env);
void cpu_set_apic_tpr(CPUX86State *env, uint8_t val);
#ifndef NO_CPU_IO_DEFS
uint8_t cpu_get_apic_tpr(CPUX86State *env);
#endif

/* will be removed */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);

/* used for debugging */
#define X86_DUMP_FPU  0x0001 /* dump FPU state too */
#define X86_DUMP_CCOP 0x0002 /* dump qemu flag cache */

#ifdef VBOX
void cpu_trap_raw(CPUX86State *env1);

/* in helper.c */
uint8_t read_byte(CPUX86State *env1, target_ulong addr);
uint16_t read_word(CPUX86State *env1, target_ulong addr);
void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val);
uint32_t read_dword(CPUX86State *env1, target_ulong addr);
void write_word(CPUX86State *env1, target_ulong addr, uint16_t val);
void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val);
/* in helper.c */
int emulate_single_instr(CPUX86State *env1);
int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
                            uint32_t *esp_ptr, int dpl);

void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr);
void save_raw_fp_state(CPUX86State *env, uint8_t *ptr);

#endif /* VBOX */

#define TARGET_PAGE_BITS 12
#include "cpu-all.h"

#endif /* CPU_I386_H */