VirtualBox

source: vbox/trunk/src/recompiler/target-i386/helper2.c@12022

Last change on this file was 12022, checked in by vboxsync, 16 years ago:

More logging

  • Property svn:eol-style set to native
File size: 33.9 KB
/*
 * i386 helpers (without register variable usage)
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#ifndef VBOX
#include <signal.h>
#include <assert.h>
#else
# include <VBox/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */
#endif

#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_MMU

#ifdef USE_CODE_COPY
#include <asm/ldt.h>
#include <linux/unistd.h>
#include <linux/version.h>

int modify_ldt(int func, void *ptr, unsigned long bytecount)
{
    return syscall(__NR_modify_ldt, func, ptr, bytecount);
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66)
#define modify_ldt_ldt_s user_desc
#endif
#endif /* USE_CODE_COPY */

#ifdef VBOX
CPUX86State *cpu_x86_init(CPUX86State *env)
{
#else /* !VBOX */
CPUX86State *cpu_x86_init(void)
{
    CPUX86State *env;
#endif /* !VBOX */
    static int inited;

#ifndef VBOX
    env = qemu_mallocz(sizeof(CPUX86State));
    if (!env)
        return NULL;
#endif /* !VBOX */
    cpu_exec_init(env);

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
    }
#ifdef USE_CODE_COPY
    /* testing code for code copy case */
    {
        struct modify_ldt_ldt_s ldt;

        ldt.entry_number = 1;
        ldt.base_addr = (unsigned long)env;
        ldt.limit = (sizeof(CPUState) + 0xfff) >> 12;
        ldt.seg_32bit = 1;
        ldt.contents = MODIFY_LDT_CONTENTS_DATA;
        ldt.read_exec_only = 0;
        ldt.limit_in_pages = 1;
        ldt.seg_not_present = 0;
        ldt.useable = 1;
        modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */

        asm volatile ("movl %0, %%fs" : : "r" ((1 << 3) | 7));
    }
#endif
#ifndef VBOX /* cpuid_features is initialized by caller */
    {
        int family, model, stepping;
#ifdef TARGET_X86_64
        env->cpuid_vendor1 = 0x68747541; /* "Auth" */
        env->cpuid_vendor2 = 0x69746e65; /* "enti" */
        env->cpuid_vendor3 = 0x444d4163; /* "cAMD" */
        family = 6;
        model = 2;
        stepping = 3;
#else
        env->cpuid_vendor1 = 0x756e6547; /* "Genu" */
        env->cpuid_vendor2 = 0x49656e69; /* "ineI" */
        env->cpuid_vendor3 = 0x6c65746e; /* "ntel" */
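        /* CPUID leaf 0 returns the vendor string in EBX:EDX:ECX; each
           dword holds four ASCII characters in little-endian order, so
           0x756e6547 is 'G','e','n','u' and the three words together
           spell "GenuineIntel" (resp. "AuthenticAMD" above). */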
#if 0
        /* pentium 75-200 */
        family = 5;
        model = 2;
        stepping = 11;
#else
        /* pentium pro */
        family = 6;
        model = 3;
        stepping = 3;
#endif
#endif
        env->cpuid_level = 2;
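        /* CPUID.1 EAX layout: stepping in bits 3:0, model in bits 7:4,
           family in bits 11:8 (the extended family/model fields are not
           used for these small values). */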
        env->cpuid_version = (family << 8) | (model << 4) | stepping;
        env->cpuid_features = (CPUID_FP87 | CPUID_DE | CPUID_PSE |
                               CPUID_TSC | CPUID_MSR | CPUID_MCE |
                               CPUID_CX8 | CPUID_PGE | CPUID_CMOV |
                               CPUID_PAT);
        env->pat = 0x0007040600070406ULL;
        env->cpuid_ext_features = CPUID_EXT_SSE3;
        env->cpuid_features |= CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | CPUID_PAE | CPUID_SEP;
        env->cpuid_features |= CPUID_APIC;
        env->cpuid_xlevel = 0;
        {
            const char *model_id = "QEMU Virtual CPU version " QEMU_VERSION;
            int c, len, i;
            len = strlen(model_id);
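            /* Pack the 48-byte brand string (CPUID leaves
               0x80000002..0x80000004) four characters per 32-bit word,
               little-endian, NUL-padded past the end of model_id. */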
            for(i = 0; i < 48; i++) {
                if (i >= len)
                    c = '\0';
                else
                    c = model_id[i];
                env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
            }
        }
#ifdef TARGET_X86_64
        /* currently not enabled for std i386 because not fully tested */
        env->cpuid_ext2_features = (env->cpuid_features & 0x0183F3FF);
        env->cpuid_ext2_features |= CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX;
        env->cpuid_xlevel = 0x80000008;

        /* these features are needed for Win64 and aren't fully implemented */
        env->cpuid_features |= CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA;
        /* this feature is needed for Solaris and isn't fully implemented */
        env->cpuid_features |= CPUID_PSE36;
#endif
    }
#endif /* VBOX */
    cpu_reset(env);
#ifdef USE_KQEMU
    kqemu_init(env);
#endif
    return env;
}

/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = 0xffffffff;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK;
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK;

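    /* Architectural reset state: CS selector 0xf000 with hidden base
       0xffff0000, so together with EIP = 0xfff0 the first instruction
       fetch is at the reset vector 0xfffffff0; the other segments get
       base 0, limit 0xffff. */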
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 0);

    env->eip = 0xfff0;
    env->regs[R_EDX] = 0x600; /* indicate P6 processor */

    env->eflags = 0x2;

    /* FPU init */
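    /* fptags[i] = 1 marks ST(i) empty; FPUC 0x37f = all exceptions
       masked, 64-bit precision, round to nearest; MXCSR 0x1f80 is the
       SSE power-on default (all exceptions masked). */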
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;
}

#ifndef VBOX
void cpu_x86_close(CPUX86State *env)
{
    free(env);
}
#endif

/***********************************************************/
/* x86 debug */

static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    (env->hflags >> HF_HALTED_SHIFT) & 1);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    (env->hflags >> HF_HALTED_SHIFT) & 1);
    }

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
    } else
#endif
    {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT= %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            qemu_snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            qemu_snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
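        /* Rebuild an abridged tag byte (same format as the FXSAVE FTW
           field): bit i is set when ST(i) holds a value, clear when the
           register is tagged empty. */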
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TBs */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
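        /* 0xffefffff clears physical address bit 20; OR-ing the new
           state back in makes that bit follow the A20 gate while all
           other address bits pass through unchanged. */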
        env->a20_mask = 0xffefffff | (a20_state << 20);
    }
}

void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0 %x->0x%08x efer=%x\n", (uint32_t)env->cr[0], new_cr0, env->efer);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
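    /* CR0.MP/EM/TS occupy bits 1-3, so one shift by HF_MP_SHIFT - 1
       moves all three into their (contiguous) hflags positions, and
       the mask drops everything else. */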
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
#ifdef VBOX
    remR3ChangeCpuMode(env);
#endif
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x -> %08x\n", (uint32_t)env->cr[4], (uint32_t)new_cr4);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
#ifdef VBOX
    remR3ChangeCpuMode(env);
#endif
}

/* XXX: also flush 4MB pages */
void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
{
#if defined(DEBUG) && defined(VBOX)
    uint32_t pde;

    /* page directory entry */
    pde = remR3PhysReadU32(((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & env->a20_mask);

    /* if PSE bit is set, then we use a 4MB page */
    if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
        printf("cpu_x86_flush_tlb: 4 MB page!!!!!\n");
    }
#endif
    tlb_flush_page(env, addr);
}

#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int is_user, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}

#else

#define PHYS_ADDR_MASK 0xfffff000

/* return value:
   -1 = cannot handle fault
    0 = nothing more to do
    1 = generate PF fault
    2 = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int is_user, int is_softmmu)
{
    uint64_t ptep, pte;
    uint32_t pdpe_addr, pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write;
    unsigned long paddr, page_offset;
    target_ulong vaddr, virt_addr;

#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;

        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr;
            uint64_t pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

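            /* 4-level walk: bits 47-39 of the address index the PML4,
               38-30 the PDPT, 29-21 the page directory and 20-12 the
               page table; each table holds 512 8-byte entries, hence
               the "& 0x1ff" and "<< 3". */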
            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
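            /* NX accumulation: XOR-ing each entry with PG_NX_MASK
               inverts its NX bit, so AND-ing the levels together keeps
               the bit set only when no level requested no-execute; it
               is flipped back before the final permission checks. */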
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
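        /* (addr >> 20) & 0xffc is ((addr >> 22) & 0x3ff) << 2 folded
           into one shift: the byte offset of this address's 4-byte PDE
           within the page directory. */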
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
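    /* page_offset is the 4KB-aligned offset of addr inside the
       (possibly large) page, so the TLB entry below covers exactly one
       4KB slice of it. */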
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, is_user, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    env->cr[2] = addr;
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint32_t pde_addr, pte_addr;
    uint32_t pde, pte, paddr, page_offset, page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        uint32_t pdpe_addr, pde_addr, pte_addr;
        uint32_t pdpe;

        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldl_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldl_phys(pte_addr);
        }
    } else {
        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
#endif /* !CONFIG_USER_ONLY */

#if defined(USE_CODE_COPY)
struct fpstate {
    uint16_t fpuc;
    uint16_t dummy1;
    uint16_t fpus;
    uint16_t dummy2;
    uint16_t fptag;
    uint16_t dummy3;

    uint32_t fpip;
    uint32_t fpcs;
    uint32_t fpoo;
    uint32_t fpos;
    uint8_t fpregs1[8 * 10];
};

void restore_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
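    /* Build the full x87 tag word: two bits per register, 11b marks a
       register empty; non-empty registers are left at 00b (valid) and,
       per the comment below, the finer zero/special tag classes are
       left for the FPU to work out itself. */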
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            /* the FPU automatically computes it */
        }
    }
    fp->fptag = fptag;
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    asm volatile ("frstor %0" : "=m" (*fp));
    env->native_fp_regs = 1;
}

void save_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    asm volatile ("fsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    /* XXX: we do not restore the exception state */
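    /* FPUC bits 11:10 are the rounding control; keep those and reset
       everything else to the 0x037f default (all exceptions masked,
       extended precision). */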
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
    env->native_fp_regs = 0;
}
#endif