VirtualBox

source: vbox/trunk/src/recompiler/new/target-i386/helper2.c@ 1144

Last change on this file since 1144 was 1111, checked in by vboxsync, 18 years ago

Wrong changes in old sync

  • Property svn:eol-style set to native
File size: 33.4 KB
Line 
1/*
2 * i386 helpers (without register variable usage)
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20#include <stdarg.h>
21#include <stdlib.h>
22#include <stdio.h>
23#include <string.h>
24#include <inttypes.h>
25#ifndef VBOX
26#include <signal.h>
27#include <assert.h>
28#endif
29
30#include "cpu.h"
31#include "exec-all.h"
32
33//#define DEBUG_MMU
34
35#ifdef USE_CODE_COPY
36#include <asm/ldt.h>
37#include <linux/unistd.h>
38#include <linux/version.h>
39
/* Direct wrapper around the Linux modify_ldt system call; used below to
   install an LDT data segment covering the CPU state for the code-copy
   test path.  func=1 means "write LDT entry". */
int modify_ldt(int func, void *ptr, unsigned long bytecount)
{
    return syscall(__NR_modify_ldt, func, ptr, bytecount);
}
44
45#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66)
46#define modify_ldt_ldt_s user_desc
47#endif
48#endif /* USE_CODE_COPY */
49
#ifdef VBOX
/* VBox build: the caller allocates the CPU state and passes it in, and
   also fills in the cpuid feature fields (see the #ifndef VBOX section
   below, which is skipped in that case). */
CPUX86State *cpu_x86_init(CPUX86State *env)
{
#else /* !VBOX */
/* Allocate and initialize a new x86 CPU state; returns NULL on OOM. */
CPUX86State *cpu_x86_init(void)
{
    CPUX86State *env;
#endif /* !VBOX */
    static int inited;   /* one-time initialization of translator tables */

#ifndef VBOX
    env = qemu_mallocz(sizeof(CPUX86State));
    if (!env)
        return NULL;
#endif /* !VBOX */
    cpu_exec_init(env);

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
    }
#ifdef USE_CODE_COPY
    /* testing code for code copy case */
    {
        struct modify_ldt_ldt_s ldt;

        ldt.entry_number = 1;
        ldt.base_addr = (unsigned long)env;
        ldt.limit = (sizeof(CPUState) + 0xfff) >> 12;
        ldt.seg_32bit = 1;
        ldt.contents = MODIFY_LDT_CONTENTS_DATA;
        ldt.read_exec_only = 0;
        ldt.limit_in_pages = 1;
        ldt.seg_not_present = 0;
        ldt.useable = 1;
        modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */

        /* load %fs with selector index 1, TI=1 (LDT), RPL=3 so the
           generated code can reach the CPU state through %fs */
        asm volatile ("movl %0, %%fs" : : "r" ((1 << 3) | 7));
    }
#endif
#ifndef VBOX /* cpuid_features is initialized by caller */
    {
        int family, model, stepping;
#ifdef TARGET_X86_64
        env->cpuid_vendor1 = 0x68747541; /* "Auth" */
        env->cpuid_vendor2 = 0x69746e65; /* "enti" */
        env->cpuid_vendor3 = 0x444d4163; /* "cAMD" */
        family = 6;
        model = 2;
        stepping = 3;
#else
        env->cpuid_vendor1 = 0x756e6547; /* "Genu" */
        env->cpuid_vendor2 = 0x49656e69; /* "ineI" */
        env->cpuid_vendor3 = 0x6c65746e; /* "ntel" */
#if 0
        /* pentium 75-200 */
        family = 5;
        model = 2;
        stepping = 11;
#else
        /* pentium pro */
        family = 6;
        model = 3;
        stepping = 3;
#endif
#endif
        env->cpuid_level = 2;
        env->cpuid_version = (family << 8) | (model << 4) | stepping;
        env->cpuid_features = (CPUID_FP87 | CPUID_DE | CPUID_PSE |
                               CPUID_TSC | CPUID_MSR | CPUID_MCE |
                               CPUID_CX8 | CPUID_PGE | CPUID_CMOV |
                               CPUID_PAT);
        env->pat = 0x0007040600070406ULL;
        env->cpuid_ext_features = CPUID_EXT_SSE3;
        env->cpuid_features |= CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | CPUID_PAE | CPUID_SEP;
        env->cpuid_features |= CPUID_APIC;
        env->cpuid_xlevel = 0;
        {
            const char *model_id = "QEMU Virtual CPU version " QEMU_VERSION;
            int c, len, i;
            len = strlen(model_id);
            /* pack the model string into 32-bit words, little-endian,
               NUL-padded out to the full 48 bytes */
            for(i = 0; i < 48; i++) {
                if (i >= len)
                    c = '\0';
                else
                    c = model_id[i];
                env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
            }
        }
#ifdef TARGET_X86_64
        /* currently not enabled for std i386 because not fully tested */
        env->cpuid_ext2_features = (env->cpuid_features & 0x0183F3FF);
        env->cpuid_ext2_features |= CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX;
        env->cpuid_xlevel = 0x80000008;

        /* these features are needed for Win64 and aren't fully implemented */
        env->cpuid_features |= CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA;
        /* this feature is needed for Solaris and isn't fully implemented */
        env->cpuid_features |= CPUID_PSE36;
#endif
    }
#endif /* VBOX */
    cpu_reset(env);
#ifdef USE_KQEMU
    kqemu_init(env);
#endif
    return env;
}
159
160/* NOTE: must be called outside the CPU execute loop */
161void cpu_reset(CPUX86State *env)
162{
163 int i;
164
165 memset(env, 0, offsetof(CPUX86State, breakpoints));
166
167 tlb_flush(env, 1);
168
169 /* init to reset state */
170
171#ifdef CONFIG_SOFTMMU
172 env->hflags |= HF_SOFTMMU_MASK;
173#endif
174
175 cpu_x86_update_cr0(env, 0x60000010);
176 env->a20_mask = 0xffffffff;
177 env->smbase = 0x30000;
178
179 env->idt.limit = 0xffff;
180 env->gdt.limit = 0xffff;
181 env->ldt.limit = 0xffff;
182 env->ldt.flags = DESC_P_MASK;
183 env->tr.limit = 0xffff;
184 env->tr.flags = DESC_P_MASK;
185
186 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 0);
187 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 0);
188 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 0);
189 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 0);
190 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 0);
191 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 0);
192
193 env->eip = 0xfff0;
194 env->regs[R_EDX] = 0x600; /* indicate P6 processor */
195
196 env->eflags = 0x2;
197
198 /* FPU init */
199 for(i = 0;i < 8; i++)
200 env->fptags[i] = 1;
201 env->fpuc = 0x37f;
202
203 env->mxcsr = 0x1f80;
204}
205
#ifndef VBOX
/* Free a CPU state allocated by cpu_x86_init().  Not built for VBox,
   where the caller owns the state it passed in. */
void cpu_x86_close(CPUX86State *env)
{
    free(env);
}
#endif
212
/***********************************************************/
/* x86 debug */

/* Printable names for the lazy condition-code operations; indexed by
   env->cc_op in cpu_dump_state() below, so the order here must match
   the CC_OP_* enumeration (CC_OP_DYNAMIC .. CC_OP_NB). */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
270
/* Pretty-print the architectural CPU state (general registers, segment
   registers, descriptor tables, control registers, and optionally the
   lazy condition-code state and FPU/SSE registers) to 'f' through
   'cpu_fprintf'.  'flags' is a mask of X86_DUMP_CCOP / X86_DUMP_FPU. */
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    /* order follows the R_ES..R_GS segment register numbering */
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        /* 64-bit code segment: dump the full 64-bit register file */
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    (env->hflags >> HF_HALTED_SHIFT) & 1);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    (env->hflags >> HF_HALTED_SHIFT) & 1);
    }

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        /* long mode: segment/table bases are 64-bit */
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
    } else
#endif
    {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
    }
    if (flags & X86_DUMP_CCOP) {
        /* named op if in range, raw number otherwise */
        if ((unsigned)env->cc_op < CC_OP_NB)
            qemu_snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            qemu_snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        /* rebuild the abridged (1 bit per register) tag word */
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            /* split the 80-bit long double into mantissa + sign/exponent */
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            /* two registers per output line */
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}
477
478/***********************************************************/
479/* x86 mmu */
480/* XXX: add PGE support */
481
482void cpu_x86_set_a20(CPUX86State *env, int a20_state)
483{
484 a20_state = (a20_state != 0);
485 if (a20_state != ((env->a20_mask >> 20) & 1)) {
486#if defined(DEBUG_MMU)
487 printf("A20 update: a20=%d\n", a20_state);
488#endif
489 /* if the cpu is currently executing code, we must unlink it and
490 all the potentially executing TB */
491 cpu_interrupt(env, CPU_INTERRUPT_EXITTB);
492
493 /* when a20 is changed, all the MMU mappings are invalid, so
494 we must flush everything */
495 tlb_flush(env, 1);
496 env->a20_mask = 0xffefffff | (a20_state << 20);
497 }
498}
499
/* Install a new CR0 value, keeping the cached hflags (PE/ADDSEG/FPU
   bits) and long-mode state in sync, and flushing the TLB when a
   paging-related bit changes. */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    /* PG, WP or PE changing invalidates all cached translations */
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags; PE is bit 0 so pe_state is 0/1 */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags: copy the CR0 MP/EM/TS bits into hflags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
#ifdef VBOX
    /* let the VBox recompiler glue track the new CPU mode */
    remR3ChangeCpuMode(env);
#endif
}
543
544/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
545 the PDPT */
546void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
547{
548 env->cr[3] = new_cr3;
549 if (env->cr[0] & CR0_PG_MASK) {
550#if defined(DEBUG_MMU)
551 printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
552#endif
553 tlb_flush(env, 0);
554 }
555}
556
557void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
558{
559#if defined(DEBUG_MMU)
560 printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
561#endif
562 if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
563 (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
564 tlb_flush(env, 1);
565 }
566 /* SSE handling */
567 if (!(env->cpuid_features & CPUID_SSE))
568 new_cr4 &= ~CR4_OSFXSR_MASK;
569 if (new_cr4 & CR4_OSFXSR_MASK)
570 env->hflags |= HF_OSFXSR_MASK;
571 else
572 env->hflags &= ~HF_OSFXSR_MASK;
573
574 env->cr[4] = new_cr4;
575#ifdef VBOX
576 remR3ChangeCpuMode(env);
577#endif
578}
579
/* Invalidate the cached translation for a single virtual page.
   XXX: also flush 4MB pages */
void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
{
#if defined(DEBUG) && defined(VBOX)
    uint32_t pde;
    uint8_t *pde_ptr;

    /* page directory entry (legacy 32-bit layout only) */
    pde_ptr = remR3GCPhys2HCVirt(env, (((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & env->a20_mask));
    pde = ldl_raw(pde_ptr);
    /* if PSE bit is set, then we use a 4MB page — warn, since only the
       single 4K page below gets flushed */
    if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
        printf("cpu_x86_flush_tlb: 4 MB page!!!!!\n");
    }
#endif
    tlb_flush_page(env, addr);
}
597
598#if defined(CONFIG_USER_ONLY)
599
600int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
601 int is_write, int is_user, int is_softmmu)
602{
603 /* user mode only emulation */
604 is_write &= 1;
605 env->cr[2] = addr;
606 env->error_code = (is_write << PG_ERROR_W_BIT);
607 env->error_code |= PG_ERROR_U_MASK;
608 env->exception_index = EXCP0E_PAGE;
609 return 1;
610}
611
/* User-mode-only emulation: addresses are identity-mapped. */
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}
616
617#else
618
619#define PHYS_ADDR_MASK 0xfffff000
620
/* Walk the guest page tables for 'addr' and install the translation in
   the TLB, updating accessed/dirty bits along the way.
   is_write1: 0 = read, 1 = write, 2 = instruction fetch (NX check).
   return value:
   -1 = cannot handle fault
    0 = nothing more to do
    1 = generate PF fault
    2 = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int is_user, int is_softmmu)
{
    uint64_t ptep, pte;
    /* NOTE(review): table-entry addresses are kept in 32-bit variables,
       so physical addresses above 4G are not reachable (see the XXX
       below) */
    uint32_t pdpe_addr, pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write;
    unsigned long paddr, page_offset;
    target_ulong vaddr, virt_addr;

#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* paging disabled: identity mapping, full access */
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;

        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr;
            uint64_t pml4e;
            int32_t sext;

            /* test virtual address sign extension (non-canonical => #GP) */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* NX set while EFER.NXE is clear is a reserved-bit fault */
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            /* ptep accumulates USER/RW (AND) and NX (inverted so that
               AND works for it too) across all levels */
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* legacy PAE: 4-entry PDPT, no access bits at this level */
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            /* undo the inversion: PG_NX_MASK in ptep now means "no exec" */
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                /* supervisor writes only fault when CR0.WP is set */
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        /* legacy 32-bit two-level paging */
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, is_user, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    /* record fault address and build the #PF error code */
    env->cr[2] = addr;
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    /* instruction fetch with NX enabled sets the I/D bit */
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
909
/* Best-effort virtual-to-physical translation for the debugger: walks
   the page tables without touching accessed/dirty bits or checking
   protections.  Returns -1 if 'addr' is not mapped.
   NOTE(review): entries are loaded with ldl_phys (32-bit) even in
   PAE/long mode, so upper entry bits are ignored here — appears to be
   a deliberate simplification for this debug-only path; confirm. */
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint32_t pde_addr, pte_addr;
    uint32_t pde, pte, paddr, page_offset, page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        /* these shadow the outer declarations for the PAE walk */
        uint32_t pdpe_addr, pde_addr, pte_addr;
        uint32_t pdpe;

        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldl_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            /* legacy PAE: 4-entry PDPT */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldl_phys(pte_addr);
        }
    } else {
        if (!(env->cr[0] & CR0_PG_MASK)) {
            /* paging disabled: identity mapping */
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
997#endif /* !CONFIG_USER_ONLY */
998
999#if defined(USE_CODE_COPY)
/* In-memory image used by the fsave/frstor instructions below
   (32-bit protected-mode layout: the 16-bit control, status and tag
   words are each padded to 32 bits, followed by the instruction and
   operand pointers and the eight 80-bit data registers). */
struct fpstate {
    uint16_t fpuc;      /* FPU control word */
    uint16_t dummy1;    /* padding */
    uint16_t fpus;      /* FPU status word */
    uint16_t dummy2;
    uint16_t fptag;     /* full tag word, 2 bits per register */
    uint16_t dummy3;

    uint32_t fpip;      /* FPU instruction pointer offset */
    uint32_t fpcs;      /* FPU instruction pointer selector */
    uint32_t fpoo;      /* FPU operand pointer offset */
    uint32_t fpos;      /* FPU operand pointer selector */
    uint8_t fpregs1[8 * 10]; /* eight 80-bit (10-byte) data registers */
};
1014
1015void restore_native_fp_state(CPUState *env)
1016{
1017 int fptag, i, j;
1018 struct fpstate fp1, *fp = &fp1;
1019
1020 fp->fpuc = env->fpuc;
1021 fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
1022 fptag = 0;
1023 for (i=7; i>=0; i--) {
1024 fptag <<= 2;
1025 if (env->fptags[i]) {
1026 fptag |= 3;
1027 } else {
1028 /* the FPU automatically computes it */
1029 }
1030 }
1031 fp->fptag = fptag;
1032 j = env->fpstt;
1033 for(i = 0;i < 8; i++) {
1034 memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
1035 j = (j + 1) & 7;
1036 }
1037 asm volatile ("frstor %0" : "=m" (*fp));
1038 env->native_fp_regs = 1;
1039}
1040
1041void save_native_fp_state(CPUState *env)
1042{
1043 int fptag, i, j;
1044 uint16_t fpuc;
1045 struct fpstate fp1, *fp = &fp1;
1046
1047 asm volatile ("fsave %0" : : "m" (*fp));
1048 env->fpuc = fp->fpuc;
1049 env->fpstt = (fp->fpus >> 11) & 7;
1050 env->fpus = fp->fpus & ~0x3800;
1051 fptag = fp->fptag;
1052 for(i = 0;i < 8; i++) {
1053 env->fptags[i] = ((fptag & 3) == 3);
1054 fptag >>= 2;
1055 }
1056 j = env->fpstt;
1057 for(i = 0;i < 8; i++) {
1058 memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
1059 j = (j + 1) & 7;
1060 }
1061 /* we must restore the default rounding state */
1062 /* XXX: we do not restore the exception state */
1063 fpuc = 0x037f | (env->fpuc & (3 << 10));
1064 asm volatile("fldcw %0" : : "m" (fpuc));
1065 env->native_fp_regs = 0;
1066}
1067#endif
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette