VirtualBox

source: vbox/trunk/src/recompiler_new/target-i386/helper.c@ 18922

Last change on this file since 18922 was 18473, checked in by vboxsync, 16 years ago

REM/helper.c: warning about unused static function.

  • Property svn:eol-style set to native
File size: 43.9 KB
Line 
1/*
2 * i386 helpers (without register variable usage)
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29#include <stdarg.h>
30#include <stdlib.h>
31#include <stdio.h>
32#include <string.h>
33#ifndef VBOX
34#include <inttypes.h>
35#include <signal.h>
36#include <assert.h>
37#endif
38
39#include "cpu.h"
40#include "exec-all.h"
41#include "svm.h"
42#include "qemu-common.h"
43
44//#define DEBUG_MMU
45
46static int cpu_x86_register (CPUX86State *env, const char *cpu_model);
47
48#ifndef VBOX
/* Parse a single CPUID feature-flag name (e.g. "sse", "nx") and set the
 * corresponding bit in one of the four feature bitmaps.  The four tables
 * below mirror, bit for bit, CPUID leaf 1 EDX, leaf 1 ECX, leaf
 * 0x80000001 EDX and leaf 0x80000001 ECX respectively.  An unknown name
 * is reported on stderr and otherwise ignored.
 *
 * Fix: the extended-EDX table spelled "mtrr" as "mttr", so the
 * "-mttr"/"+mttr" command-line flag matched a name that does not exist
 * in either the Intel or AMD CPUID documentation. */
static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
                                    uint32_t *ext_features,
                                    uint32_t *ext2_features,
                                    uint32_t *ext3_features)
{
    int i;
    /* feature flags taken from "Intel Processor Identification and the CPUID
     * Instruction" and AMD's "CPUID Specification". In cases of disagreement
     * about feature names, the Linux name is used. */
    static const char *feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
        "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
    };
    static const char *ext_feature_name[] = {
        "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
        "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
        NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };
    static const char *ext2_feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
        "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
    };
    static const char *ext3_feature_name[] = {
        "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
        "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };

    /* each table describes exactly one 32-bit CPUID register; the first
     * table containing the name wins */
    for ( i = 0 ; i < 32 ; i++ )
        if (feature_name[i] && !strcmp (flagname, feature_name[i])) {
            *features |= 1 << i;
            return;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (ext_feature_name[i] && !strcmp (flagname, ext_feature_name[i])) {
            *ext_features |= 1 << i;
            return;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (ext2_feature_name[i] && !strcmp (flagname, ext2_feature_name[i])) {
            *ext2_features |= 1 << i;
            return;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (ext3_feature_name[i] && !strcmp (flagname, ext3_feature_name[i])) {
            *ext3_features |= 1 << i;
            return;
        }
    fprintf(stderr, "CPU feature %s not found\n", flagname);
}
105#endif /* !VBOX */
/* Create (non-VBOX build) or initialise (VBOX build) a CPUX86State for
 * the given CPU model string and return it, or NULL on failure.  In the
 * VBOX build the state object is allocated by the caller (REM) and
 * passed in, so only initialisation is performed here. */
#ifndef VBOX
CPUX86State *cpu_x86_init(const char *cpu_model)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    if (!env)
        return NULL;
#else
CPUX86State *cpu_x86_init(CPUX86State *env, const char *cpu_model)
{
    static int inited;
#endif
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables (once per process, not per vCPU) */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
    }
    /* unknown model string -> fail; cpu_x86_close() frees env only in
       the non-VBOX build */
    if (cpu_x86_register(env, cpu_model) < 0) {
        cpu_x86_close(env);
        return NULL;
    }
    cpu_reset(env);
#ifdef USE_KQEMU
    kqemu_init(env);
#endif
    return env;
}
138
/* Static description of one built-in CPU model: everything needed by
 * cpu_x86_register() to fill the CPUID-related fields of CPUX86State. */
typedef struct x86_def_t {
    const char *name;       /* model name matched against the -cpu string */
    uint32_t level;         /* highest standard CPUID leaf */
    uint32_t vendor1, vendor2, vendor3; /* vendor id, e.g. "Auth"/"enti"/"cAMD" */
    int family;             /* packed as (family << 8) into cpuid_version */
    int model;              /* packed as (model << 4) into cpuid_version */
    int stepping;           /* low nibble of cpuid_version */
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t xlevel;        /* highest extended CPUID leaf (0x8000xxxx) */
    char model_id[48];      /* brand string, NUL-padded to 48 bytes */
} x86_def_t;
150
151#ifndef VBOX
/* Baseline CPUID feature masks for the built-in models below, built
 * incrementally: each generation adds to the previous one. */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
/* Built-in CPU model table, searched by name in cpu_x86_find_by_name(). */
static x86_def_t x86_defs[] = {
#ifdef TARGET_X86_64
    {
        .name = "qemu64",
        .level = 2,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES |
        /* these features are needed for Win64 and aren't fully implemented */
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
        /* this feature is needed for Solaris and isn't fully implemented */
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000000A,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "core2duo",
        .level = 10,
        .family = 6,
        .model = 15,
        .stepping = 11,
        /* The original CPU also implements these features:
           CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
           CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* The original CPU also implements these ext features:
           CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
           CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
    },
#endif
    {
        .name = "qemu32",
        .level = 2,
        .family = 6,
        .model = 3,
        .stepping = 3,
        .features = PPRO_FEATURES,
        .ext_features = CPUID_EXT_SSE3,
        .xlevel = 0,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "coreduo",
        .level = 10,
        .family = 6,
        .model = 14,
        .stepping = 8,
        /* The original CPU also implements these features:
           CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
           CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
        /* The original CPU also implements these ext features:
           CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
           CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        .ext2_features = CPUID_EXT2_NX,
        .xlevel = 0x80000008,
        .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
    },
    {
        .name = "486",
        .level = 0,
        .family = 4,
        .model = 0,
        .stepping = 0,
        .features = I486_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium",
        .level = 1,
        .family = 5,
        .model = 4,
        .stepping = 3,
        .features = PENTIUM_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium2",
        .level = 2,
        .family = 6,
        .model = 5,
        .stepping = 2,
        .features = PENTIUM2_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium3",
        .level = 2,
        .family = 6,
        .model = 7,
        .stepping = 3,
        .features = PENTIUM3_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "athlon",
        .level = 2,
        .vendor1 = 0x68747541, /* "Auth" */
        .vendor2 = 0x69746e65, /* "enti" */
        .vendor3 = 0x444d4163, /* "cAMD" */
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .xlevel = 0x80000008,
        /* XXX: put another string ? */
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "n270",
        /* original is on level 10 */
        .level = 5,
        .family = 6,
        .model = 28,
        .stepping = 2,
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
        /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
         * CPUID_HT | CPUID_TM | CPUID_PBE */
        /* Some CPUs got no CPUID_SEP */
        .ext_features = CPUID_EXT_MONITOR |
            CPUID_EXT_SSE3 /* PNI */,
        /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
         * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x8000000A,
        .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
    },
};
312
/* Parse a CPU model string of the form
 *     name[,+flag][,-flag][,key=value]...
 * into *x86_cpu_def.  The base definition is looked up by name in
 * x86_defs[]; '+flag'/'-flag' entries then set/clear individual feature
 * bits (minus wins over plus, applied last below), and key=value entries
 * override family/model/stepping/vendor/model_id.
 * Returns 0 on success, -1 on any parse error.
 *
 * NOTE(review): uses strtok(), so this is not reentrant.  Also 'family'
 * has no upper-bound check, while model/stepping are limited to 0xf --
 * confirm whether large families are intended to be representable. */
static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
{
    unsigned int i;
    x86_def_t *def;

    char *s = strdup(cpu_model);
    char *featurestr, *name = strtok(s, ",");
    uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
    uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
    int family = -1, model = -1, stepping = -1;

    /* find the base model by name */
    def = NULL;
    for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++) {
        if (strcmp(name, x86_defs[i].name) == 0) {
            def = &x86_defs[i];
            break;
        }
    }
    if (!def)
        goto error;
    memcpy(x86_cpu_def, def, sizeof(*def));

    featurestr = strtok(NULL, ",");

    while (featurestr) {
        char *val;
        if (featurestr[0] == '+') {
            add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
        } else if (featurestr[0] == '-') {
            add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
        } else if ((val = strchr(featurestr, '='))) {
            *val = 0; val++;    /* split "key=value" in place */
            if (!strcmp(featurestr, "family")) {
                char *err;
                family = strtol(val, &err, 10);
                if (!*val || *err || family < 0) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->family = family;
            } else if (!strcmp(featurestr, "model")) {
                char *err;
                model = strtol(val, &err, 10);
                if (!*val || *err || model < 0 || model > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->model = model;
            } else if (!strcmp(featurestr, "stepping")) {
                char *err;
                stepping = strtol(val, &err, 10);
                if (!*val || *err || stepping < 0 || stepping > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->stepping = stepping;
            } else if (!strcmp(featurestr, "vendor")) {
                if (strlen(val) != 12) {
                    fprintf(stderr, "vendor string must be 12 chars long\n");
                    goto error;
                }
                /* pack the 12-char vendor id into the three CPUID words,
                   little-endian (4 chars per word) */
                x86_cpu_def->vendor1 = 0;
                x86_cpu_def->vendor2 = 0;
                x86_cpu_def->vendor3 = 0;
                for(i = 0; i < 4; i++) {
                    x86_cpu_def->vendor1 |= ((uint8_t)val[i    ]) << (8 * i);
                    x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
                    x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
                }
            } else if (!strcmp(featurestr, "model_id")) {
                pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
                        val);
            } else {
                fprintf(stderr, "unrecognized feature %s\n", featurestr);
                goto error;
            }
        } else {
            fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
            goto error;
        }
        featurestr = strtok(NULL, ",");
    }
    /* apply the accumulated +/- feature bits; minus is applied last */
    x86_cpu_def->features |= plus_features;
    x86_cpu_def->ext_features |= plus_ext_features;
    x86_cpu_def->ext2_features |= plus_ext2_features;
    x86_cpu_def->ext3_features |= plus_ext3_features;
    x86_cpu_def->features &= ~minus_features;
    x86_cpu_def->ext_features &= ~minus_ext_features;
    x86_cpu_def->ext2_features &= ~minus_ext2_features;
    x86_cpu_def->ext3_features &= ~minus_ext3_features;
    free(s);
    return 0;

error:
    free(s);
    return -1;
}
410
411void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
412{
413 unsigned int i;
414
415 for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++)
416 (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
417}
418#endif /* !VBOX */
419
/* Fill the CPUID-related fields of *env from the CPU model named by
 * cpu_model.  Returns 0 on success, -1 if the model string is invalid.
 * In the VBOX build this is a no-op that always succeeds (the CPUID
 * values are supplied elsewhere, so cpu_model is ignored). */
static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
{
#ifndef VBOX
    x86_def_t def1, *def = &def1;

    if (cpu_x86_find_by_name(def, cpu_model) < 0)
        return -1;
    /* default to the GenuineIntel vendor id when the model leaves it 0 */
    if (def->vendor1) {
        env->cpuid_vendor1 = def->vendor1;
        env->cpuid_vendor2 = def->vendor2;
        env->cpuid_vendor3 = def->vendor3;
    } else {
        env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
        env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
        env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
    }
    env->cpuid_level = def->level;
    /* pack family/model/stepping into the CPUID.1 EAX layout */
    env->cpuid_version = (def->family << 8) | (def->model << 4) | def->stepping;
    env->cpuid_features = def->features;
    env->pat = 0x0007040600070406ULL;
    env->cpuid_ext_features = def->ext_features;
    env->cpuid_ext2_features = def->ext2_features;
    env->cpuid_xlevel = def->xlevel;
    env->cpuid_ext3_features = def->ext3_features;
    {
        /* pack the brand string into the cpuid_model words, 4 chars per
           32-bit word, NUL-padded to the full 48 bytes */
        const char *model_id = def->model_id;
        int c, len, i;
        if (!model_id)
            model_id = "";
        len = strlen(model_id);
        for(i = 0; i < 48; i++) {
            if (i >= len)
                c = '\0';
            else
                c = (uint8_t)model_id[i];
            env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
        }
    }
#endif // !VBOX
    return 0;
}
461
/* NOTE: must be called outside the CPU execute loop */
/* Put the CPU back into its power-on state: real mode, CS base at the
 * reset vector 0xffff0000 with EIP 0xfff0, empty FPU register stack.
 * All CPUX86State fields up to 'breakpoints' are cleared first. */
void cpu_reset(CPUX86State *env)
{
    int i;

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;   /* GIF set */

    cpu_x86_update_cr0(env, 0x60000010);    /* CD+NW set, ET set, paging off */
    env->a20_mask = ~0x0;                   /* A20 gate open */
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* real-mode segments; CS base 0xffff0000 so CS:IP hits the reset vector */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);

    env->eip = 0xfff0;
#ifndef VBOX
    env->regs[R_EDX] = env->cpuid_version;
#else
    /** @todo: is it right? */
    env->regs[R_EDX] = 0x600; /* indicate P6 processor */
#endif

    env->eflags = 0x2;  /* only the always-one reserved bit set */

    /* FPU init: all tags empty, default control word */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;
}
521
522void cpu_x86_close(CPUX86State *env)
523{
524#ifndef VBOX
525 qemu_free(env);
526#endif
527}
528
529/***********************************************************/
530/* x86 debug */
531
/* Printable names of the lazy condition-code operation states, used by
 * cpu_dump_state() when X86_DUMP_CCOP is requested.
 * NOTE(review): indexed by env->cc_op, so the order must stay in sync
 * with the CC_OP_* enumeration -- confirm against cpu.h when editing. */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
586
/* Dump the guest-visible CPU state (general registers, segments,
 * descriptor tables, control registers, and -- depending on the
 * X86_DUMP_CCOP / X86_DUMP_FPU bits in 'flags' -- the lazy condition
 * codes and the FPU/SSE registers) to stream f via the fprintf-like
 * callback.  Uses 64-bit formats when the CPU is in 64-bit code. */
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        /* 32-bit register dump */
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

    /* segment registers and descriptor tables; 64-bit bases in long mode */
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
    } else
#endif
    {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
    }
    if (flags & X86_DUMP_CCOP) {
        /* print the lazy condition-code state; fall back to the raw
           number when cc_op is out of the named range */
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        /* condense the per-register tag bytes into the FTW bitmap */
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        /* 16 XMM registers in 64-bit code, 8 otherwise */
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}
793
794/***********************************************************/
795/* x86 mmu */
796/* XXX: add PGE support */
797
798void cpu_x86_set_a20(CPUX86State *env, int a20_state)
799{
800 a20_state = (a20_state != 0);
801 if (a20_state != ((env->a20_mask >> 20) & 1)) {
802#if defined(DEBUG_MMU)
803 printf("A20 update: a20=%d\n", a20_state);
804#endif
805 /* if the cpu is currently executing code, we must unlink it and
806 all the potentially executing TB */
807 cpu_interrupt(env, CPU_INTERRUPT_EXITTB);
808
809 /* when a20 is changed, all the MMU mappings are invalid, so
810 we must flush everything */
811 tlb_flush(env, 1);
812 env->a20_mask = (~0x100000) | (a20_state << 20);
813 }
814}
815
/* Install a new CR0 value and maintain all the state derived from it:
 * TLB contents, long-mode status (EFER.LMA / HF_LMA / HF_CS64), and the
 * PE/ADDSEG/MP/EM/TS bits mirrored into env->hflags. */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    /* any change to PG, WP or PE invalidates the cached translations */
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;     /* ET is always forced on */

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));

#ifdef VBOX
    /* keep the VMM informed about mode changes */
    remR3ChangeCpuMode(env);
#endif
}
860
861/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
862 the PDPT */
863void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
864{
865 env->cr[3] = new_cr3;
866 if (env->cr[0] & CR0_PG_MASK) {
867#if defined(DEBUG_MMU)
868 printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
869#endif
870 tlb_flush(env, 0);
871 }
872}
873
874void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
875{
876#if defined(DEBUG_MMU)
877 printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
878#endif
879 if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
880 (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
881 tlb_flush(env, 1);
882 }
883 /* SSE handling */
884 if (!(env->cpuid_features & CPUID_SSE))
885 new_cr4 &= ~CR4_OSFXSR_MASK;
886 if (new_cr4 & CR4_OSFXSR_MASK)
887 env->hflags |= HF_OSFXSR_MASK;
888 else
889 env->hflags &= ~HF_OSFXSR_MASK;
890
891 env->cr[4] = new_cr4;
892#ifdef VBOX
893 remR3ChangeCpuMode(env);
894#endif
895}
896
/* XXX: also flush 4MB pages */
/* Invalidate the cached translation for a single page. */
void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
{
    tlb_flush_page(env, addr);
}
902
903#if defined(CONFIG_USER_ONLY)
904
905int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
906 int is_write, int mmu_idx, int is_softmmu)
907{
908 /* user mode only emulation */
909 is_write &= 1;
910 env->cr[2] = addr;
911 env->error_code = (is_write << PG_ERROR_W_BIT);
912 env->error_code |= PG_ERROR_U_MASK;
913 env->exception_index = EXCP0E_PAGE;
914 return 1;
915}
916
/* User-mode-only emulation: no MMU, so virtual == physical. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}
921
922#else
923
/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
/* Mask selecting the physical page-frame bits (bit 12 up to the
 * supported physical address width) of a PAE/long-mode page table
 * entry. */
#if defined(USE_KQEMU)
#define PHYS_ADDR_MASK 0xfffff000LL
#else
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif
#endif
935
936/* return value:
937 -1 = cannot handle fault
938 0 = nothing more to do
939 1 = generate PF fault
940 2 = soft MMU activation required for this block
941*/
942int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
943 int is_write1, int mmu_idx, int is_softmmu)
944{
945 uint64_t ptep, pte;
946 target_ulong pde_addr, pte_addr;
947 int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
948 target_phys_addr_t paddr;
949 uint32_t page_offset;
950 target_ulong vaddr, virt_addr;
951
952 is_user = mmu_idx == MMU_USER_IDX;
953#if defined(DEBUG_MMU)
954 printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
955 addr, is_write1, is_user, env->eip);
956#endif
957 is_write = is_write1 & 1;
958
959 if (!(env->cr[0] & CR0_PG_MASK)) {
960 pte = addr;
961 virt_addr = addr & TARGET_PAGE_MASK;
962 prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
963 page_size = 4096;
964 goto do_mapping;
965 }
966
967 if (env->cr[4] & CR4_PAE_MASK) {
968 uint64_t pde, pdpe;
969 target_ulong pdpe_addr;
970
971#ifdef TARGET_X86_64
972 if (env->hflags & HF_LMA_MASK) {
973 uint64_t pml4e_addr, pml4e;
974 int32_t sext;
975
976 /* test virtual address sign extension */
977 sext = (int64_t)addr >> 47;
978 if (sext != 0 && sext != -1) {
979 env->error_code = 0;
980 env->exception_index = EXCP0D_GPF;
981 return 1;
982 }
983
984 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
985 env->a20_mask;
986 pml4e = ldq_phys(pml4e_addr);
987 if (!(pml4e & PG_PRESENT_MASK)) {
988 error_code = 0;
989 goto do_fault;
990 }
991 if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
992 error_code = PG_ERROR_RSVD_MASK;
993 goto do_fault;
994 }
995 if (!(pml4e & PG_ACCESSED_MASK)) {
996 pml4e |= PG_ACCESSED_MASK;
997 stl_phys_notdirty(pml4e_addr, pml4e);
998 }
999 ptep = pml4e ^ PG_NX_MASK;
1000 pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
1001 env->a20_mask;
1002 pdpe = ldq_phys(pdpe_addr);
1003 if (!(pdpe & PG_PRESENT_MASK)) {
1004 error_code = 0;
1005 goto do_fault;
1006 }
1007 if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
1008 error_code = PG_ERROR_RSVD_MASK;
1009 goto do_fault;
1010 }
1011 ptep &= pdpe ^ PG_NX_MASK;
1012 if (!(pdpe & PG_ACCESSED_MASK)) {
1013 pdpe |= PG_ACCESSED_MASK;
1014 stl_phys_notdirty(pdpe_addr, pdpe);
1015 }
1016 } else
1017#endif
1018 {
1019 /* XXX: load them when cr3 is loaded ? */
1020 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
1021 env->a20_mask;
1022 pdpe = ldq_phys(pdpe_addr);
1023 if (!(pdpe & PG_PRESENT_MASK)) {
1024 error_code = 0;
1025 goto do_fault;
1026 }
1027 ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
1028 }
1029
1030 pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
1031 env->a20_mask;
1032 pde = ldq_phys(pde_addr);
1033 if (!(pde & PG_PRESENT_MASK)) {
1034 error_code = 0;
1035 goto do_fault;
1036 }
1037 if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
1038 error_code = PG_ERROR_RSVD_MASK;
1039 goto do_fault;
1040 }
1041 ptep &= pde ^ PG_NX_MASK;
1042 if (pde & PG_PSE_MASK) {
1043 /* 2 MB page */
1044 page_size = 2048 * 1024;
1045 ptep ^= PG_NX_MASK;
1046 if ((ptep & PG_NX_MASK) && is_write1 == 2)
1047 goto do_fault_protect;
1048 if (is_user) {
1049 if (!(ptep & PG_USER_MASK))
1050 goto do_fault_protect;
1051 if (is_write && !(ptep & PG_RW_MASK))
1052 goto do_fault_protect;
1053 } else {
1054 if ((env->cr[0] & CR0_WP_MASK) &&
1055 is_write && !(ptep & PG_RW_MASK))
1056 goto do_fault_protect;
1057 }
1058 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
1059 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
1060 pde |= PG_ACCESSED_MASK;
1061 if (is_dirty)
1062 pde |= PG_DIRTY_MASK;
1063 stl_phys_notdirty(pde_addr, pde);
1064 }
1065 /* align to page_size */
1066 pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
1067 virt_addr = addr & ~(page_size - 1);
1068 } else {
1069 /* 4 KB page */
1070 if (!(pde & PG_ACCESSED_MASK)) {
1071 pde |= PG_ACCESSED_MASK;
1072 stl_phys_notdirty(pde_addr, pde);
1073 }
1074 pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
1075 env->a20_mask;
1076 pte = ldq_phys(pte_addr);
1077 if (!(pte & PG_PRESENT_MASK)) {
1078 error_code = 0;
1079 goto do_fault;
1080 }
1081 if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
1082 error_code = PG_ERROR_RSVD_MASK;
1083 goto do_fault;
1084 }
1085 /* combine pde and pte nx, user and rw protections */
1086 ptep &= pte ^ PG_NX_MASK;
1087 ptep ^= PG_NX_MASK;
1088 if ((ptep & PG_NX_MASK) && is_write1 == 2)
1089 goto do_fault_protect;
1090 if (is_user) {
1091 if (!(ptep & PG_USER_MASK))
1092 goto do_fault_protect;
1093 if (is_write && !(ptep & PG_RW_MASK))
1094 goto do_fault_protect;
1095 } else {
1096 if ((env->cr[0] & CR0_WP_MASK) &&
1097 is_write && !(ptep & PG_RW_MASK))
1098 goto do_fault_protect;
1099 }
1100 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
1101 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
1102 pte |= PG_ACCESSED_MASK;
1103 if (is_dirty)
1104 pte |= PG_DIRTY_MASK;
1105 stl_phys_notdirty(pte_addr, pte);
1106 }
1107 page_size = 4096;
1108 virt_addr = addr & ~0xfff;
1109 pte = pte & (PHYS_ADDR_MASK | 0xfff);
1110 }
1111 } else {
1112 uint32_t pde;
1113
1114 /* page directory entry */
1115 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
1116 env->a20_mask;
1117 pde = ldl_phys(pde_addr);
1118 if (!(pde & PG_PRESENT_MASK)) {
1119 error_code = 0;
1120 goto do_fault;
1121 }
1122 /* if PSE bit is set, then we use a 4MB page */
1123 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
1124 page_size = 4096 * 1024;
1125 if (is_user) {
1126 if (!(pde & PG_USER_MASK))
1127 goto do_fault_protect;
1128 if (is_write && !(pde & PG_RW_MASK))
1129 goto do_fault_protect;
1130 } else {
1131 if ((env->cr[0] & CR0_WP_MASK) &&
1132 is_write && !(pde & PG_RW_MASK))
1133 goto do_fault_protect;
1134 }
1135 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
1136 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
1137 pde |= PG_ACCESSED_MASK;
1138 if (is_dirty)
1139 pde |= PG_DIRTY_MASK;
1140 stl_phys_notdirty(pde_addr, pde);
1141 }
1142
1143 pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
1144 ptep = pte;
1145 virt_addr = addr & ~(page_size - 1);
1146 } else {
1147 if (!(pde & PG_ACCESSED_MASK)) {
1148 pde |= PG_ACCESSED_MASK;
1149 stl_phys_notdirty(pde_addr, pde);
1150 }
1151
1152 /* page directory entry */
1153 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
1154 env->a20_mask;
1155 pte = ldl_phys(pte_addr);
1156 if (!(pte & PG_PRESENT_MASK)) {
1157 error_code = 0;
1158 goto do_fault;
1159 }
1160 /* combine pde and pte user and rw protections */
1161 ptep = pte & pde;
1162 if (is_user) {
1163 if (!(ptep & PG_USER_MASK))
1164 goto do_fault_protect;
1165 if (is_write && !(ptep & PG_RW_MASK))
1166 goto do_fault_protect;
1167 } else {
1168 if ((env->cr[0] & CR0_WP_MASK) &&
1169 is_write && !(ptep & PG_RW_MASK))
1170 goto do_fault_protect;
1171 }
1172 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
1173 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
1174 pte |= PG_ACCESSED_MASK;
1175 if (is_dirty)
1176 pte |= PG_DIRTY_MASK;
1177 stl_phys_notdirty(pte_addr, pte);
1178 }
1179 page_size = 4096;
1180 virt_addr = addr & ~0xfff;
1181 }
1182 }
1183 /* the page can be put in the TLB */
1184 prot = PAGE_READ;
1185 if (!(ptep & PG_NX_MASK))
1186 prot |= PAGE_EXEC;
1187 if (pte & PG_DIRTY_MASK) {
1188 /* only set write access if already dirty... otherwise wait
1189 for dirty access */
1190 if (is_user) {
1191 if (ptep & PG_RW_MASK)
1192 prot |= PAGE_WRITE;
1193 } else {
1194 if (!(env->cr[0] & CR0_WP_MASK) ||
1195 (ptep & PG_RW_MASK))
1196 prot |= PAGE_WRITE;
1197 }
1198 }
1199 do_mapping:
1200 pte = pte & env->a20_mask;
1201
1202 /* Even if 4MB pages, we map only one 4KB page in the cache to
1203 avoid filling it too fast */
1204 page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
1205 paddr = (pte & TARGET_PAGE_MASK) + page_offset;
1206 vaddr = virt_addr + page_offset;
1207
1208 ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
1209 return ret;
1210 do_fault_protect:
1211 error_code = PG_ERROR_P_MASK;
1212 do_fault:
1213 error_code |= (is_write << PG_ERROR_W_BIT);
1214 if (is_user)
1215 error_code |= PG_ERROR_U_MASK;
1216 if (is_write1 == 2 &&
1217 (env->efer & MSR_EFER_NXE) &&
1218 (env->cr[4] & CR4_PAE_MASK))
1219 error_code |= PG_ERROR_I_D_MASK;
1220 if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
1221 /* cr2 is not modified in case of exceptions */
1222 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
1223 addr);
1224 } else {
1225 env->cr[2] = addr;
1226 }
1227 env->error_code = error_code;
1228 env->exception_index = EXCP0E_PAGE;
1229 return 1;
1230}
1231
1232target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
1233{
1234 target_ulong pde_addr, pte_addr;
1235 uint64_t pte;
1236 target_phys_addr_t paddr;
1237 uint32_t page_offset;
1238 int page_size;
1239
1240 if (env->cr[4] & CR4_PAE_MASK) {
1241 target_ulong pdpe_addr;
1242 uint64_t pde, pdpe;
1243
1244#ifdef TARGET_X86_64
1245 if (env->hflags & HF_LMA_MASK) {
1246 uint64_t pml4e_addr, pml4e;
1247 int32_t sext;
1248
1249 /* test virtual address sign extension */
1250 sext = (int64_t)addr >> 47;
1251 if (sext != 0 && sext != -1)
1252 return -1;
1253
1254 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
1255 env->a20_mask;
1256 pml4e = ldq_phys(pml4e_addr);
1257 if (!(pml4e & PG_PRESENT_MASK))
1258 return -1;
1259
1260 pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
1261 env->a20_mask;
1262 pdpe = ldq_phys(pdpe_addr);
1263 if (!(pdpe & PG_PRESENT_MASK))
1264 return -1;
1265 } else
1266#endif
1267 {
1268 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
1269 env->a20_mask;
1270 pdpe = ldq_phys(pdpe_addr);
1271 if (!(pdpe & PG_PRESENT_MASK))
1272 return -1;
1273 }
1274
1275 pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
1276 env->a20_mask;
1277 pde = ldq_phys(pde_addr);
1278 if (!(pde & PG_PRESENT_MASK)) {
1279 return -1;
1280 }
1281 if (pde & PG_PSE_MASK) {
1282 /* 2 MB page */
1283 page_size = 2048 * 1024;
1284 pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
1285 } else {
1286 /* 4 KB page */
1287 pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
1288 env->a20_mask;
1289 page_size = 4096;
1290 pte = ldq_phys(pte_addr);
1291 }
1292 if (!(pte & PG_PRESENT_MASK))
1293 return -1;
1294 } else {
1295 uint32_t pde;
1296
1297 if (!(env->cr[0] & CR0_PG_MASK)) {
1298 pte = addr;
1299 page_size = 4096;
1300 } else {
1301 /* page directory entry */
1302 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
1303 pde = ldl_phys(pde_addr);
1304 if (!(pde & PG_PRESENT_MASK))
1305 return -1;
1306 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
1307 pte = pde & ~0x003ff000; /* align to 4MB */
1308 page_size = 4096 * 1024;
1309 } else {
1310 /* page directory entry */
1311 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
1312 pte = ldl_phys(pte_addr);
1313 if (!(pte & PG_PRESENT_MASK))
1314 return -1;
1315 page_size = 4096;
1316 }
1317 }
1318 pte = pte & env->a20_mask;
1319 }
1320
1321 page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
1322 paddr = (pte & TARGET_PAGE_MASK) + page_offset;
1323 return paddr;
1324}
1325#endif /* !CONFIG_USER_ONLY */
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette