VirtualBox

source: vbox/trunk/src/recompiler/target-i386/helper.c@ 37675

Last change on this file since 37675 was 37675, checked in by vboxsync, 13 years ago

rem: Synced with v0.12.5.

  • Property svn:eol-style set to native
File size: 64.4 KB
Line 
1/*
2 * i386 helpers (without register variable usage)
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20/*
21 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
22 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
23 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
24 * a choice of LGPL license versions is made available with the language indicating
25 * that LGPLv2 or any later version may be used, or where a choice of which version
26 * of the LGPL is applied is otherwise unspecified.
27 */
28
29#include <stdarg.h>
30#include <stdlib.h>
31#include <stdio.h>
32#include <string.h>
33#ifndef VBOX
34#include <inttypes.h>
35#include <signal.h>
36#endif /* !VBOX */
37
38#include "cpu.h"
39#include "exec-all.h"
40#include "qemu-common.h"
41#include "kvm.h"
42
43//#define DEBUG_MMU
44
45#ifndef VBOX
46/* feature flags taken from "Intel Processor Identification and the CPUID
47 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
48 * about feature names, the Linux name is used. */
/* Names for the CPUID leaf 1 EDX feature bits; index i names bit i.
 * NULL marks reserved/unnamed bits. */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
};
/* Names for the CPUID leaf 1 ECX feature bits. */
static const char *ext_feature_name[] = {
    "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
    "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
    NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, "hypervisor",
};
/* Names for the CPUID leaf 0x80000001 EDX feature bits. */
static const char *ext2_feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
    "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
};
/* Names for the CPUID leaf 0x80000001 ECX feature bits. */
static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
73
74static void add_flagname_to_bitmaps(const char *flagname, uint32_t *features,
75 uint32_t *ext_features,
76 uint32_t *ext2_features,
77 uint32_t *ext3_features)
78{
79 int i;
80 int found = 0;
81
82 for ( i = 0 ; i < 32 ; i++ )
83 if (feature_name[i] && !strcmp (flagname, feature_name[i])) {
84 *features |= 1 << i;
85 found = 1;
86 }
87 for ( i = 0 ; i < 32 ; i++ )
88 if (ext_feature_name[i] && !strcmp (flagname, ext_feature_name[i])) {
89 *ext_features |= 1 << i;
90 found = 1;
91 }
92 for ( i = 0 ; i < 32 ; i++ )
93 if (ext2_feature_name[i] && !strcmp (flagname, ext2_feature_name[i])) {
94 *ext2_features |= 1 << i;
95 found = 1;
96 }
97 for ( i = 0 ; i < 32 ; i++ )
98 if (ext3_feature_name[i] && !strcmp (flagname, ext3_feature_name[i])) {
99 *ext3_features |= 1 << i;
100 found = 1;
101 }
102 if (!found) {
103 fprintf(stderr, "CPU feature %s not found\n", flagname);
104 }
105}
106#endif /* !VBOX */
107
/* Static description of a CPU model: everything needed to synthesize the
 * CPUID leaves for that model. */
typedef struct x86_def_t {
    const char *name;                     /* model name as given to -cpu */
    uint32_t level;                       /* maximum basic CPUID leaf */
    uint32_t vendor1, vendor2, vendor3;   /* vendor string words in EBX/EDX/ECX order */
    int family;
    int model;
    int stepping;
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t xlevel;                      /* maximum extended CPUID leaf */
    char model_id[48];                    /* brand string (CPUID 0x80000002..4) */
    int vendor_override;                  /* non-zero when a vendor= option was given */
} x86_def_t;
120
121#ifndef VBOX
/* Cumulative feature sets used by the model table below; each generation's
 * set builds on the previous one. */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
/* Built-in CPU model table, searched by name in cpu_x86_find_by_name().
 * NOTE(review): the 0x0183F3FF masks below appear to select the leaf 1
 * EDX bits that are mirrored in the AMD 0x80000001 EDX leaf — confirm
 * against the CPUID specification before changing. */
static x86_def_t x86_defs[] = {
#ifdef TARGET_X86_64
    {
        .name = "qemu64",
        .level = 4,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES |
        /* these features are needed for Win64 and aren't fully implemented */
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
        /* this feature is needed for Solaris and isn't fully implemented */
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .ext3_features = CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
            CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
        .xlevel = 0x8000000A,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "phenom",
        .level = 5,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 16,
        .model = 2,
        .stepping = 3,
        /* Missing: CPUID_VME, CPUID_HT */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
            CPUID_EXT_POPCNT,
        /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
            CPUID_EXT2_FFXSR,
        /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_CR8LEG,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
        .ext3_features = CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
            CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
        .xlevel = 0x8000001A,
        .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
    },
    {
        .name = "core2duo",
        .level = 10,
        .family = 6,
        .model = 15,
        .stepping = 11,
        /* The original CPU also implements these features:
               CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
               CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* The original CPU also implements these ext features:
               CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
               CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .ext3_features = CPUID_EXT3_LAHF_LM,
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Core(TM)2 Duo CPU     T7700  @ 2.40GHz",
    },
    {
        .name = "kvm64",
        .level = 5,
        .vendor1 = CPUID_VENDOR_INTEL_1,
        .vendor2 = CPUID_VENDOR_INTEL_2,
        .vendor3 = CPUID_VENDOR_INTEL_3,
        .family = 15,
        .model = 6,
        .stepping = 1,
        /* Missing: CPUID_VME, CPUID_HT */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_CX16,
        /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
        .ext3_features = 0,
        .xlevel = 0x80000008,
        .model_id = "Common KVM processor"
    },
#endif
    {
        .name = "qemu32",
        .level = 4,
        .family = 6,
        .model = 3,
        .stepping = 3,
        .features = PPRO_FEATURES,
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_POPCNT,
        .xlevel = 0,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "coreduo",
        .level = 10,
        .family = 6,
        .model = 14,
        .stepping = 8,
        /* The original CPU also implements these features:
               CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
               CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
        /* The original CPU also implements these ext features:
               CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
               CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        .ext2_features = CPUID_EXT2_NX,
        .xlevel = 0x80000008,
        .model_id = "Genuine Intel(R) CPU           T2600  @ 2.16GHz",
    },
    {
        .name = "486",
        .level = 0,
        .family = 4,
        .model = 0,
        .stepping = 0,
        .features = I486_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium",
        .level = 1,
        .family = 5,
        .model = 4,
        .stepping = 3,
        .features = PENTIUM_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium2",
        .level = 2,
        .family = 6,
        .model = 5,
        .stepping = 2,
        .features = PENTIUM2_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium3",
        .level = 2,
        .family = 6,
        .model = 7,
        .stepping = 3,
        .features = PENTIUM3_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "athlon",
        .level = 2,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .xlevel = 0x80000008,
        /* XXX: put another string ? */
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "n270",
        /* original is on level 10 */
        .level = 5,
        .family = 6,
        .model = 28,
        .stepping = 2,
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
        /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
         * CPUID_HT | CPUID_TM | CPUID_PBE */
        /* Some CPUs got no CPUID_SEP */
        .ext_features = CPUID_EXT_MONITOR |
            CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3,
        /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
         * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x8000000A,
        .model_id = "Intel(R) Atom(TM) CPU N270   @ 1.60GHz",
    },
};
337
338static void host_cpuid(uint32_t function, uint32_t count, uint32_t *eax,
339 uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
340
/* Read the host CPU brand string into STR (must hold 48 bytes).
 * Leaves 0x80000002..0x80000004 each supply 16 bytes, in EAX, EBX, ECX,
 * EDX order.  Always returns 0. */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
    int leaf;

    for (leaf = 0; leaf < 3; leaf++) {
        char *dst = str + leaf * 16;

        host_cpuid(0x80000002 + leaf, 0, &eax, &ebx, &ecx, &edx);
        memcpy(dst,      &eax, 4);
        memcpy(dst + 4,  &ebx, 4);
        memcpy(dst + 8,  &ecx, 4);
        memcpy(dst + 12, &edx, 4);
    }
    return 0;
}
355
/* Populate *x86_cpu_def from the host CPU's own CPUID leaves (backs the
 * "host" model).  Always returns 0. */
static int cpu_x86_fill_host(x86_def_t *x86_cpu_def)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    x86_cpu_def->name = "host";
    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->level = eax;
    /* vendor string is returned in EBX, EDX, ECX order */
    x86_cpu_def->vendor1 = ebx;
    x86_cpu_def->vendor2 = edx;
    x86_cpu_def->vendor3 = ecx;

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    /* merge the base and extended family/model fields of leaf 1 EAX */
    x86_cpu_def->family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    x86_cpu_def->model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    x86_cpu_def->stepping = eax & 0x0F;
    x86_cpu_def->ext_features = ecx;
    x86_cpu_def->features = edx;

    host_cpuid(0x80000000, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->xlevel = eax;

    host_cpuid(0x80000001, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->ext2_features = edx;
    x86_cpu_def->ext3_features = ecx;
    cpu_x86_fill_model_id(x86_cpu_def->model_id);
    /* the real host vendor is reported, never overridden */
    x86_cpu_def->vendor_override = 0;

    return 0;
}
385
386static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
387{
388 unsigned int i;
389 x86_def_t *def;
390
391 char *s = strdup(cpu_model);
392 char *featurestr, *name = strtok(s, ",");
393 uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
394 uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
395 uint32_t numvalue;
396
397 def = NULL;
398 for (i = 0; i < ARRAY_SIZE(x86_defs); i++) {
399 if (strcmp(name, x86_defs[i].name) == 0) {
400 def = &x86_defs[i];
401 break;
402 }
403 }
404 if (kvm_enabled() && strcmp(name, "host") == 0) {
405 cpu_x86_fill_host(x86_cpu_def);
406 } else if (!def) {
407 goto error;
408 } else {
409 memcpy(x86_cpu_def, def, sizeof(*def));
410 }
411
412 add_flagname_to_bitmaps("hypervisor", &plus_features,
413 &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
414
415 featurestr = strtok(NULL, ",");
416
417 while (featurestr) {
418 char *val;
419 if (featurestr[0] == '+') {
420 add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
421 } else if (featurestr[0] == '-') {
422 add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
423 } else if ((val = strchr(featurestr, '='))) {
424 *val = 0; val++;
425 if (!strcmp(featurestr, "family")) {
426 char *err;
427 numvalue = strtoul(val, &err, 0);
428 if (!*val || *err) {
429 fprintf(stderr, "bad numerical value %s\n", val);
430 goto error;
431 }
432 x86_cpu_def->family = numvalue;
433 } else if (!strcmp(featurestr, "model")) {
434 char *err;
435 numvalue = strtoul(val, &err, 0);
436 if (!*val || *err || numvalue > 0xff) {
437 fprintf(stderr, "bad numerical value %s\n", val);
438 goto error;
439 }
440 x86_cpu_def->model = numvalue;
441 } else if (!strcmp(featurestr, "stepping")) {
442 char *err;
443 numvalue = strtoul(val, &err, 0);
444 if (!*val || *err || numvalue > 0xf) {
445 fprintf(stderr, "bad numerical value %s\n", val);
446 goto error;
447 }
448 x86_cpu_def->stepping = numvalue ;
449 } else if (!strcmp(featurestr, "level")) {
450 char *err;
451 numvalue = strtoul(val, &err, 0);
452 if (!*val || *err) {
453 fprintf(stderr, "bad numerical value %s\n", val);
454 goto error;
455 }
456 x86_cpu_def->level = numvalue;
457 } else if (!strcmp(featurestr, "xlevel")) {
458 char *err;
459 numvalue = strtoul(val, &err, 0);
460 if (!*val || *err) {
461 fprintf(stderr, "bad numerical value %s\n", val);
462 goto error;
463 }
464 if (numvalue < 0x80000000) {
465 numvalue += 0x80000000;
466 }
467 x86_cpu_def->xlevel = numvalue;
468 } else if (!strcmp(featurestr, "vendor")) {
469 if (strlen(val) != 12) {
470 fprintf(stderr, "vendor string must be 12 chars long\n");
471 goto error;
472 }
473 x86_cpu_def->vendor1 = 0;
474 x86_cpu_def->vendor2 = 0;
475 x86_cpu_def->vendor3 = 0;
476 for(i = 0; i < 4; i++) {
477 x86_cpu_def->vendor1 |= ((uint8_t)val[i ]) << (8 * i);
478 x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
479 x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
480 }
481 x86_cpu_def->vendor_override = 1;
482 } else if (!strcmp(featurestr, "model_id")) {
483 pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
484 val);
485 } else {
486 fprintf(stderr, "unrecognized feature %s\n", featurestr);
487 goto error;
488 }
489 } else {
490 fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
491 goto error;
492 }
493 featurestr = strtok(NULL, ",");
494 }
495 x86_cpu_def->features |= plus_features;
496 x86_cpu_def->ext_features |= plus_ext_features;
497 x86_cpu_def->ext2_features |= plus_ext2_features;
498 x86_cpu_def->ext3_features |= plus_ext3_features;
499 x86_cpu_def->features &= ~minus_features;
500 x86_cpu_def->ext_features &= ~minus_ext_features;
501 x86_cpu_def->ext2_features &= ~minus_ext2_features;
502 x86_cpu_def->ext3_features &= ~minus_ext3_features;
503 free(s);
504 return 0;
505
506error:
507 free(s);
508 return -1;
509}
510
511void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
512{
513 unsigned int i;
514
515 for (i = 0; i < ARRAY_SIZE(x86_defs); i++)
516 (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
517}
518#endif /* !VBOX */
519
/* Initialize the CPUID-related fields of ENV from the -cpu model string.
 * Returns 0 on success, -1 if the model string cannot be parsed.
 * In the VBox build this is a no-op (CPUID is set up by the VMM). */
static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
{
#ifndef VBOX
    x86_def_t def1, *def = &def1;

    if (cpu_x86_find_by_name(def, cpu_model) < 0)
        return -1;
    if (def->vendor1) {
        env->cpuid_vendor1 = def->vendor1;
        env->cpuid_vendor2 = def->vendor2;
        env->cpuid_vendor3 = def->vendor3;
    } else {
        /* models without an explicit vendor default to GenuineIntel */
        env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
        env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
        env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
    }
    env->cpuid_vendor_override = def->vendor_override;
    env->cpuid_level = def->level;
    /* encode family/model/stepping in CPUID leaf 1 EAX layout; families
     * above 0x0f spill into the extended-family field (bits 20..27) */
    if (def->family > 0x0f)
        env->cpuid_version = 0xf00 | ((def->family - 0x0f) << 20);
    else
        env->cpuid_version = def->family << 8;
    env->cpuid_version |= ((def->model & 0xf) << 4) | ((def->model >> 4) << 16);
    env->cpuid_version |= def->stepping;
    env->cpuid_features = def->features;
    env->pat = 0x0007040600070406ULL;
    env->cpuid_ext_features = def->ext_features;
    env->cpuid_ext2_features = def->ext2_features;
    env->cpuid_xlevel = def->xlevel;
    env->cpuid_ext3_features = def->ext3_features;
    {
        /* pack the model id into the cpuid_model words, one byte per
         * iteration, NUL-padding out to the full 48 bytes */
        const char *model_id = def->model_id;
        int c, len, i;
        if (!model_id)
            model_id = "";
        len = strlen(model_id);
        for(i = 0; i < 48; i++) {
            if (i >= len)
                c = '\0';
            else
                c = (uint8_t)model_id[i];
            env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
        }
    }
#endif /* !VBOX */
    return 0;
}
567
/* NOTE: must be called outside the CPU execute loop */
/* Put ENV into the architectural power-on/reset state: real mode,
 * CS=F000 with base FFFF0000, EIP=FFF0, FPU and SSE defaults.
 * Everything up to the `breakpoints' member is zeroed first so that
 * debugger state survives the reset. */
void cpu_reset(CPUX86State *env)
{
    int i;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
    }

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    /* CR0 reset value: CD | NW | ET | extension-type bits, paging off */
    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* CS starts at the top of the 4G space so that the first fetch hits
     * the reset vector; all other segments are flat 64K real-mode ones */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
#ifndef VBOX
    env->regs[R_EDX] = env->cpuid_version;
#else
    /** @todo: is it right? */
    env->regs[R_EDX] = 0x600; /* indicate P6 processor */
#endif

    env->eflags = 0x2;

    /* FPU init */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);

#ifndef VBOX
    env->mcg_status = 0;
#endif
}
648
/* Release ENV.  In the VBox build the CPU state is owned elsewhere,
 * so nothing is freed here. */
void cpu_x86_close(CPUX86State *env)
{
#ifndef VBOX
    qemu_free(env);
#endif
}
655
656/***********************************************************/
657/* x86 debug */
658
/* Printable names for the lazy condition-code operations; indexed by
 * env->cc_op in cpu_dump_state() (bounds-checked against CC_OP_NB). */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
713
/* Dump one cached segment descriptor (selector, base, limit, flags and a
 * decoded type description) on a single output line.
 * NOTE: in the VBox build, cpu_fprintf is locally remapped to RTLogPrintf
 * for the duration of this function; the macro is undone at the end. */
static void
cpu_x86_dump_seg_cache(CPUState *env, FILE *f,
                       int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                       const char *name, struct SegmentCache *sc)
{
#ifdef VBOX
# define cpu_fprintf(f, ...) RTLogPrintf(__VA_ARGS__)
#endif
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags);
    }

    /* type decoding below only makes sense for present protected-mode
     * descriptors */
    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        /* system descriptor: name depends on 32- vs 64-bit mode */
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                    [(sc->flags & DESC_TYPE_MASK)
                                     >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
#ifdef VBOX
# undef cpu_fprintf
#endif
}
774
/* Dump the full architectural CPU state of ENV: general registers,
 * eflags, segment/descriptor caches, control and debug registers, plus
 * (per FLAGS) the lazy condition-code state (X86_DUMP_CCOP) and the
 * FPU/SSE registers (X86_DUMP_FPU).
 * NOTE: in the VBox build, cpu_fprintf is locally remapped to
 * RTLogPrintf; the macro is undone at the end of the function. */
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

#ifdef VBOX
# define cpu_fprintf(f, ...) RTLogPrintf(__VA_ARGS__)
#endif
    cpu_synchronize_state(env);

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        /* 64-bit mode: print the full 16 general registers */
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%08x ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n", env->dr[6], env->dr[7]);
    }
    if (flags & X86_DUMP_CCOP) {
        /* lazy condition-code state: source, destination and operation */
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        /* FTW is stored inverted internally (fptags[i]==1 means empty) */
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
#ifdef VBOX
# undef cpu_fprintf
#endif
}
965
966/***********************************************************/
967/* x86 mmu */
968/* XXX: add PGE support */
969
970void cpu_x86_set_a20(CPUX86State *env, int a20_state)
971{
972 a20_state = (a20_state != 0);
973 if (a20_state != ((env->a20_mask >> 20) & 1)) {
974#if defined(DEBUG_MMU)
975 printf("A20 update: a20=%d\n", a20_state);
976#endif
977 /* if the cpu is currently executing code, we must unlink it and
978 all the potentially executing TB */
979 cpu_interrupt(env, CPU_INTERRUPT_EXITTB);
980
981 /* when a20 is changed, all the MMU mappings are invalid, so
982 we must flush everything */
983 tlb_flush(env, 1);
984 env->a20_mask = ~(1 << 20) | (a20_state << 20);
985 }
986}
987
/* Install NEW_CR0 (with ET forced on), flushing the TLB when any of the
 * translation-related bits (PG/WP/PE) change and handling long-mode
 * entry/exit, then recompute the derived hflags bits. */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags: copy the MP/EM/TS bits straight from CR0 */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
#ifdef VBOX

    /* notify the VBox recompiler glue of a possible CPU mode change */
    remR3ChangeCpuMode(env);
#endif
}
1032
1033/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
1034 the PDPT */
1035void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
1036{
1037 env->cr[3] = new_cr3;
1038 if (env->cr[0] & CR0_PG_MASK) {
1039#if defined(DEBUG_MMU)
1040 printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
1041#endif
1042 tlb_flush(env, 0);
1043 }
1044}
1045
1046void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
1047{
1048#if defined(DEBUG_MMU)
1049 printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
1050#endif
1051 if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
1052 (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
1053 tlb_flush(env, 1);
1054 }
1055 /* SSE handling */
1056 if (!(env->cpuid_features & CPUID_SSE))
1057 new_cr4 &= ~CR4_OSFXSR_MASK;
1058 if (new_cr4 & CR4_OSFXSR_MASK)
1059 env->hflags |= HF_OSFXSR_MASK;
1060 else
1061 env->hflags &= ~HF_OSFXSR_MASK;
1062
1063 env->cr[4] = new_cr4;
1064#ifdef VBOX
1065 remR3ChangeCpuMode(env);
1066#endif
1067}
1068
1069#if defined(CONFIG_USER_ONLY)
1070
1071int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
1072 int is_write, int mmu_idx, int is_softmmu)
1073{
1074 /* user mode only emulation */
1075 is_write &= 1;
1076 env->cr[2] = addr;
1077 env->error_code = (is_write << PG_ERROR_W_BIT);
1078 env->error_code |= PG_ERROR_U_MASK;
1079 env->exception_index = EXCP0E_PAGE;
1080 return 1;
1081}
1082
1083target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
1084{
1085 return addr;
1086}
1087
1088#else
1089
/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
/* Mask selecting the physical page-frame bits of a 64-bit (PAE/long mode)
 * paging-structure entry: 40-bit physical addresses on 64-bit targets,
 * 36-bit otherwise. */
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif
1097
1098/* return value:
1099 -1 = cannot handle fault
1100 0 = nothing more to do
1101 1 = generate PF fault
1102 2 = soft MMU activation required for this block
1103*/
/* Software MMU: walk the guest page tables for ADDR and install the
 * resulting mapping in QEMU's TLB, or record a fault.
 *
 * is_write1: 0 = read, 1 = write, 2 = instruction fetch.
 * mmu_idx selects the privilege level (MMU_USER_IDX => CPL 3 access).
 *
 * Three walk flavours are handled: no paging (identity), PAE/long mode
 * (64-bit entries, optional NX), and legacy 32-bit 2-level paging.
 * Accessed/Dirty bits are set in guest tables as a side effect, using
 * stl_phys_notdirty so the writes do not dirty-track the page tables
 * themselves.  See the comment above for the return-value convention.
 */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    /* fold "instruction fetch" (2) into a read for permission checks */
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* Paging disabled: identity-map with full permissions. */
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        /* PAE or long-mode walk: 64-bit entries.  ptep accumulates the
           combined USER/RW/NX permissions across all levels (NX is
           tracked inverted via the ^ PG_NX_MASK trick so that AND-ing
           levels yields the most restrictive result). */
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension: non-canonical
               addresses take #GP, not #PF */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* NX set while EFER.NXE is clear => reserved-bit fault */
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* Legacy PAE: PDPT has 4 entries with no permission bits;
               seed ptep fully permissive. */
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;    /* un-invert: ptep now holds real NX */
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                /* supervisor writes only honour RW when CR0.WP is set */
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        /* Legacy 32-bit 2-level paging (no NX). */
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    /* Build the #PF error code and deliver it, honouring SVM page-fault
       intercepts (CR2 is not touched when the fault is intercepted). */
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
1393
/* Translate a guest virtual address to a guest physical address for the
 * debugger (gdbstub / monitor).
 *
 * Performs a read-only page-table walk: no permission checks, and no
 * Accessed/Dirty updates.  Returns the physical address, or -1 if the
 * address is non-canonical or any level of the walk is not present.
 */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            /* legacy PAE: 4-entry PDPT selected by address bits 31:30 */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            /* paging disabled: identity mapping */
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
1487
/* Mirror the guest's DR7 configuration for debug register INDEX into
 * QEMU's generic breakpoint/watchpoint machinery.
 *
 * DR7 type 0 = instruction breakpoint, 1 = data write, 2 = I/O
 * (unsupported), 3 = data read/write.  On insertion failure the cached
 * reference is cleared.  NOTE(review): only cpu_breakpoint[index] is
 * cleared on error, even for the watchpoint cases — verify against the
 * matching remove path. */
void hw_breakpoint_insert(CPUState *env, int index)
{
    int type, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        /* instruction breakpoint: only insert while enabled in DR7 */
        if (hw_breakpoint_enabled(env->dr[7], index))
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        break;
    case 1:
        /* data write breakpoint */
        type = BP_CPU | BP_MEM_WRITE;
        goto insert_wp;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    case 3:
        /* data read/write breakpoint */
        type = BP_CPU | BP_MEM_ACCESS;
    insert_wp:
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
        break;
    }
    if (err)
        env->cpu_breakpoint[index] = NULL;
}
1515
1516void hw_breakpoint_remove(CPUState *env, int index)
1517{
1518 if (!env->cpu_breakpoint[index])
1519 return;
1520 switch (hw_breakpoint_type(env->dr[7], index)) {
1521 case 0:
1522 if (hw_breakpoint_enabled(env->dr[7], index))
1523 cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
1524 break;
1525 case 1:
1526 case 3:
1527 cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
1528 break;
1529 case 2:
1530 /* No support for I/O watchpoints yet */
1531 break;
1532 }
1533}
1534
1535int check_hw_breakpoints(CPUState *env, int force_dr6_update)
1536{
1537 target_ulong dr6;
1538 int reg, type;
1539 int hit_enabled = 0;
1540
1541 dr6 = env->dr[6] & ~0xf;
1542 for (reg = 0; reg < 4; reg++) {
1543 type = hw_breakpoint_type(env->dr[7], reg);
1544 if ((type == 0 && env->dr[reg] == env->eip) ||
1545 ((type & 1) && env->cpu_watchpoint[reg] &&
1546 (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
1547 dr6 |= 1 << reg;
1548 if (hw_breakpoint_enabled(env->dr[7], reg))
1549 hit_enabled = 1;
1550 }
1551 }
1552 if (hit_enabled || force_dr6_update)
1553 env->dr[6] = dr6;
1554 return hit_enabled;
1555}
1556
/* Debug-exception handler that was installed before ours; saved in
   cpu_x86_init() so breakpoint_handler() can chain to it. */
static CPUDebugExcpHandler *prev_debug_excp_handler;

/* Raises a CPU exception; prototype only — presumably defined in the op
   helpers, TODO confirm. */
void raise_exception(int exception_index);
1560
/* Debug-exception hook (installed via cpu_set_debug_excp_handler in
 * cpu_x86_init).  Converts generic QEMU breakpoint/watchpoint hits that
 * belong to the guest (BP_CPU flag) into an architectural #DB, and
 * chains to the previously installed handler afterwards. */
static void breakpoint_handler(CPUState *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        /* A watchpoint fired.  If it is a guest (BP_CPU) watchpoint,
           re-evaluate DR7/DR6: raise #DB if an enabled breakpoint really
           hit, otherwise resume execution transparently. */
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                raise_exception(EXCP01_DB);
            else
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        /* Instruction breakpoint: find the entry matching EIP and raise
           #DB when it is a guest breakpoint. */
        QTAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, 1);
                    raise_exception(EXCP01_DB);
                }
                break;
            }
    }
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
}
1586
1587
1588#ifndef VBOX
1589/* This should come from sysemu.h - if we could include it here... */
1590void qemu_system_reset_request(void);
1591
/* Inject a Machine Check Error into MCE bank BANK of CPU CENV.
 *
 * Bank register layout (per the indices used below): banks[0] = MCi_CTL,
 * banks[1] = MCi_STATUS, banks[2] = MCi_ADDR, banks[3] = MCi_MISC.
 *
 * Uncorrected errors (MCI_STATUS_UC) raise a machine-check interrupt, or
 * trigger a system reset ("triple fault") if one is already in progress
 * or CR4.MCE is off.  Corrected errors are merely logged into the bank,
 * setting the overflow bit when the bank is already occupied. */
void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
                        uint64_t mcg_status, uint64_t addr, uint64_t misc)
{
    uint64_t mcg_cap = cenv->mcg_cap;
    unsigned bank_num = mcg_cap & 0xff;   /* MCG_CAP[7:0] = bank count */
    uint64_t *banks = cenv->mce_banks;

    /* ignore out-of-range banks and status values without the VAL bit */
    if (bank >= bank_num || !(status & MCI_STATUS_VAL))
        return;

    /*
     * if MSR_MCG_CTL is not all 1s, the uncorrected error
     * reporting is disabled
     */
    if ((status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
        cenv->mcg_ctl != ~(uint64_t)0)
        return;
    banks += 4 * bank;    /* point at this bank's 4-register group */
    /*
     * if MSR_MCi_CTL is not all 1s, the uncorrected error
     * reporting is disabled for the bank
     */
    if ((status & MCI_STATUS_UC) && banks[0] != ~(uint64_t)0)
        return;
    if (status & MCI_STATUS_UC) {
        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            /* nested machine check, or MCE disabled: fatal */
            fprintf(stderr, "injects mce exception while previous "
                    "one is in progress!\n");
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL)
            status |= MCI_STATUS_OVER;   /* bank occupied: flag overflow */
        banks[2] = addr;
        banks[3] = misc;
        cenv->mcg_status = mcg_status;
        banks[1] = status;
        cpu_interrupt(cenv, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        /* corrected error and the bank is free (or holds another
           corrected error): record it */
        if (banks[1] & MCI_STATUS_VAL)
            status |= MCI_STATUS_OVER;
        banks[2] = addr;
        banks[3] = misc;
        banks[1] = status;
    } else
        /* bank holds an uncorrected error: only flag the overflow */
        banks[1] |= MCI_STATUS_OVER;
}
1642#endif /* !VBOX */
1643#endif /* !CONFIG_USER_ONLY */
1644
1645#ifndef VBOX
1646
1647static void mce_init(CPUX86State *cenv)
1648{
1649 unsigned int bank, bank_num;
1650
1651 if (((cenv->cpuid_version >> 8)&0xf) >= 6
1652 && (cenv->cpuid_features&(CPUID_MCE|CPUID_MCA)) == (CPUID_MCE|CPUID_MCA)) {
1653 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
1654 cenv->mcg_ctl = ~(uint64_t)0;
1655 bank_num = MCE_BANKS_DEF;
1656 for (bank = 0; bank < bank_num; bank++)
1657 cenv->mce_banks[bank*4] = ~(uint64_t)0;
1658 }
1659}
1660
/* Execute the CPUID instruction on the *host* CPU and return its
 * registers.  Any of eax/ebx/ecx/edx may be NULL if the caller does not
 * care about that register.  Compiled to a no-op (outputs untouched)
 * unless CONFIG_KVM is set.  The 32-bit variant saves/restores all
 * registers with pusha/popa because cpuid clobbers ebx, which may be the
 * PIC register. */
static void host_cpuid(uint32_t function, uint32_t count,
                       uint32_t *eax, uint32_t *ebx,
                       uint32_t *ecx, uint32_t *edx)
{
#if defined(CONFIG_KVM)
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#else
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
#endif
}
1695
1696static void get_cpuid_vendor(CPUX86State *env, uint32_t *ebx,
1697 uint32_t *ecx, uint32_t *edx)
1698{
1699 *ebx = env->cpuid_vendor1;
1700 *edx = env->cpuid_vendor2;
1701 *ecx = env->cpuid_vendor3;
1702
1703 /* sysenter isn't supported on compatibility mode on AMD, syscall
1704 * isn't supported in compatibility mode on Intel.
1705 * Normally we advertise the actual cpu vendor, but you can override
1706 * this if you want to use KVM's sysenter/syscall emulation
1707 * in compatibility mode and when doing cross vendor migration
1708 */
1709 if (kvm_enabled() && env->cpuid_vendor_override) {
1710 host_cpuid(0, 0, NULL, ebx, ecx, edx);
1711 }
1712}
1713
/* Emulate the CPUID instruction for the virtual CPU: return the values
 * of leaf INDEX / sub-leaf COUNT in *eax..*edx.
 *
 * Out-of-range leaves are clamped: both basic leaves above cpuid_level
 * and extended leaves above cpuid_xlevel fall back to cpuid_level
 * (i.e. report the data of the highest basic leaf — presumably matching
 * real-hardware behavior; confirm against the SDM). */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* maximum basic leaf + vendor string */
        *eax = env->cpuid_level;
        get_cpuid_vendor(env, ebx, ecx, edx);
        break;
    case 1:
        /* version, APIC id, feature flags */
        *eax = env->cpuid_version;
        *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->cpuid_ext_features;
        *edx = env->cpuid_features;
        if (env->nr_cores * env->nr_threads > 1) {
            *ebx |= (env->nr_cores * env->nr_threads) << 16;
            *edx |= 1 << 28; /* HTT bit */
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        *eax = 1;
        *ebx = 0;
        *ecx = 0;
        *edx = 0x2c307d;
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (env->nr_cores > 1) {
            *eax = (env->nr_cores - 1) << 26;
        } else {
            *eax = 0;
        }
        switch (count) {
        case 0: /* L1 dcache info */
            *eax |= 0x0000121;
            *ebx = 0x1c0003f;
            *ecx = 0x000003f;
            *edx = 0x0000001;
            break;
        case 1: /* L1 icache info */
            *eax |= 0x0000122;
            *ebx = 0x1c0003f;
            *ecx = 0x000003f;
            *edx = 0x0000001;
            break;
        case 2: /* L2 cache info */
            *eax |= 0x0000143;
            if (env->nr_threads > 1) {
                *eax |= (env->nr_threads - 1) << 14;
            }
            *ebx = 0x3c0003f;
            *ecx = 0x0000fff;
            *edx = 0x0000001;
            break;
        default: /* end of info */
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
            break;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        /* maximum extended leaf + vendor string */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        /* extended feature flags */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->cpuid_ext3_features;
        *edx = env->cpuid_ext2_features;

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So dont set it here for Intel to make Linux guests happy.
         */
        if (env->nr_cores * env->nr_threads > 1) {
            uint32_t tebx, tecx, tedx;
            get_cpuid_vendor(env, &tebx, &tecx, &tedx);
            if (tebx != CPUID_VENDOR_INTEL_1 ||
                tedx != CPUID_VENDOR_INTEL_2 ||
                tecx != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }

        if (kvm_enabled()) {
            /* Nested SVM not yet supported in upstream QEMU */
            *ecx &= ~CPUID_EXT3_SVM;
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* processor brand string, 16 bytes per leaf */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        *eax = 0x01ff01ff;
        *ebx = 0x01ff01ff;
        *ecx = 0x40020140;
        *edx = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        *eax = 0;
        *ebx = 0x42004200;
        *ecx = 0x02008140;
        *edx = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
/* XXX: This value must match the one used in the MMU code. */
        if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
            /* 64 bit processor */
/* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
        } else {
            if (env->cpuid_features & CPUID_PSE36)
                *eax = 0x00000024; /* 36 bits physical */
            else
                *eax = 0x00000020; /* 32 bits physical */
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (env->nr_cores * env->nr_threads > 1) {
            *ecx |= (env->nr_cores * env->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        /* SVM leaf */
        *eax = 0x00000001; /* SVM Revision */
        *ebx = 0x00000010; /* nr of ASIDs */
        *ecx = 0;
        *edx = 0; /* optional features */
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
1902
1903
/* Read and decode the segment descriptor for SELECTOR from the guest's
 * GDT or LDT, using the debug memory accessors (no faults raised).
 *
 * On success returns 1 and fills *base, *limit (byte granular, expanded
 * when the G bit is set) and *flags (the raw high descriptor dword).
 * Returns 0 when the selector is outside the table limit or the
 * descriptor words cannot be read. */
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    /* selector bit 2 (TI) picks LDT vs GDT */
    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(env, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    /* base is scattered across both descriptor words */
    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;   /* 4K granularity */
    *flags = e2;

    return 1;
}
1932
1933#endif /* !VBOX */
1934
/* Create and initialize a virtual x86 CPU for CPU_MODEL.
 *
 * In the VBox build the CPUX86State is allocated by the caller and
 * passed in; upstream QEMU allocates it here.  Returns NULL if the model
 * name cannot be registered. */
#ifndef VBOX
CPUX86State *cpu_x86_init(const char *cpu_model)
#else
CPUX86State *cpu_x86_init(CPUX86State *env, const char *cpu_model)
#endif
{
#ifndef VBOX
    CPUX86State *env;
#endif
    static int inited;   /* one-time global init guard */

#ifndef VBOX
    env = qemu_mallocz(sizeof(CPUX86State));
#endif
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables (first CPU only) */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
#ifndef CONFIG_USER_ONLY
        /* hook the debug-exception path, keeping the previous handler
           so breakpoint_handler() can chain to it */
        prev_debug_excp_handler =
            cpu_set_debug_excp_handler(breakpoint_handler);
#endif
    }
    if (cpu_x86_register(env, cpu_model) < 0) {
        cpu_x86_close(env);
        return NULL;
    }
#ifndef VBOX
    mce_init(env);
#endif

    qemu_init_vcpu(env);

    return env;
}
1973
#ifndef VBOX
#if !defined(CONFIG_USER_ONLY)
/* Handle an INIT IPI: full CPU reset, but preserve a pending SIPI
   request so the subsequent startup IPI is not lost. */
void do_cpu_init(CPUState *env)
{
    int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
    cpu_reset(env);
    env->interrupt_request = sipi;
    apic_init_reset(env);
}

/* Handle a startup IPI: delegated to the APIC emulation. */
void do_cpu_sipi(CPUState *env)
{
    apic_sipi(env);
}
#else
/* User-mode emulation has no APIC: INIT/SIPI are no-ops. */
void do_cpu_init(CPUState *env)
{
}
void do_cpu_sipi(CPUState *env)
{
}
#endif
#endif /* !VBOX */
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette