VirtualBox

source: vbox/trunk/src/recompiler/target-i386/helper.c@ 39691

Last change on this file since 39691 was 37689, checked in by vboxsync, 13 years ago

recompiler: Merged in changes from 0.13.0.

  • Property svn:eol-style set to native
File size: 38.1 KB
Line 
1/*
2 * i386 helpers (without register variable usage)
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20/*
21 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
22 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
23 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
24 * a choice of LGPL license versions is made available with the language indicating
25 * that LGPLv2 or any later version may be used, or where a choice of which version
26 * of the LGPL is applied is otherwise unspecified.
27 */
28
29#include <stdarg.h>
30#include <stdlib.h>
31#include <stdio.h>
32#include <string.h>
33#ifndef VBOX
34#include <inttypes.h>
35#include <signal.h>
36#endif /* !VBOX */
37
38#include "cpu.h"
39#include "exec-all.h"
40#include "qemu-common.h"
41#include "kvm.h"
42
43//#define DEBUG_MMU
44
/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
    }

    /* Zero everything up to (but excluding) the 'breakpoints' member;
       fields from there on are host-side state that survives a reset. */
    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    /* Architectural CR0 power-on value (ET set, paging/protection off). */
    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    /* system descriptor type 2 = LDT (see sys_type_name table below) */
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    /* system descriptor type 11 = busy 32-bit TSS */
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* CS base 0xffff0000 + EIP 0xfff0 puts the first fetch at the
       reset vector just below 4GB. */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
#ifndef VBOX /* We'll get the right value from CPUM. */
    env->regs[R_EDX] = env->cpuid_version;
#endif

    env->eflags = 0x2;

    /* FPU init: all tag bits set = all registers empty */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    /* drop any guest-owned break/watchpoints registered with QEMU */
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);

#ifndef VBOX
    env->mcg_status = 0;
#endif
}
122
/* Destroy a CPU state created by cpu_x86_init().  In the VBox build the
   state is allocated by the caller, so nothing is freed here. */
void cpu_x86_close(CPUX86State *env)
{
#ifndef VBOX
    qemu_free(env);
#endif
}
129
130/***********************************************************/
131/* x86 debug */
132
/* Printable names for the lazy condition-code operations, indexed by
   env->cc_op (used by cpu_dump_state() when X86_DUMP_CCOP is requested).
   NOTE(review): entry order must mirror the CC_OP_* enumeration declared
   in cpu.h -- verify there before adding/reordering entries. */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
187
/*
 * Print one segment/system descriptor cache entry ('name' is "CS", "LDT",
 * etc.): selector, base, limit and raw flags, followed by a decoded
 * DPL/type summary when the CPU is in protected mode and the entry is
 * present.
 */
static void
cpu_x86_dump_seg_cache(CPUState *env, FILE *f,
                       int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                       const char *name, struct SegmentCache *sc)
{
#ifdef VBOX
/* VBox build: route all output to the VBox log, ignoring the
   caller-supplied stream and printer. */
# define cpu_fprintf(f, ...) RTLogPrintf(__VA_ARGS__)
#endif
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        /* long mode: print the full 64-bit base */
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags);
    }

    /* no type decoding in real mode or for non-present entries */
    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        /* code or data descriptor */
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        /* system descriptor: the type field means different things in
           legacy vs. long mode, hence the two name tables */
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
#ifdef VBOX
# undef cpu_fprintf
#endif
}
249
250void cpu_dump_state(CPUState *env, FILE *f,
251 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
252 int flags)
253{
254 int eflags, i, nb;
255 char cc_op_name[32];
256 static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
257
258#ifdef VBOX
259# define cpu_fprintf(f, ...) RTLogPrintf(__VA_ARGS__)
260#endif
261 cpu_synchronize_state(env);
262
263 eflags = env->eflags;
264#ifdef TARGET_X86_64
265 if (env->hflags & HF_CS64_MASK) {
266 cpu_fprintf(f,
267 "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
268 "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
269 "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
270 "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
271 "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
272 env->regs[R_EAX],
273 env->regs[R_EBX],
274 env->regs[R_ECX],
275 env->regs[R_EDX],
276 env->regs[R_ESI],
277 env->regs[R_EDI],
278 env->regs[R_EBP],
279 env->regs[R_ESP],
280 env->regs[8],
281 env->regs[9],
282 env->regs[10],
283 env->regs[11],
284 env->regs[12],
285 env->regs[13],
286 env->regs[14],
287 env->regs[15],
288 env->eip, eflags,
289 eflags & DF_MASK ? 'D' : '-',
290 eflags & CC_O ? 'O' : '-',
291 eflags & CC_S ? 'S' : '-',
292 eflags & CC_Z ? 'Z' : '-',
293 eflags & CC_A ? 'A' : '-',
294 eflags & CC_P ? 'P' : '-',
295 eflags & CC_C ? 'C' : '-',
296 env->hflags & HF_CPL_MASK,
297 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
298 (env->a20_mask >> 20) & 1,
299 (env->hflags >> HF_SMM_SHIFT) & 1,
300 env->halted);
301 } else
302#endif
303 {
304 cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
305 "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
306 "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
307 (uint32_t)env->regs[R_EAX],
308 (uint32_t)env->regs[R_EBX],
309 (uint32_t)env->regs[R_ECX],
310 (uint32_t)env->regs[R_EDX],
311 (uint32_t)env->regs[R_ESI],
312 (uint32_t)env->regs[R_EDI],
313 (uint32_t)env->regs[R_EBP],
314 (uint32_t)env->regs[R_ESP],
315 (uint32_t)env->eip, eflags,
316 eflags & DF_MASK ? 'D' : '-',
317 eflags & CC_O ? 'O' : '-',
318 eflags & CC_S ? 'S' : '-',
319 eflags & CC_Z ? 'Z' : '-',
320 eflags & CC_A ? 'A' : '-',
321 eflags & CC_P ? 'P' : '-',
322 eflags & CC_C ? 'C' : '-',
323 env->hflags & HF_CPL_MASK,
324 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
325 (env->a20_mask >> 20) & 1,
326 (env->hflags >> HF_SMM_SHIFT) & 1,
327 env->halted);
328 }
329
330 for(i = 0; i < 6; i++) {
331 cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
332 &env->segs[i]);
333 }
334 cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
335 cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);
336
337#ifdef TARGET_X86_64
338 if (env->hflags & HF_LMA_MASK) {
339 cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
340 env->gdt.base, env->gdt.limit);
341 cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
342 env->idt.base, env->idt.limit);
343 cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
344 (uint32_t)env->cr[0],
345 env->cr[2],
346 env->cr[3],
347 (uint32_t)env->cr[4]);
348 for(i = 0; i < 4; i++)
349 cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
350 cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
351 env->dr[6], env->dr[7]);
352 } else
353#endif
354 {
355 cpu_fprintf(f, "GDT= %08x %08x\n",
356 (uint32_t)env->gdt.base, env->gdt.limit);
357 cpu_fprintf(f, "IDT= %08x %08x\n",
358 (uint32_t)env->idt.base, env->idt.limit);
359 cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
360 (uint32_t)env->cr[0],
361 (uint32_t)env->cr[2],
362 (uint32_t)env->cr[3],
363 (uint32_t)env->cr[4]);
364 for(i = 0; i < 4; i++)
365 cpu_fprintf(f, "DR%d=%08x ", i, env->dr[i]);
366 cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n", env->dr[6], env->dr[7]);
367 }
368 if (flags & X86_DUMP_CCOP) {
369 if ((unsigned)env->cc_op < CC_OP_NB)
370 snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
371 else
372 snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
373#ifdef TARGET_X86_64
374 if (env->hflags & HF_CS64_MASK) {
375 cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
376 env->cc_src, env->cc_dst,
377 cc_op_name);
378 } else
379#endif
380 {
381 cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
382 (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
383 cc_op_name);
384 }
385 }
386 cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
387 if (flags & X86_DUMP_FPU) {
388 int fptag;
389 fptag = 0;
390 for(i = 0; i < 8; i++) {
391 fptag |= ((!env->fptags[i]) << i);
392 }
393 cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
394 env->fpuc,
395 (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
396 env->fpstt,
397 fptag,
398 env->mxcsr);
399 for(i=0;i<8;i++) {
400#if defined(USE_X86LDOUBLE)
401 union {
402 long double d;
403 struct {
404 uint64_t lower;
405 uint16_t upper;
406 } l;
407 } tmp;
408 tmp.d = env->fpregs[i].d;
409 cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
410 i, tmp.l.lower, tmp.l.upper);
411#else
412 cpu_fprintf(f, "FPR%d=%016" PRIx64,
413 i, env->fpregs[i].mmx.q);
414#endif
415 if ((i & 1) == 1)
416 cpu_fprintf(f, "\n");
417 else
418 cpu_fprintf(f, " ");
419 }
420 if (env->hflags & HF_CS64_MASK)
421 nb = 16;
422 else
423 nb = 8;
424 for(i=0;i<nb;i++) {
425 cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
426 i,
427 env->xmm_regs[i].XMM_L(3),
428 env->xmm_regs[i].XMM_L(2),
429 env->xmm_regs[i].XMM_L(1),
430 env->xmm_regs[i].XMM_L(0));
431 if ((i & 1) == 1)
432 cpu_fprintf(f, "\n");
433 else
434 cpu_fprintf(f, " ");
435 }
436 }
437#ifdef VBOX
438# undef cpu_fprintf
439#endif
440}
441
442/***********************************************************/
443/* x86 mmu */
444/* XXX: add PGE support */
445
446void cpu_x86_set_a20(CPUX86State *env, int a20_state)
447{
448 a20_state = (a20_state != 0);
449 if (a20_state != ((env->a20_mask >> 20) & 1)) {
450#if defined(DEBUG_MMU)
451 printf("A20 update: a20=%d\n", a20_state);
452#endif
453 /* if the cpu is currently executing code, we must unlink it and
454 all the potentially executing TB */
455 cpu_interrupt(env, CPU_INTERRUPT_EXITTB);
456
457 /* when a20 is changed, all the MMU mappings are invalid, so
458 we must flush everything */
459 tlb_flush(env, 1);
460 env->a20_mask = ~(1 << 20) | (a20_state << 20);
461 }
462}
463
/*
 * Install a new CR0 value.  Flushes the TLB when any of PG/WP/PE change,
 * handles long-mode entry/exit, and keeps the derived hflags (LMA/CS64,
 * PE, ADDSEG, MP/EM/TS) in sync with the new value.
 */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode: PG set while EFER.LME is on */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode: clearing PG drops LMA and truncates RIP */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    /* ET is hardwired to 1 */
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags: MP/EM/TS are copied from CR0 by shifting */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
#ifdef VBOX
    /* notify the rest of VBox that the CPU mode may have changed */
    remR3ChangeCpuMode(env);
#endif
}
507
508/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
509 the PDPT */
510void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
511{
512 env->cr[3] = new_cr3;
513 if (env->cr[0] & CR0_PG_MASK) {
514#if defined(DEBUG_MMU)
515 printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
516#endif
517 tlb_flush(env, 0);
518 }
519}
520
521void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
522{
523#if defined(DEBUG_MMU)
524 printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
525#endif
526 if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
527 (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
528 tlb_flush(env, 1);
529 }
530 /* SSE handling */
531 if (!(env->cpuid_features & CPUID_SSE))
532 new_cr4 &= ~CR4_OSFXSR_MASK;
533 if (new_cr4 & CR4_OSFXSR_MASK)
534 env->hflags |= HF_OSFXSR_MASK;
535 else
536 env->hflags &= ~HF_OSFXSR_MASK;
537
538 env->cr[4] = new_cr4;
539#ifdef VBOX
540 remR3ChangeCpuMode(env);
541#endif
542}
543
544#if defined(CONFIG_USER_ONLY)
545
546int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
547 int is_write, int mmu_idx, int is_softmmu)
548{
549 /* user mode only emulation */
550 is_write &= 1;
551 env->cr[2] = addr;
552 env->error_code = (is_write << PG_ERROR_W_BIT);
553 env->error_code |= PG_ERROR_U_MASK;
554 env->exception_index = EXCP0E_PAGE;
555 return 1;
556}
557
558#else
559
560/* XXX: This value should match the one returned by CPUID
561 * and in exec.c */
562# if defined(TARGET_X86_64)
563# define PHYS_ADDR_MASK 0xfffffff000LL
564# else
565# define PHYS_ADDR_MASK 0xffffff000LL
566# endif
567
/* Walk the guest page tables for 'addr' and install the translation in
   the software TLB.  'is_write1' encodes the access type: 0 = read,
   1 = write, 2 = instruction fetch (the value checked for NX faults).
   'is_softmmu' is unused here.

   return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* paging disabled: identity mapping, full access */
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

        /* PAE walk.  'ptep' accumulates the combined access rights of all
           levels; NX is tracked inverted (XORed) so it can be ANDed like
           the other permission bits. */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                /* non-canonical address: #GP, not #PF */
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* NX set while EFER.NXE is off is a reserved-bit fault */
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* legacy PAE: 4-entry PDPT, entries carry no permissions */
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;   /* un-invert: walk ends at this level */
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                /* supervisor writes honor R/W only when CR0.WP is set */
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        /* legacy 2-level 32-bit walk (no NX) */
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
    return 0;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    /* build the #PF error code for the guest */
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
862
/*
 * Translate guest-virtual 'addr' to a guest-physical address for debugger
 * accesses.  The page tables are walked read-only: no accessed/dirty
 * updates and no permission checks.  Returns -1 if 'addr' is not mapped.
 */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            /* legacy PAE: 4-entry PDPT selected by address bits 31:30 */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            /* paging disabled: physical == virtual */
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
956
/* Install the debug facility described by DR7 for slot 'index' (DR0..DR3)
   as a QEMU breakpoint or watchpoint, recording the handle in
   env->cpu_breakpoint[]/cpu_watchpoint[]. */
void hw_breakpoint_insert(CPUState *env, int index)
{
    int type, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        /* instruction breakpoint */
        if (hw_breakpoint_enabled(env->dr[7], index))
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        break;
    case 1:
        /* data-write watchpoint */
        type = BP_CPU | BP_MEM_WRITE;
        goto insert_wp;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    case 3:
        /* data read/write watchpoint */
        type = BP_CPU | BP_MEM_ACCESS;
    insert_wp:
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
        break;
    }
    if (err)
        env->cpu_breakpoint[index] = NULL;   /* mark slot as not installed */
}
984
985void hw_breakpoint_remove(CPUState *env, int index)
986{
987 if (!env->cpu_breakpoint[index])
988 return;
989 switch (hw_breakpoint_type(env->dr[7], index)) {
990 case 0:
991 if (hw_breakpoint_enabled(env->dr[7], index))
992 cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
993 break;
994 case 1:
995 case 3:
996 cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
997 break;
998 case 2:
999 /* No support for I/O watchpoints yet */
1000 break;
1001 }
1002}
1003
/*
 * Recompute the DR6 hit bits from the debug registers that currently
 * match (DR0-DR3 vs EIP for instruction breakpoints, or a fired QEMU
 * watchpoint for data breakpoints).  Returns 1 when at least one
 * *enabled* slot matched, i.e. a #DB should be raised.  DR6 is written
 * back only on a hit or when 'force_dr6_update' is set.
 */
int check_hw_breakpoints(CPUState *env, int force_dr6_update)
{
    target_ulong dr6;
    int reg, type;
    int hit_enabled = 0;

    dr6 = env->dr[6] & ~0xf;   /* clear the B0-B3 hit bits */
    for (reg = 0; reg < 4; reg++) {
        type = hw_breakpoint_type(env->dr[7], reg);
        if ((type == 0 && env->dr[reg] == env->eip) ||
            ((type & 1) && env->cpu_watchpoint[reg] &&
             (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg))
                hit_enabled = 1;
        }
    }
    if (hit_enabled || force_dr6_update)
        env->dr[6] = dr6;
    return hit_enabled;
}
1025
1026static CPUDebugExcpHandler *prev_debug_excp_handler;
1027
1028void raise_exception_env(int exception_index, CPUState *env);
1029
/*
 * Debug-exception callback installed via cpu_set_debug_excp_handler().
 * Turns QEMU-level break/watchpoint hits that are guest-owned (BP_CPU)
 * into an architectural #DB, and otherwise resumes execution.
 */
static void breakpoint_handler(CPUState *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        /* a watchpoint fired; only guest-owned (BP_CPU) ones concern us */
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                raise_exception_env(EXCP01_DB, env);
            else
                /* disabled in DR7: silently continue execution */
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        /* instruction breakpoint: find the entry matching EIP */
        QTAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, 1);
                    raise_exception_env(EXCP01_DB, env);
                }
                break;
            }
    }
    /* chain to the previously installed handler, if any */
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
}
1055
1056#ifndef VBOX
1057/* This should come from sysemu.h - if we could include it here... */
1058void qemu_system_reset_request(void);
1059
/*
 * Inject a machine-check event into MCA bank 'bank' of 'cenv'.
 * 'status', 'mcg_status', 'addr' and 'misc' are the raw MCi_STATUS,
 * MCG_STATUS, MCi_ADDR and MCi_MISC values to report.  Uncorrected (UC)
 * errors raise CPU_INTERRUPT_MCE (or trigger a reset when machine checks
 * are disabled or one is already in progress); corrected errors are only
 * recorded in the bank registers.
 */
void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
                        uint64_t mcg_status, uint64_t addr, uint64_t misc)
{
    uint64_t mcg_cap = cenv->mcg_cap;
    unsigned bank_num = mcg_cap & 0xff;   /* low byte of MCG_CAP = bank count */
    uint64_t *banks = cenv->mce_banks;

    if (bank >= bank_num || !(status & MCI_STATUS_VAL))
        return;

    /*
     * if MSR_MCG_CTL is not all 1s, the uncorrected error
     * reporting is disabled
     */
    if ((status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
        cenv->mcg_ctl != ~(uint64_t)0)
        return;
    banks += 4 * bank;   /* each bank is 4 slots: CTL, STATUS, ADDR, MISC */
    /*
     * if MSR_MCi_CTL is not all 1s, the uncorrected error
     * reporting is disabled for the bank
     */
    if ((status & MCI_STATUS_UC) && banks[0] != ~(uint64_t)0)
        return;
    if (status & MCI_STATUS_UC) {
        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            /* nested MCE, or CR4.MCE clear: treat as a fatal triple fault */
            fprintf(stderr, "injects mce exception while previous "
                    "one is in progress!\n");
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL)
            status |= MCI_STATUS_OVER;   /* bank already holds an event */
        banks[2] = addr;
        banks[3] = misc;
        cenv->mcg_status = mcg_status;
        banks[1] = status;
        cpu_interrupt(cenv, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        /* corrected error: log it, overwriting a previous corrected one */
        if (banks[1] & MCI_STATUS_VAL)
            status |= MCI_STATUS_OVER;
        banks[2] = addr;
        banks[3] = misc;
        banks[1] = status;
    } else
        /* corrected error while an UC event is pending: only flag overflow */
        banks[1] |= MCI_STATUS_OVER;
}
1110#endif /* !VBOX */
1111#endif /* !CONFIG_USER_ONLY */
1112
1113#ifndef VBOX
1114
1115static void mce_init(CPUX86State *cenv)
1116{
1117 unsigned int bank, bank_num;
1118
1119 if (((cenv->cpuid_version >> 8)&0xf) >= 6
1120 && (cenv->cpuid_features&(CPUID_MCE|CPUID_MCA)) == (CPUID_MCE|CPUID_MCA)) {
1121 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
1122 cenv->mcg_ctl = ~(uint64_t)0;
1123 bank_num = MCE_BANKS_DEF;
1124 for (bank = 0; bank < bank_num; bank++)
1125 cenv->mce_banks[bank*4] = ~(uint64_t)0;
1126 }
1127}
1128
/*
 * Decode the GDT/LDT descriptor for 'selector' on behalf of a debugger.
 * Returns 1 on success with *base, *limit and *flags filled in; 0 when
 * the selector is outside the table limit or the memory read fails.
 */
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)          /* TI bit: 1 = LDT, 0 = GDT */
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;       /* byte offset of the 8-byte descriptor */
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(env, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    /* reassemble the base/limit fields scattered across the two words */
    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;   /* granularity: limit in 4K units */
    *flags = e2;

    return 1;
}
1157
1158#endif /* !VBOX */
1159
/*
 * Create a new x86 CPU state for 'cpu_model'.  In the QEMU build the
 * state is allocated here and NULL is returned for an unknown model; in
 * the VBox build the caller supplies the (already allocated) 'env' and
 * model registration is handled elsewhere.
 */
#ifndef VBOX
CPUX86State *cpu_x86_init(const char *cpu_model)
#else
CPUX86State *cpu_x86_init(CPUX86State *env, const char *cpu_model)
#endif
{
#ifndef VBOX
    CPUX86State *env;
#endif
    static int inited;

#ifndef VBOX
    env = qemu_mallocz(sizeof(CPUX86State));
#endif
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables; done once for all CPUs */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
#ifndef CONFIG_USER_ONLY
        /* hook the debug exception path, remembering the old handler */
        prev_debug_excp_handler =
            cpu_set_debug_excp_handler(breakpoint_handler);
#endif
    }
#ifndef VBOX
    if (cpu_x86_register(env, cpu_model) < 0) {
        cpu_x86_close(env);
        return NULL;
    }
    mce_init(env);
#endif

    qemu_init_vcpu(env);

    return env;
}
1198
1199#ifndef VBOX
1200#if !defined(CONFIG_USER_ONLY)
/* Handle an INIT IPI: reset the CPU while preserving a pending SIPI so
   the INIT/SIPI start-up sequence still completes. */
void do_cpu_init(CPUState *env)
{
    int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
    cpu_reset(env);
    env->interrupt_request = sipi;
    apic_init_reset(env->apic_state);
    env->halted = !cpu_is_bsp(env);   /* non-BSP CPUs wait halted for SIPI */
}

/* Handle a start-up IPI: fully delegated to the APIC. */
void do_cpu_sipi(CPUState *env)
{
    apic_sipi(env->apic_state);
}
1214#else
/* user-mode emulation: INIT/SIPI inter-processor interrupts do not apply */
void do_cpu_init(CPUState *env)
{
}
void do_cpu_sipi(CPUState *env)
{
}
1221#endif
1222#endif /* !VBOX */
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette