VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp @ 72417

Last change on this file since 72417 was 72358, checked in by vboxsync, 7 years ago

NEM: Sync more MSR state; don't treat unrecoverable exceptions as triple fault because checking with IEM (need more checking). bugref:9044

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 79.2 KB
1/* $Id: CPUMAllRegs.cpp 72358 2018-05-28 14:47:51Z vboxsync $ */
2/** @file
3 * CPUM - CPU Monitor(/Manager) - Getters and Setters.
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_CPUM
23#include <VBox/vmm/cpum.h>
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/dbgf.h>
26#include <VBox/vmm/apic.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/mm.h>
29#include <VBox/vmm/em.h>
30#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
31# include <VBox/vmm/selm.h>
32#endif
33#include "CPUMInternal.h"
34#include <VBox/vmm/vm.h>
35#include <VBox/err.h>
36#include <VBox/dis.h>
37#include <VBox/log.h>
38#include <VBox/vmm/hm.h>
39#include <VBox/vmm/tm.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#include <iprt/asm-amd64-x86.h>
43#ifdef IN_RING3
44#include <iprt/thread.h>
45#endif
46
47/** Disable stack frame pointer generation here. */
48#if defined(_MSC_VER) && !defined(DEBUG) && defined(RT_ARCH_X86)
49# pragma optimize("y", off)
50#endif
51
52AssertCompile2MemberOffsets(VM, cpum.s.HostFeatures, cpum.ro.HostFeatures);
53AssertCompile2MemberOffsets(VM, cpum.s.GuestFeatures, cpum.ro.GuestFeatures);
54
55
56/*********************************************************************************************************************************
57* Defined Constants And Macros *
58*********************************************************************************************************************************/
59/**
60 * Converts a CPUMCPU::Guest pointer into a VMCPU pointer.
61 *
62 * @returns Pointer to the Virtual CPU.
63 * @param a_pGuestCtx Pointer to the guest context.
64 */
65#define CPUM_GUEST_CTX_TO_VMCPU(a_pGuestCtx) RT_FROM_MEMBER(a_pGuestCtx, VMCPU, cpum.s.Guest)
66
67/**
68 * Lazily loads the hidden parts of a selector register when using raw-mode.
69 */
70#if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
71# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
72 do \
73 { \
74 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg)) \
75 cpumGuestLazyLoadHiddenSelectorReg(a_pVCpu, a_pSReg); \
76 } while (0)
77#else
78# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
79 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg));
80#endif
81
82
83
84#ifdef VBOX_WITH_RAW_MODE_NOT_R0
85
86/**
87 * Does the lazy hidden selector register loading.
88 *
89 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
90 * @param pSReg The selector register to lazily load hidden parts of.
91 */
92static void cpumGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
93{
94 Assert(!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
95 Assert(VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)));
96 Assert((uintptr_t)(pSReg - &pVCpu->cpum.s.Guest.es) < X86_SREG_COUNT);
97
98 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
99 {
100 /* V8086 mode - Tightly controlled environment, no question about the limit or flags. */
101 pSReg->Attr.u = 0;
102 pSReg->Attr.n.u4Type = pSReg == &pVCpu->cpum.s.Guest.cs ? X86_SEL_TYPE_ER_ACC : X86_SEL_TYPE_RW_ACC;
103 pSReg->Attr.n.u1DescType = 1; /* code/data segment */
104 pSReg->Attr.n.u2Dpl = 3;
105 pSReg->Attr.n.u1Present = 1;
106 pSReg->u32Limit = 0x0000ffff;
107 pSReg->u64Base = (uint32_t)pSReg->Sel << 4;
108 pSReg->ValidSel = pSReg->Sel;
109 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
110 /** @todo Check what the accessed bit should be (VT-x and AMD-V). */
111 }
112 else if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
113 {
114 /* Real mode - leave the limit and flags alone here, at least for now. */
115 pSReg->u64Base = (uint32_t)pSReg->Sel << 4;
116 pSReg->ValidSel = pSReg->Sel;
117 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
118 }
119 else
120 {
121 /* Protected mode - get it from the selector descriptor tables. */
122 if (!(pSReg->Sel & X86_SEL_MASK_OFF_RPL))
123 {
124 Assert(!CPUMIsGuestInLongMode(pVCpu));
125 pSReg->Sel = 0;
126 pSReg->u64Base = 0;
127 pSReg->u32Limit = 0;
128 pSReg->Attr.u = 0;
129 pSReg->ValidSel = 0;
130 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
131 /** @todo see todo in iemHlpLoadNullDataSelectorProt. */
132 }
133 else
134 SELMLoadHiddenSelectorReg(pVCpu, &pVCpu->cpum.s.Guest, pSReg);
135 }
136}
137
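/*
 * Illustrative note (not part of the original file): in the real-mode and
 * V8086 paths above, the hidden base is simply the selector shifted left by
 * four, so for example:
 *
 *     pSReg->Sel     = 0xB800;
 *     pSReg->u64Base = (uint32_t)pSReg->Sel << 4;   // 0x000B8000
 *
 * Only the protected-mode path has to consult the descriptor tables via
 * SELMLoadHiddenSelectorReg().
 */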
138
139/**
140 * Makes sure the hidden CS and SS selector registers are valid, loading them if
141 * necessary.
142 *
143 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
144 */
145VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenCsAndSs(PVMCPU pVCpu)
146{
147 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
148 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss);
149}
150
151
152/**
153 * Loads the hidden parts of a selector register.
154 *
155 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
156 * @param pSReg The selector register to lazily load hidden parts of.
157 */
158VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
159{
160 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, pSReg);
161}
162
163#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
164
165
166/**
167 * Obsolete.
168 *
169 * We don't support nested hypervisor context interrupts or traps. Life is much
170 * simpler when we don't. It's also slightly faster at times.
171 *
172 * @param pVCpu The cross context virtual CPU structure.
173 */
174VMMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVMCPU pVCpu)
175{
176 return CPUMCTX2CORE(&pVCpu->cpum.s.Hyper);
177}
178
179
180/**
181 * Gets the pointer to the hypervisor CPU context structure of a virtual CPU.
182 *
183 * @param pVCpu The cross context virtual CPU structure.
184 */
185VMMDECL(PCPUMCTX) CPUMGetHyperCtxPtr(PVMCPU pVCpu)
186{
187 return &pVCpu->cpum.s.Hyper;
188}
189
190
191VMMDECL(void) CPUMSetHyperGDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
192{
193 pVCpu->cpum.s.Hyper.gdtr.cbGdt = limit;
194 pVCpu->cpum.s.Hyper.gdtr.pGdt = addr;
195}
196
197
198VMMDECL(void) CPUMSetHyperIDTR(PVMCPU pVCpu, uint32_t addr, uint16_t limit)
199{
200 pVCpu->cpum.s.Hyper.idtr.cbIdt = limit;
201 pVCpu->cpum.s.Hyper.idtr.pIdt = addr;
202}
203
204
205VMMDECL(void) CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3)
206{
207 pVCpu->cpum.s.Hyper.cr3 = cr3;
208
209#ifdef IN_RC
210 /* Update the current CR3. */
211 ASMSetCR3(cr3);
212#endif
213}
214
215VMMDECL(uint32_t) CPUMGetHyperCR3(PVMCPU pVCpu)
216{
217 return pVCpu->cpum.s.Hyper.cr3;
218}
219
220
221VMMDECL(void) CPUMSetHyperCS(PVMCPU pVCpu, RTSEL SelCS)
222{
223 pVCpu->cpum.s.Hyper.cs.Sel = SelCS;
224}
225
226
227VMMDECL(void) CPUMSetHyperDS(PVMCPU pVCpu, RTSEL SelDS)
228{
229 pVCpu->cpum.s.Hyper.ds.Sel = SelDS;
230}
231
232
233VMMDECL(void) CPUMSetHyperES(PVMCPU pVCpu, RTSEL SelES)
234{
235 pVCpu->cpum.s.Hyper.es.Sel = SelES;
236}
237
238
239VMMDECL(void) CPUMSetHyperFS(PVMCPU pVCpu, RTSEL SelFS)
240{
241 pVCpu->cpum.s.Hyper.fs.Sel = SelFS;
242}
243
244
245VMMDECL(void) CPUMSetHyperGS(PVMCPU pVCpu, RTSEL SelGS)
246{
247 pVCpu->cpum.s.Hyper.gs.Sel = SelGS;
248}
249
250
251VMMDECL(void) CPUMSetHyperSS(PVMCPU pVCpu, RTSEL SelSS)
252{
253 pVCpu->cpum.s.Hyper.ss.Sel = SelSS;
254}
255
256
257VMMDECL(void) CPUMSetHyperESP(PVMCPU pVCpu, uint32_t u32ESP)
258{
259 pVCpu->cpum.s.Hyper.esp = u32ESP;
260}
261
262
263VMMDECL(void) CPUMSetHyperEDX(PVMCPU pVCpu, uint32_t u32EDX)
264{
265 pVCpu->cpum.s.Hyper.edx = u32EDX;
266}
267
268
269VMMDECL(int) CPUMSetHyperEFlags(PVMCPU pVCpu, uint32_t Efl)
270{
271 pVCpu->cpum.s.Hyper.eflags.u32 = Efl;
272 return VINF_SUCCESS;
273}
274
275
276VMMDECL(void) CPUMSetHyperEIP(PVMCPU pVCpu, uint32_t u32EIP)
277{
278 pVCpu->cpum.s.Hyper.eip = u32EIP;
279}
280
281
282/**
283 * Used by VMMR3RawRunGC to reinitialize the general raw-mode context registers,
284 * EFLAGS and EIP prior to resuming guest execution.
285 *
286 * All general registers not given as parameters will be set to 0. The EFLAGS
287 * register will be set to sane values for C/C++ code execution with interrupts
288 * disabled and IOPL 0.
289 *
290 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
291 * @param u32EIP The EIP value.
292 * @param u32ESP The ESP value.
293 * @param u32EAX The EAX value.
294 * @param u32EDX The EDX value.
295 */
296VMM_INT_DECL(void) CPUMSetHyperState(PVMCPU pVCpu, uint32_t u32EIP, uint32_t u32ESP, uint32_t u32EAX, uint32_t u32EDX)
297{
298 pVCpu->cpum.s.Hyper.eip = u32EIP;
299 pVCpu->cpum.s.Hyper.esp = u32ESP;
300 pVCpu->cpum.s.Hyper.eax = u32EAX;
301 pVCpu->cpum.s.Hyper.edx = u32EDX;
302 pVCpu->cpum.s.Hyper.ecx = 0;
303 pVCpu->cpum.s.Hyper.ebx = 0;
304 pVCpu->cpum.s.Hyper.ebp = 0;
305 pVCpu->cpum.s.Hyper.esi = 0;
306 pVCpu->cpum.s.Hyper.edi = 0;
307 pVCpu->cpum.s.Hyper.eflags.u = X86_EFL_1;
308}
309
310
311VMMDECL(void) CPUMSetHyperTR(PVMCPU pVCpu, RTSEL SelTR)
312{
313 pVCpu->cpum.s.Hyper.tr.Sel = SelTR;
314}
315
316
317VMMDECL(void) CPUMSetHyperLDTR(PVMCPU pVCpu, RTSEL SelLDTR)
318{
319 pVCpu->cpum.s.Hyper.ldtr.Sel = SelLDTR;
320}
321
322
323/** @def MAYBE_LOAD_DRx
324 * Macro for updating DRx values in raw-mode and ring-0 contexts.
325 */
326#ifdef IN_RING0
327# if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
328# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
329 do { \
330 if (!CPUMIsGuestInLongModeEx(&(a_pVCpu)->cpum.s.Guest)) \
331 a_fnLoad(a_uValue); \
332 else \
333 (a_pVCpu)->cpum.s.fUseFlags |= CPUM_SYNC_DEBUG_REGS_HYPER; \
334 } while (0)
335# else
336# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
337 do { \
338 a_fnLoad(a_uValue); \
339 } while (0)
340# endif
341
342#elif defined(IN_RC)
343# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
344 do { \
345 if ((a_pVCpu)->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER) \
346 { a_fnLoad(a_uValue); } \
347 } while (0)
348
349#else
350# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) do { } while (0)
351#endif
352
353VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
354{
355 pVCpu->cpum.s.Hyper.dr[0] = uDr0;
356 MAYBE_LOAD_DRx(pVCpu, ASMSetDR0, uDr0);
357}
358
359
360VMMDECL(void) CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1)
361{
362 pVCpu->cpum.s.Hyper.dr[1] = uDr1;
363 MAYBE_LOAD_DRx(pVCpu, ASMSetDR1, uDr1);
364}
365
366
367VMMDECL(void) CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2)
368{
369 pVCpu->cpum.s.Hyper.dr[2] = uDr2;
370 MAYBE_LOAD_DRx(pVCpu, ASMSetDR2, uDr2);
371}
372
373
374VMMDECL(void) CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3)
375{
376 pVCpu->cpum.s.Hyper.dr[3] = uDr3;
377 MAYBE_LOAD_DRx(pVCpu, ASMSetDR3, uDr3);
378}
379
380
381VMMDECL(void) CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6)
382{
383 pVCpu->cpum.s.Hyper.dr[6] = uDr6;
384}
385
386
387VMMDECL(void) CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7)
388{
389 pVCpu->cpum.s.Hyper.dr[7] = uDr7;
390#ifdef IN_RC
391 MAYBE_LOAD_DRx(pVCpu, ASMSetDR7, uDr7);
392#endif
393}
394
395
396VMMDECL(RTSEL) CPUMGetHyperCS(PVMCPU pVCpu)
397{
398 return pVCpu->cpum.s.Hyper.cs.Sel;
399}
400
401
402VMMDECL(RTSEL) CPUMGetHyperDS(PVMCPU pVCpu)
403{
404 return pVCpu->cpum.s.Hyper.ds.Sel;
405}
406
407
408VMMDECL(RTSEL) CPUMGetHyperES(PVMCPU pVCpu)
409{
410 return pVCpu->cpum.s.Hyper.es.Sel;
411}
412
413
414VMMDECL(RTSEL) CPUMGetHyperFS(PVMCPU pVCpu)
415{
416 return pVCpu->cpum.s.Hyper.fs.Sel;
417}
418
419
420VMMDECL(RTSEL) CPUMGetHyperGS(PVMCPU pVCpu)
421{
422 return pVCpu->cpum.s.Hyper.gs.Sel;
423}
424
425
426VMMDECL(RTSEL) CPUMGetHyperSS(PVMCPU pVCpu)
427{
428 return pVCpu->cpum.s.Hyper.ss.Sel;
429}
430
431
432VMMDECL(uint32_t) CPUMGetHyperEAX(PVMCPU pVCpu)
433{
434 return pVCpu->cpum.s.Hyper.eax;
435}
436
437
438VMMDECL(uint32_t) CPUMGetHyperEBX(PVMCPU pVCpu)
439{
440 return pVCpu->cpum.s.Hyper.ebx;
441}
442
443
444VMMDECL(uint32_t) CPUMGetHyperECX(PVMCPU pVCpu)
445{
446 return pVCpu->cpum.s.Hyper.ecx;
447}
448
449
450VMMDECL(uint32_t) CPUMGetHyperEDX(PVMCPU pVCpu)
451{
452 return pVCpu->cpum.s.Hyper.edx;
453}
454
455
456VMMDECL(uint32_t) CPUMGetHyperESI(PVMCPU pVCpu)
457{
458 return pVCpu->cpum.s.Hyper.esi;
459}
460
461
462VMMDECL(uint32_t) CPUMGetHyperEDI(PVMCPU pVCpu)
463{
464 return pVCpu->cpum.s.Hyper.edi;
465}
466
467
468VMMDECL(uint32_t) CPUMGetHyperEBP(PVMCPU pVCpu)
469{
470 return pVCpu->cpum.s.Hyper.ebp;
471}
472
473
474VMMDECL(uint32_t) CPUMGetHyperESP(PVMCPU pVCpu)
475{
476 return pVCpu->cpum.s.Hyper.esp;
477}
478
479
480VMMDECL(uint32_t) CPUMGetHyperEFlags(PVMCPU pVCpu)
481{
482 return pVCpu->cpum.s.Hyper.eflags.u32;
483}
484
485
486VMMDECL(uint32_t) CPUMGetHyperEIP(PVMCPU pVCpu)
487{
488 return pVCpu->cpum.s.Hyper.eip;
489}
490
491
492VMMDECL(uint64_t) CPUMGetHyperRIP(PVMCPU pVCpu)
493{
494 return pVCpu->cpum.s.Hyper.rip;
495}
496
497
498VMMDECL(uint32_t) CPUMGetHyperIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
499{
500 if (pcbLimit)
501 *pcbLimit = pVCpu->cpum.s.Hyper.idtr.cbIdt;
502 return pVCpu->cpum.s.Hyper.idtr.pIdt;
503}
504
505
506VMMDECL(uint32_t) CPUMGetHyperGDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
507{
508 if (pcbLimit)
509 *pcbLimit = pVCpu->cpum.s.Hyper.gdtr.cbGdt;
510 return pVCpu->cpum.s.Hyper.gdtr.pGdt;
511}
512
513
514VMMDECL(RTSEL) CPUMGetHyperLDTR(PVMCPU pVCpu)
515{
516 return pVCpu->cpum.s.Hyper.ldtr.Sel;
517}
518
519
520VMMDECL(RTGCUINTREG) CPUMGetHyperDR0(PVMCPU pVCpu)
521{
522 return pVCpu->cpum.s.Hyper.dr[0];
523}
524
525
526VMMDECL(RTGCUINTREG) CPUMGetHyperDR1(PVMCPU pVCpu)
527{
528 return pVCpu->cpum.s.Hyper.dr[1];
529}
530
531
532VMMDECL(RTGCUINTREG) CPUMGetHyperDR2(PVMCPU pVCpu)
533{
534 return pVCpu->cpum.s.Hyper.dr[2];
535}
536
537
538VMMDECL(RTGCUINTREG) CPUMGetHyperDR3(PVMCPU pVCpu)
539{
540 return pVCpu->cpum.s.Hyper.dr[3];
541}
542
543
544VMMDECL(RTGCUINTREG) CPUMGetHyperDR6(PVMCPU pVCpu)
545{
546 return pVCpu->cpum.s.Hyper.dr[6];
547}
548
549
550VMMDECL(RTGCUINTREG) CPUMGetHyperDR7(PVMCPU pVCpu)
551{
552 return pVCpu->cpum.s.Hyper.dr[7];
553}
554
555
556/**
557 * Gets the pointer to the internal CPUMCTXCORE structure.
558 * This is only for reading in order to save a few calls.
559 *
560 * @param pVCpu The cross context virtual CPU structure.
561 */
562VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu)
563{
564 return CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
565}
566
567
568/**
569 * Queries the pointer to the internal CPUMCTX structure.
570 *
571 * @returns The CPUMCTX pointer.
572 * @param pVCpu The cross context virtual CPU structure.
573 */
574VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu)
575{
576 return &pVCpu->cpum.s.Guest;
577}
578
579
580/**
581 * Queries the pointer to the internal CPUMCTXMSRS structure.
582 *
583 * This is for NEM only.
584 *
585 * @returns The CPUMCTX pointer.
586 * @param pVCpu The cross context virtual CPU structure.
587 */
588VMM_INT_DECL(PCPUMCTXMSRS) CPUMQueryGuestCtxMsrsPtr(PVMCPU pVCpu)
589{
590 return &pVCpu->cpum.s.GuestMsrs;
591}
592
593
594VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
595{
596#ifdef VBOX_WITH_RAW_MODE_NOT_R0
597 if (VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)))
598 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
599#endif
600 pVCpu->cpum.s.Guest.gdtr.cbGdt = cbLimit;
601 pVCpu->cpum.s.Guest.gdtr.pGdt = GCPtrBase;
602 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
603 return VINF_SUCCESS; /* formality, consider it void. */
604}
605
606
607VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
608{
609#ifdef VBOX_WITH_RAW_MODE_NOT_R0
610 if (VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)))
611 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
612#endif
613 pVCpu->cpum.s.Guest.idtr.cbIdt = cbLimit;
614 pVCpu->cpum.s.Guest.idtr.pIdt = GCPtrBase;
615 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
616 return VINF_SUCCESS; /* formality, consider it void. */
617}
618
619
620VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr)
621{
622#ifdef VBOX_WITH_RAW_MODE_NOT_R0
623 if (VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)))
624 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
625#endif
626 pVCpu->cpum.s.Guest.tr.Sel = tr;
627 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_TR;
628 return VINF_SUCCESS; /* formality, consider it void. */
629}
630
631
632VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
633{
634#ifdef VBOX_WITH_RAW_MODE_NOT_R0
635 if ( ( ldtr != 0
636 || pVCpu->cpum.s.Guest.ldtr.Sel != 0)
637 && VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)))
638 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
639#endif
640 pVCpu->cpum.s.Guest.ldtr.Sel = ldtr;
641 /* The caller will set more hidden bits if it has them. */
642 pVCpu->cpum.s.Guest.ldtr.ValidSel = 0;
643 pVCpu->cpum.s.Guest.ldtr.fFlags = 0;
644 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
645 return VINF_SUCCESS; /* formality, consider it void. */
646}
647
648
649/**
650 * Set the guest CR0.
651 *
652 * When called in GC, the hyper CR0 may be updated if that is
653 * required. The caller only has to take special action if AM,
654 * WP, PG or PE changes.
655 *
656 * @returns VINF_SUCCESS (consider it void).
657 * @param pVCpu The cross context virtual CPU structure.
658 * @param cr0 The new CR0 value.
659 */
660VMMDECL(int) CPUMSetGuestCR0(PVMCPU pVCpu, uint64_t cr0)
661{
662#ifdef IN_RC
663 /*
664 * Check if we need to change hypervisor CR0 because
665 * of math stuff.
666 */
667 if ( (cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
668 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)))
669 {
670 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST))
671 {
672 /*
673 * We haven't loaded the guest FPU state yet, so TS and MT are both set
674 * and EM should be reflecting the guest EM (it always does this).
675 */
676 if ((cr0 & X86_CR0_EM) != (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM))
677 {
678 uint32_t HyperCR0 = ASMGetCR0();
679 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
680 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
681 HyperCR0 &= ~X86_CR0_EM;
682 HyperCR0 |= cr0 & X86_CR0_EM;
683 Log(("CPUM: New HyperCR0=%#x\n", HyperCR0));
684 ASMSetCR0(HyperCR0);
685 }
686# ifdef VBOX_STRICT
687 else
688 {
689 uint32_t HyperCR0 = ASMGetCR0();
690 AssertMsg((HyperCR0 & (X86_CR0_TS | X86_CR0_MP)) == (X86_CR0_TS | X86_CR0_MP), ("%#x\n", HyperCR0));
691 AssertMsg((HyperCR0 & X86_CR0_EM) == (pVCpu->cpum.s.Guest.cr0 & X86_CR0_EM), ("%#x\n", HyperCR0));
692 }
693# endif
694 }
695 else
696 {
697 /*
698 * Already loaded the guest FPU state, so we're just mirroring
699 * the guest flags.
700 */
701 uint32_t HyperCR0 = ASMGetCR0();
702 AssertMsg( (HyperCR0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP))
703 == (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)),
704 ("%#x %#x\n", HyperCR0, pVCpu->cpum.s.Guest.cr0));
705 HyperCR0 &= ~(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
706 HyperCR0 |= cr0 & (X86_CR0_TS | X86_CR0_EM | X86_CR0_MP);
707 Log(("CPUM: New HyperCR0=%#x\n", HyperCR0));
708 ASMSetCR0(HyperCR0);
709 }
710 }
711#endif /* IN_RC */
712
713 /*
714 * Check for changes causing TLB flushes (for REM).
715 * The caller is responsible for calling PGM when appropriate.
716 */
717 if ( (cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
718 != (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
719 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
720 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR0;
721
722 /*
723 * Let PGM know if the WP goes from 0 to 1 (netware WP0+RO+US hack)
724 */
725 if (((cr0 ^ pVCpu->cpum.s.Guest.cr0) & X86_CR0_WP) && (cr0 & X86_CR0_WP))
726 PGMCr0WpEnabled(pVCpu);
727
728 /* The ET flag is settable on a 386 and hardwired on 486+. */
729 if ( !(cr0 & X86_CR0_ET)
730 && pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386)
731 cr0 |= X86_CR0_ET;
732
733 pVCpu->cpum.s.Guest.cr0 = cr0;
734 return VINF_SUCCESS;
735}
736
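/*
 * Usage sketch (illustrative only, assuming a valid pVCpu for the calling
 * EMT): enabling paging marks the context for a TLB flush via fChanged; the
 * caller is still responsible for telling PGM about the mode change.
 *
 *     uint64_t cr0 = CPUMGetGuestCR0(pVCpu);
 *     CPUMSetGuestCR0(pVCpu, cr0 | X86_CR0_PG | X86_CR0_PE);
 *     // pVCpu->cpum.s.fChanged now has CPUM_CHANGED_CR0 set (and
 *     // CPUM_CHANGED_GLOBAL_TLB_FLUSH if PG/WP/PE actually changed).
 */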
737
738VMMDECL(int) CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2)
739{
740 pVCpu->cpum.s.Guest.cr2 = cr2;
741 return VINF_SUCCESS;
742}
743
744
745VMMDECL(int) CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3)
746{
747 pVCpu->cpum.s.Guest.cr3 = cr3;
748 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR3;
749 return VINF_SUCCESS;
750}
751
752
753VMMDECL(int) CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4)
754{
755 /* Note! We don't bother with OSXSAVE and legacy CPUID patches. */
756
757 if ( (cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
758 != (pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)))
759 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GLOBAL_TLB_FLUSH;
760
761 pVCpu->cpum.s.fChanged |= CPUM_CHANGED_CR4;
762 pVCpu->cpum.s.Guest.cr4 = cr4;
763 return VINF_SUCCESS;
764}
765
766
767VMMDECL(int) CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags)
768{
769 pVCpu->cpum.s.Guest.eflags.u32 = eflags;
770 return VINF_SUCCESS;
771}
772
773
774VMMDECL(int) CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip)
775{
776 pVCpu->cpum.s.Guest.eip = eip;
777 return VINF_SUCCESS;
778}
779
780
781VMMDECL(int) CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax)
782{
783 pVCpu->cpum.s.Guest.eax = eax;
784 return VINF_SUCCESS;
785}
786
787
788VMMDECL(int) CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx)
789{
790 pVCpu->cpum.s.Guest.ebx = ebx;
791 return VINF_SUCCESS;
792}
793
794
795VMMDECL(int) CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx)
796{
797 pVCpu->cpum.s.Guest.ecx = ecx;
798 return VINF_SUCCESS;
799}
800
801
802VMMDECL(int) CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx)
803{
804 pVCpu->cpum.s.Guest.edx = edx;
805 return VINF_SUCCESS;
806}
807
808
809VMMDECL(int) CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp)
810{
811 pVCpu->cpum.s.Guest.esp = esp;
812 return VINF_SUCCESS;
813}
814
815
816VMMDECL(int) CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp)
817{
818 pVCpu->cpum.s.Guest.ebp = ebp;
819 return VINF_SUCCESS;
820}
821
822
823VMMDECL(int) CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi)
824{
825 pVCpu->cpum.s.Guest.esi = esi;
826 return VINF_SUCCESS;
827}
828
829
830VMMDECL(int) CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi)
831{
832 pVCpu->cpum.s.Guest.edi = edi;
833 return VINF_SUCCESS;
834}
835
836
837VMMDECL(int) CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss)
838{
839 pVCpu->cpum.s.Guest.ss.Sel = ss;
840 return VINF_SUCCESS;
841}
842
843
844VMMDECL(int) CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs)
845{
846 pVCpu->cpum.s.Guest.cs.Sel = cs;
847 return VINF_SUCCESS;
848}
849
850
851VMMDECL(int) CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds)
852{
853 pVCpu->cpum.s.Guest.ds.Sel = ds;
854 return VINF_SUCCESS;
855}
856
857
858VMMDECL(int) CPUMSetGuestES(PVMCPU pVCpu, uint16_t es)
859{
860 pVCpu->cpum.s.Guest.es.Sel = es;
861 return VINF_SUCCESS;
862}
863
864
865VMMDECL(int) CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs)
866{
867 pVCpu->cpum.s.Guest.fs.Sel = fs;
868 return VINF_SUCCESS;
869}
870
871
872VMMDECL(int) CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs)
873{
874 pVCpu->cpum.s.Guest.gs.Sel = gs;
875 return VINF_SUCCESS;
876}
877
878
879VMMDECL(void) CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val)
880{
881 pVCpu->cpum.s.Guest.msrEFER = val;
882}
883
884
885VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
886{
887 if (pcbLimit)
888 *pcbLimit = pVCpu->cpum.s.Guest.idtr.cbIdt;
889 return pVCpu->cpum.s.Guest.idtr.pIdt;
890}
891
892
893VMMDECL(RTSEL) CPUMGetGuestTR(PVMCPU pVCpu, PCPUMSELREGHID pHidden)
894{
895 if (pHidden)
896 *pHidden = pVCpu->cpum.s.Guest.tr;
897 return pVCpu->cpum.s.Guest.tr.Sel;
898}
899
900
901VMMDECL(RTSEL) CPUMGetGuestCS(PVMCPU pVCpu)
902{
903 return pVCpu->cpum.s.Guest.cs.Sel;
904}
905
906
907VMMDECL(RTSEL) CPUMGetGuestDS(PVMCPU pVCpu)
908{
909 return pVCpu->cpum.s.Guest.ds.Sel;
910}
911
912
913VMMDECL(RTSEL) CPUMGetGuestES(PVMCPU pVCpu)
914{
915 return pVCpu->cpum.s.Guest.es.Sel;
916}
917
918
919VMMDECL(RTSEL) CPUMGetGuestFS(PVMCPU pVCpu)
920{
921 return pVCpu->cpum.s.Guest.fs.Sel;
922}
923
924
925VMMDECL(RTSEL) CPUMGetGuestGS(PVMCPU pVCpu)
926{
927 return pVCpu->cpum.s.Guest.gs.Sel;
928}
929
930
931VMMDECL(RTSEL) CPUMGetGuestSS(PVMCPU pVCpu)
932{
933 return pVCpu->cpum.s.Guest.ss.Sel;
934}
935
936
937VMMDECL(uint64_t) CPUMGetGuestFlatPC(PVMCPU pVCpu)
938{
939 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
940 if ( !CPUMIsGuestInLongMode(pVCpu)
941 || !pVCpu->cpum.s.Guest.cs.Attr.n.u1Long)
942 return pVCpu->cpum.s.Guest.eip + (uint32_t)pVCpu->cpum.s.Guest.cs.u64Base;
943 return pVCpu->cpum.s.Guest.rip + pVCpu->cpum.s.Guest.cs.u64Base;
944}
945
946
947VMMDECL(uint64_t) CPUMGetGuestFlatSP(PVMCPU pVCpu)
948{
949 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss);
950 if ( !CPUMIsGuestInLongMode(pVCpu)
951 || !pVCpu->cpum.s.Guest.ss.Attr.n.u1Long)
952 return pVCpu->cpum.s.Guest.esp + (uint32_t)pVCpu->cpum.s.Guest.ss.u64Base;
953 return pVCpu->cpum.s.Guest.rsp + pVCpu->cpum.s.Guest.ss.u64Base;
954}
955
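/*
 * Illustrative note (not part of the original file): outside 64-bit mode the
 * flat stack pointer is the segment base plus the 32-bit offset, e.g. with
 * SS.base = 0x00010000 and ESP = 0x00001FF0 the flat address is 0x00011FF0;
 * in 64-bit code the SS base is architecturally treated as zero and RSP is
 * used directly.
 */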
956
957VMMDECL(RTSEL) CPUMGetGuestLDTR(PVMCPU pVCpu)
958{
959 return pVCpu->cpum.s.Guest.ldtr.Sel;
960}
961
962
963VMMDECL(RTSEL) CPUMGetGuestLdtrEx(PVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit)
964{
965 *pGCPtrBase = pVCpu->cpum.s.Guest.ldtr.u64Base;
966 *pcbLimit = pVCpu->cpum.s.Guest.ldtr.u32Limit;
967 return pVCpu->cpum.s.Guest.ldtr.Sel;
968}
969
970
971VMMDECL(uint64_t) CPUMGetGuestCR0(PVMCPU pVCpu)
972{
973 return pVCpu->cpum.s.Guest.cr0;
974}
975
976
977VMMDECL(uint64_t) CPUMGetGuestCR2(PVMCPU pVCpu)
978{
979 return pVCpu->cpum.s.Guest.cr2;
980}
981
982
983VMMDECL(uint64_t) CPUMGetGuestCR3(PVMCPU pVCpu)
984{
985 return pVCpu->cpum.s.Guest.cr3;
986}
987
988
989VMMDECL(uint64_t) CPUMGetGuestCR4(PVMCPU pVCpu)
990{
991 return pVCpu->cpum.s.Guest.cr4;
992}
993
994
995VMMDECL(uint64_t) CPUMGetGuestCR8(PVMCPU pVCpu)
996{
997 uint64_t u64;
998 int rc = CPUMGetGuestCRx(pVCpu, DISCREG_CR8, &u64);
999 if (RT_FAILURE(rc))
1000 u64 = 0;
1001 return u64;
1002}
1003
1004
1005VMMDECL(void) CPUMGetGuestGDTR(PVMCPU pVCpu, PVBOXGDTR pGDTR)
1006{
1007 *pGDTR = pVCpu->cpum.s.Guest.gdtr;
1008}
1009
1010
1011VMMDECL(uint32_t) CPUMGetGuestEIP(PVMCPU pVCpu)
1012{
1013 return pVCpu->cpum.s.Guest.eip;
1014}
1015
1016
1017VMMDECL(uint64_t) CPUMGetGuestRIP(PVMCPU pVCpu)
1018{
1019 return pVCpu->cpum.s.Guest.rip;
1020}
1021
1022
1023VMMDECL(uint32_t) CPUMGetGuestEAX(PVMCPU pVCpu)
1024{
1025 return pVCpu->cpum.s.Guest.eax;
1026}
1027
1028
1029VMMDECL(uint32_t) CPUMGetGuestEBX(PVMCPU pVCpu)
1030{
1031 return pVCpu->cpum.s.Guest.ebx;
1032}
1033
1034
1035VMMDECL(uint32_t) CPUMGetGuestECX(PVMCPU pVCpu)
1036{
1037 return pVCpu->cpum.s.Guest.ecx;
1038}
1039
1040
1041VMMDECL(uint32_t) CPUMGetGuestEDX(PVMCPU pVCpu)
1042{
1043 return pVCpu->cpum.s.Guest.edx;
1044}
1045
1046
1047VMMDECL(uint32_t) CPUMGetGuestESI(PVMCPU pVCpu)
1048{
1049 return pVCpu->cpum.s.Guest.esi;
1050}
1051
1052
1053VMMDECL(uint32_t) CPUMGetGuestEDI(PVMCPU pVCpu)
1054{
1055 return pVCpu->cpum.s.Guest.edi;
1056}
1057
1058
1059VMMDECL(uint32_t) CPUMGetGuestESP(PVMCPU pVCpu)
1060{
1061 return pVCpu->cpum.s.Guest.esp;
1062}
1063
1064
1065VMMDECL(uint32_t) CPUMGetGuestEBP(PVMCPU pVCpu)
1066{
1067 return pVCpu->cpum.s.Guest.ebp;
1068}
1069
1070
1071VMMDECL(uint32_t) CPUMGetGuestEFlags(PVMCPU pVCpu)
1072{
1073 return pVCpu->cpum.s.Guest.eflags.u32;
1074}
1075
1076
1077VMMDECL(int) CPUMGetGuestCRx(PVMCPU pVCpu, unsigned iReg, uint64_t *pValue)
1078{
1079 switch (iReg)
1080 {
1081 case DISCREG_CR0:
1082 *pValue = pVCpu->cpum.s.Guest.cr0;
1083 break;
1084
1085 case DISCREG_CR2:
1086 *pValue = pVCpu->cpum.s.Guest.cr2;
1087 break;
1088
1089 case DISCREG_CR3:
1090 *pValue = pVCpu->cpum.s.Guest.cr3;
1091 break;
1092
1093 case DISCREG_CR4:
1094 *pValue = pVCpu->cpum.s.Guest.cr4;
1095 break;
1096
1097 case DISCREG_CR8:
1098 {
1099 uint8_t u8Tpr;
1100 int rc = APICGetTpr(pVCpu, &u8Tpr, NULL /* pfPending */, NULL /* pu8PendingIrq */);
1101 if (RT_FAILURE(rc))
1102 {
1103 AssertMsg(rc == VERR_PDM_NO_APIC_INSTANCE, ("%Rrc\n", rc));
1104 *pValue = 0;
1105 return rc;
1106 }
1107 *pValue = u8Tpr >> 4; /* Bits 7-4 contain the task priority that goes into CR8; bits 3-0 are discarded. */
1108 break;
1109 }
1110
1111 default:
1112 return VERR_INVALID_PARAMETER;
1113 }
1114 return VINF_SUCCESS;
1115}
1116
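/*
 * Usage sketch (illustrative only, pVCpu assumed to be valid): reading CR8
 * goes through the APIC TPR rather than a stored register value.
 *
 *     uint64_t uCr8;
 *     int rc = CPUMGetGuestCRx(pVCpu, DISCREG_CR8, &uCr8);
 *     // On success uCr8 holds TPR bits 7-4, i.e. an APIC TPR of 0x50
 *     // reads back as CR8 = 5.
 */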
1117
1118VMMDECL(uint64_t) CPUMGetGuestDR0(PVMCPU pVCpu)
1119{
1120 return pVCpu->cpum.s.Guest.dr[0];
1121}
1122
1123
1124VMMDECL(uint64_t) CPUMGetGuestDR1(PVMCPU pVCpu)
1125{
1126 return pVCpu->cpum.s.Guest.dr[1];
1127}
1128
1129
1130VMMDECL(uint64_t) CPUMGetGuestDR2(PVMCPU pVCpu)
1131{
1132 return pVCpu->cpum.s.Guest.dr[2];
1133}
1134
1135
1136VMMDECL(uint64_t) CPUMGetGuestDR3(PVMCPU pVCpu)
1137{
1138 return pVCpu->cpum.s.Guest.dr[3];
1139}
1140
1141
1142VMMDECL(uint64_t) CPUMGetGuestDR6(PVMCPU pVCpu)
1143{
1144 return pVCpu->cpum.s.Guest.dr[6];
1145}
1146
1147
1148VMMDECL(uint64_t) CPUMGetGuestDR7(PVMCPU pVCpu)
1149{
1150 return pVCpu->cpum.s.Guest.dr[7];
1151}
1152
1153
1154VMMDECL(int) CPUMGetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t *pValue)
1155{
1156 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1157 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1158 if (iReg == 4 || iReg == 5)
1159 iReg += 2;
1160 *pValue = pVCpu->cpum.s.Guest.dr[iReg];
1161 return VINF_SUCCESS;
1162}
1163
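/*
 * Illustrative note (not part of the original file): DR4 and DR5 are aliases
 * of DR6 and DR7, so the following two reads return the same value:
 *
 *     uint64_t uDr7a, uDr7b;
 *     CPUMGetGuestDRx(pVCpu, 5, &uDr7a);   // alias, remapped to DR7
 *     CPUMGetGuestDRx(pVCpu, 7, &uDr7b);   // uDr7a == uDr7b
 */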
1164
1165VMMDECL(uint64_t) CPUMGetGuestEFER(PVMCPU pVCpu)
1166{
1167 return pVCpu->cpum.s.Guest.msrEFER;
1168}
1169
1170
1171/**
1172 * Looks up a CPUID leaf in the CPUID leaf array, no subleaf.
1173 *
1174 * @returns Pointer to the leaf if found, NULL if not.
1175 *
1176 * @param pVM The cross context VM structure.
1177 * @param uLeaf The leaf to get.
1178 */
1179PCPUMCPUIDLEAF cpumCpuIdGetLeaf(PVM pVM, uint32_t uLeaf)
1180{
1181 unsigned iEnd = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
1182 if (iEnd)
1183 {
1184 unsigned iStart = 0;
1185 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.CTX_SUFF(paCpuIdLeaves);
1186 for (;;)
1187 {
1188 unsigned i = iStart + (iEnd - iStart) / 2U;
1189 if (uLeaf < paLeaves[i].uLeaf)
1190 {
1191 if (i <= iStart)
1192 return NULL;
1193 iEnd = i;
1194 }
1195 else if (uLeaf > paLeaves[i].uLeaf)
1196 {
1197 i += 1;
1198 if (i >= iEnd)
1199 return NULL;
1200 iStart = i;
1201 }
1202 else
1203 {
1204 if (RT_LIKELY(paLeaves[i].fSubLeafMask == 0 && paLeaves[i].uSubLeaf == 0))
1205 return &paLeaves[i];
1206
1207 /* This shouldn't normally happen. But in case it does due
1208 to user configuration overrides or something, just return the
1209 first sub-leaf. */
1210 AssertMsgFailed(("uLeaf=%#x fSubLeafMask=%#x uSubLeaf=%#x\n",
1211 uLeaf, paLeaves[i].fSubLeafMask, paLeaves[i].uSubLeaf));
1212 while ( paLeaves[i].uSubLeaf != 0
1213 && i > 0
1214 && uLeaf == paLeaves[i - 1].uLeaf)
1215 i--;
1216 return &paLeaves[i];
1217 }
1218 }
1219 }
1220
1221 return NULL;
1222}
1223
1224
1225/**
1226 * Looks up a CPUID leaf in the CPUID leaf array.
1227 *
1228 * @returns Pointer to the leaf if found, NULL if not.
1229 *
1230 * @param pVM The cross context VM structure.
1231 * @param uLeaf The leaf to get.
1232 * @param uSubLeaf The subleaf, if applicable. Just pass 0 if it
1233 * isn't.
1234 * @param pfExactSubLeafHit Whether we've got an exact subleaf hit or not.
1235 */
1236PCPUMCPUIDLEAF cpumCpuIdGetLeafEx(PVM pVM, uint32_t uLeaf, uint32_t uSubLeaf, bool *pfExactSubLeafHit)
1237{
1238 unsigned iEnd = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
1239 if (iEnd)
1240 {
1241 unsigned iStart = 0;
1242 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.CTX_SUFF(paCpuIdLeaves);
1243 for (;;)
1244 {
1245 unsigned i = iStart + (iEnd - iStart) / 2U;
1246 if (uLeaf < paLeaves[i].uLeaf)
1247 {
1248 if (i <= iStart)
1249 return NULL;
1250 iEnd = i;
1251 }
1252 else if (uLeaf > paLeaves[i].uLeaf)
1253 {
1254 i += 1;
1255 if (i >= iEnd)
1256 return NULL;
1257 iStart = i;
1258 }
1259 else
1260 {
1261 uSubLeaf &= paLeaves[i].fSubLeafMask;
1262 if (uSubLeaf == paLeaves[i].uSubLeaf)
1263 *pfExactSubLeafHit = true;
1264 else
1265 {
1266 /* Find the right subleaf. We return the last one before
1267 uSubLeaf if we don't find an exact match. */
1268 if (uSubLeaf < paLeaves[i].uSubLeaf)
1269 while ( i > 0
1270 && uLeaf == paLeaves[i - 1].uLeaf
1271 && uSubLeaf <= paLeaves[i - 1].uSubLeaf)
1272 i--;
1273 else
1274 while ( i + 1 < pVM->cpum.s.GuestInfo.cCpuIdLeaves
1275 && uLeaf == paLeaves[i + 1].uLeaf
1276 && uSubLeaf >= paLeaves[i + 1].uSubLeaf)
1277 i++;
1278 *pfExactSubLeafHit = uSubLeaf == paLeaves[i].uSubLeaf;
1279 }
1280 return &paLeaves[i];
1281 }
1282 }
1283 }
1284
1285 *pfExactSubLeafHit = false;
1286 return NULL;
1287}
1288
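/*
 * Illustrative note (not part of the original file): the leaf array is kept
 * sorted by (uLeaf, uSubLeaf), which is what makes the binary search above
 * work. The requested sub-leaf is first masked by fSubLeafMask; if the
 * masked value has no exact entry, the nearest existing sub-leaf of that
 * leaf is returned and *pfExactSubLeafHit is set to false. A hypothetical
 * example, assuming leaf 0xd is registered with sub-leaves 0..9 only:
 *
 *     bool fExact;
 *     PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeafEx(pVM, 0xd, 63, &fExact);
 *     // pLeaf points at the closest 0xd sub-leaf entry, fExact is false.
 */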
1289
1290/**
1291 * Gets a CPUID leaf.
1292 *
1293 * @param pVCpu The cross context virtual CPU structure.
1294 * @param uLeaf The CPUID leaf to get.
1295 * @param uSubLeaf The CPUID sub-leaf to get, if applicable.
1296 * @param pEax Where to store the EAX value.
1297 * @param pEbx Where to store the EBX value.
1298 * @param pEcx Where to store the ECX value.
1299 * @param pEdx Where to store the EDX value.
1300 */
1301VMMDECL(void) CPUMGetGuestCpuId(PVMCPU pVCpu, uint32_t uLeaf, uint32_t uSubLeaf,
1302 uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
1303{
1304 bool fExactSubLeafHit;
1305 PVM pVM = pVCpu->CTX_SUFF(pVM);
1306 PCCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeafEx(pVM, uLeaf, uSubLeaf, &fExactSubLeafHit);
1307 if (pLeaf)
1308 {
1309 AssertMsg(pLeaf->uLeaf == uLeaf, ("%#x %#x\n", pLeaf->uLeaf, uLeaf));
1310 if (fExactSubLeafHit)
1311 {
1312 *pEax = pLeaf->uEax;
1313 *pEbx = pLeaf->uEbx;
1314 *pEcx = pLeaf->uEcx;
1315 *pEdx = pLeaf->uEdx;
1316
1317 /*
1318 * Deal with CPU specific information.
1319 */
1320 if (pLeaf->fFlags & ( CPUMCPUIDLEAF_F_CONTAINS_APIC_ID
1321 | CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE
1322 | CPUMCPUIDLEAF_F_CONTAINS_APIC ))
1323 {
1324 if (uLeaf == 1)
1325 {
1326 /* EBX: Bits 31-24: Initial APIC ID. */
1327 Assert(pVCpu->idCpu <= 255);
1328 AssertMsg((pLeaf->uEbx >> 24) == 0, ("%#x\n", pLeaf->uEbx)); /* raw-mode assumption */
1329 *pEbx = (pLeaf->uEbx & UINT32_C(0x00ffffff)) | (pVCpu->idCpu << 24);
1330
1331 /* EDX: Bit 9: AND with APICBASE.EN. */
1332 if (!pVCpu->cpum.s.fCpuIdApicFeatureVisible && (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
1333 *pEdx &= ~X86_CPUID_FEATURE_EDX_APIC;
1334
1335 /* ECX: Bit 27: CR4.OSXSAVE mirror. */
1336 *pEcx = (pLeaf->uEcx & ~X86_CPUID_FEATURE_ECX_OSXSAVE)
1337 | (pVCpu->cpum.s.Guest.cr4 & X86_CR4_OSXSAVE ? X86_CPUID_FEATURE_ECX_OSXSAVE : 0);
1338 }
1339 else if (uLeaf == 0xb)
1340 {
1341 /* EDX: Initial extended APIC ID. */
1342 AssertMsg(pLeaf->uEdx == 0, ("%#x\n", pLeaf->uEdx)); /* raw-mode assumption */
1343 *pEdx = pVCpu->idCpu;
1344 Assert(!(pLeaf->fFlags & ~(CPUMCPUIDLEAF_F_CONTAINS_APIC_ID | CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES)));
1345 }
1346 else if (uLeaf == UINT32_C(0x8000001e))
1347 {
1348 /* EAX: Initial extended APIC ID. */
1349 AssertMsg(pLeaf->uEax == 0, ("%#x\n", pLeaf->uEax)); /* raw-mode assumption */
1350 *pEax = pVCpu->idCpu;
1351 Assert(!(pLeaf->fFlags & ~CPUMCPUIDLEAF_F_CONTAINS_APIC_ID));
1352 }
1353 else if (uLeaf == UINT32_C(0x80000001))
1354 {
1355 /* EDX: Bit 9: AND with APICBASE.EN. */
1356 if (!pVCpu->cpum.s.fCpuIdApicFeatureVisible)
1357 *pEdx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
1358 Assert(!(pLeaf->fFlags & ~CPUMCPUIDLEAF_F_CONTAINS_APIC));
1359 }
1360 else
1361 AssertMsgFailed(("uLeaf=%#x\n", uLeaf));
1362 }
1363 }
1364 /*
1365 * Out-of-range sub-leaves aren't quite as easy and pretty to emulate,
1366 * but we do the best we can here...
1367 */
1368 else
1369 {
1370 *pEax = *pEbx = *pEcx = *pEdx = 0;
1371 if (pLeaf->fFlags & CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES)
1372 {
1373 *pEcx = uSubLeaf & 0xff;
1374 *pEdx = pVCpu->idCpu;
1375 }
1376 }
1377 }
1378 else
1379 {
1380 /*
1381 * Different CPUs have different ways of dealing with unknown CPUID leaves.
1382 */
1383 switch (pVM->cpum.s.GuestInfo.enmUnknownCpuIdMethod)
1384 {
1385 default:
1386 AssertFailed();
1387 RT_FALL_THRU();
1388 case CPUMUNKNOWNCPUID_DEFAULTS:
1389 case CPUMUNKNOWNCPUID_LAST_STD_LEAF: /* ASSUME this is executed */
1390 case CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX: /** @todo Implement CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX */
1391 *pEax = pVM->cpum.s.GuestInfo.DefCpuId.uEax;
1392 *pEbx = pVM->cpum.s.GuestInfo.DefCpuId.uEbx;
1393 *pEcx = pVM->cpum.s.GuestInfo.DefCpuId.uEcx;
1394 *pEdx = pVM->cpum.s.GuestInfo.DefCpuId.uEdx;
1395 break;
1396 case CPUMUNKNOWNCPUID_PASSTHRU:
1397 *pEax = uLeaf;
1398 *pEbx = 0;
1399 *pEcx = uSubLeaf;
1400 *pEdx = 0;
1401 break;
1402 }
1403 }
1404 Log2(("CPUMGetGuestCpuId: uLeaf=%#010x/%#010x %RX32 %RX32 %RX32 %RX32\n", uLeaf, uSubLeaf, *pEax, *pEbx, *pEcx, *pEdx));
1405}
1406
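/*
 * Usage sketch (illustrative only, pVCpu assumed to be the calling EMT):
 * querying leaf 1 returns the per-VCPU adjusted values, i.e. EBX[31:24] is
 * patched to the virtual CPU id and the APIC/OSXSAVE feature bits reflect
 * the current APICBASE.EN and CR4.OSXSAVE state.
 *
 *     uint32_t uEax, uEbx, uEcx, uEdx;
 *     CPUMGetGuestCpuId(pVCpu, 1, 0, &uEax, &uEbx, &uEcx, &uEdx);
 *     uint8_t idApic  = uEbx >> 24;                                   // == pVCpu->idCpu
 *     bool    fApicOn = (uEdx & X86_CPUID_FEATURE_EDX_APIC) != 0;
 */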
1407
1408/**
1409 * Sets the visibility of the X86_CPUID_FEATURE_EDX_APIC and
1410 * X86_CPUID_AMD_FEATURE_EDX_APIC CPUID bits.
1411 *
1412 * @returns Previous value.
1413 * @param pVCpu The cross context virtual CPU structure to make the
1414 * change on. Usually the calling EMT.
1415 * @param fVisible Whether to make it visible (true) or hide it (false).
1416 *
1417 * @remarks This is "VMMDECL" so that it still links with
1418 * the old APIC code which is in VBoxDD2 and not in
1419 * the VMM module.
1420 */
1421VMMDECL(bool) CPUMSetGuestCpuIdPerCpuApicFeature(PVMCPU pVCpu, bool fVisible)
1422{
1423 bool fOld = pVCpu->cpum.s.fCpuIdApicFeatureVisible;
1424 pVCpu->cpum.s.fCpuIdApicFeatureVisible = fVisible;
1425
1426#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1427 /*
1428 * Patch manager saved state legacy pain.
1429 */
1430 PVM pVM = pVCpu->CTX_SUFF(pVM);
1431 PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001));
1432 if (pLeaf)
1433 {
1434 if (fVisible || (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
1435 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx;
1436 else
1437 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx & ~X86_CPUID_FEATURE_EDX_APIC;
1438 }
1439
1440 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001));
1441 if (pLeaf)
1442 {
1443 if (fVisible || (pLeaf->fFlags & CPUMCPUIDLEAF_F_CONTAINS_APIC))
1444 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx;
1445 else
1446 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx & ~X86_CPUID_AMD_FEATURE_EDX_APIC;
1447 }
1448#endif
1449
1450 return fOld;
1451}
1452
1453
1454/**
1455 * Gets the host CPU vendor.
1456 *
1457 * @returns CPU vendor.
1458 * @param pVM The cross context VM structure.
1459 */
1460VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
1461{
1462 return (CPUMCPUVENDOR)pVM->cpum.s.HostFeatures.enmCpuVendor;
1463}
1464
1465
1466/**
1467 * Gets the CPU vendor.
1468 *
1469 * @returns CPU vendor.
1470 * @param pVM The cross context VM structure.
1471 */
1472VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
1473{
1474 return (CPUMCPUVENDOR)pVM->cpum.s.GuestFeatures.enmCpuVendor;
1475}
1476
1477
1478VMMDECL(int) CPUMSetGuestDR0(PVMCPU pVCpu, uint64_t uDr0)
1479{
1480 pVCpu->cpum.s.Guest.dr[0] = uDr0;
1481 return CPUMRecalcHyperDRx(pVCpu, 0, false);
1482}
1483
1484
1485VMMDECL(int) CPUMSetGuestDR1(PVMCPU pVCpu, uint64_t uDr1)
1486{
1487 pVCpu->cpum.s.Guest.dr[1] = uDr1;
1488 return CPUMRecalcHyperDRx(pVCpu, 1, false);
1489}
1490
1491
1492VMMDECL(int) CPUMSetGuestDR2(PVMCPU pVCpu, uint64_t uDr2)
1493{
1494 pVCpu->cpum.s.Guest.dr[2] = uDr2;
1495 return CPUMRecalcHyperDRx(pVCpu, 2, false);
1496}
1497
1498
1499VMMDECL(int) CPUMSetGuestDR3(PVMCPU pVCpu, uint64_t uDr3)
1500{
1501 pVCpu->cpum.s.Guest.dr[3] = uDr3;
1502 return CPUMRecalcHyperDRx(pVCpu, 3, false);
1503}
1504
1505
1506VMMDECL(int) CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6)
1507{
1508 pVCpu->cpum.s.Guest.dr[6] = uDr6;
1509 return VINF_SUCCESS; /* No need to recalc. */
1510}
1511
1512
1513VMMDECL(int) CPUMSetGuestDR7(PVMCPU pVCpu, uint64_t uDr7)
1514{
1515 pVCpu->cpum.s.Guest.dr[7] = uDr7;
1516 return CPUMRecalcHyperDRx(pVCpu, 7, false);
1517}
1518
1519
1520VMMDECL(int) CPUMSetGuestDRx(PVMCPU pVCpu, uint32_t iReg, uint64_t Value)
1521{
1522 AssertReturn(iReg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1523 /* DR4 is an alias for DR6, and DR5 is an alias for DR7. */
1524 if (iReg == 4 || iReg == 5)
1525 iReg += 2;
1526 pVCpu->cpum.s.Guest.dr[iReg] = Value;
1527 return CPUMRecalcHyperDRx(pVCpu, iReg, false);
1528}
1529
1530
1531/**
1532 * Recalculates the hypervisor DRx register values based on current guest
1533 * registers and DBGF breakpoints, updating changed registers depending on the
1534 * context.
1535 *
1536 * This is called whenever a guest DRx register is modified (any context) and
1537 * when DBGF sets a hardware breakpoint (ring-3 only, rendezvous).
1538 *
1539 * In raw-mode context this function will reload any (hyper) DRx registers which
1540 * come out with a different value. It may also have to save the host debug
1541 * registers if that hasn't been done already. In this context though, we'll
1542 * be intercepting and emulating all DRx accesses, so the hypervisor DRx values
1543 * are only important when breakpoints are actually enabled.
1544 *
1545 * In ring-0 (HM) context DR0-3 will be relocated by us, while DR7 will be
1546 * reloaded by the HM code if it changes. Furthermore, we will only use the
1547 * combined register set when the VBox debugger is actually using hardware BPs;
1548 * when it isn't, we'll keep the guest DR0-3 + (maybe) DR6 loaded (DR6 doesn't
1549 * concern us here).
1550 *
1551 * In ring-3 we won't be loading anything, so we'll calculate hypervisor values
1552 * all the time.
1553 *
1554 * @returns VINF_SUCCESS.
1555 * @param pVCpu The cross context virtual CPU structure.
1556 * @param iGstReg The guest debug register number that was modified.
1557 * UINT8_MAX if not a guest register.
1558 * @param fForceHyper Used in HM to force hyper registers because of single
1559 * stepping.
1560 */
1561VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu, uint8_t iGstReg, bool fForceHyper)
1562{
1563 PVM pVM = pVCpu->CTX_SUFF(pVM);
1564#ifndef IN_RING0
1565 RT_NOREF_PV(iGstReg);
1566#endif
1567
1568 /*
1569 * Compare the DR7s first.
1570 *
1571 * We only care about the enabled flags. GD is virtualized when we
1572 * dispatch the #DB; we never enable it. The DBGF DR7 value will
1573 * always have the LE and GE bits set, so no need to check and disable
1574 * stuff if they're cleared like we have to for the guest DR7.
1575 */
1576 RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
1577 /** @todo This isn't correct. BPs work without setting LE and GE under AMD-V. They are also documented as unsupported by P6+. */
1578 if (!(uGstDr7 & (X86_DR7_LE | X86_DR7_GE)))
1579 uGstDr7 = 0;
1580 else if (!(uGstDr7 & X86_DR7_LE))
1581 uGstDr7 &= ~X86_DR7_LE_ALL;
1582 else if (!(uGstDr7 & X86_DR7_GE))
1583 uGstDr7 &= ~X86_DR7_GE_ALL;
1584
1585 const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
1586
1587#ifdef IN_RING0
1588 if (!fForceHyper && (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER))
1589 fForceHyper = true;
1590#endif
1591 if ( (!VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM)) && !fForceHyper ? uDbgfDr7 : (uGstDr7 | uDbgfDr7))
1592 & X86_DR7_ENABLED_MASK)
1593 {
1594 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
1595#ifdef IN_RC
1596 bool const fRawModeEnabled = true;
1597#elif defined(IN_RING3)
1598 bool const fRawModeEnabled = VM_IS_RAW_MODE_ENABLED(pVM);
1599#endif
1600
1601 /*
1602 * Ok, something is enabled. Recalc each of the breakpoints, taking
1603 * the VM debugger ones over the guest ones. In raw-mode context we will
1604 * not allow breakpoints with values inside the hypervisor area.
1605 */
1606 RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_RA1_MASK;
1607
1608 /* bp 0 */
1609 RTGCUINTREG uNewDr0;
1610 if (uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0))
1611 {
1612 uNewDr7 |= uDbgfDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1613 uNewDr0 = DBGFBpGetDR0(pVM);
1614 }
1615 else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
1616 {
1617 uNewDr0 = CPUMGetGuestDR0(pVCpu);
1618#ifndef IN_RING0
1619 if (fRawModeEnabled && MMHyperIsInsideArea(pVM, uNewDr0))
1620 uNewDr0 = 0;
1621 else
1622#endif
1623 uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
1624 }
1625 else
1626 uNewDr0 = 0;
1627
1628 /* bp 1 */
1629 RTGCUINTREG uNewDr1;
1630 if (uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1))
1631 {
1632 uNewDr7 |= uDbgfDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1633 uNewDr1 = DBGFBpGetDR1(pVM);
1634 }
1635 else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
1636 {
1637 uNewDr1 = CPUMGetGuestDR1(pVCpu);
1638#ifndef IN_RING0
1639 if (fRawModeEnabled && MMHyperIsInsideArea(pVM, uNewDr1))
1640 uNewDr1 = 0;
1641 else
1642#endif
1643 uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
1644 }
1645 else
1646 uNewDr1 = 0;
1647
1648 /* bp 2 */
1649 RTGCUINTREG uNewDr2;
1650 if (uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2))
1651 {
1652 uNewDr7 |= uDbgfDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1653 uNewDr2 = DBGFBpGetDR2(pVM);
1654 }
1655 else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
1656 {
1657 uNewDr2 = CPUMGetGuestDR2(pVCpu);
1658#ifndef IN_RING0
1659 if (fRawModeEnabled && MMHyperIsInsideArea(pVM, uNewDr2))
1660 uNewDr2 = 0;
1661 else
1662#endif
1663 uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
1664 }
1665 else
1666 uNewDr2 = 0;
1667
1668 /* bp 3 */
1669 RTGCUINTREG uNewDr3;
1670 if (uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3))
1671 {
1672 uNewDr7 |= uDbgfDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1673 uNewDr3 = DBGFBpGetDR3(pVM);
1674 }
1675 else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
1676 {
1677 uNewDr3 = CPUMGetGuestDR3(pVCpu);
1678#ifndef IN_RING0
1679 if (fRawModeEnabled && MMHyperIsInsideArea(pVM, uNewDr3))
1680 uNewDr3 = 0;
1681 else
1682#endif
1683 uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
1684 }
1685 else
1686 uNewDr3 = 0;
1687
1688 /*
1689 * Apply the updates.
1690 */
1691#ifdef IN_RC
1692 /* Make sure to save host registers first. */
1693 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HOST))
1694 {
1695 if (!(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS_HOST))
1696 {
1697 pVCpu->cpum.s.Host.dr6 = ASMGetDR6();
1698 pVCpu->cpum.s.Host.dr7 = ASMGetDR7();
1699 }
1700 pVCpu->cpum.s.Host.dr0 = ASMGetDR0();
1701 pVCpu->cpum.s.Host.dr1 = ASMGetDR1();
1702 pVCpu->cpum.s.Host.dr2 = ASMGetDR2();
1703 pVCpu->cpum.s.Host.dr3 = ASMGetDR3();
1704 pVCpu->cpum.s.fUseFlags |= CPUM_USED_DEBUG_REGS_HOST | CPUM_USE_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HYPER;
1705
1706 /* We haven't loaded any hyper DRxes yet, so we'll have to load them all now. */
1707 pVCpu->cpum.s.Hyper.dr[0] = uNewDr0;
1708 ASMSetDR0(uNewDr0);
1709 pVCpu->cpum.s.Hyper.dr[1] = uNewDr1;
1710 ASMSetDR1(uNewDr1);
1711 pVCpu->cpum.s.Hyper.dr[2] = uNewDr2;
1712 ASMSetDR2(uNewDr2);
1713 pVCpu->cpum.s.Hyper.dr[3] = uNewDr3;
1714 ASMSetDR3(uNewDr3);
1715 ASMSetDR6(X86_DR6_INIT_VAL);
1716 pVCpu->cpum.s.Hyper.dr[7] = uNewDr7;
1717 ASMSetDR7(uNewDr7);
1718 }
1719 else
1720#endif
1721 {
1722 pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HYPER;
1723 if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
1724 CPUMSetHyperDR3(pVCpu, uNewDr3);
1725 if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
1726 CPUMSetHyperDR2(pVCpu, uNewDr2);
1727 if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
1728 CPUMSetHyperDR1(pVCpu, uNewDr1);
1729 if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
1730 CPUMSetHyperDR0(pVCpu, uNewDr0);
1731 if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
1732 CPUMSetHyperDR7(pVCpu, uNewDr7);
1733 }
1734 }
1735#ifdef IN_RING0
1736 else if (CPUMIsGuestDebugStateActive(pVCpu))
1737 {
1738 /*
1739 * Reload the register that was modified. Normally this won't happen
1740 * as we won't intercept DRx writes when not having the hyper debug
1741 * state loaded, but in case we do for some reason we'll simply deal
1742 * with it.
1743 */
1744 switch (iGstReg)
1745 {
1746 case 0: ASMSetDR0(CPUMGetGuestDR0(pVCpu)); break;
1747 case 1: ASMSetDR1(CPUMGetGuestDR1(pVCpu)); break;
1748 case 2: ASMSetDR2(CPUMGetGuestDR2(pVCpu)); break;
1749 case 3: ASMSetDR3(CPUMGetGuestDR3(pVCpu)); break;
1750 default:
1751 AssertReturn(iGstReg != UINT8_MAX, VERR_INTERNAL_ERROR_3);
1752 }
1753 }
1754#endif
1755 else
1756 {
1757 /*
1758 * No active debug state any more. In raw-mode this means we have to
1759 * make sure DR7 has everything disabled now, if we armed it already.
1760 * In ring-0 we might end up here when just single stepping.
1761 */
1762#if defined(IN_RC) || defined(IN_RING0)
1763 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER)
1764 {
1765# ifdef IN_RC
1766 ASMSetDR7(X86_DR7_INIT_VAL);
1767# endif
1768 if (pVCpu->cpum.s.Hyper.dr[0])
1769 ASMSetDR0(0);
1770 if (pVCpu->cpum.s.Hyper.dr[1])
1771 ASMSetDR1(0);
1772 if (pVCpu->cpum.s.Hyper.dr[2])
1773 ASMSetDR2(0);
1774 if (pVCpu->cpum.s.Hyper.dr[3])
1775 ASMSetDR3(0);
1776 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_DEBUG_REGS_HYPER;
1777 }
1778#endif
1779 pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
1780
1781 /* Clear all the registers. */
1782 pVCpu->cpum.s.Hyper.dr[7] = X86_DR7_RA1_MASK;
1783 pVCpu->cpum.s.Hyper.dr[3] = 0;
1784 pVCpu->cpum.s.Hyper.dr[2] = 0;
1785 pVCpu->cpum.s.Hyper.dr[1] = 0;
1786 pVCpu->cpum.s.Hyper.dr[0] = 0;
1787
1788 }
1789 Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr %RGr %RGr\n",
1790 pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
1791 pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
1792 pVCpu->cpum.s.Hyper.dr[7]));
1793
1794 return VINF_SUCCESS;
1795}
1796
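/*
 * Illustrative note (not part of the original file): for each breakpoint slot
 * the DBGF (VM debugger) settings win over the guest settings, so arming a
 * hardware breakpoint from ring-3 and then recalculating gives the DBGF
 * value priority in the hyper set:
 *
 *     // (sketch, ring-3, after DBGF has armed hardware breakpoint 0)
 *     CPUMRecalcHyperDRx(pVCpu, UINT8_MAX, false);
 *     // pVCpu->cpum.s.Hyper.dr[0] == DBGFBpGetDR0(pVM) and the L0/G0 bits
 *     // from DBGF's DR7 are merged into Hyper.dr[7].
 */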
1797
1798/**
1799 * Set the guest XCR0 register.
1800 *
1801 * Will load additional state if the FPU state is already loaded (in ring-0 &
1802 * raw-mode context).
1803 *
1804 * @returns VINF_SUCCESS on success, VERR_CPUM_RAISE_GP_0 on invalid input
1805 * value.
1806 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1807 * @param uNewValue The new value.
1808 * @thread EMT(pVCpu)
1809 */
1810VMM_INT_DECL(int) CPUMSetGuestXcr0(PVMCPU pVCpu, uint64_t uNewValue)
1811{
1812 if ( (uNewValue & ~pVCpu->CTX_SUFF(pVM)->cpum.s.fXStateGuestMask) == 0
1813 /* The X87 bit cannot be cleared. */
1814 && (uNewValue & XSAVE_C_X87)
1815 /* AVX requires SSE. */
1816 && (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM)) != XSAVE_C_YMM
1817 /* AVX-512 requires YMM, SSE and all of its three components to be enabled. */
1818 && ( (uNewValue & (XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI)) == 0
1819 || (uNewValue & (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI))
1820 == (XSAVE_C_SSE | XSAVE_C_YMM | XSAVE_C_OPMASK | XSAVE_C_ZMM_HI256 | XSAVE_C_ZMM_16HI) )
1821 )
1822 {
1823 pVCpu->cpum.s.Guest.aXcr[0] = uNewValue;
1824
1825 /* If more state components are enabled, we need to take care to load
1826 them if the FPU/SSE state is already loaded. We may otherwise leak
1827 host state to the guest. */
1828 uint64_t fNewComponents = ~pVCpu->cpum.s.Guest.fXStateMask & uNewValue;
1829 if (fNewComponents)
1830 {
1831#if defined(IN_RING0) || defined(IN_RC)
1832 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST)
1833 {
1834 if (pVCpu->cpum.s.Guest.fXStateMask != 0)
1835 /* Adding more components. */
1836 ASMXRstor(pVCpu->cpum.s.Guest.CTX_SUFF(pXState), fNewComponents);
1837 else
1838 {
1839 /* We're switching from FXSAVE/FXRSTOR to XSAVE/XRSTOR. */
1840 pVCpu->cpum.s.Guest.fXStateMask |= XSAVE_C_X87 | XSAVE_C_SSE;
1841 if (uNewValue & ~(XSAVE_C_X87 | XSAVE_C_SSE))
1842 ASMXRstor(pVCpu->cpum.s.Guest.CTX_SUFF(pXState), uNewValue & ~(XSAVE_C_X87 | XSAVE_C_SSE));
1843 }
1844 }
1845#endif
1846 pVCpu->cpum.s.Guest.fXStateMask |= uNewValue;
1847 }
1848 return VINF_SUCCESS;
1849 }
1850 return VERR_CPUM_RAISE_GP_0;
1851}
1852
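/*
 * Usage sketch (illustrative only): the validation above mirrors the
 * architectural XSETBV rules, so for example:
 *
 *     CPUMSetGuestXcr0(pVCpu, XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM);
 *         // VINF_SUCCESS (provided the VM's fXStateGuestMask allows YMM)
 *     CPUMSetGuestXcr0(pVCpu, XSAVE_C_YMM);
 *         // VERR_CPUM_RAISE_GP_0: X87 cannot be cleared and YMM requires SSE.
 */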
1853
1854/**
1855 * Tests if the guest has No-Execute Page Protection Enabled (NXE).
1856 *
1857 * @returns true if NXE is enabled, otherwise false.
1858 * @param pVCpu The cross context virtual CPU structure.
1859 */
1860VMMDECL(bool) CPUMIsGuestNXEnabled(PVMCPU pVCpu)
1861{
1862 return !!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE);
1863}
1864
1865
1866/**
1867 * Tests if the guest has the Page Size Extension enabled (PSE).
1868 *
1869 * @returns true if page size extensions are enabled, otherwise false.
1870 * @param pVCpu The cross context virtual CPU structure.
1871 */
1872VMMDECL(bool) CPUMIsGuestPageSizeExtEnabled(PVMCPU pVCpu)
1873{
1874 /* PAE or AMD64 implies support for big pages regardless of CR4.PSE */
1875 return !!(pVCpu->cpum.s.Guest.cr4 & (X86_CR4_PSE | X86_CR4_PAE));
1876}
1877
1878
1879/**
1880 * Tests if the guest has paging enabled (PG).
1881 *
1882 * @returns true if paging is enabled, otherwise false.
1883 * @param pVCpu The cross context virtual CPU structure.
1884 */
1885VMMDECL(bool) CPUMIsGuestPagingEnabled(PVMCPU pVCpu)
1886{
1887 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG);
1888}
1889
1890
1891/**
1892 * Tests if the guest has ring-0 write protection enabled (CR0.WP).
1893 *
1894 * @returns true if CR0.WP is set, otherwise false.
1895 * @param pVCpu The cross context virtual CPU structure.
1896 */
1897VMMDECL(bool) CPUMIsGuestR0WriteProtEnabled(PVMCPU pVCpu)
1898{
1899 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_WP);
1900}
1901
1902
1903/**
1904 * Tests if the guest is running in real mode or not.
1905 *
1906 * @returns true if in real mode, otherwise false.
1907 * @param pVCpu The cross context virtual CPU structure.
1908 */
1909VMMDECL(bool) CPUMIsGuestInRealMode(PVMCPU pVCpu)
1910{
1911 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
1912}
1913
1914
1915/**
1916 * Tests if the guest is running in real or virtual 8086 mode.
1917 *
1918 * @returns @c true if it is, @c false if not.
1919 * @param pVCpu The cross context virtual CPU structure.
1920 */
1921VMMDECL(bool) CPUMIsGuestInRealOrV86Mode(PVMCPU pVCpu)
1922{
1923 return !(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
1924 || pVCpu->cpum.s.Guest.eflags.Bits.u1VM; /** @todo verify that this cannot be set in long mode. */
1925}
1926
1927
1928/**
1929 * Tests if the guest is running in protected mode or not.
1930 *
1931 * @returns true if in protected mode, otherwise false.
1932 * @param pVCpu The cross context virtual CPU structure.
1933 */
1934VMMDECL(bool) CPUMIsGuestInProtectedMode(PVMCPU pVCpu)
1935{
1936 return !!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE);
1937}
1938
1939
1940/**
1941 * Tests if the guest is running in paged protected or not.
1942 *
1943 * @returns true if in paged protected mode, otherwise false.
1944 * @param pVCpu The cross context virtual CPU structure.
1945 */
1946VMMDECL(bool) CPUMIsGuestInPagedProtectedMode(PVMCPU pVCpu)
1947{
1948 return (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
1949}
1950
1951
1952/**
1953 * Tests if the guest is running in long mode or not.
1954 *
1955 * @returns true if in long mode, otherwise false.
1956 * @param pVCpu The cross context virtual CPU structure.
1957 */
1958VMMDECL(bool) CPUMIsGuestInLongMode(PVMCPU pVCpu)
1959{
1960 return (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
1961}
1962
1963
1964/**
1965 * Tests if the guest is running in PAE mode or not.
1966 *
1967 * @returns true if in PAE mode, otherwise false.
1968 * @param pVCpu The cross context virtual CPU structure.
1969 */
1970VMMDECL(bool) CPUMIsGuestInPAEMode(PVMCPU pVCpu)
1971{
1972 /* Intel mentions EFER.LMA and EFER.LME in different parts of their spec. We shall use EFER.LMA rather
1973 than EFER.LME as it reflects if the CPU has entered paging with EFER.LME set. */
1974 return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
1975 && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG)
1976 && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA);
1977}
1978
1979
1980/**
1981 * Tests if the guest is running in 64-bit mode or not.
1982 *
1983 * @returns true if in 64-bit protected mode, otherwise false.
1984 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1985 */
1986VMMDECL(bool) CPUMIsGuestIn64BitCode(PVMCPU pVCpu)
1987{
1988 if (!CPUMIsGuestInLongMode(pVCpu))
1989 return false;
1990 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
1991 return pVCpu->cpum.s.Guest.cs.Attr.n.u1Long;
1992}
1993
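/*
 * Illustrative note (not part of the original file): long mode (EFER.LMA)
 * alone is not enough for 64-bit code, the current CS must also have the L
 * bit set; a 64-bit guest running 32-bit compatibility-mode code therefore
 * gives:
 *
 *     CPUMIsGuestInLongMode(pVCpu);   // true  (EFER.LMA set)
 *     CPUMIsGuestIn64BitCode(pVCpu);  // false (CS.Attr.n.u1Long clear)
 */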
1994
1995/**
1996 * Helper for CPUMIsGuestIn64BitCodeEx that handles lazy resolving of hidden CS
1997 * registers.
1998 *
1999 * @returns true if executing 64-bit code, otherwise false.
2000 * @param pCtx Pointer to the current guest CPU context.
2001 */
2002VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCPUMCTX pCtx)
2003{
2004 return CPUMIsGuestIn64BitCode(CPUM_GUEST_CTX_TO_VMCPU(pCtx));
2005}
2006
2007#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2008
2009/**
2010 * Tests whether the guest state has been transformed for raw-mode execution.
 *
2011 * @returns @c true if we've entered raw-mode and selectors with RPL=1 are
2012 * really RPL=0, @c false if we've not (RPL=1 really is RPL=1).
2013 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2014 */
2015VMM_INT_DECL(bool) CPUMIsGuestInRawMode(PVMCPU pVCpu)
2016{
2017 return pVCpu->cpum.s.fRawEntered;
2018}
2019
2020/**
2021 * Transforms the guest CPU state to raw-ring mode.
2022 *
2023 * This function will change any of the CS and SS selectors with RPL=0 to RPL=1.
2024 *
2025 * @returns VBox status code. (recompiler failure)
2026 * @param pVCpu The cross context virtual CPU structure.
2027 * @see @ref pg_raw
2028 */
2029VMM_INT_DECL(int) CPUMRawEnter(PVMCPU pVCpu)
2030{
2031 PVM pVM = pVCpu->CTX_SUFF(pVM);
2032
2033 Assert(!pVCpu->cpum.s.fRawEntered);
2034 Assert(!pVCpu->cpum.s.fRemEntered);
2035 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2036
2037 /*
2038 * Are we in Ring-0?
2039 */
2040 if ( pCtx->ss.Sel
2041 && (pCtx->ss.Sel & X86_SEL_RPL) == 0
2042 && !pCtx->eflags.Bits.u1VM)
2043 {
2044 /*
2045 * Enter execution mode.
2046 */
2047 PATMRawEnter(pVM, pCtx);
2048
2049 /*
2050 * Set CPL to Ring-1.
2051 */
2052 pCtx->ss.Sel |= 1;
2053 if ( pCtx->cs.Sel
2054 && (pCtx->cs.Sel & X86_SEL_RPL) == 0)
2055 pCtx->cs.Sel |= 1;
2056 }
2057 else
2058 {
2059# ifdef VBOX_WITH_RAW_RING1
2060 if ( EMIsRawRing1Enabled(pVM)
2061 && !pCtx->eflags.Bits.u1VM
2062 && (pCtx->ss.Sel & X86_SEL_RPL) == 1)
2063 {
2064 /* Set CPL to Ring-2. */
2065 pCtx->ss.Sel = (pCtx->ss.Sel & ~X86_SEL_RPL) | 2;
2066 if (pCtx->cs.Sel && (pCtx->cs.Sel & X86_SEL_RPL) == 1)
2067 pCtx->cs.Sel = (pCtx->cs.Sel & ~X86_SEL_RPL) | 2;
2068 }
2069# else
2070 AssertMsg((pCtx->ss.Sel & X86_SEL_RPL) >= 2 || pCtx->eflags.Bits.u1VM,
2071 ("ring-1 code not supported\n"));
2072# endif
2073 /*
2074 * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
2075 */
2076 PATMRawEnter(pVM, pCtx);
2077 }
2078
2079 /*
2080 * Assert sanity.
2081 */
2082 AssertMsg((pCtx->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
2083 AssertReleaseMsg(pCtx->eflags.Bits.u2IOPL == 0,
2084 ("X86_EFL_IOPL=%d CPL=%d\n", pCtx->eflags.Bits.u2IOPL, pCtx->ss.Sel & X86_SEL_RPL));
2085 Assert((pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE));
2086
2087 pCtx->eflags.u32 |= X86_EFL_IF; /* paranoia */
2088
2089 pVCpu->cpum.s.fRawEntered = true;
2090 return VINF_SUCCESS;
2091}
2092
2093
2094/**
2095 * Transforms the guest CPU state from raw-ring mode to correct values.
2096 *
2097 * This function will change any selector with RPL=1 back to RPL=0.
2098 *
2099 * @returns Adjusted rc.
2100 * @param pVCpu The cross context virtual CPU structure.
2101 * @param rc Raw mode return code
2102 * @see @ref pg_raw
2103 */
2104VMM_INT_DECL(int) CPUMRawLeave(PVMCPU pVCpu, int rc)
2105{
2106 PVM pVM = pVCpu->CTX_SUFF(pVM);
2107
2108 /*
2109 * Don't leave if we've already left (in RC).
2110 */
2111 Assert(!pVCpu->cpum.s.fRemEntered);
2112 if (!pVCpu->cpum.s.fRawEntered)
2113 return rc;
2114 pVCpu->cpum.s.fRawEntered = false;
2115
2116 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2117 Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss.Sel & X86_SEL_RPL));
2118 AssertMsg(pCtx->eflags.Bits.u1VM || pCtx->eflags.Bits.u2IOPL < (unsigned)(pCtx->ss.Sel & X86_SEL_RPL),
2119 ("X86_EFL_IOPL=%d CPL=%d\n", pCtx->eflags.Bits.u2IOPL, pCtx->ss.Sel & X86_SEL_RPL));
2120
2121 /*
2122 * Are we executing in raw ring-1?
2123 */
2124 if ( (pCtx->ss.Sel & X86_SEL_RPL) == 1
2125 && !pCtx->eflags.Bits.u1VM)
2126 {
2127 /*
2128 * Leave execution mode.
2129 */
2130 PATMRawLeave(pVM, pCtx, rc);
2131 /* Not quite sure if this is really required, but shouldn't harm (too much anyway). */
2132 /** @todo See what happens if we remove this. */
2133 if ((pCtx->ds.Sel & X86_SEL_RPL) == 1)
2134 pCtx->ds.Sel &= ~X86_SEL_RPL;
2135 if ((pCtx->es.Sel & X86_SEL_RPL) == 1)
2136 pCtx->es.Sel &= ~X86_SEL_RPL;
2137 if ((pCtx->fs.Sel & X86_SEL_RPL) == 1)
2138 pCtx->fs.Sel &= ~X86_SEL_RPL;
2139 if ((pCtx->gs.Sel & X86_SEL_RPL) == 1)
2140 pCtx->gs.Sel &= ~X86_SEL_RPL;
2141
2142 /*
2143 * Ring-1 selector => Ring-0.
2144 */
2145 pCtx->ss.Sel &= ~X86_SEL_RPL;
2146 if ((pCtx->cs.Sel & X86_SEL_RPL) == 1)
2147 pCtx->cs.Sel &= ~X86_SEL_RPL;
2148 }
2149 else
2150 {
2151 /*
2152 * PATM is taking care of the IOPL and IF flags for us.
2153 */
2154 PATMRawLeave(pVM, pCtx, rc);
2155 if (!pCtx->eflags.Bits.u1VM)
2156 {
2157# ifdef VBOX_WITH_RAW_RING1
2158 if ( EMIsRawRing1Enabled(pVM)
2159 && (pCtx->ss.Sel & X86_SEL_RPL) == 2)
2160 {
2161 /* Not quite sure if this is really required, but shouldn't harm (too much anyway). */
2162 /** @todo See what happens if we remove this. */
2163 if ((pCtx->ds.Sel & X86_SEL_RPL) == 2)
2164 pCtx->ds.Sel = (pCtx->ds.Sel & ~X86_SEL_RPL) | 1;
2165 if ((pCtx->es.Sel & X86_SEL_RPL) == 2)
2166 pCtx->es.Sel = (pCtx->es.Sel & ~X86_SEL_RPL) | 1;
2167 if ((pCtx->fs.Sel & X86_SEL_RPL) == 2)
2168 pCtx->fs.Sel = (pCtx->fs.Sel & ~X86_SEL_RPL) | 1;
2169 if ((pCtx->gs.Sel & X86_SEL_RPL) == 2)
2170 pCtx->gs.Sel = (pCtx->gs.Sel & ~X86_SEL_RPL) | 1;
2171
2172 /*
2173 * Ring-2 selector => Ring-1.
2174 */
2175 pCtx->ss.Sel = (pCtx->ss.Sel & ~X86_SEL_RPL) | 1;
2176 if ((pCtx->cs.Sel & X86_SEL_RPL) == 2)
2177 pCtx->cs.Sel = (pCtx->cs.Sel & ~X86_SEL_RPL) | 1;
2178 }
2179 else
2180 {
2181# endif
2182 /** @todo See what happens if we remove this. */
2183 if ((pCtx->ds.Sel & X86_SEL_RPL) == 1)
2184 pCtx->ds.Sel &= ~X86_SEL_RPL;
2185 if ((pCtx->es.Sel & X86_SEL_RPL) == 1)
2186 pCtx->es.Sel &= ~X86_SEL_RPL;
2187 if ((pCtx->fs.Sel & X86_SEL_RPL) == 1)
2188 pCtx->fs.Sel &= ~X86_SEL_RPL;
2189 if ((pCtx->gs.Sel & X86_SEL_RPL) == 1)
2190 pCtx->gs.Sel &= ~X86_SEL_RPL;
2191# ifdef VBOX_WITH_RAW_RING1
2192 }
2193# endif
2194 }
2195 }
2196
2197 return rc;
2198}
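
/*
 * Illustrative sketch (not part of the build): how the selector transformation done by
 * CPUMRawEnter/CPUMRawLeave looks for a guest whose kernel runs in ring 0. The selector
 * values are assumptions chosen purely for this example.
 *
 * @code
 *     // Guest view:     SS=0x0010 (RPL=0), CS=0x0008 (RPL=0)
 *     rc = CPUMRawEnter(pVCpu);
 *     // Raw-mode view:  SS=0x0011 (RPL=1), CS=0x0009 (RPL=1); EFLAGS.IF is forced set
 *     // ... run the patched/recompiled guest code ...
 *     rc = CPUMRawLeave(pVCpu, rc);
 *     // Guest view restored: SS=0x0010 (RPL=0), CS=0x0008 (RPL=0)
 * @endcode
 */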
2199
2200#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2201
2202/**
2203 * Updates the EFLAGS while we're in raw-mode.
2204 *
2205 * @param pVCpu The cross context virtual CPU structure.
2206 * @param fEfl The new EFLAGS value.
2207 */
2208VMMDECL(void) CPUMRawSetEFlags(PVMCPU pVCpu, uint32_t fEfl)
2209{
2210#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2211 if (pVCpu->cpum.s.fRawEntered)
2212 PATMRawSetEFlags(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.s.Guest, fEfl);
2213 else
2214#endif
2215 pVCpu->cpum.s.Guest.eflags.u32 = fEfl;
2216}
2217
2218
2219/**
2220 * Gets the EFLAGS while we're in raw-mode.
2221 *
2222 * @returns The eflags.
2223 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2224 */
2225VMMDECL(uint32_t) CPUMRawGetEFlags(PVMCPU pVCpu)
2226{
2227#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2228 if (pVCpu->cpum.s.fRawEntered)
2229 return PATMRawGetEFlags(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.s.Guest);
2230#endif
2231 return pVCpu->cpum.s.Guest.eflags.u32;
2232}
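
/*
 * Illustrative sketch (not part of the build): while raw-mode is entered, guest-visible
 * EFLAGS must go through the two accessors above so PATM can keep IF/IOPL virtualized.
 * The particular flag tweak shown is an arbitrary example.
 *
 * @code
 *     uint32_t fEfl = CPUMRawGetEFlags(pVCpu);    // PATM-aware guest EFLAGS.
 *     fEfl &= ~X86_EFL_TF;                        // Example modification.
 *     CPUMRawSetEFlags(pVCpu, fEfl);
 * @endcode
 */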
2233
2234
2235/**
2236 * Sets the specified changed flags (CPUM_CHANGED_*).
2237 *
2238 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2239 * @param fChangedAdd The changed flags to add.
2240 */
2241VMMDECL(void) CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedAdd)
2242{
2243 pVCpu->cpum.s.fChanged |= fChangedAdd;
2244}
2245
2246
2247/**
2248 * Checks if the CPU supports the XSAVE and XRSTOR instructions.
2249 *
2250 * @returns true if supported.
2251 * @returns false if not supported.
2252 * @param pVM The cross context VM structure.
2253 */
2254VMMDECL(bool) CPUMSupportsXSave(PVM pVM)
2255{
2256 return pVM->cpum.s.HostFeatures.fXSaveRstor != 0;
2257}
2258
2259
2260/**
2261 * Checks if the host OS uses the SYSENTER / SYSEXIT instructions.
2262 * @returns true if used.
2263 * @returns false if not used.
2264 * @param pVM The cross context VM structure.
2265 */
2266VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
2267{
2268 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER);
2269}
2270
2271
2272/**
2273 * Checks if the host OS uses the SYSCALL / SYSRET instructions.
2274 * @returns true if used.
2275 * @returns false if not used.
2276 * @param pVM The cross context VM structure.
2277 */
2278VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
2279{
2280 return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL);
2281}
2282
2283#ifdef IN_RC
2284
2285/**
2286 * Lazily sync in the FPU/XMM state.
2287 *
2288 * @returns VBox status code.
2289 * @param pVCpu The cross context virtual CPU structure.
2290 */
2291VMMDECL(int) CPUMHandleLazyFPU(PVMCPU pVCpu)
2292{
2293 return cpumHandleLazyFPUAsm(&pVCpu->cpum.s);
2294}
2295
2296#endif /* IN_RC */
2297
2298/**
2299 * Checks if we activated the FPU/XMM state of the guest OS.
2300 *
2301 * This differs from CPUMIsGuestFPUStateLoaded() in that it refers to the next
2302 * time we'll be executing guest code, so it may return true for 64-on-32 when
2303 * we still haven't actually loaded the FPU state, just scheduled it to be
2304 * loaded the next time we go through the world switcher (CPUM_SYNC_FPU_STATE).
2305 *
2306 * @returns true / false.
2307 * @param pVCpu The cross context virtual CPU structure.
2308 */
2309VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
2310{
2311 return RT_BOOL(pVCpu->cpum.s.fUseFlags & (CPUM_USED_FPU_GUEST | CPUM_SYNC_FPU_STATE));
2312}
2313
2314
2315/**
2316 * Checks if we've really loaded the FPU/XMM state of the guest OS.
2317 *
2318 * @returns true / false.
2319 * @param pVCpu The cross context virtual CPU structure.
2320 */
2321VMMDECL(bool) CPUMIsGuestFPUStateLoaded(PVMCPU pVCpu)
2322{
2323 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST);
2324}
2325
2326
2327/**
2328 * Checks if we saved the FPU/XMM state of the host OS.
2329 *
2330 * @returns true / false.
2331 * @param pVCpu The cross context virtual CPU structure.
2332 */
2333VMMDECL(bool) CPUMIsHostFPUStateSaved(PVMCPU pVCpu)
2334{
2335 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_HOST);
2336}
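
/*
 * Illustrative note (not part of the build): the relationship between the two guest FPU
 * state queries above, as implied by the flag definitions (loaded implies active, while
 * an active state may still be pending a CPUM_SYNC_FPU_STATE world-switch load).
 *
 * @code
 *     Assert(!CPUMIsGuestFPUStateLoaded(pVCpu) || CPUMIsGuestFPUStateActive(pVCpu));
 * @endcode
 */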
2337
2338
2339/**
2340 * Checks if the guest debug state is active.
2341 *
2342 * @returns boolean
2343 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2344 */
2345VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
2346{
2347 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST);
2348}
2349
2350
2351/**
2352 * Checks if the guest debug state is to be made active during the world-switch
2353 * (currently only used for the 32->64 switcher case).
2354 *
2355 * @returns boolean
2356 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2357 */
2358VMMDECL(bool) CPUMIsGuestDebugStateActivePending(PVMCPU pVCpu)
2359{
2360 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_GUEST);
2361}
2362
2363
2364/**
2365 * Checks if the hyper debug state is active.
2366 *
2367 * @returns boolean
2368 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2369 */
2370VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
2371{
2372 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER);
2373}
2374
2375
2376/**
2377 * Checks if the hyper debug state is to be made active during the world-switch
2378 * (currently only used for the 32->64 switcher case).
2379 *
2380 * @returns boolean
2381 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2382 */
2383VMMDECL(bool) CPUMIsHyperDebugStateActivePending(PVMCPU pVCpu)
2384{
2385 return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_REGS_HYPER);
2386}
2387
2388
2389/**
2390 * Marks the guest's debug state as inactive.
2391 *
2393 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2394 * @todo This API doesn't make sense any more.
2395 */
2396VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
2397{
2398 Assert(!(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HOST)));
2399 NOREF(pVCpu);
2400}
2401
2402
2403/**
2404 * Get the current privilege level of the guest.
2405 *
2406 * @returns CPL
2407 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2408 */
2409VMMDECL(uint32_t) CPUMGetGuestCPL(PVMCPU pVCpu)
2410{
2411 /*
2412 * CPL can reliably be found in SS.DPL (hidden regs valid) or SS if not.
2413 *
2414 * Note! We used to check CS.DPL here, assuming it was always equal to
2415 * CPL even if a conforming segment was loaded. But this turned out to
2416 * only apply to older AMD-V. With VT-x we had an ACP2 regression
2417 * during install after a far call to ring 2 with VT-x. Then on newer
2418 * AMD-V CPUs we have to move the VMCB.guest.u8CPL into cs.Attr.n.u2Dpl
2419 * as well as ss.Attr.n.u2Dpl to make this (and other) code work right.
2420 *
2421 * So, forget CS.DPL, always use SS.DPL.
2422 *
2423 * Note! The SS RPL is always equal to the CPL, while the CS RPL
2424 * isn't necessarily equal if the segment is conforming.
2425 * See section 4.11.1 in the AMD manual.
2426 *
2427 * Update: Where the heck does it say CS.RPL can differ from CPL other than
2428 * right after real->prot mode switch and when in V8086 mode? That
2429 * section says the RPL specified in a direct transfer (call, jmp,
2430 * ret) is not the one loaded into CS. Besides, if CS.RPL != CPL
2431 * it would be impossible for an exception handler or the iret
2432 * instruction to figure out whether SS:ESP are part of the frame
2433 * or not. A VBox or QEMU bug must've led to this misconception.
2434 *
2435 * Update2: On an AMD bulldozer system here, I've no trouble loading a null
2436 * selector into SS with an RPL other than the CPL when CPL != 3 and
2437 * we're in 64-bit mode. The Intel dev box doesn't allow this and
2438 * insists on RPL = CPL. Weird.
2439 */
2440 uint32_t uCpl;
2441 if (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
2442 {
2443 if (!pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2444 {
2445 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.s.Guest.ss))
2446 uCpl = pVCpu->cpum.s.Guest.ss.Attr.n.u2Dpl;
2447 else
2448 {
2449 uCpl = (pVCpu->cpum.s.Guest.ss.Sel & X86_SEL_RPL);
2450#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2451# ifdef VBOX_WITH_RAW_RING1
2452 if (pVCpu->cpum.s.fRawEntered)
2453 {
2454 if ( uCpl == 2
2455 && EMIsRawRing1Enabled(pVCpu->CTX_SUFF(pVM)))
2456 uCpl = 1;
2457 else if (uCpl == 1)
2458 uCpl = 0;
2459 }
2460 Assert(uCpl != 2); /* ring 2 support not allowed anymore. */
2461# else
2462 if (uCpl == 1)
2463 uCpl = 0;
2464# endif
2465#endif
2466 }
2467 }
2468 else
2469 uCpl = 3; /* V86 has CPL=3; REM doesn't set DPL=3 in V8086 mode. See @bugref{5130}. */
2470 }
2471 else
2472 uCpl = 0; /* Real mode is zero; CPL set to 3 for VT-x real-mode emulation. */
2473 return uCpl;
2474}
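
/*
 * Illustrative sketch (not part of the build): the kind of privilege check an instruction
 * interpreter might perform with CPUMGetGuestCPL before emulating a privileged
 * instruction. How the fault would actually be delivered is outside this example.
 *
 * @code
 *     if (CPUMGetGuestCPL(pVCpu) != 0)
 *     {
 *         // Privileged instruction outside ring 0 -> caller raises #GP(0).
 *         ...
 *     }
 * @endcode
 */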
2475
2476
2477/**
2478 * Gets the current guest CPU mode.
2479 *
2480 * If paging mode is what you need, check out PGMGetGuestMode().
2481 *
2482 * @returns The CPU mode.
2483 * @param pVCpu The cross context virtual CPU structure.
2484 */
2485VMMDECL(CPUMMODE) CPUMGetGuestMode(PVMCPU pVCpu)
2486{
2487 CPUMMODE enmMode;
2488 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2489 enmMode = CPUMMODE_REAL;
2490 else if (!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2491 enmMode = CPUMMODE_PROTECTED;
2492 else
2493 enmMode = CPUMMODE_LONG;
2494
2495 return enmMode;
2496}
2497
2498
2499/**
2500 * Figures out whether the guest CPU is currently executing 16-, 32- or 64-bit code.
2501 *
2502 * @returns 16, 32 or 64.
2503 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2504 */
2505VMMDECL(uint32_t) CPUMGetGuestCodeBits(PVMCPU pVCpu)
2506{
2507 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2508 return 16;
2509
2510 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2511 {
2512 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
2513 return 16;
2514 }
2515
2516 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2517 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
2518 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2519 return 64;
2520
2521 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
2522 return 32;
2523
2524 return 16;
2525}
2526
2527
/**
 * Figures out the disassembler CPU mode the guest is currently executing in.
 *
 * @returns DISCPUMODE_16BIT, DISCPUMODE_32BIT or DISCPUMODE_64BIT.
 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
 */
2528VMMDECL(DISCPUMODE) CPUMGetGuestDisMode(PVMCPU pVCpu)
2529{
2530 if (!(pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE))
2531 return DISCPUMODE_16BIT;
2532
2533 if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
2534 {
2535 Assert(!(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA));
2536 return DISCPUMODE_16BIT;
2537 }
2538
2539 CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
2540 if ( pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
2541 && (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
2542 return DISCPUMODE_64BIT;
2543
2544 if (pVCpu->cpum.s.Guest.cs.Attr.n.u1DefBig)
2545 return DISCPUMODE_32BIT;
2546
2547 return DISCPUMODE_16BIT;
2548}
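
/*
 * Illustrative note (not part of the build): CPUMGetGuestCodeBits and CPUMGetGuestDisMode
 * apply the same CR0.PE / EFLAGS.VM / CS.L / CS.D decision chain, so their results always
 * correspond. The switch below is made up for this example.
 *
 * @code
 *     switch (CPUMGetGuestCodeBits(pVCpu))
 *     {
 *         case 16: Assert(CPUMGetGuestDisMode(pVCpu) == DISCPUMODE_16BIT); break;
 *         case 32: Assert(CPUMGetGuestDisMode(pVCpu) == DISCPUMODE_32BIT); break;
 *         case 64: Assert(CPUMGetGuestDisMode(pVCpu) == DISCPUMODE_64BIT); break;
 *     }
 * @endcode
 */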
2549
2550
2551/**
2552 * Gets the guest MXCSR_MASK value.
2553 *
2554 * This does not access the x87 state, but returns the mask value we
2555 * determined at VM initialization.
2556 *
2557 * @returns MXCSR mask.
2558 * @param pVM The cross context VM structure.
2559 */
2560VMMDECL(uint32_t) CPUMGetGuestMxCsrMask(PVM pVM)
2561{
2562 return pVM->cpum.s.GuestInfo.fMxCsrMask;
2563}
2564
2565
2566/**
2567 * Checks whether the SVM nested-guest is in a state to receive physical (APIC)
2568 * interrupts.
2569 *
2570 * @returns @c true if it's ready to receive physical interrupts, @c false otherwise.
2572 *
2573 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2574 * @param pCtx The guest-CPU context.
2575 *
2576 * @sa hmR0SvmCanNstGstTakePhysIntr.
2577 */
2578VMM_INT_DECL(bool) CPUMCanSvmNstGstTakePhysIntr(PVMCPU pVCpu, PCCPUMCTX pCtx)
2579{
2580 /** @todo Optimization: Avoid this function call and use a pointer to the
2581 * relevant eflags instead (set up during VMRUN instruction emulation). */
2582#ifdef IN_RC
2583 RT_NOREF2(pVCpu, pCtx);
2584 AssertReleaseFailedReturn(false);
2585#else
2586 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
2587 Assert(pCtx->hwvirt.fGif);
2588
2589 X86EFLAGS fEFlags;
2590 if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, pCtx))
2591 fEFlags.u = pCtx->hwvirt.svm.HostState.rflags.u;
2592 else
2593 fEFlags.u = pCtx->eflags.u;
2594
2595 return fEFlags.Bits.u1IF;
2596#endif
2597}
2598
2599
2600/**
2601 * Checks whether the SVM nested-guest is in a state to receive virtual (set up
2602 * for injection by the VMRUN instruction) interrupts.
2603 *
2604 * @returns @c true if it's ready to receive virtual interrupts, @c false otherwise.
2606 *
2607 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2608 * @param pCtx The guest-CPU context.
2609 */
2610VMM_INT_DECL(bool) CPUMCanSvmNstGstTakeVirtIntr(PVMCPU pVCpu, PCCPUMCTX pCtx)
2611{
2612#ifdef IN_RC
2613 RT_NOREF2(pVCpu, pCtx);
2614 AssertReleaseFailedReturn(false);
2615#else
2616 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
2617 Assert(pCtx->hwvirt.fGif);
2618
2619 PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
2620 PCSVMINTCTRL pVmcbIntCtrl = &pVmcbCtrl->IntCtrl;
2621 if ( !pVmcbIntCtrl->n.u1IgnoreTPR
2622 && pVmcbIntCtrl->n.u4VIntrPrio <= pVmcbIntCtrl->n.u8VTPR)
2623 return false;
2624
2625 X86EFLAGS fEFlags;
2626 if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, pCtx))
2627 fEFlags.u = pCtx->eflags.u;
2628 else
2629 fEFlags.u = pCtx->hwvirt.svm.HostState.rflags.u;
2630
2631 return fEFlags.Bits.u1IF;
2632#endif
2633}
2634
2635
2636/**
2637 * Gets the pending SVM nested-guest interrupt.
2638 *
2639 * @returns The nested-guest interrupt to inject.
2640 * @param pCtx The guest-CPU context.
2641 */
2642VMM_INT_DECL(uint8_t) CPUMGetSvmNstGstInterrupt(PCCPUMCTX pCtx)
2643{
2644#ifdef IN_RC
2645 RT_NOREF(pCtx);
2646 AssertReleaseFailedReturn(0);
2647#else
2648 PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
2649 return pVmcbCtrl->IntCtrl.n.u8VIntrVector;
2650#endif
2651}
2652
2653
2654/**
2655 * Gets the SVM nested-guest virtual GIF.
2656 *
2657 * @returns The nested-guest virtual GIF.
2658 * @param pCtx The guest-CPU context.
2659 */
2660VMM_INT_DECL(bool) CPUMGetSvmNstGstVGif(PCCPUMCTX pCtx)
2661{
2662#ifdef IN_RC
2663 RT_NOREF(pCtx);
2664 AssertReleaseFailedReturn(false);
2665#else
2666 PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
2667 PCSVMINTCTRL pVmcbIntCtrl = &pVmcbCtrl->IntCtrl;
2668 if (pVmcbIntCtrl->n.u1VGifEnable)
2669 return pVmcbIntCtrl->n.u1VGif;
2670 return true;
2671#endif
2672}
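
/*
 * Illustrative sketch (not part of the build): how an interrupt-injection path might
 * consult the nested-guest helpers above. The surrounding checks and the hand-off to the
 * event injection code are paraphrased assumptions, not the actual HM/IEM logic.
 *
 * @code
 *     if (   CPUMGetSvmNstGstVGif(pCtx)                     // Virtual GIF (true if vGIF isn't enabled).
 *         && CPUMCanSvmNstGstTakeVirtIntr(pVCpu, pCtx))     // V_IRQ priority and EFLAGS.IF checks.
 *     {
 *         uint8_t const uVector = CPUMGetSvmNstGstInterrupt(pCtx);
 *         // ... hand uVector to the event injection / trap manager code ...
 *     }
 * @endcode
 */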
2673
2674
2675/**
2676 * Restores the host-state from the host-state save area as part of a \#VMEXIT.
2677 *
2678 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2679 * @param pCtx The guest-CPU context.
2680 */
2681VMM_INT_DECL(void) CPUMSvmVmExitRestoreHostState(PVMCPU pVCpu, PCPUMCTX pCtx)
2682{
2683 /*
2684 * Reload the guest's "host state".
2685 */
2686 PSVMHOSTSTATE pHostState = &pCtx->hwvirt.svm.HostState;
2687 pCtx->es = pHostState->es;
2688 pCtx->cs = pHostState->cs;
2689 pCtx->ss = pHostState->ss;
2690 pCtx->ds = pHostState->ds;
2691 pCtx->gdtr = pHostState->gdtr;
2692 pCtx->idtr = pHostState->idtr;
2693 CPUMSetGuestMsrEferNoCheck(pVCpu, pCtx->msrEFER, pHostState->uEferMsr);
2694 CPUMSetGuestCR0(pVCpu, pHostState->uCr0 | X86_CR0_PE);
2695 pCtx->cr3 = pHostState->uCr3;
2696 CPUMSetGuestCR4(pVCpu, pHostState->uCr4);
2697 pCtx->rflags = pHostState->rflags;
2698 pCtx->rflags.Bits.u1VM = 0;
2699 pCtx->rip = pHostState->uRip;
2700 pCtx->rsp = pHostState->uRsp;
2701 pCtx->rax = pHostState->uRax;
2702 pCtx->dr[7] &= ~(X86_DR7_ENABLED_MASK | X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK);
2703 pCtx->dr[7] |= X86_DR7_RA1_MASK;
2704 Assert(pCtx->ss.Attr.n.u2Dpl == 0);
2705
2706 /** @todo if RIP is not canonical or outside the CS segment limit, we need to
2707 * raise \#GP(0) in the guest. */
2708
2709 /** @todo Check the loaded host-state for consistency. Figure out exactly
2710 * what this involves. */
2711}
2712
2713
2714/**
2715 * Saves the host-state to the host-state save area as part of a VMRUN.
2716 *
2717 * @param pCtx The guest-CPU context.
2718 * @param cbInstr The length of the VMRUN instruction in bytes.
2719 */
2720VMM_INT_DECL(void) CPUMSvmVmRunSaveHostState(PCPUMCTX pCtx, uint8_t cbInstr)
2721{
2722 PSVMHOSTSTATE pHostState = &pCtx->hwvirt.svm.HostState;
2723 pHostState->es = pCtx->es;
2724 pHostState->cs = pCtx->cs;
2725 pHostState->ss = pCtx->ss;
2726 pHostState->ds = pCtx->ds;
2727 pHostState->gdtr = pCtx->gdtr;
2728 pHostState->idtr = pCtx->idtr;
2729 pHostState->uEferMsr = pCtx->msrEFER;
2730 pHostState->uCr0 = pCtx->cr0;
2731 pHostState->uCr3 = pCtx->cr3;
2732 pHostState->uCr4 = pCtx->cr4;
2733 pHostState->rflags = pCtx->rflags;
2734 pHostState->uRip = pCtx->rip + cbInstr;
2735 pHostState->uRsp = pCtx->rsp;
2736 pHostState->uRax = pCtx->rax;
2737}
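
/*
 * Illustrative sketch (not part of the build): the rough call ordering an SVM emulation
 * path would follow around the two helpers above. The intermediate steps are paraphrased
 * assumptions, not the actual IEM/HM code.
 *
 * @code
 *     // VMRUN emulation:
 *     CPUMSvmVmRunSaveHostState(pCtx, cbInstr);    // Stash the "host" state, RIP past VMRUN.
 *     // ... load the nested-guest state from the VMCB and start executing it ...
 *
 *     // #VMEXIT emulation:
 *     // ... write the nested-guest state back into the VMCB ...
 *     CPUMSvmVmExitRestoreHostState(pVCpu, pCtx);  // Reload the outer guest ("host") state.
 * @endcode
 */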
2738
2739
2740/**
2741 * Applies the TSC offset of a nested-guest if any and returns the new TSC
2742 * value for the guest (or nested-guest).
2743 *
2744 * @returns The TSC offset after applying any nested-guest TSC offset.
2745 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2746 * @param uTicks The guest TSC.
2747 *
2748 * @sa HMSvmNstGstApplyTscOffset.
2749 */
2750VMM_INT_DECL(uint64_t) CPUMApplyNestedGuestTscOffset(PVMCPU pVCpu, uint64_t uTicks)
2751{
2752#ifndef IN_RC
2753 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
2754 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
2755 {
2756 if (!pCtx->hwvirt.svm.fHMCachedVmcb)
2757 {
2758 PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
2759 return uTicks + pVmcb->ctrl.u64TSCOffset;
2760 }
2761 return HMSvmNstGstApplyTscOffset(pVCpu, uTicks);
2762 }
2763
2764 /** @todo Intel. */
2765#else
2766 RT_NOREF(pVCpu);
2767#endif
2768 return uTicks;
2769}
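
/*
 * Illustrative sketch (not part of the build): applying the nested-guest TSC offset when
 * emulating RDTSC, assuming TM's TMCpuTickGet() as the source of the virtual TSC. The
 * rest of the handler is an assumption for this example only.
 *
 * @code
 *     uint64_t uTicks = TMCpuTickGet(pVCpu);                  // Guest TSC.
 *     uTicks = CPUMApplyNestedGuestTscOffset(pVCpu, uTicks);  // Add the VMCB TSC offset if applicable.
 *     pCtx->rax = RT_LO_U32(uTicks);                          // RDTSC zero-extends EAX/EDX.
 *     pCtx->rdx = RT_HI_U32(uTicks);
 * @endcode
 */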
2770