- Timestamp: Oct 27, 2008 1:53:04 PM (16 years ago)
- svn:sync-xref-src-repo-rev: 38476
- Location: trunk/src/VBox/VMM
- Files: 5 edited
trunk/src/VBox/VMM/SELM.cpp
r13232 r13577 1 1 /* $Id$ */ 2 2 /** @file 3 * SELM - The Selector manager.3 * SELM - The Selector Manager. 4 4 */ 5 5 … … 22 22 /** @page pg_selm SELM - The Selector Manager 23 23 * 24 * Manages the hypervisor GDT entires, monitors and shadows the guest GDT, LDT 25 * and TSS. Only active in raw-mode. 24 * SELM takes care of GDT, LDT and TSS shadowing in raw-mode, and the injection 25 * of a few hyper selector for the raw-mode context. In the hardware assisted 26 * virtualization mode its only task is to decode entries in the guest GDT or 27 * LDT once in a while. 26 28 * 27 29 * @see grp_selm 30 * 31 * 32 * @section seg_selm_shadowing Shadowing 33 * 34 * SELMR3UpdateFromCPUM() and SELMR3SyncTSS() does the bulk synchronization 35 * work. The three structures (GDT, LDT, TSS) are all shadowed wholesale atm. 36 * The idea is to do it in a more on-demand fashion when we get time. There 37 * also a whole bunch of issues with the current synchronization of all three 38 * tables, see notes and todos in the code. 39 * 40 * When the guest makes changes to the GDT we will try update the shadow copy 41 * without involving SELMR3UpdateFromCPUM(), see selmGCSyncGDTEntry(). 42 * 43 * When the guest make LDT changes we'll trigger a full resync of the LDT 44 * (SELMR3UpdateFromCPUM()), which, needless to say, isn't optimal. 45 * 46 * The TSS shadowing is limited to the fields we need to care about, namely SS0 47 * and ESP0. The Patch Manager makes use of these. We monitor updates to the 48 * guest TSS and will try keep our SS0 and ESP0 copies up to date this way 49 * rather than go the SELMR3SyncTSS() route. 50 * 51 * When in raw-mode SELM also injects a few extra GDT selectors which are used 52 * by the raw-mode (hyper) context. These start their life at the high end of 53 * the table and will be relocated when the guest tries to make use of them... 54 * Well, that was that idea at least, only the code isn't quite there yet which 55 * is why we have trouble with guests which actually have a full sized GDT. 56 * 57 * So, the summary of the current GDT, LDT and TSS shadowing is that there is a 58 * lot of relatively simple and enjoyable work to be done, see @bugref{3267}. 
28 59 * 29 60 */ … … 76 107 #define SELM_SAVED_STATE_VERSION 5 77 108 109 78 110 /******************************************************************************* 79 111 * Internal Functions * 80 112 *******************************************************************************/ 81 static DECLCALLBACK(int) selmR3Save(PVM pVM, PSSMHANDLE pSSM); 82 static DECLCALLBACK(int) selmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version); 83 static DECLCALLBACK(int) selmR3LoadDone(PVM pVM, PSSMHANDLE pSSM); 113 static DECLCALLBACK(int) selmR3Save(PVM pVM, PSSMHANDLE pSSM); 114 static DECLCALLBACK(int) selmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version); 115 static DECLCALLBACK(int) selmR3LoadDone(PVM pVM, PSSMHANDLE pSSM); 116 static DECLCALLBACK(int) selmR3GuestGDTWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser); 117 static DECLCALLBACK(int) selmR3GuestLDTWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser); 118 static DECLCALLBACK(int) selmR3GuestTSSWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser); 84 119 static DECLCALLBACK(void) selmR3InfoGdt(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs); 85 120 static DECLCALLBACK(void) selmR3InfoGdtGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs); … … 88 123 //static DECLCALLBACK(void) selmR3InfoTss(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs); 89 124 //static DECLCALLBACK(void) selmR3InfoTssGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs); 90 static DECLCALLBACK(int) selmGuestGDTWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);91 static DECLCALLBACK(int) selmGuestLDTWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);92 static DECLCALLBACK(int) selmGuestTSSWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);93 125 94 126 … … 131 163 * Allocate GDT table. 132 164 */ 133 int rc = MMR3HyperAllocOnceNoRel(pVM, sizeof(pVM->selm.s.paGdt HC[0]) * SELM_GDT_ELEMENTS,134 PAGE_SIZE, MM_TAG_SELM, (void **)&pVM->selm.s.paGdt HC);165 int rc = MMR3HyperAllocOnceNoRel(pVM, sizeof(pVM->selm.s.paGdtR3[0]) * SELM_GDT_ELEMENTS, 166 PAGE_SIZE, MM_TAG_SELM, (void **)&pVM->selm.s.paGdtR3); 135 167 AssertRCReturn(rc, rc); 136 168 … … 138 170 * Allocate LDT area. 139 171 */ 140 rc = MMR3HyperAllocOnceNoRel(pVM, _64K + PAGE_SIZE, PAGE_SIZE, MM_TAG_SELM, &pVM->selm.s. HCPtrLdt);172 rc = MMR3HyperAllocOnceNoRel(pVM, _64K + PAGE_SIZE, PAGE_SIZE, MM_TAG_SELM, &pVM->selm.s.pvLdtR3); 141 173 AssertRCReturn(rc, rc); 142 174 … … 149 181 pVM->selm.s.GCPtrGuestTss = RTRCPTR_MAX; 150 182 151 pVM->selm.s.paGdt GC = 0;152 pVM->selm.s. GCPtrLdt= RTRCPTR_MAX;153 pVM->selm.s. GCPtrTss= RTRCPTR_MAX;154 pVM->selm.s.GCSelTss = ~0;183 pVM->selm.s.paGdtRC = NIL_RTRCPTR; /* Must be set in SELMR3Relocate because of monitoring. */ 184 pVM->selm.s.pvLdtRC = RTRCPTR_MAX; 185 pVM->selm.s.pvMonShwTssRC = RTRCPTR_MAX; 186 pVM->selm.s.GCSelTss = RTSEL_MAX; 155 187 156 188 pVM->selm.s.fDisableMonitoring = false; … … 175 207 * Statistics. 
176 208 */ 177 STAM_REG(pVM, &pVM->selm.s.Stat GCWriteGuestGDTHandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/GDTInt", STAMUNIT_OCCURENCES, "The number of handled writes to the Guest GDT.");178 STAM_REG(pVM, &pVM->selm.s.Stat GCWriteGuestGDTUnhandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/GDTEmu", STAMUNIT_OCCURENCES, "The number of unhandled writes to the Guest GDT.");179 STAM_REG(pVM, &pVM->selm.s.Stat GCWriteGuestLDT, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/LDT", STAMUNIT_OCCURENCES, "The number of writes to the Guest LDT was detected.");180 STAM_REG(pVM, &pVM->selm.s.Stat GCWriteGuestTSSHandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSInt", STAMUNIT_OCCURENCES, "The number of handled writes to the Guest TSS.");181 STAM_REG(pVM, &pVM->selm.s.Stat GCWriteGuestTSSRedir, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSRedir",STAMUNIT_OCCURENCES, "The number of handled redir bitmap writes to the Guest TSS.");182 STAM_REG(pVM, &pVM->selm.s.Stat GCWriteGuestTSSHandledChanged,STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSIntChg", STAMUNIT_OCCURENCES, "The number of handled writes to the Guest TSS where the R0 stack changed.");183 STAM_REG(pVM, &pVM->selm.s.Stat GCWriteGuestTSSUnhandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSEmu", STAMUNIT_OCCURENCES, "The number of unhandled writes to the Guest TSS.");209 STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestGDTHandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/GDTInt", STAMUNIT_OCCURENCES, "The number of handled writes to the Guest GDT."); 210 STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestGDTUnhandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/GDTEmu", STAMUNIT_OCCURENCES, "The number of unhandled writes to the Guest GDT."); 211 STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestLDT, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/LDT", STAMUNIT_OCCURENCES, "The number of writes to the Guest LDT was detected."); 212 STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestTSSHandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSInt", STAMUNIT_OCCURENCES, "The number of handled writes to the Guest TSS."); 213 STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestTSSRedir, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSRedir",STAMUNIT_OCCURENCES, "The number of handled redir bitmap writes to the Guest TSS."); 214 STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestTSSHandledChanged,STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSIntChg", STAMUNIT_OCCURENCES, "The number of handled writes to the Guest TSS where the R0 stack changed."); 215 STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestTSSUnhandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSEmu", STAMUNIT_OCCURENCES, "The number of unhandled writes to the Guest TSS."); 184 216 STAM_REG(pVM, &pVM->selm.s.StatTSSSync, STAMTYPE_PROFILE, "/PROF/SELM/TSSSync", STAMUNIT_TICKS_PER_CALL, "Profiling of the SELMR3SyncTSS() body."); 185 217 STAM_REG(pVM, &pVM->selm.s.StatUpdateFromCPUM, STAMTYPE_PROFILE, "/PROF/SELM/UpdateFromCPUM", STAMUNIT_TICKS_PER_CALL, "Profiling of the SELMR3UpdateFromCPUM() body."); … … 217 249 VMMR3DECL(int) SELMR3InitFinalize(PVM pVM) 218 250 { 219 /* 220 * Make Double Fault work with WP enabled?221 * 222 * The double fault is a task switch and thus requires write access to the GDT of the TSS223 * (to set it busy), to the old TSS (to store state), and to the Trap 8 TSS for the back link.224 * 225 * Since we in enabling write access to these pages make ourself vulnerable to attacks,226 * it is not possible to do this by default.251 /** @cfgm{/DoubleFault,bool,false} 252 * Enables catching of double faults in the raw-mode context VMM code. 
This can 253 * be used when the tripple faults or hangs occure and one suspect an unhandled 254 * double fault. This is not enabled by default because it means making the 255 * hyper selectors writeable for all supervisor code, including the guest's. 256 * The double fault is a task switch and thus requires write access to the GDT 257 * of the TSS (to set it busy), to the old TSS (to store state), and to the Trap 258 * 8 TSS for the back link. 227 259 */ 228 260 bool f; 229 int rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "DoubleFault", &f); 230 #if !defined(DEBUG_bird) 231 if (VBOX_SUCCESS(rc) && f) 261 #if defined(DEBUG_bird) 262 int rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "DoubleFault", &f, true); 263 #else 264 int rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "DoubleFault", &f, false); 232 265 #endif 233 { 234 PX86DESC paGdt = pVM->selm.s.paGdtHC; 266 AssertLogRelRCReturn(rc, rc); 267 if (f) 268 { 269 PX86DESC paGdt = pVM->selm.s.paGdtR3; 235 270 rc = PGMMapSetPage(pVM, MMHyperHC2GC(pVM, &paGdt[pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] >> 3]), sizeof(paGdt[0]), 236 271 X86_PTE_RW | X86_PTE_P | X86_PTE_A | X86_PTE_D); … … 257 292 static void selmR3SetupHyperGDTSelectors(PVM pVM) 258 293 { 259 PX86DESC paGdt = pVM->selm.s.paGdt HC;294 PX86DESC paGdt = pVM->selm.s.paGdtR3; 260 295 261 296 /* … … 358 393 VMMR3DECL(void) SELMR3Relocate(PVM pVM) 359 394 { 360 PX86DESC paGdt = pVM->selm.s.paGdt HC;395 PX86DESC paGdt = pVM->selm.s.paGdtR3; 361 396 LogFlow(("SELMR3Relocate\n")); 362 397 … … 419 454 int rc; 420 455 #ifdef SELM_TRACK_SHADOW_GDT_CHANGES 421 if (pVM->selm.s.paGdt GC != 0)422 { 423 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.paGdt GC);456 if (pVM->selm.s.paGdtRC != NIL_RTRCPTR) 457 { 458 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.paGdtRC); 424 459 AssertRC(rc); 425 460 } 426 pVM->selm.s.paGdt GC = MMHyperHC2GC(pVM, paGdt);427 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_HYPERVISOR, pVM->selm.s.paGdt GC,428 pVM->selm.s.paGdt GC + SELM_GDT_ELEMENTS * sizeof(paGdt[0]) - 1,429 0, 0, "selm gcShadowGDTWriteHandler", 0, "Shadow GDT write access handler");461 pVM->selm.s.paGdtRC = MMHyperR3ToRC(pVM, paGdt); 462 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_HYPERVISOR, pVM->selm.s.paGdtRC, 463 pVM->selm.s.paGdtRC + SELM_GDT_ELEMENTS * sizeof(paGdt[0]) - 1, 464 0, 0, "selmRCShadowGDTWriteHandler", 0, "Shadow GDT write access handler"); 430 465 AssertRC(rc); 431 466 #endif 432 467 #ifdef SELM_TRACK_SHADOW_TSS_CHANGES 433 if (pVM->selm.s. GCPtrTss!= RTRCPTR_MAX)434 { 435 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s. GCPtrTss);468 if (pVM->selm.s.pvMonShwTssRC != RTRCPTR_MAX) 469 { 470 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.pvMonShwTssRC); 436 471 AssertRC(rc); 437 472 } 438 pVM->selm.s. GCPtrTss = VM_GUEST_ADDR(pVM, &pVM->selm.s.Tss);439 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_HYPERVISOR, pVM->selm.s. GCPtrTss,440 pVM->selm.s. GCPtrTss+ sizeof(pVM->selm.s.Tss) - 1,441 0, 0, "selm gcShadowTSSWriteHandler", 0, "Shadow TSS write access handler");473 pVM->selm.s.pvMonShwTssRC = VM_RC_ADDR(pVM, &pVM->selm.s.Tss); 474 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_HYPERVISOR, pVM->selm.s.pvMonShwTssRC, 475 pVM->selm.s.pvMonShwTssRC + sizeof(pVM->selm.s.Tss) - 1, 476 0, 0, "selmRCShadowTSSWriteHandler", 0, "Shadow TSS write access handler"); 442 477 AssertRC(rc); 443 478 #endif … … 447 482 */ 448 483 #ifdef SELM_TRACK_SHADOW_LDT_CHANGES 449 if (pVM->selm.s. 
GCPtrLdt!= RTRCPTR_MAX)450 { 451 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s. GCPtrLdt);484 if (pVM->selm.s.pvLdtRC != RTRCPTR_MAX) 485 { 486 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.pvLdtRC); 452 487 AssertRC(rc); 453 488 } 454 489 #endif 455 pVM->selm.s. GCPtrLdt = MMHyperHC2GC(pVM, pVM->selm.s.HCPtrLdt);490 pVM->selm.s.pvLdtRC = MMHyperR3ToRC(pVM, pVM->selm.s.pvLdtR3); 456 491 #ifdef SELM_TRACK_SHADOW_LDT_CHANGES 457 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_HYPERVISOR, pVM->selm.s. GCPtrLdt,458 pVM->selm.s. GCPtrLdt+ _64K + PAGE_SIZE - 1,459 0, 0, "selm gcShadowLDTWriteHandler", 0, "Shadow LDT write access handler");492 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_HYPERVISOR, pVM->selm.s.pvLdtRC, 493 pVM->selm.s.pvLdtRC + _64K + PAGE_SIZE - 1, 494 0, 0, "selmRCShadowLDTWriteHandler", 0, "Shadow LDT write access handler"); 460 495 AssertRC(rc); 461 496 #endif … … 534 569 AssertRC(rc); 535 570 pVM->selm.s.GCPtrGuestTss = RTRCPTR_MAX; 536 pVM->selm.s.GCSelTss = ~0;571 pVM->selm.s.GCSelTss = RTSEL_MAX; 537 572 } 538 573 #endif … … 590 625 AssertRC(rc); 591 626 pVM->selm.s.GCPtrGuestTss = RTRCPTR_MAX; 592 pVM->selm.s.GCSelTss = ~0;627 pVM->selm.s.GCSelTss = RTSEL_MAX; 593 628 } 594 629 #endif … … 598 633 */ 599 634 #ifdef SELM_TRACK_SHADOW_GDT_CHANGES 600 if (pVM->selm.s.paGdt GC != 0)601 { 602 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.paGdt GC);635 if (pVM->selm.s.paGdtRC != NIL_RTRCPTR) 636 { 637 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.paGdtRC); 603 638 AssertRC(rc); 604 pVM->selm.s.paGdt GC = 0;639 pVM->selm.s.paGdtRC = NIL_RTRCPTR; 605 640 } 606 641 #endif 607 642 #ifdef SELM_TRACK_SHADOW_TSS_CHANGES 608 if (pVM->selm.s. GCPtrTss!= RTRCPTR_MAX)609 { 610 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s. GCPtrTss);643 if (pVM->selm.s.pvMonShwTssRC != RTRCPTR_MAX) 644 { 645 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.pvMonShwTssRC); 611 646 AssertRC(rc); 612 pVM->selm.s. GCPtrTss= RTRCPTR_MAX;647 pVM->selm.s.pvMonShwTssRC = RTRCPTR_MAX; 613 648 } 614 649 #endif 615 650 #ifdef SELM_TRACK_SHADOW_LDT_CHANGES 616 if (pVM->selm.s. GCPtrLdt!= RTRCPTR_MAX)617 { 618 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s. GCPtrLdt);651 if (pVM->selm.s.pvLdtRC != RTRCPTR_MAX) 652 { 653 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.pvLdtRC); 619 654 AssertRC(rc); 620 pVM->selm.s. GCPtrLdt= RTRCPTR_MAX;655 pVM->selm.s.pvLdtRC = RTRCPTR_MAX; 621 656 } 622 657 #endif … … 628 663 pVM->selm.s.fDisableMonitoring = true; 629 664 } 665 630 666 631 667 /** … … 650 686 SSMR3PutSel(pSSM, pSelm->aHyperSel[SELM_HYPER_SEL_DS]); 651 687 SSMR3PutSel(pSSM, pSelm->aHyperSel[SELM_HYPER_SEL_CS64]); 652 SSMR3PutSel(pSSM, pSelm->aHyperSel[SELM_HYPER_SEL_CS64]); / /reserved for DS64.688 SSMR3PutSel(pSSM, pSelm->aHyperSel[SELM_HYPER_SEL_CS64]); /* reserved for DS64. 
*/ 653 689 SSMR3PutSel(pSSM, pSelm->aHyperSel[SELM_HYPER_SEL_TSS]); 654 690 return SSMR3PutSel(pSSM, pSelm->aHyperSel[SELM_HYPER_SEL_TSS_TRAP08]); … … 813 849 */ 814 850 RTUINT cbEffLimit = GDTR.cbGdt; 815 PX86DESC pGDTE = &pVM->selm.s.paGdt HC[1];851 PX86DESC pGDTE = &pVM->selm.s.paGdtR3[1]; 816 852 rc = PGMPhysSimpleReadGCPtr(pVM, pGDTE, GDTR.pGdt + sizeof(X86DESC), cbEffLimit + 1 - sizeof(X86DESC)); 817 853 if (VBOX_FAILURE(rc)) … … 828 864 RTUINT cbLeft = cbEffLimit + 1 - sizeof(X86DESC); 829 865 RTGCPTR GCPtrSrc = (RTGCPTR)GDTR.pGdt + sizeof(X86DESC); 830 uint8_t *pu8Dst = (uint8_t *)&pVM->selm.s.paGdt HC[1];866 uint8_t *pu8Dst = (uint8_t *)&pVM->selm.s.paGdtR3[1]; 831 867 uint8_t *pu8DstInvalid = pu8Dst; 832 868 … … 862 898 if (pu8DstInvalid != pu8Dst) 863 899 { 864 cbEffLimit = pu8DstInvalid - (uint8_t *)pVM->selm.s.paGdt HC- 1;900 cbEffLimit = pu8DstInvalid - (uint8_t *)pVM->selm.s.paGdtR3 - 1; 865 901 /* If any GDTEs was invalidated, zero them. */ 866 902 if (cbEffLimit < pVM->selm.s.cbEffGuestGdtLimit) … … 884 920 if (cbEffLimit >= SELM_HYPER_DEFAULT_BASE) 885 921 { 886 PX86DESC pGDTEStart = pVM->selm.s.paGdt HC;922 PX86DESC pGDTEStart = pVM->selm.s.paGdtR3; 887 923 PX86DESC pGDTE = (PX86DESC)((char *)pGDTEStart + GDTR.cbGdt + 1 - sizeof(X86DESC)); 888 924 int iGDT = 0; … … 895 931 if (!pGDTE->Gen.u1Present) 896 932 { 897 aHyperSel[iGDT] = ((uintptr_t)pGDTE - (uintptr_t)pVM->selm.s.paGdt HC) / sizeof(X86DESC);933 aHyperSel[iGDT] = ((uintptr_t)pGDTE - (uintptr_t)pVM->selm.s.paGdtR3) / sizeof(X86DESC); 898 934 aHyperSel[iGDT] = aHyperSel[iGDT] << X86_SEL_SHIFT; 899 935 Log(("SELM: Found unused GDT %04X\n", aHyperSel[iGDT])); … … 1027 1063 1028 1064 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, GDTR.pGdt, GDTR.pGdt + GDTR.cbGdt /* already inclusive */, 1029 0, selm GuestGDTWriteHandler, "selmgcGuestGDTWriteHandler", 0, "Guest GDT write access handler");1065 0, selmR3GuestGDTWriteHandler, "selmRCGuestGDTWriteHandler", 0, "Guest GDT write access handler"); 1030 1066 if (VBOX_FAILURE(rc)) 1031 1067 return rc; … … 1091 1127 * Get the LDT selector. 1092 1128 */ 1093 PX86DESC pDesc = &pVM->selm.s.paGdtHC[SelLdt >> X86_SEL_SHIFT];1129 PX86DESC pDesc = &pVM->selm.s.paGdtR3[SelLdt >> X86_SEL_SHIFT]; 1094 1130 RTGCPTR GCPtrLdt = X86DESC_BASE(*pDesc); 1095 1131 unsigned cbLdt = X86DESC_LIMIT(*pDesc); … … 1130 1166 * (this is necessary due to redundant LDT updates; see todo above at GDT sync) 1131 1167 */ 1132 if (MMHyperIsInsideArea(pVM, GCPtrLdt) == true)1168 if (MMHyperIsInsideArea(pVM, GCPtrLdt)) 1133 1169 GCPtrLdt = pVM->selm.s.GCPtrGuestLdt; /* use the old one */ 1134 1170 … … 1161 1197 #endif 1162 1198 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, GCPtrLdt, GCPtrLdt + cbLdt /* already inclusive */, 1163 0, selm GuestLDTWriteHandler, "selmgcGuestLDTWriteHandler", 0, "Guest LDT write access handler");1199 0, selmR3GuestLDTWriteHandler, "selmRCGuestLDTWriteHandler", 0, "Guest LDT write access handler"); 1164 1200 if (rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT) 1165 1201 { … … 1190 1226 unsigned off; 1191 1227 pVM->selm.s.offLdtHyper = off = (GCPtrLdt & PAGE_OFFSET_MASK); 1192 RTGCPTR GCPtrShadowLDT = (RTGCPTR)((RTGCUINTPTR)pVM->selm.s. 
GCPtrLdt+ off);1193 PX86DESC pShadowLDT = (PX86DESC)((uintptr_t)pVM->selm.s.HCPtrLdt+ off);1228 RTGCPTR GCPtrShadowLDT = (RTGCPTR)((RTGCUINTPTR)pVM->selm.s.pvLdtRC + off); 1229 PX86DESC pShadowLDT = (PX86DESC)((uintptr_t)pVM->selm.s.pvLdtR3 + off); 1194 1230 1195 1231 /* … … 1295 1331 else 1296 1332 { 1297 AssertMsg(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc=% d\n", rc));1333 AssertMsg(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc=%Rrc\n", rc)); 1298 1334 rc = PGMMapSetPage(pVM, GCPtrShadowLDT & PAGE_BASE_GC_MASK, PAGE_SIZE, 0); 1299 1335 AssertRC(rc); … … 1331 1367 * @param pvUser User argument. 1332 1368 */ 1333 static DECLCALLBACK(int) selm GuestGDTWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)1369 static DECLCALLBACK(int) selmR3GuestGDTWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser) 1334 1370 { 1335 1371 Assert(enmAccessType == PGMACCESSTYPE_WRITE); 1336 Log(("selm GuestGDTWriteHandler: write to %VGv size %d\n", GCPtr, cbBuf));1372 Log(("selmR3GuestGDTWriteHandler: write to %VGv size %d\n", GCPtr, cbBuf)); 1337 1373 VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT); 1338 1374 1339 1375 return VINF_PGM_HANDLER_DO_DEFAULT; 1340 1376 } 1377 1341 1378 1342 1379 /** … … 1356 1393 * @param pvUser User argument. 1357 1394 */ 1358 static DECLCALLBACK(int) selm GuestLDTWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)1395 static DECLCALLBACK(int) selmR3GuestLDTWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser) 1359 1396 { 1360 1397 Assert(enmAccessType == PGMACCESSTYPE_WRITE); 1361 Log(("selm GuestLDTWriteHandler: write to %VGv size %d\n", GCPtr, cbBuf));1398 Log(("selmR3GuestLDTWriteHandler: write to %VGv size %d\n", GCPtr, cbBuf)); 1362 1399 VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT); 1363 1400 return VINF_PGM_HANDLER_DO_DEFAULT; 1364 1401 } 1402 1365 1403 1366 1404 /** … … 1380 1418 * @param pvUser User argument. 1381 1419 */ 1382 static DECLCALLBACK(int) selm GuestTSSWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)1420 static DECLCALLBACK(int) selmR3GuestTSSWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser) 1383 1421 { 1384 1422 Assert(enmAccessType == PGMACCESSTYPE_WRITE); 1385 Log(("selm GuestTSSWriteHandler: write to %VGv size %d\n", GCPtr, cbBuf));1423 Log(("selmR3GuestTSSWriteHandler: write to %VGv size %d\n", GCPtr, cbBuf)); 1386 1424 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS); 1387 1425 return VINF_PGM_HANDLER_DO_DEFAULT; 1388 1426 } 1427 1389 1428 1390 1429 /** … … 1428 1467 * Guest TR is not NULL. 
1429 1468 */ 1430 PX86DESC pDesc = &pVM->selm.s.paGdtHC[SelTss >> X86_SEL_SHIFT];1469 PX86DESC pDesc = &pVM->selm.s.paGdtR3[SelTss >> X86_SEL_SHIFT]; 1431 1470 RTGCPTR GCPtrTss = X86DESC_BASE(*pDesc); 1432 1471 unsigned cbTss = X86DESC_LIMIT(*pDesc); … … 1489 1528 1490 1529 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, GCPtrTss, GCPtrTss + cbTss - 1, 1491 0, selm GuestTSSWriteHandler, "selmgcGuestTSSWriteHandler", 0, "Guest TSS write access handler");1530 0, selmR3GuestTSSWriteHandler, "selmRCGuestTSSWriteHandler", 0, "Guest TSS write access handler"); 1492 1531 if (VBOX_FAILURE(rc)) 1493 1532 { … … 1509 1548 if (VBOX_SUCCESS(rc)) 1510 1549 { 1511 #ifdef DEBUG 1550 #ifdef LOG_ENABLED 1512 1551 uint32_t ssr0, espr0; 1513 1552 … … 1518 1557 Log(("SELMR3SyncTSS: Updating TSS ring 0 stack to %04X:%08X\n", tss.ss0, tss.esp0)); 1519 1558 Log(("offIoBitmap=%#x\n", tss.offIoBitmap)); 1520 #endif 1559 #endif /* LOG_ENABLED */ 1521 1560 /* Update our TSS structure for the guest's ring 1 stack */ 1522 1561 SELMSetRing1Stack(pVM, tss.ss0 | 1, tss.esp0); … … 1585 1624 */ 1586 1625 RTGCPTR GCPtrGDTEGuest = GDTR.pGdt; 1587 PX86DESC pGDTE = pVM->selm.s.paGdt HC;1626 PX86DESC pGDTE = pVM->selm.s.paGdtR3; 1588 1627 PX86DESC pGDTEEnd = (PX86DESC)((uintptr_t)pGDTE + GDTR.cbGdt); 1589 1628 while (pGDTE < pGDTEEnd) … … 1603 1642 || pGDTE->Gen.u1DescType != GDTEGuest.Gen.u1DescType) 1604 1643 { 1605 unsigned iGDT = pGDTE - pVM->selm.s.paGdt HC;1644 unsigned iGDT = pGDTE - pVM->selm.s.paGdtR3; 1606 1645 SELMR3DumpDescriptor(*pGDTE, iGDT << 3, "SELMR3DebugCheck: GDT mismatch, shadow"); 1607 1646 SELMR3DumpDescriptor(GDTEGuest, iGDT << 3, "SELMR3DebugCheck: GDT mismatch, guest"); … … 1658 1697 */ 1659 1698 unsigned off = (GCPtrLDTEGuest & PAGE_OFFSET_MASK); 1660 PX86DESC pLDTE = (PX86DESC)((uintptr_t)pVM->selm.s.HCPtrLdt+ off);1661 PX86DESC pLDTEEnd = (PX86DESC)((uintptr_t)pGDTE + cbLdt);1699 PX86DESC pLDTE = (PX86DESC)((uintptr_t)pVM->selm.s.pvLdtR3 + off); 1700 PX86DESC pLDTEEnd = (PX86DESC)((uintptr_t)pGDTE + cbLdt); 1662 1701 while (pLDTE < pLDTEEnd) 1663 1702 { … … 1674 1713 || pLDTE->Gen.u1DescType != LDTEGuest.Gen.u1DescType) 1675 1714 { 1676 unsigned iLDT = pLDTE - (PX86DESC)((uintptr_t)pVM->selm.s. HCPtrLdt+ off);1715 unsigned iLDT = pLDTE - (PX86DESC)((uintptr_t)pVM->selm.s.pvLdtR3 + off); 1677 1716 SELMR3DumpDescriptor(*pLDTE, iLDT << 3, "SELMR3DebugCheck: LDT mismatch, shadow"); 1678 1717 SELMR3DumpDescriptor(LDTEGuest, iLDT << 3, "SELMR3DebugCheck: LDT mismatch, guest"); … … 1685 1724 } 1686 1725 1687 #else 1726 #else /* !VBOX_STRICT */ 1688 1727 NOREF(pVM); 1689 #endif 1728 #endif /* !VBOX_STRICT */ 1690 1729 1691 1730 return VINF_SUCCESS; … … 1712 1751 * Guest TR is not NULL. 1713 1752 */ 1714 PX86DESC pDesc = &pVM->selm.s.paGdtHC[SelTss >> X86_SEL_SHIFT];1753 PX86DESC pDesc = &pVM->selm.s.paGdtR3[SelTss >> X86_SEL_SHIFT]; 1715 1754 RTGCPTR GCPtrTss = X86DESC_BASE(*pDesc); 1716 1755 unsigned cbTss = X86DESC_LIMIT(*pDesc); … … 1718 1757 cbTss = (cbTss << PAGE_SHIFT) | PAGE_OFFSET_MASK; 1719 1758 cbTss++; 1720 # if 11759 # if 1 1721 1760 /* Don't bother with anything but the core structure. (Actually all we care for is the r0 ss.) 
*/ 1722 1761 if (cbTss > sizeof(VBOXTSS)) 1723 1762 cbTss = sizeof(VBOXTSS); 1724 # endif1763 # endif 1725 1764 AssertMsg((GCPtrTss >> PAGE_SHIFT) == ((GCPtrTss + sizeof(VBOXTSS) - 1) >> PAGE_SHIFT), 1726 1765 ("GCPtrTss=%VGv cbTss=%#x - We assume everything is inside one page!\n", GCPtrTss, cbTss)); … … 1774 1813 } 1775 1814 return false; 1776 #else 1815 #else /* !VBOX_STRICT */ 1777 1816 NOREF(pVM); 1778 1817 return true; 1779 #endif 1818 #endif /* !VBOX_STRICT */ 1780 1819 } 1781 1820 … … 1831 1870 } 1832 1871 1833 /** 1834 * Gets information about a selector. 1835 * Intended for the debugger mostly and will prefer the guest 1836 * descriptor tables over the shadow ones. 1837 * 1838 * @returns VINF_SUCCESS on success. 1839 * @returns VERR_INVALID_SELECTOR if the selector isn't fully inside the descriptor table. 1840 * @returns VERR_SELECTOR_NOT_PRESENT if the selector wasn't present. 1841 * @returns VERR_PAGE_TABLE_NOT_PRESENT or VERR_PAGE_NOT_PRESENT if the pagetable or page 1842 * backing the selector table wasn't present. 1843 * @returns Other VBox status code on other errors. 1872 1873 /** 1874 * Gets information about a 64-bit selector, SELMR3GetSelectorInfo helper. 1875 * 1876 * See SELMR3GetSelectorInfo for details. 1877 * 1878 * @returns VBox status code, see SELMR3GetSelectorInfo for details. 1844 1879 * 1845 1880 * @param pVM VM handle. … … 1847 1882 * @param pSelInfo Where to store the information. 1848 1883 */ 1849 static int selmr3GetSelectorInfo64(PVM pVM, RTSEL Sel, PSELMSELINFO pSelInfo) 1850 { 1851 X86DESC64 Desc; 1852 1853 Assert(pSelInfo); 1884 static int selmR3GetSelectorInfo64(PVM pVM, RTSEL Sel, PSELMSELINFO pSelInfo) 1885 { 1886 pSelInfo->fHyper = false; 1854 1887 1855 1888 /* 1856 1889 * Read it from the guest descriptor table. 1857 1890 */ 1858 pSelInfo->fHyper = false; 1859 1891 X86DESC64 Desc; 1860 1892 VBOXGDTR Gdtr; 1861 1893 RTGCPTR GCPtrDesc; … … 1871 1903 { 1872 1904 /* 1873 1874 1905 * LDT - must locate the LDT first... 1906 */ 1875 1907 RTSEL SelLdt = CPUMGetGuestLDTR(pVM); 1876 1908 if ( (unsigned)(SelLdt & X86_SEL_MASK) < sizeof(X86DESC) /* the first selector is invalid, right? */ … … 1921 1953 1922 1954 /** 1923 * Gets information about a selector. 1924 * Intended for the debugger mostly and will prefer the guest 1925 * descriptor tables over the shadow ones. 1926 * 1927 * @returns VINF_SUCCESS on success. 1928 * @returns VERR_INVALID_SELECTOR if the selector isn't fully inside the descriptor table. 1929 * @returns VERR_SELECTOR_NOT_PRESENT if the selector wasn't present. 1930 * @returns VERR_PAGE_TABLE_NOT_PRESENT or VERR_PAGE_NOT_PRESENT if the pagetable or page 1931 * backing the selector table wasn't present. 1932 * @returns Other VBox status code on other errors. 1955 * Gets information about a 64-bit selector, SELMR3GetSelectorInfo helper. 1956 * 1957 * See SELMR3GetSelectorInfo for details. 1958 * 1959 * @returns VBox status code, see SELMR3GetSelectorInfo for details. 1933 1960 * 1934 1961 * @param pVM VM handle. … … 1936 1963 * @param pSelInfo Where to store the information. 
1937 1964 */ 1938 VMMR3DECL(int) SELMR3GetSelectorInfo(PVM pVM, RTSEL Sel, PSELMSELINFO pSelInfo) 1939 { 1940 Assert(pSelInfo); 1941 1942 if (CPUMIsGuestInLongMode(pVM)) 1943 return selmr3GetSelectorInfo64(pVM, Sel, pSelInfo); 1944 1965 static int selmR3GetSelectorInfo32(PVM pVM, RTSEL Sel, PSELMSELINFO pSelInfo) 1966 { 1945 1967 /* 1946 1968 * Read the descriptor entry … … 1959 1981 */ 1960 1982 pSelInfo->fHyper = true; 1961 Desc = pVM->selm.s.paGdt HC[Sel >> X86_SEL_SHIFT];1983 Desc = pVM->selm.s.paGdtR3[Sel >> X86_SEL_SHIFT]; 1962 1984 } 1963 1985 else if (CPUMIsGuestInProtectedMode(pVM)) … … 2045 2067 2046 2068 /** 2047 * Gets information about a selector from the shadow tables. 2048 * 2049 * This is intended to be faster than the SELMR3GetSelectorInfo() method, but requires 2050 * that the caller ensures that the shadow tables are up to date. 2069 * Gets information about a selector. 2070 * Intended for the debugger mostly and will prefer the guest 2071 * descriptor tables over the shadow ones. 2051 2072 * 2052 2073 * @returns VINF_SUCCESS on success. … … 2061 2082 * @param pSelInfo Where to store the information. 2062 2083 */ 2084 VMMR3DECL(int) SELMR3GetSelectorInfo(PVM pVM, RTSEL Sel, PSELMSELINFO pSelInfo) 2085 { 2086 AssertPtr(pSelInfo); 2087 if (CPUMIsGuestInLongMode(pVM)) 2088 return selmR3GetSelectorInfo64(pVM, Sel, pSelInfo); 2089 return selmR3GetSelectorInfo32(pVM, Sel, pSelInfo); 2090 } 2091 2092 2093 /** 2094 * Gets information about a selector from the shadow tables. 2095 * 2096 * This is intended to be faster than the SELMR3GetSelectorInfo() method, but requires 2097 * that the caller ensures that the shadow tables are up to date. 2098 * 2099 * @returns VINF_SUCCESS on success. 2100 * @returns VERR_INVALID_SELECTOR if the selector isn't fully inside the descriptor table. 2101 * @returns VERR_SELECTOR_NOT_PRESENT if the selector wasn't present. 2102 * @returns VERR_PAGE_TABLE_NOT_PRESENT or VERR_PAGE_NOT_PRESENT if the pagetable or page 2103 * backing the selector table wasn't present. 2104 * @returns Other VBox status code on other errors. 2105 * 2106 * @param pVM VM handle. 2107 * @param Sel The selector to get info about. 2108 * @param pSelInfo Where to store the information. 2109 */ 2063 2110 VMMR3DECL(int) SELMR3GetShadowSelectorInfo(PVM pVM, RTSEL Sel, PSELMSELINFO pSelInfo) 2064 2111 { … … 2074 2121 * Global descriptor. 2075 2122 */ 2076 Desc = pVM->selm.s.paGdt HC[Sel >> X86_SEL_SHIFT];2123 Desc = pVM->selm.s.paGdtR3[Sel >> X86_SEL_SHIFT]; 2077 2124 pSelInfo->fHyper = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS] == (Sel & X86_SEL_MASK) 2078 2125 || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS] == (Sel & X86_SEL_MASK) … … 2087 2134 * Local Descriptor. 2088 2135 */ 2089 PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s. HCPtrLdt+ pVM->selm.s.offLdtHyper);2136 PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.pvLdtR3 + pVM->selm.s.offLdtHyper); 2090 2137 Desc = paLDT[Sel >> X86_SEL_SHIFT]; 2091 2138 /** @todo check if the LDT page is actually available. 
*/ … … 2128 2175 } const aTypes[32] = 2129 2176 { 2130 2177 #define STRENTRY(str) { sizeof(str) - 1, str } 2131 2178 /* system */ 2132 2179 STRENTRY("Reserved0 "), /* 0x00 */ … … 2163 2210 STRENTRY("CodeConfER "), /* 0x1e */ 2164 2211 STRENTRY("CodeConfER Accessed ") /* 0x1f */ 2165 2212 #undef SYSENTRY 2166 2213 }; 2167 2214 #define ADD_STR(psz, pszAdd) do { strcpy(psz, pszAdd); psz += strlen(pszAdd); } while (0) 2168 2215 char szMsg[128]; 2169 2216 char *psz = &szMsg[0]; … … 2182 2229 else 2183 2230 ADD_STR(psz, "16-bit "); 2184 2231 #undef ADD_STR 2185 2232 *psz = '\0'; 2186 2233 … … 2223 2270 static DECLCALLBACK(void) selmR3InfoGdt(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs) 2224 2271 { 2225 pHlp->pfnPrintf(pHlp, "Shadow GDT (GCAddr=%VGv):\n", MMHyperHC2GC(pVM, pVM->selm.s.paGdt HC));2272 pHlp->pfnPrintf(pHlp, "Shadow GDT (GCAddr=%VGv):\n", MMHyperHC2GC(pVM, pVM->selm.s.paGdtR3)); 2226 2273 for (unsigned iGDT = 0; iGDT < SELM_GDT_ELEMENTS; iGDT++) 2227 2274 { 2228 if (pVM->selm.s.paGdt HC[iGDT].Gen.u1Present)2275 if (pVM->selm.s.paGdtR3[iGDT].Gen.u1Present) 2229 2276 { 2230 2277 char szOutput[128]; 2231 selmR3FormatDescriptor(pVM->selm.s.paGdt HC[iGDT], iGDT << X86_SEL_SHIFT, &szOutput[0], sizeof(szOutput));2278 selmR3FormatDescriptor(pVM->selm.s.paGdtR3[iGDT], iGDT << X86_SEL_SHIFT, &szOutput[0], sizeof(szOutput)); 2232 2279 const char *psz = ""; 2233 2280 if (iGDT == ((unsigned)pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS] >> X86_SEL_SHIFT)) … … 2296 2343 { 2297 2344 unsigned cLDTs = ((unsigned)pVM->selm.s.cbLdtLimit + 1) >> X86_SEL_SHIFT; 2298 PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.HCPtrLdt+ pVM->selm.s.offLdtHyper);2299 pHlp->pfnPrintf(pHlp, "Shadow LDT (GCAddr=%VGv limit=%d):\n", pVM->selm.s. GCPtrLdt+ pVM->selm.s.offLdtHyper, pVM->selm.s.cbLdtLimit);2345 PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.pvLdtR3 + pVM->selm.s.offLdtHyper); 2346 pHlp->pfnPrintf(pHlp, "Shadow LDT (GCAddr=%VGv limit=%d):\n", pVM->selm.s.pvLdtRC + pVM->selm.s.offLdtHyper, pVM->selm.s.cbLdtLimit); 2300 2347 for (unsigned iLDT = 0; iLDT < cLDTs; iLDT++) 2301 2348 { … … 2331 2378 if (VBOX_FAILURE(rc)) 2332 2379 { 2333 pHlp->pfnPrintf(pHlp, "Guest LDT (Sel=%x): rc=% Vrc\n", SelLdt, rc);2380 pHlp->pfnPrintf(pHlp, "Guest LDT (Sel=%x): rc=%Rrc\n", SelLdt, rc); 2334 2381 return; 2335 2382 } … … 2371 2418 } 2372 2419 2420 2373 2421 /** 2374 2422 * Dumps the hypervisor LDT … … 2381 2429 } 2382 2430 2431 2383 2432 /** 2384 2433 * Dumps the guest GDT … … 2391 2440 } 2392 2441 2442 2393 2443 /** 2394 2444 * Dumps the guest LDT -
trunk/src/VBox/VMM/SELMInternal.h
r12989 r13577 68 68 * @param pSELM Pointer to SELM instance data. 69 69 */ 70 #define SELM2VM(pSELM) ( (PVM)((char *)pSELM - pSELM->offVM) )70 #define SELM2VM(pSELM) ( (PVM)((char *)pSELM - pSELM->offVM) ) 71 71 72 72 … … 81 81 RTINT offVM; 82 82 83 /* Flat CS, DS, 64 bit mode CS, TSS & trap 8 TSS. */83 /** Flat CS, DS, 64 bit mode CS, TSS & trap 8 TSS. */ 84 84 RTSEL aHyperSel[SELM_HYPER_SEL_MAX]; 85 85 86 /** Pointer to the GCs - HCPtr.86 /** Pointer to the GCs - R3 Ptr. 87 87 * This size is governed by SELM_GDT_ELEMENTS. */ 88 R3 R0PTRTYPE(PX86DESC) paGdtHC;89 /** Pointer to the GCs - GC Ptr.88 R3PTRTYPE(PX86DESC) paGdtR3; 89 /** Pointer to the GCs - RC Ptr. 90 90 * This is not initialized until the first relocation because it's used to 91 91 * check if the shadow GDT virtual handler requires deregistration. */ 92 RCPTRTYPE(PX86DESC) paGdtGC; 93 /** Current (last) Guest's GDTR. */ 92 RCPTRTYPE(PX86DESC) paGdtRC; 93 /** Current (last) Guest's GDTR. 94 * The pGdt member is set to RTRCPTR_MAX if we're not monitoring the guest GDT. */ 94 95 VBOXGDTR GuestGdtr; 95 96 /** The current (last) effective Guest GDT size. */ … … 98 99 uint32_t padding0; 99 100 100 /** HC Pointer to the LDT shadow area placed in Hypervisor memory arena. */101 R3PTRTYPE(void *) HCPtrLdt;102 /** GC Pointer to the LDT shadow area placed in Hypervisor memory arena. */103 RCPTRTYPE(void *) GCPtrLdt;101 /** R3 pointer to the LDT shadow area in HMA. */ 102 R3PTRTYPE(void *) pvLdtR3; 103 /** RC pointer to the LDT shadow area in HMA. */ 104 RCPTRTYPE(void *) pvLdtRC; 104 105 #if GC_ARCH_BITS == 64 105 106 RTRCPTR padding1; 106 107 #endif 107 /** GC Pointer to the current Guest's LDT. */ 108 /** The address of the guest LDT. 109 * RTRCPTR_MAX if not monitored. */ 108 110 RTGCPTR GCPtrGuestLdt; 109 111 /** Current LDT limit, both Guest and Shadow. */ 110 112 RTUINT cbLdtLimit; 111 /** Current LDT offset relative to pvLdt *. */113 /** Current LDT offset relative to pvLdtR3/pvLdtRC. */ 112 114 RTUINT offLdtHyper; 113 115 #if HC_ARCH_BITS == 32 && GC_ARCH_BITS == 64 … … 121 123 VBOXTSS TssTrap08; 122 124 123 /** GC Pointer to the TSS shadow area (Tss) placed in Hypervisor memory arena. */124 RCPTRTYPE(void *) GCPtrTss;125 /** Monitored shadow TSS address. */ 126 RCPTRTYPE(void *) pvMonShwTssRC; 125 127 #if GC_ARCH_BITS == 64 126 128 RTRCPTR padding3; 127 129 #endif 128 /** GC Pointer to the current Guest's TSS. */ 130 /** GC Pointer to the current Guest's TSS. 131 * RTRCPTR_MAX if not monitored. */ 129 132 RTGCPTR GCPtrGuestTss; 130 133 /** The size of the guest TSS. */ … … 134 137 /** The size of the Guest's TSS part we're monitoring. */ 135 138 RTUINT cbMonitoredGuestTss; 136 /** GC shadow TSS selector */ 139 /** The guest TSS selector at last sync (part of monitoring). 140 * Contains RTSEL_MAX if not set. */ 137 141 RTSEL GCSelTss; 138 142 … … 154 158 155 159 /** GC: The number of handled writes to the Guest's GDT. */ 156 STAMCOUNTER Stat GCWriteGuestGDTHandled;160 STAMCOUNTER StatRCWriteGuestGDTHandled; 157 161 /** GC: The number of unhandled write to the Guest's GDT. */ 158 STAMCOUNTER Stat GCWriteGuestGDTUnhandled;162 STAMCOUNTER StatRCWriteGuestGDTUnhandled; 159 163 /** GC: The number of times writes to Guest's LDT was detected. */ 160 STAMCOUNTER Stat GCWriteGuestLDT;164 STAMCOUNTER StatRCWriteGuestLDT; 161 165 /** GC: The number of handled writes to the Guest's TSS. 
*/ 162 STAMCOUNTER Stat GCWriteGuestTSSHandled;166 STAMCOUNTER StatRCWriteGuestTSSHandled; 163 167 /** GC: The number of handled writes to the Guest's TSS where we detected a change. */ 164 STAMCOUNTER Stat GCWriteGuestTSSHandledChanged;168 STAMCOUNTER StatRCWriteGuestTSSHandledChanged; 165 169 /** GC: The number of handled redir writes to the Guest's TSS where we detected a change. */ 166 STAMCOUNTER Stat GCWriteGuestTSSRedir;170 STAMCOUNTER StatRCWriteGuestTSSRedir; 167 171 /** GC: The number of unhandled writes to the Guest's TSS. */ 168 STAMCOUNTER Stat GCWriteGuestTSSUnhandled;172 STAMCOUNTER StatRCWriteGuestTSSUnhandled; 169 173 /** The number of times we had to relocate our hypervisor selectors. */ 170 174 STAMCOUNTER StatHyperSelsChanged; … … 175 179 __BEGIN_DECLS 176 180 177 VMMRCDECL(int) selm gcGuestGDTWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange);178 VMMRCDECL(int) selm gcGuestLDTWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange);179 VMMRCDECL(int) selm gcGuestTSSWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange);181 VMMRCDECL(int) selmRCGuestGDTWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange); 182 VMMRCDECL(int) selmRCGuestLDTWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange); 183 VMMRCDECL(int) selmRCGuestTSSWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange); 180 184 181 VMMRCDECL(int) selm gcShadowGDTWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange);182 VMMRCDECL(int) selm gcShadowLDTWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange);183 VMMRCDECL(int) selm gcShadowTSSWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange);185 VMMRCDECL(int) selmRCShadowGDTWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange); 186 VMMRCDECL(int) selmRCShadowLDTWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange); 187 VMMRCDECL(int) selmRCShadowTSSWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange); 184 188 185 189 __END_DECLS -
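SELMInternal.h renames the dual-context pointers from HC/GC to R3/RC (paGdtR3/paGdtRC, pvLdtR3/pvLdtRC), and SELMAll.cpp below switches to a CTX_SUFF(...) accessor so the same source picks whichever pointer is valid in the context it is compiled for. As an illustration only (the real macro is defined in the VMM headers and may differ in detail), such a suffix selector can be modeled like this:

#include <stdint.h>

/* Illustrative context-suffix macro: pick the member that is valid in the
   context this translation unit is built for.  This mimics the idea behind
   the CTX_SUFF() usage seen in SELMAll.cpp; it is not the VirtualBox macro. */
#if defined(IN_RC)
# define MY_CTX_SUFF(a_Name)    a_Name##RC
#else
# define MY_CTX_SUFF(a_Name)    a_Name##R3
#endif

typedef struct MYX86DESC { uint64_t u64; } MYX86DESC;   /* stand-in descriptor */
typedef uint32_t MYRCPTR;                               /* stand-in RC pointer  */

typedef struct MYSELM
{
    MYX86DESC *paGdtR3;   /* ring-3 mapping of the shadow GDT            */
    MYRCPTR    paGdtRC;   /* raw-mode-context mapping of the same memory */
} MYSELM;

/* Resolves to paGdtR3 in ring-3 builds and to paGdtRC in raw-mode builds. */
#define MYSELM_GET_GDT(a_pSelm)  ((a_pSelm)->MY_CTX_SUFF(paGdt))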
trunk/src/VBox/VMM/VMMAll/SELMAll.cpp
r13144 r13577 39 39 40 40 41 #ifndef IN_RING0 42 41 43 /** 42 44 * Converts a GC selector based address to a flat address. … … 58 60 X86DESC Desc; 59 61 if (!(Sel & X86_SEL_LDT)) 60 Desc = pVM->selm.s.CTX SUFF(paGdt)[Sel >> X86_SEL_SHIFT];62 Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT]; 61 63 else 62 64 { 63 65 /** @todo handle LDT pages not present! */ 64 #ifdef IN_GC 65 PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.GCPtrLdt + pVM->selm.s.offLdtHyper); 66 #else 67 PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.HCPtrLdt + pVM->selm.s.offLdtHyper); 68 #endif 66 PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper); 69 67 Desc = paLDT[Sel >> X86_SEL_SHIFT]; 70 68 } … … 72 70 return (RTGCPTR)((RTGCUINTPTR)Addr + X86DESC_BASE(Desc)); 73 71 } 72 #endif /* !IN_RING0 */ 74 73 75 74 … … 101 100 { 102 101 RTGCUINTPTR uFlat = (RTGCUINTPTR)Addr & 0xffff; 103 104 102 if (CPUMAreHiddenSelRegsValid(pVM)) 105 103 uFlat += pHiddenSel->u64Base; … … 109 107 } 110 108 109 #ifdef IN_RING0 110 Assert(CPUMAreHiddenSelRegsValid(pVM)); 111 #else 111 112 /** @todo when we're in 16 bits mode, we should cut off the address as well.. */ 112 113 if (!CPUMAreHiddenSelRegsValid(pVM)) 113 114 return SELMToFlatBySel(pVM, Sel, Addr); 115 #endif 114 116 115 117 /* 64 bits mode: CS, DS, ES and SS are treated as if each segment base is 0 (Intel® 64 and IA-32 Architectures Software Developer's Manual: 3.4.2.1). */ … … 119 121 switch (SelReg) 120 122 { 121 case DIS_SELREG_FS: 122 case DIS_SELREG_GS: 123 return (RTGCPTR)(pHiddenSel->u64Base + Addr); 124 125 default: 126 return Addr; /* base 0 */ 127 } 128 } 123 case DIS_SELREG_FS: 124 case DIS_SELREG_GS: 125 return (RTGCPTR)(pHiddenSel->u64Base + Addr); 126 127 default: 128 return Addr; /* base 0 */ 129 } 130 } 131 129 132 /* AMD64 manual: compatibility mode ignores the high 32 bits when calculating an effective address. */ 130 133 Assert(pHiddenSel->u64Base <= 0xffffffff); … … 149 152 VMMDECL(int) SELMToFlatEx(PVM pVM, DIS_SELREG SelReg, PCCPUMCTXCORE pCtxCore, RTGCPTR Addr, unsigned fFlags, PRTGCPTR ppvGC) 150 153 { 154 /* 155 * Fetch the selector first. 156 */ 151 157 PCPUMSELREGHID pHiddenSel; 152 158 RTSEL Sel; 153 int rc; 154 155 rc = DISFetchRegSegEx(pCtxCore, SelReg, &Sel, &pHiddenSel); AssertRC(rc); 159 int rc = DISFetchRegSegEx(pCtxCore, SelReg, &Sel, &pHiddenSel); 160 AssertRC(rc); 156 161 157 162 /* … … 189 194 u1DescType = pHiddenSel->Attr.n.u1DescType; 190 195 u4Type = pHiddenSel->Attr.n.u4Type; 191 192 196 u32Limit = pHiddenSel->u32Limit; 193 197 … … 199 203 switch (SelReg) 200 204 { 201 case DIS_SELREG_FS:202 case DIS_SELREG_GS:203 pvFlat = (pHiddenSel->u64Base + Addr);204 break;205 206 default:207 pvFlat = Addr;208 break;205 case DIS_SELREG_FS: 206 case DIS_SELREG_GS: 207 pvFlat = (pHiddenSel->u64Base + Addr); 208 break; 209 210 default: 211 pvFlat = Addr; 212 break; 209 213 } 210 214 } … … 304 308 && (unsigned)(Sel & X86_SEL_MASK) >= pVM->selm.s.GuestGdtr.cbGdt) 305 309 return VERR_INVALID_SELECTOR; 306 Desc = pVM->selm.s.CTX SUFF(paGdt)[Sel >> X86_SEL_SHIFT];310 Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT]; 307 311 } 308 312 else … … 312 316 313 317 /** @todo handle LDT page(s) not present! 
*/ 314 #ifdef IN_GC 315 PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.GCPtrLdt + pVM->selm.s.offLdtHyper); 316 #else 317 PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.HCPtrLdt + pVM->selm.s.offLdtHyper); 318 #endif 318 PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper); 319 319 Desc = paLDT[Sel >> X86_SEL_SHIFT]; 320 320 } … … 341 341 * Type check. 342 342 */ 343 # define BOTH(a, b) ((a << 16) | b)343 # define BOTH(a, b) ((a << 16) | b) 344 344 switch (BOTH(u1DescType, u4Type)) 345 345 { … … 429 429 430 430 } 431 # undef BOTH431 # undef BOTH 432 432 } 433 433 } … … 435 435 return VERR_SELECTOR_NOT_PRESENT; 436 436 } 437 437 438 438 439 #ifndef IN_RING0 … … 512 513 && (unsigned)(Sel & X86_SEL_MASK) >= pVM->selm.s.GuestGdtr.cbGdt) 513 514 return VERR_INVALID_SELECTOR; 514 Desc = pVM->selm.s.CTX SUFF(paGdt)[Sel >> X86_SEL_SHIFT];515 Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT]; 515 516 } 516 517 else … … 520 521 521 522 /** @todo handle LDT page(s) not present! */ 522 #ifdef IN_GC 523 PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.GCPtrLdt + pVM->selm.s.offLdtHyper); 524 #else 525 PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.HCPtrLdt + pVM->selm.s.offLdtHyper); 526 #endif 523 PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper); 527 524 Desc = paLDT[Sel >> X86_SEL_SHIFT]; 528 525 } … … 652 649 #endif /* !IN_RING0 */ 653 650 651 654 652 /** 655 653 * Validates and converts a GC selector based code address to a flat … … 675 673 676 674 675 #ifndef IN_RING0 677 676 /** 678 677 * Validates and converts a GC selector based code address to a flat … … 695 694 X86DESC Desc; 696 695 if (!(SelCS & X86_SEL_LDT)) 697 Desc = pVM->selm.s.CTX SUFF(paGdt)[SelCS >> X86_SEL_SHIFT];696 Desc = pVM->selm.s.CTX_SUFF(paGdt)[SelCS >> X86_SEL_SHIFT]; 698 697 else 699 698 { 700 699 /** @todo handle LDT page(s) not present! */ 701 #ifdef IN_GC 702 PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.GCPtrLdt + pVM->selm.s.offLdtHyper); 703 #else 704 PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.HCPtrLdt + pVM->selm.s.offLdtHyper); 705 #endif 700 PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper); 706 701 Desc = paLDT[SelCS >> X86_SEL_SHIFT]; 707 702 } … … 748 743 return VERR_SELECTOR_NOT_PRESENT; 749 744 } 745 #endif /* !IN_RING0 */ 750 746 751 747 … … 813 809 814 810 811 #ifdef IN_GC 815 812 /** 816 813 * Validates and converts a GC selector based code address to a flat address. … … 840 837 return selmValidateAndConvertCSAddrStd(pVM, SelCPL, SelCS, Addr, ppvFlat, pcBits); 841 838 } 839 #endif /* IN_GC */ 842 840 843 841 … … 861 859 return selmValidateAndConvertCSAddrRealMode(pVM, SelCS, pHiddenCSSel, Addr, ppvFlat); 862 860 861 #ifdef IN_RING0 862 Assert(CPUMAreHiddenSelRegsValid(pVM)); 863 #else 863 864 /** @todo when we're in 16 bits mode, we should cut off the address as well? 
(like in selmValidateAndConvertCSAddrRealMode) */ 864 865 if (!CPUMAreHiddenSelRegsValid(pVM)) 865 866 return selmValidateAndConvertCSAddrStd(pVM, SelCPL, SelCS, Addr, ppvFlat, NULL); 867 #endif 866 868 return selmValidateAndConvertCSAddrHidden(pVM, SelCPL, SelCS, pHiddenCSSel, Addr, ppvFlat); 867 869 } 868 870 869 871 872 #ifndef IN_RING0 870 873 /** 871 874 * Return the cpu mode corresponding to the (CS) selector … … 882 885 X86DESC Desc; 883 886 if (!(Sel & X86_SEL_LDT)) 884 Desc = pVM->selm.s.CTX SUFF(paGdt)[Sel >> X86_SEL_SHIFT];887 Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT]; 885 888 else 886 889 { 887 890 /** @todo handle LDT page(s) not present! */ 888 PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX MID(,PtrLdt) + pVM->selm.s.offLdtHyper);891 PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper); 889 892 Desc = paLDT[Sel >> X86_SEL_SHIFT]; 890 893 } 891 894 return (Desc.Gen.u1DefBig) ? CPUMODE_32BIT : CPUMODE_16BIT; 892 895 } 896 #endif /* !IN_RING0 */ 893 897 894 898 … … 904 908 VMMDECL(DISCPUMODE) SELMGetCpuModeFromSelector(PVM pVM, X86EFLAGS eflags, RTSEL Sel, CPUMSELREGHID *pHiddenSel) 905 909 { 910 #ifdef IN_RING0 911 Assert(CPUMAreHiddenSelRegsValid(pVM)); 912 #else /* !IN_RING0 */ 906 913 if (!CPUMAreHiddenSelRegsValid(pVM)) 907 914 { … … 915 922 return selmGetCpuModeFromSelector(pVM, Sel); 916 923 } 924 #endif /* !IN_RING0 */ 917 925 if ( CPUMIsGuestInLongMode(pVM) 918 926 && pHiddenSel->Attr.n.u1Long) … … 924 932 } 925 933 934 926 935 /** 927 936 * Returns Hypervisor's Trap 08 (\#DF) selector. … … 960 969 pVM->selm.s.Tss.esp1 = (uint32_t)esp; 961 970 } 971 962 972 963 973 #ifndef IN_RING0 … … 969 979 * @param pSS Ring1 SS register value. 970 980 * @param pEsp Ring1 ESP register value. 981 * 982 * @todo Merge in the GC version of this, eliminating it - or move this to 983 * SELM.cpp, making it SELMR3GetRing1Stack. 971 984 */ 972 985 VMMDECL(int) SELMGetRing1Stack(PVM pVM, uint32_t *pSS, PRTGCPTR32 pEsp) … … 980 993 Assert(pVM->selm.s.GCPtrGuestTss && pVM->selm.s.cbMonitoredGuestTss); 981 994 982 # ifdef IN_GC995 # ifdef IN_GC 983 996 bool fTriedAlready = false; 984 997 … … 986 999 rc = MMGCRamRead(pVM, &tss.ss0, (RCPTRTYPE(void *))(GCPtrTss + RT_OFFSETOF(VBOXTSS, ss0)), sizeof(tss.ss0)); 987 1000 rc |= MMGCRamRead(pVM, &tss.esp0, (RCPTRTYPE(void *))(GCPtrTss + RT_OFFSETOF(VBOXTSS, esp0)), sizeof(tss.esp0)); 988 #ifdef DEBUG1001 # ifdef DEBUG 989 1002 rc |= MMGCRamRead(pVM, &tss.offIoBitmap, (RCPTRTYPE(void *))(GCPtrTss + RT_OFFSETOF(VBOXTSS, offIoBitmap)), sizeof(tss.offIoBitmap)); 990 #endif1003 # endif 991 1004 992 1005 if (VBOX_FAILURE(rc)) … … 1006 1019 } 1007 1020 1008 # else /* !IN_GC */1021 # else /* !IN_GC */ 1009 1022 /* Reading too much. Could be cheaper than two seperate calls though. 
*/ 1010 1023 rc = PGMPhysSimpleReadGCPtr(pVM, &tss, GCPtrTss, sizeof(VBOXTSS)); … … 1014 1027 return rc; 1015 1028 } 1016 # endif /* !IN_GC */1017 1018 # ifdef LOG_ENABLED1029 # endif /* !IN_GC */ 1030 1031 # ifdef LOG_ENABLED 1019 1032 uint32_t ssr0 = pVM->selm.s.Tss.ss1; 1020 1033 uint32_t espr0 = pVM->selm.s.Tss.esp1; … … 1025 1038 1026 1039 Log(("offIoBitmap=%#x\n", tss.offIoBitmap)); 1027 # endif1040 # endif 1028 1041 /* Update our TSS structure for the guest's ring 1 stack */ 1029 1042 SELMSetRing1Stack(pVM, tss.ss0 | 1, (RTGCPTR32)tss.esp0); … … 1036 1049 return VINF_SUCCESS; 1037 1050 } 1038 #endif 1051 #endif /* !IN_RING0 */ 1052 1039 1053 1040 1054 /** 1041 1055 * Returns Guest TSS pointer 1042 1056 * 1057 * @returns Pointer to the guest TSS, RTRCPTR_MAX if not being monitored. 1043 1058 * @param pVM VM Handle. 1044 1059 */ … … 1086 1101 1087 1102 #ifndef IN_RING0 1103 1088 1104 /** 1089 1105 * Gets the hypervisor code selector (CS). … … 1148 1164 * switchers. Don't exploit this API! 1149 1165 */ 1150 VMMDECL(RT GCPTR) SELMGetHyperGDT(PVM pVM)1166 VMMDECL(RTRCPTR) SELMGetHyperGDT(PVM pVM) 1151 1167 { 1152 1168 /* 1153 * Always convert this from the HC pointer since . We're can be1169 * Always convert this from the HC pointer since we can be 1154 1170 * called before the first relocation and have to work correctly 1155 1171 * without having dependencies on the relocation order. 1156 1172 */ 1157 return (RTGCPTR)MMHyperHC2GC(pVM, pVM->selm.s.paGdtHC); 1158 } 1159 #endif /* IN_RING0 */ 1173 return (RTRCPTR)MMHyperR3ToRC(pVM, pVM->selm.s.paGdtR3); 1174 } 1175 1176 #endif /* !IN_RING0 */ 1160 1177 1161 1178 /** … … 1178 1195 * Do we have a valid TSS? 1179 1196 */ 1180 if ( pVM->selm.s.GCSelTss == (RTSEL)~01197 if ( pVM->selm.s.GCSelTss == RTSEL_MAX 1181 1198 || !pVM->selm.s.fGuestTss32Bit) 1182 1199 return VERR_SELM_NO_TSS; … … 1205 1222 return VINF_SUCCESS; 1206 1223 } 1224 -
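The SELMToFlat*/selmValidateAndConvertCSAddr* paths above all reduce to the same core operation: index the shadow GDT (or LDT when the TI bit is set) with Sel >> 3, pull the segment base out of the descriptor, and add the offset. The sketch below uses its own descriptor type and skips the limit, type and privilege checks that SELMToFlatEx performs; it is only meant to show how the base is assembled from the descriptor fields.

#include <stdint.h>

/* Legacy 8-byte x86 segment descriptor, raw byte layout. */
typedef struct MYX86DESC
{
    uint16_t u16LimitLow;
    uint16_t u16BaseLow;
    uint8_t  u8BaseMid;
    uint8_t  u8TypeAttr;
    uint8_t  u8LimitHighFlags;
    uint8_t  u8BaseHigh;
} MYX86DESC;

/* Assemble the 32-bit segment base scattered across the descriptor. */
static uint32_t descGetBase(const MYX86DESC *pDesc)
{
    return (uint32_t)pDesc->u16BaseLow
         | ((uint32_t)pDesc->u8BaseMid  << 16)
         | ((uint32_t)pDesc->u8BaseHigh << 24);
}

/* Selector:offset -> flat address, the core of what SELMToFlatBySel does:
   index the descriptor table with Sel >> 3 and add the segment base.
   Limit, type and privilege checks are deliberately omitted here. */
static uint32_t selToFlat(const MYX86DESC *paGdt, uint16_t Sel, uint32_t off)
{
    const MYX86DESC *pDesc = &paGdt[Sel >> 3];
    return descGetBase(pDesc) + off;
}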
trunk/src/VBox/VMM/VMMGC/SELMGC.cpp
r13144 r13577 100 100 * 'little' adjustment we do for DPL 0 selectors. 101 101 */ 102 PX86DESC pShadowDescr = &pVM->selm.s.paGdt GC[iGDTEntry];102 PX86DESC pShadowDescr = &pVM->selm.s.paGdtRC[iGDTEntry]; 103 103 if (Desc.Gen.u1DescType) 104 104 { … … 150 150 if (Sel == (pRegFrame->cs & X86_SEL_MASK)) 151 151 Log(("GDT write to selector in CS register %04X\n", pRegFrame->cs)); 152 else 153 if (Sel == (pRegFrame->ds & X86_SEL_MASK)) 152 else if (Sel == (pRegFrame->ds & X86_SEL_MASK)) 154 153 Log(("GDT write to selector in DS register %04X\n", pRegFrame->ds)); 155 else 156 if (Sel == (pRegFrame->es & X86_SEL_MASK)) 154 else if (Sel == (pRegFrame->es & X86_SEL_MASK)) 157 155 Log(("GDT write to selector in ES register %04X\n", pRegFrame->es)); 158 else 159 if (Sel == (pRegFrame->fs & X86_SEL_MASK)) 156 else if (Sel == (pRegFrame->fs & X86_SEL_MASK)) 160 157 Log(("GDT write to selector in FS register %04X\n", pRegFrame->fs)); 161 else 162 if (Sel == (pRegFrame->gs & X86_SEL_MASK)) 158 else if (Sel == (pRegFrame->gs & X86_SEL_MASK)) 163 159 Log(("GDT write to selector in GS register %04X\n", pRegFrame->gs)); 164 else 165 if (Sel == (pRegFrame->ss & X86_SEL_MASK)) 160 else if (Sel == (pRegFrame->ss & X86_SEL_MASK)) 166 161 Log(("GDT write to selector in SS register %04X\n", pRegFrame->ss)); 167 162 #endif … … 182 177 * (If it's a EIP range this's the EIP, if not it's pvFault.) 183 178 */ 184 VMMRCDECL(int) selm gcGuestGDTWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange)185 { 186 LogFlow(("selm gcGuestGDTWriteHandler errcode=%x fault=%VGv offRange=%08x\n", (uint32_t)uErrorCode, pvFault, offRange));179 VMMRCDECL(int) selmRCGuestGDTWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange) 180 { 181 LogFlow(("selmRCGuestGDTWriteHandler errcode=%x fault=%VGv offRange=%08x\n", (uint32_t)uErrorCode, pvFault, offRange)); 187 182 188 183 /* … … 218 213 if (rc2 == VINF_SUCCESS) 219 214 { 220 STAM_COUNTER_INC(&pVM->selm.s.Stat GCWriteGuestGDTHandled);215 STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTHandled); 221 216 return rc; 222 217 } … … 238 233 VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT); 239 234 } 240 STAM_COUNTER_INC(&pVM->selm.s.Stat GCWriteGuestGDTUnhandled);235 STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTUnhandled); 241 236 return rc; 242 237 } … … 255 250 * (If it's a EIP range this's the EIP, if not it's pvFault.) 256 251 */ 257 VMMRCDECL(int) selm gcGuestLDTWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange)252 VMMRCDECL(int) selmRCGuestLDTWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange) 258 253 { 259 254 /** @todo To be implemented. */ 260 ////LogCom(("selm gcGuestLDTWriteHandler: eip=%08X pvFault=%VGv pvRange=%VGv\r\n", pRegFrame->eip, pvFault, pvRange));255 ////LogCom(("selmRCGuestLDTWriteHandler: eip=%08X pvFault=%VGv pvRange=%VGv\r\n", pRegFrame->eip, pvFault, pvRange)); 261 256 262 257 VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT); 263 STAM_COUNTER_INC(&pVM->selm.s.Stat GCWriteGuestLDT);258 STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestLDT); 264 259 return VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT; 265 260 } … … 278 273 * (If it's a EIP range this's the EIP, if not it's pvFault.) 
279 274 */ 280 VMMRCDECL(int) selm gcGuestTSSWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange)281 { 282 LogFlow(("selm gcGuestTSSWriteHandler errcode=%x fault=%VGv offRange=%08x\n", (uint32_t)uErrorCode, pvFault, offRange));275 VMMRCDECL(int) selmRCGuestTSSWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange) 276 { 277 LogFlow(("selmRCGuestTSSWriteHandler errcode=%x fault=%VGv offRange=%08x\n", (uint32_t)uErrorCode, pvFault, offRange)); 283 278 284 279 /* … … 298 293 || pGuestTSS->ss0 != (pVM->selm.s.Tss.ss1 & ~1)) /* undo raw-r0 */ 299 294 { 300 Log(("selm gcGuestTSSWriteHandler: R0 stack: %RTsel:%VGv -> %RTsel:%VGv\n",295 Log(("selmRCGuestTSSWriteHandler: R0 stack: %RTsel:%VGv -> %RTsel:%VGv\n", 301 296 (RTSEL)(pVM->selm.s.Tss.ss1 & ~1), pVM->selm.s.Tss.esp1, (RTSEL)pGuestTSS->ss0, pGuestTSS->esp0)); 302 297 pVM->selm.s.Tss.esp1 = pGuestTSS->esp0; 303 298 pVM->selm.s.Tss.ss1 = pGuestTSS->ss0 | 1; 304 STAM_COUNTER_INC(&pVM->selm.s.Stat GCWriteGuestTSSHandledChanged);299 STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged); 305 300 } 306 301 if (CPUMGetGuestCR4(pVM) & X86_CR4_VME) … … 331 326 AssertMsg(rc == VINF_SUCCESS, ("MMGCRamRead %VGv failed with %Vrc\n", (uint8_t *)pGuestTSS + offIntRedirBitmap + i * 8, rc)); 332 327 } 333 STAM_COUNTER_INC(&pVM->selm.s.Stat GCWriteGuestTSSRedir);328 STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSRedir); 334 329 } 335 330 } 336 STAM_COUNTER_INC(&pVM->selm.s.Stat GCWriteGuestTSSHandled);331 STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandled); 337 332 } 338 333 else … … 340 335 Assert(VBOX_FAILURE(rc)); 341 336 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS); 342 STAM_COUNTER_INC(&pVM->selm.s.Stat GCWriteGuestTSSUnhandled);337 STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSUnhandled); 343 338 if (rc == VERR_EM_INTERPRETER) 344 339 rc = VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT; … … 348 343 349 344 350 351 345 /** 352 346 * \#PF Virtual Handler callback for Guest write access to the VBox shadow GDT. … … 361 355 * (If it's a EIP range this's the EIP, if not it's pvFault.) 362 356 */ 363 VMMRCDECL(int) selm gcShadowGDTWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange)364 { 365 LogRel(("FATAL ERROR: selm gcShadowGDTWriteHandler: eip=%08X pvFault=%VGv pvRange=%VGv\r\n", pRegFrame->eip, pvFault, pvRange));357 VMMRCDECL(int) selmRCShadowGDTWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange) 358 { 359 LogRel(("FATAL ERROR: selmRCShadowGDTWriteHandler: eip=%08X pvFault=%VGv pvRange=%VGv\r\n", pRegFrame->eip, pvFault, pvRange)); 366 360 return VERR_SELM_SHADOW_GDT_WRITE; 367 361 } 368 362 363 369 364 /** 370 365 * \#PF Virtual Handler callback for Guest write access to the VBox shadow LDT. … … 379 374 * (If it's a EIP range this's the EIP, if not it's pvFault.) 380 375 */ 381 VMMRCDECL(int) selm gcShadowLDTWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange)382 { 383 LogRel(("FATAL ERROR: selm gcShadowLDTWriteHandler: eip=%08X pvFault=%VGv pvRange=%VGv\r\n", pRegFrame->eip, pvFault, pvRange));384 Assert((RTRCPTR)pvFault >= pVM->selm.s. 
GCPtrLdt && (RTRCUINTPTR)pvFault < (RTRCUINTPTR)pVM->selm.s.GCPtrLdt+ 65536 + PAGE_SIZE);376 VMMRCDECL(int) selmRCShadowLDTWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange) 377 { 378 LogRel(("FATAL ERROR: selmRCShadowLDTWriteHandler: eip=%08X pvFault=%VGv pvRange=%VGv\r\n", pRegFrame->eip, pvFault, pvRange)); 379 Assert((RTRCPTR)pvFault >= pVM->selm.s.pvLdtRC && (RTRCUINTPTR)pvFault < (RTRCUINTPTR)pVM->selm.s.pvLdtRC + 65536 + PAGE_SIZE); 385 380 return VERR_SELM_SHADOW_LDT_WRITE; 386 381 } 387 382 383 388 384 /** 389 385 * \#PF Virtual Handler callback for Guest write access to the VBox shadow TSS. … … 398 394 * (If it's a EIP range this's the EIP, if not it's pvFault.) 399 395 */ 400 VMMRCDECL(int) selm gcShadowTSSWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange)401 { 402 LogRel(("FATAL ERROR: selm gcShadowTSSWriteHandler: eip=%08X pvFault=%VGv pvRange=%VGv\r\n", pRegFrame->eip, pvFault, pvRange));396 VMMRCDECL(int) selmRCShadowTSSWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange) 397 { 398 LogRel(("FATAL ERROR: selmRCShadowTSSWriteHandler: eip=%08X pvFault=%VGv pvRange=%VGv\r\n", pRegFrame->eip, pvFault, pvRange)); 403 399 return VERR_SELM_SHADOW_TSS_WRITE; 404 400 } … … 417 413 if (pVM->selm.s.fSyncTSSRing0Stack) 418 414 { 419 RCPTRTYPE(uint8_t *) GCPtrTss = (RCPTRTYPE(uint8_t *))pVM->selm.s.GCPtrGuestTss; 420 int rc; 421 VBOXTSS tss; 415 uint8_t * GCPtrGuestTss = (uint8_t *)(uintptr_t)pVM->selm.s.GCPtrGuestTss; 416 bool fTriedAlready = false; 417 int rc; 418 VBOXTSS tss; 422 419 423 420 Assert(pVM->selm.s.GCPtrGuestTss && pVM->selm.s.cbMonitoredGuestTss); 424 421 425 #ifdef IN_GC426 bool fTriedAlready = false;427 428 422 l_tryagain: 429 rc = MMGCRamRead(pVM, &tss.ss0, GCPtr Tss + RT_OFFSETOF(VBOXTSS, ss0), sizeof(tss.ss0));430 rc |= MMGCRamRead(pVM, &tss.esp0, GCPtr Tss + RT_OFFSETOF(VBOXTSS, esp0), sizeof(tss.esp0));431 432 rc |= MMGCRamRead(pVM, &tss.offIoBitmap, GCPtr Tss + RT_OFFSETOF(VBOXTSS, offIoBitmap), sizeof(tss.offIoBitmap));433 423 rc = MMGCRamRead(pVM, &tss.ss0, GCPtrGuestTss + RT_OFFSETOF(VBOXTSS, ss0), sizeof(tss.ss0)); 424 rc |= MMGCRamRead(pVM, &tss.esp0, GCPtrGuestTss + RT_OFFSETOF(VBOXTSS, esp0), sizeof(tss.esp0)); 425 #ifdef DEBUG 426 rc |= MMGCRamRead(pVM, &tss.offIoBitmap, GCPtrGuestTss + RT_OFFSETOF(VBOXTSS, offIoBitmap), sizeof(tss.offIoBitmap)); 427 #endif 434 428 435 429 if (VBOX_FAILURE(rc)) … … 440 434 /** @todo might cross page boundary */ 441 435 fTriedAlready = true; 442 rc = PGMPrefetchPage(pVM, (RTGCPTR)( RTRCUINTPTR)GCPtrTss);436 rc = PGMPrefetchPage(pVM, (RTGCPTR)(uintptr_t)GCPtrGuestTss); 443 437 if (rc != VINF_SUCCESS) 444 438 return rc; 445 439 goto l_tryagain; 446 440 } 447 AssertMsgFailed(("Unable to read TSS structure at % 08X\n", GCPtrTss));441 AssertMsgFailed(("Unable to read TSS structure at %RRv\n", GCPtrGuestTss)); 448 442 return rc; 449 443 } 450 451 #else /* !IN_GC */452 /* Reading too much. Could be cheaper than two seperate calls though. */453 rc = PGMPhysSimpleReadGCPtr(pVM, &tss, GCPtrTss, sizeof(VBOXTSS));454 if (VBOX_FAILURE(rc))455 {456 AssertReleaseMsgFailed(("Unable to read TSS structure at %08X\n", GCPtrTss));457 return rc;458 }459 #endif /* !IN_GC */460 444 461 445 #ifdef LOG_ENABLED -
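selmRCGuestTSSWriteHandler above only mirrors the guest's ring-0 stack fields into the shadow TSS: under raw mode the guest's ring 0 actually runs in ring 1, so ss0/esp0 land in the shadow ss1/esp1 with the RPL forced to 1, and that RPL bit is masked off again when checking whether anything changed. A standalone sketch of that mirroring step, using simplified stand-in structures rather than the real VBOXTSS:

#include <stdbool.h>
#include <stdint.h>

/* Only the guest TSS fields the handler cares about (cf. VBOXTSS ss0/esp0). */
typedef struct MYGUESTTSSFIELDS
{
    uint32_t esp0;
    uint16_t ss0;
} MYGUESTTSSFIELDS;

typedef struct MYSHADOWTSS
{
    uint32_t esp1;
    uint16_t ss1;
} MYSHADOWTSS;

/* Mirror the guest's ring-0 stack into the shadow TSS's ring-1 slot, the way
   the handler in the hunk above does it.  Returns true if anything changed. */
static bool mirrorRing0Stack(MYSHADOWTSS *pShadow, const MYGUESTTSSFIELDS *pGuest)
{
    if (   pGuest->esp0 ==  pShadow->esp1
        && pGuest->ss0  == (uint16_t)(pShadow->ss1 & ~1))   /* undo raw-r0 RPL */
        return false;
    pShadow->esp1 = pGuest->esp0;
    pShadow->ss1  = (uint16_t)(pGuest->ss0 | 1);            /* force RPL = 1 */
    return true;
}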
trunk/src/VBox/VMM/testcase/tstVMStructGC.cpp
r13067 r13577 704 704 GEN_CHECK_OFF(SELM, aHyperSel[SELM_HYPER_SEL_TSS]); 705 705 GEN_CHECK_OFF(SELM, aHyperSel[SELM_HYPER_SEL_TSS_TRAP08]); 706 GEN_CHECK_OFF(SELM, paGdt HC);707 GEN_CHECK_OFF(SELM, paGdt GC);706 GEN_CHECK_OFF(SELM, paGdtR3); 707 GEN_CHECK_OFF(SELM, paGdtRC); 708 708 GEN_CHECK_OFF(SELM, GuestGdtr); 709 709 GEN_CHECK_OFF(SELM, cbEffGuestGdtLimit); 710 GEN_CHECK_OFF(SELM, HCPtrLdt);711 GEN_CHECK_OFF(SELM, GCPtrLdt);710 GEN_CHECK_OFF(SELM, pvLdtR3); 711 GEN_CHECK_OFF(SELM, pvLdtRC); 712 712 GEN_CHECK_OFF(SELM, GCPtrGuestLdt); 713 713 GEN_CHECK_OFF(SELM, cbLdtLimit); … … 715 715 GEN_CHECK_OFF(SELM, Tss); 716 716 GEN_CHECK_OFF(SELM, TssTrap08); 717 GEN_CHECK_OFF(SELM, GCPtrTss);717 GEN_CHECK_OFF(SELM, pvMonShwTssRC); 718 718 GEN_CHECK_OFF(SELM, GCPtrGuestTss); 719 719 GEN_CHECK_OFF(SELM, cbGuestTss);
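The tstVMStructGC.cpp hunk just renames members inside GEN_CHECK_OFF() entries, which record the offset of each SELM member; the generated tables are presumably compared across builds so the raw-mode context and the host context agree on structure layout. A plain-C sketch of the underlying idea (offsetof-based offset emission, not the actual generator):

#include <stddef.h>
#include <stdio.h>

typedef struct MYSELM
{
    int   offVM;
    void *paGdtR3;
    /* ... */
} MYSELM;

/* Emit "member name, offset" pairs the way the GEN_CHECK_OFF() entries in
   tstVMStructGC.cpp do; a second table built for the other context can then
   be compared against this one to catch layout drift. */
#define GEN_CHECK_OFF_SKETCH(a_Type, a_Member) \
    printf("%-24s %zu\n", #a_Type "." #a_Member, offsetof(a_Type, a_Member))

int main(void)
{
    GEN_CHECK_OFF_SKETCH(MYSELM, offVM);
    GEN_CHECK_OFF_SKETCH(MYSELM, paGdtR3);
    return 0;
}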