Changeset 75234 in vbox for trunk/src/VBox/VMM
- Timestamp: Nov 2, 2018 7:04:03 PM (6 years ago)
- File: 1 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
r74785 r75234 3872 3872 3873 3873 3874 static void iemLoadallSetSelector(PVMCPU pVCpu, uint8_t iSegReg, uint16_t uSel) 3875 { 3876 PCPUMSELREGHID pHid = iemSRegGetHid(pVCpu, iSegReg); 3877 3878 pHid->Sel = uSel; 3879 pHid->ValidSel = uSel; 3880 pHid->fFlags = CPUMSELREG_FLAGS_VALID; 3881 } 3882 3883 3884 static void iemLoadall286SetDescCache(PVMCPU pVCpu, uint8_t iSegReg, uint8_t const *pbMem) 3885 { 3886 PCPUMSELREGHID pHid = iemSRegGetHid(pVCpu, iSegReg); 3887 3888 /* The base is in the first three bytes. */ 3889 pHid->u64Base = pbMem[0] + (pbMem[1] << 8) + (pbMem[2] << 16); 3890 /* The attributes are in the fourth byte. */ 3891 pHid->Attr.u = pbMem[3]; 3892 /* The limit is in the last two bytes. */ 3893 pHid->u32Limit = pbMem[4] + (pbMem[5] << 8); 3894 } 3895 3896 3897 /** 3898 * Implements 286 LOADALL (286 CPUs only). 3899 */ 3900 IEM_CIMPL_DEF_0(iemCImpl_loadall286) 3901 { 3902 NOREF(cbInstr); 3903 3904 /* Data is loaded from a buffer at 800h. No checks are done on the 3905 * validity of loaded state. 3906 * 3907 * LOADALL only loads the internal CPU state, it does not access any 3908 * GDT, LDT, or similar tables. 3909 */ 3910 3911 if (pVCpu->iem.s.uCpl != 0) 3912 { 3913 Log(("loadall286: CPL must be 0 not %u -> #GP(0)\n", pVCpu->iem.s.uCpl)); 3914 return iemRaiseGeneralProtectionFault0(pVCpu); 3915 } 3916 3917 uint8_t const *pbMem = NULL; 3918 uint16_t const *pa16Mem; 3919 uint8_t const *pa8Mem; 3920 RTGCPHYS GCPtrStart = 0x800; /* Fixed table location. */ 3921 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pbMem, 0x66, UINT8_MAX, GCPtrStart, IEM_ACCESS_SYS_R); 3922 if (rcStrict != VINF_SUCCESS) 3923 return rcStrict; 3924 3925 /* The MSW is at offset 0x06. */ 3926 pa16Mem = (uint16_t const *)(pbMem + 0x06); 3927 /* Even LOADALL can't clear the MSW.PE bit, though it can set it. 
*/ 3928 uint64_t uNewCr0 = pVCpu->cpum.GstCtx.cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS); 3929 uNewCr0 |= *pa16Mem & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS); 3930 uint64_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0; 3931 3932 CPUMSetGuestCR0(pVCpu, uNewCr0); 3933 Assert(pVCpu->cpum.GstCtx.cr0 == uNewCr0); 3934 3935 /* Inform PGM if mode changed. */ 3936 if ((uNewCr0 & X86_CR0_PE) != (uOldCr0 & X86_CR0_PE)) 3937 { 3938 int rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* global */); 3939 AssertRCReturn(rc, rc); 3940 /* ignore informational status codes */ 3941 } 3942 rcStrict = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER); 3943 3944 /* TR selector is at offset 0x16. */ 3945 pa16Mem = (uint16_t const *)(pbMem + 0x16); 3946 pVCpu->cpum.GstCtx.tr.Sel = pa16Mem[0]; 3947 pVCpu->cpum.GstCtx.tr.ValidSel = pa16Mem[0]; 3948 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID; 3949 3950 /* Followed by FLAGS... */ 3951 pVCpu->cpum.GstCtx.eflags.u = pa16Mem[1] | X86_EFL_1; 3952 pVCpu->cpum.GstCtx.ip = pa16Mem[2]; /* ...and IP. */ 3953 3954 /* LDT is at offset 0x1C. */ 3955 pa16Mem = (uint16_t const *)(pbMem + 0x1C); 3956 pVCpu->cpum.GstCtx.ldtr.Sel = pa16Mem[0]; 3957 pVCpu->cpum.GstCtx.ldtr.ValidSel = pa16Mem[0]; 3958 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID; 3959 3960 /* Segment registers are at offset 0x1E. */ 3961 pa16Mem = (uint16_t const *)(pbMem + 0x1E); 3962 iemLoadallSetSelector(pVCpu, X86_SREG_DS, pa16Mem[0]); 3963 iemLoadallSetSelector(pVCpu, X86_SREG_SS, pa16Mem[1]); 3964 iemLoadallSetSelector(pVCpu, X86_SREG_CS, pa16Mem[2]); 3965 iemLoadallSetSelector(pVCpu, X86_SREG_ES, pa16Mem[3]); 3966 3967 /* GPRs are at offset 0x26. 
*/ 3968 pa16Mem = (uint16_t const *)(pbMem + 0x26); 3969 pVCpu->cpum.GstCtx.di = pa16Mem[0]; 3970 pVCpu->cpum.GstCtx.si = pa16Mem[1]; 3971 pVCpu->cpum.GstCtx.bp = pa16Mem[2]; 3972 pVCpu->cpum.GstCtx.sp = pa16Mem[3]; 3973 pVCpu->cpum.GstCtx.bx = pa16Mem[4]; 3974 pVCpu->cpum.GstCtx.dx = pa16Mem[5]; 3975 pVCpu->cpum.GstCtx.cx = pa16Mem[6]; 3976 pVCpu->cpum.GstCtx.ax = pa16Mem[7]; 3977 3978 /* Descriptor caches are at offset 0x36, 6 bytes per entry. */ 3979 iemLoadall286SetDescCache(pVCpu, X86_SREG_ES, pbMem + 0x36); 3980 iemLoadall286SetDescCache(pVCpu, X86_SREG_CS, pbMem + 0x3C); 3981 iemLoadall286SetDescCache(pVCpu, X86_SREG_SS, pbMem + 0x42); 3982 iemLoadall286SetDescCache(pVCpu, X86_SREG_DS, pbMem + 0x48); 3983 3984 /* GDTR contents are at offset 0x4E, 6 bytes. */ 3985 RTGCPHYS GCPtrBase; 3986 uint16_t cbLimit; 3987 pa8Mem = pbMem + 0x4E; 3988 /* NB: Fourth byte "should be zero"; we are ignoring it. */ 3989 GCPtrBase = pa8Mem[0] + (pa8Mem[1] << 8) + (pa8Mem[2] << 16); 3990 cbLimit = pa8Mem[4] + (pa8Mem[5] << 8); 3991 CPUMSetGuestGDTR(pVCpu, GCPtrBase, cbLimit); 3992 3993 /* IDTR contents are at offset 0x5A, 6 bytes. 
*/ 3994 pa8Mem = pbMem + 0x5A; 3995 GCPtrBase = pa8Mem[0] + (pa8Mem[1] << 8) + (pa8Mem[2] << 16); 3996 cbLimit = pa8Mem[4] + (pa8Mem[5] << 8); 3997 CPUMSetGuestIDTR(pVCpu, GCPtrBase, cbLimit); 3998 3999 Log(("LOADALL: GDTR:%08RX64/%04X, IDTR:%08RX64/%04X\n", pVCpu->cpum.GstCtx.gdtr.pGdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, pVCpu->cpum.GstCtx.idtr.pIdt, pVCpu->cpum.GstCtx.idtr.cbIdt)); 4000 Log(("LOADALL: CS:%04X, CS base:%08X, limit:%04X, attrs:%02X\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.cs.u64Base, pVCpu->cpum.GstCtx.cs.u32Limit, pVCpu->cpum.GstCtx.cs.Attr.u)); 4001 Log(("LOADALL: DS:%04X, DS base:%08X, limit:%04X, attrs:%02X\n", pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.ds.u64Base, pVCpu->cpum.GstCtx.ds.u32Limit, pVCpu->cpum.GstCtx.ds.Attr.u)); 4002 Log(("LOADALL: ES:%04X, ES base:%08X, limit:%04X, attrs:%02X\n", pVCpu->cpum.GstCtx.es.Sel, pVCpu->cpum.GstCtx.es.u64Base, pVCpu->cpum.GstCtx.es.u32Limit, pVCpu->cpum.GstCtx.es.Attr.u)); 4003 Log(("LOADALL: SS:%04X, SS base:%08X, limit:%04X, attrs:%02X\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ss.u64Base, pVCpu->cpum.GstCtx.ss.u32Limit, pVCpu->cpum.GstCtx.ss.Attr.u)); 4004 Log(("LOADALL: SI:%04X, DI:%04X, AX:%04X, BX:%04X, CX:%04X, DX:%04X\n", pVCpu->cpum.GstCtx.si, pVCpu->cpum.GstCtx.di, pVCpu->cpum.GstCtx.bx, pVCpu->cpum.GstCtx.bx, pVCpu->cpum.GstCtx.cx, pVCpu->cpum.GstCtx.dx)); 4005 4006 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pbMem, IEM_ACCESS_SYS_R); 4007 if (rcStrict != VINF_SUCCESS) 4008 return rcStrict; 4009 4010 /* The CPL may change. It is taken from the "DPL fields of the SS and CS 4011 * descriptor caches" but there is no word as to what happens if those are 4012 * not identical (probably bad things). 4013 */ 4014 pVCpu->iem.s.uCpl = pVCpu->cpum.GstCtx.cs.Attr.n.u2Dpl; 4015 4016 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS | CPUM_CHANGED_IDTR | CPUM_CHANGED_GDTR | CPUM_CHANGED_TR | CPUM_CHANGED_LDTR); 4017 4018 /* Flush the prefetch buffer. 
*/ 4019 #ifdef IEM_WITH_CODE_TLB 4020 pVCpu->iem.s.pbInstrBuf = NULL; 4021 #else 4022 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 4023 #endif 4024 return rcStrict; 4025 } 4026 4027 3874 4028 /** 3875 4029 * Implements SYSCALL (AMD and Intel64). … … 3879 4033 IEM_CIMPL_DEF_0(iemCImpl_syscall) 3880 4034 { 4035 #ifdef IEM_WITH_LOADALL286 4036 /** @todo hack, LOADALL should be decoded as such on a 286. */ 4037 if (pVCpu->iem.s.uTargetCpu == IEMTARGETCPU_286) 4038 return iemCImpl_loadall286(pVCpu, cbInstr); 4039 #endif 4040 3881 4041 /* 3882 4042 * Check preconditions.
Note:
See TracChangeset
for help on using the changeset viewer.