/* $Id: IEMAllOpcodeFetch-x86.cpp 108278 2025-02-18 15:46:53Z vboxsync $ */
/** @file
 * IEM - Interpreted Execution Manager - All Contexts.
 */

/*
 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_IEM
#define VMCPU_INCL_CPUM_GST_CTX
#ifdef IN_RING0
# define VBOX_VMM_TARGET_X86
#endif
#include <VBox/vmm/iem.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/dbgf.h>
#include "IEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/log.h>
#include <VBox/param.h>
#include <iprt/assert.h>
#include <iprt/errcore.h>
#include <iprt/string.h>
#include <iprt/x86.h>

#include "IEMInline.h"
#include "IEMInline-x86.h"
#include "IEMAllTlbInline-x86.h"


#ifndef IEM_WITH_CODE_TLB
/**
 * Prefetch opcodes the first time when starting execution.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure of the calling
 *                              thread.
 */
VBOXSTRICTRC iemOpcodeFetchPrefetch(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    /*
     * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
     *
     * First translate CS:rIP to a physical address.
     *
     * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
     *       all relevant bytes from the first page, as it ASSUMES it's only ever
     *       called for dealing with CS.LIM, page crossing and instructions that
     *       are too long.
     */
    uint32_t    cbToTryRead;
    RTGCPTR     GCPtrPC;
    if (IEM_IS_64BIT_CODE(pVCpu))
    {
        cbToTryRead = GUEST_PAGE_SIZE;
        GCPtrPC     = pVCpu->cpum.GstCtx.rip;
        if (IEM_IS_CANONICAL(GCPtrPC))
            cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
        else
            return iemRaiseGeneralProtectionFault0(pVCpu);
    }
    else
    {
        uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
        AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
        if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
            cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
        else
            return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
        if (cbToTryRead) { /* likely */ }
        else /* overflowed */
        {
            Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
            cbToTryRead = UINT32_MAX;
        }
        GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
        Assert(GCPtrPC <= UINT32_MAX);
    }

    PGMPTWALKFAST WalkFast;
    int rc = PGMGstQueryPageFast(pVCpu, GCPtrPC,
                                 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
                                 &WalkFast);
    if (RT_SUCCESS(rc))
        Assert(WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED);
    else
    {
        Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
        /** @todo This isn't quite right yet, as PGM_GST_SLAT_NAME_EPT(Walk) doesn't
         *        know about what kind of access we're making! See PGM_GST_NAME(WalkFast). */
        if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
            IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
# endif
        return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
    }
#if 0
    if ((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
    else
    {
        Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
        /** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/
# error completely wrong
        if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
            IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
# endif
        return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
    }
    if (!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
    else
    {
        Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
        /** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/
# error completely wrong.
        if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
            IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
# endif
        return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
    }
#else
    Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);
    Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
#endif
    RTGCPHYS const GCPhys = WalkFast.GCPhys;

    /*
     * Read the bytes at this address.
     */
    uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
    if (cbToTryRead > cbLeftOnPage)
        cbToTryRead = cbLeftOnPage;
    if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
        cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);

    if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
    {
        VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
        { /* likely */ }
        else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
        {
            Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
                 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
            rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
        }
        else
        {
            Log((RT_SUCCESS(rcStrict)
                 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
                 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
                 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
            return rcStrict;
        }
    }
    else
    {
        rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
        if (RT_SUCCESS(rc))
        { /* likely */ }
        else
        {
            Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
                 GCPtrPC, GCPhys, cbToTryRead, rc));
            return rc;
        }
    }
    pVCpu->iem.s.cbOpcode = cbToTryRead;
    return VINF_SUCCESS;
}
#endif /* !IEM_WITH_CODE_TLB */
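
/*
 * Reader's sketch (not compiled, not part of the code above): roughly how the
 * non-TLB prefetch above clamps the number of bytes to try reading in the
 * 16/32-bit case.  GUEST_PAGE_SIZE, GUEST_PAGE_OFFSET_MASK and the CS
 * limit/base fields are the real ones used above; the standalone shape of the
 * snippet and the pCtx shorthand are illustrative assumptions only.
 *
 *      // Bytes left before exceeding the CS limit (the limit is inclusive).
 *      uint32_t cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
 *      if (!cbToTryRead)   // EIP=0 with a 4 GiB limit wraps the sum to zero.
 *          cbToTryRead = UINT32_MAX;
 *      // Linear address = segment base + offset ...
 *      uint64_t const GCPtrPC      = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
 *      // ... then clamp to the current page and later to sizeof(abOpcode).
 *      uint32_t const cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
 *      cbToTryRead = RT_MIN(cbToTryRead, cbLeftOnPage);
 */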


/**
 * Flushes the prefetch buffer, light version.
 */
void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
{
#ifndef IEM_WITH_CODE_TLB
    pVCpu->iem.s.cbOpcode = cbInstr;
#else
    RT_NOREF(pVCpu, cbInstr);
#endif
}


/**
 * Flushes the prefetch buffer, heavy version.
 */
void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
{
#ifndef IEM_WITH_CODE_TLB
    pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
#elif 1
    pVCpu->iem.s.cbInstrBufTotal = 0;
    RT_NOREF(cbInstr);
#else
    RT_NOREF(pVCpu, cbInstr);
#endif
}



#ifdef IEM_WITH_CODE_TLB

/**
 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
 * failure and jumps.
 *
 * We end up here for a number of reasons:
 *      - pbInstrBuf isn't yet initialized.
 *      - Advancing beyond the buffer boundary (e.g. cross page).
 *      - Advancing beyond the CS segment limit.
 *      - Fetching from non-mappable page (e.g. MMIO).
 *      - TLB loading in the recompiler (@a pvDst = NULL, @a cbDst = 0).
 *
 * @param   pVCpu               The cross context virtual CPU structure of the
 *                              calling thread.
 * @param   pvDst               Where to return the bytes.
 * @param   cbDst               Number of bytes to read. A value of zero is
 *                              allowed for initializing pbInstrBuf (the
 *                              recompiler does this). In this case it is best
 *                              to set pbInstrBuf to NULL prior to the call.
 */
void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
{
# ifdef IN_RING3
    for (;;)
    {
        Assert(cbDst <= 8);
        uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;

        /*
         * We might have a partial buffer match, deal with that first to make the
         * rest simpler.  This is the first part of the cross page/buffer case.
         */
        uint8_t const * const pbInstrBuf = pVCpu->iem.s.pbInstrBuf;
        if (pbInstrBuf != NULL)
        {
            Assert(cbDst != 0); /* pbInstrBuf shall be NULL in case of a TLB load */
            uint32_t const cbInstrBuf = pVCpu->iem.s.cbInstrBuf;
            if (offBuf < cbInstrBuf)
            {
                Assert(offBuf + cbDst > cbInstrBuf);
                uint32_t const cbCopy = cbInstrBuf - offBuf;
                memcpy(pvDst, &pbInstrBuf[offBuf], cbCopy);

                cbDst  -= cbCopy;
                pvDst   = (uint8_t *)pvDst + cbCopy;
                offBuf += cbCopy;
            }
        }

        /*
         * Check segment limit, figuring how much we're allowed to access at this point.
         *
         * We will fault immediately if RIP is past the segment limit / in non-canonical
         * territory.  If we do continue, there are one or more bytes to read before we
         * end up in trouble and we need to do that first before faulting.
         */
        RTGCPTR  GCPtrFirst;
        uint32_t cbMaxRead;
        if (IEM_IS_64BIT_CODE(pVCpu))
        {
            GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
            if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
            { /* likely */ }
            else
                iemRaiseGeneralProtectionFault0Jmp(pVCpu);
            cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
        }
        else
        {
            GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
            /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
            if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
            { /* likely */ }
            else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
                iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
            cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
            if (cbMaxRead != 0)
            { /* likely */ }
            else
            {
                /* Overflowed because address is 0 and limit is max. */
                Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
                cbMaxRead = X86_PAGE_SIZE;
            }
            GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
            uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
            if (cbMaxRead2 < cbMaxRead)
                cbMaxRead = cbMaxRead2;
            /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
        }

        /*
         * Get the TLB entry for this piece of code.
         */
        uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrFirst);
        PIEMTLBENTRY   pTlbe     = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.CodeTlb, uTagNoRev);
        if (   pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevision)
            || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal))
        {
            /* likely when executing lots of code, otherwise unlikely */
# ifdef IEM_WITH_TLB_STATISTICS
            pVCpu->iem.s.CodeTlb.cTlbCoreHits++;
# endif
            Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));

            /* Check TLB page table level access flags. */
            if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
            {
                if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
                {
                    Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
                    iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
                }
                if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
                {
                    Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
                    iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
                }
            }

            /* Look up the physical page info if necessary. */
            if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
            { /* not necessary */ }
            else
            {
                if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
                { /* likely */ }
                else
                    iemTlbInvalidateAllPhysicalSlow(pVCpu);
                pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
                int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
                                                    &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
                AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
            }
        }
        else
        {
            pVCpu->iem.s.CodeTlb.cTlbCoreMisses++;

            /* This page table walking will set A bits as required by the access while performing the walk.
               ASSUMES these are set when the address is translated rather than on commit... */
            /** @todo testcase: check when A bits are actually set by the CPU for code. */
            PGMPTWALKFAST WalkFast;
            int rc = PGMGstQueryPageFast(pVCpu, GCPtrFirst,
                                         IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
                                         &WalkFast);
            if (RT_SUCCESS(rc))
                Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
            else
            {
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
                /** @todo Nested VMX: Need to handle EPT violation/misconfig here? OF COURSE! */
                Assert(!(WalkFast.fFailed & PGM_WALKFAIL_EPT));
# endif
                Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
                iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
            }

            AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
            if (   !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
                || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
            {
                pTlbe--;
                pTlbe->uTag = uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevision;
                if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
                    iemTlbLoadedLargePage<false>(pVCpu, &pVCpu->iem.s.CodeTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
# ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
                else
                    ASMBitClear(pVCpu->iem.s.CodeTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev));
# endif
            }
            else
            {
                pVCpu->iem.s.CodeTlb.cTlbCoreGlobalLoads++;
                pTlbe->uTag = uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal;
                if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
                    iemTlbLoadedLargePage<true>(pVCpu, &pVCpu->iem.s.CodeTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
# ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
                else
                    ASMBitClear(pVCpu->iem.s.CodeTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev) + 1);
# endif
            }
            pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
                                    | (WalkFast.fEffective >> X86_PTE_PAE_BIT_NX) /*IEMTLBE_F_PT_NO_EXEC*/
                                    | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);
            RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
            pTlbe->GCPhys           = GCPhysPg;
            pTlbe->pbMappingR3      = NULL;
            Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
            Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) || IEM_GET_CPL(pVCpu) != 3);
            Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));

            if (!((uintptr_t)pTlbe & (sizeof(*pTlbe) * 2 - 1)))
                IEMTLBTRACE_LOAD(       pVCpu, GCPtrFirst, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, false);
            else
                IEMTLBTRACE_LOAD_GLOBAL(pVCpu, GCPtrFirst, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, false);

            /* Resolve the physical address. */
            if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
            { /* likely */ }
            else
                iemTlbInvalidateAllPhysicalSlow(pVCpu);
            Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
            rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
                                            &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
            AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
        }

# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
        /*
         * Try to do a direct read using the pbMappingR3 pointer.
         * Note! Do not recheck the physical TLB revision number here as we have the
         *       wrong response to changes in the else case.  If someone is updating
         *       pVCpu->iem.s.CodeTlb.uTlbPhysRev in parallel to us, we should be fine
         *       pretending we always won the race.
         */
        if (   (pTlbe->fFlagsAndPhysRev & (/*IEMTLBE_F_PHYS_REV |*/ IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
            == /*pVCpu->iem.s.CodeTlb.uTlbPhysRev*/ 0U)
        {
            uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
            pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
            if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
            {
                pVCpu->iem.s.cbInstrBuf       = offPg + RT_MIN(15, cbMaxRead);
                pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
            }
            else
            {
                uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
                if (cbInstr + (uint32_t)cbDst <= 15)
                {
                    pVCpu->iem.s.cbInstrBuf       = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
                    pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
                }
                else
                {
                    Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
                         pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
                    iemRaiseGeneralProtectionFault0Jmp(pVCpu);
                }
            }
            if (cbDst <= cbMaxRead)
            {
                pVCpu->iem.s.fTbCrossedPage     |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */
# if 0 /* unused */
                pVCpu->iem.s.GCPhysInstrBufPrev  = pVCpu->iem.s.GCPhysInstrBuf;
# endif
                pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
                pVCpu->iem.s.uInstrBufPc      = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
                pVCpu->iem.s.GCPhysInstrBuf   = pTlbe->GCPhys;
                pVCpu->iem.s.pbInstrBuf       = pTlbe->pbMappingR3;
                if (cbDst > 0) /* To make ASAN happy in the TLB load case. */
                    memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
                else
                    Assert(!pvDst);
                return;
            }
            pVCpu->iem.s.pbInstrBuf = NULL;

            memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
            pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
        }
# else
# error "refactor as needed"
        /*
         * There is no special read handling, so we can read a bit more and
         * put it in the prefetch buffer.
         */
        if (   cbDst < cbMaxRead
            && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
        {
            VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
                                                &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
            if (RT_LIKELY(rcStrict == VINF_SUCCESS))
            { /* likely */ }
            else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
            {
                Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
                     GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
                rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
                AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
            }
            else
            {
                Log((RT_SUCCESS(rcStrict)
                     ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
                     : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
                     GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
                IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
            }
        }
# endif
        /*
         * Special read handling, so only read exactly what's needed.
         * This is a highly unlikely scenario.
         */
        else
        {
            pVCpu->iem.s.CodeTlb.cTlbSlowCodeReadPath++;

            /* Check instruction length. */
            uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
            if (RT_LIKELY(cbInstr + cbDst <= 15))
            { /* likely */ }
            else
            {
                Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
                     pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
                iemRaiseGeneralProtectionFault0Jmp(pVCpu);
            }

            /* Do the reading. */
            uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
            if (cbToRead > 0)
            {
                VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
                                                    pvDst, cbToRead, PGMACCESSORIGIN_IEM);
                if (RT_LIKELY(rcStrict == VINF_SUCCESS))
                { /* likely */ }
                else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
                {
                    Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
                         GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
                    rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
                    AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
                }
                else
                {
                    Log((RT_SUCCESS(rcStrict)
                         ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
                         : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
                         GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
                    IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
                }
            }

            /* Update the state and probably return. */
            uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
            pVCpu->iem.s.fTbCrossedPage     |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;
# if 0 /* unused */
            pVCpu->iem.s.GCPhysInstrBufPrev  = pVCpu->iem.s.GCPhysInstrBuf;
# endif
            pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
            pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
            pVCpu->iem.s.cbInstrBuf       = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
            pVCpu->iem.s.cbInstrBufTotal  = X86_PAGE_SIZE; /** @todo ??? */
            pVCpu->iem.s.GCPhysInstrBuf   = pTlbe->GCPhys;
            pVCpu->iem.s.uInstrBufPc      = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
            pVCpu->iem.s.pbInstrBuf       = NULL;
            if (cbToRead == cbDst)
                return;
            Assert(cbToRead == cbMaxRead);
        }

        /*
         * More to read, loop.
         */
        cbDst -= cbMaxRead;
        pvDst  = (uint8_t *)pvDst + cbMaxRead;
    }
# else /* !IN_RING3 */
    RT_NOREF(pvDst, cbDst);
    if (pvDst || cbDst)
        IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
# endif /* !IN_RING3 */
}
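
/*
 * Reader's sketch (not compiled): the code TLB lookup above keeps an even
 * (non-global) and an odd (global) entry per tag and bakes a revision number
 * into the tag, so bumping uTlbRevision/uTlbRevisionGlobal invalidates every
 * entry without touching the array.  The macros and fields are the real ones
 * used by iemOpcodeFetchBytesJmp; only the stripped-down control flow below is
 * an illustrative assumption.
 *
 *      uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrFirst);
 *      PIEMTLBENTRY   pTlbe     = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.CodeTlb, uTagNoRev);
 *      if (pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevision))
 *          // hit on the non-global (even) entry
 *      else if ((pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal))
 *          // hit on the global (odd) entry
 *      else
 *          // miss: walk the guest page tables and (re)load one of the two entries
 */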

#else /* !IEM_WITH_CODE_TLB */

/**
 * Tries to fetch at least @a cbMin bytes more opcodes, raising the appropriate
 * exception if it fails.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure of the
 *                              calling thread.
 * @param   cbMin               The minimum number of bytes relative to offOpcode
 *                              that must be read.
 */
VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
{
    /*
     * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
     *
     * First translate CS:rIP to a physical address.
     */
    uint8_t const   cbOpcode  = pVCpu->iem.s.cbOpcode;
    uint8_t const   offOpcode = pVCpu->iem.s.offOpcode;
    uint8_t const   cbLeft    = cbOpcode - offOpcode;
    Assert(cbLeft < cbMin);
    Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));

    uint32_t        cbToTryRead;
    RTGCPTR         GCPtrNext;
    if (IEM_IS_64BIT_CODE(pVCpu))
    {
        GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
        if (!IEM_IS_CANONICAL(GCPtrNext))
            return iemRaiseGeneralProtectionFault0(pVCpu);
        cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
    }
    else
    {
        uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
        /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
        GCPtrNext32 += cbOpcode;
        if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
            /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
            return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
        cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
        if (!cbToTryRead) /* overflowed */
        {
            Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
            cbToTryRead = UINT32_MAX;
            /** @todo check out wrapping around the code segment. */
        }
        if (cbToTryRead < cbMin - cbLeft)
            return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
        GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;

        uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
        if (cbToTryRead > cbLeftOnPage)
            cbToTryRead = cbLeftOnPage;
    }

    /* Restrict to opcode buffer space.

       We're making ASSUMPTIONS here based on work done previously in
       iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
       be fetched in case of an instruction crossing two pages. */
    if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
        cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
    if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
    { /* likely */ }
    else
    {
        Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
             pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
        return iemRaiseGeneralProtectionFault0(pVCpu);
    }

    PGMPTWALKFAST WalkFast;
    int rc = PGMGstQueryPageFast(pVCpu, GCPtrNext,
                                 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
                                 &WalkFast);
    if (RT_SUCCESS(rc))
        Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
    else
    {
        Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
        if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
            IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
#endif
        return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
    }
    Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);
    Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));

    RTGCPHYS const GCPhys = WalkFast.GCPhys;
    Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));

    /*
     * Read the bytes at this address.
     *
     * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
     * and since PATM should only patch the start of an instruction there
     * should be no need to check again here.
     */
    if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
    {
        VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
                                            cbToTryRead, PGMACCESSORIGIN_IEM);
        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
        { /* likely */ }
        else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
        {
            Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
                 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
            rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
        }
        else
        {
            Log((RT_SUCCESS(rcStrict)
                 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
                 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
                 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
            return rcStrict;
        }
    }
    else
    {
        rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
        if (RT_SUCCESS(rc))
        { /* likely */ }
        else
        {
            Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
            return rc;
        }
    }
    pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
    Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));

    return VINF_SUCCESS;
}

#endif /* !IEM_WITH_CODE_TLB */
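
/*
 * A worked example (illustrative numbers only) of the abOpcode bookkeeping
 * shared by iemOpcodeFetchMoreBytes above and the slow-path getters below on
 * the non-TLB build: cbOpcode is how many instruction bytes sit in abOpcode,
 * offOpcode how many of those the decoder has already consumed.
 *
 *      cbLeft = cbOpcode - offOpcode;      // e.g. 14 - 13 = 1 byte still unread
 *      // A 4 byte immediate is wanted (cbMin = 4), so at least
 *      //      cbMin - cbLeft = 4 - 1 = 3
 *      // more bytes must be appended at abOpcode[cbOpcode] before the getter
 *      // can read abOpcode[offOpcode..offOpcode+3] and advance offOpcode by 4.
 */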

/**
 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
 *
 * @returns The opcode byte.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 */
uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
{
#ifdef IEM_WITH_CODE_TLB
    uint8_t u8;
    iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
    return u8;
#else
    VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
    if (rcStrict == VINF_SUCCESS)
        return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
    IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
#endif
}


/**
 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
 *
 * @returns The opcode word.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 */
uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
{
#ifdef IEM_WITH_CODE_TLB
    uint16_t u16;
    iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
    return u16;
#else
    VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
    if (rcStrict == VINF_SUCCESS)
    {
        uint8_t offOpcode = pVCpu->iem.s.offOpcode;
        pVCpu->iem.s.offOpcode += 2;
# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
# else
        return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
# endif
    }
    IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
#endif
}


/**
 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
 *
 * @returns The opcode dword.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 */
uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
{
#ifdef IEM_WITH_CODE_TLB
    uint32_t u32;
    iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
    return u32;
#else
    VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
    if (rcStrict == VINF_SUCCESS)
    {
        uint8_t offOpcode = pVCpu->iem.s.offOpcode;
        pVCpu->iem.s.offOpcode = offOpcode + 4;
# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
# else
        return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
                                   pVCpu->iem.s.abOpcode[offOpcode + 1],
                                   pVCpu->iem.s.abOpcode[offOpcode + 2],
                                   pVCpu->iem.s.abOpcode[offOpcode + 3]);
# endif
    }
    IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
#endif
}


/**
 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
 *
 * @returns The opcode qword.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 */
uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
{
#ifdef IEM_WITH_CODE_TLB
    uint64_t u64;
    iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
    return u64;
#else
    VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
    if (rcStrict == VINF_SUCCESS)
    {
        uint8_t offOpcode = pVCpu->iem.s.offOpcode;
        pVCpu->iem.s.offOpcode = offOpcode + 8;
# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
# else
        return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
                                   pVCpu->iem.s.abOpcode[offOpcode + 1],
                                   pVCpu->iem.s.abOpcode[offOpcode + 2],
                                   pVCpu->iem.s.abOpcode[offOpcode + 3],
                                   pVCpu->iem.s.abOpcode[offOpcode + 4],
                                   pVCpu->iem.s.abOpcode[offOpcode + 5],
                                   pVCpu->iem.s.abOpcode[offOpcode + 6],
                                   pVCpu->iem.s.abOpcode[offOpcode + 7]);
# endif
    }
    IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
#endif
}