VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllThrdFuncsBltIn.cpp@100788

Last change on this file since 100788 was 100788, checked in by vboxsync, 21 months ago

VMM/IEM: Added missing CheckMode call to iemThreadedRecompilerMcDeferToCImpl0. Issue mode change check on IEM_CIMPL_F_VMEXIT. bugref:10369

/* $Id: IEMAllThrdFuncsBltIn.cpp 100788 2023-08-03 22:12:58Z vboxsync $ */
/** @file
 * IEM - Instruction Decoding and Emulation, Built-in Threaded Functions.
 *
 * This is separate from IEMThreadedFunctions.cpp because it doesn't work
 * with IEM_WITH_OPAQUE_DECODER_STATE defined.
 */

/*
 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_IEM_RE_THREADED
#define VMCPU_INCL_CPUM_GST_CTX
#include <VBox/vmm/iem.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/apic.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/gim.h>
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
# include <VBox/vmm/em.h>
# include <VBox/vmm/hm_svm.h>
#endif
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
# include <VBox/vmm/hmvmxinline.h>
#endif
#include <VBox/vmm/tm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/dbgftrace.h>
#include "IEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/log.h>
#include <VBox/err.h>
#include <VBox/param.h>
#include <VBox/dis.h>
#include <VBox/disopcode-x86-amd64.h>
#include <iprt/asm-math.h>
#include <iprt/assert.h>
#include <iprt/string.h>
#include <iprt/x86.h>

#include "IEMInline.h"



static VBOXSTRICTRC iemThreadedFuncWorkerObsoleteTb(PVMCPUCC pVCpu)
{
    iemThreadedTbObsolete(pVCpu, pVCpu->iem.s.pCurTbR3);
    return VINF_IEM_REEXEC_MODE_CHANGED; /** @todo different status code... */
}


/**
 * Built-in function that calls a C-implementation function taking zero arguments.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_DeferToCImpl0)
{
    PFNIEMCIMPL0 const pfnCImpl = (PFNIEMCIMPL0)(uintptr_t)uParam0;
    uint8_t const      cbInstr  = (uint8_t)uParam1;
    RT_NOREF(uParam2);
    return pfnCImpl(pVCpu, cbInstr);
}
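

/*
 * Example: a minimal sketch of how the threaded recompiler might package a
 * call to iemThreadedFunc_BltIn_DeferToCImpl0.  The call-entry field layout
 * and the use of iemCImpl_hlt below are illustrative assumptions, not the
 * exact IEM emitter code.
 *
 * @code
 *      pCall->enmFunction = kIemThreadedFunc_BltIn_DeferToCImpl0;
 *      pCall->auParams[0] = (uintptr_t)iemCImpl_hlt;   // PFNIEMCIMPL0 worker (uParam0)
 *      pCall->auParams[1] = 1;                         // instruction length in bytes (uParam1)
 *      pCall->auParams[2] = 0;                         // unused (uParam2)
 * @endcode
 */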


/**
 * Built-in function that checks for pending interrupts that can be delivered.
 *
 * This triggers after the completion of an instruction, so EIP is already at
 * the next instruction. If an IRQ or important FF is pending, this will return
 * a non-zero status that stops TB execution.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckIrq)
{
    RT_NOREF(uParam0, uParam1, uParam2);

    /*
     * Check for IRQs and other FFs that need servicing.
     */
    uint64_t fCpu = pVCpu->fLocalForcedActions;
    fCpu &= VMCPU_FF_ALL_MASK & ~(  VMCPU_FF_PGM_SYNC_CR3
                                  | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
                                  | VMCPU_FF_TLB_FLUSH
                                  | VMCPU_FF_UNHALT );
    if (RT_LIKELY(   (   !fCpu
                      || (   !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
                          && (   !pVCpu->cpum.GstCtx.rflags.Bits.u1IF
                              || CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)) ) )
                  && !VM_FF_IS_ANY_SET(pVCpu->CTX_SUFF(pVM), VM_FF_ALL_MASK) ))
        return VINF_SUCCESS;

    Log(("%04x:%08RX32: Pending IRQ and/or FF: fCpu=%#RX64 fVm=%#RX32 IF=%d\n",
         pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, fCpu,
         pVCpu->CTX_SUFF(pVM)->fGlobalForcedActions & VM_FF_ALL_MASK, pVCpu->cpum.GstCtx.rflags.Bits.u1IF));
    return VINF_IEM_REEXEC_MODE_CHANGED;
}
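
/*
 * The acceptance test above restated as a standalone predicate, for clarity.
 * This is an illustrative sketch only; fCpu is assumed to be pre-masked the
 * same way as in the function above.
 *
 * @code
 *      static bool iemExampleCheckIrqIsClean(uint64_t fCpu, uint64_t fIrqOnlyMask,
 *                                            bool fIf, bool fInShadow, bool fAnyVmFf)
 *      {
 *          if (fAnyVmFf)
 *              return false;       // VM-wide force flags always stop the TB.
 *          if (!fCpu)
 *              return true;        // Nothing pending on this vCPU.
 *          // Pending PIC/APIC interrupts are tolerated only while they cannot
 *          // be delivered (IF clear, or in an interrupt shadow).
 *          return !(fCpu & ~fIrqOnlyMask) && (!fIf || fInShadow);
 *      }
 * @endcode
 */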



/**
 * Built-in function that compares the fExec mask against uParam0.
 *
 * This is used both for IEM_CIMPL_F_MODE and IEM_CIMPL_F_VMEXIT after executing
 * an instruction.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckMode)
{
    uint32_t const fExpectedExec = (uint32_t)uParam0;
    if (pVCpu->iem.s.fExec == fExpectedExec)
        return VINF_SUCCESS;
    LogFlow(("Mode changed at %04x:%08RX64: %#x -> %#x (xor: %#x)\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
             fExpectedExec, pVCpu->iem.s.fExec, fExpectedExec ^ pVCpu->iem.s.fExec));
    RT_NOREF(uParam1, uParam2);
    return VINF_IEM_REEXEC_MODE_CHANGED;
}
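
/*
 * Sketch of the compile-time side of the check above: the recompiler snapshots
 * fExec when emitting the call, so a mismatch at execution time means the mode
 * changed behind the TB's back.  The call-entry layout shown is an
 * illustrative assumption.
 *
 * @code
 *      pCall->enmFunction = kIemThreadedFunc_BltIn_CheckMode;
 *      pCall->auParams[0] = pVCpu->iem.s.fExec;    // expected mode, checked after
 *                                                  // IEM_CIMPL_F_MODE / _F_VMEXIT instructions
 * @endcode
 */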


DECL_FORCE_INLINE(RTGCPHYS) iemTbGetRangePhysPageAddr(PCIEMTB pTb, uint8_t idxRange)
{
    Assert(idxRange < RT_MIN(pTb->cRanges, RT_ELEMENTS(pTb->aRanges)));
    uint8_t const idxPage = pTb->aRanges[idxRange].idxPhysPage;
    Assert(idxPage <= RT_ELEMENTS(pTb->aGCPhysPages));
    if (idxPage == 0)
        return pTb->GCPhysPc & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
    Assert(!(pTb->aGCPhysPages[idxPage - 1] & GUEST_PAGE_OFFSET_MASK));
    return pTb->aGCPhysPages[idxPage - 1];
}
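
/*
 * Worked example of the idxPhysPage convention used above, with made-up
 * addresses: for a TB with GCPhysPc=0x00101ff0 and aGCPhysPages[0]=0x00234000,
 * a range with idxPhysPage == 0 resolves to 0x00101000 (the page of the PC),
 * while idxPhysPage == 1 resolves to aGCPhysPages[0] == 0x00234000.
 */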


/**
 * Macro that implements the 16/32-bit CS.LIM check, as this is done by a
 * number of functions.
 */
#define BODY_CHECK_CS_LIM(a_cbInstr) do { \
        if (RT_LIKELY((uint64_t)pVCpu->cpum.GstCtx.eip + (a_cbInstr) <= (uint64_t)pVCpu->cpum.GstCtx.cs.u32Limit + 1U)) \
        { /* likely */ } \
        else \
        { \
            Log7(("EIP out of bounds at %04x:%08RX32 LB %u - CS.LIM=%#RX32\n", \
                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, (a_cbInstr), pVCpu->cpum.GstCtx.cs.u32Limit)); \
            return iemRaiseGeneralProtectionFault0(pVCpu); \
        } \
    } while(0)
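
/*
 * Worked example for the CS.LIM check above (illustrative numbers): with
 * CS.LIM=0xFFFF, a 4 byte instruction at EIP=0xFFFC ends exactly at the limit
 * and is fine, while the same instruction at EIP=0xFFFD runs past it and must
 * raise \#GP(0).
 *
 * @code
 *      AssertCompile(UINT64_C(0xFFFC) + 4 <= UINT64_C(0xFFFF) + 1);   // last byte at the limit: OK
 *      AssertCompile(UINT64_C(0xFFFD) + 4 >  UINT64_C(0xFFFF) + 1);   // straddles the limit: #GP(0)
 * @endcode
 */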

/**
 * Macro that implements opcode (re-)checking.
 */
#define BODY_CHECK_OPCODES(a_pTb, a_idxRange, a_offRange, a_cbInstr) do { \
        Assert((a_idxRange) < (a_pTb)->cRanges && (a_pTb)->cRanges <= RT_ELEMENTS((a_pTb)->aRanges)); \
        Assert((a_offRange) < (a_pTb)->aRanges[(a_idxRange)].cbOpcodes); \
        /* We can use pbInstrBuf here as it will be updated when branching (and prior to executing a TB). */ \
        if (RT_LIKELY(memcmp(&pVCpu->iem.s.pbInstrBuf[(a_pTb)->aRanges[(a_idxRange)].offPhysPage + (a_offRange)], \
                             &(a_pTb)->pabOpcodes[   (a_pTb)->aRanges[(a_idxRange)].offOpcodes + (a_offRange)], \
                             (a_pTb)->aRanges[(a_idxRange)].cbOpcodes - (a_offRange)) == 0)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; range %u, off %#x LB %#x + %#x; #%u\n", (a_pTb), \
                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), (a_idxRange), \
                  (a_pTb)->aRanges[(a_idxRange)].offOpcodes, (a_pTb)->aRanges[(a_idxRange)].cbOpcodes, (a_offRange), __LINE__)); \
            RT_NOREF(a_cbInstr); \
            return iemThreadedFuncWorkerObsoleteTb(pVCpu); \
        } \
    } while(0)
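
/*
 * Byte-layout illustration for BODY_CHECK_OPCODES, with made-up numbers: for a
 * range with offPhysPage=0x7f0, offOpcodes=0x12 and cbOpcodes=0x20, a call with
 * offRange=0x08 compares the 0x18 remaining bytes starting at pbInstrBuf[0x7f8]
 * against the compile-time snapshot starting at pabOpcodes[0x1a].
 */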

/**
 * Macro that implements TLB loading and pbInstrBuf updating for an instruction
 * crossing into a new page.
 *
 * This may long jump if we're raising a \#PF, \#GP or similar trouble.
 */
#define BODY_LOAD_TLB_FOR_NEW_PAGE(a_pTb, a_offInstr, a_idxRange, a_cbInstr) do { \
        pVCpu->iem.s.pbInstrBuf       = NULL; \
        pVCpu->iem.s.offCurInstrStart = GUEST_PAGE_SIZE - (a_offInstr); \
        pVCpu->iem.s.offInstrNextByte = GUEST_PAGE_SIZE; \
        iemOpcodeFetchBytesJmp(pVCpu, 0, NULL); \
        \
        RTGCPHYS const GCPhysNewPage = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange); \
        if (RT_LIKELY(   pVCpu->iem.s.GCPhysInstrBuf == GCPhysNewPage \
                      && pVCpu->iem.s.pbInstrBuf)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; crossing at %#x; GCPhys=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), (a_offInstr), \
                  pVCpu->iem.s.GCPhysInstrBuf, GCPhysNewPage, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
            RT_NOREF(a_cbInstr); \
            return iemThreadedFuncWorkerObsoleteTb(pVCpu); \
        } \
    } while(0)

/**
 * Macro that implements TLB loading and pbInstrBuf updating when branching or
 * when crossing a page on an instruction boundary.
 *
 * This differs from BODY_LOAD_TLB_FOR_NEW_PAGE in that it will first check if
 * it is an inter-page branch and also check the page offset.
 *
 * This may long jump if we're raising a \#PF, \#GP or similar trouble.
 */
#define BODY_LOAD_TLB_AFTER_BRANCH(a_pTb, a_idxRange, a_cbInstr) do { \
        /* Is RIP within the current code page? */ \
        Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_IS_64BIT_CODE(pVCpu)); \
        uint64_t const uPc = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base; \
        uint64_t const off = uPc - pVCpu->iem.s.uInstrBufPc; \
        if (off < pVCpu->iem.s.cbInstrBufTotal) \
        { \
            Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK)); \
            Assert(pVCpu->iem.s.pbInstrBuf); \
            RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                     | (a_pTb)->aRanges[(a_idxRange)].offPhysPage; \
            if (GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + off) \
            { /* we're good */ } \
            else if ((a_pTb)->aRanges[(a_idxRange)].offPhysPage != off) \
            { \
                Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; branching/1; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                return VINF_IEM_REEXEC_MODE_CHANGED; /** @todo new status code? */ \
            } \
            else \
            { \
                Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; branching/1; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                return iemThreadedFuncWorkerObsoleteTb(pVCpu); \
            } \
        } \
        else \
        { \
            /* Must translate new RIP. */ \
            pVCpu->iem.s.pbInstrBuf       = NULL; \
            pVCpu->iem.s.offCurInstrStart = 0; \
            pVCpu->iem.s.offInstrNextByte = 0; \
            iemOpcodeFetchBytesJmp(pVCpu, 0, NULL); \
            Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK) || !pVCpu->iem.s.pbInstrBuf); \
            \
            RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                     | (a_pTb)->aRanges[(a_idxRange)].offPhysPage; \
            uint64_t const offNew = uPc - pVCpu->iem.s.uInstrBufPc; \
            if (   GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + offNew \
                && pVCpu->iem.s.pbInstrBuf) \
            { /* likely */ } \
            else if (   (a_pTb)->aRanges[(a_idxRange)].offPhysPage != offNew \
                     && pVCpu->iem.s.pbInstrBuf) \
            { \
                Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; branching/2; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + offNew, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                return VINF_IEM_REEXEC_MODE_CHANGED; /** @todo new status code? */ \
            } \
            else \
            { \
                Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; branching/2; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + offNew, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                return iemThreadedFuncWorkerObsoleteTb(pVCpu); \
            } \
        } \
    } while(0)
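
/*
 * Outcome summary for BODY_LOAD_TLB_AFTER_BRANCH (descriptive, no new logic):
 *   - Physical PC matches the range's page and offset: keep executing the TB.
 *   - Offset differs from the range's offPhysPage: the branch went somewhere
 *     this TB does not cover, so leave TB execution ("TB jmp miss") without
 *     discarding the TB.
 *   - Offset matches but the physical page differs (or the buffer could not
 *     be mapped): the TB no longer describes the guest code, so obsolete it.
 */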

/**
 * Macro that implements PC check after a conditional branch.
 */
#define BODY_CHECK_PC_AFTER_BRANCH(a_pTb, a_idxRange, a_cbInstr) do { \
        /* Is RIP within the current code page? */ \
        Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_IS_64BIT_CODE(pVCpu)); \
        uint64_t const uPc = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base; \
        uint64_t const off = uPc - pVCpu->iem.s.uInstrBufPc; \
        Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK)); \
        RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                 | (a_pTb)->aRanges[(a_idxRange)].offPhysPage; \
        if (   GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + off \
            && off < pVCpu->iem.s.cbInstrBufTotal) \
        { /* we're good */ } \
        else \
        { \
            Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; GCPhysWithOffset=%RGp hoped for %RGp, pbInstrBuf=%p - #%u\n", \
                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                  pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
            RT_NOREF(a_cbInstr); \
            return VINF_IEM_REEXEC_MODE_CHANGED; /** @todo new status code? */ \
        } \
    } while(0)


/**
 * Built-in function that checks that EIP/IP + uParam0 is within CS.LIM,
 * raising a \#GP(0) if this isn't the case.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLim)
{
    uint32_t const cbInstr = (uint32_t)uParam0;
    RT_NOREF(uParam1, uParam2);
    BODY_CHECK_CS_LIM(cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for re-checking opcodes and CS.LIM after an instruction
 * that may have modified them.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for re-checking opcodes after an instruction that may have
 * modified them.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    return VINF_SUCCESS;
}


/*
 * Post-branching checkers.
 */

/**
 * Built-in function for checking CS.LIM, checking the PC and checking opcodes
 * after conditional branching within the same page.
 *
 * @see iemThreadedFunc_BltIn_CheckPcAndOpcodes
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking the PC and checking opcodes after conditional
 * branching within the same page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckPcAndOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * transitioning to a different code page.
 *
 * The code page transition can either be natural, i.e. flowing over onto the
 * next page (with the instruction starting at page offset zero), or the result
 * of branching.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes when transitioning to
 * a different code page.
 *
 * The code page transition can either be natural, i.e. flowing over onto the
 * next page (with the instruction starting at page offset zero), or the result
 * of branching.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}



/*
 * Natural page crossing checkers.
 */

/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes on
 * both pages when transitioning to a different code page.
 *
 * This is used when the previous instruction requires revalidation of the
 * opcode bytes and the current instruction crosses a page boundary with opcode
 * bytes on both the old and the new page.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    return VINF_SUCCESS;
}
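
/*
 * A sketch of how uParam0 is packed for the page-crossing built-ins: the low
 * 32 bits carry the total instruction length and the high 32 bits the number
 * of instruction bytes on the first page (cbStartPage).  The emitter shown is
 * an illustrative assumption, not the actual recompiler code.
 *
 * @code
 *      uint32_t const cbInstr     = 5;  // total instruction length
 *      uint32_t const cbStartPage = 3;  // bytes on the first page; 2 spill onto the next
 *      pCall->auParams[0] = RT_MAKE_U64(cbInstr, cbStartPage);
 * @endcode
 */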


/**
 * Built-in function for loading TLB and checking opcodes on both pages when
 * transitioning to a different code page.
 *
 * This is used when the previous instruction requires revalidation of the
 * opcode bytes and the current instruction crosses a page boundary with opcode
 * bytes on both the old and the new page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * advancing naturally to a different code page.
 *
 * Only opcodes on the new page are checked.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    //uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    RT_NOREF(uParam2);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes when advancing
 * naturally to a different code page.
 *
 * Only opcodes on the new page are checked.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    //uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    RT_NOREF(uParam2);
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * advancing naturally to a different code page with the first instruction at
 * byte 0.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    Assert(uParam2 == 0 /*offRange*/); RT_NOREF(uParam2);
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
    Assert(pVCpu->iem.s.offCurInstrStart == 0);
    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes when advancing
 * naturally to a different code page with the first instruction at byte 0.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    Assert(uParam2 == 0 /*offRange*/); RT_NOREF(uParam2);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
    Assert(pVCpu->iem.s.offCurInstrStart == 0);
    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
    return VINF_SUCCESS;
}