VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllThrdFuncsBltIn.cpp@ 100790

Last change on this file since 100790 was 100790, checked in by vboxsync, 21 months ago

VMM/IEM: Don't generate IRQ checks as the first call in a code block, it prevents anything from getting done if we get the IRQ/NMI scheduling conditions wrong. Account for instructions executed in iemThreadedTbExec when completing blocks. bugref:10369

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 24.1 KB
Line 
1/* $Id: IEMAllThrdFuncsBltIn.cpp 100790 2023-08-04 08:53:14Z vboxsync $ */
2/** @file
3 * IEM - Instruction Decoding and Emulation, Built-in Threaded Functions.
4 *
5 * This is separate from IEMThreadedFunctions.cpp because it doesn't work
6 * with IEM_WITH_OPAQUE_DECODER_STATE defined.
7 */
8
9/*
10 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
11 *
12 * This file is part of VirtualBox base platform packages, as
13 * available from https://www.virtualbox.org.
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation, in version 3 of the
18 * License.
19 *
20 * This program is distributed in the hope that it will be useful, but
21 * WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 * General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, see <https://www.gnu.org/licenses>.
27 *
28 * SPDX-License-Identifier: GPL-3.0-only
29 */
30
31
32/*********************************************************************************************************************************
33* Header Files *
34*********************************************************************************************************************************/
35#define LOG_GROUP LOG_GROUP_IEM_RE_THREADED
36#define VMCPU_INCL_CPUM_GST_CTX
37#include <VBox/vmm/iem.h>
38#include <VBox/vmm/cpum.h>
39#include <VBox/vmm/apic.h>
40#include <VBox/vmm/pdm.h>
41#include <VBox/vmm/pgm.h>
42#include <VBox/vmm/iom.h>
43#include <VBox/vmm/em.h>
44#include <VBox/vmm/hm.h>
45#include <VBox/vmm/nem.h>
46#include <VBox/vmm/gim.h>
47#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
48# include <VBox/vmm/em.h>
49# include <VBox/vmm/hm_svm.h>
50#endif
51#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
52# include <VBox/vmm/hmvmxinline.h>
53#endif
54#include <VBox/vmm/tm.h>
55#include <VBox/vmm/dbgf.h>
56#include <VBox/vmm/dbgftrace.h>
57#include "IEMInternal.h"
58#include <VBox/vmm/vmcc.h>
59#include <VBox/log.h>
60#include <VBox/err.h>
61#include <VBox/param.h>
62#include <VBox/dis.h>
63#include <VBox/disopcode-x86-amd64.h>
64#include <iprt/asm-math.h>
65#include <iprt/assert.h>
66#include <iprt/string.h>
67#include <iprt/x86.h>
68
69#include "IEMInline.h"
70
71
72
73static VBOXSTRICTRC iemThreadeFuncWorkerObsoleteTb(PVMCPUCC pVCpu)
74{
75 iemThreadedTbObsolete(pVCpu, pVCpu->iem.s.pCurTbR3);
76 return VINF_IEM_REEXEC_MODE_CHANGED; /** @todo different status code... */
77}
78
79
80/**
81 * Built-in function that calls a C-implemention function taking zero arguments.
82 */
83IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_DeferToCImpl0)
84{
85 PFNIEMCIMPL0 const pfnCImpl = (PFNIEMCIMPL0)(uintptr_t)uParam0;
86 uint8_t const cbInstr = (uint8_t)uParam1;
87 RT_NOREF(uParam2);
88 return pfnCImpl(pVCpu, cbInstr);
89}
90
91
/**
 * Built-in function that checks for pending interrupts that can be delivered.
 *
 * This triggers after the completion of an instruction, so EIP is already at
 * the next instruction.  If an IRQ or important FF is pending, this will return
 * a non-zero status that stops TB execution.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckIrq)
{
    RT_NOREF(uParam0, uParam1, uParam2);

    /*
     * Check for IRQs and other FFs that need servicing.
     */
    uint64_t fCpu = pVCpu->fLocalForcedActions;
    /* Mask out FFs that don't require stopping TB execution here: CR3 syncing
       and TLB flushing are handled elsewhere, and UNHALT is moot while code is
       being executed. */
    fCpu &= VMCPU_FF_ALL_MASK & ~(  VMCPU_FF_PGM_SYNC_CR3
                                  | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
                                  | VMCPU_FF_TLB_FLUSH
                                  | VMCPU_FF_UNHALT );
    /** @todo this isn't even close to the NMI and interrupt conditions in EM! */
    /* Keep executing the TB when nothing relevant is pending, or when the only
       pending items are maskable PIC/APIC interrupts that cannot currently be
       delivered (IF clear, or we're in an interrupt shadow), and no VM-wide
       FFs are set either. */
    if (RT_LIKELY(   (   !fCpu
                      || (   !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
                          && (   !pVCpu->cpum.GstCtx.rflags.Bits.u1IF
                              || CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)) ) )
                  && !VM_FF_IS_ANY_SET(pVCpu->CTX_SUFF(pVM), VM_FF_ALL_MASK) ))
        return VINF_SUCCESS;

    /* Something needs servicing: log it and leave the TB. */
    Log(("%04x:%08RX32: Pending IRQ and/or FF: fCpu=%#RX64 fVm=%#RX32 IF=%d\n",
         pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, fCpu,
         pVCpu->CTX_SUFF(pVM)->fGlobalForcedActions & VM_FF_ALL_MASK, pVCpu->cpum.GstCtx.rflags.Bits.u1IF));
    return VINF_IEM_REEXEC_MODE_CHANGED;
}
124
125
126
127/**
128 * Built-in function that compares the fExec mask against uParam0.
129 *
130 * This is used both for IEM_CIMPL_F_MODE and IEM_CIMPL_F_VMEXIT after executing
131 * an instruction.
132 */
133IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckMode)
134{
135 uint32_t const fExpectedExec = (uint32_t)uParam0;
136 if (pVCpu->iem.s.fExec == fExpectedExec)
137 return VINF_SUCCESS;
138 LogFlow(("Mode changed at %04x:%08RX64: %#x -> %#x (xor: %#x)\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
139 fExpectedExec, pVCpu->iem.s.fExec, fExpectedExec ^ pVCpu->iem.s.fExec));
140 RT_NOREF(uParam1, uParam2);
141 return VINF_IEM_REEXEC_MODE_CHANGED;
142}
143
144
145DECL_FORCE_INLINE(RTGCPHYS) iemTbGetRangePhysPageAddr(PCIEMTB pTb, uint8_t idxRange)
146{
147 Assert(idxRange < RT_MIN(pTb->cRanges, RT_ELEMENTS(pTb->aRanges)));
148 uint8_t const idxPage = pTb->aRanges[idxRange].idxPhysPage;
149 Assert(idxPage <= RT_ELEMENTS(pTb->aGCPhysPages));
150 if (idxPage == 0)
151 return pTb->GCPhysPc & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
152 Assert(!(pTb->aGCPhysPages[idxPage - 1] & GUEST_PAGE_OFFSET_MASK));
153 return pTb->aGCPhysPages[idxPage - 1];
154}
155
156
/**
 * Macro that implements the 16/32-bit CS.LIM check, as this is done by a
 * number of functions.
 *
 * Raises \#GP(0) and returns from the enclosing function when the check
 * fails.
 *
 * Hygiene fix: the condition previously referenced the bare identifier
 * cbInstr instead of the macro parameter, silently requiring every caller to
 * have a local named exactly 'cbInstr'.  All current call sites pass cbInstr,
 * so the expansion is unchanged.
 *
 * NOTE(review): the comparison (eip - CS.LIM >= cbInstr on the likely path,
 * relying on unsigned wrap) only faults when EIP itself lies within
 * [CS.LIM, CS.LIM + cbInstr - 1]; an instruction that merely extends past
 * CS.LIM with EIP below it is not caught here — presumably validated at TB
 * compile time.  Confirm against upstream before changing the comparison.
 */
#define BODY_CHECK_CS_LIM(a_cbInstr) do { \
        if (RT_LIKELY(pVCpu->cpum.GstCtx.eip - pVCpu->cpum.GstCtx.cs.u32Limit >= (a_cbInstr))) \
        { /* likely */ } \
        else \
        { \
            Log7(("EIP out of bounds at %04x:%08RX32 LB %u - CS.LIM=%#RX32\n", \
                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, (a_cbInstr), pVCpu->cpum.GstCtx.cs.u32Limit)); \
            return iemRaiseGeneralProtectionFault0(pVCpu); \
        } \
    } while(0)
171
/**
 * Macro that implements opcode (re-)checking.
 *
 * Compares the opcode bytes currently readable through pbInstrBuf (at the
 * range's physical page offset plus a_offRange) against the reference copy
 * stored in the TB (pabOpcodes).  A mismatch means the guest modified the
 * code, so the TB is retired as obsolete and the enclosing function returns.
 *
 * @param   a_pTb       The translation block.
 * @param   a_idxRange  Index into a_pTb->aRanges.
 * @param   a_offRange  Byte offset into the range at which checking starts.
 * @param   a_cbInstr   Instruction length; only used for logging here.
 */
#define BODY_CHECK_OPCODES(a_pTb, a_idxRange, a_offRange, a_cbInstr) do { \
        Assert((a_idxRange) < (a_pTb)->cRanges && (a_pTb)->cRanges <= RT_ELEMENTS((a_pTb)->aRanges)); \
        Assert((a_offRange) < (a_pTb)->aRanges[(a_idxRange)].cbOpcodes); \
        /* We can use pbInstrBuf here as it will be updated when branching (and prior to executing a TB). */ \
        if (RT_LIKELY(memcmp(&pVCpu->iem.s.pbInstrBuf[(a_pTb)->aRanges[(a_idxRange)].offPhysPage + (a_offRange)], \
                             &(a_pTb)->pabOpcodes[ (a_pTb)->aRanges[(a_idxRange)].offOpcodes + (a_offRange)], \
                             (a_pTb)->aRanges[(a_idxRange)].cbOpcodes - (a_offRange)) == 0)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; range %u, off %#x LB %#x + %#x; #%u\n", (a_pTb), \
                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), (a_idxRange), \
                  (a_pTb)->aRanges[(a_idxRange)].offOpcodes, (a_pTb)->aRanges[(a_idxRange)].cbOpcodes, (a_offRange), __LINE__)); \
            RT_NOREF(a_cbInstr); \
            return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
        } \
    } while(0)
192
/**
 * Macro that implements TLB loading and updating pbInstrBuf updating for an
 * instruction crossing into a new page.
 *
 * Invalidates pbInstrBuf and re-fetches via iemOpcodeFetchBytesJmp, then
 * verifies that the page now mapped is the physical page the TB range
 * expects; if not, the TB is retired as obsolete and the enclosing function
 * returns.
 *
 * This may long jump if we're raising a \#PF, \#GP or similar trouble.
 *
 * @param   a_pTb       The translation block.
 * @param   a_offInstr  Presumably the number of instruction bytes on the old
 *                      page (call sites pass cbStartPage or 0) — confirm
 *                      against iemOpcodeFetchBytesJmp.
 * @param   a_idxRange  Range index covering the new page.
 * @param   a_cbInstr   Instruction length; only used for logging here.
 */
#define BODY_LOAD_TLB_FOR_NEW_PAGE(a_pTb, a_offInstr, a_idxRange, a_cbInstr) do { \
        /* Force a refetch of the instruction buffer for the new page. */ \
        pVCpu->iem.s.pbInstrBuf       = NULL; \
        pVCpu->iem.s.offCurInstrStart = GUEST_PAGE_SIZE - (a_offInstr); \
        pVCpu->iem.s.offInstrNextByte = GUEST_PAGE_SIZE; \
        iemOpcodeFetchBytesJmp(pVCpu, 0, NULL); \
        \
        /* The mapped page must match what the TB was compiled against. */ \
        RTGCPHYS const GCPhysNewPage = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange); \
        if (RT_LIKELY(   pVCpu->iem.s.GCPhysInstrBuf == GCPhysNewPage \
                      && pVCpu->iem.s.pbInstrBuf)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; crossing at %#x; GCPhys=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), (a_offInstr), \
                  pVCpu->iem.s.GCPhysInstrBuf, GCPhysNewPage, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
            RT_NOREF(a_cbInstr); \
            return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
        } \
    } while(0)
218
/**
 * Macro that implements TLB loading and updating pbInstrBuf updating when
 * branching or when crossing a page on an instruction boundary.
 *
 * This differs from BODY_LOAD_TLB_FOR_NEW_PAGE in that it will first check if
 * it is an inter-page branch and also check the page offset.
 *
 * Outcomes: falls through when the branch target matches the TB range;
 * returns VINF_IEM_REEXEC_MODE_CHANGED when the target page offset differs
 * (a jump to code the TB doesn't cover); retires the TB as obsolete when the
 * offset matches but the physical page differs (remapped code).
 *
 * Hygiene fix: four references to the bare identifier pTb were changed to the
 * macro parameter (a_pTb).  All current call sites pass pTb, so the expansion
 * is unchanged.
 *
 * This may long jump if we're raising a \#PF, \#GP or similar trouble.
 */
#define BODY_LOAD_TLB_AFTER_BRANCH(a_pTb, a_idxRange, a_cbInstr) do { \
        /* Is RIP within the current code page? */ \
        Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_IS_64BIT_CODE(pVCpu)); \
        uint64_t const uPc = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base; \
        uint64_t const off = uPc - pVCpu->iem.s.uInstrBufPc; \
        if (off < pVCpu->iem.s.cbInstrBufTotal) \
        { \
            /* Target is inside the already-mapped instruction buffer. */ \
            Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK)); \
            Assert(pVCpu->iem.s.pbInstrBuf); \
            RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                     | (a_pTb)->aRanges[(a_idxRange)].offPhysPage; \
            if (GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + off) \
            { /* we're good */ } \
            else if ((a_pTb)->aRanges[(a_idxRange)].offPhysPage != off) \
            { \
                /* Wrong page offset: jumped somewhere this TB doesn't cover. */ \
                Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; branching/1; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                return VINF_IEM_REEXEC_MODE_CHANGED; /** @todo new status code? */ \
            } \
            else \
            { \
                /* Right offset, wrong physical page: the TB is obsolete. */ \
                Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; branching/1; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
            } \
        } \
        else \
        { \
            /* Must translate new RIP. */ \
            pVCpu->iem.s.pbInstrBuf       = NULL; \
            pVCpu->iem.s.offCurInstrStart = 0; \
            pVCpu->iem.s.offInstrNextByte = 0; \
            iemOpcodeFetchBytesJmp(pVCpu, 0, NULL); \
            Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK) || !pVCpu->iem.s.pbInstrBuf); \
            \
            RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                     | (a_pTb)->aRanges[(a_idxRange)].offPhysPage; \
            uint64_t const offNew = uPc - pVCpu->iem.s.uInstrBufPc; \
            if (   GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + offNew \
                && pVCpu->iem.s.pbInstrBuf) \
            { /* likely */ } \
            else if (   (a_pTb)->aRanges[(a_idxRange)].offPhysPage != offNew \
                     && pVCpu->iem.s.pbInstrBuf) \
            { \
                /* Wrong page offset after remapping: jump outside this TB. */ \
                Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; branching/2; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + offNew, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                return VINF_IEM_REEXEC_MODE_CHANGED; /** @todo new status code? */ \
            } \
            else \
            { \
                /* Physical page mismatch (or no buffer): the TB is obsolete. */ \
                Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; branching/2; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + offNew, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
            } \
        } \
    } while(0)
292
/**
 * Macro that implements PC check after a conditional branch.
 *
 * Verifies that the (flat) PC still falls inside the mapped instruction
 * buffer and that it matches the physical address the TB range expects;
 * otherwise returns VINF_IEM_REEXEC_MODE_CHANGED to leave the TB.
 *
 * Hygiene fix: one reference to the bare identifier pTb was changed to the
 * macro parameter (a_pTb).  All current call sites pass pTb, so the
 * expansion is unchanged.
 *
 * @param   a_pTb       The translation block.
 * @param   a_idxRange  The range index the branch target should be in.
 * @param   a_cbInstr   Instruction length; only used for logging here.
 */
#define BODY_CHECK_PC_AFTER_BRANCH(a_pTb, a_idxRange, a_cbInstr) do { \
        /* Is RIP within the current code page? */ \
        Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_IS_64BIT_CODE(pVCpu)); \
        uint64_t const uPc = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base; \
        uint64_t const off = uPc - pVCpu->iem.s.uInstrBufPc; \
        Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK)); \
        RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                 | (a_pTb)->aRanges[(a_idxRange)].offPhysPage; \
        if (   GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + off \
            && off < pVCpu->iem.s.cbInstrBufTotal) \
        { /* we're good */ } \
        else \
        { \
            Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; GCPhysWithOffset=%RGp hoped for %RGp, pbInstrBuf=%p - #%u\n", \
                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                  pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
            RT_NOREF(a_cbInstr); \
            return VINF_IEM_REEXEC_MODE_CHANGED; /** @todo new status code? */ \
        } \
    } while(0)
316
317
318/**
319 * Built-in function that checks the EIP/IP + uParam0 is within CS.LIM,
320 * raising a \#GP(0) if this isn't the case.
321 */
322IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLim)
323{
324 uint32_t const cbInstr = (uint32_t)uParam0;
325 RT_NOREF(uParam1, uParam2);
326 BODY_CHECK_CS_LIM(cbInstr);
327 return VINF_SUCCESS;
328}
329
330
/**
 * Built-in function for re-checking opcodes and CS.LIM after an instruction
 * that may have modified them.
 *
 * Parameter mapping: uParam0 = instruction length, uParam1 = TB opcode range
 * index, uParam2 = offset into that range's opcode bytes.
 *
 * May raise \#GP(0) (CS.LIM check) or retire the TB as obsolete on opcode
 * mismatch; see the BODY_* macros.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    return VINF_SUCCESS;
}
345
346
/**
 * Built-in function for re-checking opcodes after an instruction that may have
 * modified them.
 *
 * Parameter mapping: uParam0 = instruction length, uParam1 = TB opcode range
 * index, uParam2 = offset into that range's opcode bytes.
 *
 * Retires the TB as obsolete on opcode mismatch; see BODY_CHECK_OPCODES.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    return VINF_SUCCESS;
}
360
361
362/*
363 * Post-branching checkers.
364 */
365
/**
 * Built-in function for checking CS.LIM, checking the PC and checking opcodes
 * after conditional branching within the same page.
 *
 * Parameter mapping: uParam0 = instruction length, uParam1 = TB opcode range
 * index, uParam2 = offset into that range's opcode bytes.
 *
 * @see iemThreadedFunc_BltIn_CheckPcAndOpcodes
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}
385
386
/**
 * Built-in function for checking the PC and checking opcodes after conditional
 * branching within the same page.
 *
 * Parameter mapping: uParam0 = instruction length, uParam1 = TB opcode range
 * index, uParam2 = offset into that range's opcode bytes.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckPcAndOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}
405
406
/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * transitioning to a different code page.
 *
 * The code page transition can either be natural over onto the next page (with
 * the instruction starting at page offset zero) or by means of branching.
 *
 * Parameter mapping: uParam0 = instruction length, uParam1 = TB opcode range
 * index, uParam2 = offset into that range's opcode bytes.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}
429
430
/**
 * Built-in function for loading TLB and checking opcodes when transitioning to
 * a different code page.
 *
 * The code page transition can either be natural over onto the next page (with
 * the instruction starting at page offset zero) or by means of branching.
 *
 * Parameter mapping: uParam0 = instruction length, uParam1 = TB opcode range
 * index, uParam2 = offset into that range's opcode bytes.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}
452
453
454
455/*
456 * Natural page crossing checkers.
457 */
458
/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes on
 * both pages when transitioning to a different code page.
 *
 * This is used when the previous instruction requires revalidation of opcodes
 * bytes and the current instruction straddles a page boundary with opcode
 * bytes in both the old and new page.
 *
 * Parameter mapping: uParam0 low 32 bits = instruction length, uParam0 high
 * 32 bits = opcode bytes on the first page (cbStartPage), uParam1 = first
 * range index, uParam2 = offset into the first range's opcode bytes.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1; /* the opcode bytes continue in the next range */
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    return VINF_SUCCESS;
}
483
484
/**
 * Built-in function for loading TLB and checking opcodes on both pages when
 * transitioning to a different code page.
 *
 * This is used when the previous instruction requires revalidation of opcodes
 * bytes and the current instruction straddles a page boundary with opcode
 * bytes in both the old and new page.
 *
 * Parameter mapping: uParam0 low 32 bits = instruction length, uParam0 high
 * 32 bits = opcode bytes on the first page (cbStartPage), uParam1 = first
 * range index, uParam2 = offset into the first range's opcode bytes.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1; /* the opcode bytes continue in the next range */
    BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    return VINF_SUCCESS;
}
508
509
/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * advancing naturally to a different code page.
 *
 * Only opcodes on the new page are checked.
 *
 * Parameter mapping: uParam0 low 32 bits = instruction length, uParam0 high
 * 32 bits = opcode bytes on the first page (cbStartPage), uParam1 = first
 * range index; uParam2 (offset into the first range) is unused here.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    //uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1; /* the opcode bytes continue in the next range */
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    RT_NOREF(uParam2);
    return VINF_SUCCESS;
}
532
533
/**
 * Built-in function for loading TLB and checking opcodes when advancing
 * naturally to a different code page.
 *
 * Only opcodes on the new page are checked.
 *
 * Parameter mapping: uParam0 low 32 bits = instruction length, uParam0 high
 * 32 bits = opcode bytes on the first page (cbStartPage), uParam1 = first
 * range index; uParam2 (offset into the first range) is unused here.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    //uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1; /* the opcode bytes continue in the next range */
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    RT_NOREF(uParam2);
    return VINF_SUCCESS;
}
555
556
/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * advancing naturally to a different code page with first instr at byte 0.
 *
 * Parameter mapping: uParam0 = instruction length, uParam1 = TB opcode range
 * index; uParam2 must be zero (offset into the range).
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    Assert(uParam2 == 0 /*offRange*/); RT_NOREF(uParam2);
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
    Assert(pVCpu->iem.s.offCurInstrStart == 0);
    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
    return VINF_SUCCESS;
}
575
576
/**
 * Built-in function for loading TLB and checking opcodes when advancing
 * naturally to a different code page with first instr at byte 0.
 *
 * Parameter mapping: uParam0 = instruction length, uParam1 = TB opcode range
 * index; uParam2 must be zero (offset into the range).
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    Assert(uParam2 == 0 /*offRange*/); RT_NOREF(uParam2);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
    Assert(pVCpu->iem.s.offCurInstrStart == 0);
    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
    return VINF_SUCCESS;
}
594
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette