VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllThrdFuncsBltIn.cpp@ 100828

Last change on this file since 100828 was 100828, checked in by vboxsync, 20 months ago

VMM/IEM: Corrected BODY_CHECK_CS_LIM — the limit calculation was off by one. Untested. bugref:10369

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 30.8 KB
Line 
1/* $Id: IEMAllThrdFuncsBltIn.cpp 100828 2023-08-09 12:03:53Z vboxsync $ */
2/** @file
3 * IEM - Instruction Decoding and Emulation, Built-in Threaded Functions.
4 *
5 * This is separate from IEMThreadedFunctions.cpp because it doesn't work
6 * with IEM_WITH_OPAQUE_DECODER_STATE defined.
7 */
8
9/*
10 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
11 *
12 * This file is part of VirtualBox base platform packages, as
13 * available from https://www.virtualbox.org.
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation, in version 3 of the
18 * License.
19 *
20 * This program is distributed in the hope that it will be useful, but
21 * WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 * General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, see <https://www.gnu.org/licenses>.
27 *
28 * SPDX-License-Identifier: GPL-3.0-only
29 */
30
31
32/*********************************************************************************************************************************
33* Header Files *
34*********************************************************************************************************************************/
35#define LOG_GROUP LOG_GROUP_IEM_RE_THREADED
36#define VMCPU_INCL_CPUM_GST_CTX
37#include <VBox/vmm/iem.h>
38#include <VBox/vmm/cpum.h>
39#include <VBox/vmm/apic.h>
40#include <VBox/vmm/pdm.h>
41#include <VBox/vmm/pgm.h>
42#include <VBox/vmm/iom.h>
43#include <VBox/vmm/em.h>
44#include <VBox/vmm/hm.h>
45#include <VBox/vmm/nem.h>
46#include <VBox/vmm/gim.h>
47#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
48# include <VBox/vmm/em.h>
49# include <VBox/vmm/hm_svm.h>
50#endif
51#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
52# include <VBox/vmm/hmvmxinline.h>
53#endif
54#include <VBox/vmm/tm.h>
55#include <VBox/vmm/dbgf.h>
56#include <VBox/vmm/dbgftrace.h>
57#include "IEMInternal.h"
58#include <VBox/vmm/vmcc.h>
59#include <VBox/log.h>
60#include <VBox/err.h>
61#include <VBox/param.h>
62#include <VBox/dis.h>
63#include <VBox/disopcode-x86-amd64.h>
64#include <iprt/asm-math.h>
65#include <iprt/assert.h>
66#include <iprt/string.h>
67#include <iprt/x86.h>
68
69#include "IEMInline.h"
70
71
72
/**
 * Worker shared by the BODY_* checker macros below: marks the current
 * translation block (TB) as obsolete and returns VINF_IEM_REEXEC_BREAK so the
 * caller stops executing it.
 *
 * NOTE(review): 'Threade' looks like a typo for 'Threaded', but the name is
 *               referenced by several macros below, so it is kept as-is here.
 */
static VBOXSTRICTRC iemThreadeFuncWorkerObsoleteTb(PVMCPUCC pVCpu)
{
    iemThreadedTbObsolete(pVCpu, pVCpu->iem.s.pCurTbR3);
    return VINF_IEM_REEXEC_BREAK;
}
78
79
80/**
81 * Built-in function that calls a C-implemention function taking zero arguments.
82 */
83IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_DeferToCImpl0)
84{
85 PFNIEMCIMPL0 const pfnCImpl = (PFNIEMCIMPL0)(uintptr_t)uParam0;
86 uint8_t const cbInstr = (uint8_t)uParam1;
87 RT_NOREF(uParam2);
88 return pfnCImpl(pVCpu, cbInstr);
89}
90
91
/**
 * Built-in function that checks for pending interrupts that can be delivered or
 * forced action flags.
 *
 * This triggers after the completion of an instruction, so EIP is already at
 * the next instruction.  If an IRQ or important FF is pending, this will return
 * a non-zero status that stops TB execution.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckIrq)
{
    RT_NOREF(uParam0, uParam1, uParam2);

    /*
     * Check for IRQs and other FFs that needs servicing.
     */
    uint64_t fCpu = pVCpu->fLocalForcedActions;
    /* Mask out the FFs that are handled elsewhere and must not break TB execution. */
    fCpu &= VMCPU_FF_ALL_MASK & ~(  VMCPU_FF_PGM_SYNC_CR3
                                  | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
                                  | VMCPU_FF_TLB_FLUSH
                                  | VMCPU_FF_UNHALT );
    /** @todo this isn't even close to the NMI and interrupt conditions in EM! */
    if (RT_LIKELY(   (   !fCpu
                      || (   !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)) /* only APIC/PIC IRQs pending... */
                          && (   !pVCpu->cpum.GstCtx.rflags.Bits.u1IF                      /* ...and they cannot be delivered: IF clear... */
                              || CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)) ) )         /* ...or we're in an interrupt shadow. */
                  && !VM_FF_IS_ANY_SET(pVCpu->CTX_SUFF(pVM), VM_FF_ALL_MASK) ))
        return VINF_SUCCESS;

    /* Something needs servicing: log it and break out of the TB. */
    Log(("%04x:%08RX32: Pending IRQ and/or FF: fCpu=%#RX64 fVm=%#RX32 IF=%d\n",
         pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, fCpu,
         pVCpu->CTX_SUFF(pVM)->fGlobalForcedActions & VM_FF_ALL_MASK, pVCpu->cpum.GstCtx.rflags.Bits.u1IF));
    STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckIrqBreaks);
    return VINF_IEM_REEXEC_BREAK;
}
126
127
128
129/**
130 * Built-in function that compares the fExec mask against uParam0.
131 *
132 * This is used both for IEM_CIMPL_F_MODE and IEM_CIMPL_F_VMEXIT after executing
133 * an instruction.
134 */
135IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckMode)
136{
137 uint32_t const fExpectedExec = (uint32_t)uParam0;
138 if ((pVCpu->iem.s.fExec & IEMTB_F_KEY_MASK) == (fExpectedExec & IEMTB_F_KEY_MASK))
139 return VINF_SUCCESS;
140 LogFlow(("Mode changed at %04x:%08RX64: %#x -> %#x (xor: %#x, xor-key: %#x)\n",
141 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fExpectedExec,
142 pVCpu->iem.s.fExec, fExpectedExec ^ pVCpu->iem.s.fExec, (fExpectedExec ^ pVCpu->iem.s.fExec) & IEMTB_F_KEY_MASK));
143 RT_NOREF(uParam1, uParam2);
144 STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckModeBreaks);
145 return VINF_IEM_REEXEC_BREAK;
146}
147
148
149DECL_FORCE_INLINE(RTGCPHYS) iemTbGetRangePhysPageAddr(PCIEMTB pTb, uint8_t idxRange)
150{
151 Assert(idxRange < RT_MIN(pTb->cRanges, RT_ELEMENTS(pTb->aRanges)));
152 uint8_t const idxPage = pTb->aRanges[idxRange].idxPhysPage;
153 Assert(idxPage <= RT_ELEMENTS(pTb->aGCPhysPages));
154 if (idxPage == 0)
155 return pTb->GCPhysPc & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
156 Assert(!(pTb->aGCPhysPages[idxPage - 1] & GUEST_PAGE_OFFSET_MASK));
157 return pTb->aGCPhysPages[idxPage - 1];
158}
159
160
/**
 * Macro that implements the 16/32-bit CS.LIM check, as this is done by a
 * number of functions.
 *
 * Raises \#GP(0) when the last byte of the instruction (EIP + cbInstr - 1)
 * lies beyond CS.LIM.
 *
 * Fix: the check now uses the (parenthesized) macro parameter instead of
 * relying on a local literally named 'cbInstr' existing at the expansion
 * site, which would silently test the wrong value for any other argument.
 */
/** @todo consider 32-bit EIP mid-instruction wrap-around... Difficult to
 *        test, since it would require replacing the default firmware. */
#define BODY_CHECK_CS_LIM(a_cbInstr) do { \
        if (RT_LIKELY((uint32_t)(pVCpu->cpum.GstCtx.eip + (a_cbInstr) - 1U) <= pVCpu->cpum.GstCtx.cs.u32Limit)) \
        { /* likely */ } \
        else \
        { \
            Log7(("EIP out of bounds at %04x:%08RX32 LB %u - CS.LIM=%#RX32\n", \
                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, (a_cbInstr), pVCpu->cpum.GstCtx.cs.u32Limit)); \
            return iemRaiseGeneralProtectionFault0(pVCpu); \
        } \
    } while(0)
177
/**
 * Macro that implements opcode (re-)checking.
 *
 * Compares the guest opcode bytes currently mapped for the range (via
 * pbInstrBuf) against the copy recorded in the TB; on mismatch the TB is
 * flagged obsolete and execution breaks off.
 */
#define BODY_CHECK_OPCODES(a_pTb, a_idxRange, a_offRange, a_cbInstr) do { \
        Assert((a_idxRange) < (a_pTb)->cRanges && (a_pTb)->cRanges <= RT_ELEMENTS((a_pTb)->aRanges)); \
        Assert((a_offRange) < (a_pTb)->aRanges[(a_idxRange)].cbOpcodes); \
        /* We can use pbInstrBuf here as it will be updated when branching (and prior to executing a TB). */ \
        if (RT_LIKELY(memcmp(&pVCpu->iem.s.pbInstrBuf[(a_pTb)->aRanges[(a_idxRange)].offPhysPage + (a_offRange)], \
                             &(a_pTb)->pabOpcodes[ (a_pTb)->aRanges[(a_idxRange)].offOpcodes + (a_offRange)], \
                             (a_pTb)->aRanges[(a_idxRange)].cbOpcodes - (a_offRange)) == 0)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; range %u, off %#x LB %#x + %#x; #%u\n", (a_pTb), \
                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), (a_idxRange), \
                  (a_pTb)->aRanges[(a_idxRange)].offOpcodes, (a_pTb)->aRanges[(a_idxRange)].cbOpcodes, (a_offRange), __LINE__)); \
            RT_NOREF(a_cbInstr); \
            return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
        } \
    } while(0)
198
/**
 * Macro that implements TLB loading and updating pbInstrBuf updating for an
 * instruction crossing into a new page.
 *
 * Forces a reload of the instruction buffer for the new page and then checks
 * that the physical page loaded matches what the TB range expects; otherwise
 * the TB is flagged obsolete.
 *
 * This may long jump if we're raising a \#PF, \#GP or similar trouble.
 */
#define BODY_LOAD_TLB_FOR_NEW_PAGE(a_pTb, a_offInstr, a_idxRange, a_cbInstr) do { \
        /* Invalidate the buffer so iemOpcodeFetchBytesJmp does a fresh translation. */ \
        pVCpu->iem.s.pbInstrBuf       = NULL; \
        pVCpu->iem.s.offCurInstrStart = GUEST_PAGE_SIZE - (a_offInstr); \
        pVCpu->iem.s.offInstrNextByte = GUEST_PAGE_SIZE; \
        iemOpcodeFetchBytesJmp(pVCpu, 0, NULL); /* may long jump (see doc comment) */ \
        \
        RTGCPHYS const GCPhysNewPage = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange); \
        if (RT_LIKELY(   pVCpu->iem.s.GCPhysInstrBuf == GCPhysNewPage \
                      && pVCpu->iem.s.pbInstrBuf)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; crossing at %#x; GCPhys=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), (a_offInstr), \
                  pVCpu->iem.s.GCPhysInstrBuf, GCPhysNewPage, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
            RT_NOREF(a_cbInstr); \
            return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
        } \
    } while(0)
224
/**
 * Macro that implements TLB loading and updating pbInstrBuf updating when
 * branching or when crossing a page on an instruction boundary.
 *
 * This differs from BODY_LOAD_TLB_FOR_NEW_PAGE in that it will first check if
 * it is an inter-page branch and also check the page offset.
 *
 * This may long jump if we're raising a \#PF, \#GP or similar trouble.
 *
 * Fix: all references to the TB now go through the (parenthesized) macro
 * parameter (a_pTb) instead of relying on a local literally named 'pTb'
 * existing at the expansion site (macro hygiene; behavior unchanged for the
 * current callers, which all pass 'pTb').
 */
#define BODY_LOAD_TLB_AFTER_BRANCH(a_pTb, a_idxRange, a_cbInstr) do { \
        /* Is RIP within the current code page? */ \
        Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_IS_64BIT_CODE(pVCpu)); \
        uint64_t const uPc = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base; \
        uint64_t const off = uPc - pVCpu->iem.s.uInstrBufPc; \
        if (off < pVCpu->iem.s.cbInstrBufTotal) \
        { \
            /* Still inside the mapped buffer: just verify the expected physical address. */ \
            Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK)); \
            Assert(pVCpu->iem.s.pbInstrBuf); \
            RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                     | (a_pTb)->aRanges[(a_idxRange)].offPhysPage; \
            if (GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + off) \
            { /* we're good */ } \
            else if ((a_pTb)->aRanges[(a_idxRange)].offPhysPage != off) \
            { \
                /* The branch didn't land where this TB continues: not obsolete, just a miss. */ \
                Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; branching/1; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckBranchMisses); \
                return VINF_IEM_REEXEC_BREAK; \
            } \
            else \
            { \
                /* Same page offset but different physical page: the TB is out of date. */ \
                Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; branching/1; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
            } \
        } \
        else \
        { \
            /* Must translate new RIP. */ \
            pVCpu->iem.s.pbInstrBuf       = NULL; \
            pVCpu->iem.s.offCurInstrStart = 0; \
            pVCpu->iem.s.offInstrNextByte = 0; \
            iemOpcodeFetchBytesJmp(pVCpu, 0, NULL); \
            Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK) || !pVCpu->iem.s.pbInstrBuf); \
            \
            RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                     | (a_pTb)->aRanges[(a_idxRange)].offPhysPage; \
            uint64_t const offNew = uPc - pVCpu->iem.s.uInstrBufPc; \
            if (   GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + offNew \
                && pVCpu->iem.s.pbInstrBuf) \
            { /* likely */ } \
            else if (   (a_pTb)->aRanges[(a_idxRange)].offPhysPage != offNew \
                     && pVCpu->iem.s.pbInstrBuf) \
            { \
                Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; branching/2; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + offNew, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckBranchMisses); \
                return VINF_IEM_REEXEC_BREAK; \
            } \
            else \
            { \
                Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; branching/2; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + offNew, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
            } \
        } \
    } while(0)
300
/**
 * Macro that implements PC check after a conditional branch.
 *
 * Verifies that the new RIP still lies within the currently mapped
 * instruction buffer and matches the physical address the TB range expects;
 * otherwise counts a branch miss and breaks out of the TB.
 *
 * Fix: the range offset lookup now uses the (parenthesized) macro parameter
 * (a_pTb) instead of relying on a local literally named 'pTb' at the
 * expansion site (macro hygiene; behavior unchanged for current callers).
 */
#define BODY_CHECK_PC_AFTER_BRANCH(a_pTb, a_idxRange, a_cbInstr) do { \
        /* Is RIP within the current code page? */ \
        Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_IS_64BIT_CODE(pVCpu)); \
        uint64_t const uPc = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base; \
        uint64_t const off = uPc - pVCpu->iem.s.uInstrBufPc; \
        Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK)); \
        RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                 | (a_pTb)->aRanges[(a_idxRange)].offPhysPage; \
        if (   GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + off \
            && off < pVCpu->iem.s.cbInstrBufTotal) \
        { /* we're good */ } \
        else \
        { \
            Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; GCPhysWithOffset=%RGp hoped for %RGp, pbInstrBuf=%p - #%u\n", \
                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                  pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
            RT_NOREF(a_cbInstr); \
            STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckBranchMisses); \
            return VINF_IEM_REEXEC_BREAK; \
        } \
    } while(0)
325
/**
 * Macro that considers whether we need CS.LIM checking after a branch or
 * crossing over to a new page.
 *
 * Takes the fast path when CS.LIM is at least a page (plus slack) beyond the
 * current EIP; otherwise breaks out of the TB (counted in
 * StatCheckNeedCsLimChecking).
 *
 * This may long jump if we're raising a \#PF, \#GP or similar trouble.
 *
 * Fix: the Log7 format string previously had '#%u' before 'offFromLim=',
 * while __LINE__ was passed as the last argument - every trailing argument
 * fed the wrong (and wrongly sized) specifier.  Reordered so __LINE__ pairs
 * with the trailing '#%u' like in the other Log7 statements in this file.
 */
#define BODY_CONSIDER_CS_LIM_CHECKING(a_pTb, a_cbInstr) do { \
        int64_t const offFromLim = (int64_t)pVCpu->cpum.GstCtx.cs.u32Limit - (int64_t)pVCpu->cpum.GstCtx.eip; \
        if (offFromLim >= GUEST_PAGE_SIZE + 16 - (int32_t)(pVCpu->cpum.GstCtx.cs.u64Base & GUEST_PAGE_OFFSET_MASK)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB need CS.LIM: %p at %04x:%08RX64 LB %u; offFromLim=%#RX64 CS.LIM=%#RX32 CS.BASE=%#RX64 - #%u\n", \
                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), offFromLim, \
                  pVCpu->cpum.GstCtx.cs.u32Limit, pVCpu->cpum.GstCtx.cs.u64Base, __LINE__)); \
            RT_NOREF(a_pTb, a_cbInstr); \
            STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckNeedCsLimChecking); \
            return VINF_IEM_REEXEC_BREAK; \
        } \
    } while(0)
346
347
348
/**
 * Built-in function that checks the EIP/IP + uParam0 is within CS.LIM,
 * raising a \#GP(0) if this isn't the case.
 *
 * uParam0 holds the instruction length; uParam1 and uParam2 are unused.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLim)
{
    uint32_t const cbInstr = (uint32_t)uParam0; /* consumed by BODY_CHECK_CS_LIM */
    RT_NOREF(uParam1, uParam2);
    BODY_CHECK_CS_LIM(cbInstr);
    return VINF_SUCCESS;
}
360
361
/**
 * Built-in function for re-checking opcodes and CS.LIM after an instruction
 * that may have modified them.
 *
 * uParam0 = instruction length, uParam1 = TB range index, uParam2 = offset
 * into the range's opcode bytes.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    return VINF_SUCCESS;
}
376
377
/**
 * Built-in function for re-checking opcodes after an instruction that may have
 * modified them.
 *
 * uParam0 = instruction length, uParam1 = TB range index, uParam2 = offset
 * into the range's opcode bytes.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    return VINF_SUCCESS;
}
391
392
/**
 * Built-in function for re-checking opcodes and considering the need for CS.LIM
 * checking after an instruction that may have modified them.
 *
 * uParam0 = instruction length, uParam1 = TB range index, uParam2 = offset
 * into the range's opcode bytes.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    return VINF_SUCCESS;
}
407
408
409/*
410 * Post-branching checkers.
411 */
412
/**
 * Built-in function for checking CS.LIM, checking the PC and checking opcodes
 * after conditional branching within the same page.
 *
 * uParam0 = instruction length, uParam1 = TB range index, uParam2 = offset
 * into the range's opcode bytes.
 *
 * @see iemThreadedFunc_BltIn_CheckPcAndOpcodes
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}
432
433
/**
 * Built-in function for checking the PC and checking opcodes after conditional
 * branching within the same page.
 *
 * uParam0 = instruction length, uParam1 = TB range index, uParam2 = offset
 * into the range's opcode bytes.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckPcAndOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}
452
453
/**
 * Built-in function for checking the PC and checking opcodes and considering
 * the need for CS.LIM checking after conditional branching within the same
 * page.
 *
 * uParam0 = instruction length, uParam1 = TB range index, uParam2 = offset
 * into the range's opcode bytes.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckPcAndOpcodesConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}
474
475
/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * transitioning to a different code page.
 *
 * The code page transition can either be natural over onto the next page (with
 * the instruction starting at page offset zero) or by means of branching.
 *
 * uParam0 = instruction length, uParam1 = TB range index, uParam2 = offset
 * into the range's opcode bytes.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}
498
499
/**
 * Built-in function for loading TLB and checking opcodes when transitioning to
 * a different code page.
 *
 * The code page transition can either be natural over onto the next page (with
 * the instruction starting at page offset zero) or by means of branching.
 *
 * uParam0 = instruction length, uParam1 = TB range index, uParam2 = offset
 * into the range's opcode bytes.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}
521
522
/**
 * Built-in function for loading TLB and checking opcodes and considering the
 * need for CS.LIM checking when transitioning to a different code page.
 *
 * The code page transition can either be natural over onto the next page (with
 * the instruction starting at page offset zero) or by means of branching.
 *
 * uParam0 = instruction length, uParam1 = TB range index, uParam2 = offset
 * into the range's opcode bytes.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}
545
546
547
548/*
549 * Natural page crossing checkers.
550 */
551
/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes on
 * both pages when transitioning to a different code page.
 *
 * This is used when the previous instruction requires revalidation of opcode
 * bytes and the current instruction straddles a page boundary with opcode
 * bytes in both the old and new page.
 *
 * uParam0 packs the instruction length (low 32 bits) and the number of
 * instruction bytes on the first page (high 32 bits); uParam1 = first TB
 * range index, uParam2 = offset into that range.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1; /* the continuation range on the new page */
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    return VINF_SUCCESS;
}
576
577
/**
 * Built-in function for loading TLB and checking opcodes on both pages when
 * transitioning to a different code page.
 *
 * This is used when the previous instruction requires revalidation of opcode
 * bytes and the current instruction straddles a page boundary with opcode
 * bytes in both the old and new page.
 *
 * uParam0 packs the instruction length (low 32 bits) and the number of
 * instruction bytes on the first page (high 32 bits); uParam1 = first TB
 * range index, uParam2 = offset into that range.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1; /* the continuation range on the new page */
    BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    return VINF_SUCCESS;
}
601
602
/**
 * Built-in function for loading TLB and checking opcodes on both pages and
 * considering the need for CS.LIM checking when transitioning to a different
 * code page.
 *
 * This is used when the previous instruction requires revalidation of opcode
 * bytes and the current instruction straddles a page boundary with opcode
 * bytes in both the old and new page.
 *
 * uParam0 packs the instruction length (low 32 bits) and the number of
 * instruction bytes on the first page (high 32 bits); uParam1 = first TB
 * range index, uParam2 = offset into that range.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1; /* the continuation range on the new page */
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    return VINF_SUCCESS;
}
628
629
/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * advancing naturally to a different code page.
 *
 * Only opcodes on the new page are checked.
 *
 * uParam0 packs the instruction length (low 32 bits) and the number of
 * instruction bytes on the first page (high 32 bits); uParam1 = first TB
 * range index; uParam2 is unused here.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    //uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1; /* the continuation range on the new page */
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    RT_NOREF(uParam2);
    return VINF_SUCCESS;
}
652
653
/**
 * Built-in function for loading TLB and checking opcodes when advancing
 * naturally to a different code page.
 *
 * Only opcodes on the new page are checked.
 *
 * uParam0 packs the instruction length (low 32 bits) and the number of
 * instruction bytes on the first page (high 32 bits); uParam1 = first TB
 * range index; uParam2 is unused here.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    //uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1; /* the continuation range on the new page */
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    RT_NOREF(uParam2);
    return VINF_SUCCESS;
}
675
676
/**
 * Built-in function for loading TLB and checking opcodes and considering the
 * need for CS.LIM checking when advancing naturally to a different code page.
 *
 * Only opcodes on the new page are checked.
 *
 * uParam0 packs the instruction length (low 32 bits) and the number of
 * instruction bytes on the first page (high 32 bits); uParam1 = first TB
 * range index; uParam2 is unused here.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    //uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1; /* the continuation range on the new page */
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    RT_NOREF(uParam2);
    return VINF_SUCCESS;
}
699
700
/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * advancing naturally to a different code page with first instr at byte 0.
 *
 * uParam0 = instruction length, uParam1 = TB range index, uParam2 must be
 * zero (the range offset).
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    Assert(uParam2 == 0 /*offRange*/); RT_NOREF(uParam2);
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr); /* instruction starts at page offset 0 */
    Assert(pVCpu->iem.s.offCurInstrStart == 0);
    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
    return VINF_SUCCESS;
}
719
720
/**
 * Built-in function for loading TLB and checking opcodes when advancing
 * naturally to a different code page with first instr at byte 0.
 *
 * uParam0 = instruction length, uParam1 = TB range index, uParam2 must be
 * zero (the range offset).
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    Assert(uParam2 == 0 /*offRange*/); RT_NOREF(uParam2);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr); /* instruction starts at page offset 0 */
    Assert(pVCpu->iem.s.offCurInstrStart == 0);
    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
    return VINF_SUCCESS;
}
738
739
/**
 * Built-in function for loading TLB and checking opcodes and considering the
 * need for CS.LIM checking when advancing naturally to a different code page
 * with first instr at byte 0.
 *
 * uParam0 = instruction length, uParam1 = TB range index, uParam2 must be
 * zero (the range offset).
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    Assert(uParam2 == 0 /*offRange*/); RT_NOREF(uParam2);
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr); /* instruction starts at page offset 0 */
    Assert(pVCpu->iem.s.offCurInstrStart == 0);
    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
    return VINF_SUCCESS;
}
759
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette