VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllThrdFuncsBltIn.cpp@100802

Last change on this file since 100802 was 100802, checked in by vboxsync, 21 months ago

VMM/IEM: More complete CS.LIM checking, now considering it on branching and page crossings. [fix] bugref:10369

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 30.1 KB
/* $Id: IEMAllThrdFuncsBltIn.cpp 100802 2023-08-04 21:33:13Z vboxsync $ */
/** @file
 * IEM - Instruction Decoding and Emulation, Built-in Threaded Functions.
 *
 * This is separate from IEMThreadedFunctions.cpp because it doesn't work
 * with IEM_WITH_OPAQUE_DECODER_STATE defined.
 */

/*
 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_IEM_RE_THREADED
#define VMCPU_INCL_CPUM_GST_CTX
#include <VBox/vmm/iem.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/apic.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/gim.h>
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
# include <VBox/vmm/em.h>
# include <VBox/vmm/hm_svm.h>
#endif
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
# include <VBox/vmm/hmvmxinline.h>
#endif
#include <VBox/vmm/tm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/dbgftrace.h>
#include "IEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/log.h>
#include <VBox/err.h>
#include <VBox/param.h>
#include <VBox/dis.h>
#include <VBox/disopcode-x86-amd64.h>
#include <iprt/asm-math.h>
#include <iprt/assert.h>
#include <iprt/string.h>
#include <iprt/x86.h>

#include "IEMInline.h"


/**
 * Worker for the built-in functions below that marks the current TB as
 * obsolete and requests a break out of its execution.
 */
static VBOXSTRICTRC iemThreadedFuncWorkerObsoleteTb(PVMCPUCC pVCpu)
{
    iemThreadedTbObsolete(pVCpu, pVCpu->iem.s.pCurTbR3);
    return VINF_IEM_REEXEC_BREAK;
}


/**
 * Built-in function that calls a C-implementation function taking zero arguments.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_DeferToCImpl0)
{
    PFNIEMCIMPL0 const pfnCImpl = (PFNIEMCIMPL0)(uintptr_t)uParam0;
    uint8_t const      cbInstr  = (uint8_t)uParam1;
    RT_NOREF(uParam2);
    return pfnCImpl(pVCpu, cbInstr);
}
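

#if 0
/* Editor's illustrative sketch, not upstream code: one way a recompiler could
   queue the built-in above.  The call-entry struct and emitter are
   hypothetical; only the parameter packing mirrors
   iemThreadedFunc_BltIn_DeferToCImpl0. */
typedef struct IEMSKETCHCALLENTRY
{
    uint64_t auParams[3];
} IEMSKETCHCALLENTRY;

static void iemSketchEmitDeferToCImpl0(IEMSKETCHCALLENTRY *pCall, PFNIEMCIMPL0 pfnCImpl, uint8_t cbInstr)
{
    pCall->auParams[0] = (uintptr_t)pfnCImpl; /* function pointer smuggled through uParam0 */
    pCall->auParams[1] = cbInstr;             /* instruction length, recovered as (uint8_t)uParam1 */
    pCall->auParams[2] = 0;                   /* unused, see RT_NOREF(uParam2) above */
}
#endif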


/**
 * Built-in function that checks for pending interrupts that can be delivered.
 *
 * This triggers after the completion of an instruction, so EIP is already at
 * the next instruction.  If an IRQ or important FF is pending, this will
 * return a non-zero status that stops TB execution.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckIrq)
{
    RT_NOREF(uParam0, uParam1, uParam2);

    /*
     * Check for IRQs and other FFs that need servicing.
     */
    uint64_t fCpu = pVCpu->fLocalForcedActions;
    fCpu &= VMCPU_FF_ALL_MASK & ~(  VMCPU_FF_PGM_SYNC_CR3
                                  | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
                                  | VMCPU_FF_TLB_FLUSH
                                  | VMCPU_FF_UNHALT );
    /** @todo this isn't even close to the NMI and interrupt conditions in EM! */
    if (RT_LIKELY(   (   !fCpu
                      || (   !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
                          && (   !pVCpu->cpum.GstCtx.rflags.Bits.u1IF
                              || CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)) ) )
                  && !VM_FF_IS_ANY_SET(pVCpu->CTX_SUFF(pVM), VM_FF_ALL_MASK) ))
        return VINF_SUCCESS;

    Log(("%04x:%08RX32: Pending IRQ and/or FF: fCpu=%#RX64 fVm=%#RX32 IF=%d\n",
         pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, fCpu,
         pVCpu->CTX_SUFF(pVM)->fGlobalForcedActions & VM_FF_ALL_MASK, pVCpu->cpum.GstCtx.rflags.Bits.u1IF));
    return VINF_IEM_REEXEC_BREAK;
}
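

#if 0
/* Editor's illustrative sketch, not upstream code: the force-flag test above
   in isolation.  Returns true when the TB may keep running, i.e. nothing
   relevant is pending, or only PIC/APIC interrupts are pending while
   EFLAGS.IF is clear or an interrupt shadow blocks delivery. */
static bool iemSketchCanContinueTb(uint64_t fCpu, bool fIf, bool fInInterruptShadow, uint32_t fVm)
{
    fCpu &= VMCPU_FF_ALL_MASK & ~(  VMCPU_FF_PGM_SYNC_CR3
                                  | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
                                  | VMCPU_FF_TLB_FLUSH
                                  | VMCPU_FF_UNHALT);
    if (fVm & VM_FF_ALL_MASK)
        return false;
    return !fCpu
        || (   !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
            && (!fIf || fInInterruptShadow));
}
#endif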



/**
 * Built-in function that compares the fExec mask against uParam0.
 *
 * This is used both for IEM_CIMPL_F_MODE and IEM_CIMPL_F_VMEXIT after executing
 * an instruction.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckMode)
{
    uint32_t const fExpectedExec = (uint32_t)uParam0;
    if (pVCpu->iem.s.fExec == fExpectedExec)
        return VINF_SUCCESS;
    LogFlow(("Mode changed at %04x:%08RX64: %#x -> %#x (xor: %#x)\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
             fExpectedExec, pVCpu->iem.s.fExec, fExpectedExec ^ pVCpu->iem.s.fExec));
    RT_NOREF(uParam1, uParam2);
    return VINF_IEM_REEXEC_BREAK;
}


/**
 * Translates a TB opcode-range index into the guest physical address of the
 * range's code page.
 *
 * idxPhysPage 0 refers to the TB's first code page, derived from GCPhysPc,
 * while indexes 1 and up refer to aGCPhysPages[idxPhysPage - 1].
 */
DECL_FORCE_INLINE(RTGCPHYS) iemTbGetRangePhysPageAddr(PCIEMTB pTb, uint8_t idxRange)
{
    Assert(idxRange < RT_MIN(pTb->cRanges, RT_ELEMENTS(pTb->aRanges)));
    uint8_t const idxPage = pTb->aRanges[idxRange].idxPhysPage;
    Assert(idxPage <= RT_ELEMENTS(pTb->aGCPhysPages));
    if (idxPage == 0)
        return pTb->GCPhysPc & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
    Assert(!(pTb->aGCPhysPages[idxPage - 1] & GUEST_PAGE_OFFSET_MASK));
    return pTb->aGCPhysPages[idxPage - 1];
}
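

#if 0
/* Editor's illustrative sketch, not upstream code: the idxPhysPage encoding
   consumed above, spelled out for a hypothetical TB spanning two pages
   (aRanges[0].idxPhysPage == 0, aRanges[1].idxPhysPage == 1). */
static void iemSketchShowPageEncoding(PCIEMTB pTb)
{
    RTGCPHYS const GCPhysPage0 = iemTbGetRangePhysPageAddr(pTb, 0); /* == GCPhysPc & ~GUEST_PAGE_OFFSET_MASK */
    RTGCPHYS const GCPhysPage1 = iemTbGetRangePhysPageAddr(pTb, 1); /* == pTb->aGCPhysPages[0] */
    RT_NOREF(GCPhysPage0, GCPhysPage1);
}
#endif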


/**
 * Macro that implements the 16/32-bit CS.LIM check, as this is done by a
 * number of functions.
 */
#define BODY_CHECK_CS_LIM(a_cbInstr) do { \
        if (RT_LIKELY(pVCpu->cpum.GstCtx.eip - pVCpu->cpum.GstCtx.cs.u32Limit >= (a_cbInstr))) \
        { /* likely */ } \
        else \
        { \
            Log7(("EIP out of bounds at %04x:%08RX32 LB %u - CS.LIM=%#RX32\n", \
                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, (a_cbInstr), pVCpu->cpum.GstCtx.cs.u32Limit)); \
            return iemRaiseGeneralProtectionFault0(pVCpu); \
        } \
    } while(0)

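#if 0
/* Editor's illustrative sketch, not upstream code: the unsigned-wraparound
   trick used by BODY_CHECK_CS_LIM as a standalone predicate.  With EIP at or
   below CS.LIM the 32-bit difference wraps to a huge value and the check
   passes; it fails when EIP lands in [CS.LIM, CS.LIM + cbInstr).  E.g. with
   uCsLim = 0xffff:
       uEip = 0x1000, cbInstr = 4: 0x1000 - 0xffff = 0xffff1001 >= 4 -> OK
       uEip = 0xffff, cbInstr = 4: 0xffff - 0xffff = 0          <  4 -> #GP(0) */
static bool iemSketchCsLimCheckPasses(uint32_t uEip, uint32_t uCsLim, uint32_t cbInstr)
{
    return uEip - uCsLim >= cbInstr; /* 32-bit wraparound is intended here */
}
#endif
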
/**
 * Macro that implements opcode (re-)checking.
 */
#define BODY_CHECK_OPCODES(a_pTb, a_idxRange, a_offRange, a_cbInstr) do { \
        Assert((a_idxRange) < (a_pTb)->cRanges && (a_pTb)->cRanges <= RT_ELEMENTS((a_pTb)->aRanges)); \
        Assert((a_offRange) < (a_pTb)->aRanges[(a_idxRange)].cbOpcodes); \
        /* We can use pbInstrBuf here as it will be updated when branching (and prior to executing a TB). */ \
        if (RT_LIKELY(memcmp(&pVCpu->iem.s.pbInstrBuf[(a_pTb)->aRanges[(a_idxRange)].offPhysPage + (a_offRange)], \
                             &(a_pTb)->pabOpcodes[    (a_pTb)->aRanges[(a_idxRange)].offOpcodes + (a_offRange)], \
                             (a_pTb)->aRanges[(a_idxRange)].cbOpcodes - (a_offRange)) == 0)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; range %u, off %#x LB %#x + %#x; #%u\n", (a_pTb), \
                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), (a_idxRange), \
                  (a_pTb)->aRanges[(a_idxRange)].offOpcodes, (a_pTb)->aRanges[(a_idxRange)].cbOpcodes, (a_offRange), __LINE__)); \
            RT_NOREF(a_cbInstr); \
            return iemThreadedFuncWorkerObsoleteTb(pVCpu); \
        } \
    } while(0)

/**
 * Macro that implements TLB loading and pbInstrBuf updating for an instruction
 * crossing into a new page.
 *
 * This may long jump if we're raising a \#PF, \#GP or similar trouble.
 */
#define BODY_LOAD_TLB_FOR_NEW_PAGE(a_pTb, a_offInstr, a_idxRange, a_cbInstr) do { \
        pVCpu->iem.s.pbInstrBuf       = NULL; \
        pVCpu->iem.s.offCurInstrStart = GUEST_PAGE_SIZE - (a_offInstr); \
        pVCpu->iem.s.offInstrNextByte = GUEST_PAGE_SIZE; \
        iemOpcodeFetchBytesJmp(pVCpu, 0, NULL); \
        \
        RTGCPHYS const GCPhysNewPage = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange); \
        if (RT_LIKELY(   pVCpu->iem.s.GCPhysInstrBuf == GCPhysNewPage \
                      && pVCpu->iem.s.pbInstrBuf)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; crossing at %#x; GCPhys=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), (a_offInstr), \
                  pVCpu->iem.s.GCPhysInstrBuf, GCPhysNewPage, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
            RT_NOREF(a_cbInstr); \
            return iemThreadedFuncWorkerObsoleteTb(pVCpu); \
        } \
    } while(0)

/**
 * Macro that implements TLB loading and pbInstrBuf updating when branching or
 * when crossing a page on an instruction boundary.
 *
 * This differs from BODY_LOAD_TLB_FOR_NEW_PAGE in that it will first check if
 * it is an inter-page branch and also check the page offset.
 *
 * This may long jump if we're raising a \#PF, \#GP or similar trouble.
 */
#define BODY_LOAD_TLB_AFTER_BRANCH(a_pTb, a_idxRange, a_cbInstr) do { \
        /* Is RIP within the current code page? */ \
        Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_IS_64BIT_CODE(pVCpu)); \
        uint64_t const uPc = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base; \
        uint64_t const off = uPc - pVCpu->iem.s.uInstrBufPc; \
        if (off < pVCpu->iem.s.cbInstrBufTotal) \
        { \
            Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK)); \
            Assert(pVCpu->iem.s.pbInstrBuf); \
            RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                     | (a_pTb)->aRanges[(a_idxRange)].offPhysPage; \
            if (GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + off) \
            { /* we're good */ } \
            else if ((a_pTb)->aRanges[(a_idxRange)].offPhysPage != off) \
            { \
                Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; branching/1; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                return VINF_IEM_REEXEC_BREAK; \
            } \
            else \
            { \
                Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; branching/1; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                return iemThreadedFuncWorkerObsoleteTb(pVCpu); \
            } \
        } \
        else \
        { \
            /* Must translate new RIP. */ \
            pVCpu->iem.s.pbInstrBuf       = NULL; \
            pVCpu->iem.s.offCurInstrStart = 0; \
            pVCpu->iem.s.offInstrNextByte = 0; \
            iemOpcodeFetchBytesJmp(pVCpu, 0, NULL); \
            Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK) || !pVCpu->iem.s.pbInstrBuf); \
            \
            RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                     | (a_pTb)->aRanges[(a_idxRange)].offPhysPage; \
            uint64_t const offNew = uPc - pVCpu->iem.s.uInstrBufPc; \
            if (   GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + offNew \
                && pVCpu->iem.s.pbInstrBuf) \
            { /* likely */ } \
            else if (   (a_pTb)->aRanges[(a_idxRange)].offPhysPage != offNew \
                     && pVCpu->iem.s.pbInstrBuf) \
            { \
                Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; branching/2; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + offNew, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                return VINF_IEM_REEXEC_BREAK; \
            } \
            else \
            { \
                Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; branching/2; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + offNew, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                return iemThreadedFuncWorkerObsoleteTb(pVCpu); \
            } \
        } \
    } while(0)

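#if 0
/* Editor's illustrative sketch, not upstream code: the three-way outcome the
   macro above produces once the branch target has been translated. */
typedef enum IEMSKETCHBRANCHCHK
{
    kIemSketchBranchChk_Ok,       /* target matches the TB range: keep executing */
    kIemSketchBranchChk_JmpMiss,  /* page offset differs: wrong TB for this target,
                                     so VINF_IEM_REEXEC_BREAK and look up another */
    kIemSketchBranchChk_Obsolete  /* offset matches but the physical page differs:
                                     the code behind the TB changed, retire the TB */
} IEMSKETCHBRANCHCHK;

static IEMSKETCHBRANCHCHK iemSketchClassifyBranch(RTGCPHYS GCPhysExpected, RTGCPHYS GCPhysActual,
                                                  uint32_t offExpected, uint64_t offActual)
{
    if (GCPhysExpected == GCPhysActual)
        return kIemSketchBranchChk_Ok;
    if (offExpected != offActual)
        return kIemSketchBranchChk_JmpMiss;
    return kIemSketchBranchChk_Obsolete;
}
#endif
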
/**
 * Macro that implements PC check after a conditional branch.
 */
#define BODY_CHECK_PC_AFTER_BRANCH(a_pTb, a_idxRange, a_cbInstr) do { \
        /* Is RIP within the current code page? */ \
        Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_IS_64BIT_CODE(pVCpu)); \
        uint64_t const uPc = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base; \
        uint64_t const off = uPc - pVCpu->iem.s.uInstrBufPc; \
        Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK)); \
        RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                 | (a_pTb)->aRanges[(a_idxRange)].offPhysPage; \
        if (   GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + off \
            && off < pVCpu->iem.s.cbInstrBufTotal) \
        { /* we're good */ } \
        else \
        { \
            Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; GCPhysWithOffset=%RGp hoped for %RGp, pbInstrBuf=%p - #%u\n", \
                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                  pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
            RT_NOREF(a_cbInstr); \
            return VINF_IEM_REEXEC_BREAK; \
        } \
    } while(0)

/**
 * Macro that considers whether we need CS.LIM checking after a branch or
 * crossing over to a new page.
 *
 * This may long jump if we're raising a \#PF, \#GP or similar trouble.
 */
#define BODY_CONSIDER_CS_LIM_CHECKING(a_pTb, a_cbInstr) do { \
        int64_t const offFromLim = (int64_t)pVCpu->cpum.GstCtx.cs.u32Limit - (int64_t)pVCpu->cpum.GstCtx.eip; \
        if (offFromLim >= GUEST_PAGE_SIZE + 16 - (int32_t)(pVCpu->cpum.GstCtx.cs.u64Base & GUEST_PAGE_OFFSET_MASK)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB need CS.LIM: %p at %04x:%08RX64 LB %u; offFromLim=%#RX64 CS.LIM=%#RX32 CS.BASE=%#RX64 - #%u\n", \
                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), offFromLim, \
                  pVCpu->cpum.GstCtx.cs.u32Limit, pVCpu->cpum.GstCtx.cs.u64Base, __LINE__)); \
            RT_NOREF(a_pTb, a_cbInstr); \
            return VINF_IEM_REEXEC_BREAK; \
        } \
    } while(0)

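#if 0
/* Editor's illustrative sketch, not upstream code: the threshold used by
   BODY_CONSIDER_CS_LIM_CHECKING as a standalone predicate, with numbers.
   Assuming GUEST_PAGE_SIZE = 0x1000 and a page-aligned CS.BASE, the full
   CS.LIM checks are only forced when fewer than 0x1000 + 16 bytes remain
   below the limit, i.e. when the next page plus one maximum-length
   instruction could run into CS.LIM:
       CS.LIM = 0xffffffff, EIP = 0x00401000: offFromLim = 0xffbfefff -> skip
       CS.LIM = 0x00002fff, EIP = 0x00001ff0: offFromLim = 0x100f     -> force */
static bool iemSketchNeedsCsLimChecking(uint32_t uCsLim, uint32_t uEip, uint64_t uCsBase)
{
    int64_t const offFromLim = (int64_t)uCsLim - (int64_t)uEip;
    return offFromLim < GUEST_PAGE_SIZE + 16 - (int32_t)(uCsBase & GUEST_PAGE_OFFSET_MASK);
}
#endif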
337
338
/**
 * Built-in function that checks that EIP/IP plus uParam0 is within CS.LIM,
 * raising a \#GP(0) if this isn't the case.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLim)
{
    uint32_t const cbInstr = (uint32_t)uParam0;
    RT_NOREF(uParam1, uParam2);
    BODY_CHECK_CS_LIM(cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for re-checking opcodes and CS.LIM after an instruction
 * that may have modified them.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for re-checking opcodes after an instruction that may have
 * modified them.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for re-checking opcodes and considering the need for CS.LIM
 * checking after an instruction that may have modified them.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    return VINF_SUCCESS;
}


/*
 * Post-branching checkers.
 */

/**
 * Built-in function for checking CS.LIM, checking the PC and checking opcodes
 * after conditional branching within the same page.
 *
 * @see iemThreadedFunc_BltIn_CheckPcAndOpcodes
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking the PC and checking opcodes after conditional
 * branching within the same page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckPcAndOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking the PC and checking opcodes and considering
 * the need for CS.LIM checking after conditional branching within the same
 * page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckPcAndOpcodesConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * transitioning to a different code page.
 *
 * The code page transition can either be natural, flowing over onto the next
 * page (with the instruction starting at page offset zero), or it can be the
 * result of branching.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes when transitioning to
 * a different code page.
 *
 * The code page transition can either be natural, flowing over onto the next
 * page (with the instruction starting at page offset zero), or it can be the
 * result of branching.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes and considering the
 * need for CS.LIM checking when transitioning to a different code page.
 *
 * The code page transition can either be natural, flowing over onto the next
 * page (with the instruction starting at page offset zero), or it can be the
 * result of branching.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}



/*
 * Natural page crossing checkers.
 */

/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes on
 * both pages when transitioning to a different code page.
 *
 * This is used when the previous instruction requires revalidation of the
 * opcode bytes and the current instruction straddles a page boundary with
 * opcode bytes in both the old and the new page.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    return VINF_SUCCESS;
}

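#if 0
/* Editor's illustrative sketch (hypothetical helper, not upstream code): how
   the 64-bit uParam0 consumed by the three AcrossPage variants packs two
   32-bit values -- the instruction length in the low half and the number of
   instruction bytes on the first page in the high half. */
static uint64_t iemSketchPackAcrossPageParam0(uint32_t cbInstr, uint32_t cbStartPage)
{
    return (uint64_t)cbStartPage << 32 | cbInstr;
}
#endif
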

/**
 * Built-in function for loading TLB and checking opcodes on both pages when
 * transitioning to a different code page.
 *
 * This is used when the previous instruction requires revalidation of the
 * opcode bytes and the current instruction straddles a page boundary with
 * opcode bytes in both the old and the new page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes on both pages and
 * considering the need for CS.LIM checking when transitioning to a different
 * code page.
 *
 * This is used when the previous instruction requires revalidation of the
 * opcode bytes and the current instruction straddles a page boundary with
 * opcode bytes in both the old and the new page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * advancing naturally to a different code page.
 *
 * Only opcodes on the new page are checked.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    //uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    RT_NOREF(uParam2);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes when advancing
 * naturally to a different code page.
 *
 * Only opcodes on the new page are checked.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    //uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    RT_NOREF(uParam2);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes and considering the
 * need for CS.LIM checking when advancing naturally to a different code page.
 *
 * Only opcodes on the new page are checked.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    //uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    RT_NOREF(uParam2);
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * advancing naturally to a different code page with the first instruction at
 * byte 0.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    Assert(uParam2 == 0 /*offRange*/); RT_NOREF(uParam2);
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
    Assert(pVCpu->iem.s.offCurInstrStart == 0);
    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes when advancing
 * naturally to a different code page with the first instruction at byte 0.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    Assert(uParam2 == 0 /*offRange*/); RT_NOREF(uParam2);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
    Assert(pVCpu->iem.s.offCurInstrStart == 0);
    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes and considering the
 * need for CS.LIM checking when advancing naturally to a different code page
 * with the first instruction at byte 0.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    Assert(uParam2 == 0 /*offRange*/); RT_NOREF(uParam2);
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
    Assert(pVCpu->iem.s.offCurInstrStart == 0);
    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
    return VINF_SUCCESS;
}