/* $Id: IEMAllThrdFuncsBltIn.cpp 101182 2023-09-19 23:38:24Z vboxsync $ */
/** @file
 * IEM - Instruction Decoding and Emulation, Built-in Threaded Functions.
 *
 * This is separate from IEMThreadedFunctions.cpp because it doesn't work
 * with IEM_WITH_OPAQUE_DECODER_STATE defined.
 */

/*
 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_IEM_RE_THREADED
#define VMCPU_INCL_CPUM_GST_CTX
#include <VBox/vmm/iem.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/apic.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/gim.h>
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
# include <VBox/vmm/em.h>
# include <VBox/vmm/hm_svm.h>
#endif
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
# include <VBox/vmm/hmvmxinline.h>
#endif
#include <VBox/vmm/tm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/dbgftrace.h>
#include "IEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/log.h>
#include <VBox/err.h>
#include <VBox/param.h>
#include <VBox/dis.h>
#include <VBox/disopcode-x86-amd64.h>
#include <iprt/asm-math.h>
#include <iprt/assert.h>
#include <iprt/string.h>
#include <iprt/x86.h>

#include "IEMInline.h"


static VBOXSTRICTRC iemThreadeFuncWorkerObsoleteTb(PVMCPUCC pVCpu)
{
    /* We set fSafeToFree to false because we're being called in the context
       of a TB callback function, which for native TBs means we cannot release
       the executable memory until we've returned our way back to iemTbExec,
       as that return path goes via the native code generated for the TB. */
    iemThreadedTbObsolete(pVCpu, pVCpu->iem.s.pCurTbR3, false /*fSafeToFree*/);
    return VINF_IEM_REEXEC_BREAK;
}


/**
 * Built-in function that calls a C-implementation function taking zero arguments.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_DeferToCImpl0)
{
    PFNIEMCIMPL0 const pfnCImpl = (PFNIEMCIMPL0)(uintptr_t)uParam0;
    uint8_t const      cbInstr  = (uint8_t)uParam1;
    RT_NOREF(uParam2);
    return pfnCImpl(pVCpu, cbInstr);
}
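
/*
 * Illustrative parameter packing (an assumption for illustration, not code
 * from this file): a threaded call deferring, say, HLT to a zero-argument
 * C worker could be set up as
 *      uParam0 = (uint64_t)(uintptr_t)iemCImpl_hlt;    - the C-impl worker
 *      uParam1 = 1;                                    - the instruction length
 *      uParam2 = 0;                                    - unused
 * with iemCImpl_hlt standing in for any PFNIEMCIMPL0-compatible function.
 */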


/**
 * Built-in function that checks for pending interrupts that can be delivered or
 * forced action flags.
 *
 * This triggers after the completion of an instruction, so EIP is already at
 * the next instruction.  If an IRQ or important FF is pending, this will return
 * a non-zero status that stops TB execution.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckIrq)
{
    RT_NOREF(uParam0, uParam1, uParam2);

    /*
     * Check for IRQs and other FFs that need servicing.
     */
    uint64_t fCpu = pVCpu->fLocalForcedActions;
    fCpu &= VMCPU_FF_ALL_MASK & ~(  VMCPU_FF_PGM_SYNC_CR3
                                  | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
                                  | VMCPU_FF_TLB_FLUSH
                                  | VMCPU_FF_UNHALT );
    /** @todo this isn't even close to the NMI and interrupt conditions in EM! */
    if (RT_LIKELY(   (   !fCpu
                      || (   !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
                          && (   !pVCpu->cpum.GstCtx.rflags.Bits.u1IF
                              || CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)) ) )
                  && !VM_FF_IS_ANY_SET(pVCpu->CTX_SUFF(pVM), VM_FF_ALL_MASK) ))
        return VINF_SUCCESS;

    Log(("%04x:%08RX32: Pending IRQ and/or FF: fCpu=%#RX64 fVm=%#RX32 IF=%d\n",
         pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, fCpu,
         pVCpu->CTX_SUFF(pVM)->fGlobalForcedActions & VM_FF_ALL_MASK, pVCpu->cpum.GstCtx.rflags.Bits.u1IF));
    STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckIrqBreaks);
    return VINF_IEM_REEXEC_BREAK;
}
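
/*
 * Reading the RT_LIKELY condition above: execution continues only if no
 * relevant per-CPU FF is pending at all, or if the only pending ones are
 * APIC/PIC interrupts that cannot be delivered right now (IF clear or an
 * interrupt shadow in effect), and in either case no VM-wide FF is set.
 */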



/**
 * Built-in function that compares the fExec mask against uParam0.
 *
 * This is used both for IEM_CIMPL_F_MODE and IEM_CIMPL_F_VMEXIT after executing
 * an instruction.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckMode)
{
    uint32_t const fExpectedExec = (uint32_t)uParam0;
    if ((pVCpu->iem.s.fExec & IEMTB_F_KEY_MASK) == (fExpectedExec & IEMTB_F_KEY_MASK))
        return VINF_SUCCESS;
    LogFlow(("Mode changed at %04x:%08RX64: %#x -> %#x (xor: %#x, xor-key: %#x)\n",
             pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fExpectedExec,
             pVCpu->iem.s.fExec, fExpectedExec ^ pVCpu->iem.s.fExec, (fExpectedExec ^ pVCpu->iem.s.fExec) & IEMTB_F_KEY_MASK));
    RT_NOREF(uParam1, uParam2);
    STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckModeBreaks);
    return VINF_IEM_REEXEC_BREAK;
}
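
/*
 * Example: if the guest switches from 32-bit to 16-bit code, the
 * IEMTB_F_KEY_MASK-relevant bits of fExec change, so every TB compiled for
 * the old mode returns VINF_IEM_REEXEC_BREAK here and a fresh TB is looked
 * up (or compiled) under the new key.
 */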


/**
 * Built-in function that checks for hardware instruction breakpoints.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckHwInstrBps)
{
    VBOXSTRICTRC rcStrict = DBGFBpCheckInstruction(pVCpu->CTX_SUFF(pVM), pVCpu,
                                                   pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
        return VINF_SUCCESS;

    if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
    {
        LogFlow(("Guest HW bp at %04x:%08RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
        rcStrict = iemRaiseDebugException(pVCpu);
        Assert(rcStrict != VINF_SUCCESS);
    }
    else
        LogFlow(("VBoxDbg HW bp at %04x:%08RX64: %Rrc\n",
                 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict) ));
    RT_NOREF(uParam0, uParam1, uParam2);
    return rcStrict;
}


DECL_FORCE_INLINE(RTGCPHYS) iemTbGetRangePhysPageAddr(PCIEMTB pTb, uint8_t idxRange)
{
    Assert(idxRange < RT_MIN(pTb->cRanges, RT_ELEMENTS(pTb->aRanges)));
    uint8_t const idxPage = pTb->aRanges[idxRange].idxPhysPage;
    Assert(idxPage <= RT_ELEMENTS(pTb->aGCPhysPages));
    if (idxPage == 0)
        return pTb->GCPhysPc & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
    Assert(!(pTb->aGCPhysPages[idxPage - 1] & GUEST_PAGE_OFFSET_MASK));
    return pTb->aGCPhysPages[idxPage - 1];
}
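
/*
 * Note on the encoding: idxPhysPage 0 is an implicit reference to the page
 * containing GCPhysPc (the TB's first page), while values 1 and up index
 * aGCPhysPages[idxPage - 1] for pages the TB has crossed into.
 */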


/**
 * Macro that implements the 16/32-bit CS.LIM check, as this is done by a
 * number of functions.
 */
/** @todo consider 32-bit EIP mid-instruction wrap-around... Difficult to
 *        test, since it would require replacing the default firmware. */
#define BODY_CHECK_CS_LIM(a_cbInstr) do { \
        if (RT_LIKELY((uint32_t)(pVCpu->cpum.GstCtx.eip + (a_cbInstr) - 1U) <= pVCpu->cpum.GstCtx.cs.u32Limit)) \
        { /* likely */ } \
        else \
        { \
            Log7(("EIP out of bounds at %04x:%08RX32 LB %u - CS.LIM=%#RX32\n", \
                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, (a_cbInstr), pVCpu->cpum.GstCtx.cs.u32Limit)); \
            return iemRaiseGeneralProtectionFault0(pVCpu); \
        } \
    } while(0)
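
/*
 * Worked example: with CS.LIM = 0xffff and EIP = 0xfffe, a 3-byte instruction
 * gives eip + cbInstr - 1 = 0x10000, which exceeds 0xffff, so the macro
 * raises #GP(0) instead of letting the fetch run past the segment limit.
 */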

/**
 * Macro that implements opcode (re-)checking.
 */
#define BODY_CHECK_OPCODES(a_pTb, a_idxRange, a_offRange, a_cbInstr) do { \
        Assert((a_idxRange) < (a_pTb)->cRanges && (a_pTb)->cRanges <= RT_ELEMENTS((a_pTb)->aRanges)); \
        Assert((a_offRange) < (a_pTb)->aRanges[(a_idxRange)].cbOpcodes); \
        /* We can use pbInstrBuf here as it will be updated when branching (and prior to executing a TB). */ \
        if (RT_LIKELY(memcmp(&pVCpu->iem.s.pbInstrBuf[(a_pTb)->aRanges[(a_idxRange)].offPhysPage + (a_offRange)], \
                             &(a_pTb)->pabOpcodes[    (a_pTb)->aRanges[(a_idxRange)].offOpcodes  + (a_offRange)], \
                             (a_pTb)->aRanges[(a_idxRange)].cbOpcodes - (a_offRange)) == 0)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; range %u, off %#x LB %#x + %#x; #%u\n", (a_pTb), \
                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), (a_idxRange), \
                  (a_pTb)->aRanges[(a_idxRange)].offOpcodes, (a_pTb)->aRanges[(a_idxRange)].cbOpcodes, (a_offRange), __LINE__)); \
            RT_NOREF(a_cbInstr); \
            return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
        } \
    } while(0)
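
/*
 * Note that the memcmp above covers everything from the current instruction
 * to the end of the range (cbOpcodes - offRange), not just the current
 * instruction, so one successful compare validates all remaining opcode
 * bytes in the range.
 */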

/**
 * Macro that implements TLB loading and pbInstrBuf updating for an
 * instruction crossing into a new page.
 *
 * This may long jump if we're raising a \#PF, \#GP or similar trouble.
 */
#define BODY_LOAD_TLB_FOR_NEW_PAGE(a_pTb, a_offInstr, a_idxRange, a_cbInstr) do { \
        pVCpu->iem.s.pbInstrBuf       = NULL; \
        pVCpu->iem.s.offCurInstrStart = GUEST_PAGE_SIZE - (a_offInstr); \
        pVCpu->iem.s.offInstrNextByte = GUEST_PAGE_SIZE; \
        iemOpcodeFetchBytesJmp(pVCpu, 0, NULL); \
        \
        RTGCPHYS const GCPhysNewPage = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange); \
        if (RT_LIKELY(   pVCpu->iem.s.GCPhysInstrBuf == GCPhysNewPage \
                      && pVCpu->iem.s.pbInstrBuf)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; crossing at %#x; GCPhys=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), (a_offInstr), \
                  pVCpu->iem.s.GCPhysInstrBuf, GCPhysNewPage, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
            RT_NOREF(a_cbInstr); \
            return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
        } \
    } while(0)
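
/*
 * The three assignments at the top are relative to the old page's instruction
 * buffer: the current instruction starts a_offInstr bytes before the page
 * boundary and the next byte to fetch is the first byte of the following
 * page.  Clearing pbInstrBuf then forces iemOpcodeFetchBytesJmp to translate
 * and map the new page before its physical address is re-checked.
 */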

/**
 * Macro that implements TLB loading and pbInstrBuf updating when branching or
 * when crossing a page on an instruction boundary.
 *
 * This differs from BODY_LOAD_TLB_FOR_NEW_PAGE in that it will first check if
 * it is an inter-page branch and also check the page offset.
 *
 * This may long jump if we're raising a \#PF, \#GP or similar trouble.
 */
#define BODY_LOAD_TLB_AFTER_BRANCH(a_pTb, a_idxRange, a_cbInstr) do { \
        /* Is RIP within the current code page? */ \
        Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_IS_64BIT_CODE(pVCpu)); \
        uint64_t const uPc = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base; \
        uint64_t const off = uPc - pVCpu->iem.s.uInstrBufPc; \
        if (off < pVCpu->iem.s.cbInstrBufTotal) \
        { \
            Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK)); \
            Assert(pVCpu->iem.s.pbInstrBuf); \
            RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                     | (a_pTb)->aRanges[(a_idxRange)].offPhysPage; \
            if (GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + off) \
            { /* we're good */ } \
            else if ((a_pTb)->aRanges[(a_idxRange)].offPhysPage != off) \
            { \
                Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; branching/1; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckBranchMisses); \
                return VINF_IEM_REEXEC_BREAK; \
            } \
            else \
            { \
                Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; branching/1; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
            } \
        } \
        else \
        { \
            /* Must translate new RIP. */ \
            pVCpu->iem.s.pbInstrBuf       = NULL; \
            pVCpu->iem.s.offCurInstrStart = 0; \
            pVCpu->iem.s.offInstrNextByte = 0; \
            iemOpcodeFetchBytesJmp(pVCpu, 0, NULL); \
            Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK) || !pVCpu->iem.s.pbInstrBuf); \
            \
            RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                     | (a_pTb)->aRanges[(a_idxRange)].offPhysPage; \
            uint64_t const offNew = uPc - pVCpu->iem.s.uInstrBufPc; \
            if (   GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + offNew \
                && pVCpu->iem.s.pbInstrBuf) \
            { /* likely */ } \
            else if (   (a_pTb)->aRanges[(a_idxRange)].offPhysPage != offNew \
                     && pVCpu->iem.s.pbInstrBuf) \
            { \
                Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; branching/2; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + offNew, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckBranchMisses); \
                return VINF_IEM_REEXEC_BREAK; \
            } \
            else \
            { \
                Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; branching/2; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + offNew, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
            } \
        } \
    } while(0)
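
/*
 * Two distinct failure modes above: if the branch target's page offset no
 * longer matches the range's offPhysPage, the TB merely mispredicted the
 * branch target (VINF_IEM_REEXEC_BREAK, new TB lookup); if the offset matches
 * but the physical page differs, the guest code was remapped and the TB is
 * obsolete.
 */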

/**
 * Macro that implements PC check after a conditional branch.
 */
#define BODY_CHECK_PC_AFTER_BRANCH(a_pTb, a_idxRange, a_cbInstr) do { \
        /* Is RIP within the current code page? */ \
        Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_IS_64BIT_CODE(pVCpu)); \
        uint64_t const uPc = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base; \
        uint64_t const off = uPc - pVCpu->iem.s.uInstrBufPc; \
        Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK)); \
        RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                 | (a_pTb)->aRanges[(a_idxRange)].offPhysPage; \
        if (   GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + off \
            && off < pVCpu->iem.s.cbInstrBufTotal) \
        { /* we're good */ } \
        else \
        { \
            Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; GCPhysWithOffset=%RGp hoped for %RGp, pbInstrBuf=%p - #%u\n", \
                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                  pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
            RT_NOREF(a_cbInstr); \
            STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckBranchMisses); \
            return VINF_IEM_REEXEC_BREAK; \
        } \
    } while(0)
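
/*
 * Unlike BODY_LOAD_TLB_AFTER_BRANCH, this variant never reloads the TLB; it
 * is meant for branches expected to stay within the current code page, so any
 * mismatch is simply counted as a branch miss and execution falls back to a
 * regular TB lookup.
 */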

/**
 * Macro that considers whether we need CS.LIM checking after a branch or
 * crossing over to a new page.
 *
 * This may long jump if we're raising a \#PF, \#GP or similar trouble.
 */
#define BODY_CONSIDER_CS_LIM_CHECKING(a_pTb, a_cbInstr) do { \
        int64_t const offFromLim = (int64_t)pVCpu->cpum.GstCtx.cs.u32Limit - (int64_t)pVCpu->cpum.GstCtx.eip; \
        if (offFromLim >= GUEST_PAGE_SIZE + 16 - (int32_t)(pVCpu->cpum.GstCtx.cs.u64Base & GUEST_PAGE_OFFSET_MASK)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB need CS.LIM: %p at %04x:%08RX64 LB %u; #%u offFromLim=%#RX64 CS.LIM=%#RX32 CS.BASE=%#RX64\n", \
                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), __LINE__, offFromLim, \
                  pVCpu->cpum.GstCtx.cs.u32Limit, pVCpu->cpum.GstCtx.cs.u64Base)); \
            RT_NOREF(a_pTb, a_cbInstr); \
            STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckNeedCsLimChecking); \
            return VINF_IEM_REEXEC_BREAK; \
        } \
    } while(0)
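
/*
 * Example: with CS.BASE = 0 and a flat CS.LIM = 0xffffffff, offFromLim always
 * clears the GUEST_PAGE_SIZE + 16 threshold, so flat code never takes this
 * break; only when the limit comes within roughly a page of EIP do we return
 * VINF_IEM_REEXEC_BREAK so that TB variants with explicit CS.LIM checking can
 * be used instead (cf. StatCheckNeedCsLimChecking).
 */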



/**
 * Built-in function that checks that EIP/IP + uParam0 is within CS.LIM,
 * raising a \#GP(0) if this isn't the case.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLim)
{
    uint32_t const cbInstr = (uint32_t)uParam0;
    RT_NOREF(uParam1, uParam2);
    BODY_CHECK_CS_LIM(cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for re-checking opcodes and CS.LIM after an instruction
 * that may have modified them.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for re-checking opcodes after an instruction that may have
 * modified them.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for re-checking opcodes and considering the need for CS.LIM
 * checking after an instruction that may have modified them.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    return VINF_SUCCESS;
}


/*
 * Post-branching checkers.
 */

/**
 * Built-in function for checking CS.LIM, checking the PC and checking opcodes
 * after conditional branching within the same page.
 *
 * @see iemThreadedFunc_BltIn_CheckPcAndOpcodes
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking the PC and checking opcodes after conditional
 * branching within the same page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckPcAndOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking the PC and checking opcodes and considering
 * the need for CS.LIM checking after conditional branching within the same
 * page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckPcAndOpcodesConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * transitioning to a different code page.
 *
 * The code page transition can either be natural, i.e. crossing over onto the
 * next page with the instruction starting at page offset zero, or it can be
 * the result of branching.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes when transitioning to
 * a different code page.
 *
 * The code page transition can either be natural, i.e. crossing over onto the
 * next page with the instruction starting at page offset zero, or it can be
 * the result of branching.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes and considering the
 * need for CS.LIM checking when transitioning to a different code page.
 *
 * The code page transition can either be natural, i.e. crossing over onto the
 * next page with the instruction starting at page offset zero, or it can be
 * the result of branching.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}



/*
 * Natural page crossing checkers.
 */

/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes on
 * both pages when transitioning to a different code page.
 *
 * This is used when the previous instruction requires revalidation of opcode
 * bytes and the current instruction straddles a page boundary with opcode
 * bytes in both the old and new page.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    return VINF_SUCCESS;
}
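
/*
 * Parameter packing for the page-crossing variants, as implied by the
 * decoding above: uParam0 carries the instruction length in its low 32 bits
 * and the number of instruction bytes on the first page (cbStartPage) in its
 * high 32 bits, while uParam1 and uParam2 carry the first range index and
 * offset.
 */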


/**
 * Built-in function for loading TLB and checking opcodes on both pages when
 * transitioning to a different code page.
 *
 * This is used when the previous instruction requires revalidation of opcode
 * bytes and the current instruction straddles a page boundary with opcode
 * bytes in both the old and new page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes on both pages and
 * considering the need for CS.LIM checking when transitioning to a different
 * code page.
 *
 * This is used when the previous instruction requires revalidation of opcode
 * bytes and the current instruction straddles a page boundary with opcode
 * bytes in both the old and new page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * advancing naturally to a different code page.
 *
 * Only opcodes on the new page are checked.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    //uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    RT_NOREF(uParam2);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes when advancing
 * naturally to a different code page.
 *
 * Only opcodes on the new page are checked.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    //uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    RT_NOREF(uParam2);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes and considering the
 * need for CS.LIM checking when advancing naturally to a different code page.
 *
 * Only opcodes on the new page are checked.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    //uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    RT_NOREF(uParam2);
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * advancing naturally to a different code page with first instr at byte 0.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    RT_NOREF(uParam2); //Assert(uParam2 == 0 /*offRange*/);
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
    Assert(pVCpu->iem.s.offCurInstrStart == 0);
    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes when advancing
 * naturally to a different code page with first instr at byte 0.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    RT_NOREF(uParam2); //Assert(uParam2 == 0 /*offRange*/);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
    Assert(pVCpu->iem.s.offCurInstrStart == 0);
    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes and considering the
 * need for CS.LIM checking when advancing naturally to a different code page
 * with first instr at byte 0.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    RT_NOREF(uParam2); //Assert(uParam2 == 0 /*offRange*/);
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
    Assert(pVCpu->iem.s.offCurInstrStart == 0);
    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
    return VINF_SUCCESS;
}