VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllThreadedFunctionsBltIn.cpp@ 100694

Last change on this file since 100694 was 100694, checked in by vboxsync, 17 months ago

IEM/VMM: Deal with opcode checking across page boundaries and tentatively for branches. bugref:10369

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 15.8 KB
Line 
1/* $Id: IEMAllThreadedFunctionsBltIn.cpp 100694 2023-07-25 10:34:22Z vboxsync $ */
2/** @file
3 * IEM - Instruction Decoding and Emulation, Built-in Threaded Functions.
4 *
5 * This is separate from IEMThreadedFunctions.cpp because it doesn't work
6 * with IEM_WITH_OPAQUE_DECODER_STATE defined.
7 */
8
9/*
10 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
11 *
12 * This file is part of VirtualBox base platform packages, as
13 * available from https://www.virtualbox.org.
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation, in version 3 of the
18 * License.
19 *
20 * This program is distributed in the hope that it will be useful, but
21 * WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 * General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, see <https://www.gnu.org/licenses>.
27 *
28 * SPDX-License-Identifier: GPL-3.0-only
29 */
30
31
32/*********************************************************************************************************************************
33* Header Files *
34*********************************************************************************************************************************/
35#define LOG_GROUP LOG_GROUP_IEM_RE_THREADED
36#define VMCPU_INCL_CPUM_GST_CTX
37#include <VBox/vmm/iem.h>
38#include <VBox/vmm/cpum.h>
39#include <VBox/vmm/apic.h>
40#include <VBox/vmm/pdm.h>
41#include <VBox/vmm/pgm.h>
42#include <VBox/vmm/iom.h>
43#include <VBox/vmm/em.h>
44#include <VBox/vmm/hm.h>
45#include <VBox/vmm/nem.h>
46#include <VBox/vmm/gim.h>
47#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
48# include <VBox/vmm/em.h>
49# include <VBox/vmm/hm_svm.h>
50#endif
51#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
52# include <VBox/vmm/hmvmxinline.h>
53#endif
54#include <VBox/vmm/tm.h>
55#include <VBox/vmm/dbgf.h>
56#include <VBox/vmm/dbgftrace.h>
57#include "IEMInternal.h"
58#include <VBox/vmm/vmcc.h>
59#include <VBox/log.h>
60#include <VBox/err.h>
61#include <VBox/param.h>
62#include <VBox/dis.h>
63#include <VBox/disopcode-x86-amd64.h>
64#include <iprt/asm-math.h>
65#include <iprt/assert.h>
66#include <iprt/string.h>
67#include <iprt/x86.h>
68
69#include "IEMInline.h"
70
71
72
/**
 * Common worker: flags the current translation block (TB) as obsolete and
 * requests re-execution.
 *
 * Called by the opcode/page revalidation bodies below when the recorded
 * opcode bytes no longer match guest memory or the expected physical page.
 *
 * @returns VINF_IEM_REEXEC_MODE_CHANGED (borrowed status code, see todo).
 * @param   pVCpu   The cross context virtual CPU structure of the calling
 *                  thread; pVCpu->iem.s.pCurTbR3 is the TB being retired.
 */
static VBOXSTRICTRC iemThreadeFuncWorkerObsoleteTb(PVMCPUCC pVCpu)
{
    iemThreadedTbObsolete(pVCpu, pVCpu->iem.s.pCurTbR3);
    return VINF_IEM_REEXEC_MODE_CHANGED; /** @todo different status code... */
}
78
79
80
81/**
82 * Built-in function that compares the fExec mask against uParam0.
83 */
84IEM_DECL_IMPL_DEF(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckMode,
85 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2))
86{
87 uint32_t const fExpectedExec = (uint32_t)uParam0;
88 if (pVCpu->iem.s.fExec == fExpectedExec)
89 return VINF_SUCCESS;
90 LogFlow(("Mode changed at %04x:%08RX64: %#x -> %#x (xor: %#x)\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
91 fExpectedExec, pVCpu->iem.s.fExec, fExpectedExec ^ pVCpu->iem.s.fExec));
92 RT_NOREF(uParam1, uParam2);
93 return VINF_IEM_REEXEC_MODE_CHANGED;
94}
95
96
97DECL_FORCE_INLINE(RTGCPHYS) iemTbGetRangePhysPageAddr(PCIEMTB pTb, uint8_t idxRange)
98{
99 Assert(idxRange < RT_MIN(pTb->cRanges, RT_ELEMENTS(pTb->aRanges)));
100 uint8_t const idxPage = pTb->aRanges[idxRange].idxPhysPage;
101 Assert(idxPage <= RT_ELEMENTS(pTb->aGCPhysPages));
102 if (idxPage == 0)
103 return pTb->GCPhysPc & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
104 Assert(!(pTb->aGCPhysPages[idxPage - 1] & GUEST_PAGE_OFFSET_MASK));
105 return pTb->aGCPhysPages[idxPage - 1];
106}
107
108
/**
 * Macro that implements the 16/32-bit CS.LIM check, as this is done by a
 * number of functions.
 *
 * The instruction occupies bytes eip thru eip + a_cbInstr - 1 and CS.LIM is
 * an inclusive limit, so the fetch is valid iff eip <= CS.LIM and
 * eip + a_cbInstr - 1 <= CS.LIM.  The second clause below is that test
 * rearranged to avoid 32-bit overflow.  Raises \#GP(0) (by returning) when
 * the instruction lies or extends beyond the limit.
 *
 * Note! The original form (eip - u32Limit >= cbInstr) accepted EIP values far
 *       beyond the limit and faulted eip == limit for 1 byte instructions.
 */
#define BODY_CHECK_CS_LIM(a_cbInstr) do { \
        if (RT_LIKELY(   pVCpu->cpum.GstCtx.eip <= pVCpu->cpum.GstCtx.cs.u32Limit \
                      && pVCpu->cpum.GstCtx.cs.u32Limit - pVCpu->cpum.GstCtx.eip >= (uint32_t)(a_cbInstr) - 1U)) \
        { /* likely */ } \
        else \
        { \
            Log7(("EIP out of bounds at %04x:%08RX32 LB %u - CS.LIM=%#RX32\n", \
                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, (a_cbInstr), pVCpu->cpum.GstCtx.cs.u32Limit)); \
            return iemRaiseGeneralProtectionFault0(pVCpu); \
        } \
    } while(0)
123
/**
 * Macro that implements opcode (re-)checking.
 *
 * Compares the opcode bytes currently mapped via pbInstrBuf against the copy
 * recorded in the TB for the given range; on mismatch (self-modifying or
 * remapped code) the TB is retired via iemThreadeFuncWorkerObsoleteTb.
 *
 * Note! The range-count assertion allows cRanges == RT_ELEMENTS(aRanges),
 *       matching the RT_MIN clamp in iemTbGetRangePhysPageAddr; a fully
 *       populated TB is legitimate.
 */
#define BODY_CHECK_OPCODES(a_pTb, a_idxRange, a_offRange, a_cbInstr) do { \
        Assert((a_idxRange) < (a_pTb)->cRanges && (a_pTb)->cRanges <= RT_ELEMENTS((a_pTb)->aRanges)); \
        Assert((a_offRange) < (a_pTb)->aRanges[(a_idxRange)].cbOpcodes); \
        /* We can use pbInstrBuf here as it will be updated when branching (and prior to executing a TB). */ \
        if (RT_LIKELY(memcmp(&pVCpu->iem.s.pbInstrBuf[(a_pTb)->aRanges[(a_idxRange)].offPhysPage + (a_offRange)], \
                             &(a_pTb)->pabOpcodes[ (a_pTb)->aRanges[(a_idxRange)].offOpcodes + (a_offRange)], \
                             (a_pTb)->aRanges[(a_idxRange)].cbOpcodes - (a_offRange)) == 0)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; range %u, off %#x LB %#x + %#x; #%u\n", (a_pTb), \
                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), (a_idxRange), \
                  (a_pTb)->aRanges[(a_idxRange)].offOpcodes, (a_pTb)->aRanges[(a_idxRange)].cbOpcodes, (a_offRange), __LINE__)); \
            RT_NOREF(a_cbInstr); \
            return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
        } \
    } while(0)
144
/**
 * Macro that implements TLB loading and updating pbInstrBuf updating for an
 * instruction crossing into a new page.
 *
 * The a_offInstr argument is the number of instruction bytes that sat on the
 * previous page; the fetch state is primed so that the next byte to fetch is
 * the first byte of the new page (offset GUEST_PAGE_SIZE relative to the old
 * one), then iemOpcodeFetchBytesJmp re-resolves the mapping.  Afterwards the
 * resolved physical page is checked against what the TB was compiled for; a
 * mismatch (or failed mapping) retires the TB as obsolete.
 *
 * This may long jump if we're raising a \#PF, \#GP or similar trouble.
 */
#define BODY_LOAD_TLB_FOR_NEW_PAGE(a_pTb, a_offInstr, a_idxRange, a_cbInstr) do { \
        /* Invalidate the buffer pointer first so the fetcher must re-translate. */ \
        pVCpu->iem.s.pbInstrBuf       = NULL; \
        pVCpu->iem.s.offCurInstrStart = GUEST_PAGE_SIZE - (a_offInstr); \
        pVCpu->iem.s.offInstrNextByte = GUEST_PAGE_SIZE; \
        iemOpcodeFetchBytesJmp(pVCpu, 0, NULL); \
        \
        /* Verify we landed on the physical page the TB expects for this range. */ \
        RTGCPHYS const GCPhysNewPage = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange); \
        if (RT_LIKELY(   pVCpu->iem.s.GCPhysInstrBuf == GCPhysNewPage \
                      && pVCpu->iem.s.pbInstrBuf)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; crossing at %#x; GCPhys=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), (a_offInstr), \
                  pVCpu->iem.s.GCPhysInstrBuf, GCPhysNewPage, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
            RT_NOREF(a_cbInstr); \
            return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
        } \
    } while(0)
170
/**
 * Macro that implements TLB loading and updating pbInstrBuf updating when
 * branching or when crossing a page on an instruction boundary.
 *
 * This differs from BODY_LOAD_TLB_FOR_NEW_PAGE in that it will first check if
 * it is an inter-page branch: when the flat PC still falls inside the current
 * instruction buffer no reload is needed.  Otherwise the fetch state is reset
 * and the new mapping is validated against the physical page the TB was
 * compiled for, retiring the TB as obsolete on mismatch.
 *
 * This may long jump if we're raising a \#PF, \#GP or similar trouble.
 */
#define BODY_LOAD_TLB_FOR_BRANCH(a_pTb, a_idxRange, a_cbInstr) do { \
        /* Is RIP within the current code page? */ \
        Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_IS_64BIT_CODE(pVCpu)); /* 64-bit code implies flat CS base. */ \
        uint64_t const uPc = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base; \
        uint64_t const off = uPc - pVCpu->iem.s.uInstrBufPc; /* unsigned wrap makes this a range check */ \
        if (off < pVCpu->iem.s.cbInstrBufTotal) \
            Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK)); \
        else \
        { \
            /* Must translate new RIP. */ \
            pVCpu->iem.s.pbInstrBuf       = NULL; \
            pVCpu->iem.s.offCurInstrStart = 0; \
            pVCpu->iem.s.offInstrNextByte = 0; \
            iemOpcodeFetchBytesJmp(pVCpu, 0, NULL); \
            \
            /* Verify the new mapping matches the page recorded in the TB range. */ \
            RTGCPHYS const GCPhysNewPage = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange); \
            if (RT_LIKELY(   pVCpu->iem.s.GCPhysInstrBuf == GCPhysNewPage \
                          && pVCpu->iem.s.pbInstrBuf)) \
            { /* likely */ } \
            else \
            { \
                Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; branching; GCPhys=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf, GCPhysNewPage, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
            } \
        } \
    } while(0)
209
210
211/**
212 * Built-in function that checks the EIP/IP + uParam0 is within CS.LIM,
213 * raising a \#GP(0) if this isn't the case.
214 */
215IEM_DECL_IMPL_DEF(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLim,
216 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2))
217{
218 uint32_t const cbInstr = (uint32_t)uParam0;
219 RT_NOREF(uParam1, uParam2);
220 BODY_CHECK_CS_LIM(cbInstr);
221 return VINF_SUCCESS;
222}
223
224
225/**
226 * Built-in function for re-checking opcodes and CS.LIM after an instruction
227 * that may have modified them.
228 */
229IEM_DECL_IMPL_DEF(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndOpcodes,
230 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2))
231{
232 PCIEMTB const pTb = pVCpu->iem.s.pCurTbR3;
233 uint32_t const cbInstr = (uint32_t)uParam0;
234 uint32_t const idxRange = (uint32_t)uParam1;
235 uint32_t const offRange = (uint32_t)uParam2;
236 BODY_CHECK_CS_LIM(cbInstr);
237 BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
238 return VINF_SUCCESS;
239}
240
241
242/**
243 * Built-in function for re-checking opcodes after an instruction that may have
244 * modified them.
245 */
246IEM_DECL_IMPL_DEF(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckOpcodes,
247 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2))
248{
249 PCIEMTB const pTb = pVCpu->iem.s.pCurTbR3;
250 uint32_t const cbInstr = (uint32_t)uParam0;
251 uint32_t const idxRange = (uint32_t)uParam1;
252 uint32_t const offRange = (uint32_t)uParam2;
253 BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
254 return VINF_SUCCESS;
255}
256
257
258/**
259 * Built-in function for checking CS.LIM, loading TLB and checking opcodes on
260 * both pages when transitioning to a different code page.
261 *
262 * This is used when the previous instruction requires revalidation of opcodes
263 * bytes and the current instruction stries a page boundrary with opcode bytes
264 * in both the old and new page.
265 *
266 * @see iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb
267 */
268IEM_DECL_IMPL_DEF(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb,
269 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2))
270{
271 PCIEMTB const pTb = pVCpu->iem.s.pCurTbR3;
272 uint32_t const cbInstr = (uint32_t)uParam0;
273 uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
274 uint32_t const idxRange1 = (uint32_t)uParam1;
275 uint32_t const offRange1 = (uint32_t)uParam2;
276 uint32_t const idxRange2 = idxRange1 + 1;
277 BODY_CHECK_CS_LIM(cbInstr);
278 BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
279 BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
280 BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
281 return VINF_SUCCESS;
282}
283
284
285/**
286 * Built-in function for loading TLB and checking opcodes on both pages when
287 * transitioning to a different code page.
288 *
289 * This is used when the previous instruction requires revalidation of opcodes
290 * bytes and the current instruction stries a page boundrary with opcode bytes
291 * in both the old and new page.
292 *
293 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb
294 */
295IEM_DECL_IMPL_DEF(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb,
296 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2))
297{
298 PCIEMTB const pTb = pVCpu->iem.s.pCurTbR3;
299 uint32_t const cbInstr = (uint32_t)uParam0;
300 uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
301 uint32_t const idxRange1 = (uint32_t)uParam1;
302 uint32_t const offRange1 = (uint32_t)uParam2;
303 uint32_t const idxRange2 = idxRange1 + 1;
304 BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
305 BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
306 BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
307 return VINF_SUCCESS;
308}
309
310
311/**
312 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
313 * transitioning to a different code page.
314 *
315 * The code page transition can either be natural over onto the next page (with
316 * the instruction starting at page offset zero) or by means of branching.
317 *
318 * @see iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb
319 */
320IEM_DECL_IMPL_DEF(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb,
321 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2))
322{
323 PCIEMTB const pTb = pVCpu->iem.s.pCurTbR3;
324 uint32_t const cbInstr = (uint32_t)uParam0;
325 uint32_t const idxRange = (uint32_t)uParam1;
326 uint32_t const offRange = (uint32_t)uParam2;
327 BODY_CHECK_CS_LIM(cbInstr);
328 BODY_LOAD_TLB_FOR_BRANCH(pTb, idxRange, cbInstr);
329 BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
330 return VINF_SUCCESS;
331}
332
333
334/**
335 * Built-in function for loading TLB and checking opcodes when transitioning to
336 * a different code page.
337 *
338 * The code page transition can either be natural over onto the next page (with
339 * the instruction starting at page offset zero) or by means of branching.
340 *
341 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb
342 */
343IEM_DECL_IMPL_DEF(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb,
344 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2))
345{
346 PCIEMTB const pTb = pVCpu->iem.s.pCurTbR3;
347 uint32_t const cbInstr = (uint32_t)uParam0;
348 uint32_t const idxRange = (uint32_t)uParam1;
349 uint32_t const offRange = (uint32_t)uParam2;
350 BODY_LOAD_TLB_FOR_BRANCH(pTb, idxRange, cbInstr);
351 BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
352 return VINF_SUCCESS;
353}
354
355
356/**
357 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
358 * advancing naturally to a different code page.
359 *
360 * Only opcodes on the new page is checked.
361 *
362 * @see iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb
363 */
364IEM_DECL_IMPL_DEF(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb,
365 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2))
366{
367 PCIEMTB const pTb = pVCpu->iem.s.pCurTbR3;
368 uint32_t const cbInstr = (uint32_t)uParam0;
369 uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
370 uint32_t const idxRange1 = (uint32_t)uParam1;
371 //uint32_t const offRange1 = (uint32_t)uParam2;
372 uint32_t const idxRange2 = idxRange1 + 1;
373 BODY_CHECK_CS_LIM(cbInstr);
374 BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
375 BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
376 RT_NOREF(uParam2);
377 return VINF_SUCCESS;
378}
379
380
381/**
382 * Built-in function for loading TLB and checking opcodes when advancing
383 * naturally to a different code page.
384 *
385 * Only opcodes on the new page is checked.
386 *
387 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb
388 */
389IEM_DECL_IMPL_DEF(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb,
390 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2))
391{
392 PCIEMTB const pTb = pVCpu->iem.s.pCurTbR3;
393 uint32_t const cbInstr = (uint32_t)uParam0;
394 uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
395 uint32_t const idxRange1 = (uint32_t)uParam1;
396 //uint32_t const offRange1 = (uint32_t)uParam2;
397 uint32_t const idxRange2 = idxRange1 + 1;
398 BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
399 BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
400 RT_NOREF(uParam2);
401 return VINF_SUCCESS;
402}
403
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette