VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllN8veEmit-x86.h@ 103659

Last change on this file since 103659 was 103657, checked in by vboxsync, 14 months ago

VMM/IEM: A little 'test' optimization, saving a register alloc+fetch when both 'test' operands refer to the same register. bugref:10376

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 18.6 KB
Line 
1/* $Id: IEMAllN8veEmit-x86.h 103657 2024-03-04 09:53:49Z vboxsync $ */
2/** @file
3 * IEM - Native Recompiler, x86 Target - Code Emitters.
4 */
5
6/*
7 * Copyright (C) 2023-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28#ifndef VMM_INCLUDED_SRC_VMMAll_target_x86_IEMAllN8veEmit_x86_h
29#define VMM_INCLUDED_SRC_VMMAll_target_x86_IEMAllN8veEmit_x86_h
30#ifndef RT_WITHOUT_PRAGMA_ONCE
31# pragma once
32#endif
33
34
35/**
36 * This is an implementation of IEM_EFL_UPDATE_STATUS_BITS_FOR_LOGICAL
37 * and friends.
38 *
39 * It takes liveness stuff into account.
40 */
/**
 * This is an implementation of IEM_EFL_UPDATE_STATUS_BITS_FOR_LOGICAL
 * and friends.
 *
 * It takes liveness stuff into account.
 *
 * Logical operations only need PF, ZF and SF derived from the result; the
 * other status bits (OF, CF, AF) are simply cleared here.
 *
 * @returns New code buffer offset.
 * @param   pReNative       The native recompiler state.
 * @param   off             Current code buffer offset.
 * @param   idxVarEfl       Variable index of the guest EFLAGS value.
 * @param   cOpBits         The operand width in bits (8, 16, 32 or 64).
 *                          Only used on ARM64.
 * @param   idxRegResult    Host register holding the operation result to
 *                          derive the flags from.  Not used on AMD64 (the
 *                          flags are taken from the host EFLAGS via pushf),
 *                          so callers may pass UINT8_MAX there.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitEFlagsForLogical(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarEfl,
                              uint8_t cOpBits, uint8_t idxRegResult)
{
#ifdef IEMNATIVE_WITH_LIVENESS_ANALYSIS
    if (1) /** @todo check if all bits are clobbered. */
#endif
    {
#ifdef RT_ARCH_AMD64
        /*
         * Collect flags and merge them with eflags.
         */
        /** @todo we could alternatively use SAHF here when host rax is free since,
         *        OF is cleared. */
        PIEMNATIVEINSTR pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
        /* pushf - do this before any reg allocations as they may emit instructions too. */
        pCodeBuf[off++] = 0x9c;

        uint8_t const idxRegEfl = iemNativeVarRegisterAcquire(pReNative, idxVarEfl, &off, true /*fInitialized*/);
        uint8_t const idxTmpReg = iemNativeRegAllocTmp(pReNative, &off);
        pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2 + 7 + 7 + 3);
        /* pop tmp - REX.B prefix needed for r8..r15. */
        if (idxTmpReg >= 8)
            pCodeBuf[off++] = X86_OP_REX_B;
        pCodeBuf[off++] = 0x58 + (idxTmpReg & 7);
        /* and tmp, X86_EFL_PF | X86_EFL_ZF | X86_EFL_SF */
        off = iemNativeEmitAndGpr32ByImmEx(pCodeBuf, off, idxTmpReg, X86_EFL_PF | X86_EFL_ZF | X86_EFL_SF);
        /* Clear the status bits in EFLs. */
        off = iemNativeEmitAndGpr32ByImmEx(pCodeBuf, off, idxRegEfl, ~X86_EFL_STATUS_BITS);
        /* OR in the flags we collected. */
        off = iemNativeEmitOrGpr32ByGprEx(pCodeBuf, off, idxRegEfl, idxTmpReg);
        iemNativeVarRegisterRelease(pReNative, idxVarEfl);
        iemNativeRegFreeTmp(pReNative, idxTmpReg);
        RT_NOREF(cOpBits, idxRegResult);

#elif defined(RT_ARCH_ARM64)
        /*
         * Calculate flags.
         */
        uint8_t const idxRegEfl = iemNativeVarRegisterAcquire(pReNative, idxVarEfl, &off, true /*fInitialized*/);
        uint8_t const idxTmpReg = iemNativeRegAllocTmp(pReNative, &off);
        PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 15);

        /* Clear the status bits. ~0x8D5 (or ~0x8FD) can't be AND immediate, so use idxTmpReg for constant. */
        off = iemNativeEmitLoadGpr32ImmEx(pCodeBuf, off, idxTmpReg, ~X86_EFL_STATUS_BITS);
        off = iemNativeEmitAndGpr32ByGpr32Ex(pCodeBuf, off, idxRegEfl, idxTmpReg);

        /* Calculate zero: mov tmp, zf; cmp result,zero; csel.eq tmp,tmp,wxr */
        if (cOpBits > 32)
            off = iemNativeEmitCmpGprWithGprEx(pCodeBuf, off, idxRegResult, ARMV8_A64_REG_XZR);
        else
            off = iemNativeEmitCmpGpr32WithGprEx(pCodeBuf, off, idxRegResult, ARMV8_A64_REG_XZR);
        pCodeBuf[off++] = Armv8A64MkInstrCSet(idxTmpReg, kArmv8InstrCond_Eq, false /*f64Bit*/);
        pCodeBuf[off++] = Armv8A64MkInstrOrr(idxRegEfl, idxRegEfl, idxTmpReg, false /*f64Bit*/, X86_EFL_ZF_BIT);

        /* Calculate signed: We could use the native SF flag, but it's just as simple to calculate it by shifting. */
        pCodeBuf[off++] = Armv8A64MkInstrLsrImm(idxTmpReg, idxRegResult, cOpBits - 1, cOpBits > 32 /*f64Bit*/);
# if 0 /* BFI and ORR should have the same performance characteristics, so use BFI like we'll have to do for SUB/ADD/++. */
        pCodeBuf[off++] = Armv8A64MkInstrOrr(idxRegEfl, idxRegEfl, idxTmpReg, false /*f64Bit*/, X86_EFL_SF_BIT);
# else
        pCodeBuf[off++] = Armv8A64MkInstrBfi(idxRegEfl, idxTmpReg, X86_EFL_SF_BIT, 1, false /*f64Bit*/);
# endif

        /* Calculate 8-bit parity of the result by folding the low byte onto
           itself with shifted XORs, leaving the parity in bit 0. */
        pCodeBuf[off++] = Armv8A64MkInstrEor(idxTmpReg, idxRegResult, idxRegResult, false /*f64Bit*/,
                                             4 /*offShift6*/, kArmv8A64InstrShift_Lsr);
        pCodeBuf[off++] = Armv8A64MkInstrEor(idxTmpReg, idxTmpReg, idxTmpReg, false /*f64Bit*/,
                                             2 /*offShift6*/, kArmv8A64InstrShift_Lsr);
        pCodeBuf[off++] = Armv8A64MkInstrEor(idxTmpReg, idxTmpReg, idxTmpReg, false /*f64Bit*/,
                                             1 /*offShift6*/, kArmv8A64InstrShift_Lsr);
        Assert(Armv8A64ConvertImmRImmS2Mask32(0, 0) == 1);
        /* Invert bit 0 (x86 PF is set on even parity) and insert it into EFLAGS. */
        pCodeBuf[off++] = Armv8A64MkInstrEorImm(idxTmpReg, idxTmpReg, 0, 0, false /*f64Bit*/);
        pCodeBuf[off++] = Armv8A64MkInstrBfi(idxRegEfl, idxTmpReg, X86_EFL_PF_BIT, 1, false /*f64Bit*/);

        iemNativeVarRegisterRelease(pReNative, idxVarEfl);
        iemNativeRegFreeTmp(pReNative, idxTmpReg);
#else
# error "port me"
#endif
        IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    }
    return off;
}
124
125
/**
 * Emits code for a register/register AND and the associated EFLAGS update.
 *
 * @returns New code buffer offset.
 * @param   pReNative   The native recompiler state.
 * @param   off         Current code buffer offset.
 * @param   idxVarDst   Variable index of the destination operand (updated).
 * @param   idxVarSrc   Variable index of the source operand.
 * @param   idxVarEfl   Variable index of the guest EFLAGS value.
 * @param   cOpBits     The operand width in bits (8, 16, 32 or 64).
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmit_and_r_r_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
                          uint8_t idxVarDst, uint8_t idxVarSrc, uint8_t idxVarEfl, uint8_t cOpBits)
{
    /*
     * The AND instruction will clear OF, CF and AF (latter is undefined),
     * so we don't need the initial destination value.
     *
     * On AMD64 we must use the correctly sized AND instructions to get the
     * right EFLAGS.SF value, while the rest will just lump 16-bit and 8-bit
     * in the 32-bit ones.
     */
    /** @todo we could use ANDS on ARM64 and get the ZF for free for all
     *        variants, and SF for 32-bit and 64-bit. */
    uint8_t const idxRegDst = iemNativeVarRegisterAcquire(pReNative, idxVarDst, &off, true /*fInitialized*/);
    uint8_t const idxRegSrc = iemNativeVarRegisterAcquire(pReNative, idxVarSrc, &off, true /*fInitialized*/);
    //off = iemNativeEmitBrk(pReNative, off, 0x2222);
    switch (cOpBits)
    {
        case 32:
#ifndef RT_ARCH_AMD64
        case 16: /* non-AMD64 hosts lump 16-bit and 8-bit into the 32-bit op (see above). */
        case 8:
#endif
            off = iemNativeEmitAndGpr32ByGpr32(pReNative, off, idxRegDst, idxRegSrc);
            break;

        default: AssertFailed(); RT_FALL_THRU();
        case 64:
            off = iemNativeEmitAndGprByGpr(pReNative, off, idxRegDst, idxRegSrc);
            break;

#ifdef RT_ARCH_AMD64
        case 16:
        {
            /* Operand-size prefix followed by the regular 32-bit AND emitter. */
            PIEMNATIVEINSTR pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
            pCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
            off = iemNativeEmitAndGpr32ByGpr32(pReNative, off, idxRegDst, idxRegSrc);
            break;
        }

        case 8:
        {
            /* Byte-sized AND (opcode 0x22, AND r8, r/m8), hand-assembled.
               A REX prefix is needed for r8..r15, and a bare REX for accessing
               spl/bpl/sil/dil instead of ah/ch/dh/bh. */
            PIEMNATIVEINSTR pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
            if (idxRegDst >= 8 || idxRegSrc >= 8)
                pCodeBuf[off++] = (idxRegDst >= 8 ? X86_OP_REX_R : 0) | (idxRegSrc >= 8 ? X86_OP_REX_B : 0);
            else if (idxRegDst >= 4 || idxRegSrc >= 4)
                pCodeBuf[off++] = X86_OP_REX;
            pCodeBuf[off++] = 0x22;
            pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, idxRegDst & 7, idxRegSrc & 7);
            IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
            break;
        }
#endif
    }
    iemNativeVarRegisterRelease(pReNative, idxVarSrc);

    off = iemNativeEmitEFlagsForLogical(pReNative, off, idxVarEfl, cOpBits, idxRegDst);
    iemNativeVarRegisterRelease(pReNative, idxVarDst);
    return off;
}
187
188
/**
 * Emits code for a register/register TEST and the associated EFLAGS update.
 *
 * @returns New code buffer offset.
 * @param   pReNative   The native recompiler state.
 * @param   off         Current code buffer offset.
 * @param   idxVarDst   Variable index of the first operand (not modified).
 * @param   idxVarSrc   Variable index of the second operand; may be the same
 *                      variable as idxVarDst ('test reg,reg').
 * @param   idxVarEfl   Variable index of the guest EFLAGS value.
 * @param   cOpBits     The operand width in bits (8, 16, 32 or 64).
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmit_test_r_r_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
                           uint8_t idxVarDst, uint8_t idxVarSrc, uint8_t idxVarEfl, uint8_t cOpBits)
{
    /*
     * The TEST instruction will clear OF, CF and AF (latter is undefined),
     * so we don't need the initial destination value.
     *
     * On AMD64 we use the matching native instruction.
     *
     * On ARM64 we need a real register for the AND result so we can calculate
     * PF correctly for it. This means that we have to use a three operand
     * AND variant, which makes the code widely different from AMD64.
     */
    /** @todo we could use ANDS on ARM64 and get the ZF for free for all
     *        variants, and SF for 32-bit and 64-bit. */
    uint8_t const idxRegDst = iemNativeVarRegisterAcquire(pReNative, idxVarDst, &off, true /*fInitialized*/);
    uint8_t const idxRegSrc = idxVarSrc == idxVarDst ? idxRegDst /* special case of 'test samereg,samereg' */
                            : iemNativeVarRegisterAcquire(pReNative, idxVarSrc, &off, true /*fInitialized*/);
#ifndef RT_ARCH_AMD64
    /* Temporary result register for the PF calculation (see above). */
    uint8_t const idxRegResult = iemNativeRegAllocTmp(pReNative, &off);
#endif
//    off = iemNativeEmitBrk(pReNative, off, 0x2222);
    PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, RT_ARCH_VAL == RT_ARCH_VAL_AMD64 ? 4 : 1);
#ifdef RT_ARCH_ARM64
    pCodeBuf[off++] = Armv8A64MkInstrAnd(idxRegResult, idxRegDst, idxRegSrc, cOpBits > 32 /*f64Bit*/);

#elif defined(RT_ARCH_AMD64)
    /* Hand-assembled TEST r/m,r (opcodes 0x84/0x85); note that the r/m field
       holds the destination (REX.B) and the reg field the source (REX.R). */
    switch (cOpBits)
    {
        case 16:
            pCodeBuf[off++] = X86_OP_PRF_SIZE_OP; /* operand-size prefix, then same as 32-bit */
            RT_FALL_THRU();
        case 32:
            if (idxRegDst >= 8 || idxRegSrc >= 8)
                pCodeBuf[off++] = (idxRegDst >= 8 ? X86_OP_REX_B : 0) | (idxRegSrc >= 8 ? X86_OP_REX_R : 0);
            pCodeBuf[off++] = 0x85;
            break;

        default: AssertFailed(); RT_FALL_THRU();
        case 64:
            pCodeBuf[off++] = X86_OP_REX_W | (idxRegDst >= 8 ? X86_OP_REX_B : 0) | (idxRegSrc >= 8 ? X86_OP_REX_R : 0);
            pCodeBuf[off++] = 0x85;
            break;

        case 8:
            /* Bare REX needed for spl/bpl/sil/dil instead of ah/ch/dh/bh. */
            if (idxRegDst >= 8 || idxRegSrc >= 8)
                pCodeBuf[off++] = (idxRegDst >= 8 ? X86_OP_REX_B : 0) | (idxRegSrc >= 8 ? X86_OP_REX_R : 0);
            else if (idxRegDst >= 4 || idxRegSrc >= 4)
                pCodeBuf[off++] = X86_OP_REX;
            pCodeBuf[off++] = 0x84;
            break;
    }
    pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, idxRegSrc & 7, idxRegDst & 7);
#else
# error "port me"
#endif
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    if (idxVarSrc != idxVarDst) /* only one acquisition in the samereg case */
        iemNativeVarRegisterRelease(pReNative, idxVarSrc);
    iemNativeVarRegisterRelease(pReNative, idxVarDst);

#ifdef RT_ARCH_AMD64
    off = iemNativeEmitEFlagsForLogical(pReNative, off, idxVarEfl, cOpBits, UINT8_MAX);
#else
    off = iemNativeEmitEFlagsForLogical(pReNative, off, idxVarEfl, cOpBits, idxRegResult);
    iemNativeRegFreeTmp(pReNative, idxRegResult);
#endif
    return off;
}
259
260
/**
 * Emits code for a register/register OR and the associated EFLAGS update.
 *
 * @returns New code buffer offset.
 * @param   pReNative   The native recompiler state.
 * @param   off         Current code buffer offset.
 * @param   idxVarDst   Variable index of the destination operand (updated).
 * @param   idxVarSrc   Variable index of the source operand.
 * @param   idxVarEfl   Variable index of the guest EFLAGS value.
 * @param   cOpBits     The operand width in bits (8, 16, 32 or 64).
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmit_or_r_r_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
                         uint8_t idxVarDst, uint8_t idxVarSrc, uint8_t idxVarEfl, uint8_t cOpBits)
{
    /*
     * The OR instruction will clear OF, CF and AF (latter is undefined),
     * so we don't need the initial destination value.
     *
     * On AMD64 we must use the correctly sized OR instructions to get the
     * right EFLAGS.SF value, while the rest will just lump 16-bit and 8-bit
     * in the 32-bit ones.
     */
    uint8_t const idxRegDst = iemNativeVarRegisterAcquire(pReNative, idxVarDst, &off, true /*fInitialized*/);
    uint8_t const idxRegSrc = iemNativeVarRegisterAcquire(pReNative, idxVarSrc, &off, true /*fInitialized*/);
    //off = iemNativeEmitBrk(pReNative, off, 0x2222);
    switch (cOpBits)
    {
        case 32:
#ifndef RT_ARCH_AMD64
        case 16: /* non-AMD64 hosts lump 16-bit and 8-bit into the 32-bit op (see above). */
        case 8:
#endif
            off = iemNativeEmitOrGpr32ByGpr(pReNative, off, idxRegDst, idxRegSrc);
            break;

        default: AssertFailed(); RT_FALL_THRU();
        case 64:
            off = iemNativeEmitOrGprByGpr(pReNative, off, idxRegDst, idxRegSrc);
            break;

#ifdef RT_ARCH_AMD64
        case 16:
        {
            /* Operand-size prefix followed by the regular 32-bit OR emitter. */
            PIEMNATIVEINSTR pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
            pCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
            off = iemNativeEmitOrGpr32ByGpr(pReNative, off, idxRegDst, idxRegSrc);
            break;
        }

        case 8:
        {
            /* Byte-sized OR (opcode 0x0a, OR r8, r/m8), hand-assembled.
               A REX prefix is needed for r8..r15, and a bare REX for accessing
               spl/bpl/sil/dil instead of ah/ch/dh/bh. */
            PIEMNATIVEINSTR pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
            if (idxRegDst >= 8 || idxRegSrc >= 8)
                pCodeBuf[off++] = (idxRegDst >= 8 ? X86_OP_REX_R : 0) | (idxRegSrc >= 8 ? X86_OP_REX_B : 0);
            else if (idxRegDst >= 4 || idxRegSrc >= 4)
                pCodeBuf[off++] = X86_OP_REX;
            pCodeBuf[off++] = 0x0a;
            pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, idxRegDst & 7, idxRegSrc & 7);
            IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
            break;
        }
#endif
    }
    iemNativeVarRegisterRelease(pReNative, idxVarSrc);

    off = iemNativeEmitEFlagsForLogical(pReNative, off, idxVarEfl, cOpBits, idxRegDst);
    iemNativeVarRegisterRelease(pReNative, idxVarDst);
    return off;
}
320
321
/**
 * Emits code for a register/register XOR and the associated EFLAGS update.
 *
 * @returns New code buffer offset.
 * @param   pReNative   The native recompiler state.
 * @param   off         Current code buffer offset.
 * @param   idxVarDst   Variable index of the destination operand (updated).
 * @param   idxVarSrc   Variable index of the source operand.
 * @param   idxVarEfl   Variable index of the guest EFLAGS value.
 * @param   cOpBits     The operand width in bits (8, 16, 32 or 64).
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmit_xor_r_r_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
                          uint8_t idxVarDst, uint8_t idxVarSrc, uint8_t idxVarEfl, uint8_t cOpBits)
{
    /*
     * The XOR instruction will clear OF, CF and AF (latter is undefined),
     * so we don't need the initial destination value.
     *
     * On AMD64 we must use the correctly sized XOR instructions to get the
     * right EFLAGS.SF value, while the rest will just lump 16-bit and 8-bit
     * in the 32-bit ones.
     */
    uint8_t const idxRegDst = iemNativeVarRegisterAcquire(pReNative, idxVarDst, &off, true /*fInitialized*/);
    uint8_t const idxRegSrc = iemNativeVarRegisterAcquire(pReNative, idxVarSrc, &off, true /*fInitialized*/);
    //off = iemNativeEmitBrk(pReNative, off, 0x2222);
    switch (cOpBits)
    {
        case 32:
#ifndef RT_ARCH_AMD64
        case 16: /* non-AMD64 hosts lump 16-bit and 8-bit into the 32-bit op (see above). */
        case 8:
#endif
            off = iemNativeEmitXorGpr32ByGpr32(pReNative, off, idxRegDst, idxRegSrc);
            break;

        default: AssertFailed(); RT_FALL_THRU();
        case 64:
            off = iemNativeEmitXorGprByGpr(pReNative, off, idxRegDst, idxRegSrc);
            break;

#ifdef RT_ARCH_AMD64
        case 16:
        {
            /* Operand-size prefix followed by the regular 32-bit XOR emitter. */
            PIEMNATIVEINSTR pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
            pCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
            off = iemNativeEmitXorGpr32ByGpr32(pReNative, off, idxRegDst, idxRegSrc);
            break;
        }

        case 8:
        {
            /* Byte-sized XOR (opcode 0x32, XOR r8, r/m8), hand-assembled.
               A REX prefix is needed for r8..r15, and a bare REX for accessing
               spl/bpl/sil/dil instead of ah/ch/dh/bh. */
            PIEMNATIVEINSTR pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
            if (idxRegDst >= 8 || idxRegSrc >= 8)
                pCodeBuf[off++] = (idxRegDst >= 8 ? X86_OP_REX_R : 0) | (idxRegSrc >= 8 ? X86_OP_REX_B : 0);
            else if (idxRegDst >= 4 || idxRegSrc >= 4)
                pCodeBuf[off++] = X86_OP_REX;
            pCodeBuf[off++] = 0x32;
            pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, idxRegDst & 7, idxRegSrc & 7);
            IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
            break;
        }
#endif
    }
    iemNativeVarRegisterRelease(pReNative, idxVarSrc);

    off = iemNativeEmitEFlagsForLogical(pReNative, off, idxVarEfl, cOpBits, idxRegDst);
    iemNativeVarRegisterRelease(pReNative, idxVarDst);
    return off;
}
381
382
383DECL_INLINE_THROW(uint32_t)
384iemNativeEmit_add_r_r_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
385 uint8_t idxVarDst, uint8_t idxVarSrc, uint8_t idxVarEfl, uint8_t cOpBits)
386{
387 RT_NOREF(idxVarDst, idxVarSrc, idxVarEfl, cOpBits);
388 AssertFailed();
389 return iemNativeEmitBrk(pReNative, off, 0x666);
390}
391
392
393DECL_INLINE_THROW(uint32_t)
394iemNativeEmit_adc_r_r_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
395 uint8_t idxVarDst, uint8_t idxVarSrc, uint8_t idxVarEfl, uint8_t cOpBits)
396{
397 RT_NOREF(idxVarDst, idxVarSrc, idxVarEfl, cOpBits);
398 AssertFailed();
399 return iemNativeEmitBrk(pReNative, off, 0x666);
400}
401
402
403DECL_INLINE_THROW(uint32_t)
404iemNativeEmit_sub_r_r_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
405 uint8_t idxVarDst, uint8_t idxVarSrc, uint8_t idxVarEfl, uint8_t cOpBits)
406{
407 RT_NOREF(idxVarDst, idxVarSrc, idxVarEfl, cOpBits);
408 AssertFailed();
409 return iemNativeEmitBrk(pReNative, off, 0x666);
410}
411
412
413DECL_INLINE_THROW(uint32_t)
414iemNativeEmit_cmp_r_r_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
415 uint8_t idxVarDst, uint8_t idxVarSrc, uint8_t idxVarEfl, uint8_t cOpBits)
416{
417 RT_NOREF(idxVarDst, idxVarSrc, idxVarEfl, cOpBits);
418 AssertFailed();
419 return iemNativeEmitBrk(pReNative, off, 0x666);
420}
421
422
423DECL_INLINE_THROW(uint32_t)
424iemNativeEmit_sbb_r_r_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
425 uint8_t idxVarDst, uint8_t idxVarSrc, uint8_t idxVarEfl, uint8_t cOpBits)
426{
427 RT_NOREF(idxVarDst, idxVarSrc, idxVarEfl, cOpBits);
428 AssertFailed();
429 return iemNativeEmitBrk(pReNative, off, 0x666);
430}
431
432
433DECL_INLINE_THROW(uint32_t)
434iemNativeEmit_imul_r_r_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
435 uint8_t idxVarDst, uint8_t idxVarSrc, uint8_t idxVarEfl, uint8_t cOpBits)
436{
437 RT_NOREF(idxVarDst, idxVarSrc, idxVarEfl, cOpBits);
438 AssertFailed();
439 return iemNativeEmitBrk(pReNative, off, 0x666);
440}
441
442
443DECL_INLINE_THROW(uint32_t)
444iemNativeEmit_popcnt_r_r_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
445 uint8_t idxVarDst, uint8_t idxVarSrc, uint8_t idxVarEfl, uint8_t cOpBits)
446{
447 RT_NOREF(idxVarDst, idxVarSrc, idxVarEfl, cOpBits);
448 AssertFailed();
449 return iemNativeEmitBrk(pReNative, off, 0x666);
450}
451
452
453DECL_INLINE_THROW(uint32_t)
454iemNativeEmit_tzcnt_r_r_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
455 uint8_t idxVarDst, uint8_t idxVarSrc, uint8_t idxVarEfl, uint8_t cOpBits)
456{
457 RT_NOREF(idxVarDst, idxVarSrc, idxVarEfl, cOpBits);
458 AssertFailed();
459 return iemNativeEmitBrk(pReNative, off, 0x666);
460}
461
462
463DECL_INLINE_THROW(uint32_t)
464iemNativeEmit_lzcnt_r_r_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
465 uint8_t idxVarDst, uint8_t idxVarSrc, uint8_t idxVarEfl, uint8_t cOpBits)
466{
467 RT_NOREF(idxVarDst, idxVarSrc, idxVarEfl, cOpBits);
468 AssertFailed();
469 return iemNativeEmitBrk(pReNative, off, 0x666);
470}
471
472
473#endif /* !VMM_INCLUDED_SRC_VMMAll_target_x86_IEMAllN8veEmit_x86_h */
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette