Changeset 101248 in vbox for trunk/src/VBox/VMM
Timestamp: Sep 24, 2023 2:48:56 AM
Location:  trunk/src/VBox/VMM
Files:     2 edited
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp
--- trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp (r101247)
+++ trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp (r101248)
@@ -501,5 +501,9 @@
 
     /* Allocate a chunk. */
+#ifdef RT_OS_DARWIN /** @todo oh carp! This isn't going to work very well with the unpredictability of the simple heap... */
+    void *pvChunk = RTMemPageAllocEx(pExecMemAllocator->cbChunk, 0);
+#else
     void *pvChunk = RTMemPageAllocEx(pExecMemAllocator->cbChunk, RTMEMPAGEALLOC_F_EXECUTABLE);
+#endif
     AssertLogRelReturn(pvChunk, VERR_NO_EXEC_MEMORY);
 
@@ -765,5 +769,10 @@
 static void iemExecMemAllocatorReadyForUse(PVMCPUCC pVCpu, void *pv, size_t cb)
 {
+#ifdef RT_OS_DARWIN
+    int rc = RTMemProtect(pv, cb, RTMEM_PROT_EXEC | RTMEM_PROT_READ);
+    AssertRC(rc); RT_NOREF(pVCpu);
+#else
     RT_NOREF(pVCpu, pv, cb);
+#endif
 }
 
@@ -1051,5 +1060,7 @@
 {
 #ifdef RT_ARCH_AMD64
-    /* eax = call status code.*/
+    /*
+     * AMD64: eax = call status code.
+     */
 
     /* edx = rcPassUp */
@@ -1081,6 +1092,19 @@
 
 #elif RT_ARCH_ARM64
-    RT_NOREF(pReNative, idxInstr);
-    off = UINT32_MAX;
+    /*
+     * ARM64: w0 = call status code.
+     */
+    off = iemNativeEmitLoadGprImm64(pReNative, off, ARMV8_A64_REG_X2, idxInstr); /** @todo 32-bit imm load? Fixed counter register? */
+    off = iemNativeEmitLoadGprFromVCpuU32(pReNative, off, ARMV8_A64_REG_X3, RT_UOFFSETOF(VMCPUCC, iem.s.rcPassUp));
+
+    uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
+    AssertReturn(pu32CodeBuf, UINT32_MAX);
+
+    pu32CodeBuf[off++] = Armv8A64MkInstrOrr(ARMV8_A64_REG_X4, ARMV8_A64_REG_X3, ARMV8_A64_REG_X0, false /*f64Bit*/);
+
+    uint32_t const idxLabel = iemNativeMakeLabel(pReNative, kIemNativeLabelType_NonZeroRetOrPassUp);
+    AssertReturn(idxLabel != UINT32_MAX, UINT32_MAX);
+    AssertReturn(iemNativeAddFixup(pReNative, off, idxLabel, kIemNativeFixupType_RelImm19At5), UINT32_MAX);
+    pu32CodeBuf[off++] = Armv8A64MkInstrCbzCbnz(true /*fJmpIfNotZero*/, ARMV8_A64_REG_X4, false /*f64Bit*/);
 
 #else
@@ -1177,16 +1201,125 @@
 # endif
 
-    /* Check the status code. */
+#elif RT_ARCH_ARM64
+    /*
+     * ARM64:
+     */
+    off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_REG_FIXED_PVMCPU);
+    if (cParams > 0)
+        off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_ARG1_GREG, pCallEntry->auParams[0]);
+    if (cParams > 1)
+        off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_ARG2_GREG, pCallEntry->auParams[1]);
+    if (cParams > 2)
+        off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_ARG3_GREG, pCallEntry->auParams[2]);
+    off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_REG_FIXED_TMP0,
+                                    (uintptr_t)g_apfnIemThreadedFunctions[pCallEntry->enmFunction]);
+
+    uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
+    AssertReturn(pu32CodeBuf, UINT32_MAX);
+
+    pu32CodeBuf[off++] = Armv8A64MkInstrBlr(IEMNATIVE_REG_FIXED_TMP0);
+
+#else
+# error "port me"
+#endif
+
+    /*
+     * Check the status code.
+     */
     off = iemNativeEmitCheckCallRetAndPassUp(pReNative, off, pCallEntry->idxInstr);
     AssertReturn(off != UINT32_MAX, off);
 
+    return off;
+}
+
+
+/**
+ * Emits a standard epilog.
+ */
+static uint32_t iemNativeEmitRcFiddling(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxReturnLabel)
+{
+    /*
+     * Generate the rc + rcPassUp fiddling code if needed.
+     */
+    uint32_t idxLabel = iemNativeFindLabel(pReNative, kIemNativeLabelType_NonZeroRetOrPassUp);
+    if (idxLabel != UINT32_MAX)
+    {
+        Assert(pReNative->paLabels[idxLabel].off == UINT32_MAX);
+        pReNative->paLabels[idxLabel].off = off;
+
+        /* iemNativeHlpExecStatusCodeFiddling(PVMCPUCC pVCpu, int rc, uint8_t idxInstr) */
+#ifdef RT_ARCH_AMD64
+        /*
+         * AMD64:
+         */
+        uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 20);
+        AssertReturn(pbCodeBuf, UINT32_MAX);
+
+        /* Call helper and jump to return point. */
+# ifdef RT_OS_WINDOWS
+        off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_x8, X86_GREG_xCX); /* cl = instruction number */
+        AssertReturn(off != UINT32_MAX, UINT32_MAX);
+        off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xCX, IEMNATIVE_REG_FIXED_PVMCPU);
+        AssertReturn(off != UINT32_MAX, UINT32_MAX);
+        off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xDX, X86_GREG_xAX);
+        AssertReturn(off != UINT32_MAX, UINT32_MAX);
+# else
+        off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xDI, IEMNATIVE_REG_FIXED_PVMCPU);
+        AssertReturn(off != UINT32_MAX, UINT32_MAX);
+        off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xSI, X86_GREG_xAX);
+        AssertReturn(off != UINT32_MAX, UINT32_MAX);
+        off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xDX, X86_GREG_xCX); /* cl = instruction number */
+        AssertReturn(off != UINT32_MAX, UINT32_MAX);
+# endif
+        off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_xAX, (uintptr_t)iemNativeHlpExecStatusCodeFiddling);
+        AssertReturn(off != UINT32_MAX, UINT32_MAX);
+
+        pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 10);
+        AssertReturn(pbCodeBuf, UINT32_MAX);
+        pbCodeBuf[off++] = 0xff;                    /* call rax */
+        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 2, X86_GREG_xAX);
+
+        /* Jump to common return point. */
+        uint32_t offRel = pReNative->paLabels[idxReturnLabel].off - (off + 2);
+        if (-(int32_t)offRel <= 127)
+        {
+            pbCodeBuf[off++] = 0xeb;                /* jmp rel8 */
+            pbCodeBuf[off++] = (uint8_t)offRel;
+            off++;
+        }
+        else
+        {
+            offRel -= 3;
+            pbCodeBuf[off++] = 0xe9;                /* jmp rel32 */
+            pbCodeBuf[off++] = RT_BYTE1(offRel);
+            pbCodeBuf[off++] = RT_BYTE2(offRel);
+            pbCodeBuf[off++] = RT_BYTE3(offRel);
+            pbCodeBuf[off++] = RT_BYTE4(offRel);
+        }
+        pbCodeBuf[off++] = 0xcc;                    /* int3 poison */
 
 #elif RT_ARCH_ARM64
-    RT_NOREF(pReNative, pCallEntry, cParams);
-    off = UINT32_MAX;
-
+        /*
+         * ARM64:
+         */
+        off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG1_GREG, IEMNATIVE_CALL_RET_GREG);
+        AssertReturn(off != UINT32_MAX, UINT32_MAX);
+        off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_REG_FIXED_PVMCPU);
+        AssertReturn(off != UINT32_MAX, UINT32_MAX);
+        /* IEMNATIVE_CALL_ARG2_GREG is already set. */
+        off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_REG_FIXED_TMP0, (uintptr_t)iemNativeHlpExecStatusCodeFiddling);
+        AssertReturn(off != UINT32_MAX, UINT32_MAX);
+
+        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
+        AssertReturn(pu32CodeBuf, UINT32_MAX);
+        pu32CodeBuf[off++] = Armv8A64MkInstrBlr(IEMNATIVE_REG_FIXED_TMP0);
+
+        /* Jump back to the common return point. */
+        int32_t const offRel = pReNative->paLabels[idxReturnLabel].off - off;
+        pu32CodeBuf[off++] = Armv8A64MkInstrB(offRel);
 #else
 # error "port me"
 #endif
+    }
     return off;
 }
@@ -1198,19 +1331,22 @@
 static uint32_t iemNativeEmitEpilog(PIEMRECOMPILERSTATE pReNative, uint32_t off)
 {
+    /*
+     * Successful return, so clear the return register (eax, w0).
+     */
+    off = iemNativeEmitGprZero(pReNative, off, IEMNATIVE_CALL_RET_GREG);
+    AssertReturn(off != UINT32_MAX, UINT32_MAX);
+
+    /*
+     * Define label for common return point.
+     */
+    uint32_t const idxReturn = iemNativeMakeLabel(pReNative, kIemNativeLabelType_Return, off);
+    AssertReturn(idxReturn != UINT32_MAX, UINT32_MAX);
+
+    /*
+     * Restore registers and return.
+     */
 #ifdef RT_ARCH_AMD64
     uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 20);
     AssertReturn(pbCodeBuf, UINT32_MAX);
-
-    /*
-     * Successful return, so clear eax.
-     */
-    pbCodeBuf[off++] = 0x33;                    /* xor eax, eax */
-    pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, X86_GREG_xAX, X86_GREG_xAX);
-
-    /*
-     * Define label for common return point.
-     */
-    uint32_t const idxReturn = iemNativeMakeLabel(pReNative, kIemNativeLabelType_Return, off);
-    AssertReturn(idxReturn != UINT32_MAX, UINT32_MAX);
 
     /* Reposition esp at the r15 restore point. */
@@ -1238,89 +1374,39 @@
     pbCodeBuf[off++] = 0xcc;                    /* int3 poison */
 
-    /*
-     * Generate the rc + rcPassUp fiddling code if needed.
-     */
-    uint32_t idxLabel = iemNativeFindLabel(pReNative, kIemNativeLabelType_NonZeroRetOrPassUp);
-    if (idxLabel != UINT32_MAX)
-    {
-        Assert(pReNative->paLabels[idxLabel].off == UINT32_MAX);
-        pReNative->paLabels[idxLabel].off = off;
-
-        /* Call helper and jump to return point. */
-# ifdef RT_OS_WINDOWS
-        off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_x8, X86_GREG_xCX); /* cl = instruction number */
-        AssertReturn(off != UINT32_MAX, UINT32_MAX);
-        off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xCX, IEMNATIVE_REG_FIXED_PVMCPU);
-        AssertReturn(off != UINT32_MAX, UINT32_MAX);
-        off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xDX, X86_GREG_xAX);
-        AssertReturn(off != UINT32_MAX, UINT32_MAX);
-# else
-        off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xDI, IEMNATIVE_REG_FIXED_PVMCPU);
-        AssertReturn(off != UINT32_MAX, UINT32_MAX);
-        off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xSI, X86_GREG_xAX);
-        AssertReturn(off != UINT32_MAX, UINT32_MAX);
-        off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xDX, X86_GREG_xCX); /* cl = instruction number */
-        AssertReturn(off != UINT32_MAX, UINT32_MAX);
-# endif
-        off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_xAX, (uintptr_t)iemNativeHlpExecStatusCodeFiddling);
-        AssertReturn(off != UINT32_MAX, UINT32_MAX);
-
-        pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 10);
-        AssertReturn(pbCodeBuf, UINT32_MAX);
-        pbCodeBuf[off++] = 0xff;                    /* call rax */
-        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 2, X86_GREG_xAX);
-
-        /* Jump to common return point. */
-        uint32_t offRel = pReNative->paLabels[idxReturn].off - (off + 2);
-        if (-(int32_t)offRel <= 127)
-        {
-            pbCodeBuf[off++] = 0xeb;                /* jmp rel8 */
-            pbCodeBuf[off++] = (uint8_t)offRel;
-            off++;
-        }
-        else
-        {
-            offRel -= 3;
-            pbCodeBuf[off++] = 0xe9;                /* jmp rel32 */
-            pbCodeBuf[off++] = RT_BYTE1(offRel);
-            pbCodeBuf[off++] = RT_BYTE2(offRel);
-            pbCodeBuf[off++] = RT_BYTE3(offRel);
-            pbCodeBuf[off++] = RT_BYTE4(offRel);
-        }
-        pbCodeBuf[off++] = 0xcc;                    /* int3 poison */
-    }
-
 #elif RT_ARCH_ARM64
-    RT_NOREF(pReNative);
-    off = UINT32_MAX;
+    uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 10);
+    AssertReturn(pu32CodeBuf, UINT32_MAX);
+
+    /* ldp x19, x20, [sp, #IEMNATIVE_FRAME_VAR_SIZE]! ; Unallocate the variable space and restore x19+x20. */
+    AssertCompile(IEMNATIVE_FRAME_VAR_SIZE < 64*8);
+    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(true /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_kPreIndex,
+                                                 ARMV8_A64_REG_X19, ARMV8_A64_REG_X20, ARMV8_A64_REG_SP,
+                                                 IEMNATIVE_FRAME_VAR_SIZE / 8);
+    /* Restore x21 thru x28 + BP and LR (ret address) (SP remains unchanged in the kSigned variant). */
+    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(true /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_kSigned,
+                                                 ARMV8_A64_REG_X21, ARMV8_A64_REG_X22, ARMV8_A64_REG_SP, 2);
+    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(true /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_kSigned,
+                                                 ARMV8_A64_REG_X23, ARMV8_A64_REG_X24, ARMV8_A64_REG_SP, 4);
+    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(true /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_kSigned,
+                                                 ARMV8_A64_REG_X25, ARMV8_A64_REG_X26, ARMV8_A64_REG_SP, 6);
+    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(true /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_kSigned,
+                                                 ARMV8_A64_REG_X27, ARMV8_A64_REG_X28, ARMV8_A64_REG_SP, 8);
+    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(true /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_kSigned,
+                                                 ARMV8_A64_REG_BP, ARMV8_A64_REG_LR, ARMV8_A64_REG_SP, 10);
+    AssertCompile(IEMNATIVE_FRAME_SAVE_REG_SIZE / 8 == 12);
+
+    /* add sp, sp, IEMNATIVE_FRAME_SAVE_REG_SIZE ; */
+    AssertCompile(IEMNATIVE_FRAME_SAVE_REG_SIZE < 4096);
+    pu32CodeBuf[off++] = Armv8A64MkInstrAddSub(false /*fSub*/, ARMV8_A64_REG_SP, ARMV8_A64_REG_SP, IEMNATIVE_FRAME_SAVE_REG_SIZE);
+
+    /* ret */
+    pu32CodeBuf[off++] = ARMV8_A64_INSTR_RET;
 
 #else
 # error "port me"
 #endif
-    return off;
-}
-
-
-typedef enum
-{
-    kArm64InstrStLdPairType_kPostIndex = 1,
-    kArm64InstrStLdPairType_kSigned = 2,
-    kArm64InstrStLdPairType_kPreIndex = 3
-} ARM64INSTRSTLDPAIRTYPE;
-
-DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrStLdPair(bool fLoad, uint32_t iOpc, ARM64INSTRSTLDPAIRTYPE enmType,
-                                                    uint32_t iReg1, uint32_t iReg2, uint32_t iBaseReg, int32_t iImm7 = 0)
-{
-    Assert(iOpc < 3); Assert(iReg1 <= 31); Assert(iReg2 <= 31); Assert(iBaseReg <= 31); Assert(iImm7 < 64 && iImm7 >= -64);
-    return (iOpc << 30)
-         | UINT32_C(0x28000000)
-         | ((uint32_t)enmType << 23)
-         | ((uint32_t)fLoad << 22)
-         | ((uint32_t)iImm7 << 15)
-         | (iReg2 << 10)
-         | (iBaseReg << 5)
-         | iReg1;
-}
-
+
+    return iemNativeEmitRcFiddling(pReNative, off, idxReturn);
+}
 
 
@@ -1384,4 +1470,5 @@
     uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 10);
     AssertReturn(pu32CodeBuf, UINT32_MAX);
+
     /* stp x19, x20, [sp, #-IEMNATIVE_FRAME_SAVE_REG_SIZE] ; Allocate space for saving registers and place x19+x20 at the bottom. */
     AssertCompile(IEMNATIVE_FRAME_SAVE_REG_SIZE < 64*8);
@@ -1400,13 +1487,15 @@
     /* Save the BP and LR (ret address) registers at the top of the frame. */
     pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(false /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_kSigned,
-                                                 ARMV8_A64_REG_BP, ARMV8_A64_REG_LR,ARMV8_A64_REG_SP, 10);
+                                                 ARMV8_A64_REG_BP, ARMV8_A64_REG_LR, ARMV8_A64_REG_SP, 10);
     AssertCompile(IEMNATIVE_FRAME_SAVE_REG_SIZE / 8 == 12);
-    /* sub bp, sp, IEMNATIVE_FRAME_SAVE_REG_SIZE - 16 ; Set BP to point to the old BP stack address. */
-    AssertCompile(IEMNATIVE_FRAME_SAVE_REG_SIZE - 16 < 4096);
-    pu32CodeBuf[off++] = UINT32_C(0xd1000000) | ((IEMNATIVE_FRAME_SAVE_REG_SIZE - 16) << 10) | ARMV8_A64_REG_SP | ARMV8_A64_REG_BP;
+    /* add bp, sp, IEMNATIVE_FRAME_SAVE_REG_SIZE - 16 ; Set BP to point to the old BP stack address. */
+    pu32CodeBuf[off++] = Armv8A64MkInstrAddSub(false /*fSub*/, ARMV8_A64_REG_BP,
+                                               ARMV8_A64_REG_SP, IEMNATIVE_FRAME_SAVE_REG_SIZE - 16);
 
     /* sub sp, sp, IEMNATIVE_FRAME_VAR_SIZE ; Allocate the variable area from SP. */
-    AssertCompile(IEMNATIVE_FRAME_VAR_SIZE < 4096);
-    pu32CodeBuf[off++] = UINT32_C(0xd1000000) | (IEMNATIVE_FRAME_VAR_SIZE << 10) | ARMV8_A64_REG_SP | ARMV8_A64_REG_SP;
+    pu32CodeBuf[off++] = Armv8A64MkInstrAddSub(true /*fSub*/, ARMV8_A64_REG_SP, ARMV8_A64_REG_SP, IEMNATIVE_FRAME_VAR_SIZE);
+
+    /* mov r28, r0 */
+    off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_REG_FIXED_PVMCPU, IEMNATIVE_CALL_ARG0_GREG);
 
 
@@ -1507,4 +1596,12 @@
 
 #elif defined(RT_ARCH_ARM64)
+        case kIemNativeFixupType_RelImm19At5:
+        {
+            Assert(paFixups[i].off < off);
+            int32_t const offDisp = paLabels[paFixups[i].idxLabel].off - paFixups[i].off + paFixups[i].offAddend;
+            Assert(offDisp >= -262144 && offDisp < 262144);
+            *Ptr.pu32 = (*Ptr.pu32 & UINT32_C(0xff00001f)) | (offDisp << 5);
+            continue;
+        }
 #endif
         case kIemNativeFixupType_Invalid:
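The RT_OS_DARWIN changes in the first two hunks are a W^X workaround: macOS (notably on Apple Silicon) refuses pages that are writable and executable at the same time, so the executable-memory chunk is now allocated without RTMEMPAGEALLOC_F_EXECUTABLE and only flipped to read+execute in iemExecMemAllocatorReadyForUse() once the recompiler has finished writing into it. A minimal sketch of the same allocate-write-seal pattern using plain POSIX calls (mmap/mprotect stand in for RTMemPageAllocEx/RTMemProtect; a reading aid, not VBox code):

    #include <stddef.h>
    #include <string.h>
    #include <sys/mman.h>

    /* Allocate RW memory, copy generated code in, then seal it RX. */
    static void *emitAndSeal(const void *pvCode, size_t cb)
    {
        /* 1. Writable, non-executable pages (cf. dropping RTMEMPAGEALLOC_F_EXECUTABLE). */
        void *pv = mmap(NULL, cb, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (pv == MAP_FAILED)
            return NULL;

        /* 2. Generate the code while the pages are writable. */
        memcpy(pv, pvCode, cb);

        /* 3. Flip to read+execute before running it (cf. the RTMemProtect call
              added to iemExecMemAllocatorReadyForUse). */
        if (mprotect(pv, cb, PROT_READ | PROT_EXEC) != 0)
        {
            munmap(pv, cb);
            return NULL;
        }
        return pv;
    }

(A production JIT on Apple Silicon additionally needs MAP_JIT plus pthread_jit_write_protect_np(); the @todo in the hunk hints that this simple per-chunk protection dance is not the final word.)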
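The new ARM64 path in iemNativeEmitCheckCallRetAndPassUp folds two tests into one branch: it loads iem.s.rcPassUp into w3, ORs it with the call status in w0 into w4, and emits a single CBNZ whose 19-bit displacement is not yet known, so a kIemNativeFixupType_RelImm19At5 fixup records the slot for later patching. In plain C the emitted check is roughly this (a sketch with an illustrative function name; the helper signature is taken from the comment in the diff):

    /* One branch covers both "helper returned non-zero" and "rcPassUp pending". */
    static int iemNativeCheckCallRetC(PVMCPUCC pVCpu, int rc, uint8_t idxInstr)
    {
        if (((uint32_t)rc | (uint32_t)pVCpu->iem.s.rcPassUp) != 0)
            rc = iemNativeHlpExecStatusCodeFiddling(pVCpu, rc, idxInstr);
        return rc;
    }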
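For readers decoding the ARM64 prolog/epilog pairs: the 64-bit LDP/STP encoding scales the signed imm7 field by 8 bytes, which is why Armv8A64MkInstrStLdPair() is passed pair indexes 2, 4, 6, 8 and 10 rather than byte offsets. With the asserted constraint IEMNATIVE_FRAME_SAVE_REG_SIZE / 8 == 12 (12 saved registers x 8 bytes = 96 bytes), the frame built by the prolog looks like this (derived from the code above, not part of the changeset):

    sp + 0                      : variable area (IEMNATIVE_FRAME_VAR_SIZE bytes)
    sp + VAR_SIZE +  0 (imm7  0): x19, x20
    sp + VAR_SIZE + 16 (imm7  2): x21, x22
    sp + VAR_SIZE + 32 (imm7  4): x23, x24
    sp + VAR_SIZE + 48 (imm7  6): x25, x26
    sp + VAR_SIZE + 64 (imm7  8): x27, x28
    sp + VAR_SIZE + 80 (imm7 10): bp (x29), lr (x30)   <- the new bp points here

The epilog mirrors this exactly: the pre-indexed ldp pops the variable area and restores x19/x20 in one instruction, the signed-offset ldp's reload the rest, and the final add sp frees the 96-byte save area before ret.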
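kIemNativeFixupType_RelImm19At5, applied in the last hunk above, names its encoding: CBZ/CBNZ carry a signed 19-bit displacement, counted in instructions, in bits 23:5, hence the [-262144, 262144) assertion (a reach of roughly +/-1 MiB). A stand-alone sketch of the patch step (illustrative, not the in-tree code; it adds an explicit 19-bit mask so negative displacements cannot spill into the opcode bits):

    #include <stdint.h>

    /* Patch a signed 19-bit instruction displacement into bits 23:5 of a
     * CBZ/CBNZ word; bits 31:24 (opcode) and 4:0 (register) are preserved. */
    static uint32_t patchRelImm19At5(uint32_t u32Instr, int32_t offDisp)
    {
        /* offDisp counts 32-bit instructions, not bytes: range [-262144, 262144). */
        return (u32Instr & UINT32_C(0xff00001f))
             | (((uint32_t)offDisp & UINT32_C(0x0007ffff)) << 5);
    }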
trunk/src/VBox/VMM/include/IEMN8veRecompiler.h
--- trunk/src/VBox/VMM/include/IEMN8veRecompiler.h (r101247)
+++ trunk/src/VBox/VMM/include/IEMN8veRecompiler.h (r101248)
@@ -116,12 +116,63 @@
 #ifdef RT_ARCH_AMD64
 # define IEMNATIVE_REG_FIXED_PVMCPU         X86_GREG_xBX
-#elif RT_ARCH_ARM64
+
+#elif defined(RT_ARCH_ARM64)
 # define IEMNATIVE_REG_FIXED_PVMCPU         ARMV8_A64_REG_X28
 /** Dedicated temporary register.
  * @todo replace this by a register allocator and content tracker. */
 # define IEMNATIVE_REG_FIXED_TMP0           ARMV8_A64_REG_X15
+
 #else
 # error "port me"
 #endif
+/** @} */
+
+/** @name Call related registers.
+ * @{ */
+/** @def IEMNATIVE_CALL_RET_GREG
+ * The return value register. */
+/** @def IEMNATIVE_CALL_ARG_GREG_COUNT
+ * Number of arguments in registers. */
+/** @def IEMNATIVE_CALL_ARG0_GREG
+ * The general purpose register carrying argument \#0. */
+/** @def IEMNATIVE_CALL_ARG1_GREG
+ * The general purpose register carrying argument \#1. */
+/** @def IEMNATIVE_CALL_ARG2_GREG
+ * The general purpose register carrying argument \#2. */
+/** @def IEMNATIVE_CALL_ARG3_GREG
+ * The general purpose register carrying argument \#3. */
+#ifdef RT_ARCH_AMD64
+# define IEMNATIVE_CALL_RET_GREG            X86_GREG_xAX
+
+# ifdef RT_OS_WINDOWS
+#  define IEMNATIVE_CALL_ARG_GREG_COUNT     4
+#  define IEMNATIVE_CALL_ARG0_GREG          X86_GREG_xCX
+#  define IEMNATIVE_CALL_ARG1_GREG          X86_GREG_xDX
+#  define IEMNATIVE_CALL_ARG2_GREG          X86_GREG_x8
+#  define IEMNATIVE_CALL_ARG3_GREG          X86_GREG_x9
+# else
+#  define IEMNATIVE_CALL_ARG_GREG_COUNT     6
+#  define IEMNATIVE_CALL_ARG0_GREG          X86_GREG_xDI
+#  define IEMNATIVE_CALL_ARG1_GREG          X86_GREG_xSI
+#  define IEMNATIVE_CALL_ARG2_GREG          X86_GREG_xDX
+#  define IEMNATIVE_CALL_ARG3_GREG          X86_GREG_xCX
+#  define IEMNATIVE_CALL_ARG4_GREG          X86_GREG_x8
+#  define IEMNATIVE_CALL_ARG5_GREG          X86_GREG_x9
+# endif
+
+#elif defined(RT_ARCH_ARM64)
+# define IEMNATIVE_CALL_RET_GREG            ARMV8_A64_REG_X0
+# define IEMNATIVE_CALL_ARG_GREG_COUNT      8
+# define IEMNATIVE_CALL_ARG0_GREG           ARMV8_A64_REG_X0
+# define IEMNATIVE_CALL_ARG1_GREG           ARMV8_A64_REG_X1
+# define IEMNATIVE_CALL_ARG2_GREG           ARMV8_A64_REG_X2
+# define IEMNATIVE_CALL_ARG3_GREG           ARMV8_A64_REG_X3
+# define IEMNATIVE_CALL_ARG4_GREG           ARMV8_A64_REG_X4
+# define IEMNATIVE_CALL_ARG5_GREG           ARMV8_A64_REG_X5
+# define IEMNATIVE_CALL_ARG6_GREG           ARMV8_A64_REG_X6
+# define IEMNATIVE_CALL_ARG7_GREG           ARMV8_A64_REG_X7
+
+#endif
+
 /** @} */
 
@@ -158,4 +209,6 @@
     kIemNativeFixupType_Rel32,
 #elif defined(RT_ARCH_ARM64)
+    /** ARM64 fixup: PC relative offset at bits 23:5 (CBZ, CBNZ). */
+    kIemNativeFixupType_RelImm19At5,
 #endif
     kIemNativeFixupType_End
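The new IEMNATIVE_CALL_* block gives the recompiler a portable view of the host calling convention: Win64 passes the first four integer arguments in rcx/rdx/r8/r9, SysV AMD64 the first six in rdi/rsi/rdx/rcx/r8/r9, and AArch64 the first eight in x0-x7, which is exactly what the three branches above encode. A hypothetical helper showing how an emitter might map an argument index onto these macros (the function name and the four-argument cap are illustrative, not from the changeset):

    /* Map an argument index to the host GPR that carries it, portable
     * across the three conventions via the IEMNATIVE_CALL_ARGn_GREG macros. */
    static uint8_t iemNativeCallArgToGreg(uint8_t iArg)
    {
        static uint8_t const s_aidxArgRegs[] =
        {
            IEMNATIVE_CALL_ARG0_GREG,
            IEMNATIVE_CALL_ARG1_GREG,
            IEMNATIVE_CALL_ARG2_GREG,
            IEMNATIVE_CALL_ARG3_GREG, /* IEMNATIVE_CALL_ARG_GREG_COUNT >= 4 on all targets */
        };
        Assert(iArg < RT_ELEMENTS(s_aidxArgRegs));
        return s_aidxArgRegs[iArg];
    }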