Timestamp: Oct 19, 2008 8:45:30 PM
Location: trunk/src/recompiler_new
Files: 13 edited
trunk/src/recompiler_new/VBoxRecompiler.c
r13358 r13382 245 245 uint32_t u32Dummy; 246 246 unsigned i; 247 int rc; 247 248 248 249 /* … … 276 277 277 278 /* ctx. */ 278 intrc = CPUMQueryGuestCtxPtr(pVM, &pVM->rem.s.pCtx);279 rc = CPUMQueryGuestCtxPtr(pVM, &pVM->rem.s.pCtx); 279 280 if (VBOX_FAILURE(rc)) 280 281 { … … 451 452 static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM) 452 453 { 453 LogFlow(("remR3Save:\n"));454 455 454 /* 456 455 * Save the required CPU Env bits. … … 458 457 */ 459 458 PREM pRem = &pVM->rem.s; 459 LogFlow(("remR3Save:\n")); 460 460 Assert(!pRem->fInREM); 461 461 SSMR3PutU32(pSSM, pRem->Env.hflags); … … 482 482 uint32_t u32Dummy; 483 483 uint32_t fRawRing0 = false; 484 uint32_t u32Sep; 485 int rc; 486 PREM pRem; 484 487 LogFlow(("remR3Load:\n")); 485 488 … … 509 512 * (Not much because we're never in REM when doing the save.) 510 513 */ 511 PREMpRem = &pVM->rem.s;514 pRem = &pVM->rem.s; 512 515 Assert(!pRem->fInREM); 513 516 SSMR3GetU32(pSSM, &pRem->Env.hflags); … … 519 522 } 520 523 521 uint32_t u32Sep; 522 int rc = SSMR3GetU32(pSSM, &u32Sep); /* separator */ 524 rc = SSMR3GetU32(pSSM, &u32Sep); /* separator */ 523 525 if (VBOX_FAILURE(rc)) 524 526 return rc; … … 536 538 if (u32Version == REM_SAVED_STATE_VERSION_VER1_6) 537 539 { 540 unsigned i; 541 538 542 /* 539 543 * Load the REM stuff. … … 547 551 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED; 548 552 } 549 unsigned i;550 553 for (i = 0; i < pRem->cInvalidatedPages; i++) 551 554 SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]); … … 608 611 REMR3DECL(int) REMR3Step(PVM pVM) 609 612 { 613 int rc, interrupt_request; 614 RTGCPTR GCPtrPC; 615 bool fBp; 616 610 617 /* 611 618 * Lock the REM - we don't wanna have anyone interrupting us … … 613 620 * pending interrupts and suchlike. 614 621 */ 615 int interrupt_request = pVM->rem.s.Env.interrupt_request;622 interrupt_request = pVM->rem.s.Env.interrupt_request; 616 623 Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER))); 617 624 pVM->rem.s.Env.interrupt_request = 0; … … 621 628 * If we're standing at a breakpoint, that have to be disabled before we start stepping. 622 629 */ 623 RTGCPTRGCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;624 boolfBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC);630 GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base; 631 fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC); 625 632 626 633 /* … … 629 636 * just flip it on and off to make sure it moves 630 637 */ 631 intrc = cpu_exec(&pVM->rem.s.Env);638 rc = cpu_exec(&pVM->rem.s.Env); 632 639 if (rc == EXCP_DEBUG) 633 640 { … … 728 735 REMR3DECL(int) REMR3EmulateInstruction(PVM pVM) 729 736 { 737 int rc, rc2; 730 738 Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM))); 731 739 … … 739 747 * Sync the state and enable single instruction / single stepping. 740 748 */ 741 intrc = REMR3State(pVM, false /* no need to flush the TBs; we always compile. */);749 rc = REMR3State(pVM, false /* no need to flush the TBs; we always compile. 
*/); 742 750 if (VBOX_SUCCESS(rc)) 743 751 { … … 951 959 #endif 952 960 pVM->rem.s.Env.interrupt_request = interrupt_request; 953 intrc2 = REMR3StateBack(pVM);961 rc2 = REMR3StateBack(pVM); 954 962 AssertRC(rc2); 955 963 } … … 975 983 REMR3DECL(int) REMR3Run(PVM pVM) 976 984 { 985 int rc; 977 986 Log2(("REMR3Run: (cs:eip=%04x:%VGv)\n", pVM->rem.s.Env.segs[R_CS].selector, pVM->rem.s.Env.eip)); 978 987 Assert(pVM->rem.s.fInREM); 979 988 980 989 TMNotifyStartOfExecution(pVM); 981 intrc = cpu_exec(&pVM->rem.s.Env);990 rc = cpu_exec(&pVM->rem.s.Env); 982 991 TMNotifyEndOfExecution(pVM); 983 992 switch (rc) … … 1104 1113 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */ 1105 1114 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */ 1115 uint32_t u32CR0; 1106 1116 1107 1117 /* Update counter. */ … … 1110 1120 if (HWACCMIsEnabled(env->pVM)) 1111 1121 { 1122 CPUMCTX Ctx; 1123 1112 1124 env->state |= CPU_RAW_HWACC; 1113 1125 … … 1115 1127 * Create partial context for HWACCMR3CanExecuteGuest 1116 1128 */ 1117 CPUMCTX Ctx;1118 1129 Ctx.cr0 = env->cr[0]; 1119 1130 Ctx.cr3 = env->cr[3]; … … 1215 1226 } 1216 1227 1217 u int32_t u32CR0 = env->cr[0];1228 u32CR0 = env->cr[0]; 1218 1229 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE)) 1219 1230 { … … 1344 1355 { 1345 1356 PVM pVM = env->pVM; 1357 PCPUMCTX pCtx; 1358 int rc; 1346 1359 1347 1360 /* … … 1359 1372 * Update the control registers before calling PGMFlushPage. 1360 1373 */ 1361 PCPUMCTXpCtx = (PCPUMCTX)pVM->rem.s.pCtx;1374 pCtx = (PCPUMCTX)pVM->rem.s.pCtx; 1362 1375 pCtx->cr0 = env->cr[0]; 1363 1376 pCtx->cr3 = env->cr[3]; … … 1367 1380 * Let PGM do the rest. 1368 1381 */ 1369 intrc = PGMInvalidatePage(pVM, GCPtr);1382 rc = PGMInvalidatePage(pVM, GCPtr); 1370 1383 if (VBOX_FAILURE(rc)) 1371 1384 { … … 1426 1439 { 1427 1440 PVM pVM = env->pVM; 1441 PCPUMCTX pCtx; 1428 1442 1429 1443 /* … … 1445 1459 * Update the control registers before calling PGMR3FlushTLB. 1446 1460 */ 1447 PCPUMCTXpCtx = (PCPUMCTX)pVM->rem.s.pCtx;1461 pCtx = (PCPUMCTX)pVM->rem.s.pCtx; 1448 1462 pCtx->cr0 = env->cr[0]; 1449 1463 pCtx->cr3 = env->cr[3]; … … 1466 1480 int rc; 1467 1481 PVM pVM = env->pVM; 1482 PCPUMCTX pCtx; 1468 1483 1469 1484 /* … … 1479 1494 * as it may need to map whatever cr3 is pointing to. 
1480 1495 */ 1481 PCPUMCTXpCtx = (PCPUMCTX)pVM->rem.s.pCtx;1496 pCtx = (PCPUMCTX)pVM->rem.s.pCtx; 1482 1497 pCtx->cr0 = env->cr[0]; 1483 1498 pCtx->cr3 = env->cr[3]; … … 1628 1643 REMR3DECL(int) REMR3State(PVM pVM, bool fFlushTBs) 1629 1644 { 1645 register const CPUMCTX *pCtx; 1646 register unsigned fFlags; 1647 bool fHiddenSelRegsValid; 1648 unsigned i; 1649 TRPMEVENT enmType; 1650 uint8_t u8TrapNo; 1651 int rc; 1652 1630 1653 Log2(("REMR3State:\n")); 1631 1654 STAM_PROFILE_START(&pVM->rem.s.StatsState, a); 1632 register const CPUMCTX *pCtx = pVM->rem.s.pCtx; 1633 register unsigned fFlags; 1634 bool fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVM); 1635 unsigned i; 1655 1656 pCtx = pVM->rem.s.pCtx; 1657 fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVM); 1636 1658 1637 1659 Assert(!pVM->rem.s.fInREM); … … 1716 1738 if (pVM->rem.s.cInvalidatedPages) 1717 1739 { 1740 RTUINT i; 1741 1718 1742 pVM->rem.s.fIgnoreInvlPg = true; 1719 RTUINT i;1720 1743 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++) 1721 1744 { … … 1976 1999 */ 1977 2000 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */ 1978 TRPMEVENT enmType; 1979 uint8_t u8TrapNo; 1980 int rc = TRPMQueryTrap(pVM, &u8TrapNo, &enmType); 2001 rc = TRPMQueryTrap(pVM, &u8TrapNo, &enmType); 1981 2002 if (VBOX_SUCCESS(rc)) 1982 2003 { … … 2076 2097 REMR3DECL(int) REMR3StateBack(PVM pVM) 2077 2098 { 2099 register PCPUMCTX pCtx = pVM->rem.s.pCtx; 2100 unsigned i; 2101 2078 2102 Log2(("REMR3StateBack:\n")); 2079 2103 Assert(pVM->rem.s.fInREM); 2080 2104 STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a); 2081 register PCPUMCTX pCtx = pVM->rem.s.pCtx; 2082 unsigned i; 2083 2084 /* 2105 /* 2085 2106 * Copy back the registers. 2086 2107 * This is done in the order they are declared in the CPUMCTX structure. … … 2269 2290 && pVM->rem.s.Env.exception_index < 256) 2270 2291 { 2292 int rc; 2293 2271 2294 Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int)); 2272 intrc = TRPMAssertTrap(pVM, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);2295 rc = TRPMAssertTrap(pVM, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT); 2273 2296 AssertRC(rc); 2274 2297 switch (pVM->rem.s.Env.exception_index) … … 2301 2324 static void remR3StateUpdate(PVM pVM) 2302 2325 { 2303 Assert(pVM->rem.s.fInREM);2304 2326 register PCPUMCTX pCtx = pVM->rem.s.pCtx; 2305 2327 unsigned i; 2306 2328 2329 Assert(pVM->rem.s.fInREM); 2330 2307 2331 /* 2308 2332 * Copy back the registers. … … 2492 2516 REMR3DECL(void) REMR3A20Set(PVM pVM, bool fEnable) 2493 2517 { 2518 bool fSaved; 2519 2494 2520 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable)); 2495 2521 VM_ASSERT_EMT(pVM); 2496 2522 2497 boolfSaved = pVM->rem.s.fIgnoreAll; /* just in case. */2523 fSaved = pVM->rem.s.fIgnoreAll; /* just in case. */ 2498 2524 pVM->rem.s.fIgnoreAll = fSaved || !pVM->rem.s.fInREM; 2499 2525 … … 2512 2538 REMR3DECL(void) REMR3ReplayInvalidatedPages(PVM pVM) 2513 2539 { 2540 RTUINT i; 2541 2514 2542 VM_ASSERT_EMT(pVM); 2515 2543 … … 2526 2554 */ 2527 2555 pVM->rem.s.fIgnoreInvlPg = true; 2528 RTUINT i;2529 2556 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++) 2530 2557 { … … 2545 2572 REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM) 2546 2573 { 2574 /* 2575 * Replay the flushes. 
2576 */ 2577 RTUINT i; 2578 const RTUINT c = pVM->rem.s.cHandlerNotifications; 2579 2547 2580 LogFlow(("REMR3ReplayInvalidatedPages:\n")); 2548 2581 VM_ASSERT_EMT(pVM); 2549 2582 2550 /*2551 * Replay the flushes.2552 */2553 RTUINT i;2554 const RTUINT c = pVM->rem.s.cHandlerNotifications;2555 2583 pVM->rem.s.cHandlerNotifications = 0; 2556 2584 for (i = 0; i < c; i++) … … 2745 2773 int rc; 2746 2774 PVM pVM = cpu_single_env->pVM; 2775 const RTGCPHYS GCPhys = physaddr; 2747 2776 2748 2777 LogFlow(("remR3GrowDynRange %VGp\n", physaddr)); 2749 const RTGCPHYS GCPhys = physaddr;2750 2778 rc = PGM3PhysGrowRange(pVM, &GCPhys); 2751 2779 if (VBOX_SUCCESS(rc)) … … 3274 3302 static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32) 3275 3303 { 3304 int rc; 3276 3305 Log2(("remR3MMIOWriteU8: GCPhys=%VGp u32=%#x\n", GCPhys, u32)); 3277 intrc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);3306 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1); 3278 3307 AssertMsg(rc == VINF_SUCCESS, ("rc=%Vrc\n", rc)); NOREF(rc); 3279 3308 } … … 3282 3311 static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32) 3283 3312 { 3313 int rc; 3284 3314 Log2(("remR3MMIOWriteU16: GCPhys=%VGp u32=%#x\n", GCPhys, u32)); 3285 intrc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);3315 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2); 3286 3316 AssertMsg(rc == VINF_SUCCESS, ("rc=%Vrc\n", rc)); NOREF(rc); 3287 3317 } … … 3290 3320 static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32) 3291 3321 { 3322 int rc; 3292 3323 Log2(("remR3MMIOWriteU32: GCPhys=%VGp u32=%#x\n", GCPhys, u32)); 3293 intrc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);3324 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4); 3294 3325 AssertMsg(rc == VINF_SUCCESS, ("rc=%Vrc\n", rc)); NOREF(rc); 3295 3326 } … … 3303 3334 static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys) 3304 3335 { 3336 uint8_t u8; 3305 3337 Log2(("remR3HandlerReadU8: GCPhys=%VGp\n", GCPhys)); 3306 uint8_t u8;3307 3338 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8)); 3308 3339 return u8; … … 3311 3342 static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys) 3312 3343 { 3344 uint16_t u16; 3313 3345 Log2(("remR3HandlerReadU16: GCPhys=%VGp\n", GCPhys)); 3314 uint16_t u16;3315 3346 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16)); 3316 3347 return u16; … … 3319 3350 static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys) 3320 3351 { 3352 uint32_t u32; 3321 3353 Log2(("remR3HandlerReadU32: GCPhys=%VGp\n", GCPhys)); 3322 uint32_t u32;3323 3354 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32)); 3324 3355 return u32; … … 3434 3465 bool remR3DisasBlock(CPUState *env, int f32BitCode, int nrInstructions, char *pszPrefix) 3435 3466 { 3436 int i; 3467 int i, rc; 3468 RTGCPTR GCPtrPC; 3469 uint8_t *pvPC; 3470 RTINTPTR off; 3471 DISCPUSTATE Cpu; 3437 3472 3438 3473 /* … … 3446 3481 * We don't care to much about cross page correctness presently. 3447 3482 */ 3448 RTGCPTR GCPtrPC = env->segs[R_CS].base + env->eip; 3449 void *pvPC; 3483 GCPtrPC = env->segs[R_CS].base + env->eip; 3450 3484 if (f32BitCode && (env->cr[0] & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)) 3451 3485 { … … 3453 3487 3454 3488 /* convert eip to physical address. 
*/ 3455 intrc = PGMPhysGCPtr2HCPtrByGstCR3(env->pVM,3456 3457 3458 3459 3489 rc = PGMPhysGCPtr2HCPtrByGstCR3(env->pVM, 3490 GCPtrPC, 3491 env->cr[3], 3492 env->cr[4] & (X86_CR4_PSE | X86_CR4_PAE), /** @todo add longmode flag */ 3493 &pvPC); 3460 3494 if (VBOX_FAILURE(rc)) 3461 3495 { … … 3469 3503 { 3470 3504 /* physical address */ 3471 intrc = PGMPhysGCPhys2HCPtr(env->pVM, (RTGCPHYS)GCPtrPC, nrInstructions * 16, &pvPC);3505 rc = PGMPhysGCPhys2HCPtr(env->pVM, (RTGCPHYS)GCPtrPC, nrInstructions * 16, &pvPC); 3472 3506 if (VBOX_FAILURE(rc)) 3473 3507 return false; … … 3477 3511 * Disassemble. 3478 3512 */ 3479 RTINTPTR off = env->eip - (RTGCUINTPTR)pvPC; 3480 DISCPUSTATE Cpu; 3513 off = env->eip - (RTGCUINTPTR)pvPC; 3481 3514 Cpu.mode = f32BitCode ? CPUMODE_32BIT : CPUMODE_16BIT; 3482 3515 Cpu.pfnReadBytes = NULL; /** @todo make cs:eip reader for the disassembler. */ … … 3518 3551 { 3519 3552 #ifdef USE_OLD_DUMP_AND_DISASSEMBLY 3520 PVM pVM = env->pVM; 3553 PVM pVM = env->pVM; 3554 RTGCPTR GCPtrPC; 3555 uint8_t *pvPC; 3556 char szOutput[256]; 3557 uint32_t cbOp; 3558 RTINTPTR off; 3559 DISCPUSTATE Cpu; 3560 3521 3561 3522 3562 /* Doesn't work in long mode. */ … … 3543 3583 * We don't care to much about cross page correctness presently. 3544 3584 */ 3545 RTGCPTR GCPtrPC = env->segs[R_CS].base + env->eip; 3546 void *pvPC; 3585 GCPtrPC = env->segs[R_CS].base + env->eip; 3547 3586 if ((env->cr[0] & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)) 3548 3587 { … … 3573 3612 * Disassemble. 3574 3613 */ 3575 RTINTPTR off = env->eip - (RTGCUINTPTR)pvPC; 3576 DISCPUSTATE Cpu; 3614 off = env->eip - (RTGCUINTPTR)pvPC; 3577 3615 Cpu.mode = f32BitCode ? CPUMODE_32BIT : CPUMODE_16BIT; 3578 3616 Cpu.pfnReadBytes = NULL; /** @todo make cs:eip reader for the disassembler. */ … … 3580 3618 //Cpu.dwUserData[1] = (uintptr_t)pvPC; 3581 3619 //Cpu.dwUserData[2] = GCPtrPC; 3582 char szOutput[256];3583 uint32_t cbOp;3584 3620 if (RT_FAILURE(DISInstr(&Cpu, (uintptr_t)pvPC, off, &cbOp, &szOutput[0]))) 3585 3621 return false; … … 3691 3727 { 3692 3728 PVM pVM = cpu_single_env->pVM; 3729 RTSEL cs; 3730 RTGCUINTPTR eip; 3693 3731 3694 3732 /* … … 3701 3739 */ 3702 3740 RTLogPrintf("Guest Code: PC=%VGp #VGp (%VGp) bytes fFlags=%d\n", uCode, cb, cb, fFlags); 3703 RTSELcs = cpu_single_env->segs[R_CS].selector;3704 RTGCUINTPTReip = uCode - cpu_single_env->segs[R_CS].base;3741 cs = cpu_single_env->segs[R_CS].selector; 3742 eip = uCode - cpu_single_env->segs[R_CS].base; 3705 3743 for (;;) 3706 3744 { … … 4120 4158 void cpu_outb(CPUState *env, int addr, int val) 4121 4159 { 4160 int rc; 4161 4122 4162 if (addr != 0x80 && addr != 0x70 && addr != 0x61) 4123 4163 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val)); 4124 4164 4125 intrc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);4165 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1); 4126 4166 if (RT_LIKELY(rc == VINF_SUCCESS)) 4127 4167 return; … … 4152 4192 void cpu_outl(CPUState *env, int addr, int val) 4153 4193 { 4194 int rc; 4154 4195 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val)); 4155 intrc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);4196 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4); 4156 4197 if (RT_LIKELY(rc == VINF_SUCCESS)) 4157 4198 return; … … 4286 4327 void cpu_abort(CPUState *env, const char *pszFormat, ...) 4287 4328 { 4329 va_list args; 4330 PVM pVM; 4331 4288 4332 /* 4289 4333 * Bitch about it. 
4290 4334 */ 4335 #ifndef _MSC_VER 4336 /** @todo: MSVC is right - it's not valid C */ 4291 4337 RTLogFlags(NULL, "nodisabled nobuffered"); 4292 va_list args; 4338 #endif 4293 4339 va_start(args, pszFormat); 4294 4340 RTLogPrintf("fatal error in recompiler cpu: %N\n", pszFormat, &args); … … 4302 4348 * the EMs failure handling. 4303 4349 */ 4304 PVMpVM = cpu_single_env->pVM;4350 pVM = cpu_single_env->pVM; 4305 4351 if (pVM->rem.s.fInREM) 4306 4352 REMR3StateBack(pVM); … … 4318 4364 static void remAbort(int rc, const char *pszTip) 4319 4365 { 4366 PVM pVM; 4367 4320 4368 /* 4321 4369 * Bitch about it. … … 4327 4375 * Jump back to where we entered the recompiler. 4328 4376 */ 4329 PVMpVM = cpu_single_env->pVM;4377 pVM = cpu_single_env->pVM; 4330 4378 if (pVM->rem.s.fInREM) 4331 4379 REMR3StateBack(pVM); -
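Most of the VBoxRecompiler.c churn above hoists local variable declarations to the top of their enclosing block so the file compiles as C89; MSVC's C front end rejects declarations that follow statements. A minimal sketch of the pattern, using a hypothetical function rather than VirtualBox code:

#include <stdio.h>

/* C99/GNU style declares at first use; MSVC's C89 mode rejects that,
 * so every local is declared up front and assigned later. */
static int emulate_step(int input)
{
    int rc;        /* was "int rc = ..." further down in the C99 version */
    unsigned i;

    printf("step: input=%d\n", input);

    rc = input * 2;
    for (i = 0; i < 4; i++)
        rc += (int)i;
    return rc;
}

int main(void)
{
    printf("%d\n", emulate_step(21)); /* prints 48 */
    return 0;
}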
trunk/src/recompiler_new/bswap.h
r2422 r13382 79 79 80 80 #ifndef bswap16 /* BSD endian.h clash */ 81 #ifndef VBOX 81 82 static inline uint16_t bswap16(uint16_t x) 83 #else 84 DECLINLINE(uint16_t) bswap16(uint16_t x) 85 #endif 82 86 { 83 87 return bswap_16(x); … … 86 90 87 91 #ifndef bswap32 /* BSD endian.h clash */ 92 #ifndef VBOX 88 93 static inline uint32_t bswap32(uint32_t x) 94 #else 95 DECLINLINE(uint32_t) bswap32(uint32_t x) 96 #endif 89 97 { 90 98 return bswap_32(x); … … 93 101 94 102 #ifndef bswap64 /* BSD endian.h clash. */ 103 #ifndef VBOX 95 104 static inline uint64_t bswap64(uint64_t x) 105 #else 106 DECLINLINE(uint64_t) bswap64(uint64_t x) 107 #endif 96 108 { 97 109 return bswap_64(x); … … 99 111 #endif 100 112 113 #ifndef VBOX 101 114 static inline void bswap16s(uint16_t *s) 115 #else 116 DECLINLINE(void) bswap16s(uint16_t *s) 117 #endif 102 118 { 103 119 *s = bswap16(*s); 104 120 } 105 121 122 #ifndef VBOX 106 123 static inline void bswap32s(uint32_t *s) 124 #else 125 DECLINLINE(void) bswap32s(uint32_t *s) 126 #endif 107 127 { 108 128 *s = bswap32(*s); 109 129 } 110 130 131 #ifndef VBOX 111 132 static inline void bswap64s(uint64_t *s) 133 #else 134 DECLINLINE(void) bswap64s(uint64_t *s) 135 #endif 112 136 { 113 137 *s = bswap64(*s); … … 126 150 #endif 127 151 152 #ifndef VBOX 128 153 #define CPU_CONVERT(endian, size, type)\ 129 154 static inline type endian ## size ## _to_cpu(type v)\ … … 156 181 *p = cpu_to_ ## endian ## size(v);\ 157 182 } 183 #else /* VBOX */ 184 #define CPU_CONVERT(endian, size, type)\ 185 DECLINLINE(type) endian ## size ## _to_cpu(type v)\ 186 {\ 187 return endian ## _bswap(v, size);\ 188 }\ 189 \ 190 DECLINLINE(type) cpu_to_ ## endian ## size(type v)\ 191 {\ 192 return endian ## _bswap(v, size);\ 193 }\ 194 \ 195 DECLINLINE(void) endian ## size ## _to_cpus(type *p)\ 196 {\ 197 endian ## _bswaps(p, size)\ 198 }\ 199 \ 200 DECLINLINE(void) cpu_to_ ## endian ## size ## s(type *p)\ 201 {\ 202 endian ## _bswaps(p, size)\ 203 }\ 204 \ 205 DECLINLINE(type) endian ## size ## _to_cpup(const type *p)\ 206 {\ 207 return endian ## size ## _to_cpu(*p);\ 208 }\ 209 \ 210 DECLINLINE(void) cpu_to_ ## endian ## size ## w(type *p, type v)\ 211 {\ 212 *p = cpu_to_ ## endian ## size(v);\ 213 } 214 #endif /* VBOX */ 158 215 159 216 CPU_CONVERT(be, 16, uint16_t) -
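bswap.h gives every "static inline" helper a DECLINLINE() twin under #ifdef VBOX, since MSVC's C compiler does not understand C99 inline. The real macro comes from IPRT's cdefs.h; the stand-in below only shows the shape of the dispatch:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for DECLINLINE(): pick the inline spelling the compiler accepts.
 * Assumption: MSVC's C mode takes __inline; GCC/Clang take static inline. */
#ifdef _MSC_VER
# define MY_DECLINLINE(type) static __inline type
#else
# define MY_DECLINLINE(type) static inline type
#endif

MY_DECLINLINE(uint16_t) my_bswap16(uint16_t x)
{
    return (uint16_t)((x >> 8) | (x << 8));
}

int main(void)
{
    printf("0x%04x\n", my_bswap16(0x1234)); /* 0x3412 */
    return 0;
}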
trunk/src/recompiler_new/cpu-all.h
r13230 r13382 32 32 #ifdef VBOX 33 33 # ifndef LOG_GROUP 34 # include <VBox/log.h>35 34 # define LOG_GROUP LOG_GROUP_REM 36 35 # endif 36 # include <VBox/log.h> 37 37 # include <VBox/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */ 38 38 #endif … … 95 95 #else 96 96 97 #ifndef VBOX 97 98 static inline uint16_t tswap16(uint16_t s) 99 #else 100 DECLINLINE(uint16_t) tswap16(uint16_t s) 101 #endif 98 102 { 99 103 return s; 100 104 } 101 105 106 #ifndef VBOX 102 107 static inline uint32_t tswap32(uint32_t s) 108 #else 109 DECLINLINE(uint32_t) tswap32(uint32_t s) 110 #endif 103 111 { 104 112 return s; 105 113 } 106 114 115 #ifndef VBOX 107 116 static inline uint64_t tswap64(uint64_t s) 117 #else 118 DECLINLINE(uint64_t) tswap64(uint64_t s) 119 #endif 108 120 { 109 121 return s; 110 122 } 111 123 124 #ifndef VBOX 112 125 static inline void tswap16s(uint16_t *s) 113 { 114 } 115 126 #else 127 DECLINLINE(void) tswap16s(uint16_t *s) 128 #endif 129 { 130 } 131 132 #ifndef VBOX 116 133 static inline void tswap32s(uint32_t *s) 117 { 118 } 119 134 #else 135 DECLINLINE(void) tswap32s(uint32_t *s) 136 #endif 137 { 138 } 139 140 #ifndef VBOX 120 141 static inline void tswap64s(uint64_t *s) 142 #else 143 DECLINLINE(void) tswap64s(uint64_t *s) 144 #endif 121 145 { 122 146 } … … 249 273 #endif 250 274 251 static inline intldub_p(void *ptr)275 DECLINLINE(int) ldub_p(void *ptr) 252 276 { 253 277 VBOX_CHECK_ADDR(ptr); … … 255 279 } 256 280 257 static inline intldsb_p(void *ptr)281 DECLINLINE(int) ldsb_p(void *ptr) 258 282 { 259 283 VBOX_CHECK_ADDR(ptr); … … 261 285 } 262 286 263 static inline voidstb_p(void *ptr, int v)287 DECLINLINE(void) stb_p(void *ptr, int v) 264 288 { 265 289 VBOX_CHECK_ADDR(ptr); … … 267 291 } 268 292 269 static inline intlduw_le_p(void *ptr)293 DECLINLINE(int) lduw_le_p(void *ptr) 270 294 { 271 295 VBOX_CHECK_ADDR(ptr); … … 273 297 } 274 298 275 static inline intldsw_le_p(void *ptr)299 DECLINLINE(int) ldsw_le_p(void *ptr) 276 300 { 277 301 VBOX_CHECK_ADDR(ptr); … … 279 303 } 280 304 281 static inline voidstw_le_p(void *ptr, int v)305 DECLINLINE(void) stw_le_p(void *ptr, int v) 282 306 { 283 307 VBOX_CHECK_ADDR(ptr); … … 285 309 } 286 310 287 static inline intldl_le_p(void *ptr)311 DECLINLINE(int) ldl_le_p(void *ptr) 288 312 { 289 313 VBOX_CHECK_ADDR(ptr); … … 291 315 } 292 316 293 static inline voidstl_le_p(void *ptr, int v)317 DECLINLINE(void) stl_le_p(void *ptr, int v) 294 318 { 295 319 VBOX_CHECK_ADDR(ptr); … … 297 321 } 298 322 299 static inline voidstq_le_p(void *ptr, uint64_t v)323 DECLINLINE(void) stq_le_p(void *ptr, uint64_t v) 300 324 { 301 325 VBOX_CHECK_ADDR(ptr); … … 303 327 } 304 328 305 static inline uint64_tldq_le_p(void *ptr)329 DECLINLINE(uint64_t) ldq_le_p(void *ptr) 306 330 { 307 331 VBOX_CHECK_ADDR(ptr); … … 313 337 /* float access */ 314 338 315 static inline float32ldfl_le_p(void *ptr)339 DECLINLINE(float32) ldfl_le_p(void *ptr) 316 340 { 317 341 union { … … 323 347 } 324 348 325 static inline voidstfl_le_p(void *ptr, float32 v)349 DECLINLINE(void) stfl_le_p(void *ptr, float32 v) 326 350 { 327 351 union { … … 333 357 } 334 358 335 static inline float64ldfq_le_p(void *ptr)359 DECLINLINE(float64) ldfq_le_p(void *ptr) 336 360 { 337 361 CPU_DoubleU u; 338 362 u.l.lower = ldl_le_p(ptr); 339 u.l.upper = ldl_le_p( ptr + 4);363 u.l.upper = ldl_le_p((uint8_t*)ptr + 4); 340 364 return u.d; 341 365 } 342 366 343 static inline voidstfq_le_p(void *ptr, float64 v)367 DECLINLINE(void) stfq_le_p(void *ptr, float64 v) 344 368 { 345 369 CPU_DoubleU u; 346 370 u.d = v; 347 371 stl_le_p(ptr, 
u.l.lower); 348 stl_le_p( ptr + 4, u.l.upper);372 stl_le_p((uint8_t*)ptr + 4, u.l.upper); 349 373 } 350 374 … … 549 573 #if !defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED) 550 574 575 #ifndef VBOX 551 576 static inline int lduw_be_p(void *ptr) 552 577 { … … 563 588 #endif 564 589 } 565 590 #else /* VBOX */ 591 DECLINLINE(int) lduw_be_p(void *ptr) 592 { 593 #if defined(__i386__) && !defined(_MSC_VER) 594 int val; 595 asm volatile ("movzwl %1, %0\n" 596 "xchgb %b0, %h0\n" 597 : "=q" (val) 598 : "m" (*(uint16_t *)ptr)); 599 return val; 600 #else 601 uint8_t *b = (uint8_t *) ptr; 602 return ((b[0] << 8) | b[1]); 603 #endif 604 } 605 #endif 606 607 #ifndef VBOX 566 608 static inline int ldsw_be_p(void *ptr) 567 609 { … … 578 620 #endif 579 621 } 580 622 #else 623 DECLINLINE(int) ldsw_be_p(void *ptr) 624 { 625 #if defined(__i386__) && !defined(_MSC_VER) 626 int val; 627 asm volatile ("movzwl %1, %0\n" 628 "xchgb %b0, %h0\n" 629 : "=q" (val) 630 : "m" (*(uint16_t *)ptr)); 631 return (int16_t)val; 632 #else 633 uint8_t *b = (uint8_t *) ptr; 634 return (int16_t)((b[0] << 8) | b[1]); 635 #endif 636 } 637 #endif 638 639 #ifndef VBOX 581 640 static inline int ldl_be_p(void *ptr) 582 641 { … … 593 652 #endif 594 653 } 595 654 #else 655 DECLINLINE(int) ldl_be_p(void *ptr) 656 { 657 #if (defined(__i386__) || defined(__x86_64__)) && !defined(_MSC_VER) 658 int val; 659 asm volatile ("movl %1, %0\n" 660 "bswap %0\n" 661 : "=r" (val) 662 : "m" (*(uint32_t *)ptr)); 663 return val; 664 #else 665 uint8_t *b = (uint8_t *) ptr; 666 return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3]; 667 #endif 668 } 669 #endif 670 671 #ifndef VBOX 596 672 static inline uint64_t ldq_be_p(void *ptr) 673 #else 674 DECLINLINE(uint64_t) ldq_be_p(void *ptr) 675 #endif 597 676 { 598 677 uint32_t a,b; 599 678 a = ldl_be_p(ptr); 600 b = ldl_be_p( ptr+4);679 b = ldl_be_p((uint8_t*)ptr+4); 601 680 return (((uint64_t)a<<32)|b); 602 681 } 603 682 683 #ifndef VBOX 604 684 static inline void stw_be_p(void *ptr, int v) 605 685 { … … 615 695 #endif 616 696 } 617 697 #else 698 DECLINLINE(void) stw_be_p(void *ptr, int v) 699 { 700 #if defined(__i386__) && !defined(_MSC_VER) 701 asm volatile ("xchgb %b0, %h0\n" 702 "movw %w0, %1\n" 703 : "=q" (v) 704 : "m" (*(uint16_t *)ptr), "0" (v)); 705 #else 706 uint8_t *d = (uint8_t *) ptr; 707 d[0] = v >> 8; 708 d[1] = v; 709 #endif 710 } 711 712 #endif /* VBOX */ 713 714 #ifndef VBOX 618 715 static inline void stl_be_p(void *ptr, int v) 619 716 { … … 631 728 #endif 632 729 } 633 730 #else 731 DECLINLINE(void) stl_be_p(void *ptr, int v) 732 { 733 #if !defined(_MSC_VER) && (defined(__i386__) || defined(__x86_64__)) 734 asm volatile ("bswap %0\n" 735 "movl %0, %1\n" 736 : "=r" (v) 737 : "m" (*(uint32_t *)ptr), "0" (v)); 738 #else 739 uint8_t *d = (uint8_t *) ptr; 740 d[0] = v >> 24; 741 d[1] = v >> 16; 742 d[2] = v >> 8; 743 d[3] = v; 744 #endif 745 } 746 #endif /* VBOX */ 747 748 #ifndef VBOX 634 749 static inline void stq_be_p(void *ptr, uint64_t v) 750 #else 751 DECLINLINE(void) stq_be_p(void *ptr, uint64_t v) 752 #endif 635 753 { 636 754 stl_be_p(ptr, v >> 32); 637 stl_be_p( ptr + 4, v);755 stl_be_p((uint8_t*)ptr + 4, v); 638 756 } 639 757 640 758 /* float access */ 641 759 #ifndef VBOX 642 760 static inline float32 ldfl_be_p(void *ptr) 761 #else 762 DECLINLINE(float32) ldfl_be_p(void *ptr) 763 #endif 643 764 { 644 765 union { … … 650 771 } 651 772 773 #ifndef VBOX 652 774 static inline void stfl_be_p(void *ptr, float32 v) 775 #else 776 DECLINLINE(void) stfl_be_p(void *ptr, float32 v) 777 #endif 653 
778 { 654 779 union { … … 660 785 } 661 786 787 #ifndef VBOX 662 788 static inline float64 ldfq_be_p(void *ptr) 789 #else 790 DECLINLINE(float64) ldfq_be_p(void *ptr) 791 #endif 663 792 { 664 793 CPU_DoubleU u; 665 794 u.l.upper = ldl_be_p(ptr); 666 u.l.lower = ldl_be_p( ptr + 4);795 u.l.lower = ldl_be_p((uint8_t*)ptr + 4); 667 796 return u.d; 668 797 } 669 798 799 #ifndef VBOX 670 800 static inline void stfq_be_p(void *ptr, float64 v) 801 #else 802 DECLINLINE(void) stfq_be_p(void *ptr, float64 v) 803 #endif 671 804 { 672 805 CPU_DoubleU u; 673 806 u.d = v; 674 807 stl_be_p(ptr, u.l.upper); 675 stl_be_p( ptr + 4, u.l.lower);808 stl_be_p((uint8_t*)ptr + 4, u.l.lower); 676 809 } 677 810 … … 1103 1236 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, 1104 1237 int len, int is_write); 1238 #ifndef VBOX 1105 1239 static inline void cpu_physical_memory_read(target_phys_addr_t addr, 1106 1240 uint8_t *buf, int len) 1241 #else 1242 DECLINLINE(void) cpu_physical_memory_read(target_phys_addr_t addr, 1243 uint8_t *buf, int len) 1244 #endif 1107 1245 { 1108 1246 cpu_physical_memory_rw(addr, buf, len, 0); 1109 1247 } 1248 #ifndef VBOX 1110 1249 static inline void cpu_physical_memory_write(target_phys_addr_t addr, 1111 1250 const uint8_t *buf, int len) 1251 #else 1252 DECLINLINE(void) cpu_physical_memory_write(target_phys_addr_t addr, 1253 const uint8_t *buf, int len) 1254 #endif 1112 1255 { 1113 1256 cpu_physical_memory_rw(addr, (uint8_t *)buf, len, 1); … … 1135 1278 1136 1279 /* read dirty bit (return 0 or 1) */ 1280 #ifndef VBOX 1137 1281 static inline int cpu_physical_memory_is_dirty(ram_addr_t addr) 1138 1282 { 1139 #ifdef VBOX 1283 return phys_ram_dirty[addr >> TARGET_PAGE_BITS] == 0xff; 1284 } 1285 #else 1286 DECLINLINE(int) cpu_physical_memory_is_dirty(ram_addr_t addr) 1287 { 1140 1288 if (RT_UNLIKELY((addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size)) 1141 1289 { … … 1144 1292 return 0; 1145 1293 } 1146 #endif1147 1294 return phys_ram_dirty[addr >> TARGET_PAGE_BITS] == 0xff; 1148 1295 } 1149 1296 #endif 1297 1298 #ifndef VBOX 1150 1299 static inline int cpu_physical_memory_get_dirty(ram_addr_t addr, 1151 1300 int dirty_flags) 1152 1301 { 1153 #ifdef VBOX 1302 return phys_ram_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags; 1303 } 1304 #else 1305 DECLINLINE(int) cpu_physical_memory_get_dirty(ram_addr_t addr, 1306 int dirty_flags) 1307 { 1154 1308 if (RT_UNLIKELY((addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size)) 1155 1309 { … … 1158 1312 return 0xff & dirty_flags; /** @todo I don't think this is the right thing to return, fix! 
*/ 1159 1313 } 1160 #endif1161 1314 return phys_ram_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags; 1162 1315 } 1163 1316 #endif 1317 1318 #ifndef VBOX 1164 1319 static inline void cpu_physical_memory_set_dirty(ram_addr_t addr) 1165 1320 { 1166 #ifdef VBOX 1321 phys_ram_dirty[addr >> TARGET_PAGE_BITS] = 0xff; 1322 } 1323 #else 1324 DECLINLINE(void) cpu_physical_memory_set_dirty(ram_addr_t addr) 1325 { 1167 1326 if (RT_UNLIKELY((addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size)) 1168 1327 { … … 1171 1330 return; 1172 1331 } 1173 #endif1174 1332 phys_ram_dirty[addr >> TARGET_PAGE_BITS] = 0xff; 1175 1333 } 1334 #endif 1176 1335 1177 1336 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end, … … 1189 1348 /* host CPU ticks (if available) */ 1190 1349 1191 #if defined(__powerpc__) 1350 #ifdef VBOX 1351 1352 DECLINLINE(int64_t) cpu_get_real_ticks(void) 1353 { 1354 return ASMReadTSC(); 1355 } 1356 1357 #elif defined(__powerpc__) 1192 1358 1193 1359 static inline uint32_t get_tbl(void) -
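Besides the DECLINLINE conversions, cpu-all.h casts void pointers before arithmetic: ldq_le_p() now reads its high half from (uint8_t*)ptr + 4 instead of ptr + 4, because pointer arithmetic on void* is a GCC extension that MSVC rejects. A self-contained sketch of that fix, with illustrative names:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t my_ldl_le(const void *ptr)
{
    uint32_t v;
    memcpy(&v, ptr, sizeof(v)); /* byte-order handling elided for brevity */
    return v;
}

static uint64_t my_ldq_le(const void *ptr)
{
    uint32_t lo = my_ldl_le(ptr);
    /* ISO C requires a sized pointer before adding; "ptr + 4" on a void*
     * only compiles as a GNU extension. */
    uint32_t hi = my_ldl_le((const uint8_t *)ptr + 4);
    return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
    uint8_t buf[8] = { 1, 0, 0, 0, 2, 0, 0, 0 };
    /* prints 0x200000001 on a little-endian host */
    printf("%#llx\n", (unsigned long long)my_ldq_le(buf));
    return 0;
}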
trunk/src/recompiler_new/cpu-exec.c
r13301 r13382

@@ -169,5 +169,9 @@
 }

+#ifndef VBOX
 static inline TranslationBlock *tb_find_fast(void)
+#else
+DECLINLINE(TranslationBlock *) tb_find_fast(void)
+#endif
 {
     TranslationBlock *tb;
trunk/src/recompiler_new/exec-all.h
r13337 r13382 112 112 target_phys_addr_t paddr, int prot, 113 113 int mmu_idx, int is_softmmu); 114 #ifndef VBOX 114 115 static inline int tlb_set_page(CPUState *env1, target_ulong vaddr, 115 116 target_phys_addr_t paddr, int prot, 116 117 int mmu_idx, int is_softmmu) 118 #else 119 DECLINLINE(int) tlb_set_page(CPUState *env1, target_ulong vaddr, 120 target_phys_addr_t paddr, int prot, 121 int mmu_idx, int is_softmmu) 122 #endif 117 123 { 118 124 if (prot & PAGE_READ) … … 190 196 }; 191 197 198 #ifndef VBOX 192 199 static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc) 200 #else 201 DECLINLINE(unsigned int) tb_jmp_cache_hash_page(target_ulong pc) 202 #endif 193 203 { 194 204 target_ulong tmp; … … 197 207 } 198 208 209 #ifndef VBOX 199 210 static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc) 211 #else 212 DECLINLINE(unsigned int) tb_jmp_cache_hash_func(target_ulong pc) 213 #endif 214 200 215 { 201 216 target_ulong tmp; … … 205 220 } 206 221 222 #ifndef VBOX 207 223 static inline unsigned int tb_phys_hash_func(unsigned long pc) 224 #else 225 DECLINLINE(unsigned int) tb_phys_hash_func(unsigned long pc) 226 #endif 208 227 { 209 228 return pc & (CODE_GEN_PHYS_HASH_SIZE - 1); … … 265 284 266 285 /* set the jump target */ 286 #ifndef VBOX 267 287 static inline void tb_set_jmp_target(TranslationBlock *tb, 268 288 int n, unsigned long addr) 289 #else 290 DECLINLINE(void) tb_set_jmp_target(TranslationBlock *tb, 291 int n, unsigned long addr) 292 #endif 269 293 { 270 294 tb->tb_next[n] = addr; … … 273 297 #endif 274 298 299 #ifndef VBOX 275 300 static inline void tb_add_jump(TranslationBlock *tb, int n, 276 301 TranslationBlock *tb_next) 302 #else 303 DECLINLINE(void) tb_add_jump(TranslationBlock *tb, int n, 304 TranslationBlock *tb_next) 305 #endif 277 306 { 278 307 /* NOTE: this test is only needed for thread safety */ … … 361 390 /* NOTE2: the returned address is not exactly the physical address: it 362 391 is the offset relative to phys_ram_base */ 392 #ifndef VBOX 363 393 static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr) 394 #else 395 DECLINLINE(target_ulong) get_phys_addr_code(CPUState *env, target_ulong addr) 396 #endif 364 397 { 365 398 int is_user, index, pd; … … 381 414 #error unimplemented CPU 382 415 #endif 416 #ifndef VBOX 383 417 if (__builtin_expect(env->tlb_table[is_user][index].addr_code != 384 418 (addr & TARGET_PAGE_MASK), 0)) { 419 #else 420 if (RT_UNLIKELY(env->tlb_table[is_user][index].addr_code != 421 (addr & TARGET_PAGE_MASK))) { 422 #endif 385 423 ldub_code(addr); 386 424 } … … 406 444 /* Deterministic execution requires that IO only be performed on the last 407 445 instruction of a TB so that interrupts take effect immediately. */ 446 #ifndef VBOX 408 447 static inline int can_do_io(CPUState *env) 448 #else 449 DECLINLINE(int) can_do_io(CPUState *env) 450 #endif 409 451 { 410 452 if (!use_icount) -
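exec-all.h also trades GCC's __builtin_expect(cond, 0) for IPRT's RT_UNLIKELY(cond) in the hot TLB check, so the branch hint degrades to a plain condition on compilers without the builtin. A stand-in macro showing the idea:

#include <stdio.h>

/* Stand-in for RT_UNLIKELY: hint the branch on GCC, no-op elsewhere. */
#ifdef __GNUC__
# define MY_UNLIKELY(expr) __builtin_expect(!!(expr), 0)
#else
# define MY_UNLIKELY(expr) (expr)
#endif

static int tlb_check(unsigned cached_tag, unsigned tag)
{
    if (MY_UNLIKELY(cached_tag != tag))
        return -1;  /* slow path: refill (elided) */
    return 0;       /* expected fast path */
}

int main(void)
{
    printf("%d %d\n", tlb_check(5, 5), tlb_check(5, 6)); /* 0 -1 */
    return 0;
}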
trunk/src/recompiler_new/exec.c
r13337 r13382 45 45 # include <stdlib.h> 46 46 # include <stdio.h> 47 # include <inttypes.h>48 47 # include <iprt/alloc.h> 49 48 # include <iprt/string.h> … … 102 101 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED; 103 102 103 #ifndef VBOX 104 104 #if defined(__arm__) || defined(__sparc_v9__) 105 105 /* The prologue must be reachable with a direct jump. ARM and Sparc64 … … 113 113 __attribute__((aligned (32))) 114 114 #endif 115 116 115 uint8_t code_gen_prologue[1024] code_gen_section; 116 117 #else /* VBOX */ 118 ALIGNED_MEMBER(uint8_t, code_gen_prologue[1024], 32); 119 #endif /* VBOX */ 120 117 121 static uint8_t *code_gen_buffer; 118 122 static unsigned long code_gen_buffer_size; … … 331 335 } 332 336 337 #ifndef VBOX 333 338 static inline PageDesc **page_l1_map(target_ulong index) 339 #else 340 DECLINLINE(PageDesc **) page_l1_map(target_ulong index) 341 #endif 334 342 { 335 343 #if TARGET_LONG_BITS > 32 … … 342 350 } 343 351 352 #ifndef VBOX 344 353 static inline PageDesc *page_find_alloc(target_ulong index) 354 #else 355 DECLINLINE(PageDesc *) page_find_alloc(target_ulong index) 356 #endif 345 357 { 346 358 PageDesc **lp, *p; … … 373 385 } 374 386 387 #ifndef VBOX 375 388 static inline PageDesc *page_find(target_ulong index) 389 #else 390 DECLINLINE(PageDesc *) page_find(target_ulong index) 391 #endif 376 392 { 377 393 PageDesc **lp, *p; … … 430 446 } 431 447 448 #ifndef VBOX 432 449 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index) 450 #else 451 DECLINLINE(PhysPageDesc *) phys_page_find(target_phys_addr_t index) 452 #endif 433 453 { 434 454 return phys_page_find_alloc(index, 0); … … 624 644 } 625 645 646 #ifndef VBOX 626 647 static inline void invalidate_page_bitmap(PageDesc *p) 648 #else 649 DECLINLINE(void) invalidate_page_bitmap(PageDesc *p) 650 #endif 627 651 { 628 652 if (p->code_bitmap) { … … 737 761 738 762 /* invalidate one TB */ 763 #ifndef VBOX 739 764 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb, 740 765 int next_offset) 766 #else 767 DECLINLINE(void) tb_remove(TranslationBlock **ptb, TranslationBlock *tb, 768 int next_offset) 769 #endif 741 770 { 742 771 TranslationBlock *tb1; … … 751 780 } 752 781 782 #ifndef VBOX 753 783 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb) 784 #else 785 DECLINLINE(void) tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb) 786 #endif 754 787 { 755 788 TranslationBlock *tb1; … … 768 801 } 769 802 803 #ifndef VBOX 770 804 static inline void tb_jmp_remove(TranslationBlock *tb, int n) 805 #else 806 DECLINLINE(void) tb_jmp_remove(TranslationBlock *tb, int n) 807 #endif 771 808 { 772 809 TranslationBlock *tb1, **ptb; … … 798 835 /* reset the jump entry 'n' of a TB so that it is not chained to 799 836 another TB */ 837 #ifndef VBOX 800 838 static inline void tb_reset_jump(TranslationBlock *tb, int n) 839 #else 840 DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n) 841 #endif 801 842 { 802 843 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n])); … … 907 948 #endif /* VBOX */ 908 949 950 #ifndef VBOX 909 951 static inline void set_bits(uint8_t *tab, int start, int len) 952 #else 953 DECLINLINE(void) set_bits(uint8_t *tab, int start, int len) 954 #endif 910 955 { 911 956 int end, mask, end1; … … 1120 1165 1121 1166 /* len must be <= 8 and start must be a multiple of len */ 1167 #ifndef VBOX 1122 1168 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len) 1169 #else 1170 DECLINLINE(void) 
tb_invalidate_phys_page_fast(target_phys_addr_t start, int len) 1171 #endif 1123 1172 { 1124 1173 PageDesc *p; … … 1218 1267 1219 1268 /* add the tb in the target page and protect it if necessary */ 1269 #ifndef VBOX 1220 1270 static inline void tb_alloc_page(TranslationBlock *tb, 1221 1271 unsigned int n, target_ulong page_addr) 1272 #else 1273 DECLINLINE(void) tb_alloc_page(TranslationBlock *tb, 1274 unsigned int n, target_ulong page_addr) 1275 #endif 1222 1276 { 1223 1277 PageDesc *p; … … 1371 1425 static void tb_reset_jump_recursive(TranslationBlock *tb); 1372 1426 1427 #ifndef VBOX 1373 1428 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n) 1429 #else 1430 DECLINLINE(void) tb_reset_jump_recursive2(TranslationBlock *tb, int n) 1431 #endif 1374 1432 { 1375 1433 TranslationBlock *tb1, *tb_next, **ptb; … … 1748 1806 #if !defined(CONFIG_USER_ONLY) 1749 1807 1808 #ifndef VBOX 1750 1809 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr) 1810 #else 1811 DECLINLINE(void) tlb_flush_jmp_cache(CPUState *env, target_ulong addr) 1812 #endif 1751 1813 { 1752 1814 unsigned int i; … … 1814 1876 } 1815 1877 1878 #ifndef VBOX 1816 1879 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr) 1880 #else 1881 DECLINLINE(void) tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr) 1882 #endif 1817 1883 { 1818 1884 if (addr == (tlb_entry->addr_read & … … 1883 1949 } 1884 1950 1951 #ifndef VBOX 1885 1952 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, 1886 1953 unsigned long start, unsigned long length) 1954 #else 1955 DECLINLINE(void) tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, 1956 unsigned long start, unsigned long length) 1957 #endif 1887 1958 { 1888 1959 unsigned long addr; … … 1968 2039 #endif 1969 2040 2041 #ifndef VBOX 1970 2042 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry) 2043 #else 2044 DECLINLINE(void) tlb_update_dirty(CPUTLBEntry *tlb_entry) 2045 #endif 1971 2046 { 1972 2047 ram_addr_t ram_addr; … … 2006 2081 } 2007 2082 2083 #ifndef VBOX 2008 2084 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr) 2085 #else 2086 DECLINLINE(void) tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr) 2087 #endif 2009 2088 { 2010 2089 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) … … 2015 2094 /* update the TLB corresponding to virtual page vaddr and phys addr 2016 2095 addr so that it is no longer dirty */ 2096 #ifndef VBOX 2017 2097 static inline void tlb_set_dirty(CPUState *env, 2018 2098 unsigned long addr, target_ulong vaddr) 2099 #else 2100 DECLINLINE(void) tlb_set_dirty(CPUState *env, 2101 unsigned long addr, target_ulong vaddr) 2102 #endif 2019 2103 { 2020 2104 int i; -
trunk/src/recompiler_new/hostregs_helper.h
r11982 r13382

@@ -37,19 +37,37 @@
 #if defined(DECLARE_HOST_REGS)

+#ifndef VBOX
 #define DO_REG(REG) \
     register host_reg_t reg_AREG##REG asm(AREG##REG); \
     volatile host_reg_t saved_AREG##REG;
+#else
+#define DO_REG(REG) \
+    REGISTER_BOUND_GLOBAL(host_reg_t, reg_AREG##REG, AREG##REG); \
+    volatile host_reg_t saved_AREG##REG;
+#endif

 #elif defined(SAVE_HOST_REGS)

+#ifndef VBOX
 #define DO_REG(REG) \
     __asm__ __volatile__ ("" : "=r" (reg_AREG##REG)); \
     saved_AREG##REG = reg_AREG##REG;
+#else /* VBOX */
+#define DO_REG(REG) \
+    SAVE_GLOBAL_REGISTER(REG, reg_AREG##REG); \
+    saved_AREG##REG = reg_AREG##REG;
+#endif /* VBOX */

 #else

+#ifndef VBOX
 #define DO_REG(REG) \
     reg_AREG##REG = saved_AREG##REG; \
     __asm__ __volatile__ ("" : : "r" (reg_AREG##REG));
+#else /* VBOX */
+#define DO_REG(REG) \
+    reg_AREG##REG = saved_AREG##REG; \
+    RESTORE_GLOBAL_REGISTER(REG, reg_AREG##REG);
+#endif

 #endif
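The three DO_REG flavors hide GCC's explicit-register globals behind REGISTER_BOUND_GLOBAL / SAVE_GLOBAL_REGISTER / RESTORE_GLOBAL_REGISTER, which osdep.h turns into plain variables and no-ops for MSVC, since MSVC cannot pin a C variable to a register. A hedged sketch of the two sides, assuming an x86-64 host where rbx is callee-saved (the macro name is illustrative, not the VBox one):

#include <stdio.h>

#ifdef _MSC_VER
# define MY_REG_GLOBAL(type, var, reg) type var
#else
# define MY_REG_GLOBAL(type, var, reg) register type var asm(reg)
#endif

/* GCC reserves rbx for this global within the translation unit; on MSVC
 * it is an ordinary global and the speedup is simply lost. */
MY_REG_GLOBAL(long, my_env_ptr, "rbx");

int main(void)
{
    my_env_ptr = 42;
    printf("%ld\n", my_env_ptr);
    return 0;
}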
trunk/src/recompiler_new/osdep.h
r13370 r13382 111 111 112 112 #ifdef __i386__ 113 #ifdef _MSC_VER 114 /** @todo: maybe wrong, or slow */ 115 #define REGPARM 116 #else 113 117 #define REGPARM __attribute((regparm(3))) 118 #endif 114 119 #else 115 120 #define REGPARM … … 123 128 #endif 124 129 130 #ifndef VBOX 125 131 void *qemu_memalign(size_t alignment, size_t size); 126 132 void *qemu_vmalloc(size_t size); 127 133 void qemu_vfree(void *ptr); 128 134 129 #ifndef VBOX130 135 int qemu_create_pidfile(const char *filename); 131 136 … … 144 149 #endif /* !VBOX */ 145 150 151 #ifdef VBOX 152 #ifdef _MSC_VER 153 #define ALIGNED_MEMBER(type, name, bytes) type name 154 #define ALIGNED_MEMBER_DEF(type, name) type name 155 #define PACKED_STRUCT(name) struct name 156 #define REGISTER_BOUND_GLOBAL(type, var, reg) type var 157 #define SAVE_GLOBAL_REGISTER(reg, var) 158 #define RESTORE_GLOBAL_REGISTER(reg, var) 159 #define DECLALWAYSINLINE(type) DECLINLINE(type) 160 #else /* ! _MSC_VER */ 161 #define ALIGNED_MEMBER(type, name, bytes) type name __attribute__((aligned(bytes))) 162 #define ALIGNED_MEMBER_DEF(type, name) type name __attribute__((aligned())) 163 #define PACKED_STRUCT(name) struct __attribute__ ((__packed__)) name 164 #define REGISTER_BOUND_GLOBAL(type, var, reg) register type var asm(reg) 165 #define SAVE_GLOBAL_REGISTER(reg, var) __asm__ __volatile__ ("" : "=r" (var)) 166 #define RESTORE_GLOBAL_REGISTER(reg, var) __asm__ __volatile__ ("" : : "r" (var)) 167 #define DECLALWAYSINLINE(type) static always_inline type 168 #endif /* !_MSC_VER */ 169 #endif /* VBOX */ 170 146 171 #endif -
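osdep.h is where the portability macros used throughout the changeset land. Note that the MSVC branches here simply drop the alignment and packing attributes (ALIGNED_MEMBER degrades to a bare member); a fuller port could translate them with __declspec(align(n)) instead, as in this sketch. The macro below is hypothetical, not the VBox definition:

#include <stdio.h>

#ifdef _MSC_VER
# define MY_ALIGNED_MEMBER(type, name, bytes) __declspec(align(bytes)) type name
#else
# define MY_ALIGNED_MEMBER(type, name, bytes) type name __attribute__((aligned(bytes)))
#endif

typedef struct fpreg {
    MY_ALIGNED_MEMBER(double, d, 16); /* mirrors the CPU86_LDouble member in cpu.h */
} fpreg;

int main(void)
{
    printf("sizeof(fpreg) = %u\n", (unsigned)sizeof(fpreg)); /* 16 with the attribute */
    return 0;
}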
trunk/src/recompiler_new/softmmu_header.h
r13337 r13382 86 86 /* generic store macro */ 87 87 88 static inline voidglue(glue(st, SUFFIX), MEMSUFFIX)(target_ulong ptr, RES_TYPE v)88 DELCINLINE(void) glue(glue(st, SUFFIX), MEMSUFFIX)(target_ulong ptr, RES_TYPE v) 89 89 { 90 90 int index; … … 162 162 /* generic load/store macros */ 163 163 164 #ifndef VBOX 164 165 static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(target_ulong ptr) 166 #else 167 DECLINLINE(RES_TYPE) glue(glue(ld, USUFFIX), MEMSUFFIX)(target_ulong ptr) 168 #endif 165 169 { 166 170 int index; … … 173 177 index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 174 178 is_user = CPU_MMU_INDEX; 179 #ifndef VBOX 175 180 if (__builtin_expect(env->tlb_table[is_user][index].ADDR_READ != 176 181 (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))), 0)) { 182 #else 183 if (RT_UNLIKELY(env->tlb_table[is_user][index].ADDR_READ != 184 (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) { 185 #endif 177 186 res = glue(glue(__ld, SUFFIX), MMUSUFFIX)(addr, is_user); 178 187 } else { … … 184 193 185 194 #if DATA_SIZE <= 2 195 #ifndef VBOX 186 196 static inline int glue(glue(lds, SUFFIX), MEMSUFFIX)(target_ulong ptr) 197 #else 198 DECLINLINE(int) glue(glue(lds, SUFFIX), MEMSUFFIX)(target_ulong ptr) 199 #endif 187 200 { 188 201 int res, index; … … 194 207 index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 195 208 is_user = CPU_MMU_INDEX; 209 #ifndef VBOX 196 210 if (__builtin_expect(env->tlb_table[is_user][index].ADDR_READ != 197 211 (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))), 0)) { 212 #else 213 if (RT_UNLIKELY(env->tlb_table[is_user][index].ADDR_READ != 214 (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) { 215 #endif 198 216 res = (DATA_STYPE)glue(glue(__ld, SUFFIX), MMUSUFFIX)(addr, is_user); 199 217 } else { … … 208 226 209 227 /* generic store macro */ 210 228 #ifndef VBOX 211 229 static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(target_ulong ptr, RES_TYPE v) 230 #else 231 DECLINLINE(void) glue(glue(st, SUFFIX), MEMSUFFIX)(target_ulong ptr, RES_TYPE v) 232 #endif 212 233 { 213 234 int index; … … 219 240 index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 220 241 is_user = CPU_MMU_INDEX; 242 #ifndef VBOX 221 243 if (__builtin_expect(env->tlb_table[is_user][index].addr_write != 222 244 (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))), 0)) { 245 #else 246 if (RT_UNLIKELY(env->tlb_table[is_user][index].addr_write != 247 (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) { 248 #endif 223 249 glue(glue(__st, SUFFIX), MMUSUFFIX)(addr, v, is_user); 224 250 } else { … … 235 261 236 262 #if DATA_SIZE == 8 263 #ifndef VBOX 237 264 static inline float64 glue(ldfq, MEMSUFFIX)(target_ulong ptr) 265 #else 266 DECLINLINE(float64) glue(ldfq, MEMSUFFIX)(target_ulong ptr) 267 #endif 238 268 { 239 269 union { … … 245 275 } 246 276 277 #ifndef VBOX 247 278 static inline void glue(stfq, MEMSUFFIX)(target_ulong ptr, float64 v) 279 #else 280 DECLINLINE(void) glue(stfq, MEMSUFFIX)(target_ulong ptr, float64 v) 281 #endif 248 282 { 249 283 union { … … 257 291 258 292 #if DATA_SIZE == 4 293 #ifndef VBOX 259 294 static inline float32 glue(ldfl, MEMSUFFIX)(target_ulong ptr) 295 #else 296 DECLINLINE(float32) glue(ldfl, MEMSUFFIX)(target_ulong ptr) 297 #endif 260 298 { 261 299 union { … … 267 305 } 268 306 307 #ifndef VBOX 269 308 static inline void glue(stfl, MEMSUFFIX)(target_ulong ptr, float32 v) 309 #else 310 DECLINLINE(void) glue(stfl, MEMSUFFIX)(target_ulong ptr, float32 v) 311 #endif 270 312 { 271 313 union { -
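softmmu_header.h is included once per access size and memory suffix, and the glue() token-pasting macros mint a distinct accessor name for each combination (ldl_raw, lduw_kernel, and so on). A self-contained illustration of the mechanism; the real accessors do TLB lookups rather than pass the value through:

#include <stdio.h>

#define xglue(x, y) x ## y
#define glue(x, y)  xglue(x, y)

#define SUFFIX    l
#define MEMSUFFIX _raw

/* Expands to: static int ldl_raw(int v) { ... } */
static int glue(glue(ld, SUFFIX), MEMSUFFIX)(int v)
{
    return v;
}

int main(void)
{
    printf("%d\n", ldl_raw(7)); /* the pasted name is an ordinary symbol */
    return 0;
}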
trunk/src/recompiler_new/softmmu_template.h
r13337 r13382

@@ -60,7 +60,13 @@
                                          int is_user,
                                          void *retaddr);
+#ifndef VBOX
 static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
                                               target_ulong addr,
                                               void *retaddr)
+#else
+DECLINLINE(DATA_TYPE) glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
+                                            target_ulong addr,
+                                            void *retaddr)
+#endif
 {
     DATA_TYPE res;

@@ -204,8 +210,15 @@
                                           void *retaddr);

+#ifndef VBOX
 static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
                                           DATA_TYPE val,
                                           target_ulong addr,
                                           void *retaddr)
+#else
+DECLINLINE(void) glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
+                                        DATA_TYPE val,
+                                        target_ulong addr,
+                                        void *retaddr)
+#endif
 {
     int index;
trunk/src/recompiler_new/target-i386/cpu.h
r13337 r13382 569 569 union { 570 570 #ifdef USE_X86LDOUBLE 571 #ifndef VBOX 571 572 CPU86_LDouble d __attribute__((aligned(16))); 573 #else 574 ALIGNED_MEMBER(CPU86_LDouble, d, 16); 575 #endif 572 576 #else 573 577 CPU86_LDouble d; … … 729 733 union { 730 734 #ifdef USE_X86LDOUBLE 735 #ifndef VBOX 731 736 CPU86_LDouble d __attribute__((aligned(16))); 737 #else 738 ALIGNED_MEMBER(CPU86_LDouble, d, 16); 739 #endif 732 740 #else 733 741 CPU86_LDouble d; … … 805 813 /* this function must always be used to load data in the segment 806 814 cache: it synchronizes the hflags with the segment cache values */ 815 #ifndef VBOX 807 816 static inline void cpu_x86_load_seg_cache(CPUX86State *env, 808 817 int seg_reg, unsigned int selector, … … 810 819 unsigned int limit, 811 820 unsigned int flags) 821 #else 822 DECLINLINE(void) cpu_x86_load_seg_cache(CPUX86State *env, 823 int seg_reg, unsigned int selector, 824 target_ulong base, 825 unsigned int limit, 826 unsigned int flags) 827 828 #endif 812 829 { 813 830 SegmentCache *sc; … … 866 883 867 884 /* wrapper, just in case memory mappings must be changed */ 885 #ifndef VBOX 868 886 static inline void cpu_x86_set_cpl(CPUX86State *s, int cpl) 887 #else 888 DECLINLINE(void) cpu_x86_set_cpl(CPUX86State *s, int cpl) 889 #endif 869 890 { 870 891 #if HF_CPL_MASK == 3 … … 956 977 #define MMU_MODE1_SUFFIX _user 957 978 #define MMU_USER_IDX 1 979 #ifndef VBOX 958 980 static inline int cpu_mmu_index (CPUState *env) 981 #else 982 DECLINLINE(int) cpu_mmu_index (CPUState *env) 983 #endif 959 984 { 960 985 return (env->hflags & HF_CPL_MASK) == 3 ? 1 : 0; -
trunk/src/recompiler_new/target-i386/exec.h
r13230 r13382 39 39 #include "cpu-defs.h" 40 40 41 #ifndef VBOX 41 42 /* at least 4 register variables are defined */ 42 43 register struct CPUX86State *env asm(AREG0); 43 44 #ifndef VBOX 44 #else 45 REGISTER_BOUND_GLOBAL(struct CPUX86State*, env, AREG0); 46 #endif /* VBOX */ 47 45 48 #include "qemu-log.h" 46 #endif47 49 48 50 #ifndef reg_EAX … … 113 115 114 116 /* n must be a constant to be efficient */ 117 #ifndef VBOX 115 118 static inline target_long lshift(target_long x, int n) 119 #else 120 DECLINLINE(target_long) lshift(target_long x, int n) 121 #endif 116 122 { 117 123 if (n >= 0) … … 123 129 #include "helper.h" 124 130 131 #ifndef VBOX 125 132 static inline void svm_check_intercept(uint32_t type) 133 #else 134 DECLINLINE(void) svm_check_intercept(uint32_t type) 135 #endif 126 136 { 127 137 helper_svm_check_intercept_param(type, 0); … … 139 149 #include "softmmu_exec.h" 140 150 151 #ifndef VBOX 141 152 static inline double ldfq(target_ulong ptr) 153 #else 154 DECLINLINE(double) ldfq(target_ulong ptr) 155 #endif 142 156 { 143 157 union { … … 149 163 } 150 164 165 #ifndef VBOX 151 166 static inline void stfq(target_ulong ptr, double v) 167 #else 168 DECLINLINE(void) stfq(target_ulong ptr, double v) 169 #endif 152 170 { 153 171 union { … … 159 177 } 160 178 179 #ifndef VBOX 161 180 static inline float ldfl(target_ulong ptr) 181 #else 182 DECLINLINE(float) ldfl(target_ulong ptr) 183 #endif 162 184 { 163 185 union { … … 169 191 } 170 192 193 #ifndef VBOX 171 194 static inline void stfl(target_ulong ptr, float v) 195 #else 196 DECLINLINE(void) stfl(target_ulong ptr, float v) 197 #endif 172 198 { 173 199 union { … … 239 265 240 266 #ifdef VBOX 267 #ifndef _MSC_VER 241 268 extern CPU86_LDouble sin(CPU86_LDouble x); 242 269 extern CPU86_LDouble cos(CPU86_LDouble x); … … 248 275 extern CPU86_LDouble floor(CPU86_LDouble x); 249 276 extern CPU86_LDouble ceil(CPU86_LDouble x); 250 #endif 277 #endif /* !_MSC_VER */ 278 #endif /* VBOX */ 251 279 252 280 #define RC_MASK 0xc00 281 #ifndef RC_NEAR 253 282 #define RC_NEAR 0x000 283 #endif 284 #ifndef RC_DOWN 254 285 #define RC_DOWN 0x400 286 #endif 287 #ifndef RC_UP 255 288 #define RC_UP 0x800 289 #endif 290 #ifndef RC_CHOP 256 291 #define RC_CHOP 0xc00 292 #endif 257 293 258 294 #define MAXTAN 9223372036854775808.0 … … 311 347 #endif 312 348 349 #ifndef VBOX 313 350 static inline void fpush(void) 351 #else 352 DECLINLINE(void) fpush(void) 353 #endif 314 354 { 315 355 env->fpstt = (env->fpstt - 1) & 7; … … 317 357 } 318 358 359 #ifndef VBOX 319 360 static inline void fpop(void) 361 #else 362 DECLINLINE(void) fpop(void) 363 #endif 320 364 { 321 365 env->fptags[env->fpstt] = 1; /* invvalidate stack entry */ … … 378 422 /* we use memory access macros */ 379 423 424 #ifndef VBOX 380 425 static inline CPU86_LDouble helper_fldt(target_ulong ptr) 426 #else 427 DECLINLINE(CPU86_LDouble) helper_fldt(target_ulong ptr) 428 #endif 381 429 { 382 430 CPU86_LDoubleU temp; … … 387 435 } 388 436 437 #ifndef VBOX 389 438 static inline void helper_fstt(CPU86_LDouble f, target_ulong ptr) 439 #else 440 DECLINLINE(void) helper_fstt(CPU86_LDouble f, target_ulong ptr) 441 #endif 390 442 { 391 443 CPU86_LDoubleU temp; … … 422 474 extern const uint8_t rclb_table[32]; 423 475 476 #ifndef VBOX 424 477 static inline uint32_t compute_eflags(void) 478 #else 479 DECLINLINE(uint32_t) compute_eflags(void) 480 #endif 425 481 { 426 482 return env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK); … … 428 484 429 485 /* NOTE: CC_OP must be modified manually to CC_OP_EFLAGS */ 486 
#ifndef VBOX 430 487 static inline void load_eflags(int eflags, int update_mask) 488 #else 489 DECLINLINE(void) load_eflags(int eflags, int update_mask) 490 #endif 431 491 { 432 492 CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); … … 436 496 } 437 497 498 #ifndef VBOX 438 499 static inline void env_to_regs(void) 500 #else 501 DECLINLINE(void) env_to_regs(void) 502 #endif 439 503 { 440 504 #ifdef reg_EAX … … 464 528 } 465 529 530 #ifndef VBOX 466 531 static inline void regs_to_env(void) 532 #else 533 DECLINLINE(void) regs_to_env(void) 534 #endif 467 535 { 468 536 #ifdef reg_EAX … … 492 560 } 493 561 562 #ifndef VBOX 494 563 static inline int cpu_halted(CPUState *env) { 564 #else 565 DECLINLINE(int) cpu_halted(CPUState *env) { 566 #endif 495 567 /* handle exit of HALTED state */ 496 568 if (!env->halted) … … 508 580 /* load efer and update the corresponding hflags. XXX: do consistency 509 581 checks with cpuid bits ? */ 582 #ifndef VBOX 510 583 static inline void cpu_load_efer(CPUState *env, uint64_t val) 584 #else 585 DECLINLINE(void) cpu_load_efer(CPUState *env, uint64_t val) 586 #endif 511 587 { 512 588 env->efer = val; -
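The RC_NEAR/RC_DOWN/RC_UP/RC_CHOP defines in target-i386/exec.h gain #ifndef guards, presumably because MSVC's <float.h> already defines legacy rounding-control macros with those names; the guard lets a prior definition win instead of triggering a redefinition warning. The pattern in isolation:

#include <stdio.h>

/* Guard each define so a pre-existing definition (e.g. from a system
 * header) takes precedence. Values match the changeset. */
#define RC_MASK 0xc00
#ifndef RC_NEAR
# define RC_NEAR 0x000
#endif
#ifndef RC_DOWN
# define RC_DOWN 0x400
#endif
#ifndef RC_UP
# define RC_UP   0x800
#endif
#ifndef RC_CHOP
# define RC_CHOP 0xc00
#endif

int main(void)
{
    printf("mask=%#x chop=%#x\n", RC_MASK, RC_CHOP);
    return 0;
}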
trunk/src/recompiler_new/translate-all.c
r13358 r13382 31 31 #include <stdio.h> 32 32 #include <string.h> 33 #include <inttypes.h>34 33 35 34 #include "config.h"