- Timestamp: May 5, 2008 11:50:50 AM
- Location: trunk/src
- Files: 4 edited
trunk/src/VBox/VMM/CFGM.cpp
(r8155 → r8564)

@@ -143,5 +143,5 @@
     }
     else
-        NoDmik(AssertMsgFailed(("Constructor failed with rc=%Vrc pfnCFGMConstructor=%p\n", rc, pfnCFGMConstructor)));
+        NOT_DMIK(AssertMsgFailed(("Constructor failed with rc=%Vrc pfnCFGMConstructor=%p\n", rc, pfnCFGMConstructor)));
 
     return rc;
trunk/src/VBox/VMM/PDMDevice.cpp
(r8155 → r8564)

@@ -942,5 +942,5 @@
     if (VBOX_FAILURE(rc))
     {
-        NoDmik(AssertMsgFailed(("Failed to construct '%s'/%d! %Vra\n", pDevIns->pDevReg->szDeviceName, pDevIns->iInstance, rc)));
+        NOT_DMIK(AssertMsgFailed(("Failed to construct '%s'/%d! %Vra\n", pDevIns->pDevReg->szDeviceName, pDevIns->iInstance, rc)));
         /* because we're damn lazy right now, we'll say that the destructor will be called even if the constructor fails. */
         return rc;
trunk/src/VBox/VMM/VM.cpp
(r8155 → r8564)

@@ -241,5 +241,5 @@
         default:
             pszError = N_("Unknown error creating VM");
-            NoDmik(AssertMsgFailed(("Add error message for rc=%d (%Vrc)\n", rc, rc)));
+            NOT_DMIK(AssertMsgFailed(("Add error message for rc=%d (%Vrc)\n", rc, rc)));
             break;
     }
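Across all three VMM files the edit is the same mechanical rename: the wrapper macro NoDmik becomes NOT_DMIK, and the wrapped AssertMsgFailed() statements are otherwise untouched (the same rename appears once more in the recompiler, below). The changeset does not show the macro's definition, so the following is only an illustrative sketch of how a compile-out wrapper of this kind is commonly written; the VBOX_WITH_DMIK guard name is an assumption made for the example, not taken from the source.

    /* Illustrative sketch only -- the real NOT_DMIK definition is not part of
     * this changeset.  A wrapper of this kind typically keeps the wrapped
     * statement in normal builds and compiles it out when a particular build
     * flag is defined, so assertion/log noise can be switched off per build. */
    #ifdef VBOX_WITH_DMIK                       /* assumed flag name, illustration only */
    # define NOT_DMIK(expr)  do { } while (0)   /* wrapped statement is suppressed */
    #else
    # define NOT_DMIK(expr)  expr               /* wrapped statement is kept as-is */
    #endif

With a definition along these lines, renaming the wrapper at its call sites is purely textual and does not change behaviour in either configuration.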
trunk/src/recompiler/target-i386/helper.c
(r8450 → r8564)

The only functional change in this file is the same macro rename, in raise_interrupt():

@@ -1375,3 +1375,3 @@
 #if defined(VBOX) && defined(DEBUG)
-    NoDmik(Log2(("raise_interrupt: %x %x %x %08x\n", intno, is_int, error_code, env->eip + next_eip_addend)));
+    NOT_DMIK(Log2(("raise_interrupt: %x %x %x %08x\n", intno, is_int, error_code, env->eip + next_eip_addend)));
 #endif

All remaining hunks in this file are whitespace-only: they strip trailing spaces and change no code. The cleanup runs through the whole file, including the header comment, the rclw_table/rclb_table constants, the segment and task-switch helpers (load_seg, switch_tss), the interrupt and SMM paths (do_interrupt, do_interrupt_user, do_smm_enter and the SMM resume code), the syscall/sysret and sysenter/sysexit paths, the descriptor-table and system-register helpers (helper_lldt_T0, helper_ltr_T0, helper_movl_crN_T0, helper_wrmsr), the FPU helpers (helper_fdiv, helper_fscale) and FXSAVE/FXRSTOR code, and the VBox raw-mode helpers (get_ss_esp_from_tss_raw, the selector synchronization routines, and the single-instruction emulation path).