Changeset 48221 in vbox
- Timestamp: Sep 1, 2013, 11:27:56 PM
- Location: trunk/src/VBox/VMM
- Files: 4 edited
Legend:
- Unmodified: no prefix
- Added: prefixed with +
- Removed: prefixed with -
trunk/src/VBox/VMM/Makefile.kmk (r48132 → r48221)

 ifdef VBOX_WITH_RAW_RING1
  VMM_COMMON_DEFS += VBOX_WITH_RAW_RING1
+endif
+ifdef VBOX_WITH_64ON32_IDT
+ VMM_COMMON_DEFS += VBOX_WITH_64ON32_IDT
 endif
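For context: entries in VMM_COMMON_DEFS become preprocessor defines for both the C/C++ and assembly parts of the VMM, so this one makefile hunk is what switches every "# ifdef VBOX_WITH_64ON32_IDT" / "%ifdef VBOX_WITH_64ON32_IDT" block below on or off together. A minimal sketch of the consumer side, assuming the define simply arrives as a -D compiler flag (the test program is illustrative, not VirtualBox code):

    #include <stdio.h>

    int main(void)
    {
    #ifdef VBOX_WITH_64ON32_IDT         /* set via VMM_COMMON_DEFS -> -DVBOX_WITH_64ON32_IDT */
        puts("64-on-32 debug IDT: compiled in");
    #else
        puts("64-on-32 debug IDT: compiled out");
    #endif
        return 0;
    }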
trunk/src/VBox/VMM/VMMR3/VMMSwitcher.cpp (r45786 → r48221)

+# ifdef VBOX_WITH_64ON32_IDT
+/**
+ * Initializes the 64-bit IDT for 64-bit guest on 32-bit host switchers.
+ *
+ * This is only used as a debugging aid when we cannot find out why something
+ * goes haywire in the intermediate context.
+ *
+ * @param   pVM         The cross context VM structure.
+ * @param   pSwitcher   The switcher descriptor.
+ * @param   pbDst       Where the switcher code was just copied.
+ * @param   HCPhysDst   The host physical address corresponding to @a pbDst.
+ */
+static void vmmR3Switcher32On64IdtInit(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pbDst, RTHCPHYS HCPhysDst)
+{
+    AssertRelease(pSwitcher->offGCCode > 0 && pSwitcher->offGCCode < pSwitcher->cbCode);
+    AssertRelease(pSwitcher->cbCode < _64K);
+    RTSEL uCs64 = SELMGetHyperCS64(pVM);
+
+    PX86DESC64GATE paIdt = (PX86DESC64GATE)(pbDst + pSwitcher->offGCCode);
+    for (uint32_t i = 0 ; i < 256; i++)
+    {
+        AssertRelease(((uint64_t *)&paIdt[i])[0] < pSwitcher->cbCode);
+        AssertRelease(((uint64_t *)&paIdt[i])[1] == 0);
+        uint64_t uHandler = HCPhysDst + paIdt[i].u16OffsetLow;
+        paIdt[i].u16OffsetLow  = (uint16_t)uHandler;
+        paIdt[i].u16Sel        = uCs64;
+        paIdt[i].u3IST         = 0;
+        paIdt[i].u5Reserved    = 0;
+        paIdt[i].u4Type        = AMD64_SEL_TYPE_SYS_INT_GATE;
+        paIdt[i].u1DescType    = 0 /* system */;
+        paIdt[i].u2Dpl         = 3;
+        paIdt[i].u1Present     = 1;
+        paIdt[i].u16OffsetHigh = (uint16_t)(uHandler >> 16);
+        paIdt[i].u32Reserved   = (uint32_t)(uHandler >> 32);
+    }
+
+    for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
+    {
+        uint64_t uIdtr = HCPhysDst + pSwitcher->offGCCode; AssertRelease(uIdtr < UINT32_MAX);
+        CPUMSetHyperIDTR(&pVM->aCpus[iCpu], uIdtr, 16*256 + iCpu);
+    }
+}
+
+
+/**
+ * Relocates the 64-bit IDT for 64-bit guest on 32-bit host switchers.
+ *
+ * @param   pVM         The cross context VM structure.
+ * @param   pSwitcher   The switcher descriptor.
+ * @param   pbDst       Where the switcher code was just copied.
+ * @param   HCPhysDst   The host physical address corresponding to @a pbDst.
+ */
+static void vmmR3Switcher32On64IdtRelocate(PVM pVM, PVMMSWITCHERDEF pSwitcher, uint8_t *pbDst, RTHCPHYS HCPhysDst)
+{
+    AssertRelease(pSwitcher->offGCCode > 0 && pSwitcher->offGCCode < pSwitcher->cbCode && pSwitcher->cbCode < _64K);
+
+    /* The intermediate context doesn't move, but the CS may. */
+    RTSEL uCs64 = SELMGetHyperCS64(pVM);
+    PX86DESC64GATE paIdt = (PX86DESC64GATE)(pbDst + pSwitcher->offGCCode);
+    for (uint32_t i = 0 ; i < 256; i++)
+        paIdt[i].u16Sel = uCs64;
+
+    /* Just in case... */
+    for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
+    {
+        uint64_t uIdtr = HCPhysDst + pSwitcher->offGCCode; AssertRelease(uIdtr < UINT32_MAX);
+        CPUMSetHyperIDTR(&pVM->aCpus[iCpu], uIdtr, 16*256 + iCpu);
+    }
+}
+# endif /* VBOX_WITH_64ON32_IDT */
+
+
 /**
  * VMMR3Init worker that initiates the switcher code (aka core code).
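A note on what the init loop is doing: the assembled IDT template (the vmm64On32Idt table added to LegacyandAMD64.mac further down) stores image-relative handler offsets in the low qword of each entry, which is what the two AssertReleases check; the loop then rebases each offset to the physical load address and splits it across the three offset fields of an AMD64 interrupt gate. A stand-alone sketch of that per-entry fixup, using a simplified stand-in for X86DESC64GATE (a packed attribute word replaces the source's bitfields):

    #include <stdint.h>

    /* Simplified stand-in for X86DESC64GATE; one 16-byte gate per vector. */
    typedef struct IDTGATE64SKETCH
    {
        uint16_t u16OffsetLow;      /* handler address bits 0..15  */
        uint16_t u16Sel;            /* 64-bit hypervisor code selector */
        uint16_t u16Attrs;          /* IST=0, type=0xE (int gate), system, DPL=3, P=1 */
        uint16_t u16OffsetHigh;     /* handler address bits 16..31 */
        uint32_t u32OffsetTop;      /* handler address bits 32..63 */
        uint32_t u32Reserved;
    } IDTGATE64SKETCH;

    static void SketchInitGate(IDTGATE64SKETCH *pGate, uint64_t uHandler, uint16_t uCs64)
    {
        pGate->u16OffsetLow  = (uint16_t)uHandler;
        pGate->u16Sel        = uCs64;
        pGate->u16Attrs      = 0xEE00;  /* same bits as the u4Type/u2Dpl/u1Present fields above */
        pGate->u16OffsetHigh = (uint16_t)(uHandler >> 16);
        pGate->u32OffsetTop  = (uint32_t)(uHandler >> 32);
        pGate->u32Reserved   = 0;
    }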
…
 {
     /*
-     * copy the code.
+     * Copy the code.
      */
     for (unsigned iSwitcher = 0; iSwitcher < VMMSWITCHER_MAX; iSwitcher++)
…
         PVMMSWITCHERDEF pSwitcher = papSwitchers[iSwitcher];
         if (pSwitcher)
-            memcpy((uint8_t *)pVM->vmm.s.pvCoreCodeR3 + pVM->vmm.s.aoffSwitchers[iSwitcher],
-                   pSwitcher->pvCode, pSwitcher->cbCode);
+        {
+            uint8_t *pbDst = (uint8_t *)pVM->vmm.s.pvCoreCodeR3 + pVM->vmm.s.aoffSwitchers[iSwitcher];
+            memcpy(pbDst, pSwitcher->pvCode, pSwitcher->cbCode);
+# ifdef VBOX_WITH_64ON32_IDT
+            if (   pSwitcher->enmType == VMMSWITCHER_32_TO_AMD64
+                || pSwitcher->enmType == VMMSWITCHER_PAE_TO_AMD64)
+                vmmR3Switcher32On64IdtInit(pVM, pSwitcher, pbDst,
+                                           pVM->vmm.s.HCPhysCoreCode + pVM->vmm.s.aoffSwitchers[iSwitcher]);
+# endif
+        }
     }

…
                                      pVM->vmm.s.pvCoreCodeRC + off,
                                      pVM->vmm.s.HCPhysCoreCode + off);
+# ifdef VBOX_WITH_64ON32_IDT
+            if (   pSwitcher->enmType == VMMSWITCHER_32_TO_AMD64
+                || pSwitcher->enmType == VMMSWITCHER_PAE_TO_AMD64)
+                vmmR3Switcher32On64IdtRelocate(pVM, pSwitcher,
+                                               (uint8_t *)pVM->vmm.s.pvCoreCodeR3 + off,
+                                               pVM->vmm.s.HCPhysCoreCode + off);
+# endif
         }
     }
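One non-obvious detail in the C hunks above is the IDTR limit passed to CPUMSetHyperIDTR: a 256-entry 64-bit IDT only needs a limit of 16*256 - 1, so 16*256 + iCpu always covers every gate, and the excess encodes the virtual CPU number for later recovery via SIDT (the assembly below keeps a disabled %if 0 variant of that lookup). A hedged sketch of the encode/decode pair (the helper names are mine, not VirtualBox API):

    #include <stdint.h>

    /* Encode: anything >= 16*256 - 1 keeps all 256 gates addressable, so the
     * vCPU index rides along in the slack above the minimum limit. */
    static inline uint16_t SketchEncodeIdtrLimit(uint32_t iCpu)
    {
        return (uint16_t)(16 * 256 + iCpu);
    }

    /* Decode: a trap handler can SIDT into a scratch buffer, read the limit
     * word back, and subtract the base size to recover the vCPU index. */
    static inline uint32_t SketchDecodeVCpuFromLimit(uint16_t uLimit)
    {
        return (uint32_t)uLimit - 16 * 256;
    }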
trunk/src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac (r47844 → r48221)

 %endif

+%ifdef VBOX_WITH_64ON32_IDT
+    ; Set up emergency trap handlers.
+    lidt    [rdx + CPUMCPU.Hyper.idtr]
+%endif

     ; load the hypervisor function address
…
     vmwrite rax, rdx

-    sub     rsp, 8*2
-    sgdt    [rsp]
+    sub     rsp, 16
+    sgdt    [rsp + 6]                   ; (The 64-bit base should be aligned, not the word.)
     mov     eax, VMX_VMCS_HOST_GDTR_BASE
-    vmwrite rax, [rsp+2]
-    add     rsp, 8*2
+    vmwrite rax, [rsp + 6 + 2]
+    add     rsp, 16
+
+%ifdef VBOX_WITH_64ON32_IDT
+    sub     rsp, 16
+    sidt    [rsp + 6]
+    mov     eax, VMX_VMCS_HOST_IDTR_BASE
+    vmwrite rax, [rsp + 6 + 2]          ; [rsi + CPUMCPU.Hyper.idtr + 2] - why doesn't this work?
+    add     rsp, 16
+    ;call   NAME(vmm64On32PrintIdtr)
+%endif

 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
…
 ALIGNCODE(16)
 .vmlaunch64_done:
+%if 0 ;fixme later - def VBOX_WITH_64ON32_IDT
+    push    rdx
+    mov     rdx, [rsp + 8]              ; pCtx
+    lidt    [rdx + CPUMCPU.Hyper.idtr]
+    pop     rdx
+%endif
     jc      near .vmstart64_invalid_vmcs_ptr
     jz      near .vmstart64_start_failed
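The GDTR/IDTR hunk above fixes an alignment wrinkle: sgdt/sidt store a 10-byte blob (2-byte limit followed by an 8-byte base), so storing it at [rsp + 6] within a 16-byte scratch slot puts the 64-bit base at rsp + 8, naturally aligned, and [rsp + 6 + 2] is where vmwrite then picks the base up. A C picture of that layout (the struct is illustrative, not from the source):

    #include <stddef.h>
    #include <stdint.h>

    #pragma pack(1)
    typedef struct DTRSLOTSKETCH
    {
        uint8_t  abUnused[6];   /* rsp+0 .. rsp+5: padding below the store */
        uint16_t u16Limit;      /* rsp+6: first thing "sgdt [rsp + 6]" writes */
        uint64_t u64Base;       /* rsp+8: the 8-byte base, now 8-byte aligned */
    } DTRSLOTSKETCH;
    #pragma pack()

    _Static_assert(offsetof(DTRSLOTSKETCH, u64Base) == 8, "base lands at rsp+8");
    _Static_assert(sizeof(DTRSLOTSKETCH) == 16, "matches the sub rsp, 16 scratch slot");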
…

+%ifdef VBOX_WITH_64ON32_IDT
+;
+; Trap handling.
+;
+
+;; Here follows an array of trap handler entry points, 8 byte in size.
+BEGINPROC vmm64On32TrapHandlers
+%macro vmm64On32TrapEntry 1
+GLOBALNAME vmm64On32Trap %+ i
+    db      06ah, i                     ; push imm8 - note that this is a signextended value.
+    jmp     NAME(%1)
+    ALIGNCODE(8)
+%assign i i+1
+%endmacro
+%assign i 0 ; start counter.
+    vmm64On32TrapEntry vmm64On32Trap        ; 0
+    vmm64On32TrapEntry vmm64On32Trap        ; 1
+    vmm64On32TrapEntry vmm64On32Trap        ; 2
+    vmm64On32TrapEntry vmm64On32Trap        ; 3
+    vmm64On32TrapEntry vmm64On32Trap        ; 4
+    vmm64On32TrapEntry vmm64On32Trap        ; 5
+    vmm64On32TrapEntry vmm64On32Trap        ; 6
+    vmm64On32TrapEntry vmm64On32Trap        ; 7
+    vmm64On32TrapEntry vmm64On32TrapErrCode ; 8
+    vmm64On32TrapEntry vmm64On32Trap        ; 9
+    vmm64On32TrapEntry vmm64On32TrapErrCode ; a
+    vmm64On32TrapEntry vmm64On32TrapErrCode ; b
+    vmm64On32TrapEntry vmm64On32TrapErrCode ; c
+    vmm64On32TrapEntry vmm64On32TrapErrCode ; d
+    vmm64On32TrapEntry vmm64On32TrapErrCode ; e
+    vmm64On32TrapEntry vmm64On32Trap        ; f (reserved)
+    vmm64On32TrapEntry vmm64On32Trap        ; 10
+    vmm64On32TrapEntry vmm64On32TrapErrCode ; 11
+    vmm64On32TrapEntry vmm64On32Trap        ; 12
+    vmm64On32TrapEntry vmm64On32Trap        ; 13
+%rep (0x100 - 0x14)
+    vmm64On32TrapEntry vmm64On32Trap
+%endrep
+ENDPROC vmm64On32TrapHandlers
+
+;; Fake an error code and jump to the real thing.
+BEGINPROC vmm64On32Trap
+    push    qword [rsp]
+    jmp     NAME(vmm64On32TrapErrCode)
+ENDPROC vmm64On32Trap
+
+
+;;
+; Trap frame:
+;   [rbp + 38h] = ss
+;   [rbp + 30h] = rsp
+;   [rbp + 28h] = eflags
+;   [rbp + 20h] = cs
+;   [rbp + 18h] = rip
+;   [rbp + 10h] = error code (or trap number)
+;   [rbp + 08h] = trap number
+;   [rbp + 00h] = rbp
+;   [rbp - 08h] = rax
+;   [rbp - 10h] = rbx
+;   [rbp - 18h] = ds
+;
+BEGINPROC vmm64On32TrapErrCode
+    push    rbp
+    mov     rbp, rsp
+    push    rax
+    push    rbx
+    mov     ax, ds
+    push    rax
+    sub     rsp, 20h
+
+    mov     ax, cs
+    mov     ds, ax
+
+%if 1
+    COM64_S_NEWLINE
+    COM64_S_CHAR '!'
+    COM64_S_CHAR 't'
+    COM64_S_CHAR 'r'
+    COM64_S_CHAR 'a'
+    COM64_S_CHAR 'p'
+    movzx   eax, byte [rbp + 08h]
+    COM64_S_DWORD_REG eax
+    COM64_S_CHAR '!'
+%endif
+
+%if 0 ;; @todo Figure the offset of the CPUMCPU relative to CPUM
+    sidt    [rsp]
+    movsx   eax, word [rsp]
+    shr     eax, 12                     ; div by 16 * 256 (0x1000).
+%else
+    ; hardcoded VCPU(0) for now...
+    mov     rbx, [NAME(pCpumIC) wrt rip]
+    mov     eax, [rbx + CPUM.offCPUMCPU0]
+%endif
+    push    rax                         ; Save the offset for rbp later.
+
+    add     rbx, rax                    ; rbx = CPUMCPU
+
+    ;
+    ; Deal with recursive traps due to vmxoff (lazy bird).
+    ;
+    lea     rax, [.vmxoff_trap_location wrt rip]
+    cmp     rax, [rbp + 18h]
+    je      .not_vmx_root
+
+    ;
+    ; Save the context.
+    ;
+    mov     rax, [rbp - 8]
+    mov     [rbx + CPUMCPU.Hyper.eax], rax
+    mov     [rbx + CPUMCPU.Hyper.ecx], rcx
+    mov     [rbx + CPUMCPU.Hyper.edx], rdx
+    mov     rax, [rbp - 10h]
+    mov     [rbx + CPUMCPU.Hyper.ebx], rax
+    mov     rax, [rbp]
+    mov     [rbx + CPUMCPU.Hyper.ebp], rax
+    mov     rax, [rbp + 30h]
+    mov     [rbx + CPUMCPU.Hyper.esp], rax
+    mov     [rbx + CPUMCPU.Hyper.edi], rdi
+    mov     [rbx + CPUMCPU.Hyper.esi], rsi
+    mov     [rbx + CPUMCPU.Hyper.r8], r8
+    mov     [rbx + CPUMCPU.Hyper.r9], r9
+    mov     [rbx + CPUMCPU.Hyper.r10], r10
+    mov     [rbx + CPUMCPU.Hyper.r11], r11
+    mov     [rbx + CPUMCPU.Hyper.r12], r12
+    mov     [rbx + CPUMCPU.Hyper.r13], r13
+    mov     [rbx + CPUMCPU.Hyper.r14], r14
+    mov     [rbx + CPUMCPU.Hyper.r15], r15
+
+    mov     rax, [rbp + 18h]
+    mov     [rbx + CPUMCPU.Hyper.eip], rax
+    movzx   ax, [rbp + 20h]
+    mov     [rbx + CPUMCPU.Hyper.cs.Sel], ax
+    mov     ax, [rbp + 38h]
+    mov     [rbx + CPUMCPU.Hyper.ss.Sel], ax
+    mov     ax, [rbp - 18h]
+    mov     [rbx + CPUMCPU.Hyper.ds.Sel], ax
+
+    mov     rax, [rbp + 28h]
+    mov     [rbx + CPUMCPU.Hyper.eflags], rax
+
+    mov     rax, cr2
+    mov     [rbx + CPUMCPU.Hyper.cr2], rax
+
+    mov     rax, [rbp + 10h]
+    mov     [rbx + CPUMCPU.Hyper.r14], rax ; r14 = error code
+    movzx   eax, byte [rbp + 08h]
+    mov     [rbx + CPUMCPU.Hyper.r15], rax ; r15 = trap number
+
+    ;
+    ; Finally, leave VMX root operation before trying to return to the host.
+    ;
+    mov     rax, cr4
+    test    rax, X86_CR4_VMXE
+    jz      .not_vmx_root
+.vmxoff_trap_location:
+    vmxoff
+.not_vmx_root:
+
+    ;
+    ; Go back to the host.
+    ;
+    pop     rbp
+    mov     dword [rbx + CPUMCPU.u32RetCode], VERR_TRPM_DONT_PANIC
+    jmp     NAME(vmmRCToHostAsm)
+ENDPROC vmm64On32TrapErrCode
+
+;; We allocate the IDT here to avoid having to allocate memory separately somewhere.
+ALIGNCODE(16)
+GLOBALNAME vmm64On32Idt
+%assign i 0
+%rep 256
+    dq NAME(vmm64On32Trap %+ i) - NAME(Start) ; Relative trap handler offsets.
+    dq 0
+%assign i (i + 1)
+%endrep
+
+
+%if 0
+;; For debugging purposes.
+BEGINPROC vmm64On32PrintIdtr
+    push    rax
+    push    rsi                         ; paranoia
+    push    rdi                         ; ditto
+    sub     rsp, 16
+
+    COM64_S_CHAR ';'
+    COM64_S_CHAR 'i'
+    COM64_S_CHAR 'd'
+    COM64_S_CHAR 't'
+    COM64_S_CHAR 'r'
+    COM64_S_CHAR '='
+    sidt    [rsp + 6]
+    mov     eax, [rsp + 8 + 4]
+    COM64_S_DWORD_REG eax
+    mov     eax, [rsp + 8]
+    COM64_S_DWORD_REG eax
+    COM64_S_CHAR ':'
+    movzx   eax, word [rsp + 6]
+    COM64_S_DWORD_REG eax
+    COM64_S_CHAR '!'
+
+    add     rsp, 16
+    pop     rdi
+    pop     rsi
+    pop     rax
+    ret
+ENDPROC vmm64On32PrintIdtr
+%endif
+
+%endif ; VBOX_WITH_64ON32_IDT
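The "db 06ah, i" in vmm64On32TrapEntry hand-assembles a push imm8, which sign-extends, so vectors 0x80..0xFF land on the stack as negative 64-bit values; that is harmless because vmm64On32TrapErrCode reads the trap number back with a byte-sized load (movzx eax, byte [rbp + 08h]). A quick C check of that round trip:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned const auVectors[] = { 0x00, 0x0d, 0x7f, 0x80, 0xff };
        for (unsigned i = 0; i < sizeof(auVectors) / sizeof(auVectors[0]); i++)
        {
            int64_t iPushed    = (int8_t)auVectors[i];  /* what "push imm8" stores */
            uint8_t bRecovered = (uint8_t)iPushed;      /* what the byte-sized movzx reads */
            printf("vector %#04x -> on stack %#018llx -> recovered %#04x\n",
                   auVectors[i], (unsigned long long)(uint64_t)iPushed, bRecovered);
        }
        return 0;
    }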
…
 ; been messing with the guest at all.
 ;
-; @param    eax     Return code.
-; @uses     eax, edx, ecx (or it may use them in the future)
+; @param    rbp     The virtual cpu number.
+; @param
 ;
 BITS 64
…

 ;;
-;; When we arrive at this label we're at the
-;; intermediate mapping of the switching code.
+;; When we arrive at this label we're at the host mapping of the
+;; switcher code, but with intermediate page tables.
 ;;
 BITS 32
 ALIGNCODE(16)
 GLOBALNAME ICExitTarget
-    DEBUG32_CHAR('8')
+    DEBUG32_CHAR('9')

     ; load the hypervisor data selector into ds & es
…
     mov     ds, eax
     mov     es, eax
+    DEBUG32_CHAR('a')

     FIXUP FIX_GC_CPUM_OFF, 1, 0
     mov     edx, 0ffffffffh
     CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
+
+    DEBUG32_CHAR('b')
     mov     esi, [edx + CPUMCPU.Host.cr3]
     mov     cr3, esi
+    DEBUG32_CHAR('c')

     ;; now we're in host memory context, let's restore regs
…
     mov     edx, 0ffffffffh
     CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
+    DEBUG32_CHAR('e')

     ; restore the host EFER
…
     mov     eax, [ebx + CPUMCPU.Host.efer]
     mov     edx, [ebx + CPUMCPU.Host.efer + 4]
+    DEBUG32_CHAR('f')
     wrmsr
     mov     edx, ebx
+    DEBUG32_CHAR('g')

     ; activate host gdt and idt
…
     at VMMSWITCHERDEF.offIDCode1,   dd NAME(IDExitTarget)   - NAME(Start)
     at VMMSWITCHERDEF.cbIDCode1,    dd NAME(ICExitTarget)   - NAME(Start)
+%ifdef VBOX_WITH_64ON32_IDT ; Hack! Use offGCCode to find the IDT.
+    at VMMSWITCHERDEF.offGCCode,    dd NAME(vmm64On32Idt) - NAME(Start)
+%else
     at VMMSWITCHERDEF.offGCCode,    dd 0
+%endif
     at VMMSWITCHERDEF.cbGCCode,     dd 0
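The descriptor change at the end is the glue that makes the C-side fixups possible: since the 64-on-32 switchers have no guest-context code, the otherwise-zero offGCCode field is repurposed (per the "Hack!" comment) to carry the offset of the vmm64On32Idt template within the switcher image, which is exactly where vmmR3Switcher32On64IdtInit/...Relocate look for it. A hedged sketch of that lookup (the struct is a trimmed stand-in for VMMSWITCHERDEF, not the real layout):

    #include <stdint.h>

    typedef struct SWITCHERDEFSKETCH
    {
        void    *pvCode;        /* assembled switcher image (template) */
        uint32_t cbCode;        /* size of the image */
        uint32_t offGCCode;     /* with VBOX_WITH_64ON32_IDT: offset of vmm64On32Idt */
    } SWITCHERDEFSKETCH;

    /* Same arithmetic the IDT init/relocate functions apply to the copied image. */
    static inline void *SketchFindIdt(uint8_t *pbDst, const SWITCHERDEFSKETCH *pSwitcher)
    {
        return pbDst + pSwitcher->offGCCode;
    }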
trunk/src/VBox/VMM/include/CPUMInternal.mac (r47845 → r48221)

 ;;
 ; Converts the CPUM pointer to CPUMCPU
-; @param %1   register name (PVM)
+; @param %1   register name (CPUM)
 ; @param %2   register name (CPUMCPU offset)
 %macro CPUMCPU_FROM_CPUM_WITH_OFFSET 2
…
 ;;
 ; Converts the CPUMCPU pointer to CPUM
-; @param %1   register name (PVM)
+; @param %1   register name (CPUM)
 ; @param %2   register name (CPUMCPU offset)
 %macro CPUM_FROM_CPUMCPU_WITH_OFFSET 2
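The corrected comments now match what the macros actually do: they convert between the shared CPUM structure and a per-vCPU CPUMCPU structure by adding or subtracting a byte offset held in a second register (as in the CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp uses in LegacyandAMD64.mac above). The same conversion in C, with stand-in opaque types (hypothetical helpers, not VirtualBox API):

    #include <stdint.h>

    static inline void *SketchCpumCpuFromCpum(void *pCpum, uintptr_t offCpumCpu)
    {
        return (uint8_t *)pCpum + offCpumCpu;       /* CPUMCPU_FROM_CPUM_WITH_OFFSET */
    }

    static inline void *SketchCpumFromCpumCpu(void *pCpumCpu, uintptr_t offCpumCpu)
    {
        return (uint8_t *)pCpumCpu - offCpumCpu;    /* CPUM_FROM_CPUMCPU_WITH_OFFSET */
    }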