Changeset 48221 in vbox for trunk/src/VBox/VMM/VMMSwitcher
- Timestamp: Sep 1, 2013, 11:27:56 PM
- svn:sync-xref-src-repo-rev: 88611
- File: 1 edited
Legend:
- Unmodified: no prefix
- Added: prefixed with "+"
- Removed: prefixed with "-"
trunk/src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac
r47844 → r48221

 %endif

+%ifdef VBOX_WITH_64ON32_IDT
+    ; Set up emergency trap handlers.
+    lidt    [rdx + CPUMCPU.Hyper.idtr]
+%endif

     ; load the hypervisor function address
…
     vmwrite rax, rdx

-    sub     rsp, 8*2
-    sgdt    [rsp]
+    sub     rsp, 16
+    sgdt    [rsp + 6]                   ; (The 64-bit base should be aligned, not the word.)
     mov     eax, VMX_VMCS_HOST_GDTR_BASE
-    vmwrite rax, [rsp+2]
-    add     rsp, 8*2
+    vmwrite rax, [rsp + 6 + 2]
+    add     rsp, 16
+
+%ifdef VBOX_WITH_64ON32_IDT
+    sub     rsp, 16
+    sidt    [rsp + 6]
+    mov     eax, VMX_VMCS_HOST_IDTR_BASE
+    vmwrite rax, [rsp + 6 + 2]          ; [rsi + CPUMCPU.Hyper.idtr + 2] - why doesn't this work?
+    add     rsp, 16
+    ;call   NAME(vmm64On32PrintIdtr)
+%endif

 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
…
 ALIGNCODE(16)
 .vmlaunch64_done:
+%if 0 ;fixme later - def VBOX_WITH_64ON32_IDT
+    push    rdx
+    mov     rdx, [rsp + 8]              ; pCtx
+    lidt    [rdx + CPUMCPU.Hyper.idtr]
+    pop     rdx
+%endif
     jc      near .vmstart64_invalid_vmcs_ptr
     jz      near .vmstart64_start_failed
…


+%ifdef VBOX_WITH_64ON32_IDT
+;
+; Trap handling.
+;
+
+;; Here follows an array of trap handler entry points, 8 byte in size.
+BEGINPROC vmm64On32TrapHandlers
+%macro vmm64On32TrapEntry 1
+GLOBALNAME vmm64On32Trap %+ i
+    db      06ah, i                     ; push imm8 - note that this is a signextended value.
+    jmp     NAME(%1)
+    ALIGNCODE(8)
+%assign i i+1
+%endmacro
+%assign i 0 ; start counter.
+    vmm64On32TrapEntry vmm64On32Trap            ; 0
+    vmm64On32TrapEntry vmm64On32Trap            ; 1
+    vmm64On32TrapEntry vmm64On32Trap            ; 2
+    vmm64On32TrapEntry vmm64On32Trap            ; 3
+    vmm64On32TrapEntry vmm64On32Trap            ; 4
+    vmm64On32TrapEntry vmm64On32Trap            ; 5
+    vmm64On32TrapEntry vmm64On32Trap            ; 6
+    vmm64On32TrapEntry vmm64On32Trap            ; 7
+    vmm64On32TrapEntry vmm64On32TrapErrCode     ; 8
+    vmm64On32TrapEntry vmm64On32Trap            ; 9
+    vmm64On32TrapEntry vmm64On32TrapErrCode     ; a
+    vmm64On32TrapEntry vmm64On32TrapErrCode     ; b
+    vmm64On32TrapEntry vmm64On32TrapErrCode     ; c
+    vmm64On32TrapEntry vmm64On32TrapErrCode     ; d
+    vmm64On32TrapEntry vmm64On32TrapErrCode     ; e
+    vmm64On32TrapEntry vmm64On32Trap            ; f (reserved)
+    vmm64On32TrapEntry vmm64On32Trap            ; 10
+    vmm64On32TrapEntry vmm64On32TrapErrCode     ; 11
+    vmm64On32TrapEntry vmm64On32Trap            ; 12
+    vmm64On32TrapEntry vmm64On32Trap            ; 13
+%rep (0x100 - 0x14)
+    vmm64On32TrapEntry vmm64On32Trap
+%endrep
+ENDPROC vmm64On32TrapHandlers
+
+;; Fake an error code and jump to the real thing.
+BEGINPROC vmm64On32Trap
+    push    qword [rsp]
+    jmp     NAME(vmm64On32TrapErrCode)
+ENDPROC vmm64On32Trap
+
+
+;;
+; Trap frame:
+;   [rbp + 38h] = ss
+;   [rbp + 30h] = rsp
+;   [rbp + 28h] = eflags
+;   [rbp + 20h] = cs
+;   [rbp + 18h] = rip
+;   [rbp + 10h] = error code (or trap number)
+;   [rbp + 08h] = trap number
+;   [rbp + 00h] = rbp
+;   [rbp - 08h] = rax
+;   [rbp - 10h] = rbx
+;   [rbp - 18h] = ds
+;
+BEGINPROC vmm64On32TrapErrCode
+    push    rbp
+    mov     rbp, rsp
+    push    rax
+    push    rbx
+    mov     ax, ds
+    push    rax
+    sub     rsp, 20h
+
+    mov     ax, cs
+    mov     ds, ax
+
+%if 1
+    COM64_S_NEWLINE
+    COM64_S_CHAR '!'
+    COM64_S_CHAR 't'
+    COM64_S_CHAR 'r'
+    COM64_S_CHAR 'a'
+    COM64_S_CHAR 'p'
+    movzx   eax, byte [rbp + 08h]
+    COM64_S_DWORD_REG eax
+    COM64_S_CHAR '!'
+%endif
+
+%if 0 ;; @todo Figure the offset of the CPUMCPU relative to CPUM
+    sidt    [rsp]
+    movsx   eax, word [rsp]
+    shr     eax, 12                     ; div by 16 * 256 (0x1000).
+%else
+    ; hardcoded VCPU(0) for now...
+    mov     rbx, [NAME(pCpumIC) wrt rip]
+    mov     eax, [rbx + CPUM.offCPUMCPU0]
+%endif
+    push    rax                         ; Save the offset for rbp later.
+
+    add     rbx, rax                    ; rbx = CPUMCPU
+
+    ;
+    ; Deal with recursive traps due to vmxoff (lazy bird).
+    ;
+    lea     rax, [.vmxoff_trap_location wrt rip]
+    cmp     rax, [rbp + 18h]
+    je      .not_vmx_root
+
+    ;
+    ; Save the context.
+    ;
+    mov     rax, [rbp - 8]
+    mov     [rbx + CPUMCPU.Hyper.eax], rax
+    mov     [rbx + CPUMCPU.Hyper.ecx], rcx
+    mov     [rbx + CPUMCPU.Hyper.edx], rdx
+    mov     rax, [rbp - 10h]
+    mov     [rbx + CPUMCPU.Hyper.ebx], rax
+    mov     rax, [rbp]
+    mov     [rbx + CPUMCPU.Hyper.ebp], rax
+    mov     rax, [rbp + 30h]
+    mov     [rbx + CPUMCPU.Hyper.esp], rax
+    mov     [rbx + CPUMCPU.Hyper.edi], rdi
+    mov     [rbx + CPUMCPU.Hyper.esi], rsi
+    mov     [rbx + CPUMCPU.Hyper.r8], r8
+    mov     [rbx + CPUMCPU.Hyper.r9], r9
+    mov     [rbx + CPUMCPU.Hyper.r10], r10
+    mov     [rbx + CPUMCPU.Hyper.r11], r11
+    mov     [rbx + CPUMCPU.Hyper.r12], r12
+    mov     [rbx + CPUMCPU.Hyper.r13], r13
+    mov     [rbx + CPUMCPU.Hyper.r14], r14
+    mov     [rbx + CPUMCPU.Hyper.r15], r15
+
+    mov     rax, [rbp + 18h]
+    mov     [rbx + CPUMCPU.Hyper.eip], rax
+    movzx   ax, [rbp + 20h]
+    mov     [rbx + CPUMCPU.Hyper.cs.Sel], ax
+    mov     ax, [rbp + 38h]
+    mov     [rbx + CPUMCPU.Hyper.ss.Sel], ax
+    mov     ax, [rbp - 18h]
+    mov     [rbx + CPUMCPU.Hyper.ds.Sel], ax
+
+    mov     rax, [rbp + 28h]
+    mov     [rbx + CPUMCPU.Hyper.eflags], rax
+
+    mov     rax, cr2
+    mov     [rbx + CPUMCPU.Hyper.cr2], rax
+
+    mov     rax, [rbp + 10h]
+    mov     [rbx + CPUMCPU.Hyper.r14], rax      ; r14 = error code
+    movzx   eax, byte [rbp + 08h]
+    mov     [rbx + CPUMCPU.Hyper.r15], rax      ; r15 = trap number
+
+    ;
+    ; Finally, leave VMX root operation before trying to return to the host.
+    ;
+    mov     rax, cr4
+    test    rax, X86_CR4_VMXE
+    jz      .not_vmx_root
+.vmxoff_trap_location:
+    vmxoff
+.not_vmx_root:
+
+    ;
+    ; Go back to the host.
+    ;
+    pop     rbp
+    mov     dword [rbx + CPUMCPU.u32RetCode], VERR_TRPM_DONT_PANIC
+    jmp     NAME(vmmRCToHostAsm)
+ENDPROC vmm64On32TrapErrCode
+
+;; We allocate the IDT here to avoid having to allocate memory separately somewhere.
+ALIGNCODE(16)
+GLOBALNAME vmm64On32Idt
+%assign i 0
+%rep 256
+    dq NAME(vmm64On32Trap %+ i) - NAME(Start)   ; Relative trap handler offsets.
+    dq 0
+%assign i (i + 1)
+%endrep
+
+
+%if 0
+;; For debugging purposes.
+BEGINPROC vmm64On32PrintIdtr
+    push    rax
+    push    rsi                         ; paranoia
+    push    rdi                         ; ditto
+    sub     rsp, 16
+
+    COM64_S_CHAR ';'
+    COM64_S_CHAR 'i'
+    COM64_S_CHAR 'd'
+    COM64_S_CHAR 't'
+    COM64_S_CHAR 'r'
+    COM64_S_CHAR '='
+    sidt    [rsp + 6]
+    mov     eax, [rsp + 8 + 4]
+    COM64_S_DWORD_REG eax
+    mov     eax, [rsp + 8]
+    COM64_S_DWORD_REG eax
+    COM64_S_CHAR ':'
+    movzx   eax, word [rsp + 6]
+    COM64_S_DWORD_REG eax
+    COM64_S_CHAR '!'
+
+    add     rsp, 16
+    pop     rdi
+    pop     rsi
+    pop     rax
+    ret
+ENDPROC vmm64On32PrintIdtr
+%endif
+
+%endif ; VBOX_WITH_64ON32_IDT
+


…
 ; been messing with the guest at all.
 ;
-; @param    eax     Return code.
-; @uses     eax, edx, ecx (or it may use them in the future)
+; @param    rbp     The virtual cpu number.
+; @param
 ;
 BITS 64
…

 ;;
-;; When we arrive at this label we're at the
-;; intermediate mapping of the switching code.
+;; When we arrive at this label we're at the host mapping of the
+;; switcher code, but with intermediate page tables.
 ;;
 BITS 32
 ALIGNCODE(16)
 GLOBALNAME ICExitTarget
-    DEBUG32_CHAR('8')
+    DEBUG32_CHAR('9')

     ; load the hypervisor data selector into ds & es
…
     mov     ds, eax
     mov     es, eax
+    DEBUG32_CHAR('a')

     FIXUP FIX_GC_CPUM_OFF, 1, 0
     mov     edx, 0ffffffffh
     CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
+
+    DEBUG32_CHAR('b')
     mov     esi, [edx + CPUMCPU.Host.cr3]
     mov     cr3, esi
+    DEBUG32_CHAR('c')

     ;; now we're in host memory context, let's restore regs
…
     mov     edx, 0ffffffffh
     CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
+    DEBUG32_CHAR('e')

     ; restore the host EFER
…
     mov     eax, [ebx + CPUMCPU.Host.efer]
     mov     edx, [ebx + CPUMCPU.Host.efer + 4]
+    DEBUG32_CHAR('f')
     wrmsr
     mov     edx, ebx
+    DEBUG32_CHAR('g')

     ; activate host gdt and idt
…
     at VMMSWITCHERDEF.offIDCode1,       dd NAME(IDExitTarget)   - NAME(Start)
     at VMMSWITCHERDEF.cbIDCode1,        dd NAME(ICExitTarget)   - NAME(Start)
+%ifdef VBOX_WITH_64ON32_IDT ; Hack! Use offGCCode to find the IDT.
+    at VMMSWITCHERDEF.offGCCode,        dd NAME(vmm64On32Idt)   - NAME(Start)
+%else
     at VMMSWITCHERDEF.offGCCode,        dd 0
+%endif
     at VMMSWITCHERDEF.cbGCCode,         dd 0
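The sgdt/sidt change in the VMX host-state setup is an alignment fix: in 64-bit mode SGDT and SIDT store a 10-byte pseudo-descriptor (a 16-bit limit followed by a 64-bit base), so writing it at [rsp + 6] after reserving 16 bytes puts the base field at [rsp + 8], on an 8-byte boundary, and that is the slot read back at [rsp + 6 + 2] for VMWRITE. A minimal sketch of the same idiom with no VMX involved (NASM 64-bit code; illustrative only, not part of the changeset):

    sub     rsp, 16                 ; scratch space; rsp keeps its 16-byte alignment
    sgdt    [rsp + 6]               ; [rsp+6] = 16-bit limit, [rsp+8] = 64-bit base
    mov     rax, [rsp + 6 + 2]      ; rax = GDTR base, read from an aligned qword
    movzx   edx, word [rsp + 6]     ; edx = GDTR limit
    add     rsp, 16                 ; release the scratch space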
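Most of the new code is the VBOX_WITH_64ON32_IDT trap machinery: an array of 256 fixed-size entry stubs (vmm64On32TrapHandlers), each of which pushes its own vector number and jumps to a shared handler, plus an IDT image (vmm64On32Idt) whose low qwords hold handler addresses as offsets relative to NAME(Start), so they can be fixed up wherever the switcher ends up mapped (the "Hack! Use offGCCode to find the IDT" entry in VMMSWITCHERDEF publishes the image's offset for that purpose). The fragment below is a standalone sketch of the same pattern under the same 8-bytes-per-stub assumption. The names (TRAP_STUB, trap_stubs, common_trap, trap_idt_image) are illustrative rather than the VirtualBox ones, and unlike the changeset it does not distinguish the vectors that push a CPU error code (8, 0ah-0eh, 11h), for which the real code jumps to vmm64On32TrapErrCode and otherwise fakes the error code with "push qword [rsp]".

BITS 64

%assign i 0
%macro TRAP_STUB 1
trap_stub_ %+ i:
    db      06ah, i                 ; push imm8 - the vector number (sign-extended)
    jmp     %1                      ; continue in the shared handler
    align   8                       ; pad so consecutive entries are exactly 8 bytes apart
%assign i i + 1
%endmacro

align 8
trap_stubs:                         ; the stub for vector n lives at trap_stubs + n*8
%rep 256
    TRAP_STUB common_trap
%endrep

common_trap:                        ; the pushed vector number is on top of the stack
    ; a real handler would save registers here and act on the vector
    add     rsp, 8                  ; drop the vector number again
    iretq

; IDT image: a relative offset in the low qword of each 16-byte slot, to be
; converted into proper 64-bit interrupt gates by whoever installs the table.
align 16
trap_idt_image:
%assign i 0
%rep 256
    dq      trap_stub_ %+ i - trap_stubs
    dq      0
%assign i i + 1
%endrep

Keeping every stub exactly 8 bytes is what lets both tables be generated from one %rep counter and keeps the whole arrangement position-independent until load time.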