Changeset 57493 in vbox for trunk/src/VBox/VMM/VMMR0
- Timestamp: Aug 21, 2015 11:54:00 AM
- Location: trunk/src/VBox/VMM/VMMR0
- Files: 1 deleted, 1 edited
Legend (block markers used in the diff below):
- [unmodified]: lines present in both r57429 and r57493
- [added]: lines added in r57493
- [removed]: lines removed from r57429
trunk/src/VBox/VMM/VMMR0/HMR0A.asm
r57429 → r57493

[unmodified]
;

[removed]
;*******************************************************************************
;* Header Files*
;*******************************************************************************
[added]
;*********************************************************************************************************************************
;* Header Files *
;*********************************************************************************************************************************
[unmodified]
%include "VBox/asmdefs.mac"
%include "VBox/err.mac"
…
%endif

[removed]
;*******************************************************************************
;* Defined Constants And Macros *
;*******************************************************************************
%ifdef RT_OS_DARWIN
%ifdef RT_ARCH_AMD64
;;
[added]
;*********************************************************************************************************************************
;* Defined Constants And Macros *
;*********************************************************************************************************************************
;; The offset of the XMM registers in X86FXSTATE.
; Use define because I'm too lazy to convert the struct.
%define XMM_OFF_IN_X86FXSTATE 160

;;
; Determine skipping restoring of GDTR, IDTR, TR across VMX non-root operation
;
%ifdef RT_ARCH_AMD64
%define VMX_SKIP_GDTR
%define VMX_SKIP_TR
%define VBOX_SKIP_RESTORE_SEG
%ifdef RT_OS_DARWIN
[unmodified]
; Load the NULL selector into DS, ES, FS and GS on 64-bit darwin so we don't
; risk loading a stale LDT value or something invalid.
%define HM_64_BIT_USE_NULL_SEL
[removed]
%endif
%endif

%ifdef RT_ARCH_AMD64
%define VBOX_SKIP_RESTORE_SEG
%endif

;; The offset of the XMM registers in X86FXSTATE.
; Use define because I'm too lazy to convert the struct.
%define XMM_OFF_IN_X86FXSTATE 160

[added]
; Darwin (Mavericks) uses IDTR limit to store the CPU Id so we need to restore it always.
; See @bugref{6875}.
%else
%define VMX_SKIP_IDTR
%endif
%endif
[unmodified]

;; @def MYPUSHAD
…
%endmacro

[removed]

;*******************************************************************************
;* External Symbols *
;*******************************************************************************
[added]
%ifdef RT_ARCH_AMD64
%define MYPUSHAD MYPUSHAD64
%define MYPOPAD MYPOPAD64
%define MYPUSHSEGS MYPUSHSEGS64
%define MYPOPSEGS MYPOPSEGS64
%else
%define MYPUSHAD MYPUSHAD32
%define MYPOPAD MYPOPAD32
%define MYPUSHSEGS MYPUSHSEGS32
%define MYPOPSEGS MYPOPSEGS32
%endif


;*********************************************************************************************************************************
;* External Symbols *
;*********************************************************************************************************************************
[unmodified]
%ifdef VBOX_WITH_KERNEL_USING_XMM
extern NAME(CPUMIsGuestFPUStateActive)
…
%endif ; VBOX_WITH_KERNEL_USING_XMM

[removed]
;
; The default setup of the StartVM routines.
;
%define MY_NAME(name) name
%ifdef RT_ARCH_AMD64
%define MYPUSHAD MYPUSHAD64
%define MYPOPAD MYPOPAD64
%define MYPUSHSEGS MYPUSHSEGS64
%define MYPOPSEGS MYPOPSEGS64
%else
%define MYPUSHAD MYPUSHAD32
%define MYPOPAD MYPOPAD32
%define MYPUSHSEGS MYPUSHSEGS32
%define MYPOPSEGS MYPOPSEGS32
%endif

%include "HMR0Mixed.mac"

[added]

;; @def RESTORE_STATE_VM32
; Macro restoring essential host state and updating guest state
; for common host, 32-bit guest for VT-x.
%macro RESTORE_STATE_VM32 0
; Restore base and limit of the IDTR & GDTR.
%ifndef VMX_SKIP_IDTR
lidt [xSP]
add xSP, xCB * 2
%endif
%ifndef VMX_SKIP_GDTR
lgdt [xSP]
add xSP, xCB * 2
%endif

push xDI
%ifndef VMX_SKIP_TR
mov xDI, [xSP + xCB * 3] ; pCtx (*3 to skip the saved xDI, TR, LDTR).
%else
mov xDI, [xSP + xCB * 2] ; pCtx (*2 to skip the saved xDI, LDTR).
%endif

mov [ss:xDI + CPUMCTX.eax], eax
mov [ss:xDI + CPUMCTX.ebx], ebx
mov [ss:xDI + CPUMCTX.ecx], ecx
mov [ss:xDI + CPUMCTX.edx], edx
mov [ss:xDI + CPUMCTX.esi], esi
mov [ss:xDI + CPUMCTX.ebp], ebp
mov xAX, cr2
mov [ss:xDI + CPUMCTX.cr2], xAX

%ifdef RT_ARCH_AMD64
pop xAX ; The guest edi we pushed above.
mov dword [ss:xDI + CPUMCTX.edi], eax
%else
pop dword [ss:xDI + CPUMCTX.edi] ; The guest edi we pushed above.
%endif

%ifndef VMX_SKIP_TR
; Restore TSS selector; must mark it as not busy before using ltr (!)
; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
; @todo get rid of sgdt
pop xBX ; Saved TR
sub xSP, xCB * 2
sgdt [xSP]
mov xAX, xBX
and eax, X86_SEL_MASK_OFF_RPL ; Mask away TI and RPL bits leaving only the descriptor offset.
add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
and dword [ss:xAX + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
ltr bx
add xSP, xCB * 2
%endif

pop xAX ; Saved LDTR
%ifdef RT_ARCH_AMD64
cmp eax, 0
je %%skip_ldt_write32
%endif
lldt ax

%%skip_ldt_write32:
add xSP, xCB ; pCtx

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
pop xDX ; Saved pCache

; Note! If we get here as a result of invalid VMCS pointer, all the following
; vmread's will fail (only eflags.cf=1 will be set) but that shouldn't cause any
; trouble only just less efficient.
mov ecx, [ss:xDX + VMCSCACHE.Read.cValidEntries]
cmp ecx, 0 ; Can't happen
je %%no_cached_read32
jmp %%cached_read32

ALIGN(16)
%%cached_read32:
dec xCX
mov eax, [ss:xDX + VMCSCACHE.Read.aField + xCX * 4]
vmread [ss:xDX + VMCSCACHE.Read.aFieldVal + xCX * 8], xAX
cmp xCX, 0
jnz %%cached_read32
%%no_cached_read32:
%endif

; Restore segment registers.
MYPOPSEGS xAX, ax

; Restore the host XCR0 if necessary.
pop xCX
test ecx, ecx
jnz %%xcr0_after_skip
pop xAX
pop xDX
xsetbv ; ecx is already zero.
%%xcr0_after_skip:

; Restore general purpose registers.
MYPOPAD
%endmacro


;;
; Prepares for and executes VMLAUNCH/VMRESUME (32 bits guest mode)
;
; @returns VBox status code
; @param fResume x86:[ebp+8], msc:rcx,gcc:rdi Whether to use vmlauch/vmresume.
; @param pCtx x86:[ebp+c], msc:rdx,gcc:rsi Pointer to the guest-CPU context.
; @param pCache x86:[ebp+10],msc:r8, gcc:rdx Pointer to the VMCS cache.
; @param pVM x86:[ebp+14],msc:r9, gcc:rcx Pointer to the cross context VM structure.
; @param pVCpu x86:[ebp+18],msc:[ebp+30],gcc:r8 Pointer to the cross context VMCPU structure.
;
ALIGNCODE(16)
BEGINPROC VMXR0StartVM32
push xBP
mov xBP, xSP

pushf
cli

;
; Save all general purpose host registers.
;
MYPUSHAD

;
; First we have to write some final guest CPU context registers.
;
mov eax, VMX_VMCS_HOST_RIP
%ifdef RT_ARCH_AMD64
lea r10, [.vmlaunch_done wrt rip]
vmwrite rax, r10
%else
mov ecx, .vmlaunch_done
vmwrite eax, ecx
%endif
; Note: assumes success!

;
; Unify input parameter registers.
;
%ifdef RT_ARCH_AMD64
%ifdef ASM_CALL64_GCC
; fResume already in rdi
; pCtx already in rsi
mov rbx, rdx ; pCache
%else
mov rdi, rcx ; fResume
mov rsi, rdx ; pCtx
mov rbx, r8 ; pCache
%endif
%else
mov edi, [ebp + 8] ; fResume
mov esi, [ebp + 12] ; pCtx
mov ebx, [ebp + 16] ; pCache
%endif

;
; Save the host XCR0 and load the guest one if necessary.
; Note! Trashes rdx and rcx.
;
%ifdef ASM_CALL64_MSC
mov rax, [xBP + 30h] ; pVCpu
%elifdef ASM_CALL64_GCC
mov rax, r8 ; pVCpu
%else
mov eax, [xBP + 18h] ; pVCpu
%endif
test byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
jz .xcr0_before_skip

xor ecx, ecx
xgetbv ; Save the host one on the stack.
push xDX
push xAX

mov eax, [xSI + CPUMCTX.aXcr] ; Load the guest one.
mov edx, [xSI + CPUMCTX.aXcr + 4]
xor ecx, ecx ; paranoia
xsetbv

push 0 ; Indicate that we must restore XCR0 (popped into ecx, thus 0).
jmp .xcr0_before_done

.xcr0_before_skip:
push 3fh ; indicate that we need not.
.xcr0_before_done:

;
; Save segment registers.
; Note! Trashes rdx & rcx, so we moved it here (amd64 case).
;
MYPUSHSEGS xAX, ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
cmp ecx, 0
je .no_cached_writes
mov edx, ecx
mov ecx, 0
jmp .cached_write

ALIGN(16)
.cached_write:
mov eax, [xBX + VMCSCACHE.Write.aField + xCX * 4]
vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX * 8]
inc xCX
cmp xCX, xDX
jl .cached_write

mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
.no_cached_writes:

; Save the pCache pointer.
push xBX
%endif

; Save the pCtx pointer.
push xSI

; Save host LDTR.
xor eax, eax
sldt ax
push xAX

%ifndef VMX_SKIP_TR
; The host TR limit is reset to 0x67; save & restore it manually.
str eax
push xAX
%endif

%ifndef VMX_SKIP_GDTR
; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
sub xSP, xCB * 2
sgdt [xSP]
%endif
%ifndef VMX_SKIP_IDTR
sub xSP, xCB * 2
sidt [xSP]
%endif

; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
mov xBX, [xSI + CPUMCTX.cr2]
mov xDX, cr2
cmp xBX, xDX
je .skip_cr2_write32
mov cr2, xBX

.skip_cr2_write32:
mov eax, VMX_VMCS_HOST_RSP
vmwrite xAX, xSP
; Note: assumes success!
; Don't mess with ESP anymore!!!

; Load guest general purpose registers.
mov eax, [xSI + CPUMCTX.eax]
mov ebx, [xSI + CPUMCTX.ebx]
mov ecx, [xSI + CPUMCTX.ecx]
mov edx, [xSI + CPUMCTX.edx]
mov ebp, [xSI + CPUMCTX.ebp]

; Resume or start VM?
cmp xDI, 0 ; fResume
je .vmlaunch_launch

; Load guest edi & esi.
mov edi, [xSI + CPUMCTX.edi]
mov esi, [xSI + CPUMCTX.esi]

vmresume
jmp .vmlaunch_done; ; Here if vmresume detected a failure.

.vmlaunch_launch:
; Save guest edi & esi.
mov edi, [xSI + CPUMCTX.edi]
mov esi, [xSI + CPUMCTX.esi]

vmlaunch
jmp .vmlaunch_done; ; Here if vmlaunch detected a failure.

ALIGNCODE(16) ;; @todo YASM BUG - this alignment is wrong on darwin, it's 1 byte off.
.vmlaunch_done:
jc near .vmxstart_invalid_vmcs_ptr
jz near .vmxstart_start_failed

RESTORE_STATE_VM32
mov eax, VINF_SUCCESS

.vmstart_end:
popf
pop xBP
ret

.vmxstart_invalid_vmcs_ptr:
RESTORE_STATE_VM32
mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
jmp .vmstart_end

.vmxstart_start_failed:
RESTORE_STATE_VM32
mov eax, VERR_VMX_UNABLE_TO_START_VM
jmp .vmstart_end

ENDPROC VMXR0StartVM32


%ifdef RT_ARCH_AMD64
;; @def RESTORE_STATE_VM64
; Macro restoring essential host state and updating guest state
; for 64-bit host, 64-bit guest for VT-x.
;
%macro RESTORE_STATE_VM64 0
; Restore base and limit of the IDTR & GDTR
%ifndef VMX_SKIP_IDTR
lidt [xSP]
add xSP, xCB * 2
%endif
%ifndef VMX_SKIP_GDTR
lgdt [xSP]
add xSP, xCB * 2
%endif

push xDI
%ifndef VMX_SKIP_TR
mov xDI, [xSP + xCB * 3] ; pCtx (*3 to skip the saved xDI, TR, LDTR)
%else
mov xDI, [xSP + xCB * 2] ; pCtx (*2 to skip the saved xDI, LDTR)
%endif

mov qword [xDI + CPUMCTX.eax], rax
mov qword [xDI + CPUMCTX.ebx], rbx
mov qword [xDI + CPUMCTX.ecx], rcx
mov qword [xDI + CPUMCTX.edx], rdx
mov qword [xDI + CPUMCTX.esi], rsi
mov qword [xDI + CPUMCTX.ebp], rbp
mov qword [xDI + CPUMCTX.r8], r8
mov qword [xDI + CPUMCTX.r9], r9
mov qword [xDI + CPUMCTX.r10], r10
mov qword [xDI + CPUMCTX.r11], r11
mov qword [xDI + CPUMCTX.r12], r12
mov qword [xDI + CPUMCTX.r13], r13
mov qword [xDI + CPUMCTX.r14], r14
mov qword [xDI + CPUMCTX.r15], r15
mov rax, cr2
mov qword [xDI + CPUMCTX.cr2], rax

pop xAX ; The guest rdi we pushed above
mov qword [xDI + CPUMCTX.edi], rax

%ifndef VMX_SKIP_TR
; Restore TSS selector; must mark it as not busy before using ltr (!)
; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p).
; @todo get rid of sgdt
pop xBX ; Saved TR
sub xSP, xCB * 2
sgdt [xSP]
mov xAX, xBX
and eax, X86_SEL_MASK_OFF_RPL ; Mask away TI and RPL bits leaving only the descriptor offset.
add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
and dword [xAX + 4], ~RT_BIT(9) ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
ltr bx
add xSP, xCB * 2
%endif

pop xAX ; Saved LDTR
cmp eax, 0
je %%skip_ldt_write64
lldt ax

%%skip_ldt_write64:
pop xSI ; pCtx (needed in rsi by the macros below)

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
pop xDX ; Saved pCache

; Note! If we get here as a result of invalid VMCS pointer, all the following
; vmread's will fail (only eflags.cf=1 will be set) but that shouldn't cause any
; trouble only just less efficient.
mov ecx, [xDX + VMCSCACHE.Read.cValidEntries]
cmp ecx, 0 ; Can't happen
je %%no_cached_read64
jmp %%cached_read64

ALIGN(16)
%%cached_read64:
dec xCX
mov eax, [xDX + VMCSCACHE.Read.aField + xCX * 4]
vmread [xDX + VMCSCACHE.Read.aFieldVal + xCX * 8], xAX
cmp xCX, 0
jnz %%cached_read64
%%no_cached_read64:
%endif

; Restore segment registers.
MYPOPSEGS xAX, ax

; Restore the host XCR0 if necessary.
pop xCX
test ecx, ecx
jnz %%xcr0_after_skip
pop xAX
pop xDX
xsetbv ; ecx is already zero.
%%xcr0_after_skip:

; Restore general purpose registers.
MYPOPAD
%endmacro


;;
; Prepares for and executes VMLAUNCH/VMRESUME (64 bits guest mode)
;
; @returns VBox status code
; @param fResume msc:rcx, gcc:rdi Whether to use vmlauch/vmresume.
; @param pCtx msc:rdx, gcc:rsi Pointer to the guest-CPU context.
; @param pCache msc:r8, gcc:rdx Pointer to the VMCS cache.
; @param pVM msc:r9, gcc:rcx Pointer to the cross context VM structure.
; @param pVCpu msc:[ebp+30], gcc:r8 Pointer to the cross context VMCPU structure.
;
ALIGNCODE(16)
BEGINPROC VMXR0StartVM64
push xBP
mov xBP, xSP

pushf
cli

; Save all general purpose host registers.
MYPUSHAD

; First we have to save some final CPU context registers.
lea r10, [.vmlaunch64_done wrt rip]
mov rax, VMX_VMCS_HOST_RIP ; Return address (too difficult to continue after VMLAUNCH?).
vmwrite rax, r10
; Note: assumes success!

;
; Unify the input parameter registers.
;
%ifdef ASM_CALL64_GCC
; fResume already in rdi
; pCtx already in rsi
mov rbx, rdx ; pCache
%else
mov rdi, rcx ; fResume
mov rsi, rdx ; pCtx
mov rbx, r8 ; pCache
%endif

;
; Save the host XCR0 and load the guest one if necessary.
; Note! Trashes rdx and rcx.
;
%ifdef ASM_CALL64_MSC
mov rax, [xBP + 30h] ; pVCpu
%else
mov rax, r8 ; pVCpu
%endif
test byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
jz .xcr0_before_skip

xor ecx, ecx
xgetbv ; Save the host one on the stack.
push xDX
push xAX

mov eax, [xSI + CPUMCTX.aXcr] ; Load the guest one.
mov edx, [xSI + CPUMCTX.aXcr + 4]
xor ecx, ecx ; paranoia
xsetbv

push 0 ; Indicate that we must restore XCR0 (popped into ecx, thus 0).
jmp .xcr0_before_done

.xcr0_before_skip:
push 3fh ; indicate that we need not.
.xcr0_before_done:

;
; Save segment registers.
; Note! Trashes rdx & rcx, so we moved it here (amd64 case).
;
MYPUSHSEGS xAX, ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
cmp ecx, 0
je .no_cached_writes
mov edx, ecx
mov ecx, 0
jmp .cached_write

ALIGN(16)
.cached_write:
mov eax, [xBX + VMCSCACHE.Write.aField + xCX * 4]
vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX * 8]
inc xCX
cmp xCX, xDX
jl .cached_write

mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
.no_cached_writes:

; Save the pCache pointer.
push xBX
%endif

; Save the pCtx pointer.
push xSI

; Save host LDTR.
xor eax, eax
sldt ax
push xAX

%ifndef VMX_SKIP_TR
; The host TR limit is reset to 0x67; save & restore it manually.
str eax
push xAX
%endif

%ifndef VMX_SKIP_GDTR
; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
sub xSP, xCB * 2
sgdt [xSP]
%endif
%ifndef VMX_SKIP_IDTR
sub xSP, xCB * 2
sidt [xSP]
%endif

; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
mov rbx, qword [xSI + CPUMCTX.cr2]
mov rdx, cr2
cmp rbx, rdx
je .skip_cr2_write
mov cr2, rbx

.skip_cr2_write:
mov eax, VMX_VMCS_HOST_RSP
vmwrite xAX, xSP
; Note: assumes success!
; Don't mess with ESP anymore!!!

; Load guest general purpose registers.
mov rax, qword [xSI + CPUMCTX.eax]
mov rbx, qword [xSI + CPUMCTX.ebx]
mov rcx, qword [xSI + CPUMCTX.ecx]
mov rdx, qword [xSI + CPUMCTX.edx]
mov rbp, qword [xSI + CPUMCTX.ebp]
mov r8, qword [xSI + CPUMCTX.r8]
mov r9, qword [xSI + CPUMCTX.r9]
mov r10, qword [xSI + CPUMCTX.r10]
mov r11, qword [xSI + CPUMCTX.r11]
mov r12, qword [xSI + CPUMCTX.r12]
mov r13, qword [xSI + CPUMCTX.r13]
mov r14, qword [xSI + CPUMCTX.r14]
mov r15, qword [xSI + CPUMCTX.r15]

; Resume or start VM?
cmp xDI, 0 ; fResume
je .vmlaunch64_launch

; Load guest rdi & rsi.
mov rdi, qword [xSI + CPUMCTX.edi]
mov rsi, qword [xSI + CPUMCTX.esi]

vmresume
jmp .vmlaunch64_done; ; Here if vmresume detected a failure.

.vmlaunch64_launch:
; Save guest rdi & rsi.
mov rdi, qword [xSI + CPUMCTX.edi]
mov rsi, qword [xSI + CPUMCTX.esi]

vmlaunch
jmp .vmlaunch64_done; ; Here if vmlaunch detected a failure.

ALIGNCODE(16)
.vmlaunch64_done:
jc near .vmxstart64_invalid_vmcs_ptr
jz near .vmxstart64_start_failed

RESTORE_STATE_VM64
mov eax, VINF_SUCCESS

.vmstart64_end:
popf
pop xBP
ret

.vmxstart64_invalid_vmcs_ptr:
RESTORE_STATE_VM64
mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
jmp .vmstart64_end

.vmxstart64_start_failed:
RESTORE_STATE_VM64
mov eax, VERR_VMX_UNABLE_TO_START_VM
jmp .vmstart64_end
ENDPROC VMXR0StartVM64
%endif ; RT_ARCH_AMD64


;;
; Prepares for and executes VMRUN (32 bits guests)
;
; @returns VBox status code
; @param HCPhysVMCB Physical address of host VMCB.
; @param HCPhysVMCB Physical address of guest VMCB.
; @param pCtx Pointer to the guest CPU-context.
; @param pVM msc:r9, gcc:rcx Pointer to the cross context VM structure.
; @param pVCpu msc:[rsp+28],gcc:r8 Pointer to the cross context VMCPU structure.
;
ALIGNCODE(16)
BEGINPROC SVMR0VMRun
%ifdef RT_ARCH_AMD64 ; fake a cdecl stack frame
%ifdef ASM_CALL64_GCC
push r8
push rcx
push rdx
push rsi
push rdi
%else
mov rax, [rsp + 28h]
push rax ; pVCpu
push r9 ; pVM
push r8 ; pCtx
push rdx ; HCPHYSGuestVMCB
push rcx ; HCPhysHostVMCB
%endif
push 0
%endif
push xBP
mov xBP, xSP
pushf

;
; Save all general purpose host registers.
;
MYPUSHAD

;
; Load pCtx into xSI.
;
mov xSI, [xBP + xCB * 2 + RTHCPHYS_CB * 2] ; pCtx

;
; Save the host XCR0 and load the guest one if necessary.
;
mov xAX, [xBP + xCB * 2 + RTHCPHYS_CB * 2 + xCB * 2] ; pVCpu
test byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
jz .xcr0_before_skip

xor ecx, ecx
xgetbv ; Save the host one on the stack.
push xDX
push xAX

mov xSI, [xBP + xCB * 2 + RTHCPHYS_CB * 2] ; pCtx
mov eax, [xSI + CPUMCTX.aXcr] ; Load the guest one.
mov edx, [xSI + CPUMCTX.aXcr + 4]
xor ecx, ecx ; paranoia
xsetbv

push 0 ; Indicate that we must restore XCR0 (popped into ecx, thus 0).
jmp .xcr0_before_done

.xcr0_before_skip:
push 3fh ; indicate that we need not.
.xcr0_before_done:

;
; Save guest CPU-context pointer for simplifying saving of the GPRs afterwards.
;
push xSI

; Save host fs, gs, sysenter msr etc.
mov xAX, [xBP + xCB * 2] ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
push xAX ; save for the vmload after vmrun
vmsave

; Setup eax for VMLOAD.
mov xAX, [xBP + xCB * 2 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address; take low dword only)

; Load guest general purpose registers.
; eax is loaded from the VMCB by VMRUN.
mov ebx, [xSI + CPUMCTX.ebx]
mov ecx, [xSI + CPUMCTX.ecx]
mov edx, [xSI + CPUMCTX.edx]
mov edi, [xSI + CPUMCTX.edi]
mov ebp, [xSI + CPUMCTX.ebp]
mov esi, [xSI + CPUMCTX.esi]

; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
clgi
sti

; Load guest fs, gs, sysenter msr etc.
vmload
; Run the VM.
vmrun

; eax is in the VMCB already; we can use it here.

; Save guest fs, gs, sysenter msr etc.
vmsave

; Load host fs, gs, sysenter msr etc.
pop xAX ; Pushed above
vmload

; Set the global interrupt flag again, but execute cli to make sure IF=0.
cli
stgi

;
; Pop the context pointer (pushed above) and save the guest GPRs (sans RSP and RAX).
;
pop xAX

mov [ss:xAX + CPUMCTX.ebx], ebx
mov [ss:xAX + CPUMCTX.ecx], ecx
mov [ss:xAX + CPUMCTX.edx], edx
mov [ss:xAX + CPUMCTX.esi], esi
mov [ss:xAX + CPUMCTX.edi], edi
mov [ss:xAX + CPUMCTX.ebp], ebp

;
; Restore the host xcr0 if necessary.
;
pop xCX
test ecx, ecx
jnz .xcr0_after_skip
pop xAX
pop xDX
xsetbv ; ecx is already zero.
.xcr0_after_skip:

;
; Restore host general purpose registers.
;
MYPOPAD

mov eax, VINF_SUCCESS

popf
pop xBP
%ifdef RT_ARCH_AMD64
add xSP, 6*xCB
%endif
ret
ENDPROC SVMR0VMRun


%ifdef RT_ARCH_AMD64
;;
; Prepares for and executes VMRUN (64 bits guests)
;
; @returns VBox status code
; @param HCPhysVMCB Physical address of host VMCB.
; @param HCPhysVMCB Physical address of guest VMCB.
; @param pCtx Pointer to the guest-CPU context.
; @param pVM msc:r9, gcc:rcx Pointer to the cross context VM structure.
; @param pVCpu msc:[rsp+28],gcc:r8 Pointer to the cross context VMCPU structure.
;
ALIGNCODE(16)
BEGINPROC SVMR0VMRun64
; Fake a cdecl stack frame
%ifdef ASM_CALL64_GCC
push r8
push rcx
push rdx
push rsi
push rdi
%else
mov rax, [rsp + 28h]
push rax ; rbp + 30h pVCpu
push r9 ; rbp + 28h pVM
push r8 ; rbp + 20h pCtx
push rdx ; rbp + 18h HCPHYSGuestVMCB
push rcx ; rbp + 10h HCPhysHostVMCB
%endif
push 0 ; rbp + 08h "fake ret addr"
push rbp ; rbp + 00h
mov rbp, rsp
pushf

; Manual save and restore:
; - General purpose registers except RIP, RSP, RAX
;
; Trashed:
; - CR2 (we don't care)
; - LDTR (reset to 0)
; - DRx (presumably not changed at all)
; - DR7 (reset to 0x400)
;

;
; Save all general purpose host registers.
;
MYPUSHAD

;
; Load pCtx into xSI.
;
mov xSI, [rbp + xCB * 2 + RTHCPHYS_CB * 2]

;
; Save the host XCR0 and load the guest one if necessary.
;
mov rax, [xBP + 30h] ; pVCpu
test byte [xAX + VMCPU.hm + HMCPU.fLoadSaveGuestXcr0], 1
jz .xcr0_before_skip

xor ecx, ecx
xgetbv ; Save the host one on the stack.
push xDX
push xAX

mov xSI, [xBP + xCB * 2 + RTHCPHYS_CB * 2] ; pCtx
mov eax, [xSI + CPUMCTX.aXcr] ; Load the guest one.
mov edx, [xSI + CPUMCTX.aXcr + 4]
xor ecx, ecx ; paranoia
xsetbv

push 0 ; Indicate that we must restore XCR0 (popped into ecx, thus 0).
jmp .xcr0_before_done

.xcr0_before_skip:
push 3fh ; indicate that we need not.
.xcr0_before_done:

;
; Save guest CPU-context pointer for simplifying saving of the GPRs afterwards.
;
push rsi

;
; Save host fs, gs, sysenter msr etc.
;
mov rax, [rbp + xCB * 2] ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
push rax ; Save for the vmload after vmrun
vmsave

; Setup eax for VMLOAD.
mov rax, [rbp + xCB * 2 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address; take low dword only)

; Load guest general purpose registers.
; rax is loaded from the VMCB by VMRUN.
mov rbx, qword [xSI + CPUMCTX.ebx]
mov rcx, qword [xSI + CPUMCTX.ecx]
mov rdx, qword [xSI + CPUMCTX.edx]
mov rdi, qword [xSI + CPUMCTX.edi]
mov rbp, qword [xSI + CPUMCTX.ebp]
mov r8, qword [xSI + CPUMCTX.r8]
mov r9, qword [xSI + CPUMCTX.r9]
mov r10, qword [xSI + CPUMCTX.r10]
mov r11, qword [xSI + CPUMCTX.r11]
mov r12, qword [xSI + CPUMCTX.r12]
mov r13, qword [xSI + CPUMCTX.r13]
mov r14, qword [xSI + CPUMCTX.r14]
mov r15, qword [xSI + CPUMCTX.r15]
mov rsi, qword [xSI + CPUMCTX.esi]

; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
clgi
sti

; Load guest fs, gs, sysenter msr etc.
vmload
; Run the VM.
vmrun

; rax is in the VMCB already; we can use it here.

; Save guest fs, gs, sysenter msr etc.
vmsave

;
; Load host fs, gs, sysenter msr etc.
;
pop rax ; pushed above
vmload

;
; Set the global interrupt flag again, but execute cli to make sure IF=0.
;
cli
stgi

;
; Pop the context pointer (pushed above) and save the guest GPRs (sans RSP and RAX).
;
pop rax

mov qword [rax + CPUMCTX.ebx], rbx
mov qword [rax + CPUMCTX.ecx], rcx
mov qword [rax + CPUMCTX.edx], rdx
mov qword [rax + CPUMCTX.esi], rsi
mov qword [rax + CPUMCTX.edi], rdi
mov qword [rax + CPUMCTX.ebp], rbp
mov qword [rax + CPUMCTX.r8], r8
mov qword [rax + CPUMCTX.r9], r9
mov qword [rax + CPUMCTX.r10], r10
mov qword [rax + CPUMCTX.r11], r11
mov qword [rax + CPUMCTX.r12], r12
mov qword [rax + CPUMCTX.r13], r13
mov qword [rax + CPUMCTX.r14], r14
mov qword [rax + CPUMCTX.r15], r15

;
; Restore the host xcr0 if necessary.
;
pop xCX
test ecx, ecx
jnz .xcr0_after_skip
pop xAX
pop xDX
xsetbv ; ecx is already zero.
.xcr0_after_skip:

;
; Restore host general purpose registers.
;
MYPOPAD

mov eax, VINF_SUCCESS

popf
pop rbp
add rsp, 6 * xCB
ret
ENDPROC SVMR0VMRun64
%endif ; RT_ARCH_AMD64