Changeset 87754 in vbox for trunk/src/VBox/VMM/VMMR0
- Timestamp: Feb 13, 2021 5:44:31 PM (4 years ago)
- svn:sync-xref-src-repo-rev: 142806
- Location: trunk/src/VBox/VMM/VMMR0
- Files: 2 edited
Legend:
- Unmodified lines carry no prefix
- Added lines are prefixed with "+"
- Removed lines are prefixed with "-"
trunk/src/VBox/VMM/VMMR0/HMR0A.asm
r87741 → r87754:

         %endif

-        ; Save the guest state and restore the non-volatile registers.  We use rax=pGstCtx here.
-        mov     [rsp + cbFrame + frm_guest_rax], rax
-        mov     rax, [rsp + cbFrame + frm_pGstCtx]
-
-        mov     qword [rax + CPUMCTX.ebp], rbp
+        ; Save the guest state and restore the non-volatile registers.  We use rcx=pGstCtx (&pVCpu->cpum.GstCtx) here.
+        mov     [rsp + cbFrame + frm_guest_rcx], rcx
+        mov     rcx, [rsp + cbFrame + frm_pGstCtx]
+
+        mov     qword [rcx + CPUMCTX.eax], rax
+        mov     qword [rcx + CPUMCTX.edx], rdx
+        rdtsc
+        mov     qword [rcx + CPUMCTX.ebp], rbp
         lea     rbp, [rsp + cbFrame]        ; re-establish the frame pointer as early as possible.
-        mov     qword [rax + CPUMCTX.ecx], rcx
-        mov     rcx, SPECTRE_FILLER
-        mov     qword [rax + CPUMCTX.edx], rdx
-        mov     rdx, [rbp + frm_guest_rax]
-        mov     qword [rax + CPUMCTX.eax], rdx
-        mov     rdx, rcx
-        mov     qword [rax + CPUMCTX.r8], r8
-        mov     r8, rcx
-        mov     qword [rax + CPUMCTX.r9], r9
-        mov     r9, rcx
-        mov     qword [rax + CPUMCTX.r10], r10
-        mov     r10, rcx
-        mov     qword [rax + CPUMCTX.r11], r11
-        mov     r11, rcx
-        mov     qword [rax + CPUMCTX.esi], rsi
+        shl     rdx, 20h
+        or      rax, rdx                    ; TSC value in RAX
+        mov     rdx, [rbp + frm_guest_rcx]
+        mov     qword [rcx + CPUMCTX.ecx], rdx
+        mov     rdx, SPECTRE_FILLER         ; FILLER in RDX
+        mov     qword [rcx + GVMCPU.hmr0 + HMR0PERVCPU.uTscExit - VMCPU.cpum.GstCtx], rax
+        mov     qword [rcx + CPUMCTX.r8], r8
+        mov     r8, rdx
+        mov     qword [rcx + CPUMCTX.r9], r9
+        mov     r9, rdx
+        mov     qword [rcx + CPUMCTX.r10], r10
+        mov     r10, rdx
+        mov     qword [rcx + CPUMCTX.r11], r11
+        mov     r11, rdx
+        mov     qword [rcx + CPUMCTX.esi], rsi
 %ifdef ASM_CALL64_MSC
         mov     rsi, [rbp + frm_saved_rsi]
 %else
-        mov     rsi, rcx
+        mov     rsi, rdx
 %endif
-        mov     qword [rax + CPUMCTX.edi], rdi
+        mov     qword [rcx + CPUMCTX.edi], rdi
 %ifdef ASM_CALL64_MSC
         mov     rdi, [rbp + frm_saved_rdi]
 %else
-        mov     rdi, rcx
+        mov     rdi, rdx
 %endif
-        mov     qword [rax + CPUMCTX.ebx], rbx
+        mov     qword [rcx + CPUMCTX.ebx], rbx
         mov     rbx, [rbp + frm_saved_rbx]
-        mov     qword [rax + CPUMCTX.r12], r12
+        mov     qword [rcx + CPUMCTX.r12], r12
         mov     r12, [rbp + frm_saved_r12]
-        mov     qword [rax + CPUMCTX.r13], r13
+        mov     qword [rcx + CPUMCTX.r13], r13
         mov     r13, [rbp + frm_saved_r13]
-        mov     qword [rax + CPUMCTX.r14], r14
+        mov     qword [rcx + CPUMCTX.r14], r14
         mov     r14, [rbp + frm_saved_r14]
-        mov     qword [rax + CPUMCTX.r15], r15
+        mov     qword [rcx + CPUMCTX.r15], r15
         mov     r15, [rbp + frm_saved_r15]

-        mov     rdx, cr2
-        mov     qword [rax + CPUMCTX.cr2], rdx
-        mov     rdx, rcx
+        mov     rax, cr2
+        mov     qword [rcx + CPUMCTX.cr2], rax
+        mov     rax, rdx

 %if %4 != 0
         ; Save the context pointer in r8 for the SSE save/restore.
-        mov     r8, rax
+        mov     r8, rcx
 %endif
…
 %define frm_saved_ldtr        -03ch       ; 16-bit: always saved.
 %define frm_rcError           -040h       ; 32-bit: Error status code (not used in the success path)
-%define frm_guest_rax         -048h       ; Temporary storage slot for guest RAX.
+%define frm_guest_rcx         -048h       ; Temporary storage slot for guest RCX.
 %if %4 = 0
  %assign cbFrame               048h
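For context: the reworked exit path saves the guest RAX and RDX values, issues RDTSC, folds the EDX:EAX halves into a single 64-bit value ("shl rdx, 20h" is a shift by 32, then "or rax, rdx"), and stores the result in HMR0PERVCPU.uTscExit via an offset from the guest context pointer held in RCX. Below is a minimal C++ sketch of just that bit manipulation; the function name and the sample values are illustrative and not part of the changeset.

#include <cstdint>
#include <cstdio>

// Combine the two 32-bit halves that RDTSC leaves in EDX:EAX into one 64-bit
// timestamp, mirroring the "shl rdx, 20h" + "or rax, rdx" sequence above.
static uint64_t CombineTscHalves(uint32_t uEax, uint32_t uEdx)
{
    return static_cast<uint64_t>(uEax) | (static_cast<uint64_t>(uEdx) << 32);
}

int main()
{
    uint32_t const uEax = 0x89abcdefU;  /* hypothetical low 32 bits from RDTSC */
    uint32_t const uEdx = 0x01234567U;  /* hypothetical high 32 bits from RDTSC */
    std::printf("combined TSC = 0x%016llx\n",
                static_cast<unsigned long long>(CombineTscHalves(uEax, uEdx)));
    return 0;
}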
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r87752 → r87754:

 static void hmR0VmxPostRunGuest(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, int rcVMRun)
 {
-    uint64_t const uHostTsc = ASMReadTSC();                         /** @todo We can do a lot better here, see @bugref{9180#c38}. */
-
     ASMAtomicUoWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false);     /* See HMInvalidatePageOnAllVCpus(): used for TLB flushing. */
     ASMAtomicIncU32(&pVCpu->hmr0.s.cWorldSwitchExits);              /* Initialized in vmR3CreateUVM(): used for EMT poking. */
…
         uint64_t uGstTsc;
         if (!pVmxTransient->fIsNestedGuest)
-            uGstTsc = uHostTsc + pVmcsInfo->u64TscOffset;
+            uGstTsc = pVCpu->hmr0.s.uTscExit + pVmcsInfo->u64TscOffset;
         else
         {
-            uint64_t const uNstGstTsc = uHostTsc + pVmcsInfo->u64TscOffset;
+            uint64_t const uNstGstTsc = pVCpu->hmr0.s.uTscExit + pVmcsInfo->u64TscOffset;
             uGstTsc = CPUMRemoveNestedGuestTscOffset(pVCpu, uNstGstTsc);
         }
…
     STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatPreExit, x);
-    TMNotifyEndOfExecution(pVCpu->CTX_SUFF(pVM), pVCpu, uHostTsc);  /* Notify TM that the guest is no longer running. */
+    TMNotifyEndOfExecution(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->hmr0.s.uTscExit); /* Notify TM that the guest is no longer running. */
     VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
…
      */
     EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_VMX, pVmxTransient->uExitReason & EMEXIT_F_TYPE_MASK),
-                     UINT64_MAX, uHostTsc);
+                     UINT64_MAX, pVCpu->hmr0.s.uTscExit);

     if (RT_LIKELY(!pVmxTransient->fVMEntryFailed))
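The C++ side no longer calls ASMReadTSC() after returning from the assembly switcher; it consumes the TSC captured at VM-exit, pVCpu->hmr0.s.uTscExit. The guest-visible TSC is then recovered by adding the VMCS TSC offset to that host value (the nested-guest case additionally goes through CPUMRemoveNestedGuestTscOffset()). The following is a minimal sketch of that arithmetic using made-up stand-in types, not the real VirtualBox structures.

#include <cstdint>
#include <cstdio>

// Simplified stand-ins for the VirtualBox state; only the two fields the
// calculation needs are modelled here.
struct VmExitTimestamps
{
    uint64_t uTscExit;      /* host TSC captured right after VM-exit */
    uint64_t u64TscOffset;  /* TSC offset programmed into the VMCS */
};

// With hardware TSC offsetting, guest TSC = host TSC + VMCS TSC offset, so the
// TSC captured at VM-exit translates into guest time by adding the offset.
// Unsigned arithmetic wraps modulo 2^64, matching the CPU's behaviour.
static uint64_t CalcGuestTscAtExit(const VmExitTimestamps &rTs)
{
    return rTs.uTscExit + rTs.u64TscOffset;
}

int main()
{
    VmExitTimestamps const Ts = { 1000000u, 250u };  /* hypothetical values */
    std::printf("guest TSC at exit = %llu\n",
                static_cast<unsigned long long>(CalcGuestTscAtExit(Ts)));
    return 0;
}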