Changeset 15030 in vbox for trunk/src/VBox/VMM/VMMGC
Timestamp: Dec 5, 2008 11:12:26 AM
svn:sync-xref-src-repo-rev: 40406
File: 1 edited
trunk/src/VBox/VMM/VMMGC/HWACCMGCA.asm
--- trunk/src/VBox/VMM/VMMGC/HWACCMGCA.asm (revision 14997)
+++ trunk/src/VBox/VMM/VMMGC/HWACCMGCA.asm (revision 15030)
@@ -47,19 +47,11 @@
 %endif
 
-;; @def MYPUSHAD
-; Macro generating an equivalent to pushad
-
-;; @def MYPOPAD
-; Macro generating an equivalent to popad
-
 ;; @def MYPUSHSEGS
 ; Macro saving all segment registers on the stack.
 ; @param 1  full width register name
-; @param 2  16-bit regsiter name for \a 1.
 
 ;; @def MYPOPSEGS
 ; Macro restoring all segment registers on the stack
 ; @param 1  full width register name
-; @param 2  16-bit regsiter name for \a 1.
 
 ; Load the corresponding guest MSR (trashes rdx & rcx)
@@ -70,7 +62,7 @@
 %endmacro
 
-; Save a guest and load the corresponding host MSR (trashes rdx & rcx)
+; Save a guest MSR (trashes rdx & rcx)
 ; Only really useful for gs kernel base as that one can be changed behind our back (swapgs)
-%macro LOADHOSTMSREX 2
+%macro SAVEGUESTMSR 2
    mov rcx, %1
    rdmsr
@@ -79,86 +71,31 @@
 %endmacro
 
-%ifdef ASM_CALL64_GCC
- %macro MYPUSHAD 0
-   push r15
-   push r14
-   push r13
-   push r12
-   push rbx
- %endmacro
- %macro MYPOPAD 0
-   pop rbx
-   pop r12
-   pop r13
-   pop r14
-   pop r15
- %endmacro
-
-%else ; ASM_CALL64_MSC
- %macro MYPUSHAD 0
-   push r15
-   push r14
-   push r13
-   push r12
-   push rbx
-   push rsi
-   push rdi
- %endmacro
- %macro MYPOPAD 0
-   pop rdi
-   pop rsi
-   pop rbx
-   pop r12
-   pop r13
-   pop r14
-   pop r15
- %endmacro
-%endif
-
-; trashes, rax, rdx & rcx
-%macro MYPUSHSEGS 2
-   mov %2, es
+%macro MYPUSHSEGS 1
+   mov %1, es
    push %1
-   mov %2, ds
+   mov %1, ds
    push %1
-
-   ; Special case for FS; Windows and Linux either don't use it or restore it when leaving kernel mode, Solaris OTOH doesn't and we must save it.
-   mov ecx, MSR_K8_FS_BASE
-   rdmsr
-   push rdx
-   push rax
-   push fs
-
-   ; Special case for GS; OSes typically use swapgs to reset the hidden base register for GS on entry into the kernel. The same happens on exit
-   mov ecx, MSR_K8_GS_BASE
-   rdmsr
-   push rdx
-   push rax
-   push gs
-%endmacro
-
-; trashes, rax, rdx & rcx
-%macro MYPOPSEGS 2
-   ; Note: do not step through this code with a debugger!
-   pop gs
-   pop rax
-   pop rdx
-   mov ecx, MSR_K8_GS_BASE
-   wrmsr
-
-   pop fs
-   pop rax
-   pop rdx
-   mov ecx, MSR_K8_FS_BASE
-   wrmsr
-   ; Now it's safe to step again
 %endmacro
+
+%macro MYPOPSEGS 1
    pop %1
-   mov ds, %2
+   mov ds, %1
    pop %1
-   mov es, %2
-%endmacro
-
-
+   mov es, %1
+%endmacro
+
+; trashes rax & rdx
+%macro VMCSWRITE 2
+   mov rdx, %2
+   mov eax, %1
+   vmwrite rax, rdx
+%endmacro
+
+; trashes rax & rdx
+%macro VMCSREAD 2
+   mov eax, %1
+   vmwrite rax, rdx
+   mov %2, rdx
+%endmacro
 
 BEGINCODE
@@ -176,32 +113,46 @@
    mov rbp, rsp
 
-   pushf
-   cli
-
-   ; Have to sync half the guest state as we can't access most of the 64 bits state. Sigh
-;   VMCSWRITE VMX_VMCS64_GUEST_CS_BASE, [rsi + CPUMCTX.csHid.u64Base]
-;   VMCSWRITE VMX_VMCS64_GUEST_DS_BASE, [rsi + CPUMCTX.dsHid.u64Base]
-;   VMCSWRITE VMX_VMCS64_GUEST_ES_BASE, [rsi + CPUMCTX.esHid.u64Base]
-;   VMCSWRITE VMX_VMCS64_GUEST_FS_BASE, [rsi + CPUMCTX.fsHid.u64Base]
-;   VMCSWRITE VMX_VMCS64_GUEST_GS_BASE, [rsi + CPUMCTX.gsHid.u64Base]
-;   VMCSWRITE VMX_VMCS64_GUEST_SS_BASE, [rsi + CPUMCTX.ssHid.u64Base]
-;   VMCSWRITE VMX_VMCS64_GUEST_LDTR_BASE, [rsi + CPUMCTX.ldtrHid.u64Base]
-;   VMCSWRITE VMX_VMCS64_GUEST_GDTR_BASE, [rsi + CPUMCTX.gdtrHid.u64Base]
-;   VMCSWRITE VMX_VMCS64_GUEST_IDTR_BASE, [rsi + CPUMCTX.idtrHid.u64Base]
-;   VMCSWRITE VMX_VMCS64_GUEST_TR_BASE, [rsi + CPUMCTX.trHid.u64Base]
-;
-;   VMCSWRITE VMX_VMCS64_GUEST_SYSENTER_EIP, [rsi + CPUMCTX.SysEnter.eip]
-;   VMCSWRITE VMX_VMCS64_GUEST_SYSENTER_ESP, [rsi + CPUMCTX.SysEnter.esp]
-;
-;   VMCSWRITE VMX_VMCS64_GUEST_RIP, [rsi + CPUMCTX.eip]
-;   VMCSWRITE VMX_VMCS64_GUEST_RSP, [rsi + CPUMCTX.esp]
-
-
+   ; Have to sync half the guest state as we can't access most of the 64 bits state in 32 bits mode. Sigh.
+   VMCSWRITE VMX_VMCS64_GUEST_CS_BASE, [rsi + CPUMCTX.csHid.u64Base]
+   VMCSWRITE VMX_VMCS64_GUEST_DS_BASE, [rsi + CPUMCTX.dsHid.u64Base]
+   VMCSWRITE VMX_VMCS64_GUEST_ES_BASE, [rsi + CPUMCTX.esHid.u64Base]
+   VMCSWRITE VMX_VMCS64_GUEST_FS_BASE, [rsi + CPUMCTX.fsHid.u64Base]
+   VMCSWRITE VMX_VMCS64_GUEST_GS_BASE, [rsi + CPUMCTX.gsHid.u64Base]
+   VMCSWRITE VMX_VMCS64_GUEST_SS_BASE, [rsi + CPUMCTX.ssHid.u64Base]
+   VMCSWRITE VMX_VMCS64_GUEST_GDTR_BASE, [rsi + CPUMCTX.gdtr.pGdt]
+   VMCSWRITE VMX_VMCS64_GUEST_IDTR_BASE, [rsi + CPUMCTX.idtr.pIdt]
+   VMCSWRITE VMX_VMCS64_GUEST_LDTR_BASE, [rsi + CPUMCTX.ldtrHid.u64Base]
+   VMCSWRITE VMX_VMCS64_GUEST_TR_BASE, [rsi + CPUMCTX.trHid.u64Base]
+
+   VMCSWRITE VMX_VMCS64_GUEST_SYSENTER_EIP, [rsi + CPUMCTX.SysEnter.eip]
+   VMCSWRITE VMX_VMCS64_GUEST_SYSENTER_ESP, [rsi + CPUMCTX.SysEnter.esp]
+
+   VMCSWRITE VMX_VMCS64_GUEST_RIP, [rsi + CPUMCTX.eip]
+   VMCSWRITE VMX_VMCS64_GUEST_RSP, [rsi + CPUMCTX.esp]
+
+   ; Save the host state that's relevant in the temporary 64 bits mode
+   mov rax, cr0
+   VMCSWRITE VMX_VMCS_HOST_CR0, rax
+   mov rax, cr3
+   VMCSWRITE VMX_VMCS_HOST_CR3, rax
+   mov rax, cr4
+   VMCSWRITE VMX_VMCS_HOST_CR4, rax
+   mov rax, cs
+   VMCSWRITE VMX_VMCS_HOST_FIELD_CS, rax
+   mov rax, ss
+   VMCSWRITE VMX_VMCS_HOST_FIELD_SS, rax
+
+   sub rsp, 8*2
+   sgdt [rsp]
+   mov rax, [rsp+2]
+   VMCSWRITE VMX_VMCS_HOST_GDTR_BASE, rax
+   add rsp, 8*2
+
+   ; hopefully we can ignore TR (we restore it anyway on the way back to 32 bits mode)
+
 ;/* First we have to save some final CPU context registers. */
-   lea rax, [.vmlaunch64_done wrt rip]
-   push rax
+   lea rdx, [.vmlaunch64_done wrt rip]
    mov rax, VMX_VMCS_HOST_RIP  ;/* return address (too difficult to continue after VMLAUNCH?) */
-   vmwrite rax, [rsp]
+   vmwrite rax, rdx
    ;/* Note: assumes success... */
-   add rsp, 8
 
@@ -218,17 +169,13 @@
 ; */
 
-;/* Save all general purpose host registers. */
-   MYPUSHAD
-
-;/* Save the Guest CPU context pointer. */
-; pCtx already in rsi
-
 ;/* Save segment registers */
-; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case)
-   MYPUSHSEGS rax, ax
-
-   ; Save the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs and restore the guest MSRs
+   MYPUSHSEGS rax
+
+   ; Load the guest LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
    ;; @todo use the automatic load feature for MSRs
    LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
+%if 0  ; not supported on Intel CPUs
+   LOADGUESTMSR MSR_K8_CSTAR, CPUMCTX.msrCSTAR
+%endif
    LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
    LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
@@ -237,16 +184,4 @@
    ; Save the pCtx pointer
    push rsi
-
-   ; Save LDTR
-   xor eax, eax
-   sldt ax
-   push rax
-
-   ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
-   sub rsp, 8*2
-   sgdt [rsp]
-
-   sub rsp, 8*2
-   sidt [rsp]
 
    ; Restore CR2
@@ -286,10 +221,4 @@
    jz near .vmstart64_start_failed
 
-   ; Restore base and limit of the IDTR & GDTR
-   lidt [rsp]
-   add rsp, 8*2
-   lgdt [rsp]
-   add rsp, 8*2
-
    push rdi
    mov rdi, [rsp + 8 * 2]  ; pCtx
@@ -313,72 +242,44 @@
    mov qword [rdi + CPUMCTX.edi], rax
 
-   pop rax  ; saved LDTR
-   lldt ax
-
+   pop rsi  ; pCtx (needed in rsi by the macros below)
+
+   ;; @todo use the automatic load feature for MSRs
+   SAVEGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
+
+   ; Restore segment registers
+   MYPOPSEGS rax
+
+   mov eax, VINF_SUCCESS
+
+.vmstart64_end:
+   pop rbp
+   ret
+
+
+.vmstart64_invalid_vmxon_ptr:
    pop rsi  ; pCtx (needed in rsi by the macros below)
 
    ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
    ;; @todo use the automatic load feature for MSRs
-   LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
+   SAVEGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
 
    ; Restore segment registers
-   MYPOPSEGS rax, ax
-
-   ; Restore general purpose registers
-   MYPOPAD
-
-   mov eax, VINF_SUCCESS
-
-.vmstart64_end:
-   popf
-   pop rbp
-   ret
-
-
-.vmstart64_invalid_vmxon_ptr:
-   ; Restore base and limit of the IDTR & GDTR
-   lidt [rsp]
-   add rsp, 8*2
-   lgdt [rsp]
-   add rsp, 8*2
-
-   pop rax  ; saved LDTR
-   lldt ax
-
+   MYPOPSEGS rax
+
+   ; Restore all general purpose host registers.
+   mov eax, VERR_VMX_INVALID_VMXON_PTR
+   jmp .vmstart64_end
+
+.vmstart64_start_failed:
    pop rsi  ; pCtx (needed in rsi by the macros below)
 
    ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
    ;; @todo use the automatic load feature for MSRs
-   LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
+   SAVEGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
 
    ; Restore segment registers
-   MYPOPSEGS rax, ax
+   MYPOPSEGS rax
 
    ; Restore all general purpose host registers.
-   MYPOPAD
-   mov eax, VERR_VMX_INVALID_VMXON_PTR
-   jmp .vmstart64_end
-
-.vmstart64_start_failed:
-   ; Restore base and limit of the IDTR & GDTR
-   lidt [rsp]
-   add rsp, 8*2
-   lgdt [rsp]
-   add rsp, 8*2
-
-   pop rax  ; saved LDTR
-   lldt ax
-
-   pop rsi  ; pCtx (needed in rsi by the macros below)
-
-   ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
-   ;; @todo use the automatic load feature for MSRs
-   LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
-
-   ; Restore segment registers
-   MYPOPSEGS rax, ax
-
-   ; Restore all general purpose host registers.
-   MYPOPAD
    mov eax, VERR_VMX_UNABLE_TO_START_VM
    jmp .vmstart64_end
@@ -408,7 +309,4 @@
 ; *    - DR7 (reset to 0x400)
 ; */
-
-;/* Save all general purpose host registers. */
-   MYPUSHAD
 
 ;/* Save the Guest CPU context pointer. */
@@ -482,7 +380,4 @@
    mov qword [rax + CPUMCTX.r14], r14
    mov qword [rax + CPUMCTX.r15], r15
-
-   ; Restore general purpose registers
-   MYPOPAD
 
    mov eax, VINF_SUCCESS
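
A note on the new VMCSWRITE helper: NASM expands a guest-state sync line such as VMCSWRITE VMX_VMCS64_GUEST_RIP, [rsi + CPUMCTX.eip] into the three instructions sketched below (vmwrite takes the VMCS field encoding in its first operand and the value in its second). Also worth noting: the committed VMCSREAD body issues vmwrite; given the macro's name and its trailing mov %2, rdx, a vmread-based body looks intended, so a hypothetical counterpart (the _SKETCH name is ours, not part of r15030) is shown for comparison.

   ; Expansion of: VMCSWRITE VMX_VMCS64_GUEST_RIP, [rsi + CPUMCTX.eip]
   mov rdx, [rsi + CPUMCTX.eip]    ; value to store in the VMCS
   mov eax, VMX_VMCS64_GUEST_RIP   ; VMCS field encoding
   vmwrite rax, rdx                ; VMCS[rax] := rdx

   ; Hypothetical vmread-based counterpart (not what r15030 contains):
   %macro VMCSREAD_SKETCH 2
      mov eax, %1                  ; VMCS field encoding
      vmread rdx, rax              ; rdx := VMCS[rax]
      mov %2, rdx                  ; store the value read
   %endmacro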
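The bodies of LOADGUESTMSR and SAVEGUESTMSR are mostly elided in the hunks above. As a rough illustration only, assuming the usual rdmsr/wrmsr convention that the 64-bit MSR value travels in edx:eax, and assuming rsi points to the guest CPUMCTX as elsewhere in this file, such a load/save pair could look like the following sketch (hypothetical _SKETCH names; not the verbatim elided lines):

   ; Hypothetical sketch of the MSR load/save pattern (trashes rax, rcx & rdx).
   %macro LOADGUESTMSR_SKETCH 2
      mov rcx, %1                    ; MSR index
      mov edx, dword [rsi + %2 + 4]  ; high half of the saved guest value
      mov eax, dword [rsi + %2]      ; low half
      wrmsr                          ; MSR[rcx] := edx:eax
   %endmacro

   %macro SAVEGUESTMSR_SKETCH 2
      mov rcx, %1                    ; MSR index
      rdmsr                          ; edx:eax := MSR[rcx]
      mov dword [rsi + %2], eax      ; store low half into CPUMCTX
      mov dword [rsi + %2 + 4], edx  ; store high half
   %endmacro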