source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0JmpA-amd64.asm@ 72747

Last change on this file since 72747 was 71222, checked in by vboxsync, 7 years ago

NEM/win,VMM,PGM: Ported NEM runloop to ring-0. bugref:9044

; $Id: VMMR0JmpA-amd64.asm 71222 2018-03-05 22:07:48Z vboxsync $
;; @file
; VMM - R0 SetJmp / LongJmp routines for AMD64.
;

;
; Copyright (C) 2006-2017 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;

;*******************************************************************************
;* Header Files                                                                 *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VMMInternal.mac"
%include "VBox/err.mac"
%include "VBox/param.mac"


;*******************************************************************************
;* Defined Constants And Macros                                                 *
;*******************************************************************************
%define RESUME_MAGIC    07eadf00dh
%define STACK_PADDING   0eeeeeeeeeeeeeeeeh

;; Workaround for linux 4.6 fast/slow syscall stack depth difference.
%ifdef VMM_R0_SWITCH_STACK
 %define STACK_FUZZ_SIZE 0
%else
 %define STACK_FUZZ_SIZE 128
%endif


BEGINCODE


;;
; The setjmp variant used for calling Ring-3.
;
; This differs from the normal setjmp in that it will resume VMMRZCallRing3 if we're
; in the middle of a ring-3 call. Another difference is the function pointer and
; argument. This has to do with resuming code and the stack frame of the caller.
;
; @returns  VINF_SUCCESS on success or whatever is passed to vmmR0CallRing3LongJmp.
; @param    pJmpBuf  msc:rcx  gcc:rdi  x86:[esp+0x04]  Our jmp_buf.
; @param    pfn      msc:rdx  gcc:rsi  x86:[esp+0x08]  The function to be called when not resuming.
; @param    pvUser1  msc:r8   gcc:rdx  x86:[esp+0x0c]  The argument of that function.
; @param    pvUser2  msc:r9   gcc:rcx  x86:[esp+0x10]  The argument of that function.
;
; (An illustrative caller sketch follows this procedure.)
;
BEGINPROC vmmR0CallRing3SetJmp
GLOBALNAME vmmR0CallRing3SetJmp2
GLOBALNAME vmmR0CallRing3SetJmpEx
    ;
    ; Save the registers.
    ;
    push    rbp
    mov     rbp, rsp
 %ifdef ASM_CALL64_MSC
    sub     rsp, 30h + STACK_FUZZ_SIZE  ; (10h is used by resume (??), 20h for callee spill area)
    mov     r11, rdx                    ; pfn
    mov     rdx, rcx                    ; pJmpBuf;
 %else
    sub     rsp, 10h + STACK_FUZZ_SIZE  ; (10h is used by resume (??))
    mov     r8, rdx                     ; pvUser1 (save it like MSC)
    mov     r9, rcx                     ; pvUser2 (save it like MSC)
    mov     r11, rsi                    ; pfn
    mov     rdx, rdi                    ; pJmpBuf
 %endif
    mov     [xDX + VMMR0JMPBUF.rbx], rbx
 %ifdef ASM_CALL64_MSC
    mov     [xDX + VMMR0JMPBUF.rsi], rsi
    mov     [xDX + VMMR0JMPBUF.rdi], rdi
 %endif
    mov     [xDX + VMMR0JMPBUF.rbp], rbp
    mov     [xDX + VMMR0JMPBUF.r12], r12
    mov     [xDX + VMMR0JMPBUF.r13], r13
    mov     [xDX + VMMR0JMPBUF.r14], r14
    mov     [xDX + VMMR0JMPBUF.r15], r15
    mov     xAX, [rbp + 8]              ; (not really necessary, except for validity check)
    mov     [xDX + VMMR0JMPBUF.rip], xAX
 %ifdef ASM_CALL64_MSC
    lea     r10, [rsp + 20h]            ; must save the spill area
 %else
    lea     r10, [rsp]
 %endif
    mov     [xDX + VMMR0JMPBUF.rsp], r10
 %ifdef RT_OS_WINDOWS
    movdqa  [xDX + VMMR0JMPBUF.xmm6], xmm6
    movdqa  [xDX + VMMR0JMPBUF.xmm7], xmm7
    movdqa  [xDX + VMMR0JMPBUF.xmm8], xmm8
    movdqa  [xDX + VMMR0JMPBUF.xmm9], xmm9
    movdqa  [xDX + VMMR0JMPBUF.xmm10], xmm10
    movdqa  [xDX + VMMR0JMPBUF.xmm11], xmm11
    movdqa  [xDX + VMMR0JMPBUF.xmm12], xmm12
    movdqa  [xDX + VMMR0JMPBUF.xmm13], xmm13
    movdqa  [xDX + VMMR0JMPBUF.xmm14], xmm14
    movdqa  [xDX + VMMR0JMPBUF.xmm15], xmm15
 %endif
    pushf
    pop     xAX
    mov     [xDX + VMMR0JMPBUF.rflags], xAX

    ;
    ; If we're not in a ring-3 call, call pfn and return.
    ;
    test    byte [xDX + VMMR0JMPBUF.fInRing3Call], 1
    jnz     .resume

 %ifdef VMM_R0_SWITCH_STACK
    mov     r15, [xDX + VMMR0JMPBUF.pvSavedStack]
    test    r15, r15
    jz      .entry_error
  %ifdef VBOX_STRICT
    cmp     dword [r15], 0h
    jne     .entry_error
    mov     rdi, r15
    mov     rcx, VMM_STACK_SIZE / 8
    mov     rax, qword 0eeeeeeeffeeeeeeeh
    repne stosq
    mov     [rdi - 10h], rbx
  %endif
    lea     r15, [r15 + VMM_STACK_SIZE - 40h]
    mov     rsp, r15                    ; Switch stack!
 %endif ; VMM_R0_SWITCH_STACK

    mov     r12, rdx                    ; Save pJmpBuf.
 %ifdef ASM_CALL64_MSC
    mov     rcx, r8                     ; pvUser -> arg0
    mov     rdx, r9
 %else
    mov     rdi, r8                     ; pvUser -> arg0
    mov     rsi, r9
 %endif
    call    r11
    mov     rdx, r12                    ; Restore pJmpBuf

 %ifdef VMM_R0_SWITCH_STACK
  %ifdef VBOX_STRICT
    mov     r15, [xDX + VMMR0JMPBUF.pvSavedStack]
    mov     dword [r15], 0h             ; Reset the marker
  %endif
 %endif

    ;
    ; Return like in the long jump, but clear rip; no shortcuts here.
    ;
.proper_return:
%ifdef RT_OS_WINDOWS
    movdqa  xmm6,  [xDX + VMMR0JMPBUF.xmm6 ]
    movdqa  xmm7,  [xDX + VMMR0JMPBUF.xmm7 ]
    movdqa  xmm8,  [xDX + VMMR0JMPBUF.xmm8 ]
    movdqa  xmm9,  [xDX + VMMR0JMPBUF.xmm9 ]
    movdqa  xmm10, [xDX + VMMR0JMPBUF.xmm10]
    movdqa  xmm11, [xDX + VMMR0JMPBUF.xmm11]
    movdqa  xmm12, [xDX + VMMR0JMPBUF.xmm12]
    movdqa  xmm13, [xDX + VMMR0JMPBUF.xmm13]
    movdqa  xmm14, [xDX + VMMR0JMPBUF.xmm14]
    movdqa  xmm15, [xDX + VMMR0JMPBUF.xmm15]
%endif
    mov     rbx, [xDX + VMMR0JMPBUF.rbx]
%ifdef ASM_CALL64_MSC
    mov     rsi, [xDX + VMMR0JMPBUF.rsi]
    mov     rdi, [xDX + VMMR0JMPBUF.rdi]
%endif
    mov     r12, [xDX + VMMR0JMPBUF.r12]
    mov     r13, [xDX + VMMR0JMPBUF.r13]
    mov     r14, [xDX + VMMR0JMPBUF.r14]
    mov     r15, [xDX + VMMR0JMPBUF.r15]
    mov     rbp, [xDX + VMMR0JMPBUF.rbp]
    and     qword [xDX + VMMR0JMPBUF.rip], byte 0 ; used for valid check.
    mov     rsp, [xDX + VMMR0JMPBUF.rsp]
    push    qword [xDX + VMMR0JMPBUF.rflags]
    popf
    leave
    ret

.entry_error:
    mov     eax, VERR_VMM_SET_JMP_ERROR
    jmp     .proper_return

.stack_overflow:
    mov     eax, VERR_VMM_SET_JMP_STACK_OVERFLOW
    jmp     .proper_return

    ;
    ; Aborting resume.
    ; Note! No need to restore XMM registers here since we haven't touched them yet.
    ;
.bad:
    and     qword [xDX + VMMR0JMPBUF.rip], byte 0 ; used for valid check.
    mov     rbx, [xDX + VMMR0JMPBUF.rbx]
 %ifdef ASM_CALL64_MSC
    mov     rsi, [xDX + VMMR0JMPBUF.rsi]
    mov     rdi, [xDX + VMMR0JMPBUF.rdi]
 %endif
    mov     r12, [xDX + VMMR0JMPBUF.r12]
    mov     r13, [xDX + VMMR0JMPBUF.r13]
    mov     r14, [xDX + VMMR0JMPBUF.r14]
    mov     r15, [xDX + VMMR0JMPBUF.r15]
    mov     eax, VERR_VMM_SET_JMP_ABORTED_RESUME
    leave
    ret

    ;
    ; Resume the VMMRZCallRing3 call.
    ;
.resume:
 %ifndef VMM_R0_SWITCH_STACK
    ; Sanity-check the incoming stack, applying fuzz if needed.
    sub     r10, [xDX + VMMR0JMPBUF.SpCheck]
    jz      .resume_stack_checked_out
    add     r10, STACK_FUZZ_SIZE        ; plus/minus STACK_FUZZ_SIZE is fine.
    cmp     r10, STACK_FUZZ_SIZE * 2
    ja      .bad

    mov     r10, [xDX + VMMR0JMPBUF.SpCheck]
    mov     [xDX + VMMR0JMPBUF.rsp], r10 ; Must be updated in case of another long jump (used for the save size calc).

.resume_stack_checked_out:
    mov     ecx, [xDX + VMMR0JMPBUF.cbSavedStack]
    cmp     rcx, VMM_STACK_SIZE
    ja      .bad
    test    rcx, 7
    jnz     .bad
    mov     rdi, [xDX + VMMR0JMPBUF.SpCheck]
    sub     rdi, [xDX + VMMR0JMPBUF.SpResume]
    cmp     rcx, rdi
    jne     .bad
 %endif

%ifdef VMM_R0_SWITCH_STACK
    ; Switch stack.
    mov     rsp, [xDX + VMMR0JMPBUF.SpResume]
%else
    ; Restore the stack.
    mov     ecx, [xDX + VMMR0JMPBUF.cbSavedStack]
    shr     ecx, 3
    mov     rsi, [xDX + VMMR0JMPBUF.pvSavedStack]
    mov     rdi, [xDX + VMMR0JMPBUF.SpResume]
    mov     rsp, rdi
    rep movsq
%endif ; !VMM_R0_SWITCH_STACK
    mov     byte [xDX + VMMR0JMPBUF.fInRing3Call], 0

    ;
    ; Continue where we left off.
    ;
%ifdef VBOX_STRICT
    pop     rax                         ; magic
    cmp     rax, RESUME_MAGIC
    je      .magic_ok
    mov     ecx, 0123h
    mov     [ecx], edx
.magic_ok:
%endif
%ifdef RT_OS_WINDOWS
    movdqa  xmm6,  [rsp + 000h]
    movdqa  xmm7,  [rsp + 010h]
    movdqa  xmm8,  [rsp + 020h]
    movdqa  xmm9,  [rsp + 030h]
    movdqa  xmm10, [rsp + 040h]
    movdqa  xmm11, [rsp + 050h]
    movdqa  xmm12, [rsp + 060h]
    movdqa  xmm13, [rsp + 070h]
    movdqa  xmm14, [rsp + 080h]
    movdqa  xmm15, [rsp + 090h]
    add     rsp, 0a0h
%endif
    popf
    pop     rbx
%ifdef ASM_CALL64_MSC
    pop     rsi
    pop     rdi
%endif
    pop     r12
    pop     r13
    pop     r14
    pop     r15
    pop     rbp
    xor     eax, eax                    ; VINF_SUCCESS
    ret
ENDPROC vmmR0CallRing3SetJmp
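

;;
; Illustrative sketch only (not part of the original source): a hypothetical
; caller and worker showing how the parameters documented above map onto the
; two calling conventions. The names vmmR0JmpDemoWorker and vmmR0JmpDemoCall,
; and the choice of pvUser values, are assumptions made for illustration.
;
BEGINPROC vmmR0JmpDemoWorker
    ; pvUser1 arrives in rcx (MSC) or rdi (SysV), pvUser2 in rdx (MSC) or rsi (SysV).
    xor     eax, eax                    ; VINF_SUCCESS - becomes vmmR0CallRing3SetJmp's return value.
    ret
ENDPROC vmmR0JmpDemoWorker

BEGINPROC vmmR0JmpDemoCall
    ; Assumes our own first argument is already the jump buffer (rcx on MSC, rdi on SysV).
    sub     rsp, 28h                    ; callee spill area (MSC) + stack alignment
%ifdef ASM_CALL64_MSC
    mov     rdx, NAME(vmmR0JmpDemoWorker) ; pfn
    xor     r8, r8                      ; pvUser1 (none in this sketch)
    xor     r9, r9                      ; pvUser2
%else
    mov     rsi, NAME(vmmR0JmpDemoWorker) ; pfn
    xor     edx, edx                    ; pvUser1
    xor     ecx, ecx                    ; pvUser2
%endif
    call    NAME(vmmR0CallRing3SetJmp)
    ; eax now holds the worker status, or the rc that vmmR0CallRing3LongJmp
    ; delivered if a ring-3 call was triggered somewhere below the worker.
    add     rsp, 28h
    ret
ENDPROC vmmR0JmpDemoCall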


;;
; Worker for VMMRZCallRing3.
; This will save the stack and registers.
;
; @param    pJmpBuf  msc:rcx  gcc:rdi  x86:[ebp+8]  Pointer to the jump buffer.
; @param    rc       msc:rdx  gcc:rsi  x86:[ebp+c]  The return code.
;
; (An illustrative trigger sketch follows this procedure.)
;
BEGINPROC vmmR0CallRing3LongJmp
    ;
    ; Save the registers on the stack.
    ;
    push    rbp
    mov     rbp, rsp
    push    r15
    push    r14
    push    r13
    push    r12
%ifdef ASM_CALL64_MSC
    push    rdi
    push    rsi
%endif
    push    rbx
    pushf
%ifdef RT_OS_WINDOWS
    sub     rsp, 0a0h
    movdqa  [rsp + 000h], xmm6
    movdqa  [rsp + 010h], xmm7
    movdqa  [rsp + 020h], xmm8
    movdqa  [rsp + 030h], xmm9
    movdqa  [rsp + 040h], xmm10
    movdqa  [rsp + 050h], xmm11
    movdqa  [rsp + 060h], xmm12
    movdqa  [rsp + 070h], xmm13
    movdqa  [rsp + 080h], xmm14
    movdqa  [rsp + 090h], xmm15
%endif
%ifdef VBOX_STRICT
    push    RESUME_MAGIC
%endif

    ;
    ; Normalize the parameters.
    ;
%ifdef ASM_CALL64_MSC
    mov     eax, edx                    ; rc
    mov     rdx, rcx                    ; pJmpBuf
%else
    mov     rdx, rdi                    ; pJmpBuf
    mov     eax, esi                    ; rc
%endif

    ;
    ; Is the jump buffer armed?
    ;
    cmp     qword [xDX + VMMR0JMPBUF.rip], byte 0
    je      .nok

    ;
    ; Sanity checks.
    ;
    mov     rdi, [xDX + VMMR0JMPBUF.pvSavedStack]
    test    rdi, rdi                    ; darwin may set this to 0.
    jz      .nok
    mov     [xDX + VMMR0JMPBUF.SpResume], rsp
 %ifndef VMM_R0_SWITCH_STACK
    mov     rsi, rsp
    mov     rcx, [xDX + VMMR0JMPBUF.rsp]
    sub     rcx, rsi

    ; two sanity checks on the size.
    cmp     rcx, VMM_STACK_SIZE         ; check max size.
    jnbe    .nok

    ;
    ; Copy the stack
    ;
    test    ecx, 7                      ; check alignment
    jnz     .nok
    mov     [xDX + VMMR0JMPBUF.cbSavedStack], ecx
    shr     ecx, 3
    rep movsq

 %endif ; !VMM_R0_SWITCH_STACK

    ; Save RSP & RBP to enable stack dumps
    mov     rcx, rbp
    mov     [xDX + VMMR0JMPBUF.SavedEbp], rcx
    sub     rcx, 8
    mov     [xDX + VMMR0JMPBUF.SavedEsp], rcx

    ; store the last pieces of info.
    mov     rcx, [xDX + VMMR0JMPBUF.rsp]
    mov     [xDX + VMMR0JMPBUF.SpCheck], rcx
    mov     byte [xDX + VMMR0JMPBUF.fInRing3Call], 1

    ;
    ; Do the long jump.
    ;
%ifdef RT_OS_WINDOWS
    movdqa  xmm6,  [xDX + VMMR0JMPBUF.xmm6 ]
    movdqa  xmm7,  [xDX + VMMR0JMPBUF.xmm7 ]
    movdqa  xmm8,  [xDX + VMMR0JMPBUF.xmm8 ]
    movdqa  xmm9,  [xDX + VMMR0JMPBUF.xmm9 ]
    movdqa  xmm10, [xDX + VMMR0JMPBUF.xmm10]
    movdqa  xmm11, [xDX + VMMR0JMPBUF.xmm11]
    movdqa  xmm12, [xDX + VMMR0JMPBUF.xmm12]
    movdqa  xmm13, [xDX + VMMR0JMPBUF.xmm13]
    movdqa  xmm14, [xDX + VMMR0JMPBUF.xmm14]
    movdqa  xmm15, [xDX + VMMR0JMPBUF.xmm15]
%endif
    mov     rbx, [xDX + VMMR0JMPBUF.rbx]
%ifdef ASM_CALL64_MSC
    mov     rsi, [xDX + VMMR0JMPBUF.rsi]
    mov     rdi, [xDX + VMMR0JMPBUF.rdi]
%endif
    mov     r12, [xDX + VMMR0JMPBUF.r12]
    mov     r13, [xDX + VMMR0JMPBUF.r13]
    mov     r14, [xDX + VMMR0JMPBUF.r14]
    mov     r15, [xDX + VMMR0JMPBUF.r15]
    mov     rbp, [xDX + VMMR0JMPBUF.rbp]
    mov     rsp, [xDX + VMMR0JMPBUF.rsp]
    push    qword [xDX + VMMR0JMPBUF.rflags]
    popf
    leave
    ret

    ;
    ; Failure
    ;
.nok:
%ifdef VBOX_STRICT
    pop     rax                         ; magic
    cmp     rax, RESUME_MAGIC
    je      .magic_ok
    mov     ecx, 0123h
    mov     [rcx], edx
.magic_ok:
%endif
    mov     eax, VERR_VMM_LONG_JMP_ERROR
%ifdef RT_OS_WINDOWS
    add     rsp, 0a0h                   ; skip XMM registers since they are unmodified.
%endif
    popf
    pop     rbx
%ifdef ASM_CALL64_MSC
    pop     rsi
    pop     rdi
%endif
    pop     r12
    pop     r13
    pop     r14
    pop     r15
    leave
    ret
ENDPROC vmmR0CallRing3LongJmp
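

;;
; Illustrative sketch only (not part of the original source): a hypothetical
; helper showing how code deep in a ring-0 call chain might trigger the long
; jump implemented above. The name vmmR0JmpDemoLongJmp and the way the jump
; buffer and status code reach it are assumptions made for illustration.
;
BEGINPROC vmmR0JmpDemoLongJmp
    ; Assumes our first argument is the armed jump buffer and the second is
    ; the status code to deliver to the matching vmmR0CallRing3SetJmp caller,
    ; i.e. they are already in the right registers (rcx/edx or rdi/esi) and
    ; can be forwarded unchanged.
    sub     rsp, 28h                    ; callee spill area (MSC) + stack alignment
    call    NAME(vmmR0CallRing3LongJmp)
    ; Only reached if the jump buffer was not armed or a sanity check failed;
    ; eax = VERR_VMM_LONG_JMP_ERROR in that case.
    add     rsp, 28h
    ret
ENDPROC vmmR0JmpDemoLongJmp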


;;
; Internal R0 logger worker: Logger wrapper.
;
; @cproto VMMR0DECL(void) vmmR0LoggerWrapper(const char *pszFormat, ...)
;
EXPORTEDNAME vmmR0LoggerWrapper
    int3
    int3
    int3
    ret
ENDPROC vmmR0LoggerWrapper
