source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0JmpA-amd64.asm@20770

Last change on this file since 20770 was 20545, checked in by vboxsync on 2009-06-13:

VMM: Instrumented the setjump code with stack usage statistics (only 32-bit and stack switching version atm).

; $Id: VMMR0JmpA-amd64.asm 20545 2009-06-13 23:56:48Z vboxsync $
;; @file
; VMM - R0 SetJmp / LongJmp routines for AMD64.
;

;
; Copyright (C) 2006-2009 Sun Microsystems, Inc.
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;
; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
; Clara, CA 95054 USA or visit http://www.sun.com if you need
; additional information or have any questions.
;

;*******************************************************************************
;*  Header Files                                                               *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "../VMMInternal.mac"
%include "iprt/err.mac"
%include "VBox/param.mac"


;*******************************************************************************
;*  Defined Constants And Macros                                               *
;*******************************************************************************
%define RESUME_MAGIC    07eadf00dh
%define STACK_PADDING   0eeeeeeeeeeeeeeeeh


; For vmmR0LoggerWrapper. (The other architecture(s) use(s) C99 variadic macros.)
extern NAME(RTLogLogger)


BEGINCODE


45;;
46; The setjmp variant used for calling Ring-3.
47;
48; This differs from the normal setjmp in that it will resume VMMR0CallHost if we're
49; in the middle of a ring-3 call. Another differences is the function pointer and
50; argument. This has to do with resuming code and the stack frame of the caller.
51;
52; @returns VINF_SUCCESS on success or whatever is passed to vmmR0CallHostLongJmp.
53; @param pJmpBuf msc:rcx gcc:rdi x86:[esp+0x04] Our jmp_buf.
54; @param pfn msc:rdx gcc:rsi x86:[esp+0x08] The function to be called when not resuming.
55; @param pvUser1 msc:r8 gcc:rdx x86:[esp+0x0c] The argument of that function.
56; @param pvUser2 msc:r9 gcc:rcx x86:[esp+0x10] The argument of that function.
57;
BEGINPROC vmmR0CallHostSetJmp
GLOBALNAME vmmR0CallHostSetJmpEx
    ;
    ; Save the registers.
    ;
    push    rbp
    mov     rbp, rsp
 %ifdef ASM_CALL64_MSC
    sub     rsp, 30h
    mov     r11, rdx                    ; pfn
    mov     rdx, rcx                    ; pJmpBuf
 %else
    sub     rsp, 10h
    mov     r8, rdx                     ; pvUser1 (save it like MSC)
    mov     r9, rcx                     ; pvUser2 (save it like MSC)
    mov     r11, rsi                    ; pfn
    mov     rdx, rdi                    ; pJmpBuf
 %endif
    mov     [xDX + VMMR0JMPBUF.rbx], rbx
 %ifdef ASM_CALL64_MSC
    mov     [xDX + VMMR0JMPBUF.rsi], rsi
    mov     [xDX + VMMR0JMPBUF.rdi], rdi
 %endif
    mov     r10, [rbp]
    mov     [xDX + VMMR0JMPBUF.rbp], r10
    mov     [xDX + VMMR0JMPBUF.r12], r12
    mov     [xDX + VMMR0JMPBUF.r13], r13
    mov     [xDX + VMMR0JMPBUF.r14], r14
    mov     [xDX + VMMR0JMPBUF.r15], r15
    mov     xAX, [rbp + 8]
    mov     [xDX + VMMR0JMPBUF.rip], xAX
    lea     r10, [rbp + 10h]            ; (used in resume)
    mov     [xDX + VMMR0JMPBUF.rsp], r10

    ;
    ; If we're not in a ring-3 call, call pfn and return.
    ;
    test    byte [xDX + VMMR0JMPBUF.fInRing3Call], 1
    jnz     .resume

 %ifdef VMM_R0_SWITCH_STACK
    mov     r15, [xDX + VMMR0JMPBUF.pvSavedStack]
    test    r15, r15
    jz      .entry_error
  %ifdef VBOX_STRICT
    cmp     dword [r15], 0h
    jne     .entry_error
    mov     rdi, r15
    mov     rcx, VMM_STACK_SIZE / 8
    mov     rax, qword 0eeeeeeeffeeeeeeeh
    rep stosq                           ; rep, not repne: stosq doesn't affect the flags.
    mov     [rdi - 10h], rbx
  %endif
    lea     r15, [r15 + VMM_STACK_SIZE - 40h]
    mov     rsp, r15                    ; Switch stack!
 %endif ; VMM_R0_SWITCH_STACK
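    ; Note: the 0eeeeeeeffeeeeeeeh fill above is the stack instrumentation
    ; mentioned in the change note for r20545: by pre-poisoning the whole
    ; switched-to stack, the high-water mark can later be found by scanning
    ; for the first qword that no longer holds the fill pattern.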

    mov     r12, rdx                    ; Save pJmpBuf.
 %ifdef ASM_CALL64_MSC
    mov     rcx, r8                     ; pvUser -> arg0
    mov     rdx, r9
 %else
    mov     rdi, r8                     ; pvUser -> arg0
    mov     rsi, r9
 %endif
    call    r11
    mov     rdx, r12                    ; Restore pJmpBuf

 %ifdef VMM_R0_SWITCH_STACK
  %ifdef VBOX_STRICT
    mov     r15, [xDX + VMMR0JMPBUF.pvSavedStack]
    mov     dword [r15], 0h             ; Reset the marker.
  %endif
 %endif

    ;
    ; Return like in the long jump but clear rip, no shortcuts here.
    ;
.proper_return:
    mov     rbx, [xDX + VMMR0JMPBUF.rbx]
 %ifdef ASM_CALL64_MSC
    mov     rsi, [xDX + VMMR0JMPBUF.rsi]
    mov     rdi, [xDX + VMMR0JMPBUF.rdi]
 %endif
    mov     r12, [xDX + VMMR0JMPBUF.r12]
    mov     r13, [xDX + VMMR0JMPBUF.r13]
    mov     r14, [xDX + VMMR0JMPBUF.r14]
    mov     r15, [xDX + VMMR0JMPBUF.r15]
    mov     rbp, [xDX + VMMR0JMPBUF.rbp]
    mov     xCX, [xDX + VMMR0JMPBUF.rip]
    and     qword [xDX + VMMR0JMPBUF.rip], byte 0 ; used for the validity check.
    mov     rsp, [xDX + VMMR0JMPBUF.rsp]
    jmp     xCX

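    ; Clearing VMMR0JMPBUF.rip above disarms the jump buffer:
    ; vmmR0CallHostLongJmp treats a zero rip as "not armed" (see its
    ; cmp/je .nok check below), so a long jump attempted after this point
    ; fails safely instead of jumping through a stale context.
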
.entry_error:
    mov     eax, VERR_INTERNAL_ERROR_2
    jmp     .proper_return

.stack_overflow:
    mov     eax, VERR_INTERNAL_ERROR_5
    jmp     .proper_return

    ;
    ; Aborting resume.
    ;
.bad:
    and     qword [xDX + VMMR0JMPBUF.rip], byte 0 ; used for the validity check.
    mov     rbx, [xDX + VMMR0JMPBUF.rbx]
 %ifdef ASM_CALL64_MSC
    mov     rsi, [xDX + VMMR0JMPBUF.rsi]
    mov     rdi, [xDX + VMMR0JMPBUF.rdi]
 %endif
    mov     r12, [xDX + VMMR0JMPBUF.r12]
    mov     r13, [xDX + VMMR0JMPBUF.r13]
    mov     r14, [xDX + VMMR0JMPBUF.r14]
    mov     r15, [xDX + VMMR0JMPBUF.r15]
    mov     eax, VERR_INTERNAL_ERROR_3  ; @todo better return code!
    leave
    ret

    ;
    ; Resume the VMMR0CallHost call.
    ;
.resume:
    ; Sanity checks.
 %ifdef VMM_R0_SWITCH_STACK
    ;; @todo amd64/switch/resume sanity.
 %else ; !VMM_R0_SWITCH_STACK
    cmp     r10, [xDX + VMMR0JMPBUF.SpCheck]
    jne     .bad

    mov     ecx, [xDX + VMMR0JMPBUF.cbSavedStack]
    cmp     rcx, VMM_STACK_SIZE
    ja      .bad
    test    rcx, 3
    jnz     .bad
    mov     rdi, [xDX + VMMR0JMPBUF.rsp]
    sub     rdi, [xDX + VMMR0JMPBUF.SpResume]
    cmp     rcx, rdi
    jne     .bad
 %endif

%ifdef VMM_R0_SWITCH_STACK
    ; Switch stack.
    mov     rsp, [xDX + VMMR0JMPBUF.SpResume]
%else
    ; Restore the stack.
    mov     ecx, [xDX + VMMR0JMPBUF.cbSavedStack]
    shr     ecx, 3
    mov     rsi, [xDX + VMMR0JMPBUF.pvSavedStack]
    mov     rdi, [xDX + VMMR0JMPBUF.SpResume]
    mov     rsp, rdi
    rep movsq
%endif ; !VMM_R0_SWITCH_STACK
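    ; In the non-switching case, the rep movsq above replays the
    ; cbSavedStack/8 qwords that vmmR0CallHostLongJmp copied into
    ; pvSavedStack, rebuilding the frames between SpResume and the rsp
    ; recorded at setjmp time, so the pops below find exactly the state
    ; the long jump left behind.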
    mov     byte [xDX + VMMR0JMPBUF.fInRing3Call], 0

    ;
    ; Continue where we left off.
    ;
%ifdef VBOX_STRICT
    pop     rax                         ; magic
    cmp     rax, RESUME_MAGIC
    je      .magic_ok
    mov     ecx, 0123h                  ; Bad magic: write to an invalid address to crash noisily.
    mov     [rcx], edx
.magic_ok:
%endif
    popf
    pop     rbx
 %ifdef ASM_CALL64_MSC
    pop     rsi
    pop     rdi
 %endif
    pop     r12
    pop     r13
    pop     r14
    pop     r15
    pop     rbp
    xor     eax, eax                    ; VINF_SUCCESS
    ret
ENDPROC vmmR0CallHostSetJmp


;;
; Worker for VMMR0CallHost.
; This will save the stack and registers.
;
; @param    pJmpBuf  msc:rcx gcc:rdi x86:[ebp+08h] Pointer to the jump buffer.
; @param    rc       msc:rdx gcc:rsi x86:[ebp+0ch] The return code.
;
BEGINPROC vmmR0CallHostLongJmp
    ;
    ; Save the registers on the stack.
    ;
    push    rbp
    mov     rbp, rsp
    push    r15
    push    r14
    push    r13
    push    r12
 %ifdef ASM_CALL64_MSC
    push    rdi
    push    rsi
 %endif
    push    rbx
    pushf
%ifdef VBOX_STRICT
    push    RESUME_MAGIC
%endif

    ;
    ; Normalize the parameters.
    ;
 %ifdef ASM_CALL64_MSC
    mov     eax, edx                    ; rc
    mov     rdx, rcx                    ; pJmpBuf
 %else
    mov     rdx, rdi                    ; pJmpBuf
    mov     eax, esi                    ; rc
 %endif

    ;
    ; Is the jump buffer armed?
    ;
    cmp     qword [xDX + VMMR0JMPBUF.rip], byte 0
    je      .nok

    ;
    ; Sanity checks.
    ;
    mov     rdi, [xDX + VMMR0JMPBUF.pvSavedStack]
    test    rdi, rdi                    ; Darwin may set this to 0.
    jz      .nok
    mov     [xDX + VMMR0JMPBUF.SpResume], rsp
 %ifndef VMM_R0_SWITCH_STACK
    mov     rsi, rsp
    mov     rcx, [xDX + VMMR0JMPBUF.rsp]
    sub     rcx, rsi

    ; Two sanity checks on the size.
    cmp     rcx, VMM_STACK_SIZE         ; Check the max size.
    jnbe    .nok

    ;
    ; Copy the stack.
    ;
    test    ecx, 7                      ; Check alignment.
    jnz     .nok
    mov     [xDX + VMMR0JMPBUF.cbSavedStack], ecx
    shr     ecx, 3
    rep movsq

 %endif ; !VMM_R0_SWITCH_STACK
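    ; In the non-switching case, the code above just measured how much of
    ; the current stack lies below the rsp recorded at setjmp time
    ; (rcx = saved rsp - current rsp) and copied that many bytes into
    ; pvSavedStack; vmmR0CallHostSetJmp's .resume path copies them back.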

    ; Save RSP & RBP to enable stack dumps.
    mov     rcx, rbp
    mov     [xDX + VMMR0JMPBUF.SavedEbp], rcx
    sub     rcx, 8
    mov     [xDX + VMMR0JMPBUF.SavedEsp], rcx

    ; Store the last pieces of info.
    mov     rcx, [xDX + VMMR0JMPBUF.rsp]
    mov     [xDX + VMMR0JMPBUF.SpCheck], rcx
    mov     byte [xDX + VMMR0JMPBUF.fInRing3Call], 1

    ;
    ; Do the long jump.
    ;
    mov     rbx, [xDX + VMMR0JMPBUF.rbx]
 %ifdef ASM_CALL64_MSC
    mov     rsi, [xDX + VMMR0JMPBUF.rsi]
    mov     rdi, [xDX + VMMR0JMPBUF.rdi]
 %endif
    mov     r12, [xDX + VMMR0JMPBUF.r12]
    mov     r13, [xDX + VMMR0JMPBUF.r13]
    mov     r14, [xDX + VMMR0JMPBUF.r14]
    mov     r15, [xDX + VMMR0JMPBUF.r15]
    mov     rbp, [xDX + VMMR0JMPBUF.rbp]
    mov     rcx, [xDX + VMMR0JMPBUF.rip]
    mov     rsp, [xDX + VMMR0JMPBUF.rsp]
    jmp     rcx

    ;
    ; Failure.
    ;
.nok:
%ifdef VBOX_STRICT
    pop     rax                         ; magic
    cmp     rax, RESUME_MAGIC
    je      .magic_ok
    mov     ecx, 0123h                  ; Bad magic: write to an invalid address to crash noisily.
    mov     [rcx], edx
.magic_ok:
%endif
    mov     eax, VERR_INTERNAL_ERROR_4
    popf
    pop     rbx
 %ifdef ASM_CALL64_MSC
    pop     rsi
    pop     rdi
 %endif
    pop     r12
    pop     r13
    pop     r14
    pop     r15
    leave
    ret
ENDPROC vmmR0CallHostLongJmp


;;
; Internal R0 logger worker: Logger wrapper.
;
; @cproto VMMR0DECL(void) vmmR0LoggerWrapper(const char *pszFormat, ...)
;
EXPORTEDNAME vmmR0LoggerWrapper
    int3
    int3
    int3
    ret
ENDPROC vmmR0LoggerWrapper
