VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0A.asm@ 1349

Last change on this file since 1349 was 1232, checked in by vboxsync, 18 years ago

align the stack correctly (Darwin).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 10.9 KB
; $Id: VMMR0A.asm 1232 2007-03-05 16:47:35Z vboxsync $
;; @file
; VMM - R0 assembly routines.
;

;
; Copyright (C) 2006 InnoTek Systemberatung GmbH
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License as published by the Free Software Foundation,
; in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
; distribution. VirtualBox OSE is distributed in the hope that it will
; be useful, but WITHOUT ANY WARRANTY of any kind.
;
; If you received this file as part of a commercial VirtualBox
; distribution, then only the terms of your commercial VirtualBox
; license agreement apply instead of the previous paragraph.
;

;*******************************************************************************
;*   Header Files                                                              *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VMMInternal.mac"
%include "iprt/err.mac"

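
;
; Note: the VMMR0JMPBUF layout comes from VMMInternal.mac.  The members this
; file relies on are the saved non-volatile registers (ebx/esi/edi/ebp plus
; eip/esp on x86; rbx, rbp, r12-r15, rip/rsp and, for the MSC convention,
; rsi/rdi on AMD64), fInRing3Call, SpCheck, SpResume, pvSavedStack and
; cbSavedStack.
;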
%ifdef __X86__ ; The other architecture(s) use(s) C99 variadic macros.
extern IMPNAME(RTLogLogger)
%endif


BEGINCODE

;;
; The setjmp variant used for calling Ring-3.
;
; This differs from the normal setjmp in that it will resume VMMR0CallHost if we're
; in the middle of a ring-3 call. Another difference is the function pointer and
; argument. This has to do with resuming code and the stack frame of the caller.
;
; @returns VINF_SUCCESS on success or whatever is passed to vmmR0CallHostLongJmp.
; @param   pJmpBuf  msc:rcx gcc:rdi x86:[esp+4]  Our jmp_buf.
; @param   pfn      msc:rdx gcc:rsi x86:[esp+8]  The function to be called when not resuming.
; @param   pVM      msc:r8  gcc:rdx x86:[esp+c]  The argument of that function.
;
BEGINPROC vmmR0CallHostSetJmp
%ifdef __X86__
        ;
        ; Save the registers.
        ;
        mov     edx, [esp + 4h]                 ; pJmpBuf
        mov     [edx + VMMR0JMPBUF.ebx], ebx
        mov     [edx + VMMR0JMPBUF.esi], esi
        mov     [edx + VMMR0JMPBUF.edi], edi
        mov     [edx + VMMR0JMPBUF.ebp], ebp
        mov     eax, [esp]                      ; return address
        mov     [edx + VMMR0JMPBUF.eip], eax
        lea     ecx, [esp + 4]                  ; (used in resume)
        mov     [edx + VMMR0JMPBUF.esp], ecx

        ;
        ; If we're not in a ring-3 call, call pfn and return.
        ;
        test    byte [edx + VMMR0JMPBUF.fInRing3Call], 1
        jnz     .resume

        mov     ecx, [esp + 0ch]                ; pvArg
        mov     eax, [esp + 08h]                ; pfn
        sub     esp, 12                         ; align the stack on a 16-byte boundary.
        mov     [esp], ecx
        call    eax
        add     esp, 12
        mov     edx, [esp + 4h]                 ; pJmpBuf
        and     dword [edx + VMMR0JMPBUF.eip], byte 0 ; used for valid check.
        ret

        ;
        ; Resume the VMMR0CallHost call.
        ;
.resume:
        ; Sanity checks.
        cmp     ecx, [edx + VMMR0JMPBUF.SpCheck]
        je      .espCheck_ok
.bad:
        and     dword [edx + VMMR0JMPBUF.eip], byte 0 ; used for valid check.
        mov     edi, [edx + VMMR0JMPBUF.edi]
        mov     esi, [edx + VMMR0JMPBUF.esi]
        mov     ebx, [edx + VMMR0JMPBUF.ebx]
        mov     eax, VERR_INTERNAL_ERROR        ; todo better return code!
        ret

.espCheck_ok:
        mov     ecx, [edx + VMMR0JMPBUF.cbSavedStack]
        cmp     ecx, 8192
        ja      .bad
        test    ecx, 3
        jnz     .bad
        mov     edi, [edx + VMMR0JMPBUF.esp]
        sub     edi, [edx + VMMR0JMPBUF.SpResume]
        cmp     ecx, edi
        jne     .bad

        ;
        ; Restore the stack.
        ;
        mov     byte [edx + VMMR0JMPBUF.fInRing3Call], 0
        mov     ecx, [edx + VMMR0JMPBUF.cbSavedStack]
        shr     ecx, 2
        mov     esi, [edx + VMMR0JMPBUF.pvSavedStack]
        mov     edi, [edx + VMMR0JMPBUF.SpResume]
        mov     esp, edi
        rep movsd

        ;
        ; Continue where we left off.
        ;
        popf
        pop     ebx
        pop     esi
        pop     edi
        pop     ebp
        xor     eax, eax                        ; VINF_SUCCESS
        ret
%endif ; __X86__

%ifdef __AMD64__
        ;
        ; Save the registers.
        ;
 %ifdef ASM_CALL64_MSC
        mov     r11, rdx                        ; pfn
        mov     rdx, rcx                        ; pJmpBuf
 %else
        mov     r8, rdx                         ; pVM (save it like MSC)
        mov     r11, rsi                        ; pfn
        mov     rdx, rdi                        ; pJmpBuf
 %endif
        mov     [rdx + VMMR0JMPBUF.rbx], rbx
 %ifdef ASM_CALL64_MSC
        mov     [rdx + VMMR0JMPBUF.rsi], rsi
        mov     [rdx + VMMR0JMPBUF.rdi], rdi
 %endif
        mov     [rdx + VMMR0JMPBUF.rbp], rbp
        mov     [rdx + VMMR0JMPBUF.r12], r12
        mov     [rdx + VMMR0JMPBUF.r13], r13
        mov     [rdx + VMMR0JMPBUF.r14], r14
        mov     [rdx + VMMR0JMPBUF.r15], r15
        mov     rax, [rsp]
        mov     [rdx + VMMR0JMPBUF.rip], rax
        lea     r10, [rsp + 8]                  ; (used in resume)
        mov     [rdx + VMMR0JMPBUF.rsp], r10

        ;
        ; If we're not in a ring-3 call, call pfn and return.
        ;
        test    byte [rdx + VMMR0JMPBUF.fInRing3Call], 1
        jnz     .resume

        push    rdx                             ; Save it and fix stack alignment (16).
 %ifdef ASM_CALL64_MSC
        mov     rcx, r8                         ; pVM -> arg0
 %else
        mov     rdi, r8                         ; pVM -> arg0
 %endif
        call    r11
        pop     rdx                             ; pJmpBuf
        and     qword [rdx + VMMR0JMPBUF.rip], byte 0 ; used for valid check.
        ret

        ;
        ; Resume the VMMR0CallHost call.
        ;
.resume:
        ; Sanity checks.
        cmp     r10, [rdx + VMMR0JMPBUF.SpCheck]
        je      .rspCheck_ok
.bad:
        and     qword [rdx + VMMR0JMPBUF.rip], byte 0 ; used for valid check.
        mov     rbx, [rdx + VMMR0JMPBUF.rbx]
 %ifdef ASM_CALL64_MSC
        mov     rsi, [rdx + VMMR0JMPBUF.rsi]
        mov     rdi, [rdx + VMMR0JMPBUF.rdi]
 %endif
        mov     r12, [rdx + VMMR0JMPBUF.r12]
        mov     r13, [rdx + VMMR0JMPBUF.r13]
        mov     r14, [rdx + VMMR0JMPBUF.r14]
        mov     r15, [rdx + VMMR0JMPBUF.r15]
        mov     eax, VERR_INTERNAL_ERROR        ; todo better return code!
        ret

.rspCheck_ok:
        mov     ecx, [rdx + VMMR0JMPBUF.cbSavedStack]
        cmp     rcx, 8192
        ja      .bad
        test    rcx, 3
        jnz     .bad
        mov     rdi, [rdx + VMMR0JMPBUF.rsp]
        sub     rdi, [rdx + VMMR0JMPBUF.SpResume]
        cmp     rcx, rdi
        jne     .bad

        ;
        ; Restore the stack.
        ;
        mov     byte [rdx + VMMR0JMPBUF.fInRing3Call], 0
        mov     ecx, [rdx + VMMR0JMPBUF.cbSavedStack]
        shr     ecx, 3
        mov     rsi, [rdx + VMMR0JMPBUF.pvSavedStack]
        mov     rdi, [rdx + VMMR0JMPBUF.SpResume]
        mov     rsp, rdi
        rep movsq

        ;
        ; Continue where we left off.
        ;
        popf
        pop     rbx
 %ifdef ASM_CALL64_MSC
        pop     rsi
        pop     rdi
 %endif
        pop     r12
        pop     r13
        pop     r14
        pop     r15
        pop     rbp
        xor     eax, eax                        ; VINF_SUCCESS
        ret
%endif
ENDPROC vmmR0CallHostSetJmp
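
;
; Usage sketch (comment only, not part of the build): how a ring-0 entry point
; is expected to drive vmmR0CallHostSetJmp.  The jump buffer member and the
; worker-function name below are illustrative assumptions, not taken from this
; file.
;
;     /* First entry: arm the jump buffer and run the worker on this stack. */
;     int rc = vmmR0CallHostSetJmp(&pVM->vmm.s.CallHostR0JmpBuf, pfnWorker, pVM);
;     /* rc is the worker's status code, or whatever VMMR0CallHost passed to
;        vmmR0CallHostLongJmp while the worker was running.  If ring-0 is
;        re-entered while fInRing3Call is set, the same call resumes the
;        interrupted worker instead of invoking pfnWorker again. */
;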


;;
; Worker for VMMR0CallHost.
; This will save the stack and registers.
;
; @param   pJmpBuf  msc:rcx gcc:rdi x86:[ebp+8]  Pointer to the jump buffer.
; @param   rc       msc:rdx gcc:rsi x86:[ebp+c]  The return code.
;
BEGINPROC vmmR0CallHostLongJmp
%ifdef __X86__
        ;
        ; Save the registers on the stack.
        ;
        push    ebp
        mov     ebp, esp
        push    edi
        push    esi
        push    ebx
        pushf

        ;
        ; Load parameters.
        ;
        mov     edx, [ebp + 08h]                ; pJmpBuf
        mov     eax, [ebp + 0ch]                ; rc

        ;
        ; Is the jump buffer armed?
        ;
        cmp     dword [edx + VMMR0JMPBUF.eip], byte 0
        je      .nok

        ;
        ; Save the stack.
        ;
        mov     edi, [edx + VMMR0JMPBUF.pvSavedStack]
        mov     [edx + VMMR0JMPBUF.SpResume], esp
        mov     esi, esp
        mov     ecx, [edx + VMMR0JMPBUF.esp]
        sub     ecx, esi

        ; two sanity checks on the size.
        cmp     ecx, 8192                       ; check max size.
        jbe     .ok
.nok:
        mov     eax, VERR_INTERNAL_ERROR
        popf
        pop     ebx
        pop     esi
        pop     edi
        leave
        ret
.ok:
        test    ecx, 3                          ; check alignment
        jnz     .nok
        mov     [edx + VMMR0JMPBUF.cbSavedStack], ecx
        shr     ecx, 2
        rep movsd

        ; store the last pieces of info.
        mov     ecx, [edx + VMMR0JMPBUF.esp]
        mov     [edx + VMMR0JMPBUF.SpCheck], ecx
        mov     byte [edx + VMMR0JMPBUF.fInRing3Call], 1

        ;
        ; Do the long jump.
        ;
        mov     ebx, [edx + VMMR0JMPBUF.ebx]
        mov     esi, [edx + VMMR0JMPBUF.esi]
        mov     edi, [edx + VMMR0JMPBUF.edi]
        mov     ebp, [edx + VMMR0JMPBUF.ebp]
        mov     ecx, [edx + VMMR0JMPBUF.eip]
        mov     esp, [edx + VMMR0JMPBUF.esp]
        jmp     ecx
%endif ; __X86__

%ifdef __AMD64__
        ;
        ; Save the registers on the stack.
        ;
        push    rbp
        mov     rbp, rsp
        push    r15
        push    r14
        push    r13
        push    r12
 %ifdef ASM_CALL64_MSC
        push    rdi
        push    rsi
 %endif
        push    rbx
        pushf

        ;
        ; Normalize the parameters.
        ;
 %ifdef ASM_CALL64_MSC
        mov     eax, edx                        ; rc
        mov     rdx, rcx                        ; pJmpBuf
 %else
        mov     rdx, rdi                        ; pJmpBuf
        mov     eax, esi                        ; rc
 %endif

        ;
        ; Is the jump buffer armed?
        ;
        cmp     qword [rdx + VMMR0JMPBUF.rip], byte 0
        je      .nok

        ;
        ; Save the stack.
        ;
        mov     rdi, [rdx + VMMR0JMPBUF.pvSavedStack]
        mov     [rdx + VMMR0JMPBUF.SpResume], rsp
        mov     rsi, rsp
        mov     rcx, [rdx + VMMR0JMPBUF.rsp]
        sub     rcx, rsi

        ; two sanity checks on the size.
        cmp     rcx, 8192                       ; check max size.
        jbe     .ok
.nok:
        mov     eax, VERR_INTERNAL_ERROR
        popf
        pop     rbx
 %ifdef ASM_CALL64_MSC
        pop     rsi
        pop     rdi
 %endif
        pop     r12
        pop     r13
        pop     r14
        pop     r15
        leave
        ret

.ok:
        test    ecx, 7                          ; check alignment
        jnz     .nok
        mov     [rdx + VMMR0JMPBUF.cbSavedStack], ecx
        shr     ecx, 3
        rep movsq

        ; store the last pieces of info.
        mov     rcx, [rdx + VMMR0JMPBUF.rsp]
        mov     [rdx + VMMR0JMPBUF.SpCheck], rcx
        mov     byte [rdx + VMMR0JMPBUF.fInRing3Call], 1

        ;
        ; Do the long jump.
        ;
        mov     rbx, [rdx + VMMR0JMPBUF.rbx]
 %ifdef ASM_CALL64_MSC
        mov     rsi, [rdx + VMMR0JMPBUF.rsi]
        mov     rdi, [rdx + VMMR0JMPBUF.rdi]
 %endif
        mov     r12, [rdx + VMMR0JMPBUF.r12]
        mov     r13, [rdx + VMMR0JMPBUF.r13]
        mov     r14, [rdx + VMMR0JMPBUF.r14]
        mov     r15, [rdx + VMMR0JMPBUF.r15]
        mov     rbp, [rdx + VMMR0JMPBUF.rbp]
        mov     rcx, [rdx + VMMR0JMPBUF.rip]
        mov     rsp, [rdx + VMMR0JMPBUF.rsp]
        jmp     rcx
%endif
ENDPROC vmmR0CallHostLongJmp
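
;
; Sketch of the ring-3 call path (comment only, not part of the build).  The
; wrapping C code and the status code used here are illustrative assumptions:
;
;     /* VMMR0CallHost records the ring-3 request, saves this stack and
;        long-jumps out of ring-0: */
;     rc = vmmR0CallHostLongJmp(&pVM->vmm.s.CallHostR0JmpBuf, VINF_VMM_CALL_HOST);
;     /* The long jump makes the armed vmmR0CallHostSetJmp return that status
;        to its caller, which hands it on to ring-3.  When ring-0 is re-entered
;        and vmmR0CallHostSetJmp resumes, the stack saved above is restored and
;        vmmR0CallHostLongJmp appears to return VINF_SUCCESS, letting
;        VMMR0CallHost pick up the result of the ring-3 request.  If the jump
;        buffer is not armed (or the sanity checks fail), VERR_INTERNAL_ERROR
;        is returned instead. */
;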


;;
; Internal R0 logger worker: Logger wrapper.
;
; @cproto VMMR0DECL(void) vmmR0LoggerWrapper(const char *pszFormat, ...)
;
EXPORTEDNAME vmmR0LoggerWrapper
%ifdef __X86__ ; The other architecture(s) use(s) C99 variadic macros.
        push    0                               ; assumes we're the wrapper for a default instance.
        call    IMP(RTLogLogger)
        add     esp, byte 4
        ret
%else
        int3
        int3
        int3
        ret
%endif
ENDPROC vmmR0LoggerWrapper
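
;
; Rough C-level intent of the x86 wrapper above (a sketch only; the assembly
; reuses the caller's variadic argument frame rather than building a va_list,
; and the va_list logger variant RTLogLoggerV is assumed here for illustration):
;
;     VMMR0DECL(void) vmmR0LoggerWrapper(const char *pszFormat, ...)
;     {
;         va_list va;
;         va_start(va, pszFormat);
;         RTLogLoggerV(NULL /* default instance */, pszFormat, va);
;         va_end(va);
;     }
;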