source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0A.asm@14671

; $Id: VMMR0A.asm 14505 2008-11-24 07:10:50Z vboxsync $
;; @file
; VMM - R0 assembly routines.
;

;
; Copyright (C) 2006-2007 Sun Microsystems, Inc.
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;
; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
; Clara, CA 95054 USA or visit http://www.sun.com if you need
; additional information or have any questions.
;

;*******************************************************************************
;*      Header Files                                                           *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VMMInternal.mac"
%include "iprt/err.mac"


%ifdef RT_ARCH_X86 ; The other architecture(s) use(s) C99 variadic macros.
extern IMPNAME(RTLogLogger)
%endif
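
; For orientation, a hedged C view of the VMMR0JMPBUF fields this file relies
; on. The authoritative definition lives in VMMInternal.h and is mirrored by
; the VMMInternal.mac include above; the field order and exact types shown
; here are assumptions reconstructed from the code below, not the real layout.
;
;   typedef struct VMMR0JMPBUF
;   {
;       /* Callee-saved register snapshot; amd64 builds use rbx, r12-r15
;          and (with the MSC calling convention) rsi/rdi instead. */
;       uintptr_t   ebx, esi, edi, ebp;
;       uintptr_t   eip;            /* Resume address; 0 = buffer not armed. */
;       uintptr_t   esp;            /* Caller stack pointer at setjmp time. */
;       uint8_t     fInRing3Call;   /* Set by the long jump, cleared on resume. */
;       uintptr_t   SpCheck;        /* Expected stack pointer on resume (sanity check). */
;       uintptr_t   SpResume;       /* Stack pointer at the long jump point. */
;       void       *pvSavedStack;   /* Buffer receiving the stack copy (may be NULL on darwin). */
;       uint32_t    cbSavedStack;   /* Bytes copied; limited to 8192 below. */
;   } VMMR0JMPBUF;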


BEGINCODE


;;
; The setjmp variant used for calling Ring-3.
;
; This differs from the normal setjmp in that it will resume VMMR0CallHost if we're
; in the middle of a ring-3 call. Another difference is the function pointer and
; argument. This has to do with resuming code and the stack frame of the caller.
;
; @returns  VINF_SUCCESS on success or whatever is passed to vmmR0CallHostLongJmp.
; @param    pJmpBuf   msc:rcx gcc:rdi x86:[esp+0x04]  Our jmp_buf.
; @param    pfn       msc:rdx gcc:rsi x86:[esp+0x08]  The function to be called when not resuming.
; @param    pvUser1   msc:r8  gcc:rdx x86:[esp+0x0c]  The argument of that function.
; @param    pvUser2   msc:r9  gcc:rcx x86:[esp+0x10]  The argument of that function.
;
BEGINPROC vmmR0CallHostSetJmp
GLOBALNAME vmmR0CallHostSetJmpEx
%ifdef RT_ARCH_X86
        ;
        ; Save the registers.
        ;
        mov     edx, [esp + 4h]             ; pJmpBuf
        mov     [edx + VMMR0JMPBUF.ebx], ebx
        mov     [edx + VMMR0JMPBUF.esi], esi
        mov     [edx + VMMR0JMPBUF.edi], edi
        mov     [edx + VMMR0JMPBUF.ebp], ebp
        mov     eax, [esp]
        mov     [edx + VMMR0JMPBUF.eip], eax
        lea     ecx, [esp + 4]              ; (used in resume)
        mov     [edx + VMMR0JMPBUF.esp], ecx

        ;
        ; If we're not in a ring-3 call, call pfn and return.
        ;
        test    byte [edx + VMMR0JMPBUF.fInRing3Call], 1
        jnz     .resume

        mov     ecx, [esp + 0ch]            ; pvUser1
        mov     edx, [esp + 10h]            ; pvUser2
        mov     eax, [esp + 08h]            ; pfn
        sub     esp, 16                     ; align the stack on a 16-byte boundary.
        mov     [esp], ecx
        mov     [esp+4], edx
        call    eax
        add     esp, 16
        mov     edx, [esp + 4h]             ; pJmpBuf

        ; Restore the registers that we're not allowed to modify, otherwise a
        ; resume might restore the wrong values (from the previous run).
        mov     edi, [edx + VMMR0JMPBUF.edi]
        mov     esi, [edx + VMMR0JMPBUF.esi]
        mov     ebx, [edx + VMMR0JMPBUF.ebx]
        mov     ebp, [edx + VMMR0JMPBUF.ebp]

        and     dword [edx + VMMR0JMPBUF.eip], byte 0 ; used for validity check.
        ret

        ;
        ; Resume the VMMR0CallHost call.
        ;
.resume:
        ; Sanity checks.
        cmp     ecx, [edx + VMMR0JMPBUF.SpCheck]
        je      .espCheck_ok
.bad:
        and     dword [edx + VMMR0JMPBUF.eip], byte 0 ; used for validity check.
        mov     edi, [edx + VMMR0JMPBUF.edi]
        mov     esi, [edx + VMMR0JMPBUF.esi]
        mov     ebx, [edx + VMMR0JMPBUF.ebx]
        mov     eax, VERR_INTERNAL_ERROR    ; @todo better return code!
        ret

.espCheck_ok:
        mov     ecx, [edx + VMMR0JMPBUF.cbSavedStack]
        cmp     ecx, 8192
        ja      .bad
        test    ecx, 3
        jnz     .bad
        mov     edi, [edx + VMMR0JMPBUF.esp]
        sub     edi, [edx + VMMR0JMPBUF.SpResume]
        cmp     ecx, edi
        jne     .bad

        ;
        ; Restore the stack.
        ;
        mov     byte [edx + VMMR0JMPBUF.fInRing3Call], 0
        mov     ecx, [edx + VMMR0JMPBUF.cbSavedStack]
        shr     ecx, 2
        mov     esi, [edx + VMMR0JMPBUF.pvSavedStack]
        mov     edi, [edx + VMMR0JMPBUF.SpResume]
        mov     esp, edi
        rep movsd

        ;
        ; Continue where we left off.
        ;
        popf
        pop     ebx
        pop     esi
        pop     edi
        pop     ebp
        xor     eax, eax                    ; VINF_SUCCESS
        ret
%endif ; RT_ARCH_X86

%ifdef RT_ARCH_AMD64
        ;
        ; Save the registers.
        ;
        push    rbp
        mov     rbp, rsp
 %ifdef ASM_CALL64_MSC
        sub     rsp, 30h
        mov     r11, rdx                    ; pfn
        mov     rdx, rcx                    ; pJmpBuf
 %else
        sub     rsp, 10h
        mov     r8, rdx                     ; pvUser1 (save it like MSC)
        mov     r9, rcx                     ; pvUser2 (save it like MSC)
        mov     r11, rsi                    ; pfn
        mov     rdx, rdi                    ; pJmpBuf
 %endif
        mov     [rdx + VMMR0JMPBUF.rbx], rbx
 %ifdef ASM_CALL64_MSC
        mov     [rdx + VMMR0JMPBUF.rsi], rsi
        mov     [rdx + VMMR0JMPBUF.rdi], rdi
 %endif
        mov     r10, [rbp]
        mov     [rdx + VMMR0JMPBUF.rbp], r10
        mov     [rdx + VMMR0JMPBUF.r12], r12
        mov     [rdx + VMMR0JMPBUF.r13], r13
        mov     [rdx + VMMR0JMPBUF.r14], r14
        mov     [rdx + VMMR0JMPBUF.r15], r15
        mov     rax, [rbp + 8]
        mov     [rdx + VMMR0JMPBUF.rip], rax
        lea     r10, [rbp + 10h]            ; (used in resume)
        mov     [rdx + VMMR0JMPBUF.rsp], r10

        ;
        ; If we're not in a ring-3 call, call pfn and return.
        ;
        test    byte [rdx + VMMR0JMPBUF.fInRing3Call], 1
        jnz     .resume

        mov     [rbp - 8], rdx              ; Save it and fix stack alignment (16).
 %ifdef ASM_CALL64_MSC
        mov     rcx, r8                     ; pvUser1 -> arg0
        mov     rdx, r9                     ; pvUser2 -> arg1
 %else
        mov     rdi, r8                     ; pvUser1 -> arg0
        mov     rsi, r9                     ; pvUser2 -> arg1
 %endif
        call    r11
        mov     rdx, [rbp - 8]              ; pJmpBuf

        ; Restore the registers that we're not allowed to modify, otherwise a
        ; resume might restore the wrong values (from the previous run).
        mov     rbx, [rdx + VMMR0JMPBUF.rbx]
 %ifdef ASM_CALL64_MSC
        mov     rsi, [rdx + VMMR0JMPBUF.rsi]
        mov     rdi, [rdx + VMMR0JMPBUF.rdi]
 %endif
        mov     r12, [rdx + VMMR0JMPBUF.r12]
        mov     r13, [rdx + VMMR0JMPBUF.r13]
        mov     r14, [rdx + VMMR0JMPBUF.r14]
        mov     r15, [rdx + VMMR0JMPBUF.r15]

        and     qword [rdx + VMMR0JMPBUF.rip], byte 0 ; used for validity check.
        leave
        ret

        ;
        ; Resume the VMMR0CallHost call.
        ;
.resume:
        ; Sanity checks.
        cmp     r10, [rdx + VMMR0JMPBUF.SpCheck]
        je      .rspCheck_ok
.bad:
        and     qword [rdx + VMMR0JMPBUF.rip], byte 0 ; used for validity check.
        mov     rbx, [rdx + VMMR0JMPBUF.rbx]
 %ifdef ASM_CALL64_MSC
        mov     rsi, [rdx + VMMR0JMPBUF.rsi]
        mov     rdi, [rdx + VMMR0JMPBUF.rdi]
 %endif
        mov     r12, [rdx + VMMR0JMPBUF.r12]
        mov     r13, [rdx + VMMR0JMPBUF.r13]
        mov     r14, [rdx + VMMR0JMPBUF.r14]
        mov     r15, [rdx + VMMR0JMPBUF.r15]
        mov     eax, VERR_INTERNAL_ERROR    ; @todo better return code!
        leave
        ret

.rspCheck_ok:
        mov     ecx, [rdx + VMMR0JMPBUF.cbSavedStack]
        cmp     rcx, 8192
        ja      .bad
        test    rcx, 3
        jnz     .bad
        mov     rdi, [rdx + VMMR0JMPBUF.rsp]
        sub     rdi, [rdx + VMMR0JMPBUF.SpResume]
        cmp     rcx, rdi
        jne     .bad

        ;
        ; Restore the stack.
        ;
        mov     byte [rdx + VMMR0JMPBUF.fInRing3Call], 0
        mov     ecx, [rdx + VMMR0JMPBUF.cbSavedStack]
        shr     ecx, 3
        mov     rsi, [rdx + VMMR0JMPBUF.pvSavedStack]
        mov     rdi, [rdx + VMMR0JMPBUF.SpResume]
        mov     rsp, rdi
        rep movsq

        ;
        ; Continue where we left off.
        ;
        popf
        pop     rbx
 %ifdef ASM_CALL64_MSC
        pop     rsi
        pop     rdi
 %endif
        pop     r12
        pop     r13
        pop     r14
        pop     r15
        pop     rbp
        xor     eax, eax                    ; VINF_SUCCESS
        ret
%endif ; RT_ARCH_AMD64
ENDPROC vmmR0CallHostSetJmp
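
;
; A hedged C sketch of the calling contract above. Only the parameter list,
; the return convention and the routine/struct names come from this file; the
; C-side typedef, the caller, the worker and the jump buffer location are
; assumptions for illustration:
;
;   DECLASM(int) vmmR0CallHostSetJmp(PVMMR0JMPBUF pJmpBuf,
;                                    int (*pfn)(void *, void *),
;                                    void *pvUser1, void *pvUser2);
;
;   int vmmR0EntryExample(PVM pVM)                  /* hypothetical caller */
;   {
;       /* First pass: arms the buffer, runs pfn(pvUser1, pvUser2) and
;          returns its rc; if pfn longjmps out to ring-3 instead, the rc
;          handed to vmmR0CallHostLongJmp is returned. A second call while
;          fInRing3Call is still set skips pfn, restores the saved stack
;          and resumes inside the original long jump frame. */
;       return vmmR0CallHostSetJmp(&pVM->vmm.s.CallHostR0JmpBuf, /* assumed field */
;                                  vmmR0WorkerExample,           /* hypothetical */
;                                  pVM, NULL);
;   }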


;;
; Worker for VMMR0CallHost.
; This will save the stack and registers.
;
; @param    pJmpBuf   msc:rcx gcc:rdi x86:[ebp+08h]  Pointer to the jump buffer.
; @param    rc        msc:rdx gcc:rsi x86:[ebp+0ch]  The return code.
;
BEGINPROC vmmR0CallHostLongJmp
%ifdef RT_ARCH_X86
        ;
        ; Save the registers on the stack.
        ;
        push    ebp
        mov     ebp, esp
        push    edi
        push    esi
        push    ebx
        pushf

        ;
        ; Load parameters.
        ;
        mov     edx, [ebp + 08h]            ; pJmpBuf
        mov     eax, [ebp + 0ch]            ; rc

        ;
        ; Is the jump buffer armed?
        ;
        cmp     dword [edx + VMMR0JMPBUF.eip], byte 0
        je      .nok

        ;
        ; Save the stack.
        ;
        mov     edi, [edx + VMMR0JMPBUF.pvSavedStack]
        test    edi, edi                    ; darwin may set this to 0.
        jz      .nok
        mov     [edx + VMMR0JMPBUF.SpResume], esp
        mov     esi, esp
        mov     ecx, [edx + VMMR0JMPBUF.esp]
        sub     ecx, esi

        ; Two sanity checks on the size.
        cmp     ecx, 8192                   ; check max size.
        jbe     .ok
.nok:
        mov     eax, VERR_INTERNAL_ERROR
        popf
        pop     ebx
        pop     esi
        pop     edi
        leave
        ret
.ok:
        test    ecx, 3                      ; check alignment
        jnz     .nok
        mov     [edx + VMMR0JMPBUF.cbSavedStack], ecx
        shr     ecx, 2
        rep movsd

        ; Store the last pieces of info.
        mov     ecx, [edx + VMMR0JMPBUF.esp]
        mov     [edx + VMMR0JMPBUF.SpCheck], ecx
        mov     byte [edx + VMMR0JMPBUF.fInRing3Call], 1

        ;
        ; Do the long jump.
        ;
        mov     ebx, [edx + VMMR0JMPBUF.ebx]
        mov     esi, [edx + VMMR0JMPBUF.esi]
        mov     edi, [edx + VMMR0JMPBUF.edi]
        mov     ebp, [edx + VMMR0JMPBUF.ebp]
        mov     ecx, [edx + VMMR0JMPBUF.eip]
        mov     esp, [edx + VMMR0JMPBUF.esp]
        jmp     ecx
%endif ; RT_ARCH_X86

%ifdef RT_ARCH_AMD64
        ;
        ; Save the registers on the stack.
        ;
        push    rbp
        mov     rbp, rsp
        push    r15
        push    r14
        push    r13
        push    r12
 %ifdef ASM_CALL64_MSC
        push    rdi
        push    rsi
 %endif
        push    rbx
        pushf

        ;
        ; Normalize the parameters.
        ;
 %ifdef ASM_CALL64_MSC
        mov     eax, edx                    ; rc
        mov     rdx, rcx                    ; pJmpBuf
 %else
        mov     rdx, rdi                    ; pJmpBuf
        mov     eax, esi                    ; rc
 %endif

        ;
        ; Is the jump buffer armed?
        ;
        cmp     qword [rdx + VMMR0JMPBUF.rip], byte 0
        je      .nok

        ;
        ; Save the stack.
        ;
        mov     rdi, [rdx + VMMR0JMPBUF.pvSavedStack]
        test    rdi, rdi                    ; darwin may set this to 0.
        jz      .nok
        mov     [rdx + VMMR0JMPBUF.SpResume], rsp
        mov     rsi, rsp
        mov     rcx, [rdx + VMMR0JMPBUF.rsp]
        sub     rcx, rsi

        ; Two sanity checks on the size.
        cmp     rcx, 8192                   ; check max size.
        jbe     .ok
.nok:
        mov     eax, VERR_INTERNAL_ERROR
        popf
        pop     rbx
 %ifdef ASM_CALL64_MSC
        pop     rsi
        pop     rdi
 %endif
        pop     r12
        pop     r13
        pop     r14
        pop     r15
        leave
        ret

.ok:
        test    ecx, 7                      ; check alignment
        jnz     .nok
        mov     [rdx + VMMR0JMPBUF.cbSavedStack], ecx
        shr     ecx, 3
        rep movsq

        ; Store the last pieces of info.
        mov     rcx, [rdx + VMMR0JMPBUF.rsp]
        mov     [rdx + VMMR0JMPBUF.SpCheck], rcx
        mov     byte [rdx + VMMR0JMPBUF.fInRing3Call], 1

        ;
        ; Do the long jump.
        ;
        mov     rbx, [rdx + VMMR0JMPBUF.rbx]
 %ifdef ASM_CALL64_MSC
        mov     rsi, [rdx + VMMR0JMPBUF.rsi]
        mov     rdi, [rdx + VMMR0JMPBUF.rdi]
 %endif
        mov     r12, [rdx + VMMR0JMPBUF.r12]
        mov     r13, [rdx + VMMR0JMPBUF.r13]
        mov     r14, [rdx + VMMR0JMPBUF.r14]
        mov     r15, [rdx + VMMR0JMPBUF.r15]
        mov     rbp, [rdx + VMMR0JMPBUF.rbp]
        mov     rcx, [rdx + VMMR0JMPBUF.rip]
        mov     rsp, [rdx + VMMR0JMPBUF.rsp]
        jmp     rcx
%endif ; RT_ARCH_AMD64
ENDPROC vmmR0CallHostLongJmp
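
;
; The save path above, restated as a hedged C sketch. The field names and the
; 8 KB / alignment checks come from the code; memcpy stands in for the
; rep movsd/movsq, and uSp (the stack pointer after the pushes above) and the
; early returns are illustrative only:
;
;   if (!pJmpBuf->eip || !pJmpBuf->pvSavedStack)
;       return VERR_INTERNAL_ERROR;                  /* not armed / no save area */
;   uintptr_t cb = pJmpBuf->esp - uSp;               /* live region: setjmp sp down to current sp */
;   if (cb > 8192 || (cb & (sizeof(uintptr_t) - 1)))
;       return VERR_INTERNAL_ERROR;                  /* max 8 KB, pointer-size aligned */
;   pJmpBuf->SpResume     = uSp;
;   pJmpBuf->cbSavedStack = (uint32_t)cb;
;   memcpy(pJmpBuf->pvSavedStack, (void *)uSp, cb);
;   pJmpBuf->SpCheck      = pJmpBuf->esp;
;   pJmpBuf->fInRing3Call = 1;
;   /* then: reload the callee-saved registers from the buffer and jmp to
;      eip/rip, making vmmR0CallHostSetJmp return rc in the original frame. */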


;;
; Internal R0 logger worker: Logger wrapper.
;
; @cproto VMMR0DECL(void) vmmR0LoggerWrapper(const char *pszFormat, ...)
;
EXPORTEDNAME vmmR0LoggerWrapper
%ifdef RT_ARCH_X86 ; The other architecture(s) use(s) C99 variadic macros.
        push    0                           ; assumes we're the wrapper for a default instance.
        call    IMP(RTLogLogger)
        add     esp, byte 4
        ret
%else
        int3
        int3
        int3
        ret
%endif
ENDPROC vmmR0LoggerWrapper
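; On the other architectures the wrapper above is unnecessary: the "push 0"
; merely prepends a NULL default-instance pointer to RTLogLogger's argument
; list, which a C99 variadic macro can do directly. A hedged sketch of that
; approach (the macro shape and RTLogLogger's exact parameter list are
; assumptions, not the actual VirtualBox definition):
;
;   #define vmmR0LoggerWrapper(...) RTLogLogger(NULL /* default instance */, __VA_ARGS__)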