VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0A.asm@1122

Last change on this file since 1122 was 727, checked in by vboxsync, 18 years ago

3rd arg is rdx not rcx.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 10.8 KB
; $Id: VMMR0A.asm 727 2007-02-06 19:38:36Z vboxsync $
;; @file
; VMM - R0 assembly routines.
;

;
; Copyright (C) 2006 InnoTek Systemberatung GmbH
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License as published by the Free Software Foundation,
; in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
; distribution. VirtualBox OSE is distributed in the hope that it will
; be useful, but WITHOUT ANY WARRANTY of any kind.
;
; If you received this file as part of a commercial VirtualBox
; distribution, then only the terms of your commercial VirtualBox
; license agreement apply instead of the previous paragraph.
;

;*******************************************************************************
;*   Header Files                                                              *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VMMInternal.mac"
%include "iprt/err.mac"

%ifdef __X86__ ; The other architecture(s) use(s) C99 variadic macros.
extern IMPNAME(RTLogLogger)
%endif


BEGINCODE


;;
; The setjmp variant used for calling Ring-3.
;
; This differs from the normal setjmp in that it will resume VMMR0CallHost if we're
; in the middle of a ring-3 call. Another difference is the function pointer and
; argument. This has to do with resuming code and the stack frame of the caller.
;
; @returns VINF_SUCCESS on success or whatever is passed to vmmR0CallHostLongJmp.
; @param pJmpBuf msc:rcx gcc:rdi x86:[esp+4] Our jmp_buf.
; @param pfn msc:rdx gcc:rsi x86:[esp+8] The function to be called when not resuming.
; @param pVM msc:r8 gcc:rdx x86:[esp+c] The argument of that function.
;
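; A rough C-level sketch of the behaviour documented above (illustrative only:
; the signature follows the @param list, PVMMR0JMPBUF is the assumed pointer
; typedef for VMMR0JMPBUF, and restore_saved_stack() is a hypothetical
; stand-in for the .resume path below):
;
;   int vmmR0CallHostSetJmp(PVMMR0JMPBUF pJmpBuf, int (*pfn)(PVM), PVM pVM)
;   {
;       if (!pJmpBuf->fInRing3Call)
;           return pfn(pVM);            /* first entry: just run the worker */
;       restore_saved_stack(pJmpBuf);   /* re-entry: put the saved stack back */
;       return VINF_SUCCESS;            /* ...and resume after the long jump */
;   }
;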
BEGINPROC vmmR0CallHostSetJmp
%ifdef __X86__
    ;
    ; Save the registers.
    ;
    mov     edx, [esp + 4h]             ; pJmpBuf
    mov     [edx + VMMR0JMPBUF.ebx], ebx
    mov     [edx + VMMR0JMPBUF.esi], esi
    mov     [edx + VMMR0JMPBUF.edi], edi
    mov     [edx + VMMR0JMPBUF.ebp], ebp
    mov     eax, [esp]
    mov     [edx + VMMR0JMPBUF.eip], eax
    lea     ecx, [esp + 4]              ; (used in resume)
    mov     [edx + VMMR0JMPBUF.esp], ecx

    ;
    ; If we're not in a ring-3 call, call pfn and return.
    ;
    test    byte [edx + VMMR0JMPBUF.fInRing3Call], 1
    jnz     .resume

    mov     ecx, [esp + 0ch]            ; pVM
    mov     eax, [esp + 08h]            ; pfn
    push    ecx
    call    eax
    add     esp, 4
    mov     edx, [esp + 4h]             ; pJmpBuf
    and     dword [edx + VMMR0JMPBUF.eip], byte 0 ; used for valid check.
    ret

    ;
    ; Resume the VMMR0CallHost call.
    ;
.resume:
    ; Sanity checks.
    cmp     ecx, [edx + VMMR0JMPBUF.SpCheck]
    je      .espCheck_ok
.bad:
    and     dword [edx + VMMR0JMPBUF.eip], byte 0 ; used for valid check.
    mov     edi, [edx + VMMR0JMPBUF.edi]
    mov     esi, [edx + VMMR0JMPBUF.esi]
    mov     ebx, [edx + VMMR0JMPBUF.ebx]
    mov     eax, VERR_INTERNAL_ERROR    ; todo better return code!
    ret

.espCheck_ok:
    mov     ecx, [edx + VMMR0JMPBUF.cbSavedStack]
    cmp     ecx, 8192
    ja      .bad
    test    ecx, 3
    jnz     .bad
    mov     edi, [edx + VMMR0JMPBUF.esp]
    sub     edi, [edx + VMMR0JMPBUF.SpResume]
    cmp     ecx, edi
    jne     .bad
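    ; To recap the checks above: we must be resuming at the exact esp that
    ; setjmp recorded (SpCheck), the saved stack must fit in the 8KB save
    ; area and be dword aligned, and its size must match the distance
    ; between the recorded esp and SpResume.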

    ;
    ; Restore the stack.
    ;
    mov     byte [edx + VMMR0JMPBUF.fInRing3Call], 0
    mov     ecx, [edx + VMMR0JMPBUF.cbSavedStack]
    shr     ecx, 2
    mov     esi, [edx + VMMR0JMPBUF.pvSavedStack]
    mov     edi, [edx + VMMR0JMPBUF.SpResume]
    mov     esp, edi
    rep movsd

    ;
    ; Continue where we left off.
    ;
    popf
    pop     ebx
    pop     esi
    pop     edi
    pop     ebp
    xor     eax, eax                    ; VINF_SUCCESS
    ret
%endif ; __X86__

%ifdef __AMD64__
    ;
    ; Save the registers.
    ;
 %ifdef ASM_CALL64_MSC
    mov     r11, rdx                    ; pfn
    mov     rdx, rcx                    ; pJmpBuf
 %else
    mov     r8, rdx                     ; pVM (save it like MSC)
    mov     r11, rsi                    ; pfn
    mov     rdx, rdi                    ; pJmpBuf
 %endif
    mov     [rdx + VMMR0JMPBUF.rbx], rbx
 %ifdef ASM_CALL64_MSC
    mov     [rdx + VMMR0JMPBUF.rsi], rsi
    mov     [rdx + VMMR0JMPBUF.rdi], rdi
 %endif
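    ; rsi and rdi only need saving for ASM_CALL64_MSC: the Microsoft x64 ABI
    ; treats them as callee-saved, while the System V (gcc) ABI treats them
    ; as volatile.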
    mov     [rdx + VMMR0JMPBUF.rbp], rbp
    mov     [rdx + VMMR0JMPBUF.r12], r12
    mov     [rdx + VMMR0JMPBUF.r13], r13
    mov     [rdx + VMMR0JMPBUF.r14], r14
    mov     [rdx + VMMR0JMPBUF.r15], r15
    mov     rax, [rsp]
    mov     [rdx + VMMR0JMPBUF.rip], rax
    lea     r10, [rsp + 8]              ; (used in resume)
    mov     [rdx + VMMR0JMPBUF.rsp], r10

    ;
    ; If we're not in a ring-3 call, call pfn and return.
    ;
    test    byte [rdx + VMMR0JMPBUF.fInRing3Call], 1
    jnz     .resume

    push    rdx                         ; Save it and fix stack alignment (16).
 %ifdef ASM_CALL64_MSC
    mov     rcx, r8                     ; pVM -> arg0
 %else
    mov     rdi, r8                     ; pVM -> arg0
 %endif
    call    r11
    pop     rdx                         ; pJmpBuf
    and     qword [rdx + VMMR0JMPBUF.rip], byte 0 ; used for valid check.
    ret

    ;
    ; Resume the VMMR0CallHost call.
    ;
.resume:
    ; Sanity checks.
    cmp     r10, [rdx + VMMR0JMPBUF.SpCheck]
    je      .rspCheck_ok
.bad:
    and     qword [rdx + VMMR0JMPBUF.rip], byte 0 ; used for valid check.
    mov     rbx, [rdx + VMMR0JMPBUF.rbx]
 %ifdef ASM_CALL64_MSC
    mov     rsi, [rdx + VMMR0JMPBUF.rsi]
    mov     rdi, [rdx + VMMR0JMPBUF.rdi]
 %endif
    mov     r12, [rdx + VMMR0JMPBUF.r12]
    mov     r13, [rdx + VMMR0JMPBUF.r13]
    mov     r14, [rdx + VMMR0JMPBUF.r14]
    mov     r15, [rdx + VMMR0JMPBUF.r15]
    mov     eax, VERR_INTERNAL_ERROR    ; todo better return code!
    ret

.rspCheck_ok:
    mov     ecx, [rdx + VMMR0JMPBUF.cbSavedStack]
    cmp     rcx, 8192
    ja      .bad
    test    rcx, 3
    jnz     .bad
    mov     rdi, [rdx + VMMR0JMPBUF.rsp]
    sub     rdi, [rdx + VMMR0JMPBUF.SpResume]
    cmp     rcx, rdi
    jne     .bad

    ;
    ; Restore the stack.
    ;
    mov     byte [rdx + VMMR0JMPBUF.fInRing3Call], 0
    mov     ecx, [rdx + VMMR0JMPBUF.cbSavedStack]
    shr     ecx, 3
    mov     rsi, [rdx + VMMR0JMPBUF.pvSavedStack]
    mov     rdi, [rdx + VMMR0JMPBUF.SpResume]
    mov     rsp, rdi
    rep movsq

    ;
    ; Continue where we left off.
    ;
    popf
    pop     rbx
 %ifdef ASM_CALL64_MSC
    pop     rsi
    pop     rdi
 %endif
    pop     r12
    pop     r13
    pop     r14
    pop     r15
    pop     rbp
    xor     eax, eax                    ; VINF_SUCCESS
    ret
%endif
ENDPROC vmmR0CallHostSetJmp
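
; How the two routines fit together (a summary of the code above and below):
; when ring-0 code needs ring-3, vmmR0CallHostLongJmp snapshots the stack
; between the current sp and the sp recorded by vmmR0CallHostSetJmp, then
; "returns" through the jump buffer so that vmmR0CallHostSetJmp's caller sees
; the rc passed to the long jump. After ring-3 has done its work,
; vmmR0CallHostSetJmp is called again, notices fInRing3Call, copies the
; snapshot back, and execution continues right after the long jump's pushf as
; if nothing had happened.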

;;
; Worker for VMMR0CallHost.
; This will save the stack and registers.
;
; @param pJmpBuf msc:rcx gcc:rdi x86:[ebp+8] Pointer to the jump buffer.
; @param rc msc:rdx gcc:rsi x86:[ebp+c] The return code.
;
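; A rough C-level sketch of the save step implemented below (illustrative only:
; current_sp() and longjmp_to() are hypothetical stand-ins, the alignment mask
; is 3 on x86 and 7 on amd64, and the fields mirror those used by the code):
;
;   int vmmR0CallHostLongJmp(PVMMR0JMPBUF pJmpBuf, int rc)
;   {
;       uintptr_t cb = pJmpBuf->esp - current_sp(); /* stack region to keep  */
;       if (!pJmpBuf->eip || cb > 8192 || (cb & 3))
;           return VERR_INTERNAL_ERROR;             /* unarmed or bad size   */
;       memcpy(pJmpBuf->pvSavedStack, (void *)current_sp(), cb);
;       pJmpBuf->cbSavedStack = cb;
;       pJmpBuf->fInRing3Call = 1;
;       longjmp_to(pJmpBuf, rc);                    /* never returns here    */
;   }
;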
BEGINPROC vmmR0CallHostLongJmp
%ifdef __X86__
    ;
    ; Save the registers on the stack.
    ;
    push    ebp
    mov     ebp, esp
    push    edi
    push    esi
    push    ebx
    pushf

    ;
    ; Load parameters.
    ;
    mov     edx, [ebp + 08h]            ; pJmpBuf
    mov     eax, [ebp + 0ch]            ; rc

    ;
    ; Is the jump buffer armed?
    ;
    cmp     dword [edx + VMMR0JMPBUF.eip], byte 0
    je      .nok

    ;
    ; Save the stack.
    ;
    mov     edi, [edx + VMMR0JMPBUF.pvSavedStack]
    mov     [edx + VMMR0JMPBUF.SpResume], esp
    mov     esi, esp
    mov     ecx, [edx + VMMR0JMPBUF.esp]
    sub     ecx, esi

    ; two sanity checks on the size.
    cmp     ecx, 8192                   ; check max size.
    jbe     .ok
.nok:
    mov     eax, VERR_INTERNAL_ERROR
    popf
    pop     ebx
    pop     esi
    pop     edi
    leave
    ret
.ok:
    test    ecx, 3                      ; check alignment
    jnz     .nok
    mov     [edx + VMMR0JMPBUF.cbSavedStack], ecx
    shr     ecx, 2
    rep movsd

    ; store the last pieces of info.
    mov     ecx, [edx + VMMR0JMPBUF.esp]
    mov     [edx + VMMR0JMPBUF.SpCheck], ecx
    mov     byte [edx + VMMR0JMPBUF.fInRing3Call], 1

    ;
    ; Do the long jump.
    ;
    mov     ebx, [edx + VMMR0JMPBUF.ebx]
    mov     esi, [edx + VMMR0JMPBUF.esi]
    mov     edi, [edx + VMMR0JMPBUF.edi]
    mov     ebp, [edx + VMMR0JMPBUF.ebp]
    mov     ecx, [edx + VMMR0JMPBUF.eip]
    mov     esp, [edx + VMMR0JMPBUF.esp]
    jmp     ecx
%endif ; __X86__

%ifdef __AMD64__
    ;
    ; Save the registers on the stack.
    ;
    push    rbp
    mov     rbp, rsp
    push    r15
    push    r14
    push    r13
    push    r12
 %ifdef ASM_CALL64_MSC
    push    rdi
    push    rsi
 %endif
    push    rbx
    pushf

    ;
    ; Normalize the parameters.
    ;
 %ifdef ASM_CALL64_MSC
    mov     eax, edx                    ; rc
    mov     rdx, rcx                    ; pJmpBuf
 %else
    mov     rdx, rdi                    ; pJmpBuf
    mov     eax, esi                    ; rc
 %endif

    ;
    ; Is the jump buffer armed?
    ;
    cmp     qword [rdx + VMMR0JMPBUF.rip], byte 0
    je      .nok

    ;
    ; Save the stack.
    ;
    mov     rdi, [rdx + VMMR0JMPBUF.pvSavedStack]
    mov     [rdx + VMMR0JMPBUF.SpResume], rsp
    mov     rsi, rsp
    mov     rcx, [rdx + VMMR0JMPBUF.rsp]
    sub     rcx, rsi

    ; two sanity checks on the size.
    cmp     rcx, 8192                   ; check max size.
    jbe     .ok
.nok:
    mov     eax, VERR_INTERNAL_ERROR
    popf
    pop     rbx
 %ifdef ASM_CALL64_MSC
    pop     rsi
    pop     rdi
 %endif
    pop     r12
    pop     r13
    pop     r14
    pop     r15
    leave
    ret

.ok:
    test    ecx, 7                      ; check alignment
    jnz     .nok
    mov     [rdx + VMMR0JMPBUF.cbSavedStack], ecx
    shr     ecx, 3
    rep movsq

    ; store the last pieces of info.
    mov     rcx, [rdx + VMMR0JMPBUF.rsp]
    mov     [rdx + VMMR0JMPBUF.SpCheck], rcx
    mov     byte [rdx + VMMR0JMPBUF.fInRing3Call], 1

    ;
    ; Do the long jump.
    ;
    mov     rbx, [rdx + VMMR0JMPBUF.rbx]
 %ifdef ASM_CALL64_MSC
    mov     rsi, [rdx + VMMR0JMPBUF.rsi]
    mov     rdi, [rdx + VMMR0JMPBUF.rdi]
 %endif
    mov     r12, [rdx + VMMR0JMPBUF.r12]
    mov     r13, [rdx + VMMR0JMPBUF.r13]
    mov     r14, [rdx + VMMR0JMPBUF.r14]
    mov     r15, [rdx + VMMR0JMPBUF.r15]
    mov     rbp, [rdx + VMMR0JMPBUF.rbp]
    mov     rcx, [rdx + VMMR0JMPBUF.rip]
    mov     rsp, [rdx + VMMR0JMPBUF.rsp]
    jmp     rcx
%endif
ENDPROC vmmR0CallHostLongJmp


;;
; Internal R0 logger worker: Logger wrapper.
;
; @cproto VMMR0DECL(void) vmmR0LoggerWrapper(const char *pszFormat, ...)
;
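; On 64-bit hosts this wrapper is unnecessary because a C99 variadic macro can
; forward the arguments directly; a minimal sketch of that idea (illustrative
; only, not the actual VBox definition):
;
;   #define vmmR0LoggerWrapper(...) RTLogLogger(NULL /* default instance */, __VA_ARGS__)
;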
EXPORTEDNAME vmmR0LoggerWrapper
%ifdef __X86__ ; The other architecture(s) use(s) C99 variadic macros.
    push    0                           ; assumes we're the wrapper for a default instance.
    call    IMP(RTLogLogger)
    add     esp, byte 4
    ret
%else
    int3
    int3
    int3
    ret
%endif
ENDPROC vmmR0LoggerWrapper
