VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0A.asm@ 14414

Last change on this file since 14414 was 13872, checked in by vboxsync, 16 years ago

Pass the VMCPU id on to all hwaccm functions.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 12.1 KB
; $Id: VMMR0A.asm 13872 2008-11-05 15:00:48Z vboxsync $
;; @file
; VMM - R0 assembly routines.
;

;
; Copyright (C) 2006-2007 Sun Microsystems, Inc.
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;
; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
; Clara, CA 95054 USA or visit http://www.sun.com if you need
; additional information or have any questions.
;

;*******************************************************************************
;*  Header Files                                                                *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VMMInternal.mac"
%include "iprt/err.mac"


%ifdef RT_ARCH_X86 ; The other architecture(s) use(s) C99 variadic macros.
extern IMPNAME(RTLogLogger)
%endif


BEGINCODE


;;
; The setjmp variant used for calling Ring-3.
;
; This differs from the normal setjmp in that it will resume VMMR0CallHost if we're
; in the middle of a ring-3 call. Another difference is the function pointer and
; argument. This has to do with resuming code and the stack frame of the caller.
;
; @returns  VINF_SUCCESS on success or whatever is passed to vmmR0CallHostLongJmp.
; @param    pJmpBuf  msc:rcx gcc:rdi x86:[esp+0x04]  Our jmp_buf.
; @param    pfn      msc:rdx gcc:rsi x86:[esp+0x08]  The function to be called when not resuming.
; @param    pvUser1  msc:r8  gcc:rdx x86:[esp+0x0c]  The argument of that function.
; @param    pvUser2  msc:r9  gcc:rcx x86:[esp+0x10]  The argument of that function.
;
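; Rough picture of how the pair is used from C (a sketch inferred from the
; parameter lists in this file, not from the exact VMMInternal.h prototypes):
;
;   rc = vmmR0CallHostSetJmp(&JmpBuf, pfn, pvUser1, pvUser2);   /* hypothetical C call */
;
; On a normal call the jump buffer is armed and pfn(pvUser1, pvUser2) is
; invoked. If that code needs ring-3 assistance, vmmR0CallHostLongJmp(&JmpBuf, rc)
; saves the stack, sets fInRing3Call and unwinds back here, so rc is what the
; caller of this function sees. When the caller comes back with fInRing3Call
; still set, the .resume path below restores the saved stack and continues
; where the long jump was made, eventually returning VINF_SUCCESS.
;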
BEGINPROC vmmR0CallHostSetJmp
GLOBALNAME vmmR0CallHostSetJmpEx
%ifdef RT_ARCH_X86
    ;
    ; Save the registers.
    ;
    mov     edx, [esp + 4h]             ; pJmpBuf
    mov     [edx + VMMR0JMPBUF.ebx], ebx
    mov     [edx + VMMR0JMPBUF.esi], esi
    mov     [edx + VMMR0JMPBUF.edi], edi
    mov     [edx + VMMR0JMPBUF.ebp], ebp
    mov     eax, [esp]
    mov     [edx + VMMR0JMPBUF.eip], eax
    lea     ecx, [esp + 4]              ; (used in resume)
    mov     [edx + VMMR0JMPBUF.esp], ecx
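    ; (eip/esp recorded above are our return address and the stack pointer the
    ;  caller sees right after we return; vmmR0CallHostLongJmp restores exactly
    ;  these, making the long jump look like a normal return from this function.)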

    ;
    ; If we're not in a ring-3 call, call pfn and return.
    ;
    test    byte [edx + VMMR0JMPBUF.fInRing3Call], 1
    jnz     .resume

    mov     ecx, [esp + 0ch]            ; pvArg1
    mov     edx, [esp + 10h]            ; pvArg2
    mov     eax, [esp + 08h]            ; pfn
    sub     esp, 16                     ; align the stack on a 16-byte boundary.
    mov     [esp], ecx
    mov     [esp+4], edx
    call    eax
    add     esp, 16
    mov     edx, [esp + 4h]             ; pJmpBuf

    ; restore the registers that we're not allowed to modify
    ; otherwise a resume might restore the wrong values (from the previous run)
    mov     edi, [edx + VMMR0JMPBUF.edi]
    mov     esi, [edx + VMMR0JMPBUF.esi]
    mov     ebx, [edx + VMMR0JMPBUF.ebx]
    mov     ebp, [edx + VMMR0JMPBUF.ebp]

    and     dword [edx + VMMR0JMPBUF.eip], byte 0 ; used for valid check.
    ret

    ;
    ; Resume the VMMR0CallHost call.
    ;
.resume:
    ; Sanity checks.
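    ; (Only resume if the stack pointer matches the one recorded at setjmp time,
    ;  and the saved stack block is at most 8KB, dword aligned and exactly the
    ;  distance between the recorded esp and SpResume.)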
    cmp     ecx, [edx + VMMR0JMPBUF.SpCheck]
    je      .espCheck_ok
.bad:
    and     dword [edx + VMMR0JMPBUF.eip], byte 0 ; used for valid check.
    mov     edi, [edx + VMMR0JMPBUF.edi]
    mov     esi, [edx + VMMR0JMPBUF.esi]
    mov     ebx, [edx + VMMR0JMPBUF.ebx]
    mov     eax, VERR_INTERNAL_ERROR    ; todo better return code!
    ret

.espCheck_ok:
    mov     ecx, [edx + VMMR0JMPBUF.cbSavedStack]
    cmp     ecx, 8192
    ja      .bad
    test    ecx, 3
    jnz     .bad
    mov     edi, [edx + VMMR0JMPBUF.esp]
    sub     edi, [edx + VMMR0JMPBUF.SpResume]
    cmp     ecx, edi
    jne     .bad

    ;
    ; Restore the stack.
    ;
    mov     byte [edx + VMMR0JMPBUF.fInRing3Call], 0
    mov     ecx, [edx + VMMR0JMPBUF.cbSavedStack]
    shr     ecx, 2
    mov     esi, [edx + VMMR0JMPBUF.pvSavedStack]
    mov     edi, [edx + VMMR0JMPBUF.SpResume]
    mov     esp, edi
    rep     movsd
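    ; (The block just copied is the stack exactly as vmmR0CallHostLongJmp left
    ;  it, so the pops below undo its pushes and the final ret returns to the
    ;  caller of vmmR0CallHostLongJmp with eax = VINF_SUCCESS.)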

    ;
    ; Continue where we left off.
    ;
    popf
    pop     ebx
    pop     esi
    pop     edi
    pop     ebp
    xor     eax, eax                    ; VINF_SUCCESS
    ret
%endif ; RT_ARCH_X86

%ifdef RT_ARCH_AMD64
    ;
    ; Save the registers.
    ;
    push    rbp
    mov     rbp, rsp
 %ifdef ASM_CALL64_MSC
    sub     rsp, 30h
    mov     r11, rdx                    ; pfn
    mov     rdx, rcx                    ; pJmpBuf
 %else
    sub     rsp, 10h
    mov     r8, rdx                     ; pvUser1 (save it like MSC)
    mov     r9, rcx                     ; pvUser2 (save it like MSC)
    mov     r11, rsi                    ; pfn
    mov     rdx, rdi                    ; pJmpBuf
 %endif
    mov     [rdx + VMMR0JMPBUF.rbx], rbx
 %ifdef ASM_CALL64_MSC
    mov     [rdx + VMMR0JMPBUF.rsi], rsi
    mov     [rdx + VMMR0JMPBUF.rdi], rdi
 %endif
    mov     r10, [rbp]
    mov     [rdx + VMMR0JMPBUF.rbp], r10
    mov     [rdx + VMMR0JMPBUF.r12], r12
    mov     [rdx + VMMR0JMPBUF.r13], r13
    mov     [rdx + VMMR0JMPBUF.r14], r14
    mov     [rdx + VMMR0JMPBUF.r15], r15
    mov     rax, [rbp + 8]
    mov     [rdx + VMMR0JMPBUF.rip], rax
    lea     r10, [rbp + 10h]            ; (used in resume)
    mov     [rdx + VMMR0JMPBUF.rsp], r10
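    ; (As in the 32-bit variant: rip/rsp recorded above are our return address
    ;  and the caller's stack pointer after the return, which is what the
    ;  64-bit long jump restores.)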

    ;
    ; If we're not in a ring-3 call, call pfn and return.
    ;
    test    byte [rdx + VMMR0JMPBUF.fInRing3Call], 1
    jnz     .resume

    mov     [rbp - 8], rdx              ; Save it and fix stack alignment (16).
 %ifdef ASM_CALL64_MSC
    mov     rcx, r8                     ; pvUser -> arg0
    mov     rdx, r9
 %else
    mov     rdi, r8                     ; pvUser -> arg0
    mov     rsi, r9
 %endif
    call    r11
    mov     rdx, [rbp - 8]              ; pJmpBuf

    ; restore the registers that we're not allowed to modify
    ; otherwise a resume might restore the wrong values (from the previous run)
    mov     rbx, [rdx + VMMR0JMPBUF.rbx]
 %ifdef ASM_CALL64_MSC
    mov     rsi, [rdx + VMMR0JMPBUF.rsi]
    mov     rdi, [rdx + VMMR0JMPBUF.rdi]
 %endif
    mov     r12, [rdx + VMMR0JMPBUF.r12]
    mov     r13, [rdx + VMMR0JMPBUF.r13]
    mov     r14, [rdx + VMMR0JMPBUF.r14]
    mov     r15, [rdx + VMMR0JMPBUF.r15]

    and     qword [rdx + VMMR0JMPBUF.rip], byte 0 ; used for valid check.
    leave
    ret

    ;
    ; Resume the VMMR0CallHost call.
    ;
.resume:
    ; Sanity checks.
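    ; (Same idea as the 32-bit resume path: the stack pointer must match
    ;  SpCheck and the saved stack block must be at most 8KB, aligned, and
    ;  exactly the distance between the recorded rsp and SpResume.)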
    cmp     r10, [rdx + VMMR0JMPBUF.SpCheck]
    je      .rspCheck_ok
.bad:
    and     qword [rdx + VMMR0JMPBUF.rip], byte 0 ; used for valid check.
    mov     rbx, [rdx + VMMR0JMPBUF.rbx]
 %ifdef ASM_CALL64_MSC
    mov     rsi, [rdx + VMMR0JMPBUF.rsi]
    mov     rdi, [rdx + VMMR0JMPBUF.rdi]
 %endif
    mov     r12, [rdx + VMMR0JMPBUF.r12]
    mov     r13, [rdx + VMMR0JMPBUF.r13]
    mov     r14, [rdx + VMMR0JMPBUF.r14]
    mov     r15, [rdx + VMMR0JMPBUF.r15]
    mov     eax, VERR_INTERNAL_ERROR    ; todo better return code!
    leave
    ret

.rspCheck_ok:
    mov     ecx, [rdx + VMMR0JMPBUF.cbSavedStack]
    cmp     rcx, 8192
    ja      .bad
    test    rcx, 3
    jnz     .bad
    mov     rdi, [rdx + VMMR0JMPBUF.rsp]
    sub     rdi, [rdx + VMMR0JMPBUF.SpResume]
    cmp     rcx, rdi
    jne     .bad

    ;
    ; Restore the stack.
    ;
    mov     byte [rdx + VMMR0JMPBUF.fInRing3Call], 0
    mov     ecx, [rdx + VMMR0JMPBUF.cbSavedStack]
    shr     ecx, 3
    mov     rsi, [rdx + VMMR0JMPBUF.pvSavedStack]
    mov     rdi, [rdx + VMMR0JMPBUF.SpResume]
    mov     rsp, rdi
    rep     movsq
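    ; (As in the 32-bit path, the restored block is the stack exactly as the
    ;  64-bit vmmR0CallHostLongJmp left it, so the pops below return to its
    ;  caller with eax = VINF_SUCCESS.)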

    ;
    ; Continue where we left off.
    ;
    popf
    pop     rbx
 %ifdef ASM_CALL64_MSC
    pop     rsi
    pop     rdi
 %endif
    pop     r12
    pop     r13
    pop     r14
    pop     r15
    pop     rbp
    xor     eax, eax                    ; VINF_SUCCESS
    ret
%endif
ENDPROC vmmR0CallHostSetJmp


;;
; Worker for VMMR0CallHost.
; This will save the stack and registers.
;
; @param    pJmpBuf  msc:rcx gcc:rdi x86:[ebp+08h]  Pointer to the jump buffer.
; @param    rc       msc:rdx gcc:rsi x86:[ebp+0ch]  The return code.
;
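; In short: this copies everything between the current stack pointer and the
; stack pointer recorded by vmmR0CallHostSetJmp into pvSavedStack, records the
; resume point (SpResume) and the expected stack pointer (SpCheck), sets
; fInRing3Call, and finally restores the registers and eip/esp (rip/rsp) saved
; at setjmp time so that vmmR0CallHostSetJmp appears to return rc to its caller.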
BEGINPROC vmmR0CallHostLongJmp
%ifdef RT_ARCH_X86
    ;
    ; Save the registers on the stack.
    ;
    push    ebp
    mov     ebp, esp
    push    edi
    push    esi
    push    ebx
    pushf

    ;
    ; Load parameters.
    ;
    mov     edx, [ebp + 08h]            ; pJmpBuf
    mov     eax, [ebp + 0ch]            ; rc

    ;
    ; Is the jump buffer armed?
    ;
    cmp     dword [edx + VMMR0JMPBUF.eip], byte 0
    je      .nok

    ;
    ; Save the stack.
    ;
    mov     edi, [edx + VMMR0JMPBUF.pvSavedStack]
    mov     [edx + VMMR0JMPBUF.SpResume], esp
    mov     esi, esp
    mov     ecx, [edx + VMMR0JMPBUF.esp]
    sub     ecx, esi

    ; two sanity checks on the size.
    cmp     ecx, 8192                   ; check max size.
    jbe     .ok
.nok:
    mov     eax, VERR_INTERNAL_ERROR
    popf
    pop     ebx
    pop     esi
    pop     edi
    leave
    ret
.ok:
    test    ecx, 3                      ; check alignment
    jnz     .nok
    mov     [edx + VMMR0JMPBUF.cbSavedStack], ecx
    shr     ecx, 2
    rep     movsd

    ; store the last pieces of info.
    mov     ecx, [edx + VMMR0JMPBUF.esp]
    mov     [edx + VMMR0JMPBUF.SpCheck], ecx
    mov     byte [edx + VMMR0JMPBUF.fInRing3Call], 1
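    ; (SpCheck now holds the esp recorded at setjmp time and fInRing3Call is
    ;  set, which is what routes the next vmmR0CallHostSetJmp call into its
    ;  .resume path.)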

    ;
    ; Do the long jump.
    ;
    mov     ebx, [edx + VMMR0JMPBUF.ebx]
    mov     esi, [edx + VMMR0JMPBUF.esi]
    mov     edi, [edx + VMMR0JMPBUF.edi]
    mov     ebp, [edx + VMMR0JMPBUF.ebp]
    mov     ecx, [edx + VMMR0JMPBUF.eip]
    mov     esp, [edx + VMMR0JMPBUF.esp]
    jmp     ecx
%endif ; RT_ARCH_X86

%ifdef RT_ARCH_AMD64
    ;
    ; Save the registers on the stack.
    ;
    push    rbp
    mov     rbp, rsp
    push    r15
    push    r14
    push    r13
    push    r12
 %ifdef ASM_CALL64_MSC
    push    rdi
    push    rsi
 %endif
    push    rbx
    pushf

    ;
    ; Normalize the parameters.
    ;
 %ifdef ASM_CALL64_MSC
    mov     eax, edx                    ; rc
    mov     rdx, rcx                    ; pJmpBuf
 %else
    mov     rdx, rdi                    ; pJmpBuf
    mov     eax, esi                    ; rc
 %endif

    ;
    ; Is the jump buffer armed?
    ;
    cmp     qword [rdx + VMMR0JMPBUF.rip], byte 0
    je      .nok

    ;
    ; Save the stack.
    ;
    mov     rdi, [rdx + VMMR0JMPBUF.pvSavedStack]
    mov     [rdx + VMMR0JMPBUF.SpResume], rsp
    mov     rsi, rsp
    mov     rcx, [rdx + VMMR0JMPBUF.rsp]
    sub     rcx, rsi

    ; two sanity checks on the size.
    cmp     rcx, 8192                   ; check max size.
    jbe     .ok
.nok:
    mov     eax, VERR_INTERNAL_ERROR
    popf
    pop     rbx
 %ifdef ASM_CALL64_MSC
    pop     rsi
    pop     rdi
 %endif
    pop     r12
    pop     r13
    pop     r14
    pop     r15
    leave
    ret

.ok:
    test    ecx, 7                      ; check alignment
    jnz     .nok
    mov     [rdx + VMMR0JMPBUF.cbSavedStack], ecx
    shr     ecx, 3
    rep     movsq

    ; store the last pieces of info.
    mov     rcx, [rdx + VMMR0JMPBUF.rsp]
    mov     [rdx + VMMR0JMPBUF.SpCheck], rcx
    mov     byte [rdx + VMMR0JMPBUF.fInRing3Call], 1
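    ; (Same as the 32-bit path: SpCheck and fInRing3Call are what the .resume
    ;  path in vmmR0CallHostSetJmp checks before restoring this stack.)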

    ;
    ; Do the long jump.
    ;
    mov     rbx, [rdx + VMMR0JMPBUF.rbx]
 %ifdef ASM_CALL64_MSC
    mov     rsi, [rdx + VMMR0JMPBUF.rsi]
    mov     rdi, [rdx + VMMR0JMPBUF.rdi]
 %endif
    mov     r12, [rdx + VMMR0JMPBUF.r12]
    mov     r13, [rdx + VMMR0JMPBUF.r13]
    mov     r14, [rdx + VMMR0JMPBUF.r14]
    mov     r15, [rdx + VMMR0JMPBUF.r15]
    mov     rbp, [rdx + VMMR0JMPBUF.rbp]
    mov     rcx, [rdx + VMMR0JMPBUF.rip]
    mov     rsp, [rdx + VMMR0JMPBUF.rsp]
    jmp     rcx
%endif
ENDPROC vmmR0CallHostLongJmp


;;
; Internal R0 logger worker: Logger wrapper.
;
; @cproto VMMR0DECL(void) vmmR0LoggerWrapper(const char *pszFormat, ...)
;
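; On x86 this pushes a zero (the default logger instance, per the comment
; below) and forwards to the imported RTLogLogger. On the other architectures
; logging uses the C99 variadic macros instead (see the %ifdef near the top of
; this file), so this wrapper should never be reached there; the int3
; breakpoints catch it if it is.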
EXPORTEDNAME vmmR0LoggerWrapper
%ifdef RT_ARCH_X86 ; The other architecture(s) use(s) C99 variadic macros.
    push    0                           ; assumes we're the wrapper for a default instance.
    call    IMP(RTLogLogger)
    add     esp, byte 4
    ret
%else
    int3
    int3
    int3
    ret
%endif
ENDPROC vmmR0LoggerWrapper
