VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMR0UtilA.asm @ revision 95134

Last change on this file since revision 95134 was revision 93115, checked in by vboxsync, 3 years ago:

    scm --update-copyright-year

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision

File size: 10.6 KB
1; $Id: HMR0UtilA.asm 93115 2022-01-01 11:31:46Z vboxsync $
2;; @file
3; HM - Ring-0 VMX & SVM Helpers.
4;
5
6;
7; Copyright (C) 2006-2022 Oracle Corporation
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17
18;*********************************************************************************************************************************
19;* Header Files *
20;*********************************************************************************************************************************
21%include "VBox/asmdefs.mac"
22%include "VBox/err.mac"
23%include "VBox/vmm/hm_vmx.mac"
24%include "iprt/x86.mac"
25
26
27
28BEGINCODE
29
;;
; Executes VMWRITE, 64-bit value.
;
; @returns VBox status code: VINF_SUCCESS, VERR_VMX_INVALID_VMCS_PTR or
;          VERR_VMX_INVALID_VMCS_FIELD.
; @param   idxField   x86: [ebp + 08h]  msc: rcx  gcc: rdi   VMCS index.
; @param   u64Data    x86: [ebp + 0ch]  msc: rdx  gcc: rsi   VM field value.
;
ALIGNCODE(16)
BEGINPROC VMXWriteVmcs64
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    and     edi, 0ffffffffh             ; field index is 32-bit; clear any garbage in the upper half
    xor     rax, rax                    ; rax = VINF_SUCCESS for the success path
    vmwrite rdi, rsi
 %else
    and     ecx, 0ffffffffh             ; field index is 32-bit; clear any garbage in the upper half
    xor     rax, rax                    ; rax = VINF_SUCCESS for the success path
    vmwrite rcx, rdx
 %endif
%else ; RT_ARCH_X86
    ; 32-bit host: a 64-bit field must be written as two 32-bit VMWRITEs
    ; (field index for the low dword, index + 1 for the high dword).
    mov     ecx, [esp + 4]              ; idxField
    lea     edx, [esp + 8]              ; &u64Data
    vmwrite ecx, [edx]                  ; low dword
    jz      .done                       ; ZF=1 (VMfailValid)   - bail; flags translated below
    jc      .done                       ; CF=1 (VMfailInvalid) - bail; flags translated below
    inc     ecx                         ; high dword lives at field index + 1
    xor     eax, eax                    ; eax = VINF_SUCCESS for the success path
    vmwrite ecx, [edx + 4]              ; high dword
.done:
%endif ; RT_ARCH_X86
    ; Translate the VMX result flags: CF=1 -> invalid VMCS pointer, ZF=1 -> invalid field.
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end                    ; both flags clear: eax is already VINF_SUCCESS (0)
    mov     eax, VERR_VMX_INVALID_VMCS_FIELD
.the_end:
    ret
ENDPROC VMXWriteVmcs64
69
70
;;
; Executes VMREAD, 64-bit value.
;
; @returns VBox status code: VINF_SUCCESS, VERR_VMX_INVALID_VMCS_PTR or
;          VERR_VMX_INVALID_VMCS_FIELD.
; @param   idxField   VMCS index.
; @param   pData      Where to store VM field value.
;
;DECLASM(int) VMXReadVmcs64(uint32_t idxField, uint64_t *pData);
ALIGNCODE(16)
BEGINPROC VMXReadVmcs64
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    and     edi, 0ffffffffh             ; field index is 32-bit; clear any garbage in the upper half
    xor     rax, rax                    ; rax = VINF_SUCCESS for the success path
    vmread  [rsi], rdi
 %else
    and     ecx, 0ffffffffh             ; field index is 32-bit; clear any garbage in the upper half
    xor     rax, rax                    ; rax = VINF_SUCCESS for the success path
    vmread  [rdx], rcx
 %endif
%else ; RT_ARCH_X86
    ; 32-bit host: a 64-bit field must be read as two 32-bit VMREADs
    ; (field index for the low dword, index + 1 for the high dword).
    mov     ecx, [esp + 4]              ; idxField
    mov     edx, [esp + 8]              ; pData
    vmread  [edx], ecx                  ; low dword
    jz      .done                       ; ZF=1 (VMfailValid)   - bail; flags translated below
    jc      .done                       ; CF=1 (VMfailInvalid) - bail; flags translated below
    inc     ecx                         ; high dword lives at field index + 1
    xor     eax, eax                    ; eax = VINF_SUCCESS for the success path
    vmread  [edx + 4], ecx              ; high dword
.done:
%endif ; RT_ARCH_X86
    ; Translate the VMX result flags: CF=1 -> invalid VMCS pointer, ZF=1 -> invalid field.
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end                    ; both flags clear: eax is already VINF_SUCCESS (0)
    mov     eax, VERR_VMX_INVALID_VMCS_FIELD
.the_end:
    ret
ENDPROC VMXReadVmcs64
111
112
;;
; Executes VMREAD, 32-bit value.
;
; @returns VBox status code: VINF_SUCCESS, VERR_VMX_INVALID_VMCS_PTR or
;          VERR_VMX_INVALID_VMCS_FIELD.
; @param   idxField   VMCS index.
; @param   pu32Data   Where to store VM field value.
;
;DECLASM(int) VMXReadVmcs32(uint32_t idxField, uint32_t *pu32Data);
ALIGNCODE(16)
BEGINPROC VMXReadVmcs32
%ifdef RT_ARCH_AMD64
    ; In long mode VMREAD operates on 64-bit registers, so read into r10
    ; (volatile scratch in both the GCC and MSC ABIs) and store the low dword.
 %ifdef ASM_CALL64_GCC
    and     edi, 0ffffffffh             ; field index is 32-bit; clear any garbage in the upper half
    xor     rax, rax                    ; rax = VINF_SUCCESS for the success path
    vmread  r10, rdi
    mov     [rsi], r10d                 ; *pu32Data = low dword of the field
 %else
    and     ecx, 0ffffffffh             ; field index is 32-bit; clear any garbage in the upper half
    xor     rax, rax                    ; rax = VINF_SUCCESS for the success path
    vmread  r10, rcx
    mov     [rdx], r10d                 ; *pu32Data = low dword of the field
 %endif
%else ; RT_ARCH_X86
    mov     ecx, [esp + 4]              ; idxField
    mov     edx, [esp + 8]              ; pu32Data
    xor     eax, eax                    ; eax = VINF_SUCCESS for the success path
    vmread  [edx], ecx
%endif ; RT_ARCH_X86
    ; Translate the VMX result flags: CF=1 -> invalid VMCS pointer, ZF=1 -> invalid field.
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end                    ; both flags clear: eax is already VINF_SUCCESS (0)
    mov     eax, VERR_VMX_INVALID_VMCS_FIELD
.the_end:
    ret
ENDPROC VMXReadVmcs32
150
151
;;
; Executes VMWRITE, 32-bit value.
;
; @returns VBox status code: VINF_SUCCESS, VERR_VMX_INVALID_VMCS_PTR or
;          VERR_VMX_INVALID_VMCS_FIELD.
; @param   idxField   VMCS index.
; @param   u32Data    The 32-bit value to write to the VM field.
;
;DECLASM(int) VMXWriteVmcs32(uint32_t idxField, uint32_t u32Data);
ALIGNCODE(16)
BEGINPROC VMXWriteVmcs32
%ifdef RT_ARCH_AMD64
    ; In long mode VMWRITE operates on 64-bit registers, so clear the upper
    ; halves of both the index and the data before issuing it.
 %ifdef ASM_CALL64_GCC
    and     edi, 0ffffffffh
    and     esi, 0ffffffffh
    xor     rax, rax                    ; rax = VINF_SUCCESS for the success path
    vmwrite rdi, rsi
 %else
    and     ecx, 0ffffffffh
    and     edx, 0ffffffffh
    xor     rax, rax                    ; rax = VINF_SUCCESS for the success path
    vmwrite rcx, rdx
 %endif
%else ; RT_ARCH_X86
    mov     ecx, [esp + 4]              ; idxField
    mov     edx, [esp + 8]              ; u32Data
    xor     eax, eax                    ; eax = VINF_SUCCESS for the success path
    vmwrite ecx, edx
%endif ; RT_ARCH_X86
    ; Translate the VMX result flags: CF=1 -> invalid VMCS pointer, ZF=1 -> invalid field.
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end                    ; both flags clear: eax is already VINF_SUCCESS (0)
    mov     eax, VERR_VMX_INVALID_VMCS_FIELD
.the_end:
    ret
ENDPROC VMXWriteVmcs32
189
190
;;
; Executes VMXON, entering VMX root operation.
;
; @returns VBox status code: VINF_SUCCESS, VERR_VMX_INVALID_VMXON_PTR or
;          VERR_VMX_VMXON_FAILED.
; @param   HCPhysVMXOn   Physical address of VMXON structure.
;
;DECLASM(int) VMXEnable(RTHCPHYS HCPhysVMXOn);
BEGINPROC VMXEnable
%ifdef RT_ARCH_AMD64
    xor     rax, rax                    ; rax = VINF_SUCCESS for the success path
 %ifdef ASM_CALL64_GCC
    push    rdi                         ; VMXON takes a memory operand; spill the address onto the stack
 %else
    push    rcx                         ; ditto for the MSC calling convention
 %endif
    vmxon   [rsp]
%else ; RT_ARCH_X86
    xor     eax, eax                    ; eax = VINF_SUCCESS for the success path
    vmxon   [esp + 4]                   ; the 64-bit physical address is already on the caller's stack
%endif ; RT_ARCH_X86
    jnc     .good
    mov     eax, VERR_VMX_INVALID_VMXON_PTR ; CF=1 (VMfailInvalid): VMXON region pointer rejected
    jmp     .the_end

.good:
    jnz     .the_end                    ; both flags clear: success, eax already 0
    mov     eax, VERR_VMX_VMXON_FAILED  ; ZF=1 (VMfailValid): VMXON refused

.the_end:
%ifdef RT_ARCH_AMD64
    add     rsp, 8                      ; drop the physical address pushed above
%endif
    ret
ENDPROC VMXEnable
225
226
;;
; Executes VMXOFF, leaving VMX root operation.
;
; No return value; the VMXOFF result flags are intentionally not checked.
;
;DECLASM(void) VMXDisable(void);
BEGINPROC VMXDisable
    vmxoff
.the_end:                               ; note: label is currently unreferenced
    ret
ENDPROC VMXDisable
236
237
;;
; Executes VMCLEAR on the given VMCS, making it inactive and clear.
;
; @returns VBox status code: VINF_SUCCESS or VERR_VMX_INVALID_VMCS_PTR.
; @param   HCPhysVmcs   Physical address of VM control structure.
;
;DECLASM(int) VMXClearVmcs(RTHCPHYS HCPhysVmcs);
ALIGNCODE(16)
BEGINPROC VMXClearVmcs
%ifdef RT_ARCH_AMD64
    xor     rax, rax                    ; rax = VINF_SUCCESS for the success path
 %ifdef ASM_CALL64_GCC
    push    rdi                         ; VMCLEAR takes a memory operand; spill the address onto the stack
 %else
    push    rcx                         ; ditto for the MSC calling convention
 %endif
    vmclear [rsp]
%else ; RT_ARCH_X86
    xor     eax, eax                    ; eax = VINF_SUCCESS for the success path
    vmclear [esp + 4]                   ; the 64-bit physical address is already on the caller's stack
%endif ; RT_ARCH_X86
    jnc     .the_end                    ; CF=1 (VMfailInvalid): bad VMCS pointer
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
.the_end:
%ifdef RT_ARCH_AMD64
    add     rsp, 8                      ; drop the physical address pushed above
%endif
    ret
ENDPROC VMXClearVmcs
267
268
;;
; Executes VMPTRLD, making the given VMCS the current one.
;
; @returns VBox status code: VINF_SUCCESS or VERR_VMX_INVALID_VMCS_PTR.
; @param   HCPhysVmcs   Physical address of VMCS structure.
;
;DECLASM(int) VMXLoadVmcs(RTHCPHYS HCPhysVmcs);
ALIGNCODE(16)
BEGINPROC VMXLoadVmcs
%ifdef RT_ARCH_AMD64
    xor     rax, rax                    ; rax = VINF_SUCCESS for the success path
 %ifdef ASM_CALL64_GCC
    push    rdi                         ; VMPTRLD takes a memory operand; spill the address onto the stack
 %else
    push    rcx                         ; ditto for the MSC calling convention
 %endif
    vmptrld [rsp]
%else ; RT_ARCH_X86
    xor     eax, eax                    ; eax = VINF_SUCCESS for the success path
    vmptrld [esp + 4]                   ; the 64-bit physical address is already on the caller's stack
%endif ; RT_ARCH_X86
    jnc     .the_end                    ; CF=1 (VMfailInvalid): bad VMCS pointer
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
.the_end:
%ifdef RT_ARCH_AMD64
    add     rsp, 8                      ; drop the physical address pushed above
%endif
    ret
ENDPROC VMXLoadVmcs
298
299
;;
; Executes VMPTRST, storing the current-VMCS pointer into *pVMCS.
;
; @returns VBox status code: VINF_SUCCESS, or VERR_NOT_SUPPORTED on OS/2.
; @param   [esp + 04h] gcc:rdi msc:rcx   Param 1 - Address that will receive the current pointer.
;
;DECLASM(int) VMXGetCurrentVmcs(RTHCPHYS *pVMCS);
BEGINPROC VMXGetCurrentVmcs
%ifdef RT_OS_OS2
    ; Not implemented on OS/2.
    mov     eax, VERR_NOT_SUPPORTED
    ret
%else
 %ifdef RT_ARCH_AMD64
  %ifdef ASM_CALL64_GCC
    vmptrst qword [rdi]
  %else
    vmptrst qword [rcx]
  %endif
 %else
    vmptrst qword [esp+04h]
 %endif
    xor     eax, eax                    ; VINF_SUCCESS; VMPTRST result flags are deliberately not checked
.the_end:                               ; note: label is currently unreferenced
    ret
%endif
ENDPROC VMXGetCurrentVmcs
326
327
;;
; Invalidate EPT translations using INVEPT.
;
; @returns VBox status code: VINF_SUCCESS, VERR_VMX_INVALID_VMCS_PTR or
;          VERR_INVALID_PARAMETER.
; @param   enmTlbFlush   msc:ecx  gcc:edi  x86:[esp+04]   Type of flush.
; @param   pDescriptor   msc:edx  gcc:esi  x86:[esp+08]   Descriptor pointer.
;
;DECLASM(int) VMXR0InvEPT(VMXTLBFLUSHEPT enmTlbFlush, uint64_t *pDescriptor);
BEGINPROC VMXR0InvEPT
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    and     edi, 0ffffffffh             ; flush type is 32-bit; clear any garbage in the upper half
    xor     rax, rax                    ; rax = VINF_SUCCESS for the success path
; invept rdi, qword [rsi]
    ; Hand-assembled (66 0F 38 80 /r, ModRM 3Eh = rdi, [rsi]) for assemblers lacking INVEPT.
    DB      0x66, 0x0F, 0x38, 0x80, 0x3E
 %else
    and     ecx, 0ffffffffh             ; flush type is 32-bit; clear any garbage in the upper half
    xor     rax, rax                    ; rax = VINF_SUCCESS for the success path
; invept rcx, qword [rdx]
    ; Hand-assembled (ModRM 0Ah = rcx, [rdx]).
    DB      0x66, 0x0F, 0x38, 0x80, 0xA
 %endif
%else
    mov     ecx, [esp + 4]              ; enmTlbFlush
    mov     edx, [esp + 8]              ; pDescriptor
    xor     eax, eax                    ; eax = VINF_SUCCESS for the success path
; invept ecx, qword [edx]
    ; Hand-assembled (ModRM 0Ah = ecx, [edx]).
    DB      0x66, 0x0F, 0x38, 0x80, 0xA
%endif
    ; Translate the VMX result flags: CF=1 -> invalid pointer, ZF=1 -> invalid operand.
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end                    ; both flags clear: eax is already VINF_SUCCESS (0)
    mov     eax, VERR_INVALID_PARAMETER
.the_end:
    ret
ENDPROC VMXR0InvEPT
364
365
;;
; Invalidate VPID-tagged TLB entries using INVVPID.
;
; @returns VBox status code: VINF_SUCCESS, VERR_VMX_INVALID_VMCS_PTR or
;          VERR_INVALID_PARAMETER.
; @param   enmTlbFlush   msc:ecx  gcc:edi  x86:[esp+04]   Type of flush
; @param   pDescriptor   msc:edx  gcc:esi  x86:[esp+08]   Descriptor pointer
;
;DECLASM(int) VMXR0InvVPID(VMXTLBFLUSHVPID enmTlbFlush, uint64_t *pDescriptor);
BEGINPROC VMXR0InvVPID
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    and     edi, 0ffffffffh             ; flush type is 32-bit; clear any garbage in the upper half
    xor     rax, rax                    ; rax = VINF_SUCCESS for the success path
; invvpid rdi, qword [rsi]
    ; Hand-assembled (66 0F 38 81 /r, ModRM 3Eh = rdi, [rsi]) for assemblers lacking INVVPID.
    DB      0x66, 0x0F, 0x38, 0x81, 0x3E
 %else
    and     ecx, 0ffffffffh             ; flush type is 32-bit; clear any garbage in the upper half
    xor     rax, rax                    ; rax = VINF_SUCCESS for the success path
; invvpid rcx, qword [rdx]
    ; Hand-assembled (ModRM 0Ah = rcx, [rdx]).
    DB      0x66, 0x0F, 0x38, 0x81, 0xA
 %endif
%else
    mov     ecx, [esp + 4]              ; enmTlbFlush
    mov     edx, [esp + 8]              ; pDescriptor
    xor     eax, eax                    ; eax = VINF_SUCCESS for the success path
; invvpid ecx, qword [edx]
    ; Hand-assembled (ModRM 0Ah = ecx, [edx]).
    DB      0x66, 0x0F, 0x38, 0x81, 0xA
%endif
    ; Translate the VMX result flags: CF=1 -> invalid pointer, ZF=1 -> invalid operand.
    jnc     .valid_vmcs
    mov     eax, VERR_VMX_INVALID_VMCS_PTR
    ret
.valid_vmcs:
    jnz     .the_end                    ; both flags clear: eax is already VINF_SUCCESS (0)
    mov     eax, VERR_INVALID_PARAMETER
.the_end:
    ret
ENDPROC VMXR0InvVPID
402
403
%if GC_ARCH_BITS == 64
;;
; Executes INVLPGA (64-bit guest-context pointers).
;
; INVLPGA takes the virtual address in rAX and the ASID in ECX.
;
; @param   pPageGC   msc:rcx  gcc:rdi  x86:[esp+04]   Virtual page to invalidate
; @param   uASID     msc:rdx  gcc:rsi  x86:[esp+0C]   Tagged TLB id
;
;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
BEGINPROC SVMR0InvlpgA
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    mov     rax, rdi                    ; rax = virtual address
    mov     rcx, rsi                    ; ecx = ASID
 %else
    mov     rax, rcx                    ; rax = virtual address
    mov     rcx, rdx                    ; ecx = ASID
 %endif
%else
    mov     eax, [esp + 4]              ; low dword of the 64-bit pPageGC (only eax is usable on a 32-bit host)
    mov     ecx, [esp + 0Ch]            ; uASID follows the 8-byte pPageGC on the stack
%endif
    invlpga [xAX], ecx                  ; invalidate the page for the given ASID
    ret
ENDPROC SVMR0InvlpgA

%else ; GC_ARCH_BITS != 64
;;
; Executes INVLPGA (32-bit guest-context pointers).
;
; @param   pPageGC   msc:ecx  gcc:edi  x86:[esp+04]   Virtual page to invalidate
; @param   uASID     msc:edx  gcc:esi  x86:[esp+08]   Tagged TLB id
;
;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
BEGINPROC SVMR0InvlpgA
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    movzx   rax, edi                    ; rax = zero-extended 32-bit virtual address
    mov     ecx, esi                    ; ecx = ASID
 %else
    ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf:
    ; "Perhaps unexpectedly, instructions that move or generate 32-bit register
    ; values also set the upper 32 bits of the register to zero. Consequently
    ; there is no need for an instruction movzlq."
    mov     eax, ecx                    ; implicitly zero-extends into rax
    mov     ecx, edx                    ; ecx = ASID
 %endif
%else
    mov     eax, [esp + 4]              ; pPageGC
    mov     ecx, [esp + 8]              ; uASID (follows the 4-byte pPageGC on the stack)
%endif
    invlpga [xAX], ecx                  ; invalidate the page for the given ASID
    ret
ENDPROC SVMR0InvlpgA

%endif ; GC_ARCH_BITS != 64
459
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette