VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/GIMAllKvm.cpp @ 61142

Last change on this file since 61142 was 58123, checked in by vboxsync, 9 years ago

VMM: Made @param pVCpu more uniform and to the point.

/* $Id: GIMAllKvm.cpp 58123 2015-10-08 18:09:45Z vboxsync $ */
/** @file
 * GIM - Guest Interface Manager, KVM, All Contexts.
 */

/*
 * Copyright (C) 2015 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_GIM
#include "GIMKvmInternal.h"
#include "GIMInternal.h"

#include <VBox/err.h>
#include <VBox/dis.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/tm.h>
#include <VBox/vmm/vm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/pdmdev.h>
#include <VBox/vmm/pdmapi.h>
#include <VBox/sup.h>

#include <iprt/asm-amd64-x86.h>
#include <iprt/time.h>


/**
 * Handles the KVM hypercall.
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pCtx    Pointer to the guest-CPU context.
 *
 * @thread  EMT.
 */
VMM_INT_DECL(int) gimKvmHypercall(PVMCPU pVCpu, PCPUMCTX pCtx)
{
    /*
     * Get the hypercall operation and arguments.
     */
    bool const fIs64BitMode = CPUMIsGuestIn64BitCodeEx(pCtx);
    uint64_t   uHyperOp     = pCtx->rax;
    uint64_t   uHyperArg0   = pCtx->rbx;
    uint64_t   uHyperArg1   = pCtx->rcx;
    uint64_t   uHyperArg2   = pCtx->rdi;
    uint64_t   uHyperArg3   = pCtx->rsi;
    uint64_t   uHyperRet    = KVM_HYPERCALL_RET_ENOSYS;
    uint64_t   uAndMask     = UINT64_C(0xffffffffffffffff);
    if (!fIs64BitMode)
    {
        uAndMask    = UINT64_C(0xffffffff);
        uHyperOp   &= UINT64_C(0xffffffff);
        uHyperArg0 &= UINT64_C(0xffffffff);
        uHyperArg1 &= UINT64_C(0xffffffff);
        uHyperArg2 &= UINT64_C(0xffffffff);
        uHyperArg3 &= UINT64_C(0xffffffff);
        uHyperRet  &= UINT64_C(0xffffffff);
    }

    /*
     * Verify that guest ring-0 is the one making the hypercall.
     */
    uint32_t uCpl = CPUMGetGuestCPL(pVCpu);
    if (uCpl)
    {
        pCtx->rax = KVM_HYPERCALL_RET_EPERM & uAndMask;
        return VINF_SUCCESS;
    }

    /*
     * Do the work.
     */
    switch (uHyperOp)
    {
        case KVM_HYPERCALL_OP_KICK_CPU:
        {
            PVM pVM = pVCpu->CTX_SUFF(pVM);
            if (uHyperArg1 < pVM->cCpus)
            {
                PVMCPU pVCpuTarget = &pVM->aCpus[uHyperArg1];   /** ASSUMES pVCpu index == ApicId of the VCPU. */
                VMCPU_FF_SET(pVCpuTarget, VMCPU_FF_UNHALT);
#ifdef IN_RING0
                /*
                 * We might be here with preemption disabled or enabled (i.e. depending on thread-context hooks
                 * being used), so don't try obtaining the GVMMR0 used lock here. See @bugref{7270#c148}.
                 */
                GVMMR0SchedWakeUpEx(pVM, pVCpuTarget->idCpu, false /* fTakeUsedLock */);
#elif defined(IN_RING3)
                int rc2 = SUPR3CallVMMR0(pVM->pVMR0, pVCpuTarget->idCpu, VMMR0_DO_GVMM_SCHED_WAKE_UP, NULL);
                AssertRC(rc2);
#elif defined(IN_RC)
                /* Nothing to do for raw-mode, shouldn't really be used by raw-mode guests anyway. */
                Assert(pVM->cCpus == 1);
#endif
                uHyperRet = KVM_HYPERCALL_RET_SUCCESS;
            }
            break;
        }

        case KVM_HYPERCALL_OP_VAPIC_POLL_IRQ:
            uHyperRet = KVM_HYPERCALL_RET_SUCCESS;
            break;

        default:
            break;
    }

    /*
     * Place the result in rax/eax.
     */
    pCtx->rax = uHyperRet & uAndMask;
    return VINF_SUCCESS;
}
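
/*
 * Illustration only (not part of this file's interface): a 64-bit guest could
 * issue the KICK_CPU hypercall with the register layout the handler above
 * consumes (rax=operation, rbx=arg0, rcx=arg1, rdi=arg2, rsi=arg3).  This is
 * a hedged sketch; the helper name is made up, and KVM_HYPERCALL_OP_KICK_CPU
 * and the use of rcx as the target-VCPU argument are taken from the switch
 * above.
 *
 * @code
 *  static inline uint64_t GuestKvmKickCpu(uint64_t uTargetVCpuIdx)
 *  {
 *      uint64_t uRet;
 *      __asm__ __volatile__("vmcall"    // patched to "vmmcall" on AMD-V hosts, see gimKvmXcptUD
 *                           : "=a" (uRet)
 *                           : "a" ((uint64_t)KVM_HYPERCALL_OP_KICK_CPU),
 *                             "b" (UINT64_C(0)),     // arg0 (rbx): unused here
 *                             "c" (uTargetVCpuIdx)   // arg1 (rcx): VCPU to unhalt
 *                           : "memory");
 *      return uRet;                     // KVM_HYPERCALL_RET_SUCCESS on success
 *  }
 * @endcode
 */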


/**
 * Returns whether the guest has configured and enabled the use of KVM's
 * hypercall interface.
 *
 * @returns true if hypercalls are enabled, false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(bool) gimKvmAreHypercallsEnabled(PVMCPU pVCpu)
{
    NOREF(pVCpu);
    /* KVM paravirt interface doesn't have hypercall control bits (like Hyper-V does)
       that guests can control, i.e. hypercalls are always enabled. */
    return true;
}
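
/*
 * Illustration only: before using hypercalls a guest typically discovers the
 * KVM interface via CPUID leaf 0x40000000, which returns the "KVMKVMKVM\0\0\0"
 * signature in ebx/ecx/edx.  A hedged guest-side sketch using IPRT:
 *
 * @code
 *  uint32_t uEax, uEbx, uEcx, uEdx;
 *  ASMCpuId(UINT32_C(0x40000000), &uEax, &uEbx, &uEcx, &uEdx);
 *  bool const fIsKvm =    uEbx == UINT32_C(0x4b4d564b)   // "KVMK"
 *                      && uEcx == UINT32_C(0x564b4d56)   // "VMKV"
 *                      && uEdx == UINT32_C(0x0000004d);  // "M\0\0\0"
 * @endcode
 */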


/**
 * Returns whether the guest has configured and enabled the use of KVM's
 * paravirtualized TSC.
 *
 * @returns true if paravirt. TSC is enabled, false otherwise.
 * @param   pVM     The cross context VM structure.
 */
VMM_INT_DECL(bool) gimKvmIsParavirtTscEnabled(PVM pVM)
{
    uint32_t cCpus = pVM->cCpus;
    for (uint32_t i = 0; i < cCpus; i++)
    {
        PVMCPU     pVCpu      = &pVM->aCpus[i];
        PGIMKVMCPU pGimKvmCpu = &pVCpu->gim.s.u.KvmCpu;
        if (MSR_GIM_KVM_SYSTEM_TIME_IS_ENABLED(pGimKvmCpu->u64SystemTimeMsr))
            return true;
    }
    return false;
}
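
/*
 * Illustration only: a guest enables the paravirtualized TSC (kvmclock) by
 * writing the guest-physical address of its per-VCPU system-time struct, with
 * bit 0 (the enable bit) set, to the system-time MSR.  A hedged sketch; the
 * MSR number is the one Linux calls MSR_KVM_SYSTEM_TIME_NEW and the GPA value
 * is a made-up example:
 *
 * @code
 *  uint64_t const GCPhysSystemTime = UINT64_C(0x3000);              // example GPA, 4-byte aligned
 *  ASMWrMsr(UINT32_C(0x4b564d01), GCPhysSystemTime | UINT64_C(1));  // bit 0 = enable
 * @endcode
 */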


/**
 * MSR read handler for KVM.
 *
 * @returns Strict VBox status code like CPUMQueryGuestMsr().
 * @retval  VINF_CPUM_R3_MSR_READ
 * @retval  VERR_CPUM_RAISE_GP_0
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   idMsr   The MSR being read.
 * @param   pRange  The range this MSR belongs to.
 * @param   puValue Where to store the MSR value read.
 */
VMM_INT_DECL(VBOXSTRICTRC) gimKvmReadMsr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
{
    NOREF(pRange);
    PVM        pVM     = pVCpu->CTX_SUFF(pVM);
    PGIMKVM    pKvm    = &pVM->gim.s.u.Kvm;
    PGIMKVMCPU pKvmCpu = &pVCpu->gim.s.u.KvmCpu;

    switch (idMsr)
    {
        case MSR_GIM_KVM_SYSTEM_TIME:
        case MSR_GIM_KVM_SYSTEM_TIME_OLD:
            *puValue = pKvmCpu->u64SystemTimeMsr;
            return VINF_SUCCESS;

        case MSR_GIM_KVM_WALL_CLOCK:
        case MSR_GIM_KVM_WALL_CLOCK_OLD:
            *puValue = pKvm->u64WallClockMsr;
            return VINF_SUCCESS;

        default:
        {
#ifdef IN_RING3
            static uint32_t s_cTimes = 0;
            if (s_cTimes++ < 20)
                LogRel(("GIM: KVM: Unknown/invalid RdMsr (%#x) -> #GP(0)\n", idMsr));
#endif
            LogFunc(("Unknown/invalid RdMsr (%#RX32) -> #GP(0)\n", idMsr));
            break;
        }
    }

    return VERR_CPUM_RAISE_GP_0;
}
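
/*
 * Note on the MSR pairs above: KVM kept the pre-2.6.30 Linux MSR numbers
 * (0x11 wall clock, 0x12 system time) alongside the later KVM-specific range
 * (0x4b564d00 wall clock, 0x4b564d01 system time; the bytes 0x4b,0x56,0x4d
 * spell "KVM"), hence the _OLD variants.  A guest reads them back like any
 * other MSR; a hedged sketch with the new system-time MSR number assumed:
 *
 * @code
 *  uint64_t const uSystemTime = ASMRdMsr(UINT32_C(0x4b564d01));
 *  bool     const fEnabled    = RT_BOOL(uSystemTime & UINT64_C(1));  // bit 0 = enable bit
 * @endcode
 */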


/**
 * MSR write handler for KVM.
 *
 * @returns Strict VBox status code like CPUMSetGuestMsr().
 * @retval  VINF_CPUM_R3_MSR_WRITE
 * @retval  VERR_CPUM_RAISE_GP_0
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   idMsr       The MSR being written.
 * @param   pRange      The range this MSR belongs to.
 * @param   uRawValue   The raw value with the ignored bits not masked.
 */
VMM_INT_DECL(VBOXSTRICTRC) gimKvmWriteMsr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uRawValue)
{
    NOREF(pRange);
    PVM        pVM     = pVCpu->CTX_SUFF(pVM);
    PGIMKVMCPU pKvmCpu = &pVCpu->gim.s.u.KvmCpu;

    switch (idMsr)
    {
        case MSR_GIM_KVM_SYSTEM_TIME:
        case MSR_GIM_KVM_SYSTEM_TIME_OLD:
        {
            bool fEnable = RT_BOOL(uRawValue & MSR_GIM_KVM_SYSTEM_TIME_ENABLE_BIT);
#ifdef IN_RING0
            NOREF(fEnable); NOREF(pKvmCpu);
            gimR0KvmUpdateSystemTime(pVM, pVCpu);
            return VINF_CPUM_R3_MSR_WRITE;
#elif defined(IN_RC)
            Assert(pVM->cCpus == 1);
            if (fEnable)
            {
                RTCCUINTREG fEFlags = ASMIntDisableFlags();
                pKvmCpu->uTsc        = TMCpuTickGetNoCheck(pVCpu) | UINT64_C(1);
                pKvmCpu->uVirtNanoTS = TMVirtualGetNoCheck(pVM)   | UINT64_C(1);
                ASMSetFlags(fEFlags);
            }
            return VINF_CPUM_R3_MSR_WRITE;
#else /* IN_RING3 */
            if (!fEnable)
            {
                gimR3KvmDisableSystemTime(pVM);
                pKvmCpu->u64SystemTimeMsr = uRawValue;
                return VINF_SUCCESS;
            }

            /* Is the system-time struct. already enabled? If so, get flags that need preserving. */
            uint8_t fFlags = 0;
            GIMKVMSYSTEMTIME SystemTime;
            RT_ZERO(SystemTime);
            if (   MSR_GIM_KVM_SYSTEM_TIME_IS_ENABLED(pKvmCpu->u64SystemTimeMsr)
                && MSR_GIM_KVM_SYSTEM_TIME_GUEST_GPA(uRawValue) == pKvmCpu->GCPhysSystemTime)
            {
                int rc2 = PGMPhysSimpleReadGCPhys(pVM, &SystemTime, pKvmCpu->GCPhysSystemTime, sizeof(GIMKVMSYSTEMTIME));
                if (RT_SUCCESS(rc2))
                    pKvmCpu->fSystemTimeFlags = (SystemTime.fFlags & GIM_KVM_SYSTEM_TIME_FLAGS_GUEST_PAUSED);
            }

            /* Enable and populate the system-time struct. */
            pKvmCpu->u64SystemTimeMsr      = uRawValue;
            pKvmCpu->GCPhysSystemTime      = MSR_GIM_KVM_SYSTEM_TIME_GUEST_GPA(uRawValue);
            pKvmCpu->u32SystemTimeVersion += 2;
            int rc = gimR3KvmEnableSystemTime(pVM, pVCpu);
            if (RT_FAILURE(rc))
            {
                pKvmCpu->u64SystemTimeMsr = 0;
                return VERR_CPUM_RAISE_GP_0;
            }
            return VINF_SUCCESS;
#endif
        }

        case MSR_GIM_KVM_WALL_CLOCK:
        case MSR_GIM_KVM_WALL_CLOCK_OLD:
        {
#ifndef IN_RING3
            return VINF_CPUM_R3_MSR_WRITE;
#else
            /* Enable the wall-clock struct. */
            RTGCPHYS GCPhysWallClock = MSR_GIM_KVM_WALL_CLOCK_GUEST_GPA(uRawValue);
            if (RT_LIKELY(RT_ALIGN_64(GCPhysWallClock, 4) == GCPhysWallClock))
            {
                int rc = gimR3KvmEnableWallClock(pVM, GCPhysWallClock);
                if (RT_SUCCESS(rc))
                {
                    PGIMKVM pKvm = &pVM->gim.s.u.Kvm;
                    pKvm->u64WallClockMsr = uRawValue;
                    return VINF_SUCCESS;
                }
            }
            return VERR_CPUM_RAISE_GP_0;
#endif /* IN_RING3 */
        }

        default:
        {
#ifdef IN_RING3
            static uint32_t s_cTimes = 0;
            if (s_cTimes++ < 20)
                LogRel(("GIM: KVM: Unknown/invalid WrMsr (%#x,%#x`%08x) -> #GP(0)\n", idMsr,
                        uRawValue & UINT64_C(0xffffffff00000000), uRawValue & UINT64_C(0xffffffff)));
#endif
            LogFunc(("Unknown/invalid WrMsr (%#RX32,%#RX64) -> #GP(0)\n", idMsr, uRawValue));
            break;
        }
    }

    return VERR_CPUM_RAISE_GP_0;
}
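
/*
 * Illustration only: the wall-clock MSR takes the guest-physical address of a
 * wall-clock struct, which the handler above rejects with #GP(0) unless it is
 * 4-byte aligned.  A hedged guest-side sketch; the MSR number is the one
 * Linux calls MSR_KVM_WALL_CLOCK_NEW and the GPA value is a made-up example:
 *
 * @code
 *  uint64_t const GCPhysWallClock = UINT64_C(0x2000);  // example GPA, 4-byte aligned
 *  ASMWrMsr(UINT32_C(0x4b564d00), GCPhysWallClock);    // hypervisor fills the struct once enabled
 * @endcode
 */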


/**
 * Whether we need to trap \#UD exceptions in the guest.
 *
 * On AMD-V we need to trap them because paravirtualized Linux/KVM guests use
 * the Intel VMCALL instruction to make hypercalls and we need to trap and
 * optionally patch them to the AMD-V VMMCALL instruction and handle the
 * hypercall.
 *
 * Presumably this was done so that guest teleportation between an AMD and an
 * Intel machine would work without any changes at the time of teleportation.
 * However, this also means we -always- need to intercept \#UD exceptions on one
 * of the two CPU models (Intel or AMD). Hyper-V solves this problem more
 * elegantly by letting the hypervisor supply an opaque hypercall page.
 *
 * For raw-mode VMs, this function will always return true. See gimR3KvmInit().
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(bool) gimKvmShouldTrapXcptUD(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    return pVM->gim.s.u.Kvm.fTrapXcptUD;
}


/**
 * Exception handler for \#UD.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pCtx    Pointer to the guest-CPU context.
 * @param   pDis    Pointer to the disassembled instruction state at RIP.
 *                  Optional, can be NULL.
 *
 * @thread  EMT.
 */
VMM_INT_DECL(int) gimKvmXcptUD(PVMCPU pVCpu, PCPUMCTX pCtx, PDISCPUSTATE pDis)
{
    /*
     * If we didn't ask for #UD to be trapped, bail.
     */
    PVM     pVM  = pVCpu->CTX_SUFF(pVM);
    PGIMKVM pKvm = &pVM->gim.s.u.Kvm;
    if (RT_UNLIKELY(!pVM->gim.s.u.Kvm.fTrapXcptUD))
        return VERR_GIM_OPERATION_FAILED;

    int rc = VINF_SUCCESS;
    if (!pDis)
    {
        /*
         * Disassemble the instruction at RIP to figure out if it's the Intel VMCALL instruction
         * or the AMD VMMCALL instruction, and if so, handle it as a hypercall.
         */
        DISCPUSTATE Dis;
        rc = EMInterpretDisasCurrent(pVM, pVCpu, &Dis, NULL /* pcbInstr */);
        pDis = &Dis;
    }

    if (RT_SUCCESS(rc))
    {
        /*
         * Patch the instruction so we don't have to spend time disassembling it each time.
         * Makes sense only for HM as with raw-mode we will be getting a #UD regardless.
         */
        if (   pDis->pCurInstr->uOpcode == OP_VMCALL
            || pDis->pCurInstr->uOpcode == OP_VMMCALL)
        {
            /*
             * Make sure guest ring-0 is the one making the hypercall.
             */
            if (CPUMGetGuestCPL(pVCpu))
                return VERR_GIM_HYPERCALL_ACCESS_DENIED;

            if (   pDis->pCurInstr->uOpcode != pKvm->uOpCodeNative
                && HMIsEnabled(pVM))
            {
                uint8_t abHypercall[3];
                size_t  cbWritten = 0;
                rc = VMMPatchHypercall(pVM, &abHypercall, sizeof(abHypercall), &cbWritten);
                AssertRC(rc);
                Assert(sizeof(abHypercall) == pDis->cbInstr);
                Assert(sizeof(abHypercall) == cbWritten);

                rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, &abHypercall, sizeof(abHypercall));
            }

            /*
             * Update RIP and perform the hypercall.
             *
             * For HM, we could simply resume guest execution without performing the hypercall now
             * and instead do it in the VMCALL/VMMCALL exit handler the next time the patched
             * instruction is executed.
             *
             * For raw-mode we need to do this now anyway. So we do it here regardless, with the
             * added advantage that it saves one world-switch in the HM case.
             */
            if (RT_SUCCESS(rc))
            {
                pCtx->rip += pDis->cbInstr;
                rc = gimKvmHypercall(pVCpu, pCtx);
            }
        }
        else
            rc = VERR_GIM_OPERATION_FAILED;
    }
    return rc;
}
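
/*
 * For reference: both hypercall instructions patched above encode to three
 * bytes, which is why the sizeof(abHypercall) == pDis->cbInstr assertion in
 * gimKvmXcptUD holds (encodings per the Intel/AMD instruction set manuals):
 *
 * @code
 *  static uint8_t const s_abVmCall[3]  = { 0x0f, 0x01, 0xc1 };  // Intel VMCALL
 *  static uint8_t const s_abVmmCall[3] = { 0x0f, 0x01, 0xd9 };  // AMD VMMCALL
 * @endcode
 */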