VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/GIMAllKvm.cpp@61683

Last change on this file since 61683 was 61632, checked in by vboxsync, 9 years ago

GIM: Correct header order to match what is used *everywhere* else in the VMM and what is absolutely necessary for some of the tricks we use (like the CPUM and DBGF read-only data!).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 16.8 KB
/* $Id: GIMAllKvm.cpp 61632 2016-06-09 18:06:26Z vboxsync $ */
/** @file
 * GIM - Guest Interface Manager, KVM, All Contexts.
 */

/*
 * Copyright (C) 2015 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_GIM
#include <VBox/vmm/gim.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/tm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/pdmdev.h>
#include <VBox/vmm/pdmapi.h>
#include "GIMKvmInternal.h"
#include "GIMInternal.h"
#include <VBox/vmm/vm.h>

#include <VBox/dis.h>
#include <VBox/err.h>
#include <VBox/sup.h>

#include <iprt/asm-amd64-x86.h>
#include <iprt/time.h>


/**
 * Handles the KVM hypercall.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_SUCCESS if the hypercall succeeded (even if its operation
 *          failed).
 * @retval  VINF_GIM_R3_HYPERCALL re-start the hypercall from ring-3.
 * @retval  VERR_GIM_HYPERCALL_ACCESS_DENIED CPL is insufficient.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pCtx    Pointer to the guest-CPU context.
 *
 * @thread  EMT(pVCpu).
 */
VMM_INT_DECL(VBOXSTRICTRC) gimKvmHypercall(PVMCPU pVCpu, PCPUMCTX pCtx)
{
    VMCPU_ASSERT_EMT(pVCpu);

    PVM pVM = pVCpu->CTX_SUFF(pVM);
    STAM_REL_COUNTER_INC(&pVM->gim.s.StatHypercalls);

    /*
     * Get the hypercall operation and arguments.
     */
    bool const fIs64BitMode = CPUMIsGuestIn64BitCodeEx(pCtx);
    uint64_t   uHyperOp     = pCtx->rax;
    uint64_t   uHyperArg0   = pCtx->rbx;
    uint64_t   uHyperArg1   = pCtx->rcx;
    uint64_t   uHyperArg2   = pCtx->rdi;
    uint64_t   uHyperArg3   = pCtx->rsi;
    uint64_t   uHyperRet    = KVM_HYPERCALL_RET_ENOSYS;
    uint64_t   uAndMask     = UINT64_C(0xffffffffffffffff);
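    /* For a 32-bit guest only the low dwords of these registers are defined;
       the high halves may hold stale data, hence the truncation below. */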
    if (!fIs64BitMode)
    {
        uAndMask    = UINT64_C(0xffffffff);
        uHyperOp   &= UINT64_C(0xffffffff);
        uHyperArg0 &= UINT64_C(0xffffffff);
        uHyperArg1 &= UINT64_C(0xffffffff);
        uHyperArg2 &= UINT64_C(0xffffffff);
        uHyperArg3 &= UINT64_C(0xffffffff);
        uHyperRet  &= UINT64_C(0xffffffff);
    }

    /*
     * Verify that guest ring-0 is the one making the hypercall.
     */
    uint32_t uCpl = CPUMGetGuestCPL(pVCpu);
    if (RT_UNLIKELY(uCpl))
    {
        pCtx->rax = KVM_HYPERCALL_RET_EPERM & uAndMask;
        return VERR_GIM_HYPERCALL_ACCESS_DENIED;
    }

    /*
     * Do the work.
     */
    int rc = VINF_SUCCESS;
    switch (uHyperOp)
    {
        case KVM_HYPERCALL_OP_KICK_CPU:
        {
            if (uHyperArg1 < pVM->cCpus)
            {
                PVMCPU pVCpuTarget = &pVM->aCpus[uHyperArg1];   /** ASSUMES pVCpu index == ApicId of the VCPU. */
                VMCPU_FF_SET(pVCpuTarget, VMCPU_FF_UNHALT);
#ifdef IN_RING0
                /*
                 * We might be here with preemption disabled or enabled (i.e. depending on thread-context hooks
                 * being used), so don't try obtaining the GVMMR0 used lock here. See @bugref{7270#c148}.
                 */
                GVMMR0SchedWakeUpEx(pVM, pVCpuTarget->idCpu, false /* fTakeUsedLock */);
#elif defined(IN_RING3)
                int rc2 = SUPR3CallVMMR0(pVM->pVMR0, pVCpuTarget->idCpu, VMMR0_DO_GVMM_SCHED_WAKE_UP, NULL /* pvArg */);
                AssertRC(rc2);
#elif defined(IN_RC)
                /* Nothing to do for raw-mode, shouldn't really be used by raw-mode guests anyway. */
                Assert(pVM->cCpus == 1);
#endif
                uHyperRet = KVM_HYPERCALL_RET_SUCCESS;
            }
            else
            {
                /* Shouldn't ever happen! If it does, throw a guru, as otherwise it'll lead to deadlocks in the guest anyway! */
                rc = VERR_GIM_HYPERCALL_FAILED;
            }
            break;
        }

        case KVM_HYPERCALL_OP_VAPIC_POLL_IRQ:
            uHyperRet = KVM_HYPERCALL_RET_SUCCESS;
            break;

        default:
            break;
    }

    /*
     * Place the result in rax/eax.
     */
    pCtx->rax = uHyperRet & uAndMask;
    return rc;
}
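
/*
 * For reference, a Linux guest issues the hypercall handled above roughly
 * like this (an illustrative guest-side sketch, not part of this file; the
 * real helper lives in the guest's asm/kvm_para.h). Note how the register
 * assignments mirror what gimKvmHypercall reads: rax = operation,
 * rbx = arg0, rcx = arg1:
 *
 *     static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
 *                                       unsigned long p2)
 *     {
 *         long ret;
 *         asm volatile ("vmcall"             // patched to vmmcall on AMD
 *                       : "=a" (ret)
 *                       : "a" (nr), "b" (p1), "c" (p2)
 *                       : "memory");
 *         return ret;
 *     }
 *
 *     // e.g. kicking a halted vCPU (handled by KVM_HYPERCALL_OP_KICK_CPU):
 *     kvm_hypercall2(KVM_HC_KICK_CPU, 0 板flags板, apicid);
 */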


/**
 * Returns whether the guest has configured and enabled the use of KVM's
 * hypercall interface.
 *
 * @returns true if hypercalls are enabled, false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(bool) gimKvmAreHypercallsEnabled(PVMCPU pVCpu)
{
    NOREF(pVCpu);
    /* KVM paravirt interface doesn't have hypercall control bits (like Hyper-V does)
       that guests can control, i.e. hypercalls are always enabled. */
    return true;
}


/**
 * Returns whether the guest has configured and enabled the use of KVM's
 * paravirtualized TSC.
 *
 * @returns true if paravirt. TSC is enabled, false otherwise.
 * @param   pVM     The cross context VM structure.
 */
VMM_INT_DECL(bool) gimKvmIsParavirtTscEnabled(PVM pVM)
{
    uint32_t cCpus = pVM->cCpus;
    for (uint32_t i = 0; i < cCpus; i++)
    {
        PVMCPU     pVCpu      = &pVM->aCpus[i];
        PGIMKVMCPU pGimKvmCpu = &pVCpu->gim.s.u.KvmCpu;
        if (MSR_GIM_KVM_SYSTEM_TIME_IS_ENABLED(pGimKvmCpu->u64SystemTimeMsr))
            return true;
    }
    return false;
}


/**
 * MSR read handler for KVM.
 *
 * @returns Strict VBox status code like CPUMQueryGuestMsr().
 * @retval  VINF_CPUM_R3_MSR_READ
 * @retval  VERR_CPUM_RAISE_GP_0
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   idMsr   The MSR being read.
 * @param   pRange  The range this MSR belongs to.
 * @param   puValue Where to store the MSR value read.
 */
VMM_INT_DECL(VBOXSTRICTRC) gimKvmReadMsr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
{
    NOREF(pRange);
    PVM        pVM     = pVCpu->CTX_SUFF(pVM);
    PGIMKVM    pKvm    = &pVM->gim.s.u.Kvm;
    PGIMKVMCPU pKvmCpu = &pVCpu->gim.s.u.KvmCpu;

    switch (idMsr)
    {
        case MSR_GIM_KVM_SYSTEM_TIME:
        case MSR_GIM_KVM_SYSTEM_TIME_OLD:
            *puValue = pKvmCpu->u64SystemTimeMsr;
            return VINF_SUCCESS;

        case MSR_GIM_KVM_WALL_CLOCK:
        case MSR_GIM_KVM_WALL_CLOCK_OLD:
            *puValue = pKvm->u64WallClockMsr;
            return VINF_SUCCESS;

        default:
        {
#ifdef IN_RING3
            static uint32_t s_cTimes = 0;
            if (s_cTimes++ < 20)
                LogRel(("GIM: KVM: Unknown/invalid RdMsr (%#x) -> #GP(0)\n", idMsr));
#endif
            LogFunc(("Unknown/invalid RdMsr (%#RX32) -> #GP(0)\n", idMsr));
            break;
        }
    }

    return VERR_CPUM_RAISE_GP_0;
}
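
/*
 * For reference, a guest turns the clocks handled here on with plain WRMSRs
 * (guest-side sketch only; the MSR numbers follow the KVM paravirt ABI,
 * e.g. 0x4b564d00/0x4b564d01 for the "new" wall-clock/system-time MSRs):
 *
 *     wrmsr(MSR_KVM_WALL_CLOCK_NEW,  gpa_of_wall_clock);   // one-shot fill
 *     wrmsr(MSR_KVM_SYSTEM_TIME_NEW, gpa_of_pvti | 1);     // bit 0 = enable
 */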


/**
 * MSR write handler for KVM.
 *
 * @returns Strict VBox status code like CPUMSetGuestMsr().
 * @retval  VINF_CPUM_R3_MSR_WRITE
 * @retval  VERR_CPUM_RAISE_GP_0
 *
 * @param   pVCpu     The cross context virtual CPU structure.
 * @param   idMsr     The MSR being written.
 * @param   pRange    The range this MSR belongs to.
 * @param   uRawValue The raw value with the ignored bits not masked.
 */
VMM_INT_DECL(VBOXSTRICTRC) gimKvmWriteMsr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uRawValue)
{
    NOREF(pRange);
    PVM        pVM     = pVCpu->CTX_SUFF(pVM);
    PGIMKVMCPU pKvmCpu = &pVCpu->gim.s.u.KvmCpu;

    switch (idMsr)
    {
        case MSR_GIM_KVM_SYSTEM_TIME:
        case MSR_GIM_KVM_SYSTEM_TIME_OLD:
        {
            bool fEnable = RT_BOOL(uRawValue & MSR_GIM_KVM_SYSTEM_TIME_ENABLE_BIT);
#ifdef IN_RING0
            NOREF(fEnable); NOREF(pKvmCpu);
            gimR0KvmUpdateSystemTime(pVM, pVCpu);
            return VINF_CPUM_R3_MSR_WRITE;
#elif defined(IN_RC)
            Assert(pVM->cCpus == 1);
            if (fEnable)
            {
                RTCCUINTREG fEFlags = ASMIntDisableFlags();
                pKvmCpu->uTsc        = TMCpuTickGetNoCheck(pVCpu) | UINT64_C(1);
                pKvmCpu->uVirtNanoTS = TMVirtualGetNoCheck(pVM)   | UINT64_C(1);
                ASMSetFlags(fEFlags);
            }
            return VINF_CPUM_R3_MSR_WRITE;
#else /* IN_RING3 */
            if (!fEnable)
            {
                gimR3KvmDisableSystemTime(pVM);
                pKvmCpu->u64SystemTimeMsr = uRawValue;
                return VINF_SUCCESS;
            }

            /* Is the system-time struct. already enabled? If so, get flags that need preserving. */
            uint8_t          fFlags = 0;
            GIMKVMSYSTEMTIME SystemTime;
            RT_ZERO(SystemTime);
            if (   MSR_GIM_KVM_SYSTEM_TIME_IS_ENABLED(pKvmCpu->u64SystemTimeMsr)
                && MSR_GIM_KVM_SYSTEM_TIME_GUEST_GPA(uRawValue) == pKvmCpu->GCPhysSystemTime)
            {
                int rc2 = PGMPhysSimpleReadGCPhys(pVM, &SystemTime, pKvmCpu->GCPhysSystemTime, sizeof(GIMKVMSYSTEMTIME));
                if (RT_SUCCESS(rc2))
                    pKvmCpu->fSystemTimeFlags = (SystemTime.fFlags & GIM_KVM_SYSTEM_TIME_FLAGS_GUEST_PAUSED);
            }

            /* Enable and populate the system-time struct. */
            pKvmCpu->u64SystemTimeMsr      = uRawValue;
            pKvmCpu->GCPhysSystemTime      = MSR_GIM_KVM_SYSTEM_TIME_GUEST_GPA(uRawValue);
            pKvmCpu->u32SystemTimeVersion += 2;
            int rc = gimR3KvmEnableSystemTime(pVM, pVCpu);
            if (RT_FAILURE(rc))
            {
                pKvmCpu->u64SystemTimeMsr = 0;
                return VERR_CPUM_RAISE_GP_0;
            }
            return VINF_SUCCESS;
#endif
        }

        case MSR_GIM_KVM_WALL_CLOCK:
        case MSR_GIM_KVM_WALL_CLOCK_OLD:
        {
#ifndef IN_RING3
            return VINF_CPUM_R3_MSR_WRITE;
#else
            /* Enable the wall-clock struct. */
            RTGCPHYS GCPhysWallClock = MSR_GIM_KVM_WALL_CLOCK_GUEST_GPA(uRawValue);
            if (RT_LIKELY(RT_ALIGN_64(GCPhysWallClock, 4) == GCPhysWallClock))
            {
                int rc = gimR3KvmEnableWallClock(pVM, GCPhysWallClock);
                if (RT_SUCCESS(rc))
                {
                    PGIMKVM pKvm = &pVM->gim.s.u.Kvm;
                    pKvm->u64WallClockMsr = uRawValue;
                    return VINF_SUCCESS;
                }
            }
            return VERR_CPUM_RAISE_GP_0;
#endif /* IN_RING3 */
        }

        default:
        {
#ifdef IN_RING3
            static uint32_t s_cTimes = 0;
            if (s_cTimes++ < 20)
                LogRel(("GIM: KVM: Unknown/invalid WrMsr (%#x,%#x`%08x) -> #GP(0)\n", idMsr,
                        uRawValue & UINT64_C(0xffffffff00000000), uRawValue & UINT64_C(0xffffffff)));
#endif
            LogFunc(("Unknown/invalid WrMsr (%#RX32,%#RX64) -> #GP(0)\n", idMsr, uRawValue));
            break;
        }
    }

    return VERR_CPUM_RAISE_GP_0;
}
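
/*
 * Reading note: the system-time struct published above follows the KVM
 * pvclock layout, and guests consume it with a version-based retry loop,
 * which is why u32SystemTimeVersion is bumped by 2 per update (it is held
 * odd while an update is in flight). A sketch of the guest-side read
 * (field names per the Linux pvclock ABI, not defined in this file):
 *
 *     do {
 *         version = pvti->version;            // odd => update in progress
 *         smp_rmb();
 *         delta = rdtsc() - pvti->tsc_timestamp;
 *         if (pvti->tsc_shift >= 0) delta <<= pvti->tsc_shift;
 *         else                      delta >>= -pvti->tsc_shift;
 *         ns = pvti->system_time + ((delta * pvti->tsc_to_system_mul) >> 32);
 *         smp_rmb();
 *     } while ((version & 1) || version != pvti->version);
 */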


/**
 * Whether we need to trap \#UD exceptions in the guest.
 *
 * On AMD-V we need to trap them because paravirtualized Linux/KVM guests use
 * the Intel VMCALL instruction to make hypercalls and we need to trap and
 * optionally patch them to the AMD-V VMMCALL instruction and handle the
 * hypercall.
 *
 * I guess this was done so that guest teleportation between an AMD and an
 * Intel machine would work without any changes at the time of teleportation.
 * However, this also means we -always- need to intercept \#UD exceptions on
 * one of the two CPU models (Intel or AMD). Hyper-V solves this problem more
 * elegantly by letting the hypervisor supply an opaque hypercall page.
 *
 * For raw-mode VMs, this function will always return true. See gimR3KvmInit().
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(bool) gimKvmShouldTrapXcptUD(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    return pVM->gim.s.u.Kvm.fTrapXcptUD;
}


/**
 * Checks the currently disassembled instruction and executes the hypercall if
 * it's a hypercall instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pCtx    Pointer to the guest-CPU context.
 * @param   pDis    Pointer to the disassembled instruction state at RIP.
 *
 * @thread  EMT(pVCpu).
 *
 * @todo    Make this function static when @bugref{7270#c168} is addressed.
 */
VMM_INT_DECL(VBOXSTRICTRC) gimKvmExecHypercallInstr(PVMCPU pVCpu, PCPUMCTX pCtx, PDISCPUSTATE pDis)
{
    Assert(pVCpu);
    Assert(pCtx);
    Assert(pDis);
    VMCPU_ASSERT_EMT(pVCpu);

    /*
     * If the instruction at RIP is the Intel VMCALL instruction or
     * the AMD VMMCALL instruction, handle it as a hypercall.
     *
     * Linux/KVM guests always use the Intel VMCALL instruction, but we patch
     * it to the host-native one whenever we encounter it so subsequent calls
     * will not require disassembly (when coming from HM).
     */
    if (   pDis->pCurInstr->uOpcode == OP_VMCALL
        || pDis->pCurInstr->uOpcode == OP_VMMCALL)
    {
        /*
         * Perform the hypercall.
         *
         * For HM, we can simply resume guest execution without performing the hypercall now
         * and do it on the next VMCALL/VMMCALL exit handler on the patched instruction.
         *
         * For raw-mode we need to do this now anyway, so we do it here regardless, with the
         * added advantage that it saves one world-switch for the HM case.
         */
        VBOXSTRICTRC rcStrict = gimKvmHypercall(pVCpu, pCtx);
        if (rcStrict == VINF_SUCCESS)
        {
            /*
             * Patch the instruction so we don't have to spend time disassembling it each time.
             * Makes sense only for HM as with raw-mode we will be getting a #UD regardless.
             */
            PVM      pVM  = pVCpu->CTX_SUFF(pVM);
            PCGIMKVM pKvm = &pVM->gim.s.u.Kvm;
            if (   pDis->pCurInstr->uOpcode != pKvm->uOpCodeNative
                && HMIsEnabled(pVM))
            {
                /** @todo r=ramshankar: we probably should be doing this in an
                 *        EMT rendezvous. */
                uint8_t abHypercall[3];
                size_t  cbWritten = 0;
                int rc = VMMPatchHypercall(pVM, &abHypercall, sizeof(abHypercall), &cbWritten);
                AssertRC(rc);
                Assert(sizeof(abHypercall) == pDis->cbInstr);
                Assert(sizeof(abHypercall) == cbWritten);

                rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, &abHypercall, sizeof(abHypercall));
                AssertRC(rc);

                /** @todo Add stats for patching. */
            }
        }
        else
        {
            /* The KVM provider doesn't have any concept of continuing hypercalls. */
            Assert(rcStrict != VINF_GIM_HYPERCALL_CONTINUING);
#ifdef IN_RING3
            Assert(rcStrict != VINF_GIM_R3_HYPERCALL);
#endif
        }
        return rcStrict;
    }

    return VERR_GIM_INVALID_HYPERCALL_INSTR;
}
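
/*
 * Encoding note for the 3-byte patch above: VMCALL encodes as 0F 01 C1 and
 * VMMCALL as 0F 01 D9. Both are exactly three bytes, which is what makes the
 * in-place overwrite safe -- no instruction boundaries move and the guest's
 * RIP needs no adjustment.
 */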


/**
 * Exception handler for \#UD.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_SUCCESS if the hypercall succeeded (even if its operation
 *          failed).
 * @retval  VINF_GIM_R3_HYPERCALL re-start the hypercall from ring-3.
 * @retval  VERR_GIM_HYPERCALL_ACCESS_DENIED CPL is insufficient.
 * @retval  VERR_GIM_INVALID_HYPERCALL_INSTR instruction at RIP is not a valid
 *          hypercall instruction.
 *
 * @param   pVCpu    The cross context virtual CPU structure.
 * @param   pCtx     Pointer to the guest-CPU context.
 * @param   pDis     Pointer to the disassembled instruction state at RIP.
 *                   Optional, can be NULL.
 * @param   pcbInstr Where to store the instruction length of the hypercall
 *                   instruction. Optional, can be NULL.
 *
 * @thread  EMT(pVCpu).
 */
VMM_INT_DECL(VBOXSTRICTRC) gimKvmXcptUD(PVMCPU pVCpu, PCPUMCTX pCtx, PDISCPUSTATE pDis, uint8_t *pcbInstr)
{
    VMCPU_ASSERT_EMT(pVCpu);

    /*
     * If we didn't ask for #UD to be trapped, bail.
     */
    PVM      pVM  = pVCpu->CTX_SUFF(pVM);
    PCGIMKVM pKvm = &pVM->gim.s.u.Kvm;
    if (RT_UNLIKELY(!pKvm->fTrapXcptUD))
        return VERR_GIM_IPE_3;

    if (!pDis)
    {
        unsigned    cbInstr;
        DISCPUSTATE Dis;
        int rc = EMInterpretDisasCurrent(pVM, pVCpu, &Dis, &cbInstr);
        if (RT_SUCCESS(rc))
        {
            if (pcbInstr)
                *pcbInstr = (uint8_t)cbInstr;
            return gimKvmExecHypercallInstr(pVCpu, pCtx, &Dis);
        }

        Log(("GIM: KVM: Failed to disassemble instruction at CS:RIP=%04x:%08RX64. rc=%Rrc\n", pCtx->cs.Sel, pCtx->rip, rc));
        return rc;
    }

    return gimKvmExecHypercallInstr(pVCpu, pCtx, pDis);
}