VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/GIMAllKvm.cpp@85416

Last change on this file since 85416 was 83372, checked in by vboxsync, 5 years ago

GIM/KVM: Update system-time struct on demand. Fixes guests that rely on the struct being frequently updated, e.g. SLES 10 SP4 (see bugref:7270).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 16.6 KB
/* $Id: GIMAllKvm.cpp 83372 2020-03-23 14:52:24Z vboxsync $ */
/** @file
 * GIM - Guest Interface Manager, KVM, All Contexts.
 */

/*
 * Copyright (C) 2015-2020 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_GIM
#include <VBox/vmm/gim.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/tm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/pdmdev.h>
#include <VBox/vmm/pdmapi.h>
#include "GIMKvmInternal.h"
#include "GIMInternal.h"
#include <VBox/vmm/vmcc.h>

#include <VBox/dis.h>
#include <VBox/err.h>
#include <VBox/sup.h>

#include <iprt/asm-amd64-x86.h>
#include <iprt/time.h>


/**
 * Handles the KVM hypercall.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_SUCCESS if the hypercall succeeded (even if its operation
 *          failed).
 * @retval  VINF_GIM_R3_HYPERCALL re-start the hypercall from ring-3.
 * @retval  VERR_GIM_HYPERCALL_ACCESS_DENIED CPL is insufficient.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pCtx    Pointer to the guest-CPU context.
 *
 * @thread  EMT(pVCpu).
 */
VMM_INT_DECL(VBOXSTRICTRC) gimKvmHypercall(PVMCPUCC pVCpu, PCPUMCTX pCtx)
{
    VMCPU_ASSERT_EMT(pVCpu);

    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    STAM_REL_COUNTER_INC(&pVM->gim.s.StatHypercalls);

    /*
     * Get the hypercall operation and arguments.
     */
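    /* The operation comes in RAX and the arguments in RBX, RCX, RDI and RSI
       (see below). For 32-bit guests only the lower halves of these registers
       are defined, hence the masking that follows. */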
    bool const fIs64BitMode = CPUMIsGuestIn64BitCodeEx(pCtx);
    uint64_t   uHyperOp     = pCtx->rax;
    uint64_t   uHyperArg0   = pCtx->rbx;
    uint64_t   uHyperArg1   = pCtx->rcx;
    uint64_t   uHyperArg2   = pCtx->rdi;
    uint64_t   uHyperArg3   = pCtx->rsi;
    uint64_t   uHyperRet    = KVM_HYPERCALL_RET_ENOSYS;
    uint64_t   uAndMask     = UINT64_C(0xffffffffffffffff);
    if (!fIs64BitMode)
    {
        uAndMask    = UINT64_C(0xffffffff);
        uHyperOp   &= UINT64_C(0xffffffff);
        uHyperArg0 &= UINT64_C(0xffffffff);
        uHyperArg1 &= UINT64_C(0xffffffff);
        uHyperArg2 &= UINT64_C(0xffffffff);
        uHyperArg3 &= UINT64_C(0xffffffff);
        uHyperRet  &= UINT64_C(0xffffffff);
    }

    /*
     * Verify that guest ring-0 is the one making the hypercall.
     */
    uint32_t uCpl = CPUMGetGuestCPL(pVCpu);
    if (RT_UNLIKELY(uCpl))
    {
        pCtx->rax = KVM_HYPERCALL_RET_EPERM & uAndMask;
        return VERR_GIM_HYPERCALL_ACCESS_DENIED;
    }

    /*
     * Do the work.
     */
    int rc = VINF_SUCCESS;
    switch (uHyperOp)
    {
        case KVM_HYPERCALL_OP_KICK_CPU:
        {
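            /* Arg1 is the APIC ID of the VCPU to wake. Linux guests use this
               for paravirtualized spinlocks: a VCPU that has spun too long
               halts, and the lock holder kicks it awake with this hypercall
               (per the KVM hypercall documentation). */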
            if (uHyperArg1 < pVM->cCpus)
            {
                PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, uHyperArg1);   /* ASSUMES pVCpu index == ApicId of the VCPU. */
                EMUnhaltAndWakeUp(pVM, pVCpuDst);
                uHyperRet = KVM_HYPERCALL_RET_SUCCESS;
            }
            else
            {
                /* Shouldn't ever happen! If it does, throw a guru, as otherwise it'll lead to deadlocks in the guest anyway! */
                rc = VERR_GIM_HYPERCALL_FAILED;
            }
            break;
        }

        case KVM_HYPERCALL_OP_VAPIC_POLL_IRQ:
            uHyperRet = KVM_HYPERCALL_RET_SUCCESS;
            break;

        default:
            break;
    }

    /*
     * Place the result in rax/eax.
     */
    pCtx->rax = uHyperRet & uAndMask;
    return rc;
}
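
/*
 * For reference, a Linux guest issues these hypercalls roughly as follows
 * (a sketch modelled on the kernel's kvm_hypercall helpers, not part of
 * VirtualBox; the instruction is VMCALL on Intel and is patched to VMMCALL
 * on AMD by gimKvmHypercallEx below):
 *
 *     static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
 *     {
 *         long ret;
 *         asm volatile ("vmcall"
 *                       : "=a" (ret)
 *                       : "a" (nr), "b" (p1)
 *                       : "memory");
 *         return ret;
 *     }
 */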


/**
 * Returns whether the guest has configured and enabled the use of KVM's
 * hypercall interface.
 *
 * @returns true if hypercalls are enabled, false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(bool) gimKvmAreHypercallsEnabled(PVMCPU pVCpu)
{
    NOREF(pVCpu);
    /* KVM paravirt interface doesn't have hypercall control bits (like Hyper-V does)
       that guests can control, i.e. hypercalls are always enabled. */
    return true;
}


/**
 * Returns whether the guest has configured and enabled the use of KVM's
 * paravirtualized TSC.
 *
 * @returns true if paravirt. TSC is enabled, false otherwise.
 * @param   pVM     The cross context VM structure.
 */
VMM_INT_DECL(bool) gimKvmIsParavirtTscEnabled(PVMCC pVM)
{
    uint32_t const cCpus = pVM->cCpus;
    for (uint32_t idCpu = 0; idCpu < cCpus; idCpu++)
    {
        PVMCPUCC   pVCpu      = pVM->CTX_SUFF(apCpus)[idCpu];
        PGIMKVMCPU pGimKvmCpu = &pVCpu->gim.s.u.KvmCpu;
        if (MSR_GIM_KVM_SYSTEM_TIME_IS_ENABLED(pGimKvmCpu->u64SystemTimeMsr))
            return true;
    }
    return false;
}


/**
 * MSR read handler for KVM.
 *
 * @returns Strict VBox status code like CPUMQueryGuestMsr().
 * @retval  VINF_CPUM_R3_MSR_READ
 * @retval  VERR_CPUM_RAISE_GP_0
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   idMsr   The MSR being read.
 * @param   pRange  The range this MSR belongs to.
 * @param   puValue Where to store the MSR value read.
 */
VMM_INT_DECL(VBOXSTRICTRC) gimKvmReadMsr(PVMCPUCC pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
{
    NOREF(pRange);
    PVM        pVM     = pVCpu->CTX_SUFF(pVM);
    PGIMKVM    pKvm    = &pVM->gim.s.u.Kvm;
    PGIMKVMCPU pKvmCpu = &pVCpu->gim.s.u.KvmCpu;

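    /* The *_OLD MSR numbers are the original ones from the early KVM ABI
       (0x11/0x12, per the Linux KVM ABI); the newer ones live in the
       0x4b564dxx range. Both are recognized for guest compatibility; see
       GIMKvmInternal.h for the exact definitions used here. */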
    switch (idMsr)
    {
        case MSR_GIM_KVM_SYSTEM_TIME:
        case MSR_GIM_KVM_SYSTEM_TIME_OLD:
            *puValue = pKvmCpu->u64SystemTimeMsr;
            return VINF_SUCCESS;

        case MSR_GIM_KVM_WALL_CLOCK:
        case MSR_GIM_KVM_WALL_CLOCK_OLD:
            *puValue = pKvm->u64WallClockMsr;
            return VINF_SUCCESS;

        default:
        {
#ifdef IN_RING3
            static uint32_t s_cTimes = 0;
            if (s_cTimes++ < 20)
                LogRel(("GIM: KVM: Unknown/invalid RdMsr (%#x) -> #GP(0)\n", idMsr));
#endif
            LogFunc(("Unknown/invalid RdMsr (%#RX32) -> #GP(0)\n", idMsr));
            break;
        }
    }

    return VERR_CPUM_RAISE_GP_0;
}


/**
 * MSR write handler for KVM.
 *
 * @returns Strict VBox status code like CPUMSetGuestMsr().
 * @retval  VINF_CPUM_R3_MSR_WRITE
 * @retval  VERR_CPUM_RAISE_GP_0
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   idMsr       The MSR being written.
 * @param   pRange      The range this MSR belongs to.
 * @param   uRawValue   The raw value with the ignored bits not masked.
 */
VMM_INT_DECL(VBOXSTRICTRC) gimKvmWriteMsr(PVMCPUCC pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uRawValue)
{
    NOREF(pRange);
    PVMCC      pVM     = pVCpu->CTX_SUFF(pVM);
    PGIMKVMCPU pKvmCpu = &pVCpu->gim.s.u.KvmCpu;

    switch (idMsr)
    {
        case MSR_GIM_KVM_SYSTEM_TIME:
        case MSR_GIM_KVM_SYSTEM_TIME_OLD:
        {
            bool fEnable = RT_BOOL(uRawValue & MSR_GIM_KVM_SYSTEM_TIME_ENABLE_BIT);
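            /* In the KVM ABI, bit 0 of the written value is the enable flag
               and the remaining bits encode the guest-physical address of the
               system-time struct; the MSR_GIM_KVM_SYSTEM_TIME_* macros used
               below decode exactly that. */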
#ifndef IN_RING3
            gimR0KvmUpdateSystemTime(pVM, pVCpu);
            if (   fEnable
                && MSR_GIM_KVM_SYSTEM_TIME_IS_ENABLED(pKvmCpu->u64SystemTimeMsr)
                && MSR_GIM_KVM_SYSTEM_TIME_GUEST_GPA(uRawValue) == pKvmCpu->GCPhysSystemTime)
            {
                /*
                 * Guest is asking for an update of the system-time struct. We only
                 * need to update the TSC and system time fields, the rest stays unchanged.
                 * We have to write the version twice in case another VCPU is reading the
                 * system-time struct concurrently. Making the version number odd indicates
                 * that the data is being updated.
                 */
                GIMKVMSYSTEMTIME SystemTime;
                SystemTime.u32Version  = ++pKvmCpu->u32SystemTimeVersion;
                SystemTime.u32Padding0 = 0;
                SystemTime.u64NanoTS   = pKvmCpu->uVirtNanoTS;
                SystemTime.u64Tsc      = pKvmCpu->uTsc;
                AssertCompile(RT_UOFFSETOF(GIMKVMSYSTEMTIME, u32TscScale) == 3 * sizeof(uint64_t));
                int rc2 = PGMPhysSimpleWriteGCPhys(pVM, pKvmCpu->GCPhysSystemTime, &SystemTime, RT_UOFFSETOF(GIMKVMSYSTEMTIME, u32TscScale));
                if (RT_FAILURE(rc2))
                    return rc2;

                /* Make the version number even again to indicate the data is consistent. */
                ++pKvmCpu->u32SystemTimeVersion;
                rc2 = PGMPhysSimpleWriteGCPhys(pVM, pKvmCpu->GCPhysSystemTime, &pKvmCpu->u32SystemTimeVersion, sizeof(pKvmCpu->u32SystemTimeVersion));
                if (RT_FAILURE(rc2))
                    return rc2;
                return VINF_SUCCESS;
            }
            else
                return VINF_CPUM_R3_MSR_WRITE;
#else  /* IN_RING3 */
            if (!fEnable)
            {
                gimR3KvmDisableSystemTime(pVM);
                pKvmCpu->u64SystemTimeMsr = uRawValue;
                return VINF_SUCCESS;
            }

            /* We ASSUME that ring-0/raw-mode have updated these. */
            /** @todo Get logically atomic NanoTS/TSC pairs in ring-3. */
            Assert(pKvmCpu->uTsc);
            Assert(pKvmCpu->uVirtNanoTS);

            /* Enable and populate the system-time struct. */
            pKvmCpu->u64SystemTimeMsr      = uRawValue;
            pKvmCpu->GCPhysSystemTime      = MSR_GIM_KVM_SYSTEM_TIME_GUEST_GPA(uRawValue);
            pKvmCpu->u32SystemTimeVersion += 2;
            int rc = gimR3KvmEnableSystemTime(pVM, pVCpu);
            if (RT_FAILURE(rc))
            {
                pKvmCpu->u64SystemTimeMsr = 0;
                /* We shouldn't throw a #GP(0) here for buggy guests (neither does KVM apparently), see @bugref{8627}. */
            }
            return VINF_SUCCESS;
#endif /* IN_RING3 */
        }
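
        /*
         * For reference, the guest-side reader pairs with the version protocol
         * above roughly like this (a sketch of the pvclock read loop as
         * described by the KVM ABI, not VirtualBox code; tscToNano stands in
         * for the TSC-scale/shift conversion):
         *
         *     do {
         *         uVersion = pSysTime->u32Version;
         *         rmb();
         *         uNanoTS = pSysTime->u64NanoTS + tscToNano(rdtsc() - pSysTime->u64Tsc);
         *         rmb();
         *     } while ((uVersion & 1) || uVersion != pSysTime->u32Version);
         */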

        case MSR_GIM_KVM_WALL_CLOCK:
        case MSR_GIM_KVM_WALL_CLOCK_OLD:
        {
#ifndef IN_RING3
            return VINF_CPUM_R3_MSR_WRITE;
#else
            /* Enable the wall-clock struct. */
            RTGCPHYS GCPhysWallClock = MSR_GIM_KVM_WALL_CLOCK_GUEST_GPA(uRawValue);
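            /* The wall-clock struct must be 4-byte aligned in guest memory;
               anything else raises #GP(0) below. */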
            if (RT_LIKELY(RT_ALIGN_64(GCPhysWallClock, 4) == GCPhysWallClock))
            {
                int rc = gimR3KvmEnableWallClock(pVM, GCPhysWallClock);
                if (RT_SUCCESS(rc))
                {
                    PGIMKVM pKvm = &pVM->gim.s.u.Kvm;
                    pKvm->u64WallClockMsr = uRawValue;
                    return VINF_SUCCESS;
                }
            }
            return VERR_CPUM_RAISE_GP_0;
#endif /* IN_RING3 */
        }

        default:
        {
#ifdef IN_RING3
            static uint32_t s_cTimes = 0;
            if (s_cTimes++ < 20)
                LogRel(("GIM: KVM: Unknown/invalid WrMsr (%#x,%#x`%08x) -> #GP(0)\n", idMsr,
                        uRawValue & UINT64_C(0xffffffff00000000), uRawValue & UINT64_C(0xffffffff)));
#endif
            LogFunc(("Unknown/invalid WrMsr (%#RX32,%#RX64) -> #GP(0)\n", idMsr, uRawValue));
            break;
        }
    }

    return VERR_CPUM_RAISE_GP_0;
}


/**
 * Whether we need to trap \#UD exceptions in the guest.
 *
 * On AMD-V we need to trap them because paravirtualized Linux/KVM guests use
 * the Intel VMCALL instruction to make hypercalls and we need to trap and
 * optionally patch them to the AMD-V VMMCALL instruction and handle the
 * hypercall.
 *
 * I guess this was done so that guest teleportation between an AMD and an
 * Intel machine would work without any changes at the time of teleportation.
 * However, this also means we -always- need to intercept \#UD exceptions on one
 * of the two CPU models (Intel or AMD). Hyper-V solves this problem more
 * elegantly by letting the hypervisor supply an opaque hypercall page.
 *
 * For raw-mode VMs, this function will always return true. See gimR3KvmInit().
 *
 * @param   pVM     The cross context VM structure.
 */
VMM_INT_DECL(bool) gimKvmShouldTrapXcptUD(PVM pVM)
{
    return pVM->gim.s.u.Kvm.fTrapXcptUD;
}


/**
 * Checks the instruction and executes the hypercall if it's a valid hypercall
 * instruction.
 *
 * This interface is used by \#UD handlers and IEM.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pCtx        Pointer to the guest-CPU context.
 * @param   uDisOpcode  The disassembler opcode.
 * @param   cbInstr     The instruction length.
 *
 * @thread  EMT(pVCpu).
 */
VMM_INT_DECL(VBOXSTRICTRC) gimKvmHypercallEx(PVMCPUCC pVCpu, PCPUMCTX pCtx, unsigned uDisOpcode, uint8_t cbInstr)
{
    Assert(pVCpu);
    Assert(pCtx);
    VMCPU_ASSERT_EMT(pVCpu);

    /*
     * If the instruction at RIP is the Intel VMCALL instruction or
     * the AMD VMMCALL instruction, handle it as a hypercall.
     *
     * Linux/KVM guests always use the Intel VMCALL instruction, but we patch
     * it to the host-native one whenever we encounter it so that subsequent
     * calls will not require disassembly (when coming from HM).
     */
    if (   uDisOpcode == OP_VMCALL
        || uDisOpcode == OP_VMMCALL)
    {
        /*
         * Perform the hypercall.
         *
         * For HM, we can simply resume guest execution without performing the hypercall now and
         * do it on the next VMCALL/VMMCALL exit handler on the patched instruction.
         *
         * For raw-mode we need to do this now anyway, so we do it here regardless, with the
         * added advantage that it saves one world-switch for the HM case.
         */
        VBOXSTRICTRC rcStrict = gimKvmHypercall(pVCpu, pCtx);
        if (rcStrict == VINF_SUCCESS)
        {
            /*
             * Patch the instruction so we don't have to spend time disassembling it each time.
             * Makes sense only for HM as with raw-mode we will be getting a #UD regardless.
             */
            PVM      pVM  = pVCpu->CTX_SUFF(pVM);
            PCGIMKVM pKvm = &pVM->gim.s.u.Kvm;
            if (   uDisOpcode != pKvm->uOpcodeNative
                && cbInstr == sizeof(pKvm->abOpcodeNative) )
            {
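                /* VMCALL (0F 01 C1) and VMMCALL (0F 01 D9) are both 3 bytes
                   long, so the size check above guarantees the patch can be
                   done in place. */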
                /** @todo r=ramshankar: we probably should be doing this in an
                 *        EMT rendezvous. */
                /** @todo Add stats for patching. */
                int rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, pKvm->abOpcodeNative, sizeof(pKvm->abOpcodeNative));
                AssertRC(rc);
            }
        }
        else
        {
            /* The KVM provider doesn't have any concept of continuing hypercalls. */
            Assert(rcStrict != VINF_GIM_HYPERCALL_CONTINUING);
#ifdef IN_RING3
            Assert(rcStrict != VINF_GIM_R3_HYPERCALL);
#endif
        }
        return rcStrict;
    }

    return VERR_GIM_INVALID_HYPERCALL_INSTR;
}


/**
 * Exception handler for \#UD.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_SUCCESS if the hypercall succeeded (even if its operation
 *          failed).
 * @retval  VINF_GIM_R3_HYPERCALL re-start the hypercall from ring-3.
 * @retval  VERR_GIM_HYPERCALL_ACCESS_DENIED CPL is insufficient.
 * @retval  VERR_GIM_INVALID_HYPERCALL_INSTR instruction at RIP is not a valid
 *          hypercall instruction.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pCtx        Pointer to the guest-CPU context.
 * @param   pDis        Pointer to the disassembled instruction state at RIP.
 *                      Optional, can be NULL.
 * @param   pcbInstr    Where to store the instruction length of the hypercall
 *                      instruction. Optional, can be NULL.
 *
 * @thread  EMT(pVCpu).
 */
VMM_INT_DECL(VBOXSTRICTRC) gimKvmXcptUD(PVMCC pVM, PVMCPUCC pVCpu, PCPUMCTX pCtx, PDISCPUSTATE pDis, uint8_t *pcbInstr)
{
    VMCPU_ASSERT_EMT(pVCpu);

    /*
     * If we didn't ask for #UD to be trapped, bail.
     */
    if (RT_UNLIKELY(!pVM->gim.s.u.Kvm.fTrapXcptUD))
        return VERR_GIM_IPE_3;

    if (!pDis)
    {
        unsigned    cbInstr;
        DISCPUSTATE Dis;
        int rc = EMInterpretDisasCurrent(pVM, pVCpu, &Dis, &cbInstr);
        if (RT_SUCCESS(rc))
        {
            if (pcbInstr)
                *pcbInstr = (uint8_t)cbInstr;
            return gimKvmHypercallEx(pVCpu, pCtx, Dis.pCurInstr->uOpcode, Dis.cbInstr);
        }

        Log(("GIM: KVM: Failed to disassemble instruction at CS:RIP=%04x:%08RX64. rc=%Rrc\n", pCtx->cs.Sel, pCtx->rip, rc));
        return rc;
    }

    return gimKvmHypercallEx(pVCpu, pCtx, pDis->pCurInstr->uOpcode, pDis->cbInstr);
}