VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/GIMAllHv.cpp@93666

Last change on this file since 93666 was 93554, checked in by vboxsync, 3 years ago

VMM: Changed PAGE_SIZE -> GUEST_PAGE_SIZE / HOST_PAGE_SIZE, PAGE_SHIFT -> GUEST_PAGE_SHIFT / HOST_PAGE_SHIFT, and PAGE_OFFSET_MASK -> GUEST_PAGE_OFFSET_MASK / HOST_PAGE_OFFSET_MASK. Also removed most usage of ASMMemIsZeroPage and ASMMemZeroPage since the host and guest page size doesn't need to be the same any more. Some work left to do in the page pool code. bugref:9898

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 56.7 KB
1/* $Id: GIMAllHv.cpp 93554 2022-02-02 22:57:02Z vboxsync $ */
2/** @file
3 * GIM - Guest Interface Manager, Microsoft Hyper-V, All Contexts.
4 */
5
6/*
7 * Copyright (C) 2014-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_GIM
23#include <VBox/vmm/gim.h>
24#include <VBox/vmm/em.h>
25#include <VBox/vmm/hm.h>
26#include <VBox/vmm/tm.h>
27#include <VBox/vmm/dbgf.h>
28#include <VBox/vmm/pdmdev.h>
29#include <VBox/vmm/pdmapi.h>
30#include <VBox/vmm/pgm.h>
31#include <VBox/vmm/apic.h>
32#include <VBox/vmm/em.h>
33#include "GIMHvInternal.h"
34#include "GIMInternal.h"
35#include <VBox/vmm/vmcc.h>
36
37#include <VBox/err.h>
38
39#include <iprt/asm-amd64-x86.h>
40#ifdef IN_RING3
41# include <iprt/mem.h>
42#endif
43
44
45#ifdef IN_RING3
46/**
47 * Read and validate slow hypercall parameters.
48 *
49 * @returns VBox status code.
50 * @param pVM The cross context VM structure.
51 * @param pCtx Pointer to the guest-CPU context.
52 * @param fIs64BitMode Whether the guest is currently in 64-bit mode or not.
53 * @param enmParam The hypercall parameter type.
54 * @param prcHv Where to store the Hyper-V status code. Only valid
55 * to the caller when this function returns
56 * VINF_SUCCESS.
57 */
58static int gimHvReadSlowHypercallParam(PVM pVM, PCPUMCTX pCtx, bool fIs64BitMode, GIMHVHYPERCALLPARAM enmParam, int *prcHv)
59{
60 int rc = VINF_SUCCESS;
61 PGIMHV pHv = &pVM->gim.s.u.Hv;
62 RTGCPHYS GCPhysParam;
63 void *pvDst;
64 if (enmParam == GIMHVHYPERCALLPARAM_IN)
65 {
66 GCPhysParam = fIs64BitMode ? pCtx->rdx : (pCtx->rbx << 32) | pCtx->ecx;
67 pvDst = pHv->pbHypercallIn;
68 pHv->GCPhysHypercallIn = GCPhysParam;
69 }
70 else
71 {
72 GCPhysParam = fIs64BitMode ? pCtx->r8 : (pCtx->rdi << 32) | pCtx->esi;
73 pvDst = pHv->pbHypercallOut;
74 pHv->GCPhysHypercallOut = GCPhysParam;
75 Assert(enmParam == GIMHVHYPERCALLPARAM_OUT);
76 }
77
78 const char *pcszParam = enmParam == GIMHVHYPERCALLPARAM_IN ? "input" : "output"; NOREF(pcszParam);
79 if (RT_ALIGN_64(GCPhysParam, 8) == GCPhysParam)
80 {
81 if (PGMPhysIsGCPhysNormal(pVM, GCPhysParam))
82 {
83 rc = PGMPhysSimpleReadGCPhys(pVM, pvDst, GCPhysParam, GIM_HV_PAGE_SIZE);
84 if (RT_SUCCESS(rc))
85 {
86 *prcHv = GIM_HV_STATUS_SUCCESS;
87 return VINF_SUCCESS;
88 }
89 LogRel(("GIM: HyperV: Failed reading %s param at %#RGp. rc=%Rrc\n", pcszParam, GCPhysParam, rc));
90 rc = VERR_GIM_HYPERCALL_MEMORY_READ_FAILED;
91 }
92 else
93 {
94 Log(("GIM: HyperV: Invalid %s param address %#RGp\n", pcszParam, GCPhysParam));
95 *prcHv = GIM_HV_STATUS_INVALID_PARAMETER;
96 }
97 }
98 else
99 {
100 Log(("GIM: HyperV: Misaligned %s param address %#RGp\n", pcszParam, GCPhysParam));
101 *prcHv = GIM_HV_STATUS_INVALID_ALIGNMENT;
102 }
103 return rc;
104}
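/*
 * Note: For slow (memory based) hypercalls the guest passes guest-physical addresses of the
 * input and output parameter pages. In 64-bit mode these are taken from RDX (input) and R8
 * (output); in 32-bit mode they are formed from the EBX:ECX and EDI:ESI register pairs, as
 * read above. Both addresses must be 8-byte aligned and refer to normal RAM, otherwise the
 * hypercall fails with an alignment or invalid-parameter status.
 */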
105
106
107/**
108 * Helper for reading and validating slow hypercall input and output parameters.
109 *
110 * @returns VBox status code.
111 * @param pVM The cross context VM structure.
112 * @param pCtx Pointer to the guest-CPU context.
113 * @param fIs64BitMode Whether the guest is currently in 64-bit mode or not.
114 * @param prcHv Where to store the Hyper-V status code. Only valid
115 * to the caller when this function returns
116 * VINF_SUCCESS.
117 */
118static int gimHvReadSlowHypercallParamsInOut(PVM pVM, PCPUMCTX pCtx, bool fIs64BitMode, int *prcHv)
119{
120 int rc = gimHvReadSlowHypercallParam(pVM, pCtx, fIs64BitMode, GIMHVHYPERCALLPARAM_IN, prcHv);
121 if ( RT_SUCCESS(rc)
122 && *prcHv == GIM_HV_STATUS_SUCCESS)
123 rc = gimHvReadSlowHypercallParam(pVM, pCtx, fIs64BitMode, GIMHVHYPERCALLPARAM_OUT, prcHv);
124 return rc;
125}
126#endif
127
128
129/**
130 * Handles all Hyper-V hypercalls.
131 *
132 * @returns Strict VBox status code.
133 * @retval VINF_SUCCESS if the hypercall succeeded (even if its operation
134 * failed).
135 * @retval VINF_GIM_R3_HYPERCALL re-start the hypercall from ring-3.
136 * @retval VERR_GIM_HYPERCALLS_NOT_ENABLED hypercalls are disabled by the
137 * guest.
138 * @retval VERR_GIM_HYPERCALL_ACCESS_DENIED CPL is insufficient.
139 * @retval VERR_GIM_HYPERCALL_MEMORY_READ_FAILED hypercall failed while reading
140 * memory.
141 * @retval VERR_GIM_HYPERCALL_MEMORY_WRITE_FAILED hypercall failed while
142 * writing memory.
143 *
144 * @param pVCpu The cross context virtual CPU structure.
145 * @param pCtx Pointer to the guest-CPU context.
146 *
147 * @thread EMT(pVCpu).
148 */
149VMM_INT_DECL(VBOXSTRICTRC) gimHvHypercall(PVMCPUCC pVCpu, PCPUMCTX pCtx)
150{
151 VMCPU_ASSERT_EMT(pVCpu);
152
153#ifndef IN_RING3
154 RT_NOREF_PV(pVCpu);
155 RT_NOREF_PV(pCtx);
156 return VINF_GIM_R3_HYPERCALL;
157#else
158 PVM pVM = pVCpu->CTX_SUFF(pVM);
159 STAM_REL_COUNTER_INC(&pVM->gim.s.StatHypercalls);
160
161 /*
162 * Verify that hypercalls are enabled by the guest.
163 */
164 if (!gimHvAreHypercallsEnabled(pVM))
165 return VERR_GIM_HYPERCALLS_NOT_ENABLED;
166
167 /*
168 * Verify guest is in ring-0 protected mode.
169 */
170 uint32_t uCpl = CPUMGetGuestCPL(pVCpu);
171 if ( uCpl
172 || CPUMIsGuestInRealModeEx(pCtx))
173 {
174 return VERR_GIM_HYPERCALL_ACCESS_DENIED;
175 }
176
177 /*
178 * Get the hypercall operation code and modes.
179 * Fast hypercalls have at most two inputs and no output parameters.
180 */
181 const bool fIs64BitMode = CPUMIsGuestIn64BitCodeEx(pCtx);
182 const uint64_t uHyperIn = fIs64BitMode ? pCtx->rcx : (pCtx->rdx << 32) | pCtx->eax;
183 const uint16_t uHyperOp = GIM_HV_HYPERCALL_IN_CALL_CODE(uHyperIn);
184 const bool fHyperFast = GIM_HV_HYPERCALL_IN_IS_FAST(uHyperIn);
185 const uint16_t cHyperReps = GIM_HV_HYPERCALL_IN_REP_COUNT(uHyperIn);
186 const uint16_t idxHyperRepStart = GIM_HV_HYPERCALL_IN_REP_START_IDX(uHyperIn);
187 uint64_t cHyperRepsDone = 0;
188
189 /* Currently no repeating hypercalls are supported. */
190 RT_NOREF2(cHyperReps, idxHyperRepStart);
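 /*
 * Note: Fast hypercalls pass their parameters entirely in registers, while slow hypercalls
 * reference guest-physical input/output pages (read via gimHvReadSlowHypercallParam below).
 * The rep count and rep start index decoded above only matter for repeating hypercalls,
 * which we do not implement, so cHyperRepsDone stays 0 and is merely echoed back in the
 * result value.
 */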
191
192 int rc = VINF_SUCCESS;
193 int rcHv = GIM_HV_STATUS_OPERATION_DENIED;
194 PGIMHV pHv = &pVM->gim.s.u.Hv;
195
196 /*
197 * Validate common hypercall input parameters.
198 */
199 if ( !GIM_HV_HYPERCALL_IN_RSVD_1(uHyperIn)
200 && !GIM_HV_HYPERCALL_IN_RSVD_2(uHyperIn)
201 && !GIM_HV_HYPERCALL_IN_RSVD_3(uHyperIn))
202 {
203 /*
204 * Perform the hypercall.
205 */
206 switch (uHyperOp)
207 {
208 case GIM_HV_HYPERCALL_OP_RETREIVE_DEBUG_DATA: /* Non-rep, memory IO. */
209 {
210 if (pHv->uPartFlags & GIM_HV_PART_FLAGS_DEBUGGING)
211 {
212 rc = gimHvReadSlowHypercallParamsInOut(pVM, pCtx, fIs64BitMode, &rcHv);
213 if ( RT_SUCCESS(rc)
214 && rcHv == GIM_HV_STATUS_SUCCESS)
215 {
216 LogRelMax(1, ("GIM: HyperV: Initiated debug data reception via hypercall\n"));
217 rc = gimR3HvHypercallRetrieveDebugData(pVM, &rcHv);
218 if (RT_FAILURE(rc))
219 LogRelMax(10, ("GIM: HyperV: gimR3HvHypercallRetrieveDebugData failed. rc=%Rrc\n", rc));
220 }
221 }
222 else
223 rcHv = GIM_HV_STATUS_ACCESS_DENIED;
224 break;
225 }
226
227 case GIM_HV_HYPERCALL_OP_POST_DEBUG_DATA: /* Non-rep, memory IO. */
228 {
229 if (pHv->uPartFlags & GIM_HV_PART_FLAGS_DEBUGGING)
230 {
231 rc = gimHvReadSlowHypercallParamsInOut(pVM, pCtx, fIs64BitMode, &rcHv);
232 if ( RT_SUCCESS(rc)
233 && rcHv == GIM_HV_STATUS_SUCCESS)
234 {
235 LogRelMax(1, ("GIM: HyperV: Initiated debug data transmission via hypercall\n"));
236 rc = gimR3HvHypercallPostDebugData(pVM, &rcHv);
237 if (RT_FAILURE(rc))
238 LogRelMax(10, ("GIM: HyperV: gimR3HvHypercallPostDebugData failed. rc=%Rrc\n", rc));
239 }
240 }
241 else
242 rcHv = GIM_HV_STATUS_ACCESS_DENIED;
243 break;
244 }
245
246 case GIM_HV_HYPERCALL_OP_RESET_DEBUG_SESSION: /* Non-rep, fast (register IO). */
247 {
248 if (pHv->uPartFlags & GIM_HV_PART_FLAGS_DEBUGGING)
249 {
250 uint32_t fFlags = 0;
251 if (!fHyperFast)
252 {
253 rc = gimHvReadSlowHypercallParam(pVM, pCtx, fIs64BitMode, GIMHVHYPERCALLPARAM_IN, &rcHv);
254 if ( RT_SUCCESS(rc)
255 && rcHv == GIM_HV_STATUS_SUCCESS)
256 {
257 PGIMHVDEBUGRESETIN pIn = (PGIMHVDEBUGRESETIN)pHv->pbHypercallIn;
258 fFlags = pIn->fFlags;
259 }
260 }
261 else
262 {
263 rcHv = GIM_HV_STATUS_SUCCESS;
264 fFlags = fIs64BitMode ? pCtx->rdx : pCtx->ebx;
265 }
266
267 /*
268 * Nothing to flush on the sending side as we don't maintain our own buffers.
269 */
270 /** @todo We should probably ask the debug receive thread to flush its buffer. */
271 if (rcHv == GIM_HV_STATUS_SUCCESS)
272 {
273 if (fFlags)
274 LogRel(("GIM: HyperV: Resetting debug session via hypercall\n"));
275 else
276 rcHv = GIM_HV_STATUS_INVALID_PARAMETER;
277 }
278 }
279 else
280 rcHv = GIM_HV_STATUS_ACCESS_DENIED;
281 break;
282 }
283
284 case GIM_HV_HYPERCALL_OP_POST_MESSAGE: /* Non-rep, memory IO. */
285 {
286 if (pHv->fIsInterfaceVs)
287 {
288 rc = gimHvReadSlowHypercallParam(pVM, pCtx, fIs64BitMode, GIMHVHYPERCALLPARAM_IN, &rcHv);
289 if ( RT_SUCCESS(rc)
290 && rcHv == GIM_HV_STATUS_SUCCESS)
291 {
292 PGIMHVPOSTMESSAGEIN pMsgIn = (PGIMHVPOSTMESSAGEIN)pHv->pbHypercallIn;
293 PCGIMHVCPU pHvCpu = &pVCpu->gim.s.u.HvCpu;
294 if ( pMsgIn->uConnectionId == GIM_HV_VMBUS_MSG_CONNECTION_ID
295 && pMsgIn->enmMessageType == GIMHVMSGTYPE_VMBUS
296 && !MSR_GIM_HV_SINT_IS_MASKED(pHvCpu->auSintMsrs[GIM_HV_VMBUS_MSG_SINT])
297 && MSR_GIM_HV_SIMP_IS_ENABLED(pHvCpu->uSimpMsr))
298 {
299 RTGCPHYS GCPhysSimp = MSR_GIM_HV_SIMP_GPA(pHvCpu->uSimpMsr);
300 if (PGMPhysIsGCPhysNormal(pVM, GCPhysSimp))
301 {
302 /*
303 * The VMBus client (guest) expects to see 1 at offset 0, and 0xf at offsets 4 and 16.
304 */
305 GIMHVMSG HvMsg;
306 RT_ZERO(HvMsg);
307 HvMsg.MsgHdr.enmMessageType = GIMHVMSGTYPE_VMBUS;
308 HvMsg.MsgHdr.cbPayload = 0xf;
309 HvMsg.aPayload[0] = 0xf;
310 uint16_t const offMsg = GIM_HV_VMBUS_MSG_SINT * sizeof(GIMHVMSG);
311 int rc2 = PGMPhysSimpleWriteGCPhys(pVM, GCPhysSimp + offMsg, &HvMsg, sizeof(HvMsg));
312 if (RT_SUCCESS(rc2))
313 LogRel(("GIM: HyperV: SIMP hypercall faking message at %#RGp:%u\n", GCPhysSimp, offMsg));
314 else
315 {
316 LogRel(("GIM: HyperV: Failed to write SIMP message at %#RGp:%u, rc=%Rrc\n", GCPhysSimp,
317 offMsg, rc2));
318 }
319 }
320 }
321
322 /*
323 * Make the call fail after updating the SIMP, so the guest can go back to using
324 * the Hyper-V debug MSR interface. With any error code below GIM_HV_STATUS_NOT_ACKNOWLEDGED,
325 * the guest tries to proceed with initializing VMBus, which is entirely unnecessary for what
326 * we're trying to accomplish, i.e. convincing the guest to use Hyper-V debugging. Also,
327 * we don't implement other VMBus/SynIC functionality, so the guest would #GP and die.
328 */
329 rcHv = GIM_HV_STATUS_NOT_ACKNOWLEDGED;
330 }
331 else
332 rcHv = GIM_HV_STATUS_INVALID_PARAMETER;
333 }
334 else
335 rcHv = GIM_HV_STATUS_ACCESS_DENIED;
336 break;
337 }
338
339 case GIM_HV_EXT_HYPERCALL_OP_QUERY_CAP: /* Non-rep, extended hypercall. */
340 {
341 if (pHv->uPartFlags & GIM_HV_PART_FLAGS_EXTENDED_HYPERCALLS)
342 {
343 rc = gimHvReadSlowHypercallParam(pVM, pCtx, fIs64BitMode, GIMHVHYPERCALLPARAM_OUT, &rcHv);
344 if ( RT_SUCCESS(rc)
345 && rcHv == GIM_HV_STATUS_SUCCESS)
346 {
347 rc = gimR3HvHypercallExtQueryCap(pVM, &rcHv);
348 }
349 }
350 else
351 {
352 LogRel(("GIM: HyperV: Denied HvExtCallQueryCapabilities when the feature is not exposed\n"));
353 rcHv = GIM_HV_STATUS_ACCESS_DENIED;
354 }
355 break;
356 }
357
358 case GIM_HV_EXT_HYPERCALL_OP_GET_BOOT_ZEROED_MEM: /* Non-rep, extended hypercall. */
359 {
360 if (pHv->uPartFlags & GIM_HV_PART_FLAGS_EXTENDED_HYPERCALLS)
361 {
362 rc = gimHvReadSlowHypercallParam(pVM, pCtx, fIs64BitMode, GIMHVHYPERCALLPARAM_OUT, &rcHv);
363 if ( RT_SUCCESS(rc)
364 && rcHv == GIM_HV_STATUS_SUCCESS)
365 {
366 rc = gimR3HvHypercallExtGetBootZeroedMem(pVM, &rcHv);
367 }
368 }
369 else
370 {
371 LogRel(("GIM: HyperV: Denied HvExtCallGetBootZeroedMemory when the feature is not exposed\n"));
372 rcHv = GIM_HV_STATUS_ACCESS_DENIED;
373 }
374 break;
375 }
376
377 default:
378 {
379 LogRel(("GIM: HyperV: Unknown/invalid hypercall opcode %#x (%u)\n", uHyperOp, uHyperOp));
380 rcHv = GIM_HV_STATUS_INVALID_HYPERCALL_CODE;
381 break;
382 }
383 }
384 }
385 else
386 rcHv = GIM_HV_STATUS_INVALID_HYPERCALL_INPUT;
387
388 /*
389 * Update the guest with results of the hypercall.
390 */
391 if (RT_SUCCESS(rc))
392 {
393 if (fIs64BitMode)
394 pCtx->rax = (cHyperRepsDone << 32) | rcHv;
395 else
396 {
397 pCtx->edx = cHyperRepsDone;
398 pCtx->eax = rcHv;
399 }
400 }
401
402 return rc;
403#endif
404}
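/*
 * Note: The hypercall result value written back above encodes the Hyper-V status code in the
 * low bits and the number of completed reps shifted into the upper dword. For 64-bit guests
 * it is returned in RAX; for 32-bit guests it is split across EDX:EAX, mirroring how the
 * input value was read from RCX or EDX:EAX respectively.
 */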
405
406
407/**
408 * Returns a pointer to the MMIO2 regions supported by Hyper-V.
409 *
410 * @returns Pointer to an array of MMIO2 regions.
411 * @param pVM The cross context VM structure.
412 * @param pcRegions Where to store the number of regions in the array.
413 */
414VMM_INT_DECL(PGIMMMIO2REGION) gimHvGetMmio2Regions(PVM pVM, uint32_t *pcRegions)
415{
416 Assert(GIMIsEnabled(pVM));
417 PGIMHV pHv = &pVM->gim.s.u.Hv;
418
419 AssertCompile(RT_ELEMENTS(pHv->aMmio2Regions) <= 8);
420 *pcRegions = RT_ELEMENTS(pHv->aMmio2Regions);
421 return pHv->aMmio2Regions;
422}
423
424
425/**
426 * Returns whether the guest has configured and enabled the use of Hyper-V's
427 * hypercall interface.
428 *
429 * @returns true if hypercalls are enabled, false otherwise.
430 * @param pVM The cross context VM structure.
431 */
432VMM_INT_DECL(bool) gimHvAreHypercallsEnabled(PCVM pVM)
433{
434 return RT_BOOL(pVM->gim.s.u.Hv.u64GuestOsIdMsr != 0);
435}
436
437
438/**
439 * Returns whether the guest has configured and enabled the use of Hyper-V's
440 * paravirtualized TSC.
441 *
442 * @returns true if paravirt. TSC is enabled, false otherwise.
443 * @param pVM The cross context VM structure.
444 */
445VMM_INT_DECL(bool) gimHvIsParavirtTscEnabled(PVM pVM)
446{
447 return MSR_GIM_HV_REF_TSC_IS_ENABLED(pVM->gim.s.u.Hv.u64TscPageMsr);
448}
449
450
451#ifdef IN_RING3
452/**
453 * Gets the descriptive OS ID variant as identified via the
454 * MSR_GIM_HV_GUEST_OS_ID MSR.
455 *
456 * @returns The name.
457 * @param uGuestOsIdMsr The MSR_GIM_HV_GUEST_OS_ID MSR.
458 */
459static const char *gimHvGetGuestOsIdVariantName(uint64_t uGuestOsIdMsr)
460{
461 /* Refer the Hyper-V spec, section 3.6 "Reporting the Guest OS Identity". */
462 uint32_t uVendor = MSR_GIM_HV_GUEST_OS_ID_VENDOR(uGuestOsIdMsr);
463 if (uVendor == 1 /* Microsoft */)
464 {
465 uint32_t uOsVariant = MSR_GIM_HV_GUEST_OS_ID_OS_VARIANT(uGuestOsIdMsr);
466 switch (uOsVariant)
467 {
468 case 0: return "Undefined";
469 case 1: return "MS-DOS";
470 case 2: return "Windows 3.x";
471 case 3: return "Windows 9x";
472 case 4: return "Windows NT or derivative";
473 case 5: return "Windows CE";
474 default: return "Unknown";
475 }
476 }
477 return "Unknown";
478}
479#endif
480
481/**
482 * Gets the time reference count for the current VM.
483 *
484 * @returns The time reference count.
485 * @param pVCpu The cross context virtual CPU structure.
486 */
487DECLINLINE(uint64_t) gimHvGetTimeRefCount(PVMCPUCC pVCpu)
488{
489 /* Hyper-V reports the time in 100 ns units (10 MHz). */
490 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
491 PCGIMHV pHv = &pVCpu->CTX_SUFF(pVM)->gim.s.u.Hv;
492 uint64_t const u64Tsc = TMCpuTickGet(pVCpu); /** @todo should we be passing VCPU0 always? */
493 uint64_t const u64TscHz = pHv->cTscTicksPerSecond;
494 uint64_t const u64Tsc100NS = u64TscHz / UINT64_C(10000000); /* 100 ns */
495 uint64_t const uTimeRefCount = (u64Tsc / u64Tsc100NS);
496 return uTimeRefCount;
497}
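/*
 * Worked example (hypothetical numbers): with cTscTicksPerSecond = 3,000,000,000 (a 3 GHz
 * virtual TSC), u64Tsc100NS works out to 300 TSC ticks per 100 ns interval. A raw TSC
 * reading of 3,000,000,000 then yields a reference count of 10,000,000, i.e. exactly one
 * second expressed in 100 ns units.
 */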
498
499
500/**
501 * Starts the synthetic timer.
502 *
503 * @param pVCpu The cross context virtual CPU structure.
504 * @param pHvStimer Pointer to the Hyper-V synthetic timer.
505 *
506 * @remarks Caller needs to hold the timer critical section.
507 * @thread Any.
508 */
509VMM_INT_DECL(void) gimHvStartStimer(PVMCPUCC pVCpu, PCGIMHVSTIMER pHvStimer)
510{
511 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
512 TMTIMERHANDLE hTimer = pHvStimer->hTimer;
513 Assert(TMTimerIsLockOwner(pVM, hTimer));
514
515 uint64_t const uTimerCount = pHvStimer->uStimerCountMsr;
516 if (uTimerCount)
517 {
518 uint64_t const uTimerCountNS = uTimerCount * 100;
519
520 /* For periodic timers, 'uTimerCountNS' represents the relative interval. */
521 if (MSR_GIM_HV_STIMER_IS_PERIODIC(pHvStimer->uStimerConfigMsr))
522 {
523 TMTimerSetNano(pVM, hTimer, uTimerCountNS);
524 LogFlow(("GIM%u: HyperV: Started relative periodic STIMER%u with uTimerCountNS=%RU64\n", pVCpu->idCpu,
525 pHvStimer->idxStimer, uTimerCountNS));
526 }
527 else
528 {
529 /* For one-shot timers, 'uTimerCountNS' represents an absolute expiration with respect to the
530 Hyper-V reference time; we convert it to a relative time and program the timer. */
531 uint64_t const uCurRefTimeNS = gimHvGetTimeRefCount(pVCpu) * 100;
532 if (uTimerCountNS > uCurRefTimeNS)
533 {
534 uint64_t const uRelativeNS = uTimerCountNS - uCurRefTimeNS;
535 TMTimerSetNano(pVM, hTimer, uRelativeNS);
536 LogFlow(("GIM%u: HyperV: Started one-shot relative STIMER%u with uRelativeNS=%RU64\n", pVCpu->idCpu,
537 pHvStimer->idxStimer, uRelativeNS));
538 }
539 }
540 /** @todo frequency hinting? */
541 }
542}
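/*
 * Worked example (hypothetical numbers): a one-shot STIMER count of 50,000,000 is an absolute
 * expiration of 5 seconds (in 100 ns units) on the Hyper-V reference clock. If the current
 * reference time corresponds to 2 seconds, the code above programs the TM timer with a
 * relative expiration of 3,000,000,000 ns; if the deadline already lies in the past, no
 * timer is armed at all.
 */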
543
544
545/**
546 * Stops the synthetic timer for the given VCPU.
547 *
548 * @param pVCpu The cross context virtual CPU structure.
549 * @param pHvStimer Pointer to the Hyper-V synthetic timer.
550 *
551 * @remarks Caller needs to hold the timer critical section.
552 * @thread EMT(pVCpu).
553 */
554static void gimHvStopStimer(PVMCPUCC pVCpu, PGIMHVSTIMER pHvStimer)
555{
556 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
557 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
558
559 TMTIMERHANDLE hTimer = pHvStimer->hTimer;
560 Assert(TMTimerIsLockOwner(pVM, hTimer));
561
562 if (TMTimerIsActive(pVM, hTimer))
563 TMTimerStop(pVM, hTimer);
564}
565
566
567/**
568 * MSR read handler for Hyper-V.
569 *
570 * @returns Strict VBox status code like CPUMQueryGuestMsr().
571 * @retval VINF_CPUM_R3_MSR_READ
572 * @retval VERR_CPUM_RAISE_GP_0
573 *
574 * @param pVCpu The cross context virtual CPU structure.
575 * @param idMsr The MSR being read.
576 * @param pRange The range this MSR belongs to.
577 * @param puValue Where to store the MSR value read.
578 *
579 * @thread EMT.
580 */
581VMM_INT_DECL(VBOXSTRICTRC) gimHvReadMsr(PVMCPUCC pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
582{
583 NOREF(pRange);
584 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
585 PCGIMHV pHv = &pVM->gim.s.u.Hv;
586
587 switch (idMsr)
588 {
589 case MSR_GIM_HV_TIME_REF_COUNT:
590 *puValue = gimHvGetTimeRefCount(pVCpu);
591 return VINF_SUCCESS;
592
593 case MSR_GIM_HV_VP_INDEX:
594 *puValue = pVCpu->idCpu;
595 return VINF_SUCCESS;
596
597 case MSR_GIM_HV_TPR:
598 *puValue = APICHvGetTpr(pVCpu);
599 return VINF_SUCCESS;
600
601 case MSR_GIM_HV_ICR:
602 *puValue = APICHvGetIcr(pVCpu);
603 return VINF_SUCCESS;
604
605 case MSR_GIM_HV_GUEST_OS_ID:
606 *puValue = pHv->u64GuestOsIdMsr;
607 return VINF_SUCCESS;
608
609 case MSR_GIM_HV_HYPERCALL:
610 *puValue = pHv->u64HypercallMsr;
611 return VINF_SUCCESS;
612
613 case MSR_GIM_HV_REF_TSC:
614 *puValue = pHv->u64TscPageMsr;
615 return VINF_SUCCESS;
616
617 case MSR_GIM_HV_TSC_FREQ:
618 *puValue = TMCpuTicksPerSecond(pVM);
619 return VINF_SUCCESS;
620
621 case MSR_GIM_HV_APIC_FREQ:
622 {
623 int rc = APICGetTimerFreq(pVM, puValue);
624 if (RT_FAILURE(rc))
625 return VERR_CPUM_RAISE_GP_0;
626 return VINF_SUCCESS;
627 }
628
629 case MSR_GIM_HV_SYNTH_DEBUG_STATUS:
630 *puValue = pHv->uDbgStatusMsr;
631 return VINF_SUCCESS;
632
633 case MSR_GIM_HV_SINT0: case MSR_GIM_HV_SINT1: case MSR_GIM_HV_SINT2: case MSR_GIM_HV_SINT3:
634 case MSR_GIM_HV_SINT4: case MSR_GIM_HV_SINT5: case MSR_GIM_HV_SINT6: case MSR_GIM_HV_SINT7:
635 case MSR_GIM_HV_SINT8: case MSR_GIM_HV_SINT9: case MSR_GIM_HV_SINT10: case MSR_GIM_HV_SINT11:
636 case MSR_GIM_HV_SINT12: case MSR_GIM_HV_SINT13: case MSR_GIM_HV_SINT14: case MSR_GIM_HV_SINT15:
637 {
638 PGIMHVCPU pHvCpu = &pVCpu->gim.s.u.HvCpu;
639 *puValue = pHvCpu->auSintMsrs[idMsr - MSR_GIM_HV_SINT0];
640 return VINF_SUCCESS;
641 }
642
643 case MSR_GIM_HV_STIMER0_CONFIG:
644 case MSR_GIM_HV_STIMER1_CONFIG:
645 case MSR_GIM_HV_STIMER2_CONFIG:
646 case MSR_GIM_HV_STIMER3_CONFIG:
647 {
648 PGIMHVCPU pHvCpu = &pVCpu->gim.s.u.HvCpu;
649 uint8_t const idxStimer = (idMsr - MSR_GIM_HV_STIMER0_CONFIG) >> 1;
650 PCGIMHVSTIMER pcHvStimer = &pHvCpu->aStimers[idxStimer];
651 *puValue = pcHvStimer->uStimerConfigMsr;
652 return VINF_SUCCESS;
653 }
654
655 case MSR_GIM_HV_STIMER0_COUNT:
656 case MSR_GIM_HV_STIMER1_COUNT:
657 case MSR_GIM_HV_STIMER2_COUNT:
658 case MSR_GIM_HV_STIMER3_COUNT:
659 {
660 PGIMHVCPU pHvCpu = &pVCpu->gim.s.u.HvCpu;
661 uint8_t const idxStimer = (idMsr - MSR_GIM_HV_STIMER0_COUNT) >> 1;
662 PCGIMHVSTIMER pcHvStimer = &pHvCpu->aStimers[idxStimer];
663 *puValue = pcHvStimer->uStimerCountMsr;
664 return VINF_SUCCESS;
665 }
666
667 case MSR_GIM_HV_EOM:
668 {
669 *puValue = 0;
670 return VINF_SUCCESS;
671 }
672
673 case MSR_GIM_HV_SCONTROL:
674 {
675 PGIMHVCPU pHvCpu = &pVCpu->gim.s.u.HvCpu;
676 *puValue = pHvCpu->uSControlMsr;
677 return VINF_SUCCESS;
678 }
679
680 case MSR_GIM_HV_SIMP:
681 {
682 PGIMHVCPU pHvCpu = &pVCpu->gim.s.u.HvCpu;
683 *puValue = pHvCpu->uSimpMsr;
684 return VINF_SUCCESS;
685 }
686
687 case MSR_GIM_HV_SVERSION:
688 *puValue = GIM_HV_SVERSION;
689 return VINF_SUCCESS;
690
691 case MSR_GIM_HV_RESET:
692 *puValue = 0;
693 return VINF_SUCCESS;
694
695 case MSR_GIM_HV_CRASH_CTL:
696 *puValue = pHv->uCrashCtlMsr;
697 return VINF_SUCCESS;
698
699 case MSR_GIM_HV_CRASH_P0: *puValue = pHv->uCrashP0Msr; return VINF_SUCCESS;
700 case MSR_GIM_HV_CRASH_P1: *puValue = pHv->uCrashP1Msr; return VINF_SUCCESS;
701 case MSR_GIM_HV_CRASH_P2: *puValue = pHv->uCrashP2Msr; return VINF_SUCCESS;
702 case MSR_GIM_HV_CRASH_P3: *puValue = pHv->uCrashP3Msr; return VINF_SUCCESS;
703 case MSR_GIM_HV_CRASH_P4: *puValue = pHv->uCrashP4Msr; return VINF_SUCCESS;
704
705 case MSR_GIM_HV_DEBUG_OPTIONS_MSR:
706 {
707 if (pHv->fIsVendorMsHv)
708 {
709#ifndef IN_RING3
710 return VINF_CPUM_R3_MSR_READ;
711#else
712 LogRelMax(1, ("GIM: HyperV: Guest querying debug options, suggesting %s interface\n",
713 pHv->fDbgHypercallInterface ? "hypercall" : "MSR"));
714 *puValue = pHv->fDbgHypercallInterface ? GIM_HV_DEBUG_OPTIONS_USE_HYPERCALLS : 0;
715 return VINF_SUCCESS;
716#endif
717 }
718 break;
719 }
720
721 /* Write-only MSRs: */
722 case MSR_GIM_HV_EOI:
723 /* Reserved/unknown MSRs: */
724 default:
725 {
726#ifdef IN_RING3
727 static uint32_t s_cTimes = 0;
728 if (s_cTimes++ < 20)
729 LogRel(("GIM: HyperV: Unknown/invalid RdMsr (%#x) -> #GP(0)\n", idMsr));
730 LogFunc(("Unknown/invalid RdMsr (%#RX32) -> #GP(0)\n", idMsr));
731 break;
732#else
733 return VINF_CPUM_R3_MSR_READ;
734#endif
735 }
736 }
737
738 return VERR_CPUM_RAISE_GP_0;
739}
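/*
 * Note: In non-ring-3 contexts the handler above defers anything that needs ring-3 state
 * (e.g. the debug options MSR) by returning VINF_CPUM_R3_MSR_READ, and unknown MSRs are
 * likewise punted to ring-3 where they are logged before raising #GP(0). Everything else is
 * satisfied directly from the shared GIM Hyper-V state.
 */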
740
741
742/**
743 * MSR write handler for Hyper-V.
744 *
745 * @returns Strict VBox status code like CPUMSetGuestMsr().
746 * @retval VINF_CPUM_R3_MSR_WRITE
747 * @retval VERR_CPUM_RAISE_GP_0
748 *
749 * @param pVCpu The cross context virtual CPU structure.
750 * @param idMsr The MSR being written.
751 * @param pRange The range this MSR belongs to.
752 * @param uRawValue The raw value with the ignored bits not masked.
753 *
754 * @thread EMT.
755 */
756VMM_INT_DECL(VBOXSTRICTRC) gimHvWriteMsr(PVMCPUCC pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uRawValue)
757{
758 NOREF(pRange);
759 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
760 PGIMHV pHv = &pVM->gim.s.u.Hv;
761
762 switch (idMsr)
763 {
764 case MSR_GIM_HV_TPR:
765 return APICHvSetTpr(pVCpu, uRawValue);
766
767 case MSR_GIM_HV_EOI:
768 return APICHvSetEoi(pVCpu, uRawValue);
769
770 case MSR_GIM_HV_ICR:
771 return APICHvSetIcr(pVCpu, uRawValue);
772
773 case MSR_GIM_HV_GUEST_OS_ID:
774 {
775#ifndef IN_RING3
776 return VINF_CPUM_R3_MSR_WRITE;
777#else
778 /* Disable the hypercall-page and hypercalls if 0 is written to this MSR. */
779 if (!uRawValue)
780 {
781 if (MSR_GIM_HV_HYPERCALL_PAGE_IS_ENABLED(pHv->u64HypercallMsr))
782 {
783 gimR3HvDisableHypercallPage(pVM);
784 pHv->u64HypercallMsr &= ~MSR_GIM_HV_HYPERCALL_PAGE_ENABLE;
785 LogRel(("GIM: HyperV: Hypercall page disabled via Guest OS ID MSR\n"));
786 }
787 }
788 else
789 {
790 LogRel(("GIM: HyperV: Guest OS reported ID %#RX64\n", uRawValue));
791 LogRel(("GIM: HyperV: Open-source=%RTbool Vendor=%#x OS=%#x (%s) Major=%u Minor=%u ServicePack=%u Build=%u\n",
792 MSR_GIM_HV_GUEST_OS_ID_IS_OPENSOURCE(uRawValue), MSR_GIM_HV_GUEST_OS_ID_VENDOR(uRawValue),
793 MSR_GIM_HV_GUEST_OS_ID_OS_VARIANT(uRawValue), gimHvGetGuestOsIdVariantName(uRawValue),
794 MSR_GIM_HV_GUEST_OS_ID_MAJOR_VERSION(uRawValue), MSR_GIM_HV_GUEST_OS_ID_MINOR_VERSION(uRawValue),
795 MSR_GIM_HV_GUEST_OS_ID_SERVICE_VERSION(uRawValue), MSR_GIM_HV_GUEST_OS_ID_BUILD(uRawValue)));
796
797 /* Update the CPUID leaf, see Hyper-V spec. "Microsoft Hypervisor CPUID Leaves". */
798 CPUMCPUIDLEAF HyperLeaf;
799 RT_ZERO(HyperLeaf);
800 HyperLeaf.uLeaf = UINT32_C(0x40000002);
801 HyperLeaf.uEax = MSR_GIM_HV_GUEST_OS_ID_BUILD(uRawValue);
802 HyperLeaf.uEbx = MSR_GIM_HV_GUEST_OS_ID_MINOR_VERSION(uRawValue)
803 | (MSR_GIM_HV_GUEST_OS_ID_MAJOR_VERSION(uRawValue) << 16);
804 HyperLeaf.uEcx = MSR_GIM_HV_GUEST_OS_ID_SERVICE_VERSION(uRawValue);
805 HyperLeaf.uEdx = MSR_GIM_HV_GUEST_OS_ID_SERVICE_VERSION(uRawValue)
806 | (MSR_GIM_HV_GUEST_OS_ID_BUILD(uRawValue) << 24);
807 int rc2 = CPUMR3CpuIdInsert(pVM, &HyperLeaf);
808 AssertRC(rc2);
809 }
810
811 pHv->u64GuestOsIdMsr = uRawValue;
812
813 /*
814 * Update EM on hypercall instruction enabled state.
815 */
816 if (uRawValue)
817 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
818 EMSetHypercallInstructionsEnabled(pVM->CTX_SUFF(apCpus)[idCpu], true);
819 else
820 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
821 EMSetHypercallInstructionsEnabled(pVM->CTX_SUFF(apCpus)[idCpu], false);
822
823 return VINF_SUCCESS;
824#endif /* IN_RING3 */
825 }
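 /*
 * Note: Since gimHvAreHypercallsEnabled() simply checks for a non-zero guest OS ID, writing 0
 * to this MSR effectively disables hypercalls as well, which is why the hypercall page is
 * torn down and the EM hypercall-instruction state is cleared for all VCPUs in that path.
 */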
826
827 case MSR_GIM_HV_HYPERCALL:
828 {
829#ifndef IN_RING3
830 return VINF_CPUM_R3_MSR_WRITE;
831#else
832 /** @todo There is/was a problem with hypercalls for FreeBSD 10.1 guests,
833 * see @bugref{7270#c116}. */
834 /* First, update all but the hypercall page enable bit. */
835 pHv->u64HypercallMsr = (uRawValue & ~MSR_GIM_HV_HYPERCALL_PAGE_ENABLE);
836
837 /* Hypercall page can only be enabled when the guest has enabled hypercalls. */
838 bool fEnable = MSR_GIM_HV_HYPERCALL_PAGE_IS_ENABLED(uRawValue);
839 if ( fEnable
840 && !gimHvAreHypercallsEnabled(pVM))
841 {
842 return VINF_SUCCESS;
843 }
844
845 /* Is the guest disabling the hypercall-page? Allow it regardless of the Guest-OS Id Msr. */
846 if (!fEnable)
847 {
848 gimR3HvDisableHypercallPage(pVM);
849 pHv->u64HypercallMsr = uRawValue;
850 return VINF_SUCCESS;
851 }
852
853 /* Enable the hypercall-page. */
854 RTGCPHYS GCPhysHypercallPage = MSR_GIM_HV_HYPERCALL_GUEST_PFN(uRawValue) << GUEST_PAGE_SHIFT;
855 int rc = gimR3HvEnableHypercallPage(pVM, GCPhysHypercallPage);
856 if (RT_SUCCESS(rc))
857 {
858 pHv->u64HypercallMsr = uRawValue;
859 return VINF_SUCCESS;
860 }
861
862 return VERR_CPUM_RAISE_GP_0;
863#endif
864 }
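 /*
 * Note: The ordering requirement above matches the comment in the code: the hypercall page
 * cannot be enabled until the guest has reported a non-zero OS ID via MSR_GIM_HV_GUEST_OS_ID
 * (see gimHvAreHypercallsEnabled), while disabling the page is always allowed. The page
 * address itself is a guest page frame number, converted to a physical address by shifting
 * with GUEST_PAGE_SHIFT.
 */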
865
866 case MSR_GIM_HV_REF_TSC:
867 {
868#ifndef IN_RING3
869 return VINF_CPUM_R3_MSR_WRITE;
870#else /* IN_RING3 */
871 /* First, update all but the TSC page enable bit. */
872 pHv->u64TscPageMsr = (uRawValue & ~MSR_GIM_HV_REF_TSC_ENABLE);
873
874 /* Is the guest disabling the TSC page? */
875 bool fEnable = MSR_GIM_HV_REF_TSC_IS_ENABLED(uRawValue);
876 if (!fEnable)
877 {
878 gimR3HvDisableTscPage(pVM);
879 pHv->u64TscPageMsr = uRawValue;
880 return VINF_SUCCESS;
881 }
882
883 /* Enable the TSC page. */
884 RTGCPHYS GCPhysTscPage = MSR_GIM_HV_REF_TSC_GUEST_PFN(uRawValue) << GUEST_PAGE_SHIFT;
885 int rc = gimR3HvEnableTscPage(pVM, GCPhysTscPage, false /* fUseThisTscSequence */, 0 /* uTscSequence */);
886 if (RT_SUCCESS(rc))
887 {
888 pHv->u64TscPageMsr = uRawValue;
889 return VINF_SUCCESS;
890 }
891
892 return VERR_CPUM_RAISE_GP_0;
893#endif /* IN_RING3 */
894 }
895
896 case MSR_GIM_HV_APIC_ASSIST_PAGE:
897 {
898#ifndef IN_RING3
899 return VINF_CPUM_R3_MSR_WRITE;
900#else /* IN_RING3 */
901 PGIMHVCPU pHvCpu = &pVCpu->gim.s.u.HvCpu;
902 pHvCpu->uApicAssistPageMsr = uRawValue;
903
904 if (MSR_GIM_HV_APICASSIST_PAGE_IS_ENABLED(uRawValue))
905 {
906 RTGCPHYS GCPhysApicAssistPage = MSR_GIM_HV_APICASSIST_GUEST_PFN(uRawValue) << GUEST_PAGE_SHIFT;
907 if (PGMPhysIsGCPhysNormal(pVM, GCPhysApicAssistPage))
908 {
909 int rc = gimR3HvEnableApicAssistPage(pVCpu, GCPhysApicAssistPage);
910 if (RT_SUCCESS(rc))
911 {
912 pHvCpu->uApicAssistPageMsr = uRawValue;
913 return VINF_SUCCESS;
914 }
915 }
916 else
917 {
918 LogRelMax(5, ("GIM%u: HyperV: APIC-assist page address %#RGp invalid!\n", pVCpu->idCpu,
919 GCPhysApicAssistPage));
920 }
921 }
922 else
923 gimR3HvDisableApicAssistPage(pVCpu);
924
925 return VERR_CPUM_RAISE_GP_0;
926#endif /* IN_RING3 */
927 }
928
929 case MSR_GIM_HV_RESET:
930 {
931#ifndef IN_RING3
932 return VINF_CPUM_R3_MSR_WRITE;
933#else
934 if (MSR_GIM_HV_RESET_IS_ENABLED(uRawValue))
935 {
936 LogRel(("GIM: HyperV: Reset initiated through MSR\n"));
937 int rc = PDMDevHlpVMReset(pVM->gim.s.pDevInsR3, PDMVMRESET_F_GIM);
938 AssertRC(rc); /* Note! Not allowed to return VINF_EM_RESET / VINF_EM_HALT here, so ignore them. */
939 }
940 /* else: Ignore writes to other bits. */
941 return VINF_SUCCESS;
942#endif /* IN_RING3 */
943 }
944
945 case MSR_GIM_HV_CRASH_CTL:
946 {
947#ifndef IN_RING3
948 return VINF_CPUM_R3_MSR_WRITE;
949#else
950 if (uRawValue & MSR_GIM_HV_CRASH_CTL_NOTIFY)
951 {
952 LogRel(("GIM: HyperV: Guest indicates a fatal condition! P0=%#RX64 P1=%#RX64 P2=%#RX64 P3=%#RX64 P4=%#RX64\n",
953 pHv->uCrashP0Msr, pHv->uCrashP1Msr, pHv->uCrashP2Msr, pHv->uCrashP3Msr, pHv->uCrashP4Msr));
954 DBGFR3ReportBugCheck(pVM, pVCpu, DBGFEVENT_BSOD_MSR, pHv->uCrashP0Msr, pHv->uCrashP1Msr,
955 pHv->uCrashP2Msr, pHv->uCrashP3Msr, pHv->uCrashP4Msr);
956 /* (Do not try pass VINF_EM_DBG_EVENT, doesn't work from here!) */
957 }
958 return VINF_SUCCESS;
959#endif
960 }
961
962 case MSR_GIM_HV_SYNTH_DEBUG_SEND_BUFFER:
963 {
964 if (!pHv->fDbgEnabled)
965 return VERR_CPUM_RAISE_GP_0;
966#ifndef IN_RING3
967 return VINF_CPUM_R3_MSR_WRITE;
968#else
969 RTGCPHYS GCPhysBuffer = (RTGCPHYS)uRawValue;
970 pHv->uDbgSendBufferMsr = GCPhysBuffer;
971 if (PGMPhysIsGCPhysNormal(pVM, GCPhysBuffer))
972 LogRel(("GIM: HyperV: Set up debug send buffer at %#RGp\n", GCPhysBuffer));
973 else
974 LogRel(("GIM: HyperV: Destroyed debug send buffer\n"));
975 pHv->uDbgSendBufferMsr = uRawValue;
976 return VINF_SUCCESS;
977#endif
978 }
979
980 case MSR_GIM_HV_SYNTH_DEBUG_RECEIVE_BUFFER:
981 {
982 if (!pHv->fDbgEnabled)
983 return VERR_CPUM_RAISE_GP_0;
984#ifndef IN_RING3
985 return VINF_CPUM_R3_MSR_WRITE;
986#else
987 RTGCPHYS GCPhysBuffer = (RTGCPHYS)uRawValue;
988 pHv->uDbgRecvBufferMsr = GCPhysBuffer;
989 if (PGMPhysIsGCPhysNormal(pVM, GCPhysBuffer))
990 LogRel(("GIM: HyperV: Set up debug receive buffer at %#RGp\n", GCPhysBuffer));
991 else
992 LogRel(("GIM: HyperV: Destroyed debug receive buffer\n"));
993 return VINF_SUCCESS;
994#endif
995 }
996
997 case MSR_GIM_HV_SYNTH_DEBUG_PENDING_BUFFER:
998 {
999 if (!pHv->fDbgEnabled)
1000 return VERR_CPUM_RAISE_GP_0;
1001#ifndef IN_RING3
1002 return VINF_CPUM_R3_MSR_WRITE;
1003#else
1004 RTGCPHYS GCPhysBuffer = (RTGCPHYS)uRawValue;
1005 pHv->uDbgPendingBufferMsr = GCPhysBuffer;
1006 if (PGMPhysIsGCPhysNormal(pVM, GCPhysBuffer))
1007 LogRel(("GIM: HyperV: Set up debug pending buffer at %#RGp\n", GCPhysBuffer));
1008 else
1009 LogRel(("GIM: HyperV: Destroyed debug pending buffer\n"));
1010 return VINF_SUCCESS;
1011#endif
1012 }
1013
1014 case MSR_GIM_HV_SYNTH_DEBUG_CONTROL:
1015 {
1016 if (!pHv->fDbgEnabled)
1017 return VERR_CPUM_RAISE_GP_0;
1018#ifndef IN_RING3
1019 return VINF_CPUM_R3_MSR_WRITE;
1020#else
1021 if ( MSR_GIM_HV_SYNTH_DEBUG_CONTROL_IS_WRITE(uRawValue)
1022 && MSR_GIM_HV_SYNTH_DEBUG_CONTROL_IS_READ(uRawValue))
1023 {
1024 LogRel(("GIM: HyperV: Requesting both read and write through debug control MSR -> #GP(0)\n"));
1025 return VERR_CPUM_RAISE_GP_0;
1026 }
1027
1028 if (MSR_GIM_HV_SYNTH_DEBUG_CONTROL_IS_WRITE(uRawValue))
1029 {
1030 uint32_t cbWrite = MSR_GIM_HV_SYNTH_DEBUG_CONTROL_W_LEN(uRawValue);
1031 if ( cbWrite > 0
1032 && cbWrite < GIM_HV_PAGE_SIZE)
1033 {
1034 if (PGMPhysIsGCPhysNormal(pVM, (RTGCPHYS)pHv->uDbgSendBufferMsr))
1035 {
1036 Assert(pHv->pvDbgBuffer);
1037 int rc = PGMPhysSimpleReadGCPhys(pVM, pHv->pvDbgBuffer, (RTGCPHYS)pHv->uDbgSendBufferMsr, cbWrite);
1038 if (RT_SUCCESS(rc))
1039 {
1040 LogRelMax(1, ("GIM: HyperV: Initiated debug data transmission via MSR\n"));
1041 uint32_t cbWritten = 0;
1042 rc = gimR3HvDebugWrite(pVM, pHv->pvDbgBuffer, cbWrite, &cbWritten, false /*fUdpPkt*/);
1043 if ( RT_SUCCESS(rc)
1044 && cbWrite == cbWritten)
1045 pHv->uDbgStatusMsr = MSR_GIM_HV_SYNTH_DEBUG_STATUS_W_SUCCESS;
1046 else
1047 pHv->uDbgStatusMsr = 0;
1048 }
1049 else
1050 LogRelMax(5, ("GIM: HyperV: Failed to read debug send buffer at %#RGp, rc=%Rrc\n",
1051 (RTGCPHYS)pHv->uDbgSendBufferMsr, rc));
1052 }
1053 else
1054 LogRelMax(5, ("GIM: HyperV: Debug send buffer address %#RGp invalid! Ignoring debug write!\n",
1055 (RTGCPHYS)pHv->uDbgSendBufferMsr));
1056 }
1057 else
1058 LogRelMax(5, ("GIM: HyperV: Invalid write size %u specified in MSR, ignoring debug write!\n",
1059 MSR_GIM_HV_SYNTH_DEBUG_CONTROL_W_LEN(uRawValue)));
1060 }
1061 else if (MSR_GIM_HV_SYNTH_DEBUG_CONTROL_IS_READ(uRawValue))
1062 {
1063 if (PGMPhysIsGCPhysNormal(pVM, (RTGCPHYS)pHv->uDbgRecvBufferMsr))
1064 {
1065 LogRelMax(1, ("GIM: HyperV: Initiated debug data reception via MSR\n"));
1066 uint32_t cbReallyRead;
1067 Assert(pHv->pvDbgBuffer);
1068 int rc = gimR3HvDebugRead(pVM, pHv->pvDbgBuffer, GIM_HV_PAGE_SIZE, GIM_HV_PAGE_SIZE,
1069 &cbReallyRead, 0, false /*fUdpPkt*/);
1070 if ( RT_SUCCESS(rc)
1071 && cbReallyRead > 0)
1072 {
1073 rc = PGMPhysSimpleWriteGCPhys(pVM, (RTGCPHYS)pHv->uDbgRecvBufferMsr, pHv->pvDbgBuffer, cbReallyRead);
1074 if (RT_SUCCESS(rc))
1075 {
1076 pHv->uDbgStatusMsr = ((uint16_t)cbReallyRead) << 16;
1077 pHv->uDbgStatusMsr |= MSR_GIM_HV_SYNTH_DEBUG_STATUS_R_SUCCESS;
1078 }
1079 else
1080 {
1081 pHv->uDbgStatusMsr = 0;
1082 LogRelMax(5, ("GIM: HyperV: PGMPhysSimpleWriteGCPhys failed. rc=%Rrc\n", rc));
1083 }
1084 }
1085 else
1086 pHv->uDbgStatusMsr = 0;
1087 }
1088 else
1089 {
1090 LogRelMax(5, ("GIM: HyperV: Debug receive buffer address %#RGp invalid! Ignoring debug read!\n",
1091 (RTGCPHYS)pHv->uDbgRecvBufferMsr));
1092 }
1093 }
1094 return VINF_SUCCESS;
1095#endif
1096 }
1097
1098 case MSR_GIM_HV_SINT0: case MSR_GIM_HV_SINT1: case MSR_GIM_HV_SINT2: case MSR_GIM_HV_SINT3:
1099 case MSR_GIM_HV_SINT4: case MSR_GIM_HV_SINT5: case MSR_GIM_HV_SINT6: case MSR_GIM_HV_SINT7:
1100 case MSR_GIM_HV_SINT8: case MSR_GIM_HV_SINT9: case MSR_GIM_HV_SINT10: case MSR_GIM_HV_SINT11:
1101 case MSR_GIM_HV_SINT12: case MSR_GIM_HV_SINT13: case MSR_GIM_HV_SINT14: case MSR_GIM_HV_SINT15:
1102 {
1103 uint8_t uVector = MSR_GIM_HV_SINT_GET_VECTOR(uRawValue);
1104 bool const fVMBusMsg = RT_BOOL(idMsr == GIM_HV_VMBUS_MSG_SINT);
1105 size_t const idxSintMsr = idMsr - MSR_GIM_HV_SINT0;
1106 const char *pszDesc = fVMBusMsg ? "VMBus Message" : "Generic";
1107 if (uVector < GIM_HV_SINT_VECTOR_VALID_MIN)
1108 {
1109 LogRel(("GIM%u: HyperV: Programmed an invalid vector in SINT%u (%s), uVector=%u -> #GP(0)\n", pVCpu->idCpu,
1110 idxSintMsr, pszDesc, uVector));
1111 return VERR_CPUM_RAISE_GP_0;
1112 }
1113
1114 PGIMHVCPU pHvCpu = &pVCpu->gim.s.u.HvCpu;
1115 pHvCpu->auSintMsrs[idxSintMsr] = uRawValue;
1116 if (fVMBusMsg)
1117 {
1118 if (MSR_GIM_HV_SINT_IS_MASKED(uRawValue))
1119 Log(("GIM%u: HyperV: Masked SINT%u (%s)\n", pVCpu->idCpu, idxSintMsr, pszDesc));
1120 else
1121 Log(("GIM%u: HyperV: Unmasked SINT%u (%s), uVector=%u\n", pVCpu->idCpu, idxSintMsr, pszDesc, uVector));
1122 }
1123 Log(("GIM%u: HyperV: Written SINT%u=%#RX64\n", pVCpu->idCpu, idxSintMsr, uRawValue));
1124 return VINF_SUCCESS;
1125 }
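 /*
 * Note: Each SINTx MSR holds the interrupt vector in its low bits along with a mask bit (see
 * MSR_GIM_HV_SINT_IS_MASKED). The code above only sanity-checks the vector against
 * GIM_HV_SINT_VECTOR_VALID_MIN and remembers the raw value per VCPU, logging extra detail
 * for the SINT used for VMBus messages.
 */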
1126
1127 case MSR_GIM_HV_SCONTROL:
1128 {
1129#ifndef IN_RING3
1130 /** @todo make this RZ later? */
1131 return VINF_CPUM_R3_MSR_WRITE;
1132#else
1133 PGIMHVCPU pHvCpu = &pVCpu->gim.s.u.HvCpu;
1134 pHvCpu->uSControlMsr = uRawValue;
1135 if (MSR_GIM_HV_SCONTROL_IS_ENABLED(uRawValue))
1136 LogRel(("GIM%u: HyperV: Synthetic interrupt control enabled\n", pVCpu->idCpu));
1137 else
1138 LogRel(("GIM%u: HyperV: Synthetic interrupt control disabled\n", pVCpu->idCpu));
1139 return VINF_SUCCESS;
1140#endif
1141 }
1142
1143 case MSR_GIM_HV_STIMER0_CONFIG:
1144 case MSR_GIM_HV_STIMER1_CONFIG:
1145 case MSR_GIM_HV_STIMER2_CONFIG:
1146 case MSR_GIM_HV_STIMER3_CONFIG:
1147 {
1148 PGIMHVCPU pHvCpu = &pVCpu->gim.s.u.HvCpu;
1149 uint8_t const idxStimer = (idMsr - MSR_GIM_HV_STIMER0_CONFIG) >> 1;
1150
1151 /* Validate the writable bits. */
1152 if (RT_LIKELY(!(uRawValue & ~MSR_GIM_HV_STIMER_RW_VALID)))
1153 {
1154 Assert(idxStimer < RT_ELEMENTS(pHvCpu->aStimers));
1155 PGIMHVSTIMER pHvStimer = &pHvCpu->aStimers[idxStimer];
1156
1157 /* Lock to prevent concurrent access from the timer callback. */
1158 int rc = TMTimerLock(pVM, pHvStimer->hTimer, VERR_IGNORED);
1159 if (rc == VINF_SUCCESS)
1160 {
1161 /* Update the MSR value. */
1162 pHvStimer->uStimerConfigMsr = uRawValue;
1163 Log(("GIM%u: HyperV: Set STIMER_CONFIG%u=%#RX64\n", pVCpu->idCpu, idxStimer, uRawValue));
1164
1165 /* Process the MSR bits. */
1166 if ( !MSR_GIM_HV_STIMER_GET_SINTX(uRawValue) /* Writing SINTx as 0 causes the timer to be disabled. */
1167 || !MSR_GIM_HV_STIMER_IS_ENABLED(uRawValue))
1168 {
1169 pHvStimer->uStimerConfigMsr &= ~MSR_GIM_HV_STIMER_ENABLE;
1170 gimHvStopStimer(pVCpu, pHvStimer);
1171 Log(("GIM%u: HyperV: Disabled STIMER_CONFIG%u\n", pVCpu->idCpu, idxStimer));
1172 }
1173 else if (MSR_GIM_HV_STIMER_IS_ENABLED(uRawValue))
1174 {
1175 /* Auto-enable implies writing to the STIMERx_COUNT MSR is what starts the timer. */
1176 if (!MSR_GIM_HV_STIMER_IS_AUTO_ENABLED(uRawValue))
1177 {
1178 if (!TMTimerIsActive(pVM, pHvStimer->hTimer))
1179 {
1180 gimHvStartStimer(pVCpu, pHvStimer);
1181 Log(("GIM%u: HyperV: Started STIMER%u\n", pVCpu->idCpu, idxStimer));
1182 }
1183 else
1184 {
1185 /*
1186 * Enabling a timer that's already enabled is undefined behaviour,
1187 * see Hyper-V spec. 15.3.1 "Synthetic Timer Configuration Register".
1188 *
1189 * Our implementation just re-starts the timer. Guests that conform to
1190 * the Hyper-V spec should not be doing this anyway.
1191 */
1192 AssertFailed();
1193 gimHvStopStimer(pVCpu, pHvStimer);
1194 gimHvStartStimer(pVCpu, pHvStimer);
1195 }
1196 }
1197 }
1198
1199 TMTimerUnlock(pVM, pHvStimer->hTimer);
1200 }
1201 return rc;
1202 }
1203#ifndef IN_RING3
1204 return VINF_CPUM_R3_MSR_WRITE;
1205#else
1206 LogRel(("GIM%u: HyperV: Setting reserved bits of STIMER%u MSR (uRawValue=%#RX64) -> #GP(0)\n", pVCpu->idCpu,
1207 idxStimer, uRawValue));
1208 return VERR_CPUM_RAISE_GP_0;
1209#endif
1210 }
1211
1212 case MSR_GIM_HV_STIMER0_COUNT:
1213 case MSR_GIM_HV_STIMER1_COUNT:
1214 case MSR_GIM_HV_STIMER2_COUNT:
1215 case MSR_GIM_HV_STIMER3_COUNT:
1216 {
1217 PGIMHVCPU pHvCpu = &pVCpu->gim.s.u.HvCpu;
1218 uint8_t const idxStimer = (idMsr - MSR_GIM_HV_STIMER0_CONFIG) >> 1;
1219 Assert(idxStimer < RT_ELEMENTS(pHvCpu->aStimers));
1220 PGIMHVSTIMER pHvStimer = &pHvCpu->aStimers[idxStimer];
1221 int const rcBusy = VINF_CPUM_R3_MSR_WRITE;
1222
1223 /*
1224 * Writing zero to this MSR disables the timer regardless of whether the auto-enable
1225 * flag is set in the config MSR corresponding to the timer.
1226 */
1227 if (!uRawValue)
1228 {
1229 gimHvStopStimer(pVCpu, pHvStimer);
1230 pHvStimer->uStimerCountMsr = 0;
1231 Log(("GIM%u: HyperV: Set STIMER_COUNT%u=%RU64, stopped timer\n", pVCpu->idCpu, idxStimer, uRawValue));
1232 return VINF_SUCCESS;
1233 }
1234
1235 /*
1236 * Concurrent writes to the config MSR can't happen as they are serialized by
1237 * being done on the same EMT as this write.
1238 */
1239 if (MSR_GIM_HV_STIMER_IS_AUTO_ENABLED(pHvStimer->uStimerConfigMsr))
1240 {
1241 int rc = TMTimerLock(pVM, pHvStimer->hTimer, rcBusy);
1242 if (rc == VINF_SUCCESS)
1243 {
1244 pHvStimer->uStimerCountMsr = uRawValue;
1245 gimHvStartStimer(pVCpu, pHvStimer);
1246 TMTimerUnlock(pVM, pHvStimer->hTimer);
1247 Log(("GIM%u: HyperV: Set STIMER_COUNT%u=%RU64 %RU64 msec, auto-started timer\n", pVCpu->idCpu, idxStimer,
1248 uRawValue, (uRawValue * 100) / RT_NS_1MS_64));
1249 }
1250 return rc;
1251 }
1252
1253 /* Simple update of the counter without any timer start/stop side-effects. */
1254 pHvStimer->uStimerCountMsr = uRawValue;
1255 Log(("GIM%u: HyperV: Set STIMER_COUNT%u=%RU64\n", pVCpu->idCpu, idxStimer, uRawValue));
1256 return VINF_SUCCESS;
1257 }
1258
1259 case MSR_GIM_HV_EOM:
1260 {
1261 /** @todo implement EOM. */
1262 Log(("GIM%u: HyperV: EOM\n", pVCpu->idCpu));
1263 return VINF_SUCCESS;
1264 }
1265
1266 case MSR_GIM_HV_SIEFP:
1267 {
1268#ifndef IN_RING3
1269 return VINF_CPUM_R3_MSR_WRITE;
1270#else
1271 PGIMHVCPU pHvCpu = &pVCpu->gim.s.u.HvCpu;
1272 pHvCpu->uSiefpMsr = uRawValue;
1273 if (MSR_GIM_HV_SIEF_PAGE_IS_ENABLED(uRawValue))
1274 {
1275 RTGCPHYS GCPhysSiefPage = MSR_GIM_HV_SIEF_GUEST_PFN(uRawValue) << GUEST_PAGE_SHIFT;
1276 if (PGMPhysIsGCPhysNormal(pVM, GCPhysSiefPage))
1277 {
1278 int rc = gimR3HvEnableSiefPage(pVCpu, GCPhysSiefPage);
1279 if (RT_SUCCESS(rc))
1280 {
1281 LogRel(("GIM%u: HyperV: Enabled synthetic interrupt event flags page at %#RGp\n", pVCpu->idCpu,
1282 GCPhysSiefPage));
1283 /** @todo SIEF setup. */
1284 return VINF_SUCCESS;
1285 }
1286 }
1287 else
1288 LogRelMax(5, ("GIM%u: HyperV: SIEF page address %#RGp invalid!\n", pVCpu->idCpu, GCPhysSiefPage));
1289 }
1290 else
1291 gimR3HvDisableSiefPage(pVCpu);
1292
1293 return VERR_CPUM_RAISE_GP_0;
1294#endif
1295 break;
1296 }
1297
1298 case MSR_GIM_HV_SIMP:
1299 {
1300#ifndef IN_RING3
1301 return VINF_CPUM_R3_MSR_WRITE;
1302#else
1303 PGIMHVCPU pHvCpu = &pVCpu->gim.s.u.HvCpu;
1304 pHvCpu->uSimpMsr = uRawValue;
1305 if (MSR_GIM_HV_SIMP_IS_ENABLED(uRawValue))
1306 {
1307 RTGCPHYS GCPhysSimp = MSR_GIM_HV_SIMP_GPA(uRawValue);
1308 if (PGMPhysIsGCPhysNormal(pVM, GCPhysSimp))
1309 {
1310 uint8_t abSimp[GIM_HV_PAGE_SIZE];
1311 RT_ZERO(abSimp);
1312 int rc2 = PGMPhysSimpleWriteGCPhys(pVM, GCPhysSimp, &abSimp[0], sizeof(abSimp));
1313 if (RT_SUCCESS(rc2))
1314 LogRel(("GIM%u: HyperV: Enabled synthetic interrupt message page at %#RGp\n", pVCpu->idCpu, GCPhysSimp));
1315 else
1316 {
1317 LogRel(("GIM%u: HyperV: Failed to update synthetic interrupt message page at %#RGp. uSimpMsr=%#RX64 rc=%Rrc\n",
1318 pVCpu->idCpu, GCPhysSimp, pHvCpu->uSimpMsr, rc2));
1319 return VERR_CPUM_RAISE_GP_0;
1320 }
1321 }
1322 else
1323 {
1324 LogRel(("GIM%u: HyperV: Enabled synthetic interrupt message page at invalid address %#RGp\n", pVCpu->idCpu,
1325 GCPhysSimp));
1326 }
1327 }
1328 else
1329 LogRel(("GIM%u: HyperV: Disabled synthetic interrupt message page\n", pVCpu->idCpu));
1330 return VINF_SUCCESS;
1331#endif
1332 }
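 /*
 * Note: We do not back the synthetic interrupt message page with a dedicated allocation;
 * enabling it above merely zeroes the guest page at the given address so that stale data is
 * not mistaken for pending messages. Messages are written into it on demand, e.g. by the
 * GIM_HV_HYPERCALL_OP_POST_MESSAGE handling in gimHvHypercall.
 */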
1333
1334 case MSR_GIM_HV_CRASH_P0: pHv->uCrashP0Msr = uRawValue; return VINF_SUCCESS;
1335 case MSR_GIM_HV_CRASH_P1: pHv->uCrashP1Msr = uRawValue; return VINF_SUCCESS;
1336 case MSR_GIM_HV_CRASH_P2: pHv->uCrashP2Msr = uRawValue; return VINF_SUCCESS;
1337 case MSR_GIM_HV_CRASH_P3: pHv->uCrashP3Msr = uRawValue; return VINF_SUCCESS;
1338 case MSR_GIM_HV_CRASH_P4: pHv->uCrashP4Msr = uRawValue; return VINF_SUCCESS;
1339
1340 case MSR_GIM_HV_TIME_REF_COUNT: /* Read-only MSRs. */
1341 case MSR_GIM_HV_VP_INDEX:
1342 case MSR_GIM_HV_TSC_FREQ:
1343 case MSR_GIM_HV_APIC_FREQ:
1344 LogFunc(("WrMsr on read-only MSR %#RX32 -> #GP(0)\n", idMsr));
1345 break;
1346
1347 case MSR_GIM_HV_DEBUG_OPTIONS_MSR:
1348 {
1349 if (pHv->fIsVendorMsHv)
1350 {
1351#ifndef IN_RING3
1352 return VINF_CPUM_R3_MSR_WRITE;
1353#else
1354 LogRelMax(5, ("GIM: HyperV: Write debug options MSR with %#RX64 ignored\n", uRawValue));
1355 return VINF_SUCCESS;
1356#endif
1357 }
1358 return VERR_CPUM_RAISE_GP_0;
1359 }
1360
1361 default:
1362 {
1363#ifdef IN_RING3
1364 static uint32_t s_cTimes = 0;
1365 if (s_cTimes++ < 20)
1366 LogRel(("GIM: HyperV: Unknown/invalid WrMsr (%#x,%#x`%08x) -> #GP(0)\n", idMsr,
1367 uRawValue & UINT64_C(0xffffffff00000000), uRawValue & UINT64_C(0xffffffff)));
1368 LogFunc(("Unknown/invalid WrMsr (%#RX32,%#RX64) -> #GP(0)\n", idMsr, uRawValue));
1369 break;
1370#else
1371 return VINF_CPUM_R3_MSR_WRITE;
1372#endif
1373 }
1374 }
1375
1376 return VERR_CPUM_RAISE_GP_0;
1377}
1378
1379
1380/**
1381 * Whether we need to trap \#UD exceptions in the guest.
1382 *
1383 * We only needed to trap \#UD exceptions for the old raw-mode guests when
1384 * hypercalls were enabled. For HM VMs, the hypercall is handled via the
1385 * VMCALL/VMMCALL VM-exit.
1386 *
1387 * @param pVCpu The cross context virtual CPU structure.
1388 */
1389VMM_INT_DECL(bool) gimHvShouldTrapXcptUD(PVMCPU pVCpu)
1390{
1391 RT_NOREF(pVCpu);
1392 return false;
1393}
1394
1395
1396/**
1397 * Checks the instruction and executes the hypercall if it's a valid hypercall
1398 * instruction.
1399 *
1400 * This interface is used by \#UD handlers and IEM.
1401 *
1402 * @returns Strict VBox status code.
1403 * @param pVCpu The cross context virtual CPU structure.
1404 * @param pCtx Pointer to the guest-CPU context.
1405 * @param uDisOpcode The disassembler opcode.
1406 * @param cbInstr The instruction length.
1407 *
1408 * @thread EMT(pVCpu).
1409 */
1410VMM_INT_DECL(VBOXSTRICTRC) gimHvHypercallEx(PVMCPUCC pVCpu, PCPUMCTX pCtx, unsigned uDisOpcode, uint8_t cbInstr)
1411{
1412 Assert(pVCpu);
1413 Assert(pCtx);
1414 VMCPU_ASSERT_EMT(pVCpu);
1415
1416 PVM pVM = pVCpu->CTX_SUFF(pVM);
1417 CPUMCPUVENDOR const enmGuestCpuVendor = (CPUMCPUVENDOR)pVM->cpum.ro.GuestFeatures.enmCpuVendor;
1418 if ( ( uDisOpcode == OP_VMCALL
1419 && ( enmGuestCpuVendor == CPUMCPUVENDOR_INTEL
1420 || enmGuestCpuVendor == CPUMCPUVENDOR_VIA
1421 || enmGuestCpuVendor == CPUMCPUVENDOR_SHANGHAI))
1422 || ( uDisOpcode == OP_VMMCALL
1423 && ( enmGuestCpuVendor == CPUMCPUVENDOR_AMD
1424 || enmGuestCpuVendor == CPUMCPUVENDOR_HYGON)) )
1425 return gimHvHypercall(pVCpu, pCtx);
1426
1427 RT_NOREF_PV(cbInstr);
1428 return VERR_GIM_INVALID_HYPERCALL_INSTR;
1429}
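/*
 * Note: The vendor check above accepts VMCALL on Intel-compatible guest CPUs (Intel, VIA,
 * Shanghai) and VMMCALL on AMD-compatible ones (AMD, Hygon); any other opcode/vendor
 * combination is rejected with VERR_GIM_INVALID_HYPERCALL_INSTR rather than being treated
 * as a hypercall.
 */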
1430
1431
1432/**
1433 * Exception handler for \#UD.
1434 *
1435 * @returns Strict VBox status code.
1436 * @retval VINF_SUCCESS if the hypercall succeeded (even if its operation
1437 * failed).
1438 * @retval VINF_GIM_R3_HYPERCALL re-start the hypercall from ring-3.
1439 * @retval VINF_GIM_HYPERCALL_CONTINUING continue hypercall without updating
1440 * RIP.
1441 * @retval VERR_GIM_HYPERCALL_ACCESS_DENIED CPL is insufficient.
1442 * @retval VERR_GIM_INVALID_HYPERCALL_INSTR instruction at RIP is not a valid
1443 * hypercall instruction.
1444 *
1445 * @param pVCpu The cross context virtual CPU structure.
1446 * @param pCtx Pointer to the guest-CPU context.
1447 * @param pDis Pointer to the disassembled instruction state at RIP.
1448 * Optional, can be NULL.
1449 * @param pcbInstr Where to store the instruction length of the hypercall
1450 * instruction. Optional, can be NULL.
1451 *
1452 * @thread EMT(pVCpu).
1453 */
1454VMM_INT_DECL(VBOXSTRICTRC) gimHvXcptUD(PVMCPUCC pVCpu, PCPUMCTX pCtx, PDISCPUSTATE pDis, uint8_t *pcbInstr)
1455{
1456 VMCPU_ASSERT_EMT(pVCpu);
1457
1458 /*
1459 * If we didn't ask for #UD to be trapped, bail.
1460 */
1461 if (!gimHvShouldTrapXcptUD(pVCpu))
1462 return VERR_GIM_IPE_1;
1463
1464 if (!pDis)
1465 {
1466 /*
1467 * Disassemble the instruction at RIP to figure out if it's the Intel VMCALL instruction
1468 * or the AMD VMMCALL instruction and if so, handle it as a hypercall.
1469 */
1470 unsigned cbInstr;
1471 DISCPUSTATE Dis;
1472 int rc = EMInterpretDisasCurrent(pVCpu->CTX_SUFF(pVM), pVCpu, &Dis, &cbInstr);
1473 if (RT_SUCCESS(rc))
1474 {
1475 if (pcbInstr)
1476 *pcbInstr = (uint8_t)cbInstr;
1477 return gimHvHypercallEx(pVCpu, pCtx, Dis.pCurInstr->uOpcode, Dis.cbInstr);
1478 }
1479
1480 Log(("GIM: HyperV: Failed to disassemble instruction at CS:RIP=%04x:%08RX64. rc=%Rrc\n", pCtx->cs.Sel, pCtx->rip, rc));
1481 return rc;
1482 }
1483
1484 return gimHvHypercallEx(pVCpu, pCtx, pDis->pCurInstr->uOpcode, pDis->cbInstr);
1485}
1486