Changeset 99734 in vbox for trunk/src/VBox/VMM/VMMAll
- Timestamp: May 10, 2023, 5:28:24 PM (19 months ago)
- File: 1 edited
Legend:
- Unmodified (no prefix)
- Added (prefixed with +)
- Removed (prefixed with -)
trunk/src/VBox/VMM/VMMAll/GICAll.cpp
r99578 → r99734

 *   Global Variables                                                                                                             *
 *********************************************************************************************************************************/
+
+/**
+ * Sets the interrupt pending force-flag and pokes the EMT if required.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   enmType     The IRQ type.
+ */
+static void gicSetInterruptFF(PVMCPUCC pVCpu, bool fIrq, bool fFiq)
+{
+    Assert(fIrq || fFiq);
+
+#ifdef IN_RING3
+    /* IRQ state should be loaded as-is by "LoadExec". Changes can be made from LoadDone. */
+    Assert(pVCpu->pVMR3->enmVMState != VMSTATE_LOADING || PDMR3HasLoadedState(pVCpu->pVMR3));
+#endif
+
+    if (fIrq)
+        VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ);
+    if (fFiq)
+        VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_FIQ);
+
+    /*
+     * We need to wake up the target CPU if we're not on EMT.
+     */
+    /** @todo We could just use RTThreadNativeSelf() here, couldn't we? */
+#if defined(IN_RING0)
+    PVMCC   pVM   = pVCpu->CTX_SUFF(pVM);
+    VMCPUID idCpu = pVCpu->idCpu;
+    if (VMMGetCpuId(pVM) != idCpu)
+    {
+        switch (VMCPU_GET_STATE(pVCpu))
+        {
+            case VMCPUSTATE_STARTED_EXEC:
+                Log7Func(("idCpu=%u VMCPUSTATE_STARTED_EXEC\n", idCpu));
+                GVMMR0SchedPokeNoGVMNoLock(pVM, idCpu);
+                break;
+
+            case VMCPUSTATE_STARTED_HALTED:
+                Log7Func(("idCpu=%u VMCPUSTATE_STARTED_HALTED\n", idCpu));
+                GVMMR0SchedWakeUpNoGVMNoLock(pVM, idCpu);
+                break;
+
+            default:
+                Log7Func(("idCpu=%u enmState=%d\n", idCpu, pVCpu->enmState));
+                break; /* nothing to do in other states. */
+        }
+    }
+#elif defined(IN_RING3)
+    PVMCC   pVM   = pVCpu->CTX_SUFF(pVM);
+    VMCPUID idCpu = pVCpu->idCpu;
+    if (VMMGetCpuId(pVM) != idCpu)
+    {
+        Log7Func(("idCpu=%u enmState=%d\n", idCpu, pVCpu->enmState));
+        VMR3NotifyCpuFFU(pVCpu->pUVCpu, VMNOTIFYFF_FLAGS_POKE);
+    }
+#endif
+}
+
+
+/**
+ * Clears the interrupt pending force-flag.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   fIrq        Flag whether to clear the IRQ flag.
+ * @param   fFiq        Flag whether to clear the FIQ flag.
+ */
+DECLINLINE(void) gicClearInterruptFF(PVMCPUCC pVCpu, bool fIrq, bool fFiq)
+{
+    Assert(fIrq || fFiq);
+
+#ifdef IN_RING3
+    /* IRQ state should be loaded as-is by "LoadExec". Changes can be made from LoadDone. */
+    Assert(pVCpu->pVMR3->enmVMState != VMSTATE_LOADING || PDMR3HasLoadedState(pVCpu->pVMR3));
+#endif
+
+    if (fIrq)
+        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_IRQ);
+    if (fFiq)
+        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_FIQ);
+}
+
+
+/**
+ * Updates the internal IRQ state and sets or clears the appropriate force action flags.
+ *
+ * @returns Strict VBox status code.
+ * @param   pThis       The GIC re-distributor state for the associated vCPU.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ */
+static VBOXSTRICTRC gicReDistUpdateIrqState(PGICCPU pThis, PVMCPUCC pVCpu)
+{
+    /* Read the interrupt state. */
+    uint32_t u32RegIGrp0  = ASMAtomicReadU32(&pThis->u32RegIGrp0);
+    uint32_t bmIntEnabled = ASMAtomicReadU32(&pThis->bmIntEnabled);
+    uint32_t bmIntPending = ASMAtomicReadU32(&pThis->bmIntPending);
+    uint32_t bmIntActive  = ASMAtomicReadU32(&pThis->bmIntActive);
+    bool fIrqGrp0Enabled  = ASMAtomicReadBool(&pThis->fIrqGrp0Enabled);
+    bool fIrqGrp1Enabled  = ASMAtomicReadBool(&pThis->fIrqGrp1Enabled);
+
+    /* Is anything enabled at all? */
+    uint32_t bmIntForward = (bmIntPending & bmIntEnabled) & ~bmIntActive; /* Exclude the currently active interrupt. */
+    if (bmIntForward)
+    {
+        /* Determine whether we have to assert the IRQ or FIQ line. */
+        bool fIrq = RT_BOOL(bmIntForward & u32RegIGrp0) && fIrqGrp1Enabled;
+        bool fFiq = RT_BOOL(bmIntForward & ~u32RegIGrp0) && fIrqGrp0Enabled;
+
+        if (fIrq || fFiq)
+            gicSetInterruptFF(pVCpu, fIrq, fFiq);
+
+        if (!fIrq || !fFiq)
+            gicClearInterruptFF(pVCpu, !fIrq, !fFiq);
+    }
+    else
+        gicClearInterruptFF(pVCpu, true /*fIrq*/, true /*fFiq*/);
+
+    return VINF_SUCCESS;
+}
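Editorial note (not part of the changeset): a small worked example of the decision logic in gicReDistUpdateIrqState above, using invented register values.

/* Illustrative walkthrough only, values invented:
 *   bmIntPending = 0x00000030, bmIntEnabled = 0x00000010, bmIntActive = 0x00000000,
 *   u32RegIGrp0  = 0x00000010 (INTID 4 configured as Group 1), both groups enabled.
 * Then bmIntForward = (0x30 & 0x10) & ~0x00 = 0x10, so:
 *   fIrq = RT_BOOL(0x10 &  0x10) && fIrqGrp1Enabled = true   -> VMCPU_FF_INTERRUPT_IRQ is set,
 *   fFiq = RT_BOOL(0x10 & ~0x10) && fIrqGrp0Enabled = false  -> VMCPU_FF_INTERRUPT_FIQ is cleared.
 * In other words, Group 1 interrupts are signalled on the IRQ line and Group 0 interrupts on the FIQ line. */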
+
+
+/**
+ * Sets the given SGI/PPI interrupt ID on the re-distributor of the given vCPU.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   uIntId      The SGI/PPI interrupt identifier.
+ * @param   fAsserted   Flag whether the SGI/PPI interrupt is asserted or not.
+ */
+static int gicReDistInterruptSet(PVMCPUCC pVCpu, uint32_t uIntId, bool fAsserted)
+{
+    PGICCPU pThis = VMCPU_TO_GICCPU(pVCpu);
+
+    /* Update the interrupts pending state. */
+    if (fAsserted)
+        ASMAtomicOrU32(&pThis->bmIntPending, RT_BIT_32(uIntId));
+    else
+        ASMAtomicAndU32(&pThis->bmIntPending, ~RT_BIT_32(uIntId));
+
+    return VBOXSTRICTRC_VAL(gicReDistUpdateIrqState(pThis, pVCpu));
+}
+
 /**
  * Reads a GIC distributor register.
…
 {
     case GIC_DIST_REG_TYPER_OFF:
-        *puValue = GIC_DIST_REG_TYPER_NUM_ITLINES_SET( 0) /** @todo 32 SPIs for now. */
+        *puValue = GIC_DIST_REG_TYPER_NUM_ITLINES_SET(1)  /** @todo 32 SPIs for now. */
                  | GIC_DIST_REG_TYPER_NUM_PES_SET(0)      /* 1 PE */
                  /*| GIC_DIST_REG_TYPER_ESPI*/            /** @todo */
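Editorial note (GICv3 encoding background, not part of the changeset) on the GICD_TYPER change above:

/* GICD_TYPER.ITLinesNumber = N means the distributor supports 32 * (N + 1) interrupt IDs.
 * The old value 0 advertised only INTIDs 0..31, i.e. no SPIs at all; the new value 1
 * advertises INTIDs 0..63, i.e. the 32 SPIs (INTID 32..63) the @todo comment refers to. */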
…

 /**
-* Writes a GIC redistributor register.
+ * Reads a GIC redistributor SGI/PPI frame register.
+ *
+ * @returns VBox status code.
+ * @param   pDevIns     The device instance.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   offReg      The offset of the register being read.
+ * @param   puValue     Where to store the register value.
+ */
+DECLINLINE(VBOXSTRICTRC) gicReDistSgiPpiRegisterRead(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, uint16_t offReg, uint32_t *puValue)
+{
+    VMCPU_ASSERT_EMT(pVCpu);
+    RT_NOREF(pDevIns);
+
+    PGICCPU pThis = VMCPU_TO_GICCPU(pVCpu);
+    switch (offReg)
+    {
+        case GIC_REDIST_SGI_PPI_REG_ISENABLER0_OFF:
+        case GIC_REDIST_SGI_PPI_REG_ICENABLER0_OFF:
+            *puValue = ASMAtomicReadU32(&pThis->bmIntEnabled);
+            break;
+        case GIC_REDIST_SGI_PPI_REG_ISPENDR0_OFF:
+        case GIC_REDIST_SGI_PPI_REG_ICPENDR0_OFF:
+            *puValue = ASMAtomicReadU32(&pThis->bmIntPending);
+            break;
+        case GIC_REDIST_SGI_PPI_REG_ISACTIVER0_OFF:
+        case GIC_REDIST_SGI_PPI_REG_ICACTIVER0_OFF:
+            *puValue = ASMAtomicReadU32(&pThis->bmIntActive);
+            break;
+        case GIC_REDIST_SGI_PPI_REG_IPRIORITYn_OFF_START:
+        case GIC_REDIST_SGI_PPI_REG_IPRIORITYn_OFF_START + 4:
+        case GIC_REDIST_SGI_PPI_REG_IPRIORITYn_OFF_START + 8:
+        case GIC_REDIST_SGI_PPI_REG_IPRIORITYn_OFF_START + 12:
+        case GIC_REDIST_SGI_PPI_REG_IPRIORITYn_OFF_START + 16:
+        case GIC_REDIST_SGI_PPI_REG_IPRIORITYn_OFF_START + 20:
+        case GIC_REDIST_SGI_PPI_REG_IPRIORITYn_OFF_START + 24:
+        case GIC_REDIST_SGI_PPI_REG_IPRIORITYn_OFF_START + 28:
+        {
+            /* Figure out the register which is being read. */
+            uint8_t idxPrio = offReg - GIC_REDIST_SGI_PPI_REG_IPRIORITYn_OFF_START;
+            Assert(idxPrio <= RT_ELEMENTS(pThis->abIntPriority) - sizeof(uint32_t));
+
+            uint32_t u32Value = 0;
+            for (uint32_t i = idxPrio; i < idxPrio + sizeof(uint32_t); i++)
+                u32Value |= pThis->abIntPriority[i] << ((i - idxPrio) * 8);
+
+            *puValue = u32Value;
+            break;
+        }
+        case GIC_REDIST_SGI_PPI_REG_ICFGR0_OFF:
+            *puValue = ASMAtomicReadU32(&pThis->u32RegICfg0);
+            break;
+        case GIC_REDIST_SGI_PPI_REG_ICFGR1_OFF:
+            *puValue = ASMAtomicReadU32(&pThis->u32RegICfg1);
+            break;
+        default:
+            AssertReleaseFailed();
+            *puValue = 0;
+    }
+
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Writes a GIC redistributor frame register.
  *
  * @returns Strict VBox status code.
…

     VBOXSTRICTRC rcStrict = VINF_SUCCESS;
+    return rcStrict;
+}
+
+
+/**
+ * Writes a GIC redistributor SGI/PPI frame register.
+ *
+ * @returns Strict VBox status code.
+ * @param   pDevIns     The device instance.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   offReg      The offset of the register being written.
+ * @param   uValue      The register value.
+ */
+DECLINLINE(VBOXSTRICTRC) gicReDistSgiPpiRegisterWrite(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, uint16_t offReg, uint32_t uValue)
+{
+    VMCPU_ASSERT_EMT(pVCpu);
+    RT_NOREF(pDevIns);
+
+    PGICCPU pThis = VMCPU_TO_GICCPU(pVCpu);
+    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
+    switch (offReg)
+    {
+        case GIC_REDIST_SGI_PPI_REG_IGROUPR0_OFF:
+            ASMAtomicOrU32(&pThis->u32RegIGrp0, uValue);
+            rcStrict = gicReDistUpdateIrqState(pThis, pVCpu);
+            break;
+        case GIC_REDIST_SGI_PPI_REG_ISENABLER0_OFF:
+            ASMAtomicOrU32(&pThis->bmIntEnabled, uValue);
+            rcStrict = gicReDistUpdateIrqState(pThis, pVCpu);
+            break;
+        case GIC_REDIST_SGI_PPI_REG_ICENABLER0_OFF:
+            ASMAtomicAndU32(&pThis->bmIntEnabled, ~uValue);
+            rcStrict = gicReDistUpdateIrqState(pThis, pVCpu);
+            break;
+        case GIC_REDIST_SGI_PPI_REG_ISPENDR0_OFF:
+            ASMAtomicOrU32(&pThis->bmIntPending, uValue);
+            rcStrict = gicReDistUpdateIrqState(pThis, pVCpu);
+            break;
+        case GIC_REDIST_SGI_PPI_REG_ICPENDR0_OFF:
+            ASMAtomicAndU32(&pThis->bmIntPending, ~uValue);
+            rcStrict = gicReDistUpdateIrqState(pThis, pVCpu);
+            break;
+        case GIC_REDIST_SGI_PPI_REG_ISACTIVER0_OFF:
+            ASMAtomicOrU32(&pThis->bmIntActive, uValue);
+            rcStrict = gicReDistUpdateIrqState(pThis, pVCpu);
+            break;
+        case GIC_REDIST_SGI_PPI_REG_ICACTIVER0_OFF:
+            ASMAtomicAndU32(&pThis->bmIntActive, ~uValue);
+            rcStrict = gicReDistUpdateIrqState(pThis, pVCpu);
+            break;
+        case GIC_REDIST_SGI_PPI_REG_IPRIORITYn_OFF_START:
+        case GIC_REDIST_SGI_PPI_REG_IPRIORITYn_OFF_START + 4:
+        case GIC_REDIST_SGI_PPI_REG_IPRIORITYn_OFF_START + 8:
+        case GIC_REDIST_SGI_PPI_REG_IPRIORITYn_OFF_START + 12:
+        case GIC_REDIST_SGI_PPI_REG_IPRIORITYn_OFF_START + 16:
+        case GIC_REDIST_SGI_PPI_REG_IPRIORITYn_OFF_START + 20:
+        case GIC_REDIST_SGI_PPI_REG_IPRIORITYn_OFF_START + 24:
+        case GIC_REDIST_SGI_PPI_REG_IPRIORITYn_OFF_START + 28:
+        {
+            /* Figure out the register which is written. */
+            uint8_t idxPrio = offReg - GIC_REDIST_SGI_PPI_REG_IPRIORITYn_OFF_START;
+            Assert(idxPrio <= RT_ELEMENTS(pThis->abIntPriority) - sizeof(uint32_t));
+            for (uint32_t i = idxPrio; i < idxPrio + sizeof(uint32_t); i++)
+            {
+                pThis->abIntPriority[i] = (uint8_t)(uValue & 0xff);
+                uValue >>= 8;
+            }
+            break;
+        }
+        case GIC_REDIST_SGI_PPI_REG_ICFGR0_OFF:
+            ASMAtomicWriteU32(&pThis->u32RegICfg0, uValue);
+            break;
+        case GIC_REDIST_SGI_PPI_REG_ICFGR1_OFF:
+            ASMAtomicWriteU32(&pThis->u32RegICfg1, uValue);
+            break;
+        default:
+            AssertReleaseFailed();
+    }
+
     return rcStrict;
 }
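Editorial worked example (invented values, not part of the changeset) of the IPRIORITYn byte packing used by both SGI/PPI register handlers above:

/* A 32-bit read at GIC_REDIST_SGI_PPI_REG_IPRIORITYn_OFF_START + 4 gives idxPrio = 4, so the
 * returned value packs abIntPriority[4..7] with the lowest index in the least significant byte:
 *   abIntPriority[4..7] = { 0x10, 0x20, 0x30, 0x40 }   ->   *puValue = 0x40302010
 * A write of 0x40302010 to the same offset distributes the bytes back the same way. */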
…

     *pu64Value = 0;
+    PGICCPU pThis = VMCPU_TO_GICCPU(pVCpu);
+    switch (u32Reg)
+    {
+        case ARMV8_AARCH64_SYSREG_ICC_PMR_EL1:
+            *pu64Value = pThis->bInterruptPriority;
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_IAR0_EL1:
+            AssertReleaseFailed();
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_EOIR0_EL1:
+            AssertReleaseFailed();
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_HPPIR0_EL1:
+            AssertReleaseFailed();
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_BPR0_EL1:
+            *pu64Value = pThis->bBinaryPointGrp0 & 0x7;
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_AP0R0_EL1:
+            AssertReleaseFailed();
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_AP0R1_EL1:
+            AssertReleaseFailed();
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_AP0R2_EL1:
+            AssertReleaseFailed();
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_AP0R3_EL1:
+            AssertReleaseFailed();
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_AP1R0_EL1:
+            AssertReleaseFailed();
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_AP1R1_EL1:
+            AssertReleaseFailed();
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_AP1R2_EL1:
+            AssertReleaseFailed();
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_AP1R3_EL1:
+            AssertReleaseFailed();
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_NMIAR1_EL1:
+            AssertReleaseFailed();
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_DIR_EL1:
+            AssertReleaseFailed();
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_RPR_EL1:
+            AssertReleaseFailed();
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_SGI1R_EL1:
+            AssertReleaseFailed();
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_ASGI1R_EL1:
+            AssertReleaseFailed();
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_SGI0R_EL1:
+            AssertReleaseFailed();
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_IAR1_EL1:
+        {
+            /** @todo Figure out the highest priority interrupt. */
+            uint32_t bmPending = ASMAtomicReadU32(&pThis->bmIntPending);
+            int32_t idxIntPending = ASMBitFirstSet(&bmPending, sizeof(bmPending) * 8);
+            if (idxIntPending > -1)
+            {
+                /* Mark the interrupt as active. */
+                ASMAtomicOrU32(&pThis->bmIntActive, idxIntPending);
+                *pu64Value = idxIntPending;
+            }
+            else
+                *pu64Value = GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT;
+            break;
+        }
+        case ARMV8_AARCH64_SYSREG_ICC_EOIR1_EL1:
+            AssertReleaseFailed();
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_HPPIR1_EL1:
+            AssertReleaseFailed();
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_BPR1_EL1:
+            *pu64Value = pThis->bBinaryPointGrp1 & 0x7;
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_CTLR_EL1:
+            *pu64Value = ARMV8_ICC_CTLR_EL1_AARCH64_PMHE
+                       | ARMV8_ICC_CTLR_EL1_AARCH64_PRIBITS_SET(4)
+                       | ARMV8_ICC_CTLR_EL1_AARCH64_IDBITS_SET(ARMV8_ICC_CTLR_EL1_AARCH64_IDBITS_16BITS);
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_SRE_EL1:
+            AssertReleaseFailed();
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_IGRPEN0_EL1:
+            *pu64Value = ASMAtomicReadBool(&pThis->fIrqGrp0Enabled) ? ARMV8_ICC_IGRPEN0_EL1_AARCH64_ENABLE : 0;
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_IGRPEN1_EL1:
+            *pu64Value = ASMAtomicReadBool(&pThis->fIrqGrp1Enabled) ? ARMV8_ICC_IGRPEN1_EL1_AARCH64_ENABLE : 0;
+            break;
+        default:
+            AssertReleaseFailed();
+            break;
+    }
+
     LogFlowFunc(("pVCpu=%p u32Reg=%#x pu64Value=%RX64\n", pVCpu, u32Reg, *pu64Value));
     return VINF_SUCCESS;
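Editorial note on the ICC_CTLR_EL1 value returned above (GICv3 architecture background, not part of the changeset):

/* ICC_CTLR_EL1.PRIbits holds the number of implemented priority bits minus one, so
 * PRIBITS_SET(4) advertises 5 priority bits (32 priority levels).  IDbits selects the
 * INTID width, here the 16-bit encoding, and PMHE controls whether the priority mask
 * may be used as a hint for interrupt distribution. */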
…

     LogFlowFunc(("pVCpu=%p u32Reg=%#x u64Value=%RX64\n", pVCpu, u32Reg, u64Value));

+    PGICCPU pThis = VMCPU_TO_GICCPU(pVCpu);
+    switch (u32Reg)
+    {
+        case ARMV8_AARCH64_SYSREG_ICC_PMR_EL1:
+            ASMAtomicWriteU8(&pThis->bInterruptPriority, (uint8_t)u64Value);
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_IAR0_EL1:
+            AssertReleaseFailed();
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_EOIR0_EL1:
+            AssertReleaseFailed();
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_HPPIR0_EL1:
+            AssertReleaseFailed();
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_BPR0_EL1:
+            pThis->bBinaryPointGrp0 = (uint8_t)(u64Value & 0x7);
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_AP0R0_EL1:
+            /** @todo */
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_AP0R1_EL1:
+            AssertReleaseFailed();
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_AP0R2_EL1:
+            AssertReleaseFailed();
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_AP0R3_EL1:
+            AssertReleaseFailed();
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_AP1R0_EL1:
+            /** @todo */
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_AP1R1_EL1:
+            AssertReleaseFailed();
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_AP1R2_EL1:
+            AssertReleaseFailed();
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_AP1R3_EL1:
+            AssertReleaseFailed();
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_NMIAR1_EL1:
+            AssertReleaseFailed();
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_DIR_EL1:
+            AssertReleaseFailed();
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_RPR_EL1:
+            AssertReleaseFailed();
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_SGI1R_EL1:
+            AssertReleaseFailed();
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_ASGI1R_EL1:
+            AssertReleaseFailed();
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_SGI0R_EL1:
+            AssertReleaseFailed();
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_IAR1_EL1:
+            AssertReleaseFailed();
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_EOIR1_EL1:
+        {
+            /* Mark the interrupt as not active anymore, though it might still be pending. */
+            Assert(u64Value < GIC_INTID_RANGE_SPI_START);
+            ASMAtomicAndU32(&pThis->bmIntActive, (uint32_t)u64Value);
+            break;
+        }
+        case ARMV8_AARCH64_SYSREG_ICC_HPPIR1_EL1:
+            AssertReleaseFailed();
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_BPR1_EL1:
+            pThis->bBinaryPointGrp0 = (uint8_t)(u64Value & 0x7);
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_CTLR_EL1:
+            u64Value &= ARMV8_ICC_CTLR_EL1_RW;
+            /** @todo */
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_SRE_EL1:
+            AssertReleaseFailed();
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_IGRPEN0_EL1:
+            ASMAtomicWriteBool(&pThis->fIrqGrp0Enabled, RT_BOOL(u64Value & ARMV8_ICC_IGRPEN0_EL1_AARCH64_ENABLE));
+            break;
+        case ARMV8_AARCH64_SYSREG_ICC_IGRPEN1_EL1:
+            ASMAtomicWriteBool(&pThis->fIrqGrp1Enabled, RT_BOOL(u64Value & ARMV8_ICC_IGRPEN1_EL1_AARCH64_ENABLE));
+            break;
+        default:
+            AssertReleaseFailed();
+            break;
+    }
+
     return VINF_SUCCESS;
+}
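For orientation only (standard GICv3 behaviour, not part of the changeset): the ICC_IAR1_EL1 read handler and the ICC_EOIR1_EL1 write handler above emulate the two ends of the usual guest interrupt-handler sequence.

/* Guest view (sketch): the handler acknowledges by reading ICC_IAR1_EL1, which returns the
 * pending INTID and is emulated above by marking it active; after servicing, the same INTID
 * is written back to ICC_EOIR1_EL1, emulated above by dropping the active state:
 *     mrs  x0, ICC_IAR1_EL1      // acknowledge, x0 = INTID (1023 = no pending interrupt)
 *     ...                        // service the interrupt
 *     msr  ICC_EOIR1_EL1, x0     // signal end of interrupt
 */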
+
+
+/**
+ * Sets the specified shared peripheral interrupt starting.
+ *
+ * @returns VBox status code.
+ * @param   pVM         The cross context virtual machine structure.
+ * @param   uIntId      The SPI ID (minus GIC_INTID_RANGE_SPI_START) to assert/de-assert.
+ * @param   fAsserted   Flag whether to mark the interrupt as asserted/de-asserted.
+ */
+VMM_INT_DECL(int) GICSpiSet(PVMCC pVM, uint32_t uIntId, bool fAsserted)
+{
+    RT_NOREF(pVM, uIntId, fAsserted);
+    AssertReleaseFailed();
+    return VERR_NOT_IMPLEMENTED;
+}
+
+
+/**
+ * Sets the specified private peripheral interrupt starting.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   uIntId      The PPI ID (minus GIC_INTID_RANGE_PPI_START) to assert/de-assert.
+ * @param   fAsserted   Flag whether to mark the interrupt as asserted/de-asserted.
+ */
+VMM_INT_DECL(int) GICPpiSet(PVMCPUCC pVCpu, uint32_t uIntId, bool fAsserted)
+{
+    AssertReturn(uIntId >= 0 && uIntId <= (GIC_INTID_RANGE_PPI_LAST - GIC_INTID_RANGE_PPI_START), VERR_INVALID_PARAMETER);
+    return gicReDistInterruptSet(pVCpu, uIntId + GIC_INTID_RANGE_PPI_START, fAsserted);
+}
+
+
+/**
+ * Sets the specified software generated interrupt starting.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   uIntId      The SGI ID (minus GIC_INTID_RANGE_SGI_START) to assert/de-assert.
+ * @param   fAsserted   Flag whether to mark the interrupt as asserted/de-asserted.
+ */
+VMM_INT_DECL(int) GICSgiSet(PVMCPUCC pVCpu, uint32_t uIntId, bool fAsserted)
+{
+    AssertReturn(uIntId >= 0 && uIntId <= (GIC_INTID_RANGE_SGI_LAST - GIC_INTID_RANGE_SGI_START), VERR_INVALID_PARAMETER);
+    return gicReDistInterruptSet(pVCpu, uIntId + GIC_INTID_RANGE_SGI_START, fAsserted);
 }
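A hypothetical caller sketch (editorial, not from this changeset) showing how a per-vCPU device model might use the new PPI API; the INTID arithmetic follows the doc comments above.

/* Example: assert PPI INTID 27 (a typical virtual timer PPI) on a vCPU.  The API takes the
 * ID relative to GIC_INTID_RANGE_PPI_START (16), i.e. 27 - 16 = 11. */
int rc = GICPpiSet(pVCpu, 27 - GIC_INTID_RANGE_PPI_START, true /*fAsserted*/);
AssertRC(rc);   /* gicReDistInterruptSet() marks INTID 27 pending and re-evaluates the IRQ/FIQ lines. */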
…

 {
     NOREF(pvUser);
-    //Assert(!(off & 0xf));
-    //Assert(cb == 4); RT_NOREF_PV(cb);
+    Assert(!(off & 0x3));
+    Assert(cb == 4); RT_NOREF_PV(cb);

     PVMCPUCC pVCpu = PDMDevHlpGetVMCPU(pDevIns);
…

 {
     NOREF(pvUser);
-    //Assert(!(off & 0xf));
-    //Assert(cb == 4); RT_NOREF_PV(cb);
+    Assert(!(off & 0x3));
+    Assert(cb == 4); RT_NOREF_PV(cb);

     PVMCPUCC pVCpu = PDMDevHlpGetVMCPU(pDevIns);
…

 {
     NOREF(pvUser);
-    //Assert(!(off & 0xf));
-    //Assert(cb == 4); RT_NOREF_PV(cb);
+    Assert(!(off & 0x3));
+    Assert(cb == 4); RT_NOREF_PV(cb);

     PVMCPUCC pVCpu = PDMDevHlpGetVMCPU(pDevIns);
-    uint16_t offReg = off & 0xfffc;
-    uint32_t uValue = 0;

     STAM_COUNTER_INC(&pVCpu->gic.s.CTX_SUFF_Z(StatMmioRead));

-    VBOXSTRICTRC rc = VBOXSTRICTRC_VAL(gicReDistRegisterRead(pDevIns, pVCpu, offReg, &uValue));
+    /*
+     * Determine the redistributor being targeted. Each redistributor takes
+     * GIC_REDIST_REG_FRAME_SIZE + GIC_REDIST_SGI_PPI_REG_FRAME_SIZE bytes
+     * and the redistributors are adjacent.
+     */
+    uint32_t idReDist = off / (GIC_REDIST_REG_FRAME_SIZE + GIC_REDIST_SGI_PPI_REG_FRAME_SIZE);
+    off %= (GIC_REDIST_REG_FRAME_SIZE + GIC_REDIST_SGI_PPI_REG_FRAME_SIZE);
+
+    /* Redistributor or SGI/PPI frame? */
+    uint16_t offReg = off & 0xfffc;
+    uint32_t uValue = 0;
+    VBOXSTRICTRC rcStrict;
+    if (off < GIC_REDIST_REG_FRAME_SIZE)
+        rcStrict = gicReDistRegisterRead(pDevIns, pVCpu, offReg, &uValue);
+    else
+        rcStrict = gicReDistSgiPpiRegisterRead(pDevIns, pVCpu, offReg, &uValue);
+
     *(uint32_t *)pv = uValue;
-
-    Log2(("GIC%u: gicReDistMmioRead: offReg=%#RX16 uValue=%#RX32\n", pVCpu->idCpu, offReg, uValue));
-    return rc;
+    Log2(("GICReDist%u: gicReDistMmioRead: off=%RGp idReDist=%u offReg=%#RX16 uValue=%#RX32 -> %Rrc\n",
+          pVCpu->idCpu, off, idReDist, offReg, uValue, VBOXSTRICTRC_VAL(rcStrict)));
+    return rcStrict;
 }

…

 {
     NOREF(pvUser);
-    //Assert(!(off & 0xf));
-    //Assert(cb == 4); RT_NOREF_PV(cb);
-
-    PVMCPUCC pVCpu = PDMDevHlpGetVMCPU(pDevIns);
-    uint16_t offReg = off & 0xfffc;
-    uint32_t uValue = *(uint32_t *)pv;
+    Assert(!(off & 0x3));
+    Assert(cb == 4); RT_NOREF_PV(cb);
+
+    PVMCPUCC pVCpu  = PDMDevHlpGetVMCPU(pDevIns);
+    uint32_t uValue = *(uint32_t *)pv;

     STAM_COUNTER_INC(&pVCpu->gic.s.CTX_SUFF_Z(StatMmioWrite));

-    Log2(("GIC%u: gicReDistMmioWrite: offReg=%#RX16 uValue=%#RX32\n", pVCpu->idCpu, offReg, uValue));
-    return gicReDistRegisterWrite(pDevIns, pVCpu, offReg, uValue);
+    /*
+     * Determine the redistributor being targeted. Each redistributor takes
+     * GIC_REDIST_REG_FRAME_SIZE + GIC_REDIST_SGI_PPI_REG_FRAME_SIZE bytes
+     * and the redistributors are adjacent.
+     */
+    uint32_t idReDist = off / (GIC_REDIST_REG_FRAME_SIZE + GIC_REDIST_SGI_PPI_REG_FRAME_SIZE);
+    off %= (GIC_REDIST_REG_FRAME_SIZE + GIC_REDIST_SGI_PPI_REG_FRAME_SIZE);
+
+    /* Redistributor or SGI/PPI frame? */
+    uint16_t offReg = off & 0xfffc;
+    VBOXSTRICTRC rcStrict;
+    if (off < GIC_REDIST_REG_FRAME_SIZE)
+        rcStrict = gicReDistRegisterWrite(pDevIns, pVCpu, offReg, uValue);
+    else
+        rcStrict = gicReDistSgiPpiRegisterWrite(pDevIns, pVCpu, offReg, uValue);
+
+    Log2(("GICReDist%u: gicReDistMmioWrite: off=%RGp idReDist=%u offReg=%#RX16 uValue=%#RX32 -> %Rrc\n",
+          pVCpu->idCpu, off, idReDist, offReg, uValue, VBOXSTRICTRC_VAL(rcStrict)));
+    return rcStrict;
 }
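An editorial worked example of the redistributor decoding introduced above. The concrete frame size is an assumption here: GICv3 redistributors normally expose two 64 KiB frames (RD and SGI/PPI), so GIC_REDIST_REG_FRAME_SIZE and GIC_REDIST_SGI_PPI_REG_FRAME_SIZE are each taken to be 0x10000.

/* Assuming 64 KiB frames (0x10000 each, 0x20000 per redistributor):
 *   off = 0x20080  ->  idReDist = 0x20080 / 0x20000 = 1, off %= 0x20000 gives 0x0080;
 *                      0x0080 < 0x10000, so the access hits redistributor 1's RD frame (offReg = 0x0080).
 *   off = 0x30100  ->  idReDist = 1, off = 0x10100;
 *                      0x10100 >= 0x10000, so it hits redistributor 1's SGI/PPI frame (offReg = 0x0100). */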