Changeset 36204 in vbox for trunk/src/VBox/Devices/PC
- Timestamp:
- Mar 8, 2011 4:12:19 PM (14 years ago)
- File:
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/Devices/PC/DevDMA.cpp
r35353 r36204 16 16 * -------------------------------------------------------------------- 17 17 * 18 * This code is based on:18 * This code is loosely based on: 19 19 * 20 20 * QEMU DMA emulation … … 41 41 */ 42 42 43 #ifdef VBOX44 45 43 /******************************************************************************* 46 44 * Header Files * 47 45 *******************************************************************************/ 46 #define LOG_GROUP LOG_GROUP_DEV_DMA 48 47 #include <VBox/vmm/pdmdev.h> 49 48 #include <VBox/err.h> 50 49 51 #define LOG_GROUP LOG_GROUP_DEFAULT ///@todo LOG_GROUP_DEV_DMA52 50 #include <VBox/log.h> 53 51 #include <iprt/assert.h> … … 58 56 59 57 #include "VBoxDD.h" 60 #include "vl_vbox.h" 61 typedef PFNDMATRANSFERHANDLER DMA_transfer_handler; 62 63 #else /* !VBOX */ 64 #include "vl.h" 58 59 60 /* DMA Overview and notes 61 * 62 * Modern PCs typically emulate AT-compatible DMA. The IBM PC/AT used dual 63 * cascaded 8237A DMA controllers, augmented with a 74LS612 memory mapper. 64 * The 8237As are 8-bit parts, only capable of addressing up to 64KB; the 65 * 74LS612 extends addressing to 24 bits. That leads to well known and 66 * inconvenient DMA limitations: 67 * - DMA can only access physical memory under the 16MB line 68 * - DMA transfers must occur within a 64KB/128KB 'page' 69 * 70 * The 16-bit DMA controller added in the PC/AT shifts all 8237A addresses 71 * left by one, including the control registers addresses. The DMA register 72 * offsets (except for the page registers) are therefore "double spaced". 73 * 74 * Due to the address shifting, the DMA controller decodes more addresses 75 * than are usually documented, with aliasing. See the ICH8 datasheet. 76 * 77 * In the IBM PC and PC/XT, DMA channel 0 was used for memory refresh, thus 78 * preventing the use of memory-to-memory DMA transfers (which use channels 79 * 0 and 1). In the PC/AT, memory-to-memory DMA was theoretically possible. 
80 * However, it would transfer a single byte at a time, while the CPU can 81 * transfer two (on a 286) or four (on a 386+) bytes at a time. On many 82 * compatibles, memory-to-memory DMA is not even implemented at all, and 83 * therefore has no practical use. 84 * 85 * Auto-init mode is handled implicitly; a device's transfer handler may 86 * return an end count lower than the start count. 87 * 88 * Naming convention: 'channel' refers to a system-wide DMA channel (0-7) 89 * while 'chidx' refers to a DMA channel index within a controller (0-3). 90 * 91 * References: 92 * - IBM Personal Computer AT Technical Reference, 1984 93 * - Intel 8237A-5 Datasheet, 1993 94 * - Frank van Gilluwe, The Undocumented PC, 1994 95 * - OPTi 82C206 Data Book, 1996 (or Chips & Tech 82C206) 96 * - Intel ICH8 Datasheet, 2007 97 */ 98 99 100 /* Saved state versions. */ 101 #define DMA_SAVESTATE_OLD 1 /* The original saved state. */ 102 #define DMA_SAVESTATE_CURRENT 2 /* The new and improved saved state. */ 103 104 /* State information for a single DMA channel. */ 105 typedef struct { 106 void *pvUser; /* User specific context. */ 107 PFNDMATRANSFERHANDLER pfnXferHandler; /* Transfer handler for channel. */ 108 uint16_t u16BaseAddr; /* Base address for transfers. */ 109 uint16_t u16BaseCount; /* Base count for transfers. */ 110 uint16_t u16CurAddr; /* Current address. */ 111 uint16_t u16CurCount; /* Current count. */ 112 uint8_t u8Mode; /* Channel mode. */ 113 } DMAChannel; 114 115 /* State information for a DMA controller (DMA8 or DMA16). */ 116 typedef struct { 117 DMAChannel ChState[4]; /* Per-channel state. */ 118 uint8_t au8Page[8]; /* Page registers (A16-A23). */ 119 uint8_t au8PageHi[8]; /* High page registers (A24-A31). */ 120 uint8_t u8Command; /* Command register. */ 121 uint8_t u8Status; /* Status register. */ 122 uint8_t u8Mask; /* Mask register. */ 123 uint8_t u8Temp; /* Temporary (mem/mem) register. */ 124 uint8_t u8ModeCtr; /* Mode register counter for reads. 
*/ 125 bool bHiByte; /* Byte pointer (T/F -> high/low). */ 126 uint32_t is16bit; /* True for 16-bit DMA. */ 127 } DMAControl; 128 129 /* Complete DMA state information. */ 130 typedef struct { 131 PPDMDEVINS pDevIns; /* Device instance. */ 132 PCPDMDMACHLP pHlp; /* PDM DMA helpers. */ 133 DMAControl DMAC[2]; /* Two DMA controllers. */ 134 } DMAState; 135 136 /* DMA command register bits. */ 137 enum { 138 CMD_MEMTOMEM = 0x01, /* Enable mem-to-mem trasfers. */ 139 CMD_ADRHOLD = 0x02, /* Address hold for mem-to-mem. */ 140 CMD_DISABLE = 0x04, /* Disable controller. */ 141 CMD_COMPRTIME = 0x08, /* Compressed timing. */ 142 CMD_ROTPRIO = 0x10, /* Rotating priority. */ 143 CMD_EXTWR = 0x20, /* Extended write. */ 144 CMD_DREQHI = 0x40, /* DREQ is active high if set. */ 145 CMD_DACKHI = 0x80, /* DACK is active high if set. */ 146 CMD_UNSUPPORTED = CMD_MEMTOMEM | CMD_ADRHOLD | CMD_COMPRTIME 147 | CMD_EXTWR | CMD_DREQHI | CMD_DACKHI 148 }; 149 150 /* DMA control register offsets for read accesses. */ 151 enum { 152 CTL_R_STAT, /* Read status registers. */ 153 CTL_R_DMAREQ, /* Read DRQ register. */ 154 CTL_R_CMD, /* Read command register. */ 155 CTL_R_MODE, /* Read mode register. */ 156 CTL_R_SETBPTR, /* Set byte pointer flip-flop. */ 157 CTL_R_TEMP, /* Read temporary register. */ 158 CTL_R_CLRMODE, /* Clear mode register counter. */ 159 CTL_R_MASK /* Read all DRQ mask bits. */ 160 }; 161 162 /* DMA control register offsets for read accesses. */ 163 enum { 164 CTL_W_CMD, /* Write command register. */ 165 CTL_W_DMAREQ, /* Write DRQ register. */ 166 CTL_W_MASKONE, /* Write single DRQ mask bit. */ 167 CTL_W_MODE, /* Write mode register. */ 168 CTL_W_CLRBPTR, /* Clear byte pointer flip-flop. */ 169 CTL_W_MASTRCLR, /* Master clear. */ 170 CTL_W_CLRMASK, /* Clear all DRQ mask bits. */ 171 CTL_W_MASK /* Write all DRQ mask bits. */ 172 }; 173 174 /* Convert DMA channel number (0-7) to controller number (0-1). */ 175 #define DMACH2C(c) (c < 4 ? 
0 : 1) 176 177 static int dmaChannelMap[8] = {-1, 2, 3, 1, -1, -1, -1, 0}; 178 /* Map a DMA page register offset (0-7) to channel index (0-3). */ 179 #define DMAPG2CX(c) (dmaChannelMap[c]) 180 181 static int dmaMapChannel[4] = {7, 3, 1, 2}; 182 /* Map a channel index (0-3) to DMA page register offset (0-7). */ 183 #define DMACX2PG(c) (dmaMapChannel[c]) 184 /* Map a channel number (0-7) to DMA page register offset (0-7). */ 185 #define DMACH2PG(c) (dmaMapChannel[c & 3]) 186 187 /* Test the decrement bit of mode register. */ 188 #define IS_MODE_DEC(c) ((c) & 0x20) 189 /* Test the auto-init bit of mode register. */ 190 #define IS_MODE_AI(c) ((c) & 0x10) 191 192 /* Perform a master clear (reset) on a DMA controller. */ 193 static void dmaClear(DMAControl *dc) 194 { 195 dc->u8Command = 0; 196 dc->u8Status = 0; 197 dc->u8Temp = 0; 198 dc->u8ModeCtr = 0; 199 dc->bHiByte = false; 200 dc->u8Mask = ~0; 201 } 202 203 /* Read the byte pointer and flip it. */ 204 static inline bool dmaReadBytePtr(DMAControl *dc) 205 { 206 bool bHighByte; 207 208 bHighByte = !!dc->bHiByte; 209 dc->bHiByte ^= 1; 210 return bHighByte; 211 } 212 213 /* DMA address registers writes and reads. */ 214 215 static DECLCALLBACK(int) dmaWriteAddr(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT port, 216 uint32_t u32, unsigned cb) 217 { 218 if (cb == 1) 219 { 220 DMAControl *dc = (DMAControl *)pvUser; 221 DMAChannel *ch; 222 int chidx, reg, is_count; 223 224 Assert(!(u32 & ~0xff)); /* Check for garbage in high bits. */ 225 reg = (port >> dc->is16bit) & 0x0f; 226 chidx = reg >> 1; 227 is_count = reg & 1; 228 ch = &dc->ChState[chidx]; 229 if (dmaReadBytePtr(dc)) 230 { 231 /* Write the high byte. */ 232 if (is_count) 233 ch->u16BaseCount = RT_MAKE_U16(ch->u16BaseCount, u32); 234 else 235 ch->u16BaseAddr = RT_MAKE_U16(ch->u16BaseAddr, u32); 236 237 ch->u16CurCount = 0; 238 ch->u16CurAddr = ch->u16BaseAddr; 239 } 240 else 241 { 242 /* Write the low byte. 
*/ 243 if (is_count) 244 ch->u16BaseCount = RT_MAKE_U16(u32, RT_HIBYTE(ch->u16BaseCount)); 245 else 246 ch->u16BaseAddr = RT_MAKE_U16(u32, RT_HIBYTE(ch->u16BaseAddr)); 247 } 248 Log2(("dmaWriteAddr: port %#06x, chidx %d, data %#02x\n", 249 port, chidx, u32)); 250 } 251 else 252 { 253 /* Likely a guest bug. */ 254 Log(("Bad size write to count register %#x (size %d, data %#x)\n", 255 port, cb, u32)); 256 } 257 return VINF_SUCCESS; 258 } 259 260 static DECLCALLBACK(int) dmaReadAddr(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT port, 261 uint32_t *pu32, unsigned cb) 262 { 263 if (cb == 1) 264 { 265 DMAControl *dc = (DMAControl *)pvUser; 266 DMAChannel *ch; 267 int chidx, reg, val, dir; 268 int bptr; 269 270 reg = (port >> dc->is16bit) & 0x0f; 271 chidx = reg >> 1; 272 ch = &dc->ChState[chidx]; 273 274 dir = IS_MODE_DEC(ch->u8Mode) ? -1 : 1; 275 if (reg & 1) 276 val = ch->u16BaseCount - ch->u16CurCount; 277 else 278 val = ch->u16CurAddr + ch->u16CurCount * dir; 279 280 bptr = dmaReadBytePtr(dc); 281 *pu32 = RT_LOBYTE(val >> (bptr * 8)); 282 283 Log(("Count read: port %#06x, reg %#04x, data %#x\n", port, reg, val)); 284 return VINF_SUCCESS; 285 } 286 else 287 return VERR_IOM_IOPORT_UNUSED; 288 } 289 290 /* DMA control registers writes and reads. */ 291 292 static DECLCALLBACK(int) dmaWriteCtl(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT port, 293 uint32_t u32, unsigned cb) 294 { 295 if (cb == 1) 296 { 297 DMAControl *dc = (DMAControl *)pvUser; 298 int chidx = 0; 299 int reg; 300 301 reg = ((port >> dc->is16bit) & 0x0f) - 8; 302 Assert((reg >= CTL_W_CMD && reg <= CTL_W_MASK)); 303 Assert(!(u32 & ~0xff)); /* Check for garbage in high bits. */ 304 305 switch (reg) { 306 case CTL_W_CMD: 307 /* Unsupported commands are entirely ignored. 
*/ 308 if (u32 & CMD_UNSUPPORTED) 309 { 310 Log(("DMA command %#x is not supported, ignoring!\n", u32)); 311 break; 312 } 313 dc->u8Command = u32; 314 break; 315 case CTL_W_DMAREQ: 316 chidx = u32 & 3; 317 if (u32 & 4) 318 dc->u8Status |= 1 << (chidx + 4); 319 else 320 dc->u8Status &= ~(1 << (chidx + 4)); 321 dc->u8Status &= ~(1 << chidx); /* Clear TC for channel. */ 322 break; 323 case CTL_W_MASKONE: 324 chidx = u32 & 3; 325 if (u32 & 4) 326 dc->u8Mask |= 1 << chidx; 327 else 328 dc->u8Mask &= ~(1 << chidx); 329 break; 330 case CTL_W_MODE: 331 { 332 int op, opmode; 333 334 chidx = u32 & 3; 335 op = (u32 >> 2) & 3; 336 opmode = (u32 >> 6) & 3; 337 Log2(("chidx %d, op %d, %sauto-init, %screment, opmode %d\n", 338 chidx, op, IS_MODE_AI(u32) ? "" : "no ", 339 IS_MODE_DEC(u32) ? "de" : "in", opmode)); 340 341 dc->ChState[chidx].u8Mode = u32; 342 break; 343 } 344 case CTL_W_CLRBPTR: 345 dc->bHiByte = false; 346 break; 347 case CTL_W_MASTRCLR: 348 dmaClear(dc); 349 break; 350 case CTL_W_CLRMASK: 351 dc->u8Mask = 0; 352 break; 353 case CTL_W_MASK: 354 dc->u8Mask = u32; 355 break; 356 default: 357 Assert(0); 358 break; 359 } 360 Log(("dmaWriteCtl: port %#06x, chidx %d, data %#02x\n", 361 port, chidx, u32)); 362 } 363 else 364 { 365 /* Likely a guest bug. */ 366 Log(("Bad size write to controller register %#x (size %d, data %#x)\n", 367 port, cb, u32)); 368 } 369 return VINF_SUCCESS; 370 } 371 372 static DECLCALLBACK(int) dmaReadCtl(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT port, 373 uint32_t *pu32, unsigned cb) 374 { 375 if (cb == 1) 376 { 377 DMAControl *dc = (DMAControl *)pvUser; 378 uint8_t val; 379 int reg; 380 381 reg = ((port >> dc->is16bit) & 0x0f) - 8; 382 Assert((reg >= CTL_R_STAT && reg <= CTL_R_MASK)); 383 384 switch (reg) { 385 case CTL_R_STAT: 386 val = dc->u8Status; 387 dc->u8Status &= 0xf0; /* A read clears all TCs. 
*/ 388 break; 389 case CTL_R_DMAREQ: 390 val = (dc->u8Status >> 4) | 0xf0; 391 break; 392 case CTL_R_CMD: 393 val = dc->u8Command; 394 break; 395 case CTL_R_MODE: 396 val = dc->ChState[dc->u8ModeCtr].u8Mode | 3; 397 dc->u8ModeCtr = (dc->u8ModeCtr + 1) & 3; 398 case CTL_R_SETBPTR: 399 dc->bHiByte = true; 400 break; 401 case CTL_R_TEMP: 402 val = dc->u8Temp; 403 break; 404 case CTL_R_CLRMODE: 405 dc->u8ModeCtr = 0; 406 break; 407 case CTL_R_MASK: 408 val = dc->u8Mask; 409 break; 410 default: 411 Assert(0); 412 val = 0; 413 break; 414 } 415 416 Log(("Ctrl read: port %#06x, reg %#04x, data %#x\n", port, reg, val)); 417 *pu32 = val; 418 419 return VINF_SUCCESS; 420 } 421 else 422 return VERR_IOM_IOPORT_UNUSED; 423 } 424 425 /* DMA page registers. There are 16 R/W page registers for compatibility with 426 * the IBM PC/AT; only some of those registers are used for DMA. The page register 427 * accessible via port 80h may be read to insert small delays or used as a scratch 428 * register by a BIOS. 429 */ 430 static DECLCALLBACK(int) dmaReadPage(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT port, 431 uint32_t *pu32, unsigned cb) 432 { 433 if (cb == 1) 434 { 435 DMAControl *dc = (DMAControl *)pvUser; 436 int reg; 437 438 reg = port & 7; 439 *pu32 = dc->au8Page[reg]; 440 Log2(("Read %#x to from page register %#x (channel %d)\n", 441 *pu32, port, DMAPG2CX(reg))); 442 return VINF_SUCCESS; 443 } 444 else 445 return VERR_IOM_IOPORT_UNUSED; 446 } 447 448 static DECLCALLBACK(int) dmaWritePage(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT port, 449 uint32_t u32, unsigned cb) 450 { 451 if (cb == 1) 452 { 453 DMAControl *dc = (DMAControl *)pvUser; 454 int reg; 455 456 Assert(!(u32 & ~0xff)); /* Check for garbage in high bits. */ 457 reg = port & 7; 458 dc->au8Page[reg] = u32; 459 dc->au8PageHi[reg] = 0; /* Corresponding high page cleared. */ 460 Log2(("Wrote %#x to page register %#x (channel %d)\n", 461 u32, port, DMAPG2CX(reg))); 462 } 463 else 464 { 465 /* Likely a guest bug. 
*/ 466 Log(("Bad size write to page register %#x (size %d, data %#x)\n", 467 port, cb, u32)); 468 } 469 return VINF_SUCCESS; 470 } 471 472 /* EISA style high page registers, for extending the DMA addresses to cover 473 * the entire 32-bit address space. 474 */ 475 static DECLCALLBACK(int) dmaReadHiPage(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT port, 476 uint32_t *pu32, unsigned cb) 477 { 478 if (cb == 1) 479 { 480 DMAControl *dc = (DMAControl *)pvUser; 481 int reg; 482 483 reg = port & 7; 484 *pu32 = dc->au8PageHi[reg]; 485 Log2(("Read %#x to from high page register %#x (channel %d)\n", 486 *pu32, port, DMAPG2CX(reg))); 487 return VINF_SUCCESS; 488 } 489 else 490 return VERR_IOM_IOPORT_UNUSED; 491 } 492 493 static DECLCALLBACK(int) dmaWriteHiPage(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT port, 494 uint32_t u32, unsigned cb) 495 { 496 if (cb == 1) 497 { 498 DMAControl *dc = (DMAControl *)pvUser; 499 int reg; 500 501 Assert(!(u32 & ~0xff)); /* Check for garbage in high bits. */ 502 reg = port & 7; 503 dc->au8PageHi[reg] = u32; 504 Log2(("Wrote %#x to high page register %#x (channel %d)\n", 505 u32, port, DMAPG2CX(reg))); 506 } 507 else 508 { 509 /* Likely a guest bug. */ 510 Log(("Bad size write to high page register %#x (size %d, data %#x)\n", 511 port, cb, u32)); 512 } 513 return VINF_SUCCESS; 514 } 515 516 /* Perform any pending transfers on a single DMA channel. */ 517 static void dmaRunChannel(DMAState *s, int ctlidx, int chidx) 518 { 519 DMAControl *dc = &s->DMAC[ctlidx]; 520 DMAChannel *ch = &dc->ChState[chidx]; 521 uint32_t start_cnt, end_cnt; 522 int opmode; 523 524 opmode = (ch->u8Mode >> 6) & 3; 525 526 Log3(("DMA address %screment, mode %d\n", 527 IS_MODE_DEC(ch->u8Mode) ? "de" : "in", 528 ch->u8Mode >> 6)); 529 530 /* Addresses and counts are shifted for 16-bit channels. 
*/ 531 start_cnt = ch->u16CurCount << dc->is16bit; 532 end_cnt = ch->pfnXferHandler(s->pDevIns, ch->pvUser, (ctlidx * 4) + chidx, 533 start_cnt, (ch->u16BaseCount + 1) << dc->is16bit); 534 ch->u16CurCount = end_cnt >> dc->is16bit; 535 Log3(("DMA position %d, size %d\n", end_cnt, (ch->u16BaseCount + 1) << dc->is16bit)); 536 } 537 538 static bool dmaRun(PPDMDEVINS pDevIns) 539 { 540 DMAState *s = PDMINS_2_DATA(pDevIns, DMAState *); 541 DMAControl *dc; 542 int ctlidx, chidx, mask; 543 544 /* Run all controllers and channels. */ 545 for (ctlidx = 0; ctlidx < 2; ++ctlidx) 546 { 547 dc = &s->DMAC[ctlidx]; 548 549 /* If controller is disabled, don't even bother. */ 550 if (dc->u8Command & CMD_DISABLE) 551 continue; 552 553 for (chidx = 0; chidx < 4; ++chidx) 554 { 555 mask = 1 << chidx; 556 if (!(dc->u8Mask & mask) && (dc->u8Status & (mask << 4))) 557 dmaRunChannel(s, ctlidx, chidx); 558 } 559 } 560 return 0; 561 } 562 563 static void dmaRegister(PPDMDEVINS pDevIns, unsigned channel, 564 PFNDMATRANSFERHANDLER handler, void *pvUser) 565 { 566 DMAState *s = PDMINS_2_DATA(pDevIns, DMAState *); 567 DMAChannel *ch = &s->DMAC[DMACH2C(channel)].ChState[channel & 3]; 568 569 LogFlow(("dmaRegister: s=%p channel=%u XferHandler=%p pvUser=%p\n", 570 s, channel, handler, pvUser)); 571 572 ch->pfnXferHandler = handler; 573 ch->pvUser = pvUser; 574 } 575 576 /* Reverse the order of bytes in a memory buffer. */ 577 static void dmaReverseBuf8(void *buf, unsigned len) 578 { 579 uint8_t *pBeg, *pEnd; 580 uint8_t temp; 581 582 pBeg = (uint8_t *)buf; 583 pEnd = pBeg + len - 1; 584 for (len = len / 2; len; --len) 585 { 586 temp = *pBeg; 587 *pBeg++ = *pEnd; 588 *pEnd-- = temp; 589 } 590 } 591 592 /* Reverse the order of words in a memory buffer. */ 593 static void dmaReverseBuf16(void *buf, unsigned len) 594 { 595 uint16_t *pBeg, *pEnd; 596 uint16_t temp; 597 598 Assert(!(len & 1)); 599 len /= 2; /* Convert to word count. 
*/ 600 pBeg = (uint16_t *)buf; 601 pEnd = pBeg + len - 1; 602 for (len = len / 2; len; --len) 603 { 604 temp = *pBeg; 605 *pBeg++ = *pEnd; 606 *pEnd-- = temp; 607 } 608 } 609 610 static uint32_t dmaReadMemory(PPDMDEVINS pDevIns, unsigned channel, 611 void *buf, uint32_t pos, uint32_t len) 612 { 613 DMAState *s = PDMINS_2_DATA(pDevIns, DMAState *); 614 DMAControl *dc = &s->DMAC[DMACH2C(channel)]; 615 DMAChannel *ch = &dc->ChState[channel & 3]; 616 uint32_t page, pagehi; 617 uint32_t addr; 618 619 LogFlow(("dmaReadMemory: s=%p channel=%u buf=%p pos=%u len=%u\n", 620 s, channel, buf, pos, len)); 621 622 /* Build the address for this transfer. */ 623 page = dc->au8Page[DMACH2PG(channel)] & ~dc->is16bit; 624 pagehi = dc->au8PageHi[DMACH2PG(channel)]; 625 addr = (pagehi << 24) | (page << 16) | (ch->u16CurAddr << dc->is16bit); 626 627 if (IS_MODE_DEC(ch->u8Mode)) 628 { 629 PDMDevHlpPhysRead(s->pDevIns, addr - pos - len, buf, len); 630 if (dc->is16bit) 631 dmaReverseBuf16(buf, len); 632 else 633 dmaReverseBuf8(buf, len); 634 } 635 else 636 PDMDevHlpPhysRead(s->pDevIns, addr + pos, buf, len); 637 638 return len; 639 } 640 641 static uint32_t dmaWriteMemory(PPDMDEVINS pDevIns, unsigned channel, 642 const void *buf, uint32_t pos, uint32_t len) 643 { 644 DMAState *s = PDMINS_2_DATA(pDevIns, DMAState *); 645 DMAControl *dc = &s->DMAC[DMACH2C(channel)]; 646 DMAChannel *ch = &dc->ChState[channel & 3]; 647 uint32_t page, pagehi; 648 uint32_t addr; 649 650 LogFlow(("dmaWriteMemory: s=%p channel=%u buf=%p pos=%u len=%u\n", 651 s, channel, buf, pos, len)); 652 653 /* Build the address for this transfer. */ 654 page = dc->au8Page[DMACH2PG(channel)] & ~dc->is16bit; 655 pagehi = dc->au8PageHi[DMACH2PG(channel)]; 656 addr = (pagehi << 24) | (page << 16) | (ch->u16CurAddr << dc->is16bit); 657 658 if (IS_MODE_DEC(ch->u8Mode)) 659 { 660 //@todo: This would need a temporary buffer. 
661 Assert(0); 662 #if 0 663 if (dc->is16bit) 664 dmaReverseBuf16(buf, len); 665 else 666 dmaReverseBuf8(buf, len); 65 667 #endif 66 67 /* #define DEBUG_DMA */ 68 69 #ifndef VBOX 70 #ifndef __WIN32__ 71 #define dolog(...) fprintf (stderr, "dma: " __VA_ARGS__) 72 #ifdef DEBUG_DMA 73 #define lwarn(...) fprintf (stderr, "dma: " __VA_ARGS__) 74 #define linfo(...) fprintf (stderr, "dma: " __VA_ARGS__) 75 #define ldebug(...) fprintf (stderr, "dma: " __VA_ARGS__) 76 #else 77 #define lwarn(...) 78 #define linfo(...) 79 #define ldebug(...) 80 #endif 81 #else 82 #define dolog() 83 #define lwarn() 84 #define linfo() 85 #define ldebug() 86 #endif 87 #else /* VBOX */ 88 89 # ifdef LOG_ENABLED 90 # define DEBUG_DMA 91 static void DMA_DPRINTF (const char *fmt, ...) 92 { 93 if (LogIsEnabled ()) { 94 va_list args; 95 va_start (args, fmt); 96 RTLogLogger (NULL, NULL, "dma: %N", fmt, &args); /* %N - nested va_list * type formatting call. */ 97 va_end (args); 668 PDMDevHlpPhysWrite(s->pDevIns, addr - pos - len, buf, len); 669 } 670 else 671 PDMDevHlpPhysWrite(s->pDevIns, addr + pos, buf, len); 672 673 return len; 674 } 675 676 static void dmaSetDREQ(PPDMDEVINS pDevIns, unsigned channel, unsigned level) 677 { 678 DMAState *s = PDMINS_2_DATA(pDevIns, DMAState *); 679 DMAControl *dc = &s->DMAC[DMACH2C(channel)]; 680 int chidx; 681 682 LogFlow(("dmaSetDREQ: s=%p channel=%u level=%u\n", s, channel, level)); 683 684 chidx = channel & 3; 685 if (level) 686 dc->u8Status |= 1 << (chidx + 4); 687 else 688 dc->u8Status &= ~(1 << (chidx + 4)); 689 } 690 691 static uint8_t dmaGetChannelMode(PPDMDEVINS pDevIns, unsigned channel) 692 { 693 DMAState *s = PDMINS_2_DATA(pDevIns, DMAState *); 694 695 LogFlow(("dmaGetChannelMode: s=%p channel=%u\n", s, channel)); 696 697 return s->DMAC[DMACH2C(channel)].ChState[channel & 3].u8Mode; 698 } 699 700 static void dmaReset(PPDMDEVINS pDevIns) 701 { 702 DMAState *s = PDMINS_2_DATA(pDevIns, DMAState *); 703 704 LogFlow(("dmaReset: s=%p\n", s)); 705 706 /* NB: The 
page and address registers are unaffected by a reset 707 * and in an undefined state after power-up. 708 */ 709 dmaClear(&s->DMAC[0]); 710 dmaClear(&s->DMAC[1]); 711 } 712 713 /* Register DMA I/O port handlers. */ 714 static void dmaIORegister(PPDMDEVINS pDevIns, bool bHighPage) 715 { 716 DMAState *s = PDMINS_2_DATA(pDevIns, DMAState *); 717 DMAControl *dc8; 718 DMAControl *dc16; 719 720 dc8 = &s->DMAC[0]; 721 dc16 = &s->DMAC[1]; 722 723 dc8->is16bit = false; 724 dc16->is16bit = true; 725 726 /* Base and current address for each channel. */ 727 PDMDevHlpIOPortRegister(s->pDevIns, 0x00, 8, dc8, 728 dmaWriteAddr, dmaReadAddr, NULL, NULL, "DMA8 Address"); 729 PDMDevHlpIOPortRegister(s->pDevIns, 0xC0, 16, dc16, 730 dmaWriteAddr, dmaReadAddr, NULL, NULL, "DMA16 Address"); 731 /* Control registers for both DMA controllers. */ 732 PDMDevHlpIOPortRegister(s->pDevIns, 0x08, 8, dc8, 733 dmaWriteCtl, dmaReadCtl, NULL, NULL, "DMA8 Control"); 734 PDMDevHlpIOPortRegister(s->pDevIns, 0xD0, 16, dc16, 735 dmaWriteCtl, dmaReadCtl, NULL, NULL, "DMA16 Control"); 736 /* Page registers for each channel (plus a few unused ones). */ 737 PDMDevHlpIOPortRegister(s->pDevIns, 0x80, 8, dc8, 738 dmaWritePage, dmaReadPage, NULL, NULL, "DMA8 Page"); 739 PDMDevHlpIOPortRegister(s->pDevIns, 0x88, 8, dc16, 740 dmaWritePage, dmaReadPage, NULL, NULL, "DMA16 Page"); 741 /* Optional EISA style high page registers (address bits 24-31). */ 742 if (bHighPage) 743 { 744 PDMDevHlpIOPortRegister(s->pDevIns, 0x480, 8, dc8, 745 dmaWriteHiPage, dmaReadHiPage, NULL, NULL, "DMA8 Page High"); 746 PDMDevHlpIOPortRegister(s->pDevIns, 0x488, 8, dc16, 747 dmaWriteHiPage, dmaReadHiPage, NULL, NULL, "DMA16 Page High"); 748 } 749 } 750 751 static void dmaSaveController(PSSMHANDLE pSSMHandle, DMAControl *dc) 752 { 753 int chidx; 754 755 /* Save controller state... 
*/ 756 SSMR3PutU8(pSSMHandle, dc->u8Command); 757 SSMR3PutU8(pSSMHandle, dc->u8Mask); 758 SSMR3PutU8(pSSMHandle, dc->bHiByte); 759 SSMR3PutU32(pSSMHandle, dc->is16bit); 760 SSMR3PutU8(pSSMHandle, dc->u8Status); 761 SSMR3PutU8(pSSMHandle, dc->u8Temp); 762 SSMR3PutU8(pSSMHandle, dc->u8ModeCtr); 763 SSMR3PutMem(pSSMHandle, &dc->au8Page, sizeof(dc->au8Page)); 764 SSMR3PutMem(pSSMHandle, &dc->au8PageHi, sizeof(dc->au8PageHi)); 765 766 /* ...and all four of its channels. */ 767 for (chidx = 0; chidx < 4; ++chidx) 768 { 769 DMAChannel *ch = &dc->ChState[chidx]; 770 771 SSMR3PutU16(pSSMHandle, ch->u16CurAddr); 772 SSMR3PutU16(pSSMHandle, ch->u16CurCount); 773 SSMR3PutU16(pSSMHandle, ch->u16BaseAddr); 774 SSMR3PutU16(pSSMHandle, ch->u16BaseCount); 775 SSMR3PutU8(pSSMHandle, ch->u8Mode); 776 } 777 } 778 779 static int dmaLoadController(PSSMHANDLE pSSMHandle, DMAControl *dc, int version) 780 { 781 uint8_t u8val; 782 uint32_t u32val; 783 int chidx; 784 785 SSMR3GetU8(pSSMHandle, &dc->u8Command); 786 SSMR3GetU8(pSSMHandle, &dc->u8Mask); 787 SSMR3GetU8(pSSMHandle, &u8val); 788 dc->bHiByte = !!u8val; 789 SSMR3GetU32(pSSMHandle, &dc->is16bit); 790 if (version > DMA_SAVESTATE_OLD) 791 { 792 SSMR3GetU8(pSSMHandle, &dc->u8Status); 793 SSMR3GetU8(pSSMHandle, &dc->u8Temp); 794 SSMR3GetU8(pSSMHandle, &dc->u8ModeCtr); 795 SSMR3GetMem(pSSMHandle, &dc->au8Page, sizeof(dc->au8Page)); 796 SSMR3GetMem(pSSMHandle, &dc->au8PageHi, sizeof(dc->au8PageHi)); 797 } 798 799 for (chidx = 0; chidx < 4; ++chidx) 800 { 801 DMAChannel *ch = &dc->ChState[chidx]; 802 803 if (version == DMA_SAVESTATE_OLD) 804 { 805 /* Convert from 17-bit to 16-bit format. */ 806 SSMR3GetU32(pSSMHandle, &u32val); 807 ch->u16CurAddr = u32val >> dc->is16bit; 808 SSMR3GetU32(pSSMHandle, &u32val); 809 ch->u16CurCount = u32val >> dc->is16bit; 98 810 } 99 } 100 # else 101 DECLINLINE(void) DMA_DPRINTF(const char *pszFmt, ...) 
{} 102 # endif 103 104 # define dolog DMA_DPRINTF 105 # define lwarn DMA_DPRINTF 106 # define linfo DMA_DPRINTF 107 # define ldebug DMA_DPRINTF 108 109 #endif /* VBOX */ 110 111 #define LENOFA(a) ((int) (sizeof(a)/sizeof(a[0]))) 112 113 struct dma_regs { 114 unsigned int now[2]; 115 uint16_t base[2]; 116 uint8_t mode; 117 uint8_t page; 118 uint8_t pageh; 119 uint8_t dack; 120 uint8_t eop; 121 DMA_transfer_handler transfer_handler; 122 void *opaque; 123 }; 124 125 #define ADDR 0 126 #define COUNT 1 127 128 struct dma_cont { 129 uint8_t status; 130 uint8_t command; 131 uint8_t mask; 132 uint8_t flip_flop; 133 unsigned int dshift; 134 struct dma_regs regs[4]; 135 }; 136 137 typedef struct { 138 PPDMDEVINS pDevIns; 139 PCPDMDMACHLP pHlp; 140 struct dma_cont dma_controllers[2]; 141 } DMAState; 142 143 enum { 144 CMD_MEMORY_TO_MEMORY = 0x01, 145 CMD_FIXED_ADDRESS = 0x02, 146 CMD_BLOCK_CONTROLLER = 0x04, 147 CMD_COMPRESSED_TIME = 0x08, 148 CMD_CYCLIC_PRIORITY = 0x10, 149 CMD_EXTENDED_WRITE = 0x20, 150 CMD_LOW_DREQ = 0x40, 151 CMD_LOW_DACK = 0x80, 152 CMD_NOT_SUPPORTED = CMD_MEMORY_TO_MEMORY | CMD_FIXED_ADDRESS 153 | CMD_COMPRESSED_TIME | CMD_CYCLIC_PRIORITY | CMD_EXTENDED_WRITE 154 | CMD_LOW_DREQ | CMD_LOW_DACK 155 156 }; 157 158 static int channels[8] = {-1, 2, 3, 1, -1, -1, -1, 0}; 159 160 static void write_page (void *opaque, uint32_t nport, uint32_t data) 161 { 162 struct dma_cont *d = (struct dma_cont*)opaque; 163 int ichan; 164 165 ichan = channels[nport & 7]; 166 if (-1 == ichan) { 167 dolog ("invalid channel %#x %#x\n", nport, data); 168 return; 169 } 170 d->regs[ichan].page = data; 171 } 172 173 static void write_pageh (void *opaque, uint32_t nport, uint32_t data) 174 { 175 struct dma_cont *d = (struct dma_cont*)opaque; 176 int ichan; 177 178 ichan = channels[nport & 7]; 179 if (-1 == ichan) { 180 dolog ("invalid channel %#x %#x\n", nport, data); 181 return; 182 } 183 d->regs[ichan].pageh = data; 184 } 185 186 static uint32_t read_page (void *opaque, uint32_t 
nport) 187 { 188 struct dma_cont *d = (struct dma_cont*)opaque; 189 int ichan; 190 191 ichan = channels[nport & 7]; 192 if (-1 == ichan) { 193 dolog ("invalid channel read %#x\n", nport); 194 return 0; 195 } 196 return d->regs[ichan].page; 197 } 198 199 static uint32_t read_pageh (void *opaque, uint32_t nport) 200 { 201 struct dma_cont *d = (struct dma_cont*)opaque; 202 int ichan; 203 204 ichan = channels[nport & 7]; 205 if (-1 == ichan) { 206 dolog ("invalid channel read %#x\n", nport); 207 return 0; 208 } 209 return d->regs[ichan].pageh; 210 } 211 212 static inline void init_chan (struct dma_cont *d, int ichan) 213 { 214 struct dma_regs *r; 215 216 r = d->regs + ichan; 217 r->now[ADDR] = r->base[ADDR] << d->dshift; 218 r->now[COUNT] = 0; 219 } 220 221 static inline int getff (struct dma_cont *d) 222 { 223 int ff; 224 225 ff = d->flip_flop; 226 d->flip_flop = !ff; 227 return ff; 228 } 229 230 static uint32_t read_chan (void *opaque, uint32_t nport) 231 { 232 struct dma_cont *d = (struct dma_cont*)opaque; 233 int ichan, nreg, iport, ff, val, dir; 234 struct dma_regs *r; 235 236 iport = (nport >> d->dshift) & 0x0f; 237 ichan = iport >> 1; 238 nreg = iport & 1; 239 r = d->regs + ichan; 240 241 dir = ((r->mode >> 5) & 1) ? 
-1 : 1; 242 ff = getff (d); 243 if (nreg) 244 val = (r->base[COUNT] << d->dshift) - r->now[COUNT]; 245 else 246 val = r->now[ADDR] + r->now[COUNT] * dir; 247 248 ldebug ("read_chan %#x -> %d\n", iport, val); 249 return (val >> (d->dshift + (ff << 3))) & 0xff; 250 } 251 252 static void write_chan (void *opaque, uint32_t nport, uint32_t data) 253 { 254 struct dma_cont *d = (struct dma_cont*)opaque; 255 int iport, ichan, nreg; 256 struct dma_regs *r; 257 258 iport = (nport >> d->dshift) & 0x0f; 259 ichan = iport >> 1; 260 nreg = iport & 1; 261 r = d->regs + ichan; 262 if (getff (d)) { 263 r->base[nreg] = (r->base[nreg] & 0xff) | ((data << 8) & 0xff00); 264 init_chan (d, ichan); 265 } else { 266 r->base[nreg] = (r->base[nreg] & 0xff00) | (data & 0xff); 267 } 268 } 269 270 static void write_cont (void *opaque, uint32_t nport, uint32_t data) 271 { 272 struct dma_cont *d = (struct dma_cont*)opaque; 273 int iport, ichan = 0; 274 275 iport = (nport >> d->dshift) & 0x0f; 276 switch (iport) { 277 case 0x08: /* command */ 278 if ((data != 0) && (data & CMD_NOT_SUPPORTED)) { 279 dolog ("command %#x not supported\n", data); 280 return; 811 else 812 { 813 SSMR3GetU16(pSSMHandle, &ch->u16CurAddr); 814 SSMR3GetU16(pSSMHandle, &ch->u16CurCount); 281 815 } 282 d->command = data; 283 break; 284 285 case 0x09: 286 ichan = data & 3; 287 if (data & 4) { 288 d->status |= 1 << (ichan + 4); 816 SSMR3GetU16(pSSMHandle, &ch->u16BaseAddr); 817 SSMR3GetU16(pSSMHandle, &ch->u16BaseCount); 818 SSMR3GetU8(pSSMHandle, &ch->u8Mode); 819 /* Convert from old save state. */ 820 if (version == DMA_SAVESTATE_OLD) 821 { 822 /* Remap page register contents. */ 823 SSMR3GetU8(pSSMHandle, &u8val); 824 dc->au8Page[DMACX2PG(chidx)] = u8val; 825 SSMR3GetU8(pSSMHandle, &u8val); 826 dc->au8PageHi[DMACX2PG(chidx)] = u8val; 827 /* Throw away dack, eop. 
*/ 828 SSMR3GetU8(pSSMHandle, &u8val); 829 SSMR3GetU8(pSSMHandle, &u8val); 289 830 } 290 else { 291 d->status &= ~(1 << (ichan + 4)); 292 } 293 d->status &= ~(1 << ichan); 294 break; 295 296 case 0x0a: /* single mask */ 297 if (data & 4) 298 d->mask |= 1 << (data & 3); 299 else 300 d->mask &= ~(1 << (data & 3)); 301 break; 302 303 case 0x0b: /* mode */ 304 { 305 ichan = data & 3; 306 #ifdef DEBUG_DMA 307 { 308 int op, ai, dir, opmode; 309 op = (data >> 2) & 3; 310 ai = (data >> 4) & 1; 311 dir = (data >> 5) & 1; 312 opmode = (data >> 6) & 3; 313 314 linfo ("ichan %d, op %d, ai %d, dir %d, opmode %d\n", 315 ichan, op, ai, dir, opmode); 316 } 317 #endif 318 d->regs[ichan].mode = data; 319 break; 320 } 321 322 case 0x0c: /* clear flip flop */ 323 d->flip_flop = 0; 324 break; 325 326 case 0x0d: /* reset */ 327 d->flip_flop = 0; 328 d->mask = ~0; 329 d->status = 0; 330 d->command = 0; 331 break; 332 333 case 0x0e: /* clear mask for all channels */ 334 d->mask = 0; 335 break; 336 337 case 0x0f: /* write mask for all channels */ 338 d->mask = data; 339 break; 340 341 default: 342 dolog ("unknown iport %#x\n", iport); 343 break; 344 } 345 346 #ifdef DEBUG_DMA 347 if (0xc != iport) { 348 linfo ("write_cont: nport %#06x, ichan % 2d, val %#06x\n", 349 nport, ichan, data); 350 } 351 #endif 352 } 353 354 static uint32_t read_cont (void *opaque, uint32_t nport) 355 { 356 struct dma_cont *d = (struct dma_cont*)opaque; 357 int iport, val; 358 359 iport = (nport >> d->dshift) & 0x0f; 360 switch (iport) { 361 case 0x08: /* status */ 362 val = d->status; 363 d->status &= 0xf0; 364 break; 365 case 0x0f: /* mask */ 366 val = d->mask; 367 break; 368 default: 369 val = 0; 370 break; 371 } 372 373 ldebug ("read_cont: nport %#06x, iport %#04x val %#x\n", nport, iport, val); 374 return val; 375 } 376 377 static uint8_t DMA_get_channel_mode (DMAState *s, int nchan) 378 { 379 return s->dma_controllers[nchan > 3].regs[nchan & 3].mode; 380 } 381 382 static void DMA_hold_DREQ (DMAState *s, int 
nchan) 383 { 384 int ncont, ichan; 385 386 ncont = nchan > 3; 387 ichan = nchan & 3; 388 linfo ("held cont=%d chan=%d\n", ncont, ichan); 389 s->dma_controllers[ncont].status |= 1 << (ichan + 4); 390 } 391 392 static void DMA_release_DREQ (DMAState *s, int nchan) 393 { 394 int ncont, ichan; 395 396 ncont = nchan > 3; 397 ichan = nchan & 3; 398 linfo ("released cont=%d chan=%d\n", ncont, ichan); 399 s->dma_controllers[ncont].status &= ~(1 << (ichan + 4)); 400 } 401 402 static void channel_run (DMAState *s, int ncont, int ichan) 403 { 404 int n; 405 struct dma_regs *r = &s->dma_controllers[ncont].regs[ichan]; 406 #ifdef DEBUG_DMA 407 int dir, opmode; 408 409 dir = (r->mode >> 5) & 1; 410 opmode = (r->mode >> 6) & 3; 411 412 if (dir) { 413 dolog ("DMA in address decrement mode\n"); 414 } 415 if (opmode != 1) { 416 dolog ("DMA not in single mode select %#x\n", opmode); 417 } 418 #endif 419 420 r = s->dma_controllers[ncont].regs + ichan; 421 n = r->transfer_handler (s->pDevIns, r->opaque, ichan + (ncont << 2), 422 r->now[COUNT], (r->base[COUNT] + 1) << ncont); 423 r->now[COUNT] = n; 424 ldebug ("dma_pos %d size %d\n", n, (r->base[COUNT] + 1) << ncont); 425 } 426 427 static void DMA_run (DMAState *s) 428 { 429 struct dma_cont *d; 430 int icont, ichan; 431 432 d = s->dma_controllers; 433 434 for (icont = 0; icont < 2; icont++, d++) { 435 for (ichan = 0; ichan < 4; ichan++) { 436 int mask; 437 438 mask = 1 << ichan; 439 440 if ((0 == (d->mask & mask)) && (0 != (d->status & (mask << 4)))) 441 channel_run (s, icont, ichan); 442 } 443 } 444 } 445 446 static void DMA_register_channel (DMAState *s, unsigned nchan, 447 DMA_transfer_handler transfer_handler, 448 void *opaque) 449 { 450 struct dma_regs *r; 451 int ichan, ncont; 452 LogFlow (("DMA_register_channel: s=%p nchan=%d transfer_handler=%p opaque=%p\n", 453 s, nchan, transfer_handler, opaque)); 454 455 ncont = nchan > 3; 456 ichan = nchan & 3; 457 458 r = s->dma_controllers[ncont].regs + ichan; 459 r->transfer_handler = 
transfer_handler; 460 r->opaque = opaque; 461 } 462 463 static uint32_t DMA_read_memory (DMAState *s, 464 unsigned nchan, 465 void *buf, 466 uint32_t pos, 467 uint32_t len) 468 { 469 struct dma_regs *r = &s->dma_controllers[nchan > 3].regs[nchan & 3]; 470 uint32_t addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR]; 471 472 if (r->mode & 0x20) { 473 unsigned i; 474 uint8_t *p = (uint8_t*)buf; 475 476 #ifdef VBOX 477 PDMDevHlpPhysRead (s->pDevIns, addr - pos - len, buf, len); 478 #else 479 cpu_physical_memory_read (addr - pos - len, buf, len); 480 #endif 481 /* What about 16bit transfers? */ 482 for (i = 0; i < len >> 1; i++) { 483 uint8_t b = p[len - i - 1]; 484 p[i] = b; 485 } 486 } 487 else 488 #ifdef VBOX 489 PDMDevHlpPhysRead (s->pDevIns, addr + pos, buf, len); 490 #else 491 cpu_physical_memory_read (addr + pos, buf, len); 492 #endif 493 return len; 494 } 495 496 static uint32_t DMA_write_memory (DMAState *s, 497 unsigned nchan, 498 const void *buf, 499 uint32_t pos, 500 uint32_t len) 501 { 502 struct dma_regs *r = &s->dma_controllers[nchan > 3].regs[nchan & 3]; 503 uint32_t addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR]; 504 505 if (r->mode & 0x20) { 506 unsigned i; 507 uint8_t *p = (uint8_t *) buf; 508 509 #ifdef VBOX 510 PDMDevHlpPhysWrite (s->pDevIns, addr - pos - len, buf, len); 511 #else 512 cpu_physical_memory_write (addr - pos - len, buf, len); 513 #endif 514 /* What about 16bit transfers? 
*/ 515 for (i = 0; i < len; i++) { 516 uint8_t b = p[len - i - 1]; 517 p[i] = b; 518 } 519 } 520 else 521 #ifdef VBOX 522 PDMDevHlpPhysWrite (s->pDevIns, addr + pos, buf, len); 523 #else 524 cpu_physical_memory_write (addr + pos, buf, len); 525 #endif 526 527 return len; 528 } 529 530 531 #ifndef VBOX 532 /* request the emulator to transfer a new DMA memory block ASAP */ 533 void DMA_schedule(int nchan) 534 { 535 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_EXIT); 536 } 537 #endif 538 539 static void dma_reset(void *opaque) 540 { 541 struct dma_cont *d = (struct dma_cont*)opaque; 542 write_cont (d, (0x0d << d->dshift), 0); 543 } 544 545 #ifdef VBOX 546 #define IO_READ_PROTO(n) \ 547 static DECLCALLBACK(int) io_read_##n (PPDMDEVINS pDevIns, \ 548 void *pvUser, \ 549 RTIOPORT Port, \ 550 uint32_t *pu32, \ 551 unsigned cb) 552 553 554 #define IO_WRITE_PROTO(n) \ 555 static DECLCALLBACK(int) io_write_##n (PPDMDEVINS pDevIns, \ 556 void *pvUser, \ 557 RTIOPORT Port, \ 558 uint32_t u32, \ 559 unsigned cb) 560 561 IO_WRITE_PROTO (chan) 562 { 563 if (cb == 1) { 564 write_chan (pvUser, Port, u32); 565 } 566 #ifdef PARANOID 567 else { 568 Log (("Unknown write to %#x of size %d, value %#x\n", 569 Port, cb, u32)); 570 } 571 #endif 831 } 832 return 0; 833 } 834 835 static DECLCALLBACK(int) dmaSaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSMHandle) 836 { 837 DMAState *s = PDMINS_2_DATA(pDevIns, DMAState *); 838 839 dmaSaveController(pSSMHandle, &s->DMAC[0]); 840 dmaSaveController(pSSMHandle, &s->DMAC[1]); 572 841 return VINF_SUCCESS; 573 842 } 574 843 575 IO_WRITE_PROTO (page) 576 { 577 if (cb == 1) { 578 write_page (pvUser, Port, u32); 579 } 580 #ifdef PARANOID 581 else { 582 Log (("Unknown write to %#x of size %d, value %#x\n", 583 Port, cb, u32)); 584 } 585 #endif 586 return VINF_SUCCESS; 587 } 588 589 IO_WRITE_PROTO (pageh) 590 { 591 if (cb == 1) { 592 write_pageh (pvUser, Port, u32); 593 } 594 #ifdef PARANOID 595 else { 596 Log (("Unknown write to %#x of size %d, value %#x\n", 
597 Port, cb, u32)); 598 } 599 #endif 600 return VINF_SUCCESS; 601 } 602 603 IO_WRITE_PROTO (cont) 604 { 605 if (cb == 1) { 606 write_cont (pvUser, Port, u32); 607 } 608 #ifdef PARANOID 609 else { 610 Log (("Unknown write to %#x of size %d, value %#x\n", 611 Port, cb, u32)); 612 } 613 #endif 614 return VINF_SUCCESS; 615 } 616 617 IO_READ_PROTO (chan) 618 { 619 if (cb == 1) { 620 *pu32 = read_chan (pvUser, Port); 621 return VINF_SUCCESS; 622 } 623 else { 624 return VERR_IOM_IOPORT_UNUSED; 625 } 626 } 627 628 IO_READ_PROTO (page) 629 { 630 if (cb == 1) { 631 *pu32 = read_page (pvUser, Port); 632 return VINF_SUCCESS; 633 } 634 else { 635 return VERR_IOM_IOPORT_UNUSED; 636 } 637 } 638 639 IO_READ_PROTO (pageh) 640 { 641 if (cb == 1) { 642 *pu32 = read_pageh (pvUser, Port); 643 return VINF_SUCCESS; 644 } 645 else { 646 return VERR_IOM_IOPORT_UNUSED; 647 } 648 } 649 650 IO_READ_PROTO (cont) 651 { 652 if (cb == 1) { 653 *pu32 = read_cont (pvUser, Port); 654 return VINF_SUCCESS; 655 } 656 else { 657 return VERR_IOM_IOPORT_UNUSED; 658 } 659 } 660 #endif 661 662 /* dshift = 0: 8 bit DMA, 1 = 16 bit DMA */ 663 static void dma_init2(DMAState *s, struct dma_cont *d, int base, int dshift, 664 int page_base, int pageh_base) 665 { 666 const static int page_port_list[] = { 0x1, 0x2, 0x3, 0x7 }; 667 int i; 668 669 d->dshift = dshift; 670 for (i = 0; i < 8; i++) { 671 #ifdef VBOX 672 PDMDevHlpIOPortRegister (s->pDevIns, base + (i << dshift), 1, d, 673 io_write_chan, io_read_chan, NULL, NULL, "DMA"); 674 #else 675 register_ioport_write (base + (i << dshift), 1, 1, write_chan, d); 676 register_ioport_read (base + (i << dshift), 1, 1, read_chan, d); 677 #endif 678 } 679 for (i = 0; i < LENOFA (page_port_list); i++) { 680 #ifdef VBOX 681 PDMDevHlpIOPortRegister (s->pDevIns, page_base + page_port_list[i], 1, d, 682 io_write_page, io_read_page, NULL, NULL, "DMA Page"); 683 #else 684 register_ioport_write (page_base + page_port_list[i], 1, 1, 685 write_page, d); 686 register_ioport_read 
(page_base + page_port_list[i], 1, 1, 687 read_page, d); 688 #endif 689 if (pageh_base >= 0) { 690 #ifdef VBOX 691 PDMDevHlpIOPortRegister (s->pDevIns, pageh_base + page_port_list[i], 1, d, 692 io_write_pageh, io_read_pageh, NULL, NULL, "DMA Page High"); 693 #else 694 register_ioport_write (pageh_base + page_port_list[i], 1, 1, 695 write_pageh, d); 696 register_ioport_read (pageh_base + page_port_list[i], 1, 1, 697 read_pageh, d); 698 #endif 699 } 700 } 701 for (i = 0; i < 8; i++) { 702 #ifdef VBOX 703 PDMDevHlpIOPortRegister (s->pDevIns, base + ((i + 8) << dshift), 1, d, 704 io_write_cont, io_read_cont, NULL, NULL, "DMA cont"); 705 #else 706 register_ioport_write (base + ((i + 8) << dshift), 1, 1, 707 write_cont, d); 708 register_ioport_read (base + ((i + 8) << dshift), 1, 1, 709 read_cont, d); 710 #endif 711 } 712 #ifndef VBOX 713 qemu_register_reset(dma_reset, d); 714 #endif 715 dma_reset(d); 716 } 717 718 static void dma_save (QEMUFile *f, void *opaque) 719 { 720 struct dma_cont *d = (struct dma_cont*)opaque; 721 int i; 722 723 /* qemu_put_8s (f, &d->status); */ 724 qemu_put_8s (f, &d->command); 725 qemu_put_8s (f, &d->mask); 726 qemu_put_8s (f, &d->flip_flop); 727 qemu_put_be32s (f, &d->dshift); 728 729 for (i = 0; i < 4; ++i) { 730 struct dma_regs *r = &d->regs[i]; 731 qemu_put_be32s (f, &r->now[0]); 732 qemu_put_be32s (f, &r->now[1]); 733 qemu_put_be16s (f, &r->base[0]); 734 qemu_put_be16s (f, &r->base[1]); 735 qemu_put_8s (f, &r->mode); 736 qemu_put_8s (f, &r->page); 737 qemu_put_8s (f, &r->pageh); 738 qemu_put_8s (f, &r->dack); 739 qemu_put_8s (f, &r->eop); 740 } 741 } 742 743 static int dma_load (QEMUFile *f, void *opaque, int version_id) 744 { 745 struct dma_cont *d = (struct dma_cont*)opaque; 746 int i; 747 748 if (version_id != 1) 749 #ifdef VBOX 750 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION; 751 #else 752 return -EINVAL; 753 #endif 754 755 /* qemu_get_8s (f, &d->status); */ 756 qemu_get_8s (f, &d->command); 757 qemu_get_8s (f, &d->mask); 758 
qemu_get_8s (f, &d->flip_flop); 759 qemu_get_be32s (f, &d->dshift); 760 761 for (i = 0; i < 4; ++i) { 762 struct dma_regs *r = &d->regs[i]; 763 qemu_get_be32s (f, &r->now[0]); 764 qemu_get_be32s (f, &r->now[1]); 765 qemu_get_be16s (f, &r->base[0]); 766 qemu_get_be16s (f, &r->base[1]); 767 qemu_get_8s (f, &r->mode); 768 qemu_get_8s (f, &r->page); 769 qemu_get_8s (f, &r->pageh); 770 qemu_get_8s (f, &r->dack); 771 qemu_get_8s (f, &r->eop); 772 } 773 return 0; 774 } 775 776 #ifndef VBOX 777 void DMA_init (int high_page_enable) 778 { 779 dma_init2(&dma_controllers[0], 0x00, 0, 0x80, 780 high_page_enable ? 0x480 : -1); 781 dma_init2(&dma_controllers[1], 0xc0, 1, 0x88, 782 high_page_enable ? 0x488 : -1); 783 register_savevm ("dma", 0, 1, dma_save, dma_load, &dma_controllers[0]); 784 register_savevm ("dma", 1, 1, dma_save, dma_load, &dma_controllers[1]); 785 } 786 #endif 787 788 #ifdef VBOX 789 static bool run_wrapper (PPDMDEVINS pDevIns) 790 { 791 DMA_run (PDMINS_2_DATA (pDevIns, DMAState *)); 792 return 0; 793 } 794 795 static void register_channel_wrapper (PPDMDEVINS pDevIns, 796 unsigned nchan, 797 PFNDMATRANSFERHANDLER f, 798 void *opaque) 799 { 800 DMAState *s = PDMINS_2_DATA (pDevIns, DMAState *); 801 DMA_register_channel (s, nchan, f, opaque); 802 } 803 804 static uint32_t rd_mem_wrapper (PPDMDEVINS pDevIns, 805 unsigned nchan, 806 void *buf, 807 uint32_t pos, 808 uint32_t len) 809 { 810 DMAState *s = PDMINS_2_DATA (pDevIns, DMAState *); 811 return DMA_read_memory (s, nchan, buf, pos, len); 812 } 813 814 static uint32_t wr_mem_wrapper (PPDMDEVINS pDevIns, 815 unsigned nchan, 816 const void *buf, 817 uint32_t pos, 818 uint32_t len) 819 { 820 DMAState *s = PDMINS_2_DATA (pDevIns, DMAState *); 821 return DMA_write_memory (s, nchan, buf, pos, len); 822 } 823 824 static void set_DREQ_wrapper (PPDMDEVINS pDevIns, 825 unsigned nchan, 826 unsigned level) 827 { 828 DMAState *s = PDMINS_2_DATA (pDevIns, DMAState *); 829 if (level) { 830 DMA_hold_DREQ (s, nchan); 831 } 832 
else { 833 DMA_release_DREQ (s, nchan); 834 } 835 } 836 837 static uint8_t get_mode_wrapper (PPDMDEVINS pDevIns, unsigned nchan) 838 { 839 DMAState *s = PDMINS_2_DATA (pDevIns, DMAState *); 840 return DMA_get_channel_mode (s, nchan); 841 } 842 843 static void dmaReset (PPDMDEVINS pDevIns) 844 { 845 DMAState *s = PDMINS_2_DATA (pDevIns, DMAState *); 846 dma_reset (&s->dma_controllers[0]); 847 dma_reset (&s->dma_controllers[1]); 848 } 849 850 static DECLCALLBACK(int) dmaSaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSMHandle) 851 { 852 DMAState *s = PDMINS_2_DATA (pDevIns, DMAState *); 853 dma_save (pSSMHandle, &s->dma_controllers[0]); 854 dma_save (pSSMHandle, &s->dma_controllers[1]); 855 return VINF_SUCCESS; 856 } 857 858 static DECLCALLBACK(int) dmaLoadExec (PPDMDEVINS pDevIns, 859 PSSMHANDLE pSSMHandle, 860 uint32_t uVersion, 861 uint32_t uPass) 862 { 863 DMAState *s = PDMINS_2_DATA (pDevIns, DMAState *); 864 865 AssertMsgReturn (uVersion == 1, ("%d\n", uVersion), VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION); 866 Assert (uPass == SSM_PASS_FINAL); NOREF(uPass); 867 868 dma_load (pSSMHandle, &s->dma_controllers[0], uVersion); 869 return dma_load (pSSMHandle, &s->dma_controllers[1], uVersion); 844 static DECLCALLBACK(int) dmaLoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSMHandle, 845 uint32_t uVersion, uint32_t uPass) 846 { 847 DMAState *s = PDMINS_2_DATA(pDevIns, DMAState *); 848 849 AssertMsgReturn(uVersion <= DMA_SAVESTATE_CURRENT, ("%d\n", uVersion), VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION); 850 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass); 851 852 dmaLoadController(pSSMHandle, &s->DMAC[0], uVersion); 853 return dmaLoadController(pSSMHandle, &s->DMAC[1], uVersion); 870 854 } 871 855 … … 873 857 * @interface_method_impl{PDMDEVREG,pfnConstruct} 874 858 */ 875 static DECLCALLBACK(int) dmaConstruct(PPDMDEVINS pDevIns, 876 int iInstance, 877 PCFGMNODE pCfg) 878 { 879 DMAState *s = PDMINS_2_DATA (pDevIns, DMAState *); 880 bool high_page_enable = 0; 881 PDMDMACREG reg; 882 int rc; 
859 static DECLCALLBACK(int) dmaConstruct(PPDMDEVINS pDevIns, int iInstance, PCFGMNODE pCfg) 860 { 861 DMAState *s = PDMINS_2_DATA(pDevIns, DMAState *); 862 bool bHighPage = false; 863 PDMDMACREG reg; 864 int rc; 883 865 884 866 s->pDevIns = pDevIns; … … 891 873 892 874 #if 0 893 rc = CFGMR3QueryBool (pCfg, "HighPageEnable", &high_page_enable);894 if (RT_FAILURE (rc)) {875 rc = CFGMR3QueryBool(pCfg, "HighPageEnable", &bHighPage); 876 if (RT_FAILURE (rc)) 895 877 return rc; 896 }897 878 #endif 898 879 899 dma_init2(s, &s->dma_controllers[0], 0x00, 0, 0x80, 900 high_page_enable ? 0x480 : -1); 901 dma_init2(s, &s->dma_controllers[1], 0xc0, 1, 0x88, 902 high_page_enable ? 0x488 : -1); 880 dmaIORegister(pDevIns, bHighPage); 881 dmaReset(pDevIns); 903 882 904 883 reg.u32Version = PDM_DMACREG_VERSION; 905 reg.pfnRun = run_wrapper;906 reg.pfnRegister = register_channel_wrapper;907 reg.pfnReadMemory = rd_mem_wrapper;908 reg.pfnWriteMemory = wr_mem_wrapper;909 reg.pfnSetDREQ = set_DREQ_wrapper;910 reg.pfnGetChannelMode = get_mode_wrapper;911 912 rc = PDMDevHlpDMACRegister 913 if (RT_FAILURE (rc)) {884 reg.pfnRun = dmaRun; 885 reg.pfnRegister = dmaRegister; 886 reg.pfnReadMemory = dmaReadMemory; 887 reg.pfnWriteMemory = dmaWriteMemory; 888 reg.pfnSetDREQ = dmaSetDREQ; 889 reg.pfnGetChannelMode = dmaGetChannelMode; 890 891 rc = PDMDevHlpDMACRegister(pDevIns, ®, &s->pHlp); 892 if (RT_FAILURE (rc)) 914 893 return rc; 915 } 916 917 rc = PDMDevHlpSSMRegister (pDevIns, 1 /*uVersion*/, sizeof (*s),dmaSaveExec, dmaLoadExec);894 895 rc = PDMDevHlpSSMRegister(pDevIns, DMA_SAVESTATE_CURRENT, sizeof(*s), 896 dmaSaveExec, dmaLoadExec); 918 897 if (RT_FAILURE(rc)) 919 898 return rc; … … 976 955 PDM_DEVREG_VERSION 977 956 }; 978 #endif /* VBOX */
Note:
See TracChangeset
for help on using the changeset viewer.